LLVM 23.0.0git
HWAddressSanitizer.cpp
Go to the documentation of this file.
1//===- HWAddressSanitizer.cpp - memory access error detector --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file is a part of HWAddressSanitizer, an address basic correctness
11/// checker based on tagged addressing.
12//===----------------------------------------------------------------------===//
13
15#include "llvm/ADT/MapVector.h"
16#include "llvm/ADT/STLExtras.h"
18#include "llvm/ADT/Statistic.h"
20#include "llvm/ADT/StringRef.h"
32#include "llvm/IR/Attributes.h"
33#include "llvm/IR/BasicBlock.h"
34#include "llvm/IR/Constant.h"
35#include "llvm/IR/Constants.h"
36#include "llvm/IR/DataLayout.h"
38#include "llvm/IR/Dominators.h"
39#include "llvm/IR/Function.h"
40#include "llvm/IR/IRBuilder.h"
41#include "llvm/IR/InlineAsm.h"
43#include "llvm/IR/Instruction.h"
46#include "llvm/IR/Intrinsics.h"
47#include "llvm/IR/LLVMContext.h"
48#include "llvm/IR/MDBuilder.h"
49#include "llvm/IR/Module.h"
50#include "llvm/IR/Type.h"
51#include "llvm/IR/Value.h"
54#include "llvm/Support/Debug.h"
56#include "llvm/Support/MD5.h"
67#include <optional>
68#include <random>
69
70using namespace llvm;
71
72#define DEBUG_TYPE "hwasan"
73
74const char kHwasanModuleCtorName[] = "hwasan.module_ctor";
75const char kHwasanNoteName[] = "hwasan.note";
76const char kHwasanInitName[] = "__hwasan_init";
77const char kHwasanPersonalityThunkName[] = "__hwasan_personality_thunk";
78
80 "__hwasan_shadow_memory_dynamic_address";
81
82// Accesses sizes are powers of two: 1, 2, 4, 8, 16.
83static const size_t kNumberOfAccessSizes = 5;
84
85static const size_t kDefaultShadowScale = 4;
86
87static const unsigned kShadowBaseAlignment = 32;
88
89namespace {
90enum class OffsetKind {
91 kFixed = 0,
92 kGlobal,
93 kIfunc,
94 kTls,
95};
96}
97
99 ClMemoryAccessCallbackPrefix("hwasan-memory-access-callback-prefix",
100 cl::desc("Prefix for memory access callbacks"),
101 cl::Hidden, cl::init("__hwasan_"));
102
104 "hwasan-kernel-mem-intrinsic-prefix",
105 cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden,
106 cl::init(false));
107
109 "hwasan-instrument-with-calls",
110 cl::desc("instrument reads and writes with callbacks"), cl::Hidden,
111 cl::init(false));
112
113static cl::opt<bool> ClInstrumentReads("hwasan-instrument-reads",
114 cl::desc("instrument read instructions"),
115 cl::Hidden, cl::init(true));
116
117static cl::opt<bool>
118 ClInstrumentWrites("hwasan-instrument-writes",
119 cl::desc("instrument write instructions"), cl::Hidden,
120 cl::init(true));
121
123 "hwasan-instrument-atomics",
124 cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
125 cl::init(true));
126
127static cl::opt<bool> ClInstrumentByval("hwasan-instrument-byval",
128 cl::desc("instrument byval arguments"),
129 cl::Hidden, cl::init(true));
130
131static cl::opt<bool>
132 ClRecover("hwasan-recover",
133 cl::desc("Enable recovery mode (continue-after-error)."),
134 cl::Hidden, cl::init(false));
135
136static cl::opt<bool> ClInstrumentStack("hwasan-instrument-stack",
137 cl::desc("instrument stack (allocas)"),
138 cl::Hidden, cl::init(true));
139
140static cl::opt<bool>
141 ClUseStackSafety("hwasan-use-stack-safety", cl::Hidden, cl::init(true),
142 cl::Hidden, cl::desc("Use Stack Safety analysis results"),
144
146 "hwasan-max-lifetimes-for-alloca", cl::Hidden, cl::init(3),
148 cl::desc("How many lifetime ends to handle for a single alloca."),
150
151static cl::opt<bool>
152 ClUseAfterScope("hwasan-use-after-scope",
153 cl::desc("detect use after scope within function"),
154 cl::Hidden, cl::init(true));
155
157 "hwasan-strict-use-after-scope",
158 cl::desc("for complicated lifetimes, tag both on end and return"),
159 cl::Hidden, cl::init(true));
160
162 "hwasan-generate-tags-with-calls",
163 cl::desc("generate new tags with runtime library calls"), cl::Hidden,
164 cl::init(false));
165
166static cl::opt<bool> ClGlobals("hwasan-globals", cl::desc("Instrument globals"),
167 cl::Hidden, cl::init(false));
168
170 "hwasan-all-globals",
171 cl::desc(
172 "Instrument globals, even those within user-defined sections. Warning: "
173 "This may break existing code which walks globals via linker-generated "
174 "symbols, expects certain globals to be contiguous with each other, or "
175 "makes other assumptions which are invalidated by HWASan "
176 "instrumentation."),
177 cl::Hidden, cl::init(false));
178
180 "hwasan-match-all-tag",
181 cl::desc("don't report bad accesses via pointers with this tag"),
182 cl::Hidden, cl::init(-1));
183
184static cl::opt<bool>
185 ClEnableKhwasan("hwasan-kernel",
186 cl::desc("Enable KernelHWAddressSanitizer instrumentation"),
187 cl::Hidden, cl::init(false));
188
189// These flags allow to change the shadow mapping and control how shadow memory
190// is accessed. The shadow mapping looks like:
191// Shadow = (Mem >> scale) + offset
192
194 ClMappingOffset("hwasan-mapping-offset",
195 cl::desc("HWASan shadow mapping offset [EXPERIMENTAL]"),
196 cl::Hidden);
197
199 "hwasan-mapping-offset-dynamic",
200 cl::desc("HWASan shadow mapping dynamic offset location"), cl::Hidden,
201 cl::values(clEnumValN(OffsetKind::kGlobal, "global", "Use global"),
202 clEnumValN(OffsetKind::kIfunc, "ifunc", "Use ifunc global"),
203 clEnumValN(OffsetKind::kTls, "tls", "Use TLS")));
204
205static cl::opt<bool>
206 ClFrameRecords("hwasan-with-frame-record",
207 cl::desc("Use ring buffer for stack allocations"),
208 cl::Hidden);
209
210static cl::opt<int> ClHotPercentileCutoff("hwasan-percentile-cutoff-hot",
211 cl::desc("Hot percentile cutoff."));
212
213static cl::opt<float>
214 ClRandomKeepRate("hwasan-random-rate",
215 cl::desc("Probability value in the range [0.0, 1.0] "
216 "to keep instrumentation of a function. "
217 "Note: instrumentation can be skipped randomly "
218 "OR because of the hot percentile cutoff, if "
219 "both are supplied."));
220
222 "hwasan-static-linking",
223 cl::desc("Don't use .note.hwasan.globals section to instrument globals "
224 "from loadable libraries. "
225 "Note: in static binaries, the global variables section can be "
226 "accessed directly via linker-provided "
227 "__start_hwasan_globals and __stop_hwasan_globals symbols"),
228 cl::Hidden, cl::init(false));
229
230// Mode for selecting how to insert frame record info into the stack ring
231// buffer.
233 // Do not record frame record info.
235
236 // Insert instructions into the prologue for storing into the stack ring
237 // buffer directly.
239
240 // Add a call to __hwasan_add_frame_record in the runtime.
242};
243
245 "hwasan-record-stack-history",
246 cl::desc("Record stack frames with tagged allocations in a thread-local "
247 "ring buffer"),
248 cl::values(clEnumVal(none, "Do not record stack ring history"),
249 clEnumVal(instr, "Insert instructions into the prologue for "
250 "storing into the stack ring buffer directly"),
251 clEnumVal(libcall, "Add a call to __hwasan_add_frame_record for "
252 "storing into the stack ring buffer")),
254
255static cl::opt<bool>
256 ClInstrumentMemIntrinsics("hwasan-instrument-mem-intrinsics",
257 cl::desc("instrument memory intrinsics"),
258 cl::Hidden, cl::init(true));
259
260static cl::opt<bool>
261 ClInstrumentLandingPads("hwasan-instrument-landing-pads",
262 cl::desc("instrument landing pads"), cl::Hidden,
263 cl::init(false));
264
266 "hwasan-use-short-granules",
267 cl::desc("use short granules in allocas and outlined checks"), cl::Hidden,
268 cl::init(false));
269
271 "hwasan-instrument-personality-functions",
272 cl::desc("instrument personality functions"), cl::Hidden);
273
274static cl::opt<bool> ClInlineAllChecks("hwasan-inline-all-checks",
275 cl::desc("inline all checks"),
276 cl::Hidden, cl::init(false));
277
278static cl::opt<bool> ClInlineFastPathChecks("hwasan-inline-fast-path-checks",
279 cl::desc("inline all checks"),
280 cl::Hidden, cl::init(false));
281
282// Enabled from clang by "-fsanitize-hwaddress-experimental-aliasing".
283static cl::opt<bool> ClUsePageAliases("hwasan-experimental-use-page-aliases",
284 cl::desc("Use page aliasing in HWASan"),
285 cl::Hidden, cl::init(false));
286
288 ClTagBits("hwasan-tag-bits",
289 cl::desc("Restrict tag to at most N bits. Needs to be > 4."),
290 cl::Hidden, cl::init(0));
291
292STATISTIC(NumTotalFuncs, "Number of total funcs");
293STATISTIC(NumInstrumentedFuncs, "Number of instrumented funcs");
294STATISTIC(NumNoProfileSummaryFuncs, "Number of funcs without PS");
295
296namespace {
297
298template <typename T> T optOr(cl::opt<T> &Opt, T Other) {
299 return Opt.getNumOccurrences() ? Opt : Other;
300}
301
302bool shouldUsePageAliases(const Triple &TargetTriple) {
303 return ClUsePageAliases && TargetTriple.getArch() == Triple::x86_64;
304}
305
306bool shouldInstrumentStack(const Triple &TargetTriple) {
307 return !shouldUsePageAliases(TargetTriple) && ClInstrumentStack;
308}
309
310bool shouldInstrumentWithCalls(const Triple &TargetTriple) {
311 return optOr(ClInstrumentWithCalls, TargetTriple.getArch() == Triple::x86_64);
312}
313
314bool mightUseStackSafetyAnalysis(bool DisableOptimization) {
315 return optOr(ClUseStackSafety, !DisableOptimization);
316}
317
318bool shouldUseStackSafetyAnalysis(const Triple &TargetTriple,
319 bool DisableOptimization) {
320 return shouldInstrumentStack(TargetTriple) &&
321 mightUseStackSafetyAnalysis(DisableOptimization);
322}
323
324bool shouldDetectUseAfterScope(const Triple &TargetTriple) {
325 return ClUseAfterScope && shouldInstrumentStack(TargetTriple);
326}
327
328/// An instrumentation pass implementing detection of addressability bugs
329/// using tagged pointers.
330class HWAddressSanitizer {
331public:
// Command-line flags (ClRecover, ClEnableKhwasan) override the pass options
// when given explicitly; an RNG is created only when random selective
// instrumentation (-hwasan-random-rate) was requested.
332 HWAddressSanitizer(Module &M, bool CompileKernel, bool Recover,
333 const StackSafetyGlobalInfo *SSI)
334 : M(M), SSI(SSI) {
335 this->Recover = optOr(ClRecover, Recover);
336 this->CompileKernel = optOr(ClEnableKhwasan, CompileKernel);
337 this->Rng = ClRandomKeepRate.getNumOccurrences() ? M.createRNG(DEBUG_TYPE)
338 : nullptr;
339
340 initializeModule();
341 }
342
343 void sanitizeFunction(Function &F, FunctionAnalysisManager &FAM);
344
345private:
// Values produced while emitting a shadow-tag check. TagMismatchTerm is the
// terminator of the (slow-path) block reached when the pointer tag does not
// match the shadow-memory tag.
346 struct ShadowTagCheckInfo {
347 Instruction *TagMismatchTerm = nullptr;
348 Value *PtrLong = nullptr;
349 Value *AddrLong = nullptr;
350 Value *PtrTag = nullptr;
351 Value *MemTag = nullptr;
352 };
353
354 bool selectiveInstrumentationShouldSkip(Function &F,
356 void initializeModule();
357 void createHwasanCtorComdat();
358 void createHwasanNote();
359
360 void initializeCallbacks(Module &M);
361
362 Value *getOpaqueNoopCast(IRBuilder<> &IRB, Value *Val);
363
364 Value *getDynamicShadowIfunc(IRBuilder<> &IRB);
365 Value *getShadowNonTls(IRBuilder<> &IRB);
366
367 void untagPointerOperand(Instruction *I, Value *Addr);
368 Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
369
370 int64_t getAccessInfo(bool IsWrite, unsigned AccessSizeIndex);
371 ShadowTagCheckInfo insertShadowTagCheck(Value *Ptr, Instruction *InsertBefore,
372 DomTreeUpdater &DTU, LoopInfo *LI);
373 void instrumentMemAccessOutline(Value *Ptr, bool IsWrite,
374 unsigned AccessSizeIndex,
375 Instruction *InsertBefore,
376 DomTreeUpdater &DTU, LoopInfo *LI);
377 void instrumentMemAccessInline(Value *Ptr, bool IsWrite,
378 unsigned AccessSizeIndex,
379 Instruction *InsertBefore, DomTreeUpdater &DTU,
380 LoopInfo *LI);
381 bool ignoreMemIntrinsic(OptimizationRemarkEmitter &ORE, MemIntrinsic *MI);
382 void instrumentMemIntrinsic(MemIntrinsic *MI);
383 bool instrumentMemAccess(InterestingMemoryOperand &O, DomTreeUpdater &DTU,
384 LoopInfo *LI, const DataLayout &DL);
385 bool ignoreAccessWithoutRemark(Instruction *Inst, Value *Ptr);
386 bool ignoreAccess(OptimizationRemarkEmitter &ORE, Instruction *Inst,
387 Value *Ptr);
388
390 OptimizationRemarkEmitter &ORE, Instruction *I,
391 const TargetLibraryInfo &TLI,
392 SmallVectorImpl<InterestingMemoryOperand> &Interesting);
393
// Stack/alloca tagging helpers.
394 void tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag, size_t Size);
395 Value *tagPointer(IRBuilder<> &IRB, Type *Ty, Value *PtrLong, Value *Tag);
396 Value *untagPointer(IRBuilder<> &IRB, Value *PtrLong);
397 void instrumentStack(OptimizationRemarkEmitter &ORE, memtag::StackInfo &Info,
398 Value *StackTag, Value *UARTag, const DominatorTree &DT,
399 const PostDominatorTree &PDT, const LoopInfo &LI);
400 void instrumentLandingPads(SmallVectorImpl<Instruction *> &RetVec);
401 Value *getNextTagWithCall(IRBuilder<> &IRB);
402 Value *getStackBaseTag(IRBuilder<> &IRB);
403 Value *getAllocaTag(IRBuilder<> &IRB, Value *StackTag, unsigned AllocaNo);
404 Value *getUARTag(IRBuilder<> &IRB);
405
406 Value *getHwasanThreadSlotPtr(IRBuilder<> &IRB);
407 Value *applyTagMask(IRBuilder<> &IRB, Value *OldTag);
408 unsigned retagMask(unsigned AllocaNo);
409
410 void emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord);
411
412 void instrumentGlobal(GlobalVariable *GV, uint8_t Tag);
413 void instrumentGlobals();
414
415 Value *getCachedFP(IRBuilder<> &IRB);
416 Value *getFrameRecordInfo(IRBuilder<> &IRB);
417
418 void instrumentPersonalityFunctions();
419
420 LLVMContext *C;
421 Module &M;
422 const StackSafetyGlobalInfo *SSI;
423 Triple TargetTriple;
424 std::unique_ptr<RandomNumberGenerator> Rng;
425
426 /// This struct defines the shadow mapping using the rule:
427 /// If `kFixed`, then
428 /// shadow = (mem >> Scale) + Offset.
429 /// If `kGlobal`, then
430 /// extern char* __hwasan_shadow_memory_dynamic_address;
431 /// shadow = (mem >> Scale) + __hwasan_shadow_memory_dynamic_address
432 /// If `kIfunc`, then
433 /// extern char __hwasan_shadow[];
434 /// shadow = (mem >> Scale) + &__hwasan_shadow
435 /// If `kTls`, then
436 /// extern char *__hwasan_tls;
437 /// shadow = (mem>>Scale) + align_up(__hwasan_shadow, kShadowBaseAlignment)
438 ///
439 /// If WithFrameRecord is true, then __hwasan_tls will be used to access the
440 /// ring buffer for storing stack allocations on targets that support it.
441 class ShadowMapping {
442 OffsetKind Kind;
443 uint64_t Offset;
444 uint8_t Scale;
445 bool WithFrameRecord;
446
447 void SetFixed(uint64_t O) {
448 Kind = OffsetKind::kFixed;
449 Offset = O;
450 }
451
452 public:
453 void init(Triple &TargetTriple, bool InstrumentWithCalls,
454 bool CompileKernel);
455 Align getObjectAlignment() const { return Align(1ULL << Scale); }
456 bool isInGlobal() const { return Kind == OffsetKind::kGlobal; }
457 bool isInIfunc() const { return Kind == OffsetKind::kIfunc; }
458 bool isInTls() const { return Kind == OffsetKind::kTls; }
459 bool isFixed() const { return Kind == OffsetKind::kFixed; }
460 uint8_t scale() const { return Scale; };
// offset() is only meaningful for the fixed mapping; the assert enforces it.
461 uint64_t offset() const {
462 assert(isFixed());
463 return Offset;
464 };
465 bool withFrameRecord() const { return WithFrameRecord; };
466 };
467
468 ShadowMapping Mapping;
469
// Frequently used IR types, cached from the module context.
470 Type *VoidTy = Type::getVoidTy(M.getContext());
471 Type *IntptrTy = M.getDataLayout().getIntPtrType(M.getContext());
472 PointerType *PtrTy = PointerType::getUnqual(M.getContext());
473 Type *Int8Ty = Type::getInt8Ty(M.getContext());
474 Type *Int32Ty = Type::getInt32Ty(M.getContext());
475 Type *Int64Ty = Type::getInt64Ty(M.getContext());
476
// Configuration derived in initializeModule() from flags, pass options and
// the target triple.
477 bool CompileKernel;
478 bool Recover;
479 bool OutlinedChecks;
480 bool InlineFastPath;
481 bool UseShortGranules;
482 bool InstrumentLandingPads;
483 bool InstrumentWithCalls;
484 bool InstrumentStack;
485 bool InstrumentGlobals;
486 bool DetectUseAfterScope;
487 bool UsePageAliases;
488 bool UseMatchAllCallback;
489
490 std::optional<uint8_t> MatchAllTag;
491
492 unsigned PointerTagShift;
493 uint64_t TagMaskByte;
494
495 Function *HwasanCtorFunction;
496
// Runtime callbacks, resolved in initializeCallbacks(). Indexed by
// [IsWrite][AccessSizeIndex] for the fixed-size access callbacks.
497 FunctionCallee HwasanMemoryAccessCallback[2][kNumberOfAccessSizes];
498 FunctionCallee HwasanMemoryAccessCallbackSized[2];
499
500 FunctionCallee HwasanMemmove, HwasanMemcpy, HwasanMemset;
501 FunctionCallee HwasanHandleVfork;
502
503 FunctionCallee HwasanTagMemoryFunc;
504 FunctionCallee HwasanGenerateTagFunc;
505 FunctionCallee HwasanRecordFrameRecordFunc;
506
507 Constant *ShadowGlobal;
508
// Per-function state, reset between sanitizeFunction() invocations.
509 Value *ShadowBase = nullptr;
510 Value *StackBaseTag = nullptr;
511 Value *CachedFP = nullptr;
512 GlobalValue *ThreadPtrGlobal = nullptr;
513};
514
515} // end anonymous namespace
516
519 // Return early if nosanitize_hwaddress module flag is present for the module.
520 if (checkIfAlreadyInstrumented(M, "nosanitize_hwaddress"))
521 return PreservedAnalyses::all();
522 const StackSafetyGlobalInfo *SSI = nullptr;
523 const Triple &TargetTriple = M.getTargetTriple();
524 if (shouldUseStackSafetyAnalysis(TargetTriple, Options.DisableOptimization))
525 SSI = &MAM.getResult<StackSafetyGlobalAnalysis>(M);
526
527 HWAddressSanitizer HWASan(M, Options.CompileKernel, Options.Recover, SSI);
528 auto &FAM = MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
529 for (Function &F : M)
530 HWASan.sanitizeFunction(F, FAM);
531
533 // DominatorTreeAnalysis, PostDominatorTreeAnalysis, and LoopAnalysis
534 // are incrementally updated throughout this pass whenever
535 // SplitBlockAndInsertIfThen is called.
539 // GlobalsAA is considered stateless and does not get invalidated unless
540 // explicitly invalidated; PreservedAnalyses::none() is not enough. Sanitizers
541 // make changes that require GlobalsAA to be invalidated.
542 PA.abandon<GlobalsAA>();
543 return PA;
544}
546 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
548 OS, MapClassName2PassName);
549 OS << '<';
550 if (Options.CompileKernel)
551 OS << "kernel;";
552 if (Options.Recover)
553 OS << "recover";
554 OS << '>';
555}
556
557void HWAddressSanitizer::createHwasanNote() {
558 // Create a note that contains pointers to the list of global
559 // descriptors. Adding a note to the output file will cause the linker to
560 // create a PT_NOTE program header pointing to the note that we can use to
561 // find the descriptor list starting from the program headers. A function
562 // provided by the runtime initializes the shadow memory for the globals by
563 // accessing the descriptor list via the note. The dynamic loader needs to
564 // call this function whenever a library is loaded.
565 //
566 // The reason why we use a note for this instead of a more conventional
567 // approach of having a global constructor pass a descriptor list pointer to
568 // the runtime is because of an order of initialization problem. With
569 // constructors we can encounter the following problematic scenario:
570 //
571 // 1) library A depends on library B and also interposes one of B's symbols
572 // 2) B's constructors are called before A's (as required for correctness)
573 // 3) during construction, B accesses one of its "own" globals (actually
574 // interposed by A) and triggers a HWASAN failure due to the initialization
575 // for A not having happened yet
576 //
577 // Even without interposition it is possible to run into similar situations in
578 // cases where two libraries mutually depend on each other.
579 //
580 // We only need one note per binary, so put everything for the note in a
581 // comdat. This needs to be a comdat with an .init_array section to prevent
582 // newer versions of lld from discarding the note.
583 //
584 // Create the note even if we aren't instrumenting globals. This ensures that
585 // binaries linked from object files with both instrumented and
586 // non-instrumented globals will end up with a note, even if a comdat from an
587 // object file with non-instrumented globals is selected. The note is harmless
588 // if the runtime doesn't support it, since it will just be ignored.
589 Comdat *NoteComdat = M.getOrInsertComdat(kHwasanModuleCtorName);
590
// __start_/__stop_ symbols for the hwasan_globals section are synthesized by
// the linker; hidden visibility keeps them local to each binary.
591 Type *Int8Arr0Ty = ArrayType::get(Int8Ty, 0);
592 auto *Start =
593 new GlobalVariable(M, Int8Arr0Ty, true, GlobalVariable::ExternalLinkage,
594 nullptr, "__start_hwasan_globals");
595 Start->setVisibility(GlobalValue::HiddenVisibility);
596 auto *Stop =
597 new GlobalVariable(M, Int8Arr0Ty, true, GlobalVariable::ExternalLinkage,
598 nullptr, "__stop_hwasan_globals");
599 Stop->setVisibility(GlobalValue::HiddenVisibility);
600
601 // Null-terminated so actually 8 bytes, which are required in order to align
602 // the note properly.
603 auto *Name = ConstantDataArray::get(*C, "LLVM\0\0\0");
604
605 auto *NoteTy = StructType::get(Int32Ty, Int32Ty, Int32Ty, Name->getType(),
607 auto *Note =
608 new GlobalVariable(M, NoteTy, /*isConstant=*/true,
610 Note->setSection(".note.hwasan.globals");
611 Note->setComdat(NoteComdat);
612 Note->setAlignment(Align(4));
613
614 // The pointers in the note need to be relative so that the note ends up being
615 // placed in rodata, which is the standard location for notes.
616 auto CreateRelPtr = [&](Constant *Ptr) {
620 Int32Ty);
621 };
// ELF note layout: n_namesz, n_descsz, n_type, the name, then the two
// relative pointers bounding the global-descriptor list.
622 Note->setInitializer(ConstantStruct::getAnon(
623 {ConstantInt::get(Int32Ty, 8), // n_namesz
624 ConstantInt::get(Int32Ty, 8), // n_descsz
625 ConstantInt::get(Int32Ty, ELF::NT_LLVM_HWASAN_GLOBALS), // n_type
626 Name, CreateRelPtr(Start), CreateRelPtr(Stop)}));
628
629 // Create a zero-length global in hwasan_globals so that the linker will
630 // always create start and stop symbols.
631 auto *Dummy = new GlobalVariable(
632 M, Int8Arr0Ty, /*isConstantGlobal*/ true, GlobalVariable::PrivateLinkage,
633 Constant::getNullValue(Int8Arr0Ty), "hwasan.dummy.global");
634 Dummy->setSection("hwasan_globals");
635 Dummy->setComdat(NoteComdat);
636 Dummy->setMetadata(LLVMContext::MD_associated,
638 appendToCompilerUsed(M, Dummy);
639}
640
// Creates the hwasan module constructor (which calls __hwasan_init) inside a
// comdat so that only a single copy survives linking, then emits the globals
// note unless static linking was requested.
641void HWAddressSanitizer::createHwasanCtorComdat() {
642 std::tie(HwasanCtorFunction, std::ignore) =
645 /*InitArgTypes=*/{},
646 /*InitArgs=*/{},
647 // This callback is invoked when the functions are created the first
648 // time. Hook them into the global ctors list in that case:
649 [&](Function *Ctor, FunctionCallee) {
650 Comdat *CtorComdat = M.getOrInsertComdat(kHwasanModuleCtorName);
651 Ctor->setComdat(CtorComdat);
652 appendToGlobalCtors(M, Ctor, 0, Ctor);
653 });
654
655 // Do not create .note.hwasan.globals for static binaries, as it is only
656 // needed for instrumenting globals from dynamic libraries. In static
657 // binaries, the global variables section can be accessed directly via the
658 // __start_hwasan_globals and __stop_hwasan_globals symbols inserted by the
659 // linker.
660 if (!ClStaticLinking)
661 createHwasanNote();
662}
663
664/// Module-level initialization.
665///
666/// inserts a call to __hwasan_init to the module's constructor list.
667void HWAddressSanitizer::initializeModule() {
668 LLVM_DEBUG(dbgs() << "Init " << M.getName() << "\n");
669 TargetTriple = M.getTargetTriple();
670
671 // HWASan may do short granule checks on function arguments read from the
672 // argument memory (last byte of the granule), which invalidates writeonly.
673 for (Function &F : M.functions())
674 removeASanIncompatibleFnAttributes(F, /*ReadsArgMem=*/true);
675
676 // x86_64 currently has two modes:
677 // - Intel LAM (default)
678 // - pointer aliasing (heap only)
679 bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64;
680 UsePageAliases = shouldUsePageAliases(TargetTriple);
681 InstrumentWithCalls = shouldInstrumentWithCalls(TargetTriple);
682 InstrumentStack = shouldInstrumentStack(TargetTriple);
683 DetectUseAfterScope = shouldDetectUseAfterScope(TargetTriple);
// x86_64 LAM tags live at bits 57+ and use 6-bit tags; other targets (e.g.
// AArch64 TBI) use the top byte.
684 PointerTagShift = IsX86_64 ? 57 : 56;
685 TagMaskByte = IsX86_64 ? 0x3F : 0xFF;
686 if (ClTagBits) {
687 if (TagMaskByte < 4)
689 "need more than 4 bits of tag to have non-short-granule tags");
690 TagMaskByte &= (1 << ClTagBits) - 1;
691 }
692
693 Mapping.init(TargetTriple, InstrumentWithCalls, CompileKernel);
694
695 C = &(M.getContext());
696 IRBuilder<> IRB(*C);
697
698 HwasanCtorFunction = nullptr;
699
700 // Older versions of Android do not have the required runtime support for
701 // short granules, global or personality function instrumentation. On other
702 // platforms we currently require using the latest version of the runtime.
703 bool NewRuntime =
704 !TargetTriple.isAndroid() || !TargetTriple.isAndroidVersionLT(30);
705
706 UseShortGranules = optOr(ClUseShortGranules, NewRuntime);
707 OutlinedChecks = (TargetTriple.isAArch64() || TargetTriple.isRISCV64()) &&
708 TargetTriple.isOSBinFormatELF() &&
709 !optOr(ClInlineAllChecks, Recover);
710
711 // These platforms may prefer less inlining to reduce binary size.
712 InlineFastPath = optOr(ClInlineFastPathChecks, !(TargetTriple.isAndroid() ||
713 TargetTriple.isOSFuchsia()));
714
// -1 is the "disabled" sentinel for -hwasan-match-all-tag; kernel builds
// default to matching tag 0xFF (untagged pointers are not reported).
715 if (ClMatchAllTag.getNumOccurrences()) {
716 if (ClMatchAllTag != -1) {
717 MatchAllTag = ClMatchAllTag & 0xFF;
718 }
719 } else if (CompileKernel) {
720 MatchAllTag = 0xFF;
721 }
722 UseMatchAllCallback = !CompileKernel && MatchAllTag.has_value();
723
724 // If we don't have personality function support, fall back to landing pads.
725 InstrumentLandingPads = optOr(ClInstrumentLandingPads, !NewRuntime);
726
727 InstrumentGlobals =
728 !CompileKernel && !UsePageAliases && optOr(ClGlobals, NewRuntime);
729
730 if (!CompileKernel) {
731 if (InstrumentGlobals)
732 instrumentGlobals();
733
734 createHwasanCtorComdat();
735
736 bool InstrumentPersonalityFunctions =
737 optOr(ClInstrumentPersonalityFunctions, NewRuntime);
738 if (InstrumentPersonalityFunctions)
739 instrumentPersonalityFunctions();
740 }
741
// Non-Android targets access the runtime's thread state via the __hwasan_tls
// global; on Android a dedicated TLS slot is used instead.
742 if (!TargetTriple.isAndroid()) {
743 ThreadPtrGlobal = M.getOrInsertGlobal("__hwasan_tls", IntptrTy, [&] {
744 auto *GV = new GlobalVariable(M, IntptrTy, /*isConstant=*/false,
746 "__hwasan_tls", nullptr,
749 return GV;
750 });
751 }
752}
753
// Declares (or re-uses) all runtime callbacks used by the instrumentation.
// Callback names encode the configuration, e.g.
// __hwasan_{load,store}{1,2,4,8,16,N}[_match_all][_noabort].
754void HWAddressSanitizer::initializeCallbacks(Module &M) {
755 IRBuilder<> IRB(*C);
756 const std::string MatchAllStr = UseMatchAllCallback ? "_match_all" : "";
// The _match_all variants take the match-all tag as a trailing i8 argument.
757 FunctionType *HwasanMemoryAccessCallbackSizedFnTy,
758 *HwasanMemoryAccessCallbackFnTy, *HwasanMemTransferFnTy,
759 *HwasanMemsetFnTy;
760 if (UseMatchAllCallback) {
761 HwasanMemoryAccessCallbackSizedFnTy =
762 FunctionType::get(VoidTy, {IntptrTy, IntptrTy, Int8Ty}, false);
763 HwasanMemoryAccessCallbackFnTy =
764 FunctionType::get(VoidTy, {IntptrTy, Int8Ty}, false);
765 HwasanMemTransferFnTy =
766 FunctionType::get(PtrTy, {PtrTy, PtrTy, IntptrTy, Int8Ty}, false);
767 HwasanMemsetFnTy =
768 FunctionType::get(PtrTy, {PtrTy, Int32Ty, IntptrTy, Int8Ty}, false);
769 } else {
770 HwasanMemoryAccessCallbackSizedFnTy =
771 FunctionType::get(VoidTy, {IntptrTy, IntptrTy}, false);
772 HwasanMemoryAccessCallbackFnTy =
773 FunctionType::get(VoidTy, {IntptrTy}, false);
774 HwasanMemTransferFnTy =
775 FunctionType::get(PtrTy, {PtrTy, PtrTy, IntptrTy}, false);
776 HwasanMemsetFnTy =
777 FunctionType::get(PtrTy, {PtrTy, Int32Ty, IntptrTy}, false);
778 }
779
780 for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
781 const std::string TypeStr = AccessIsWrite ? "store" : "load";
782 const std::string EndingStr = Recover ? "_noabort" : "";
783
// Variable-size ("N") callback plus one callback per power-of-two size.
784 HwasanMemoryAccessCallbackSized[AccessIsWrite] = M.getOrInsertFunction(
785 ClMemoryAccessCallbackPrefix + TypeStr + "N" + MatchAllStr + EndingStr,
786 HwasanMemoryAccessCallbackSizedFnTy);
787
788 for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
789 AccessSizeIndex++) {
790 HwasanMemoryAccessCallback[AccessIsWrite][AccessSizeIndex] =
791 M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + TypeStr +
792 itostr(1ULL << AccessSizeIndex) +
793 MatchAllStr + EndingStr,
794 HwasanMemoryAccessCallbackFnTy);
795 }
796 }
797
// In kernel mode the plain libc names are used unless the KASAN-prefix flag
// is set.
798 const std::string MemIntrinCallbackPrefix =
799 (CompileKernel && !ClKasanMemIntrinCallbackPrefix)
800 ? std::string("")
802
803 HwasanMemmove = M.getOrInsertFunction(
804 MemIntrinCallbackPrefix + "memmove" + MatchAllStr, HwasanMemTransferFnTy);
805 HwasanMemcpy = M.getOrInsertFunction(
806 MemIntrinCallbackPrefix + "memcpy" + MatchAllStr, HwasanMemTransferFnTy);
807 HwasanMemset = M.getOrInsertFunction(
808 MemIntrinCallbackPrefix + "memset" + MatchAllStr, HwasanMemsetFnTy);
809
810 HwasanTagMemoryFunc = M.getOrInsertFunction("__hwasan_tag_memory", VoidTy,
811 PtrTy, Int8Ty, IntptrTy);
812 HwasanGenerateTagFunc =
813 M.getOrInsertFunction("__hwasan_generate_tag", Int8Ty);
814
815 HwasanRecordFrameRecordFunc =
816 M.getOrInsertFunction("__hwasan_add_frame_record", VoidTy, Int64Ty);
817
// Zero-length array used as the ifunc-resolved shadow base symbol.
818 ShadowGlobal =
819 M.getOrInsertGlobal("__hwasan_shadow", ArrayType::get(Int8Ty, 0));
820
821 HwasanHandleVfork =
822 M.getOrInsertFunction("__hwasan_handle_vfork", VoidTy, IntptrTy);
823}
824
825Value *HWAddressSanitizer::getOpaqueNoopCast(IRBuilder<> &IRB, Value *Val) {
826 // An empty inline asm with input reg == output reg.
827 // An opaque no-op cast, basically.
828 // This prevents code bloat as a result of rematerializing trivial definitions
829 // such as constants or global addresses at every load and store.
830 InlineAsm *Asm =
831 InlineAsm::get(FunctionType::get(PtrTy, {Val->getType()}, false),
832 StringRef(""), StringRef("=r,0"),
833 /*hasSideEffects=*/false);
834 return IRB.CreateCall(Asm, {Val}, ".hwasan.shadow");
835}
836
837Value *HWAddressSanitizer::getDynamicShadowIfunc(IRBuilder<> &IRB) {
838 return getOpaqueNoopCast(IRB, ShadowGlobal);
839}
840
// Computes the shadow base for the non-TLS mappings: a fixed constant
// offset, the ifunc-provided symbol, or a load of the runtime's
// __hwasan_shadow_memory_dynamic_address global.
841Value *HWAddressSanitizer::getShadowNonTls(IRBuilder<> &IRB) {
842 if (Mapping.isFixed()) {
843 return getOpaqueNoopCast(
845 ConstantInt::get(IntptrTy, Mapping.offset()), PtrTy));
846 }
847
848 if (Mapping.isInIfunc())
849 return getDynamicShadowIfunc(IRB);
850
851 Value *GlobalDynamicAddress =
854 return IRB.CreateLoad(PtrTy, GlobalDynamicAddress);
855}
856
// Returns true when the access through \p Ptr should not be instrumented;
// the remark-emitting wrapper is ignoreAccess().
857bool HWAddressSanitizer::ignoreAccessWithoutRemark(Instruction *Inst,
858 Value *Ptr) {
859 // Do not instrument accesses from different address spaces; we cannot deal
860 // with them.
861 Type *PtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
862 if (PtrTy->getPointerAddressSpace() != 0)
863 return true;
864
865 // Ignore swifterror addresses.
866 // swifterror memory addresses are mem2reg promoted by instruction
867 // selection. As such they cannot have regular uses like an instrumentation
868 // function and it makes no sense to track them as memory.
869 if (Ptr->isSwiftError())
870 return true;
871
// Alloca-backed accesses are skipped when stack instrumentation is off, or
// when the stack-safety analysis has already proven the access safe.
872 if (findAllocaForValue(Ptr)) {
873 if (!InstrumentStack)
874 return true;
875 if (SSI && SSI->stackAccessIsSafe(*Inst))
876 return true;
877 }
878
880 if (!InstrumentGlobals)
881 return true;
882 // TODO: Optimize inbound global accesses, like Asan `instrumentMop`.
883 }
884
885 return false;
886}
887
888bool HWAddressSanitizer::ignoreAccess(OptimizationRemarkEmitter &ORE,
889 Instruction *Inst, Value *Ptr) {
890 bool Ignored = ignoreAccessWithoutRemark(Inst, Ptr);
891 if (Ignored) {
892 ORE.emit(
893 [&]() { return OptimizationRemark(DEBUG_TYPE, "ignoreAccess", Inst); });
894 } else {
895 ORE.emit([&]() {
896 return OptimizationRemarkMissed(DEBUG_TYPE, "ignoreAccess", Inst);
897 });
898 }
899 return Ignored;
900}
901
// Collects the memory operands of \p I that are worth instrumenting (loads,
// stores, atomic RMW/cmpxchg, and byval call arguments), gated by the
// per-kind command-line flags and the ignoreAccess() filter.
902void HWAddressSanitizer::getInterestingMemoryOperands(
904 const TargetLibraryInfo &TLI,
906 // Skip memory accesses inserted by another instrumentation.
907 if (I->hasMetadata(LLVMContext::MD_nosanitize))
908 return;
909
910 // Do not instrument the load fetching the dynamic shadow address.
911 if (ShadowBase == I)
912 return;
913
914 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
915 if (!ClInstrumentReads || ignoreAccess(ORE, I, LI->getPointerOperand()))
916 return;
917 Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
918 LI->getType(), LI->getAlign());
919 } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
920 if (!ClInstrumentWrites || ignoreAccess(ORE, I, SI->getPointerOperand()))
921 return;
922 Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,
923 SI->getValueOperand()->getType(), SI->getAlign());
924 } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
925 if (!ClInstrumentAtomics || ignoreAccess(ORE, I, RMW->getPointerOperand()))
926 return;
927 Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,
928 RMW->getValOperand()->getType(), std::nullopt);
929 } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
930 if (!ClInstrumentAtomics || ignoreAccess(ORE, I, XCHG->getPointerOperand()))
931 return;
932 Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
933 XCHG->getCompareOperand()->getType(),
934 std::nullopt);
935 } else if (auto *CI = dyn_cast<CallInst>(I)) {
// byval arguments are implicit reads of the pointed-to memory at the call.
936 for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
937 if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
938 ignoreAccess(ORE, I, CI->getArgOperand(ArgNo)))
939 continue;
940 Type *Ty = CI->getParamByValType(ArgNo);
941 Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
942 }
944 }
945}
946
948 if (LoadInst *LI = dyn_cast<LoadInst>(I))
949 return LI->getPointerOperandIndex();
951 return SI->getPointerOperandIndex();
953 return RMW->getPointerOperandIndex();
955 return XCHG->getPointerOperandIndex();
956 report_fatal_error("Unexpected instruction");
957 return -1;
958}
959
961 size_t Res = llvm::countr_zero(TypeSize / 8);
963 return Res;
964}
965
966void HWAddressSanitizer::untagPointerOperand(Instruction *I, Value *Addr) {
967 if (TargetTriple.isAArch64() || TargetTriple.getArch() == Triple::x86_64 ||
968 TargetTriple.isRISCV64())
969 return;
970
971 IRBuilder<> IRB(I);
972 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
973 Value *UntaggedPtr =
974 IRB.CreateIntToPtr(untagPointer(IRB, AddrLong), Addr->getType());
975 I->setOperand(getPointerOperandIndex(I), UntaggedPtr);
976}
977
978Value *HWAddressSanitizer::memToShadow(Value *Mem, IRBuilder<> &IRB) {
979 // Mem >> Scale
980 Value *Shadow = IRB.CreateLShr(Mem, Mapping.scale());
981 if (Mapping.isFixed() && Mapping.offset() == 0)
982 return IRB.CreateIntToPtr(Shadow, PtrTy);
983 // (Mem >> Scale) + Offset
984 return IRB.CreatePtrAdd(ShadowBase, Shadow);
985}
986
987int64_t HWAddressSanitizer::getAccessInfo(bool IsWrite,
988 unsigned AccessSizeIndex) {
989 return (CompileKernel << HWASanAccessInfo::CompileKernelShift) |
990 (MatchAllTag.has_value() << HWASanAccessInfo::HasMatchAllShift) |
991 (MatchAllTag.value_or(0) << HWASanAccessInfo::MatchAllShift) |
992 (Recover << HWASanAccessInfo::RecoverShift) |
993 (IsWrite << HWASanAccessInfo::IsWriteShift) |
994 (AccessSizeIndex << HWASanAccessInfo::AccessSizeShift);
995}
996
// Emit the common tag-vs-shadow comparison before \p InsertBefore and split
// the block on the result. Returns the intermediate values (pointer as int,
// pointer tag, untagged address, loaded memory tag) plus the terminator of
// the newly created "tag mismatch" block, so callers can emit either a slow
// inline check or a report there.
HWAddressSanitizer::ShadowTagCheckInfo
HWAddressSanitizer::insertShadowTagCheck(Value *Ptr, Instruction *InsertBefore,
                                         DomTreeUpdater &DTU, LoopInfo *LI) {
  ShadowTagCheckInfo R;

  IRBuilder<> IRB(InsertBefore);

  // Pointer tag lives in the top byte; compare it with the shadow byte of the
  // untagged address.
  R.PtrLong = IRB.CreatePointerCast(Ptr, IntptrTy);
  R.PtrTag =
      IRB.CreateTrunc(IRB.CreateLShr(R.PtrLong, PointerTagShift), Int8Ty);
  R.AddrLong = untagPointer(IRB, R.PtrLong);
  Value *Shadow = memToShadow(R.AddrLong, IRB);
  R.MemTag = IRB.CreateLoad(Int8Ty, Shadow);
  Value *TagMismatch = IRB.CreateICmpNE(R.PtrTag, R.MemTag);

  // Pointers carrying the configured match-all tag always pass the check.
  if (MatchAllTag.has_value()) {
    Value *TagNotIgnored = IRB.CreateICmpNE(
        R.PtrTag, ConstantInt::get(R.PtrTag->getType(), *MatchAllTag));
    TagMismatch = IRB.CreateAnd(TagMismatch, TagNotIgnored);
  }

  // The mismatch path is statically marked unlikely.
  R.TagMismatchTerm = SplitBlockAndInsertIfThen(
      TagMismatch, InsertBefore, false,
      MDBuilder(*C).createUnlikelyBranchWeights(), &DTU, LI);

  return R;
}
1024
// Emit an outlined tag check for one access: the hwasan_check_memaccess*
// intrinsics are lowered to a compact call into a runtime-provided check
// routine. Optionally prefixed by an inline fast-path tag compare so the
// outlined call only runs on mismatch.
void HWAddressSanitizer::instrumentMemAccessOutline(Value *Ptr, bool IsWrite,
                                                    unsigned AccessSizeIndex,
                                                    Instruction *InsertBefore,
                                                    DomTreeUpdater &DTU,
                                                    LoopInfo *LI) {
  assert(!UsePageAliases);
  const int64_t AccessInfo = getAccessInfo(IsWrite, AccessSizeIndex);

  // With the inline fast path, only the mismatch block reaches the intrinsic.
  if (InlineFastPath)
    InsertBefore =
        insertShadowTagCheck(Ptr, InsertBefore, DTU, LI).TagMismatchTerm;

  IRBuilder<> IRB(InsertBefore);
  bool UseFixedShadowIntrinsic = false;
  // The memaccess fixed shadow intrinsic is only supported on AArch64,
  // which allows a 16-bit immediate to be left-shifted by 32.
  // Since kShadowBaseAlignment == 32, and Linux by default will not
  // mmap above 48-bits, practically any valid shadow offset is
  // representable.
  // In particular, an offset of 4TB (1024 << 32) is representable, and
  // ought to be good enough for anybody.
  if (TargetTriple.isAArch64() && Mapping.isFixed()) {
    uint16_t OffsetShifted = Mapping.offset() >> 32;
    UseFixedShadowIntrinsic =
        static_cast<uint64_t>(OffsetShifted) << 32 == Mapping.offset();
  }

  if (UseFixedShadowIntrinsic) {
    // Shadow base is a compile-time constant; pass it as an immediate.
    IRB.CreateIntrinsic(
        UseShortGranules
            ? Intrinsic::hwasan_check_memaccess_shortgranules_fixedshadow
            : Intrinsic::hwasan_check_memaccess_fixedshadow,
        {Ptr, ConstantInt::get(Int32Ty, AccessInfo),
         ConstantInt::get(Int64Ty, Mapping.offset())});
  } else {
    // Shadow base is dynamic; pass the materialized base value.
    IRB.CreateIntrinsic(
        UseShortGranules ? Intrinsic::hwasan_check_memaccess_shortgranules
                         : Intrinsic::hwasan_check_memaccess,
        {ShadowBase, Ptr, ConstantInt::get(Int32Ty, AccessInfo)});
  }
}
1066
1067void HWAddressSanitizer::instrumentMemAccessInline(Value *Ptr, bool IsWrite,
1068 unsigned AccessSizeIndex,
1069 Instruction *InsertBefore,
1070 DomTreeUpdater &DTU,
1071 LoopInfo *LI) {
1072 assert(!UsePageAliases);
1073 const int64_t AccessInfo = getAccessInfo(IsWrite, AccessSizeIndex);
1074
1075 ShadowTagCheckInfo TCI = insertShadowTagCheck(Ptr, InsertBefore, DTU, LI);
1076
1077 IRBuilder<> IRB(TCI.TagMismatchTerm);
1078 Value *OutOfShortGranuleTagRange =
1079 IRB.CreateICmpUGT(TCI.MemTag, ConstantInt::get(Int8Ty, 15));
1080 Instruction *CheckFailTerm = SplitBlockAndInsertIfThen(
1081 OutOfShortGranuleTagRange, TCI.TagMismatchTerm, !Recover,
1082 MDBuilder(*C).createUnlikelyBranchWeights(), &DTU, LI);
1083
1084 IRB.SetInsertPoint(TCI.TagMismatchTerm);
1085 Value *PtrLowBits = IRB.CreateTrunc(IRB.CreateAnd(TCI.PtrLong, 15), Int8Ty);
1086 PtrLowBits = IRB.CreateAdd(
1087 PtrLowBits, ConstantInt::get(Int8Ty, (1 << AccessSizeIndex) - 1));
1088 Value *PtrLowBitsOOB = IRB.CreateICmpUGE(PtrLowBits, TCI.MemTag);
1089 SplitBlockAndInsertIfThen(PtrLowBitsOOB, TCI.TagMismatchTerm, false,
1091 LI, CheckFailTerm->getParent());
1092
1093 IRB.SetInsertPoint(TCI.TagMismatchTerm);
1094 Value *InlineTagAddr = IRB.CreateOr(TCI.AddrLong, 15);
1095 InlineTagAddr = IRB.CreateIntToPtr(InlineTagAddr, PtrTy);
1096 Value *InlineTag = IRB.CreateLoad(Int8Ty, InlineTagAddr);
1097 Value *InlineTagMismatch = IRB.CreateICmpNE(TCI.PtrTag, InlineTag);
1098 SplitBlockAndInsertIfThen(InlineTagMismatch, TCI.TagMismatchTerm, false,
1100 LI, CheckFailTerm->getParent());
1101
1102 IRB.SetInsertPoint(CheckFailTerm);
1103 InlineAsm *Asm;
1104 switch (TargetTriple.getArch()) {
1105 case Triple::x86_64:
1106 // The signal handler will find the data address in rdi.
1108 FunctionType::get(VoidTy, {TCI.PtrLong->getType()}, false),
1109 "int3\nnopl " +
1110 itostr(0x40 + (AccessInfo & HWASanAccessInfo::RuntimeMask)) +
1111 "(%rax)",
1112 "{rdi}",
1113 /*hasSideEffects=*/true);
1114 break;
1115 case Triple::aarch64:
1116 case Triple::aarch64_be:
1117 // The signal handler will find the data address in x0.
1119 FunctionType::get(VoidTy, {TCI.PtrLong->getType()}, false),
1120 "brk #" + itostr(0x900 + (AccessInfo & HWASanAccessInfo::RuntimeMask)),
1121 "{x0}",
1122 /*hasSideEffects=*/true);
1123 break;
1124 case Triple::riscv64:
1125 // The signal handler will find the data address in x10.
1127 FunctionType::get(VoidTy, {TCI.PtrLong->getType()}, false),
1128 "ebreak\naddiw x0, x11, " +
1129 itostr(0x40 + (AccessInfo & HWASanAccessInfo::RuntimeMask)),
1130 "{x10}",
1131 /*hasSideEffects=*/true);
1132 break;
1133 default:
1134 report_fatal_error("unsupported architecture");
1135 }
1136 IRB.CreateCall(Asm, TCI.PtrLong);
1137 if (Recover)
1138 cast<UncondBrInst>(CheckFailTerm)
1139 ->setSuccessor(TCI.TagMismatchTerm->getParent());
1140}
1141
1142bool HWAddressSanitizer::ignoreMemIntrinsic(OptimizationRemarkEmitter &ORE,
1143 MemIntrinsic *MI) {
1145 return (!ClInstrumentWrites || ignoreAccess(ORE, MTI, MTI->getDest())) &&
1146 (!ClInstrumentReads || ignoreAccess(ORE, MTI, MTI->getSource()));
1147 }
1148 if (isa<MemSetInst>(MI))
1149 return !ClInstrumentWrites || ignoreAccess(ORE, MI, MI->getDest());
1150 return false;
1151}
1152
1153void HWAddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
1154 IRBuilder<> IRB(MI);
1155 if (isa<MemTransferInst>(MI)) {
1157 MI->getOperand(0), MI->getOperand(1),
1158 IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)};
1159
1160 if (UseMatchAllCallback)
1161 Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
1162 IRB.CreateCall(isa<MemMoveInst>(MI) ? HwasanMemmove : HwasanMemcpy, Args);
1163 } else if (isa<MemSetInst>(MI)) {
1165 MI->getOperand(0),
1166 IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
1167 IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)};
1168 if (UseMatchAllCallback)
1169 Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
1170 IRB.CreateCall(HwasanMemset, Args);
1171 }
1172 MI->eraseFromParent();
1173}
1174
1175bool HWAddressSanitizer::instrumentMemAccess(InterestingMemoryOperand &O,
1176 DomTreeUpdater &DTU, LoopInfo *LI,
1177 const DataLayout &DL) {
1178 Value *Addr = O.getPtr();
1179
1180 LLVM_DEBUG(dbgs() << "Instrumenting: " << O.getInsn() << "\n");
1181
1182 // If the pointer is statically known to be zero, the tag check will pass
1183 // since:
1184 // 1) it has a zero tag
1185 // 2) the shadow memory corresponding to address 0 is initialized to zero and
1186 // never updated.
1187 // We can therefore elide the tag check.
1188 llvm::KnownBits Known(DL.getPointerTypeSizeInBits(Addr->getType()));
1189 llvm::computeKnownBits(Addr, Known, DL);
1190 if (Known.isZero())
1191 return false;
1192
1193 if (O.MaybeMask)
1194 return false; // FIXME
1195
1196 IRBuilder<> IRB(O.getInsn());
1197 if (!O.TypeStoreSize.isScalable() && isPowerOf2_64(O.TypeStoreSize) &&
1198 (O.TypeStoreSize / 8 <= (1ULL << (kNumberOfAccessSizes - 1))) &&
1199 (!O.Alignment || *O.Alignment >= Mapping.getObjectAlignment() ||
1200 *O.Alignment >= O.TypeStoreSize / 8)) {
1201 size_t AccessSizeIndex = TypeSizeToSizeIndex(O.TypeStoreSize);
1202 if (InstrumentWithCalls) {
1203 SmallVector<Value *, 2> Args{IRB.CreatePointerCast(Addr, IntptrTy)};
1204 if (UseMatchAllCallback)
1205 Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
1206 IRB.CreateCall(HwasanMemoryAccessCallback[O.IsWrite][AccessSizeIndex],
1207 Args);
1208 } else if (OutlinedChecks) {
1209 instrumentMemAccessOutline(Addr, O.IsWrite, AccessSizeIndex, O.getInsn(),
1210 DTU, LI);
1211 } else {
1212 instrumentMemAccessInline(Addr, O.IsWrite, AccessSizeIndex, O.getInsn(),
1213 DTU, LI);
1214 }
1215 } else {
1217 IRB.CreatePointerCast(Addr, IntptrTy),
1218 IRB.CreateUDiv(IRB.CreateTypeSize(IntptrTy, O.TypeStoreSize),
1219 ConstantInt::get(IntptrTy, 8))};
1220 if (UseMatchAllCallback)
1221 Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
1222 IRB.CreateCall(HwasanMemoryAccessCallbackSized[O.IsWrite], Args);
1223 }
1224 untagPointerOperand(O.getInsn(), Addr);
1225
1226 return true;
1227}
1228
// Set the shadow tags covering \p AI (Size bytes, rounded up to the 16-byte
// granule) to \p Tag. With short granules, a partially used final granule
// stores its used byte count in shadow and the real tag in the granule's
// last byte.
void HWAddressSanitizer::tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag,
                                   size_t Size) {
  size_t AlignedSize = alignTo(Size, Mapping.getObjectAlignment());
  if (!UseShortGranules)
    Size = AlignedSize;

  Tag = IRB.CreateTrunc(Tag, Int8Ty);
  if (InstrumentWithCalls) {
    // Delegate the whole tagging operation to the runtime.
    IRB.CreateCall(HwasanTagMemoryFunc,
                   {IRB.CreatePointerCast(AI, PtrTy), Tag,
                    ConstantInt::get(IntptrTy, AlignedSize)});
  } else {
    size_t ShadowSize = Size >> Mapping.scale();
    Value *AddrLong = untagPointer(IRB, IRB.CreatePointerCast(AI, IntptrTy));
    Value *ShadowPtr = memToShadow(AddrLong, IRB);
    // If this memset is not inlined, it will be intercepted in the hwasan
    // runtime library. That's OK, because the interceptor skips the checks if
    // the address is in the shadow region.
    // FIXME: the interceptor is not as fast as real memset. Consider lowering
    // llvm.memset right here into either a sequence of stores, or a call to
    // hwasan_tag_memory.
    if (ShadowSize)
      IRB.CreateMemSet(ShadowPtr, Tag, ShadowSize, Align(1));
    if (Size != AlignedSize) {
      // Short granule: shadow holds the used size, last byte holds the tag.
      const uint8_t SizeRemainder = Size % Mapping.getObjectAlignment().value();
      IRB.CreateStore(ConstantInt::get(Int8Ty, SizeRemainder),
                      IRB.CreateConstGEP1_32(Int8Ty, ShadowPtr, ShadowSize));
      IRB.CreateStore(
          Tag, IRB.CreateConstGEP1_32(Int8Ty, IRB.CreatePointerCast(AI, PtrTy),
                                      AlignedSize - 1));
    }
  }
}
1262
// Return the 8-bit mask XOR'ed into the stack base tag to derive the tag for
// alloca number \p AllocaNo.
unsigned HWAddressSanitizer::retagMask(unsigned AllocaNo) {
  // x86_64 has no single-instruction tag insertion, so any masked value works.
  if (TargetTriple.getArch() == Triple::x86_64)
    return AllocaNo & TagMaskByte;

  // A list of 8-bit numbers that have at most one run of non-zero bits.
  // x = x ^ (mask << 56) can be encoded as a single armv8 instruction for these
  // masks.
  // The list does not include the value 255, which is used for UAR.
  //
  // Because we are more likely to use earlier elements of this list than later
  // ones, it is sorted in increasing order of probability of collision with a
  // mask allocated (temporally) nearby. The program that generated this list
  // can be found at:
  // https://github.com/google/sanitizers/blob/master/hwaddress-sanitizer/sort_masks.py
  static const unsigned FastMasks[] = {
      0,   128, 64, 192, 32, 96,  224, 112, 240, 48, 16,  120,
      248, 56,  24, 8,   124, 252, 60,  28,  12,  4,  126, 254,
      62,  30,  14, 6,   2,   127, 63,  31,  15,  7,  3,   1};
  return FastMasks[AllocaNo % std::size(FastMasks)];
}
1283
1284Value *HWAddressSanitizer::applyTagMask(IRBuilder<> &IRB, Value *OldTag) {
1285 if (TagMaskByte == 0xFF)
1286 return OldTag; // No need to clear the tag byte.
1287 return IRB.CreateAnd(OldTag,
1288 ConstantInt::get(OldTag->getType(), TagMaskByte));
1289}
1290
1291Value *HWAddressSanitizer::getNextTagWithCall(IRBuilder<> &IRB) {
1292 return IRB.CreateZExt(IRB.CreateCall(HwasanGenerateTagFunc), IntptrTy);
1293}
1294
1295Value *HWAddressSanitizer::getStackBaseTag(IRBuilder<> &IRB) {
1297 return nullptr;
1298 if (StackBaseTag)
1299 return StackBaseTag;
1300 // Extract some entropy from the stack pointer for the tags.
1301 // Take bits 20..28 (ASLR entropy) and xor with bits 0..8 (these differ
1302 // between functions).
1303 Value *FramePointerLong = getCachedFP(IRB);
1304 Value *StackTag =
1305 applyTagMask(IRB, IRB.CreateXor(FramePointerLong,
1306 IRB.CreateLShr(FramePointerLong, 20)));
1307 StackTag->setName("hwasan.stack.base.tag");
1308 return StackTag;
1309}
1310
1311Value *HWAddressSanitizer::getAllocaTag(IRBuilder<> &IRB, Value *StackTag,
1312 unsigned AllocaNo) {
1314 return getNextTagWithCall(IRB);
1315 return IRB.CreateXor(
1316 StackTag, ConstantInt::get(StackTag->getType(), retagMask(AllocaNo)));
1317}
1318
1319Value *HWAddressSanitizer::getUARTag(IRBuilder<> &IRB) {
1320 Value *FramePointerLong = getCachedFP(IRB);
1321 Value *UARTag =
1322 applyTagMask(IRB, IRB.CreateLShr(FramePointerLong, PointerTagShift));
1323
1324 UARTag->setName("hwasan.uar.tag");
1325 return UARTag;
1326}
1327
1328// Add a tag to an address.
1329Value *HWAddressSanitizer::tagPointer(IRBuilder<> &IRB, Type *Ty,
1330 Value *PtrLong, Value *Tag) {
1331 assert(!UsePageAliases);
1332 Value *TaggedPtrLong;
1333 if (CompileKernel) {
1334 // Kernel addresses have 0xFF in the most significant byte.
1335 Value *ShiftedTag =
1336 IRB.CreateOr(IRB.CreateShl(Tag, PointerTagShift),
1337 ConstantInt::get(IntptrTy, (1ULL << PointerTagShift) - 1));
1338 TaggedPtrLong = IRB.CreateAnd(PtrLong, ShiftedTag);
1339 } else {
1340 // Userspace can simply do OR (tag << PointerTagShift);
1341 Value *ShiftedTag = IRB.CreateShl(Tag, PointerTagShift);
1342 TaggedPtrLong = IRB.CreateOr(PtrLong, ShiftedTag);
1343 }
1344 return IRB.CreateIntToPtr(TaggedPtrLong, Ty);
1345}
1346
1347// Remove tag from an address.
1348Value *HWAddressSanitizer::untagPointer(IRBuilder<> &IRB, Value *PtrLong) {
1349 assert(!UsePageAliases);
1350 Value *UntaggedPtrLong;
1351 if (CompileKernel) {
1352 // Kernel addresses have 0xFF in the most significant byte.
1353 UntaggedPtrLong =
1354 IRB.CreateOr(PtrLong, ConstantInt::get(PtrLong->getType(),
1355 TagMaskByte << PointerTagShift));
1356 } else {
1357 // Userspace addresses have 0x00.
1358 UntaggedPtrLong = IRB.CreateAnd(
1359 PtrLong, ConstantInt::get(PtrLong->getType(),
1360 ~(TagMaskByte << PointerTagShift)));
1361 }
1362 return UntaggedPtrLong;
1363}
1364
1365Value *HWAddressSanitizer::getHwasanThreadSlotPtr(IRBuilder<> &IRB) {
1366 // Android provides a fixed TLS slot for sanitizers. See TLS_SLOT_SANITIZER
1367 // in Bionic's libc/platform/bionic/tls_defines.h.
1368 constexpr int SanitizerSlot = 6;
1369 if (TargetTriple.isAArch64() && TargetTriple.isAndroid())
1370 return memtag::getAndroidSlotPtr(IRB, SanitizerSlot);
1371 return ThreadPtrGlobal;
1372}
1373
1374Value *HWAddressSanitizer::getCachedFP(IRBuilder<> &IRB) {
1375 if (!CachedFP)
1376 CachedFP = memtag::getFP(IRB);
1377 return CachedFP;
1378}
1379
1380Value *HWAddressSanitizer::getFrameRecordInfo(IRBuilder<> &IRB) {
1381 // Prepare ring buffer data.
1382 Value *PC = memtag::getPC(TargetTriple, IRB);
1383 Value *FP = getCachedFP(IRB);
1384
1385 // Mix FP and PC.
1386 // Assumptions:
1387 // PC is 0x0000PPPPPPPPPPPP (48 bits are meaningful, others are zero)
1388 // FP is 0xfffffffffffFFFF0 (4 lower bits are zero)
1389 // We only really need ~20 lower non-zero bits (FFFF), so we mix like this:
1390 // 0xFFFFPPPPPPPPPPPP
1391 //
1392 // FP works because in AArch64FrameLowering::getFrameIndexReference, we
1393 // prefer FP-relative offsets for functions compiled with HWASan.
1394 FP = IRB.CreateShl(FP, 44);
1395 return IRB.CreateOr(PC, FP);
1396}
1397
1398void HWAddressSanitizer::emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord) {
1399 if (!Mapping.isInTls())
1400 ShadowBase = getShadowNonTls(IRB);
1401 else if (!WithFrameRecord && TargetTriple.isAndroid())
1402 ShadowBase = getDynamicShadowIfunc(IRB);
1403
1404 if (!WithFrameRecord && ShadowBase)
1405 return;
1406
1407 Value *SlotPtr = nullptr;
1408 Value *ThreadLong = nullptr;
1409 Value *ThreadLongMaybeUntagged = nullptr;
1410
1411 auto getThreadLongMaybeUntagged = [&]() {
1412 if (!SlotPtr)
1413 SlotPtr = getHwasanThreadSlotPtr(IRB);
1414 if (!ThreadLong)
1415 ThreadLong = IRB.CreateLoad(IntptrTy, SlotPtr);
1416 // Extract the address field from ThreadLong. Unnecessary on AArch64 with
1417 // TBI.
1418 return TargetTriple.isAArch64() ? ThreadLong
1419 : untagPointer(IRB, ThreadLong);
1420 };
1421
1422 if (WithFrameRecord) {
1423 switch (ClRecordStackHistory) {
1424 case libcall: {
1425 // Emit a runtime call into hwasan rather than emitting instructions for
1426 // recording stack history.
1427 Value *FrameRecordInfo = getFrameRecordInfo(IRB);
1428 IRB.CreateCall(HwasanRecordFrameRecordFunc, {FrameRecordInfo});
1429 break;
1430 }
1431 case instr: {
1432 ThreadLongMaybeUntagged = getThreadLongMaybeUntagged();
1433
1434 StackBaseTag = IRB.CreateAShr(ThreadLong, 3);
1435
1436 // Store data to ring buffer.
1437 Value *FrameRecordInfo = getFrameRecordInfo(IRB);
1438 Value *RecordPtr =
1439 IRB.CreateIntToPtr(ThreadLongMaybeUntagged, IRB.getPtrTy(0));
1440 IRB.CreateStore(FrameRecordInfo, RecordPtr);
1441
1442 IRB.CreateStore(memtag::incrementThreadLong(IRB, ThreadLong, 8), SlotPtr);
1443 break;
1444 }
1445 case none: {
1447 "A stack history recording mode should've been selected.");
1448 }
1449 }
1450 }
1451
1452 if (!ShadowBase) {
1453 if (!ThreadLongMaybeUntagged)
1454 ThreadLongMaybeUntagged = getThreadLongMaybeUntagged();
1455
1456 // Get shadow base address by aligning RecordPtr up.
1457 // Note: this is not correct if the pointer is already aligned.
1458 // Runtime library will make sure this never happens.
1459 ShadowBase = IRB.CreateAdd(
1460 IRB.CreateOr(
1461 ThreadLongMaybeUntagged,
1462 ConstantInt::get(IntptrTy, (1ULL << kShadowBaseAlignment) - 1)),
1463 ConstantInt::get(IntptrTy, 1), "hwasan.shadow");
1464 ShadowBase = IRB.CreateIntToPtr(ShadowBase, PtrTy);
1465 }
1466}
1467
1468void HWAddressSanitizer::instrumentLandingPads(
1469 SmallVectorImpl<Instruction *> &LandingPadVec) {
1470 for (auto *LP : LandingPadVec) {
1471 IRBuilder<> IRB(LP->getNextNode());
1472 IRB.CreateCall(
1473 HwasanHandleVfork,
1475 IRB, (TargetTriple.getArch() == Triple::x86_64) ? "rsp" : "sp")});
1476 }
1477}
1478
// Tag every interesting alloca, rewrite its uses to the tagged address, and
// retag to the UAR tag on every exit path (and at lifetime ends when
// use-after-scope is enabled and the lifetime structure is analyzable).
void HWAddressSanitizer::instrumentStack(OptimizationRemarkEmitter &ORE,
                                         memtag::StackInfo &SInfo,
                                         Value *StackTag, Value *UARTag,
                                         const DominatorTree &DT,
                                         const PostDominatorTree &PDT,
                                         const LoopInfo &LI) {
  // Ideally, we want to calculate tagged stack base pointer, and rewrite all
  // alloca addresses using that. Unfortunately, offsets are not known yet
  // (unless we use ASan-style mega-alloca). Instead we keep the base tag in a
  // temp, shift-OR it into each alloca address and xor with the retag mask.
  // This generates one extra instruction per alloca use.
  unsigned int I = 0;

  for (auto &KV : SInfo.AllocasToInstrument) {
    auto N = I++;
    auto *AI = KV.first;
    memtag::AllocaInfo &Info = KV.second;
    IRBuilder<> IRB(AI->getNextNode());

    // Replace uses of the alloca with tagged address.
    Value *Tag = getAllocaTag(IRB, StackTag, N);
    Value *AILong = IRB.CreatePointerCast(AI, IntptrTy);
    Value *AINoTagLong = untagPointer(IRB, AILong);
    Value *Replacement = tagPointer(IRB, AI->getType(), AINoTagLong, Tag);
    std::string Name =
        AI->hasName() ? AI->getName().str() : "alloca." + itostr(N);
    Replacement->setName(Name + ".hwasan");

    size_t Size = memtag::getAllocaSizeInBytes(*AI);
    size_t AlignedSize = alignTo(Size, Mapping.getObjectAlignment());

    // Keep the cast itself and lifetime markers on the untagged pointer.
    AI->replaceUsesWithIf(Replacement, [AILong](const Use &U) {
      auto *User = U.getUser();
      return User != AILong && !isa<LifetimeIntrinsic>(User);
    });

    memtag::annotateDebugRecords(Info, retagMask(N));

    // Tag the alloca's memory after each lifetime.start.
    auto TagStarts = [&]() {
      for (IntrinsicInst *Start : Info.LifetimeStart) {
        IRB.SetInsertPoint(Start->getNextNode());
        tagAlloca(IRB, AI, Tag, Size);
      }
    };
    // Retag to the UAR tag at an exit/lifetime-end point.
    auto TagEnd = [&](Instruction *Node) {
      IRB.SetInsertPoint(Node);
      // When untagging, use the `AlignedSize` because we need to set the tags
      // for the entire alloca to original. If we used `Size` here, we would
      // keep the last granule tagged, and store zero in the last byte of the
      // last granule, due to how short granules are implemented.
      tagAlloca(IRB, AI, UARTag, AlignedSize);
    };
    auto EraseLifetimes = [&]() {
      for (auto &II : Info.LifetimeStart)
        II->eraseFromParent();
      for (auto &II : Info.LifetimeEnd)
        II->eraseFromParent();
    };
    // Calls to functions that may return twice (e.g. setjmp) confuse the
    // postdominator analysis, and will leave us to keep memory tagged after
    // function return. Work around this by always untagging at every return
    // statement if return_twice functions are called.
    if (DetectUseAfterScope && !SInfo.CallsReturnTwice &&
        memtag::isSupportedLifetime(Info, &DT, &LI)) {
      TagStarts();
      memtag::forAllReachableExits(DT, PDT, LI, Info, SInfo.RetVec, TagEnd);
      ORE.emit([&]() {
        return OptimizationRemark(DEBUG_TYPE, "supportedLifetime", AI);
      });
    } else if (DetectUseAfterScope && ClStrictUseAfterScope) {
      // SInfo.CallsReturnTwice || !isStandardLifetime
      ORE.emit([&]() {
        return OptimizationRemarkMissed(DEBUG_TYPE, "supportedLifetime", AI);
      });

      tagAlloca(IRB, AI, Tag, Size);
      TagStarts();
      for_each(Info.LifetimeEnd, TagEnd);
      for_each(SInfo.RetVec, TagEnd);
      EraseLifetimes();
    } else {
      tagAlloca(IRB, AI, Tag, Size);
      for_each(SInfo.RetVec, TagEnd);
      EraseLifetimes();
    }
    memtag::alignAndPadAlloca(Info, Mapping.getObjectAlignment());
  }
}
1567
1569 bool Skip) {
1570 if (Skip) {
1571 ORE.emit([&]() {
1572 return OptimizationRemark(DEBUG_TYPE, "Skip", &F)
1573 << "Skipped: F=" << ore::NV("Function", &F);
1574 });
1575 } else {
1576 ORE.emit([&]() {
1577 return OptimizationRemarkMissed(DEBUG_TYPE, "Sanitize", &F)
1578 << "Sanitized: F=" << ore::NV("Function", &F);
1579 });
1580 }
1581}
1582
1583bool HWAddressSanitizer::selectiveInstrumentationShouldSkip(
1585 auto SkipHot = [&]() {
1586 if (!ClHotPercentileCutoff.getNumOccurrences())
1587 return false;
1589 ProfileSummaryInfo *PSI =
1590 MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
1591 if (!PSI || !PSI->hasProfileSummary()) {
1592 ++NumNoProfileSummaryFuncs;
1593 return false;
1594 }
1595 return PSI->isFunctionHotInCallGraphNthPercentile(
1597 };
1598
1599 auto SkipRandom = [&]() {
1600 if (!ClRandomKeepRate.getNumOccurrences())
1601 return false;
1602 std::bernoulli_distribution D(ClRandomKeepRate);
1603 return !D(*Rng);
1604 };
1605
1606 bool Skip = SkipRandom() || SkipHot();
1608 return Skip;
1609}
1610
1611void HWAddressSanitizer::sanitizeFunction(Function &F,
1613 if (&F == HwasanCtorFunction)
1614 return;
1615
1616 // Do not apply any instrumentation for naked functions.
1617 if (F.hasFnAttribute(Attribute::Naked))
1618 return;
1619
1620 if (!F.hasFnAttribute(Attribute::SanitizeHWAddress))
1621 return;
1622
1623 if (F.empty())
1624 return;
1625
1626 if (F.isPresplitCoroutine())
1627 return;
1628
1629 NumTotalFuncs++;
1630
1633
1634 if (selectiveInstrumentationShouldSkip(F, FAM))
1635 return;
1636
1637 NumInstrumentedFuncs++;
1638
1639 LLVM_DEBUG(dbgs() << "Function: " << F.getName() << "\n");
1640
1641 SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
1642 SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
1643 SmallVector<Instruction *, 8> LandingPadVec;
1645
1647 for (auto &Inst : instructions(F)) {
1648 if (InstrumentStack) {
1649 SIB.visit(ORE, Inst);
1650 }
1651
1652 if (InstrumentLandingPads && isa<LandingPadInst>(Inst))
1653 LandingPadVec.push_back(&Inst);
1654
1655 getInterestingMemoryOperands(ORE, &Inst, TLI, OperandsToInstrument);
1656
1658 if (!ignoreMemIntrinsic(ORE, MI))
1659 IntrinToInstrument.push_back(MI);
1660 }
1661
1662 memtag::StackInfo &SInfo = SIB.get();
1663
1664 initializeCallbacks(*F.getParent());
1665
1666 if (!LandingPadVec.empty())
1667 instrumentLandingPads(LandingPadVec);
1668
1669 if (SInfo.AllocasToInstrument.empty() && F.hasPersonalityFn() &&
1670 F.getPersonalityFn()->getName() == kHwasanPersonalityThunkName) {
1671 // __hwasan_personality_thunk is a no-op for functions without an
1672 // instrumented stack, so we can drop it.
1673 F.setPersonalityFn(nullptr);
1674 }
1675
1676 if (SInfo.AllocasToInstrument.empty() && OperandsToInstrument.empty() &&
1677 IntrinToInstrument.empty())
1678 return;
1679
1680 assert(!ShadowBase);
1681
1682 BasicBlock::iterator InsertPt = F.getEntryBlock().begin();
1683 IRBuilder<> EntryIRB(&F.getEntryBlock(), InsertPt);
1684 emitPrologue(EntryIRB,
1685 /*WithFrameRecord*/ ClRecordStackHistory != none &&
1686 Mapping.withFrameRecord() &&
1687 !SInfo.AllocasToInstrument.empty());
1688
1689 if (!SInfo.AllocasToInstrument.empty()) {
1692 const LoopInfo &LI = FAM.getResult<LoopAnalysis>(F);
1693 Value *StackTag = getStackBaseTag(EntryIRB);
1694 Value *UARTag = getUARTag(EntryIRB);
1695 instrumentStack(ORE, SInfo, StackTag, UARTag, DT, PDT, LI);
1696 }
1697
1698 // If we split the entry block, move any allocas that were originally in the
1699 // entry block back into the entry block so that they aren't treated as
1700 // dynamic allocas.
1701 if (EntryIRB.GetInsertBlock() != &F.getEntryBlock()) {
1702 InsertPt = F.getEntryBlock().begin();
1703 for (Instruction &I :
1704 llvm::make_early_inc_range(*EntryIRB.GetInsertBlock())) {
1705 if (auto *AI = dyn_cast<AllocaInst>(&I))
1706 if (isa<ConstantInt>(AI->getArraySize()))
1707 I.moveBefore(F.getEntryBlock(), InsertPt);
1708 }
1709 }
1710
1714 DomTreeUpdater DTU(DT, PDT, DomTreeUpdater::UpdateStrategy::Lazy);
1715 const DataLayout &DL = F.getDataLayout();
1716 for (auto &Operand : OperandsToInstrument)
1717 instrumentMemAccess(Operand, DTU, LI, DL);
1718 DTU.flush();
1719
1720 if (ClInstrumentMemIntrinsics && !IntrinToInstrument.empty()) {
1721 for (auto *Inst : IntrinToInstrument)
1722 instrumentMemIntrinsic(Inst);
1723 }
1724
1725 ShadowBase = nullptr;
1726 StackBaseTag = nullptr;
1727 CachedFP = nullptr;
1728}
1729
1730void HWAddressSanitizer::instrumentGlobal(GlobalVariable *GV, uint8_t Tag) {
1731 assert(!UsePageAliases);
1732 Constant *Initializer = GV->getInitializer();
1733 uint64_t SizeInBytes =
1734 M.getDataLayout().getTypeAllocSize(Initializer->getType());
1735 uint64_t NewSize = alignTo(SizeInBytes, Mapping.getObjectAlignment());
1736 if (SizeInBytes != NewSize) {
1737 // Pad the initializer out to the next multiple of 16 bytes and add the
1738 // required short granule tag.
1739 std::vector<uint8_t> Init(NewSize - SizeInBytes, 0);
1740 Init.back() = Tag;
1742 Initializer = ConstantStruct::getAnon({Initializer, Padding});
1743 }
1744
1745 auto *NewGV = new GlobalVariable(M, Initializer->getType(), GV->isConstant(),
1746 GlobalValue::ExternalLinkage, Initializer,
1747 GV->getName() + ".hwasan");
1748 NewGV->copyAttributesFrom(GV);
1749 NewGV->setLinkage(GlobalValue::PrivateLinkage);
1750 NewGV->copyMetadata(GV, 0);
1751 NewGV->setAlignment(
1752 std::max(GV->getAlign().valueOrOne(), Mapping.getObjectAlignment()));
1753
1754 // It is invalid to ICF two globals that have different tags. In the case
1755 // where the size of the global is a multiple of the tag granularity the
1756 // contents of the globals may be the same but the tags (i.e. symbol values)
1757 // may be different, and the symbols are not considered during ICF. In the
1758 // case where the size is not a multiple of the granularity, the short granule
1759 // tags would discriminate two globals with different tags, but there would
1760 // otherwise be nothing stopping such a global from being incorrectly ICF'd
1761 // with an uninstrumented (i.e. tag 0) global that happened to have the short
1762 // granule tag in the last byte.
1763 NewGV->setUnnamedAddr(GlobalValue::UnnamedAddr::None);
1764
1765 // Descriptor format (assuming little-endian):
1766 // bytes 0-3: relative address of global
1767 // bytes 4-6: size of global (16MB ought to be enough for anyone, but in case
1768 // it isn't, we create multiple descriptors)
1769 // byte 7: tag
1770 auto *DescriptorTy = StructType::get(Int32Ty, Int32Ty);
1771 const uint64_t MaxDescriptorSize = 0xfffff0;
1772 for (uint64_t DescriptorPos = 0; DescriptorPos < SizeInBytes;
1773 DescriptorPos += MaxDescriptorSize) {
1774 auto *Descriptor =
1775 new GlobalVariable(M, DescriptorTy, true, GlobalValue::PrivateLinkage,
1776 nullptr, GV->getName() + ".hwasan.descriptor");
1777 auto *GVRelPtr = ConstantExpr::getTrunc(
1780 ConstantExpr::getPtrToInt(NewGV, Int64Ty),
1781 ConstantExpr::getPtrToInt(Descriptor, Int64Ty)),
1782 ConstantInt::get(Int64Ty, DescriptorPos)),
1783 Int32Ty);
1784 uint32_t Size = std::min(SizeInBytes - DescriptorPos, MaxDescriptorSize);
1785 auto *SizeAndTag = ConstantInt::get(Int32Ty, Size | (uint32_t(Tag) << 24));
1786 Descriptor->setComdat(NewGV->getComdat());
1787 Descriptor->setInitializer(ConstantStruct::getAnon({GVRelPtr, SizeAndTag}));
1788 Descriptor->setSection("hwasan_globals");
1789 Descriptor->setMetadata(LLVMContext::MD_associated,
1791 appendToCompilerUsed(M, Descriptor);
1792 }
1793
1796 ConstantExpr::getPtrToInt(NewGV, Int64Ty),
1797 ConstantInt::get(Int64Ty, uint64_t(Tag) << PointerTagShift)),
1798 GV->getType());
1799 auto *Alias = GlobalAlias::create(GV->getValueType(), GV->getAddressSpace(),
1800 GV->getLinkage(), "", Aliasee, &M);
1801 Alias->setVisibility(GV->getVisibility());
1802 Alias->takeName(GV);
1803 GV->replaceAllUsesWith(Alias);
1804 GV->eraseFromParent();
1805}
1806
// Instruments all eligible globals in the module: each one is rewritten by
// instrumentGlobal() to carry a pointer tag, with the tag sequence seeded from
// an MD5 hash of the source file name so tagging is deterministic for a given
// translation unit but varies across TUs.
// NOTE(review): this is a doxygen text dump; original source line 1810 (the
// first skip condition inside the collection loop, a link-heavy line) was
// stripped by extraction, so the first `continue;` below has lost its `if`
// header — consult the upstream file before editing this region.
1807 void HWAddressSanitizer::instrumentGlobals() {
1808 std::vector<GlobalVariable *> Globals;
// First pass: collect the globals that are safe to tag.
1809 for (GlobalVariable &GV : M.globals()) {
// (stripped condition from original line 1810 belongs here)
1811 continue;
1812
// Skip externally-defined globals, LLVM-internal globals, and TLS globals —
// none of these can be replaced with a tagged alias.
1813 if (GV.isDeclarationForLinker() || GV.getName().starts_with("llvm.") ||
1814 GV.isThreadLocal())
1815 continue;
1816
1817 // Common symbols can't have aliases point to them, so they can't be tagged.
1818 if (GV.hasCommonLinkage())
1819 continue;
1820
1821 if (ClAllGlobals) {
1822 // Avoid instrumenting intrinsic global variables.
1823 if (GV.getSection() == "llvm.metadata")
1824 continue;
1825 } else {
1826 // Globals with custom sections may be used in __start_/__stop_
1827 // enumeration, which would be broken both by adding tags and potentially
1828 // by the extra padding/alignment that we insert.
1829 if (GV.hasSection())
1830 continue;
1831 }
1832
1833 Globals.push_back(&GV);
1834 }
1835
// Seed the tag sequence from the source file name so distinct TUs get
// different (but reproducible) starting tags.
1836 MD5 Hasher;
1837 Hasher.update(M.getSourceFileName());
1838 MD5::MD5Result Hash;
1839 Hasher.final(Hash);
1840 uint8_t Tag = Hash[0];
1841
// The wrap-to-16 logic below requires at least tags [16, TagMaskByte].
1842 assert(TagMaskByte >= 16);
1843
// Second pass: assign sequential tags, skipping values that would collide
// with the short-granule tag range [0, 16) or exceed the usable tag mask.
1844 for (GlobalVariable *GV : Globals) {
1845 // Don't allow globals to be tagged with something that looks like a
1846 // short-granule tag, otherwise we lose inter-granule overflow detection, as
1847 // the fast path shadow-vs-address check succeeds.
1848 if (Tag < 16 || Tag > TagMaskByte)
1849 Tag = 16;
1850 instrumentGlobal(GV, Tag++);
1851 }
1852 }
1853
// Redirects the personality function of every sanitized function through a
// generated thunk that calls __hwasan_personality_wrapper, which untags stack
// frames as the unwinder walks past them. One thunk is created per distinct
// original personality function (plus one for functions with no personality
// that are not nounwind).
// NOTE(review): doxygen extraction stripped original line 1861 here — the
// declaration of the `PersonalityFns` map (personality constant -> functions
// using it) that the loop below populates; confirm against upstream.
1854 void HWAddressSanitizer::instrumentPersonalityFunctions() {
1855 // We need to untag stack frames as we unwind past them. That is the job of
1856 // the personality function wrapper, which either wraps an existing
1857 // personality function or acts as a personality function on its own. Each
1858 // function that has a personality function or that can be unwound past has
1859 // its personality function changed to a thunk that calls the personality
1860 // function wrapper in the runtime.
// Group sanitized functions by their (pointer-cast-stripped) personality
// function; the nullptr key collects unwindable functions with none.
1862 for (Function &F : M) {
1863 if (F.isDeclaration() || !F.hasFnAttribute(Attribute::SanitizeHWAddress))
1864 continue;
1865
1866 if (F.hasPersonalityFn()) {
1867 PersonalityFns[F.getPersonalityFn()->stripPointerCasts()].push_back(&F);
1868 } else if (!F.hasFnAttribute(Attribute::NoUnwind)) {
1869 PersonalityFns[nullptr].push_back(&F);
1870 }
1871 }
1872
1873 if (PersonalityFns.empty())
1874 return;
1875
// Runtime entry points the thunks forward to. The _Unwind_* declarations
// here use a dummy void() type; only their addresses are passed along.
1876 FunctionCallee HwasanPersonalityWrapper = M.getOrInsertFunction(
1877 "__hwasan_personality_wrapper", Int32Ty, Int32Ty, Int32Ty, Int64Ty, PtrTy,
1878 PtrTy, PtrTy, PtrTy, PtrTy);
1879 FunctionCallee UnwindGetGR = M.getOrInsertFunction("_Unwind_GetGR", VoidTy);
1880 FunctionCallee UnwindGetCFA = M.getOrInsertFunction("_Unwind_GetCFA", VoidTy);
1881
// Emit one thunk per distinct personality function.
1882 for (auto &P : PersonalityFns) {
// Thunks for named personalities get a dotted suffix so each original
// personality maps to a uniquely-named thunk.
1883 std::string ThunkName = kHwasanPersonalityThunkName;
1884 if (P.first)
1885 ThunkName += ("." + P.first->getName()).str();
1886 FunctionType *ThunkFnTy = FunctionType::get(
1887 Int32Ty, {Int32Ty, Int32Ty, Int64Ty, PtrTy, PtrTy}, false);
1888 bool IsLocal = P.first && (!isa<GlobalValue>(P.first) ||
1889 cast<GlobalValue>(P.first)->hasLocalLinkage());
// NOTE(review): extraction stripped original lines 1891-1892 here — the
// linkage arguments to Function::Create (selected based on IsLocal);
// confirm against upstream before editing.
1890 auto *ThunkFn = Function::Create(ThunkFnTy,
1893 ThunkName, &M);
1894 // TODO: think about other attributes as well.
// Propagate BTI enforcement if any grouped function requires it, so the
// thunk remains a valid indirect-branch target.
1895 if (any_of(P.second, [](const Function *F) {
1896 return F->hasFnAttribute("branch-target-enforcement");
1897 })) {
1898 ThunkFn->addFnAttr("branch-target-enforcement");
1899 }
// Non-local thunks may be emitted by multiple TUs: hide them and place
// them in a comdat so the linker deduplicates.
1900 if (!IsLocal) {
1901 ThunkFn->setVisibility(GlobalValue::HiddenVisibility);
1902 ThunkFn->setComdat(M.getOrInsertComdat(ThunkName));
1903 }
1904
// Thunk body: tail-call the wrapper, forwarding the five personality
// arguments plus the original personality (or null) and the unwinder
// accessor addresses the wrapper needs.
1905 auto *BB = BasicBlock::Create(*C, "entry", ThunkFn);
1906 IRBuilder<> IRB(BB);
1907 CallInst *WrapperCall = IRB.CreateCall(
1908 HwasanPersonalityWrapper,
1909 {ThunkFn->getArg(0), ThunkFn->getArg(1), ThunkFn->getArg(2),
1910 ThunkFn->getArg(3), ThunkFn->getArg(4),
1911 P.first ? P.first : Constant::getNullValue(PtrTy),
1912 UnwindGetGR.getCallee(), UnwindGetCFA.getCallee()});
1913 WrapperCall->setTailCall();
1914 IRB.CreateRet(WrapperCall);
1915
// Point every function in this group at the new thunk.
1916 for (Function *F : P.second)
1917 F->setPersonalityFn(ThunkFn);
1918 }
1919 }
1920
1921void HWAddressSanitizer::ShadowMapping::init(Triple &TargetTriple,
1922 bool InstrumentWithCalls,
1923 bool CompileKernel) {
1924 // Start with defaults.
1925 Scale = kDefaultShadowScale;
1926 Kind = OffsetKind::kTls;
1927 WithFrameRecord = true;
1928
1929 // Tune for the target.
1930 if (TargetTriple.isOSFuchsia()) {
1931 // Fuchsia is always PIE, which means that the beginning of the address
1932 // space is always available.
1933 Kind = OffsetKind::kGlobal;
1934 } else if (CompileKernel || InstrumentWithCalls) {
1935 SetFixed(0);
1936 WithFrameRecord = false;
1937 }
1938
1939 WithFrameRecord = optOr(ClFrameRecords, WithFrameRecord);
1940
1941 // Apply the last of ClMappingOffset and ClMappingOffsetDynamic.
1942 Kind = optOr(ClMappingOffsetDynamic, Kind);
1943 if (ClMappingOffset.getNumOccurrences() > 0 &&
1944 !(ClMappingOffsetDynamic.getNumOccurrences() > 0 &&
1945 ClMappingOffsetDynamic.getPosition() > ClMappingOffset.getPosition())) {
1946 SetFixed(ClMappingOffset);
1947 }
1948}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static cl::opt< bool > ClUseStackSafety("stack-tagging-use-stack-safety", cl::Hidden, cl::init(true), cl::desc("Use Stack Safety analysis results"))
static cl::opt< StackTaggingRecordStackHistoryMode > ClRecordStackHistory("stack-tagging-record-stack-history", cl::desc("Record stack frames with tagged allocations in a thread-local " "ring buffer"), cl::values(clEnumVal(none, "Do not record stack ring history"), clEnumVal(instr, "Insert instructions into the prologue for " "storing into the stack ring buffer")), cl::Hidden, cl::init(none))
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static const uint64_t kDefaultShadowScale
static cl::opt< std::string > ClMemoryAccessCallbackPrefix("asan-memory-access-callback-prefix", cl::desc("Prefix for memory access callbacks"), cl::Hidden, cl::init("__asan_"))
static cl::opt< bool > ClInstrumentWrites("asan-instrument-writes", cl::desc("instrument write instructions"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClInstrumentByval("asan-instrument-byval", cl::desc("instrument byval call arguments"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClGlobals("asan-globals", cl::desc("Handle global objects"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClInstrumentAtomics("asan-instrument-atomics", cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClUseAfterScope("asan-use-after-scope", cl::desc("Check stack-use-after-scope"), cl::Hidden, cl::init(false))
static const size_t kNumberOfAccessSizes
static cl::opt< bool > ClInstrumentReads("asan-instrument-reads", cl::desc("instrument read instructions"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClKasanMemIntrinCallbackPrefix("asan-kernel-mem-intrinsic-prefix", cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden, cl::init(false))
static cl::opt< uint64_t > ClMappingOffset("asan-mapping-offset", cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"), cl::Hidden, cl::init(0))
Expand Atomic instructions
This file contains the simple types necessary to represent the attributes associated with functions a...
static uint64_t scale(uint64_t Num, uint32_t N, uint32_t D)
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
#define clEnumVal(ENUMVAL, DESC)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file contains constants used for implementing Dwarf debug support.
#define DEBUG_TYPE
This is the interface for a simple mod/ref and alias analysis over globals.
static size_t TypeSizeToSizeIndex(uint32_t TypeSize)
static cl::opt< bool > ClInstrumentWrites("hwasan-instrument-writes", cl::desc("instrument write instructions"), cl::Hidden, cl::init(true))
static cl::opt< uint64_t > ClMappingOffset("hwasan-mapping-offset", cl::desc("HWASan shadow mapping offset [EXPERIMENTAL]"), cl::Hidden)
static cl::opt< RecordStackHistoryMode > ClRecordStackHistory("hwasan-record-stack-history", cl::desc("Record stack frames with tagged allocations in a thread-local " "ring buffer"), cl::values(clEnumVal(none, "Do not record stack ring history"), clEnumVal(instr, "Insert instructions into the prologue for " "storing into the stack ring buffer directly"), clEnumVal(libcall, "Add a call to __hwasan_add_frame_record for " "storing into the stack ring buffer")), cl::Hidden, cl::init(instr))
const char kHwasanModuleCtorName[]
static cl::opt< bool > ClFrameRecords("hwasan-with-frame-record", cl::desc("Use ring buffer for stack allocations"), cl::Hidden)
static cl::opt< int > ClMatchAllTag("hwasan-match-all-tag", cl::desc("don't report bad accesses via pointers with this tag"), cl::Hidden, cl::init(-1))
static cl::opt< bool > ClUseAfterScope("hwasan-use-after-scope", cl::desc("detect use after scope within function"), cl::Hidden, cl::init(true))
const char kHwasanNoteName[]
static cl::opt< uint64_t > ClTagBits("hwasan-tag-bits", cl::desc("Restrict tag to at most N bits. Needs to be > 4."), cl::Hidden, cl::init(0))
static const unsigned kShadowBaseAlignment
static cl::opt< bool > ClGenerateTagsWithCalls("hwasan-generate-tags-with-calls", cl::desc("generate new tags with runtime library calls"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClInstrumentReads("hwasan-instrument-reads", cl::desc("instrument read instructions"), cl::Hidden, cl::init(true))
static cl::opt< float > ClRandomKeepRate("hwasan-random-rate", cl::desc("Probability value in the range [0.0, 1.0] " "to keep instrumentation of a function. " "Note: instrumentation can be skipped randomly " "OR because of the hot percentile cutoff, if " "both are supplied."))
static cl::opt< bool > ClInstrumentWithCalls("hwasan-instrument-with-calls", cl::desc("instrument reads and writes with callbacks"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClUseStackSafety("hwasan-use-stack-safety", cl::Hidden, cl::init(true), cl::Hidden, cl::desc("Use Stack Safety analysis results"), cl::Optional)
static cl::opt< bool > ClInstrumentAtomics("hwasan-instrument-atomics", cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClInstrumentStack("hwasan-instrument-stack", cl::desc("instrument stack (allocas)"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClStrictUseAfterScope("hwasan-strict-use-after-scope", cl::desc("for complicated lifetimes, tag both on end and return"), cl::Hidden, cl::init(true))
static cl::opt< OffsetKind > ClMappingOffsetDynamic("hwasan-mapping-offset-dynamic", cl::desc("HWASan shadow mapping dynamic offset location"), cl::Hidden, cl::values(clEnumValN(OffsetKind::kGlobal, "global", "Use global"), clEnumValN(OffsetKind::kIfunc, "ifunc", "Use ifunc global"), clEnumValN(OffsetKind::kTls, "tls", "Use TLS")))
static cl::opt< bool > ClRecover("hwasan-recover", cl::desc("Enable recovery mode (continue-after-error)."), cl::Hidden, cl::init(false))
static cl::opt< bool > ClEnableKhwasan("hwasan-kernel", cl::desc("Enable KernelHWAddressSanitizer instrumentation"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClInlineAllChecks("hwasan-inline-all-checks", cl::desc("inline all checks"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClUsePageAliases("hwasan-experimental-use-page-aliases", cl::desc("Use page aliasing in HWASan"), cl::Hidden, cl::init(false))
static cl::opt< std::string > ClMemoryAccessCallbackPrefix("hwasan-memory-access-callback-prefix", cl::desc("Prefix for memory access callbacks"), cl::Hidden, cl::init("__hwasan_"))
static cl::opt< bool > ClInstrumentMemIntrinsics("hwasan-instrument-mem-intrinsics", cl::desc("instrument memory intrinsics"), cl::Hidden, cl::init(true))
static const size_t kNumberOfAccessSizes
static cl::opt< bool > ClGlobals("hwasan-globals", cl::desc("Instrument globals"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClKasanMemIntrinCallbackPrefix("hwasan-kernel-mem-intrinsic-prefix", cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClInstrumentByval("hwasan-instrument-byval", cl::desc("instrument byval arguments"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClUseShortGranules("hwasan-use-short-granules", cl::desc("use short granules in allocas and outlined checks"), cl::Hidden, cl::init(false))
const char kHwasanShadowMemoryDynamicAddress[]
static unsigned getPointerOperandIndex(Instruction *I)
#define DEBUG_TYPE
static cl::opt< bool > ClInlineFastPathChecks("hwasan-inline-fast-path-checks", cl::desc("inline all checks"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClInstrumentPersonalityFunctions("hwasan-instrument-personality-functions", cl::desc("instrument personality functions"), cl::Hidden)
const char kHwasanInitName[]
static cl::opt< bool > ClAllGlobals("hwasan-all-globals", cl::desc("Instrument globals, even those within user-defined sections. Warning: " "This may break existing code which walks globals via linker-generated " "symbols, expects certain globals to be contiguous with each other, or " "makes other assumptions which are invalidated by HWASan " "instrumentation."), cl::Hidden, cl::init(false))
RecordStackHistoryMode
static cl::opt< bool > ClInstrumentLandingPads("hwasan-instrument-landing-pads", cl::desc("instrument landing pads"), cl::Hidden, cl::init(false))
static cl::opt< size_t > ClMaxLifetimes("hwasan-max-lifetimes-for-alloca", cl::Hidden, cl::init(3), cl::ReallyHidden, cl::desc("How many lifetime ends to handle for a single alloca."), cl::Optional)
const char kHwasanPersonalityThunkName[]
static cl::opt< bool > ClStaticLinking("hwasan-static-linking", cl::desc("Don't use .note.hwasan.globals section to instrument globals " "from loadable libraries. " "Note: in static binaries, the global variables section can be " "accessed directly via linker-provided " "__start_hwasan_globals and __stop_hwasan_globals symbols"), cl::Hidden, cl::init(false))
static void emitRemark(const Function &F, OptimizationRemarkEmitter &ORE, bool Skip)
static cl::opt< int > ClHotPercentileCutoff("hwasan-percentile-cutoff-hot", cl::desc("Hot percentile cutoff."))
IRTranslator LLVM IR MI
Module.h This file contains the declarations for the Module class.
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Machine Check Debug Module
This file implements a map that provides insertion order iteration.
#define T
uint64_t IntrinsicInst * II
#define P(N)
FunctionAnalysisManager FAM
ModuleAnalysisManager MAM
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
This file contains some functions that are useful when dealing with strings.
#define LLVM_DEBUG(...)
Definition Debug.h:114
an instruction to allocate memory on the stack
PointerType * getType() const
Overload to return most specific pointer type.
const Value * getArraySize() const
Get the number of elements allocated.
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
static LLVM_ABI ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition BasicBlock.h:206
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
Analysis pass which computes BlockFrequencyInfo.
This class represents a function call, abstracting a target machine's calling convention.
void setTailCall(bool IsTc=true)
static Constant * get(LLVMContext &Context, ArrayRef< ElementTy > Elts)
get() constructor - Return a constant with array type with an element count and element type matching...
Definition Constants.h:859
static LLVM_ABI Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getAnon(ArrayRef< Constant * > V, bool Packed=false)
Return an anonymous struct that has the specified elements.
Definition Constants.h:629
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
Analysis pass which computes a DominatorTree.
Definition Dominators.h:278
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:159
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
Class to represent function types.
static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
static Function * Create(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Definition Function.h:168
void flush()
Apply all pending updates to available trees and flush all BasicBlocks awaiting deletion.
static LLVM_ABI GlobalAlias * create(Type *Ty, unsigned AddressSpace, LinkageTypes Linkage, const Twine &Name, Constant *Aliasee, Module *Parent)
If a parent module is specified, the alias is automatically inserted into the end of the specified mo...
Definition Globals.cpp:621
StringRef getSection() const
Get the custom section of this global if it has one.
LLVM_ABI void setComdat(Comdat *C)
Definition Globals.cpp:223
bool hasSection() const
Check if this global has a custom object file section.
LLVM_ABI const SanitizerMetadata & getSanitizerMetadata() const
Definition Globals.cpp:254
bool isThreadLocal() const
If the value is "Thread Local", its value isn't shared by the threads.
VisibilityTypes getVisibility() const
LinkageTypes getLinkage() const
bool isDeclarationForLinker() const
bool hasSanitizerMetadata() const
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
PointerType * getType() const
Global values are always pointers.
@ HiddenVisibility
The GV is hidden.
Definition GlobalValue.h:69
bool hasCommonLinkage() const
@ PrivateLinkage
Like Internal, but omit from symbol table.
Definition GlobalValue.h:61
@ InternalLinkage
Rename collisions when linking (static functions).
Definition GlobalValue.h:60
@ ExternalLinkage
Externally visible function.
Definition GlobalValue.h:53
@ LinkOnceODRLinkage
Same, but only replaced by something equivalent.
Definition GlobalValue.h:56
Type * getValueType() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
MaybeAlign getAlign() const
Returns the alignment of the given variable.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
LLVM_ABI void eraseFromParent()
eraseFromParent - This method unlinks 'this' from the containing module and deletes it.
Definition Globals.cpp:538
Analysis pass providing a never-invalidated alias analysis result.
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &MAM)
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
Value * CreateConstGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0, const Twine &Name="")
Definition IRBuilder.h:1998
Value * CreatePointerCast(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2264
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2212
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition IRBuilder.h:1547
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
Definition IRBuilder.h:586
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition IRBuilder.h:2066
ReturnInst * CreateRet(Value *V)
Create a 'ret <val>' instruction.
Definition IRBuilder.h:1207
BasicBlock * GetInsertBlock() const
Definition IRBuilder.h:201
Value * CreateUDiv(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition IRBuilder.h:1488
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2353
LLVM_ABI CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > OverloadTypes, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using OverloadTypes.
Value * CreateICmpUGT(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2357
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Definition IRBuilder.h:1895
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1526
CallInst * CreateMemSet(Value *Ptr, Value *Val, uint64_t Size, MaybeAlign Align, bool isVolatile=false, const AAMDNodes &AAInfo=AAMDNodes())
Create and insert a memset to the specified pointer and the specified value.
Definition IRBuilder.h:660
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Definition IRBuilder.h:2095
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:1585
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
Definition IRBuilder.h:1908
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1437
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition IRBuilder.h:2528
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)
Definition IRBuilder.h:2081
PointerType * getPtrTy(unsigned AddrSpace=0)
Fetch the type representing a pointer.
Definition IRBuilder.h:629
LLVM_ABI Value * CreateTypeSize(Type *Ty, TypeSize Size)
Create an expression which evaluates to the number of units in Size at runtime.
Value * CreateICmpUGE(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2361
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
Definition IRBuilder.h:2290
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition IRBuilder.h:207
Value * CreateAShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition IRBuilder.h:1566
Value * CreateXor(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:1633
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="", bool IsDisjoint=false)
Definition IRBuilder.h:1607
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2829
static LLVM_ABI InlineAsm * get(FunctionType *Ty, StringRef AsmString, StringRef Constraints, bool hasSideEffects, bool isAlignStack=false, AsmDialect asmDialect=AD_ATT, bool canThrow=false)
InlineAsm::get - Return the specified uniqued inline asm string.
Definition InlineAsm.cpp:43
LLVM_ABI void setSuccessor(unsigned Idx, BasicBlock *BB)
Update the specified successor to point at the provided block.
A wrapper class for inspecting calls to intrinsic functions.
An instruction for reading from memory.
Analysis pass that exposes the LoopInfo for a function.
Definition LoopInfo.h:569
LLVM_ABI void update(ArrayRef< uint8_t > Data)
Updates the hash for the byte stream provided.
Definition MD5.cpp:188
LLVM_ABI void final(MD5Result &Result)
Finishes off the hash and puts the result in result.
Definition MD5.cpp:233
LLVM_ABI MDNode * createUnlikelyBranchWeights()
Return metadata containing two branch weights, with significant bias towards false destination.
Definition MDBuilder.cpp:48
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition Metadata.h:1572
This class implements a map that also provides access to all stored values in a deterministic order.
Definition MapVector.h:36
bool empty() const
Definition MapVector.h:77
This is the common base class for memset/memcpy/memmove.
This class wraps the llvm.memcpy/memmove intrinsics.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
GlobalVariable * getOrInsertGlobal(StringRef Name, Type *Ty, function_ref< GlobalVariable *()> CreateGlobalCallback)
Look up the specified global in the module symbol table.
Definition Module.cpp:262
The optimization diagnostic interface.
LLVM_ABI void emit(DiagnosticInfoOptimizationBase &OptDiag)
Output the remark via the diagnostic handler and to the optimization record file.
Diagnostic information for missed-optimization remarks.
Diagnostic information for applied optimization remarks.
Analysis pass which computes a PostDominatorTree.
PostDominatorTree Class - Concrete subclass of DominatorTree that is used to compute the post-dominat...
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
Definition Analysis.h:115
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & abandon()
Mark an analysis as abandoned.
Definition Analysis.h:171
PreservedAnalyses & preserve()
Mark an analysis as preserved.
Definition Analysis.h:132
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
Analysis providing profile information.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This pass performs the global (interprocedural) stack safety analysis (new pass manager).
bool stackAccessIsSafe(const Instruction &I) const
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
std::string str() const
str - Get the contents as an std::string.
Definition StringRef.h:222
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:258
static LLVM_ABI StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
Definition Type.cpp:483
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
bool isAndroidVersionLT(unsigned Major) const
Definition Triple.h:818
bool isAndroid() const
Tests whether the target is Android.
Definition Triple.h:816
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition Triple.h:429
bool isRISCV64() const
Tests whether the target is 64-bit RISC-V.
Definition Triple.h:1079
bool isAArch64() const
Tests whether the target is AArch64 (little and big endian).
Definition Triple.h:999
bool isOSFuchsia() const
Definition Triple.h:665
bool isOSBinFormatELF() const
Tests whether the OS uses the ELF binary format.
Definition Triple.h:772
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:370
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
static LLVM_ABI ValueAsMetadata * get(Value *V)
Definition Metadata.cpp:509
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:255
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
Definition Value.cpp:393
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:549
LLVM_ABI bool isSwiftError() const
Return true if this value is a swifterror value.
Definition Value.cpp:1126
LLVM_ABI bool replaceUsesWithIf(Value *New, llvm::function_ref< bool(Use &U)> ShouldReplace)
Go through the uses list for this definition and make each use point to "V" if the callback ShouldRep...
Definition Value.cpp:557
bool hasName() const
Definition Value.h:261
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:318
int getNumOccurrences() const
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
void getInterestingMemoryOperands(Module &M, Instruction *I, SmallVectorImpl< InterestingMemoryOperand > &Interesting)
Get all the memory operands from the instruction that needs to be instrumented.
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ NT_LLVM_HWASAN_GLOBALS
Definition ELF.h:1802
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
Value * getFP(IRBuilder<> &IRB)
void forAllReachableExits(const DominatorTree &DT, const PostDominatorTree &PDT, const LoopInfo &LI, const AllocaInfo &AInfo, const SmallVectorImpl< Instruction * > &RetVec, llvm::function_ref< void(Instruction *)> Callback)
bool isSupportedLifetime(const AllocaInfo &AInfo, const DominatorTree *DT, const LoopInfo *LI)
uint64_t getAllocaSizeInBytes(const AllocaInst &AI)
Value * getAndroidSlotPtr(IRBuilder<> &IRB, int Slot)
Value * readRegister(IRBuilder<> &IRB, StringRef Name)
void annotateDebugRecords(AllocaInfo &Info, unsigned int Tag)
void alignAndPadAlloca(memtag::AllocaInfo &Info, llvm::Align Align)
Value * getPC(const Triple &TargetTriple, IRBuilder<> &IRB)
Value * incrementThreadLong(IRBuilder<> &IRB, Value *ThreadLong, unsigned int Inc, bool IsMemtagDarwin=false)
DiagnosticInfoOptimizationBase::Argument NV
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
UnaryFunction for_each(R &&Range, UnaryFunction F)
Provide wrappers to std::for_each which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1732
LLVM_ABI AllocaInst * findAllocaForValue(Value *V, bool OffsetZero=false)
Returns unique alloca where the value comes from, or nullptr.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
FunctionAddr VTableAddr uintptr_t uintptr_t Int32Ty
Definition InstrProf.h:328
OuterAnalysisManagerProxy< ModuleAnalysisManager, Function > ModuleAnalysisManagerFunctionProxy
Provide the ModuleAnalysisManager to Function proxy.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:634
InnerAnalysisManagerProxy< FunctionAnalysisManager, Module > FunctionAnalysisManagerModuleProxy
Provide the FunctionAnalysisManager to Module proxy.
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition MathExtras.h:284
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition bit.h:204
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1746
LLVM_ABI std::pair< Function *, FunctionCallee > getOrCreateSanitizerCtorAndInitFunctions(Module &M, StringRef CtorName, StringRef InitName, ArrayRef< Type * > InitArgTypes, ArrayRef< Value * > InitArgs, function_ref< void(Function *, FunctionCallee)> FunctionsCreatedCallback, StringRef VersionCheckName=StringRef(), bool Weak=false)
Creates sanitizer constructor function lazily.
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
constexpr uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
@ Other
Any other memory.
Definition ModRef.h:68
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
LLVM_ABI void appendToCompilerUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.compiler.used list.
void removeASanIncompatibleFnAttributes(Function &F, bool ReadsArgMem)
Remove memory attributes that are incompatible with the instrumentation added by AddressSanitizer and...
LLVM_ABI void appendToGlobalCtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Append F to the list of global ctors of module M with the given Priority.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI Instruction * SplitBlockAndInsertIfThen(Value *Cond, BasicBlock::iterator SplitBefore, bool Unreachable, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, BasicBlock *ThenBlock=nullptr)
Split the containing block at the specified instruction - everything before SplitBefore stays in the ...
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
LLVM_ABI void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI, const TargetLibraryInfo *TLI)
Given a CallInst, check if it calls a string function known to CodeGen, and mark it with NoBuiltin if...
Definition Local.cpp:3889
LLVM_ABI bool checkIfAlreadyInstrumented(Module &M, StringRef Flag)
Check if module has flag attached, if not add the flag.
std::string itostr(int64_t X)
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
Definition MIRParser.h:39
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
Definition Error.cpp:177
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
Definition Alignment.h:130
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition PassManager.h:70
MapVector< AllocaInst *, AllocaInfo > AllocasToInstrument
SmallVector< Instruction *, 8 > RetVec