LLVM 23.0.0git
HWAddressSanitizer.cpp
Go to the documentation of this file.
1//===- HWAddressSanitizer.cpp - memory access error detector --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file is a part of HWAddressSanitizer, an address basic correctness
11/// checker based on tagged addressing.
12//===----------------------------------------------------------------------===//
13
15#include "llvm/ADT/MapVector.h"
16#include "llvm/ADT/STLExtras.h"
18#include "llvm/ADT/Statistic.h"
20#include "llvm/ADT/StringRef.h"
32#include "llvm/IR/Attributes.h"
33#include "llvm/IR/BasicBlock.h"
34#include "llvm/IR/Constant.h"
35#include "llvm/IR/Constants.h"
36#include "llvm/IR/DataLayout.h"
38#include "llvm/IR/Dominators.h"
39#include "llvm/IR/Function.h"
40#include "llvm/IR/IRBuilder.h"
41#include "llvm/IR/InlineAsm.h"
43#include "llvm/IR/Instruction.h"
46#include "llvm/IR/Intrinsics.h"
47#include "llvm/IR/LLVMContext.h"
48#include "llvm/IR/MDBuilder.h"
49#include "llvm/IR/Module.h"
50#include "llvm/IR/Type.h"
51#include "llvm/IR/Value.h"
54#include "llvm/Support/Debug.h"
55#include "llvm/Support/MD5.h"
66#include <optional>
67#include <random>
68
69using namespace llvm;
70
71#define DEBUG_TYPE "hwasan"
72
// Names of the ctor/section/runtime symbols shared with the HWASan runtime.
const char kHwasanModuleCtorName[] = "hwasan.module_ctor";
const char kHwasanNoteName[] = "hwasan.note";
const char kHwasanInitName[] = "__hwasan_init";
const char kHwasanPersonalityThunkName[] = "__hwasan_personality_thunk";

// Runtime-provided global holding the dynamically chosen shadow base (used by
// the kGlobal mapping kind). The declarator was truncated in the reviewed
// chunk; restored from the symbol it initializes.
const char kHwasanShadowMemoryDynamicAddress[] =
    "__hwasan_shadow_memory_dynamic_address";

// Accesses sizes are powers of two: 1, 2, 4, 8, 16.
static const size_t kNumberOfAccessSizes = 5;

// One shadow byte describes a 2^kDefaultShadowScale == 16 byte granule.
static const size_t kDefaultShadowScale = 4;

// The dynamic shadow base is aligned to this many bytes (see the kTls mapping
// description on ShadowMapping below).
static const unsigned kShadowBaseAlignment = 32;
87
namespace {
/// How the shadow base used in `shadow = (mem >> Scale) + base` is obtained
/// at run time (the formulas are documented on ShadowMapping below).
enum class OffsetKind {
  kFixed = 0, // Compile-time constant offset.
  kGlobal = 1, // Loaded from __hwasan_shadow_memory_dynamic_address.
  kIfunc = 2, // Address of the __hwasan_shadow global.
  kTls = 3, // Derived from the __hwasan_tls thread-local slot.
};
} // namespace
96
98 ClMemoryAccessCallbackPrefix("hwasan-memory-access-callback-prefix",
99 cl::desc("Prefix for memory access callbacks"),
100 cl::Hidden, cl::init("__hwasan_"));
101
103 "hwasan-kernel-mem-intrinsic-prefix",
104 cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden,
105 cl::init(false));
106
108 "hwasan-instrument-with-calls",
109 cl::desc("instrument reads and writes with callbacks"), cl::Hidden,
110 cl::init(false));
111
112static cl::opt<bool> ClInstrumentReads("hwasan-instrument-reads",
113 cl::desc("instrument read instructions"),
114 cl::Hidden, cl::init(true));
115
116static cl::opt<bool>
117 ClInstrumentWrites("hwasan-instrument-writes",
118 cl::desc("instrument write instructions"), cl::Hidden,
119 cl::init(true));
120
122 "hwasan-instrument-atomics",
123 cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
124 cl::init(true));
125
126static cl::opt<bool> ClInstrumentByval("hwasan-instrument-byval",
127 cl::desc("instrument byval arguments"),
128 cl::Hidden, cl::init(true));
129
130static cl::opt<bool>
131 ClRecover("hwasan-recover",
132 cl::desc("Enable recovery mode (continue-after-error)."),
133 cl::Hidden, cl::init(false));
134
135static cl::opt<bool> ClInstrumentStack("hwasan-instrument-stack",
136 cl::desc("instrument stack (allocas)"),
137 cl::Hidden, cl::init(true));
138
139static cl::opt<bool>
140 ClUseStackSafety("hwasan-use-stack-safety", cl::Hidden, cl::init(true),
141 cl::Hidden, cl::desc("Use Stack Safety analysis results"),
143
145 "hwasan-max-lifetimes-for-alloca", cl::Hidden, cl::init(3),
147 cl::desc("How many lifetime ends to handle for a single alloca."),
149
150static cl::opt<bool>
151 ClUseAfterScope("hwasan-use-after-scope",
152 cl::desc("detect use after scope within function"),
153 cl::Hidden, cl::init(true));
154
156 "hwasan-generate-tags-with-calls",
157 cl::desc("generate new tags with runtime library calls"), cl::Hidden,
158 cl::init(false));
159
160static cl::opt<bool> ClGlobals("hwasan-globals", cl::desc("Instrument globals"),
161 cl::Hidden, cl::init(false));
162
164 "hwasan-all-globals",
165 cl::desc(
166 "Instrument globals, even those within user-defined sections. Warning: "
167 "This may break existing code which walks globals via linker-generated "
168 "symbols, expects certain globals to be contiguous with each other, or "
169 "makes other assumptions which are invalidated by HWASan "
170 "instrumentation."),
171 cl::Hidden, cl::init(false));
172
174 "hwasan-match-all-tag",
175 cl::desc("don't report bad accesses via pointers with this tag"),
176 cl::Hidden, cl::init(-1));
177
178static cl::opt<bool>
179 ClEnableKhwasan("hwasan-kernel",
180 cl::desc("Enable KernelHWAddressSanitizer instrumentation"),
181 cl::Hidden, cl::init(false));
182
183// These flags allow to change the shadow mapping and control how shadow memory
184// is accessed. The shadow mapping looks like:
185// Shadow = (Mem >> scale) + offset
186
188 ClMappingOffset("hwasan-mapping-offset",
189 cl::desc("HWASan shadow mapping offset [EXPERIMENTAL]"),
190 cl::Hidden);
191
193 "hwasan-mapping-offset-dynamic",
194 cl::desc("HWASan shadow mapping dynamic offset location"), cl::Hidden,
195 cl::values(clEnumValN(OffsetKind::kGlobal, "global", "Use global"),
196 clEnumValN(OffsetKind::kIfunc, "ifunc", "Use ifunc global"),
197 clEnumValN(OffsetKind::kTls, "tls", "Use TLS")));
198
199static cl::opt<bool>
200 ClFrameRecords("hwasan-with-frame-record",
201 cl::desc("Use ring buffer for stack allocations"),
202 cl::Hidden);
203
204static cl::opt<int> ClHotPercentileCutoff("hwasan-percentile-cutoff-hot",
205 cl::desc("Hot percentile cutoff."));
206
207static cl::opt<float>
208 ClRandomKeepRate("hwasan-random-rate",
209 cl::desc("Probability value in the range [0.0, 1.0] "
210 "to keep instrumentation of a function. "
211 "Note: instrumentation can be skipped randomly "
212 "OR because of the hot percentile cutoff, if "
213 "both are supplied."));
214
216 "hwasan-static-linking",
217 cl::desc("Don't use .note.hwasan.globals section to instrument globals "
218 "from loadable libraries. "
219 "Note: in static binaries, the global variables section can be "
220 "accessed directly via linker-provided "
221 "__start_hwasan_globals and __stop_hwasan_globals symbols"),
222 cl::Hidden, cl::init(false));
223
224STATISTIC(NumTotalFuncs, "Number of total funcs");
225STATISTIC(NumInstrumentedFuncs, "Number of instrumented funcs");
226STATISTIC(NumNoProfileSummaryFuncs, "Number of funcs without PS");
227
228// Mode for selecting how to insert frame record info into the stack ring
229// buffer.
231 // Do not record frame record info.
233
234 // Insert instructions into the prologue for storing into the stack ring
235 // buffer directly.
237
238 // Add a call to __hwasan_add_frame_record in the runtime.
240};
241
243 "hwasan-record-stack-history",
244 cl::desc("Record stack frames with tagged allocations in a thread-local "
245 "ring buffer"),
246 cl::values(clEnumVal(none, "Do not record stack ring history"),
247 clEnumVal(instr, "Insert instructions into the prologue for "
248 "storing into the stack ring buffer directly"),
249 clEnumVal(libcall, "Add a call to __hwasan_add_frame_record for "
250 "storing into the stack ring buffer")),
252
253static cl::opt<bool>
254 ClInstrumentMemIntrinsics("hwasan-instrument-mem-intrinsics",
255 cl::desc("instrument memory intrinsics"),
256 cl::Hidden, cl::init(true));
257
258static cl::opt<bool>
259 ClInstrumentLandingPads("hwasan-instrument-landing-pads",
260 cl::desc("instrument landing pads"), cl::Hidden,
261 cl::init(false));
262
264 "hwasan-use-short-granules",
265 cl::desc("use short granules in allocas and outlined checks"), cl::Hidden,
266 cl::init(false));
267
269 "hwasan-instrument-personality-functions",
270 cl::desc("instrument personality functions"), cl::Hidden);
271
272static cl::opt<bool> ClInlineAllChecks("hwasan-inline-all-checks",
273 cl::desc("inline all checks"),
274 cl::Hidden, cl::init(false));
275
276static cl::opt<bool> ClInlineFastPathChecks("hwasan-inline-fast-path-checks",
277 cl::desc("inline all checks"),
278 cl::Hidden, cl::init(false));
279
280// Enabled from clang by "-fsanitize-hwaddress-experimental-aliasing".
281static cl::opt<bool> ClUsePageAliases("hwasan-experimental-use-page-aliases",
282 cl::desc("Use page aliasing in HWASan"),
283 cl::Hidden, cl::init(false));
284
285namespace {
286
287template <typename T> T optOr(cl::opt<T> &Opt, T Other) {
288 return Opt.getNumOccurrences() ? Opt : Other;
289}
290
291bool shouldUsePageAliases(const Triple &TargetTriple) {
292 return ClUsePageAliases && TargetTriple.getArch() == Triple::x86_64;
293}
294
295bool shouldInstrumentStack(const Triple &TargetTriple) {
296 return !shouldUsePageAliases(TargetTriple) && ClInstrumentStack;
297}
298
299bool shouldInstrumentWithCalls(const Triple &TargetTriple) {
300 return optOr(ClInstrumentWithCalls, TargetTriple.getArch() == Triple::x86_64);
301}
302
303bool mightUseStackSafetyAnalysis(bool DisableOptimization) {
304 return optOr(ClUseStackSafety, !DisableOptimization);
305}
306
307bool shouldUseStackSafetyAnalysis(const Triple &TargetTriple,
308 bool DisableOptimization) {
309 return shouldInstrumentStack(TargetTriple) &&
310 mightUseStackSafetyAnalysis(DisableOptimization);
311}
312
313bool shouldDetectUseAfterScope(const Triple &TargetTriple) {
314 return ClUseAfterScope && shouldInstrumentStack(TargetTriple);
315}
316
/// An instrumentation pass implementing detection of addressability bugs
/// using tagged pointers.
class HWAddressSanitizer {
public:
  /// The -hwasan-recover / -hwasan-kernel flags override the \p Recover /
  /// \p CompileKernel arguments; the RNG is only created when
  /// -hwasan-random-rate was given on the command line.
  HWAddressSanitizer(Module &M, bool CompileKernel, bool Recover,
                     const StackSafetyGlobalInfo *SSI)
      : M(M), SSI(SSI) {
    this->Recover = optOr(ClRecover, Recover);
    this->CompileKernel = optOr(ClEnableKhwasan, CompileKernel);
    this->Rng = ClRandomKeepRate.getNumOccurrences() ? M.createRNG(DEBUG_TYPE)
                                                     : nullptr;

    initializeModule();
  }

  void sanitizeFunction(Function &F, FunctionAnalysisManager &FAM);

private:
  /// Values produced by insertShadowTagCheck() for a single checked access.
  struct ShadowTagCheckInfo {
    Instruction *TagMismatchTerm = nullptr;
    Value *PtrLong = nullptr;
    Value *AddrLong = nullptr;
    Value *PtrTag = nullptr;
    Value *MemTag = nullptr;
  };

  // NOTE(review): the continuation of this declaration (its remaining
  // parameters and the closing `);`) was dropped by the extraction of this
  // chunk — restore from upstream before building.
  bool selectiveInstrumentationShouldSkip(Function &F,
  void initializeModule();
  void createHwasanCtorComdat();
  void createHwasanNote();

  void initializeCallbacks(Module &M);

  Value *getOpaqueNoopCast(IRBuilder<> &IRB, Value *Val);

  Value *getDynamicShadowIfunc(IRBuilder<> &IRB);
  Value *getShadowNonTls(IRBuilder<> &IRB);

  void untagPointerOperand(Instruction *I, Value *Addr);
  Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);

  int64_t getAccessInfo(bool IsWrite, unsigned AccessSizeIndex);
  ShadowTagCheckInfo insertShadowTagCheck(Value *Ptr, Instruction *InsertBefore,
                                          DomTreeUpdater &DTU, LoopInfo *LI);
  void instrumentMemAccessOutline(Value *Ptr, bool IsWrite,
                                  unsigned AccessSizeIndex,
                                  Instruction *InsertBefore,
                                  DomTreeUpdater &DTU, LoopInfo *LI);
  void instrumentMemAccessInline(Value *Ptr, bool IsWrite,
                                 unsigned AccessSizeIndex,
                                 Instruction *InsertBefore, DomTreeUpdater &DTU,
                                 LoopInfo *LI);
  bool ignoreMemIntrinsic(OptimizationRemarkEmitter &ORE, MemIntrinsic *MI);
  void instrumentMemIntrinsic(MemIntrinsic *MI);
  bool instrumentMemAccess(InterestingMemoryOperand &O, DomTreeUpdater &DTU,
                           LoopInfo *LI, const DataLayout &DL);
  bool ignoreAccessWithoutRemark(Instruction *Inst, Value *Ptr);
  bool ignoreAccess(OptimizationRemarkEmitter &ORE, Instruction *Inst,
                    Value *Ptr);

  // NOTE(review): the first line of this declaration (its return type and
  // name — presumably getInterestingMemoryOperands, defined later in this
  // file) was dropped by the extraction of this chunk.
      OptimizationRemarkEmitter &ORE, Instruction *I,
      const TargetLibraryInfo &TLI,
      SmallVectorImpl<InterestingMemoryOperand> &Interesting);

  void tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag, size_t Size);
  Value *tagPointer(IRBuilder<> &IRB, Type *Ty, Value *PtrLong, Value *Tag);
  Value *untagPointer(IRBuilder<> &IRB, Value *PtrLong);
  void instrumentStack(memtag::StackInfo &Info, Value *StackTag, Value *UARTag,
                       const DominatorTree &DT, const PostDominatorTree &PDT,
                       const LoopInfo &LI);
  void instrumentLandingPads(SmallVectorImpl<Instruction *> &RetVec);
  Value *getNextTagWithCall(IRBuilder<> &IRB);
  Value *getStackBaseTag(IRBuilder<> &IRB);
  Value *getAllocaTag(IRBuilder<> &IRB, Value *StackTag, unsigned AllocaNo);
  Value *getUARTag(IRBuilder<> &IRB);

  Value *getHwasanThreadSlotPtr(IRBuilder<> &IRB);
  Value *applyTagMask(IRBuilder<> &IRB, Value *OldTag);
  unsigned retagMask(unsigned AllocaNo);

  void emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord);

  void instrumentGlobal(GlobalVariable *GV, uint8_t Tag);
  void instrumentGlobals();

  Value *getCachedFP(IRBuilder<> &IRB);
  Value *getFrameRecordInfo(IRBuilder<> &IRB);

  void instrumentPersonalityFunctions();

  LLVMContext *C;
  Module &M;
  const StackSafetyGlobalInfo *SSI;
  Triple TargetTriple;
  std::unique_ptr<RandomNumberGenerator> Rng;

  /// This struct defines the shadow mapping using the rule:
  /// If `kFixed`, then
  ///   shadow = (mem >> Scale) + Offset.
  /// If `kGlobal`, then
  ///   extern char* __hwasan_shadow_memory_dynamic_address;
  ///   shadow = (mem >> Scale) + __hwasan_shadow_memory_dynamic_address
  /// If `kIfunc`, then
  ///   extern char __hwasan_shadow[];
  ///   shadow = (mem >> Scale) + &__hwasan_shadow
  /// If `kTls`, then
  ///   extern char *__hwasan_tls;
  ///   shadow = (mem>>Scale) + align_up(__hwasan_shadow, kShadowBaseAlignment)
  ///
  /// If WithFrameRecord is true, then __hwasan_tls will be used to access the
  /// ring buffer for storing stack allocations on targets that support it.
  class ShadowMapping {
    OffsetKind Kind;
    uint64_t Offset;
    uint8_t Scale;
    bool WithFrameRecord;

    void SetFixed(uint64_t O) {
      Kind = OffsetKind::kFixed;
      Offset = O;
    }

  public:
    void init(Triple &TargetTriple, bool InstrumentWithCalls,
              bool CompileKernel);
    Align getObjectAlignment() const { return Align(1ULL << Scale); }
    bool isInGlobal() const { return Kind == OffsetKind::kGlobal; }
    bool isInIfunc() const { return Kind == OffsetKind::kIfunc; }
    bool isInTls() const { return Kind == OffsetKind::kTls; }
    bool isFixed() const { return Kind == OffsetKind::kFixed; }
    uint8_t scale() const { return Scale; };
    uint64_t offset() const {
      // Only meaningful for the fixed mapping kind.
      assert(isFixed());
      return Offset;
    };
    bool withFrameRecord() const { return WithFrameRecord; };
  };

  ShadowMapping Mapping;

  // Frequently used types, cached once per module.
  Type *VoidTy = Type::getVoidTy(M.getContext());
  Type *IntptrTy = M.getDataLayout().getIntPtrType(M.getContext());
  PointerType *PtrTy = PointerType::getUnqual(M.getContext());
  Type *Int8Ty = Type::getInt8Ty(M.getContext());
  Type *Int32Ty = Type::getInt32Ty(M.getContext());
  Type *Int64Ty = Type::getInt64Ty(M.getContext());

  // Per-module configuration, resolved in initializeModule().
  bool CompileKernel;
  bool Recover;
  bool OutlinedChecks;
  bool InlineFastPath;
  bool UseShortGranules;
  bool InstrumentLandingPads;
  bool InstrumentWithCalls;
  bool InstrumentStack;
  bool InstrumentGlobals;
  bool DetectUseAfterScope;
  bool UsePageAliases;
  bool UseMatchAllCallback;

  std::optional<uint8_t> MatchAllTag;

  unsigned PointerTagShift;
  uint64_t TagMaskByte;

  Function *HwasanCtorFunction;

  // Runtime callbacks, declared in initializeCallbacks().
  FunctionCallee HwasanMemoryAccessCallback[2][kNumberOfAccessSizes];
  FunctionCallee HwasanMemoryAccessCallbackSized[2];

  FunctionCallee HwasanMemmove, HwasanMemcpy, HwasanMemset;
  FunctionCallee HwasanHandleVfork;

  FunctionCallee HwasanTagMemoryFunc;
  FunctionCallee HwasanGenerateTagFunc;
  FunctionCallee HwasanRecordFrameRecordFunc;

  Constant *ShadowGlobal;

  // Per-function cached values, reset between functions.
  Value *ShadowBase = nullptr;
  Value *StackBaseTag = nullptr;
  Value *CachedFP = nullptr;
  GlobalValue *ThreadPtrGlobal = nullptr;
};
503
504} // end anonymous namespace
505
// NOTE(review): the opening line of this definition (the
// `PreservedAnalyses HWAddressSanitizerPass::run(Module &M,
// ModuleAnalysisManager &MAM) {` signature) was dropped by the extraction of
// this chunk — restore from upstream before building.
  // Return early if nosanitize_hwaddress module flag is present for the module.
  if (checkIfAlreadyInstrumented(M, "nosanitize_hwaddress"))
    return PreservedAnalyses::all();
  const StackSafetyGlobalInfo *SSI = nullptr;
  const Triple &TargetTriple = M.getTargetTriple();
  // Stack-safety results are only requested when they will actually be used.
  if (shouldUseStackSafetyAnalysis(TargetTriple, Options.DisableOptimization))
    SSI = &MAM.getResult<StackSafetyGlobalAnalysis>(M);

  HWAddressSanitizer HWASan(M, Options.CompileKernel, Options.Recover, SSI);
  auto &FAM = MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
  for (Function &F : M)
    HWASan.sanitizeFunction(F, FAM);

  // NOTE(review): the lines constructing the PreservedAnalyses set `PA` and
  // its preserve<>() calls were dropped by the extraction of this chunk.
  // DominatorTreeAnalysis, PostDominatorTreeAnalysis, and LoopAnalysis
  // are incrementally updated throughout this pass whenever
  // SplitBlockAndInsertIfThen is called.
  // GlobalsAA is considered stateless and does not get invalidated unless
  // explicitly invalidated; PreservedAnalyses::none() is not enough. Sanitizers
  // make changes that require GlobalsAA to be invalidated.
  PA.abandon<GlobalsAA>();
  return PA;
}
// Prints this pass's textual pipeline representation, appending the
// "<kernel;recover>" option string after the base name.
// NOTE(review): the signature head (`void HWAddressSanitizerPass::
// printPipeline(`) and the receiver of the base-class printPipeline call were
// dropped by the extraction of this chunk — restore from upstream.
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
      OS, MapClassName2PassName);
  OS << '<';
  if (Options.CompileKernel)
    OS << "kernel;";
  if (Options.Recover)
    OS << "recover";
  OS << '>';
}
545
void HWAddressSanitizer::createHwasanNote() {
  // Create a note that contains pointers to the list of global
  // descriptors. Adding a note to the output file will cause the linker to
  // create a PT_NOTE program header pointing to the note that we can use to
  // find the descriptor list starting from the program headers. A function
  // provided by the runtime initializes the shadow memory for the globals by
  // accessing the descriptor list via the note. The dynamic loader needs to
  // call this function whenever a library is loaded.
  //
  // The reason why we use a note for this instead of a more conventional
  // approach of having a global constructor pass a descriptor list pointer to
  // the runtime is because of an order of initialization problem. With
  // constructors we can encounter the following problematic scenario:
  //
  // 1) library A depends on library B and also interposes one of B's symbols
  // 2) B's constructors are called before A's (as required for correctness)
  // 3) during construction, B accesses one of its "own" globals (actually
  //    interposed by A) and triggers a HWASAN failure due to the initialization
  //    for A not having happened yet
  //
  // Even without interposition it is possible to run into similar situations in
  // cases where two libraries mutually depend on each other.
  //
  // We only need one note per binary, so put everything for the note in a
  // comdat. This needs to be a comdat with an .init_array section to prevent
  // newer versions of lld from discarding the note.
  //
  // Create the note even if we aren't instrumenting globals. This ensures that
  // binaries linked from object files with both instrumented and
  // non-instrumented globals will end up with a note, even if a comdat from an
  // object file with non-instrumented globals is selected. The note is harmless
  // if the runtime doesn't support it, since it will just be ignored.
  Comdat *NoteComdat = M.getOrInsertComdat(kHwasanModuleCtorName);

  Type *Int8Arr0Ty = ArrayType::get(Int8Ty, 0);
  // Zero-sized bounds for the hwasan_globals section; the linker defines the
  // actual __start_/__stop_ addresses.
  auto *Start =
      new GlobalVariable(M, Int8Arr0Ty, true, GlobalVariable::ExternalLinkage,
                         nullptr, "__start_hwasan_globals");
  Start->setVisibility(GlobalValue::HiddenVisibility);
  auto *Stop =
      new GlobalVariable(M, Int8Arr0Ty, true, GlobalVariable::ExternalLinkage,
                         nullptr, "__stop_hwasan_globals");
  Stop->setVisibility(GlobalValue::HiddenVisibility);

  // Null-terminated so actually 8 bytes, which are required in order to align
  // the note properly.
  auto *Name = ConstantDataArray::get(*C, "LLVM\0\0\0");

  // NOTE(review): one line was dropped from each of the next two statements
  // by the extraction of this chunk (the remaining StructType fields and the
  // GlobalVariable linkage/name arguments) — restore from upstream.
  auto *NoteTy = StructType::get(Int32Ty, Int32Ty, Int32Ty, Name->getType(),
  auto *Note =
      new GlobalVariable(M, NoteTy, /*isConstant=*/true,
  Note->setSection(".note.hwasan.globals");
  Note->setComdat(NoteComdat);
  Note->setAlignment(Align(4));

  // The pointers in the note need to be relative so that the note ends up being
  // placed in rodata, which is the standard location for notes.
  // NOTE(review): the body of this lambda (the pointer-difference constant
  // expression truncated to i32) was dropped by the extraction of this chunk.
  auto CreateRelPtr = [&](Constant *Ptr) {
        Int32Ty);
  };
  Note->setInitializer(ConstantStruct::getAnon(
      {ConstantInt::get(Int32Ty, 8), // n_namesz
       ConstantInt::get(Int32Ty, 8), // n_descsz
       ConstantInt::get(Int32Ty, ELF::NT_LLVM_HWASAN_GLOBALS), // n_type
       Name, CreateRelPtr(Start), CreateRelPtr(Stop)}));

  // Create a zero-length global in hwasan_globals so that the linker will
  // always create start and stop symbols.
  auto *Dummy = new GlobalVariable(
      M, Int8Arr0Ty, /*isConstantGlobal*/ true, GlobalVariable::PrivateLinkage,
      Constant::getNullValue(Int8Arr0Ty), "hwasan.dummy.global");
  Dummy->setSection("hwasan_globals");
  Dummy->setComdat(NoteComdat);
  // NOTE(review): the second argument of this call (the associated-metadata
  // node) was dropped by the extraction of this chunk.
  Dummy->setMetadata(LLVMContext::MD_associated,
  appendToCompilerUsed(M, Dummy);
}
629
// Creates the hwasan.module_ctor comdat constructor that calls __hwasan_init,
// and (for non-static links) the .note.hwasan.globals note.
void HWAddressSanitizer::createHwasanCtorComdat() {
  // NOTE(review): the call head on the right-hand side (the
  // getOrCreateSanitizerCtorAndInitFunctions(...) lines naming the ctor and
  // __hwasan_init) was dropped by the extraction of this chunk.
  std::tie(HwasanCtorFunction, std::ignore) =
          /*InitArgTypes=*/{},
          /*InitArgs=*/{},
          // This callback is invoked when the functions are created the first
          // time. Hook them into the global ctors list in that case:
          [&](Function *Ctor, FunctionCallee) {
            Comdat *CtorComdat = M.getOrInsertComdat(kHwasanModuleCtorName);
            Ctor->setComdat(CtorComdat);
            appendToGlobalCtors(M, Ctor, 0, Ctor);
          });

  // Do not create .note.hwasan.globals for static binaries, as it is only
  // needed for instrumenting globals from dynamic libraries. In static
  // binaries, the global variables section can be accessed directly via the
  // __start_hwasan_globals and __stop_hwasan_globals symbols inserted by the
  // linker.
  if (!ClStaticLinking)
    createHwasanNote();
}
652
/// Module-level initialization.
///
/// inserts a call to __hwasan_init to the module's constructor list.
void HWAddressSanitizer::initializeModule() {
  LLVM_DEBUG(dbgs() << "Init " << M.getName() << "\n");
  TargetTriple = M.getTargetTriple();

  // HWASan may do short granule checks on function arguments read from the
  // argument memory (last byte of the granule), which invalidates writeonly.
  for (Function &F : M.functions())
    removeASanIncompatibleFnAttributes(F, /*ReadsArgMem=*/true);

  // x86_64 currently has two modes:
  // - Intel LAM (default)
  // - pointer aliasing (heap only)
  bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64;
  UsePageAliases = shouldUsePageAliases(TargetTriple);
  InstrumentWithCalls = shouldInstrumentWithCalls(TargetTriple);
  InstrumentStack = shouldInstrumentStack(TargetTriple);
  DetectUseAfterScope = shouldDetectUseAfterScope(TargetTriple);
  // x86_64 uses a narrower, lower-positioned tag (6 bits at bit 57, mask
  // 0x3F); all other targets use the full top byte.
  PointerTagShift = IsX86_64 ? 57 : 56;
  TagMaskByte = IsX86_64 ? 0x3F : 0xFF;

  Mapping.init(TargetTriple, InstrumentWithCalls, CompileKernel);

  C = &(M.getContext());
  IRBuilder<> IRB(*C);

  HwasanCtorFunction = nullptr;

  // Older versions of Android do not have the required runtime support for
  // short granules, global or personality function instrumentation. On other
  // platforms we currently require using the latest version of the runtime.
  bool NewRuntime =
      !TargetTriple.isAndroid() || !TargetTriple.isAndroidVersionLT(30);

  UseShortGranules = optOr(ClUseShortGranules, NewRuntime);
  OutlinedChecks = (TargetTriple.isAArch64() || TargetTriple.isRISCV64()) &&
                   TargetTriple.isOSBinFormatELF() &&
                   !optOr(ClInlineAllChecks, Recover);

  // These platforms may prefer less inlining to reduce binary size.
  InlineFastPath = optOr(ClInlineFastPathChecks, !(TargetTriple.isAndroid() ||
                                                   TargetTriple.isOSFuchsia()));

  // -hwasan-match-all-tag wins over the kernel default of 0xFF; the sentinel
  // -1 means "no match-all tag".
  if (ClMatchAllTag.getNumOccurrences()) {
    if (ClMatchAllTag != -1) {
      MatchAllTag = ClMatchAllTag & 0xFF;
    }
  } else if (CompileKernel) {
    MatchAllTag = 0xFF;
  }
  UseMatchAllCallback = !CompileKernel && MatchAllTag.has_value();

  // If we don't have personality function support, fall back to landing pads.
  InstrumentLandingPads = optOr(ClInstrumentLandingPads, !NewRuntime);

  InstrumentGlobals =
      !CompileKernel && !UsePageAliases && optOr(ClGlobals, NewRuntime);

  if (!CompileKernel) {
    if (InstrumentGlobals)
      instrumentGlobals();

    createHwasanCtorComdat();

    bool InstrumentPersonalityFunctions =
        optOr(ClInstrumentPersonalityFunctions, NewRuntime);
    if (InstrumentPersonalityFunctions)
      instrumentPersonalityFunctions();
  }

  if (!TargetTriple.isAndroid()) {
    // Lazily create the __hwasan_tls global used for the thread slot.
    // NOTE(review): several argument lines of this GlobalVariable constructor
    // call (its linkage and TLS-model arguments) were dropped by the
    // extraction of this chunk — restore from upstream before building.
    ThreadPtrGlobal = M.getOrInsertGlobal("__hwasan_tls", IntptrTy, [&] {
      auto *GV = new GlobalVariable(M, IntptrTy, /*isConstant=*/false,
                                    "__hwasan_tls", nullptr,
      return GV;
    });
  }
}
736
// Declares (getOrInsert) every runtime callback this pass may emit calls to.
void HWAddressSanitizer::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(*C);
  // "_match_all" callback variants take the match-all tag as an extra i8.
  const std::string MatchAllStr = UseMatchAllCallback ? "_match_all" : "";
  FunctionType *HwasanMemoryAccessCallbackSizedFnTy,
      *HwasanMemoryAccessCallbackFnTy, *HwasanMemTransferFnTy,
      *HwasanMemsetFnTy;
  if (UseMatchAllCallback) {
    HwasanMemoryAccessCallbackSizedFnTy =
        FunctionType::get(VoidTy, {IntptrTy, IntptrTy, Int8Ty}, false);
    HwasanMemoryAccessCallbackFnTy =
        FunctionType::get(VoidTy, {IntptrTy, Int8Ty}, false);
    HwasanMemTransferFnTy =
        FunctionType::get(PtrTy, {PtrTy, PtrTy, IntptrTy, Int8Ty}, false);
    HwasanMemsetFnTy =
        FunctionType::get(PtrTy, {PtrTy, Int32Ty, IntptrTy, Int8Ty}, false);
  } else {
    HwasanMemoryAccessCallbackSizedFnTy =
        FunctionType::get(VoidTy, {IntptrTy, IntptrTy}, false);
    HwasanMemoryAccessCallbackFnTy =
        FunctionType::get(VoidTy, {IntptrTy}, false);
    HwasanMemTransferFnTy =
        FunctionType::get(PtrTy, {PtrTy, PtrTy, IntptrTy}, false);
    HwasanMemsetFnTy =
        FunctionType::get(PtrTy, {PtrTy, Int32Ty, IntptrTy}, false);
  }

  // Declare the load/store callbacks for the sized ("N") variant and for each
  // of the power-of-two access sizes (1..16 bytes).
  for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
    const std::string TypeStr = AccessIsWrite ? "store" : "load";
    const std::string EndingStr = Recover ? "_noabort" : "";

    HwasanMemoryAccessCallbackSized[AccessIsWrite] = M.getOrInsertFunction(
        ClMemoryAccessCallbackPrefix + TypeStr + "N" + MatchAllStr + EndingStr,
        HwasanMemoryAccessCallbackSizedFnTy);

    for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
         AccessSizeIndex++) {
      HwasanMemoryAccessCallback[AccessIsWrite][AccessSizeIndex] =
          M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + TypeStr +
                                    itostr(1ULL << AccessSizeIndex) +
                                    MatchAllStr + EndingStr,
                                HwasanMemoryAccessCallbackFnTy);
    }
  }

  // In kernel mode, mem intrinsics are unprefixed unless explicitly requested.
  // NOTE(review): the else-branch line of this conditional expression (the
  // prefixed alternative) was dropped by the extraction of this chunk.
  const std::string MemIntrinCallbackPrefix =
      (CompileKernel && !ClKasanMemIntrinCallbackPrefix)
          ? std::string("")

  HwasanMemmove = M.getOrInsertFunction(
      MemIntrinCallbackPrefix + "memmove" + MatchAllStr, HwasanMemTransferFnTy);
  HwasanMemcpy = M.getOrInsertFunction(
      MemIntrinCallbackPrefix + "memcpy" + MatchAllStr, HwasanMemTransferFnTy);
  HwasanMemset = M.getOrInsertFunction(
      MemIntrinCallbackPrefix + "memset" + MatchAllStr, HwasanMemsetFnTy);

  HwasanTagMemoryFunc = M.getOrInsertFunction("__hwasan_tag_memory", VoidTy,
                                              PtrTy, Int8Ty, IntptrTy);
  HwasanGenerateTagFunc =
      M.getOrInsertFunction("__hwasan_generate_tag", Int8Ty);

  HwasanRecordFrameRecordFunc =
      M.getOrInsertFunction("__hwasan_add_frame_record", VoidTy, Int64Ty);

  ShadowGlobal =
      M.getOrInsertGlobal("__hwasan_shadow", ArrayType::get(Int8Ty, 0));

  HwasanHandleVfork =
      M.getOrInsertFunction("__hwasan_handle_vfork", VoidTy, IntptrTy);
}
807
808Value *HWAddressSanitizer::getOpaqueNoopCast(IRBuilder<> &IRB, Value *Val) {
809 // An empty inline asm with input reg == output reg.
810 // An opaque no-op cast, basically.
811 // This prevents code bloat as a result of rematerializing trivial definitions
812 // such as constants or global addresses at every load and store.
813 InlineAsm *Asm =
814 InlineAsm::get(FunctionType::get(PtrTy, {Val->getType()}, false),
815 StringRef(""), StringRef("=r,0"),
816 /*hasSideEffects=*/false);
817 return IRB.CreateCall(Asm, {Val}, ".hwasan.shadow");
818}
819
Value *HWAddressSanitizer::getDynamicShadowIfunc(IRBuilder<> &IRB) {
  // kIfunc mapping: the shadow base is the address of __hwasan_shadow,
  // wrapped in an opaque cast so it is not rematerialized at every use.
  return getOpaqueNoopCast(IRB, ShadowGlobal);
}
823
// Computes the shadow base for the non-TLS mapping kinds.
Value *HWAddressSanitizer::getShadowNonTls(IRBuilder<> &IRB) {
  if (Mapping.isFixed()) {
    // Fixed mapping: the base is a compile-time constant address.
    // NOTE(review): the middle line of this call (wrapping the constant in an
    // int-to-ptr constant expression) was dropped by the extraction of this
    // chunk — restore from upstream before building.
    return getOpaqueNoopCast(
        ConstantInt::get(IntptrTy, Mapping.offset()), PtrTy));
  }

  if (Mapping.isInIfunc())
    return getDynamicShadowIfunc(IRB);

  // Otherwise (kGlobal) load the base from the runtime-provided global.
  // NOTE(review): the initializer of GlobalDynamicAddress (presumably a
  // getOrInsertGlobal of kHwasanShadowMemoryDynamicAddress) was dropped by
  // the extraction of this chunk.
  Value *GlobalDynamicAddress =
  return IRB.CreateLoad(PtrTy, GlobalDynamicAddress);
}
839
// Returns true when the access through \p Ptr should not be instrumented
// (without emitting an optimization remark; see ignoreAccess for that).
bool HWAddressSanitizer::ignoreAccessWithoutRemark(Instruction *Inst,
                                                   Value *Ptr) {
  // Do not instrument accesses from different address spaces; we cannot deal
  // with them.
  Type *PtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
  if (PtrTy->getPointerAddressSpace() != 0)
    return true;

  // Ignore swifterror addresses.
  // swifterror memory addresses are mem2reg promoted by instruction
  // selection. As such they cannot have regular uses like an instrumentation
  // function and it makes no sense to track them as memory.
  if (Ptr->isSwiftError())
    return true;

  // Stack accesses: skip when stack instrumentation is off, or when the
  // stack-safety analysis has already proven this access safe.
  if (findAllocaForValue(Ptr)) {
    if (!InstrumentStack)
      return true;
    if (SSI && SSI->stackAccessIsSafe(*Inst))
      return true;
  }

  // NOTE(review): the condition opening this braced region (presumably a
  // check that Ptr refers to a global variable) was dropped by the
  // extraction of this chunk — restore from upstream before building.
    if (!InstrumentGlobals)
      return true;
    // TODO: Optimize inbound global accesses, like Asan `instrumentMop`.
  }

  return false;
}
870
871bool HWAddressSanitizer::ignoreAccess(OptimizationRemarkEmitter &ORE,
872 Instruction *Inst, Value *Ptr) {
873 bool Ignored = ignoreAccessWithoutRemark(Inst, Ptr);
874 if (Ignored) {
875 ORE.emit(
876 [&]() { return OptimizationRemark(DEBUG_TYPE, "ignoreAccess", Inst); });
877 } else {
878 ORE.emit([&]() {
879 return OptimizationRemarkMissed(DEBUG_TYPE, "ignoreAccess", Inst);
880 });
881 }
882 return Ignored;
883}
884
// Collects the memory operands of \p I that HWASan should check, appending
// one InterestingMemoryOperand per instrumentable access.
// NOTE(review): two lines of this signature (the ORE/Instruction parameters
// and the SmallVectorImpl<InterestingMemoryOperand> out-parameter plus the
// opening brace) were dropped by the extraction of this chunk.
void HWAddressSanitizer::getInterestingMemoryOperands(
    const TargetLibraryInfo &TLI,
  // Skip memory accesses inserted by another instrumentation.
  if (I->hasMetadata(LLVMContext::MD_nosanitize))
    return;

  // Do not instrument the load fetching the dynamic shadow address.
  if (ShadowBase == I)
    return;

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!ClInstrumentReads || ignoreAccess(ORE, I, LI->getPointerOperand()))
      return;
    Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
                             LI->getType(), LI->getAlign());
  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (!ClInstrumentWrites || ignoreAccess(ORE, I, SI->getPointerOperand()))
      return;
    Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,
                             SI->getValueOperand()->getType(), SI->getAlign());
  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
    if (!ClInstrumentAtomics || ignoreAccess(ORE, I, RMW->getPointerOperand()))
      return;
    Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,
                             RMW->getValOperand()->getType(), std::nullopt);
  } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
    if (!ClInstrumentAtomics || ignoreAccess(ORE, I, XCHG->getPointerOperand()))
      return;
    Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
                             XCHG->getCompareOperand()->getType(),
                             std::nullopt);
  } else if (auto *CI = dyn_cast<CallInst>(I)) {
    // byval arguments are read from memory by the call, so they count as
    // (read) accesses too.
    for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
      if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
          ignoreAccess(ORE, I, CI->getArgOperand(ArgNo)))
        continue;
      Type *Ty = CI->getParamByValType(ArgNo);
      Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
    }
  // NOTE(review): a line between these braces (likely the statement using TLI
  // on the call) was dropped by the extraction of this chunk.
  }
}
929
  // Return the operand index of the pointer for the memory-access
  // instruction kinds HWASan rewrites; any other instruction is a fatal
  // error.
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperandIndex();
    return SI->getPointerOperandIndex();
    return RMW->getPointerOperandIndex();
    return XCHG->getPointerOperandIndex();
  report_fatal_error("Unexpected instruction");
  // Unreachable after report_fatal_error; keeps the return type satisfied.
  return -1;
}
942
  // Map an access size in bits to its size-class index: TypeSize/8 bytes,
  // assumed to be a power of two (1..16 — see kNumberOfAccessSizes), yields
  // an index of 0..4 via countr_zero.
  size_t Res = llvm::countr_zero(TypeSize / 8);
  return Res;
}
948
949void HWAddressSanitizer::untagPointerOperand(Instruction *I, Value *Addr) {
950 if (TargetTriple.isAArch64() || TargetTriple.getArch() == Triple::x86_64 ||
951 TargetTriple.isRISCV64())
952 return;
953
954 IRBuilder<> IRB(I);
955 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
956 Value *UntaggedPtr =
957 IRB.CreateIntToPtr(untagPointer(IRB, AddrLong), Addr->getType());
958 I->setOperand(getPointerOperandIndex(I), UntaggedPtr);
959}
960
961Value *HWAddressSanitizer::memToShadow(Value *Mem, IRBuilder<> &IRB) {
962 // Mem >> Scale
963 Value *Shadow = IRB.CreateLShr(Mem, Mapping.scale());
964 if (Mapping.isFixed() && Mapping.offset() == 0)
965 return IRB.CreateIntToPtr(Shadow, PtrTy);
966 // (Mem >> Scale) + Offset
967 return IRB.CreatePtrAdd(ShadowBase, Shadow);
968}
969
970int64_t HWAddressSanitizer::getAccessInfo(bool IsWrite,
971 unsigned AccessSizeIndex) {
972 return (CompileKernel << HWASanAccessInfo::CompileKernelShift) |
973 (MatchAllTag.has_value() << HWASanAccessInfo::HasMatchAllShift) |
974 (MatchAllTag.value_or(0) << HWASanAccessInfo::MatchAllShift) |
975 (Recover << HWASanAccessInfo::RecoverShift) |
976 (IsWrite << HWASanAccessInfo::IsWriteShift) |
977 (AccessSizeIndex << HWASanAccessInfo::AccessSizeShift);
978}
979
/// Emit the inline "fast path" tag check for \p Ptr in front of
/// \p InsertBefore: compare the pointer's top-byte tag against the shadow
/// tag and branch to a new, unlikely-weighted mismatch block. The
/// intermediate values and the mismatch block's terminator are returned for
/// callers to build slow-path handling.
HWAddressSanitizer::ShadowTagCheckInfo
HWAddressSanitizer::insertShadowTagCheck(Value *Ptr, Instruction *InsertBefore,
                                         DomTreeUpdater &DTU, LoopInfo *LI) {
  ShadowTagCheckInfo R;

  IRBuilder<> IRB(InsertBefore);

  // Extract the tag from the top byte of the pointer.
  R.PtrLong = IRB.CreatePointerCast(Ptr, IntptrTy);
  R.PtrTag =
      IRB.CreateTrunc(IRB.CreateLShr(R.PtrLong, PointerTagShift), Int8Ty);
  // Load the shadow tag for the untagged address.
  R.AddrLong = untagPointer(IRB, R.PtrLong);
  Value *Shadow = memToShadow(R.AddrLong, IRB);
  R.MemTag = IRB.CreateLoad(Int8Ty, Shadow);
  Value *TagMismatch = IRB.CreateICmpNE(R.PtrTag, R.MemTag);

  // Pointers carrying the match-all tag are never reported as mismatches.
  if (MatchAllTag.has_value()) {
    Value *TagNotIgnored = IRB.CreateICmpNE(
        R.PtrTag, ConstantInt::get(R.PtrTag->getType(), *MatchAllTag));
    TagMismatch = IRB.CreateAnd(TagMismatch, TagNotIgnored);
  }

  // Split mismatch handling into its own block, weighted as unlikely.
  R.TagMismatchTerm = SplitBlockAndInsertIfThen(
      TagMismatch, InsertBefore, false,
      MDBuilder(*C).createUnlikelyBranchWeights(), &DTU, LI);

  return R;
}
1007
/// Emit the outlined (intrinsic-based) check for a single memory access.
/// With InlineFastPath the shadow comparison is emitted inline first and the
/// intrinsic only runs on the mismatch path; otherwise the intrinsic
/// performs the whole check.
void HWAddressSanitizer::instrumentMemAccessOutline(Value *Ptr, bool IsWrite,
                                                    unsigned AccessSizeIndex,
                                                    Instruction *InsertBefore,
                                                    DomTreeUpdater &DTU,
                                                    LoopInfo *LI) {
  assert(!UsePageAliases);
  const int64_t AccessInfo = getAccessInfo(IsWrite, AccessSizeIndex);

  if (InlineFastPath)
    InsertBefore =
        insertShadowTagCheck(Ptr, InsertBefore, DTU, LI).TagMismatchTerm;

  IRBuilder<> IRB(InsertBefore);
  bool UseFixedShadowIntrinsic = false;
  // The memaccess fixed shadow intrinsic is only supported on AArch64,
  // which allows a 16-bit immediate to be left-shifted by 32.
  // Since kShadowBaseAlignment == 32, and Linux by default will not
  // mmap above 48-bits, practically any valid shadow offset is
  // representable.
  // In particular, an offset of 4TB (1024 << 32) is representable, and
  // ought to be good enough for anybody.
  if (TargetTriple.isAArch64() && Mapping.isFixed()) {
    uint16_t OffsetShifted = Mapping.offset() >> 32;
    // Usable only when the offset round-trips through the 16-bit immediate.
    UseFixedShadowIntrinsic =
        static_cast<uint64_t>(OffsetShifted) << 32 == Mapping.offset();
  }

  if (UseFixedShadowIntrinsic) {
    // Fixed-shadow variant: no shadow base operand, offset is an immediate.
    IRB.CreateIntrinsic(
        UseShortGranules
            ? Intrinsic::hwasan_check_memaccess_shortgranules_fixedshadow
            : Intrinsic::hwasan_check_memaccess_fixedshadow,
        {Ptr, ConstantInt::get(Int32Ty, AccessInfo),
         ConstantInt::get(Int64Ty, Mapping.offset())});
  } else {
    IRB.CreateIntrinsic(
        UseShortGranules ? Intrinsic::hwasan_check_memaccess_shortgranules
                         : Intrinsic::hwasan_check_memaccess,
        {ShadowBase, Ptr, ConstantInt::get(Int32Ty, AccessInfo)});
  }
}
1049
/// Emit the fully inline tag check, including short-granule handling, for a
/// single access. On mismatch the slow path distinguishes: a shadow tag
/// > 15 is a hard failure; otherwise the access must end within the live
/// bytes of the granule and the real tag stored in the granule's last byte
/// must match the pointer tag. The failure block executes
/// architecture-specific inline asm that encodes AccessInfo for the signal
/// handler, and branches back when Recover is set.
void HWAddressSanitizer::instrumentMemAccessInline(Value *Ptr, bool IsWrite,
                                                   unsigned AccessSizeIndex,
                                                   Instruction *InsertBefore,
                                                   DomTreeUpdater &DTU,
                                                   LoopInfo *LI) {
  assert(!UsePageAliases);
  const int64_t AccessInfo = getAccessInfo(IsWrite, AccessSizeIndex);

  ShadowTagCheckInfo TCI = insertShadowTagCheck(Ptr, InsertBefore, DTU, LI);

  // Shadow tags above 15 cannot be short-granule sizes: immediate failure.
  IRBuilder<> IRB(TCI.TagMismatchTerm);
  Value *OutOfShortGranuleTagRange =
      IRB.CreateICmpUGT(TCI.MemTag, ConstantInt::get(Int8Ty, 15));
  Instruction *CheckFailTerm = SplitBlockAndInsertIfThen(
      OutOfShortGranuleTagRange, TCI.TagMismatchTerm, !Recover,
      MDBuilder(*C).createUnlikelyBranchWeights(), &DTU, LI);

  // Fail if the access extends past the live bytes recorded in the shadow.
  IRB.SetInsertPoint(TCI.TagMismatchTerm);
  Value *PtrLowBits = IRB.CreateTrunc(IRB.CreateAnd(TCI.PtrLong, 15), Int8Ty);
  PtrLowBits = IRB.CreateAdd(
      PtrLowBits, ConstantInt::get(Int8Ty, (1 << AccessSizeIndex) - 1));
  Value *PtrLowBitsOOB = IRB.CreateICmpUGE(PtrLowBits, TCI.MemTag);
  SplitBlockAndInsertIfThen(PtrLowBitsOOB, TCI.TagMismatchTerm, false,
                            LI, CheckFailTerm->getParent());

  // Fail if the real tag in the granule's last byte does not match.
  IRB.SetInsertPoint(TCI.TagMismatchTerm);
  Value *InlineTagAddr = IRB.CreateOr(TCI.AddrLong, 15);
  InlineTagAddr = IRB.CreateIntToPtr(InlineTagAddr, PtrTy);
  Value *InlineTag = IRB.CreateLoad(Int8Ty, InlineTagAddr);
  Value *InlineTagMismatch = IRB.CreateICmpNE(TCI.PtrTag, InlineTag);
  SplitBlockAndInsertIfThen(InlineTagMismatch, TCI.TagMismatchTerm, false,
                            LI, CheckFailTerm->getParent());

  IRB.SetInsertPoint(CheckFailTerm);
  InlineAsm *Asm;
  switch (TargetTriple.getArch()) {
  case Triple::x86_64:
    // The signal handler will find the data address in rdi.
        FunctionType::get(VoidTy, {TCI.PtrLong->getType()}, false),
        "int3\nnopl " +
            itostr(0x40 + (AccessInfo & HWASanAccessInfo::RuntimeMask)) +
            "(%rax)",
        "{rdi}",
        /*hasSideEffects=*/true);
    break;
  case Triple::aarch64:
  case Triple::aarch64_be:
    // The signal handler will find the data address in x0.
        FunctionType::get(VoidTy, {TCI.PtrLong->getType()}, false),
        "brk #" + itostr(0x900 + (AccessInfo & HWASanAccessInfo::RuntimeMask)),
        "{x0}",
        /*hasSideEffects=*/true);
    break;
  case Triple::riscv64:
    // The signal handler will find the data address in x10.
        FunctionType::get(VoidTy, {TCI.PtrLong->getType()}, false),
        "ebreak\naddiw x0, x11, " +
            itostr(0x40 + (AccessInfo & HWASanAccessInfo::RuntimeMask)),
        "{x10}",
        /*hasSideEffects=*/true);
    break;
  default:
    report_fatal_error("unsupported architecture");
  }
  IRB.CreateCall(Asm, TCI.PtrLong);
  // In recover mode the failure block falls through back to the check's
  // continuation instead of trapping.
  if (Recover)
    cast<BranchInst>(CheckFailTerm)
        ->setSuccessor(0, TCI.TagMismatchTerm->getParent());
}
1124
/// Return true when \p MI needs no instrumentation: every pointer it
/// touches is either not instrumented by flag or ignorable per
/// ignoreAccess() (which also emits the remarks).
bool HWAddressSanitizer::ignoreMemIntrinsic(OptimizationRemarkEmitter &ORE,
                                            MemIntrinsic *MI) {
    // Transfers write the destination and read the source; both sides must
    // be ignorable for the intrinsic to be skipped.
    return (!ClInstrumentWrites || ignoreAccess(ORE, MTI, MTI->getDest())) &&
           (!ClInstrumentReads || ignoreAccess(ORE, MTI, MTI->getSource()));
  }
  if (isa<MemSetInst>(MI))
    return !ClInstrumentWrites || ignoreAccess(ORE, MI, MI->getDest());
  // Anything else stays instrumented.
  return false;
}
1135
/// Replace a mem intrinsic with a call to the matching HWASan runtime
/// callback (HwasanMemmove/HwasanMemcpy/HwasanMemset), delegating the tag
/// checking to the runtime; the original intrinsic is erased. When
/// UseMatchAllCallback is set, the match-all tag is appended as an extra
/// argument.
void HWAddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
  IRBuilder<> IRB(MI);
  if (isa<MemTransferInst>(MI)) {
      MI->getOperand(0), MI->getOperand(1),
      IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)};

    if (UseMatchAllCallback)
      Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
    IRB.CreateCall(isa<MemMoveInst>(MI) ? HwasanMemmove : HwasanMemcpy, Args);
  } else if (isa<MemSetInst>(MI)) {
      MI->getOperand(0),
      IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
      IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)};
    if (UseMatchAllCallback)
      Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
    IRB.CreateCall(HwasanMemset, Args);
  }
  MI->eraseFromParent();
}
1157
/// Instrument one interesting memory operand. Non-scalable, power-of-two
/// sized accesses up to 16 bytes with sufficient alignment use the indexed
/// path (callback / outlined / inline check); everything else falls back to
/// the sized callback. Returns true iff instrumentation was emitted.
bool HWAddressSanitizer::instrumentMemAccess(InterestingMemoryOperand &O,
                                             DomTreeUpdater &DTU, LoopInfo *LI,
                                             const DataLayout &DL) {
  Value *Addr = O.getPtr();

  LLVM_DEBUG(dbgs() << "Instrumenting: " << O.getInsn() << "\n");

  // If the pointer is statically known to be zero, the tag check will pass
  // since:
  // 1) it has a zero tag
  // 2) the shadow memory corresponding to address 0 is initialized to zero and
  // never updated.
  // We can therefore elide the tag check.
  llvm::KnownBits Known(DL.getPointerTypeSizeInBits(Addr->getType()));
  llvm::computeKnownBits(Addr, Known, DL);
  if (Known.isZero())
    return false;

  // Masked loads/stores are not handled yet.
  if (O.MaybeMask)
    return false; // FIXME

  IRBuilder<> IRB(O.getInsn());
  if (!O.TypeStoreSize.isScalable() && isPowerOf2_64(O.TypeStoreSize) &&
      (O.TypeStoreSize / 8 <= (1ULL << (kNumberOfAccessSizes - 1))) &&
      (!O.Alignment || *O.Alignment >= Mapping.getObjectAlignment() ||
       *O.Alignment >= O.TypeStoreSize / 8)) {
    size_t AccessSizeIndex = TypeSizeToSizeIndex(O.TypeStoreSize);
    if (InstrumentWithCalls) {
      SmallVector<Value *, 2> Args{IRB.CreatePointerCast(Addr, IntptrTy)};
      if (UseMatchAllCallback)
        Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
      IRB.CreateCall(HwasanMemoryAccessCallback[O.IsWrite][AccessSizeIndex],
                     Args);
    } else if (OutlinedChecks) {
      instrumentMemAccessOutline(Addr, O.IsWrite, AccessSizeIndex, O.getInsn(),
                                 DTU, LI);
    } else {
      instrumentMemAccessInline(Addr, O.IsWrite, AccessSizeIndex, O.getInsn(),
                                DTU, LI);
    }
  } else {
      IRB.CreatePointerCast(Addr, IntptrTy),
      IRB.CreateUDiv(IRB.CreateTypeSize(IntptrTy, O.TypeStoreSize),
                     ConstantInt::get(IntptrTy, 8))};
    if (UseMatchAllCallback)
      Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
    IRB.CreateCall(HwasanMemoryAccessCallbackSized[O.IsWrite], Args);
  }
  // Strip the tag from the instruction's operand on targets that need it.
  untagPointerOperand(O.getInsn(), Addr);

  return true;
}
1211
/// Write \p Tag into the shadow covering \p Size bytes of alloca \p AI.
/// With short granules, a partially-used last granule stores the live byte
/// count in shadow and the real tag in the granule's final byte; without
/// them, the whole aligned size is tagged uniformly.
void HWAddressSanitizer::tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag,
                                   size_t Size) {
  size_t AlignedSize = alignTo(Size, Mapping.getObjectAlignment());
  if (!UseShortGranules)
    Size = AlignedSize;

  Tag = IRB.CreateTrunc(Tag, Int8Ty);
  if (InstrumentWithCalls) {
    // Delegate tagging to the runtime.
    IRB.CreateCall(HwasanTagMemoryFunc,
                   {IRB.CreatePointerCast(AI, PtrTy), Tag,
                    ConstantInt::get(IntptrTy, AlignedSize)});
  } else {
    size_t ShadowSize = Size >> Mapping.scale();
    Value *AddrLong = untagPointer(IRB, IRB.CreatePointerCast(AI, IntptrTy));
    Value *ShadowPtr = memToShadow(AddrLong, IRB);
    // If this memset is not inlined, it will be intercepted in the hwasan
    // runtime library. That's OK, because the interceptor skips the checks if
    // the address is in the shadow region.
    // FIXME: the interceptor is not as fast as real memset. Consider lowering
    // llvm.memset right here into either a sequence of stores, or a call to
    // hwasan_tag_memory.
    if (ShadowSize)
      IRB.CreateMemSet(ShadowPtr, Tag, ShadowSize, Align(1));
    if (Size != AlignedSize) {
      // Short granule: shadow gets the live-byte count, and the tag is
      // stored in the last byte of the granule itself.
      const uint8_t SizeRemainder = Size % Mapping.getObjectAlignment().value();
      IRB.CreateStore(ConstantInt::get(Int8Ty, SizeRemainder),
                      IRB.CreateConstGEP1_32(Int8Ty, ShadowPtr, ShadowSize));
      IRB.CreateStore(
          Tag, IRB.CreateConstGEP1_32(Int8Ty, IRB.CreatePointerCast(AI, PtrTy),
                                      AlignedSize - 1));
    }
  }
}
1245
1246unsigned HWAddressSanitizer::retagMask(unsigned AllocaNo) {
1247 if (TargetTriple.getArch() == Triple::x86_64)
1248 return AllocaNo & TagMaskByte;
1249
1250 // A list of 8-bit numbers that have at most one run of non-zero bits.
1251 // x = x ^ (mask << 56) can be encoded as a single armv8 instruction for these
1252 // masks.
1253 // The list does not include the value 255, which is used for UAR.
1254 //
1255 // Because we are more likely to use earlier elements of this list than later
1256 // ones, it is sorted in increasing order of probability of collision with a
1257 // mask allocated (temporally) nearby. The program that generated this list
1258 // can be found at:
1259 // https://github.com/google/sanitizers/blob/master/hwaddress-sanitizer/sort_masks.py
1260 static const unsigned FastMasks[] = {
1261 0, 128, 64, 192, 32, 96, 224, 112, 240, 48, 16, 120,
1262 248, 56, 24, 8, 124, 252, 60, 28, 12, 4, 126, 254,
1263 62, 30, 14, 6, 2, 127, 63, 31, 15, 7, 3, 1};
1264 return FastMasks[AllocaNo % std::size(FastMasks)];
1265}
1266
1267Value *HWAddressSanitizer::applyTagMask(IRBuilder<> &IRB, Value *OldTag) {
1268 if (TagMaskByte == 0xFF)
1269 return OldTag; // No need to clear the tag byte.
1270 return IRB.CreateAnd(OldTag,
1271 ConstantInt::get(OldTag->getType(), TagMaskByte));
1272}
1273
1274Value *HWAddressSanitizer::getNextTagWithCall(IRBuilder<> &IRB) {
1275 return IRB.CreateZExt(IRB.CreateCall(HwasanGenerateTagFunc), IntptrTy);
1276}
1277
/// Return the per-function base tag for stack allocas: the cached
/// StackBaseTag when the prologue recorded one, otherwise a value mixed
/// from frame-pointer entropy. May return nullptr (first early return,
/// condition elided in this listing — confirm against full source).
Value *HWAddressSanitizer::getStackBaseTag(IRBuilder<> &IRB) {
    return nullptr;
  if (StackBaseTag)
    return StackBaseTag;
  // Extract some entropy from the stack pointer for the tags.
  // Take bits 20..28 (ASLR entropy) and xor with bits 0..8 (these differ
  // between functions).
  Value *FramePointerLong = getCachedFP(IRB);
  Value *StackTag =
      applyTagMask(IRB, IRB.CreateXor(FramePointerLong,
                                      IRB.CreateLShr(FramePointerLong, 20)));
  StackTag->setName("hwasan.stack.base.tag");
  return StackTag;
}
1293
/// Tag for alloca number \p AllocaNo: the stack base tag xored with that
/// alloca's retag mask. The first return falls back to a runtime-generated
/// tag (guard condition elided in this listing — confirm against full
/// source).
Value *HWAddressSanitizer::getAllocaTag(IRBuilder<> &IRB, Value *StackTag,
                                        unsigned AllocaNo) {
    return getNextTagWithCall(IRB);
  return IRB.CreateXor(
      StackTag, ConstantInt::get(StackTag->getType(), retagMask(AllocaNo)));
}
1301
1302Value *HWAddressSanitizer::getUARTag(IRBuilder<> &IRB) {
1303 Value *FramePointerLong = getCachedFP(IRB);
1304 Value *UARTag =
1305 applyTagMask(IRB, IRB.CreateLShr(FramePointerLong, PointerTagShift));
1306
1307 UARTag->setName("hwasan.uar.tag");
1308 return UARTag;
1309}
1310
1311// Add a tag to an address.
1312Value *HWAddressSanitizer::tagPointer(IRBuilder<> &IRB, Type *Ty,
1313 Value *PtrLong, Value *Tag) {
1314 assert(!UsePageAliases);
1315 Value *TaggedPtrLong;
1316 if (CompileKernel) {
1317 // Kernel addresses have 0xFF in the most significant byte.
1318 Value *ShiftedTag =
1319 IRB.CreateOr(IRB.CreateShl(Tag, PointerTagShift),
1320 ConstantInt::get(IntptrTy, (1ULL << PointerTagShift) - 1));
1321 TaggedPtrLong = IRB.CreateAnd(PtrLong, ShiftedTag);
1322 } else {
1323 // Userspace can simply do OR (tag << PointerTagShift);
1324 Value *ShiftedTag = IRB.CreateShl(Tag, PointerTagShift);
1325 TaggedPtrLong = IRB.CreateOr(PtrLong, ShiftedTag);
1326 }
1327 return IRB.CreateIntToPtr(TaggedPtrLong, Ty);
1328}
1329
1330// Remove tag from an address.
1331Value *HWAddressSanitizer::untagPointer(IRBuilder<> &IRB, Value *PtrLong) {
1332 assert(!UsePageAliases);
1333 Value *UntaggedPtrLong;
1334 if (CompileKernel) {
1335 // Kernel addresses have 0xFF in the most significant byte.
1336 UntaggedPtrLong =
1337 IRB.CreateOr(PtrLong, ConstantInt::get(PtrLong->getType(),
1338 TagMaskByte << PointerTagShift));
1339 } else {
1340 // Userspace addresses have 0x00.
1341 UntaggedPtrLong = IRB.CreateAnd(
1342 PtrLong, ConstantInt::get(PtrLong->getType(),
1343 ~(TagMaskByte << PointerTagShift)));
1344 }
1345 return UntaggedPtrLong;
1346}
1347
1348Value *HWAddressSanitizer::getHwasanThreadSlotPtr(IRBuilder<> &IRB) {
1349 // Android provides a fixed TLS slot for sanitizers. See TLS_SLOT_SANITIZER
1350 // in Bionic's libc/platform/bionic/tls_defines.h.
1351 constexpr int SanitizerSlot = 6;
1352 if (TargetTriple.isAArch64() && TargetTriple.isAndroid())
1353 return memtag::getAndroidSlotPtr(IRB, SanitizerSlot);
1354 return ThreadPtrGlobal;
1355}
1356
1357Value *HWAddressSanitizer::getCachedFP(IRBuilder<> &IRB) {
1358 if (!CachedFP)
1359 CachedFP = memtag::getFP(IRB);
1360 return CachedFP;
1361}
1362
1363Value *HWAddressSanitizer::getFrameRecordInfo(IRBuilder<> &IRB) {
1364 // Prepare ring buffer data.
1365 Value *PC = memtag::getPC(TargetTriple, IRB);
1366 Value *FP = getCachedFP(IRB);
1367
1368 // Mix FP and PC.
1369 // Assumptions:
1370 // PC is 0x0000PPPPPPPPPPPP (48 bits are meaningful, others are zero)
1371 // FP is 0xfffffffffffFFFF0 (4 lower bits are zero)
1372 // We only really need ~20 lower non-zero bits (FFFF), so we mix like this:
1373 // 0xFFFFPPPPPPPPPPPP
1374 //
1375 // FP works because in AArch64FrameLowering::getFrameIndexReference, we
1376 // prefer FP-relative offsets for functions compiled with HWASan.
1377 FP = IRB.CreateShl(FP, 44);
1378 return IRB.CreateOr(PC, FP);
1379}
1380
/// Emit the per-function prologue: establish ShadowBase (from a non-TLS
/// source, an ifunc, or by aligning the thread slot value up) and, when
/// \p WithFrameRecord is set, record this frame in the stack-history ring
/// buffer according to ClRecordStackHistory.
void HWAddressSanitizer::emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord) {
  if (!Mapping.isInTls())
    ShadowBase = getShadowNonTls(IRB);
  else if (!WithFrameRecord && TargetTriple.isAndroid())
    ShadowBase = getDynamicShadowIfunc(IRB);

  // Nothing left to do if the shadow base is known and no frame record is
  // wanted.
  if (!WithFrameRecord && ShadowBase)
    return;

  Value *SlotPtr = nullptr;
  Value *ThreadLong = nullptr;
  Value *ThreadLongMaybeUntagged = nullptr;

  // Lazily load the thread slot value; both the ring buffer and the shadow
  // base computation below may need it.
  auto getThreadLongMaybeUntagged = [&]() {
    if (!SlotPtr)
      SlotPtr = getHwasanThreadSlotPtr(IRB);
    if (!ThreadLong)
      ThreadLong = IRB.CreateLoad(IntptrTy, SlotPtr);
    // Extract the address field from ThreadLong. Unnecessary on AArch64 with
    // TBI.
    return TargetTriple.isAArch64() ? ThreadLong
                                    : untagPointer(IRB, ThreadLong);
  };

  if (WithFrameRecord) {
    switch (ClRecordStackHistory) {
    case libcall: {
      // Emit a runtime call into hwasan rather than emitting instructions for
      // recording stack history.
      Value *FrameRecordInfo = getFrameRecordInfo(IRB);
      IRB.CreateCall(HwasanRecordFrameRecordFunc, {FrameRecordInfo});
      break;
    }
    case instr: {
      ThreadLongMaybeUntagged = getThreadLongMaybeUntagged();

      StackBaseTag = IRB.CreateAShr(ThreadLong, 3);

      // Store data to ring buffer.
      Value *FrameRecordInfo = getFrameRecordInfo(IRB);
      Value *RecordPtr =
          IRB.CreateIntToPtr(ThreadLongMaybeUntagged, IRB.getPtrTy(0));
      IRB.CreateStore(FrameRecordInfo, RecordPtr);

      // Advance the ring buffer cursor stored in the thread slot.
      IRB.CreateStore(memtag::incrementThreadLong(IRB, ThreadLong, 8), SlotPtr);
      break;
    }
    case none: {
          "A stack history recording mode should've been selected.");
    }
    }
  }

  if (!ShadowBase) {
    if (!ThreadLongMaybeUntagged)
      ThreadLongMaybeUntagged = getThreadLongMaybeUntagged();

    // Get shadow base address by aligning RecordPtr up.
    // Note: this is not correct if the pointer is already aligned.
    // Runtime library will make sure this never happens.
    ShadowBase = IRB.CreateAdd(
        IRB.CreateOr(
            ThreadLongMaybeUntagged,
            ConstantInt::get(IntptrTy, (1ULL << kShadowBaseAlignment) - 1)),
        ConstantInt::get(IntptrTy, 1), "hwasan.shadow");
    ShadowBase = IRB.CreateIntToPtr(ShadowBase, PtrTy);
  }
}
1450
/// Immediately after each landing pad, call the HwasanHandleVfork runtime
/// callback with the stack pointer register ("rsp" on x86-64, "sp"
/// elsewhere), letting the runtime react to frames unwound past.
void HWAddressSanitizer::instrumentLandingPads(
    SmallVectorImpl<Instruction *> &LandingPadVec) {
  for (auto *LP : LandingPadVec) {
    IRBuilder<> IRB(LP->getNextNode());
    IRB.CreateCall(
        HwasanHandleVfork,
        IRB, (TargetTriple.getArch() == Triple::x86_64) ? "rsp" : "sp")});
  }
}
1461
/// Instrument every alloca recorded in \p SInfo: replace its uses with a
/// tagged address, tag the memory at lifetime start (or function entry),
/// and retag with \p UARTag at lifetime end (or every return) so
/// use-after-scope / use-after-return is detected.
void HWAddressSanitizer::instrumentStack(memtag::StackInfo &SInfo,
                                         Value *StackTag, Value *UARTag,
                                         const DominatorTree &DT,
                                         const PostDominatorTree &PDT,
                                         const LoopInfo &LI) {
  // Ideally, we want to calculate tagged stack base pointer, and rewrite all
  // alloca addresses using that. Unfortunately, offsets are not known yet
  // (unless we use ASan-style mega-alloca). Instead we keep the base tag in a
  // temp, shift-OR it into each alloca address and xor with the retag mask.
  // This generates one extra instruction per alloca use.
  unsigned int I = 0;

  for (auto &KV : SInfo.AllocasToInstrument) {
    auto N = I++;
    auto *AI = KV.first;
    memtag::AllocaInfo &Info = KV.second;
    IRBuilder<> IRB(AI->getNextNode());

    // Replace uses of the alloca with tagged address.
    Value *Tag = getAllocaTag(IRB, StackTag, N);
    Value *AILong = IRB.CreatePointerCast(AI, IntptrTy);
    Value *AINoTagLong = untagPointer(IRB, AILong);
    Value *Replacement = tagPointer(IRB, AI->getType(), AINoTagLong, Tag);
    std::string Name =
        AI->hasName() ? AI->getName().str() : "alloca." + itostr(N);
    Replacement->setName(Name + ".hwasan");

    size_t Size = memtag::getAllocaSizeInBytes(*AI);
    size_t AlignedSize = alignTo(Size, Mapping.getObjectAlignment());

    // Keep the cast itself and lifetime intrinsics on the raw alloca; all
    // other users see the tagged pointer.
    AI->replaceUsesWithIf(Replacement, [AILong](const Use &U) {
      auto *User = U.getUser();
      return User != AILong && !isa<LifetimeIntrinsic>(User);
    });

    memtag::annotateDebugRecords(Info, retagMask(N));

    auto TagEnd = [&](Instruction *Node) {
      IRB.SetInsertPoint(Node);
      // When untagging, use the `AlignedSize` because we need to set the tags
      // for the entire alloca to original. If we used `Size` here, we would
      // keep the last granule tagged, and store zero in the last byte of the
      // last granule, due to how short granules are implemented.
      tagAlloca(IRB, AI, UARTag, AlignedSize);
    };
    // Calls to functions that may return twice (e.g. setjmp) confuse the
    // postdominator analysis, and will leave us to keep memory tagged after
    // function return. Work around this by always untagging at every return
    // statement if return_twice functions are called.
    if (DetectUseAfterScope && !SInfo.CallsReturnTwice &&
        memtag::isStandardLifetime(Info.LifetimeStart, Info.LifetimeEnd, &DT,
                                   &LI, ClMaxLifetimes)) {
      for (IntrinsicInst *Start : Info.LifetimeStart) {
        IRB.SetInsertPoint(Start->getNextNode());
        tagAlloca(IRB, AI, Tag, Size);
      }
      if (!memtag::forAllReachableExits(DT, PDT, LI, Info, SInfo.RetVec,
                                        TagEnd)) {
        for (auto *End : Info.LifetimeEnd)
          End->eraseFromParent();
      }
    } else {
      tagAlloca(IRB, AI, Tag, Size);
      for (auto *RI : SInfo.RetVec)
        TagEnd(RI);
      // We inserted tagging outside of the lifetimes, so we have to remove
      // them.
      for (auto &II : Info.LifetimeStart)
        II->eraseFromParent();
      for (auto &II : Info.LifetimeEnd)
        II->eraseFromParent();
    }
    memtag::alignAndPadAlloca(Info, Mapping.getObjectAlignment());
  }
}
1537
                       bool Skip) {
  // Report the instrumentation decision for \p F through the remark
  // pipeline: a positive remark when the function is skipped, a
  // missed-optimization remark when it is sanitized.
  if (Skip) {
    ORE.emit([&]() {
      return OptimizationRemark(DEBUG_TYPE, "Skip", &F)
             << "Skipped: F=" << ore::NV("Function", &F);
    });
  } else {
    ORE.emit([&]() {
      return OptimizationRemarkMissed(DEBUG_TYPE, "Sanitize", &F)
             << "Sanitized: F=" << ore::NV("Function", &F);
    });
  }
}
1552
/// Decide whether selective instrumentation should skip \p F: skipped when
/// the random-keep draw (ClRandomKeepRate) fails, or when the function is
/// hot according to the profile summary at the ClHotPercentileCutoff
/// percentile. Either flag only participates if explicitly set.
bool HWAddressSanitizer::selectiveInstrumentationShouldSkip(
  auto SkipHot = [&]() {
    // Only applies when the cutoff flag was explicitly given.
    if (!ClHotPercentileCutoff.getNumOccurrences())
      return false;
    ProfileSummaryInfo *PSI =
        MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
    if (!PSI || !PSI->hasProfileSummary()) {
      // No profile data: nothing can be classified as hot; keep the
      // function and count the occurrence.
      ++NumNoProfileSummaryFuncs;
      return false;
    }
    return PSI->isFunctionHotInCallGraphNthPercentile(
  };

  auto SkipRandom = [&]() {
    if (!ClRandomKeepRate.getNumOccurrences())
      return false;
    // Keep with probability ClRandomKeepRate; skip otherwise.
    std::bernoulli_distribution D(ClRandomKeepRate);
    return !D(*Rng);
  };

  // SkipRandom() runs first and short-circuits SkipHot().
  bool Skip = SkipRandom() || SkipHot();
  return Skip;
}
1580
/// Instrument a single function: collect interesting memory operands,
/// landing pads, mem intrinsics and stack allocas, emit the prologue
/// (shadow base / frame record), then instrument each collected item.
/// Per-function cached state (ShadowBase, StackBaseTag, CachedFP) is reset
/// at the end.
void HWAddressSanitizer::sanitizeFunction(Function &F,
  if (&F == HwasanCtorFunction)
    return;

  // Do not apply any instrumentation for naked functions.
  if (F.hasFnAttribute(Attribute::Naked))
    return;

  if (!F.hasFnAttribute(Attribute::SanitizeHWAddress))
    return;

  if (F.empty())
    return;

  if (F.isPresplitCoroutine())
    return;

  NumTotalFuncs++;

  if (selectiveInstrumentationShouldSkip(F, FAM))
    return;

  NumInstrumentedFuncs++;

  LLVM_DEBUG(dbgs() << "Function: " << F.getName() << "\n");

  SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
  SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
  SmallVector<Instruction *, 8> LandingPadVec;

  // Single pass over the function collecting everything to instrument.
  for (auto &Inst : instructions(F)) {
    if (InstrumentStack) {
      SIB.visit(ORE, Inst);
    }

    if (InstrumentLandingPads && isa<LandingPadInst>(Inst))
      LandingPadVec.push_back(&Inst);

    getInterestingMemoryOperands(ORE, &Inst, TLI, OperandsToInstrument);

    if (!ignoreMemIntrinsic(ORE, MI))
      IntrinToInstrument.push_back(MI);
  }

  memtag::StackInfo &SInfo = SIB.get();

  initializeCallbacks(*F.getParent());

  if (!LandingPadVec.empty())
    instrumentLandingPads(LandingPadVec);

  if (SInfo.AllocasToInstrument.empty() && F.hasPersonalityFn() &&
      F.getPersonalityFn()->getName() == kHwasanPersonalityThunkName) {
    // __hwasan_personality_thunk is a no-op for functions without an
    // instrumented stack, so we can drop it.
    F.setPersonalityFn(nullptr);
  }

  // Nothing to instrument in this function.
  if (SInfo.AllocasToInstrument.empty() && OperandsToInstrument.empty() &&
      IntrinToInstrument.empty())
    return;

  assert(!ShadowBase);

  BasicBlock::iterator InsertPt = F.getEntryBlock().begin();
  IRBuilder<> EntryIRB(&F.getEntryBlock(), InsertPt);
  emitPrologue(EntryIRB,
               /*WithFrameRecord*/ ClRecordStackHistory != none &&
                   Mapping.withFrameRecord() &&
                   !SInfo.AllocasToInstrument.empty());

  if (!SInfo.AllocasToInstrument.empty()) {
    const LoopInfo &LI = FAM.getResult<LoopAnalysis>(F);
    Value *StackTag = getStackBaseTag(EntryIRB);
    Value *UARTag = getUARTag(EntryIRB);
    instrumentStack(SInfo, StackTag, UARTag, DT, PDT, LI);
  }

  // If we split the entry block, move any allocas that were originally in the
  // entry block back into the entry block so that they aren't treated as
  // dynamic allocas.
  if (EntryIRB.GetInsertBlock() != &F.getEntryBlock()) {
    InsertPt = F.getEntryBlock().begin();
    for (Instruction &I :
         llvm::make_early_inc_range(*EntryIRB.GetInsertBlock())) {
      if (auto *AI = dyn_cast<AllocaInst>(&I))
        if (isa<ConstantInt>(AI->getArraySize()))
          I.moveBefore(F.getEntryBlock(), InsertPt);
    }
  }

  DomTreeUpdater DTU(DT, PDT, DomTreeUpdater::UpdateStrategy::Lazy);
  const DataLayout &DL = F.getDataLayout();
  for (auto &Operand : OperandsToInstrument)
    instrumentMemAccess(Operand, DTU, LI, DL);
  DTU.flush();

  if (ClInstrumentMemIntrinsics && !IntrinToInstrument.empty()) {
    for (auto *Inst : IntrinToInstrument)
      instrumentMemIntrinsic(Inst);
  }

  // Reset per-function cached state for the next function.
  ShadowBase = nullptr;
  StackBaseTag = nullptr;
  CachedFP = nullptr;
}
1699
/// Tag a single global: clone it into GV.hwasan with a padded initializer
/// (short-granule tag in the last byte when padding is needed), emit one or
/// more hwasan_globals descriptors for the runtime, and replace the
/// original with an alias whose address carries \p Tag in the top byte.
void HWAddressSanitizer::instrumentGlobal(GlobalVariable *GV, uint8_t Tag) {
  assert(!UsePageAliases);
  Constant *Initializer = GV->getInitializer();
  uint64_t SizeInBytes =
      M.getDataLayout().getTypeAllocSize(Initializer->getType());
  uint64_t NewSize = alignTo(SizeInBytes, Mapping.getObjectAlignment());
  if (SizeInBytes != NewSize) {
    // Pad the initializer out to the next multiple of 16 bytes and add the
    // required short granule tag.
    std::vector<uint8_t> Init(NewSize - SizeInBytes, 0);
    Init.back() = Tag;
    Initializer = ConstantStruct::getAnon({Initializer, Padding});
  }

  auto *NewGV = new GlobalVariable(M, Initializer->getType(), GV->isConstant(),
                                   GlobalValue::ExternalLinkage, Initializer,
                                   GV->getName() + ".hwasan");
  NewGV->copyAttributesFrom(GV);
  NewGV->setLinkage(GlobalValue::PrivateLinkage);
  NewGV->copyMetadata(GV, 0);
  NewGV->setAlignment(
      std::max(GV->getAlign().valueOrOne(), Mapping.getObjectAlignment()));

  // It is invalid to ICF two globals that have different tags. In the case
  // where the size of the global is a multiple of the tag granularity the
  // contents of the globals may be the same but the tags (i.e. symbol values)
  // may be different, and the symbols are not considered during ICF. In the
  // case where the size is not a multiple of the granularity, the short granule
  // tags would discriminate two globals with different tags, but there would
  // otherwise be nothing stopping such a global from being incorrectly ICF'd
  // with an uninstrumented (i.e. tag 0) global that happened to have the short
  // granule tag in the last byte.
  NewGV->setUnnamedAddr(GlobalValue::UnnamedAddr::None);

  // Descriptor format (assuming little-endian):
  // bytes 0-3: relative address of global
  // bytes 4-6: size of global (16MB ought to be enough for anyone, but in case
  // it isn't, we create multiple descriptors)
  // byte 7: tag
  auto *DescriptorTy = StructType::get(Int32Ty, Int32Ty);
  const uint64_t MaxDescriptorSize = 0xfffff0;
  for (uint64_t DescriptorPos = 0; DescriptorPos < SizeInBytes;
       DescriptorPos += MaxDescriptorSize) {
    auto *Descriptor =
        new GlobalVariable(M, DescriptorTy, true, GlobalValue::PrivateLinkage,
                           nullptr, GV->getName() + ".hwasan.descriptor");
    auto *GVRelPtr = ConstantExpr::getTrunc(
            ConstantExpr::getPtrToInt(NewGV, Int64Ty),
            ConstantExpr::getPtrToInt(Descriptor, Int64Ty)),
        ConstantInt::get(Int64Ty, DescriptorPos)),
        Int32Ty);
    uint32_t Size = std::min(SizeInBytes - DescriptorPos, MaxDescriptorSize);
    auto *SizeAndTag = ConstantInt::get(Int32Ty, Size | (uint32_t(Tag) << 24));
    Descriptor->setComdat(NewGV->getComdat());
    Descriptor->setInitializer(ConstantStruct::getAnon({GVRelPtr, SizeAndTag}));
    Descriptor->setSection("hwasan_globals");
    Descriptor->setMetadata(LLVMContext::MD_associated,
    appendToCompilerUsed(M, Descriptor);
  }

      ConstantExpr::getPtrToInt(NewGV, Int64Ty),
      ConstantInt::get(Int64Ty, uint64_t(Tag) << PointerTagShift)),
      GV->getType());
  auto *Alias = GlobalAlias::create(GV->getValueType(), GV->getAddressSpace(),
                                    GV->getLinkage(), "", Aliasee, &M);
  Alias->setVisibility(GV->getVisibility());
  Alias->takeName(GV);
  GV->replaceAllUsesWith(Alias);
  GV->eraseFromParent();
}
1776
/// Tag all eligible globals in the module. Declarations, llvm.-prefixed,
/// thread-local and common-linkage globals are excluded, as are globals in
/// custom sections (unless ClAllGlobals). Tags start from a hash of the
/// source file name (deterministic per file) and increment per global,
/// staying within [16, TagMaskByte].
void HWAddressSanitizer::instrumentGlobals() {
  std::vector<GlobalVariable *> Globals;
  for (GlobalVariable &GV : M.globals()) {
      continue;

    if (GV.isDeclarationForLinker() || GV.getName().starts_with("llvm.") ||
        GV.isThreadLocal())
      continue;

    // Common symbols can't have aliases point to them, so they can't be tagged.
    if (GV.hasCommonLinkage())
      continue;

    if (ClAllGlobals) {
      // Avoid instrumenting intrinsic global variables.
      if (GV.getSection() == "llvm.metadata")
        continue;
    } else {
      // Globals with custom sections may be used in __start_/__stop_
      // enumeration, which would be broken both by adding tags and potentially
      // by the extra padding/alignment that we insert.
      if (GV.hasSection())
        continue;
    }

    Globals.push_back(&GV);
  }

  // Seed the first tag from the source file name so tag assignment is
  // deterministic for a given file.
  MD5 Hasher;
  Hasher.update(M.getSourceFileName());
  MD5::MD5Result Hash;
  Hasher.final(Hash);
  uint8_t Tag = Hash[0];

  assert(TagMaskByte >= 16);

  for (GlobalVariable *GV : Globals) {
    // Don't allow globals to be tagged with something that looks like a
    // short-granule tag, otherwise we lose inter-granule overflow detection, as
    // the fast path shadow-vs-address check succeeds.
    if (Tag < 16 || Tag > TagMaskByte)
      Tag = 16;
    instrumentGlobal(GV, Tag++);
  }
}
1823
1824void HWAddressSanitizer::instrumentPersonalityFunctions() {
1825 // We need to untag stack frames as we unwind past them. That is the job of
1826 // the personality function wrapper, which either wraps an existing
1827 // personality function or acts as a personality function on its own. Each
1828 // function that has a personality function or that can be unwound past has
1829 // its personality function changed to a thunk that calls the personality
1830 // function wrapper in the runtime.
 // NOTE(review): the declaration of PersonalityFns (original line 1831) is
 // elided in this listing; from its uses below it maps a personality
 // function (or nullptr) to the sanitized functions using it -- confirm in
 // the full source.
 // Bucket sanitized functions by their (pointer-cast-stripped) personality
 // function. The nullptr bucket holds functions with no personality that
 // may still be unwound past (i.e. not marked nounwind).
1832 for (Function &F : M) {
1833 if (F.isDeclaration() || !F.hasFnAttribute(Attribute::SanitizeHWAddress))
1834 continue;
1835
1836 if (F.hasPersonalityFn()) {
1837 PersonalityFns[F.getPersonalityFn()->stripPointerCasts()].push_back(&F);
1838 } else if (!F.hasFnAttribute(Attribute::NoUnwind)) {
1839 PersonalityFns[nullptr].push_back(&F);
1840 }
1841 }
1842
1843 if (PersonalityFns.empty())
1844 return;
1845
1846 FunctionCallee HwasanPersonalityWrapper = M.getOrInsertFunction(
1847 "__hwasan_personality_wrapper", Int32Ty, Int32Ty, Int32Ty, Int64Ty, PtrTy,
1848 PtrTy, PtrTy, PtrTy, PtrTy);
 // Declared with a dummy void() type: only their addresses are taken below
 // (via getCallee) and forwarded to the wrapper; they are never called
 // directly from here.
1849 FunctionCallee UnwindGetGR = M.getOrInsertFunction("_Unwind_GetGR", VoidTy);
1850 FunctionCallee UnwindGetCFA = M.getOrInsertFunction("_Unwind_GetCFA", VoidTy);
1851
 // Emit one thunk per distinct personality function (plus one for the
 // nullptr bucket, named without a suffix).
1852 for (auto &P : PersonalityFns) {
1853 std::string ThunkName = kHwasanPersonalityThunkName;
1854 if (P.first)
1855 ThunkName += ("." + P.first->getName()).str();
1856 FunctionType *ThunkFnTy = FunctionType::get(
1857 Int32Ty, {Int32Ty, Int32Ty, Int64Ty, PtrTy, PtrTy}, false);
 // The thunk is "local" when the personality it wraps is not a global
 // value or has local linkage; such a thunk cannot be shared across TUs.
1858 bool IsLocal = P.first && (!isa<GlobalValue>(P.first) ||
1859 cast<GlobalValue>(P.first)->hasLocalLinkage());
 // NOTE(review): the linkage/address-space arguments to Function::Create
 // (original lines 1861-1862) are elided in this listing -- confirm in
 // the full source.
1860 auto *ThunkFn = Function::Create(ThunkFnTy,
1863 ThunkName, &M);
1864 // TODO: think about other attributes as well.
 // Propagate branch-target enforcement (BTI) if any wrapped function
 // requires it, so the thunk remains a valid indirect-branch target.
1865 if (any_of(P.second, [](const Function *F) {
1866 return F->hasFnAttribute("branch-target-enforcement");
1867 })) {
1868 ThunkFn->addFnAttr("branch-target-enforcement");
1869 }
 // Non-local thunks are deduplicated across TUs via a comdat and hidden
 // from the dynamic symbol table.
1870 if (!IsLocal) {
1871 ThunkFn->setVisibility(GlobalValue::HiddenVisibility);
1872 ThunkFn->setComdat(M.getOrInsertComdat(ThunkName));
1873 }
1874
 // Thunk body: a single tail call to the runtime wrapper, forwarding the
 // thunk's five personality arguments plus the original personality
 // function (or null) and the addresses of the two _Unwind accessors.
1875 auto *BB = BasicBlock::Create(*C, "entry", ThunkFn);
1876 IRBuilder<> IRB(BB);
1877 CallInst *WrapperCall = IRB.CreateCall(
1878 HwasanPersonalityWrapper,
1879 {ThunkFn->getArg(0), ThunkFn->getArg(1), ThunkFn->getArg(2),
1880 ThunkFn->getArg(3), ThunkFn->getArg(4),
1881 P.first ? P.first : Constant::getNullValue(PtrTy),
1882 UnwindGetGR.getCallee(), UnwindGetCFA.getCallee()});
1883 WrapperCall->setTailCall();
1884 IRB.CreateRet(WrapperCall);
1885
 // Point every function in this bucket at the new thunk.
1886 for (Function *F : P.second)
1887 F->setPersonalityFn(ThunkFn);
1888 }
1889}
1890
1891void HWAddressSanitizer::ShadowMapping::init(Triple &TargetTriple,
1892 bool InstrumentWithCalls,
1893 bool CompileKernel) {
1894 // Start with defaults.
1895 Scale = kDefaultShadowScale;
1896 Kind = OffsetKind::kTls;
1897 WithFrameRecord = true;
1898
1899 // Tune for the target.
1900 if (TargetTriple.isOSFuchsia()) {
1901 // Fuchsia is always PIE, which means that the beginning of the address
1902 // space is always available.
1903 SetFixed(0);
1904 } else if (CompileKernel || InstrumentWithCalls) {
1905 SetFixed(0);
1906 WithFrameRecord = false;
1907 }
1908
1909 WithFrameRecord = optOr(ClFrameRecords, WithFrameRecord);
1910
1911 // Apply the last of ClMappingOffset and ClMappingOffsetDynamic.
1912 Kind = optOr(ClMappingOffsetDynamic, Kind);
1913 if (ClMappingOffset.getNumOccurrences() > 0 &&
1914 !(ClMappingOffsetDynamic.getNumOccurrences() > 0 &&
1915 ClMappingOffsetDynamic.getPosition() > ClMappingOffset.getPosition())) {
1916 SetFixed(ClMappingOffset);
1917 }
1918}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static cl::opt< bool > ClUseStackSafety("stack-tagging-use-stack-safety", cl::Hidden, cl::init(true), cl::desc("Use Stack Safety analysis results"))
static cl::opt< size_t > ClMaxLifetimes("stack-tagging-max-lifetimes-for-alloca", cl::Hidden, cl::init(3), cl::ReallyHidden, cl::desc("How many lifetime ends to handle for a single alloca."), cl::Optional)
static cl::opt< StackTaggingRecordStackHistoryMode > ClRecordStackHistory("stack-tagging-record-stack-history", cl::desc("Record stack frames with tagged allocations in a thread-local " "ring buffer"), cl::values(clEnumVal(none, "Do not record stack ring history"), clEnumVal(instr, "Insert instructions into the prologue for " "storing into the stack ring buffer")), cl::Hidden, cl::init(none))
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static const uint64_t kDefaultShadowScale
static cl::opt< std::string > ClMemoryAccessCallbackPrefix("asan-memory-access-callback-prefix", cl::desc("Prefix for memory access callbacks"), cl::Hidden, cl::init("__asan_"))
static cl::opt< bool > ClInstrumentWrites("asan-instrument-writes", cl::desc("instrument write instructions"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClInstrumentByval("asan-instrument-byval", cl::desc("instrument byval call arguments"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClGlobals("asan-globals", cl::desc("Handle global objects"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClInstrumentAtomics("asan-instrument-atomics", cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClUseAfterScope("asan-use-after-scope", cl::desc("Check stack-use-after-scope"), cl::Hidden, cl::init(false))
static const size_t kNumberOfAccessSizes
static cl::opt< bool > ClInstrumentReads("asan-instrument-reads", cl::desc("instrument read instructions"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClKasanMemIntrinCallbackPrefix("asan-kernel-mem-intrinsic-prefix", cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden, cl::init(false))
static cl::opt< uint64_t > ClMappingOffset("asan-mapping-offset", cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"), cl::Hidden, cl::init(0))
Expand Atomic instructions
This file contains the simple types necessary to represent the attributes associated with functions a...
static uint64_t scale(uint64_t Num, uint32_t N, uint32_t D)
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
#define clEnumVal(ENUMVAL, DESC)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file contains constants used for implementing Dwarf debug support.
#define DEBUG_TYPE
This is the interface for a simple mod/ref and alias analysis over globals.
static size_t TypeSizeToSizeIndex(uint32_t TypeSize)
static cl::opt< bool > ClInstrumentWrites("hwasan-instrument-writes", cl::desc("instrument write instructions"), cl::Hidden, cl::init(true))
static cl::opt< uint64_t > ClMappingOffset("hwasan-mapping-offset", cl::desc("HWASan shadow mapping offset [EXPERIMENTAL]"), cl::Hidden)
static cl::opt< RecordStackHistoryMode > ClRecordStackHistory("hwasan-record-stack-history", cl::desc("Record stack frames with tagged allocations in a thread-local " "ring buffer"), cl::values(clEnumVal(none, "Do not record stack ring history"), clEnumVal(instr, "Insert instructions into the prologue for " "storing into the stack ring buffer directly"), clEnumVal(libcall, "Add a call to __hwasan_add_frame_record for " "storing into the stack ring buffer")), cl::Hidden, cl::init(instr))
const char kHwasanModuleCtorName[]
static cl::opt< bool > ClFrameRecords("hwasan-with-frame-record", cl::desc("Use ring buffer for stack allocations"), cl::Hidden)
static cl::opt< int > ClMatchAllTag("hwasan-match-all-tag", cl::desc("don't report bad accesses via pointers with this tag"), cl::Hidden, cl::init(-1))
static cl::opt< bool > ClUseAfterScope("hwasan-use-after-scope", cl::desc("detect use after scope within function"), cl::Hidden, cl::init(true))
const char kHwasanNoteName[]
static const unsigned kShadowBaseAlignment
static cl::opt< bool > ClGenerateTagsWithCalls("hwasan-generate-tags-with-calls", cl::desc("generate new tags with runtime library calls"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClInstrumentReads("hwasan-instrument-reads", cl::desc("instrument read instructions"), cl::Hidden, cl::init(true))
static cl::opt< float > ClRandomKeepRate("hwasan-random-rate", cl::desc("Probability value in the range [0.0, 1.0] " "to keep instrumentation of a function. " "Note: instrumentation can be skipped randomly " "OR because of the hot percentile cutoff, if " "both are supplied."))
static cl::opt< bool > ClInstrumentWithCalls("hwasan-instrument-with-calls", cl::desc("instrument reads and writes with callbacks"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClUseStackSafety("hwasan-use-stack-safety", cl::Hidden, cl::init(true), cl::Hidden, cl::desc("Use Stack Safety analysis results"), cl::Optional)
static cl::opt< bool > ClInstrumentAtomics("hwasan-instrument-atomics", cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClInstrumentStack("hwasan-instrument-stack", cl::desc("instrument stack (allocas)"), cl::Hidden, cl::init(true))
static cl::opt< OffsetKind > ClMappingOffsetDynamic("hwasan-mapping-offset-dynamic", cl::desc("HWASan shadow mapping dynamic offset location"), cl::Hidden, cl::values(clEnumValN(OffsetKind::kGlobal, "global", "Use global"), clEnumValN(OffsetKind::kIfunc, "ifunc", "Use ifunc global"), clEnumValN(OffsetKind::kTls, "tls", "Use TLS")))
static cl::opt< bool > ClRecover("hwasan-recover", cl::desc("Enable recovery mode (continue-after-error)."), cl::Hidden, cl::init(false))
static cl::opt< bool > ClEnableKhwasan("hwasan-kernel", cl::desc("Enable KernelHWAddressSanitizer instrumentation"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClInlineAllChecks("hwasan-inline-all-checks", cl::desc("inline all checks"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClUsePageAliases("hwasan-experimental-use-page-aliases", cl::desc("Use page aliasing in HWASan"), cl::Hidden, cl::init(false))
static cl::opt< std::string > ClMemoryAccessCallbackPrefix("hwasan-memory-access-callback-prefix", cl::desc("Prefix for memory access callbacks"), cl::Hidden, cl::init("__hwasan_"))
static cl::opt< bool > ClInstrumentMemIntrinsics("hwasan-instrument-mem-intrinsics", cl::desc("instrument memory intrinsics"), cl::Hidden, cl::init(true))
static const size_t kNumberOfAccessSizes
static cl::opt< bool > ClGlobals("hwasan-globals", cl::desc("Instrument globals"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClKasanMemIntrinCallbackPrefix("hwasan-kernel-mem-intrinsic-prefix", cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClInstrumentByval("hwasan-instrument-byval", cl::desc("instrument byval arguments"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClUseShortGranules("hwasan-use-short-granules", cl::desc("use short granules in allocas and outlined checks"), cl::Hidden, cl::init(false))
const char kHwasanShadowMemoryDynamicAddress[]
static unsigned getPointerOperandIndex(Instruction *I)
#define DEBUG_TYPE
static cl::opt< bool > ClInlineFastPathChecks("hwasan-inline-fast-path-checks", cl::desc("inline all checks"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClInstrumentPersonalityFunctions("hwasan-instrument-personality-functions", cl::desc("instrument personality functions"), cl::Hidden)
const char kHwasanInitName[]
static cl::opt< bool > ClAllGlobals("hwasan-all-globals", cl::desc("Instrument globals, even those within user-defined sections. Warning: " "This may break existing code which walks globals via linker-generated " "symbols, expects certain globals to be contiguous with each other, or " "makes other assumptions which are invalidated by HWASan " "instrumentation."), cl::Hidden, cl::init(false))
RecordStackHistoryMode
static cl::opt< bool > ClInstrumentLandingPads("hwasan-instrument-landing-pads", cl::desc("instrument landing pads"), cl::Hidden, cl::init(false))
static cl::opt< size_t > ClMaxLifetimes("hwasan-max-lifetimes-for-alloca", cl::Hidden, cl::init(3), cl::ReallyHidden, cl::desc("How many lifetime ends to handle for a single alloca."), cl::Optional)
const char kHwasanPersonalityThunkName[]
static cl::opt< bool > ClStaticLinking("hwasan-static-linking", cl::desc("Don't use .note.hwasan.globals section to instrument globals " "from loadable libraries. " "Note: in static binaries, the global variables section can be " "accessed directly via linker-provided " "__start_hwasan_globals and __stop_hwasan_globals symbols"), cl::Hidden, cl::init(false))
static void emitRemark(const Function &F, OptimizationRemarkEmitter &ORE, bool Skip)
static cl::opt< int > ClHotPercentileCutoff("hwasan-percentile-cutoff-hot", cl::desc("Hot percentile cutoff."))
IRTranslator LLVM IR MI
Module.h This file contains the declarations for the Module class.
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Machine Check Debug Module
This file implements a map that provides insertion order iteration.
#define T
uint64_t IntrinsicInst * II
#define P(N)
FunctionAnalysisManager FAM
ModuleAnalysisManager MAM
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
This file contains some functions that are useful when dealing with strings.
#define LLVM_DEBUG(...)
Definition Debug.h:114
an instruction to allocate memory on the stack
PointerType * getType() const
Overload to return most specific pointer type.
const Value * getArraySize() const
Get the number of elements allocated.
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
static LLVM_ABI ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition BasicBlock.h:206
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
Analysis pass which computes BlockFrequencyInfo.
This class represents a function call, abstracting a target machine's calling convention.
void setTailCall(bool IsTc=true)
static Constant * get(LLVMContext &Context, ArrayRef< ElementTy > Elts)
get() constructor - Return a constant with array type with an element count and element type matching...
Definition Constants.h:720
static LLVM_ABI Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getAnon(ArrayRef< Constant * > V, bool Packed=false)
Return an anonymous struct that has the specified elements.
Definition Constants.h:491
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
Analysis pass which computes a DominatorTree.
Definition Dominators.h:283
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:164
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
Class to represent function types.
static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
static Function * Create(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Definition Function.h:166
void flush()
Apply all pending updates to available trees and flush all BasicBlocks awaiting deletion.
static LLVM_ABI GlobalAlias * create(Type *Ty, unsigned AddressSpace, LinkageTypes Linkage, const Twine &Name, Constant *Aliasee, Module *Parent)
If a parent module is specified, the alias is automatically inserted into the end of the specified mo...
Definition Globals.cpp:613
StringRef getSection() const
Get the custom section of this global if it has one.
LLVM_ABI void setComdat(Comdat *C)
Definition Globals.cpp:215
bool hasSection() const
Check if this global has a custom object file section.
LLVM_ABI const SanitizerMetadata & getSanitizerMetadata() const
Definition Globals.cpp:246
bool isThreadLocal() const
If the value is "Thread Local", its value isn't shared by the threads.
VisibilityTypes getVisibility() const
LinkageTypes getLinkage() const
bool isDeclarationForLinker() const
bool hasSanitizerMetadata() const
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
PointerType * getType() const
Global values are always pointers.
@ HiddenVisibility
The GV is hidden.
Definition GlobalValue.h:69
bool hasCommonLinkage() const
@ PrivateLinkage
Like Internal, but omit from symbol table.
Definition GlobalValue.h:61
@ InternalLinkage
Rename collisions when linking (static functions).
Definition GlobalValue.h:60
@ ExternalLinkage
Externally visible function.
Definition GlobalValue.h:53
@ LinkOnceODRLinkage
Same, but only replaced by something equivalent.
Definition GlobalValue.h:56
Type * getValueType() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
MaybeAlign getAlign() const
Returns the alignment of the given variable.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
LLVM_ABI void eraseFromParent()
eraseFromParent - This method unlinks 'this' from the containing module and deletes it.
Definition Globals.cpp:530
Analysis pass providing a never-invalidated alias analysis result.
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &MAM)
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
Value * CreateConstGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0, const Twine &Name="")
Definition IRBuilder.h:1958
Value * CreatePointerCast(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2224
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2172
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition IRBuilder.h:1517
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
Definition IRBuilder.h:562
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition IRBuilder.h:2026
ReturnInst * CreateRet(Value *V)
Create a 'ret <val>' instruction.
Definition IRBuilder.h:1176
BasicBlock * GetInsertBlock() const
Definition IRBuilder.h:201
Value * CreateUDiv(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition IRBuilder.h:1458
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2306
LLVM_ABI CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
Value * CreateICmpUGT(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2310
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Definition IRBuilder.h:1855
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1496
CallInst * CreateMemSet(Value *Ptr, Value *Val, uint64_t Size, MaybeAlign Align, bool isVolatile=false, const AAMDNodes &AAInfo=AAMDNodes())
Create and insert a memset to the specified pointer and the specified value.
Definition IRBuilder.h:630
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Definition IRBuilder.h:2055
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:1555
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
Definition IRBuilder.h:1868
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1407
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition IRBuilder.h:2481
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)
Definition IRBuilder.h:2041
PointerType * getPtrTy(unsigned AddrSpace=0)
Fetch the type representing a pointer.
Definition IRBuilder.h:605
LLVM_ABI Value * CreateTypeSize(Type *Ty, TypeSize Size)
Create an expression which evaluates to the number of units in Size at runtime.
Value * CreateICmpUGE(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2314
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
Definition IRBuilder.h:2250
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition IRBuilder.h:207
Value * CreateAShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition IRBuilder.h:1536
Value * CreateXor(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:1603
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="", bool IsDisjoint=false)
Definition IRBuilder.h:1577
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2776
static LLVM_ABI InlineAsm * get(FunctionType *Ty, StringRef AsmString, StringRef Constraints, bool hasSideEffects, bool isAlignStack=false, AsmDialect asmDialect=AD_ATT, bool canThrow=false)
InlineAsm::get - Return the specified uniqued inline asm string.
Definition InlineAsm.cpp:43
LLVM_ABI void setSuccessor(unsigned Idx, BasicBlock *BB)
Update the specified successor to point at the provided block.
A wrapper class for inspecting calls to intrinsic functions.
An instruction for reading from memory.
Analysis pass that exposes the LoopInfo for a function.
Definition LoopInfo.h:569
LLVM_ABI void update(ArrayRef< uint8_t > Data)
Updates the hash for the byte stream provided.
Definition MD5.cpp:188
LLVM_ABI void final(MD5Result &Result)
Finishes off the hash and puts the result in result.
Definition MD5.cpp:233
LLVM_ABI MDNode * createUnlikelyBranchWeights()
Return metadata containing two branch weights, with significant bias towards false destination.
Definition MDBuilder.cpp:48
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition Metadata.h:1572
This class implements a map that also provides access to all stored values in a deterministic order.
Definition MapVector.h:36
bool empty() const
Definition MapVector.h:77
This is the common base class for memset/memcpy/memmove.
This class wraps the llvm.memcpy/memmove intrinsics.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
GlobalVariable * getOrInsertGlobal(StringRef Name, Type *Ty, function_ref< GlobalVariable *()> CreateGlobalCallback)
Look up the specified global in the module symbol table.
Definition Module.cpp:262
The optimization diagnostic interface.
LLVM_ABI void emit(DiagnosticInfoOptimizationBase &OptDiag)
Output the remark via the diagnostic handler and to the optimization record file.
Diagnostic information for missed-optimization remarks.
Diagnostic information for applied optimization remarks.
Analysis pass which computes a PostDominatorTree.
PostDominatorTree Class - Concrete subclass of DominatorTree that is used to compute the post-dominat...
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
Definition Analysis.h:115
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & abandon()
Mark an analysis as abandoned.
Definition Analysis.h:171
PreservedAnalyses & preserve()
Mark an analysis as preserved.
Definition Analysis.h:132
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
Analysis providing profile information.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This pass performs the global (interprocedural) stack safety analysis (new pass manager).
bool stackAccessIsSafe(const Instruction &I) const
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
std::string str() const
str - Get the contents as an std::string.
Definition StringRef.h:225
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:261
static LLVM_ABI StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
Definition Type.cpp:413
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
bool isAndroidVersionLT(unsigned Major) const
Definition Triple.h:849
bool isAndroid() const
Tests whether the target is Android.
Definition Triple.h:847
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition Triple.h:418
bool isRISCV64() const
Tests whether the target is 64-bit RISC-V.
Definition Triple.h:1114
bool isAArch64() const
Tests whether the target is AArch64 (little and big endian).
Definition Triple.h:1034
bool isOSFuchsia() const
Definition Triple.h:663
bool isOSBinFormatELF() const
Tests whether the OS uses the ELF binary format.
Definition Triple.h:791
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
static LLVM_ABI ValueAsMetadata * get(Value *V)
Definition Metadata.cpp:509
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
Definition Value.cpp:397
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:553
LLVM_ABI void replaceUsesWithIf(Value *New, llvm::function_ref< bool(Use &U)> ShouldReplace)
Go through the uses list for this definition and make each use point to "V" if the callback ShouldRep...
Definition Value.cpp:561
LLVM_ABI bool isSwiftError() const
Return true if this value is a swifterror value.
Definition Value.cpp:1124
bool hasName() const
Definition Value.h:262
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
int getNumOccurrences() const
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
void getInterestingMemoryOperands(Module &M, Instruction *I, SmallVectorImpl< InterestingMemoryOperand > &Interesting)
Get all the memory operands from the instruction that needs to be instrumented.
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ NT_LLVM_HWASAN_GLOBALS
Definition ELF.h:1801
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
Value * getFP(IRBuilder<> &IRB)
bool isStandardLifetime(const SmallVectorImpl< IntrinsicInst * > &LifetimeStart, const SmallVectorImpl< IntrinsicInst * > &LifetimeEnd, const DominatorTree *DT, const LoopInfo *LI, size_t MaxLifetimes)
uint64_t getAllocaSizeInBytes(const AllocaInst &AI)
Value * getAndroidSlotPtr(IRBuilder<> &IRB, int Slot)
Value * readRegister(IRBuilder<> &IRB, StringRef Name)
void annotateDebugRecords(AllocaInfo &Info, unsigned int Tag)
void alignAndPadAlloca(memtag::AllocaInfo &Info, llvm::Align Align)
Value * getPC(const Triple &TargetTriple, IRBuilder<> &IRB)
Value * incrementThreadLong(IRBuilder<> &IRB, Value *ThreadLong, unsigned int Inc, bool IsMemtagDarwin=false)
bool forAllReachableExits(const DominatorTree &DT, const PostDominatorTree &PDT, const LoopInfo &LI, const AllocaInfo &AInfo, const SmallVectorImpl< Instruction * > &RetVec, llvm::function_ref< void(Instruction *)> Callback)
DiagnosticInfoOptimizationBase::Argument NV
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
LLVM_ABI AllocaInst * findAllocaForValue(Value *V, bool OffsetZero=false)
Returns unique alloca where the value comes from, or nullptr.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
FunctionAddr VTableAddr uintptr_t uintptr_t Int32Ty
Definition InstrProf.h:296
OuterAnalysisManagerProxy< ModuleAnalysisManager, Function > ModuleAnalysisManagerFunctionProxy
Provide the ModuleAnalysisManager to Function proxy.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:632
InnerAnalysisManagerProxy< FunctionAnalysisManager, Module > FunctionAnalysisManagerModuleProxy
Provide the FunctionAnalysisManager to Module proxy.
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition MathExtras.h:284
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition bit.h:202
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1744
LLVM_ABI std::pair< Function *, FunctionCallee > getOrCreateSanitizerCtorAndInitFunctions(Module &M, StringRef CtorName, StringRef InitName, ArrayRef< Type * > InitArgTypes, ArrayRef< Value * > InitArgs, function_ref< void(Function *, FunctionCallee)> FunctionsCreatedCallback, StringRef VersionCheckName=StringRef(), bool Weak=false)
Creates sanitizer constructor function lazily.
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOne bit sets.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
Definition Casting.h:547
@ Other
Any other memory.
Definition ModRef.h:68
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
LLVM_ABI void appendToCompilerUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.compiler.used list.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
void removeASanIncompatibleFnAttributes(Function &F, bool ReadsArgMem)
Remove memory attributes that are incompatible with the instrumentation added by AddressSanitizer and HWAddressSanitizer.
LLVM_ABI void appendToGlobalCtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Append F to the list of global ctors of module M with the given Priority.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI Instruction * SplitBlockAndInsertIfThen(Value *Cond, BasicBlock::iterator SplitBefore, bool Unreachable, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, BasicBlock *ThenBlock=nullptr)
Split the containing block at the specified instruction - everything before SplitBefore stays in the ...
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
LLVM_ABI void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI, const TargetLibraryInfo *TLI)
Given a CallInst, check if it calls a string function known to CodeGen, and mark it with NoBuiltin if so.
Definition Local.cpp:3890
LLVM_ABI bool checkIfAlreadyInstrumented(Module &M, StringRef Flag)
Check if module has flag attached, if not add the flag.
std::string itostr(int64_t X)
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
Definition MIRParser.h:39
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
Definition Alignment.h:130
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition PassManager.h:70
MapVector< AllocaInst *, AllocaInfo > AllocasToInstrument
SmallVector< Instruction *, 8 > RetVec