LLVM 19.0.0git
HWAddressSanitizer.cpp
Go to the documentation of this file.
1//===- HWAddressSanitizer.cpp - memory access error detector --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file is a part of HWAddressSanitizer, an address basic correctness
11/// checker based on tagged addressing.
12//===----------------------------------------------------------------------===//
13
15#include "llvm/ADT/MapVector.h"
16#include "llvm/ADT/STLExtras.h"
18#include "llvm/ADT/Statistic.h"
20#include "llvm/ADT/StringRef.h"
32#include "llvm/IR/Attributes.h"
33#include "llvm/IR/BasicBlock.h"
34#include "llvm/IR/Constant.h"
35#include "llvm/IR/Constants.h"
36#include "llvm/IR/DataLayout.h"
39#include "llvm/IR/Dominators.h"
40#include "llvm/IR/Function.h"
41#include "llvm/IR/IRBuilder.h"
42#include "llvm/IR/InlineAsm.h"
44#include "llvm/IR/Instruction.h"
47#include "llvm/IR/Intrinsics.h"
48#include "llvm/IR/LLVMContext.h"
49#include "llvm/IR/MDBuilder.h"
50#include "llvm/IR/Module.h"
51#include "llvm/IR/Type.h"
52#include "llvm/IR/Value.h"
55#include "llvm/Support/Debug.h"
56#include "llvm/Support/MD5.h"
66#include <optional>
67#include <random>
68
69using namespace llvm;
70
71#define DEBUG_TYPE "hwasan"
72
73const char kHwasanModuleCtorName[] = "hwasan.module_ctor";
74const char kHwasanNoteName[] = "hwasan.note";
75const char kHwasanInitName[] = "__hwasan_init";
76const char kHwasanPersonalityThunkName[] = "__hwasan_personality_thunk";
77
79 "__hwasan_shadow_memory_dynamic_address";
80
81// Accesses sizes are powers of two: 1, 2, 4, 8, 16.
82static const size_t kNumberOfAccessSizes = 5;
83
84static const size_t kDefaultShadowScale = 4;
86 std::numeric_limits<uint64_t>::max();
87
88static const unsigned kShadowBaseAlignment = 32;
89
91 ClMemoryAccessCallbackPrefix("hwasan-memory-access-callback-prefix",
92 cl::desc("Prefix for memory access callbacks"),
93 cl::Hidden, cl::init("__hwasan_"));
94
96 "hwasan-kernel-mem-intrinsic-prefix",
97 cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden,
98 cl::init(false));
99
101 "hwasan-instrument-with-calls",
102 cl::desc("instrument reads and writes with callbacks"), cl::Hidden,
103 cl::init(false));
104
105static cl::opt<bool> ClInstrumentReads("hwasan-instrument-reads",
106 cl::desc("instrument read instructions"),
107 cl::Hidden, cl::init(true));
108
109static cl::opt<bool>
110 ClInstrumentWrites("hwasan-instrument-writes",
111 cl::desc("instrument write instructions"), cl::Hidden,
112 cl::init(true));
113
115 "hwasan-instrument-atomics",
116 cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
117 cl::init(true));
118
119static cl::opt<bool> ClInstrumentByval("hwasan-instrument-byval",
120 cl::desc("instrument byval arguments"),
121 cl::Hidden, cl::init(true));
122
123static cl::opt<bool>
124 ClRecover("hwasan-recover",
125 cl::desc("Enable recovery mode (continue-after-error)."),
126 cl::Hidden, cl::init(false));
127
128static cl::opt<bool> ClInstrumentStack("hwasan-instrument-stack",
129 cl::desc("instrument stack (allocas)"),
130 cl::Hidden, cl::init(true));
131
132static cl::opt<bool>
133 ClUseStackSafety("hwasan-use-stack-safety", cl::Hidden, cl::init(true),
134 cl::Hidden, cl::desc("Use Stack Safety analysis results"),
136
138 "hwasan-max-lifetimes-for-alloca", cl::Hidden, cl::init(3),
140 cl::desc("How many lifetime ends to handle for a single alloca."),
142
143static cl::opt<bool>
144 ClUseAfterScope("hwasan-use-after-scope",
145 cl::desc("detect use after scope within function"),
146 cl::Hidden, cl::init(true));
147
149 "hwasan-generate-tags-with-calls",
150 cl::desc("generate new tags with runtime library calls"), cl::Hidden,
151 cl::init(false));
152
153static cl::opt<bool> ClGlobals("hwasan-globals", cl::desc("Instrument globals"),
154 cl::Hidden, cl::init(false));
155
157 "hwasan-match-all-tag",
158 cl::desc("don't report bad accesses via pointers with this tag"),
159 cl::Hidden, cl::init(-1));
160
161static cl::opt<bool>
162 ClEnableKhwasan("hwasan-kernel",
163 cl::desc("Enable KernelHWAddressSanitizer instrumentation"),
164 cl::Hidden, cl::init(false));
165
166// These flags allow to change the shadow mapping and control how shadow memory
167// is accessed. The shadow mapping looks like:
168// Shadow = (Mem >> scale) + offset
169
171 ClMappingOffset("hwasan-mapping-offset",
172 cl::desc("HWASan shadow mapping offset [EXPERIMENTAL]"),
173 cl::Hidden, cl::init(0));
174
175static cl::opt<bool>
176 ClWithIfunc("hwasan-with-ifunc",
177 cl::desc("Access dynamic shadow through an ifunc global on "
178 "platforms that support this"),
179 cl::Hidden, cl::init(false));
180
182 "hwasan-with-tls",
183 cl::desc("Access dynamic shadow through an thread-local pointer on "
184 "platforms that support this"),
185 cl::Hidden, cl::init(true));
186
187static cl::opt<int> ClHotPercentileCutoff("hwasan-percentile-cutoff-hot",
188 cl::desc("Hot percentile cuttoff."));
189
190static cl::opt<float>
191 ClRandomSkipRate("hwasan-random-rate",
192 cl::desc("Probability value in the range [0.0, 1.0] "
193 "to keep instrumentation of a function."));
194
195STATISTIC(NumTotalFuncs, "Number of total funcs");
196STATISTIC(NumInstrumentedFuncs, "Number of instrumented funcs");
197STATISTIC(NumNoProfileSummaryFuncs, "Number of funcs without PS");
198
199// Mode for selecting how to insert frame record info into the stack ring
200// buffer.
202 // Do not record frame record info.
204
205 // Insert instructions into the prologue for storing into the stack ring
206 // buffer directly.
208
209 // Add a call to __hwasan_add_frame_record in the runtime.
211};
212
214 "hwasan-record-stack-history",
215 cl::desc("Record stack frames with tagged allocations in a thread-local "
216 "ring buffer"),
217 cl::values(clEnumVal(none, "Do not record stack ring history"),
218 clEnumVal(instr, "Insert instructions into the prologue for "
219 "storing into the stack ring buffer directly"),
220 clEnumVal(libcall, "Add a call to __hwasan_add_frame_record for "
221 "storing into the stack ring buffer")),
223
224static cl::opt<bool>
225 ClInstrumentMemIntrinsics("hwasan-instrument-mem-intrinsics",
226 cl::desc("instrument memory intrinsics"),
227 cl::Hidden, cl::init(true));
228
229static cl::opt<bool>
230 ClInstrumentLandingPads("hwasan-instrument-landing-pads",
231 cl::desc("instrument landing pads"), cl::Hidden,
232 cl::init(false));
233
235 "hwasan-use-short-granules",
236 cl::desc("use short granules in allocas and outlined checks"), cl::Hidden,
237 cl::init(false));
238
240 "hwasan-instrument-personality-functions",
241 cl::desc("instrument personality functions"), cl::Hidden);
242
243static cl::opt<bool> ClInlineAllChecks("hwasan-inline-all-checks",
244 cl::desc("inline all checks"),
245 cl::Hidden, cl::init(false));
246
247static cl::opt<bool> ClInlineFastPathChecks("hwasan-inline-fast-path-checks",
248 cl::desc("inline all checks"),
249 cl::Hidden, cl::init(false));
250
251// Enabled from clang by "-fsanitize-hwaddress-experimental-aliasing".
252static cl::opt<bool> ClUsePageAliases("hwasan-experimental-use-page-aliases",
253 cl::desc("Use page aliasing in HWASan"),
254 cl::Hidden, cl::init(false));
255
256namespace {
257
258template <typename T> T optOr(cl::opt<T> &Opt, T Other) {
259 return Opt.getNumOccurrences() ? Opt : Other;
260}
261
262bool shouldUsePageAliases(const Triple &TargetTriple) {
263 return ClUsePageAliases && TargetTriple.getArch() == Triple::x86_64;
264}
265
266bool shouldInstrumentStack(const Triple &TargetTriple) {
267 return !shouldUsePageAliases(TargetTriple) && ClInstrumentStack;
268}
269
270bool shouldInstrumentWithCalls(const Triple &TargetTriple) {
271 return optOr(ClInstrumentWithCalls, TargetTriple.getArch() == Triple::x86_64);
272}
273
274bool mightUseStackSafetyAnalysis(bool DisableOptimization) {
275 return optOr(ClUseStackSafety, !DisableOptimization);
276}
277
278bool shouldUseStackSafetyAnalysis(const Triple &TargetTriple,
279 bool DisableOptimization) {
280 return shouldInstrumentStack(TargetTriple) &&
281 mightUseStackSafetyAnalysis(DisableOptimization);
282}
283
284bool shouldDetectUseAfterScope(const Triple &TargetTriple) {
285 return ClUseAfterScope && shouldInstrumentStack(TargetTriple);
286}
287
288/// An instrumentation pass implementing detection of addressability bugs
289/// using tagged pointers.
290class HWAddressSanitizer {
291public:
292 HWAddressSanitizer(Module &M, bool CompileKernel, bool Recover,
293 const StackSafetyGlobalInfo *SSI)
294 : M(M), SSI(SSI) {
295 this->Recover = optOr(ClRecover, Recover);
296 this->CompileKernel = optOr(ClEnableKhwasan, CompileKernel);
297 this->Rng = ClRandomSkipRate.getNumOccurrences() ? M.createRNG(DEBUG_TYPE)
298 : nullptr;
299
300 initializeModule();
301 }
302
303 void sanitizeFunction(Function &F, FunctionAnalysisManager &FAM);
304
305private:
306 struct ShadowTagCheckInfo {
307 Instruction *TagMismatchTerm = nullptr;
308 Value *PtrLong = nullptr;
309 Value *AddrLong = nullptr;
310 Value *PtrTag = nullptr;
311 Value *MemTag = nullptr;
312 };
313
314 bool selectiveInstrumentationShouldSkip(Function &F,
316 void initializeModule();
317 void createHwasanCtorComdat();
318
319 void initializeCallbacks(Module &M);
320
321 Value *getOpaqueNoopCast(IRBuilder<> &IRB, Value *Val);
322
323 Value *getDynamicShadowIfunc(IRBuilder<> &IRB);
324 Value *getShadowNonTls(IRBuilder<> &IRB);
325
326 void untagPointerOperand(Instruction *I, Value *Addr);
327 Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
328
329 int64_t getAccessInfo(bool IsWrite, unsigned AccessSizeIndex);
330 ShadowTagCheckInfo insertShadowTagCheck(Value *Ptr, Instruction *InsertBefore,
331 DomTreeUpdater &DTU, LoopInfo *LI);
332 void instrumentMemAccessOutline(Value *Ptr, bool IsWrite,
333 unsigned AccessSizeIndex,
334 Instruction *InsertBefore,
335 DomTreeUpdater &DTU, LoopInfo *LI);
336 void instrumentMemAccessInline(Value *Ptr, bool IsWrite,
337 unsigned AccessSizeIndex,
338 Instruction *InsertBefore, DomTreeUpdater &DTU,
339 LoopInfo *LI);
340 bool ignoreMemIntrinsic(OptimizationRemarkEmitter &ORE, MemIntrinsic *MI);
341 void instrumentMemIntrinsic(MemIntrinsic *MI);
342 bool instrumentMemAccess(InterestingMemoryOperand &O, DomTreeUpdater &DTU,
343 LoopInfo *LI);
344 bool ignoreAccessWithoutRemark(Instruction *Inst, Value *Ptr);
345 bool ignoreAccess(OptimizationRemarkEmitter &ORE, Instruction *Inst,
346 Value *Ptr);
347
348 void getInterestingMemoryOperands(
350 const TargetLibraryInfo &TLI,
352
353 void tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag, size_t Size);
354 Value *tagPointer(IRBuilder<> &IRB, Type *Ty, Value *PtrLong, Value *Tag);
355 Value *untagPointer(IRBuilder<> &IRB, Value *PtrLong);
356 bool instrumentStack(memtag::StackInfo &Info, Value *StackTag, Value *UARTag,
357 const DominatorTree &DT, const PostDominatorTree &PDT,
358 const LoopInfo &LI);
359 bool instrumentLandingPads(SmallVectorImpl<Instruction *> &RetVec);
360 Value *getNextTagWithCall(IRBuilder<> &IRB);
361 Value *getStackBaseTag(IRBuilder<> &IRB);
362 Value *getAllocaTag(IRBuilder<> &IRB, Value *StackTag, unsigned AllocaNo);
363 Value *getUARTag(IRBuilder<> &IRB);
364
365 Value *getHwasanThreadSlotPtr(IRBuilder<> &IRB);
366 Value *applyTagMask(IRBuilder<> &IRB, Value *OldTag);
367 unsigned retagMask(unsigned AllocaNo);
368
369 void emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord);
370
371 void instrumentGlobal(GlobalVariable *GV, uint8_t Tag);
372 void instrumentGlobals();
373
374 Value *getCachedFP(IRBuilder<> &IRB);
375 Value *getFrameRecordInfo(IRBuilder<> &IRB);
376
377 void instrumentPersonalityFunctions();
378
379 LLVMContext *C;
380 Module &M;
381 const StackSafetyGlobalInfo *SSI;
382 Triple TargetTriple;
383 std::unique_ptr<RandomNumberGenerator> Rng;
384
385 /// This struct defines the shadow mapping using the rule:
386 /// shadow = (mem >> Scale) + Offset.
387 /// If InGlobal is true, then
388 /// extern char __hwasan_shadow[];
389 /// shadow = (mem >> Scale) + &__hwasan_shadow
390 /// If InTls is true, then
391 /// extern char *__hwasan_tls;
392 /// shadow = (mem>>Scale) + align_up(__hwasan_shadow, kShadowBaseAlignment)
393 ///
394 /// If WithFrameRecord is true, then __hwasan_tls will be used to access the
395 /// ring buffer for storing stack allocations on targets that support it.
396 struct ShadowMapping {
397 uint8_t Scale;
399 bool InGlobal;
400 bool InTls;
401 bool WithFrameRecord;
402
403 void init(Triple &TargetTriple, bool InstrumentWithCalls);
404 Align getObjectAlignment() const { return Align(1ULL << Scale); }
405 };
406
407 ShadowMapping Mapping;
408
409 Type *VoidTy = Type::getVoidTy(M.getContext());
410 Type *IntptrTy = M.getDataLayout().getIntPtrType(M.getContext());
411 PointerType *PtrTy = PointerType::getUnqual(M.getContext());
412 Type *Int8Ty = Type::getInt8Ty(M.getContext());
413 Type *Int32Ty = Type::getInt32Ty(M.getContext());
414 Type *Int64Ty = Type::getInt64Ty(M.getContext());
415
416 bool CompileKernel;
417 bool Recover;
418 bool OutlinedChecks;
419 bool InlineFastPath;
420 bool UseShortGranules;
421 bool InstrumentLandingPads;
422 bool InstrumentWithCalls;
423 bool InstrumentStack;
424 bool InstrumentGlobals;
425 bool DetectUseAfterScope;
426 bool UsePageAliases;
427 bool UseMatchAllCallback;
428
429 std::optional<uint8_t> MatchAllTag;
430
431 unsigned PointerTagShift;
432 uint64_t TagMaskByte;
433
434 Function *HwasanCtorFunction;
435
436 FunctionCallee HwasanMemoryAccessCallback[2][kNumberOfAccessSizes];
437 FunctionCallee HwasanMemoryAccessCallbackSized[2];
438
439 FunctionCallee HwasanMemmove, HwasanMemcpy, HwasanMemset;
440 FunctionCallee HwasanHandleVfork;
441
442 FunctionCallee HwasanTagMemoryFunc;
443 FunctionCallee HwasanGenerateTagFunc;
444 FunctionCallee HwasanRecordFrameRecordFunc;
445
446 Constant *ShadowGlobal;
447
448 Value *ShadowBase = nullptr;
449 Value *StackBaseTag = nullptr;
450 Value *CachedFP = nullptr;
451 GlobalValue *ThreadPtrGlobal = nullptr;
452};
453
454} // end anonymous namespace
455
458 const StackSafetyGlobalInfo *SSI = nullptr;
459 auto TargetTriple = llvm::Triple(M.getTargetTriple());
460 if (shouldUseStackSafetyAnalysis(TargetTriple, Options.DisableOptimization))
462
463 HWAddressSanitizer HWASan(M, Options.CompileKernel, Options.Recover, SSI);
464 auto &FAM = MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
465 for (Function &F : M)
466 HWASan.sanitizeFunction(F, FAM);
467
469 // DominatorTreeAnalysis, PostDominatorTreeAnalysis, and LoopAnalysis
470 // are incrementally updated throughout this pass whenever
471 // SplitBlockAndInsertIfThen is called.
475 // GlobalsAA is considered stateless and does not get invalidated unless
476 // explicitly invalidated; PreservedAnalyses::none() is not enough. Sanitizers
477 // make changes that require GlobalsAA to be invalidated.
478 PA.abandon<GlobalsAA>();
479 return PA;
480}
482 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
484 OS, MapClassName2PassName);
485 OS << '<';
486 if (Options.CompileKernel)
487 OS << "kernel;";
488 if (Options.Recover)
489 OS << "recover";
490 OS << '>';
491}
492
493void HWAddressSanitizer::createHwasanCtorComdat() {
494 std::tie(HwasanCtorFunction, std::ignore) =
497 /*InitArgTypes=*/{},
498 /*InitArgs=*/{},
499 // This callback is invoked when the functions are created the first
500 // time. Hook them into the global ctors list in that case:
501 [&](Function *Ctor, FunctionCallee) {
502 Comdat *CtorComdat = M.getOrInsertComdat(kHwasanModuleCtorName);
503 Ctor->setComdat(CtorComdat);
504 appendToGlobalCtors(M, Ctor, 0, Ctor);
505 });
506
507 // Create a note that contains pointers to the list of global
508 // descriptors. Adding a note to the output file will cause the linker to
509 // create a PT_NOTE program header pointing to the note that we can use to
510 // find the descriptor list starting from the program headers. A function
511 // provided by the runtime initializes the shadow memory for the globals by
512 // accessing the descriptor list via the note. The dynamic loader needs to
513 // call this function whenever a library is loaded.
514 //
515 // The reason why we use a note for this instead of a more conventional
516 // approach of having a global constructor pass a descriptor list pointer to
517 // the runtime is because of an order of initialization problem. With
518 // constructors we can encounter the following problematic scenario:
519 //
520 // 1) library A depends on library B and also interposes one of B's symbols
521 // 2) B's constructors are called before A's (as required for correctness)
522 // 3) during construction, B accesses one of its "own" globals (actually
523 // interposed by A) and triggers a HWASAN failure due to the initialization
524 // for A not having happened yet
525 //
526 // Even without interposition it is possible to run into similar situations in
527 // cases where two libraries mutually depend on each other.
528 //
529 // We only need one note per binary, so put everything for the note in a
530 // comdat. This needs to be a comdat with an .init_array section to prevent
531 // newer versions of lld from discarding the note.
532 //
533 // Create the note even if we aren't instrumenting globals. This ensures that
534 // binaries linked from object files with both instrumented and
535 // non-instrumented globals will end up with a note, even if a comdat from an
536 // object file with non-instrumented globals is selected. The note is harmless
537 // if the runtime doesn't support it, since it will just be ignored.
538 Comdat *NoteComdat = M.getOrInsertComdat(kHwasanModuleCtorName);
539
540 Type *Int8Arr0Ty = ArrayType::get(Int8Ty, 0);
541 auto *Start =
542 new GlobalVariable(M, Int8Arr0Ty, true, GlobalVariable::ExternalLinkage,
543 nullptr, "__start_hwasan_globals");
544 Start->setVisibility(GlobalValue::HiddenVisibility);
545 auto *Stop =
546 new GlobalVariable(M, Int8Arr0Ty, true, GlobalVariable::ExternalLinkage,
547 nullptr, "__stop_hwasan_globals");
548 Stop->setVisibility(GlobalValue::HiddenVisibility);
549
550 // Null-terminated so actually 8 bytes, which are required in order to align
551 // the note properly.
552 auto *Name = ConstantDataArray::get(*C, "LLVM\0\0\0");
553
554 auto *NoteTy = StructType::get(Int32Ty, Int32Ty, Int32Ty, Name->getType(),
555 Int32Ty, Int32Ty);
556 auto *Note =
557 new GlobalVariable(M, NoteTy, /*isConstant=*/true,
559 Note->setSection(".note.hwasan.globals");
560 Note->setComdat(NoteComdat);
561 Note->setAlignment(Align(4));
562
563 // The pointers in the note need to be relative so that the note ends up being
564 // placed in rodata, which is the standard location for notes.
565 auto CreateRelPtr = [&](Constant *Ptr) {
569 Int32Ty);
570 };
571 Note->setInitializer(ConstantStruct::getAnon(
572 {ConstantInt::get(Int32Ty, 8), // n_namesz
573 ConstantInt::get(Int32Ty, 8), // n_descsz
574 ConstantInt::get(Int32Ty, ELF::NT_LLVM_HWASAN_GLOBALS), // n_type
575 Name, CreateRelPtr(Start), CreateRelPtr(Stop)}));
577
578 // Create a zero-length global in hwasan_globals so that the linker will
579 // always create start and stop symbols.
580 auto *Dummy = new GlobalVariable(
581 M, Int8Arr0Ty, /*isConstantGlobal*/ true, GlobalVariable::PrivateLinkage,
582 Constant::getNullValue(Int8Arr0Ty), "hwasan.dummy.global");
583 Dummy->setSection("hwasan_globals");
584 Dummy->setComdat(NoteComdat);
585 Dummy->setMetadata(LLVMContext::MD_associated,
587 appendToCompilerUsed(M, Dummy);
588}
589
590/// Module-level initialization.
591///
592/// inserts a call to __hwasan_init to the module's constructor list.
593void HWAddressSanitizer::initializeModule() {
594 LLVM_DEBUG(dbgs() << "Init " << M.getName() << "\n");
595 TargetTriple = Triple(M.getTargetTriple());
596
597 // x86_64 currently has two modes:
598 // - Intel LAM (default)
599 // - pointer aliasing (heap only)
600 bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64;
601 UsePageAliases = shouldUsePageAliases(TargetTriple);
602 InstrumentWithCalls = shouldInstrumentWithCalls(TargetTriple);
603 InstrumentStack = shouldInstrumentStack(TargetTriple);
604 DetectUseAfterScope = shouldDetectUseAfterScope(TargetTriple);
605 PointerTagShift = IsX86_64 ? 57 : 56;
606 TagMaskByte = IsX86_64 ? 0x3F : 0xFF;
607
608 Mapping.init(TargetTriple, InstrumentWithCalls);
609
610 C = &(M.getContext());
611 IRBuilder<> IRB(*C);
612
613 HwasanCtorFunction = nullptr;
614
615 // Older versions of Android do not have the required runtime support for
616 // short granules, global or personality function instrumentation. On other
617 // platforms we currently require using the latest version of the runtime.
618 bool NewRuntime =
619 !TargetTriple.isAndroid() || !TargetTriple.isAndroidVersionLT(30);
620
621 UseShortGranules = optOr(ClUseShortGranules, NewRuntime);
622 OutlinedChecks = (TargetTriple.isAArch64() || TargetTriple.isRISCV64()) &&
623 TargetTriple.isOSBinFormatELF() &&
624 !optOr(ClInlineAllChecks, Recover);
625
626 // These platforms may prefer less inlining to reduce binary size.
627 InlineFastPath = optOr(ClInlineFastPathChecks, !(TargetTriple.isAndroid() ||
628 TargetTriple.isOSFuchsia()));
629
630 if (ClMatchAllTag.getNumOccurrences()) {
631 if (ClMatchAllTag != -1) {
632 MatchAllTag = ClMatchAllTag & 0xFF;
633 }
634 } else if (CompileKernel) {
635 MatchAllTag = 0xFF;
636 }
637 UseMatchAllCallback = !CompileKernel && MatchAllTag.has_value();
638
639 // If we don't have personality function support, fall back to landing pads.
640 InstrumentLandingPads = optOr(ClInstrumentLandingPads, !NewRuntime);
641
642 InstrumentGlobals =
643 !CompileKernel && !UsePageAliases && optOr(ClGlobals, NewRuntime);
644
645 if (!CompileKernel) {
646 createHwasanCtorComdat();
647
648 if (InstrumentGlobals)
649 instrumentGlobals();
650
651 bool InstrumentPersonalityFunctions =
652 optOr(ClInstrumentPersonalityFunctions, NewRuntime);
653 if (InstrumentPersonalityFunctions)
654 instrumentPersonalityFunctions();
655 }
656
657 if (!TargetTriple.isAndroid()) {
658 Constant *C = M.getOrInsertGlobal("__hwasan_tls", IntptrTy, [&] {
659 auto *GV = new GlobalVariable(M, IntptrTy, /*isConstant=*/false,
661 "__hwasan_tls", nullptr,
664 return GV;
665 });
666 ThreadPtrGlobal = cast<GlobalVariable>(C);
667 }
668}
669
670void HWAddressSanitizer::initializeCallbacks(Module &M) {
671 IRBuilder<> IRB(*C);
672 const std::string MatchAllStr = UseMatchAllCallback ? "_match_all" : "";
673 FunctionType *HwasanMemoryAccessCallbackSizedFnTy,
674 *HwasanMemoryAccessCallbackFnTy, *HwasanMemTransferFnTy,
675 *HwasanMemsetFnTy;
676 if (UseMatchAllCallback) {
677 HwasanMemoryAccessCallbackSizedFnTy =
678 FunctionType::get(VoidTy, {IntptrTy, IntptrTy, Int8Ty}, false);
679 HwasanMemoryAccessCallbackFnTy =
680 FunctionType::get(VoidTy, {IntptrTy, Int8Ty}, false);
681 HwasanMemTransferFnTy =
682 FunctionType::get(PtrTy, {PtrTy, PtrTy, IntptrTy, Int8Ty}, false);
683 HwasanMemsetFnTy =
684 FunctionType::get(PtrTy, {PtrTy, Int32Ty, IntptrTy, Int8Ty}, false);
685 } else {
686 HwasanMemoryAccessCallbackSizedFnTy =
687 FunctionType::get(VoidTy, {IntptrTy, IntptrTy}, false);
688 HwasanMemoryAccessCallbackFnTy =
689 FunctionType::get(VoidTy, {IntptrTy}, false);
690 HwasanMemTransferFnTy =
691 FunctionType::get(PtrTy, {PtrTy, PtrTy, IntptrTy}, false);
692 HwasanMemsetFnTy =
693 FunctionType::get(PtrTy, {PtrTy, Int32Ty, IntptrTy}, false);
694 }
695
696 for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
697 const std::string TypeStr = AccessIsWrite ? "store" : "load";
698 const std::string EndingStr = Recover ? "_noabort" : "";
699
700 HwasanMemoryAccessCallbackSized[AccessIsWrite] = M.getOrInsertFunction(
701 ClMemoryAccessCallbackPrefix + TypeStr + "N" + MatchAllStr + EndingStr,
702 HwasanMemoryAccessCallbackSizedFnTy);
703
704 for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
705 AccessSizeIndex++) {
706 HwasanMemoryAccessCallback[AccessIsWrite][AccessSizeIndex] =
707 M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + TypeStr +
708 itostr(1ULL << AccessSizeIndex) +
709 MatchAllStr + EndingStr,
710 HwasanMemoryAccessCallbackFnTy);
711 }
712 }
713
714 const std::string MemIntrinCallbackPrefix =
715 (CompileKernel && !ClKasanMemIntrinCallbackPrefix)
716 ? std::string("")
718
719 HwasanMemmove = M.getOrInsertFunction(
720 MemIntrinCallbackPrefix + "memmove" + MatchAllStr, HwasanMemTransferFnTy);
721 HwasanMemcpy = M.getOrInsertFunction(
722 MemIntrinCallbackPrefix + "memcpy" + MatchAllStr, HwasanMemTransferFnTy);
723 HwasanMemset = M.getOrInsertFunction(
724 MemIntrinCallbackPrefix + "memset" + MatchAllStr, HwasanMemsetFnTy);
725
726 HwasanTagMemoryFunc = M.getOrInsertFunction("__hwasan_tag_memory", VoidTy,
727 PtrTy, Int8Ty, IntptrTy);
728 HwasanGenerateTagFunc =
729 M.getOrInsertFunction("__hwasan_generate_tag", Int8Ty);
730
731 HwasanRecordFrameRecordFunc =
732 M.getOrInsertFunction("__hwasan_add_frame_record", VoidTy, Int64Ty);
733
734 ShadowGlobal =
735 M.getOrInsertGlobal("__hwasan_shadow", ArrayType::get(Int8Ty, 0));
736
737 HwasanHandleVfork =
738 M.getOrInsertFunction("__hwasan_handle_vfork", VoidTy, IntptrTy);
739}
740
741Value *HWAddressSanitizer::getOpaqueNoopCast(IRBuilder<> &IRB, Value *Val) {
742 // An empty inline asm with input reg == output reg.
743 // An opaque no-op cast, basically.
744 // This prevents code bloat as a result of rematerializing trivial definitions
745 // such as constants or global addresses at every load and store.
746 InlineAsm *Asm =
747 InlineAsm::get(FunctionType::get(PtrTy, {Val->getType()}, false),
748 StringRef(""), StringRef("=r,0"),
749 /*hasSideEffects=*/false);
750 return IRB.CreateCall(Asm, {Val}, ".hwasan.shadow");
751}
752
753Value *HWAddressSanitizer::getDynamicShadowIfunc(IRBuilder<> &IRB) {
754 return getOpaqueNoopCast(IRB, ShadowGlobal);
755}
756
757Value *HWAddressSanitizer::getShadowNonTls(IRBuilder<> &IRB) {
758 if (Mapping.Offset != kDynamicShadowSentinel)
759 return getOpaqueNoopCast(
761 ConstantInt::get(IntptrTy, Mapping.Offset), PtrTy));
762
763 if (Mapping.InGlobal)
764 return getDynamicShadowIfunc(IRB);
765
766 Value *GlobalDynamicAddress =
769 return IRB.CreateLoad(PtrTy, GlobalDynamicAddress);
770}
771
772bool HWAddressSanitizer::ignoreAccessWithoutRemark(Instruction *Inst,
773 Value *Ptr) {
774 // Do not instrument accesses from different address spaces; we cannot deal
775 // with them.
776 Type *PtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
777 if (PtrTy->getPointerAddressSpace() != 0)
778 return true;
779
780 // Ignore swifterror addresses.
781 // swifterror memory addresses are mem2reg promoted by instruction
782 // selection. As such they cannot have regular uses like an instrumentation
783 // function and it makes no sense to track them as memory.
784 if (Ptr->isSwiftError())
785 return true;
786
787 if (findAllocaForValue(Ptr)) {
788 if (!InstrumentStack)
789 return true;
790 if (SSI && SSI->stackAccessIsSafe(*Inst))
791 return true;
792 }
793
794 if (isa<GlobalVariable>(getUnderlyingObject(Ptr))) {
795 if (!InstrumentGlobals)
796 return true;
797 // TODO: Optimize inbound global accesses, like Asan `instrumentMop`.
798 }
799
800 return false;
801}
802
803bool HWAddressSanitizer::ignoreAccess(OptimizationRemarkEmitter &ORE,
804 Instruction *Inst, Value *Ptr) {
805 bool Ignored = ignoreAccessWithoutRemark(Inst, Ptr);
806 if (Ignored) {
807 ORE.emit(
808 [&]() { return OptimizationRemark(DEBUG_TYPE, "ignoreAccess", Inst); });
809 } else {
810 ORE.emit([&]() {
811 return OptimizationRemarkMissed(DEBUG_TYPE, "ignoreAccess", Inst);
812 });
813 }
814 return Ignored;
815}
816
817void HWAddressSanitizer::getInterestingMemoryOperands(
819 const TargetLibraryInfo &TLI,
821 // Skip memory accesses inserted by another instrumentation.
822 if (I->hasMetadata(LLVMContext::MD_nosanitize))
823 return;
824
825 // Do not instrument the load fetching the dynamic shadow address.
826 if (ShadowBase == I)
827 return;
828
829 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
830 if (!ClInstrumentReads || ignoreAccess(ORE, I, LI->getPointerOperand()))
831 return;
832 Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
833 LI->getType(), LI->getAlign());
834 } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
835 if (!ClInstrumentWrites || ignoreAccess(ORE, I, SI->getPointerOperand()))
836 return;
837 Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,
838 SI->getValueOperand()->getType(), SI->getAlign());
839 } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
840 if (!ClInstrumentAtomics || ignoreAccess(ORE, I, RMW->getPointerOperand()))
841 return;
842 Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,
843 RMW->getValOperand()->getType(), std::nullopt);
844 } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
845 if (!ClInstrumentAtomics || ignoreAccess(ORE, I, XCHG->getPointerOperand()))
846 return;
847 Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
848 XCHG->getCompareOperand()->getType(),
849 std::nullopt);
850 } else if (auto *CI = dyn_cast<CallInst>(I)) {
851 for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
852 if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
853 ignoreAccess(ORE, I, CI->getArgOperand(ArgNo)))
854 continue;
855 Type *Ty = CI->getParamByValType(ArgNo);
856 Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
857 }
859 }
860}
861
863 if (LoadInst *LI = dyn_cast<LoadInst>(I))
864 return LI->getPointerOperandIndex();
865 if (StoreInst *SI = dyn_cast<StoreInst>(I))
866 return SI->getPointerOperandIndex();
867 if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I))
868 return RMW->getPointerOperandIndex();
869 if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I))
870 return XCHG->getPointerOperandIndex();
871 report_fatal_error("Unexpected instruction");
872 return -1;
873}
874
876 size_t Res = llvm::countr_zero(TypeSize / 8);
878 return Res;
879}
880
881void HWAddressSanitizer::untagPointerOperand(Instruction *I, Value *Addr) {
882 if (TargetTriple.isAArch64() || TargetTriple.getArch() == Triple::x86_64 ||
883 TargetTriple.isRISCV64())
884 return;
885
886 IRBuilder<> IRB(I);
887 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
888 Value *UntaggedPtr =
889 IRB.CreateIntToPtr(untagPointer(IRB, AddrLong), Addr->getType());
890 I->setOperand(getPointerOperandIndex(I), UntaggedPtr);
891}
892
893Value *HWAddressSanitizer::memToShadow(Value *Mem, IRBuilder<> &IRB) {
894 // Mem >> Scale
895 Value *Shadow = IRB.CreateLShr(Mem, Mapping.Scale);
896 if (Mapping.Offset == 0)
897 return IRB.CreateIntToPtr(Shadow, PtrTy);
898 // (Mem >> Scale) + Offset
899 return IRB.CreatePtrAdd(ShadowBase, Shadow);
900}
901
902int64_t HWAddressSanitizer::getAccessInfo(bool IsWrite,
903 unsigned AccessSizeIndex) {
904 return (CompileKernel << HWASanAccessInfo::CompileKernelShift) |
905 (MatchAllTag.has_value() << HWASanAccessInfo::HasMatchAllShift) |
906 (MatchAllTag.value_or(0) << HWASanAccessInfo::MatchAllShift) |
907 (Recover << HWASanAccessInfo::RecoverShift) |
908 (IsWrite << HWASanAccessInfo::IsWriteShift) |
909 (AccessSizeIndex << HWASanAccessInfo::AccessSizeShift);
910}
911
912HWAddressSanitizer::ShadowTagCheckInfo
913HWAddressSanitizer::insertShadowTagCheck(Value *Ptr, Instruction *InsertBefore,
914 DomTreeUpdater &DTU, LoopInfo *LI) {
915 ShadowTagCheckInfo R;
916
917 IRBuilder<> IRB(InsertBefore);
918
919 R.PtrLong = IRB.CreatePointerCast(Ptr, IntptrTy);
920 R.PtrTag =
921 IRB.CreateTrunc(IRB.CreateLShr(R.PtrLong, PointerTagShift), Int8Ty);
922 R.AddrLong = untagPointer(IRB, R.PtrLong);
923 Value *Shadow = memToShadow(R.AddrLong, IRB);
924 R.MemTag = IRB.CreateLoad(Int8Ty, Shadow);
925 Value *TagMismatch = IRB.CreateICmpNE(R.PtrTag, R.MemTag);
926
927 if (MatchAllTag.has_value()) {
928 Value *TagNotIgnored = IRB.CreateICmpNE(
929 R.PtrTag, ConstantInt::get(R.PtrTag->getType(), *MatchAllTag));
930 TagMismatch = IRB.CreateAnd(TagMismatch, TagNotIgnored);
931 }
932
933 R.TagMismatchTerm = SplitBlockAndInsertIfThen(
934 TagMismatch, InsertBefore, false,
935 MDBuilder(*C).createUnlikelyBranchWeights(), &DTU, LI);
936
937 return R;
938}
939
// Instrument one memory access with the outlined check: a call to one of the
// hwasan_check_memaccess* intrinsics, optionally preceded by an inlined
// tag-compare fast path so the outlined check only runs on mismatch.
// NOTE(review): this doxygen-derived view elides a few original source lines
// (953, 969, 976 — the Intrinsic::getDeclaration call heads); the code below
// is otherwise verbatim.
940void HWAddressSanitizer::instrumentMemAccessOutline(Value *Ptr, bool IsWrite,
941                                                    unsigned AccessSizeIndex,
942                                                    Instruction *InsertBefore,
943                                                    DomTreeUpdater &DTU,
944                                                    LoopInfo *LI) {
945  assert(!UsePageAliases);
946  const int64_t AccessInfo = getAccessInfo(IsWrite, AccessSizeIndex);
947
  // When the fast path is inlined, only reach the outlined check on the
  // tag-mismatch branch produced by insertShadowTagCheck().
948  if (InlineFastPath)
949    InsertBefore =
950        insertShadowTagCheck(Ptr, InsertBefore, DTU, LI).TagMismatchTerm;
951
952  IRBuilder<> IRB(InsertBefore);
954  bool useFixedShadowIntrinsic = false;
955  // The memaccess fixed shadow intrinsic is only supported on AArch64,
956  // which allows a 16-bit immediate to be left-shifted by 32.
957  // Since kShadowBaseAlignment == 32, and Linux by default will not
958  // mmap above 48-bits, practically any valid shadow offset is
959  // representable.
960  // In particular, an offset of 4TB (1024 << 32) is representable, and
961  // ought to be good enough for anybody.
962  if (TargetTriple.isAArch64() && Mapping.Offset != kDynamicShadowSentinel) {
963    uint16_t offset_shifted = Mapping.Offset >> 32;
964    useFixedShadowIntrinsic = (uint64_t)offset_shifted << 32 == Mapping.Offset;
965  }
966
  // Fixed-shadow variant bakes the offset into the call; otherwise the
  // dynamic ShadowBase is passed explicitly.
967  if (useFixedShadowIntrinsic)
968    IRB.CreateCall(
970        M, UseShortGranules
971               ? Intrinsic::hwasan_check_memaccess_shortgranules_fixedshadow
972               : Intrinsic::hwasan_check_memaccess_fixedshadow),
973        {Ptr, ConstantInt::get(Int32Ty, AccessInfo),
974         ConstantInt::get(Int64Ty, Mapping.Offset)});
975  else
977        M, UseShortGranules
978               ? Intrinsic::hwasan_check_memaccess_shortgranules
979               : Intrinsic::hwasan_check_memaccess),
980        {ShadowBase, Ptr, ConstantInt::get(Int32Ty, AccessInfo)});
981}
982
// Instrument one memory access entirely inline: tag compare, short-granule
// handling, an in-memory tag re-check for the last granule, and finally a
// target-specific trap (int3/brk/ebreak) encoding AccessInfo for the signal
// handler.
// NOTE(review): this doxygen-derived view elides several original lines
// (1006, 1015 — branch-weight arguments; 1023, 1034, 1042 — the
// "Asm = InlineAsm::get(" heads); the code below is otherwise verbatim.
983void HWAddressSanitizer::instrumentMemAccessInline(Value *Ptr, bool IsWrite,
984                                                   unsigned AccessSizeIndex,
985                                                   Instruction *InsertBefore,
986                                                   DomTreeUpdater &DTU,
987                                                   LoopInfo *LI) {
988  assert(!UsePageAliases);
989  const int64_t AccessInfo = getAccessInfo(IsWrite, AccessSizeIndex);
990
991  ShadowTagCheckInfo TCI = insertShadowTagCheck(Ptr, InsertBefore, DTU, LI);
992
  // Memory tags > 15 cannot be short-granule sizes, so a mismatch there is a
  // definite failure; split off the failure path first.
993  IRBuilder<> IRB(TCI.TagMismatchTerm);
994  Value *OutOfShortGranuleTagRange =
995      IRB.CreateICmpUGT(TCI.MemTag, ConstantInt::get(Int8Ty, 15));
996  Instruction *CheckFailTerm = SplitBlockAndInsertIfThen(
997      OutOfShortGranuleTagRange, TCI.TagMismatchTerm, !Recover,
998      MDBuilder(*C).createUnlikelyBranchWeights(), &DTU, LI);
999
  // Short-granule case: the access is in bounds iff its last byte stays below
  // the short-granule size stored as the memory tag.
1000  IRB.SetInsertPoint(TCI.TagMismatchTerm);
1001  Value *PtrLowBits = IRB.CreateTrunc(IRB.CreateAnd(TCI.PtrLong, 15), Int8Ty);
1002  PtrLowBits = IRB.CreateAdd(
1003      PtrLowBits, ConstantInt::get(Int8Ty, (1 << AccessSizeIndex) - 1));
1004  Value *PtrLowBitsOOB = IRB.CreateICmpUGE(PtrLowBits, TCI.MemTag);
1005  SplitBlockAndInsertIfThen(PtrLowBitsOOB, TCI.TagMismatchTerm, false,
1007                            LI, CheckFailTerm->getParent());
1008
  // For short granules the real tag lives in the granule's last byte;
  // re-check the pointer tag against it.
1009  IRB.SetInsertPoint(TCI.TagMismatchTerm);
1010  Value *InlineTagAddr = IRB.CreateOr(TCI.AddrLong, 15);
1011  InlineTagAddr = IRB.CreateIntToPtr(InlineTagAddr, PtrTy);
1012  Value *InlineTag = IRB.CreateLoad(Int8Ty, InlineTagAddr);
1013  Value *InlineTagMismatch = IRB.CreateICmpNE(TCI.PtrTag, InlineTag);
1014  SplitBlockAndInsertIfThen(InlineTagMismatch, TCI.TagMismatchTerm, false,
1016                            LI, CheckFailTerm->getParent());
1017
  // Failure path: raise a trap whose immediate encodes AccessInfo, with the
  // faulting address in the register the runtime's signal handler expects.
1018  IRB.SetInsertPoint(CheckFailTerm);
1019  InlineAsm *Asm;
1020  switch (TargetTriple.getArch()) {
1021  case Triple::x86_64:
1022    // The signal handler will find the data address in rdi.
1024        FunctionType::get(VoidTy, {TCI.PtrLong->getType()}, false),
1025        "int3\nnopl " +
1026            itostr(0x40 + (AccessInfo & HWASanAccessInfo::RuntimeMask)) +
1027            "(%rax)",
1028        "{rdi}",
1029        /*hasSideEffects=*/true);
1030    break;
1031  case Triple::aarch64:
1032  case Triple::aarch64_be:
1033    // The signal handler will find the data address in x0.
1035        FunctionType::get(VoidTy, {TCI.PtrLong->getType()}, false),
1036        "brk #" + itostr(0x900 + (AccessInfo & HWASanAccessInfo::RuntimeMask)),
1037        "{x0}",
1038        /*hasSideEffects=*/true);
1039    break;
1040  case Triple::riscv64:
1041    // The signal handler will find the data address in x10.
1043        FunctionType::get(VoidTy, {TCI.PtrLong->getType()}, false),
1044        "ebreak\naddiw x0, x11, " +
1045            itostr(0x40 + (AccessInfo & HWASanAccessInfo::RuntimeMask)),
1046        "{x10}",
1047        /*hasSideEffects=*/true);
1048    break;
1049  default:
1050    report_fatal_error("unsupported architecture");
1051  }
1052  IRB.CreateCall(Asm, TCI.PtrLong);
  // In recover mode fall through past the trap instead of terminating.
1053  if (Recover)
1054    cast<BranchInst>(CheckFailTerm)
1055        ->setSuccessor(0, TCI.TagMismatchTerm->getParent());
1056}
1057
1058bool HWAddressSanitizer::ignoreMemIntrinsic(OptimizationRemarkEmitter &ORE,
1059 MemIntrinsic *MI) {
1060 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
1061 return (!ClInstrumentWrites || ignoreAccess(ORE, MTI, MTI->getDest())) &&
1062 (!ClInstrumentReads || ignoreAccess(ORE, MTI, MTI->getSource()));
1063 }
1064 if (isa<MemSetInst>(MI))
1065 return !ClInstrumentWrites || ignoreAccess(ORE, MI, MI->getDest());
1066 return false;
1067}
1068
// Replace a mem intrinsic with the corresponding tag-aware hwasan runtime
// call (__hwasan_memcpy/memmove/memset), optionally appending the match-all
// tag argument, then delete the original intrinsic.
// NOTE(review): this doxygen-derived view elides the two original lines
// declaring the Args SmallVector (1072, 1080); the code below is otherwise
// verbatim.
1069void HWAddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
1070  IRBuilder<> IRB(MI);
1071  if (isa<MemTransferInst>(MI)) {
1073        MI->getOperand(0), MI->getOperand(1),
1074        IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)};
1075
1076    if (UseMatchAllCallback)
1077      Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
1078    IRB.CreateCall(isa<MemMoveInst>(MI) ? HwasanMemmove : HwasanMemcpy, Args);
1079  } else if (isa<MemSetInst>(MI)) {
    // memset's value operand is widened to i32, matching the C memset ABI.
1081        MI->getOperand(0),
1082        IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
1083        IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)};
1084    if (UseMatchAllCallback)
1085      Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
1086    IRB.CreateCall(HwasanMemset, Args);
1087  }
  // The runtime call fully replaces the intrinsic.
1088  MI->eraseFromParent();
1089}
1090
// Instrument a single interesting memory operand: fixed-size power-of-two
// accesses get the callback/outlined/inline check per configuration; all
// other accesses go through the sized callback. The pointer operand is then
// untagged on targets that require it.
// NOTE(review): this doxygen-derived view elides the two original lines
// declaring the Args SmallVector (1108, 1121); the code below is otherwise
// verbatim.
1091bool HWAddressSanitizer::instrumentMemAccess(InterestingMemoryOperand &O,
1092                                             DomTreeUpdater &DTU,
1093                                             LoopInfo *LI) {
1094  Value *Addr = O.getPtr();
1095
1096  LLVM_DEBUG(dbgs() << "Instrumenting: " << O.getInsn() << "\n");
1097
  // Masked loads/stores are not handled yet.
1098  if (O.MaybeMask)
1099    return false; // FIXME
1100
  // Fast path applies only to fixed-size, power-of-two accesses of at most
  // 16 bytes that are sufficiently aligned (so one shadow byte covers them).
1101  IRBuilder<> IRB(O.getInsn());
1102  if (!O.TypeStoreSize.isScalable() && isPowerOf2_64(O.TypeStoreSize) &&
1103      (O.TypeStoreSize / 8 <= (1ULL << (kNumberOfAccessSizes - 1))) &&
1104      (!O.Alignment || *O.Alignment >= Mapping.getObjectAlignment() ||
1105       *O.Alignment >= O.TypeStoreSize / 8)) {
1106    size_t AccessSizeIndex = TypeSizeToSizeIndex(O.TypeStoreSize);
1107    if (InstrumentWithCalls) {
1109      if (UseMatchAllCallback)
1110        Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
1111      IRB.CreateCall(HwasanMemoryAccessCallback[O.IsWrite][AccessSizeIndex],
1112                     Args);
1113    } else if (OutlinedChecks) {
1114      instrumentMemAccessOutline(Addr, O.IsWrite, AccessSizeIndex, O.getInsn(),
1115                                 DTU, LI);
1116    } else {
1117      instrumentMemAccessInline(Addr, O.IsWrite, AccessSizeIndex, O.getInsn(),
1118                                DTU, LI);
1119    }
1120  } else {
    // Slow path: variable/odd-size access, checked byte-by-byte via the
    // sized runtime callback.
1122        IRB.CreatePointerCast(Addr, IntptrTy),
1123        IRB.CreateUDiv(IRB.CreateTypeSize(IntptrTy, O.TypeStoreSize),
1124                       ConstantInt::get(IntptrTy, 8))};
1125    if (UseMatchAllCallback)
1126      Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
1127    IRB.CreateCall(HwasanMemoryAccessCallbackSized[O.IsWrite], Args);
1128  }
1129  untagPointerOperand(O.getInsn(), Addr);
1130
1131  return true;
1132}
1133
// Set the shadow tag for an alloca of \p Size bytes to \p Tag, either via the
// __hwasan_tag_memory runtime call or by writing the shadow inline. With
// short granules enabled, a partially-used final granule stores its used size
// in shadow and the real tag in the granule's last byte.
1134void HWAddressSanitizer::tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag,
1135                                   size_t Size) {
1136  size_t AlignedSize = alignTo(Size, Mapping.getObjectAlignment());
  // Without short granules the whole aligned region gets the full tag.
1137  if (!UseShortGranules)
1138    Size = AlignedSize;
1139
1140  Tag = IRB.CreateTrunc(Tag, Int8Ty);
1141  if (InstrumentWithCalls) {
1142    IRB.CreateCall(HwasanTagMemoryFunc,
1143                   {IRB.CreatePointerCast(AI, PtrTy), Tag,
1144                    ConstantInt::get(IntptrTy, AlignedSize)});
1145  } else {
1146    size_t ShadowSize = Size >> Mapping.Scale;
1147    Value *AddrLong = untagPointer(IRB, IRB.CreatePointerCast(AI, IntptrTy));
1148    Value *ShadowPtr = memToShadow(AddrLong, IRB);
1149    // If this memset is not inlined, it will be intercepted in the hwasan
1150    // runtime library. That's OK, because the interceptor skips the checks if
1151    // the address is in the shadow region.
1152    // FIXME: the interceptor is not as fast as real memset. Consider lowering
1153    // llvm.memset right here into either a sequence of stores, or a call to
1154    // hwasan_tag_memory.
1155    if (ShadowSize)
1156      IRB.CreateMemSet(ShadowPtr, Tag, ShadowSize, Align(1));
    // Short granule: shadow holds the used byte count of the last granule,
    // and the granule's own last byte holds the real tag.
1157    if (Size != AlignedSize) {
1158      const uint8_t SizeRemainder = Size % Mapping.getObjectAlignment().value();
1159      IRB.CreateStore(ConstantInt::get(Int8Ty, SizeRemainder),
1160                      IRB.CreateConstGEP1_32(Int8Ty, ShadowPtr, ShadowSize));
1161      IRB.CreateStore(
1162          Tag, IRB.CreateConstGEP1_32(Int8Ty, IRB.CreatePointerCast(AI, PtrTy),
1163                                      AlignedSize - 1));
1164    }
1165  }
1166}
1167
1168unsigned HWAddressSanitizer::retagMask(unsigned AllocaNo) {
1169 if (TargetTriple.getArch() == Triple::x86_64)
1170 return AllocaNo & TagMaskByte;
1171
1172 // A list of 8-bit numbers that have at most one run of non-zero bits.
1173 // x = x ^ (mask << 56) can be encoded as a single armv8 instruction for these
1174 // masks.
1175 // The list does not include the value 255, which is used for UAR.
1176 //
1177 // Because we are more likely to use earlier elements of this list than later
1178 // ones, it is sorted in increasing order of probability of collision with a
1179 // mask allocated (temporally) nearby. The program that generated this list
1180 // can be found at:
1181 // https://github.com/google/sanitizers/blob/master/hwaddress-sanitizer/sort_masks.py
1182 static const unsigned FastMasks[] = {
1183 0, 128, 64, 192, 32, 96, 224, 112, 240, 48, 16, 120,
1184 248, 56, 24, 8, 124, 252, 60, 28, 12, 4, 126, 254,
1185 62, 30, 14, 6, 2, 127, 63, 31, 15, 7, 3, 1};
1186 return FastMasks[AllocaNo % std::size(FastMasks)];
1187}
1188
1189Value *HWAddressSanitizer::applyTagMask(IRBuilder<> &IRB, Value *OldTag) {
1190 if (TagMaskByte == 0xFF)
1191 return OldTag; // No need to clear the tag byte.
1192 return IRB.CreateAnd(OldTag,
1193 ConstantInt::get(OldTag->getType(), TagMaskByte));
1194}
1195
1196Value *HWAddressSanitizer::getNextTagWithCall(IRBuilder<> &IRB) {
1197 return IRB.CreateZExt(IRB.CreateCall(HwasanGenerateTagFunc), IntptrTy);
1198}
1199
// Compute (and cache) the per-function stack base tag, derived from frame
// pointer entropy; returns nullptr when the caller should fall back to
// runtime-generated tags.
// NOTE(review): this doxygen-derived view elides original line 1201 (the
// guard condition for the early "return nullptr"); the code below is
// otherwise verbatim.
1200Value *HWAddressSanitizer::getStackBaseTag(IRBuilder<> &IRB) {
1202    return nullptr;
1203  if (StackBaseTag)
1204    return StackBaseTag;
1205  // Extract some entropy from the stack pointer for the tags.
1206  // Take bits 20..28 (ASLR entropy) and xor with bits 0..8 (these differ
1207  // between functions).
1208  Value *FramePointerLong = getCachedFP(IRB);
1209  Value *StackTag =
1210      applyTagMask(IRB, IRB.CreateXor(FramePointerLong,
1211                                      IRB.CreateLShr(FramePointerLong, 20)));
1212  StackTag->setName("hwasan.stack.base.tag");
1213  return StackTag;
1214}
1215
// Derive the tag for alloca #\p AllocaNo by XORing the stack base tag with a
// cheap per-alloca mask; falls back to a runtime tag in the elided guard.
// NOTE(review): this doxygen-derived view elides original line 1218 (the
// condition guarding the getNextTagWithCall fallback); the code below is
// otherwise verbatim.
1216Value *HWAddressSanitizer::getAllocaTag(IRBuilder<> &IRB, Value *StackTag,
1217                                        unsigned AllocaNo) {
1219    return getNextTagWithCall(IRB);
1220  return IRB.CreateXor(
1221      StackTag, ConstantInt::get(StackTag->getType(), retagMask(AllocaNo)));
1222}
1223
1224Value *HWAddressSanitizer::getUARTag(IRBuilder<> &IRB) {
1225 Value *FramePointerLong = getCachedFP(IRB);
1226 Value *UARTag =
1227 applyTagMask(IRB, IRB.CreateLShr(FramePointerLong, PointerTagShift));
1228
1229 UARTag->setName("hwasan.uar.tag");
1230 return UARTag;
1231}
1232
1233// Add a tag to an address.
1234Value *HWAddressSanitizer::tagPointer(IRBuilder<> &IRB, Type *Ty,
1235 Value *PtrLong, Value *Tag) {
1236 assert(!UsePageAliases);
1237 Value *TaggedPtrLong;
1238 if (CompileKernel) {
1239 // Kernel addresses have 0xFF in the most significant byte.
1240 Value *ShiftedTag =
1241 IRB.CreateOr(IRB.CreateShl(Tag, PointerTagShift),
1242 ConstantInt::get(IntptrTy, (1ULL << PointerTagShift) - 1));
1243 TaggedPtrLong = IRB.CreateAnd(PtrLong, ShiftedTag);
1244 } else {
1245 // Userspace can simply do OR (tag << PointerTagShift);
1246 Value *ShiftedTag = IRB.CreateShl(Tag, PointerTagShift);
1247 TaggedPtrLong = IRB.CreateOr(PtrLong, ShiftedTag);
1248 }
1249 return IRB.CreateIntToPtr(TaggedPtrLong, Ty);
1250}
1251
1252// Remove tag from an address.
1253Value *HWAddressSanitizer::untagPointer(IRBuilder<> &IRB, Value *PtrLong) {
1254 assert(!UsePageAliases);
1255 Value *UntaggedPtrLong;
1256 if (CompileKernel) {
1257 // Kernel addresses have 0xFF in the most significant byte.
1258 UntaggedPtrLong =
1259 IRB.CreateOr(PtrLong, ConstantInt::get(PtrLong->getType(),
1260 TagMaskByte << PointerTagShift));
1261 } else {
1262 // Userspace addresses have 0x00.
1263 UntaggedPtrLong = IRB.CreateAnd(
1264 PtrLong, ConstantInt::get(PtrLong->getType(),
1265 ~(TagMaskByte << PointerTagShift)));
1266 }
1267 return UntaggedPtrLong;
1268}
1269
1270Value *HWAddressSanitizer::getHwasanThreadSlotPtr(IRBuilder<> &IRB) {
1271 // Android provides a fixed TLS slot for sanitizers. See TLS_SLOT_SANITIZER
1272 // in Bionic's libc/platform/bionic/tls_defines.h.
1273 constexpr int SanitizerSlot = 6;
1274 if (TargetTriple.isAArch64() && TargetTriple.isAndroid())
1275 return memtag::getAndroidSlotPtr(IRB, SanitizerSlot);
1276 return ThreadPtrGlobal;
1277}
1278
1279Value *HWAddressSanitizer::getCachedFP(IRBuilder<> &IRB) {
1280 if (!CachedFP)
1281 CachedFP = memtag::getFP(IRB);
1282 return CachedFP;
1283}
1284
1285Value *HWAddressSanitizer::getFrameRecordInfo(IRBuilder<> &IRB) {
1286 // Prepare ring buffer data.
1287 Value *PC = memtag::getPC(TargetTriple, IRB);
1288 Value *FP = getCachedFP(IRB);
1289
1290 // Mix FP and PC.
1291 // Assumptions:
1292 // PC is 0x0000PPPPPPPPPPPP (48 bits are meaningful, others are zero)
1293 // FP is 0xfffffffffffFFFF0 (4 lower bits are zero)
1294 // We only really need ~20 lower non-zero bits (FFFF), so we mix like this:
1295 // 0xFFFFPPPPPPPPPPPP
1296 //
1297 // FP works because in AArch64FrameLowering::getFrameIndexReference, we
1298 // prefer FP-relative offsets for functions compiled with HWASan.
1299 FP = IRB.CreateShl(FP, 44);
1300 return IRB.CreateOr(PC, FP);
1301}
1302
// Emit the per-function prologue: resolve the shadow base (global, ifunc, or
// TLS-derived) and, when requested, record this frame in the thread-local
// stack-history ring buffer.
// NOTE(review): this doxygen-derived view elides original line 1379 (the
// llvm_unreachable call head in the "none" case); the code below is
// otherwise verbatim.
1303void HWAddressSanitizer::emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord) {
1304  if (!Mapping.InTls)
1305    ShadowBase = getShadowNonTls(IRB);
1306  else if (!WithFrameRecord && TargetTriple.isAndroid())
1307    ShadowBase = getDynamicShadowIfunc(IRB);
1308
  // Nothing further to do if the shadow base is known and no frame record is
  // needed.
1309  if (!WithFrameRecord && ShadowBase)
1310    return;
1311
1312  Value *SlotPtr = nullptr;
1313  Value *ThreadLong = nullptr;
1314  Value *ThreadLongMaybeUntagged = nullptr;
1315
  // Lazily load the thread slot once; both the ring buffer update and the
  // shadow-base derivation below may need it.
1316  auto getThreadLongMaybeUntagged = [&]() {
1317    if (!SlotPtr)
1318      SlotPtr = getHwasanThreadSlotPtr(IRB);
1319    if (!ThreadLong)
1320      ThreadLong = IRB.CreateLoad(IntptrTy, SlotPtr);
1321    // Extract the address field from ThreadLong. Unnecessary on AArch64 with
1322    // TBI.
1323    return TargetTriple.isAArch64() ? ThreadLong
1324                                    : untagPointer(IRB, ThreadLong);
1325  };
1326
1327  if (WithFrameRecord) {
1328    switch (ClRecordStackHistory) {
1329    case libcall: {
1330      // Emit a runtime call into hwasan rather than emitting instructions for
1331      // recording stack history.
1332      Value *FrameRecordInfo = getFrameRecordInfo(IRB);
1333      IRB.CreateCall(HwasanRecordFrameRecordFunc, {FrameRecordInfo});
1334      break;
1335    }
1336    case instr: {
1337      ThreadLongMaybeUntagged = getThreadLongMaybeUntagged();
1338
1339      StackBaseTag = IRB.CreateAShr(ThreadLong, 3);
1340
1341      // Store data to ring buffer.
1342      Value *FrameRecordInfo = getFrameRecordInfo(IRB);
1343      Value *RecordPtr =
1344          IRB.CreateIntToPtr(ThreadLongMaybeUntagged, IRB.getPtrTy(0));
1345      IRB.CreateStore(FrameRecordInfo, RecordPtr);
1346
1347      // Update the ring buffer. Top byte of ThreadLong defines the size of the
1348      // buffer in pages, it must be a power of two, and the start of the buffer
1349      // must be aligned by twice that much. Therefore wrap around of the ring
1350      // buffer is simply Addr &= ~((ThreadLong >> 56) << 12).
1351      // The use of AShr instead of LShr is due to
1352      // https://bugs.llvm.org/show_bug.cgi?id=39030
1353      // Runtime library makes sure not to use the highest bit.
1354      //
1355      // Mechanical proof of this address calculation can be found at:
1356      // https://github.com/google/sanitizers/blob/master/hwaddress-sanitizer/prove_hwasanwrap.smt2
1357      //
1358      // Example of the wrap case for N = 1
1359      // Pointer:   0x01AAAAAAAAAAAFF8
1360      //                     +
1361      //            0x0000000000000008
1362      //                     =
1363      //            0x01AAAAAAAAAAB000
1364      //                     &
1365      // WrapMask:  0xFFFFFFFFFFFFF000
1366      //                     =
1367      //            0x01AAAAAAAAAAA000
1368      //
1369      // Then the WrapMask will be a no-op until the next wrap case.
1370      Value *WrapMask = IRB.CreateXor(
1371          IRB.CreateShl(IRB.CreateAShr(ThreadLong, 56), 12, "", true, true),
1372          ConstantInt::get(IntptrTy, (uint64_t)-1));
1373      Value *ThreadLongNew = IRB.CreateAnd(
1374          IRB.CreateAdd(ThreadLong, ConstantInt::get(IntptrTy, 8)), WrapMask);
1375      IRB.CreateStore(ThreadLongNew, SlotPtr);
1376      break;
1377    }
1378    case none: {
1380          "A stack history recording mode should've been selected.");
1381    }
1382    }
1383  }
1384
1385  if (!ShadowBase) {
1386    if (!ThreadLongMaybeUntagged)
1387      ThreadLongMaybeUntagged = getThreadLongMaybeUntagged();
1388
1389    // Get shadow base address by aligning RecordPtr up.
1390    // Note: this is not correct if the pointer is already aligned.
1391    // Runtime library will make sure this never happens.
1392    ShadowBase = IRB.CreateAdd(
1393        IRB.CreateOr(
1394            ThreadLongMaybeUntagged,
1395            ConstantInt::get(IntptrTy, (1ULL << kShadowBaseAlignment) - 1)),
1396        ConstantInt::get(IntptrTy, 1), "hwasan.shadow");
1397    ShadowBase = IRB.CreateIntToPtr(ShadowBase, PtrTy);
1398  }
1399}
1400
// After each landing pad, call __hwasan_handle_vfork with the current stack
// pointer so the runtime can untag frames skipped by the unwinder.
// NOTE(review): this doxygen-derived view elides original line 1407 (the
// memtag::readRegister call head); the code below is otherwise verbatim.
1401bool HWAddressSanitizer::instrumentLandingPads(
1402    SmallVectorImpl<Instruction *> &LandingPadVec) {
1403  for (auto *LP : LandingPadVec) {
1404    IRBuilder<> IRB(LP->getNextNonDebugInstruction());
1405    IRB.CreateCall(
1406        HwasanHandleVfork,
1408        IRB, (TargetTriple.getArch() == Triple::x86_64) ? "rsp" : "sp")});
1409  }
1410  return true;
1411}
1412
// Tag every interesting alloca, rewrite its uses to the tagged address,
// widen lifetime markers to the padded size, and retag with the UAR tag on
// every exit (or lifetime end, when use-after-scope applies).
// NOTE(review): this doxygen-derived view elides original lines 1429 (the
// IRBuilder construction for each alloca) and 1465 (part of the
// replaceUsesWithIf predicate); the code below is otherwise verbatim.
1413bool HWAddressSanitizer::instrumentStack(memtag::StackInfo &SInfo,
1414                                         Value *StackTag, Value *UARTag,
1415                                         const DominatorTree &DT,
1416                                         const PostDominatorTree &PDT,
1417                                         const LoopInfo &LI) {
1418  // Ideally, we want to calculate tagged stack base pointer, and rewrite all
1419  // alloca addresses using that. Unfortunately, offsets are not known yet
1420  // (unless we use ASan-style mega-alloca). Instead we keep the base tag in a
1421  // temp, shift-OR it into each alloca address and xor with the retag mask.
1422  // This generates one extra instruction per alloca use.
1423  unsigned int I = 0;
1424
1425  for (auto &KV : SInfo.AllocasToInstrument) {
1426    auto N = I++;
1427    auto *AI = KV.first;
1428    memtag::AllocaInfo &Info = KV.second;
1430
1431    // Replace uses of the alloca with tagged address.
1432    Value *Tag = getAllocaTag(IRB, StackTag, N);
1433    Value *AILong = IRB.CreatePointerCast(AI, IntptrTy);
1434    Value *AINoTagLong = untagPointer(IRB, AILong);
1435    Value *Replacement = tagPointer(IRB, AI->getType(), AINoTagLong, Tag);
1436    std::string Name =
1437        AI->hasName() ? AI->getName().str() : "alloca." + itostr(N);
1438    Replacement->setName(Name + ".hwasan");
1439
1440    size_t Size = memtag::getAllocaSizeInBytes(*AI);
1441    size_t AlignedSize = alignTo(Size, Mapping.getObjectAlignment());
1442
1443    Value *AICast = IRB.CreatePointerCast(AI, PtrTy);
1444
1445    auto HandleLifetime = [&](IntrinsicInst *II) {
1446      // Set the lifetime intrinsic to cover the whole alloca. This reduces the
1447      // set of assumptions we need to make about the lifetime. Without this we
1448      // would need to ensure that we can track the lifetime pointer to a
1449      // constant offset from the alloca, and would still need to change the
1450      // size to include the extra alignment we use for the untagging to make
1451      // the size consistent.
1452      //
1453      // The check for standard lifetime below makes sure that we have exactly
1454      // one set of start / end in any execution (i.e. the ends are not
1455      // reachable from each other), so this will not cause any problems.
1456      II->setArgOperand(0, ConstantInt::get(Int64Ty, AlignedSize));
1457      II->setArgOperand(1, AICast);
1458    };
1459    llvm::for_each(Info.LifetimeStart, HandleLifetime);
1460    llvm::for_each(Info.LifetimeEnd, HandleLifetime);
1461
    // Keep the casts themselves (and, per the elided predicate line, lifetime
    // intrinsics) pointing at the raw alloca.
1462    AI->replaceUsesWithIf(Replacement, [AICast, AILong](const Use &U) {
1463      auto *User = U.getUser();
1464      return User != AILong && User != AICast &&
1466    });
1467
1468    memtag::annotateDebugRecords(Info, retagMask(N));
1469
1470    auto TagEnd = [&](Instruction *Node) {
1471      IRB.SetInsertPoint(Node);
1472      // When untagging, use the `AlignedSize` because we need to set the tags
1473      // for the entire alloca to original. If we used `Size` here, we would
1474      // keep the last granule tagged, and store zero in the last byte of the
1475      // last granule, due to how short granules are implemented.
1476      tagAlloca(IRB, AI, UARTag, AlignedSize);
1477    };
1478    // Calls to functions that may return twice (e.g. setjmp) confuse the
1479    // postdominator analysis, and will leave us to keep memory tagged after
1480    // function return. Work around this by always untagging at every return
1481    // statement if return_twice functions are called.
1482    bool StandardLifetime =
1483        !SInfo.CallsReturnTwice &&
1484        SInfo.UnrecognizedLifetimes.empty() &&
1485        memtag::isStandardLifetime(Info.LifetimeStart, Info.LifetimeEnd, &DT,
1486                                   &LI, ClMaxLifetimes);
1487    if (DetectUseAfterScope && StandardLifetime) {
1488      IntrinsicInst *Start = Info.LifetimeStart[0];
1489      IRB.SetInsertPoint(Start->getNextNode());
1490      tagAlloca(IRB, AI, Tag, Size);
1491      if (!memtag::forAllReachableExits(DT, PDT, LI, Start, Info.LifetimeEnd,
1492                                        SInfo.RetVec, TagEnd)) {
1493        for (auto *End : Info.LifetimeEnd)
1494          End->eraseFromParent();
1495      }
1496    } else {
1497      tagAlloca(IRB, AI, Tag, Size);
1498      for (auto *RI : SInfo.RetVec)
1499        TagEnd(RI);
1500      // We inserted tagging outside of the lifetimes, so we have to remove
1501      // them.
1502      for (auto &II : Info.LifetimeStart)
1503        II->eraseFromParent();
1504      for (auto &II : Info.LifetimeEnd)
1505        II->eraseFromParent();
1506    }
1507    memtag::alignAndPadAlloca(Info, Mapping.getObjectAlignment());
1508  }
1509  for (auto &I : SInfo.UnrecognizedLifetimes)
1510    I->eraseFromParent();
1511  return true;
1512}
1513
1515 bool Skip) {
1516 if (Skip) {
1517 ORE.emit([&]() {
1518 return OptimizationRemark(DEBUG_TYPE, "Skip", &F)
1519 << "Skipped: F=" << ore::NV("Function", &F);
1520 });
1521 } else {
1522 ORE.emit([&]() {
1523 return OptimizationRemarkMissed(DEBUG_TYPE, "Sanitize", &F)
1524 << "Sanitized: F=" << ore::NV("Function", &F);
1525 });
1526 }
1527}
1528
// Decide whether to skip instrumenting this function, either randomly (for
// sampling) or because the profile marks it hot past the configured
// percentile cutoff.
// NOTE(review): this doxygen-derived view elides original lines 1530 (the
// remaining parameters of the signature), 1538, 1546 and 1548 (the percentile
// call arguments and the remark emission); the code below is otherwise
// verbatim.
1529bool HWAddressSanitizer::selectiveInstrumentationShouldSkip(
1531  bool Skip = [&]() {
    // Random sampling takes precedence over profile-based skipping.
1532    if (ClRandomSkipRate.getNumOccurrences()) {
1533      std::bernoulli_distribution D(ClRandomSkipRate);
1534      return !D(*Rng);
1535    }
1536    if (!ClHotPercentileCutoff.getNumOccurrences())
1537      return false;
1539    ProfileSummaryInfo *PSI =
1540        MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
1541    if (!PSI || !PSI->hasProfileSummary()) {
1542      ++NumNoProfileSummaryFuncs;
1543      return false;
1544    }
1545    return PSI->isFunctionHotInCallGraphNthPercentile(
1547  }();
1549  return Skip;
1550}
1551
// Main per-function driver: collect interesting allocas, memory operands,
// mem intrinsics and landing pads, then emit the prologue, stack tagging and
// per-access checks. Per-function state (ShadowBase, StackBaseTag, CachedFP)
// is reset at the end.
// NOTE(review): this doxygen-derived view elides several original lines
// (1553 — signature tail; 1565-66, 1578 — analysis lookups; 1632-33,
// 1653-55 — DT/PDT/LI lookups); the code below is otherwise verbatim.
1552void HWAddressSanitizer::sanitizeFunction(Function &F,
1554  if (&F == HwasanCtorFunction)
1555    return;
1556
1557  if (!F.hasFnAttribute(Attribute::SanitizeHWAddress))
1558    return;
1559
1560  if (F.empty())
1561    return;
1562
1563  NumTotalFuncs++;
1567
1568  if (selectiveInstrumentationShouldSkip(F, FAM))
1569    return;
1570
1571  NumInstrumentedFuncs++;
1572
1573  LLVM_DEBUG(dbgs() << "Function: " << F.getName() << "\n");
1574
  // One pass over all instructions gathers everything to instrument.
1575  SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
1576  SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
1577  SmallVector<Instruction *, 8> LandingPadVec;
1579
1580  memtag::StackInfoBuilder SIB(SSI);
1581  for (auto &Inst : instructions(F)) {
1582    if (InstrumentStack) {
1583      SIB.visit(Inst);
1584    }
1585
1586    if (InstrumentLandingPads && isa<LandingPadInst>(Inst))
1587      LandingPadVec.push_back(&Inst);
1588
1589    getInterestingMemoryOperands(ORE, &Inst, TLI, OperandsToInstrument);
1590
1591    if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst))
1592      if (!ignoreMemIntrinsic(ORE, MI))
1593        IntrinToInstrument.push_back(MI);
1594  }
1595
1596  memtag::StackInfo &SInfo = SIB.get();
1597
1598  initializeCallbacks(*F.getParent());
1599
1600  if (!LandingPadVec.empty())
1601    instrumentLandingPads(LandingPadVec);
1602
1603  if (SInfo.AllocasToInstrument.empty() && F.hasPersonalityFn() &&
1604      F.getPersonalityFn()->getName() == kHwasanPersonalityThunkName) {
1605    // __hwasan_personality_thunk is a no-op for functions without an
1606    // instrumented stack, so we can drop it.
1607    F.setPersonalityFn(nullptr);
1608  }
1609
1610  if (SInfo.AllocasToInstrument.empty() && OperandsToInstrument.empty() &&
1611      IntrinToInstrument.empty())
1612    return;
1613
1614  assert(!ShadowBase);
1615
1616  // Remove memory attributes that are about to become invalid.
1617  // HWASan checks read from shadow, which invalidates memory(argmem: *)
1618  // Short granule checks on function arguments read from the argument memory
1619  // (last byte of the granule), which invalidates writeonly.
1620  F.removeFnAttr(llvm::Attribute::Memory);
1621  for (auto &A : F.args())
1622    A.removeAttr(llvm::Attribute::WriteOnly);
1623
1624  BasicBlock::iterator InsertPt = F.getEntryBlock().begin();
1625  IRBuilder<> EntryIRB(&F.getEntryBlock(), InsertPt);
1626  emitPrologue(EntryIRB,
1627               /*WithFrameRecord*/ ClRecordStackHistory != none &&
1628                   Mapping.WithFrameRecord &&
1629                   !SInfo.AllocasToInstrument.empty());
1630
1631  if (!SInfo.AllocasToInstrument.empty()) {
1634    const LoopInfo &LI = FAM.getResult<LoopAnalysis>(F);
1635    Value *StackTag = getStackBaseTag(EntryIRB);
1636    Value *UARTag = getUARTag(EntryIRB);
1637    instrumentStack(SInfo, StackTag, UARTag, DT, PDT, LI);
1638  }
1639
1640  // If we split the entry block, move any allocas that were originally in the
1641  // entry block back into the entry block so that they aren't treated as
1642  // dynamic allocas.
1643  if (EntryIRB.GetInsertBlock() != &F.getEntryBlock()) {
1644    InsertPt = F.getEntryBlock().begin();
1645    for (Instruction &I :
1646         llvm::make_early_inc_range(*EntryIRB.GetInsertBlock())) {
1647      if (auto *AI = dyn_cast<AllocaInst>(&I))
1648        if (isa<ConstantInt>(AI->getArraySize()))
1649          I.moveBefore(F.getEntryBlock(), InsertPt);
1650    }
1651  }
1652
  // Lazy DTU batches CFG updates from the per-access block splits.
1656  DomTreeUpdater DTU(DT, PDT, DomTreeUpdater::UpdateStrategy::Lazy);
1657  for (auto &Operand : OperandsToInstrument)
1658    instrumentMemAccess(Operand, DTU, LI);
1659  DTU.flush();
1660
1661  if (ClInstrumentMemIntrinsics && !IntrinToInstrument.empty()) {
1662    for (auto *Inst : IntrinToInstrument)
1663      instrumentMemIntrinsic(Inst);
1664  }
1665
  // Reset per-function cached state for the next function.
1666  ShadowBase = nullptr;
1667  StackBaseTag = nullptr;
1668  CachedFP = nullptr;
1669}
1670
// Tag a single global: pad its initializer to granule size (embedding the
// short-granule tag), create the replacement tagged global plus one or more
// hwasan_globals descriptors for the runtime, and alias the old name to the
// tagged address.
// NOTE(review): this doxygen-derived view elides several original lines
// (1682 — the Padding constant; 1719-20, 1731, 1735-36 — ConstantExpr /
// metadata construction heads); the code below is otherwise verbatim.
1671void HWAddressSanitizer::instrumentGlobal(GlobalVariable *GV, uint8_t Tag) {
1672  assert(!UsePageAliases);
1673  Constant *Initializer = GV->getInitializer();
1674  uint64_t SizeInBytes =
1675      M.getDataLayout().getTypeAllocSize(Initializer->getType());
1676  uint64_t NewSize = alignTo(SizeInBytes, Mapping.getObjectAlignment());
1677  if (SizeInBytes != NewSize) {
1678    // Pad the initializer out to the next multiple of 16 bytes and add the
1679    // required short granule tag.
1680    std::vector<uint8_t> Init(NewSize - SizeInBytes, 0);
1681    Init.back() = Tag;
1683    Initializer = ConstantStruct::getAnon({Initializer, Padding});
1684  }
1685
1686  auto *NewGV = new GlobalVariable(M, Initializer->getType(), GV->isConstant(),
1687                                   GlobalValue::ExternalLinkage, Initializer,
1688                                   GV->getName() + ".hwasan");
1689  NewGV->copyAttributesFrom(GV);
1690  NewGV->setLinkage(GlobalValue::PrivateLinkage);
1691  NewGV->copyMetadata(GV, 0);
1692  NewGV->setAlignment(
1693      std::max(GV->getAlign().valueOrOne(), Mapping.getObjectAlignment()));
1694
1695  // It is invalid to ICF two globals that have different tags. In the case
1696  // where the size of the global is a multiple of the tag granularity the
1697  // contents of the globals may be the same but the tags (i.e. symbol values)
1698  // may be different, and the symbols are not considered during ICF. In the
1699  // case where the size is not a multiple of the granularity, the short granule
1700  // tags would discriminate two globals with different tags, but there would
1701  // otherwise be nothing stopping such a global from being incorrectly ICF'd
1702  // with an uninstrumented (i.e. tag 0) global that happened to have the short
1703  // granule tag in the last byte.
1704  NewGV->setUnnamedAddr(GlobalValue::UnnamedAddr::None);
1705
1706  // Descriptor format (assuming little-endian):
1707  // bytes 0-3: relative address of global
1708  // bytes 4-6: size of global (16MB ought to be enough for anyone, but in case
1709  // it isn't, we create multiple descriptors)
1710  // byte 7: tag
1711  auto *DescriptorTy = StructType::get(Int32Ty, Int32Ty);
1712  const uint64_t MaxDescriptorSize = 0xfffff0;
1713  for (uint64_t DescriptorPos = 0; DescriptorPos < SizeInBytes;
1714       DescriptorPos += MaxDescriptorSize) {
1715    auto *Descriptor =
1716        new GlobalVariable(M, DescriptorTy, true, GlobalValue::PrivateLinkage,
1717                           nullptr, GV->getName() + ".hwasan.descriptor");
1718    auto *GVRelPtr = ConstantExpr::getTrunc(
1721            ConstantExpr::getPtrToInt(NewGV, Int64Ty),
1722            ConstantExpr::getPtrToInt(Descriptor, Int64Ty)),
1723        ConstantInt::get(Int64Ty, DescriptorPos)),
1724        Int32Ty);
1725    uint32_t Size = std::min(SizeInBytes - DescriptorPos, MaxDescriptorSize);
1726    auto *SizeAndTag = ConstantInt::get(Int32Ty, Size | (uint32_t(Tag) << 24));
1727    Descriptor->setComdat(NewGV->getComdat());
1728    Descriptor->setInitializer(ConstantStruct::getAnon({GVRelPtr, SizeAndTag}));
1729    Descriptor->setSection("hwasan_globals");
1730    Descriptor->setMetadata(LLVMContext::MD_associated,
1732    appendToCompilerUsed(M, Descriptor);
1733  }
1734
  // The original symbol becomes an alias to the tagged address of the new
  // global.
1737      ConstantExpr::getPtrToInt(NewGV, Int64Ty),
1738      ConstantInt::get(Int64Ty, uint64_t(Tag) << PointerTagShift)),
1739      GV->getType());
1740  auto *Alias = GlobalAlias::create(GV->getValueType(), GV->getAddressSpace(),
1741                                    GV->getLinkage(), "", Aliasee, &M);
1742  Alias->setVisibility(GV->getVisibility());
1743  Alias->takeName(GV);
1744  GV->replaceAllUsesWith(Alias);
1745  GV->eraseFromParent();
1746}
1747
// Select the module's taggable globals and tag each with a value derived from
// a hash of the source file name (for deterministic but well-spread tags).
// NOTE(review): this doxygen-derived view elides original line 1751 (an
// additional skip condition at the top of the loop); the code below is
// otherwise verbatim.
1748void HWAddressSanitizer::instrumentGlobals() {
1749  std::vector<GlobalVariable *> Globals;
1750  for (GlobalVariable &GV : M.globals()) {
1752      continue;
1753
1754    if (GV.isDeclarationForLinker() || GV.getName().starts_with("llvm.") ||
1755        GV.isThreadLocal())
1756      continue;
1757
1758    // Common symbols can't have aliases point to them, so they can't be tagged.
1759    if (GV.hasCommonLinkage())
1760      continue;
1761
1762    // Globals with custom sections may be used in __start_/__stop_ enumeration,
1763    // which would be broken both by adding tags and potentially by the extra
1764    // padding/alignment that we insert.
1765    if (GV.hasSection())
1766      continue;
1767
1768    Globals.push_back(&GV);
1769  }
1770
  // Seed the tag sequence from the source file name so different TUs get
  // different (but reproducible) tags.
1771  MD5 Hasher;
1772  Hasher.update(M.getSourceFileName());
1773  MD5::MD5Result Hash;
1774  Hasher.final(Hash);
1775  uint8_t Tag = Hash[0];
1776
1777  assert(TagMaskByte >= 16);
1778
1779  for (GlobalVariable *GV : Globals) {
1780    // Don't allow globals to be tagged with something that looks like a
1781    // short-granule tag, otherwise we lose inter-granule overflow detection, as
1782    // the fast path shadow-vs-address check succeeds.
1783    if (Tag < 16 || Tag > TagMaskByte)
1784      Tag = 16;
1785    instrumentGlobal(GV, Tag++);
1786  }
1787}
1788
// Route every sanitized function's unwinding through a generated thunk that
// calls __hwasan_personality_wrapper, so stack frames are untagged as the
// unwinder passes them. One thunk is created per distinct original
// personality function (including "none").
// NOTE(review): this doxygen-derived view elides original lines 1796 (the
// PersonalityFns map declaration) and 1826-27 (the thunk linkage arguments);
// the code below is otherwise verbatim.
1789void HWAddressSanitizer::instrumentPersonalityFunctions() {
1790  // We need to untag stack frames as we unwind past them. That is the job of
1791  // the personality function wrapper, which either wraps an existing
1792  // personality function or acts as a personality function on its own. Each
1793  // function that has a personality function or that can be unwound past has
1794  // its personality function changed to a thunk that calls the personality
1795  // function wrapper in the runtime.
1797  for (Function &F : M) {
1798    if (F.isDeclaration() || !F.hasFnAttribute(Attribute::SanitizeHWAddress))
1799      continue;
1800
1801    if (F.hasPersonalityFn()) {
1802      PersonalityFns[F.getPersonalityFn()->stripPointerCasts()].push_back(&F);
1803    } else if (!F.hasFnAttribute(Attribute::NoUnwind)) {
      // No personality but unwindable: group under the null key so it gets
      // the bare wrapper thunk.
1804      PersonalityFns[nullptr].push_back(&F);
1805    }
1806  }
1807
1808  if (PersonalityFns.empty())
1809    return;
1810
1811  FunctionCallee HwasanPersonalityWrapper = M.getOrInsertFunction(
1812      "__hwasan_personality_wrapper", Int32Ty, Int32Ty, Int32Ty, Int64Ty, PtrTy,
1813      PtrTy, PtrTy, PtrTy, PtrTy);
1814  FunctionCallee UnwindGetGR = M.getOrInsertFunction("_Unwind_GetGR", VoidTy);
1815  FunctionCallee UnwindGetCFA = M.getOrInsertFunction("_Unwind_GetCFA", VoidTy);
1816
1817  for (auto &P : PersonalityFns) {
1818    std::string ThunkName = kHwasanPersonalityThunkName;
1819    if (P.first)
1820      ThunkName += ("." + P.first->getName()).str();
1821    FunctionType *ThunkFnTy = FunctionType::get(
1822        Int32Ty, {Int32Ty, Int32Ty, Int64Ty, PtrTy, PtrTy}, false);
1823    bool IsLocal = P.first && (!isa<GlobalValue>(P.first) ||
1824                               cast<GlobalValue>(P.first)->hasLocalLinkage());
1825    auto *ThunkFn = Function::Create(ThunkFnTy,
1828                                     ThunkName, &M);
1829    if (!IsLocal) {
      // Non-local thunks are deduplicated across TUs via comdat + hidden
      // visibility.
1830      ThunkFn->setVisibility(GlobalValue::HiddenVisibility);
1831      ThunkFn->setComdat(M.getOrInsertComdat(ThunkName));
1832    }
1833
1834    auto *BB = BasicBlock::Create(*C, "entry", ThunkFn);
1835    IRBuilder<> IRB(BB);
1836    CallInst *WrapperCall = IRB.CreateCall(
1837        HwasanPersonalityWrapper,
1838        {ThunkFn->getArg(0), ThunkFn->getArg(1), ThunkFn->getArg(2),
1839         ThunkFn->getArg(3), ThunkFn->getArg(4),
1840         P.first ? P.first : Constant::getNullValue(PtrTy),
1841         UnwindGetGR.getCallee(), UnwindGetCFA.getCallee()});
1842    WrapperCall->setTailCall();
1843    IRB.CreateRet(WrapperCall);
1844
1845    for (Function *F : P.second)
1846      F->setPersonalityFn(ThunkFn);
1847  }
1848}
1849
// Choose the shadow-mapping strategy for the target/flags: fixed offset
// (Fuchsia / explicit flag / kernel or callback mode), ifunc-resolved base,
// or the default TLS-derived dynamic base.
// NOTE(review): this doxygen-derived view elides the Offset assignments on
// original lines 1863, 1873, 1878 and 1883; the code below is otherwise
// verbatim.
1850void HWAddressSanitizer::ShadowMapping::init(Triple &TargetTriple,
1851                                             bool InstrumentWithCalls) {
1852  Scale = kDefaultShadowScale;
1853  if (TargetTriple.isOSFuchsia()) {
1854    // Fuchsia is always PIE, which means that the beginning of the address
1855    // space is always available.
1856    InGlobal = false;
1857    InTls = false;
1858    Offset = 0;
1859    WithFrameRecord = true;
1860  } else if (ClMappingOffset.getNumOccurrences() > 0) {
    // Explicit offset requested on the command line.
1861    InGlobal = false;
1862    InTls = false;
1864    WithFrameRecord = false;
1865  } else if (ClEnableKhwasan || InstrumentWithCalls) {
1866    InGlobal = false;
1867    InTls = false;
1868    Offset = 0;
1869    WithFrameRecord = false;
1870  } else if (ClWithIfunc) {
1871    InGlobal = true;
1872    InTls = false;
1874    WithFrameRecord = false;
1875  } else if (ClWithTls) {
1876    InGlobal = false;
1877    InTls = true;
1879    WithFrameRecord = true;
1880  } else {
1881    InGlobal = false;
1882    InTls = false;
1884    WithFrameRecord = false;
1885  }
1886}
static cl::opt< size_t > ClMaxLifetimes("stack-tagging-max-lifetimes-for-alloca", cl::Hidden, cl::init(3), cl::ReallyHidden, cl::desc("How many lifetime ends to handle for a single alloca."), cl::Optional)
static cl::opt< StackTaggingRecordStackHistoryMode > ClRecordStackHistory("stack-tagging-record-stack-history", cl::desc("Record stack frames with tagged allocations in a thread-local " "ring buffer"), cl::values(clEnumVal(none, "Do not record stack ring history"), clEnumVal(instr, "Insert instructions into the prologue for " "storing into the stack ring buffer")), cl::Hidden, cl::init(none))
static const uint64_t kDefaultShadowScale
static cl::opt< std::string > ClMemoryAccessCallbackPrefix("asan-memory-access-callback-prefix", cl::desc("Prefix for memory access callbacks"), cl::Hidden, cl::init("__asan_"))
static cl::opt< bool > ClInstrumentWrites("asan-instrument-writes", cl::desc("instrument write instructions"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClInstrumentByval("asan-instrument-byval", cl::desc("instrument byval call arguments"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClGlobals("asan-globals", cl::desc("Handle global objects"), cl::Hidden, cl::init(true))
static const uint64_t kDynamicShadowSentinel
static cl::opt< bool > ClInstrumentAtomics("asan-instrument-atomics", cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden, cl::init(true))
static const size_t kNumberOfAccessSizes
static cl::opt< bool > ClInstrumentReads("asan-instrument-reads", cl::desc("instrument read instructions"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClWithIfunc("asan-with-ifunc", cl::desc("Access dynamic shadow through an ifunc global on " "platforms that support this"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClKasanMemIntrinCallbackPrefix("asan-kernel-mem-intrinsic-prefix", cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden, cl::init(false))
static cl::opt< uint64_t > ClMappingOffset("asan-mapping-offset", cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"), cl::Hidden, cl::init(0))
Expand Atomic instructions
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
#define clEnumVal(ENUMVAL, DESC)
Definition: CommandLine.h:684
This file contains the declarations for the subclasses of Constant, which represent the different fla...
#define LLVM_DEBUG(X)
Definition: Debug.h:101
This file contains constants used for implementing Dwarf debug support.
uint64_t Addr
std::string Name
uint64_t Size
std::optional< std::vector< StOtherPiece > > Other
Definition: ELFYAML.cpp:1294
bool End
Definition: ELF_riscv.cpp:480
#define DEBUG_TYPE
This is the interface for a simple mod/ref and alias analysis over globals.
static cl::opt< float > ClRandomSkipRate("hwasan-random-rate", cl::desc("Probability value in the range [0.0, 1.0] " "to keep instrumentation of a function."))
static size_t TypeSizeToSizeIndex(uint32_t TypeSize)
static cl::opt< bool > ClInstrumentWrites("hwasan-instrument-writes", cl::desc("instrument write instructions"), cl::Hidden, cl::init(true))
static const size_t kDefaultShadowScale
static cl::opt< RecordStackHistoryMode > ClRecordStackHistory("hwasan-record-stack-history", cl::desc("Record stack frames with tagged allocations in a thread-local " "ring buffer"), cl::values(clEnumVal(none, "Do not record stack ring history"), clEnumVal(instr, "Insert instructions into the prologue for " "storing into the stack ring buffer directly"), clEnumVal(libcall, "Add a call to __hwasan_add_frame_record for " "storing into the stack ring buffer")), cl::Hidden, cl::init(instr))
const char kHwasanModuleCtorName[]
static cl::opt< int > ClMatchAllTag("hwasan-match-all-tag", cl::desc("don't report bad accesses via pointers with this tag"), cl::Hidden, cl::init(-1))
static cl::opt< bool > ClUseAfterScope("hwasan-use-after-scope", cl::desc("detect use after scope within function"), cl::Hidden, cl::init(true))
const char kHwasanNoteName[]
static cl::opt< int > ClHotPercentileCutoff("hwasan-percentile-cutoff-hot", cl::desc("Hot percentile cutoff."))
static const unsigned kShadowBaseAlignment
static cl::opt< bool > ClGenerateTagsWithCalls("hwasan-generate-tags-with-calls", cl::desc("generate new tags with runtime library calls"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClInstrumentReads("hwasan-instrument-reads", cl::desc("instrument read instructions"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClInstrumentWithCalls("hwasan-instrument-with-calls", cl::desc("instrument reads and writes with callbacks"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClUseStackSafety("hwasan-use-stack-safety", cl::Hidden, cl::init(true), cl::Hidden, cl::desc("Use Stack Safety analysis results"), cl::Optional)
static cl::opt< bool > ClInstrumentAtomics("hwasan-instrument-atomics", cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClInstrumentStack("hwasan-instrument-stack", cl::desc("instrument stack (allocas)"), cl::Hidden, cl::init(true))
static cl::opt< uint64_t > ClMappingOffset("hwasan-mapping-offset", cl::desc("HWASan shadow mapping offset [EXPERIMENTAL]"), cl::Hidden, cl::init(0))
static cl::opt< bool > ClRecover("hwasan-recover", cl::desc("Enable recovery mode (continue-after-error)."), cl::Hidden, cl::init(false))
static cl::opt< bool > ClEnableKhwasan("hwasan-kernel", cl::desc("Enable KernelHWAddressSanitizer instrumentation"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClInlineAllChecks("hwasan-inline-all-checks", cl::desc("inline all checks"), cl::Hidden, cl::init(false))
static const uint64_t kDynamicShadowSentinel
static cl::opt< bool > ClUsePageAliases("hwasan-experimental-use-page-aliases", cl::desc("Use page aliasing in HWASan"), cl::Hidden, cl::init(false))
static cl::opt< std::string > ClMemoryAccessCallbackPrefix("hwasan-memory-access-callback-prefix", cl::desc("Prefix for memory access callbacks"), cl::Hidden, cl::init("__hwasan_"))
static cl::opt< bool > ClInstrumentMemIntrinsics("hwasan-instrument-mem-intrinsics", cl::desc("instrument memory intrinsics"), cl::Hidden, cl::init(true))
static const size_t kNumberOfAccessSizes
static cl::opt< bool > ClWithTls("hwasan-with-tls", cl::desc("Access dynamic shadow through a thread-local pointer on " "platforms that support this"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClGlobals("hwasan-globals", cl::desc("Instrument globals"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClKasanMemIntrinCallbackPrefix("hwasan-kernel-mem-intrinsic-prefix", cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClInstrumentByval("hwasan-instrument-byval", cl::desc("instrument byval arguments"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClUseShortGranules("hwasan-use-short-granules", cl::desc("use short granules in allocas and outlined checks"), cl::Hidden, cl::init(false))
const char kHwasanShadowMemoryDynamicAddress[]
static unsigned getPointerOperandIndex(Instruction *I)
#define DEBUG_TYPE
static cl::opt< bool > ClInlineFastPathChecks("hwasan-inline-fast-path-checks", cl::desc("inline all fast path checks"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClInstrumentPersonalityFunctions("hwasan-instrument-personality-functions", cl::desc("instrument personality functions"), cl::Hidden)
const char kHwasanInitName[]
RecordStackHistoryMode
static cl::opt< bool > ClInstrumentLandingPads("hwasan-instrument-landing-pads", cl::desc("instrument landing pads"), cl::Hidden, cl::init(false))
static cl::opt< size_t > ClMaxLifetimes("hwasan-max-lifetimes-for-alloca", cl::Hidden, cl::init(3), cl::ReallyHidden, cl::desc("How many lifetime ends to handle for a single alloca."), cl::Optional)
const char kHwasanPersonalityThunkName[]
static cl::opt< bool > ClWithIfunc("hwasan-with-ifunc", cl::desc("Access dynamic shadow through an ifunc global on " "platforms that support this"), cl::Hidden, cl::init(false))
static void emitRemark(const Function &F, OptimizationRemarkEmitter &ORE, bool Skip)
IRTranslator LLVM IR MI
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file implements a map that provides insertion order iteration.
Module.h This file contains the declarations for the Module class.
uint64_t IntrinsicInst * II
#define P(N)
FunctionAnalysisManager FAM
ModuleAnalysisManager MAM
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file contains some templates that are useful if you are working with the STL at all.
raw_pwrite_stream & OS
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:167
This file contains some functions that are useful when dealing with strings.
an instruction to allocate memory on the stack
Definition: Instructions.h:61
PointerType * getType() const
Overload to return most specific pointer type.
Definition: Instructions.h:97
const Value * getArraySize() const
Get the number of elements allocated.
Definition: Instructions.h:93
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:253
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
Definition: PassManager.h:424
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:405
static ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
Definition: Type.cpp:647
An instruction that atomically checks whether a specified value is in a memory location,...
Definition: Instructions.h:495
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:696
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition: BasicBlock.h:202
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:209
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:167
Analysis pass which computes BlockFrequencyInfo.
This class represents a function call, abstracting a target machine's calling convention.
void setTailCall(bool IsTc=true)
static Constant * get(LLVMContext &Context, ArrayRef< ElementTy > Elts)
get() constructor - Return a constant with array type with an element count and element type matching...
Definition: Constants.h:706
static Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2269
static Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
Definition: Constants.cpp:2606
static Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2255
static Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
Definition: Constants.cpp:2599
static Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2241
static Constant * getAnon(ArrayRef< Constant * > V, bool Packed=false)
Return an anonymous struct that has the specified elements.
Definition: Constants.h:477
This is an important base class in LLVM.
Definition: Constant.h:42
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:370
Analysis pass which computes a DominatorTree.
Definition: Dominators.h:279
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:162
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
Definition: DerivedTypes.h:168
static FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
static Function * Create(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Definition: Function.h:165
void flush()
Apply all pending updates to available trees and flush all BasicBlocks awaiting deletion.
static GlobalAlias * create(Type *Ty, unsigned AddressSpace, LinkageTypes Linkage, const Twine &Name, Constant *Aliasee, Module *Parent)
If a parent module is specified, the alias is automatically inserted into the end of the specified mo...
Definition: Globals.cpp:544
MaybeAlign getAlign() const
Returns the alignment of the given variable or function.
Definition: GlobalObject.h:80
void setComdat(Comdat *C)
Definition: Globals.cpp:206
bool hasSection() const
Check if this global has a custom object file section.
Definition: GlobalObject.h:110
const SanitizerMetadata & getSanitizerMetadata() const
Definition: Globals.cpp:237
bool isThreadLocal() const
If the value is "Thread Local", its value isn't shared by the threads.
Definition: GlobalValue.h:263
VisibilityTypes getVisibility() const
Definition: GlobalValue.h:248
LinkageTypes getLinkage() const
Definition: GlobalValue.h:546
bool isDeclarationForLinker() const
Definition: GlobalValue.h:618
bool hasSanitizerMetadata() const
Definition: GlobalValue.h:355
unsigned getAddressSpace() const
Definition: GlobalValue.h:205
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:656
PointerType * getType() const
Global values are always pointers.
Definition: GlobalValue.h:294
@ HiddenVisibility
The GV is hidden.
Definition: GlobalValue.h:68
bool hasCommonLinkage() const
Definition: GlobalValue.h:532
@ PrivateLinkage
Like Internal, but omit from symbol table.
Definition: GlobalValue.h:60
@ InternalLinkage
Rename collisions when linking (static functions).
Definition: GlobalValue.h:59
@ ExternalLinkage
Externally visible function.
Definition: GlobalValue.h:52
@ LinkOnceODRLinkage
Same, but only replaced by something equivalent.
Definition: GlobalValue.h:55
Type * getValueType() const
Definition: GlobalValue.h:296
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
void eraseFromParent()
eraseFromParent - This method unlinks 'this' from the containing module and deletes it.
Definition: Globals.cpp:481
Analysis pass providing a never-invalidated alias analysis result.
PreservedAnalyses run(Module &M, ModuleAnalysisManager &MAM)
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
Value * CreateConstGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0, const Twine &Name="")
Definition: IRBuilder.h:1884
Value * CreatePointerCast(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2175
CallInst * CreateMemSet(Value *Ptr, Value *Val, uint64_t Size, MaybeAlign Align, bool isVolatile=false, MDNode *TBAATag=nullptr, MDNode *ScopeTag=nullptr, MDNode *NoAliasTag=nullptr)
Create and insert a memset to the specified pointer and the specified value.
Definition: IRBuilder.h:592
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2127
Value * CreateTypeSize(Type *DstType, TypeSize Size)
Create an expression which evaluates to the number of units in Size at runtime.
Definition: IRBuilder.cpp:105
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition: IRBuilder.h:1442
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
Definition: IRBuilder.h:523
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition: IRBuilder.h:1981
ReturnInst * CreateRet(Value *V)
Create a 'ret <val>' instruction.
Definition: IRBuilder.h:1100
BasicBlock * GetInsertBlock() const
Definition: IRBuilder.h:171
Value * CreateUDiv(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition: IRBuilder.h:1383
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2250
Value * CreateICmpUGT(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2254
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Definition: IRBuilder.h:1795
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1421
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Definition: IRBuilder.h:2026
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1480
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
Definition: IRBuilder.h:1808
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1332
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)
Definition: IRBuilder.h:2012
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1502
PointerType * getPtrTy(unsigned AddrSpace=0)
Fetch the type representing a pointer.
Definition: IRBuilder.h:566
Value * CreateICmpUGE(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2258
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
Definition: IRBuilder.h:2201
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition: IRBuilder.h:177
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args=std::nullopt, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2417
Value * CreateAShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition: IRBuilder.h:1461
Value * CreateXor(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1524
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:2671
static InlineAsm * get(FunctionType *Ty, StringRef AsmString, StringRef Constraints, bool hasSideEffects, bool isAlignStack=false, AsmDialect asmDialect=AD_ATT, bool canThrow=false)
InlineAsm::get - Return the specified uniqued inline asm string.
Definition: InlineAsm.cpp:43
An analysis over an "outer" IR unit that provides access to an analysis manager over an "inner" IR un...
Definition: PassManager.h:563
const Instruction * getNextNonDebugInstruction(bool SkipPseudoOp=false) const
Return a pointer to the next non-debug instruction in the same basic block as 'this',...
void setSuccessor(unsigned Idx, BasicBlock *BB)
Update the specified successor to point at the provided block.
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:48
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
An instruction for reading from memory.
Definition: Instructions.h:174
Analysis pass that exposes the LoopInfo for a function.
Definition: LoopInfo.h:571
Definition: MD5.h:41
void update(ArrayRef< uint8_t > Data)
Updates the hash for the byte stream provided.
Definition: MD5.cpp:189
void final(MD5Result &Result)
Finishes off the hash and puts the result in result.
Definition: MD5.cpp:234
MDNode * createUnlikelyBranchWeights()
Return metadata containing two branch weights, with significant bias towards false destination.
Definition: MDBuilder.cpp:47
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1541
This class implements a map that also provides access to all stored values in a deterministic order.
Definition: MapVector.h:36
bool empty() const
Definition: MapVector.h:79
This is the common base class for memset/memcpy/memmove.
This class wraps the llvm.memcpy/memmove intrinsics.
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
Constant * getOrInsertGlobal(StringRef Name, Type *Ty, function_ref< GlobalVariable *()> CreateGlobalCallback)
Look up the specified global in the module symbol table.
Definition: Module.cpp:223
The optimization diagnostic interface.
void emit(DiagnosticInfoOptimizationBase &OptDiag)
Output the remark via the diagnostic handler and to the optimization record file.
Diagnostic information for missed-optimization remarks.
Diagnostic information for applied optimization remarks.
An analysis over an "inner" IR unit that provides access to an analysis manager over a "outer" IR uni...
Definition: PassManager.h:688
Analysis pass which computes a PostDominatorTree.
PostDominatorTree Class - Concrete subclass of DominatorTree that is used to compute the post-dominat...
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:111
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
Definition: Analysis.h:114
void abandon()
Mark an analysis as abandoned.
Definition: Analysis.h:164
void preserve()
Mark an analysis as preserved.
Definition: Analysis.h:131
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
Analysis providing profile information.
bool empty() const
Definition: SmallVector.h:94
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:950
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
This pass performs the global (interprocedural) stack safety analysis (new pass manager).
bool stackAccessIsSafe(const Instruction &I) const
An instruction for storing to memory.
Definition: Instructions.h:290
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
std::string str() const
str - Get the contents as an std::string.
Definition: StringRef.h:215
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition: StringRef.h:250
static StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
Definition: Type.cpp:373
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
bool isAndroidVersionLT(unsigned Major) const
Definition: Triple.h:769
bool isAndroid() const
Tests whether the target is Android.
Definition: Triple.h:767
@ aarch64_be
Definition: Triple.h:52
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition: Triple.h:371
bool isRISCV64() const
Tests whether the target is 64-bit RISC-V.
Definition: Triple.h:987
bool isAArch64() const
Tests whether the target is AArch64 (little and big endian).
Definition: Triple.h:909
bool isOSFuchsia() const
Definition: Triple.h:586
bool isOSBinFormatELF() const
Tests whether the OS uses the ELF binary format.
Definition: Triple.h:717
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static Type * getVoidTy(LLVMContext &C)
static IntegerType * getInt8Ty(LLVMContext &C)
static IntegerType * getInt32Ty(LLVMContext &C)
static IntegerType * getInt64Ty(LLVMContext &C)
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
static ValueAsMetadata * get(Value *V)
Definition: Metadata.cpp:495
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
void setName(const Twine &Name)
Change the name of the value.
Definition: Value.cpp:377
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:534
void replaceUsesWithIf(Value *New, llvm::function_ref< bool(Use &U)> ShouldReplace)
Go through the uses list for this definition and make each use point to "V" if the callback ShouldRep...
Definition: Value.cpp:542
bool hasName() const
Definition: Value.h:261
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
int getNumOccurrences() const
Definition: CommandLine.h:399
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition: ilist_node.h:32
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ NT_LLVM_HWASAN_GLOBALS
Definition: ELF.h:1711
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=std::nullopt)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:1513
@ ReallyHidden
Definition: CommandLine.h:138
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
Definition: CommandLine.h:711
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
Value * getFP(IRBuilder<> &IRB)
bool isStandardLifetime(const SmallVectorImpl< IntrinsicInst * > &LifetimeStart, const SmallVectorImpl< IntrinsicInst * > &LifetimeEnd, const DominatorTree *DT, const LoopInfo *LI, size_t MaxLifetimes)
bool forAllReachableExits(const DominatorTree &DT, const PostDominatorTree &PDT, const LoopInfo &LI, const Instruction *Start, const SmallVectorImpl< IntrinsicInst * > &Ends, const SmallVectorImpl< Instruction * > &RetVec, llvm::function_ref< void(Instruction *)> Callback)
uint64_t getAllocaSizeInBytes(const AllocaInst &AI)
Value * getAndroidSlotPtr(IRBuilder<> &IRB, int Slot)
Value * readRegister(IRBuilder<> &IRB, StringRef Name)
void annotateDebugRecords(AllocaInfo &Info, unsigned int Tag)
void alignAndPadAlloca(memtag::AllocaInfo &Info, llvm::Align Align)
Value * getPC(const Triple &TargetTriple, IRBuilder<> &IRB)
bool isLifetimeIntrinsic(Value *V)
DiagnosticInfoOptimizationBase::Argument NV
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:480
UnaryFunction for_each(R &&Range, UnaryFunction F)
Provide wrappers to std::for_each which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1715
AllocaInst * findAllocaForValue(Value *V, bool OffsetZero=false)
Returns unique alloca where the value comes from, or nullptr.
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition: STLExtras.h:656
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition: MathExtras.h:296
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition: bit.h:215
std::pair< Function *, FunctionCallee > getOrCreateSanitizerCtorAndInitFunctions(Module &M, StringRef CtorName, StringRef InitName, ArrayRef< Type * > InitArgTypes, ArrayRef< Value * > InitArgs, function_ref< void(Function *, FunctionCallee)> FunctionsCreatedCallback, StringRef VersionCheckName=StringRef(), bool Weak=false)
Creates sanitizer constructor function lazily.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
void appendToCompilerUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.compiler.used list.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition: Alignment.h:155
void appendToGlobalCtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Append F to the list of global ctors of module M with the given Priority.
Definition: ModuleUtils.cpp:74
Instruction * SplitBlockAndInsertIfThen(Value *Cond, BasicBlock::iterator SplitBefore, bool Unreachable, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, BasicBlock *ThenBlock=nullptr)
Split the containing block at the specified instruction - everything before SplitBefore stays in the ...
void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI, const TargetLibraryInfo *TLI)
Given a CallInst, check if it calls a string function known to CodeGen, and mark it with NoBuiltin if...
Definition: Local.cpp:4103
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
Definition: Alignment.h:141
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition: PassManager.h:69
MapVector< AllocaInst *, AllocaInfo > AllocasToInstrument
SmallVector< Instruction *, 4 > UnrecognizedLifetimes
SmallVector< Instruction *, 8 > RetVec