LLVM 19.0.0git
HWAddressSanitizer.cpp
1//===- HWAddressSanitizer.cpp - memory access error detector --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file is a part of HWAddressSanitizer, a basic correctness checker
11/// for memory accesses based on tagged addressing.
12//===----------------------------------------------------------------------===//
13
15#include "llvm/ADT/MapVector.h"
16#include "llvm/ADT/STLExtras.h"
18#include "llvm/ADT/Statistic.h"
20#include "llvm/ADT/StringRef.h"
32#include "llvm/IR/Attributes.h"
33#include "llvm/IR/BasicBlock.h"
34#include "llvm/IR/Constant.h"
35#include "llvm/IR/Constants.h"
36#include "llvm/IR/DataLayout.h"
39#include "llvm/IR/Dominators.h"
40#include "llvm/IR/Function.h"
41#include "llvm/IR/IRBuilder.h"
42#include "llvm/IR/InlineAsm.h"
44#include "llvm/IR/Instruction.h"
47#include "llvm/IR/Intrinsics.h"
48#include "llvm/IR/LLVMContext.h"
49#include "llvm/IR/MDBuilder.h"
50#include "llvm/IR/Module.h"
51#include "llvm/IR/Type.h"
52#include "llvm/IR/Value.h"
55#include "llvm/Support/Debug.h"
65#include <optional>
66#include <random>
67
68using namespace llvm;
69
70#define DEBUG_TYPE "hwasan"
71
72const char kHwasanModuleCtorName[] = "hwasan.module_ctor";
73const char kHwasanNoteName[] = "hwasan.note";
74const char kHwasanInitName[] = "__hwasan_init";
75const char kHwasanPersonalityThunkName[] = "__hwasan_personality_thunk";
76
77const char kHwasanShadowMemoryDynamicAddress[] =
78 "__hwasan_shadow_memory_dynamic_address";
79
80// Accesses sizes are powers of two: 1, 2, 4, 8, 16.
81static const size_t kNumberOfAccessSizes = 5;
82
83static const size_t kDefaultShadowScale = 4;
84static const uint64_t kDynamicShadowSentinel =
85 std::numeric_limits<uint64_t>::max();
86
87static const unsigned kShadowBaseAlignment = 32;
88
89static cl::opt<std::string>
90 ClMemoryAccessCallbackPrefix("hwasan-memory-access-callback-prefix",
91 cl::desc("Prefix for memory access callbacks"),
92 cl::Hidden, cl::init("__hwasan_"));
93
94static cl::opt<bool> ClKasanMemIntrinCallbackPrefix(
95 "hwasan-kernel-mem-intrinsic-prefix",
96 cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden,
97 cl::init(false));
98
99static cl::opt<bool> ClInstrumentWithCalls(
100 "hwasan-instrument-with-calls",
101 cl::desc("instrument reads and writes with callbacks"), cl::Hidden,
102 cl::init(false));
103
104static cl::opt<bool> ClInstrumentReads("hwasan-instrument-reads",
105 cl::desc("instrument read instructions"),
106 cl::Hidden, cl::init(true));
107
108static cl::opt<bool>
109 ClInstrumentWrites("hwasan-instrument-writes",
110 cl::desc("instrument write instructions"), cl::Hidden,
111 cl::init(true));
112
113static cl::opt<bool> ClInstrumentAtomics(
114 "hwasan-instrument-atomics",
115 cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
116 cl::init(true));
117
118static cl::opt<bool> ClInstrumentByval("hwasan-instrument-byval",
119 cl::desc("instrument byval arguments"),
120 cl::Hidden, cl::init(true));
121
122static cl::opt<bool>
123 ClRecover("hwasan-recover",
124 cl::desc("Enable recovery mode (continue-after-error)."),
125 cl::Hidden, cl::init(false));
126
127static cl::opt<bool> ClInstrumentStack("hwasan-instrument-stack",
128 cl::desc("instrument stack (allocas)"),
129 cl::Hidden, cl::init(true));
130
131static cl::opt<bool>
132 ClUseStackSafety("hwasan-use-stack-safety", cl::Hidden, cl::init(true),
133 cl::Hidden, cl::desc("Use Stack Safety analysis results"),
134 cl::Optional);
135
136static cl::opt<size_t> ClMaxLifetimes(
137 "hwasan-max-lifetimes-for-alloca", cl::Hidden, cl::init(3),
138 cl::ReallyHidden,
139 cl::desc("How many lifetime ends to handle for a single alloca."),
140 cl::Optional);
141
142static cl::opt<bool>
143 ClUseAfterScope("hwasan-use-after-scope",
144 cl::desc("detect use after scope within function"),
145 cl::Hidden, cl::init(true));
146
147static cl::opt<bool> ClGenerateTagsWithCalls(
148 "hwasan-generate-tags-with-calls",
149 cl::desc("generate new tags with runtime library calls"), cl::Hidden,
150 cl::init(false));
151
152static cl::opt<bool> ClGlobals("hwasan-globals", cl::desc("Instrument globals"),
153 cl::Hidden, cl::init(false));
154
155static cl::opt<int> ClMatchAllTag(
156 "hwasan-match-all-tag",
157 cl::desc("don't report bad accesses via pointers with this tag"),
158 cl::Hidden, cl::init(-1));
159
160static cl::opt<bool>
161 ClEnableKhwasan("hwasan-kernel",
162 cl::desc("Enable KernelHWAddressSanitizer instrumentation"),
163 cl::Hidden, cl::init(false));
164
165// These flags allow changing the shadow mapping and control how shadow
166// memory is accessed. The shadow mapping looks like:
167// Shadow = (Mem >> scale) + offset
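// For example, with the default kDefaultShadowScale of 4 each shadow byte
// covers a 16-byte granule, so an access at address A is checked against the
// shadow byte at (A >> 4) + offset.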
168
169static cl::opt<uint64_t>
170 ClMappingOffset("hwasan-mapping-offset",
171 cl::desc("HWASan shadow mapping offset [EXPERIMENTAL]"),
172 cl::Hidden, cl::init(0));
173
174static cl::opt<bool>
175 ClWithIfunc("hwasan-with-ifunc",
176 cl::desc("Access dynamic shadow through an ifunc global on "
177 "platforms that support this"),
178 cl::Hidden, cl::init(false));
179
180static cl::opt<bool> ClWithTls(
181 "hwasan-with-tls",
182 cl::desc("Access dynamic shadow through a thread-local pointer on "
183 "platforms that support this"),
184 cl::Hidden, cl::init(true));
185
186static cl::opt<int> ClHotPercentileCutoff("hwasan-percentile-cutoff-hot",
187 cl::desc("Hot percentile cutoff."));
188
189static cl::opt<float>
190 ClRandomSkipRate("hwasan-random-rate",
191 cl::desc("Probability value in the range [0.0, 1.0] "
192 "to keep instrumentation of a function."));
193
194STATISTIC(NumTotalFuncs, "Number of total funcs");
195STATISTIC(NumInstrumentedFuncs, "Number of instrumented funcs");
196STATISTIC(NumNoProfileSummaryFuncs, "Number of funcs without PS");
197
198// Mode for selecting how to insert frame record info into the stack ring
199// buffer.
200enum RecordStackHistoryMode {
201 // Do not record frame record info.
202 none,
203
204 // Insert instructions into the prologue for storing into the stack ring
205 // buffer directly.
206 instr,
207
208 // Add a call to __hwasan_add_frame_record in the runtime.
209 libcall,
210};
211
212static cl::opt<RecordStackHistoryMode> ClRecordStackHistory(
213 "hwasan-record-stack-history",
214 cl::desc("Record stack frames with tagged allocations in a thread-local "
215 "ring buffer"),
216 cl::values(clEnumVal(none, "Do not record stack ring history"),
217 clEnumVal(instr, "Insert instructions into the prologue for "
218 "storing into the stack ring buffer directly"),
219 clEnumVal(libcall, "Add a call to __hwasan_add_frame_record for "
220 "storing into the stack ring buffer")),
221 cl::Hidden, cl::init(instr));
222
223static cl::opt<bool>
224 ClInstrumentMemIntrinsics("hwasan-instrument-mem-intrinsics",
225 cl::desc("instrument memory intrinsics"),
226 cl::Hidden, cl::init(true));
227
228static cl::opt<bool>
229 ClInstrumentLandingPads("hwasan-instrument-landing-pads",
230 cl::desc("instrument landing pads"), cl::Hidden,
231 cl::init(false));
232
233static cl::opt<bool> ClUseShortGranules(
234 "hwasan-use-short-granules",
235 cl::desc("use short granules in allocas and outlined checks"), cl::Hidden,
236 cl::init(false));
237
238static cl::opt<bool> ClInstrumentPersonalityFunctions(
239 "hwasan-instrument-personality-functions",
240 cl::desc("instrument personality functions"), cl::Hidden);
241
242static cl::opt<bool> ClInlineAllChecks("hwasan-inline-all-checks",
243 cl::desc("inline all checks"),
244 cl::Hidden, cl::init(false));
245
246static cl::opt<bool> ClInlineFastPathChecks("hwasan-inline-fast-path-checks",
247 cl::desc("inline fast path checks"),
248 cl::Hidden, cl::init(false));
249
250// Enabled from clang by "-fsanitize-hwaddress-experimental-aliasing".
251static cl::opt<bool> ClUsePageAliases("hwasan-experimental-use-page-aliases",
252 cl::desc("Use page aliasing in HWASan"),
253 cl::Hidden, cl::init(false));
254
255namespace {
256
257template <typename T> T optOr(cl::opt<T> &Opt, T Other) {
258 return Opt.getNumOccurrences() ? Opt : Other;
259}
260
261bool shouldUsePageAliases(const Triple &TargetTriple) {
262 return ClUsePageAliases && TargetTriple.getArch() == Triple::x86_64;
263}
264
265bool shouldInstrumentStack(const Triple &TargetTriple) {
266 return !shouldUsePageAliases(TargetTriple) && ClInstrumentStack;
267}
268
269bool shouldInstrumentWithCalls(const Triple &TargetTriple) {
270 return optOr(ClInstrumentWithCalls, TargetTriple.getArch() == Triple::x86_64);
271}
272
273bool mightUseStackSafetyAnalysis(bool DisableOptimization) {
274 return optOr(ClUseStackSafety, !DisableOptimization);
275}
276
277bool shouldUseStackSafetyAnalysis(const Triple &TargetTriple,
278 bool DisableOptimization) {
279 return shouldInstrumentStack(TargetTriple) &&
280 mightUseStackSafetyAnalysis(DisableOptimization);
281}
282
283bool shouldDetectUseAfterScope(const Triple &TargetTriple) {
284 return ClUseAfterScope && shouldInstrumentStack(TargetTriple);
285}
286
287/// An instrumentation pass implementing detection of addressability bugs
288/// using tagged pointers.
289class HWAddressSanitizer {
290public:
291 HWAddressSanitizer(Module &M, bool CompileKernel, bool Recover,
292 const StackSafetyGlobalInfo *SSI)
293 : M(M), SSI(SSI) {
294 this->Recover = optOr(ClRecover, Recover);
295 this->CompileKernel = optOr(ClEnableKhwasan, CompileKernel);
296 this->Rng =
297 ClRandomSkipRate.getNumOccurrences() ? M.createRNG("hwasan") : nullptr;
298
299 initializeModule();
300 }
301
302 void sanitizeFunction(Function &F, FunctionAnalysisManager &FAM);
303
304private:
305 struct ShadowTagCheckInfo {
306 Instruction *TagMismatchTerm = nullptr;
307 Value *PtrLong = nullptr;
308 Value *AddrLong = nullptr;
309 Value *PtrTag = nullptr;
310 Value *MemTag = nullptr;
311 };
312
313 bool selectiveInstrumentationShouldSkip(Function &F,
314 FunctionAnalysisManager &FAM) const;
315 void initializeModule();
316 void createHwasanCtorComdat();
317
318 void initializeCallbacks(Module &M);
319
320 Value *getOpaqueNoopCast(IRBuilder<> &IRB, Value *Val);
321
322 Value *getDynamicShadowIfunc(IRBuilder<> &IRB);
323 Value *getShadowNonTls(IRBuilder<> &IRB);
324
325 void untagPointerOperand(Instruction *I, Value *Addr);
326 Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
327
328 int64_t getAccessInfo(bool IsWrite, unsigned AccessSizeIndex);
329 ShadowTagCheckInfo insertShadowTagCheck(Value *Ptr, Instruction *InsertBefore,
330 DomTreeUpdater &DTU, LoopInfo *LI);
331 void instrumentMemAccessOutline(Value *Ptr, bool IsWrite,
332 unsigned AccessSizeIndex,
333 Instruction *InsertBefore,
334 DomTreeUpdater &DTU, LoopInfo *LI);
335 void instrumentMemAccessInline(Value *Ptr, bool IsWrite,
336 unsigned AccessSizeIndex,
337 Instruction *InsertBefore, DomTreeUpdater &DTU,
338 LoopInfo *LI);
339 bool ignoreMemIntrinsic(MemIntrinsic *MI);
340 void instrumentMemIntrinsic(MemIntrinsic *MI);
341 bool instrumentMemAccess(InterestingMemoryOperand &O, DomTreeUpdater &DTU,
342 LoopInfo *LI);
343 bool ignoreAccess(Instruction *Inst, Value *Ptr);
344 void getInterestingMemoryOperands(
345 Instruction *I, const TargetLibraryInfo &TLI,
346 SmallVectorImpl<InterestingMemoryOperand> &Interesting);
347
348 void tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag, size_t Size);
349 Value *tagPointer(IRBuilder<> &IRB, Type *Ty, Value *PtrLong, Value *Tag);
350 Value *untagPointer(IRBuilder<> &IRB, Value *PtrLong);
351 bool instrumentStack(memtag::StackInfo &Info, Value *StackTag, Value *UARTag,
352 const DominatorTree &DT, const PostDominatorTree &PDT,
353 const LoopInfo &LI);
354 bool instrumentLandingPads(SmallVectorImpl<Instruction *> &RetVec);
355 Value *getNextTagWithCall(IRBuilder<> &IRB);
356 Value *getStackBaseTag(IRBuilder<> &IRB);
357 Value *getAllocaTag(IRBuilder<> &IRB, Value *StackTag, unsigned AllocaNo);
358 Value *getUARTag(IRBuilder<> &IRB);
359
360 Value *getHwasanThreadSlotPtr(IRBuilder<> &IRB);
361 Value *applyTagMask(IRBuilder<> &IRB, Value *OldTag);
362 unsigned retagMask(unsigned AllocaNo);
363
364 void emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord);
365
366 void instrumentGlobal(GlobalVariable *GV, uint8_t Tag);
367 void instrumentGlobals();
368
369 Value *getCachedFP(IRBuilder<> &IRB);
370 Value *getFrameRecordInfo(IRBuilder<> &IRB);
371
372 void instrumentPersonalityFunctions();
373
374 LLVMContext *C;
375 Module &M;
376 const StackSafetyGlobalInfo *SSI;
377 Triple TargetTriple;
378 std::unique_ptr<RandomNumberGenerator> Rng;
379
380 /// This struct defines the shadow mapping using the rule:
381 /// shadow = (mem >> Scale) + Offset.
382 /// If InGlobal is true, then
383 /// extern char __hwasan_shadow[];
384 /// shadow = (mem >> Scale) + &__hwasan_shadow
385 /// If InTls is true, then
386 /// extern char *__hwasan_tls;
387 /// shadow = (mem>>Scale) + align_up(__hwasan_shadow, kShadowBaseAlignment)
388 ///
389 /// If WithFrameRecord is true, then __hwasan_tls will be used to access the
390 /// ring buffer for storing stack allocations on targets that support it.
391 struct ShadowMapping {
392 uint8_t Scale;
393 uint64_t Offset;
394 bool InGlobal;
395 bool InTls;
396 bool WithFrameRecord;
397
398 void init(Triple &TargetTriple, bool InstrumentWithCalls);
399 Align getObjectAlignment() const { return Align(1ULL << Scale); }
400 };
401
402 ShadowMapping Mapping;
403
404 Type *VoidTy = Type::getVoidTy(M.getContext());
405 Type *IntptrTy = M.getDataLayout().getIntPtrType(M.getContext());
406 PointerType *PtrTy = PointerType::getUnqual(M.getContext());
407 Type *Int8Ty = Type::getInt8Ty(M.getContext());
408 Type *Int32Ty = Type::getInt32Ty(M.getContext());
409 Type *Int64Ty = Type::getInt64Ty(M.getContext());
410
411 bool CompileKernel;
412 bool Recover;
413 bool OutlinedChecks;
414 bool InlineFastPath;
415 bool UseShortGranules;
416 bool InstrumentLandingPads;
417 bool InstrumentWithCalls;
418 bool InstrumentStack;
419 bool InstrumentGlobals;
420 bool DetectUseAfterScope;
421 bool UsePageAliases;
422 bool UseMatchAllCallback;
423
424 std::optional<uint8_t> MatchAllTag;
425
426 unsigned PointerTagShift;
427 uint64_t TagMaskByte;
428
429 Function *HwasanCtorFunction;
430
431 FunctionCallee HwasanMemoryAccessCallback[2][kNumberOfAccessSizes];
432 FunctionCallee HwasanMemoryAccessCallbackSized[2];
433
434 FunctionCallee HwasanMemmove, HwasanMemcpy, HwasanMemset;
435 FunctionCallee HwasanHandleVfork;
436
437 FunctionCallee HwasanTagMemoryFunc;
438 FunctionCallee HwasanGenerateTagFunc;
439 FunctionCallee HwasanRecordFrameRecordFunc;
440
441 Constant *ShadowGlobal;
442
443 Value *ShadowBase = nullptr;
444 Value *StackBaseTag = nullptr;
445 Value *CachedFP = nullptr;
446 GlobalValue *ThreadPtrGlobal = nullptr;
447};
448
449} // end anonymous namespace
450
451PreservedAnalyses HWAddressSanitizerPass::run(Module &M,
452 ModuleAnalysisManager &MAM) {
453 const StackSafetyGlobalInfo *SSI = nullptr;
454 auto TargetTriple = llvm::Triple(M.getTargetTriple());
455 if (shouldUseStackSafetyAnalysis(TargetTriple, Options.DisableOptimization))
456 SSI = &MAM.getResult<StackSafetyGlobalAnalysis>(M);
457
458 HWAddressSanitizer HWASan(M, Options.CompileKernel, Options.Recover, SSI);
459 auto &FAM = MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
460 for (Function &F : M)
461 HWASan.sanitizeFunction(F, FAM);
462
463 PreservedAnalyses PA = PreservedAnalyses::none();
464 // DominatorTreeAnalysis, PostDominatorTreeAnalysis, and LoopAnalysis
465 // are incrementally updated throughout this pass whenever
466 // SplitBlockAndInsertIfThen is called.
467 PA.preserve<DominatorTreeAnalysis>();
468 PA.preserve<PostDominatorTreeAnalysis>();
469 PA.preserve<LoopAnalysis>();
470 // GlobalsAA is considered stateless and does not get invalidated unless
471 // explicitly invalidated; PreservedAnalyses::none() is not enough. Sanitizers
472 // make changes that require GlobalsAA to be invalidated.
473 PA.abandon<GlobalsAA>();
474 return PA;
475}
476void HWAddressSanitizerPass::printPipeline(
477 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
478 static_cast<PassInfoMixin<HWAddressSanitizerPass> *>(this)->printPipeline(
479 OS, MapClassName2PassName);
480 OS << '<';
481 if (Options.CompileKernel)
482 OS << "kernel;";
483 if (Options.Recover)
484 OS << "recover";
485 OS << '>';
486}
487
488void HWAddressSanitizer::createHwasanCtorComdat() {
489 std::tie(HwasanCtorFunction, std::ignore) =
490 getOrCreateSanitizerCtorAndInitFunctions(
491 M, kHwasanModuleCtorName, kHwasanInitName,
492 /*InitArgTypes=*/{},
493 /*InitArgs=*/{},
494 // This callback is invoked when the functions are created the first
495 // time. Hook them into the global ctors list in that case:
496 [&](Function *Ctor, FunctionCallee) {
497 Comdat *CtorComdat = M.getOrInsertComdat(kHwasanModuleCtorName);
498 Ctor->setComdat(CtorComdat);
499 appendToGlobalCtors(M, Ctor, 0, Ctor);
500 });
501
502 // Create a note that contains pointers to the list of global
503 // descriptors. Adding a note to the output file will cause the linker to
504 // create a PT_NOTE program header pointing to the note that we can use to
505 // find the descriptor list starting from the program headers. A function
506 // provided by the runtime initializes the shadow memory for the globals by
507 // accessing the descriptor list via the note. The dynamic loader needs to
508 // call this function whenever a library is loaded.
509 //
510 // The reason why we use a note for this instead of a more conventional
511 // approach of having a global constructor pass a descriptor list pointer to
512 // the runtime is because of an order of initialization problem. With
513 // constructors we can encounter the following problematic scenario:
514 //
515 // 1) library A depends on library B and also interposes one of B's symbols
516 // 2) B's constructors are called before A's (as required for correctness)
517 // 3) during construction, B accesses one of its "own" globals (actually
518 // interposed by A) and triggers a HWASAN failure due to the initialization
519 // for A not having happened yet
520 //
521 // Even without interposition it is possible to run into similar situations in
522 // cases where two libraries mutually depend on each other.
523 //
524 // We only need one note per binary, so put everything for the note in a
525 // comdat. This needs to be a comdat with an .init_array section to prevent
526 // newer versions of lld from discarding the note.
527 //
528 // Create the note even if we aren't instrumenting globals. This ensures that
529 // binaries linked from object files with both instrumented and
530 // non-instrumented globals will end up with a note, even if a comdat from an
531 // object file with non-instrumented globals is selected. The note is harmless
532 // if the runtime doesn't support it, since it will just be ignored.
533 Comdat *NoteComdat = M.getOrInsertComdat(kHwasanModuleCtorName);
534
535 Type *Int8Arr0Ty = ArrayType::get(Int8Ty, 0);
536 auto *Start =
537 new GlobalVariable(M, Int8Arr0Ty, true, GlobalVariable::ExternalLinkage,
538 nullptr, "__start_hwasan_globals");
539 Start->setVisibility(GlobalValue::HiddenVisibility);
540 auto *Stop =
541 new GlobalVariable(M, Int8Arr0Ty, true, GlobalVariable::ExternalLinkage,
542 nullptr, "__stop_hwasan_globals");
543 Stop->setVisibility(GlobalValue::HiddenVisibility);
544
545 // Null-terminated so actually 8 bytes, which are required in order to align
546 // the note properly.
547 auto *Name = ConstantDataArray::get(*C, "LLVM\0\0\0");
548
549 auto *NoteTy = StructType::get(Int32Ty, Int32Ty, Int32Ty, Name->getType(),
550 Int32Ty, Int32Ty);
551 auto *Note =
552 new GlobalVariable(M, NoteTy, /*isConstant=*/true,
553 GlobalValue::PrivateLinkage, nullptr);
554 Note->setSection(".note.hwasan.globals");
555 Note->setComdat(NoteComdat);
556 Note->setAlignment(Align(4));
557
558 // The pointers in the note need to be relative so that the note ends up being
559 // placed in rodata, which is the standard location for notes.
560 auto CreateRelPtr = [&](Constant *Ptr) {
561 return ConstantExpr::getTrunc(
562 ConstantExpr::getSub(ConstantExpr::getPtrToInt(Ptr, Int64Ty),
563 ConstantExpr::getPtrToInt(Note, Int64Ty)),
564 Int32Ty);
565 };
566 Note->setInitializer(ConstantStruct::getAnon(
567 {ConstantInt::get(Int32Ty, 8), // n_namesz
568 ConstantInt::get(Int32Ty, 8), // n_descsz
569 ConstantInt::get(Int32Ty, ELF::NT_LLVM_HWASAN_GLOBALS), // n_type
570 Name, CreateRelPtr(Start), CreateRelPtr(Stop)}));
571 appendToCompilerUsed(M, Note);
572
573 // Create a zero-length global in hwasan_globals so that the linker will
574 // always create start and stop symbols.
575 auto *Dummy = new GlobalVariable(
576 M, Int8Arr0Ty, /*isConstantGlobal*/ true, GlobalVariable::PrivateLinkage,
577 Constant::getNullValue(Int8Arr0Ty), "hwasan.dummy.global");
578 Dummy->setSection("hwasan_globals");
579 Dummy->setComdat(NoteComdat);
580 Dummy->setMetadata(LLVMContext::MD_associated,
581 MDNode::get(*C, ValueAsMetadata::get(Note)));
582 appendToCompilerUsed(M, Dummy);
583}
584
585/// Module-level initialization.
586///
587/// inserts a call to __hwasan_init to the module's constructor list.
588void HWAddressSanitizer::initializeModule() {
589 LLVM_DEBUG(dbgs() << "Init " << M.getName() << "\n");
590 TargetTriple = Triple(M.getTargetTriple());
591
592 // x86_64 currently has two modes:
593 // - Intel LAM (default)
594 // - pointer aliasing (heap only)
595 bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64;
596 UsePageAliases = shouldUsePageAliases(TargetTriple);
597 InstrumentWithCalls = shouldInstrumentWithCalls(TargetTriple);
598 InstrumentStack = shouldInstrumentStack(TargetTriple);
599 DetectUseAfterScope = shouldDetectUseAfterScope(TargetTriple);
600 PointerTagShift = IsX86_64 ? 57 : 56;
601 TagMaskByte = IsX86_64 ? 0x3F : 0xFF;
602
603 Mapping.init(TargetTriple, InstrumentWithCalls);
604
605 C = &(M.getContext());
606 IRBuilder<> IRB(*C);
607
608 HwasanCtorFunction = nullptr;
609
610 // Older versions of Android do not have the required runtime support for
611 // short granules, global or personality function instrumentation. On other
612 // platforms we currently require using the latest version of the runtime.
613 bool NewRuntime =
614 !TargetTriple.isAndroid() || !TargetTriple.isAndroidVersionLT(30);
615
616 UseShortGranules = optOr(ClUseShortGranules, NewRuntime);
617 OutlinedChecks = (TargetTriple.isAArch64() || TargetTriple.isRISCV64()) &&
618 TargetTriple.isOSBinFormatELF() &&
619 !optOr(ClInlineAllChecks, Recover);
620
621 // These platforms may prefer less inlining to reduce binary size.
622 InlineFastPath = optOr(ClInlineFastPathChecks, !(TargetTriple.isAndroid() ||
623 TargetTriple.isOSFuchsia()));
624
625 if (ClMatchAllTag.getNumOccurrences()) {
626 if (ClMatchAllTag != -1) {
627 MatchAllTag = ClMatchAllTag & 0xFF;
628 }
629 } else if (CompileKernel) {
630 MatchAllTag = 0xFF;
631 }
632 UseMatchAllCallback = !CompileKernel && MatchAllTag.has_value();
633
634 // If we don't have personality function support, fall back to landing pads.
635 InstrumentLandingPads = optOr(ClInstrumentLandingPads, !NewRuntime);
636
637 InstrumentGlobals =
638 !CompileKernel && !UsePageAliases && optOr(ClGlobals, NewRuntime);
639
640 if (!CompileKernel) {
641 createHwasanCtorComdat();
642
643 if (InstrumentGlobals)
644 instrumentGlobals();
645
646 bool InstrumentPersonalityFunctions =
647 optOr(ClInstrumentPersonalityFunctions, NewRuntime);
648 if (InstrumentPersonalityFunctions)
649 instrumentPersonalityFunctions();
650 }
651
652 if (!TargetTriple.isAndroid()) {
653 Constant *C = M.getOrInsertGlobal("__hwasan_tls", IntptrTy, [&] {
654 auto *GV = new GlobalVariable(M, IntptrTy, /*isConstant=*/false,
655 GlobalValue::ExternalLinkage, nullptr,
656 "__hwasan_tls", nullptr,
657 GlobalVariable::InitialExecTLSModel);
658 appendToCompilerUsed(M, GV);
659 return GV;
660 });
661 ThreadPtrGlobal = cast<GlobalVariable>(C);
662 }
663}
664
665void HWAddressSanitizer::initializeCallbacks(Module &M) {
666 IRBuilder<> IRB(*C);
667 const std::string MatchAllStr = UseMatchAllCallback ? "_match_all" : "";
668 FunctionType *HwasanMemoryAccessCallbackSizedFnTy,
669 *HwasanMemoryAccessCallbackFnTy, *HwasanMemTransferFnTy,
670 *HwasanMemsetFnTy;
671 if (UseMatchAllCallback) {
672 HwasanMemoryAccessCallbackSizedFnTy =
673 FunctionType::get(VoidTy, {IntptrTy, IntptrTy, Int8Ty}, false);
674 HwasanMemoryAccessCallbackFnTy =
675 FunctionType::get(VoidTy, {IntptrTy, Int8Ty}, false);
676 HwasanMemTransferFnTy =
677 FunctionType::get(PtrTy, {PtrTy, PtrTy, IntptrTy, Int8Ty}, false);
678 HwasanMemsetFnTy =
679 FunctionType::get(PtrTy, {PtrTy, Int32Ty, IntptrTy, Int8Ty}, false);
680 } else {
681 HwasanMemoryAccessCallbackSizedFnTy =
682 FunctionType::get(VoidTy, {IntptrTy, IntptrTy}, false);
683 HwasanMemoryAccessCallbackFnTy =
684 FunctionType::get(VoidTy, {IntptrTy}, false);
685 HwasanMemTransferFnTy =
686 FunctionType::get(PtrTy, {PtrTy, PtrTy, IntptrTy}, false);
687 HwasanMemsetFnTy =
688 FunctionType::get(PtrTy, {PtrTy, Int32Ty, IntptrTy}, false);
689 }
690
691 for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
692 const std::string TypeStr = AccessIsWrite ? "store" : "load";
693 const std::string EndingStr = Recover ? "_noabort" : "";
694
695 HwasanMemoryAccessCallbackSized[AccessIsWrite] = M.getOrInsertFunction(
696 ClMemoryAccessCallbackPrefix + TypeStr + "N" + MatchAllStr + EndingStr,
697 HwasanMemoryAccessCallbackSizedFnTy);
698
699 for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
700 AccessSizeIndex++) {
701 HwasanMemoryAccessCallback[AccessIsWrite][AccessSizeIndex] =
702 M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + TypeStr +
703 itostr(1ULL << AccessSizeIndex) +
704 MatchAllStr + EndingStr,
705 HwasanMemoryAccessCallbackFnTy);
706 }
707 }
708
709 const std::string MemIntrinCallbackPrefix =
710 (CompileKernel && !ClKasanMemIntrinCallbackPrefix)
711 ? std::string("")
712 : ClMemoryAccessCallbackPrefix;
713
714 HwasanMemmove = M.getOrInsertFunction(
715 MemIntrinCallbackPrefix + "memmove" + MatchAllStr, HwasanMemTransferFnTy);
716 HwasanMemcpy = M.getOrInsertFunction(
717 MemIntrinCallbackPrefix + "memcpy" + MatchAllStr, HwasanMemTransferFnTy);
718 HwasanMemset = M.getOrInsertFunction(
719 MemIntrinCallbackPrefix + "memset" + MatchAllStr, HwasanMemsetFnTy);
720
721 HwasanTagMemoryFunc = M.getOrInsertFunction("__hwasan_tag_memory", VoidTy,
722 PtrTy, Int8Ty, IntptrTy);
723 HwasanGenerateTagFunc =
724 M.getOrInsertFunction("__hwasan_generate_tag", Int8Ty);
725
726 HwasanRecordFrameRecordFunc =
727 M.getOrInsertFunction("__hwasan_add_frame_record", VoidTy, Int64Ty);
728
729 ShadowGlobal =
730 M.getOrInsertGlobal("__hwasan_shadow", ArrayType::get(Int8Ty, 0));
731
732 HwasanHandleVfork =
733 M.getOrInsertFunction("__hwasan_handle_vfork", VoidTy, IntptrTy);
734}
735
736Value *HWAddressSanitizer::getOpaqueNoopCast(IRBuilder<> &IRB, Value *Val) {
737 // An empty inline asm with input reg == output reg.
738 // An opaque no-op cast, basically.
739 // This prevents code bloat as a result of rematerializing trivial definitions
740 // such as constants or global addresses at every load and store.
741 InlineAsm *Asm =
742 InlineAsm::get(FunctionType::get(PtrTy, {Val->getType()}, false),
743 StringRef(""), StringRef("=r,0"),
744 /*hasSideEffects=*/false);
745 return IRB.CreateCall(Asm, {Val}, ".hwasan.shadow");
746}
747
748Value *HWAddressSanitizer::getDynamicShadowIfunc(IRBuilder<> &IRB) {
749 return getOpaqueNoopCast(IRB, ShadowGlobal);
750}
751
752Value *HWAddressSanitizer::getShadowNonTls(IRBuilder<> &IRB) {
753 if (Mapping.Offset != kDynamicShadowSentinel)
754 return getOpaqueNoopCast(
755 IRB, ConstantExpr::getIntToPtr(
756 ConstantInt::get(IntptrTy, Mapping.Offset), PtrTy));
757
758 if (Mapping.InGlobal)
759 return getDynamicShadowIfunc(IRB);
760
761 Value *GlobalDynamicAddress =
762 IRB.GetInsertBlock()->getParent()->getParent()->getOrInsertGlobal(
763 kHwasanShadowMemoryDynamicAddress, PtrTy);
764 return IRB.CreateLoad(PtrTy, GlobalDynamicAddress);
765}
766
767bool HWAddressSanitizer::ignoreAccess(Instruction *Inst, Value *Ptr) {
768 // Do not instrument accesses from different address spaces; we cannot deal
769 // with them.
770 Type *PtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
771 if (PtrTy->getPointerAddressSpace() != 0)
772 return true;
773
774 // Ignore swifterror addresses.
775 // swifterror memory addresses are mem2reg promoted by instruction
776 // selection. As such they cannot have regular uses like an instrumentation
777 // function and it makes no sense to track them as memory.
778 if (Ptr->isSwiftError())
779 return true;
780
781 if (findAllocaForValue(Ptr)) {
782 if (!InstrumentStack)
783 return true;
784 if (SSI && SSI->stackAccessIsSafe(*Inst))
785 return true;
786 }
787
788 if (isa<GlobalVariable>(getUnderlyingObject(Ptr))) {
789 if (!InstrumentGlobals)
790 return true;
791 // TODO: Optimize inbound global accesses, like Asan `instrumentMop`.
792 }
793
794 return false;
795}
796
797void HWAddressSanitizer::getInterestingMemoryOperands(
798 Instruction *I, const TargetLibraryInfo &TLI,
800 // Skip memory accesses inserted by another instrumentation.
801 if (I->hasMetadata(LLVMContext::MD_nosanitize))
802 return;
803
804 // Do not instrument the load fetching the dynamic shadow address.
805 if (ShadowBase == I)
806 return;
807
808 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
809 if (!ClInstrumentReads || ignoreAccess(I, LI->getPointerOperand()))
810 return;
811 Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
812 LI->getType(), LI->getAlign());
813 } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
814 if (!ClInstrumentWrites || ignoreAccess(I, SI->getPointerOperand()))
815 return;
816 Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,
817 SI->getValueOperand()->getType(), SI->getAlign());
818 } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
819 if (!ClInstrumentAtomics || ignoreAccess(I, RMW->getPointerOperand()))
820 return;
821 Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,
822 RMW->getValOperand()->getType(), std::nullopt);
823 } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
824 if (!ClInstrumentAtomics || ignoreAccess(I, XCHG->getPointerOperand()))
825 return;
826 Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
827 XCHG->getCompareOperand()->getType(),
828 std::nullopt);
829 } else if (auto *CI = dyn_cast<CallInst>(I)) {
830 for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
831 if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
832 ignoreAccess(I, CI->getArgOperand(ArgNo)))
833 continue;
834 Type *Ty = CI->getParamByValType(ArgNo);
835 Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
836 }
838 }
839}
840
841static unsigned getPointerOperandIndex(Instruction *I) {
842 if (LoadInst *LI = dyn_cast<LoadInst>(I))
843 return LI->getPointerOperandIndex();
844 if (StoreInst *SI = dyn_cast<StoreInst>(I))
845 return SI->getPointerOperandIndex();
846 if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I))
847 return RMW->getPointerOperandIndex();
848 if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I))
849 return XCHG->getPointerOperandIndex();
850 report_fatal_error("Unexpected instruction");
851 return -1;
852}
853
854static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
855 size_t Res = llvm::countr_zero(TypeSize / 8);
856 assert(Res < kNumberOfAccessSizes);
857 return Res;
858}
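// For illustration: an 8-byte (64-bit) access maps to index 3 here, which
// selects the __hwasan_load8/__hwasan_store8 callbacks and is the value
// packed into the AccessSizeShift field of the access info below.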
859
860void HWAddressSanitizer::untagPointerOperand(Instruction *I, Value *Addr) {
861 if (TargetTriple.isAArch64() || TargetTriple.getArch() == Triple::x86_64 ||
862 TargetTriple.isRISCV64())
863 return;
864
865 IRBuilder<> IRB(I);
866 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
867 Value *UntaggedPtr =
868 IRB.CreateIntToPtr(untagPointer(IRB, AddrLong), Addr->getType());
869 I->setOperand(getPointerOperandIndex(I), UntaggedPtr);
870}
871
872Value *HWAddressSanitizer::memToShadow(Value *Mem, IRBuilder<> &IRB) {
873 // Mem >> Scale
874 Value *Shadow = IRB.CreateLShr(Mem, Mapping.Scale);
875 if (Mapping.Offset == 0)
876 return IRB.CreateIntToPtr(Shadow, PtrTy);
877 // (Mem >> Scale) + Offset
878 return IRB.CreatePtrAdd(ShadowBase, Shadow);
879}
880
881int64_t HWAddressSanitizer::getAccessInfo(bool IsWrite,
882 unsigned AccessSizeIndex) {
883 return (CompileKernel << HWASanAccessInfo::CompileKernelShift) |
884 (MatchAllTag.has_value() << HWASanAccessInfo::HasMatchAllShift) |
885 (MatchAllTag.value_or(0) << HWASanAccessInfo::MatchAllShift) |
886 (Recover << HWASanAccessInfo::RecoverShift) |
887 (IsWrite << HWASanAccessInfo::IsWriteShift) |
888 (AccessSizeIndex << HWASanAccessInfo::AccessSizeShift);
889}
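// For example, a recoverable 4-byte write with no match-all tag packs as
// (1 << RecoverShift) | (1 << IsWriteShift) | (2 << AccessSizeShift); only
// the bits covered by HWASanAccessInfo::RuntimeMask end up in the brk/int3
// immediate emitted by instrumentMemAccessInline.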
890
891HWAddressSanitizer::ShadowTagCheckInfo
892HWAddressSanitizer::insertShadowTagCheck(Value *Ptr, Instruction *InsertBefore,
893 DomTreeUpdater &DTU, LoopInfo *LI) {
894 ShadowTagCheckInfo R;
895
896 IRBuilder<> IRB(InsertBefore);
897
898 R.PtrLong = IRB.CreatePointerCast(Ptr, IntptrTy);
899 R.PtrTag =
900 IRB.CreateTrunc(IRB.CreateLShr(R.PtrLong, PointerTagShift), Int8Ty);
901 R.AddrLong = untagPointer(IRB, R.PtrLong);
902 Value *Shadow = memToShadow(R.AddrLong, IRB);
903 R.MemTag = IRB.CreateLoad(Int8Ty, Shadow);
904 Value *TagMismatch = IRB.CreateICmpNE(R.PtrTag, R.MemTag);
905
906 if (MatchAllTag.has_value()) {
907 Value *TagNotIgnored = IRB.CreateICmpNE(
908 R.PtrTag, ConstantInt::get(R.PtrTag->getType(), *MatchAllTag));
909 TagMismatch = IRB.CreateAnd(TagMismatch, TagNotIgnored);
910 }
911
912 R.TagMismatchTerm = SplitBlockAndInsertIfThen(
913 TagMismatch, InsertBefore, false,
914 MDBuilder(*C).createBranchWeights(1, 100000), &DTU, LI);
915
916 return R;
917}
918
919void HWAddressSanitizer::instrumentMemAccessOutline(Value *Ptr, bool IsWrite,
920 unsigned AccessSizeIndex,
921 Instruction *InsertBefore,
922 DomTreeUpdater &DTU,
923 LoopInfo *LI) {
924 assert(!UsePageAliases);
925 const int64_t AccessInfo = getAccessInfo(IsWrite, AccessSizeIndex);
926
927 if (InlineFastPath)
928 InsertBefore =
929 insertShadowTagCheck(Ptr, InsertBefore, DTU, LI).TagMismatchTerm;
930
931 IRBuilder<> IRB(InsertBefore);
932 Module *M = IRB.GetInsertBlock()->getParent()->getParent();
933 IRB.CreateCall(Intrinsic::getDeclaration(
934 M, UseShortGranules
935 ? Intrinsic::hwasan_check_memaccess_shortgranules
936 : Intrinsic::hwasan_check_memaccess),
937 {ShadowBase, Ptr, ConstantInt::get(Int32Ty, AccessInfo)});
938}
939
940void HWAddressSanitizer::instrumentMemAccessInline(Value *Ptr, bool IsWrite,
941 unsigned AccessSizeIndex,
942 Instruction *InsertBefore,
943 DomTreeUpdater &DTU,
944 LoopInfo *LI) {
945 assert(!UsePageAliases);
946 const int64_t AccessInfo = getAccessInfo(IsWrite, AccessSizeIndex);
947
948 ShadowTagCheckInfo TCI = insertShadowTagCheck(Ptr, InsertBefore, DTU, LI);
949
950 IRBuilder<> IRB(TCI.TagMismatchTerm);
951 Value *OutOfShortGranuleTagRange =
952 IRB.CreateICmpUGT(TCI.MemTag, ConstantInt::get(Int8Ty, 15));
953 Instruction *CheckFailTerm = SplitBlockAndInsertIfThen(
954 OutOfShortGranuleTagRange, TCI.TagMismatchTerm, !Recover,
955 MDBuilder(*C).createBranchWeights(1, 100000), &DTU, LI);
956
957 IRB.SetInsertPoint(TCI.TagMismatchTerm);
958 Value *PtrLowBits = IRB.CreateTrunc(IRB.CreateAnd(TCI.PtrLong, 15), Int8Ty);
959 PtrLowBits = IRB.CreateAdd(
960 PtrLowBits, ConstantInt::get(Int8Ty, (1 << AccessSizeIndex) - 1));
961 Value *PtrLowBitsOOB = IRB.CreateICmpUGE(PtrLowBits, TCI.MemTag);
962 SplitBlockAndInsertIfThen(PtrLowBitsOOB, TCI.TagMismatchTerm, false,
963 MDBuilder(*C).createBranchWeights(1, 100000), &DTU,
964 LI, CheckFailTerm->getParent());
965
966 IRB.SetInsertPoint(TCI.TagMismatchTerm);
967 Value *InlineTagAddr = IRB.CreateOr(TCI.AddrLong, 15);
968 InlineTagAddr = IRB.CreateIntToPtr(InlineTagAddr, PtrTy);
969 Value *InlineTag = IRB.CreateLoad(Int8Ty, InlineTagAddr);
970 Value *InlineTagMismatch = IRB.CreateICmpNE(TCI.PtrTag, InlineTag);
971 SplitBlockAndInsertIfThen(InlineTagMismatch, TCI.TagMismatchTerm, false,
972 MDBuilder(*C).createBranchWeights(1, 100000), &DTU,
973 LI, CheckFailTerm->getParent());
974
975 IRB.SetInsertPoint(CheckFailTerm);
976 InlineAsm *Asm;
977 switch (TargetTriple.getArch()) {
978 case Triple::x86_64:
979 // The signal handler will find the data address in rdi.
980 Asm = InlineAsm::get(
981 FunctionType::get(VoidTy, {TCI.PtrLong->getType()}, false),
982 "int3\nnopl " +
983 itostr(0x40 + (AccessInfo & HWASanAccessInfo::RuntimeMask)) +
984 "(%rax)",
985 "{rdi}",
986 /*hasSideEffects=*/true);
987 break;
988 case Triple::aarch64:
990 // The signal handler will find the data address in x0.
991 Asm = InlineAsm::get(
992 FunctionType::get(VoidTy, {TCI.PtrLong->getType()}, false),
993 "brk #" + itostr(0x900 + (AccessInfo & HWASanAccessInfo::RuntimeMask)),
994 "{x0}",
995 /*hasSideEffects=*/true);
996 break;
997 case Triple::riscv64:
998 // The signal handler will find the data address in x10.
999 Asm = InlineAsm::get(
1000 FunctionType::get(VoidTy, {TCI.PtrLong->getType()}, false),
1001 "ebreak\naddiw x0, x11, " +
1002 itostr(0x40 + (AccessInfo & HWASanAccessInfo::RuntimeMask)),
1003 "{x10}",
1004 /*hasSideEffects=*/true);
1005 break;
1006 default:
1007 report_fatal_error("unsupported architecture");
1008 }
1009 IRB.CreateCall(Asm, TCI.PtrLong);
1010 if (Recover)
1011 cast<BranchInst>(CheckFailTerm)
1012 ->setSuccessor(0, TCI.TagMismatchTerm->getParent());
1013}
1014
1015bool HWAddressSanitizer::ignoreMemIntrinsic(MemIntrinsic *MI) {
1016 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
1017 return (!ClInstrumentWrites || ignoreAccess(MTI, MTI->getDest())) &&
1018 (!ClInstrumentReads || ignoreAccess(MTI, MTI->getSource()));
1019 }
1020 if (isa<MemSetInst>(MI))
1021 return !ClInstrumentWrites || ignoreAccess(MI, MI->getDest());
1022 return false;
1023}
1024
1025void HWAddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
1026 IRBuilder<> IRB(MI);
1027 if (isa<MemTransferInst>(MI)) {
1028 SmallVector<Value *, 4> Args{
1029 MI->getOperand(0), MI->getOperand(1),
1030 IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)};
1031
1032 if (UseMatchAllCallback)
1033 Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
1034 IRB.CreateCall(isa<MemMoveInst>(MI) ? HwasanMemmove : HwasanMemcpy, Args);
1035 } else if (isa<MemSetInst>(MI)) {
1036 SmallVector<Value *, 4> Args{
1037 MI->getOperand(0),
1038 IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
1039 IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)};
1040 if (UseMatchAllCallback)
1041 Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
1042 IRB.CreateCall(HwasanMemset, Args);
1043 }
1044 MI->eraseFromParent();
1045}
1046
1047bool HWAddressSanitizer::instrumentMemAccess(InterestingMemoryOperand &O,
1048 DomTreeUpdater &DTU,
1049 LoopInfo *LI) {
1050 Value *Addr = O.getPtr();
1051
1052 LLVM_DEBUG(dbgs() << "Instrumenting: " << O.getInsn() << "\n");
1053
1054 if (O.MaybeMask)
1055 return false; // FIXME
1056
1057 IRBuilder<> IRB(O.getInsn());
1058 if (!O.TypeStoreSize.isScalable() && isPowerOf2_64(O.TypeStoreSize) &&
1059 (O.TypeStoreSize / 8 <= (1ULL << (kNumberOfAccessSizes - 1))) &&
1060 (!O.Alignment || *O.Alignment >= Mapping.getObjectAlignment() ||
1061 *O.Alignment >= O.TypeStoreSize / 8)) {
1062 size_t AccessSizeIndex = TypeSizeToSizeIndex(O.TypeStoreSize);
1063 if (InstrumentWithCalls) {
1064 SmallVector<Value *, 2> Args{IRB.CreatePointerCast(Addr, IntptrTy)};
1065 if (UseMatchAllCallback)
1066 Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
1067 IRB.CreateCall(HwasanMemoryAccessCallback[O.IsWrite][AccessSizeIndex],
1068 Args);
1069 } else if (OutlinedChecks) {
1070 instrumentMemAccessOutline(Addr, O.IsWrite, AccessSizeIndex, O.getInsn(),
1071 DTU, LI);
1072 } else {
1073 instrumentMemAccessInline(Addr, O.IsWrite, AccessSizeIndex, O.getInsn(),
1074 DTU, LI);
1075 }
1076 } else {
1077 SmallVector<Value *, 3> Args{
1078 IRB.CreatePointerCast(Addr, IntptrTy),
1079 IRB.CreateUDiv(IRB.CreateTypeSize(IntptrTy, O.TypeStoreSize),
1080 ConstantInt::get(IntptrTy, 8))};
1081 if (UseMatchAllCallback)
1082 Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
1083 IRB.CreateCall(HwasanMemoryAccessCallbackSized[O.IsWrite], Args);
1084 }
1085 untagPointerOperand(O.getInsn(), Addr);
1086
1087 return true;
1088}
1089
1090void HWAddressSanitizer::tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag,
1091 size_t Size) {
1092 size_t AlignedSize = alignTo(Size, Mapping.getObjectAlignment());
1093 if (!UseShortGranules)
1094 Size = AlignedSize;
1095
1096 Tag = IRB.CreateTrunc(Tag, Int8Ty);
1097 if (InstrumentWithCalls) {
1098 IRB.CreateCall(HwasanTagMemoryFunc,
1099 {IRB.CreatePointerCast(AI, PtrTy), Tag,
1100 ConstantInt::get(IntptrTy, AlignedSize)});
1101 } else {
1102 size_t ShadowSize = Size >> Mapping.Scale;
1103 Value *AddrLong = untagPointer(IRB, IRB.CreatePointerCast(AI, IntptrTy));
1104 Value *ShadowPtr = memToShadow(AddrLong, IRB);
1105 // If this memset is not inlined, it will be intercepted in the hwasan
1106 // runtime library. That's OK, because the interceptor skips the checks if
1107 // the address is in the shadow region.
1108 // FIXME: the interceptor is not as fast as real memset. Consider lowering
1109 // llvm.memset right here into either a sequence of stores, or a call to
1110 // hwasan_tag_memory.
1111 if (ShadowSize)
1112 IRB.CreateMemSet(ShadowPtr, Tag, ShadowSize, Align(1));
1113 if (Size != AlignedSize) {
1114 const uint8_t SizeRemainder = Size % Mapping.getObjectAlignment().value();
1115 IRB.CreateStore(ConstantInt::get(Int8Ty, SizeRemainder),
1116 IRB.CreateConstGEP1_32(Int8Ty, ShadowPtr, ShadowSize));
1117 IRB.CreateStore(
1118 Tag, IRB.CreateConstGEP1_32(Int8Ty, IRB.CreatePointerCast(AI, PtrTy),
1119 AlignedSize - 1));
1120 }
1121 }
1122}
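// For example, tagging a 20-byte alloca with 16-byte granules writes the full
// tag to the first shadow byte, the short-granule size 4 (20 % 16) to the
// second, and stores the tag itself in the last byte of the 32-byte-aligned
// object so the runtime can still match a tagged pointer against it.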
1123
1124unsigned HWAddressSanitizer::retagMask(unsigned AllocaNo) {
1125 if (TargetTriple.getArch() == Triple::x86_64)
1126 return AllocaNo & TagMaskByte;
1127
1128 // A list of 8-bit numbers that have at most one run of non-zero bits.
1129 // x = x ^ (mask << 56) can be encoded as a single armv8 instruction for these
1130 // masks.
1131 // The list does not include the value 255, which is used for UAR.
1132 //
1133 // Because we are more likely to use earlier elements of this list than later
1134 // ones, it is sorted in increasing order of probability of collision with a
1135 // mask allocated (temporally) nearby. The program that generated this list
1136 // can be found at:
1137 // https://github.com/google/sanitizers/blob/master/hwaddress-sanitizer/sort_masks.py
1138 static const unsigned FastMasks[] = {
1139 0, 128, 64, 192, 32, 96, 224, 112, 240, 48, 16, 120,
1140 248, 56, 24, 8, 124, 252, 60, 28, 12, 4, 126, 254,
1141 62, 30, 14, 6, 2, 127, 63, 31, 15, 7, 3, 1};
1142 return FastMasks[AllocaNo % std::size(FastMasks)];
1143}
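// For illustration: the first few allocas in a frame use masks 0, 128, 64 and
// 192, so getAllocaTag produces StackTag ^ 0, StackTag ^ 128, ..., where each
// xor folds into a single instruction on AArch64.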
1144
1145Value *HWAddressSanitizer::applyTagMask(IRBuilder<> &IRB, Value *OldTag) {
1146 if (TagMaskByte == 0xFF)
1147 return OldTag; // No need to clear the tag byte.
1148 return IRB.CreateAnd(OldTag,
1149 ConstantInt::get(OldTag->getType(), TagMaskByte));
1150}
1151
1152Value *HWAddressSanitizer::getNextTagWithCall(IRBuilder<> &IRB) {
1153 return IRB.CreateZExt(IRB.CreateCall(HwasanGenerateTagFunc), IntptrTy);
1154}
1155
1156Value *HWAddressSanitizer::getStackBaseTag(IRBuilder<> &IRB) {
1157 if (ClGenerateTagsWithCalls)
1158 return nullptr;
1159 if (StackBaseTag)
1160 return StackBaseTag;
1161 // Extract some entropy from the stack pointer for the tags.
1162 // Take bits 20..28 (ASLR entropy) and xor with bits 0..8 (these differ
1163 // between functions).
1164 Value *FramePointerLong = getCachedFP(IRB);
1165 Value *StackTag =
1166 applyTagMask(IRB, IRB.CreateXor(FramePointerLong,
1167 IRB.CreateLShr(FramePointerLong, 20)));
1168 StackTag->setName("hwasan.stack.base.tag");
1169 return StackTag;
1170}
1171
1172Value *HWAddressSanitizer::getAllocaTag(IRBuilder<> &IRB, Value *StackTag,
1173 unsigned AllocaNo) {
1174 if (ClGenerateTagsWithCalls)
1175 return getNextTagWithCall(IRB);
1176 return IRB.CreateXor(
1177 StackTag, ConstantInt::get(StackTag->getType(), retagMask(AllocaNo)));
1178}
1179
1180Value *HWAddressSanitizer::getUARTag(IRBuilder<> &IRB) {
1181 Value *FramePointerLong = getCachedFP(IRB);
1182 Value *UARTag =
1183 applyTagMask(IRB, IRB.CreateLShr(FramePointerLong, PointerTagShift));
1184
1185 UARTag->setName("hwasan.uar.tag");
1186 return UARTag;
1187}
1188
1189// Add a tag to an address.
1190Value *HWAddressSanitizer::tagPointer(IRBuilder<> &IRB, Type *Ty,
1191 Value *PtrLong, Value *Tag) {
1192 assert(!UsePageAliases);
1193 Value *TaggedPtrLong;
1194 if (CompileKernel) {
1195 // Kernel addresses have 0xFF in the most significant byte.
1196 Value *ShiftedTag =
1197 IRB.CreateOr(IRB.CreateShl(Tag, PointerTagShift),
1198 ConstantInt::get(IntptrTy, (1ULL << PointerTagShift) - 1));
1199 TaggedPtrLong = IRB.CreateAnd(PtrLong, ShiftedTag);
1200 } else {
1201 // Userspace can simply do OR (tag << PointerTagShift);
1202 Value *ShiftedTag = IRB.CreateShl(Tag, PointerTagShift);
1203 TaggedPtrLong = IRB.CreateOr(PtrLong, ShiftedTag);
1204 }
1205 return IRB.CreateIntToPtr(TaggedPtrLong, Ty);
1206}
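// For example, in userspace with PointerTagShift == 56, tagging pointer
// 0x00007fff00001000 with tag 0x2a yields 0x2a007fff00001000; the kernel
// variant ANDs instead, narrowing the all-ones top byte down to the tag.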
1207
1208// Remove tag from an address.
1209Value *HWAddressSanitizer::untagPointer(IRBuilder<> &IRB, Value *PtrLong) {
1210 assert(!UsePageAliases);
1211 Value *UntaggedPtrLong;
1212 if (CompileKernel) {
1213 // Kernel addresses have 0xFF in the most significant byte.
1214 UntaggedPtrLong =
1215 IRB.CreateOr(PtrLong, ConstantInt::get(PtrLong->getType(),
1216 TagMaskByte << PointerTagShift));
1217 } else {
1218 // Userspace addresses have 0x00.
1219 UntaggedPtrLong = IRB.CreateAnd(
1220 PtrLong, ConstantInt::get(PtrLong->getType(),
1221 ~(TagMaskByte << PointerTagShift)));
1222 }
1223 return UntaggedPtrLong;
1224}
1225
1226Value *HWAddressSanitizer::getHwasanThreadSlotPtr(IRBuilder<> &IRB) {
1227 // Android provides a fixed TLS slot for sanitizers. See TLS_SLOT_SANITIZER
1228 // in Bionic's libc/platform/bionic/tls_defines.h.
1229 constexpr int SanitizerSlot = 6;
1230 if (TargetTriple.isAArch64() && TargetTriple.isAndroid())
1231 return memtag::getAndroidSlotPtr(IRB, SanitizerSlot);
1232 return ThreadPtrGlobal;
1233}
1234
1235Value *HWAddressSanitizer::getCachedFP(IRBuilder<> &IRB) {
1236 if (!CachedFP)
1237 CachedFP = memtag::getFP(IRB);
1238 return CachedFP;
1239}
1240
1241Value *HWAddressSanitizer::getFrameRecordInfo(IRBuilder<> &IRB) {
1242 // Prepare ring buffer data.
1243 Value *PC = memtag::getPC(TargetTriple, IRB);
1244 Value *FP = getCachedFP(IRB);
1245
1246 // Mix FP and PC.
1247 // Assumptions:
1248 // PC is 0x0000PPPPPPPPPPPP (48 bits are meaningful, others are zero)
1249 // FP is 0xfffffffffffFFFF0 (4 lower bits are zero)
1250 // We only really need ~20 lower non-zero bits (FFFF), so we mix like this:
1251 // 0xFFFFPPPPPPPPPPPP
1252 FP = IRB.CreateShl(FP, 44);
1253 return IRB.CreateOr(PC, FP);
1254}
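// In other words, one 64-bit ring-buffer slot records both values: the top 16
// bits carry FP bits 4..19 and the low 48 bits carry the PC, and the runtime
// splits them back apart when printing the stack history.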
1255
1256void HWAddressSanitizer::emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord) {
1257 if (!Mapping.InTls)
1258 ShadowBase = getShadowNonTls(IRB);
1259 else if (!WithFrameRecord && TargetTriple.isAndroid())
1260 ShadowBase = getDynamicShadowIfunc(IRB);
1261
1262 if (!WithFrameRecord && ShadowBase)
1263 return;
1264
1265 Value *SlotPtr = nullptr;
1266 Value *ThreadLong = nullptr;
1267 Value *ThreadLongMaybeUntagged = nullptr;
1268
1269 auto getThreadLongMaybeUntagged = [&]() {
1270 if (!SlotPtr)
1271 SlotPtr = getHwasanThreadSlotPtr(IRB);
1272 if (!ThreadLong)
1273 ThreadLong = IRB.CreateLoad(IntptrTy, SlotPtr);
1274 // Extract the address field from ThreadLong. Unnecessary on AArch64 with
1275 // TBI.
1276 return TargetTriple.isAArch64() ? ThreadLong
1277 : untagPointer(IRB, ThreadLong);
1278 };
1279
1280 if (WithFrameRecord) {
1281 switch (ClRecordStackHistory) {
1282 case libcall: {
1283 // Emit a runtime call into hwasan rather than emitting instructions for
1284 // recording stack history.
1285 Value *FrameRecordInfo = getFrameRecordInfo(IRB);
1286 IRB.CreateCall(HwasanRecordFrameRecordFunc, {FrameRecordInfo});
1287 break;
1288 }
1289 case instr: {
1290 ThreadLongMaybeUntagged = getThreadLongMaybeUntagged();
1291
1292 StackBaseTag = IRB.CreateAShr(ThreadLong, 3);
1293
1294 // Store data to ring buffer.
1295 Value *FrameRecordInfo = getFrameRecordInfo(IRB);
1296 Value *RecordPtr =
1297 IRB.CreateIntToPtr(ThreadLongMaybeUntagged, IRB.getPtrTy(0));
1298 IRB.CreateStore(FrameRecordInfo, RecordPtr);
1299
1300 // Update the ring buffer. Top byte of ThreadLong defines the size of the
1301 // buffer in pages, it must be a power of two, and the start of the buffer
1302 // must be aligned by twice that much. Therefore wrap around of the ring
1303 // buffer is simply Addr &= ~((ThreadLong >> 56) << 12).
1304 // The use of AShr instead of LShr is due to
1305 // https://bugs.llvm.org/show_bug.cgi?id=39030
1306 // Runtime library makes sure not to use the highest bit.
1307 //
1308 // Mechanical proof of this address calculation can be found at:
1309 // https://github.com/google/sanitizers/blob/master/hwaddress-sanitizer/prove_hwasanwrap.smt2
1310 //
1311 // Example of the wrap case for N = 1
1312 // Pointer: 0x01AAAAAAAAAAAFF8
1313 // +
1314 // 0x0000000000000008
1315 // =
1316 // 0x01AAAAAAAAAAB000
1317 // &
1318 // WrapMask: 0xFFFFFFFFFFFFF000
1319 // =
1320 // 0x01AAAAAAAAAAA000
1321 //
1322 // Then the WrapMask will be a no-op until the next wrap case.
1323 Value *WrapMask = IRB.CreateXor(
1324 IRB.CreateShl(IRB.CreateAShr(ThreadLong, 56), 12, "", true, true),
1325 ConstantInt::get(IntptrTy, (uint64_t)-1));
1326 Value *ThreadLongNew = IRB.CreateAnd(
1327 IRB.CreateAdd(ThreadLong, ConstantInt::get(IntptrTy, 8)), WrapMask);
1328 IRB.CreateStore(ThreadLongNew, SlotPtr);
1329 break;
1330 }
1331 case none: {
1332 llvm_unreachable(
1333 "A stack history recording mode should've been selected.");
1334 }
1335 }
1336 }
1337
1338 if (!ShadowBase) {
1339 if (!ThreadLongMaybeUntagged)
1340 ThreadLongMaybeUntagged = getThreadLongMaybeUntagged();
1341
1342 // Get shadow base address by aligning RecordPtr up.
1343 // Note: this is not correct if the pointer is already aligned.
1344 // Runtime library will make sure this never happens.
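// With kShadowBaseAlignment == 32 this rounds up to the next 4 GiB boundary:
// e.g. a thread slot value of 0x700012345678 becomes 0x700100000000, since
// OR-ing in the low 32 one-bits and adding 1 always rounds strictly up.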
1345 ShadowBase = IRB.CreateAdd(
1346 IRB.CreateOr(
1347 ThreadLongMaybeUntagged,
1348 ConstantInt::get(IntptrTy, (1ULL << kShadowBaseAlignment) - 1)),
1349 ConstantInt::get(IntptrTy, 1), "hwasan.shadow");
1350 ShadowBase = IRB.CreateIntToPtr(ShadowBase, PtrTy);
1351 }
1352}
1353
1354bool HWAddressSanitizer::instrumentLandingPads(
1355 SmallVectorImpl<Instruction *> &LandingPadVec) {
1356 for (auto *LP : LandingPadVec) {
1357 IRBuilder<> IRB(LP->getNextNonDebugInstruction());
1358 IRB.CreateCall(
1359 HwasanHandleVfork,
1361 IRB, (TargetTriple.getArch() == Triple::x86_64) ? "rsp" : "sp")});
1362 }
1363 return true;
1364}
1365
1366static DbgAssignIntrinsic *DynCastToDbgAssign(DbgVariableIntrinsic *DVI) {
1367 return dyn_cast<DbgAssignIntrinsic>(DVI);
1368}
1369
1370static DbgVariableRecord *DynCastToDbgAssign(DbgVariableRecord *DVR) {
1371 return DVR->isDbgAssign() ? DVR : nullptr;
1372}
1373
1374bool HWAddressSanitizer::instrumentStack(memtag::StackInfo &SInfo,
1375 Value *StackTag, Value *UARTag,
1376 const DominatorTree &DT,
1377 const PostDominatorTree &PDT,
1378 const LoopInfo &LI) {
1379 // Ideally, we want to calculate tagged stack base pointer, and rewrite all
1380 // alloca addresses using that. Unfortunately, offsets are not known yet
1381 // (unless we use ASan-style mega-alloca). Instead we keep the base tag in a
1382 // temp, shift-OR it into each alloca address and xor with the retag mask.
1383 // This generates one extra instruction per alloca use.
1384 unsigned int I = 0;
1385
1386 for (auto &KV : SInfo.AllocasToInstrument) {
1387 auto N = I++;
1388 auto *AI = KV.first;
1389 memtag::AllocaInfo &Info = KV.second;
1390 IRBuilder<> IRB(AI->getNextNode());
1391
1392 // Replace uses of the alloca with tagged address.
1393 Value *Tag = getAllocaTag(IRB, StackTag, N);
1394 Value *AILong = IRB.CreatePointerCast(AI, IntptrTy);
1395 Value *AINoTagLong = untagPointer(IRB, AILong);
1396 Value *Replacement = tagPointer(IRB, AI->getType(), AINoTagLong, Tag);
1397 std::string Name =
1398 AI->hasName() ? AI->getName().str() : "alloca." + itostr(N);
1399 Replacement->setName(Name + ".hwasan");
1400
1401 size_t Size = memtag::getAllocaSizeInBytes(*AI);
1402 size_t AlignedSize = alignTo(Size, Mapping.getObjectAlignment());
1403
1404 Value *AICast = IRB.CreatePointerCast(AI, PtrTy);
1405
1406 auto HandleLifetime = [&](IntrinsicInst *II) {
1407 // Set the lifetime intrinsic to cover the whole alloca. This reduces the
1408 // set of assumptions we need to make about the lifetime. Without this we
1409 // would need to ensure that we can track the lifetime pointer to a
1410 // constant offset from the alloca, and would still need to change the
1411 // size to include the extra alignment we use for the untagging to make
1412 // the size consistent.
1413 //
1414 // The check for standard lifetime below makes sure that we have exactly
1415 // one set of start / end in any execution (i.e. the ends are not
1416 // reachable from each other), so this will not cause any problems.
1417 II->setArgOperand(0, ConstantInt::get(Int64Ty, AlignedSize));
1418 II->setArgOperand(1, AICast);
1419 };
1420 llvm::for_each(Info.LifetimeStart, HandleLifetime);
1421 llvm::for_each(Info.LifetimeEnd, HandleLifetime);
1422
1423 AI->replaceUsesWithIf(Replacement, [AICast, AILong](const Use &U) {
1424 auto *User = U.getUser();
1425 return User != AILong && User != AICast &&
1426 !memtag::isLifetimeIntrinsic(User);
1427 });
1428
1429 // Helper utility for adding DW_OP_LLVM_tag_offset to debug-info records,
1430 // abstracted over whether they're intrinsic-stored or DbgVariableRecord
1431 // stored.
1432 auto AnnotateDbgRecord = [&](auto *DPtr) {
1433 // Prepend "tag_offset, N" to the dwarf expression.
1434 // Tag offset logically applies to the alloca pointer, and it makes sense
1435 // to put it at the beginning of the expression.
1436 SmallVector<uint64_t, 8> NewOps = {dwarf::DW_OP_LLVM_tag_offset,
1437 retagMask(N)};
1438 for (size_t LocNo = 0; LocNo < DPtr->getNumVariableLocationOps(); ++LocNo)
1439 if (DPtr->getVariableLocationOp(LocNo) == AI)
1440 DPtr->setExpression(DIExpression::appendOpsToArg(
1441 DPtr->getExpression(), NewOps, LocNo));
1442 if (auto *DAI = DynCastToDbgAssign(DPtr)) {
1443 if (DAI->getAddress() == AI)
1444 DAI->setAddressExpression(DIExpression::prependOpcodes(
1445 DAI->getAddressExpression(), NewOps));
1446 }
1447 };
1448
1449 llvm::for_each(Info.DbgVariableIntrinsics, AnnotateDbgRecord);
1450 llvm::for_each(Info.DbgVariableRecords, AnnotateDbgRecord);
1451
1452 auto TagEnd = [&](Instruction *Node) {
1453 IRB.SetInsertPoint(Node);
1454 // When untagging, use the `AlignedSize` because we need to set the tags
1455 // for the entire alloca to original. If we used `Size` here, we would
1456 // keep the last granule tagged, and store zero in the last byte of the
1457 // last granule, due to how short granules are implemented.
1458 tagAlloca(IRB, AI, UARTag, AlignedSize);
1459 };
1460 // Calls to functions that may return twice (e.g. setjmp) confuse the
1461 // postdominator analysis, and will leave us to keep memory tagged after
1462 // function return. Work around this by always untagging at every return
1463 // statement if return_twice functions are called.
1464 bool StandardLifetime =
1465 !SInfo.CallsReturnTwice &&
1466 SInfo.UnrecognizedLifetimes.empty() &&
1467 memtag::isStandardLifetime(Info.LifetimeStart, Info.LifetimeEnd, &DT,
1468 &LI, ClMaxLifetimes);
1469 if (DetectUseAfterScope && StandardLifetime) {
1470 IntrinsicInst *Start = Info.LifetimeStart[0];
1471 IRB.SetInsertPoint(Start->getNextNode());
1472 tagAlloca(IRB, AI, Tag, Size);
1473 if (!memtag::forAllReachableExits(DT, PDT, LI, Start, Info.LifetimeEnd,
1474 SInfo.RetVec, TagEnd)) {
1475 for (auto *End : Info.LifetimeEnd)
1476 End->eraseFromParent();
1477 }
1478 } else {
1479 tagAlloca(IRB, AI, Tag, Size);
1480 for (auto *RI : SInfo.RetVec)
1481 TagEnd(RI);
1482 // We inserted tagging outside of the lifetimes, so we have to remove
1483 // them.
1484 for (auto &II : Info.LifetimeStart)
1485 II->eraseFromParent();
1486 for (auto &II : Info.LifetimeEnd)
1487 II->eraseFromParent();
1488 }
1489 memtag::alignAndPadAlloca(Info, Mapping.getObjectAlignment());
1490 }
1491 for (auto &I : SInfo.UnrecognizedLifetimes)
1492 I->eraseFromParent();
1493 return true;
1494}
1495
1496static void emitRemark(const Function &F, OptimizationRemarkEmitter &ORE,
1497 bool Skip) {
1498 if (Skip) {
1499 ORE.emit([&]() {
1500 return OptimizationRemark(DEBUG_TYPE, "Skip", &F)
1501 << "Skipped: F=" << ore::NV("Function", &F);
1502 });
1503 } else {
1504 ORE.emit([&]() {
1505 return OptimizationRemarkMissed(DEBUG_TYPE, "Sanitize", &F)
1506 << "Sanitized: F=" << ore::NV("Function", &F);
1507 });
1508 }
1509}
1510
1511bool HWAddressSanitizer::selectiveInstrumentationShouldSkip(
1512 Function &F, FunctionAnalysisManager &FAM) const {
1513 bool Skip = [&]() {
1514 if (ClRandomSkipRate.getNumOccurrences()) {
1515 std::bernoulli_distribution D(ClRandomSkipRate);
1516 return !D(*Rng);
1517 }
1518 if (!ClHotPercentileCutoff.getNumOccurrences())
1519 return false;
1520 auto &MAMProxy = FAM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
1521 ProfileSummaryInfo *PSI =
1522 MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
1523 if (!PSI || !PSI->hasProfileSummary()) {
1524 ++NumNoProfileSummaryFuncs;
1525 return false;
1526 }
1527 return PSI->isFunctionHotInCallGraphNthPercentile(
1528 ClHotPercentileCutoff, &F, FAM.getResult<BlockFrequencyAnalysis>(F));
1529 }();
1530 emitRemark(F, FAM.getResult<OptimizationRemarkEmitterAnalysis>(F), Skip);
1531 return Skip;
1532}
1533
1534void HWAddressSanitizer::sanitizeFunction(Function &F,
1535 FunctionAnalysisManager &FAM) {
1536 if (&F == HwasanCtorFunction)
1537 return;
1538
1539 if (!F.hasFnAttribute(Attribute::SanitizeHWAddress))
1540 return;
1541
1542 if (F.empty())
1543 return;
1544
1545 NumTotalFuncs++;
1546
1547 if (selectiveInstrumentationShouldSkip(F, FAM))
1548 return;
1549
1550 NumInstrumentedFuncs++;
1551
1552 LLVM_DEBUG(dbgs() << "Function: " << F.getName() << "\n");
1553
1554 SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
1555 SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
1556 SmallVector<Instruction *, 8> LandingPadVec;
1557 const TargetLibraryInfo &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
1558
1559 memtag::StackInfoBuilder SIB(SSI);
1560 for (auto &Inst : instructions(F)) {
1561 if (InstrumentStack) {
1562 SIB.visit(Inst);
1563 }
1564
1565 if (InstrumentLandingPads && isa<LandingPadInst>(Inst))
1566 LandingPadVec.push_back(&Inst);
1567
1568 getInterestingMemoryOperands(&Inst, TLI, OperandsToInstrument);
1569
1570 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst))
1571 if (!ignoreMemIntrinsic(MI))
1572 IntrinToInstrument.push_back(MI);
1573 }
1574
1575 memtag::StackInfo &SInfo = SIB.get();
1576
1577 initializeCallbacks(*F.getParent());
1578
1579 if (!LandingPadVec.empty())
1580 instrumentLandingPads(LandingPadVec);
1581
1582 if (SInfo.AllocasToInstrument.empty() && F.hasPersonalityFn() &&
1583 F.getPersonalityFn()->getName() == kHwasanPersonalityThunkName) {
1584 // __hwasan_personality_thunk is a no-op for functions without an
1585 // instrumented stack, so we can drop it.
1586 F.setPersonalityFn(nullptr);
1587 }
1588
1589 if (SInfo.AllocasToInstrument.empty() && OperandsToInstrument.empty() &&
1590 IntrinToInstrument.empty())
1591 return;
1592
1593 assert(!ShadowBase);
1594
1595 BasicBlock::iterator InsertPt = F.getEntryBlock().begin();
1596 IRBuilder<> EntryIRB(&F.getEntryBlock(), InsertPt);
1597 emitPrologue(EntryIRB,
1598 /*WithFrameRecord*/ ClRecordStackHistory != none &&
1599 Mapping.WithFrameRecord &&
1600 !SInfo.AllocasToInstrument.empty());
1601
1602 if (!SInfo.AllocasToInstrument.empty()) {
1603 const DominatorTree &DT = FAM.getResult<DominatorTreeAnalysis>(F);
1604 const PostDominatorTree &PDT = FAM.getResult<PostDominatorTreeAnalysis>(F);
1605 const LoopInfo &LI = FAM.getResult<LoopAnalysis>(F);
1606 Value *StackTag = getStackBaseTag(EntryIRB);
1607 Value *UARTag = getUARTag(EntryIRB);
1608 instrumentStack(SInfo, StackTag, UARTag, DT, PDT, LI);
1609 }
1610
1611 // If we split the entry block, move any allocas that were originally in the
1612 // entry block back into the entry block so that they aren't treated as
1613 // dynamic allocas.
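// (AllocaInst::isStaticAlloca() requires both a constant size and residence
// in the entry block, so allocas left behind in a split-off block would be
// treated as dynamic allocas.)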
1614 if (EntryIRB.GetInsertBlock() != &F.getEntryBlock()) {
1615 InsertPt = F.getEntryBlock().begin();
1616 for (Instruction &I :
1617 llvm::make_early_inc_range(*EntryIRB.GetInsertBlock())) {
1618 if (auto *AI = dyn_cast<AllocaInst>(&I))
1619 if (isa<ConstantInt>(AI->getArraySize()))
1620 I.moveBefore(F.getEntryBlock(), InsertPt);
1621 }
1622 }
1623
1624 DominatorTree *DT = FAM.getCachedResult<DominatorTreeAnalysis>(F);
1625 PostDominatorTree *PDT = FAM.getCachedResult<PostDominatorTreeAnalysis>(F);
1626 LoopInfo *LI = FAM.getCachedResult<LoopAnalysis>(F);
1627 DomTreeUpdater DTU(DT, PDT, DomTreeUpdater::UpdateStrategy::Lazy);
1628 for (auto &Operand : OperandsToInstrument)
1629 instrumentMemAccess(Operand, DTU, LI);
1630 DTU.flush();
1631
1632 if (ClInstrumentMemIntrinsics && !IntrinToInstrument.empty()) {
1633 for (auto *Inst : IntrinToInstrument)
1634 instrumentMemIntrinsic(Inst);
1635 }
1636
1637 ShadowBase = nullptr;
1638 StackBaseTag = nullptr;
1639 CachedFP = nullptr;
1640}
1641
1642void HWAddressSanitizer::instrumentGlobal(GlobalVariable *GV, uint8_t Tag) {
1643 assert(!UsePageAliases);
1644 Constant *Initializer = GV->getInitializer();
1645 uint64_t SizeInBytes =
1646 M.getDataLayout().getTypeAllocSize(Initializer->getType());
1647 uint64_t NewSize = alignTo(SizeInBytes, Mapping.getObjectAlignment());
1648 if (SizeInBytes != NewSize) {
1649 // Pad the initializer out to the next multiple of 16 bytes and add the
1650 // required short granule tag.
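// For example: a 13-byte constant padded to 16 bytes gets three padding
// bytes, all zero except the last, which holds the tag so that the
// short-granule check can read the real tag from the granule's final byte.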
1651 std::vector<uint8_t> Init(NewSize - SizeInBytes, 0);
1652 Init.back() = Tag;
1653 Constant *Padding = ConstantDataArray::get(*C, Init);
1654 Initializer = ConstantStruct::getAnon({Initializer, Padding});
1655 }
1656
1657 auto *NewGV = new GlobalVariable(M, Initializer->getType(), GV->isConstant(),
1658 GlobalValue::ExternalLinkage, Initializer,
1659 GV->getName() + ".hwasan");
1660 NewGV->copyAttributesFrom(GV);
1661 NewGV->setLinkage(GlobalValue::PrivateLinkage);
1662 NewGV->copyMetadata(GV, 0);
1663 NewGV->setAlignment(
1664 std::max(GV->getAlign().valueOrOne(), Mapping.getObjectAlignment()));
1665
1666 // It is invalid to ICF two globals that have different tags. In the case
1667 // where the size of the global is a multiple of the tag granularity the
1668 // contents of the globals may be the same but the tags (i.e. symbol values)
1669 // may be different, and the symbols are not considered during ICF. In the
1670 // case where the size is not a multiple of the granularity, the short granule
1671 // tags would discriminate two globals with different tags, but there would
1672 // otherwise be nothing stopping such a global from being incorrectly ICF'd
1673 // with an uninstrumented (i.e. tag 0) global that happened to have the short
1674 // granule tag in the last byte.
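// Keeping the address significant (UnnamedAddr::None, below) signals that
// safe ICF must not fold this global with an identical-looking one.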
1675 NewGV->setUnnamedAddr(GlobalValue::UnnamedAddr::None);
1676
1677 // Descriptor format (assuming little-endian):
1678 // bytes 0-3: relative address of global
1679 // bytes 4-6: size of global (16MB ought to be enough for anyone, but in case
1680 // it isn't, we create multiple descriptors)
1681 // byte 7: tag
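// For example: a 48-byte global with tag 0x2a yields a single descriptor
// whose second word is 0x2a000030 (size 48 in the low 24 bits, tag in the
// top byte); only globals larger than ~16MB need more than one descriptor.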
1682 auto *DescriptorTy = StructType::get(Int32Ty, Int32Ty);
1683 const uint64_t MaxDescriptorSize = 0xfffff0;
1684 for (uint64_t DescriptorPos = 0; DescriptorPos < SizeInBytes;
1685 DescriptorPos += MaxDescriptorSize) {
1686 auto *Descriptor =
1687 new GlobalVariable(M, DescriptorTy, true, GlobalValue::PrivateLinkage,
1688 nullptr, GV->getName() + ".hwasan.descriptor");
1689 auto *GVRelPtr = ConstantExpr::getTrunc(
1690 ConstantExpr::getAdd(
1691 ConstantExpr::getSub(
1692 ConstantExpr::getPtrToInt(NewGV, Int64Ty),
1693 ConstantExpr::getPtrToInt(Descriptor, Int64Ty)),
1694 ConstantInt::get(Int64Ty, DescriptorPos)),
1695 Int32Ty);
1696 uint32_t Size = std::min(SizeInBytes - DescriptorPos, MaxDescriptorSize);
1697 auto *SizeAndTag = ConstantInt::get(Int32Ty, Size | (uint32_t(Tag) << 24));
1698 Descriptor->setComdat(NewGV->getComdat());
1699 Descriptor->setInitializer(ConstantStruct::getAnon({GVRelPtr, SizeAndTag}));
1700 Descriptor->setSection("hwasan_globals");
1701 Descriptor->setMetadata(LLVMContext::MD_associated,
1702 MDNode::get(*C, ValueAsMetadata::get(NewGV)));
1703 appendToCompilerUsed(M, Descriptor);
1704 }
1705
1706 Constant *Aliasee = ConstantExpr::getIntToPtr(
1707 ConstantExpr::getAdd(
1708 ConstantExpr::getPtrToInt(NewGV, Int64Ty),
1709 ConstantInt::get(Int64Ty, uint64_t(Tag) << PointerTagShift)),
1710 GV->getType());
1711 auto *Alias = GlobalAlias::create(GV->getValueType(), GV->getAddressSpace(),
1712 GV->getLinkage(), "", Aliasee, &M);
1713 Alias->setVisibility(GV->getVisibility());
1714 Alias->takeName(GV);
1715 GV->replaceAllUsesWith(Alias);
1716 GV->eraseFromParent();
1717}
1718
1719void HWAddressSanitizer::instrumentGlobals() {
1720 std::vector<GlobalVariable *> Globals;
1721 for (GlobalVariable &GV : M.globals()) {
1722 if (GV.hasSanitizerMetadata() && GV.getSanitizerMetadata().NoHWAddress)
1723 continue;
1724
1725 if (GV.isDeclarationForLinker() || GV.getName().starts_with("llvm.") ||
1726 GV.isThreadLocal())
1727 continue;
1728
1729 // Common symbols can't have aliases point to them, so they can't be tagged.
1730 if (GV.hasCommonLinkage())
1731 continue;
1732
1733 // Globals with custom sections may be used in __start_/__stop_ enumeration,
1734 // which would be broken both by adding tags and potentially by the extra
1735 // padding/alignment that we insert.
1736 if (GV.hasSection())
1737 continue;
1738
1739 Globals.push_back(&GV);
1740 }
1741
1742 MD5 Hasher;
1743 Hasher.update(M.getSourceFileName());
1744 MD5::MD5Result Hash;
1745 Hasher.final(Hash);
1746 uint8_t Tag = Hash[0];
1747
1748 assert(TagMaskByte >= 16);
1749
1750 for (GlobalVariable *GV : Globals) {
1751 // Don't allow globals to be tagged with something that looks like a
1752 // short-granule tag, otherwise we lose inter-granule overflow detection, as
1753 // the fast path shadow-vs-address check succeeds.
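// For example: with TagMaskByte == 0xff, the per-global tags cycle through
// 16..255; values below 16 would read as short-granule sizes in shadow.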
1754 if (Tag < 16 || Tag > TagMaskByte)
1755 Tag = 16;
1756 instrumentGlobal(GV, Tag++);
1757 }
1758}
1759
1760void HWAddressSanitizer::instrumentPersonalityFunctions() {
1761 // We need to untag stack frames as we unwind past them. That is the job of
1762 // the personality function wrapper, which either wraps an existing
1763 // personality function or acts as a personality function on its own. Each
1764 // function that has a personality function or that can be unwound past has
1765 // its personality function changed to a thunk that calls the personality
1766 // function wrapper in the runtime.
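// For example: a function whose personality is __gxx_personality_v0 is
// redirected to a thunk named __hwasan_personality_thunk.__gxx_personality_v0,
// which forwards to __hwasan_personality_wrapper in the runtime.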
1767 MapVector<Constant *, std::vector<Function *>> PersonalityFns;
1768 for (Function &F : M) {
1769 if (F.isDeclaration() || !F.hasFnAttribute(Attribute::SanitizeHWAddress))
1770 continue;
1771
1772 if (F.hasPersonalityFn()) {
1773 PersonalityFns[F.getPersonalityFn()->stripPointerCasts()].push_back(&F);
1774 } else if (!F.hasFnAttribute(Attribute::NoUnwind)) {
1775 PersonalityFns[nullptr].push_back(&F);
1776 }
1777 }
1778
1779 if (PersonalityFns.empty())
1780 return;
1781
1782 FunctionCallee HwasanPersonalityWrapper = M.getOrInsertFunction(
1783 "__hwasan_personality_wrapper", Int32Ty, Int32Ty, Int32Ty, Int64Ty, PtrTy,
1784 PtrTy, PtrTy, PtrTy, PtrTy);
1785 FunctionCallee UnwindGetGR = M.getOrInsertFunction("_Unwind_GetGR", VoidTy);
1786 FunctionCallee UnwindGetCFA = M.getOrInsertFunction("_Unwind_GetCFA", VoidTy);
1787
1788 for (auto &P : PersonalityFns) {
1789 std::string ThunkName = kHwasanPersonalityThunkName;
1790 if (P.first)
1791 ThunkName += ("." + P.first->getName()).str();
1792 FunctionType *ThunkFnTy = FunctionType::get(
1793 Int32Ty, {Int32Ty, Int32Ty, Int64Ty, PtrTy, PtrTy}, false);
1794 bool IsLocal = P.first && (!isa<GlobalValue>(P.first) ||
1795 cast<GlobalValue>(P.first)->hasLocalLinkage());
1796 auto *ThunkFn = Function::Create(ThunkFnTy,
1797 IsLocal ? GlobalValue::InternalLinkage
1798 : GlobalValue::LinkOnceODRLinkage,
1799 ThunkName, &M);
1800 if (!IsLocal) {
1801 ThunkFn->setVisibility(GlobalValue::HiddenVisibility);
1802 ThunkFn->setComdat(M.getOrInsertComdat(ThunkName));
1803 }
1804
1805 auto *BB = BasicBlock::Create(*C, "entry", ThunkFn);
1806 IRBuilder<> IRB(BB);
1807 CallInst *WrapperCall = IRB.CreateCall(
1808 HwasanPersonalityWrapper,
1809 {ThunkFn->getArg(0), ThunkFn->getArg(1), ThunkFn->getArg(2),
1810 ThunkFn->getArg(3), ThunkFn->getArg(4),
1811 P.first ? P.first : Constant::getNullValue(PtrTy),
1812 UnwindGetGR.getCallee(), UnwindGetCFA.getCallee()});
1813 WrapperCall->setTailCall();
1814 IRB.CreateRet(WrapperCall);
1815
1816 for (Function *F : P.second)
1817 F->setPersonalityFn(ThunkFn);
1818 }
1819}
1820
1821void HWAddressSanitizer::ShadowMapping::init(Triple &TargetTriple,
1822 bool InstrumentWithCalls) {
1823 Scale = kDefaultShadowScale;
1824 if (TargetTriple.isOSFuchsia()) {
1825 // Fuchsia is always PIE, which means that the beginning of the address
1826 // space is always available.
1827 InGlobal = false;
1828 InTls = false;
1829 Offset = 0;
1830 WithFrameRecord = true;
1831 } else if (ClMappingOffset.getNumOccurrences() > 0) {
1832 InGlobal = false;
1833 InTls = false;
1834 Offset = ClMappingOffset;
1835 WithFrameRecord = false;
1836 } else if (ClEnableKhwasan || InstrumentWithCalls) {
1837 InGlobal = false;
1838 InTls = false;
1839 Offset = 0;
1840 WithFrameRecord = false;
1841 } else if (ClWithIfunc) {
1842 InGlobal = true;
1843 InTls = false;
1844 Offset = kDynamicShadowSentinel;
1845 WithFrameRecord = false;
1846 } else if (ClWithTls) {
1847 InGlobal = false;
1848 InTls = true;
1849 Offset = kDynamicShadowSentinel;
1850 WithFrameRecord = true;
1851 } else {
1852 InGlobal = false;
1853 InTls = false;
1854 Offset = kDynamicShadowSentinel;
1855 WithFrameRecord = false;
1856 }
1857}