LLVM 18.0.0git
HWAddressSanitizer.cpp
Go to the documentation of this file.
1//===- HWAddressSanitizer.cpp - memory access error detector --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file is a part of HWAddressSanitizer, an address basic correctness
11/// checker based on tagged addressing.
12//===----------------------------------------------------------------------===//
13
15#include "llvm/ADT/MapVector.h"
16#include "llvm/ADT/STLExtras.h"
19#include "llvm/ADT/StringRef.h"
26#include "llvm/IR/Attributes.h"
27#include "llvm/IR/BasicBlock.h"
28#include "llvm/IR/Constant.h"
29#include "llvm/IR/Constants.h"
30#include "llvm/IR/DataLayout.h"
33#include "llvm/IR/Dominators.h"
34#include "llvm/IR/Function.h"
35#include "llvm/IR/IRBuilder.h"
36#include "llvm/IR/InlineAsm.h"
38#include "llvm/IR/Instruction.h"
41#include "llvm/IR/Intrinsics.h"
42#include "llvm/IR/LLVMContext.h"
43#include "llvm/IR/MDBuilder.h"
44#include "llvm/IR/Module.h"
45#include "llvm/IR/Type.h"
46#include "llvm/IR/Value.h"
49#include "llvm/Support/Debug.h"
57#include <optional>
58
59using namespace llvm;
60
61#define DEBUG_TYPE "hwasan"
62
// Names of the runtime-interface symbols emitted/required by this pass.
const char kHwasanModuleCtorName[] = "hwasan.module_ctor";
const char kHwasanNoteName[] = "hwasan.note";
const char kHwasanInitName[] = "__hwasan_init";
const char kHwasanPersonalityThunkName[] = "__hwasan_personality_thunk";

const char kHwasanShadowMemoryDynamicAddress[] =
    "__hwasan_shadow_memory_dynamic_address";

// Accesses sizes are powers of two: 1, 2, 4, 8, 16.
static const size_t kNumberOfAccessSizes = 5;

// Shadow granule is 2^Scale bytes; one shadow byte covers one granule.
static const size_t kDefaultShadowScale = 4;
// Sentinel meaning "shadow base is not a compile-time constant".
static const uint64_t kDynamicShadowSentinel =
    std::numeric_limits<uint64_t>::max();

static const unsigned kShadowBaseAlignment = 32;
79
81 ClMemoryAccessCallbackPrefix("hwasan-memory-access-callback-prefix",
82 cl::desc("Prefix for memory access callbacks"),
83 cl::Hidden, cl::init("__hwasan_"));
84
86 "hwasan-kernel-mem-intrinsic-prefix",
87 cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden,
88 cl::init(false));
89
91 "hwasan-instrument-with-calls",
92 cl::desc("instrument reads and writes with callbacks"), cl::Hidden,
93 cl::init(false));
94
95static cl::opt<bool> ClInstrumentReads("hwasan-instrument-reads",
96 cl::desc("instrument read instructions"),
97 cl::Hidden, cl::init(true));
98
99static cl::opt<bool>
100 ClInstrumentWrites("hwasan-instrument-writes",
101 cl::desc("instrument write instructions"), cl::Hidden,
102 cl::init(true));
103
105 "hwasan-instrument-atomics",
106 cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
107 cl::init(true));
108
109static cl::opt<bool> ClInstrumentByval("hwasan-instrument-byval",
110 cl::desc("instrument byval arguments"),
111 cl::Hidden, cl::init(true));
112
113static cl::opt<bool>
114 ClRecover("hwasan-recover",
115 cl::desc("Enable recovery mode (continue-after-error)."),
116 cl::Hidden, cl::init(false));
117
118static cl::opt<bool> ClInstrumentStack("hwasan-instrument-stack",
119 cl::desc("instrument stack (allocas)"),
120 cl::Hidden, cl::init(true));
121
122static cl::opt<bool>
123 ClUseStackSafety("hwasan-use-stack-safety", cl::Hidden, cl::init(true),
124 cl::Hidden, cl::desc("Use Stack Safety analysis results"),
126
128 "hwasan-max-lifetimes-for-alloca", cl::Hidden, cl::init(3),
130 cl::desc("How many lifetime ends to handle for a single alloca."),
132
133static cl::opt<bool>
134 ClUseAfterScope("hwasan-use-after-scope",
135 cl::desc("detect use after scope within function"),
136 cl::Hidden, cl::init(true));
137
139 "hwasan-generate-tags-with-calls",
140 cl::desc("generate new tags with runtime library calls"), cl::Hidden,
141 cl::init(false));
142
143static cl::opt<bool> ClGlobals("hwasan-globals", cl::desc("Instrument globals"),
144 cl::Hidden, cl::init(false));
145
147 "hwasan-match-all-tag",
148 cl::desc("don't report bad accesses via pointers with this tag"),
149 cl::Hidden, cl::init(-1));
150
151static cl::opt<bool>
152 ClEnableKhwasan("hwasan-kernel",
153 cl::desc("Enable KernelHWAddressSanitizer instrumentation"),
154 cl::Hidden, cl::init(false));
155
156// These flags allow to change the shadow mapping and control how shadow memory
157// is accessed. The shadow mapping looks like:
158// Shadow = (Mem >> scale) + offset
159
161 ClMappingOffset("hwasan-mapping-offset",
162 cl::desc("HWASan shadow mapping offset [EXPERIMENTAL]"),
163 cl::Hidden, cl::init(0));
164
165static cl::opt<bool>
166 ClWithIfunc("hwasan-with-ifunc",
167 cl::desc("Access dynamic shadow through an ifunc global on "
168 "platforms that support this"),
169 cl::Hidden, cl::init(false));
170
172 "hwasan-with-tls",
173 cl::desc("Access dynamic shadow through an thread-local pointer on "
174 "platforms that support this"),
175 cl::Hidden, cl::init(true));
176
177// Mode for selecting how to insert frame record info into the stack ring
178// buffer.
180 // Do not record frame record info.
182
183 // Insert instructions into the prologue for storing into the stack ring
184 // buffer directly.
186
187 // Add a call to __hwasan_add_frame_record in the runtime.
189};
190
192 "hwasan-record-stack-history",
193 cl::desc("Record stack frames with tagged allocations in a thread-local "
194 "ring buffer"),
195 cl::values(clEnumVal(none, "Do not record stack ring history"),
196 clEnumVal(instr, "Insert instructions into the prologue for "
197 "storing into the stack ring buffer directly"),
198 clEnumVal(libcall, "Add a call to __hwasan_add_frame_record for "
199 "storing into the stack ring buffer")),
201
202static cl::opt<bool>
203 ClInstrumentMemIntrinsics("hwasan-instrument-mem-intrinsics",
204 cl::desc("instrument memory intrinsics"),
205 cl::Hidden, cl::init(true));
206
207static cl::opt<bool>
208 ClInstrumentLandingPads("hwasan-instrument-landing-pads",
209 cl::desc("instrument landing pads"), cl::Hidden,
210 cl::init(false));
211
213 "hwasan-use-short-granules",
214 cl::desc("use short granules in allocas and outlined checks"), cl::Hidden,
215 cl::init(false));
216
218 "hwasan-instrument-personality-functions",
219 cl::desc("instrument personality functions"), cl::Hidden);
220
221static cl::opt<bool> ClInlineAllChecks("hwasan-inline-all-checks",
222 cl::desc("inline all checks"),
223 cl::Hidden, cl::init(false));
224
225static cl::opt<bool> ClInlineFastPathChecks("hwasan-inline-fast-path-checks",
226 cl::desc("inline all checks"),
227 cl::Hidden, cl::init(false));
228
229// Enabled from clang by "-fsanitize-hwaddress-experimental-aliasing".
230static cl::opt<bool> ClUsePageAliases("hwasan-experimental-use-page-aliases",
231 cl::desc("Use page aliasing in HWASan"),
232 cl::Hidden, cl::init(false));
233
234namespace {
235
236bool shouldUsePageAliases(const Triple &TargetTriple) {
237 return ClUsePageAliases && TargetTriple.getArch() == Triple::x86_64;
238}
239
240bool shouldInstrumentStack(const Triple &TargetTriple) {
241 return !shouldUsePageAliases(TargetTriple) && ClInstrumentStack;
242}
243
244bool shouldInstrumentWithCalls(const Triple &TargetTriple) {
245 return ClInstrumentWithCalls.getNumOccurrences()
247 : TargetTriple.getArch() == Triple::x86_64;
248}
249
250bool mightUseStackSafetyAnalysis(bool DisableOptimization) {
251 return ClUseStackSafety.getNumOccurrences() ? ClUseStackSafety
252 : !DisableOptimization;
253}
254
255bool shouldUseStackSafetyAnalysis(const Triple &TargetTriple,
256 bool DisableOptimization) {
257 return shouldInstrumentStack(TargetTriple) &&
258 mightUseStackSafetyAnalysis(DisableOptimization);
259}
260
261bool shouldDetectUseAfterScope(const Triple &TargetTriple) {
262 return ClUseAfterScope && shouldInstrumentStack(TargetTriple);
263}
264
265/// An instrumentation pass implementing detection of addressability bugs
266/// using tagged pointers.
267class HWAddressSanitizer {
268public:
269 HWAddressSanitizer(Module &M, bool CompileKernel, bool Recover,
270 const StackSafetyGlobalInfo *SSI)
271 : M(M), SSI(SSI) {
272 this->Recover = ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover;
273 this->CompileKernel = ClEnableKhwasan.getNumOccurrences() > 0
275 : CompileKernel;
276
277 initializeModule();
278 }
279
280 void sanitizeFunction(Function &F, FunctionAnalysisManager &FAM);
281
282private:
283 struct ShadowTagCheckInfo {
284 Instruction *TagMismatchTerm = nullptr;
285 Value *PtrLong = nullptr;
286 Value *AddrLong = nullptr;
287 Value *PtrTag = nullptr;
288 Value *MemTag = nullptr;
289 };
290 void setSSI(const StackSafetyGlobalInfo *S) { SSI = S; }
291
292 void initializeModule();
293 void createHwasanCtorComdat();
294
295 void initializeCallbacks(Module &M);
296
297 Value *getOpaqueNoopCast(IRBuilder<> &IRB, Value *Val);
298
299 Value *getDynamicShadowIfunc(IRBuilder<> &IRB);
300 Value *getShadowNonTls(IRBuilder<> &IRB);
301
302 void untagPointerOperand(Instruction *I, Value *Addr);
303 Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
304
305 int64_t getAccessInfo(bool IsWrite, unsigned AccessSizeIndex);
306 ShadowTagCheckInfo insertShadowTagCheck(Value *Ptr,
307 Instruction *InsertBefore);
308 void instrumentMemAccessOutline(Value *Ptr, bool IsWrite,
309 unsigned AccessSizeIndex,
310 Instruction *InsertBefore);
311 void instrumentMemAccessInline(Value *Ptr, bool IsWrite,
312 unsigned AccessSizeIndex,
313 Instruction *InsertBefore);
314 bool ignoreMemIntrinsic(MemIntrinsic *MI);
315 void instrumentMemIntrinsic(MemIntrinsic *MI);
316 bool instrumentMemAccess(InterestingMemoryOperand &O);
317 bool ignoreAccess(Instruction *Inst, Value *Ptr);
318 void getInterestingMemoryOperands(
320
321 void tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag, size_t Size);
322 Value *tagPointer(IRBuilder<> &IRB, Type *Ty, Value *PtrLong, Value *Tag);
323 Value *untagPointer(IRBuilder<> &IRB, Value *PtrLong);
324 bool instrumentStack(memtag::StackInfo &Info, Value *StackTag, Value *UARTag,
325 const DominatorTree &DT, const PostDominatorTree &PDT,
326 const LoopInfo &LI);
327 Value *readRegister(IRBuilder<> &IRB, StringRef Name);
328 bool instrumentLandingPads(SmallVectorImpl<Instruction *> &RetVec);
329 Value *getNextTagWithCall(IRBuilder<> &IRB);
330 Value *getStackBaseTag(IRBuilder<> &IRB);
331 Value *getAllocaTag(IRBuilder<> &IRB, Value *StackTag, unsigned AllocaNo);
332 Value *getUARTag(IRBuilder<> &IRB);
333
334 Value *getHwasanThreadSlotPtr(IRBuilder<> &IRB, Type *Ty);
335 Value *applyTagMask(IRBuilder<> &IRB, Value *OldTag);
336 unsigned retagMask(unsigned AllocaNo);
337
338 void emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord);
339
340 void instrumentGlobal(GlobalVariable *GV, uint8_t Tag);
341 void instrumentGlobals();
342
343 Value *getPC(IRBuilder<> &IRB);
344 Value *getSP(IRBuilder<> &IRB);
345 Value *getFrameRecordInfo(IRBuilder<> &IRB);
346
347 void instrumentPersonalityFunctions();
348
349 LLVMContext *C;
350 Module &M;
351 const StackSafetyGlobalInfo *SSI;
352 Triple TargetTriple;
353
354 /// This struct defines the shadow mapping using the rule:
355 /// shadow = (mem >> Scale) + Offset.
356 /// If InGlobal is true, then
357 /// extern char __hwasan_shadow[];
358 /// shadow = (mem >> Scale) + &__hwasan_shadow
359 /// If InTls is true, then
360 /// extern char *__hwasan_tls;
361 /// shadow = (mem>>Scale) + align_up(__hwasan_shadow, kShadowBaseAlignment)
362 ///
363 /// If WithFrameRecord is true, then __hwasan_tls will be used to access the
364 /// ring buffer for storing stack allocations on targets that support it.
365 struct ShadowMapping {
366 uint8_t Scale;
368 bool InGlobal;
369 bool InTls;
370 bool WithFrameRecord;
371
372 void init(Triple &TargetTriple, bool InstrumentWithCalls);
373 Align getObjectAlignment() const { return Align(1ULL << Scale); }
374 };
375
376 ShadowMapping Mapping;
377
378 Type *VoidTy = Type::getVoidTy(M.getContext());
379 Type *IntptrTy;
380 Type *Int8PtrTy;
381 Type *Int8Ty;
382 Type *Int32Ty;
383 Type *Int64Ty = Type::getInt64Ty(M.getContext());
384
385 bool CompileKernel;
386 bool Recover;
387 bool OutlinedChecks;
388 bool InlineFastPath;
389 bool UseShortGranules;
390 bool InstrumentLandingPads;
391 bool InstrumentWithCalls;
392 bool InstrumentStack;
393 bool DetectUseAfterScope;
394 bool UsePageAliases;
395 bool UseMatchAllCallback;
396
397 std::optional<uint8_t> MatchAllTag;
398
399 unsigned PointerTagShift;
400 uint64_t TagMaskByte;
401
402 Function *HwasanCtorFunction;
403
404 FunctionCallee HwasanMemoryAccessCallback[2][kNumberOfAccessSizes];
405 FunctionCallee HwasanMemoryAccessCallbackSized[2];
406
407 FunctionCallee HwasanMemmove, HwasanMemcpy, HwasanMemset;
408 FunctionCallee HwasanHandleVfork;
409
410 FunctionCallee HwasanTagMemoryFunc;
411 FunctionCallee HwasanGenerateTagFunc;
412 FunctionCallee HwasanRecordFrameRecordFunc;
413
414 Constant *ShadowGlobal;
415
416 Value *ShadowBase = nullptr;
417 Value *StackBaseTag = nullptr;
418 Value *CachedSP = nullptr;
419 GlobalValue *ThreadPtrGlobal = nullptr;
420};
421
422} // end anonymous namespace
423
426 const StackSafetyGlobalInfo *SSI = nullptr;
427 auto TargetTriple = llvm::Triple(M.getTargetTriple());
428 if (shouldUseStackSafetyAnalysis(TargetTriple, Options.DisableOptimization))
430
431 HWAddressSanitizer HWASan(M, Options.CompileKernel, Options.Recover, SSI);
432 auto &FAM = MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
433 for (Function &F : M)
434 HWASan.sanitizeFunction(F, FAM);
435
437 // GlobalsAA is considered stateless and does not get invalidated unless
438 // explicitly invalidated; PreservedAnalyses::none() is not enough. Sanitizers
439 // make changes that require GlobalsAA to be invalidated.
440 PA.abandon<GlobalsAA>();
441 return PA;
442}
444 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
446 OS, MapClassName2PassName);
447 OS << '<';
448 if (Options.CompileKernel)
449 OS << "kernel;";
450 if (Options.Recover)
451 OS << "recover";
452 OS << '>';
453}
454
455void HWAddressSanitizer::createHwasanCtorComdat() {
456 std::tie(HwasanCtorFunction, std::ignore) =
459 /*InitArgTypes=*/{},
460 /*InitArgs=*/{},
461 // This callback is invoked when the functions are created the first
462 // time. Hook them into the global ctors list in that case:
463 [&](Function *Ctor, FunctionCallee) {
464 Comdat *CtorComdat = M.getOrInsertComdat(kHwasanModuleCtorName);
465 Ctor->setComdat(CtorComdat);
466 appendToGlobalCtors(M, Ctor, 0, Ctor);
467 });
468
469 // Create a note that contains pointers to the list of global
470 // descriptors. Adding a note to the output file will cause the linker to
471 // create a PT_NOTE program header pointing to the note that we can use to
472 // find the descriptor list starting from the program headers. A function
473 // provided by the runtime initializes the shadow memory for the globals by
474 // accessing the descriptor list via the note. The dynamic loader needs to
475 // call this function whenever a library is loaded.
476 //
477 // The reason why we use a note for this instead of a more conventional
478 // approach of having a global constructor pass a descriptor list pointer to
479 // the runtime is because of an order of initialization problem. With
480 // constructors we can encounter the following problematic scenario:
481 //
482 // 1) library A depends on library B and also interposes one of B's symbols
483 // 2) B's constructors are called before A's (as required for correctness)
484 // 3) during construction, B accesses one of its "own" globals (actually
485 // interposed by A) and triggers a HWASAN failure due to the initialization
486 // for A not having happened yet
487 //
488 // Even without interposition it is possible to run into similar situations in
489 // cases where two libraries mutually depend on each other.
490 //
491 // We only need one note per binary, so put everything for the note in a
492 // comdat. This needs to be a comdat with an .init_array section to prevent
493 // newer versions of lld from discarding the note.
494 //
495 // Create the note even if we aren't instrumenting globals. This ensures that
496 // binaries linked from object files with both instrumented and
497 // non-instrumented globals will end up with a note, even if a comdat from an
498 // object file with non-instrumented globals is selected. The note is harmless
499 // if the runtime doesn't support it, since it will just be ignored.
500 Comdat *NoteComdat = M.getOrInsertComdat(kHwasanModuleCtorName);
501
502 Type *Int8Arr0Ty = ArrayType::get(Int8Ty, 0);
503 auto *Start =
504 new GlobalVariable(M, Int8Arr0Ty, true, GlobalVariable::ExternalLinkage,
505 nullptr, "__start_hwasan_globals");
506 Start->setVisibility(GlobalValue::HiddenVisibility);
507 auto *Stop =
508 new GlobalVariable(M, Int8Arr0Ty, true, GlobalVariable::ExternalLinkage,
509 nullptr, "__stop_hwasan_globals");
510 Stop->setVisibility(GlobalValue::HiddenVisibility);
511
512 // Null-terminated so actually 8 bytes, which are required in order to align
513 // the note properly.
514 auto *Name = ConstantDataArray::get(*C, "LLVM\0\0\0");
515
516 auto *NoteTy = StructType::get(Int32Ty, Int32Ty, Int32Ty, Name->getType(),
518 auto *Note =
519 new GlobalVariable(M, NoteTy, /*isConstant=*/true,
521 Note->setSection(".note.hwasan.globals");
522 Note->setComdat(NoteComdat);
523 Note->setAlignment(Align(4));
524
525 // The pointers in the note need to be relative so that the note ends up being
526 // placed in rodata, which is the standard location for notes.
527 auto CreateRelPtr = [&](Constant *Ptr) {
531 Int32Ty);
532 };
533 Note->setInitializer(ConstantStruct::getAnon(
534 {ConstantInt::get(Int32Ty, 8), // n_namesz
535 ConstantInt::get(Int32Ty, 8), // n_descsz
537 Name, CreateRelPtr(Start), CreateRelPtr(Stop)}));
539
540 // Create a zero-length global in hwasan_globals so that the linker will
541 // always create start and stop symbols.
542 auto *Dummy = new GlobalVariable(
543 M, Int8Arr0Ty, /*isConstantGlobal*/ true, GlobalVariable::PrivateLinkage,
544 Constant::getNullValue(Int8Arr0Ty), "hwasan.dummy.global");
545 Dummy->setSection("hwasan_globals");
546 Dummy->setComdat(NoteComdat);
547 Dummy->setMetadata(LLVMContext::MD_associated,
549 appendToCompilerUsed(M, Dummy);
550}
551
552/// Module-level initialization.
553///
554/// inserts a call to __hwasan_init to the module's constructor list.
555void HWAddressSanitizer::initializeModule() {
556 LLVM_DEBUG(dbgs() << "Init " << M.getName() << "\n");
557 auto &DL = M.getDataLayout();
558
559 TargetTriple = Triple(M.getTargetTriple());
560
561 // x86_64 currently has two modes:
562 // - Intel LAM (default)
563 // - pointer aliasing (heap only)
564 bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64;
565 UsePageAliases = shouldUsePageAliases(TargetTriple);
566 InstrumentWithCalls = shouldInstrumentWithCalls(TargetTriple);
567 InstrumentStack = shouldInstrumentStack(TargetTriple);
568 DetectUseAfterScope = shouldDetectUseAfterScope(TargetTriple);
569 PointerTagShift = IsX86_64 ? 57 : 56;
570 TagMaskByte = IsX86_64 ? 0x3F : 0xFF;
571
572 Mapping.init(TargetTriple, InstrumentWithCalls);
573
574 C = &(M.getContext());
575 IRBuilder<> IRB(*C);
576 IntptrTy = IRB.getIntPtrTy(DL);
577 Int8PtrTy = IRB.getInt8PtrTy();
578 Int8Ty = IRB.getInt8Ty();
579 Int32Ty = IRB.getInt32Ty();
580
581 HwasanCtorFunction = nullptr;
582
583 // Older versions of Android do not have the required runtime support for
584 // short granules, global or personality function instrumentation. On other
585 // platforms we currently require using the latest version of the runtime.
586 bool NewRuntime =
587 !TargetTriple.isAndroid() || !TargetTriple.isAndroidVersionLT(30);
588
589 UseShortGranules =
590 ClUseShortGranules.getNumOccurrences() ? ClUseShortGranules : NewRuntime;
591 OutlinedChecks =
592 (TargetTriple.isAArch64() || TargetTriple.isRISCV64()) &&
593 TargetTriple.isOSBinFormatELF() &&
594 (ClInlineAllChecks.getNumOccurrences() ? !ClInlineAllChecks : !Recover);
595
596 InlineFastPath =
597 (ClInlineFastPathChecks.getNumOccurrences()
599 : !(TargetTriple.isAndroid() ||
600 TargetTriple.isOSFuchsia())); // These platforms may prefer less
601 // inlining to reduce binary size.
602
603 if (ClMatchAllTag.getNumOccurrences()) {
604 if (ClMatchAllTag != -1) {
605 MatchAllTag = ClMatchAllTag & 0xFF;
606 }
607 } else if (CompileKernel) {
608 MatchAllTag = 0xFF;
609 }
610 UseMatchAllCallback = !CompileKernel && MatchAllTag.has_value();
611
612 // If we don't have personality function support, fall back to landing pads.
613 InstrumentLandingPads = ClInstrumentLandingPads.getNumOccurrences()
615 : !NewRuntime;
616
617 if (!CompileKernel) {
618 createHwasanCtorComdat();
619 bool InstrumentGlobals =
620 ClGlobals.getNumOccurrences() ? ClGlobals : NewRuntime;
621
622 if (InstrumentGlobals && !UsePageAliases)
623 instrumentGlobals();
624
625 bool InstrumentPersonalityFunctions =
626 ClInstrumentPersonalityFunctions.getNumOccurrences()
628 : NewRuntime;
629 if (InstrumentPersonalityFunctions)
630 instrumentPersonalityFunctions();
631 }
632
633 if (!TargetTriple.isAndroid()) {
634 Constant *C = M.getOrInsertGlobal("__hwasan_tls", IntptrTy, [&] {
635 auto *GV = new GlobalVariable(M, IntptrTy, /*isConstant=*/false,
637 "__hwasan_tls", nullptr,
640 return GV;
641 });
642 ThreadPtrGlobal = cast<GlobalVariable>(C);
643 }
644}
645
646void HWAddressSanitizer::initializeCallbacks(Module &M) {
647 IRBuilder<> IRB(*C);
648 const std::string MatchAllStr = UseMatchAllCallback ? "_match_all" : "";
649 FunctionType *HwasanMemoryAccessCallbackSizedFnTy,
650 *HwasanMemoryAccessCallbackFnTy, *HwasanMemTransferFnTy,
651 *HwasanMemsetFnTy;
652 if (UseMatchAllCallback) {
653 HwasanMemoryAccessCallbackSizedFnTy =
654 FunctionType::get(VoidTy, {IntptrTy, IntptrTy, Int8Ty}, false);
655 HwasanMemoryAccessCallbackFnTy =
656 FunctionType::get(VoidTy, {IntptrTy, Int8Ty}, false);
657 HwasanMemTransferFnTy = FunctionType::get(
658 Int8PtrTy, {Int8PtrTy, Int8PtrTy, IntptrTy, Int8Ty}, false);
659 HwasanMemsetFnTy = FunctionType::get(
660 Int8PtrTy, {Int8PtrTy, Int32Ty, IntptrTy, Int8Ty}, false);
661 } else {
662 HwasanMemoryAccessCallbackSizedFnTy =
663 FunctionType::get(VoidTy, {IntptrTy, IntptrTy}, false);
664 HwasanMemoryAccessCallbackFnTy =
665 FunctionType::get(VoidTy, {IntptrTy}, false);
666 HwasanMemTransferFnTy =
667 FunctionType::get(Int8PtrTy, {Int8PtrTy, Int8PtrTy, IntptrTy}, false);
668 HwasanMemsetFnTy =
669 FunctionType::get(Int8PtrTy, {Int8PtrTy, Int32Ty, IntptrTy}, false);
670 }
671
672 for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
673 const std::string TypeStr = AccessIsWrite ? "store" : "load";
674 const std::string EndingStr = Recover ? "_noabort" : "";
675
676 HwasanMemoryAccessCallbackSized[AccessIsWrite] = M.getOrInsertFunction(
677 ClMemoryAccessCallbackPrefix + TypeStr + "N" + MatchAllStr + EndingStr,
678 HwasanMemoryAccessCallbackSizedFnTy);
679
680 for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
681 AccessSizeIndex++) {
682 HwasanMemoryAccessCallback[AccessIsWrite][AccessSizeIndex] =
683 M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + TypeStr +
684 itostr(1ULL << AccessSizeIndex) +
685 MatchAllStr + EndingStr,
686 HwasanMemoryAccessCallbackFnTy);
687 }
688 }
689
690 const std::string MemIntrinCallbackPrefix =
691 (CompileKernel && !ClKasanMemIntrinCallbackPrefix)
692 ? std::string("")
694
695 HwasanMemmove = M.getOrInsertFunction(
696 MemIntrinCallbackPrefix + "memmove" + MatchAllStr, HwasanMemTransferFnTy);
697 HwasanMemcpy = M.getOrInsertFunction(
698 MemIntrinCallbackPrefix + "memcpy" + MatchAllStr, HwasanMemTransferFnTy);
699 HwasanMemset = M.getOrInsertFunction(
700 MemIntrinCallbackPrefix + "memset" + MatchAllStr, HwasanMemsetFnTy);
701
702 HwasanTagMemoryFunc = M.getOrInsertFunction("__hwasan_tag_memory", VoidTy,
703 Int8PtrTy, Int8Ty, IntptrTy);
704 HwasanGenerateTagFunc =
705 M.getOrInsertFunction("__hwasan_generate_tag", Int8Ty);
706
707 HwasanRecordFrameRecordFunc =
708 M.getOrInsertFunction("__hwasan_add_frame_record", VoidTy, Int64Ty);
709
710 ShadowGlobal =
711 M.getOrInsertGlobal("__hwasan_shadow", ArrayType::get(Int8Ty, 0));
712
713 HwasanHandleVfork =
714 M.getOrInsertFunction("__hwasan_handle_vfork", VoidTy, IntptrTy);
715}
716
717Value *HWAddressSanitizer::getOpaqueNoopCast(IRBuilder<> &IRB, Value *Val) {
718 // An empty inline asm with input reg == output reg.
719 // An opaque no-op cast, basically.
720 // This prevents code bloat as a result of rematerializing trivial definitions
721 // such as constants or global addresses at every load and store.
722 InlineAsm *Asm =
723 InlineAsm::get(FunctionType::get(Int8PtrTy, {Val->getType()}, false),
724 StringRef(""), StringRef("=r,0"),
725 /*hasSideEffects=*/false);
726 return IRB.CreateCall(Asm, {Val}, ".hwasan.shadow");
727}
728
729Value *HWAddressSanitizer::getDynamicShadowIfunc(IRBuilder<> &IRB) {
730 return getOpaqueNoopCast(IRB, ShadowGlobal);
731}
732
733Value *HWAddressSanitizer::getShadowNonTls(IRBuilder<> &IRB) {
734 if (Mapping.Offset != kDynamicShadowSentinel)
735 return getOpaqueNoopCast(
737 ConstantInt::get(IntptrTy, Mapping.Offset), Int8PtrTy));
738
739 if (Mapping.InGlobal)
740 return getDynamicShadowIfunc(IRB);
741
742 Value *GlobalDynamicAddress =
745 return IRB.CreateLoad(Int8PtrTy, GlobalDynamicAddress);
746}
747
748bool HWAddressSanitizer::ignoreAccess(Instruction *Inst, Value *Ptr) {
749 // Do not instrument accesses from different address spaces; we cannot deal
750 // with them.
751 Type *PtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
752 if (PtrTy->getPointerAddressSpace() != 0)
753 return true;
754
755 // Ignore swifterror addresses.
756 // swifterror memory addresses are mem2reg promoted by instruction
757 // selection. As such they cannot have regular uses like an instrumentation
758 // function and it makes no sense to track them as memory.
759 if (Ptr->isSwiftError())
760 return true;
761
762 if (findAllocaForValue(Ptr)) {
763 if (!InstrumentStack)
764 return true;
765 if (SSI && SSI->stackAccessIsSafe(*Inst))
766 return true;
767 }
768 return false;
769}
770
771void HWAddressSanitizer::getInterestingMemoryOperands(
773 // Skip memory accesses inserted by another instrumentation.
774 if (I->hasMetadata(LLVMContext::MD_nosanitize))
775 return;
776
777 // Do not instrument the load fetching the dynamic shadow address.
778 if (ShadowBase == I)
779 return;
780
781 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
782 if (!ClInstrumentReads || ignoreAccess(I, LI->getPointerOperand()))
783 return;
784 Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
785 LI->getType(), LI->getAlign());
786 } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
787 if (!ClInstrumentWrites || ignoreAccess(I, SI->getPointerOperand()))
788 return;
789 Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,
790 SI->getValueOperand()->getType(), SI->getAlign());
791 } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
792 if (!ClInstrumentAtomics || ignoreAccess(I, RMW->getPointerOperand()))
793 return;
794 Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,
795 RMW->getValOperand()->getType(), std::nullopt);
796 } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
797 if (!ClInstrumentAtomics || ignoreAccess(I, XCHG->getPointerOperand()))
798 return;
799 Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
800 XCHG->getCompareOperand()->getType(),
801 std::nullopt);
802 } else if (auto *CI = dyn_cast<CallInst>(I)) {
803 for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
804 if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
805 ignoreAccess(I, CI->getArgOperand(ArgNo)))
806 continue;
807 Type *Ty = CI->getParamByValType(ArgNo);
808 Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
809 }
810 }
811}
812
814 if (LoadInst *LI = dyn_cast<LoadInst>(I))
815 return LI->getPointerOperandIndex();
816 if (StoreInst *SI = dyn_cast<StoreInst>(I))
817 return SI->getPointerOperandIndex();
818 if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I))
819 return RMW->getPointerOperandIndex();
820 if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I))
821 return XCHG->getPointerOperandIndex();
822 report_fatal_error("Unexpected instruction");
823 return -1;
824}
825
827 size_t Res = llvm::countr_zero(TypeSize / 8);
829 return Res;
830}
831
832void HWAddressSanitizer::untagPointerOperand(Instruction *I, Value *Addr) {
833 if (TargetTriple.isAArch64() || TargetTriple.getArch() == Triple::x86_64 ||
834 TargetTriple.isRISCV64())
835 return;
836
837 IRBuilder<> IRB(I);
838 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
839 Value *UntaggedPtr =
840 IRB.CreateIntToPtr(untagPointer(IRB, AddrLong), Addr->getType());
841 I->setOperand(getPointerOperandIndex(I), UntaggedPtr);
842}
843
844Value *HWAddressSanitizer::memToShadow(Value *Mem, IRBuilder<> &IRB) {
845 // Mem >> Scale
846 Value *Shadow = IRB.CreateLShr(Mem, Mapping.Scale);
847 if (Mapping.Offset == 0)
848 return IRB.CreateIntToPtr(Shadow, Int8PtrTy);
849 // (Mem >> Scale) + Offset
850 return IRB.CreateGEP(Int8Ty, ShadowBase, Shadow);
851}
852
853int64_t HWAddressSanitizer::getAccessInfo(bool IsWrite,
854 unsigned AccessSizeIndex) {
855 return (CompileKernel << HWASanAccessInfo::CompileKernelShift) |
856 (MatchAllTag.has_value() << HWASanAccessInfo::HasMatchAllShift) |
857 (MatchAllTag.value_or(0) << HWASanAccessInfo::MatchAllShift) |
858 (Recover << HWASanAccessInfo::RecoverShift) |
859 (IsWrite << HWASanAccessInfo::IsWriteShift) |
860 (AccessSizeIndex << HWASanAccessInfo::AccessSizeShift);
861}
862
863HWAddressSanitizer::ShadowTagCheckInfo
864HWAddressSanitizer::insertShadowTagCheck(Value *Ptr,
865 Instruction *InsertBefore) {
866 ShadowTagCheckInfo R;
867
868 IRBuilder<> IRB(InsertBefore);
869
870 R.PtrLong = IRB.CreatePointerCast(Ptr, IntptrTy);
871 R.PtrTag =
872 IRB.CreateTrunc(IRB.CreateLShr(R.PtrLong, PointerTagShift), Int8Ty);
873 R.AddrLong = untagPointer(IRB, R.PtrLong);
874 Value *Shadow = memToShadow(R.AddrLong, IRB);
875 R.MemTag = IRB.CreateLoad(Int8Ty, Shadow);
876 Value *TagMismatch = IRB.CreateICmpNE(R.PtrTag, R.MemTag);
877
878 if (MatchAllTag.has_value()) {
879 Value *TagNotIgnored = IRB.CreateICmpNE(
880 R.PtrTag, ConstantInt::get(R.PtrTag->getType(), *MatchAllTag));
881 TagMismatch = IRB.CreateAnd(TagMismatch, TagNotIgnored);
882 }
883
884 R.TagMismatchTerm =
885 SplitBlockAndInsertIfThen(TagMismatch, InsertBefore, false,
886 MDBuilder(*C).createBranchWeights(1, 100000));
887
888 return R;
889}
890
891void HWAddressSanitizer::instrumentMemAccessOutline(Value *Ptr, bool IsWrite,
892 unsigned AccessSizeIndex,
893 Instruction *InsertBefore) {
894 assert(!UsePageAliases);
895 const int64_t AccessInfo = getAccessInfo(IsWrite, AccessSizeIndex);
896
897 if (InlineFastPath)
898 InsertBefore = insertShadowTagCheck(Ptr, InsertBefore).TagMismatchTerm;
899
900 IRBuilder<> IRB(InsertBefore);
902 Ptr = IRB.CreateBitCast(Ptr, Int8PtrTy);
904 M, UseShortGranules
905 ? Intrinsic::hwasan_check_memaccess_shortgranules
906 : Intrinsic::hwasan_check_memaccess),
907 {ShadowBase, Ptr, ConstantInt::get(Int32Ty, AccessInfo)});
908}
909
910void HWAddressSanitizer::instrumentMemAccessInline(Value *Ptr, bool IsWrite,
911 unsigned AccessSizeIndex,
912 Instruction *InsertBefore) {
913 assert(!UsePageAliases);
914 const int64_t AccessInfo = getAccessInfo(IsWrite, AccessSizeIndex);
915
916 ShadowTagCheckInfo TCI = insertShadowTagCheck(Ptr, InsertBefore);
917
918 IRBuilder<> IRB(TCI.TagMismatchTerm);
919 Value *OutOfShortGranuleTagRange =
920 IRB.CreateICmpUGT(TCI.MemTag, ConstantInt::get(Int8Ty, 15));
921 Instruction *CheckFailTerm = SplitBlockAndInsertIfThen(
922 OutOfShortGranuleTagRange, TCI.TagMismatchTerm, !Recover,
923 MDBuilder(*C).createBranchWeights(1, 100000));
924
925 IRB.SetInsertPoint(TCI.TagMismatchTerm);
926 Value *PtrLowBits = IRB.CreateTrunc(IRB.CreateAnd(TCI.PtrLong, 15), Int8Ty);
927 PtrLowBits = IRB.CreateAdd(
928 PtrLowBits, ConstantInt::get(Int8Ty, (1 << AccessSizeIndex) - 1));
929 Value *PtrLowBitsOOB = IRB.CreateICmpUGE(PtrLowBits, TCI.MemTag);
930 SplitBlockAndInsertIfThen(PtrLowBitsOOB, TCI.TagMismatchTerm, false,
931 MDBuilder(*C).createBranchWeights(1, 100000),
932 (DomTreeUpdater *)nullptr, nullptr,
933 CheckFailTerm->getParent());
934
935 IRB.SetInsertPoint(TCI.TagMismatchTerm);
936 Value *InlineTagAddr = IRB.CreateOr(TCI.AddrLong, 15);
937 InlineTagAddr = IRB.CreateIntToPtr(InlineTagAddr, Int8PtrTy);
938 Value *InlineTag = IRB.CreateLoad(Int8Ty, InlineTagAddr);
939 Value *InlineTagMismatch = IRB.CreateICmpNE(TCI.PtrTag, InlineTag);
940 SplitBlockAndInsertIfThen(InlineTagMismatch, TCI.TagMismatchTerm, false,
941 MDBuilder(*C).createBranchWeights(1, 100000),
942 (DomTreeUpdater *)nullptr, nullptr,
943 CheckFailTerm->getParent());
944
945 IRB.SetInsertPoint(CheckFailTerm);
946 InlineAsm *Asm;
947 switch (TargetTriple.getArch()) {
948 case Triple::x86_64:
949 // The signal handler will find the data address in rdi.
951 FunctionType::get(VoidTy, {TCI.PtrLong->getType()}, false),
952 "int3\nnopl " +
953 itostr(0x40 + (AccessInfo & HWASanAccessInfo::RuntimeMask)) +
954 "(%rax)",
955 "{rdi}",
956 /*hasSideEffects=*/true);
957 break;
958 case Triple::aarch64:
960 // The signal handler will find the data address in x0.
962 FunctionType::get(VoidTy, {TCI.PtrLong->getType()}, false),
963 "brk #" + itostr(0x900 + (AccessInfo & HWASanAccessInfo::RuntimeMask)),
964 "{x0}",
965 /*hasSideEffects=*/true);
966 break;
967 case Triple::riscv64:
968 // The signal handler will find the data address in x10.
970 FunctionType::get(VoidTy, {TCI.PtrLong->getType()}, false),
971 "ebreak\naddiw x0, x11, " +
972 itostr(0x40 + (AccessInfo & HWASanAccessInfo::RuntimeMask)),
973 "{x10}",
974 /*hasSideEffects=*/true);
975 break;
976 default:
977 report_fatal_error("unsupported architecture");
978 }
979 IRB.CreateCall(Asm, TCI.PtrLong);
980 if (Recover)
981 cast<BranchInst>(CheckFailTerm)
982 ->setSuccessor(0, TCI.TagMismatchTerm->getParent());
983}
984
985bool HWAddressSanitizer::ignoreMemIntrinsic(MemIntrinsic *MI) {
986 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
987 return (!ClInstrumentWrites || ignoreAccess(MTI, MTI->getDest())) &&
988 (!ClInstrumentReads || ignoreAccess(MTI, MTI->getSource()));
989 }
990 if (isa<MemSetInst>(MI))
991 return !ClInstrumentWrites || ignoreAccess(MI, MI->getDest());
992 return false;
993}
994
995void HWAddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
996 IRBuilder<> IRB(MI);
997 if (isa<MemTransferInst>(MI)) {
999 IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
1000 IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()),
1001 IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)};
1002
1003 if (UseMatchAllCallback)
1004 Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
1005 IRB.CreateCall(isa<MemMoveInst>(MI) ? HwasanMemmove : HwasanMemcpy, Args);
1006 } else if (isa<MemSetInst>(MI)) {
1008 IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
1009 IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
1010 IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)};
1011 if (UseMatchAllCallback)
1012 Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
1013 IRB.CreateCall(HwasanMemset, Args);
1014 }
1015 MI->eraseFromParent();
1016}
1017
1018bool HWAddressSanitizer::instrumentMemAccess(InterestingMemoryOperand &O) {
1019 Value *Addr = O.getPtr();
1020
1021 LLVM_DEBUG(dbgs() << "Instrumenting: " << O.getInsn() << "\n");
1022
1023 if (O.MaybeMask)
1024 return false; // FIXME
1025
1026 IRBuilder<> IRB(O.getInsn());
1027 if (!O.TypeStoreSize.isScalable() && isPowerOf2_64(O.TypeStoreSize) &&
1028 (O.TypeStoreSize / 8 <= (1ULL << (kNumberOfAccessSizes - 1))) &&
1029 (!O.Alignment || *O.Alignment >= Mapping.getObjectAlignment() ||
1030 *O.Alignment >= O.TypeStoreSize / 8)) {
1031 size_t AccessSizeIndex = TypeSizeToSizeIndex(O.TypeStoreSize);
1032 if (InstrumentWithCalls) {
1034 if (UseMatchAllCallback)
1035 Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
1036 IRB.CreateCall(HwasanMemoryAccessCallback[O.IsWrite][AccessSizeIndex],
1037 Args);
1038 } else if (OutlinedChecks) {
1039 instrumentMemAccessOutline(Addr, O.IsWrite, AccessSizeIndex, O.getInsn());
1040 } else {
1041 instrumentMemAccessInline(Addr, O.IsWrite, AccessSizeIndex, O.getInsn());
1042 }
1043 } else {
1045 IRB.CreatePointerCast(Addr, IntptrTy),
1046 IRB.CreateUDiv(IRB.CreateTypeSize(IntptrTy, O.TypeStoreSize),
1047 ConstantInt::get(IntptrTy, 8))};
1048 if (UseMatchAllCallback)
1049 Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
1050 IRB.CreateCall(HwasanMemoryAccessCallbackSized[O.IsWrite], Args);
1051 }
1052 untagPointerOperand(O.getInsn(), Addr);
1053
1054 return true;
1055}
1056
1057void HWAddressSanitizer::tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag,
1058 size_t Size) {
1059 size_t AlignedSize = alignTo(Size, Mapping.getObjectAlignment());
1060 if (!UseShortGranules)
1061 Size = AlignedSize;
1062
1063 Tag = IRB.CreateTrunc(Tag, Int8Ty);
1064 if (InstrumentWithCalls) {
1065 IRB.CreateCall(HwasanTagMemoryFunc,
1066 {IRB.CreatePointerCast(AI, Int8PtrTy), Tag,
1067 ConstantInt::get(IntptrTy, AlignedSize)});
1068 } else {
1069 size_t ShadowSize = Size >> Mapping.Scale;
1070 Value *AddrLong = untagPointer(IRB, IRB.CreatePointerCast(AI, IntptrTy));
1071 Value *ShadowPtr = memToShadow(AddrLong, IRB);
1072 // If this memset is not inlined, it will be intercepted in the hwasan
1073 // runtime library. That's OK, because the interceptor skips the checks if
1074 // the address is in the shadow region.
1075 // FIXME: the interceptor is not as fast as real memset. Consider lowering
1076 // llvm.memset right here into either a sequence of stores, or a call to
1077 // hwasan_tag_memory.
1078 if (ShadowSize)
1079 IRB.CreateMemSet(ShadowPtr, Tag, ShadowSize, Align(1));
1080 if (Size != AlignedSize) {
1081 const uint8_t SizeRemainder = Size % Mapping.getObjectAlignment().value();
1082 IRB.CreateStore(ConstantInt::get(Int8Ty, SizeRemainder),
1083 IRB.CreateConstGEP1_32(Int8Ty, ShadowPtr, ShadowSize));
1085 Int8Ty, IRB.CreatePointerCast(AI, Int8PtrTy),
1086 AlignedSize - 1));
1087 }
1088 }
1089}
1090
1091unsigned HWAddressSanitizer::retagMask(unsigned AllocaNo) {
1092 if (TargetTriple.getArch() == Triple::x86_64)
1093 return AllocaNo & TagMaskByte;
1094
1095 // A list of 8-bit numbers that have at most one run of non-zero bits.
1096 // x = x ^ (mask << 56) can be encoded as a single armv8 instruction for these
1097 // masks.
1098 // The list does not include the value 255, which is used for UAR.
1099 //
1100 // Because we are more likely to use earlier elements of this list than later
1101 // ones, it is sorted in increasing order of probability of collision with a
1102 // mask allocated (temporally) nearby. The program that generated this list
1103 // can be found at:
1104 // https://github.com/google/sanitizers/blob/master/hwaddress-sanitizer/sort_masks.py
1105 static const unsigned FastMasks[] = {
1106 0, 128, 64, 192, 32, 96, 224, 112, 240, 48, 16, 120,
1107 248, 56, 24, 8, 124, 252, 60, 28, 12, 4, 126, 254,
1108 62, 30, 14, 6, 2, 127, 63, 31, 15, 7, 3, 1};
1109 return FastMasks[AllocaNo % std::size(FastMasks)];
1110}
1111
1112Value *HWAddressSanitizer::applyTagMask(IRBuilder<> &IRB, Value *OldTag) {
1113 if (TagMaskByte == 0xFF)
1114 return OldTag; // No need to clear the tag byte.
1115 return IRB.CreateAnd(OldTag,
1116 ConstantInt::get(OldTag->getType(), TagMaskByte));
1117}
1118
1119Value *HWAddressSanitizer::getNextTagWithCall(IRBuilder<> &IRB) {
1120 return IRB.CreateZExt(IRB.CreateCall(HwasanGenerateTagFunc), IntptrTy);
1121}
1122
1123Value *HWAddressSanitizer::getStackBaseTag(IRBuilder<> &IRB) {
1125 return nullptr;
1126 if (StackBaseTag)
1127 return StackBaseTag;
1128 // Extract some entropy from the stack pointer for the tags.
1129 // Take bits 20..28 (ASLR entropy) and xor with bits 0..8 (these differ
1130 // between functions).
1131 Value *StackPointerLong = getSP(IRB);
1132 Value *StackTag =
1133 applyTagMask(IRB, IRB.CreateXor(StackPointerLong,
1134 IRB.CreateLShr(StackPointerLong, 20)));
1135 StackTag->setName("hwasan.stack.base.tag");
1136 return StackTag;
1137}
1138
1139Value *HWAddressSanitizer::getAllocaTag(IRBuilder<> &IRB, Value *StackTag,
1140 unsigned AllocaNo) {
1142 return getNextTagWithCall(IRB);
1143 return IRB.CreateXor(
1144 StackTag, ConstantInt::get(StackTag->getType(), retagMask(AllocaNo)));
1145}
1146
1147Value *HWAddressSanitizer::getUARTag(IRBuilder<> &IRB) {
1148 Value *StackPointerLong = getSP(IRB);
1149 Value *UARTag =
1150 applyTagMask(IRB, IRB.CreateLShr(StackPointerLong, PointerTagShift));
1151
1152 UARTag->setName("hwasan.uar.tag");
1153 return UARTag;
1154}
1155
1156// Add a tag to an address.
1157Value *HWAddressSanitizer::tagPointer(IRBuilder<> &IRB, Type *Ty,
1158 Value *PtrLong, Value *Tag) {
1159 assert(!UsePageAliases);
1160 Value *TaggedPtrLong;
1161 if (CompileKernel) {
1162 // Kernel addresses have 0xFF in the most significant byte.
1163 Value *ShiftedTag =
1164 IRB.CreateOr(IRB.CreateShl(Tag, PointerTagShift),
1165 ConstantInt::get(IntptrTy, (1ULL << PointerTagShift) - 1));
1166 TaggedPtrLong = IRB.CreateAnd(PtrLong, ShiftedTag);
1167 } else {
1168 // Userspace can simply do OR (tag << PointerTagShift);
1169 Value *ShiftedTag = IRB.CreateShl(Tag, PointerTagShift);
1170 TaggedPtrLong = IRB.CreateOr(PtrLong, ShiftedTag);
1171 }
1172 return IRB.CreateIntToPtr(TaggedPtrLong, Ty);
1173}
1174
1175// Remove tag from an address.
1176Value *HWAddressSanitizer::untagPointer(IRBuilder<> &IRB, Value *PtrLong) {
1177 assert(!UsePageAliases);
1178 Value *UntaggedPtrLong;
1179 if (CompileKernel) {
1180 // Kernel addresses have 0xFF in the most significant byte.
1181 UntaggedPtrLong =
1182 IRB.CreateOr(PtrLong, ConstantInt::get(PtrLong->getType(),
1183 TagMaskByte << PointerTagShift));
1184 } else {
1185 // Userspace addresses have 0x00.
1186 UntaggedPtrLong = IRB.CreateAnd(
1187 PtrLong, ConstantInt::get(PtrLong->getType(),
1188 ~(TagMaskByte << PointerTagShift)));
1189 }
1190 return UntaggedPtrLong;
1191}
1192
1193Value *HWAddressSanitizer::getHwasanThreadSlotPtr(IRBuilder<> &IRB, Type *Ty) {
1194 Module *M = IRB.GetInsertBlock()->getParent()->getParent();
1195 if (TargetTriple.isAArch64() && TargetTriple.isAndroid()) {
1196 // Android provides a fixed TLS slot for sanitizers. See TLS_SLOT_SANITIZER
1197 // in Bionic's libc/private/bionic_tls.h.
1198 Function *ThreadPointerFunc =
1199 Intrinsic::getDeclaration(M, Intrinsic::thread_pointer);
1200 Value *SlotPtr = IRB.CreatePointerCast(
1201 IRB.CreateConstGEP1_32(Int8Ty, IRB.CreateCall(ThreadPointerFunc), 0x30),
1202 Ty->getPointerTo(0));
1203 return SlotPtr;
1204 }
1205 if (ThreadPtrGlobal)
1206 return ThreadPtrGlobal;
1207
1208 return nullptr;
1209}
1210
1211Value *HWAddressSanitizer::getPC(IRBuilder<> &IRB) {
1212 if (TargetTriple.getArch() == Triple::aarch64)
1213 return readRegister(IRB, "pc");
1214 return IRB.CreatePtrToInt(IRB.GetInsertBlock()->getParent(), IntptrTy);
1215}
1216
1217Value *HWAddressSanitizer::getSP(IRBuilder<> &IRB) {
1218 if (!CachedSP) {
1219 // FIXME: use addressofreturnaddress (but implement it in aarch64 backend
1220 // first).
1221 Function *F = IRB.GetInsertBlock()->getParent();
1222 Module *M = F->getParent();
1223 auto *GetStackPointerFn = Intrinsic::getDeclaration(
1224 M, Intrinsic::frameaddress,
1225 IRB.getInt8PtrTy(M->getDataLayout().getAllocaAddrSpace()));
1226 CachedSP = IRB.CreatePtrToInt(
1227 IRB.CreateCall(GetStackPointerFn, {Constant::getNullValue(Int32Ty)}),
1228 IntptrTy);
1229 }
1230 return CachedSP;
1231}
1232
1233Value *HWAddressSanitizer::getFrameRecordInfo(IRBuilder<> &IRB) {
1234 // Prepare ring buffer data.
1235 Value *PC = getPC(IRB);
1236 Value *SP = getSP(IRB);
1237
1238 // Mix SP and PC.
1239 // Assumptions:
1240 // PC is 0x0000PPPPPPPPPPPP (48 bits are meaningful, others are zero)
1241 // SP is 0xsssssssssssSSSS0 (4 lower bits are zero)
1242 // We only really need ~20 lower non-zero bits (SSSS), so we mix like this:
1243 // 0xSSSSPPPPPPPPPPPP
1244 SP = IRB.CreateShl(SP, 44);
1245 return IRB.CreateOr(PC, SP);
1246}
1247
1248void HWAddressSanitizer::emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord) {
1249 if (!Mapping.InTls)
1250 ShadowBase = getShadowNonTls(IRB);
1251 else if (!WithFrameRecord && TargetTriple.isAndroid())
1252 ShadowBase = getDynamicShadowIfunc(IRB);
1253
1254 if (!WithFrameRecord && ShadowBase)
1255 return;
1256
1257 Value *SlotPtr = nullptr;
1258 Value *ThreadLong = nullptr;
1259 Value *ThreadLongMaybeUntagged = nullptr;
1260
1261 auto getThreadLongMaybeUntagged = [&]() {
1262 if (!SlotPtr)
1263 SlotPtr = getHwasanThreadSlotPtr(IRB, IntptrTy);
1264 if (!ThreadLong)
1265 ThreadLong = IRB.CreateLoad(IntptrTy, SlotPtr);
1266 // Extract the address field from ThreadLong. Unnecessary on AArch64 with
1267 // TBI.
1268 return TargetTriple.isAArch64() ? ThreadLong
1269 : untagPointer(IRB, ThreadLong);
1270 };
1271
1272 if (WithFrameRecord) {
1273 switch (ClRecordStackHistory) {
1274 case libcall: {
1275 // Emit a runtime call into hwasan rather than emitting instructions for
1276 // recording stack history.
1277 Value *FrameRecordInfo = getFrameRecordInfo(IRB);
1278 IRB.CreateCall(HwasanRecordFrameRecordFunc, {FrameRecordInfo});
1279 break;
1280 }
1281 case instr: {
1282 ThreadLongMaybeUntagged = getThreadLongMaybeUntagged();
1283
1284 StackBaseTag = IRB.CreateAShr(ThreadLong, 3);
1285
1286 // Store data to ring buffer.
1287 Value *FrameRecordInfo = getFrameRecordInfo(IRB);
1288 Value *RecordPtr = IRB.CreateIntToPtr(ThreadLongMaybeUntagged,
1289 IntptrTy->getPointerTo(0));
1290 IRB.CreateStore(FrameRecordInfo, RecordPtr);
1291
1292 // Update the ring buffer. Top byte of ThreadLong defines the size of the
1293 // buffer in pages, it must be a power of two, and the start of the buffer
1294 // must be aligned by twice that much. Therefore wrap around of the ring
1295 // buffer is simply Addr &= ~((ThreadLong >> 56) << 12).
1296 // The use of AShr instead of LShr is due to
1297 // https://bugs.llvm.org/show_bug.cgi?id=39030
1298 // Runtime library makes sure not to use the highest bit.
1299 Value *WrapMask = IRB.CreateXor(
1300 IRB.CreateShl(IRB.CreateAShr(ThreadLong, 56), 12, "", true, true),
1301 ConstantInt::get(IntptrTy, (uint64_t)-1));
1302 Value *ThreadLongNew = IRB.CreateAnd(
1303 IRB.CreateAdd(ThreadLong, ConstantInt::get(IntptrTy, 8)), WrapMask);
1304 IRB.CreateStore(ThreadLongNew, SlotPtr);
1305 break;
1306 }
1307 case none: {
1309 "A stack history recording mode should've been selected.");
1310 }
1311 }
1312 }
1313
1314 if (!ShadowBase) {
1315 if (!ThreadLongMaybeUntagged)
1316 ThreadLongMaybeUntagged = getThreadLongMaybeUntagged();
1317
1318 // Get shadow base address by aligning RecordPtr up.
1319 // Note: this is not correct if the pointer is already aligned.
1320 // Runtime library will make sure this never happens.
1321 ShadowBase = IRB.CreateAdd(
1322 IRB.CreateOr(
1323 ThreadLongMaybeUntagged,
1324 ConstantInt::get(IntptrTy, (1ULL << kShadowBaseAlignment) - 1)),
1325 ConstantInt::get(IntptrTy, 1), "hwasan.shadow");
1326 ShadowBase = IRB.CreateIntToPtr(ShadowBase, Int8PtrTy);
1327 }
1328}
1329
1330Value *HWAddressSanitizer::readRegister(IRBuilder<> &IRB, StringRef Name) {
1331 Module *M = IRB.GetInsertBlock()->getParent()->getParent();
1332 Function *ReadRegister =
1333 Intrinsic::getDeclaration(M, Intrinsic::read_register, IntptrTy);
1334 MDNode *MD = MDNode::get(*C, {MDString::get(*C, Name)});
1335 Value *Args[] = {MetadataAsValue::get(*C, MD)};
1336 return IRB.CreateCall(ReadRegister, Args);
1337}
1338
1339bool HWAddressSanitizer::instrumentLandingPads(
1340 SmallVectorImpl<Instruction *> &LandingPadVec) {
1341 for (auto *LP : LandingPadVec) {
1342 IRBuilder<> IRB(LP->getNextNode());
1343 IRB.CreateCall(
1344 HwasanHandleVfork,
1345 {readRegister(IRB, (TargetTriple.getArch() == Triple::x86_64) ? "rsp"
1346 : "sp")});
1347 }
1348 return true;
1349}
1350
1352 auto *II = dyn_cast<IntrinsicInst>(V);
1353 return II && II->isLifetimeStartOrEnd();
1354}
1355
1356bool HWAddressSanitizer::instrumentStack(memtag::StackInfo &SInfo,
1357 Value *StackTag, Value *UARTag,
1358 const DominatorTree &DT,
1359 const PostDominatorTree &PDT,
1360 const LoopInfo &LI) {
1361 // Ideally, we want to calculate tagged stack base pointer, and rewrite all
1362 // alloca addresses using that. Unfortunately, offsets are not known yet
1363 // (unless we use ASan-style mega-alloca). Instead we keep the base tag in a
1364 // temp, shift-OR it into each alloca address and xor with the retag mask.
1365 // This generates one extra instruction per alloca use.
1366 unsigned int I = 0;
1367
1368 for (auto &KV : SInfo.AllocasToInstrument) {
1369 auto N = I++;
1370 auto *AI = KV.first;
1371 memtag::AllocaInfo &Info = KV.second;
1372 IRBuilder<> IRB(AI->getNextNode());
1373
1374 // Replace uses of the alloca with tagged address.
1375 Value *Tag = getAllocaTag(IRB, StackTag, N);
1376 Value *AILong = IRB.CreatePointerCast(AI, IntptrTy);
1377 Value *AINoTagLong = untagPointer(IRB, AILong);
1378 Value *Replacement = tagPointer(IRB, AI->getType(), AINoTagLong, Tag);
1379 std::string Name =
1380 AI->hasName() ? AI->getName().str() : "alloca." + itostr(N);
1381 Replacement->setName(Name + ".hwasan");
1382
1383 size_t Size = memtag::getAllocaSizeInBytes(*AI);
1384 size_t AlignedSize = alignTo(Size, Mapping.getObjectAlignment());
1385
1386 Value *AICast = IRB.CreatePointerCast(AI, Int8PtrTy);
1387
1388 auto HandleLifetime = [&](IntrinsicInst *II) {
1389 // Set the lifetime intrinsic to cover the whole alloca. This reduces the
1390 // set of assumptions we need to make about the lifetime. Without this we
1391 // would need to ensure that we can track the lifetime pointer to a
1392 // constant offset from the alloca, and would still need to change the
1393 // size to include the extra alignment we use for the untagging to make
1394 // the size consistent.
1395 //
1396 // The check for standard lifetime below makes sure that we have exactly
1397 // one set of start / end in any execution (i.e. the ends are not
1398 // reachable from each other), so this will not cause any problems.
1399 II->setArgOperand(0, ConstantInt::get(Int64Ty, AlignedSize));
1400 II->setArgOperand(1, AICast);
1401 };
1402 llvm::for_each(Info.LifetimeStart, HandleLifetime);
1403 llvm::for_each(Info.LifetimeEnd, HandleLifetime);
1404
1405 AI->replaceUsesWithIf(Replacement, [AICast, AILong](const Use &U) {
1406 auto *User = U.getUser();
1407 return User != AILong && User != AICast && !isLifetimeIntrinsic(User);
1408 });
1409
1410 for (auto *DDI : Info.DbgVariableIntrinsics) {
1411 // Prepend "tag_offset, N" to the dwarf expression.
1412 // Tag offset logically applies to the alloca pointer, and it makes sense
1413 // to put it at the beginning of the expression.
1415 retagMask(N)};
1416 for (size_t LocNo = 0; LocNo < DDI->getNumVariableLocationOps(); ++LocNo)
1417 if (DDI->getVariableLocationOp(LocNo) == AI)
1418 DDI->setExpression(DIExpression::appendOpsToArg(DDI->getExpression(),
1419 NewOps, LocNo));
1420 }
1421
1422 auto TagEnd = [&](Instruction *Node) {
1423 IRB.SetInsertPoint(Node);
1424 // When untagging, use the `AlignedSize` because we need to set the tags
1425 // for the entire alloca to original. If we used `Size` here, we would
1426 // keep the last granule tagged, and store zero in the last byte of the
1427 // last granule, due to how short granules are implemented.
1428 tagAlloca(IRB, AI, UARTag, AlignedSize);
1429 };
1430 // Calls to functions that may return twice (e.g. setjmp) confuse the
1431 // postdominator analysis, and will leave us to keep memory tagged after
1432 // function return. Work around this by always untagging at every return
1433 // statement if return_twice functions are called.
1434 bool StandardLifetime =
1435 SInfo.UnrecognizedLifetimes.empty() &&
1436 memtag::isStandardLifetime(Info.LifetimeStart, Info.LifetimeEnd, &DT,
1437 &LI, ClMaxLifetimes) &&
1438 !SInfo.CallsReturnTwice;
1439 if (DetectUseAfterScope && StandardLifetime) {
1440 IntrinsicInst *Start = Info.LifetimeStart[0];
1441 IRB.SetInsertPoint(Start->getNextNode());
1442 tagAlloca(IRB, AI, Tag, Size);
1443 if (!memtag::forAllReachableExits(DT, PDT, LI, Start, Info.LifetimeEnd,
1444 SInfo.RetVec, TagEnd)) {
1445 for (auto *End : Info.LifetimeEnd)
1446 End->eraseFromParent();
1447 }
1448 } else {
1449 tagAlloca(IRB, AI, Tag, Size);
1450 for (auto *RI : SInfo.RetVec)
1451 TagEnd(RI);
1452 // We inserted tagging outside of the lifetimes, so we have to remove
1453 // them.
1454 for (auto &II : Info.LifetimeStart)
1455 II->eraseFromParent();
1456 for (auto &II : Info.LifetimeEnd)
1457 II->eraseFromParent();
1458 }
1459 memtag::alignAndPadAlloca(Info, Mapping.getObjectAlignment());
1460 }
1461 for (auto &I : SInfo.UnrecognizedLifetimes)
1462 I->eraseFromParent();
1463 return true;
1464}
1465
1466void HWAddressSanitizer::sanitizeFunction(Function &F,
1468 if (&F == HwasanCtorFunction)
1469 return;
1470
1471 if (!F.hasFnAttribute(Attribute::SanitizeHWAddress))
1472 return;
1473
1474 LLVM_DEBUG(dbgs() << "Function: " << F.getName() << "\n");
1475
1476 SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
1477 SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
1478 SmallVector<Instruction *, 8> LandingPadVec;
1479
1480 memtag::StackInfoBuilder SIB(SSI);
1481 for (auto &Inst : instructions(F)) {
1482 if (InstrumentStack) {
1483 SIB.visit(Inst);
1484 }
1485
1486 if (InstrumentLandingPads && isa<LandingPadInst>(Inst))
1487 LandingPadVec.push_back(&Inst);
1488
1489 getInterestingMemoryOperands(&Inst, OperandsToInstrument);
1490
1491 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst))
1492 if (!ignoreMemIntrinsic(MI))
1493 IntrinToInstrument.push_back(MI);
1494 }
1495
1496 memtag::StackInfo &SInfo = SIB.get();
1497
1498 initializeCallbacks(*F.getParent());
1499
1500 if (!LandingPadVec.empty())
1501 instrumentLandingPads(LandingPadVec);
1502
1503 if (SInfo.AllocasToInstrument.empty() && F.hasPersonalityFn() &&
1504 F.getPersonalityFn()->getName() == kHwasanPersonalityThunkName) {
1505 // __hwasan_personality_thunk is a no-op for functions without an
1506 // instrumented stack, so we can drop it.
1507 F.setPersonalityFn(nullptr);
1508 }
1509
1510 if (SInfo.AllocasToInstrument.empty() && OperandsToInstrument.empty() &&
1511 IntrinToInstrument.empty())
1512 return;
1513
1514 assert(!ShadowBase);
1515
1516 Instruction *InsertPt = &*F.getEntryBlock().begin();
1517 IRBuilder<> EntryIRB(InsertPt);
1518 emitPrologue(EntryIRB,
1519 /*WithFrameRecord*/ ClRecordStackHistory != none &&
1520 Mapping.WithFrameRecord &&
1521 !SInfo.AllocasToInstrument.empty());
1522
1523 if (!SInfo.AllocasToInstrument.empty()) {
1526 const LoopInfo &LI = FAM.getResult<LoopAnalysis>(F);
1527 Value *StackTag = getStackBaseTag(EntryIRB);
1528 Value *UARTag = getUARTag(EntryIRB);
1529 instrumentStack(SInfo, StackTag, UARTag, DT, PDT, LI);
1530 }
1531
1532 // If we split the entry block, move any allocas that were originally in the
1533 // entry block back into the entry block so that they aren't treated as
1534 // dynamic allocas.
1535 if (EntryIRB.GetInsertBlock() != &F.getEntryBlock()) {
1536 InsertPt = &*F.getEntryBlock().begin();
1537 for (Instruction &I :
1538 llvm::make_early_inc_range(*EntryIRB.GetInsertBlock())) {
1539 if (auto *AI = dyn_cast<AllocaInst>(&I))
1540 if (isa<ConstantInt>(AI->getArraySize()))
1541 I.moveBefore(InsertPt);
1542 }
1543 }
1544
1545 for (auto &Operand : OperandsToInstrument)
1546 instrumentMemAccess(Operand);
1547
1548 if (ClInstrumentMemIntrinsics && !IntrinToInstrument.empty()) {
1549 for (auto *Inst : IntrinToInstrument)
1550 instrumentMemIntrinsic(Inst);
1551 }
1552
1553 ShadowBase = nullptr;
1554 StackBaseTag = nullptr;
1555 CachedSP = nullptr;
1556}
1557
1558void HWAddressSanitizer::instrumentGlobal(GlobalVariable *GV, uint8_t Tag) {
1559 assert(!UsePageAliases);
1560 Constant *Initializer = GV->getInitializer();
1561 uint64_t SizeInBytes =
1562 M.getDataLayout().getTypeAllocSize(Initializer->getType());
1563 uint64_t NewSize = alignTo(SizeInBytes, Mapping.getObjectAlignment());
1564 if (SizeInBytes != NewSize) {
1565 // Pad the initializer out to the next multiple of 16 bytes and add the
1566 // required short granule tag.
1567 std::vector<uint8_t> Init(NewSize - SizeInBytes, 0);
1568 Init.back() = Tag;
1570 Initializer = ConstantStruct::getAnon({Initializer, Padding});
1571 }
1572
1573 auto *NewGV = new GlobalVariable(M, Initializer->getType(), GV->isConstant(),
1574 GlobalValue::ExternalLinkage, Initializer,
1575 GV->getName() + ".hwasan");
1576 NewGV->copyAttributesFrom(GV);
1577 NewGV->setLinkage(GlobalValue::PrivateLinkage);
1578 NewGV->copyMetadata(GV, 0);
1579 NewGV->setAlignment(
1580 std::max(GV->getAlign().valueOrOne(), Mapping.getObjectAlignment()));
1581
1582 // It is invalid to ICF two globals that have different tags. In the case
1583 // where the size of the global is a multiple of the tag granularity the
1584 // contents of the globals may be the same but the tags (i.e. symbol values)
1585 // may be different, and the symbols are not considered during ICF. In the
1586 // case where the size is not a multiple of the granularity, the short granule
1587 // tags would discriminate two globals with different tags, but there would
1588 // otherwise be nothing stopping such a global from being incorrectly ICF'd
1589 // with an uninstrumented (i.e. tag 0) global that happened to have the short
1590 // granule tag in the last byte.
1591 NewGV->setUnnamedAddr(GlobalValue::UnnamedAddr::None);
1592
1593 // Descriptor format (assuming little-endian):
1594 // bytes 0-3: relative address of global
1595 // bytes 4-6: size of global (16MB ought to be enough for anyone, but in case
1596 // it isn't, we create multiple descriptors)
1597 // byte 7: tag
1598 auto *DescriptorTy = StructType::get(Int32Ty, Int32Ty);
1599 const uint64_t MaxDescriptorSize = 0xfffff0;
1600 for (uint64_t DescriptorPos = 0; DescriptorPos < SizeInBytes;
1601 DescriptorPos += MaxDescriptorSize) {
1602 auto *Descriptor =
1603 new GlobalVariable(M, DescriptorTy, true, GlobalValue::PrivateLinkage,
1604 nullptr, GV->getName() + ".hwasan.descriptor");
1605 auto *GVRelPtr = ConstantExpr::getTrunc(
1608 ConstantExpr::getPtrToInt(NewGV, Int64Ty),
1609 ConstantExpr::getPtrToInt(Descriptor, Int64Ty)),
1610 ConstantInt::get(Int64Ty, DescriptorPos)),
1611 Int32Ty);
1612 uint32_t Size = std::min(SizeInBytes - DescriptorPos, MaxDescriptorSize);
1613 auto *SizeAndTag = ConstantInt::get(Int32Ty, Size | (uint32_t(Tag) << 24));
1614 Descriptor->setComdat(NewGV->getComdat());
1615 Descriptor->setInitializer(ConstantStruct::getAnon({GVRelPtr, SizeAndTag}));
1616 Descriptor->setSection("hwasan_globals");
1617 Descriptor->setMetadata(LLVMContext::MD_associated,
1619 appendToCompilerUsed(M, Descriptor);
1620 }
1621
1624 ConstantExpr::getPtrToInt(NewGV, Int64Ty),
1625 ConstantInt::get(Int64Ty, uint64_t(Tag) << PointerTagShift)),
1626 GV->getType());
1627 auto *Alias = GlobalAlias::create(GV->getValueType(), GV->getAddressSpace(),
1628 GV->getLinkage(), "", Aliasee, &M);
1629 Alias->setVisibility(GV->getVisibility());
1630 Alias->takeName(GV);
1631 GV->replaceAllUsesWith(Alias);
1632 GV->eraseFromParent();
1633}
1634
1635void HWAddressSanitizer::instrumentGlobals() {
1636 std::vector<GlobalVariable *> Globals;
1637 for (GlobalVariable &GV : M.globals()) {
1639 continue;
1640
1641 if (GV.isDeclarationForLinker() || GV.getName().startswith("llvm.") ||
1642 GV.isThreadLocal())
1643 continue;
1644
1645 // Common symbols can't have aliases point to them, so they can't be tagged.
1646 if (GV.hasCommonLinkage())
1647 continue;
1648
1649 // Globals with custom sections may be used in __start_/__stop_ enumeration,
1650 // which would be broken both by adding tags and potentially by the extra
1651 // padding/alignment that we insert.
1652 if (GV.hasSection())
1653 continue;
1654
1655 Globals.push_back(&GV);
1656 }
1657
1658 MD5 Hasher;
1659 Hasher.update(M.getSourceFileName());
1660 MD5::MD5Result Hash;
1661 Hasher.final(Hash);
1662 uint8_t Tag = Hash[0];
1663
1664 assert(TagMaskByte >= 16);
1665
1666 for (GlobalVariable *GV : Globals) {
1667 // Don't allow globals to be tagged with something that looks like a
1668 // short-granule tag, otherwise we lose inter-granule overflow detection, as
1669 // the fast path shadow-vs-address check succeeds.
1670 if (Tag < 16 || Tag > TagMaskByte)
1671 Tag = 16;
1672 instrumentGlobal(GV, Tag++);
1673 }
1674}
1675
1676void HWAddressSanitizer::instrumentPersonalityFunctions() {
1677 // We need to untag stack frames as we unwind past them. That is the job of
1678 // the personality function wrapper, which either wraps an existing
1679 // personality function or acts as a personality function on its own. Each
1680 // function that has a personality function or that can be unwound past has
1681 // its personality function changed to a thunk that calls the personality
1682 // function wrapper in the runtime.
1684 for (Function &F : M) {
1685 if (F.isDeclaration() || !F.hasFnAttribute(Attribute::SanitizeHWAddress))
1686 continue;
1687
1688 if (F.hasPersonalityFn()) {
1689 PersonalityFns[F.getPersonalityFn()->stripPointerCasts()].push_back(&F);
1690 } else if (!F.hasFnAttribute(Attribute::NoUnwind)) {
1691 PersonalityFns[nullptr].push_back(&F);
1692 }
1693 }
1694
1695 if (PersonalityFns.empty())
1696 return;
1697
1698 FunctionCallee HwasanPersonalityWrapper = M.getOrInsertFunction(
1699 "__hwasan_personality_wrapper", Int32Ty, Int32Ty, Int32Ty, Int64Ty,
1700 Int8PtrTy, Int8PtrTy, Int8PtrTy, Int8PtrTy, Int8PtrTy);
1701 FunctionCallee UnwindGetGR = M.getOrInsertFunction("_Unwind_GetGR", VoidTy);
1702 FunctionCallee UnwindGetCFA = M.getOrInsertFunction("_Unwind_GetCFA", VoidTy);
1703
1704 for (auto &P : PersonalityFns) {
1705 std::string ThunkName = kHwasanPersonalityThunkName;
1706 if (P.first)
1707 ThunkName += ("." + P.first->getName()).str();
1708 FunctionType *ThunkFnTy = FunctionType::get(
1709 Int32Ty, {Int32Ty, Int32Ty, Int64Ty, Int8PtrTy, Int8PtrTy}, false);
1710 bool IsLocal = P.first && (!isa<GlobalValue>(P.first) ||
1711 cast<GlobalValue>(P.first)->hasLocalLinkage());
1712 auto *ThunkFn = Function::Create(ThunkFnTy,
1715 ThunkName, &M);
1716 if (!IsLocal) {
1717 ThunkFn->setVisibility(GlobalValue::HiddenVisibility);
1718 ThunkFn->setComdat(M.getOrInsertComdat(ThunkName));
1719 }
1720
1721 auto *BB = BasicBlock::Create(*C, "entry", ThunkFn);
1722 IRBuilder<> IRB(BB);
1723 CallInst *WrapperCall = IRB.CreateCall(
1724 HwasanPersonalityWrapper,
1725 {ThunkFn->getArg(0), ThunkFn->getArg(1), ThunkFn->getArg(2),
1726 ThunkFn->getArg(3), ThunkFn->getArg(4),
1727 P.first ? IRB.CreateBitCast(P.first, Int8PtrTy)
1728 : Constant::getNullValue(Int8PtrTy),
1729 IRB.CreateBitCast(UnwindGetGR.getCallee(), Int8PtrTy),
1730 IRB.CreateBitCast(UnwindGetCFA.getCallee(), Int8PtrTy)});
1731 WrapperCall->setTailCall();
1732 IRB.CreateRet(WrapperCall);
1733
1734 for (Function *F : P.second)
1735 F->setPersonalityFn(ThunkFn);
1736 }
1737}
1738
1739void HWAddressSanitizer::ShadowMapping::init(Triple &TargetTriple,
1740 bool InstrumentWithCalls) {
1741 Scale = kDefaultShadowScale;
1742 if (TargetTriple.isOSFuchsia()) {
1743 // Fuchsia is always PIE, which means that the beginning of the address
1744 // space is always available.
1745 InGlobal = false;
1746 InTls = false;
1747 Offset = 0;
1748 WithFrameRecord = true;
1749 } else if (ClMappingOffset.getNumOccurrences() > 0) {
1750 InGlobal = false;
1751 InTls = false;
1753 WithFrameRecord = false;
1754 } else if (ClEnableKhwasan || InstrumentWithCalls) {
1755 InGlobal = false;
1756 InTls = false;
1757 Offset = 0;
1758 WithFrameRecord = false;
1759 } else if (ClWithIfunc) {
1760 InGlobal = true;
1761 InTls = false;
1763 WithFrameRecord = false;
1764 } else if (ClWithTls) {
1765 InGlobal = false;
1766 InTls = true;
1768 WithFrameRecord = true;
1769 } else {
1770 InGlobal = false;
1771 InTls = false;
1773 WithFrameRecord = false;
1774 }
1775}
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static cl::opt< size_t > ClMaxLifetimes("stack-tagging-max-lifetimes-for-alloca", cl::Hidden, cl::init(3), cl::ReallyHidden, cl::desc("How many lifetime ends to handle for a single alloca."), cl::Optional)
static const uint64_t kDefaultShadowScale
static cl::opt< std::string > ClMemoryAccessCallbackPrefix("asan-memory-access-callback-prefix", cl::desc("Prefix for memory access callbacks"), cl::Hidden, cl::init("__asan_"))
static cl::opt< bool > ClInstrumentWrites("asan-instrument-writes", cl::desc("instrument write instructions"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClInstrumentByval("asan-instrument-byval", cl::desc("instrument byval call arguments"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClGlobals("asan-globals", cl::desc("Handle global objects"), cl::Hidden, cl::init(true))
static const uint64_t kDynamicShadowSentinel
static cl::opt< bool > ClInstrumentAtomics("asan-instrument-atomics", cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden, cl::init(true))
static const size_t kNumberOfAccessSizes
static cl::opt< bool > ClInstrumentReads("asan-instrument-reads", cl::desc("instrument read instructions"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClWithIfunc("asan-with-ifunc", cl::desc("Access dynamic shadow through an ifunc global on " "platforms that support this"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClKasanMemIntrinCallbackPrefix("asan-kernel-mem-intrinsic-prefix", cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden, cl::init(false))
static cl::opt< uint64_t > ClMappingOffset("asan-mapping-offset", cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"), cl::Hidden, cl::init(0))
This file contains the simple types necessary to represent the attributes associated with functions a...
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
#define clEnumVal(ENUMVAL, DESC)
Definition: CommandLine.h:678
This file contains the declarations for the subclasses of Constant, which represent the different fla...
#define LLVM_DEBUG(X)
Definition: Debug.h:101
This file contains constants used for implementing Dwarf debug support.
uint64_t Addr
std::string Name
uint64_t Size
bool End
Definition: ELF_riscv.cpp:469
This is the interface for a simple mod/ref and alias analysis over globals.
static size_t TypeSizeToSizeIndex(uint32_t TypeSize)
static cl::opt< bool > ClInstrumentWrites("hwasan-instrument-writes", cl::desc("instrument write instructions"), cl::Hidden, cl::init(true))
static const size_t kDefaultShadowScale
static cl::opt< RecordStackHistoryMode > ClRecordStackHistory("hwasan-record-stack-history", cl::desc("Record stack frames with tagged allocations in a thread-local " "ring buffer"), cl::values(clEnumVal(none, "Do not record stack ring history"), clEnumVal(instr, "Insert instructions into the prologue for " "storing into the stack ring buffer directly"), clEnumVal(libcall, "Add a call to __hwasan_add_frame_record for " "storing into the stack ring buffer")), cl::Hidden, cl::init(instr))
const char kHwasanModuleCtorName[]
static cl::opt< int > ClMatchAllTag("hwasan-match-all-tag", cl::desc("don't report bad accesses via pointers with this tag"), cl::Hidden, cl::init(-1))
static cl::opt< bool > ClUseAfterScope("hwasan-use-after-scope", cl::desc("detect use after scope within function"), cl::Hidden, cl::init(true))
const char kHwasanNoteName[]
static const unsigned kShadowBaseAlignment
static cl::opt< bool > ClGenerateTagsWithCalls("hwasan-generate-tags-with-calls", cl::desc("generate new tags with runtime library calls"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClInstrumentReads("hwasan-instrument-reads", cl::desc("instrument read instructions"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClInstrumentWithCalls("hwasan-instrument-with-calls", cl::desc("instrument reads and writes with callbacks"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClUseStackSafety("hwasan-use-stack-safety", cl::Hidden, cl::init(true), cl::Hidden, cl::desc("Use Stack Safety analysis results"), cl::Optional)
static cl::opt< bool > ClInstrumentAtomics("hwasan-instrument-atomics", cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClInstrumentStack("hwasan-instrument-stack", cl::desc("instrument stack (allocas)"), cl::Hidden, cl::init(true))
static cl::opt< uint64_t > ClMappingOffset("hwasan-mapping-offset", cl::desc("HWASan shadow mapping offset [EXPERIMENTAL]"), cl::Hidden, cl::init(0))
static cl::opt< bool > ClRecover("hwasan-recover", cl::desc("Enable recovery mode (continue-after-error)."), cl::Hidden, cl::init(false))
static cl::opt< bool > ClEnableKhwasan("hwasan-kernel", cl::desc("Enable KernelHWAddressSanitizer instrumentation"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClInlineAllChecks("hwasan-inline-all-checks", cl::desc("inline all checks"), cl::Hidden, cl::init(false))
static const uint64_t kDynamicShadowSentinel
static cl::opt< bool > ClUsePageAliases("hwasan-experimental-use-page-aliases", cl::desc("Use page aliasing in HWASan"), cl::Hidden, cl::init(false))
static cl::opt< std::string > ClMemoryAccessCallbackPrefix("hwasan-memory-access-callback-prefix", cl::desc("Prefix for memory access callbacks"), cl::Hidden, cl::init("__hwasan_"))
static cl::opt< bool > ClInstrumentMemIntrinsics("hwasan-instrument-mem-intrinsics", cl::desc("instrument memory intrinsics"), cl::Hidden, cl::init(true))
static const size_t kNumberOfAccessSizes
static cl::opt< bool > ClWithTls("hwasan-with-tls", cl::desc("Access dynamic shadow through an thread-local pointer on " "platforms that support this"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClGlobals("hwasan-globals", cl::desc("Instrument globals"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClKasanMemIntrinCallbackPrefix("hwasan-kernel-mem-intrinsic-prefix", cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClInstrumentByval("hwasan-instrument-byval", cl::desc("instrument byval arguments"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClUseShortGranules("hwasan-use-short-granules", cl::desc("use short granules in allocas and outlined checks"), cl::Hidden, cl::init(false))
const char kHwasanShadowMemoryDynamicAddress[]
static unsigned getPointerOperandIndex(Instruction *I)
static cl::opt< bool > ClInlineFastPathChecks("hwasan-inline-fast-path-checks", cl::desc("inline all checks"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClInstrumentPersonalityFunctions("hwasan-instrument-personality-functions", cl::desc("instrument personality functions"), cl::Hidden)
const char kHwasanInitName[]
RecordStackHistoryMode
static cl::opt< bool > ClInstrumentLandingPads("hwasan-instrument-landing-pads", cl::desc("instrument landing pads"), cl::Hidden, cl::init(false))
static cl::opt< size_t > ClMaxLifetimes("hwasan-max-lifetimes-for-alloca", cl::Hidden, cl::init(3), cl::ReallyHidden, cl::desc("How many lifetime ends to handle for a single alloca."), cl::Optional)
const char kHwasanPersonalityThunkName[]
static cl::opt< bool > ClWithIfunc("hwasan-with-ifunc", cl::desc("Access dynamic shadow through an ifunc global on " "platforms that support this"), cl::Hidden, cl::init(false))
IRTranslator LLVM IR MI
Select target instructions out of generic instructions
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file implements a map that provides insertion order iteration.
Module.h This file contains the declarations for the Module class.
IntegerType * Int32Ty
#define P(N)
FunctionAnalysisManager FAM
ModuleAnalysisManager MAM
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file contains some templates that are useful if you are working with the STL at all.
raw_pwrite_stream & OS
This file defines the SmallVector class.
This file contains some functions that are useful when dealing with strings.
an instruction to allocate memory on the stack
Definition: Instructions.h:58
PointerType * getType() const
Overload to return most specific pointer type.
Definition: Instructions.h:100
const Value * getArraySize() const
Get the number of elements allocated.
Definition: Instructions.h:96
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:620
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:774
static ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
Definition: Type.cpp:648
An instruction that atomically checks whether a specified value is in a memory location,...
Definition: Instructions.h:513
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:718
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition: BasicBlock.h:105
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:112
This class represents a function call, abstracting a target machine's calling convention.
void setTailCall(bool IsTc=true)
static Constant * get(LLVMContext &Context, ArrayRef< ElementTy > Elts)
get() constructor - Return a constant with array type with an element count and element type matching...
Definition: Constants.h:690
static Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2199
static Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
Definition: Constants.cpp:2573
static Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2185
static Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
Definition: Constants.cpp:2566
static Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2075
static Constant * get(Type *Ty, uint64_t V, bool IsSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:888
static Constant * getAnon(ArrayRef< Constant * > V, bool Packed=false)
Return an anonymous struct that has the specified elements.
Definition: Constants.h:461
This is an important base class in LLVM.
Definition: Constant.h:41
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:356
static DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
Analysis pass which computes a DominatorTree.
Definition: Dominators.h:279
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:166
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
Definition: DerivedTypes.h:165
static FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
static Function * Create(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Definition: Function.h:138
static GlobalAlias * create(Type *Ty, unsigned AddressSpace, LinkageTypes Linkage, const Twine &Name, Constant *Aliasee, Module *Parent)
If a parent module is specified, the alias is automatically inserted into the end of the specified mo...
Definition: Globals.cpp:506
MaybeAlign getAlign() const
Returns the alignment of the given variable or function.
Definition: GlobalObject.h:79
void setComdat(Comdat *C)
Definition: Globals.cpp:196
bool hasSection() const
Check if this global has a custom object file section.
Definition: GlobalObject.h:109
const SanitizerMetadata & getSanitizerMetadata() const
Definition: Globals.cpp:227
bool isThreadLocal() const
If the value is "Thread Local", its value isn't shared by the threads.
Definition: GlobalValue.h:259
VisibilityTypes getVisibility() const
Definition: GlobalValue.h:244
LinkageTypes getLinkage() const
Definition: GlobalValue.h:541
bool isDeclarationForLinker() const
Definition: GlobalValue.h:614
bool hasSanitizerMetadata() const
Definition: GlobalValue.h:351
unsigned getAddressSpace() const
Definition: GlobalValue.h:201
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:652
PointerType * getType() const
Global values are always pointers.
Definition: GlobalValue.h:290
@ HiddenVisibility
The GV is hidden.
Definition: GlobalValue.h:64
bool hasCommonLinkage() const
Definition: GlobalValue.h:527
@ PrivateLinkage
Like Internal, but omit from symbol table.
Definition: GlobalValue.h:56
@ InternalLinkage
Rename collisions when linking (static functions).
Definition: GlobalValue.h:55
@ ExternalLinkage
Externally visible function.
Definition: GlobalValue.h:48
@ LinkOnceODRLinkage
Same, but only replaced by something equivalent.
Definition: GlobalValue.h:51
Type * getValueType() const
Definition: GlobalValue.h:292
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
void eraseFromParent()
eraseFromParent - This method unlinks 'this' from the containing module and deletes it.
Definition: Globals.cpp:454
Analysis pass providing a never-invalidated alias analysis result.
PreservedAnalyses run(Module &M, ModuleAnalysisManager &MAM)
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:1993
Value * CreateConstGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0, const Twine &Name="")
Definition: IRBuilder.h:1877
Value * CreatePointerCast(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2135
CallInst * CreateMemSet(Value *Ptr, Value *Val, uint64_t Size, MaybeAlign Align, bool isVolatile=false, MDNode *TBAATag=nullptr, MDNode *ScopeTag=nullptr, MDNode *NoAliasTag=nullptr)
Create and insert a memset to the specified pointer and the specified value.
Definition: IRBuilder.h:586
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2084
Value * CreateTypeSize(Type *DstType, TypeSize Size)
Create an expression which evaluates to the number of units in Size at runtime.
Definition: IRBuilder.cpp:104
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition: IRBuilder.h:1428
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
Definition: IRBuilder.h:512
ReturnInst * CreateRet(Value *V)
Create a 'ret <val>' instruction.
Definition: IRBuilder.h:1086
BasicBlock * GetInsertBlock() const
Definition: IRBuilder.h:174
Value * CreateUDiv(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition: IRBuilder.h:1369
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2207
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:1997
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2089
Value * CreateICmpUGT(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2211
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Definition: IRBuilder.h:1786
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1407
PointerType * getInt8PtrTy(unsigned AddrSpace=0)
Fetch the type representing a pointer to an 8-bit integer value.
Definition: IRBuilder.h:560
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1466
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
Definition: IRBuilder.h:1799
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1318
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2079
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1488
Value * CreateICmpUGE(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2215
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
Definition: IRBuilder.h:2158
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition: IRBuilder.h:180
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args=std::nullopt, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2374
Value * CreateAShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition: IRBuilder.h:1447
Value * CreateXor(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1510
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", bool IsInBounds=false)
Definition: IRBuilder.h:1862
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:2628
static InlineAsm * get(FunctionType *Ty, StringRef AsmString, StringRef Constraints, bool hasSideEffects, bool isAlignStack=false, AsmDialect asmDialect=AD_ATT, bool canThrow=false)
InlineAsm::get - Return the specified uniqued inline asm string.
Definition: InlineAsm.cpp:43
An analysis over an "outer" IR unit that provides access to an analysis manager over an "inner" IR un...
Definition: PassManager.h:933
const BasicBlock * getParent() const
Definition: Instruction.h:90
void setSuccessor(unsigned Idx, BasicBlock *BB)
Update the specified successor to point at the provided block.
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:47
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
An instruction for reading from memory.
Definition: Instructions.h:177
Analysis pass that exposes the LoopInfo for a function.
Definition: LoopInfo.h:569
Definition: MD5.h:41
void update(ArrayRef< uint8_t > Data)
Updates the hash for the byte stream provided.
Definition: MD5.cpp:189
void final(MD5Result &Result)
Finishes off the hash and puts the result in result.
Definition: MD5.cpp:234
MDNode * createBranchWeights(uint32_t TrueWeight, uint32_t FalseWeight)
Return metadata containing two branch weights.
Definition: MDBuilder.cpp:37
Metadata node.
Definition: Metadata.h:950
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1416
static MDString * get(LLVMContext &Context, StringRef Str)
Definition: Metadata.cpp:499
This class implements a map that also provides access to all stored values in a deterministic order.
Definition: MapVector.h:36
bool empty() const
Definition: MapVector.h:79
This is the common base class for memset/memcpy/memmove.
This class wraps the llvm.memcpy/memmove intrinsics.
static MetadataAsValue * get(LLVMContext &Context, Metadata *MD)
Definition: Metadata.cpp:102
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
Constant * getOrInsertGlobal(StringRef Name, Type *Ty, function_ref< GlobalVariable *()> CreateGlobalCallback)
Look up the specified global in the module symbol table.
Definition: Module.cpp:205
Analysis pass which computes a PostDominatorTree.
PostDominatorTree Class - Concrete subclass of DominatorTree that is used to compute the post-dominat...
A set of analyses that are preserved following a run of a transformation pass.
Definition: PassManager.h:152
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
Definition: PassManager.h:155
void abandon()
Mark an analysis as abandoned.
Definition: PassManager.h:206
bool empty() const
Definition: SmallVector.h:94
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:577
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:941
void push_back(const T &Elt)
Definition: SmallVector.h:416
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1200
This pass performs the global (interprocedural) stack safety analysis (new pass manager).
An instruction for storing to memory.
Definition: Instructions.h:301
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
std::string str() const
str - Get the contents as an std::string.
Definition: StringRef.h:222
bool startswith(StringRef Prefix) const
Definition: StringRef.h:261
static StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
Definition: Type.cpp:374
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
bool isAndroidVersionLT(unsigned Major) const
Definition: Triple.h:727
bool isAndroid() const
Tests whether the target is Android.
Definition: Triple.h:725
@ aarch64_be
Definition: Triple.h:52
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition: Triple.h:355
bool isRISCV64() const
Tests whether the target is 64-bit RISC-V.
Definition: Triple.h:943
bool isAArch64() const
Tests whether the target is AArch64 (little and big endian).
Definition: Triple.h:865
bool isOSFuchsia() const
Definition: Triple.h:548
bool isOSBinFormatELF() const
Tests whether the OS uses the ELF binary format.
Definition: Triple.h:675
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
PointerType * getPointerTo(unsigned AddrSpace=0) const
Return a pointer to the current type.
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static Type * getVoidTy(LLVMContext &C)
static IntegerType * getInt32Ty(LLVMContext &C)
static IntegerType * getInt64Ty(LLVMContext &C)
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
static ValueAsMetadata * get(Value *V)
Definition: Metadata.cpp:394
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
void setName(const Twine &Name)
Change the name of the value.
Definition: Value.cpp:378
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:535
void replaceUsesWithIf(Value *New, llvm::function_ref< bool(Use &U)> ShouldReplace)
Go through the uses list for this definition and make each use point to "V" if the callback ShouldRep...
Definition: Value.cpp:543
bool hasName() const
Definition: Value.h:261
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
An efficient, type-erasing, non-owning reference to a callable.
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:289
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ NT_LLVM_HWASAN_GLOBALS
Definition: ELF.h:1625
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=std::nullopt)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:1422
@ ReallyHidden
Definition: CommandLine.h:139
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
Definition: CommandLine.h:705
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:445
@ DW_OP_LLVM_tag_offset
Only used in LLVM metadata.
Definition: Dwarf.h:143
bool isStandardLifetime(const SmallVectorImpl< IntrinsicInst * > &LifetimeStart, const SmallVectorImpl< IntrinsicInst * > &LifetimeEnd, const DominatorTree *DT, const LoopInfo *LI, size_t MaxLifetimes)
bool forAllReachableExits(const DominatorTree &DT, const PostDominatorTree &PDT, const LoopInfo &LI, const Instruction *Start, const SmallVectorImpl< IntrinsicInst * > &Ends, const SmallVectorImpl< Instruction * > &RetVec, llvm::function_ref< void(Instruction *)> Callback)
uint64_t getAllocaSizeInBytes(const AllocaInst &AI)
void alignAndPadAlloca(memtag::AllocaInfo &Info, llvm::Align Align)
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:440
UnaryFunction for_each(R &&Range, UnaryFunction F)
Provide wrappers to std::for_each which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1720
AllocaInst * findAllocaForValue(Value *V, bool OffsetZero=false)
Returns unique alloca where the value comes from, or nullptr.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition: STLExtras.h:666
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition: MathExtras.h:269
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition: bit.h:179
static bool isLifetimeIntrinsic(Intrinsic::ID ID)
Check if ID corresponds to a lifetime intrinsic.
std::pair< Function *, FunctionCallee > getOrCreateSanitizerCtorAndInitFunctions(Module &M, StringRef CtorName, StringRef InitName, ArrayRef< Type * > InitArgTypes, ArrayRef< Value * > InitArgs, function_ref< void(Function *, FunctionCallee)> FunctionsCreatedCallback, StringRef VersionCheckName=StringRef(), bool Weak=false)
Creates sanitizer constructor function lazily.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:156
void appendToCompilerUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.compiler.used list.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition: Alignment.h:155
void appendToGlobalCtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Append F to the list of global ctors of module M with the given Priority.
Definition: ModuleUtils.cpp:73
Instruction * SplitBlockAndInsertIfThen(Value *Cond, BasicBlock::iterator SplitBefore, bool Unreachable, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, BasicBlock *ThenBlock=nullptr)
Split the containing block at the specified instruction - everything before SplitBefore stays in the ...
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
Definition: Alignment.h:141
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition: PassManager.h:371
MapVector< AllocaInst *, AllocaInfo > AllocasToInstrument
SmallVector< Instruction *, 4 > UnrecognizedLifetimes
SmallVector< Instruction *, 8 > RetVec