//===- AddressSanitizer.cpp - memory error detector -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address basic correctness
// checker.
// Details of the algorithm:
// https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm
//
// FIXME: This sanitizer does not yet handle scalable vectors
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Comdat.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Debug.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iomanip>
#include <limits>
#include <sstream>
#include <string>
#include <tuple>

using namespace llvm;

#define DEBUG_TYPE "asan"

static const uint64_t kDefaultShadowScale = 3;
static const uint64_t kDefaultShadowOffset32 = 1ULL << 29;
static const uint64_t kDefaultShadowOffset64 = 1ULL << 44;
static const uint64_t kDynamicShadowSentinel =
    std::numeric_limits<uint64_t>::max();
static const uint64_t kSmallX86_64ShadowOffsetBase = 0x7FFFFFFF;  // < 2G.
static const uint64_t kSmallX86_64ShadowOffsetAlignMask = ~0xFFFULL;
static const uint64_t kLinuxKasan_ShadowOffset64 = 0xdffffc0000000000;
static const uint64_t kPPC64_ShadowOffset64 = 1ULL << 44;
static const uint64_t kSystemZ_ShadowOffset64 = 1ULL << 52;
static const uint64_t kMIPS_ShadowOffsetN32 = 1ULL << 29;
static const uint64_t kMIPS32_ShadowOffset32 = 0x0aaa0000;
static const uint64_t kMIPS64_ShadowOffset64 = 1ULL << 37;
static const uint64_t kAArch64_ShadowOffset64 = 1ULL << 36;
static const uint64_t kLoongArch64_ShadowOffset64 = 1ULL << 46;
static const uint64_t kRISCV64_ShadowOffset64 = kDynamicShadowSentinel;
static const uint64_t kFreeBSD_ShadowOffset32 = 1ULL << 30;
static const uint64_t kFreeBSD_ShadowOffset64 = 1ULL << 46;
static const uint64_t kFreeBSDAArch64_ShadowOffset64 = 1ULL << 47;
static const uint64_t kFreeBSDKasan_ShadowOffset64 = 0xdffff7c000000000;
static const uint64_t kNetBSD_ShadowOffset32 = 1ULL << 30;
static const uint64_t kNetBSD_ShadowOffset64 = 1ULL << 46;
static const uint64_t kNetBSDKasan_ShadowOffset64 = 0xdfff900000000000;
static const uint64_t kPS_ShadowOffset64 = 1ULL << 40;
static const uint64_t kWindowsShadowOffset32 = 3ULL << 28;
static const uint64_t kWebAssemblyShadowOffset = 0;

// The shadow memory space is dynamically allocated.
static const uint64_t kWindowsShadowOffset64 = kDynamicShadowSentinel;

static const size_t kMinStackMallocSize = 1 << 6;   // 64B
static const size_t kMaxStackMallocSize = 1 << 16;  // 64K
static const uintptr_t kCurrentStackFrameMagic = 0x41B58AB3;
static const uintptr_t kRetiredStackFrameMagic = 0x45E0360E;
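// Note: the runtime's fake-stack allocator serves frames from size classes;
// class N holds frames of 2^(6 + N) bytes, so class 0 matches the 64-byte
// minimum above and class 10 (kMaxAsanStackMallocSizeClass, defined below)
// matches the 64 KB maximum.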

const char kAsanModuleCtorName[] = "asan.module_ctor";
const char kAsanModuleDtorName[] = "asan.module_dtor";
static const uint64_t kAsanCtorAndDtorPriority = 1;
// On Emscripten, the system needs more than one priority for constructors.
static const uint64_t kAsanEmscriptenCtorAndDtorPriority = 50;
const char kAsanReportErrorTemplate[] = "__asan_report_";
const char kAsanRegisterGlobalsName[] = "__asan_register_globals";
const char kAsanUnregisterGlobalsName[] = "__asan_unregister_globals";
const char kAsanRegisterImageGlobalsName[] = "__asan_register_image_globals";
const char kAsanUnregisterImageGlobalsName[] =
    "__asan_unregister_image_globals";
const char kAsanRegisterElfGlobalsName[] = "__asan_register_elf_globals";
const char kAsanUnregisterElfGlobalsName[] = "__asan_unregister_elf_globals";
const char kAsanPoisonGlobalsName[] = "__asan_before_dynamic_init";
const char kAsanUnpoisonGlobalsName[] = "__asan_after_dynamic_init";
const char kAsanInitName[] = "__asan_init";
const char kAsanVersionCheckNamePrefix[] = "__asan_version_mismatch_check_v";
const char kAsanPtrCmp[] = "__sanitizer_ptr_cmp";
const char kAsanPtrSub[] = "__sanitizer_ptr_sub";
const char kAsanHandleNoReturnName[] = "__asan_handle_no_return";
static const int kMaxAsanStackMallocSizeClass = 10;
const char kAsanStackMallocNameTemplate[] = "__asan_stack_malloc_";
const char kAsanStackMallocAlwaysNameTemplate[] =
    "__asan_stack_malloc_always_";
const char kAsanStackFreeNameTemplate[] = "__asan_stack_free_";
const char kAsanGenPrefix[] = "___asan_gen_";
const char kODRGenPrefix[] = "__odr_asan_gen_";
const char kSanCovGenPrefix[] = "__sancov_gen_";
const char kAsanSetShadowPrefix[] = "__asan_set_shadow_";
const char kAsanPoisonStackMemoryName[] = "__asan_poison_stack_memory";
const char kAsanUnpoisonStackMemoryName[] = "__asan_unpoison_stack_memory";

// ASan version script has __asan_* wildcard. Triple underscore prevents a
// linker (gold) warning about attempting to export a local symbol.
const char kAsanGlobalsRegisteredFlagName[] = "___asan_globals_registered";

const char kAsanOptionDetectUseAfterReturn[] =
    "__asan_option_detect_stack_use_after_return";

const char kAsanShadowMemoryDynamicAddress[] =
    "__asan_shadow_memory_dynamic_address";

const char kAsanAllocaPoison[] = "__asan_alloca_poison";
const char kAsanAllocasUnpoison[] = "__asan_allocas_unpoison";

const char kAMDGPUAddressSharedName[] = "llvm.amdgcn.is.shared";
const char kAMDGPUAddressPrivateName[] = "llvm.amdgcn.is.private";
const char kAMDGPUBallotName[] = "llvm.amdgcn.ballot.i64";
const char kAMDGPUUnreachableName[] = "llvm.amdgcn.unreachable";

// Access sizes are powers of two: 1, 2, 4, 8, 16.
static const size_t kNumberOfAccessSizes = 5;

static const uint64_t kAllocaRzSize = 32;

// ASanAccessInfo implementation constants.
constexpr size_t kCompileKernelShift = 0;
constexpr size_t kCompileKernelMask = 0x1;
constexpr size_t kAccessSizeIndexShift = 1;
constexpr size_t kAccessSizeIndexMask = 0xf;
constexpr size_t kIsWriteShift = 5;
constexpr size_t kIsWriteMask = 0x1;
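// For example: an 8-byte user-mode write has AccessSizeIndex = log2(8) = 3,
// so its packed representation is
//   (1 << kIsWriteShift) | (3 << kAccessSizeIndexShift) |
//   (0 << kCompileKernelShift) = 0x26.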

// Command-line flags.

static cl::opt<bool> ClEnableKasan(
    "asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"),
    cl::Hidden, cl::init(false));

static cl::opt<bool> ClRecover(
    "asan-recover",
    cl::desc("Enable recovery mode (continue-after-error)."),
    cl::Hidden, cl::init(false));

static cl::opt<bool> ClInsertVersionCheck(
    "asan-guard-against-version-mismatch",
    cl::desc("Guard against compiler/runtime version mismatch."), cl::Hidden,
    cl::init(true));

// This flag may need to be replaced with -f[no-]asan-reads.
static cl::opt<bool> ClInstrumentReads("asan-instrument-reads",
                                       cl::desc("instrument read instructions"),
                                       cl::Hidden, cl::init(true));

static cl::opt<bool> ClInstrumentWrites(
    "asan-instrument-writes", cl::desc("instrument write instructions"),
    cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClUseStackSafety("asan-use-stack-safety", cl::Hidden, cl::init(true),
                     cl::desc("Use Stack Safety analysis results"),
                     cl::Optional);

static cl::opt<bool> ClInstrumentAtomics(
    "asan-instrument-atomics",
    cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
    cl::init(true));

static cl::opt<bool>
    ClInstrumentByval("asan-instrument-byval",
                      cl::desc("instrument byval call arguments"), cl::Hidden,
                      cl::init(true));

static cl::opt<bool> ClAlwaysSlowPath(
    "asan-always-slow-path",
    cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden,
    cl::init(false));

static cl::opt<bool> ClForceDynamicShadow(
    "asan-force-dynamic-shadow",
    cl::desc("Load shadow address into a local variable for each function"),
    cl::Hidden, cl::init(false));

static cl::opt<bool>
    ClWithIfunc("asan-with-ifunc",
                cl::desc("Access dynamic shadow through an ifunc global on "
                         "platforms that support this"),
                cl::Hidden, cl::init(true));

static cl::opt<int>
    ClShadowAddrSpace("asan-shadow-addr-space",
                      cl::desc("Address space for pointers to the shadow map"),
                      cl::Hidden, cl::init(0));

static cl::opt<bool> ClWithIfuncSuppressRemat(
    "asan-with-ifunc-suppress-remat",
    cl::desc("Suppress rematerialization of dynamic shadow address by passing "
             "it through inline asm in prologue."),
    cl::Hidden, cl::init(true));

// This flag limits the number of instructions to be instrumented
// in any given BB. Normally, this should be set to unlimited (INT_MAX),
// but due to http://llvm.org/bugs/show_bug.cgi?id=12652 we temporarily
// set it to 10000.
static cl::opt<int> ClMaxInsnsToInstrumentPerBB(
    "asan-max-ins-per-bb", cl::init(10000),
    cl::desc("maximal number of instructions to instrument in any given BB"),
    cl::Hidden);

// This flag may need to be replaced with -f[no]asan-stack.
static cl::opt<bool> ClStack("asan-stack", cl::desc("Handle stack memory"),
                             cl::Hidden, cl::init(true));

static cl::opt<uint32_t> ClMaxInlinePoisoningSize(
    "asan-max-inline-poisoning-size",
    cl::desc(
        "Inline shadow poisoning for blocks up to the given size in bytes."),
    cl::Hidden, cl::init(64));

static cl::opt<AsanDetectStackUseAfterReturnMode> ClUseAfterReturn(
    "asan-use-after-return",
    cl::desc("Sets the mode of detection for stack-use-after-return."),
    cl::values(
        clEnumValN(AsanDetectStackUseAfterReturnMode::Never, "never",
                   "Never detect stack use after return."),
        clEnumValN(
            AsanDetectStackUseAfterReturnMode::Runtime, "runtime",
            "Detect stack use after return if "
            "binary flag 'ASAN_OPTIONS=detect_stack_use_after_return' is set."),
        clEnumValN(AsanDetectStackUseAfterReturnMode::Always, "always",
                   "Always detect stack use after return.")),
    cl::Hidden, cl::init(AsanDetectStackUseAfterReturnMode::Runtime));

static cl::opt<bool> ClRedzoneByvalArgs("asan-redzone-byval-args",
                                        cl::desc("Create redzones for byval "
                                                 "arguments (extra copy "
                                                 "required)"), cl::Hidden,
                                        cl::init(true));

static cl::opt<bool> ClUseAfterScope("asan-use-after-scope",
                                     cl::desc("Check stack-use-after-scope"),
                                     cl::Hidden, cl::init(false));

// This flag may need to be replaced with -f[no]asan-globals.
static cl::opt<bool> ClGlobals("asan-globals",
                               cl::desc("Handle global objects"), cl::Hidden,
                               cl::init(true));

static cl::opt<bool> ClInitializers("asan-initialization-order",
                                    cl::desc("Handle C++ initializer order"),
                                    cl::Hidden, cl::init(true));

static cl::opt<bool> ClInvalidPointerPairs(
    "asan-detect-invalid-pointer-pair",
    cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden,
    cl::init(false));

static cl::opt<bool> ClInvalidPointerCmp(
    "asan-detect-invalid-pointer-cmp",
    cl::desc("Instrument <, <=, >, >= with pointer operands"), cl::Hidden,
    cl::init(false));

static cl::opt<bool> ClInvalidPointerSub(
    "asan-detect-invalid-pointer-sub",
    cl::desc("Instrument - operations with pointer operands"), cl::Hidden,
    cl::init(false));

static cl::opt<unsigned> ClRealignStack(
    "asan-realign-stack",
    cl::desc("Realign stack to the value of this flag (power of two)"),
    cl::Hidden, cl::init(32));

static cl::opt<int> ClInstrumentationWithCallsThreshold(
    "asan-instrumentation-with-call-threshold",
    cl::desc("If the function being instrumented contains more than "
             "this number of memory accesses, use callbacks instead of "
             "inline checks (-1 means never use callbacks)."),
    cl::Hidden, cl::init(7000));

static cl::opt<std::string> ClMemoryAccessCallbackPrefix(
    "asan-memory-access-callback-prefix",
    cl::desc("Prefix for memory access callbacks"), cl::Hidden,
    cl::init("__asan_"));

static cl::opt<bool> ClKasanMemIntrinCallbackPrefix(
    "asan-kernel-mem-intrinsic-prefix",
    cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden,
    cl::init(false));

static cl::opt<bool>
    ClInstrumentDynamicAllocas("asan-instrument-dynamic-allocas",
                               cl::desc("instrument dynamic allocas"),
                               cl::Hidden, cl::init(true));

static cl::opt<bool> ClSkipPromotableAllocas(
    "asan-skip-promotable-allocas",
    cl::desc("Do not instrument promotable allocas"), cl::Hidden,
    cl::init(true));

static cl::opt<AsanCtorKind> ClConstructorKind(
    "asan-constructor-kind",
    cl::desc("Sets the ASan constructor kind"),
    cl::values(clEnumValN(AsanCtorKind::None, "none", "No constructors"),
               clEnumValN(AsanCtorKind::Global, "global",
                          "Use global constructors")),
    cl::init(AsanCtorKind::Global), cl::Hidden);
// These flags allow changing the shadow mapping.
// The shadow mapping looks like
//    Shadow = (Mem >> scale) + offset
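// For example, with the default scale of 3 and an offset of 0x7fff8000 (the
// static Linux x86-64 mapping computed above), the shadow byte for address
// 0x10000 lives at (0x10000 >> 3) + 0x7fff8000 = 0x7fffa000; each shadow byte
// describes the state of one 8-byte granule of application memory.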

static cl::opt<int> ClMappingScale("asan-mapping-scale",
                                   cl::desc("scale of asan shadow mapping"),
                                   cl::Hidden, cl::init(0));

static cl::opt<uint64_t>
    ClMappingOffset("asan-mapping-offset",
                    cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"),
                    cl::Hidden, cl::init(0));

// Optimization flags. Not user visible, used mostly for testing
// and benchmarking the tool.

static cl::opt<bool> ClOpt("asan-opt", cl::desc("Optimize instrumentation"),
                           cl::Hidden, cl::init(true));

static cl::opt<bool> ClOptimizeCallbacks("asan-optimize-callbacks",
                                         cl::desc("Optimize callbacks"),
                                         cl::Hidden, cl::init(false));

static cl::opt<bool> ClOptSameTemp(
    "asan-opt-same-temp", cl::desc("Instrument the same temp just once"),
    cl::Hidden, cl::init(true));

static cl::opt<bool> ClOptGlobals("asan-opt-globals",
                                  cl::desc("Don't instrument scalar globals"),
                                  cl::Hidden, cl::init(true));

static cl::opt<bool> ClOptStack(
    "asan-opt-stack", cl::desc("Don't instrument scalar stack variables"),
    cl::Hidden, cl::init(false));

static cl::opt<bool> ClDynamicAllocaStack(
    "asan-stack-dynamic-alloca",
    cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden,
    cl::init(true));

static cl::opt<uint32_t> ClForceExperiment(
    "asan-force-experiment",
    cl::desc("Force optimization experiment (for testing)"), cl::Hidden,
    cl::init(0));

static cl::opt<bool>
    ClUsePrivateAlias("asan-use-private-alias",
                      cl::desc("Use private aliases for global variables"),
                      cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClUseOdrIndicator("asan-use-odr-indicator",
                      cl::desc("Use odr indicators to improve ODR reporting"),
                      cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClUseGlobalsGC("asan-globals-live-support",
                   cl::desc("Use linker features to support dead "
                            "code stripping of globals"),
                   cl::Hidden, cl::init(true));

// This is on by default even though there is a bug in gold:
// https://sourceware.org/bugzilla/show_bug.cgi?id=19002
static cl::opt<bool>
    ClWithComdat("asan-with-comdat",
                 cl::desc("Place ASan constructors in comdat sections"),
                 cl::Hidden, cl::init(true));

static cl::opt<AsanDtorKind> ClOverrideDestructorKind(
    "asan-destructor-kind",
    cl::desc("Sets the ASan destructor kind. The default is to use the value "
             "provided to the pass constructor"),
    cl::values(clEnumValN(AsanDtorKind::None, "none", "No destructors"),
               clEnumValN(AsanDtorKind::Global, "global",
                          "Use global destructors")),
    cl::init(AsanDtorKind::Invalid), cl::Hidden);

447 "asan-instrument-address-spaces",
448 cl::desc("Only instrument variables in the specified address spaces."),
450 cl::callback([](const unsigned &AddrSpace) {
451 SrcAddrSpaces.insert(AddrSpace);
452 }));

// Debug flags.

static cl::opt<int> ClDebug("asan-debug", cl::desc("debug"), cl::Hidden,
                            cl::init(0));

static cl::opt<int> ClDebugStack("asan-debug-stack", cl::desc("debug stack"),
                                 cl::Hidden, cl::init(0));

static cl::opt<std::string> ClDebugFunc("asan-debug-func", cl::Hidden,
                                        cl::desc("Debug func"));

static cl::opt<int> ClDebugMin("asan-debug-min", cl::desc("Debug min inst"),
                               cl::Hidden, cl::init(-1));

static cl::opt<int> ClDebugMax("asan-debug-max", cl::desc("Debug max inst"),
                               cl::Hidden, cl::init(-1));

STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
STATISTIC(NumOptimizedAccessesToGlobalVar,
          "Number of optimized accesses to global vars");
STATISTIC(NumOptimizedAccessesToStackVar,
          "Number of optimized accesses to stack vars");

namespace {

/// This struct defines the shadow mapping using the rule:
///   shadow = (mem >> Scale) ADD-or-OR Offset.
/// If InGlobal is true, then
///   extern char __asan_shadow[];
///   shadow = (mem >> Scale) + &__asan_shadow
struct ShadowMapping {
  int Scale;
  uint64_t Offset;
  bool OrShadowOffset;
  bool InGlobal;
};

} // end anonymous namespace

static ShadowMapping getShadowMapping(const Triple &TargetTriple, int LongSize,
                                      bool IsKasan) {
  bool IsAndroid = TargetTriple.isAndroid();
  bool IsIOS = TargetTriple.isiOS() || TargetTriple.isWatchOS() ||
               TargetTriple.isDriverKit();
  bool IsMacOS = TargetTriple.isMacOSX();
  bool IsFreeBSD = TargetTriple.isOSFreeBSD();
  bool IsNetBSD = TargetTriple.isOSNetBSD();
  bool IsPS = TargetTriple.isPS();
  bool IsLinux = TargetTriple.isOSLinux();
  bool IsPPC64 = TargetTriple.getArch() == Triple::ppc64 ||
                 TargetTriple.getArch() == Triple::ppc64le;
  bool IsSystemZ = TargetTriple.getArch() == Triple::systemz;
  bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64;
  bool IsMIPSN32ABI = TargetTriple.isABIN32();
  bool IsMIPS32 = TargetTriple.isMIPS32();
  bool IsMIPS64 = TargetTriple.isMIPS64();
  bool IsArmOrThumb = TargetTriple.isARM() || TargetTriple.isThumb();
  bool IsAArch64 = TargetTriple.getArch() == Triple::aarch64 ||
                   TargetTriple.getArch() == Triple::aarch64_be;
  bool IsLoongArch64 = TargetTriple.isLoongArch64();
  bool IsRISCV64 = TargetTriple.getArch() == Triple::riscv64;
  bool IsWindows = TargetTriple.isOSWindows();
  bool IsFuchsia = TargetTriple.isOSFuchsia();
  bool IsAMDGPU = TargetTriple.isAMDGPU();
  bool IsHaiku = TargetTriple.isOSHaiku();
  bool IsWasm = TargetTriple.isWasm();
  bool IsBPF = TargetTriple.isBPF();

  ShadowMapping Mapping;

  Mapping.Scale = kDefaultShadowScale;
  if (ClMappingScale.getNumOccurrences() > 0) {
    Mapping.Scale = ClMappingScale;
  }

  if (LongSize == 32) {
    if (IsAndroid)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsMIPSN32ABI)
      Mapping.Offset = kMIPS_ShadowOffsetN32;
    else if (IsMIPS32)
      Mapping.Offset = kMIPS32_ShadowOffset32;
    else if (IsFreeBSD)
      Mapping.Offset = kFreeBSD_ShadowOffset32;
    else if (IsNetBSD)
      Mapping.Offset = kNetBSD_ShadowOffset32;
    else if (IsIOS)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsWindows)
      Mapping.Offset = kWindowsShadowOffset32;
    else if (IsWasm)
      Mapping.Offset = kWebAssemblyShadowOffset;
    else
      Mapping.Offset = kDefaultShadowOffset32;
  } else { // LongSize == 64
    // Fuchsia is always PIE, which means that the beginning of the address
    // space is always available.
    if (IsFuchsia)
      Mapping.Offset = 0;
    else if (IsPPC64)
      Mapping.Offset = kPPC64_ShadowOffset64;
    else if (IsSystemZ)
      Mapping.Offset = kSystemZ_ShadowOffset64;
    else if (IsFreeBSD && IsAArch64)
      Mapping.Offset = kFreeBSDAArch64_ShadowOffset64;
    else if (IsFreeBSD && !IsMIPS64) {
      if (IsKasan)
        Mapping.Offset = kFreeBSDKasan_ShadowOffset64;
      else
        Mapping.Offset = kFreeBSD_ShadowOffset64;
    } else if (IsNetBSD) {
      if (IsKasan)
        Mapping.Offset = kNetBSDKasan_ShadowOffset64;
      else
        Mapping.Offset = kNetBSD_ShadowOffset64;
    } else if (IsPS)
      Mapping.Offset = kPS_ShadowOffset64;
    else if (IsLinux && IsX86_64) {
      if (IsKasan)
        Mapping.Offset = kLinuxKasan_ShadowOffset64;
      else
        Mapping.Offset = (kSmallX86_64ShadowOffsetBase &
                          (kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale));
    } else if (IsWindows && IsX86_64) {
      Mapping.Offset = kWindowsShadowOffset64;
    } else if (IsMIPS64)
      Mapping.Offset = kMIPS64_ShadowOffset64;
    else if (IsIOS)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsMacOS && IsAArch64)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsAArch64)
      Mapping.Offset = kAArch64_ShadowOffset64;
    else if (IsLoongArch64)
      Mapping.Offset = kLoongArch64_ShadowOffset64;
    else if (IsRISCV64)
      Mapping.Offset = kRISCV64_ShadowOffset64;
    else if (IsAMDGPU)
      Mapping.Offset = (kSmallX86_64ShadowOffsetBase &
                        (kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale));
    else if (IsHaiku && IsX86_64)
      Mapping.Offset = (kSmallX86_64ShadowOffsetBase &
                        (kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale));
    else if (IsBPF)
      Mapping.Offset = kDynamicShadowSentinel;
    else
      Mapping.Offset = kDefaultShadowOffset64;
  }

  if (ClForceDynamicShadow) {
    Mapping.Offset = kDynamicShadowSentinel;
  }

  if (ClMappingOffset.getNumOccurrences() > 0) {
    Mapping.Offset = ClMappingOffset;
  }

  // OR-ing shadow offset is more efficient (at least on x86) if the offset
  // is a power of two, but on ppc64 and loongarch64 we have to use add since
  // the shadow offset is not necessarily 1/8-th of the address space. On
  // SystemZ, we could OR the constant in a single instruction, but it's more
  // efficient to load it once and use indexed addressing.
  Mapping.OrShadowOffset = !IsAArch64 && !IsPPC64 && !IsSystemZ && !IsPS &&
                           !IsRISCV64 && !IsLoongArch64 &&
                           !(Mapping.Offset & (Mapping.Offset - 1)) &&
                           Mapping.Offset != kDynamicShadowSentinel;
  Mapping.InGlobal = ClWithIfunc && IsAndroid && IsArmOrThumb;

  return Mapping;
}

void llvm::getAddressSanitizerParams(const Triple &TargetTriple, int LongSize,
                                     bool IsKasan, uint64_t *ShadowBase,
                                     int *MappingScale, bool *OrShadowOffset) {
  auto Mapping = getShadowMapping(TargetTriple, LongSize, IsKasan);
  *ShadowBase = Mapping.Offset;
  *MappingScale = Mapping.Scale;
  *OrShadowOffset = Mapping.OrShadowOffset;
}

void llvm::removeASanIncompatibleFnAttributes(Function &F, bool ReadsArgMem) {
  // Sanitizer checks read from shadow, which invalidates memory(argmem: *).
  //
  // This is not only true for sanitized functions, because AttrInfer can
  // infer those attributes on libc functions, which is not true if those
  // are instrumented (Android) or intercepted.
  //
  // We might want to model ASan shadow memory more opaquely to get rid of
  // this problem altogether, by hiding the shadow memory write in an
  // intrinsic, essentially like in the AArch64StackTagging pass. But that's
  // for another day.

  // The API is weird. `onlyReadsMemory` actually means "does not write", and
  // `onlyWritesMemory` actually means "does not read". So we reconstruct
  // "accesses memory" && "does not read" <=> "writes".
  bool Changed = false;
  if (!F.doesNotAccessMemory()) {
    bool WritesMemory = !F.onlyReadsMemory();
    bool ReadsMemory = !F.onlyWritesMemory();
    if ((WritesMemory && !ReadsMemory) || F.onlyAccessesArgMemory()) {
      F.removeFnAttr(Attribute::Memory);
      Changed = true;
    }
  }
  if (ReadsArgMem) {
    for (Argument &A : F.args()) {
      if (A.hasAttribute(Attribute::WriteOnly)) {
        A.removeAttr(Attribute::WriteOnly);
        Changed = true;
      }
    }
  }
  if (Changed) {
    // nobuiltin makes sure later passes don't restore assumptions about
    // the function.
    F.addFnAttr(Attribute::NoBuiltin);
  }
}

ASanAccessInfo::ASanAccessInfo(int32_t Packed)
    : Packed(Packed),
      AccessSizeIndex((Packed >> kAccessSizeIndexShift) & kAccessSizeIndexMask),
      IsWrite((Packed >> kIsWriteShift) & kIsWriteMask),
      CompileKernel((Packed >> kCompileKernelShift) & kCompileKernelMask) {}

ASanAccessInfo::ASanAccessInfo(bool IsWrite, bool CompileKernel,
                               size_t AccessSizeIndex)
    : Packed((IsWrite << kIsWriteShift) +
             (CompileKernel << kCompileKernelShift) +
             (AccessSizeIndex << kAccessSizeIndexShift)),
      AccessSizeIndex(AccessSizeIndex), IsWrite(IsWrite),
      CompileKernel(CompileKernel) {}

static uint64_t getRedzoneSizeForScale(int MappingScale) {
  // Redzone used for stack and globals is at least 32 bytes.
  // For scales 6 and 7, the redzone has to be 64 and 128 bytes respectively.
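  // E.g. the default scale of 3 yields max(32, 1 << 3) = 32-byte redzones,
  // while scale 7 yields max(32, 128) = 128 bytes.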
  return std::max(32U, 1U << MappingScale);
}

static uint64_t GetCtorAndDtorPriority(Triple &TargetTriple) {
  if (TargetTriple.isOSEmscripten())
    return kAsanEmscriptenCtorAndDtorPriority;
  else
    return kAsanCtorAndDtorPriority;
}

static Twine genName(StringRef suffix) {
  return Twine(kAsanGenPrefix) + suffix;
}

namespace {
/// Helper RAII class to post-process inserted asan runtime calls during a
/// pass on a single Function. Upon end of scope, detects and applies the
/// required funclet OpBundle.
class RuntimeCallInserter {
  Function *OwnerFn = nullptr;
  bool TrackInsertedCalls = false;
  SmallVector<CallInst *> InsertedCalls;

public:
  RuntimeCallInserter(Function &Fn) : OwnerFn(&Fn) {
    if (Fn.hasPersonalityFn()) {
      auto Personality = classifyEHPersonality(Fn.getPersonalityFn());
      if (isScopedEHPersonality(Personality))
        TrackInsertedCalls = true;
    }
  }

  ~RuntimeCallInserter() {
    if (InsertedCalls.empty())
      return;
    assert(TrackInsertedCalls && "Calls were wrongly tracked");

    DenseMap<BasicBlock *, ColorVector> BlockColors = colorEHFunclets(*OwnerFn);
    for (CallInst *CI : InsertedCalls) {
      BasicBlock *BB = CI->getParent();
      assert(BB && "Instruction doesn't belong to a BasicBlock");
      assert(BB->getParent() == OwnerFn &&
             "Instruction doesn't belong to the expected Function!");

      ColorVector &Colors = BlockColors[BB];
      // funclet opbundles are only valid in monochromatic BBs.
      // Note that unreachable BBs are seen as colorless by colorEHFunclets()
      // and will be DCE'ed later.
      if (Colors.empty())
        continue;
      if (Colors.size() != 1) {
        OwnerFn->getContext().emitError(
            "Instruction's BasicBlock is not monochromatic");
        continue;
      }

      BasicBlock *Color = Colors.front();
      BasicBlock::iterator EHPadIt = Color->getFirstNonPHIIt();

      if (EHPadIt != Color->end() && EHPadIt->isEHPad()) {
        // Replace CI with a clone with an added funclet OperandBundle
        OperandBundleDef OB("funclet", &*EHPadIt);
        auto *NewCall = CallBase::addOperandBundle(CI, LLVMContext::OB_funclet,
                                                   OB, CI->getIterator());
        NewCall->copyMetadata(*CI);
        CI->replaceAllUsesWith(NewCall);
        CI->eraseFromParent();
      }
    }
  }

  CallInst *createRuntimeCall(IRBuilder<> &IRB, FunctionCallee Callee,
                              ArrayRef<Value *> Args = {},
                              const Twine &Name = "") {
    assert(IRB.GetInsertBlock()->getParent() == OwnerFn);

    CallInst *Inst = IRB.CreateCall(Callee, Args, Name, nullptr);
    if (TrackInsertedCalls)
      InsertedCalls.push_back(Inst);
    return Inst;
  }
};

/// AddressSanitizer: instrument the code in module to find memory bugs.
struct AddressSanitizer {
  AddressSanitizer(Module &M, const StackSafetyGlobalInfo *SSGI,
                   int InstrumentationWithCallsThreshold,
                   uint32_t MaxInlinePoisoningSize, bool CompileKernel = false,
                   bool Recover = false, bool UseAfterScope = false,
                   AsanDetectStackUseAfterReturnMode UseAfterReturn =
                       AsanDetectStackUseAfterReturnMode::Runtime)
      : M(M),
        CompileKernel(ClEnableKasan.getNumOccurrences() > 0 ? ClEnableKasan
                                                            : CompileKernel),
        Recover(ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover),
        UseAfterScope(UseAfterScope || ClUseAfterScope),
        UseAfterReturn(ClUseAfterReturn.getNumOccurrences() ? ClUseAfterReturn
                                                            : UseAfterReturn),
        SSGI(SSGI),
        InstrumentationWithCallsThreshold(
            ClInstrumentationWithCallsThreshold.getNumOccurrences() > 0
                ? ClInstrumentationWithCallsThreshold
                : InstrumentationWithCallsThreshold),
        MaxInlinePoisoningSize(ClMaxInlinePoisoningSize.getNumOccurrences() > 0
                                   ? ClMaxInlinePoisoningSize
                                   : MaxInlinePoisoningSize) {
    C = &(M.getContext());
    DL = &M.getDataLayout();
    LongSize = M.getDataLayout().getPointerSizeInBits();
    IntptrTy = Type::getIntNTy(*C, LongSize);
    PtrTy = PointerType::getUnqual(*C);
    Int32Ty = Type::getInt32Ty(*C);
    TargetTriple = M.getTargetTriple();

    Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel);

    assert(this->UseAfterReturn != AsanDetectStackUseAfterReturnMode::Invalid);
  }

  TypeSize getAllocaSizeInBytes(const AllocaInst &AI) const {
    return *AI.getAllocationSize(AI.getDataLayout());
  }

  /// Check if we want (and can) handle this alloca.
  bool isInterestingAlloca(const AllocaInst &AI);

  bool ignoreAccess(Instruction *Inst, Value *Ptr);
  void getInterestingMemoryOperands(
      Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting,
      const TargetTransformInfo *TTI);

  void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
                     InterestingMemoryOperand &O, bool UseCalls,
                     const DataLayout &DL, RuntimeCallInserter &RTCI);
  void instrumentPointerComparisonOrSubtraction(Instruction *I,
                                                RuntimeCallInserter &RTCI);
  void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
                         Value *Addr, MaybeAlign Alignment,
                         uint32_t TypeStoreSize, bool IsWrite,
                         Value *SizeArgument, bool UseCalls, uint32_t Exp,
                         RuntimeCallInserter &RTCI);
  Instruction *instrumentAMDGPUAddress(Instruction *OrigIns,
                                       Instruction *InsertBefore, Value *Addr,
                                       uint32_t TypeStoreSize, bool IsWrite,
                                       Value *SizeArgument);
  Instruction *genAMDGPUReportBlock(IRBuilder<> &IRB, Value *Cond,
                                    bool Recover);
  void instrumentUnusualSizeOrAlignment(Instruction *I,
                                        Instruction *InsertBefore, Value *Addr,
                                        TypeSize TypeStoreSize, bool IsWrite,
                                        Value *SizeArgument, bool UseCalls,
                                        uint32_t Exp,
                                        RuntimeCallInserter &RTCI);
  void instrumentMaskedLoadOrStore(AddressSanitizer *Pass, const DataLayout &DL,
                                   Type *IntptrTy, Value *Mask, Value *EVL,
                                   Value *Stride, Instruction *I, Value *Addr,
                                   MaybeAlign Alignment, unsigned Granularity,
                                   Type *OpType, bool IsWrite,
                                   Value *SizeArgument, bool UseCalls,
                                   uint32_t Exp, RuntimeCallInserter &RTCI);
  Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
                           Value *ShadowValue, uint32_t TypeStoreSize);
  Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr,
                                 bool IsWrite, size_t AccessSizeIndex,
                                 Value *SizeArgument, uint32_t Exp,
                                 RuntimeCallInserter &RTCI);
  void instrumentMemIntrinsic(MemIntrinsic *MI, RuntimeCallInserter &RTCI);
  Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
  bool suppressInstrumentationSiteForDebug(int &Instrumented);
  bool instrumentFunction(Function &F, const TargetLibraryInfo *TLI,
                          const TargetTransformInfo *TTI);
  bool maybeInsertAsanInitAtFunctionEntry(Function &F);
  bool maybeInsertDynamicShadowAtFunctionEntry(Function &F);
  void markEscapedLocalAllocas(Function &F);
  void markCatchParametersAsUninteresting(Function &F);

private:
  friend struct FunctionStackPoisoner;

  void initializeCallbacks(const TargetLibraryInfo *TLI);

  bool LooksLikeCodeInBug11395(Instruction *I);
  bool GlobalIsLinkerInitialized(GlobalVariable *G);
  bool isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis, Value *Addr,
                    TypeSize TypeStoreSize) const;

  /// Helper to cleanup per-function state.
  struct FunctionStateRAII {
    AddressSanitizer *Pass;

    FunctionStateRAII(AddressSanitizer *Pass) : Pass(Pass) {
      assert(Pass->ProcessedAllocas.empty() &&
             "last pass forgot to clear cache");
      assert(!Pass->LocalDynamicShadow);
    }

    ~FunctionStateRAII() {
      Pass->LocalDynamicShadow = nullptr;
      Pass->ProcessedAllocas.clear();
    }
  };

  Module &M;
  LLVMContext *C;
  const DataLayout *DL;
  Triple TargetTriple;
  int LongSize;
  bool CompileKernel;
  bool Recover;
  bool UseAfterScope;
  AsanDetectStackUseAfterReturnMode UseAfterReturn;
  Type *IntptrTy;
  Type *Int32Ty;
  PointerType *PtrTy;
  ShadowMapping Mapping;
  FunctionCallee AsanHandleNoReturnFunc;
  FunctionCallee AsanPtrCmpFunction, AsanPtrSubFunction;
  Constant *AsanShadowGlobal;

  // These arrays are indexed by AccessIsWrite, Experiment and log2(AccessSize).
  FunctionCallee AsanErrorCallback[2][2][kNumberOfAccessSizes];
  FunctionCallee AsanMemoryAccessCallback[2][2][kNumberOfAccessSizes];

  // These arrays are indexed by AccessIsWrite and Experiment.
  FunctionCallee AsanErrorCallbackSized[2][2];
  FunctionCallee AsanMemoryAccessCallbackSized[2][2];

  FunctionCallee AsanMemmove, AsanMemcpy, AsanMemset;
  Value *LocalDynamicShadow = nullptr;
  const StackSafetyGlobalInfo *SSGI;
  DenseMap<const AllocaInst *, bool> ProcessedAllocas;

  FunctionCallee AMDGPUAddressShared;
  FunctionCallee AMDGPUAddressPrivate;
  int InstrumentationWithCallsThreshold;
  uint32_t MaxInlinePoisoningSize;
};

class ModuleAddressSanitizer {
public:
  ModuleAddressSanitizer(Module &M, bool InsertVersionCheck,
                         bool CompileKernel = false, bool Recover = false,
                         bool UseGlobalsGC = true, bool UseOdrIndicator = true,
                         AsanDtorKind DestructorKind = AsanDtorKind::Global,
                         AsanCtorKind ConstructorKind = AsanCtorKind::Global)
      : M(M),
        CompileKernel(ClEnableKasan.getNumOccurrences() > 0 ? ClEnableKasan
                                                            : CompileKernel),
        InsertVersionCheck(ClInsertVersionCheck.getNumOccurrences() > 0
                               ? ClInsertVersionCheck
                               : InsertVersionCheck),
        Recover(ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover),
        UseGlobalsGC(UseGlobalsGC && ClUseGlobalsGC && !this->CompileKernel),
        // Enable aliases as they should have no downside with ODR indicators.
        UsePrivateAlias(ClUsePrivateAlias.getNumOccurrences() > 0
                            ? ClUsePrivateAlias
                            : UseOdrIndicator),
        UseOdrIndicator(ClUseOdrIndicator.getNumOccurrences() > 0
                            ? ClUseOdrIndicator
                            : UseOdrIndicator),
        // Not a typo: ClWithComdat is almost completely pointless without
        // ClUseGlobalsGC (because then it only works on modules without
        // globals, which are rare); it is a prerequisite for ClUseGlobalsGC;
        // and both suffer from gold PR19002 for which the UseGlobalsGC
        // constructor argument is designed as a workaround. Therefore, disable
        // both ClWithComdat and ClUseGlobalsGC unless the frontend says it's
        // ok to do globals-gc.
        UseCtorComdat(UseGlobalsGC && ClWithComdat && !this->CompileKernel),
        DestructorKind(DestructorKind),
        ConstructorKind(ClConstructorKind.getNumOccurrences() > 0
                            ? ClConstructorKind
                            : ConstructorKind) {
    C = &(M.getContext());
    int LongSize = M.getDataLayout().getPointerSizeInBits();
    IntptrTy = Type::getIntNTy(*C, LongSize);
    PtrTy = PointerType::getUnqual(*C);
    TargetTriple = M.getTargetTriple();
    Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel);

    if (ClOverrideDestructorKind != AsanDtorKind::Invalid)
      this->DestructorKind = ClOverrideDestructorKind;
    assert(this->DestructorKind != AsanDtorKind::Invalid);
  }

  bool instrumentModule();

private:
  void initializeCallbacks();

  void instrumentGlobals(IRBuilder<> &IRB, bool *CtorComdat);
  void InstrumentGlobalsCOFF(IRBuilder<> &IRB,
                             ArrayRef<GlobalVariable *> ExtendedGlobals,
                             ArrayRef<Constant *> MetadataInitializers);
  void instrumentGlobalsELF(IRBuilder<> &IRB,
                            ArrayRef<GlobalVariable *> ExtendedGlobals,
                            ArrayRef<Constant *> MetadataInitializers,
                            const std::string &UniqueModuleId);
  void InstrumentGlobalsMachO(IRBuilder<> &IRB,
                              ArrayRef<GlobalVariable *> ExtendedGlobals,
                              ArrayRef<Constant *> MetadataInitializers);
  void
  InstrumentGlobalsWithMetadataArray(IRBuilder<> &IRB,
                                     ArrayRef<GlobalVariable *> ExtendedGlobals,
                                     ArrayRef<Constant *> MetadataInitializers);

  GlobalVariable *CreateMetadataGlobal(Constant *Initializer,
                                       StringRef OriginalName);
  void SetComdatForGlobalMetadata(GlobalVariable *G, GlobalVariable *Metadata,
                                  StringRef InternalSuffix);
  Instruction *CreateAsanModuleDtor();

  const GlobalVariable *getExcludedAliasedGlobal(const GlobalAlias &GA) const;
  bool shouldInstrumentGlobal(GlobalVariable *G) const;
  bool ShouldUseMachOGlobalsSection() const;
  StringRef getGlobalMetadataSection() const;
  void poisonOneInitializer(Function &GlobalInit);
  void createInitializerPoisonCalls();
  uint64_t getMinRedzoneSizeForGlobal() const {
    return getRedzoneSizeForScale(Mapping.Scale);
  }
  uint64_t getRedzoneSizeForGlobal(uint64_t SizeInBytes) const;
  int GetAsanVersion() const;
  GlobalVariable *getOrCreateModuleName();

  Module &M;
  bool CompileKernel;
  bool InsertVersionCheck;
  bool Recover;
  bool UseGlobalsGC;
  bool UsePrivateAlias;
  bool UseOdrIndicator;
  bool UseCtorComdat;
  AsanDtorKind DestructorKind;
  AsanCtorKind ConstructorKind;
  Type *IntptrTy;
  PointerType *PtrTy;
  LLVMContext *C;
  Triple TargetTriple;
  ShadowMapping Mapping;
  FunctionCallee AsanPoisonGlobals;
  FunctionCallee AsanUnpoisonGlobals;
  FunctionCallee AsanRegisterGlobals;
  FunctionCallee AsanUnregisterGlobals;
  FunctionCallee AsanRegisterImageGlobals;
  FunctionCallee AsanUnregisterImageGlobals;
  FunctionCallee AsanRegisterElfGlobals;
  FunctionCallee AsanUnregisterElfGlobals;

  Function *AsanCtorFunction = nullptr;
  Function *AsanDtorFunction = nullptr;
  GlobalVariable *ModuleName = nullptr;
};

// Stack poisoning does not play well with exception handling.
// When an exception is thrown, we essentially bypass the code
// that unpoisons the stack. This is why the run-time library has
// to intercept __cxa_throw (as well as longjmp, etc) and unpoison the entire
// stack in the interceptor. This, however, does not work inside the
// actual function which catches the exception, most likely because the
// compiler hoists the load of the shadow value somewhere too high.
// This causes asan to report a non-existing bug on 453.povray.
// It sounds like an LLVM bug.
struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
  Function &F;
  AddressSanitizer &ASan;
  RuntimeCallInserter &RTCI;
  DIBuilder DIB;
  LLVMContext *C;
  Type *IntptrTy;
  Type *IntptrPtrTy;
  ShadowMapping Mapping;

  SmallVector<AllocaInst *, 16> AllocaVec;
  SmallVector<AllocaInst *, 16> StaticAllocasToMoveUp;
  SmallVector<Instruction *, 8> RetVec;

  FunctionCallee AsanStackMallocFunc[kMaxAsanStackMallocSizeClass + 1],
      AsanStackFreeFunc[kMaxAsanStackMallocSizeClass + 1];
  FunctionCallee AsanSetShadowFunc[0x100] = {};
  FunctionCallee AsanPoisonStackMemoryFunc, AsanUnpoisonStackMemoryFunc;
  FunctionCallee AsanAllocaPoisonFunc, AsanAllocasUnpoisonFunc;

  // Stores a place and arguments of poisoning/unpoisoning call for alloca.
  struct AllocaPoisonCall {
    IntrinsicInst *InsBefore;
    AllocaInst *AI;
    uint64_t Size;
    bool DoPoison;
  };
  SmallVector<AllocaPoisonCall, 8> DynamicAllocaPoisonCallVec;
  SmallVector<AllocaPoisonCall, 8> StaticAllocaPoisonCallVec;

  SmallVector<AllocaInst *, 1> DynamicAllocaVec;
  SmallVector<IntrinsicInst *, 1> StackRestoreVec;
  AllocaInst *DynamicAllocaLayout = nullptr;
  IntrinsicInst *LocalEscapeCall = nullptr;

  bool HasInlineAsm = false;
  bool HasReturnsTwiceCall = false;
  bool PoisonStack;

  FunctionStackPoisoner(Function &F, AddressSanitizer &ASan,
                        RuntimeCallInserter &RTCI)
      : F(F), ASan(ASan), RTCI(RTCI),
        DIB(*F.getParent(), /*AllowUnresolved*/ false), C(ASan.C),
        IntptrTy(ASan.IntptrTy),
        IntptrPtrTy(PointerType::get(IntptrTy->getContext(), 0)),
        Mapping(ASan.Mapping),
        PoisonStack(ClStack && !F.getParent()->getTargetTriple().isAMDGPU()) {}

  bool runOnFunction() {
    if (!PoisonStack)
      return false;

    if (ClRedzoneByvalArgs)
      copyArgsPassedByValToAllocas();

    // Collect alloca, ret, lifetime instructions etc.
    for (BasicBlock *BB : depth_first(&F.getEntryBlock())) visit(*BB);

    if (AllocaVec.empty() && DynamicAllocaVec.empty()) return false;

    initializeCallbacks(*F.getParent());

    processDynamicAllocas();
    processStaticAllocas();

    if (ClDebugStack) {
      LLVM_DEBUG(dbgs() << F);
    }
    return true;
  }

  // Arguments marked with the "byval" attribute are implicitly copied without
  // using an alloca instruction. To produce redzones for those arguments, we
  // copy them a second time into memory allocated with an alloca instruction.
  void copyArgsPassedByValToAllocas();

  // Finds all Alloca instructions and puts
  // poisoned red zones around all of them.
  // Then unpoison everything back before the function returns.
  void processStaticAllocas();
  void processDynamicAllocas();

  void createDynamicAllocasInitStorage();

  // ----------------------- Visitors.
  /// Collect all Ret instructions, or the musttail call instruction if it
  /// precedes the return instruction.
  void visitReturnInst(ReturnInst &RI) {
    if (CallInst *CI = RI.getParent()->getTerminatingMustTailCall())
      RetVec.push_back(CI);
    else
      RetVec.push_back(&RI);
  }

  /// Collect all Resume instructions.
  void visitResumeInst(ResumeInst &RI) { RetVec.push_back(&RI); }

  /// Collect all CleanupReturnInst instructions.
  void visitCleanupReturnInst(CleanupReturnInst &CRI) { RetVec.push_back(&CRI); }

  void unpoisonDynamicAllocasBeforeInst(Instruction *InstBefore,
                                        Value *SavedStack) {
    IRBuilder<> IRB(InstBefore);
    Value *DynamicAreaPtr = IRB.CreatePtrToInt(SavedStack, IntptrTy);
    // When we insert __asan_allocas_unpoison before @llvm.stackrestore, we
    // need to adjust the extracted SP to compute the address of the most
    // recent alloca. We have a special @llvm.get.dynamic.area.offset
    // intrinsic for this purpose.
    if (!isa<ReturnInst>(InstBefore)) {
      Value *DynamicAreaOffset = IRB.CreateIntrinsic(
          Intrinsic::get_dynamic_area_offset, {IntptrTy}, {});

      DynamicAreaPtr = IRB.CreateAdd(IRB.CreatePtrToInt(SavedStack, IntptrTy),
                                     DynamicAreaOffset);
    }

    RTCI.createRuntimeCall(
        IRB, AsanAllocasUnpoisonFunc,
        {IRB.CreateLoad(IntptrTy, DynamicAllocaLayout), DynamicAreaPtr});
  }

  // Unpoison dynamic allocas redzones.
  void unpoisonDynamicAllocas() {
    for (Instruction *Ret : RetVec)
      unpoisonDynamicAllocasBeforeInst(Ret, DynamicAllocaLayout);

    for (Instruction *StackRestoreInst : StackRestoreVec)
      unpoisonDynamicAllocasBeforeInst(StackRestoreInst,
                                       StackRestoreInst->getOperand(0));
  }

  // Deploy and poison redzones around a dynamic alloca call. To do this, we
  // should replace the call with another one with changed parameters and
  // replace all its uses with the new address, so
  //   addr = alloca type, old_size, align
  // is replaced by
  //   new_size = (old_size + additional_size) * sizeof(type)
  //   tmp = alloca i8, new_size, max(align, 32)
  //   addr = tmp + 32 (first 32 bytes are for the left redzone).
  // Additional_size is added to make the new memory allocation contain not
  // only the requested memory, but also the left, partial and right redzones.
  void handleDynamicAllocaCall(AllocaInst *AI);

  /// Collect Alloca instructions we want (and can) handle.
  void visitAllocaInst(AllocaInst &AI) {
    // FIXME: Handle scalable vectors instead of ignoring them.
    const Type *AllocaType = AI.getAllocatedType();
    const auto *STy = dyn_cast<StructType>(AllocaType);
    if (!ASan.isInterestingAlloca(AI) || isa<ScalableVectorType>(AllocaType) ||
        (STy && STy->containsHomogeneousScalableVectorTypes())) {
      if (AI.isStaticAlloca()) {
        // Skip over allocas that are present *before* the first instrumented
        // alloca, we don't want to move those around.
        if (AllocaVec.empty())
          return;

        StaticAllocasToMoveUp.push_back(&AI);
      }
      return;
    }

    if (!AI.isStaticAlloca())
      DynamicAllocaVec.push_back(&AI);
    else
      AllocaVec.push_back(&AI);
  }

  /// Collect lifetime intrinsic calls to check for use-after-scope
  /// errors.
  void visitIntrinsicInst(IntrinsicInst &II) {
    Intrinsic::ID ID = II.getIntrinsicID();
    if (ID == Intrinsic::stackrestore) StackRestoreVec.push_back(&II);
    if (ID == Intrinsic::localescape) LocalEscapeCall = &II;
    if (!ASan.UseAfterScope)
      return;
    if (!II.isLifetimeStartOrEnd())
      return;
    // Find alloca instruction that corresponds to llvm.lifetime argument.
    AllocaInst *AI = dyn_cast<AllocaInst>(II.getArgOperand(0));
    // We're interested only in allocas we can handle.
    if (!AI || !ASan.isInterestingAlloca(*AI))
      return;

    std::optional<TypeSize> Size = AI->getAllocationSize(AI->getDataLayout());
    // Check that size is known and can be stored in IntptrTy.
    // TODO: Add support for scalable vectors if possible.
    if (!Size || Size->isScalable() ||
        !ConstantInt::isValueValidForType(IntptrTy, Size->getFixedValue()))
      return;

    bool DoPoison = (ID == Intrinsic::lifetime_end);
    AllocaPoisonCall APC = {&II, AI, *Size, DoPoison};
    if (AI->isStaticAlloca())
      StaticAllocaPoisonCallVec.push_back(APC);
    else if (ClInstrumentDynamicAllocas)
      DynamicAllocaPoisonCallVec.push_back(APC);
  }

  void visitCallBase(CallBase &CB) {
    if (CallInst *CI = dyn_cast<CallInst>(&CB)) {
      HasInlineAsm |= CI->isInlineAsm() && &CB != ASan.LocalDynamicShadow;
      HasReturnsTwiceCall |= CI->canReturnTwice();
    }
  }

  // ---------------------- Helpers.
  void initializeCallbacks(Module &M);

  // Copies bytes from ShadowBytes into shadow memory for indexes where
  // ShadowMask is not zero. If ShadowMask[i] is zero, we assume that
  // ShadowBytes[i] is constantly zero and doesn't need to be overwritten.
  void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
                    IRBuilder<> &IRB, Value *ShadowBase);
  void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
                    size_t Begin, size_t End, IRBuilder<> &IRB,
                    Value *ShadowBase);
  void copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
                          ArrayRef<uint8_t> ShadowBytes, size_t Begin,
                          size_t End, IRBuilder<> &IRB, Value *ShadowBase);

  void poisonAlloca(Value *V, uint64_t Size, IRBuilder<> &IRB, bool DoPoison);

  Value *createAllocaForLayout(IRBuilder<> &IRB, const ASanStackFrameLayout &L,
                               bool Dynamic);
  PHINode *createPHI(IRBuilder<> &IRB, Value *Cond, Value *ValueIfTrue,
                     Instruction *ThenTerm, Value *ValueIfFalse);
};

} // end anonymous namespace

void AddressSanitizerPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<AddressSanitizerPass> *>(this)->printPipeline(
      OS, MapClassName2PassName);
  OS << '<';
  if (Options.CompileKernel)
    OS << "kernel;";
  if (Options.UseAfterScope)
    OS << "use-after-scope";
  OS << '>';
}

AddressSanitizerPass::AddressSanitizerPass(
    const AddressSanitizerOptions &Options, bool UseGlobalGC,
    bool UseOdrIndicator, AsanDtorKind DestructorKind,
    AsanCtorKind ConstructorKind)
    : Options(Options), UseGlobalGC(UseGlobalGC),
      UseOdrIndicator(UseOdrIndicator), DestructorKind(DestructorKind),
      ConstructorKind(ConstructorKind) {}

PreservedAnalyses AddressSanitizerPass::run(Module &M,
                                            ModuleAnalysisManager &MAM) {
  // Return early if nosanitize_address module flag is present for the module.
  // This implies that asan pass has already run before.
  if (checkIfAlreadyInstrumented(M, "nosanitize_address"))
    return PreservedAnalyses::all();

  ModuleAddressSanitizer ModuleSanitizer(
      M, Options.InsertVersionCheck, Options.CompileKernel, Options.Recover,
      UseGlobalGC, UseOdrIndicator, DestructorKind, ConstructorKind);
  bool Modified = false;
  auto &FAM = MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
  const StackSafetyGlobalInfo *const SSGI =
      ClUseStackSafety ? &MAM.getResult<StackSafetyGlobalAnalysis>(M) : nullptr;
  for (Function &F : M) {
    if (F.empty())
      continue;
    if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage)
      continue;
    if (!ClDebugFunc.empty() && ClDebugFunc == F.getName())
      continue;
    if (F.getName().starts_with("__asan_"))
      continue;
    if (F.isPresplitCoroutine())
      continue;
    AddressSanitizer FunctionSanitizer(
        M, SSGI, Options.InstrumentationWithCallsThreshold,
        Options.MaxInlinePoisoningSize, Options.CompileKernel, Options.Recover,
        Options.UseAfterScope, Options.UseAfterReturn);
    const TargetLibraryInfo &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
    const TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);
    Modified |= FunctionSanitizer.instrumentFunction(F, &TLI, &TTI);
  }
  Modified |= ModuleSanitizer.instrumentModule();
  if (!Modified)
    return PreservedAnalyses::all();

  PreservedAnalyses PA = PreservedAnalyses::none();
  // GlobalsAA is considered stateless and does not get invalidated unless
  // explicitly invalidated; PreservedAnalyses::none() is not enough.
  // Sanitizers make changes that require GlobalsAA to be invalidated.
  PA.abandon<GlobalsAA>();
  return PA;
}

static size_t TypeStoreSizeToSizeIndex(uint32_t TypeSize) {
  size_t Res = llvm::countr_zero(TypeSize / 8);
  assert(Res < kNumberOfAccessSizes);
  return Res;
}
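// (For instance, a 32-bit access maps to index countr_zero(32 / 8) = 2, and a
// 128-bit access to index 4, the last of the kNumberOfAccessSizes indexes.)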

/// Check if \p G has been created by a trusted compiler pass.
static bool GlobalWasGeneratedByCompiler(GlobalVariable *G) {
  // Do not instrument @llvm.global_ctors, @llvm.used, etc.
  if (G->getName().starts_with("llvm.") ||
      // Do not instrument gcov counter arrays.
      G->getName().starts_with("__llvm_gcov_ctr") ||
      // Do not instrument rtti proxy symbols for function sanitizer.
      G->getName().starts_with("__llvm_rtti_proxy"))
    return true;

  // Do not instrument asan globals.
  if (G->getName().starts_with(kAsanGenPrefix) ||
      G->getName().starts_with(kSanCovGenPrefix) ||
      G->getName().starts_with(kODRGenPrefix))
    return true;

  return false;
}

static bool isUnsupportedAMDGPUAddrspace(Value *Addr) {
  Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
  unsigned int AddrSpace = PtrTy->getPointerAddressSpace();
  // Globals in address space 1 and 4 are supported for AMDGPU.
  if (AddrSpace == 3 || AddrSpace == 5)
    return true;
  return false;
}

static bool isSupportedAddrspace(const Triple &TargetTriple, Value *Addr) {
  Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
  unsigned int AddrSpace = PtrTy->getPointerAddressSpace();

  if (!SrcAddrSpaces.empty())
    return SrcAddrSpaces.count(AddrSpace);

  if (TargetTriple.isAMDGPU())
    return !isUnsupportedAMDGPUAddrspace(Addr);

  return AddrSpace == 0;
}

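// As a sketch of the IR this emits (assuming the default Linux x86-64 static
// mapping, Scale = 3, Offset = 0x7fff8000, and no dynamic shadow base):
//   %0 = lshr i64 %addr, 3
//   %1 = add i64 %0, 2147450880   ; 0x7fff8000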
Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
  // Shadow >> scale
  Shadow = IRB.CreateLShr(Shadow, Mapping.Scale);
  if (Mapping.Offset == 0) return Shadow;
  // (Shadow >> scale) | offset
  Value *ShadowBase;
  if (LocalDynamicShadow)
    ShadowBase = LocalDynamicShadow;
  else
    ShadowBase = ConstantInt::get(IntptrTy, Mapping.Offset);
  if (Mapping.OrShadowOffset)
    return IRB.CreateOr(Shadow, ShadowBase);
  else
    return IRB.CreateAdd(Shadow, ShadowBase);
}

// Instrument memset/memmove/memcpy
void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI,
                                              RuntimeCallInserter &RTCI) {
  InstrumentationIRBuilder IRB(MI);
  if (isa<MemTransferInst>(MI)) {
    RTCI.createRuntimeCall(
        IRB, isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy,
        {IRB.CreateAddrSpaceCast(MI->getOperand(0), PtrTy),
         IRB.CreateAddrSpaceCast(MI->getOperand(1), PtrTy),
         IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
  } else if (isa<MemSetInst>(MI)) {
    RTCI.createRuntimeCall(
        IRB, AsanMemset,
        {IRB.CreateAddrSpaceCast(MI->getOperand(0), PtrTy),
         IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
         IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
  }
  MI->eraseFromParent();
}

/// Check if we want (and can) handle this alloca.
bool AddressSanitizer::isInterestingAlloca(const AllocaInst &AI) {
  auto [It, Inserted] = ProcessedAllocas.try_emplace(&AI);

  if (!Inserted)
    return It->getSecond();

  bool IsInteresting =
      (AI.getAllocatedType()->isSized() &&
       // alloca() may be called with 0 size, ignore it.
       ((!AI.isStaticAlloca()) || !getAllocaSizeInBytes(AI).isZero()) &&
       // We are only interested in allocas not promotable to registers.
       // Promotable allocas are common under -O0.
       (!ClSkipPromotableAllocas || !isAllocaPromotable(&AI)) &&
       // inalloca allocas are not treated as static, and we don't want
       // dynamic alloca instrumentation for them as well.
       !AI.isUsedWithInAlloca() &&
       // swifterror allocas are register promoted by ISel
       !AI.isSwiftError() &&
       // safe allocas are not interesting
       !(SSGI && SSGI->isSafe(AI)));

  It->second = IsInteresting;
  return IsInteresting;
}

bool AddressSanitizer::ignoreAccess(Instruction *Inst, Value *Ptr) {
  // Check whether the target supports sanitizing the address space
  // of the pointer.
  if (!isSupportedAddrspace(TargetTriple, Ptr))
    return true;

  // Ignore swifterror addresses.
  // swifterror memory addresses are mem2reg promoted by instruction
  // selection. As such they cannot have regular uses like an instrumentation
  // function and it makes no sense to track them as memory.
  if (Ptr->isSwiftError())
    return true;

  // Treat memory accesses to promotable allocas as non-interesting since they
  // will not cause memory violations. This greatly speeds up the instrumented
  // executable at -O0.
  if (auto AI = dyn_cast_or_null<AllocaInst>(Ptr))
    if (ClSkipPromotableAllocas && !isInterestingAlloca(*AI))
      return true;

  if (SSGI != nullptr && SSGI->stackAccessIsSafe(*Inst) &&
      findAllocaForValue(Ptr))
    return true;

  return false;
}

void AddressSanitizer::getInterestingMemoryOperands(
    Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting,
    const TargetTransformInfo *TTI) {
  // Do not instrument the load fetching the dynamic shadow address.
  if (LocalDynamicShadow == I)
    return;

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!ClInstrumentReads || ignoreAccess(I, LI->getPointerOperand()))
      return;
    Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
                             LI->getType(), LI->getAlign());
  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (!ClInstrumentWrites || ignoreAccess(I, SI->getPointerOperand()))
      return;
    Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,
                             SI->getValueOperand()->getType(), SI->getAlign());
  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
    if (!ClInstrumentAtomics || ignoreAccess(I, RMW->getPointerOperand()))
      return;
    Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,
                             RMW->getValOperand()->getType(), std::nullopt);
  } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
    if (!ClInstrumentAtomics || ignoreAccess(I, XCHG->getPointerOperand()))
      return;
    Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
                             XCHG->getCompareOperand()->getType(),
                             std::nullopt);
  } else if (auto CI = dyn_cast<CallInst>(I)) {
    switch (CI->getIntrinsicID()) {
    case Intrinsic::masked_load:
    case Intrinsic::masked_store:
    case Intrinsic::masked_gather:
    case Intrinsic::masked_scatter: {
      bool IsWrite = CI->getType()->isVoidTy();
      // Masked store has an initial operand for the value.
      unsigned OpOffset = IsWrite ? 1 : 0;
      if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
        return;

      auto BasePtr = CI->getOperand(OpOffset);
      if (ignoreAccess(I, BasePtr))
        return;
      Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
      MaybeAlign Alignment = CI->getParamAlign(0);
      Value *Mask = CI->getOperand(1 + OpOffset);
      Interesting.emplace_back(I, OpOffset, IsWrite, Ty, Alignment, Mask);
      break;
    }
    case Intrinsic::masked_expandload:
    case Intrinsic::masked_compressstore: {
      bool IsWrite = CI->getIntrinsicID() == Intrinsic::masked_compressstore;
      unsigned OpOffset = IsWrite ? 1 : 0;
      if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
        return;
      auto BasePtr = CI->getOperand(OpOffset);
      if (ignoreAccess(I, BasePtr))
        return;
      MaybeAlign Alignment = BasePtr->getPointerAlignment(*DL);
      Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();

      IRBuilder IB(I);
      Value *Mask = CI->getOperand(1 + OpOffset);
      // Use the popcount of Mask as the effective vector length.
      Type *ExtTy = VectorType::get(IntptrTy, cast<VectorType>(Ty));
      Value *ExtMask = IB.CreateZExt(Mask, ExtTy);
      Value *EVL = IB.CreateAddReduce(ExtMask);
      Value *TrueMask = ConstantInt::get(Mask->getType(), 1);
      Interesting.emplace_back(I, OpOffset, IsWrite, Ty, Alignment, TrueMask,
                               EVL);
      break;
    }
    case Intrinsic::vp_load:
    case Intrinsic::vp_store:
    case Intrinsic::experimental_vp_strided_load:
    case Intrinsic::experimental_vp_strided_store: {
      auto *VPI = cast<VPIntrinsic>(CI);
      unsigned IID = CI->getIntrinsicID();
      bool IsWrite = CI->getType()->isVoidTy();
      if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
        return;
      unsigned PtrOpNo = *VPI->getMemoryPointerParamPos(IID);
      Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
      MaybeAlign Alignment = VPI->getOperand(PtrOpNo)->getPointerAlignment(*DL);
      Value *Stride = nullptr;
      if (IID == Intrinsic::experimental_vp_strided_store ||
          IID == Intrinsic::experimental_vp_strided_load) {
        Stride = VPI->getOperand(PtrOpNo + 1);
        // Use the pointer alignment as the element alignment if the stride is
        // a multiple of the pointer alignment. Otherwise, the element
        // alignment should be Align(1).
        unsigned PointerAlign = Alignment.valueOrOne().value();
        if (!isa<ConstantInt>(Stride) ||
            cast<ConstantInt>(Stride)->getZExtValue() % PointerAlign != 0)
          Alignment = Align(1);
      }
      Interesting.emplace_back(I, PtrOpNo, IsWrite, Ty, Alignment,
                               VPI->getMaskParam(), VPI->getVectorLengthParam(),
                               Stride);
      break;
    }
    case Intrinsic::vp_gather:
    case Intrinsic::vp_scatter: {
      auto *VPI = cast<VPIntrinsic>(CI);
      unsigned IID = CI->getIntrinsicID();
      bool IsWrite = IID == Intrinsic::vp_scatter;
      if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
        return;
      unsigned PtrOpNo = *VPI->getMemoryPointerParamPos(IID);
      Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
      MaybeAlign Alignment = VPI->getPointerAlignment();
      Interesting.emplace_back(I, PtrOpNo, IsWrite, Ty, Alignment,
                               VPI->getMaskParam(),
                               VPI->getVectorLengthParam());
      break;
    }
    default:
      if (auto *II = dyn_cast<IntrinsicInst>(I)) {
        MemIntrinsicInfo IntrInfo;
        if (TTI->getTgtMemIntrinsic(II, IntrInfo))
          Interesting = IntrInfo.InterestingOperands;
        return;
      }
      for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
        if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
            ignoreAccess(I, CI->getArgOperand(ArgNo)))
          continue;
        Type *Ty = CI->getParamByValType(ArgNo);
        Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
      }
    }
  }
}

1618static bool isPointerOperand(Value *V) {
1619 return V->getType()->isPointerTy() || isa<PtrToIntInst>(V);
1620}
1621
1622// This is a rough heuristic; it may cause both false positives and
1623// false negatives. The proper implementation requires cooperation with
1624// the frontend.
1625static bool isInterestingPointerComparison(Instruction *I) {
1626 if (ICmpInst *Cmp = dyn_cast<ICmpInst>(I)) {
1627 if (!Cmp->isRelational())
1628 return false;
1629 } else {
1630 return false;
1631 }
1632 return isPointerOperand(I->getOperand(0)) &&
1633 isPointerOperand(I->getOperand(1));
1634}
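// Illustrative IR that this heuristic accepts (a sketch, not emitted by this
// pass): a relational compare whose operands are both pointer-typed or
// ptrtoint results, e.g.
//   %cmp = icmp ult ptr %p, %q
// Matching instructions are handed to
// instrumentPointerComparisonOrSubtraction below, which forwards both
// operands to the AsanPtrCmpFunction runtime callback.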
1635
1636// This is a rough heuristic; it may cause both false positives and
1637// false negatives. The proper implementation requires cooperation with
1638// the frontend.
1639static bool isInterestingPointerSubtraction(Instruction *I) {
1640 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
1641 if (BO->getOpcode() != Instruction::Sub)
1642 return false;
1643 } else {
1644 return false;
1645 }
1646 return isPointerOperand(I->getOperand(0)) &&
1647 isPointerOperand(I->getOperand(1));
1648}
1649
1650bool AddressSanitizer::GlobalIsLinkerInitialized(GlobalVariable *G) {
1651 // If a global variable does not have dynamic initialization we don't
1652 // have to instrument it. However, if a global does not have an initializer
1653 // at all, we assume it has a dynamic initializer (in another TU).
1654 if (!G->hasInitializer())
1655 return false;
1656
1657 if (G->hasSanitizerMetadata() && G->getSanitizerMetadata().IsDynInit)
1658 return false;
1659
1660 return true;
1661}
1662
1663void AddressSanitizer::instrumentPointerComparisonOrSubtraction(
1664 Instruction *I, RuntimeCallInserter &RTCI) {
1665 IRBuilder<> IRB(I);
1666 FunctionCallee F = isa<ICmpInst>(I) ? AsanPtrCmpFunction : AsanPtrSubFunction;
1667 Value *Param[2] = {I->getOperand(0), I->getOperand(1)};
1668 for (Value *&i : Param) {
1669 if (i->getType()->isPointerTy())
1670 i = IRB.CreatePointerCast(i, IntptrTy);
1671 }
1672 RTCI.createRuntimeCall(IRB, F, Param);
1673}
1674
1675static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I,
1676 Instruction *InsertBefore, Value *Addr,
1677 MaybeAlign Alignment, unsigned Granularity,
1678 TypeSize TypeStoreSize, bool IsWrite,
1679 Value *SizeArgument, bool UseCalls,
1680 uint32_t Exp, RuntimeCallInserter &RTCI) {
1681 // Instrument a 1-, 2-, 4-, 8-, or 16-byte access with one check
1682 // if the data is properly aligned.
1683 if (!TypeStoreSize.isScalable()) {
1684 const auto FixedSize = TypeStoreSize.getFixedValue();
1685 switch (FixedSize) {
1686 case 8:
1687 case 16:
1688 case 32:
1689 case 64:
1690 case 128:
1691 if (!Alignment || *Alignment >= Granularity ||
1692 *Alignment >= FixedSize / 8)
1693 return Pass->instrumentAddress(I, InsertBefore, Addr, Alignment,
1694 FixedSize, IsWrite, nullptr, UseCalls,
1695 Exp, RTCI);
1696 }
1697 }
1698 Pass->instrumentUnusualSizeOrAlignment(I, InsertBefore, Addr, TypeStoreSize,
1699 IsWrite, nullptr, UseCalls, Exp, RTCI);
1700}
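// For example, an align-4 i32 load hits the FixedSize == 32 case with
// *Alignment (4) >= FixedSize / 8, so it is covered by a single shadow check,
// while an align-1 i64 load fails every alignment test and falls through to
// instrumentUnusualSizeOrAlignment, which checks the first and last byte
// separately.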
1701
1702void AddressSanitizer::instrumentMaskedLoadOrStore(
1703 AddressSanitizer *Pass, const DataLayout &DL, Type *IntptrTy, Value *Mask,
1704 Value *EVL, Value *Stride, Instruction *I, Value *Addr,
1705 MaybeAlign Alignment, unsigned Granularity, Type *OpType, bool IsWrite,
1706 Value *SizeArgument, bool UseCalls, uint32_t Exp,
1707 RuntimeCallInserter &RTCI) {
1708 auto *VTy = cast<VectorType>(OpType);
1709 TypeSize ElemTypeSize = DL.getTypeStoreSizeInBits(VTy->getScalarType());
1710 auto Zero = ConstantInt::get(IntptrTy, 0);
1711
1712 IRBuilder IB(I);
1713 Instruction *LoopInsertBefore = I;
1714 if (EVL) {
1715 // The end argument of SplitBlockAndInsertForEachLane is assumed to be
1716 // bigger than zero, so we should check whether EVL is zero here.
1717 Type *EVLType = EVL->getType();
1718 Value *IsEVLNonZero = IB.CreateICmpNE(EVL, ConstantInt::get(EVLType, 0));
1719 LoopInsertBefore = SplitBlockAndInsertIfThen(IsEVLNonZero, I, false);
1720 IB.SetInsertPoint(LoopInsertBefore);
1721 // Cast EVL to IntptrTy.
1722 EVL = IB.CreateZExtOrTrunc(EVL, IntptrTy);
1723 // To avoid undefined behavior when extracting with an out-of-range index,
1724 // use the minimum of EVL and the element count as the trip count.
1725 Value *EC = IB.CreateElementCount(IntptrTy, VTy->getElementCount());
1726 EVL = IB.CreateBinaryIntrinsic(Intrinsic::umin, EVL, EC);
1727 } else {
1728 EVL = IB.CreateElementCount(IntptrTy, VTy->getElementCount());
1729 }
1730
1731 // Cast Stride to IntptrTy.
1732 if (Stride)
1733 Stride = IB.CreateZExtOrTrunc(Stride, IntptrTy);
1734
1735 SplitBlockAndInsertForEachLane(EVL, LoopInsertBefore->getIterator(),
1736 [&](IRBuilderBase &IRB, Value *Index) {
1737 Value *MaskElem = IRB.CreateExtractElement(Mask, Index);
1738 if (auto *MaskElemC = dyn_cast<ConstantInt>(MaskElem)) {
1739 if (MaskElemC->isZero())
1740 // No check
1741 return;
1742 // Unconditional check
1743 } else {
1744 // Conditional check
1745 Instruction *ThenTerm = SplitBlockAndInsertIfThen(
1746 MaskElem, &*IRB.GetInsertPoint(), false);
1747 IRB.SetInsertPoint(ThenTerm);
1748 }
1749
1750 Value *InstrumentedAddress;
1751 if (isa<VectorType>(Addr->getType())) {
1752 assert(
1753 cast<VectorType>(Addr->getType())->getElementType()->isPointerTy() &&
1754 "Expected vector of pointer.");
1755 InstrumentedAddress = IRB.CreateExtractElement(Addr, Index);
1756 } else if (Stride) {
1757 Index = IRB.CreateMul(Index, Stride);
1758 InstrumentedAddress = IRB.CreatePtrAdd(Addr, Index);
1759 } else {
1760 InstrumentedAddress = IRB.CreateGEP(VTy, Addr, {Zero, Index});
1761 }
1762 doInstrumentAddress(Pass, I, &*IRB.GetInsertPoint(), InstrumentedAddress,
1763 Alignment, Granularity, ElemTypeSize, IsWrite,
1764 SizeArgument, UseCalls, Exp, RTCI);
1765 });
1766}
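// Conceptually, the expansion above rewrites a masked or length-predicated
// access into a per-lane guard loop. A rough C-like sketch (illustrative
// only; check() stands in for the shadow check emitted by
// doInstrumentAddress):
//   for (intptr_t i = 0; i < umin(EVL, NumElements); i++)
//     if (Mask[i])
//       check(IsGather ? Ptrs[i] : Stride ? Base + i * Stride : &Base[i]);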
1767
1768void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
1769 InterestingMemoryOperand &O, bool UseCalls,
1770 const DataLayout &DL,
1771 RuntimeCallInserter &RTCI) {
1772 Value *Addr = O.getPtr();
1773
1774 // Optimization experiments.
1775 // The experiments can be used to evaluate potential optimizations that remove
1776 // instrumentation (assess false negatives). Instead of completely removing
1777 // some instrumentation, you set Exp to a non-zero value (mask of optimization
1778 // experiments that want to remove instrumentation of this instruction).
1779 // If Exp is non-zero, this pass will emit special calls into the runtime
1780 // (e.g. __asan_report_exp_load1 instead of __asan_report_load1). These calls
1781 // make the runtime terminate the program in a special way (with a different
1782 // exit status). Then you run the new compiler on a buggy corpus, collect
1783 // the special terminations (ideally, you don't see them at all -- no false
1784 // negatives) and make the decision on the optimization.
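// For example, running the pass with -asan-force-experiment=<mask> (the flag
// behind ClForceExperiment) makes a failing 4-byte load report through
// __asan_report_exp_load4(addr, <mask>) instead of __asan_report_load4(addr);
// the exact driver spelling for passing the flag is build-dependent.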
1785 uint32_t Exp = ClForceExperiment;
1786
1787 if (ClOpt && ClOptGlobals) {
1788 // If initialization order checking is disabled, a simple access to a
1789 // dynamically initialized global is always valid.
1790 GlobalVariable *G = dyn_cast<GlobalVariable>(getUnderlyingObject(Addr));
1791 if (G && (!ClInitializers || GlobalIsLinkerInitialized(G)) &&
1792 isSafeAccess(ObjSizeVis, Addr, O.TypeStoreSize)) {
1793 NumOptimizedAccessesToGlobalVar++;
1794 return;
1795 }
1796 }
1797
1798 if (ClOpt && ClOptStack) {
1799 // A direct inbounds access to a stack variable is always valid.
1800 if (isa<AllocaInst>(getUnderlyingObject(Addr)) &&
1801 isSafeAccess(ObjSizeVis, Addr, O.TypeStoreSize)) {
1802 NumOptimizedAccessesToStackVar++;
1803 return;
1804 }
1805 }
1806
1807 if (O.IsWrite)
1808 NumInstrumentedWrites++;
1809 else
1810 NumInstrumentedReads++;
1811
1812 if (O.MaybeByteOffset) {
1813 Type *Ty = Type::getInt8Ty(*C);
1814 IRBuilder IB(O.getInsn());
1815
1816 Value *OffsetOp = O.MaybeByteOffset;
1817 if (TargetTriple.isRISCV()) {
1818 Type *OffsetTy = OffsetOp->getType();
1819 // RVV indexed loads/stores zero-extend offset operands which are narrower
1820 // than XLEN to XLEN.
1821 if (OffsetTy->getScalarType()->getIntegerBitWidth() <
1822 static_cast<unsigned>(LongSize)) {
1823 VectorType *OrigType = cast<VectorType>(OffsetTy);
1824 Type *ExtendTy = VectorType::get(IntptrTy, OrigType);
1825 OffsetOp = IB.CreateZExt(OffsetOp, ExtendTy);
1826 }
1827 }
1828 Addr = IB.CreateGEP(Ty, Addr, {OffsetOp});
1829 }
1830
1831 unsigned Granularity = 1 << Mapping.Scale;
1832 if (O.MaybeMask) {
1833 instrumentMaskedLoadOrStore(this, DL, IntptrTy, O.MaybeMask, O.MaybeEVL,
1834 O.MaybeStride, O.getInsn(), Addr, O.Alignment,
1835 Granularity, O.OpType, O.IsWrite, nullptr,
1836 UseCalls, Exp, RTCI);
1837 } else {
1838 doInstrumentAddress(this, O.getInsn(), O.getInsn(), Addr, O.Alignment,
1839 Granularity, O.TypeStoreSize, O.IsWrite, nullptr,
1840 UseCalls, Exp, RTCI);
1841 }
1842}
1843
1844Instruction *AddressSanitizer::generateCrashCode(Instruction *InsertBefore,
1845 Value *Addr, bool IsWrite,
1846 size_t AccessSizeIndex,
1847 Value *SizeArgument,
1848 uint32_t Exp,
1849 RuntimeCallInserter &RTCI) {
1850 InstrumentationIRBuilder IRB(InsertBefore);
1851 Value *ExpVal = Exp == 0 ? nullptr : ConstantInt::get(IRB.getInt32Ty(), Exp);
1852 CallInst *Call = nullptr;
1853 if (SizeArgument) {
1854 if (Exp == 0)
1855 Call = RTCI.createRuntimeCall(IRB, AsanErrorCallbackSized[IsWrite][0],
1856 {Addr, SizeArgument});
1857 else
1858 Call = RTCI.createRuntimeCall(IRB, AsanErrorCallbackSized[IsWrite][1],
1859 {Addr, SizeArgument, ExpVal});
1860 } else {
1861 if (Exp == 0)
1862 Call = RTCI.createRuntimeCall(
1863 IRB, AsanErrorCallback[IsWrite][0][AccessSizeIndex], Addr);
1864 else
1865 Call = RTCI.createRuntimeCall(
1866 IRB, AsanErrorCallback[IsWrite][1][AccessSizeIndex], {Addr, ExpVal});
1867 }
1868
1869 Call->setCannotMerge();
1870 return Call;
1871}
1872
1873Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
1874 Value *ShadowValue,
1875 uint32_t TypeStoreSize) {
1876 size_t Granularity = static_cast<size_t>(1) << Mapping.Scale;
1877 // Addr & (Granularity - 1)
1878 Value *LastAccessedByte =
1879 IRB.CreateAnd(AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));
1880 // (Addr & (Granularity - 1)) + size - 1
1881 if (TypeStoreSize / 8 > 1)
1882 LastAccessedByte = IRB.CreateAdd(
1883 LastAccessedByte, ConstantInt::get(IntptrTy, TypeStoreSize / 8 - 1));
1884 // (uint8_t) ((Addr & (Granularity-1)) + size - 1)
1885 LastAccessedByte =
1886 IRB.CreateIntCast(LastAccessedByte, ShadowValue->getType(), false);
1887 // ((uint8_t) ((Addr & (Granularity-1)) + size - 1)) >= ShadowValue
1888 return IRB.CreateICmpSGE(LastAccessedByte, ShadowValue);
1889}
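// Worked example with the default mapping (Granularity = 8): a shadow value k
// in [1,7] means only the first k bytes of the 8-byte granule are
// addressable. For a 4-byte access at granule offset 2, the last accessed
// byte is (2 & 7) + 4 - 1 = 5, so the access is reported iff 5 >= k: shadow 4
// (bytes 4..7 unaddressable) reports, shadow 6 passes.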
1890
1891Instruction *AddressSanitizer::instrumentAMDGPUAddress(
1892 Instruction *OrigIns, Instruction *InsertBefore, Value *Addr,
1893 uint32_t TypeStoreSize, bool IsWrite, Value *SizeArgument) {
1894 // Do not instrument unsupported addrspaces.
1895 if (isUnsupportedAMDGPUAddrspace(Addr))
1896 return nullptr;
1897 Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
1898 // Follow host instrumentation for global and constant addresses.
1899 if (PtrTy->getPointerAddressSpace() != 0)
1900 return InsertBefore;
1901 // Instrument generic addresses in supported address spaces.
1902 IRBuilder<> IRB(InsertBefore);
1903 Value *IsShared = IRB.CreateCall(AMDGPUAddressShared, {Addr});
1904 Value *IsPrivate = IRB.CreateCall(AMDGPUAddressPrivate, {Addr});
1905 Value *IsSharedOrPrivate = IRB.CreateOr(IsShared, IsPrivate);
1906 Value *Cmp = IRB.CreateNot(IsSharedOrPrivate);
1907 Value *AddrSpaceZeroLanding =
1908 SplitBlockAndInsertIfThen(Cmp, InsertBefore, false);
1909 InsertBefore = cast<Instruction>(AddrSpaceZeroLanding);
1910 return InsertBefore;
1911}
1912
1913Instruction *AddressSanitizer::genAMDGPUReportBlock(IRBuilder<> &IRB,
1914 Value *Cond, bool Recover) {
1915 Module &M = *IRB.GetInsertBlock()->getModule();
1916 Value *ReportCond = Cond;
1917 if (!Recover) {
1918 auto Ballot = M.getOrInsertFunction(kAMDGPUBallotName, IRB.getInt64Ty(),
1919 IRB.getInt1Ty());
1920 ReportCond = IRB.CreateIsNotNull(IRB.CreateCall(Ballot, {Cond}));
1921 }
1922
1923 auto *Trm =
1924 SplitBlockAndInsertIfThen(ReportCond, &*IRB.GetInsertPoint(), false,
1925 MDBuilder(*C).createUnlikelyBranchWeights());
1926 Trm->getParent()->setName("asan.report");
1927
1928 if (Recover)
1929 return Trm;
1930
1931 Trm = SplitBlockAndInsertIfThen(Cond, Trm, false);
1932 IRB.SetInsertPoint(Trm);
1933 return IRB.CreateCall(
1934 M.getOrInsertFunction(kAMDGPUUnreachableName, IRB.getVoidTy()), {});
1935}
1936
1937void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
1938 Instruction *InsertBefore, Value *Addr,
1939 MaybeAlign Alignment,
1940 uint32_t TypeStoreSize, bool IsWrite,
1941 Value *SizeArgument, bool UseCalls,
1942 uint32_t Exp,
1943 RuntimeCallInserter &RTCI) {
1944 if (TargetTriple.isAMDGPU()) {
1945 InsertBefore = instrumentAMDGPUAddress(OrigIns, InsertBefore, Addr,
1946 TypeStoreSize, IsWrite, SizeArgument);
1947 if (!InsertBefore)
1948 return;
1949 }
1950
1951 InstrumentationIRBuilder IRB(InsertBefore);
1952 size_t AccessSizeIndex = TypeStoreSizeToSizeIndex(TypeStoreSize);
1953
1954 if (UseCalls && ClOptimizeCallbacks) {
1955 const ASanAccessInfo AccessInfo(IsWrite, CompileKernel, AccessSizeIndex);
1956 IRB.CreateIntrinsic(Intrinsic::asan_check_memaccess, {},
1957 {IRB.CreatePointerCast(Addr, PtrTy),
1958 ConstantInt::get(Int32Ty, AccessInfo.Packed)});
1959 return;
1960 }
1961
1962 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
1963 if (UseCalls) {
1964 if (Exp == 0)
1965 RTCI.createRuntimeCall(
1966 IRB, AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex], AddrLong);
1967 else
1968 RTCI.createRuntimeCall(
1969 IRB, AsanMemoryAccessCallback[IsWrite][1][AccessSizeIndex],
1970 {AddrLong, ConstantInt::get(IRB.getInt32Ty(), Exp)});
1971 return;
1972 }
1973
1974 Type *ShadowTy =
1975 IntegerType::get(*C, std::max(8U, TypeStoreSize >> Mapping.Scale));
1976 Type *ShadowPtrTy = PointerType::get(*C, ClShadowAddrSpace);
1977 Value *ShadowPtr = memToShadow(AddrLong, IRB);
1978 const uint64_t ShadowAlign =
1979 std::max<uint64_t>(Alignment.valueOrOne().value() >> Mapping.Scale, 1);
1980 Value *ShadowValue = IRB.CreateAlignedLoad(
1981 ShadowTy, IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy), Align(ShadowAlign));
1982
1983 Value *Cmp = IRB.CreateIsNotNull(ShadowValue);
1984 size_t Granularity = 1ULL << Mapping.Scale;
1985 Instruction *CrashTerm = nullptr;
1986
1987 bool GenSlowPath = (ClAlwaysSlowPath || (TypeStoreSize < 8 * Granularity));
1988
1989 if (TargetTriple.isAMDGCN()) {
1990 if (GenSlowPath) {
1991 auto *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeStoreSize);
1992 Cmp = IRB.CreateAnd(Cmp, Cmp2);
1993 }
1994 CrashTerm = genAMDGPUReportBlock(IRB, Cmp, Recover);
1995 } else if (GenSlowPath) {
1996 // We use branch weights for the slow path check, to indicate that the slow
1997 // path is rarely taken. This seems to be the case for SPEC benchmarks.
1998 Instruction *CheckTerm = SplitBlockAndInsertIfThen(
1999 Cmp, InsertBefore, false, MDBuilder(*C).createUnlikelyBranchWeights());
2000 assert(cast<BranchInst>(CheckTerm)->isUnconditional());
2001 BasicBlock *NextBB = CheckTerm->getSuccessor(0);
2002 IRB.SetInsertPoint(CheckTerm);
2003 Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeStoreSize);
2004 if (Recover) {
2005 CrashTerm = SplitBlockAndInsertIfThen(Cmp2, CheckTerm, false);
2006 } else {
2007 BasicBlock *CrashBlock =
2008 BasicBlock::Create(*C, "", NextBB->getParent(), NextBB);
2009 CrashTerm = new UnreachableInst(*C, CrashBlock);
2010 BranchInst *NewTerm = BranchInst::Create(CrashBlock, NextBB, Cmp2);
2011 ReplaceInstWithInst(CheckTerm, NewTerm);
2012 }
2013 } else {
2014 CrashTerm = SplitBlockAndInsertIfThen(Cmp, InsertBefore, !Recover);
2015 }
2016
2017 Instruction *Crash = generateCrashCode(
2018 CrashTerm, AddrLong, IsWrite, AccessSizeIndex, SizeArgument, Exp, RTCI);
2019 if (OrigIns->getDebugLoc())
2020 Crash->setDebugLoc(OrigIns->getDebugLoc());
2021}
2022
2023// Instrument unusual size or unusual alignment.
2024 // We cannot do it with a single check, so we do a 1-byte check for the first
2025 // and the last bytes. We call __asan_report_*_n(addr, real_size) to be able
2026 // to report the actual access size.
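// For example, a 10-byte access at address A is covered by a 1-byte check at
// A and a 1-byte check at A + 9; both checks carry Size = 10 as SizeArgument,
// so a failure is reported through __asan_report_load_n / __asan_report_store_n
// with the true access size.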
2027void AddressSanitizer::instrumentUnusualSizeOrAlignment(
2028 Instruction *I, Instruction *InsertBefore, Value *Addr,
2029 TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls,
2030 uint32_t Exp, RuntimeCallInserter &RTCI) {
2031 InstrumentationIRBuilder IRB(InsertBefore);
2032 Value *NumBits = IRB.CreateTypeSize(IntptrTy, TypeStoreSize);
2033 Value *Size = IRB.CreateLShr(NumBits, ConstantInt::get(IntptrTy, 3));
2034
2035 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
2036 if (UseCalls) {
2037 if (Exp == 0)
2038 RTCI.createRuntimeCall(IRB, AsanMemoryAccessCallbackSized[IsWrite][0],
2039 {AddrLong, Size});
2040 else
2041 RTCI.createRuntimeCall(
2042 IRB, AsanMemoryAccessCallbackSized[IsWrite][1],
2043 {AddrLong, Size, ConstantInt::get(IRB.getInt32Ty(), Exp)});
2044 } else {
2045 Value *SizeMinusOne = IRB.CreateSub(Size, ConstantInt::get(IntptrTy, 1));
2046 Value *LastByte = IRB.CreateIntToPtr(
2047 IRB.CreateAdd(AddrLong, SizeMinusOne),
2048 Addr->getType());
2049 instrumentAddress(I, InsertBefore, Addr, {}, 8, IsWrite, Size, false, Exp,
2050 RTCI);
2051 instrumentAddress(I, InsertBefore, LastByte, {}, 8, IsWrite, Size, false,
2052 Exp, RTCI);
2053 }
2054}
2055
2056void ModuleAddressSanitizer::poisonOneInitializer(Function &GlobalInit) {
2057 // Set up the arguments to our poison/unpoison functions.
2058 IRBuilder<> IRB(&GlobalInit.front(),
2059 GlobalInit.front().getFirstInsertionPt());
2060
2061 // Add a call to poison all external globals before the given function starts.
2062 Value *ModuleNameAddr =
2063 ConstantExpr::getPointerCast(getOrCreateModuleName(), IntptrTy);
2064 IRB.CreateCall(AsanPoisonGlobals, ModuleNameAddr);
2065
2066 // Add calls to unpoison all globals before each return instruction.
2067 for (auto &BB : GlobalInit)
2068 if (ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator()))
2069 CallInst::Create(AsanUnpoisonGlobals, "", RI->getIterator());
2070}
2071
2072void ModuleAddressSanitizer::createInitializerPoisonCalls() {
2073 GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors");
2074 if (!GV)
2075 return;
2076
2077 ConstantArray *CA = dyn_cast<ConstantArray>(GV->getInitializer());
2078 if (!CA)
2079 return;
2080
2081 for (Use &OP : CA->operands()) {
2082 if (isa<ConstantAggregateZero>(OP)) continue;
2083 ConstantStruct *CS = cast<ConstantStruct>(OP);
2084
2085 // Must have a function or null ptr.
2086 if (Function *F = dyn_cast<Function>(CS->getOperand(1))) {
2087 if (F->getName() == kAsanModuleCtorName) continue;
2088 auto *Priority = cast<ConstantInt>(CS->getOperand(0));
2089 // Don't instrument CTORs that will run before asan.module_ctor.
2090 if (Priority->getLimitedValue() <= GetCtorAndDtorPriority(TargetTriple))
2091 continue;
2092 poisonOneInitializer(*F);
2093 }
2094 }
2095}
2096
2097const GlobalVariable *
2098ModuleAddressSanitizer::getExcludedAliasedGlobal(const GlobalAlias &GA) const {
2099 // In case this function should be expanded to include rules that do not just
2100 // apply when CompileKernel is true, either guard all existing rules with an
2101 // 'if (CompileKernel) { ... }' or be absolutely sure that all these rules
2102 // should also apply to user space.
2103 assert(CompileKernel && "Only expecting to be called when compiling kernel");
2104
2105 const Constant *C = GA.getAliasee();
2106
2107 // When compiling the kernel, globals that are aliased by symbols prefixed
2108 // by "__" are special and cannot be padded with a redzone.
2109 if (GA.getName().starts_with("__"))
2110 return dyn_cast<GlobalVariable>(C->stripPointerCastsAndAliases());
2111
2112 return nullptr;
2113}
2114
2115bool ModuleAddressSanitizer::shouldInstrumentGlobal(GlobalVariable *G) const {
2116 Type *Ty = G->getValueType();
2117 LLVM_DEBUG(dbgs() << "GLOBAL: " << *G << "\n");
2118
2119 if (G->hasSanitizerMetadata() && G->getSanitizerMetadata().NoAddress)
2120 return false;
2121 if (!Ty->isSized()) return false;
2122 if (!G->hasInitializer()) return false;
2123 if (!isSupportedAddrspace(TargetTriple, G))
2124 return false;
2125 if (GlobalWasGeneratedByCompiler(G)) return false; // Our own globals.
2126 // Two problems with thread-locals:
2127 // - The address of the main thread's copy can't be computed at link-time.
2128 // - Need to poison all copies, not just the main thread's one.
2129 if (G->isThreadLocal()) return false;
2130 // For now, just ignore this Global if the alignment is large.
2131 if (G->getAlign() && *G->getAlign() > getMinRedzoneSizeForGlobal()) return false;
2132
2133 // For non-COFF targets, only instrument globals known to be defined by this
2134 // TU.
2135 // FIXME: We can instrument comdat globals on ELF if we are using the
2136 // GC-friendly metadata scheme.
2137 if (!TargetTriple.isOSBinFormatCOFF()) {
2138 if (!G->hasExactDefinition() || G->hasComdat())
2139 return false;
2140 } else {
2141 // On COFF, don't instrument non-ODR linkages.
2142 if (G->isInterposable())
2143 return false;
2144 // If the global has AvailableExternally linkage, then it is not in this
2145 // module, which means it does not need to be instrumented.
2146 if (G->hasAvailableExternallyLinkage())
2147 return false;
2148 }
2149
2150 // If a comdat is present, it must have a selection kind that implies ODR
2151 // semantics: no duplicates, any, or exact match.
2152 if (Comdat *C = G->getComdat()) {
2153 switch (C->getSelectionKind()) {
2154 case Comdat::Any:
2155 case Comdat::ExactMatch:
2156 case Comdat::NoDeduplicate:
2157 break;
2158 case Comdat::Largest:
2159 case Comdat::SameSize:
2160 return false;
2161 }
2162 }
2163
2164 if (G->hasSection()) {
2165 // The kernel uses explicit sections for mostly special global variables
2166 // that we should not instrument. E.g. the kernel may rely on their layout
2167 // without redzones, or remove them at link time ("discard.*"), etc.
2168 if (CompileKernel)
2169 return false;
2170
2171 StringRef Section = G->getSection();
2172
2173 // Globals from llvm.metadata aren't emitted, do not instrument them.
2174 if (Section == "llvm.metadata") return false;
2175 // Do not instrument globals from special LLVM sections.
2176 if (Section.contains("__llvm") || Section.contains("__LLVM"))
2177 return false;
2178
2179 // Do not instrument function pointers to initialization and termination
2180 // routines: dynamic linker will not properly handle redzones.
2181 if (Section.starts_with(".preinit_array") ||
2182 Section.starts_with(".init_array") ||
2183 Section.starts_with(".fini_array")) {
2184 return false;
2185 }
2186
2187 // Do not instrument user-defined sections (with names resembling
2188 // valid C identifiers)
2189 if (TargetTriple.isOSBinFormatELF()) {
2190 if (llvm::all_of(Section,
2191 [](char c) { return llvm::isAlnum(c) || c == '_'; }))
2192 return false;
2193 }
2194
2195 // On COFF, if the section name contains '$', it is highly likely that the
2196 // user is using section sorting to create an array of globals similar to
2197 // the way initialization callbacks are registered in .init_array and
2198 // .CRT$XCU. The ATL also registers things in .ATL$__[azm]. Adding redzones
2199 // to such globals is counterproductive, because the intent is that they
2200 // will form an array, and out-of-bounds accesses are expected.
2201 // See https://github.com/google/sanitizers/issues/305
2202 // and http://msdn.microsoft.com/en-US/en-en/library/bb918180(v=vs.120).aspx
2203 if (TargetTriple.isOSBinFormatCOFF() && Section.contains('$')) {
2204 LLVM_DEBUG(dbgs() << "Ignoring global in sorted section (contains '$'): "
2205 << *G << "\n");
2206 return false;
2207 }
2208
2209 if (TargetTriple.isOSBinFormatMachO()) {
2210 StringRef ParsedSegment, ParsedSection;
2211 unsigned TAA = 0, StubSize = 0;
2212 bool TAAParsed;
2213 cantFail(MCSectionMachO::ParseSectionSpecifier(
2214 Section, ParsedSegment, ParsedSection, TAA, TAAParsed, StubSize));
2215
2216 // Ignore the globals from the __OBJC section. The ObjC runtime assumes
2217 // those conform to /usr/lib/objc/runtime.h, so we can't add redzones to
2218 // them.
2219 if (ParsedSegment == "__OBJC" ||
2220 (ParsedSegment == "__DATA" && ParsedSection.starts_with("__objc_"))) {
2221 LLVM_DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G << "\n");
2222 return false;
2223 }
2224 // See https://github.com/google/sanitizers/issues/32
2225 // Constant CFString instances are compiled in the following way:
2226 // -- the string buffer is emitted into
2227 // __TEXT,__cstring,cstring_literals
2228 // -- the constant NSConstantString structure referencing that buffer
2229 // is placed into __DATA,__cfstring
2230 // Therefore there's no point in placing redzones into __DATA,__cfstring.
2231 // Moreover, it causes the linker to crash on OS X 10.7
2232 if (ParsedSegment == "__DATA" && ParsedSection == "__cfstring") {
2233 LLVM_DEBUG(dbgs() << "Ignoring CFString: " << *G << "\n");
2234 return false;
2235 }
2236 // The linker merges the contents of cstring_literals and removes the
2237 // trailing zeroes.
2238 if (ParsedSegment == "__TEXT" && (TAA & MachO::S_CSTRING_LITERALS)) {
2239 LLVM_DEBUG(dbgs() << "Ignoring a cstring literal: " << *G << "\n");
2240 return false;
2241 }
2242 }
2243 }
2244
2245 if (CompileKernel) {
2246 // Globals that are prefixed by "__" are special and cannot be padded with a
2247 // redzone.
2248 if (G->getName().starts_with("__"))
2249 return false;
2250 }
2251
2252 return true;
2253}
2254
2255// On Mach-O platforms, we emit global metadata in a separate section of the
2256// binary in order to allow the linker to properly dead strip. This is only
2257// supported on recent versions of ld64.
2258bool ModuleAddressSanitizer::ShouldUseMachOGlobalsSection() const {
2259 if (!TargetTriple.isOSBinFormatMachO())
2260 return false;
2261
2262 if (TargetTriple.isMacOSX() && !TargetTriple.isMacOSXVersionLT(10, 11))
2263 return true;
2264 if (TargetTriple.isiOS() /* or tvOS */ && !TargetTriple.isOSVersionLT(9))
2265 return true;
2266 if (TargetTriple.isWatchOS() && !TargetTriple.isOSVersionLT(2))
2267 return true;
2268 if (TargetTriple.isDriverKit())
2269 return true;
2270 if (TargetTriple.isXROS())
2271 return true;
2272
2273 return false;
2274}
2275
2276StringRef ModuleAddressSanitizer::getGlobalMetadataSection() const {
2277 switch (TargetTriple.getObjectFormat()) {
2278 case Triple::COFF: return ".ASAN$GL";
2279 case Triple::ELF: return "asan_globals";
2280 case Triple::MachO: return "__DATA,__asan_globals,regular";
2281 case Triple::Wasm:
2282 case Triple::GOFF:
2283 case Triple::SPIRV:
2284 case Triple::XCOFF:
2285 case Triple::DXContainer:
2286 report_fatal_error(
2287 "ModuleAddressSanitizer not implemented for object file format");
2288 case Triple::UnknownObjectFormat:
2289 break;
2290 }
2291 llvm_unreachable("unsupported object format");
2292}
2293
2294void ModuleAddressSanitizer::initializeCallbacks() {
2295 IRBuilder<> IRB(*C);
2296
2297 // Declare our poisoning and unpoisoning functions.
2298 AsanPoisonGlobals =
2299 M.getOrInsertFunction(kAsanPoisonGlobalsName, IRB.getVoidTy(), IntptrTy);
2300 AsanUnpoisonGlobals =
2301 M.getOrInsertFunction(kAsanUnpoisonGlobalsName, IRB.getVoidTy());
2302
2303 // Declare functions that register/unregister globals.
2304 AsanRegisterGlobals = M.getOrInsertFunction(
2305 kAsanRegisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy);
2306 AsanUnregisterGlobals = M.getOrInsertFunction(
2307 kAsanUnregisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy);
2308
2309 // Declare the functions that find globals in a shared object and then invoke
2310 // the (un)register function on them.
2311 AsanRegisterImageGlobals = M.getOrInsertFunction(
2312 kAsanRegisterImageGlobalsName, IRB.getVoidTy(), IntptrTy);
2313 AsanUnregisterImageGlobals = M.getOrInsertFunction(
2314 kAsanUnregisterImageGlobalsName, IRB.getVoidTy(), IntptrTy);
2315
2316 AsanRegisterElfGlobals =
2317 M.getOrInsertFunction(kAsanRegisterElfGlobalsName, IRB.getVoidTy(),
2318 IntptrTy, IntptrTy, IntptrTy);
2319 AsanUnregisterElfGlobals =
2320 M.getOrInsertFunction(kAsanUnregisterElfGlobalsName, IRB.getVoidTy(),
2321 IntptrTy, IntptrTy, IntptrTy);
2322}
2323
2324// Put the metadata and the instrumented global in the same group. This ensures
2325// that the metadata is discarded if the instrumented global is discarded.
2326void ModuleAddressSanitizer::SetComdatForGlobalMetadata(
2327 GlobalVariable *G, GlobalVariable *Metadata, StringRef InternalSuffix) {
2328 Module &M = *G->getParent();
2329 Comdat *C = G->getComdat();
2330 if (!C) {
2331 if (!G->hasName()) {
2332 // If G is unnamed, it must be internal. Give it an artificial name
2333 // so we can put it in a comdat.
2334 assert(G->hasLocalLinkage());
2335 G->setName(genName("anon_global"));
2336 }
2337
2338 if (!InternalSuffix.empty() && G->hasLocalLinkage()) {
2339 std::string Name = std::string(G->getName());
2340 Name += InternalSuffix;
2341 C = M.getOrInsertComdat(Name);
2342 } else {
2343 C = M.getOrInsertComdat(G->getName());
2344 }
2345
2346 // Make this IMAGE_COMDAT_SELECT_NODUPLICATES on COFF. Also upgrade private
2347 // linkage to internal linkage so that a symbol table entry is emitted. This
2348 // is necessary in order to create the comdat group.
2349 if (TargetTriple.isOSBinFormatCOFF()) {
2350 C->setSelectionKind(Comdat::NoDeduplicate);
2351 if (G->hasPrivateLinkage())
2352 G->setLinkage(GlobalValue::InternalLinkage);
2353 }
2354 G->setComdat(C);
2355 }
2356
2357 assert(G->hasComdat());
2358 Metadata->setComdat(G->getComdat());
2359}
2360
2361// Create a separate metadata global and put it in the appropriate ASan
2362// global registration section.
2363GlobalVariable *
2364ModuleAddressSanitizer::CreateMetadataGlobal(Constant *Initializer,
2365 StringRef OriginalName) {
2366 auto Linkage = TargetTriple.isOSBinFormatMachO()
2367 ? GlobalVariable::InternalLinkage
2368 : GlobalVariable::PrivateLinkage;
2369 GlobalVariable *Metadata = new GlobalVariable(
2370 M, Initializer->getType(), false, Linkage, Initializer,
2371 Twine("__asan_global_") + GlobalValue::dropLLVMManglingEscape(OriginalName));
2372 Metadata->setSection(getGlobalMetadataSection());
2373 // Place metadata in a large section for x86-64 ELF binaries to mitigate
2374 // relocation pressure.
2375 setGlobalVariableLargeSection(TargetTriple, *Metadata);
2376 return Metadata;
2377}
2378
2379Instruction *ModuleAddressSanitizer::CreateAsanModuleDtor() {
2380 AsanDtorFunction = Function::createWithDefaultAttr(
2381 FunctionType::get(Type::getVoidTy(*C), false),
2382 GlobalValue::InternalLinkage, 0, kAsanModuleDtorName, &M);
2383 AsanDtorFunction->addFnAttr(Attribute::NoUnwind);
2384 // Ensure Dtor cannot be discarded, even if in a comdat.
2385 appendToUsed(M, {AsanDtorFunction});
2386 BasicBlock *AsanDtorBB = BasicBlock::Create(*C, "", AsanDtorFunction);
2387
2388 return ReturnInst::Create(*C, AsanDtorBB);
2389}
2390
2391void ModuleAddressSanitizer::InstrumentGlobalsCOFF(
2392 IRBuilder<> &IRB, ArrayRef<GlobalVariable *> ExtendedGlobals,
2393 ArrayRef<Constant *> MetadataInitializers) {
2394 assert(ExtendedGlobals.size() == MetadataInitializers.size());
2395 auto &DL = M.getDataLayout();
2396
2397 SmallVector<GlobalValue *, 16> MetadataGlobals(ExtendedGlobals.size());
2398 for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
2399 Constant *Initializer = MetadataInitializers[i];
2400 GlobalVariable *G = ExtendedGlobals[i];
2401 GlobalVariable *Metadata = CreateMetadataGlobal(Initializer, G->getName());
2402 MDNode *MD = MDNode::get(M.getContext(), ValueAsMetadata::get(G));
2403 Metadata->setMetadata(LLVMContext::MD_associated, MD);
2404 MetadataGlobals[i] = Metadata;
2405
2406 // The MSVC linker always inserts padding when linking incrementally. We
2407 // cope with that by aligning each struct to its size, which must be a power
2408 // of two.
2409 unsigned SizeOfGlobalStruct = DL.getTypeAllocSize(Initializer->getType());
2410 assert(isPowerOf2_32(SizeOfGlobalStruct) &&
2411 "global metadata will not be padded appropriately");
2412 Metadata->setAlignment(assumeAligned(SizeOfGlobalStruct));
2413
2414 SetComdatForGlobalMetadata(G, Metadata, "");
2415 }
2416
2417 // Update llvm.compiler.used, adding the new metadata globals. This is
2418 // needed so that during LTO these variables stay alive.
2419 if (!MetadataGlobals.empty())
2420 appendToCompilerUsed(M, MetadataGlobals);
2421}
2422
2423void ModuleAddressSanitizer::instrumentGlobalsELF(
2424 IRBuilder<> &IRB, ArrayRef<GlobalVariable *> ExtendedGlobals,
2425 ArrayRef<Constant *> MetadataInitializers,
2426 const std::string &UniqueModuleId) {
2427 assert(ExtendedGlobals.size() == MetadataInitializers.size());
2428
2429 // Putting globals in a comdat changes the semantics and can potentially
2430 // cause false-negative ODR violations at link time. If ODR indicators are
2431 // used, we keep the comdat sections, as link-time ODR violations will be
2432 // detected on the ODR indicator symbols.
2433 bool UseComdatForGlobalsGC = UseOdrIndicator && !UniqueModuleId.empty();
2434
2435 SmallVector<GlobalValue *, 16> MetadataGlobals(ExtendedGlobals.size());
2436 for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
2437 GlobalVariable *G = ExtendedGlobals[i];
2438 GlobalVariable *Metadata =
2439 CreateMetadataGlobal(MetadataInitializers[i], G->getName());
2440 MDNode *MD = MDNode::get(M.getContext(), ValueAsMetadata::get(G));
2441 Metadata->setMetadata(LLVMContext::MD_associated, MD);
2442 MetadataGlobals[i] = Metadata;
2443
2444 if (UseComdatForGlobalsGC)
2445 SetComdatForGlobalMetadata(G, Metadata, UniqueModuleId);
2446 }
2447
2448 // Update llvm.compiler.used, adding the new metadata globals. This is
2449 // needed so that during LTO these variables stay alive.
2450 if (!MetadataGlobals.empty())
2451 appendToCompilerUsed(M, MetadataGlobals);
2452
2453 // RegisteredFlag serves two purposes. First, we can pass it to dladdr()
2454 // to look up the loaded image that contains it. Second, we can store in it
2455 // whether registration has already occurred, to prevent duplicate
2456 // registration.
2457 //
2458 // Common linkage ensures that there is only one global per shared library.
2459 GlobalVariable *RegisteredFlag = new GlobalVariable(
2460 M, IntptrTy, false, GlobalVariable::CommonLinkage,
2461 ConstantInt::get(IntptrTy, 0), kAsanGlobalsRegisteredFlagName);
2462 RegisteredFlag->setVisibility(GlobalVariable::HiddenVisibility);
2463
2464 // Create start and stop symbols.
2465 GlobalVariable *StartELFMetadata = new GlobalVariable(
2466 M, IntptrTy, false, GlobalVariable::ExternalWeakLinkage, nullptr,
2467 "__start_" + getGlobalMetadataSection());
2468 StartELFMetadata->setVisibility(GlobalVariable::HiddenVisibility);
2469 GlobalVariable *StopELFMetadata = new GlobalVariable(
2470 M, IntptrTy, false, GlobalVariable::ExternalWeakLinkage, nullptr,
2471 "__stop_" + getGlobalMetadataSection());
2472 StopELFMetadata->setVisibility(GlobalVariable::HiddenVisibility);
2473
2474 // Create a call to register the globals with the runtime.
2475 if (ConstructorKind == AsanCtorKind::Global)
2476 IRB.CreateCall(AsanRegisterElfGlobals,
2477 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy),
2478 IRB.CreatePointerCast(StartELFMetadata, IntptrTy),
2479 IRB.CreatePointerCast(StopELFMetadata, IntptrTy)});
2480
2481 // We also need to unregister globals at the end, e.g., when a shared library
2482 // gets closed.
2483 if (DestructorKind != AsanDtorKind::None && !MetadataGlobals.empty()) {
2484 IRBuilder<> IrbDtor(CreateAsanModuleDtor());
2485 IrbDtor.CreateCall(AsanUnregisterElfGlobals,
2486 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy),
2487 IRB.CreatePointerCast(StartELFMetadata, IntptrTy),
2488 IRB.CreatePointerCast(StopELFMetadata, IntptrTy)});
2489 }
2490}
2491
2492void ModuleAddressSanitizer::InstrumentGlobalsMachO(
2493 IRBuilder<> &IRB, ArrayRef<GlobalVariable *> ExtendedGlobals,
2494 ArrayRef<Constant *> MetadataInitializers) {
2495 assert(ExtendedGlobals.size() == MetadataInitializers.size());
2496
2497 // On recent Mach-O platforms, use a structure which binds the liveness of
2498 // the global variable to the metadata struct. Keep the list of "Liveness" GV
2499 // created to be added to llvm.compiler.used
2500 StructType *LivenessTy = StructType::get(IntptrTy, IntptrTy);
2501 SmallVector<GlobalValue *, 16> LivenessGlobals(ExtendedGlobals.size());
2502
2503 for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
2504 Constant *Initializer = MetadataInitializers[i];
2505 GlobalVariable *G = ExtendedGlobals[i];
2506 GlobalVariable *Metadata = CreateMetadataGlobal(Initializer, G->getName());
2507
2508 // On recent Mach-O platforms, we emit the global metadata in a way that
2509 // allows the linker to properly strip dead globals.
2510 auto LivenessBinder =
2511 ConstantStruct::get(LivenessTy, Initializer->getAggregateElement(0u),
2512 Initializer->getAggregateElement(1u));
2513 GlobalVariable *Liveness = new GlobalVariable(
2514 M, LivenessTy, false, GlobalVariable::InternalLinkage, LivenessBinder,
2515 Twine("__asan_binder_") + G->getName());
2516 Liveness->setSection("__DATA,__asan_liveness,regular,live_support");
2517 LivenessGlobals[i] = Liveness;
2518 }
2519
2520 // Update llvm.compiler.used, adding the new liveness globals. This is
2521 // needed so that during LTO these variables stay alive. The alternative
2522 // would be to have the linker handle the LTO symbols, but libLTO's
2523 // current API does not expose access to the section for each symbol.
2524 if (!LivenessGlobals.empty())
2525 appendToCompilerUsed(M, LivenessGlobals);
2526
2527 // RegisteredFlag serves two purposes. First, we can pass it to dladdr()
2528 // to look up the loaded image that contains it. Second, we can store in it
2529 // whether registration has already occurred, to prevent duplicate
2530 // registration.
2531 //
2532 // common linkage ensures that there is only one global per shared library.
2533 GlobalVariable *RegisteredFlag = new GlobalVariable(
2534 M, IntptrTy, false, GlobalVariable::CommonLinkage,
2535 ConstantInt::get(IntptrTy, 0), kAsanGlobalsRegisteredFlagName);
2536 RegisteredFlag->setVisibility(GlobalVariable::HiddenVisibility);
2537
2538 if (ConstructorKind == AsanCtorKind::Global)
2539 IRB.CreateCall(AsanRegisterImageGlobals,
2540 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy)});
2541
2542 // We also need to unregister globals at the end, e.g., when a shared library
2543 // gets closed.
2544 if (DestructorKind != AsanDtorKind::None) {
2545 IRBuilder<> IrbDtor(CreateAsanModuleDtor());
2546 IrbDtor.CreateCall(AsanUnregisterImageGlobals,
2547 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy)});
2548 }
2549}
2550
2551void ModuleAddressSanitizer::InstrumentGlobalsWithMetadataArray(
2552 IRBuilder<> &IRB, ArrayRef<GlobalVariable *> ExtendedGlobals,
2553 ArrayRef<Constant *> MetadataInitializers) {
2554 assert(ExtendedGlobals.size() == MetadataInitializers.size());
2555 unsigned N = ExtendedGlobals.size();
2556 assert(N > 0);
2557
2558 // On platforms that don't have a custom metadata section, we emit an array
2559 // of global metadata structures.
2560 ArrayType *ArrayOfGlobalStructTy =
2561 ArrayType::get(MetadataInitializers[0]->getType(), N);
2562 auto AllGlobals = new GlobalVariable(
2563 M, ArrayOfGlobalStructTy, false, GlobalVariable::InternalLinkage,
2564 ConstantArray::get(ArrayOfGlobalStructTy, MetadataInitializers), "");
2565 if (Mapping.Scale > 3)
2566 AllGlobals->setAlignment(Align(1ULL << Mapping.Scale));
2567
2568 if (ConstructorKind == AsanCtorKind::Global)
2569 IRB.CreateCall(AsanRegisterGlobals,
2570 {IRB.CreatePointerCast(AllGlobals, IntptrTy),
2571 ConstantInt::get(IntptrTy, N)});
2572
2573 // We also need to unregister globals at the end, e.g., when a shared library
2574 // gets closed.
2575 if (DestructorKind != AsanDtorKind::None) {
2576 IRBuilder<> IrbDtor(CreateAsanModuleDtor());
2577 IrbDtor.CreateCall(AsanUnregisterGlobals,
2578 {IRB.CreatePointerCast(AllGlobals, IntptrTy),
2579 ConstantInt::get(IntptrTy, N)});
2580 }
2581}
2582
2583// This function replaces all global variables with new variables that have
2584// trailing redzones. It also creates a function that poisons
2585// redzones and inserts this function into llvm.global_ctors.
2586// Sets *CtorComdat to true if the global registration code emitted into the
2587// asan constructor is comdat-compatible.
2588void ModuleAddressSanitizer::instrumentGlobals(IRBuilder<> &IRB,
2589 bool *CtorComdat) {
2590 // Build set of globals that are aliased by some GA, where
2591 // getExcludedAliasedGlobal(GA) returns the relevant GlobalVariable.
2592 SmallPtrSet<const GlobalVariable *, 16> AliasedGlobalExclusions;
2593 if (CompileKernel) {
2594 for (auto &GA : M.aliases()) {
2595 if (const GlobalVariable *GV = getExcludedAliasedGlobal(GA))
2596 AliasedGlobalExclusions.insert(GV);
2597 }
2598 }
2599
2600 SmallVector<GlobalVariable *, 16> GlobalsToChange;
2601 for (auto &G : M.globals()) {
2602 if (!AliasedGlobalExclusions.count(&G) && shouldInstrumentGlobal(&G))
2603 GlobalsToChange.push_back(&G);
2604 }
2605
2606 size_t n = GlobalsToChange.size();
2607 auto &DL = M.getDataLayout();
2608
2609 // A global is described by a structure
2610 // size_t beg;
2611 // size_t size;
2612 // size_t size_with_redzone;
2613 // const char *name;
2614 // const char *module_name;
2615 // size_t has_dynamic_init;
2616 // size_t padding_for_windows_msvc_incremental_link;
2617 // size_t odr_indicator;
2618 // We initialize an array of such structures and pass it to a run-time call.
2619 StructType *GlobalStructTy =
2620 StructType::get(IntptrTy, IntptrTy, IntptrTy, IntptrTy, IntptrTy,
2621 IntptrTy, IntptrTy, IntptrTy);
2622 SmallVector<GlobalVariable *, 16> NewGlobals(n);
2623 SmallVector<Constant *, 16> Initializers(n);
2624
2625 for (size_t i = 0; i < n; i++) {
2626 GlobalVariable *G = GlobalsToChange[i];
2627
2628 GlobalValue::SanitizerMetadata MD;
2629 if (G->hasSanitizerMetadata())
2630 MD = G->getSanitizerMetadata();
2631
2632 // The runtime library tries demangling symbol names in the descriptor but
2633 // functionality like __cxa_demangle may be unavailable (e.g.
2634 // -static-libstdc++). So we demangle the symbol names here.
2635 std::string NameForGlobal = G->getName().str();
2636 GlobalVariable *Name =
2637 createPrivateGlobalForString(M, llvm::demangle(NameForGlobal),
2638 /*AllowMerging*/ true, genName("global"));
2639
2640 Type *Ty = G->getValueType();
2641 const uint64_t SizeInBytes = DL.getTypeAllocSize(Ty);
2642 const uint64_t RightRedzoneSize = getRedzoneSizeForGlobal(SizeInBytes);
2643 Type *RightRedZoneTy = ArrayType::get(IRB.getInt8Ty(), RightRedzoneSize);
2644
2645 StructType *NewTy = StructType::get(Ty, RightRedZoneTy);
2646 Constant *NewInitializer = ConstantStruct::get(
2647 NewTy, G->getInitializer(), Constant::getNullValue(RightRedZoneTy));
2648
2649 // Create a new global variable with enough space for a redzone.
2650 GlobalValue::LinkageTypes Linkage = G->getLinkage();
2651 if (G->isConstant() && Linkage == GlobalValue::PrivateLinkage)
2652 Linkage = GlobalValue::InternalLinkage;
2653 GlobalVariable *NewGlobal = new GlobalVariable(
2654 M, NewTy, G->isConstant(), Linkage, NewInitializer, "", G,
2655 G->getThreadLocalMode(), G->getAddressSpace());
2656 NewGlobal->copyAttributesFrom(G);
2657 NewGlobal->setComdat(G->getComdat());
2658 NewGlobal->setAlignment(Align(getMinRedzoneSizeForGlobal()));
2659 // Don't fold globals with redzones. ODR violation detector and redzone
2660 // poisoning implicitly creates a dependence on the global's address, so it
2661 // is no longer valid for it to be marked unnamed_addr.
2662 NewGlobal->setUnnamedAddr(GlobalValue::UnnamedAddr::None);
2663
2664 // Move null-terminated C strings to "__asan_cstring" section on Darwin.
2665 if (TargetTriple.isOSBinFormatMachO() && !G->hasSection() &&
2666 G->isConstant()) {
2667 auto Seq = dyn_cast<ConstantDataSequential>(G->getInitializer());
2668 if (Seq && Seq->isCString())
2669 NewGlobal->setSection("__TEXT,__asan_cstring,regular");
2670 }
2671
2672 // Transfer the debug info and type metadata. The payload starts at offset
2673 // zero so we can copy the metadata over as is.
2674 NewGlobal->copyMetadata(G, 0);
2675
2676 Value *Indices2[2];
2677 Indices2[0] = IRB.getInt32(0);
2678 Indices2[1] = IRB.getInt32(0);
2679
2680 G->replaceAllUsesWith(
2681 ConstantExpr::getGetElementPtr(NewTy, NewGlobal, Indices2, true));
2682 NewGlobal->takeName(G);
2683 G->eraseFromParent();
2684 NewGlobals[i] = NewGlobal;
2685
2686 Constant *ODRIndicator = Constant::getNullValue(IntptrTy);
2687 GlobalValue *InstrumentedGlobal = NewGlobal;
2688
2689 bool CanUsePrivateAliases =
2690 TargetTriple.isOSBinFormatELF() || TargetTriple.isOSBinFormatMachO() ||
2691 TargetTriple.isOSBinFormatWasm();
2692 if (CanUsePrivateAliases && UsePrivateAlias) {
2693 // Create local alias for NewGlobal to avoid crash on ODR between
2694 // instrumented and non-instrumented libraries.
2695 InstrumentedGlobal =
2696 GlobalAlias::create(GlobalValue::PrivateLinkage, "", NewGlobal);
2697 }
2698
2699 // ODR should not happen for local linkage.
2700 if (NewGlobal->hasLocalLinkage()) {
2701 ODRIndicator = ConstantInt::get(IntptrTy, -1);
2702 } else if (UseOdrIndicator) {
2703 // With local aliases, we need to provide another externally visible
2704 // symbol __odr_asan_XXX to detect ODR violation.
2705 auto *ODRIndicatorSym =
2706 new GlobalVariable(M, IRB.getInt8Ty(), false, Linkage,
2707 ConstantInt::get(IRB.getInt8Ty(), 0),
2708 kODRGenPrefix + NameForGlobal, nullptr,
2709 NewGlobal->getThreadLocalMode());
2710
2711 // Set meaningful attributes for indicator symbol.
2712 ODRIndicatorSym->setVisibility(NewGlobal->getVisibility());
2713 ODRIndicatorSym->setDLLStorageClass(NewGlobal->getDLLStorageClass());
2714 ODRIndicatorSym->setAlignment(Align(1));
2715 ODRIndicator = ConstantExpr::getPtrToInt(ODRIndicatorSym, IntptrTy);
2716 }
2717
2718 Constant *Initializer = ConstantStruct::get(
2719 GlobalStructTy,
2720 ConstantExpr::getPointerCast(InstrumentedGlobal, IntptrTy),
2721 ConstantInt::get(IntptrTy, SizeInBytes),
2722 ConstantInt::get(IntptrTy, SizeInBytes + RightRedzoneSize),
2723 ConstantExpr::getPointerCast(Name, IntptrTy),
2724 ConstantExpr::getPointerCast(getOrCreateModuleName(), IntptrTy),
2725 ConstantInt::get(IntptrTy, MD.IsDynInit),
2726 Constant::getNullValue(IntptrTy), ODRIndicator);
2727
2728 LLVM_DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n");
2729
2730 Initializers[i] = Initializer;
2731 }
2732
2733 // Add instrumented globals to llvm.compiler.used list to avoid LTO from
2734 // ConstantMerge'ing them.
2735 SmallVector<GlobalValue *, 16> GlobalsToAddToUsedList;
2736 for (size_t i = 0; i < n; i++) {
2737 GlobalVariable *G = NewGlobals[i];
2738 if (G->getName().empty()) continue;
2739 GlobalsToAddToUsedList.push_back(G);
2740 }
2741 appendToCompilerUsed(M, ArrayRef<GlobalValue *>(GlobalsToAddToUsedList));
2742
2743 if (UseGlobalsGC && TargetTriple.isOSBinFormatELF()) {
2744 // Use COMDAT and register globals even if n == 0 to ensure that (a) the
2745 // linkage unit will only have one module constructor, and (b) the register
2746 // function will be called. The module destructor is not created when n ==
2747 // 0.
2748 *CtorComdat = true;
2749 instrumentGlobalsELF(IRB, NewGlobals, Initializers, getUniqueModuleId(&M));
2750 } else if (n == 0) {
2751 // When UseGlobalsGC is false, COMDAT can still be used if n == 0, because
2752 // all compile units will have identical module constructor/destructor.
2753 *CtorComdat = TargetTriple.isOSBinFormatELF();
2754 } else {
2755 *CtorComdat = false;
2756 if (UseGlobalsGC && TargetTriple.isOSBinFormatCOFF()) {
2757 InstrumentGlobalsCOFF(IRB, NewGlobals, Initializers);
2758 } else if (UseGlobalsGC && ShouldUseMachOGlobalsSection()) {
2759 InstrumentGlobalsMachO(IRB, NewGlobals, Initializers);
2760 } else {
2761 InstrumentGlobalsWithMetadataArray(IRB, NewGlobals, Initializers);
2762 }
2763 }
2764
2765 // Create calls for poisoning before initializers run and unpoisoning after.
2766 if (ClInitializers)
2767 createInitializerPoisonCalls();
2768
2769 LLVM_DEBUG(dbgs() << M);
2770}
2771
2772uint64_t
2773ModuleAddressSanitizer::getRedzoneSizeForGlobal(uint64_t SizeInBytes) const {
2774 constexpr uint64_t kMaxRZ = 1 << 18;
2775 const uint64_t MinRZ = getMinRedzoneSizeForGlobal();
2776
2777 uint64_t RZ = 0;
2778 if (SizeInBytes <= MinRZ / 2) {
2779 // Reduce redzone size for small size objects, e.g. int, char[1]. MinRZ is
2780 // at least 32 bytes, optimize when SizeInBytes is less than or equal to
2781 // half of MinRZ.
2782 RZ = MinRZ - SizeInBytes;
2783 } else {
2784 // Calculate RZ, where MinRZ <= RZ <= MaxRZ, and RZ ~ 1/4 * SizeInBytes.
2785 RZ = std::clamp((SizeInBytes / MinRZ / 4) * MinRZ, MinRZ, kMaxRZ);
2786
2787 // Round up to multiple of MinRZ.
2788 if (SizeInBytes % MinRZ)
2789 RZ += MinRZ - (SizeInBytes % MinRZ);
2790 }
2791
2792 assert((RZ + SizeInBytes) % MinRZ == 0);
2793
2794 return RZ;
2795}
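// Worked example, assuming MinRZ = 32: a 12-byte global takes the
// small-object path and gets RZ = 32 - 12 = 20 (total 32); a 1000-byte global
// gets RZ = clamp((1000 / 32 / 4) * 32, 32, 1 << 18) = 224, rounded up by
// 32 - (1000 % 32) = 24 to RZ = 248, so size-with-redzone = 1248, a multiple
// of 32.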
2796
2797int ModuleAddressSanitizer::GetAsanVersion() const {
2798 int LongSize = M.getDataLayout().getPointerSizeInBits();
2799 bool isAndroid = M.getTargetTriple().isAndroid();
2800 int Version = 8;
2801 // 32-bit Android is one version ahead because of the switch to dynamic
2802 // shadow.
2803 Version += (LongSize == 32 && isAndroid);
2804 return Version;
2805}
2806
2807GlobalVariable *ModuleAddressSanitizer::getOrCreateModuleName() {
2808 if (!ModuleName) {
2809 // We shouldn't merge identical module names, as this string serves as a
2810 // unique module ID at runtime.
2811 ModuleName =
2812 createPrivateGlobalForString(M, M.getModuleIdentifier(),
2813 /*AllowMerging*/ false, genName("module"));
2814 }
2815 return ModuleName;
2816}
2817
2818bool ModuleAddressSanitizer::instrumentModule() {
2819 initializeCallbacks();
2820
2821 for (Function &F : M)
2822 removeASanIncompatibleFnAttributes(F, /*ReadsArgMem=*/false);
2823
2824 // Create a module constructor. A destructor is created lazily because not
2825 // all platforms and not all modules need it.
2826 if (ConstructorKind == AsanCtorKind::Global) {
2827 if (CompileKernel) {
2828 // The kernel always builds with its own runtime, and therefore does not
2829 // need the init and version check calls.
2830 AsanCtorFunction = createSanitizerCtor(M, kAsanModuleCtorName);
2831 } else {
2832 std::string AsanVersion = std::to_string(GetAsanVersion());
2833 std::string VersionCheckName =
2834 InsertVersionCheck ? (kAsanVersionCheckNamePrefix + AsanVersion) : "";
2835 std::tie(AsanCtorFunction, std::ignore) =
2836 createSanitizerCtorAndInitFunctions(
2837 M, kAsanModuleCtorName, kAsanInitName, /*InitArgTypes=*/{},
2838 /*InitArgs=*/{}, VersionCheckName);
2839 }
2840 }
2841
2842 bool CtorComdat = true;
2843 if (ClGlobals) {
2844 assert(AsanCtorFunction || ConstructorKind == AsanCtorKind::None);
2845 if (AsanCtorFunction) {
2846 IRBuilder<> IRB(AsanCtorFunction->getEntryBlock().getTerminator());
2847 instrumentGlobals(IRB, &CtorComdat);
2848 } else {
2849 IRBuilder<> IRB(*C);
2850 instrumentGlobals(IRB, &CtorComdat);
2851 }
2852 }
2853
2854 const uint64_t Priority = GetCtorAndDtorPriority(TargetTriple);
2855
2856 // Put the constructor and destructor in comdat if both
2857 // (1) global instrumentation is not TU-specific
2858 // (2) target is ELF.
2859 if (UseCtorComdat && TargetTriple.isOSBinFormatELF() && CtorComdat) {
2860 if (AsanCtorFunction) {
2861 AsanCtorFunction->setComdat(M.getOrInsertComdat(kAsanModuleCtorName));
2862 appendToGlobalCtors(M, AsanCtorFunction, Priority, AsanCtorFunction);
2863 }
2864 if (AsanDtorFunction) {
2865 AsanDtorFunction->setComdat(M.getOrInsertComdat(kAsanModuleDtorName));
2866 appendToGlobalDtors(M, AsanDtorFunction, Priority, AsanDtorFunction);
2867 }
2868 } else {
2869 if (AsanCtorFunction)
2870 appendToGlobalCtors(M, AsanCtorFunction, Priority);
2871 if (AsanDtorFunction)
2872 appendToGlobalDtors(M, AsanDtorFunction, Priority);
2873 }
2874
2875 return true;
2876}
2877
2878void AddressSanitizer::initializeCallbacks(const TargetLibraryInfo *TLI) {
2879 IRBuilder<> IRB(*C);
2880 // Create __asan_report* callbacks.
2881 // IsWrite, TypeSize and Exp are encoded in the function name.
2882 for (int Exp = 0; Exp < 2; Exp++) {
2883 for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
2884 const std::string TypeStr = AccessIsWrite ? "store" : "load";
2885 const std::string ExpStr = Exp ? "exp_" : "";
2886 const std::string EndingStr = Recover ? "_noabort" : "";
2887
2888 SmallVector<Type *, 3> Args2 = {IntptrTy, IntptrTy};
2889 SmallVector<Type *, 2> Args1{1, IntptrTy};
2890 AttributeList AL2;
2891 AttributeList AL1;
2892 if (Exp) {
2893 Type *ExpType = Type::getInt32Ty(*C);
2894 Args2.push_back(ExpType);
2895 Args1.push_back(ExpType);
2896 if (auto AK = TLI->getExtAttrForI32Param(false)) {
2897 AL2 = AL2.addParamAttribute(*C, 2, AK);
2898 AL1 = AL1.addParamAttribute(*C, 1, AK);
2899 }
2900 }
2901 AsanErrorCallbackSized[AccessIsWrite][Exp] = M.getOrInsertFunction(
2902 kAsanReportErrorTemplate + ExpStr + TypeStr + "_n" + EndingStr,
2903 FunctionType::get(IRB.getVoidTy(), Args2, false), AL2);
2904
2905 AsanMemoryAccessCallbackSized[AccessIsWrite][Exp] = M.getOrInsertFunction(
2906 ClMemoryAccessCallbackPrefix + ExpStr + TypeStr + "N" + EndingStr,
2907 FunctionType::get(IRB.getVoidTy(), Args2, false), AL2);
2908
2909 for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
2910 AccessSizeIndex++) {
2911 const std::string Suffix = TypeStr + itostr(1ULL << AccessSizeIndex);
2912 AsanErrorCallback[AccessIsWrite][Exp][AccessSizeIndex] =
2913 M.getOrInsertFunction(
2914 kAsanReportErrorTemplate + ExpStr + Suffix + EndingStr,
2915 FunctionType::get(IRB.getVoidTy(), Args1, false), AL1);
2916
2917 AsanMemoryAccessCallback[AccessIsWrite][Exp][AccessSizeIndex] =
2918 M.getOrInsertFunction(
2919 ClMemoryAccessCallbackPrefix + ExpStr + Suffix + EndingStr,
2920 FunctionType::get(IRB.getVoidTy(), Args1, false), AL1);
2921 }
2922 }
2923 }
2924
2925 const std::string MemIntrinCallbackPrefix =
2926 (CompileKernel && !ClKasanMemIntrinCallbackPrefix)
2927 ? std::string("")
2928 : ClMemoryAccessCallbackPrefix;
2929 AsanMemmove = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memmove",
2930 PtrTy, PtrTy, PtrTy, IntptrTy);
2931 AsanMemcpy = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memcpy", PtrTy,
2932 PtrTy, PtrTy, IntptrTy);
2933 AsanMemset = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memset",
2934 TLI->getAttrList(C, {1}, /*Signed=*/false),
2935 PtrTy, PtrTy, IRB.getInt32Ty(), IntptrTy);
2936
2937 AsanHandleNoReturnFunc =
2938 M.getOrInsertFunction(kAsanHandleNoReturnName, IRB.getVoidTy());
2939
2940 AsanPtrCmpFunction =
2941 M.getOrInsertFunction(kAsanPtrCmp, IRB.getVoidTy(), IntptrTy, IntptrTy);
2942 AsanPtrSubFunction =
2943 M.getOrInsertFunction(kAsanPtrSub, IRB.getVoidTy(), IntptrTy, IntptrTy);
2944 if (Mapping.InGlobal)
2945 AsanShadowGlobal = M.getOrInsertGlobal("__asan_shadow",
2946 ArrayType::get(IRB.getInt8Ty(), 0));
2947
2948 AMDGPUAddressShared =
2949 M.getOrInsertFunction(kAMDGPUAddressSharedName, IRB.getInt1Ty(), PtrTy);
2950 AMDGPUAddressPrivate =
2951 M.getOrInsertFunction(kAMDGPUAddressPrivateName, IRB.getInt1Ty(), PtrTy);
2952}
2953
2954bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) {
2955 // For each NSObject descendant having a +load method, this method is invoked
2956 // by the ObjC runtime before any of the static constructors is called.
2957 // Therefore we need to instrument such methods with a call to __asan_init
2958 // at the beginning in order to initialize our runtime before any access to
2959 // the shadow memory.
2960 // We cannot just ignore these methods, because they may call other
2961 // instrumented functions.
2962 if (F.getName().contains(" load]")) {
2963 FunctionCallee AsanInitFunction =
2964 declareSanitizerInitFunction(*F.getParent(), kAsanInitName, {});
2965 IRBuilder<> IRB(&F.front(), F.front().begin());
2966 IRB.CreateCall(AsanInitFunction, {});
2967 return true;
2968 }
2969 return false;
2970}
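// For instance, the IR function for an Objective-C method +[MyClass load]
// (MyClass being an arbitrary illustrative class) has " load]" in its name,
// so a call to __asan_init is inserted at its entry.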
2971
2972bool AddressSanitizer::maybeInsertDynamicShadowAtFunctionEntry(Function &F) {
2973 // Generate code only when dynamic addressing is needed.
2974 if (Mapping.Offset != kDynamicShadowSentinel)
2975 return false;
2976
2977 IRBuilder<> IRB(&F.front().front());
2978 if (Mapping.InGlobal) {
2979 if (ClWithIfuncSuppressRemat) {
2980 // An empty inline asm with input reg == output reg.
2981 // An opaque pointer-to-int cast, basically.
2982 InlineAsm *Asm = InlineAsm::get(
2983 FunctionType::get(IntptrTy, {AsanShadowGlobal->getType()}, false),
2984 StringRef(""), StringRef("=r,0"),
2985 /*hasSideEffects=*/false);
2986 LocalDynamicShadow =
2987 IRB.CreateCall(Asm, {AsanShadowGlobal}, ".asan.shadow");
2988 } else {
2989 LocalDynamicShadow =
2990 IRB.CreatePointerCast(AsanShadowGlobal, IntptrTy, ".asan.shadow");
2991 }
2992 } else {
2993 Value *GlobalDynamicAddress = F.getParent()->getOrInsertGlobal(
2994 kAsanShadowMemoryDynamicAddress, IntptrTy);
2995 LocalDynamicShadow = IRB.CreateLoad(IntptrTy, GlobalDynamicAddress);
2996 }
2997 return true;
2998}
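// A sketch of the IR this emits in the Mapping.InGlobal case with
// rematerialization suppressed (IntptrTy shown as i64, i.e. a 64-bit target):
//   %.asan.shadow = call i64 asm "", "=r,0"(ptr @__asan_shadow)
// an identity-constraint inline asm that keeps the shadow base pinned in a
// register; in the non-InGlobal case the base is instead loaded from the
// __asan_shadow_memory_dynamic_address global.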
2999
3000void AddressSanitizer::markEscapedLocalAllocas(Function &F) {
3001 // Find the one possible call to llvm.localescape and pre-mark allocas passed
3002 // to it as uninteresting. This assumes we haven't started processing allocas
3003 // yet. This check is done up front because iterating the use list in
3004 // isInterestingAlloca would be algorithmically slower.
3005 assert(ProcessedAllocas.empty() && "must process localescape before allocas");
3006
3007 // Try to get the declaration of llvm.localescape. If it's not in the module,
3008 // we can exit early.
3009 if (!F.getParent()->getFunction("llvm.localescape")) return;
3010
3011   // Look for a call to llvm.localescape in the entry block. It can't be in
3012 // any other block.
3013 for (Instruction &I : F.getEntryBlock()) {
3014     IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
3015 if (II && II->getIntrinsicID() == Intrinsic::localescape) {
3016 // We found a call. Mark all the allocas passed in as uninteresting.
3017 for (Value *Arg : II->args()) {
3018 AllocaInst *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
3019 assert(AI && AI->isStaticAlloca() &&
3020 "non-static alloca arg to localescape");
3021 ProcessedAllocas[AI] = false;
3022 }
3023 break;
3024 }
3025 }
3026}
3027// Mitigation for https://github.com/google/sanitizers/issues/749
3028// We don't instrument Windows catch-block parameters to avoid
3029// interfering with exception handling assumptions.
3030void AddressSanitizer::markCatchParametersAsUninteresting(Function &F) {
3031 for (BasicBlock &BB : F) {
3032 for (Instruction &I : BB) {
3033 if (auto *CatchPad = dyn_cast<CatchPadInst>(&I)) {
3034 // Mark the parameters to a catch-block as uninteresting to avoid
3035 // instrumenting them.
3036 for (Value *Operand : CatchPad->arg_operands())
3037 if (auto *AI = dyn_cast<AllocaInst>(Operand))
3038 ProcessedAllocas[AI] = false;
3039 }
3040 }
3041 }
3042}
3043
3044bool AddressSanitizer::suppressInstrumentationSiteForDebug(int &Instrumented) {
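  // With the default ClDebugMin/ClDebugMax of -1, every site is instrumented;
  // otherwise only sites whose running index lies in [ClDebugMin, ClDebugMax]
  // are kept, which is handy for bisecting a misbehaving instrumentation site.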
3045 bool ShouldInstrument =
3046 ClDebugMin < 0 || ClDebugMax < 0 ||
3047 (Instrumented >= ClDebugMin && Instrumented <= ClDebugMax);
3048 Instrumented++;
3049 return !ShouldInstrument;
3050}
3051
3052bool AddressSanitizer::instrumentFunction(Function &F,
3053 const TargetLibraryInfo *TLI,
3054 const TargetTransformInfo *TTI) {
3055 bool FunctionModified = false;
3056
3057 // Do not apply any instrumentation for naked functions.
3058 if (F.hasFnAttribute(Attribute::Naked))
3059 return FunctionModified;
3060
3061 // If needed, insert __asan_init before checking for SanitizeAddress attr.
3062 // This function needs to be called even if the function body is not
3063 // instrumented.
3064 if (maybeInsertAsanInitAtFunctionEntry(F))
3065 FunctionModified = true;
3066
3067 // Leave if the function doesn't need instrumentation.
3068 if (!F.hasFnAttribute(Attribute::SanitizeAddress)) return FunctionModified;
3069
3070 if (F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation))
3071 return FunctionModified;
3072
3073 LLVM_DEBUG(dbgs() << "ASAN instrumenting:\n" << F << "\n");
3074
3075 initializeCallbacks(TLI);
3076
3077 FunctionStateRAII CleanupObj(this);
3078
3079 RuntimeCallInserter RTCI(F);
3080
3081 FunctionModified |= maybeInsertDynamicShadowAtFunctionEntry(F);
3082
3083 // We can't instrument allocas used with llvm.localescape. Only static allocas
3084 // can be passed to that intrinsic.
3085 markEscapedLocalAllocas(F);
3086
3087 if (TargetTriple.isOSWindows())
3088 markCatchParametersAsUninteresting(F);
3089
3090 // We want to instrument every address only once per basic block (unless there
3091 // are calls between uses).
3092 SmallPtrSet<Value *, 16> TempsToInstrument;
3093 SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
3094 SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
3095 SmallVector<Instruction *, 8> NoReturnCalls;
3096   SmallVector<BasicBlock *, 16> AllBlocks;
3097 SmallVector<Instruction *, 16> PointerComparisonsOrSubtracts;
3098
3099 // Fill the set of memory operations to instrument.
3100 for (auto &BB : F) {
3101 AllBlocks.push_back(&BB);
3102 TempsToInstrument.clear();
3103 int NumInsnsPerBB = 0;
3104 for (auto &Inst : BB) {
3105 if (LooksLikeCodeInBug11395(&Inst)) return false;
3106 // Skip instructions inserted by another instrumentation.
3107 if (Inst.hasMetadata(LLVMContext::MD_nosanitize))
3108 continue;
3109 SmallVector<InterestingMemoryOperand, 1> InterestingOperands;
3110 getInterestingMemoryOperands(&Inst, InterestingOperands, TTI);
3111
3112 if (!InterestingOperands.empty()) {
3113 for (auto &Operand : InterestingOperands) {
3114 if (ClOpt && ClOptSameTemp) {
3115 Value *Ptr = Operand.getPtr();
3116 // If we have a mask, skip instrumentation if we've already
3117 // instrumented the full object. But don't add to TempsToInstrument
3118 // because we might get another load/store with a different mask.
3119 if (Operand.MaybeMask) {
3120 if (TempsToInstrument.count(Ptr))
3121 continue; // We've seen this (whole) temp in the current BB.
3122 } else {
3123 if (!TempsToInstrument.insert(Ptr).second)
3124 continue; // We've seen this temp in the current BB.
3125 }
3126 }
3127 OperandsToInstrument.push_back(Operand);
3128 NumInsnsPerBB++;
3129 }
3130 } else if (((ClInvalidPointerPairs || ClInvalidPointerCmp) &&
3131                  isInterestingPointerComparison(&Inst)) ||
3132                 ((ClInvalidPointerPairs || ClInvalidPointerSub) &&
3133                  isInterestingPointerSubtraction(&Inst))) {
3134 PointerComparisonsOrSubtracts.push_back(&Inst);
3135 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst)) {
3136 // ok, take it.
3137 IntrinToInstrument.push_back(MI);
3138 NumInsnsPerBB++;
3139 } else {
3140 if (auto *CB = dyn_cast<CallBase>(&Inst)) {
3141 // A call inside BB.
3142 TempsToInstrument.clear();
3143 if (CB->doesNotReturn())
3144 NoReturnCalls.push_back(CB);
3145 }
3146 if (CallInst *CI = dyn_cast<CallInst>(&Inst))
3147           maybeMarkSanitizerLibraryCallNoBuiltin(CI, TLI);
3148 }
3149 if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB) break;
3150 }
3151 }
3152
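  // If the function contains more memory accesses than the callback threshold
  // allows (7000 by default), emit out-of-line runtime callbacks instead of
  // inline checks to bound code-size growth.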
3153 bool UseCalls = (InstrumentationWithCallsThreshold >= 0 &&
3154 OperandsToInstrument.size() + IntrinToInstrument.size() >
3155 (unsigned)InstrumentationWithCallsThreshold);
3156 const DataLayout &DL = F.getDataLayout();
3157 ObjectSizeOffsetVisitor ObjSizeVis(DL, TLI, F.getContext());
3158
3159 // Instrument.
3160 int NumInstrumented = 0;
3161 for (auto &Operand : OperandsToInstrument) {
3162 if (!suppressInstrumentationSiteForDebug(NumInstrumented))
3163 instrumentMop(ObjSizeVis, Operand, UseCalls,
3164 F.getDataLayout(), RTCI);
3165 FunctionModified = true;
3166 }
3167 for (auto *Inst : IntrinToInstrument) {
3168 if (!suppressInstrumentationSiteForDebug(NumInstrumented))
3169 instrumentMemIntrinsic(Inst, RTCI);
3170 FunctionModified = true;
3171 }
3172
3173 FunctionStackPoisoner FSP(F, *this, RTCI);
3174 bool ChangedStack = FSP.runOnFunction();
3175
3176 // We must unpoison the stack before NoReturn calls (throw, _exit, etc).
3177 // See e.g. https://github.com/google/sanitizers/issues/37
3178 for (auto *CI : NoReturnCalls) {
3179 IRBuilder<> IRB(CI);
3180 RTCI.createRuntimeCall(IRB, AsanHandleNoReturnFunc, {});
3181 }
3182
3183 for (auto *Inst : PointerComparisonsOrSubtracts) {
3184 instrumentPointerComparisonOrSubtraction(Inst, RTCI);
3185 FunctionModified = true;
3186 }
3187
3188 if (ChangedStack || !NoReturnCalls.empty())
3189 FunctionModified = true;
3190
3191 LLVM_DEBUG(dbgs() << "ASAN done instrumenting: " << FunctionModified << " "
3192 << F << "\n");
3193
3194 return FunctionModified;
3195}
3196
3197 // Workaround for bug 11395: we don't want to instrument the stack in functions
3198 // with large assembly blobs (32-bit only); otherwise register allocation may
3199 // crash. FIXME: remove once bug 11395 is fixed.
3200bool AddressSanitizer::LooksLikeCodeInBug11395(Instruction *I) {
3201 if (LongSize != 32) return false;
3202   CallInst *CI = dyn_cast<CallInst>(I);
3203 if (!CI || !CI->isInlineAsm()) return false;
3204 if (CI->arg_size() <= 5)
3205 return false;
3206 // We have inline assembly with quite a few arguments.
3207 return true;
3208}
3209
3210void FunctionStackPoisoner::initializeCallbacks(Module &M) {
3211 IRBuilder<> IRB(*C);
3212 if (ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always ||
3213 ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Runtime) {
3214 const char *MallocNameTemplate =
3215 ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always
3216             ? kAsanStackMallocAlwaysNameTemplate
3217             : kAsanStackMallocNameTemplate;
3218 for (int Index = 0; Index <= kMaxAsanStackMallocSizeClass; Index++) {
3219 std::string Suffix = itostr(Index);
3220 AsanStackMallocFunc[Index] = M.getOrInsertFunction(
3221 MallocNameTemplate + Suffix, IntptrTy, IntptrTy);
3222 AsanStackFreeFunc[Index] =
3223 M.getOrInsertFunction(kAsanStackFreeNameTemplate + Suffix,
3224 IRB.getVoidTy(), IntptrTy, IntptrTy);
3225 }
3226 }
3227 if (ASan.UseAfterScope) {
3228 AsanPoisonStackMemoryFunc = M.getOrInsertFunction(
3229 kAsanPoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy);
3230 AsanUnpoisonStackMemoryFunc = M.getOrInsertFunction(
3231 kAsanUnpoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy);
3232 }
3233
3234 for (size_t Val : {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0xf1, 0xf2,
3235 0xf3, 0xf5, 0xf8}) {
3236 std::ostringstream Name;
3237     Name << kAsanSetShadowPrefix;
3238 Name << std::setw(2) << std::setfill('0') << std::hex << Val;
3239 AsanSetShadowFunc[Val] =
3240 M.getOrInsertFunction(Name.str(), IRB.getVoidTy(), IntptrTy, IntptrTy);
3241 }
3242
3243 AsanAllocaPoisonFunc = M.getOrInsertFunction(
3244 kAsanAllocaPoison, IRB.getVoidTy(), IntptrTy, IntptrTy);
3245 AsanAllocasUnpoisonFunc = M.getOrInsertFunction(
3246 kAsanAllocasUnpoison, IRB.getVoidTy(), IntptrTy, IntptrTy);
3247}
3248
3249void FunctionStackPoisoner::copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
3250 ArrayRef<uint8_t> ShadowBytes,
3251 size_t Begin, size_t End,
3252 IRBuilder<> &IRB,
3253 Value *ShadowBase) {
3254 if (Begin >= End)
3255 return;
3256
3257 const size_t LargestStoreSizeInBytes =
3258 std::min<size_t>(sizeof(uint64_t), ASan.LongSize / 8);
3259
3260 const bool IsLittleEndian = F.getDataLayout().isLittleEndian();
3261
3262   // Poison the given range in shadow using the largest store size, without
3263   // leading and trailing zeros in ShadowMask. Zeros never change, so they
3264   // need neither poisoning nor unpoisoning. Still, we don't mind if some of
3265   // them end up in the middle of a store.
3266 for (size_t i = Begin; i < End;) {
3267 if (!ShadowMask[i]) {
3268 assert(!ShadowBytes[i]);
3269 ++i;
3270 continue;
3271 }
3272
3273 size_t StoreSizeInBytes = LargestStoreSizeInBytes;
3274 // Fit store size into the range.
3275 while (StoreSizeInBytes > End - i)
3276 StoreSizeInBytes /= 2;
3277
3278 // Minimize store size by trimming trailing zeros.
3279 for (size_t j = StoreSizeInBytes - 1; j && !ShadowMask[i + j]; --j) {
3280 while (j <= StoreSizeInBytes / 2)
3281 StoreSizeInBytes /= 2;
3282 }
3283
3284 uint64_t Val = 0;
3285 for (size_t j = 0; j < StoreSizeInBytes; j++) {
3286 if (IsLittleEndian)
3287 Val |= (uint64_t)ShadowBytes[i + j] << (8 * j);
3288 else
3289 Val = (Val << 8) | ShadowBytes[i + j];
3290 }
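    // E.g. shadow bytes {0xf1, 0xf2} are packed as the 16-bit value 0xf2f1 on
    // a little-endian target and as 0xf1f2 on a big-endian one.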
3291
3292 Value *Ptr = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i));
3293 Value *Poison = IRB.getIntN(StoreSizeInBytes * 8, Val);
3294     IRB.CreateAlignedStore(
3295 Poison, IRB.CreateIntToPtr(Ptr, PointerType::getUnqual(Poison->getContext())),
3296 Align(1));
3297
3298 i += StoreSizeInBytes;
3299 }
3300}
3301
3302void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask,
3303 ArrayRef<uint8_t> ShadowBytes,
3304 IRBuilder<> &IRB, Value *ShadowBase) {
3305 copyToShadow(ShadowMask, ShadowBytes, 0, ShadowMask.size(), IRB, ShadowBase);
3306}
3307
3308void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask,
3309 ArrayRef<uint8_t> ShadowBytes,
3310 size_t Begin, size_t End,
3311 IRBuilder<> &IRB, Value *ShadowBase) {
3312 assert(ShadowMask.size() == ShadowBytes.size());
3313 size_t Done = Begin;
3314 for (size_t i = Begin, j = Begin + 1; i < End; i = j++) {
3315 if (!ShadowMask[i]) {
3316 assert(!ShadowBytes[i]);
3317 continue;
3318 }
3319 uint8_t Val = ShadowBytes[i];
3320 if (!AsanSetShadowFunc[Val])
3321 continue;
3322
3323 // Skip same values.
3324 for (; j < End && ShadowMask[j] && Val == ShadowBytes[j]; ++j) {
3325 }
3326
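    // A long enough run of one shadow value is poisoned with a single
    // __asan_set_shadow_xx call; shorter runs are left to the inline stores
    // emitted by copyToShadowInline for the surrounding span.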
3327 if (j - i >= ASan.MaxInlinePoisoningSize) {
3328 copyToShadowInline(ShadowMask, ShadowBytes, Done, i, IRB, ShadowBase);
3329 RTCI.createRuntimeCall(
3330 IRB, AsanSetShadowFunc[Val],
3331 {IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i)),
3332 ConstantInt::get(IntptrTy, j - i)});
3333 Done = j;
3334 }
3335 }
3336
3337 copyToShadowInline(ShadowMask, ShadowBytes, Done, End, IRB, ShadowBase);
3338}
3339
3340 // The fake stack allocator (asan_fake_stack.h) has 11 size classes, one for
3341 // each power of 2 from kMinStackMallocSize up to kMaxAsanStackMallocSizeClass.
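// E.g. with kMinStackMallocSize == 64, frames of up to 64 bytes get class 0,
// 65..128 bytes class 1, 129..256 bytes class 2, and so on.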
3342static int StackMallocSizeClass(uint64_t LocalStackSize) {
3343 assert(LocalStackSize <= kMaxStackMallocSize);
3344 uint64_t MaxSize = kMinStackMallocSize;
3345 for (int i = 0;; i++, MaxSize *= 2)
3346 if (LocalStackSize <= MaxSize) return i;
3347 llvm_unreachable("impossible LocalStackSize");
3348}
3349
3350void FunctionStackPoisoner::copyArgsPassedByValToAllocas() {
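  // Byval arguments are backed by caller-owned memory that cannot be given
  // redzones, so copy each one into a fresh alloca that the stack poisoner
  // can instrument like any other local, at the cost of an extra copy.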
3351 Instruction *CopyInsertPoint = &F.front().front();
3352 if (CopyInsertPoint == ASan.LocalDynamicShadow) {
3353 // Insert after the dynamic shadow location is determined
3354 CopyInsertPoint = CopyInsertPoint->getNextNode();
3355 assert(CopyInsertPoint);
3356 }
3357 IRBuilder<> IRB(CopyInsertPoint);
3358 const DataLayout &DL = F.getDataLayout();
3359 for (Argument &Arg : F.args()) {
3360 if (Arg.hasByValAttr()) {
3361 Type *Ty = Arg.getParamByValType();
3362 const Align Alignment =
3363 DL.getValueOrABITypeAlignment(Arg.getParamAlign(), Ty);
3364
3365 AllocaInst *AI = IRB.CreateAlloca(
3366 Ty, nullptr,
3367 (Arg.hasName() ? Arg.getName() : "Arg" + Twine(Arg.getArgNo())) +
3368 ".byval");
3369 AI->setAlignment(Alignment);
3370 Arg.replaceAllUsesWith(AI);
3371
3372 uint64_t AllocSize = DL.getTypeAllocSize(Ty);
3373 IRB.CreateMemCpy(AI, Alignment, &Arg, Alignment, AllocSize);
3374 }
3375 }
3376}
3377
3378PHINode *FunctionStackPoisoner::createPHI(IRBuilder<> &IRB, Value *Cond,
3379 Value *ValueIfTrue,
3380 Instruction *ThenTerm,
3381 Value *ValueIfFalse) {
3382 PHINode *PHI = IRB.CreatePHI(ValueIfTrue->getType(), 2);
3383 BasicBlock *CondBlock = cast<Instruction>(Cond)->getParent();
3384 PHI->addIncoming(ValueIfFalse, CondBlock);
3385 BasicBlock *ThenBlock = ThenTerm->getParent();
3386 PHI->addIncoming(ValueIfTrue, ThenBlock);
3387 return PHI;
3388}
3389
3390Value *FunctionStackPoisoner::createAllocaForLayout(
3391 IRBuilder<> &IRB, const ASanStackFrameLayout &L, bool Dynamic) {
3392 AllocaInst *Alloca;
3393 if (Dynamic) {
3394 Alloca = IRB.CreateAlloca(IRB.getInt8Ty(),
3395 ConstantInt::get(IRB.getInt64Ty(), L.FrameSize),
3396 "MyAlloca");
3397 } else {
3398 Alloca = IRB.CreateAlloca(ArrayType::get(IRB.getInt8Ty(), L.FrameSize),
3399 nullptr, "MyAlloca");
3400 assert(Alloca->isStaticAlloca());
3401 }
3402 assert((ClRealignStack & (ClRealignStack - 1)) == 0);
3403 uint64_t FrameAlignment = std::max(L.FrameAlignment, uint64_t(ClRealignStack));
3404 Alloca->setAlignment(Align(FrameAlignment));
3405 return Alloca;
3406}
3407
3408void FunctionStackPoisoner::createDynamicAllocasInitStorage() {
3409 BasicBlock &FirstBB = *F.begin();
3410 IRBuilder<> IRB(dyn_cast<Instruction>(FirstBB.begin()));
3411 DynamicAllocaLayout = IRB.CreateAlloca(IntptrTy, nullptr);
3412 IRB.CreateStore(Constant::getNullValue(IntptrTy), DynamicAllocaLayout);
3413 DynamicAllocaLayout->setAlignment(Align(32));
3414}
3415
3416void FunctionStackPoisoner::processDynamicAllocas() {
3417 if (!ClInstrumentDynamicAllocas || DynamicAllocaVec.empty()) {
3418 assert(DynamicAllocaPoisonCallVec.empty());
3419 return;
3420 }
3421
3422 // Insert poison calls for lifetime intrinsics for dynamic allocas.
3423 for (const auto &APC : DynamicAllocaPoisonCallVec) {
3424 assert(APC.InsBefore);
3425 assert(APC.AI);
3426 assert(ASan.isInterestingAlloca(*APC.AI));
3427 assert(!APC.AI->isStaticAlloca());
3428
3429 IRBuilder<> IRB(APC.InsBefore);
3430 poisonAlloca(APC.AI, APC.Size, IRB, APC.DoPoison);
3431 // Dynamic allocas will be unpoisoned unconditionally below in
3432 // unpoisonDynamicAllocas.
3433     // Flag that we need to unpoison static allocas.
3434 }
3435
3436 // Handle dynamic allocas.
3437 createDynamicAllocasInitStorage();
3438 for (auto &AI : DynamicAllocaVec)
3439 handleDynamicAllocaCall(AI);
3440 unpoisonDynamicAllocas();
3441}
3442
3443/// Collect instructions in the entry block after \p InsBefore which initialize
3444/// permanent storage for a function argument. These instructions must remain in
3445/// the entry block so that uninitialized values do not appear in backtraces. An
3446/// added benefit is that this conserves spill slots. This does not move stores
3447/// before instrumented / "interesting" allocas.
3448 static void findStoresToUninstrumentedArgAllocas(
3449 AddressSanitizer &ASan, Instruction &InsBefore,
3450 SmallVectorImpl<Instruction *> &InitInsts) {
3451 Instruction *Start = InsBefore.getNextNode();
3452 for (Instruction *It = Start; It; It = It->getNextNode()) {
3453 // Argument initialization looks like:
3454 // 1) store <Argument>, <Alloca> OR
3455 // 2) <CastArgument> = cast <Argument> to ...
3456 // store <CastArgument> to <Alloca>
3457 // Do not consider any other kind of instruction.
3458 //
3459 // Note: This covers all known cases, but may not be exhaustive. An
3460 // alternative to pattern-matching stores is to DFS over all Argument uses:
3461 // this might be more general, but is probably much more complicated.
3462 if (isa<AllocaInst>(It) || isa<CastInst>(It))
3463 continue;
3464 if (auto *Store = dyn_cast<StoreInst>(It)) {
3465 // The store destination must be an alloca that isn't interesting for
3466 // ASan to instrument. These are moved up before InsBefore, and they're
3467 // not interesting because allocas for arguments can be mem2reg'd.
3468 auto *Alloca = dyn_cast<AllocaInst>(Store->getPointerOperand());
3469 if (!Alloca || ASan.isInterestingAlloca(*Alloca))
3470 continue;
3471
3472 Value *Val = Store->getValueOperand();
3473 bool IsDirectArgInit = isa<Argument>(Val);
3474 bool IsArgInitViaCast =
3475 isa<CastInst>(Val) &&
3476 isa<Argument>(cast<CastInst>(Val)->getOperand(0)) &&
3477 // Check that the cast appears directly before the store. Otherwise
3478 // moving the cast before InsBefore may break the IR.
3479 Val == It->getPrevNode();
3480 bool IsArgInit = IsDirectArgInit || IsArgInitViaCast;
3481 if (!IsArgInit)
3482 continue;
3483
3484 if (IsArgInitViaCast)
3485 InitInsts.push_back(cast<Instruction>(Val));
3486 InitInsts.push_back(Store);
3487 continue;
3488 }
3489
3490 // Do not reorder past unknown instructions: argument initialization should
3491 // only involve casts and stores.
3492 return;
3493 }
3494}
3495
3496 static StringRef getAllocaName(AllocaInst *AI) {
3497 // Alloca could have been renamed for uniqueness. Its true name will have been
3498 // recorded as an annotation.
3499 if (AI->hasMetadata(LLVMContext::MD_annotation)) {
3500 MDTuple *AllocaAnnotations =
3501 cast<MDTuple>(AI->getMetadata(LLVMContext::MD_annotation));
3502 for (auto &Annotation : AllocaAnnotations->operands()) {
3503 if (!isa<MDTuple>(Annotation))
3504 continue;
3505 auto AnnotationTuple = cast<MDTuple>(Annotation);
3506 for (unsigned Index = 0; Index < AnnotationTuple->getNumOperands();
3507 Index++) {
3508 // All annotations are strings
3509 auto MetadataString =
3510 cast<MDString>(AnnotationTuple->getOperand(Index));
3511 if (MetadataString->getString() == "alloca_name_altered")
3512 return cast<MDString>(AnnotationTuple->getOperand(Index + 1))
3513 ->getString();
3514 }
3515 }
3516 }
3517 return AI->getName();
3518}
3519
3520void FunctionStackPoisoner::processStaticAllocas() {
3521 if (AllocaVec.empty()) {
3522 assert(StaticAllocaPoisonCallVec.empty());
3523 return;
3524 }
3525
3526 int StackMallocIdx = -1;
3527 DebugLoc EntryDebugLocation;
3528 if (auto SP = F.getSubprogram())
3529 EntryDebugLocation =
3530 DILocation::get(SP->getContext(), SP->getScopeLine(), 0, SP);
3531
3532 Instruction *InsBefore = AllocaVec[0];
3533 IRBuilder<> IRB(InsBefore);
3534
3535 // Make sure non-instrumented allocas stay in the entry block. Otherwise,
3536 // debug info is broken, because only entry-block allocas are treated as
3537 // regular stack slots.
3538 auto InsBeforeB = InsBefore->getParent();
3539 assert(InsBeforeB == &F.getEntryBlock());
3540 for (auto *AI : StaticAllocasToMoveUp)
3541 if (AI->getParent() == InsBeforeB)
3542 AI->moveBefore(InsBefore->getIterator());
3543
3544 // Move stores of arguments into entry-block allocas as well. This prevents
3545 // extra stack slots from being generated (to house the argument values until
3546 // they can be stored into the allocas). This also prevents uninitialized
3547 // values from being shown in backtraces.
3548 SmallVector<Instruction *, 8> ArgInitInsts;
3549 findStoresToUninstrumentedArgAllocas(ASan, *InsBefore, ArgInitInsts);
3550 for (Instruction *ArgInitInst : ArgInitInsts)
3551 ArgInitInst->moveBefore(InsBefore->getIterator());
3552
3553 // If we have a call to llvm.localescape, keep it in the entry block.
3554 if (LocalEscapeCall)
3555 LocalEscapeCall->moveBefore(InsBefore->getIterator());
3556
3558 SVD.reserve(AllocaVec.size());
3559 for (AllocaInst *AI : AllocaVec) {
3560     StringRef Name = getAllocaName(AI);
3561     ASanStackVariableDescription D = {Name.data(),
3562 ASan.getAllocaSizeInBytes(*AI),
3563 0,
3564 AI->getAlign().value(),
3565 AI,
3566 0,
3567 0};
3568 SVD.push_back(D);
3569 }
3570
3571 // Minimal header size (left redzone) is 4 pointers,
3572   // i.e. 32 bytes on 64-bit platforms and 16 bytes on 32-bit platforms.
3573 uint64_t Granularity = 1ULL << Mapping.Scale;
3574 uint64_t MinHeaderSize = std::max((uint64_t)ASan.LongSize / 2, Granularity);
3575 const ASanStackFrameLayout &L =
3576 ComputeASanStackFrameLayout(SVD, Granularity, MinHeaderSize);
3577
3578 // Build AllocaToSVDMap for ASanStackVariableDescription lookup.
3579   DenseMap<const AllocaInst *, ASanStackVariableDescription *> AllocaToSVDMap;
3580 for (auto &Desc : SVD)
3581 AllocaToSVDMap[Desc.AI] = &Desc;
3582
3583 // Update SVD with information from lifetime intrinsics.
3584 for (const auto &APC : StaticAllocaPoisonCallVec) {
3585 assert(APC.InsBefore);
3586 assert(APC.AI);
3587 assert(ASan.isInterestingAlloca(*APC.AI));
3588 assert(APC.AI->isStaticAlloca());
3589
3590 ASanStackVariableDescription &Desc = *AllocaToSVDMap[APC.AI];
3591 Desc.LifetimeSize = Desc.Size;
3592 if (const DILocation *FnLoc = EntryDebugLocation.get()) {
3593 if (const DILocation *LifetimeLoc = APC.InsBefore->getDebugLoc().get()) {
3594 if (LifetimeLoc->getFile() == FnLoc->getFile())
3595 if (unsigned Line = LifetimeLoc->getLine())
3596 Desc.Line = std::min(Desc.Line ? Desc.Line : Line, Line);
3597 }
3598 }
3599 }
3600
3601 auto DescriptionString = ComputeASanStackFrameDescription(SVD);
3602 LLVM_DEBUG(dbgs() << DescriptionString << " --- " << L.FrameSize << "\n");
3603 uint64_t LocalStackSize = L.FrameSize;
3604 bool DoStackMalloc =
3605 ASan.UseAfterReturn != AsanDetectStackUseAfterReturnMode::Never &&
3606 !ASan.CompileKernel && LocalStackSize <= kMaxStackMallocSize;
3607 bool DoDynamicAlloca = ClDynamicAllocaStack;
3608 // Don't do dynamic alloca or stack malloc if:
3609 // 1) There is inline asm: too often it makes assumptions on which registers
3610 // are available.
3611 // 2) There is a returns_twice call (typically setjmp), which is
3612 // optimization-hostile, and doesn't play well with introduced indirect
3613 // register-relative calculation of local variable addresses.
3614 DoDynamicAlloca &= !HasInlineAsm && !HasReturnsTwiceCall;
3615 DoStackMalloc &= !HasInlineAsm && !HasReturnsTwiceCall;
3616
3617 Type *PtrTy = F.getDataLayout().getAllocaPtrType(F.getContext());
3618 Value *StaticAlloca =
3619 DoDynamicAlloca ? nullptr : createAllocaForLayout(IRB, L, false);
3620
3621 Value *FakeStackPtr;
3622 Value *FakeStackInt;
3623 Value *LocalStackBase;
3624 Value *LocalStackBaseAlloca;
3625 uint8_t DIExprFlags = DIExpression::ApplyOffset;
3626
3627 if (DoStackMalloc) {
3628 LocalStackBaseAlloca =
3629 IRB.CreateAlloca(IntptrTy, nullptr, "asan_local_stack_base");
3630 if (ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Runtime) {
3631 // void *FakeStack = __asan_option_detect_stack_use_after_return
3632 // ? __asan_stack_malloc_N(LocalStackSize)
3633 // : nullptr;
3634 // void *LocalStackBase = (FakeStack) ? FakeStack :
3635 // alloca(LocalStackSize);
3636 Constant *OptionDetectUseAfterReturn = F.getParent()->getOrInsertGlobal(
3637         kAsanOptionDetectUseAfterReturn, IRB.getInt32Ty());
3638 Value *UseAfterReturnIsEnabled = IRB.CreateICmpNE(
3639 IRB.CreateLoad(IRB.getInt32Ty(), OptionDetectUseAfterReturn),
3640         Constant::getNullValue(IRB.getInt32Ty()));
3641 Instruction *Term =
3642 SplitBlockAndInsertIfThen(UseAfterReturnIsEnabled, InsBefore, false);
3643 IRBuilder<> IRBIf(Term);
3644 StackMallocIdx = StackMallocSizeClass(LocalStackSize);
3645 assert(StackMallocIdx <= kMaxAsanStackMallocSizeClass);
3646 Value *FakeStackValue =
3647 RTCI.createRuntimeCall(IRBIf, AsanStackMallocFunc[StackMallocIdx],
3648 ConstantInt::get(IntptrTy, LocalStackSize));
3649 IRB.SetInsertPoint(InsBefore);
3650 FakeStackInt = createPHI(IRB, UseAfterReturnIsEnabled, FakeStackValue,
3651 Term, ConstantInt::get(IntptrTy, 0));
3652 } else {
3653       // assert(ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always)
3654 // void *FakeStack = __asan_stack_malloc_N(LocalStackSize);
3655 // void *LocalStackBase = (FakeStack) ? FakeStack :
3656 // alloca(LocalStackSize);
3657 StackMallocIdx = StackMallocSizeClass(LocalStackSize);
3658 FakeStackInt =
3659 RTCI.createRuntimeCall(IRB, AsanStackMallocFunc[StackMallocIdx],
3660 ConstantInt::get(IntptrTy, LocalStackSize));
3661 }
3662 FakeStackPtr = IRB.CreateIntToPtr(FakeStackInt, PtrTy);
3663 Value *NoFakeStack =
3664 IRB.CreateICmpEQ(FakeStackInt, Constant::getNullValue(IntptrTy));
3665 Instruction *Term =
3666 SplitBlockAndInsertIfThen(NoFakeStack, InsBefore, false);
3667 IRBuilder<> IRBIf(Term);
3668 Value *AllocaValue =
3669 DoDynamicAlloca ? createAllocaForLayout(IRBIf, L, true) : StaticAlloca;
3670
3671 IRB.SetInsertPoint(InsBefore);
3672 LocalStackBase =
3673 createPHI(IRB, NoFakeStack, AllocaValue, Term, FakeStackPtr);
3674 IRB.CreateStore(LocalStackBase, LocalStackBaseAlloca);
3675 DIExprFlags |= DIExpression::DerefBefore;
3676 } else {
3677 // void *FakeStack = nullptr;
3678 // void *LocalStackBase = alloca(LocalStackSize);
3679 FakeStackInt = Constant::getNullValue(IntptrTy);
3680 FakeStackPtr = Constant::getNullValue(PtrTy);
3681 LocalStackBase =
3682 DoDynamicAlloca ? createAllocaForLayout(IRB, L, true) : StaticAlloca;
3683 LocalStackBaseAlloca = LocalStackBase;
3684 }
3685
3686 // Replace Alloca instructions with base+offset.
3687 SmallVector<Value *> NewAllocaPtrs;
3688 for (const auto &Desc : SVD) {
3689 AllocaInst *AI = Desc.AI;
3690 replaceDbgDeclare(AI, LocalStackBaseAlloca, DIB, DIExprFlags, Desc.Offset);
3691 Value *NewAllocaPtr = IRB.CreatePtrAdd(
3692 LocalStackBase, ConstantInt::get(IntptrTy, Desc.Offset));
3693 AI->replaceAllUsesWith(NewAllocaPtr);
3694 NewAllocaPtrs.push_back(NewAllocaPtr);
3695 }
3696
3697 // The left-most redzone has enough space for at least 4 pointers.
3698 // Write the Magic value to redzone[0].
3699 IRB.CreateStore(ConstantInt::get(IntptrTy, kCurrentStackFrameMagic),
3700 LocalStackBase);
3701 // Write the frame description constant to redzone[1].
3702 Value *BasePlus1 = IRB.CreatePtrAdd(
3703 LocalStackBase, ConstantInt::get(IntptrTy, ASan.LongSize / 8));
3704 GlobalVariable *StackDescriptionGlobal =
3705 createPrivateGlobalForString(*F.getParent(), DescriptionString,
3706 /*AllowMerging*/ true, genName("stack"));
3707 Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal, IntptrTy);
3708 IRB.CreateStore(Description, BasePlus1);
3709 // Write the PC to redzone[2].
3710 Value *BasePlus2 = IRB.CreatePtrAdd(
3711 LocalStackBase, ConstantInt::get(IntptrTy, 2 * ASan.LongSize / 8));
3712 IRB.CreateStore(IRB.CreatePointerCast(&F, IntptrTy), BasePlus2);
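  // The frame header now holds [magic, frame description, PC]; the runtime
  // reads these three words to map a faulting stack address back to the
  // enclosing function and local variable in error reports.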
3713
3714 const auto &ShadowAfterScope = GetShadowBytesAfterScope(SVD, L);
3715
3716 // Poison the stack red zones at the entry.
3717 Value *ShadowBase =
3718 ASan.memToShadow(IRB.CreatePtrToInt(LocalStackBase, IntptrTy), IRB);
3719   // As the mask we must use the most poisoned case: red zones and after-scope.
3720   // As the bytes we can use either the same, or just the red zones.
3721 copyToShadow(ShadowAfterScope, ShadowAfterScope, IRB, ShadowBase);
3722
3723 if (!StaticAllocaPoisonCallVec.empty()) {
3724 const auto &ShadowInScope = GetShadowBytes(SVD, L);
3725
3726 // Poison static allocas near lifetime intrinsics.
3727 for (const auto &APC : StaticAllocaPoisonCallVec) {
3728 const ASanStackVariableDescription &Desc = *AllocaToSVDMap[APC.AI];
3729 assert(Desc.Offset % L.Granularity == 0);
3730 size_t Begin = Desc.Offset / L.Granularity;
3731 size_t End = Begin + (APC.Size + L.Granularity - 1) / L.Granularity;
3732
3733 IRBuilder<> IRB(APC.InsBefore);
3734 copyToShadow(ShadowAfterScope,
3735 APC.DoPoison ? ShadowAfterScope : ShadowInScope, Begin, End,
3736 IRB, ShadowBase);
3737 }
3738 }
3739
3740 // Remove lifetime markers now that these are no longer allocas.
3741 for (Value *NewAllocaPtr : NewAllocaPtrs) {
3742 for (User *U : make_early_inc_range(NewAllocaPtr->users())) {
3743 auto *I = cast<Instruction>(U);
3744 if (I->isLifetimeStartOrEnd())
3745 I->eraseFromParent();
3746 }
3747 }
3748
3749 SmallVector<uint8_t, 64> ShadowClean(ShadowAfterScope.size(), 0);
3750 SmallVector<uint8_t, 64> ShadowAfterReturn;
3751
3752 // (Un)poison the stack before all ret instructions.
3753 for (Instruction *Ret : RetVec) {
3754 IRBuilder<> IRBRet(Ret);
3755 // Mark the current frame as retired.
3756 IRBRet.CreateStore(ConstantInt::get(IntptrTy, kRetiredStackFrameMagic),
3757 LocalStackBase);
3758 if (DoStackMalloc) {
3759 assert(StackMallocIdx >= 0);
3760 // if FakeStack != 0 // LocalStackBase == FakeStack
3761 // // In use-after-return mode, poison the whole stack frame.
3762 // if StackMallocIdx <= 4
3763 // // For small sizes inline the whole thing:
3764 // memset(ShadowBase, kAsanStackAfterReturnMagic, ShadowSize);
3765 // **SavedFlagPtr(FakeStack) = 0
3766 // else
3767 // __asan_stack_free_N(FakeStack, LocalStackSize)
3768 // else
3769 // <This is not a fake stack; unpoison the redzones>
3770 Value *Cmp =
3771 IRBRet.CreateICmpNE(FakeStackInt, Constant::getNullValue(IntptrTy));
3772 Instruction *ThenTerm, *ElseTerm;
3773 SplitBlockAndInsertIfThenElse(Cmp, Ret, &ThenTerm, &ElseTerm);
3774
3775 IRBuilder<> IRBPoison(ThenTerm);
3776 if (ASan.MaxInlinePoisoningSize != 0 && StackMallocIdx <= 4) {
3777 int ClassSize = kMinStackMallocSize << StackMallocIdx;
3778 ShadowAfterReturn.resize(ClassSize / L.Granularity,
3779                                  kAsanStackUseAfterReturnMagic);
3780 copyToShadow(ShadowAfterReturn, ShadowAfterReturn, IRBPoison,
3781 ShadowBase);
3782 Value *SavedFlagPtrPtr = IRBPoison.CreatePtrAdd(
3783 FakeStackPtr,
3784 ConstantInt::get(IntptrTy, ClassSize - ASan.LongSize / 8));
3785 Value *SavedFlagPtr = IRBPoison.CreateLoad(IntptrTy, SavedFlagPtrPtr);
3786 IRBPoison.CreateStore(
3787 Constant::getNullValue(IRBPoison.getInt8Ty()),
3788 IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getPtrTy()));
3789 } else {
3790 // For larger frames call __asan_stack_free_*.
3791 RTCI.createRuntimeCall(
3792 IRBPoison, AsanStackFreeFunc[StackMallocIdx],
3793 {FakeStackInt, ConstantInt::get(IntptrTy, LocalStackSize)});
3794 }
3795
3796 IRBuilder<> IRBElse(ElseTerm);
3797 copyToShadow(ShadowAfterScope, ShadowClean, IRBElse, ShadowBase);
3798 } else {
3799 copyToShadow(ShadowAfterScope, ShadowClean, IRBRet, ShadowBase);
3800 }
3801 }
3802
3803 // We are done. Remove the old unused alloca instructions.
3804 for (auto *AI : AllocaVec)
3805 AI->eraseFromParent();
3806}
3807
3808void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size,
3809 IRBuilder<> &IRB, bool DoPoison) {
3810   // For now, just insert a call to the ASan runtime.
3811 Value *AddrArg = IRB.CreatePointerCast(V, IntptrTy);
3812 Value *SizeArg = ConstantInt::get(IntptrTy, Size);
3813 RTCI.createRuntimeCall(
3814 IRB, DoPoison ? AsanPoisonStackMemoryFunc : AsanUnpoisonStackMemoryFunc,
3815 {AddrArg, SizeArg});
3816}
3817
3818// Handling llvm.lifetime intrinsics for a given %alloca:
3819// (1) collect all llvm.lifetime.xxx(%size, %value) describing the alloca.
3820// (2) if %size is constant, poison memory for llvm.lifetime.end (to detect
3821// invalid accesses) and unpoison it for llvm.lifetime.start (the memory
3822 //     could be poisoned by a previous llvm.lifetime.end instruction, as the
3823// variable may go in and out of scope several times, e.g. in loops).
3824// (3) if we poisoned at least one %alloca in a function,
3825// unpoison the whole stack frame at function exit.
3826void FunctionStackPoisoner::handleDynamicAllocaCall(AllocaInst *AI) {
3827 IRBuilder<> IRB(AI);
3828
3829 const Align Alignment = std::max(Align(kAllocaRzSize), AI->getAlign());
3830 const uint64_t AllocaRedzoneMask = kAllocaRzSize - 1;
3831
3832 Value *Zero = Constant::getNullValue(IntptrTy);
3833 Value *AllocaRzSize = ConstantInt::get(IntptrTy, kAllocaRzSize);
3834 Value *AllocaRzMask = ConstantInt::get(IntptrTy, AllocaRedzoneMask);
3835
3836   // We need to extend the alloca with additional memory to place the
3837   // redzones. The alloca's array size is a count of ElementSize-byte
3838   // elements, so the allocated size in bytes (OldSize below) is
3839   // ArraySize * ElementSize.
3840 const unsigned ElementSize =
3841 F.getDataLayout().getTypeAllocSize(AI->getAllocatedType());
3842 Value *OldSize =
3843 IRB.CreateMul(IRB.CreateIntCast(AI->getArraySize(), IntptrTy, false),
3844 ConstantInt::get(IntptrTy, ElementSize));
3845
3846 // PartialSize = OldSize % 32
3847 Value *PartialSize = IRB.CreateAnd(OldSize, AllocaRzMask);
3848
3849 // Misalign = kAllocaRzSize - PartialSize;
3850 Value *Misalign = IRB.CreateSub(AllocaRzSize, PartialSize);
3851
3852 // PartialPadding = Misalign != kAllocaRzSize ? Misalign : 0;
3853 Value *Cond = IRB.CreateICmpNE(Misalign, AllocaRzSize);
3854 Value *PartialPadding = IRB.CreateSelect(Cond, Misalign, Zero);
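  // E.g. with kAllocaRzSize == 32 and an allocation of 100 bytes:
  // PartialSize == 4, Misalign == 28, PartialPadding == 28, so the payload
  // plus padding (100 + 28 == 128) becomes a multiple of the redzone size.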
3855
3856 // AdditionalChunkSize = Alignment + PartialPadding + kAllocaRzSize
3857 // Alignment is added to locate left redzone, PartialPadding for possible
3858 // partial redzone and kAllocaRzSize for right redzone respectively.
3859 Value *AdditionalChunkSize = IRB.CreateAdd(
3860 ConstantInt::get(IntptrTy, Alignment.value() + kAllocaRzSize),
3861 PartialPadding);
3862
3863 Value *NewSize = IRB.CreateAdd(OldSize, AdditionalChunkSize);
3864
3865   // Insert a new alloca with the computed NewSize and Alignment.
3866 AllocaInst *NewAlloca = IRB.CreateAlloca(IRB.getInt8Ty(), NewSize);
3867 NewAlloca->setAlignment(Alignment);
3868
3869 // NewAddress = Address + Alignment
3870 Value *NewAddress =
3871 IRB.CreateAdd(IRB.CreatePtrToInt(NewAlloca, IntptrTy),
3872 ConstantInt::get(IntptrTy, Alignment.value()));
3873
3874   // Insert an __asan_alloca_poison call for the newly created alloca.
3875 RTCI.createRuntimeCall(IRB, AsanAllocaPoisonFunc, {NewAddress, OldSize});
3876
3877   // Store the last alloca's address to DynamicAllocaLayout. We'll need it
3878   // later to unpoison the dynamic allocas.
3879 IRB.CreateStore(IRB.CreatePtrToInt(NewAlloca, IntptrTy), DynamicAllocaLayout);
3880
3881 Value *NewAddressPtr = IRB.CreateIntToPtr(NewAddress, AI->getType());
3882
3883 // Remove lifetime markers now that this is no longer an alloca.
3884 for (User *U : make_early_inc_range(AI->users())) {
3885 auto *I = cast<Instruction>(U);
3886 if (I->isLifetimeStartOrEnd())
3887 I->eraseFromParent();
3888 }
3889
3890 // Replace all uses of AddressReturnedByAlloca with NewAddressPtr.
3891 AI->replaceAllUsesWith(NewAddressPtr);
3892
3893 // We are done. Erase old alloca from parent.
3894 AI->eraseFromParent();
3895}
3896
3897// isSafeAccess returns true if Addr is always inbounds with respect to its
3898// base object. For example, it is a field access or an array access with
3899// constant inbounds index.
3900bool AddressSanitizer::isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis,
3901 Value *Addr, TypeSize TypeStoreSize) const {
3902 if (TypeStoreSize.isScalable())
3903 // TODO: We can use vscale_range to convert a scalable value to an
3904 // upper bound on the access size.
3905 return false;
3906
3907 SizeOffsetAPInt SizeOffset = ObjSizeVis.compute(Addr);
3908 if (!SizeOffset.bothKnown())
3909 return false;
3910
3911 uint64_t Size = SizeOffset.Size.getZExtValue();
3912 int64_t Offset = SizeOffset.Offset.getSExtValue();
3913
3914 // Three checks are required to ensure safety:
3915 // . Offset >= 0 (since the offset is given from the base ptr)
3916 // . Size >= Offset (unsigned)
3917 // . Size - Offset >= NeededSize (unsigned)
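  // E.g. an 8-byte access at offset 96 into a 100-byte object fails the last
  // check (100 - 96 < 8), so it is not provably safe and gets instrumented.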
3918 return Offset >= 0 && Size >= uint64_t(Offset) &&
3919 Size - uint64_t(Offset) >= TypeStoreSize / 8;
3920}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static cl::opt< bool > ClUseStackSafety("stack-tagging-use-stack-safety", cl::Hidden, cl::init(true), cl::desc("Use Stack Safety analysis results"))
Rewrite undef for PHI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static void findStoresToUninstrumentedArgAllocas(AddressSanitizer &ASan, Instruction &InsBefore, SmallVectorImpl< Instruction * > &InitInsts)
Collect instructions in the entry block after InsBefore which initialize permanent storage for a func...
static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I, Instruction *InsertBefore, Value *Addr, MaybeAlign Alignment, unsigned Granularity, TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls, uint32_t Exp, RuntimeCallInserter &RTCI)
static const uint64_t kDefaultShadowScale
const char kAMDGPUUnreachableName[]
constexpr size_t kAccessSizeIndexMask
static cl::opt< int > ClDebugMin("asan-debug-min", cl::desc("Debug min inst"), cl::Hidden, cl::init(-1))
static cl::opt< bool > ClUsePrivateAlias("asan-use-private-alias", cl::desc("Use private aliases for global variables"), cl::Hidden, cl::init(true))
static const uint64_t kPS_ShadowOffset64
static const uint64_t kFreeBSD_ShadowOffset32
constexpr size_t kIsWriteShift
static const uint64_t kSmallX86_64ShadowOffsetAlignMask
static bool isInterestingPointerSubtraction(Instruction *I)
const char kAMDGPUAddressSharedName[]
const char kAsanStackFreeNameTemplate[]
constexpr size_t kCompileKernelMask
static cl::opt< bool > ClForceDynamicShadow("asan-force-dynamic-shadow", cl::desc("Load shadow address into a local variable for each function"), cl::Hidden, cl::init(false))
const char kAsanOptionDetectUseAfterReturn[]
static cl::opt< std::string > ClMemoryAccessCallbackPrefix("asan-memory-access-callback-prefix", cl::desc("Prefix for memory access callbacks"), cl::Hidden, cl::init("__asan_"))
static const uint64_t kRISCV64_ShadowOffset64
static cl::opt< bool > ClInsertVersionCheck("asan-guard-against-version-mismatch", cl::desc("Guard against compiler/runtime version mismatch."), cl::Hidden, cl::init(true))
const char kAsanSetShadowPrefix[]
static cl::opt< AsanDtorKind > ClOverrideDestructorKind("asan-destructor-kind", cl::desc("Sets the ASan destructor kind. The default is to use the value " "provided to the pass constructor"), cl::values(clEnumValN(AsanDtorKind::None, "none", "No destructors"), clEnumValN(AsanDtorKind::Global, "global", "Use global destructors")), cl::init(AsanDtorKind::Invalid), cl::Hidden)
static Twine genName(StringRef suffix)
static cl::opt< bool > ClInstrumentWrites("asan-instrument-writes", cl::desc("instrument write instructions"), cl::Hidden, cl::init(true))
const char kAsanPtrCmp[]
static uint64_t GetCtorAndDtorPriority(Triple &TargetTriple)
const char kAsanStackMallocNameTemplate[]
static cl::opt< bool > ClInstrumentByval("asan-instrument-byval", cl::desc("instrument byval call arguments"), cl::Hidden, cl::init(true))
const char kAsanInitName[]
static cl::opt< bool > ClGlobals("asan-globals", cl::desc("Handle global objects"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClRedzoneByvalArgs("asan-redzone-byval-args", cl::desc("Create redzones for byval " "arguments (extra copy " "required)"), cl::Hidden, cl::init(true))
static const uint64_t kWindowsShadowOffset64
const char kAsanGenPrefix[]
constexpr size_t kIsWriteMask
static uint64_t getRedzoneSizeForScale(int MappingScale)
static const uint64_t kDefaultShadowOffset64
static cl::opt< bool > ClOptimizeCallbacks("asan-optimize-callbacks", cl::desc("Optimize callbacks"), cl::Hidden, cl::init(false))
const char kAsanUnregisterGlobalsName[]
static const uint64_t kAsanCtorAndDtorPriority
const char kAsanUnpoisonGlobalsName[]
static cl::opt< bool > ClWithIfuncSuppressRemat("asan-with-ifunc-suppress-remat", cl::desc("Suppress rematerialization of dynamic shadow address by passing " "it through inline asm in prologue."), cl::Hidden, cl::init(true))
static cl::opt< int > ClDebugStack("asan-debug-stack", cl::desc("debug stack"), cl::Hidden, cl::init(0))
const char kAsanUnregisterElfGlobalsName[]
static bool isUnsupportedAMDGPUAddrspace(Value *Addr)
const char kAsanRegisterImageGlobalsName[]
static const uint64_t kWebAssemblyShadowOffset
static cl::opt< bool > ClOpt("asan-opt", cl::desc("Optimize instrumentation"), cl::Hidden, cl::init(true))
static const uint64_t kAllocaRzSize
const char kODRGenPrefix[]
static const uint64_t kSystemZ_ShadowOffset64
static const uint64_t kDefaultShadowOffset32
const char kAsanShadowMemoryDynamicAddress[]
static cl::opt< bool > ClUseOdrIndicator("asan-use-odr-indicator", cl::desc("Use odr indicators to improve ODR reporting"), cl::Hidden, cl::init(true))
static bool GlobalWasGeneratedByCompiler(GlobalVariable *G)
Check if G has been created by a trusted compiler pass.
const char kAsanStackMallocAlwaysNameTemplate[]
static cl::opt< int > ClShadowAddrSpace("asan-shadow-addr-space", cl::desc("Address space for pointers to the shadow map"), cl::Hidden, cl::init(0))
static cl::opt< bool > ClInvalidPointerCmp("asan-detect-invalid-pointer-cmp", cl::desc("Instrument <, <=, >, >= with pointer operands"), cl::Hidden, cl::init(false))
static const uint64_t kAsanEmscriptenCtorAndDtorPriority
static cl::opt< int > ClInstrumentationWithCallsThreshold("asan-instrumentation-with-call-threshold", cl::desc("If the function being instrumented contains more than " "this number of memory accesses, use callbacks instead of " "inline checks (-1 means never use callbacks)."), cl::Hidden, cl::init(7000))
static cl::opt< int > ClDebugMax("asan-debug-max", cl::desc("Debug max inst"), cl::Hidden, cl::init(-1))
static cl::opt< bool > ClInvalidPointerSub("asan-detect-invalid-pointer-sub", cl::desc("Instrument - operations with pointer operands"), cl::Hidden, cl::init(false))
static cl::list< unsigned > ClAddrSpaces("asan-instrument-address-spaces", cl::desc("Only instrument variables in the specified address spaces."), cl::Hidden, cl::CommaSeparated, cl::ZeroOrMore, cl::callback([](const unsigned &AddrSpace) { SrcAddrSpaces.insert(AddrSpace);}))
static const uint64_t kFreeBSD_ShadowOffset64
static cl::opt< uint32_t > ClForceExperiment("asan-force-experiment", cl::desc("Force optimization experiment (for testing)"), cl::Hidden, cl::init(0))
const char kSanCovGenPrefix[]
static const uint64_t kFreeBSDKasan_ShadowOffset64
const char kAsanModuleDtorName[]
static const uint64_t kDynamicShadowSentinel
static bool isInterestingPointerComparison(Instruction *I)
static cl::opt< bool > ClStack("asan-stack", cl::desc("Handle stack memory"), cl::Hidden, cl::init(true))
static const uint64_t kMIPS64_ShadowOffset64
static const uint64_t kLinuxKasan_ShadowOffset64
static int StackMallocSizeClass(uint64_t LocalStackSize)
static cl::opt< uint32_t > ClMaxInlinePoisoningSize("asan-max-inline-poisoning-size", cl::desc("Inline shadow poisoning for blocks up to the given size in bytes."), cl::Hidden, cl::init(64))
static cl::opt< bool > ClInstrumentAtomics("asan-instrument-atomics", cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClUseAfterScope("asan-use-after-scope", cl::desc("Check stack-use-after-scope"), cl::Hidden, cl::init(false))
constexpr size_t kAccessSizeIndexShift
static cl::opt< int > ClMappingScale("asan-mapping-scale", cl::desc("scale of asan shadow mapping"), cl::Hidden, cl::init(0))
const char kAsanPoisonStackMemoryName[]
static cl::opt< bool > ClEnableKasan("asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"), cl::Hidden, cl::init(false))
static cl::opt< std::string > ClDebugFunc("asan-debug-func", cl::Hidden, cl::desc("Debug func"))
static bool isSupportedAddrspace(const Triple &TargetTriple, Value *Addr)
static cl::opt< bool > ClUseGlobalsGC("asan-globals-live-support", cl::desc("Use linker features to support dead " "code stripping of globals"), cl::Hidden, cl::init(true))
static const size_t kNumberOfAccessSizes
const char kAsanUnpoisonStackMemoryName[]
static const uint64_t kLoongArch64_ShadowOffset64
const char kAsanRegisterGlobalsName[]
static cl::opt< bool > ClInstrumentDynamicAllocas("asan-instrument-dynamic-allocas", cl::desc("instrument dynamic allocas"), cl::Hidden, cl::init(true))
const char kAsanModuleCtorName[]
const char kAsanGlobalsRegisteredFlagName[]
static const size_t kMaxStackMallocSize
static cl::opt< bool > ClRecover("asan-recover", cl::desc("Enable recovery mode (continue-after-error)."), cl::Hidden, cl::init(false))
static cl::opt< bool > ClOptSameTemp("asan-opt-same-temp", cl::desc("Instrument the same temp just once"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClDynamicAllocaStack("asan-stack-dynamic-alloca", cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClOptStack("asan-opt-stack", cl::desc("Don't instrument scalar stack variables"), cl::Hidden, cl::init(false))
static const uint64_t kMIPS_ShadowOffsetN32
const char kAsanUnregisterImageGlobalsName[]
static cl::opt< AsanDetectStackUseAfterReturnMode > ClUseAfterReturn("asan-use-after-return", cl::desc("Sets the mode of detection for stack-use-after-return."), cl::values(clEnumValN(AsanDetectStackUseAfterReturnMode::Never, "never", "Never detect stack use after return."), clEnumValN(AsanDetectStackUseAfterReturnMode::Runtime, "runtime", "Detect stack use after return if " "binary flag 'ASAN_OPTIONS=detect_stack_use_after_return' is set."), clEnumValN(AsanDetectStackUseAfterReturnMode::Always, "always", "Always detect stack use after return.")), cl::Hidden, cl::init(AsanDetectStackUseAfterReturnMode::Runtime))
static cl::opt< bool > ClOptGlobals("asan-opt-globals", cl::desc("Don't instrument scalar globals"), cl::Hidden, cl::init(true))
static const uintptr_t kCurrentStackFrameMagic
static ShadowMapping getShadowMapping(const Triple &TargetTriple, int LongSize, bool IsKasan)
static const uint64_t kPPC64_ShadowOffset64
static cl::opt< AsanCtorKind > ClConstructorKind("asan-constructor-kind", cl::desc("Sets the ASan constructor kind"), cl::values(clEnumValN(AsanCtorKind::None, "none", "No constructors"), clEnumValN(AsanCtorKind::Global, "global", "Use global constructors")), cl::init(AsanCtorKind::Global), cl::Hidden)
static const int kMaxAsanStackMallocSizeClass
static const uint64_t kMIPS32_ShadowOffset32
static cl::opt< bool > ClAlwaysSlowPath("asan-always-slow-path", cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden, cl::init(false))
static const uint64_t kNetBSD_ShadowOffset32
static const uint64_t kFreeBSDAArch64_ShadowOffset64
static const uint64_t kSmallX86_64ShadowOffsetBase
static cl::opt< bool > ClInitializers("asan-initialization-order", cl::desc("Handle C++ initializer order"), cl::Hidden, cl::init(true))
static const uint64_t kNetBSD_ShadowOffset64
const char kAsanPtrSub[]
static cl::opt< unsigned > ClRealignStack("asan-realign-stack", cl::desc("Realign stack to the value of this flag (power of two)"), cl::Hidden, cl::init(32))
static const uint64_t kWindowsShadowOffset32
static cl::opt< bool > ClInstrumentReads("asan-instrument-reads", cl::desc("instrument read instructions"), cl::Hidden, cl::init(true))
static size_t TypeStoreSizeToSizeIndex(uint32_t TypeSize)
const char kAsanAllocaPoison[]
constexpr size_t kCompileKernelShift
static SmallSet< unsigned, 8 > SrcAddrSpaces
static cl::opt< bool > ClWithIfunc("asan-with-ifunc", cl::desc("Access dynamic shadow through an ifunc global on " "platforms that support this"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClKasanMemIntrinCallbackPrefix("asan-kernel-mem-intrinsic-prefix", cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden, cl::init(false))
const char kAsanVersionCheckNamePrefix[]
const char kAMDGPUAddressPrivateName[]
static const uint64_t kNetBSDKasan_ShadowOffset64
const char kAMDGPUBallotName[]
const char kAsanRegisterElfGlobalsName[]
static cl::opt< uint64_t > ClMappingOffset("asan-mapping-offset", cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"), cl::Hidden, cl::init(0))
const char kAsanReportErrorTemplate[]
static cl::opt< bool > ClWithComdat("asan-with-comdat", cl::desc("Place ASan constructors in comdat sections"), cl::Hidden, cl::init(true))
static StringRef getAllocaName(AllocaInst *AI)
static cl::opt< bool > ClSkipPromotableAllocas("asan-skip-promotable-allocas", cl::desc("Do not instrument promotable allocas"), cl::Hidden, cl::init(true))
static cl::opt< int > ClMaxInsnsToInstrumentPerBB("asan-max-ins-per-bb", cl::init(10000), cl::desc("maximal number of instructions to instrument in any given BB"), cl::Hidden)
static const uintptr_t kRetiredStackFrameMagic
static cl::opt< bool > ClUseStackSafety("asan-use-stack-safety", cl::Hidden, cl::init(true), cl::Hidden, cl::desc("Use Stack Safety analysis results"), cl::Optional)
const char kAsanPoisonGlobalsName[]
const char kAsanHandleNoReturnName[]
static const size_t kMinStackMallocSize
static cl::opt< int > ClDebug("asan-debug", cl::desc("debug"), cl::Hidden, cl::init(0))
const char kAsanAllocasUnpoison[]
static const uint64_t kAArch64_ShadowOffset64
static cl::opt< bool > ClInvalidPointerPairs("asan-detect-invalid-pointer-pair", cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden, cl::init(false))
Function Alias Analysis false
This file contains the simple types necessary to represent the attributes associated with functions a...
static bool isPointerOperand(Value *I, User *U)
static const Function * getParent(const Value *V)
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
DXIL Finalize Linkage
dxil translate DXIL Translate Metadata
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
static bool runOnFunction(Function &F, bool PostInlining)
This is the interface for a simple mod/ref and alias analysis over globals.
IRTranslator LLVM IR MI
Module.h This file contains the declarations for the Module class.
This defines the Use class.
std::pair< Instruction::BinaryOps, Value * > OffsetOp
Find all possible pairs (BinOp, RHS) that BinOp V, RHS can be simplified.
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
Definition Lint.cpp:539
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
#define G(x, y, z)
Definition MD5.cpp:55
print mir2vec MIR2Vec Vocabulary Printer Pass
Definition MIR2Vec.cpp:593
Machine Check Debug Module
This file contains the declarations for metadata subclasses.
uint64_t IntrinsicInst * II
FunctionAnalysisManager FAM
ModuleAnalysisManager MAM
if(PassOpts->AAPipeline)
const SmallVectorImpl< MachineOperand > & Cond
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
#define OP(OPC)
Definition Instruction.h:46
This file defines the SmallPtrSet class.
This file defines the SmallSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
This file contains some functions that are useful when dealing with strings.
#define LLVM_DEBUG(...)
Definition Debug.h:114
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
This pass exposes codegen information to IR-level passes.
uint64_t getZExtValue() const
Get zero extended value.
Definition APInt.h:1541
int64_t getSExtValue() const
Get sign extended value.
Definition APInt.h:1563
LLVM_ABI AddressSanitizerPass(const AddressSanitizerOptions &Options, bool UseGlobalGC=true, bool UseOdrIndicator=true, AsanDtorKind DestructorKind=AsanDtorKind::Global, AsanCtorKind ConstructorKind=AsanCtorKind::Global)
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
an instruction to allocate memory on the stack
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
PointerType * getType() const
Overload to return most specific pointer type.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
LLVM_ABI std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
void setAlignment(Align Align)
const Value * getArraySize() const
Get the number of elements allocated.
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
Class to represent array types.
static LLVM_ABI ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:459
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition BasicBlock.h:206
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
LLVM_ABI const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr if the function does no...
Conditional or Unconditional Branch instruction.
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
bool isInlineAsm() const
Check if this call is an inline asm statement.
void setCannotMerge()
static LLVM_ABI CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
bool doesNotReturn() const
Determine if the call cannot return.
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
@ Largest
The linker will choose the largest COMDAT.
Definition Comdat.h:39
@ SameSize
The data referenced by the COMDAT must be the same size.
Definition Comdat.h:41
@ Any
The linker may choose any COMDAT.
Definition Comdat.h:37
@ NoDeduplicate
No deduplication is performed.
Definition Comdat.h:40
@ ExactMatch
The data referenced by the COMDAT must be the same.
Definition Comdat.h:38
ConstantArray - Constant Array Declarations.
Definition Constants.h:433
static LLVM_ABI Constant * get(ArrayType *T, ArrayRef< Constant * > V)
static LLVM_ABI Constant * getPointerCast(Constant *C, Type *Ty)
Create a BitCast, AddrSpaceCast, or a PtrToInt cast constant expression.
static LLVM_ABI Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, GEPNoWrapFlags NW=GEPNoWrapFlags::none(), std::optional< ConstantRange > InRange=std::nullopt, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
Definition Constants.h:1274
static LLVM_ABI bool isValueValidForType(Type *Ty, uint64_t V)
This static method returns true if the type Ty is big enough to represent the value V.
static LLVM_ABI Constant * get(StructType *T, ArrayRef< Constant * > V)
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
A parsed version of the target data layout string, and methods for querying it.
Definition DataLayout.h:63
A debug info location.
Definition DebugLoc.h:124
LLVM_ABI DILocation * get() const
Get the underlying DILocation.
Definition DebugLoc.cpp:48
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single entity.
static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
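These two pieces commonly appear together. A hedged sketch (the hook name is a placeholder, not one of the runtime's real entry points): build a void(i8*) function type and declare a callee for it, getting back the FunctionCallee documented above.
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Module.h"
using namespace llvm;
// Hypothetical helper: declare `void my_hook(i8*)` in M, or reuse it.
static FunctionCallee declareHook(Module &M) {
  LLVMContext &Ctx = M.getContext();
  FunctionType *FTy = FunctionType::get(
      Type::getVoidTy(Ctx), {PointerType::getUnqual(Type::getInt8Ty(Ctx))},
      /*isVarArg=*/false);
  return M.getOrInsertFunction("my_hook", FTy);
}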
const BasicBlock & front() const
Definition Function.h:858
static Function * createWithDefaultAttr(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Creates a function with some attributes recorded in llvm.module.flags and the LLVMContext applied.
Definition Function.cpp:380
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition Function.h:903
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition Function.cpp:359
const Constant * getAliasee() const
Definition GlobalAlias.h:87
static LLVM_ABI GlobalAlias * create(Type *Ty, unsigned AddressSpace, LinkageTypes Linkage, const Twine &Name, Constant *Aliasee, Module *Parent)
If a parent module is specified, the alias is automatically inserted into the end of the specified module's alias list.
Definition Globals.cpp:598
LLVM_ABI void copyMetadata(const GlobalObject *Src, unsigned Offset)
Copy metadata from Src, adjusting offsets by Offset.
LLVM_ABI void setComdat(Comdat *C)
Definition Globals.cpp:214
LLVM_ABI void setSection(StringRef S)
Change the section for this global.
Definition Globals.cpp:275
VisibilityTypes getVisibility() const
void setUnnamedAddr(UnnamedAddr Val)
bool hasLocalLinkage() const
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1', drop it.
ThreadLocalMode getThreadLocalMode() const
@ HiddenVisibility
The GV is hidden.
Definition GlobalValue.h:69
void setVisibility(VisibilityTypes V)
LinkageTypes
An enumeration for the kinds of linkage for global values.
Definition GlobalValue.h:52
@ PrivateLinkage
Like Internal, but omit from symbol table.
Definition GlobalValue.h:61
@ CommonLinkage
Tentative definitions.
Definition GlobalValue.h:63
@ InternalLinkage
Rename collisions when linking (static functions).
Definition GlobalValue.h:60
@ AvailableExternallyLinkage
Available for inspection, not emission.
Definition GlobalValue.h:54
@ ExternalWeakLinkage
ExternalWeak linkage description.
Definition GlobalValue.h:62
DLLStorageClassTypes getDLLStorageClass() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
LLVM_ABI void copyAttributesFrom(const GlobalVariable *Src)
copyAttributesFrom - copy all additional attributes (those not needed to create a GlobalVariable) from the GlobalVariable Src to this one.
Definition Globals.cpp:553
void setAlignment(Align Align)
Sets the alignment attribute of the GlobalVariable.
Analysis pass providing a never-invalidated alias analysis result.
This instruction compares its operands according to the predicate given to the constructor.
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
AllocaInst * CreateAlloca(Type *Ty, unsigned AddrSpace, Value *ArraySize=nullptr, const Twine &Name="")
Definition IRBuilder.h:1833
IntegerType * getInt1Ty()
Fetch the type representing a single bit.
Definition IRBuilder.h:547
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
Definition IRBuilder.h:1867
CallInst * CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign, uint64_t Size, bool isVolatile=false, const AAMDNodes &AAInfo=AAMDNodes())
Create and insert a memcpy between the specified pointers.
Definition IRBuilder.h:687
Value * CreatePointerCast(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2254
Value * CreateICmpSGE(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2360
LLVM_ABI Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
BasicBlock::iterator GetInsertPoint() const
Definition IRBuilder.h:202
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2202
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition IRBuilder.h:1513
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
Definition IRBuilder.h:562
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition IRBuilder.h:2039
BasicBlock * GetInsertBlock() const
Definition IRBuilder.h:201
IntegerType * getInt64Ty()
Fetch the type representing a 64-bit integer.
Definition IRBuilder.h:567
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2336
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition IRBuilder.h:1926
LLVM_ABI CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Definition IRBuilder.h:522
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Definition IRBuilder.h:2497
Value * CreateNot(Value *V, const Twine &Name="")
Definition IRBuilder.h:1808
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2332
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1420
ConstantInt * getIntN(unsigned N, uint64_t C)
Get a constant N-bit value, zero extended or truncated from a 64-bit value.
Definition IRBuilder.h:533
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool' for the isVolatile parameter.
Definition IRBuilder.h:1850
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:1551
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
Definition IRBuilder.h:1863
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1403
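Taken together, a few of the IRBuilder calls in this index are enough to express the usual shadow-address computation Shadow = (Addr >> Scale) + Offset. The following is a sketch under stated assumptions (a 64-bit target; Scale and Offset passed in rather than derived from the triple), not the pass's exact memToShadow, which may OR the offset instead of adding it depending on OrShadowOffset.
#include "llvm/IR/Constants.h"
#include "llvm/IR/IRBuilder.h"
using namespace llvm;
// Hypothetical sketch: map an application address to its shadow address.
static Value *memToShadowSketch(IRBuilder<> &IRB, Value *Addr,
                                uint64_t Scale, uint64_t Offset) {
  Type *IntptrTy = IRB.getInt64Ty(); // assumes a 64-bit target
  Value *AddrInt = IRB.CreatePtrToInt(Addr, IntptrTy);
  Value *Shifted = IRB.CreateLShr(AddrInt, ConstantInt::get(IntptrTy, Scale));
  return IRB.CreateAdd(Shifted, ConstantInt::get(IntptrTy, Offset));
}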
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2197
Value * CreateIsNotNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg != 0.
Definition IRBuilder.h:2659
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition IRBuilder.h:2511
LLVM_ABI Value * CreateTypeSize(Type *Ty, TypeSize Size)
Create an expression which evaluates to the number of units in Size at runtime.
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
Definition IRBuilder.h:2280
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition IRBuilder.h:207
Type * getVoidTy()
Fetch the type representing void.
Definition IRBuilder.h:600
StoreInst * CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, bool isVolatile=false)
Definition IRBuilder.h:1886
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="", bool IsDisjoint=false)
Definition IRBuilder.h:1573
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
Definition IRBuilder.h:552
Value * CreateAddrSpaceCast(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2212
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1437
This provides a uniform API for creating instructions and inserting them into a basic block: either at the end of a BasicBlock, or at a specific iterator location in a block.
Definition IRBuilder.h:2788
static LLVM_ABI InlineAsm * get(FunctionType *Ty, StringRef AsmString, StringRef Constraints, bool hasSideEffects, bool isAlignStack=false, AsmDialect asmDialect=AD_ATT, bool canThrow=false)
InlineAsm::get - Return the specified uniqued inline asm string.
Definition InlineAsm.cpp:43
Base class for instruction visitors.
Definition InstVisitor.h:78
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos lives in, right before MovePos.
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
LLVM_ABI BasicBlock * getSuccessor(unsigned Idx) const LLVM_READONLY
Return the specified successor. This instruction must be a terminator.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:318
A wrapper class for inspecting calls to intrinsic functions.
LLVM_ABI void emitError(const Instruction *I, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location information.
An instruction for reading from memory.
static Error ParseSectionSpecifier(StringRef Spec, StringRef &Segment, StringRef &Section, unsigned &TAA, bool &TAAParsed, unsigned &StubSize)
Parse the section specifier indicated by "Spec".
LLVM_ABI MDNode * createUnlikelyBranchWeights()
Return metadata containing two branch weights, with significant bias towards the false destination.
Definition MDBuilder.cpp:48
Metadata node.
Definition Metadata.h:1078
ArrayRef< MDOperand > operands() const
Definition Metadata.h:1440
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition Metadata.h:1569
Tuple of metadata.
Definition Metadata.h:1497
This is the common base class for memset/memcpy/memmove.
Root of the metadata hierarchy.
Definition Metadata.h:64
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
Evaluate the size and offset of an object pointed to by a Value* statically.
LLVM_ABI SizeOffsetAPInt compute(Value *V)
Pass interface - Implemented by all 'passes'.
Definition Pass.h:99
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address space zero).
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
Definition Analysis.h:115
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & abandon()
Mark an analysis as abandoned.
Definition Analysis.h:171
Return a value (possibly void), from a function.
static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, InsertPosition InsertBefore=nullptr)
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less than N).
Definition SmallSet.h:133
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This pass performs the global (interprocedural) stack safety analysis (new pass manager).
bool stackAccessIsSafe(const Instruction &I) const
bool isSafe(const AllocaInst &AI) const
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:261
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:143
Class to represent struct types.
static LLVM_ABI StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
Definition Type.cpp:413
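A sketch of the padding idiom these constructors support (a simplified illustration with an invented helper, not the pass's global-rewriting code): wrap an initializer in { OrigTy, [RZ x i8] } with a zeroed right redzone, using the ConstantStruct::get overload listed earlier in this index.
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
using namespace llvm;
// Hypothetical helper: pad Init with RedzoneBytes of zeros on the right.
static Constant *padWithRedzone(LLVMContext &Ctx, Constant *Init,
                                uint64_t RedzoneBytes) {
  ArrayType *RZTy = ArrayType::get(Type::getInt8Ty(Ctx), RedzoneBytes);
  StructType *NewTy = StructType::get(Ctx, {Init->getType(), RZTy});
  return ConstantStruct::get(NewTy, {Init, Constant::getNullValue(RZTy)});
}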
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
AttributeList getAttrList(LLVMContext *C, ArrayRef< unsigned > ArgNos, bool Signed, bool Ret=false, AttributeList AL=AttributeList()) const
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
EltTy front() const
unsigned size() const
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
bool isThumb() const
Tests whether the target is Thumb (little and big endian).
Definition Triple.h:932
bool isDriverKit() const
Is this an Apple DriverKit triple.
Definition Triple.h:616
bool isBPF() const
Tests whether the target is eBPF.
Definition Triple.h:1170
bool isOSNetBSD() const
Definition Triple.h:646
bool isAndroid() const
Tests whether the target is Android.
Definition Triple.h:841
bool isABIN32() const
Definition Triple.h:1158
bool isMIPS64() const
Tests whether the target is MIPS 64-bit (little and big endian).
Definition Triple.h:1054
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition Triple.h:413
bool isLoongArch64() const
Tests whether the target is 64-bit LoongArch.
Definition Triple.h:1043
bool isMIPS32() const
Tests whether the target is MIPS 32-bit (little and big endian).
Definition Triple.h:1049
bool isOSWindows() const
Tests whether the OS is Windows.
Definition Triple.h:695
@ UnknownObjectFormat
Definition Triple.h:320
bool isARM() const
Tests whether the target is ARM (little and big endian).
Definition Triple.h:937
bool isOSLinux() const
Tests whether the OS is Linux.
Definition Triple.h:744
bool isAMDGPU() const
Definition Triple.h:929
bool isMacOSX() const
Is this a Mac OS X triple.
Definition Triple.h:582
bool isOSFreeBSD() const
Definition Triple.h:654
bool isOSEmscripten() const
Tests whether the OS is Emscripten.
Definition Triple.h:764
bool isWatchOS() const
Is this an Apple watchOS triple.
Definition Triple.h:601
bool isiOS() const
Is this an iOS triple.
Definition Triple.h:591
bool isPS() const
Tests whether the target is the PS4 or PS5 platform.
Definition Triple.h:838
bool isWasm() const
Tests whether the target is wasm (32- and 64-bit).
Definition Triple.h:1142
bool isOSFuchsia() const
Definition Triple.h:658
bool isOSHaiku() const
Tests whether the OS is Haiku.
Definition Triple.h:685
Twine - A lightweight data structure for efficiently representing the concatenation of temporary values as strings.
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
LLVM_ABI unsigned getIntegerBitWidth() const
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:296
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Definition Type.cpp:280
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
Definition Type.cpp:294
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition Type.h:311
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
op_range operands()
Definition User.h:292
Value * getOperand(unsigned i) const
Definition User.h:232
static LLVM_ABI ValueAsMetadata * get(Value *V)
Definition Metadata.cpp:503
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:546
iterator_range< user_iterator > users()
Definition Value.h:426
LLVM_ABI bool isSwiftError() const
Return true if this value is a swifterror value.
Definition Value.cpp:1120
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition Value.cpp:396
Base class of all SIMD vector types.
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct a VectorType.
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:123
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
CallInst * Call
Changed
This file contains the declaration of the Comdat class, which represents a single COMDAT in LLVM.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
void getInterestingMemoryOperands(Module &M, Instruction *I, SmallVectorImpl< InterestingMemoryOperand > &Interesting)
Get all the memory operands from the instruction that need to be instrumented.
void instrumentAddress(Module &M, IRBuilder<> &IRB, Instruction *OrigIns, Instruction *InsertBefore, Value *Addr, Align Alignment, TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls, bool Recover, int AsanScale, int AsanOffset)
Instrument the memory operand Addr.
uint64_t getRedzoneSizeForGlobal(int AsanScale, uint64_t SizeInBytes)
Given SizeInBytes of the Value to be instrumented, returns the redzone size corresponding to it.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
@ S_CSTRING_LITERALS
S_CSTRING_LITERALS - Section with literal C strings.
Definition MachO.h:131
@ OB
OB - OneByte - Set if this instruction has a one byte opcode.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to the ValuesClass constructor.
initializer< Ty > init(const Ty &Val)
cb< typename detail::callback_traits< F >::result_type, typename detail::callback_traits< F >::arg_type > callback(F CB)
uint64_t getAllocaSizeInBytes(const AllocaInst &AI)
Context & getContext() const
Definition BasicBlock.h:99
friend class Instruction
Iterator for Instructions in a BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI void ReplaceInstWithInst(BasicBlock *BB, BasicBlock::iterator &BI, Instruction *I)
Replace the instruction specified by BI with the instruction specified by I.
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1725
LLVM_ABI SmallVector< uint8_t, 64 > GetShadowBytesAfterScope(const SmallVectorImpl< ASanStackVariableDescription > &Vars, const ASanStackFrameLayout &Layout)
LLVM_ABI GlobalVariable * createPrivateGlobalForString(Module &M, StringRef Str, bool AllowMerging, Twine NamePrefix="")
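Assumed usage per the declaration above (the string, prefix, and helper name are placeholders; the header that declares this utility is omitted):
#include "llvm/IR/Module.h"
using namespace llvm;
// Hypothetical helper: intern a module-local string global.
// AllowMerging permits the linker to deduplicate identical contents.
static GlobalVariable *internString(Module &M) {
  return createPrivateGlobalForString(M, "example.cpp",
                                      /*AllowMerging=*/true,
                                      /*NamePrefix=*/"__example_gen_");
}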
LLVM_ABI AllocaInst * findAllocaForValue(Value *V, bool OffsetZero=false)
Returns unique alloca where the value comes from, or nullptr.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
@ Done
Definition Threading.h:60
FunctionAddr VTableAddr uintptr_t uintptr_t Int32Ty
Definition InstrProf.h:296
LLVM_ABI Function * createSanitizerCtor(Module &M, StringRef CtorName)
Creates a sanitizer constructor function.
AsanDetectStackUseAfterReturnMode
Mode of ASan's stack use-after-return detection.
@ Always
Always detect stack use after return.
@ Never
Never detect stack use after return.
@ Runtime
Detect stack use after return unless disabled at runtime (ASAN_OPTIONS=detect_stack_use_after_return=0).
LLVM_ABI DenseMap< BasicBlock *, ColorVector > colorEHFunclets(Function &F)
If an EH funclet personality is in use (see isFuncletEHPersonality), this will recompute which blocks are in which funclet.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting iteration.
Definition STLExtras.h:632
InnerAnalysisManagerProxy< FunctionAnalysisManager, Module > FunctionAnalysisManagerModuleProxy
Provide the FunctionAnalysisManager to Module proxy.
Op::Description Desc
LLVM_ABI bool isAllocaPromotable(const AllocaInst *AI)
Return true if this alloca is legal for promotion.
LLVM_ABI SmallString< 64 > ComputeASanStackFrameDescription(const SmallVectorImpl< ASanStackVariableDescription > &Vars)
LLVM_ABI SmallVector< uint8_t, 64 > GetShadowBytes(const SmallVectorImpl< ASanStackVariableDescription > &Vars, const ASanStackFrameLayout &Layout)
int countr_zero(T Val)
Count the number of 0's from the least significant bit to the most significant bit, stopping at the first 1.
Definition bit.h:202
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
LLVM_ABI FunctionCallee declareSanitizerInitFunction(Module &M, StringRef InitName, ArrayRef< Type * > InitArgTypes, bool Weak=false)
FunctionAddr VTableAddr uintptr_t uintptr_t Version
Definition InstrProf.h:302
LLVM_ABI std::string getUniqueModuleId(Module *M)
Produce a unique identifier for this module by taking the MD5 sum of the names of the module's strong external symbols.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
LLVM_ABI std::pair< Function *, FunctionCallee > createSanitizerCtorAndInitFunctions(Module &M, StringRef CtorName, StringRef InitName, ArrayRef< Type * > InitArgTypes, ArrayRef< Value * > InitArgs, StringRef VersionCheckName=StringRef(), bool Weak=false)
Creates a sanitizer constructor function, and calls the sanitizer's init function from it.
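A hedged sketch of this utility together with appendToGlobalCtors (listed further down this index); the ctor name, init name, and priority are invented for illustration:
#include "llvm/Transforms/Utils/ModuleUtils.h"
using namespace llvm;
// Hypothetical helper: emit `my.module_ctor` calling `my_init()` and
// register it in llvm.global_ctors.
static void addModuleCtor(Module &M) {
  auto [Ctor, InitFn] = createSanitizerCtorAndInitFunctions(
      M, "my.module_ctor", "my_init",
      /*InitArgTypes=*/{}, /*InitArgs=*/{});
  (void)InitFn; // unused in this sketch
  appendToGlobalCtors(M, Ctor, /*Priority=*/1);
}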
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
LLVM_ABI void SplitBlockAndInsertIfThenElse(Value *Cond, BasicBlock::iterator SplitBefore, Instruction **ThenTerm, Instruction **ElseTerm, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr)
SplitBlockAndInsertIfThenElse is similar to SplitBlockAndInsertIfThen, but also creates the ElseBlock.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
bool isAlnum(char C)
Checks whether character C is either a decimal digit or an uppercase or lowercase letter as classified by "C" locale.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
Definition Casting.h:547
AsanDtorKind
Types of ASan module destructors supported.
@ Invalid
Not a valid destructor Kind.
@ Global
Append to llvm.global_dtors.
@ None
Do not emit any destructors for ASan.
LLVM_ABI ASanStackFrameLayout ComputeASanStackFrameLayout(SmallVectorImpl< ASanStackVariableDescription > &Vars, uint64_t Granularity, uint64_t MinHeaderSize)
TargetTransformInfo TTI
void cantFail(Error Err, const char *Msg=nullptr)
Report a fatal error if Err is a failure value.
Definition Error.h:769
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
OperandBundleDefT< Value * > OperandBundleDef
Definition AutoUpgrade.h:34
LLVM_ABI void appendToCompilerUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.compiler.used list.
static const int kAsanStackUseAfterReturnMagic
LLVM_ABI void setGlobalVariableLargeSection(const Triple &TargetTriple, GlobalVariable &GV)
void removeASanIncompatibleFnAttributes(Function &F, bool ReadsArgMem)
Remove memory attributes that are incompatible with the instrumentation added by AddressSanitizer and HWAddressSanitizer.
@ Dynamic
Denotes mode unknown at compile time.
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI void appendToGlobalCtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Append F to the list of global ctors of module M with the given Priority.
TinyPtrVector< BasicBlock * > ColorVector
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
Align assumeAligned(uint64_t Value)
Treats the value 0 as a 1, so Align is always at least 1.
Definition Alignment.h:100
iterator_range< df_iterator< T > > depth_first(const T &G)
LLVM_ABI Instruction * SplitBlockAndInsertIfThen(Value *Cond, BasicBlock::iterator SplitBefore, bool Unreachable, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, BasicBlock *ThenBlock=nullptr)
Split the containing block at the specified instruction - everything before SplitBefore stays in the old basic block, and the remaining instructions are moved to the new block.
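This is the workhorse behind the usual check-and-report shape of instrumentation. A sketch (the helper name is invented, the report emission is elided, and Cond is assumed to be an already-computed i1):
#include "llvm/IR/IRBuilder.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
using namespace llvm;
// Hypothetical helper: branch on Cond into a then-block that cannot fall
// through (Unreachable=true gives it an `unreachable` terminator).
static void emitGuardedReport(Value *Cond, Instruction *InsertPt) {
  Instruction *ThenTerm = SplitBlockAndInsertIfThen(
      Cond, InsertPt->getIterator(), /*Unreachable=*/true);
  IRBuilder<> IRB(ThenTerm); // emit the report call here, before the terminator
  (void)IRB;
}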
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts, or llvm.threadlocal.address from the specified value, returning the original object being addressed.
AsanCtorKind
Types of ASan module constructors supported.
LLVM_ABI void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI, const TargetLibraryInfo *TLI)
Given a CallInst, check if it calls a string function known to CodeGen, and mark it with NoBuiltin if so.
Definition Local.cpp:3865
LLVM_ABI void appendToUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.used list.
LLVM_ABI void appendToGlobalDtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Same as appendToGlobalCtors(), but for global dtors.
LLVM_ABI bool checkIfAlreadyInstrumented(Module &M, StringRef Flag)
Check if the module has the flag attached; if not, add the flag.
void getAddressSanitizerParams(const Triple &TargetTriple, int LongSize, bool IsKasan, uint64_t *ShadowBase, int *MappingScale, bool *OrShadowOffset)
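Assumed usage per the declaration above (the triple is only an example; the header is assumed to be the pass's public one):
#include "llvm/TargetParser/Triple.h"
#include "llvm/Transforms/Instrumentation/AddressSanitizer.h"
// Query the shadow mapping parameters for a target.
static void queryMapping() {
  uint64_t ShadowBase;
  int MappingScale;
  bool OrShadowOffset;
  getAddressSanitizerParams(llvm::Triple("x86_64-unknown-linux-gnu"),
                            /*LongSize=*/64, /*IsKasan=*/false,
                            &ShadowBase, &MappingScale, &OrShadowOffset);
}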
DEMANGLE_ABI std::string demangle(std::string_view MangledName)
Attempt to demangle a string using different demangling schemes.
Definition Demangle.cpp:20
std::string itostr(int64_t X)
LLVM_ABI void SplitBlockAndInsertForEachLane(ElementCount EC, Type *IndexTy, BasicBlock::iterator InsertBefore, std::function< void(IRBuilderBase &, Value *)> Func)
Utility function for performing a given action on each lane of a vector with EC elements.
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
Definition MIRParser.h:39
LLVM_ABI bool replaceDbgDeclare(Value *Address, Value *NewAddress, DIBuilder &Builder, uint8_t DIExprFlags, int Offset)
Replaces dbg.declare record when the address it describes is replaced with a new value.
Definition Local.cpp:1942
#define N
LLVM_ABI ASanAccessInfo(int32_t Packed)
const uint8_t AccessSizeIndex
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition Alignment.h:106
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
Definition Alignment.h:130
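A two-line illustration of the Align/MaybeAlign distinction:
#include "llvm/Support/Alignment.h"
llvm::MaybeAlign MA;               // undefined (0) alignment
llvm::Align A = MA.valueOrOne();   // collapses to Align(1)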
Information about a load/store intrinsic defined by the target.
SmallVector< InterestingMemoryOperand, 1 > InterestingOperands
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition PassManager.h:69
SizeOffsetAPInt - Used by ObjectSizeOffsetVisitor, which works with APInts.