LLVM  16.0.0git
AddressSanitizer.cpp
Go to the documentation of this file.
1 //===- AddressSanitizer.cpp - memory error detector -----------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is a part of AddressSanitizer, an address basic correctness
10 // checker.
11 // Details of the algorithm:
12 // https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm
13 //
14 // FIXME: This sanitizer does not yet handle scalable vectors
15 //
16 //===----------------------------------------------------------------------===//
17 
19 #include "llvm/ADT/ArrayRef.h"
20 #include "llvm/ADT/DenseMap.h"
22 #include "llvm/ADT/SmallPtrSet.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/ADT/Statistic.h"
25 #include "llvm/ADT/StringExtras.h"
26 #include "llvm/ADT/StringRef.h"
27 #include "llvm/ADT/Triple.h"
28 #include "llvm/ADT/Twine.h"
35 #include "llvm/Demangle/Demangle.h"
36 #include "llvm/IR/Argument.h"
37 #include "llvm/IR/Attributes.h"
38 #include "llvm/IR/BasicBlock.h"
39 #include "llvm/IR/Comdat.h"
40 #include "llvm/IR/Constant.h"
41 #include "llvm/IR/Constants.h"
42 #include "llvm/IR/DIBuilder.h"
43 #include "llvm/IR/DataLayout.h"
45 #include "llvm/IR/DebugLoc.h"
46 #include "llvm/IR/DerivedTypes.h"
47 #include "llvm/IR/Function.h"
48 #include "llvm/IR/GlobalAlias.h"
49 #include "llvm/IR/GlobalValue.h"
50 #include "llvm/IR/GlobalVariable.h"
51 #include "llvm/IR/IRBuilder.h"
52 #include "llvm/IR/InlineAsm.h"
53 #include "llvm/IR/InstVisitor.h"
54 #include "llvm/IR/InstrTypes.h"
55 #include "llvm/IR/Instruction.h"
56 #include "llvm/IR/Instructions.h"
57 #include "llvm/IR/IntrinsicInst.h"
58 #include "llvm/IR/Intrinsics.h"
59 #include "llvm/IR/LLVMContext.h"
60 #include "llvm/IR/MDBuilder.h"
61 #include "llvm/IR/Metadata.h"
62 #include "llvm/IR/Module.h"
63 #include "llvm/IR/Type.h"
64 #include "llvm/IR/Use.h"
65 #include "llvm/IR/Value.h"
66 #include "llvm/MC/MCSectionMachO.h"
67 #include "llvm/Support/Casting.h"
69 #include "llvm/Support/Debug.h"
81 #include <algorithm>
82 #include <cassert>
83 #include <cstddef>
84 #include <cstdint>
85 #include <iomanip>
86 #include <limits>
87 #include <sstream>
88 #include <string>
89 #include <tuple>
90 
91 using namespace llvm;
92 
93 #define DEBUG_TYPE "asan"
94 
95 static const uint64_t kDefaultShadowScale = 3;
96 static const uint64_t kDefaultShadowOffset32 = 1ULL << 29;
97 static const uint64_t kDefaultShadowOffset64 = 1ULL << 44;
100 static const uint64_t kSmallX86_64ShadowOffsetBase = 0x7FFFFFFF; // < 2G.
102 static const uint64_t kLinuxKasan_ShadowOffset64 = 0xdffffc0000000000;
103 static const uint64_t kPPC64_ShadowOffset64 = 1ULL << 44;
104 static const uint64_t kSystemZ_ShadowOffset64 = 1ULL << 52;
105 static const uint64_t kMIPS_ShadowOffsetN32 = 1ULL << 29;
106 static const uint64_t kMIPS32_ShadowOffset32 = 0x0aaa0000;
107 static const uint64_t kMIPS64_ShadowOffset64 = 1ULL << 37;
108 static const uint64_t kAArch64_ShadowOffset64 = 1ULL << 36;
109 static const uint64_t kLoongArch64_ShadowOffset64 = 1ULL << 46;
110 static const uint64_t kRISCV64_ShadowOffset64 = 0xd55550000;
111 static const uint64_t kFreeBSD_ShadowOffset32 = 1ULL << 30;
112 static const uint64_t kFreeBSD_ShadowOffset64 = 1ULL << 46;
113 static const uint64_t kFreeBSDAArch64_ShadowOffset64 = 1ULL << 47;
114 static const uint64_t kFreeBSDKasan_ShadowOffset64 = 0xdffff7c000000000;
115 static const uint64_t kNetBSD_ShadowOffset32 = 1ULL << 30;
116 static const uint64_t kNetBSD_ShadowOffset64 = 1ULL << 46;
117 static const uint64_t kNetBSDKasan_ShadowOffset64 = 0xdfff900000000000;
118 static const uint64_t kPS_ShadowOffset64 = 1ULL << 40;
119 static const uint64_t kWindowsShadowOffset32 = 3ULL << 28;
121 
122 // The shadow memory space is dynamically allocated.
124 
125 static const size_t kMinStackMallocSize = 1 << 6; // 64B
126 static const size_t kMaxStackMallocSize = 1 << 16; // 64K
127 static const uintptr_t kCurrentStackFrameMagic = 0x41B58AB3;
128 static const uintptr_t kRetiredStackFrameMagic = 0x45E0360E;
129 
130 const char kAsanModuleCtorName[] = "asan.module_ctor";
131 const char kAsanModuleDtorName[] = "asan.module_dtor";
133 // On Emscripten, the system needs more than one priority for constructors.
135 const char kAsanReportErrorTemplate[] = "__asan_report_";
136 const char kAsanRegisterGlobalsName[] = "__asan_register_globals";
137 const char kAsanUnregisterGlobalsName[] = "__asan_unregister_globals";
138 const char kAsanRegisterImageGlobalsName[] = "__asan_register_image_globals";
140  "__asan_unregister_image_globals";
141 const char kAsanRegisterElfGlobalsName[] = "__asan_register_elf_globals";
142 const char kAsanUnregisterElfGlobalsName[] = "__asan_unregister_elf_globals";
143 const char kAsanPoisonGlobalsName[] = "__asan_before_dynamic_init";
144 const char kAsanUnpoisonGlobalsName[] = "__asan_after_dynamic_init";
145 const char kAsanInitName[] = "__asan_init";
146 const char kAsanVersionCheckNamePrefix[] = "__asan_version_mismatch_check_v";
147 const char kAsanPtrCmp[] = "__sanitizer_ptr_cmp";
148 const char kAsanPtrSub[] = "__sanitizer_ptr_sub";
149 const char kAsanHandleNoReturnName[] = "__asan_handle_no_return";
150 static const int kMaxAsanStackMallocSizeClass = 10;
151 const char kAsanStackMallocNameTemplate[] = "__asan_stack_malloc_";
153  "__asan_stack_malloc_always_";
154 const char kAsanStackFreeNameTemplate[] = "__asan_stack_free_";
155 const char kAsanGenPrefix[] = "___asan_gen_";
156 const char kODRGenPrefix[] = "__odr_asan_gen_";
157 const char kSanCovGenPrefix[] = "__sancov_gen_";
158 const char kAsanSetShadowPrefix[] = "__asan_set_shadow_";
159 const char kAsanPoisonStackMemoryName[] = "__asan_poison_stack_memory";
160 const char kAsanUnpoisonStackMemoryName[] = "__asan_unpoison_stack_memory";
161 
162 // ASan version script has __asan_* wildcard. Triple underscore prevents a
163 // linker (gold) warning about attempting to export a local symbol.
164 const char kAsanGlobalsRegisteredFlagName[] = "___asan_globals_registered";
165 
167  "__asan_option_detect_stack_use_after_return";
168 
170  "__asan_shadow_memory_dynamic_address";
171 
172 const char kAsanAllocaPoison[] = "__asan_alloca_poison";
173 const char kAsanAllocasUnpoison[] = "__asan_allocas_unpoison";
174 
175 const char kAMDGPUAddressSharedName[] = "llvm.amdgcn.is.shared";
176 const char kAMDGPUAddressPrivateName[] = "llvm.amdgcn.is.private";
177 
178 // Accesses sizes are powers of two: 1, 2, 4, 8, 16.
179 static const size_t kNumberOfAccessSizes = 5;
180 
181 static const uint64_t kAllocaRzSize = 32;
182 
183 // ASanAccessInfo implementation constants.
184 constexpr size_t kCompileKernelShift = 0;
185 constexpr size_t kCompileKernelMask = 0x1;
186 constexpr size_t kAccessSizeIndexShift = 1;
187 constexpr size_t kAccessSizeIndexMask = 0xf;
188 constexpr size_t kIsWriteShift = 5;
189 constexpr size_t kIsWriteMask = 0x1;
190 
191 // Command-line flags.
192 
194  "asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"),
195  cl::Hidden, cl::init(false));
196 
197 static cl::opt<bool> ClRecover(
198  "asan-recover",
199  cl::desc("Enable recovery mode (continue-after-error)."),
200  cl::Hidden, cl::init(false));
201 
203  "asan-guard-against-version-mismatch",
204  cl::desc("Guard against compiler/runtime version mismatch."),
205  cl::Hidden, cl::init(true));
206 
207 // This flag may need to be replaced with -f[no-]asan-reads.
208 static cl::opt<bool> ClInstrumentReads("asan-instrument-reads",
209  cl::desc("instrument read instructions"),
210  cl::Hidden, cl::init(true));
211 
213  "asan-instrument-writes", cl::desc("instrument write instructions"),
214  cl::Hidden, cl::init(true));
215 
216 static cl::opt<bool>
217  ClUseStackSafety("asan-use-stack-safety", cl::Hidden, cl::init(false),
218  cl::Hidden, cl::desc("Use Stack Safety analysis results"),
219  cl::Optional);
220 
222  "asan-instrument-atomics",
223  cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
224  cl::init(true));
225 
226 static cl::opt<bool>
227  ClInstrumentByval("asan-instrument-byval",
228  cl::desc("instrument byval call arguments"), cl::Hidden,
229  cl::init(true));
230 
232  "asan-always-slow-path",
233  cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden,
234  cl::init(false));
235 
237  "asan-force-dynamic-shadow",
238  cl::desc("Load shadow address into a local variable for each function"),
239  cl::Hidden, cl::init(false));
240 
241 static cl::opt<bool>
242  ClWithIfunc("asan-with-ifunc",
243  cl::desc("Access dynamic shadow through an ifunc global on "
244  "platforms that support this"),
245  cl::Hidden, cl::init(true));
246 
248  "asan-with-ifunc-suppress-remat",
249  cl::desc("Suppress rematerialization of dynamic shadow address by passing "
250  "it through inline asm in prologue."),
251  cl::Hidden, cl::init(true));
252 
253 // This flag limits the number of instructions to be instrumented
254 // in any given BB. Normally, this should be set to unlimited (INT_MAX),
255 // but due to http://llvm.org/bugs/show_bug.cgi?id=12652 we temporary
256 // set it to 10000.
258  "asan-max-ins-per-bb", cl::init(10000),
259  cl::desc("maximal number of instructions to instrument in any given BB"),
260  cl::Hidden);
261 
262 // This flag may need to be replaced with -f[no]asan-stack.
263 static cl::opt<bool> ClStack("asan-stack", cl::desc("Handle stack memory"),
264  cl::Hidden, cl::init(true));
266  "asan-max-inline-poisoning-size",
267  cl::desc(
268  "Inline shadow poisoning for blocks up to the given size in bytes."),
269  cl::Hidden, cl::init(64));
270 
272  "asan-use-after-return",
273  cl::desc("Sets the mode of detection for stack-use-after-return."),
274  cl::values(
276  "Never detect stack use after return."),
277  clEnumValN(
279  "Detect stack use after return if "
280  "binary flag 'ASAN_OPTIONS=detect_stack_use_after_return' is set."),
282  "Always detect stack use after return.")),
284 
285 static cl::opt<bool> ClRedzoneByvalArgs("asan-redzone-byval-args",
286  cl::desc("Create redzones for byval "
287  "arguments (extra copy "
288  "required)"), cl::Hidden,
289  cl::init(true));
290 
291 static cl::opt<bool> ClUseAfterScope("asan-use-after-scope",
292  cl::desc("Check stack-use-after-scope"),
293  cl::Hidden, cl::init(false));
294 
295 // This flag may need to be replaced with -f[no]asan-globals.
296 static cl::opt<bool> ClGlobals("asan-globals",
297  cl::desc("Handle global objects"), cl::Hidden,
298  cl::init(true));
299 
300 static cl::opt<bool> ClInitializers("asan-initialization-order",
301  cl::desc("Handle C++ initializer order"),
302  cl::Hidden, cl::init(true));
303 
305  "asan-detect-invalid-pointer-pair",
306  cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden,
307  cl::init(false));
308 
310  "asan-detect-invalid-pointer-cmp",
311  cl::desc("Instrument <, <=, >, >= with pointer operands"), cl::Hidden,
312  cl::init(false));
313 
315  "asan-detect-invalid-pointer-sub",
316  cl::desc("Instrument - operations with pointer operands"), cl::Hidden,
317  cl::init(false));
318 
320  "asan-realign-stack",
321  cl::desc("Realign stack to the value of this flag (power of two)"),
322  cl::Hidden, cl::init(32));
323 
325  "asan-instrumentation-with-call-threshold",
326  cl::desc(
327  "If the function being instrumented contains more than "
328  "this number of memory accesses, use callbacks instead of "
329  "inline checks (-1 means never use callbacks)."),
330  cl::Hidden, cl::init(7000));
331 
333  "asan-memory-access-callback-prefix",
334  cl::desc("Prefix for memory access callbacks"), cl::Hidden,
335  cl::init("__asan_"));
336 
338  "asan-kernel-mem-intrinsic-prefix",
339  cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden,
340  cl::init(false));
341 
342 static cl::opt<bool>
343  ClInstrumentDynamicAllocas("asan-instrument-dynamic-allocas",
344  cl::desc("instrument dynamic allocas"),
345  cl::Hidden, cl::init(true));
346 
348  "asan-skip-promotable-allocas",
349  cl::desc("Do not instrument promotable allocas"), cl::Hidden,
350  cl::init(true));
351 
352 // These flags allow to change the shadow mapping.
353 // The shadow mapping looks like
354 // Shadow = (Mem >> scale) + offset
355 
356 static cl::opt<int> ClMappingScale("asan-mapping-scale",
357  cl::desc("scale of asan shadow mapping"),
358  cl::Hidden, cl::init(0));
359 
360 static cl::opt<uint64_t>
361  ClMappingOffset("asan-mapping-offset",
362  cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"),
363  cl::Hidden, cl::init(0));
364 
365 // Optimization flags. Not user visible, used mostly for testing
366 // and benchmarking the tool.
367 
368 static cl::opt<bool> ClOpt("asan-opt", cl::desc("Optimize instrumentation"),
369  cl::Hidden, cl::init(true));
370 
371 static cl::opt<bool> ClOptimizeCallbacks("asan-optimize-callbacks",
372  cl::desc("Optimize callbacks"),
373  cl::Hidden, cl::init(false));
374 
376  "asan-opt-same-temp", cl::desc("Instrument the same temp just once"),
377  cl::Hidden, cl::init(true));
378 
379 static cl::opt<bool> ClOptGlobals("asan-opt-globals",
380  cl::desc("Don't instrument scalar globals"),
381  cl::Hidden, cl::init(true));
382 
384  "asan-opt-stack", cl::desc("Don't instrument scalar stack variables"),
385  cl::Hidden, cl::init(false));
386 
388  "asan-stack-dynamic-alloca",
389  cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden,
390  cl::init(true));
391 
393  "asan-force-experiment",
394  cl::desc("Force optimization experiment (for testing)"), cl::Hidden,
395  cl::init(0));
396 
397 static cl::opt<bool>
398  ClUsePrivateAlias("asan-use-private-alias",
399  cl::desc("Use private aliases for global variables"),
400  cl::Hidden, cl::init(true));
401 
402 static cl::opt<bool>
403  ClUseOdrIndicator("asan-use-odr-indicator",
404  cl::desc("Use odr indicators to improve ODR reporting"),
405  cl::Hidden, cl::init(true));
406 
407 static cl::opt<bool>
408  ClUseGlobalsGC("asan-globals-live-support",
409  cl::desc("Use linker features to support dead "
410  "code stripping of globals"),
411  cl::Hidden, cl::init(true));
412 
413 // This is on by default even though there is a bug in gold:
414 // https://sourceware.org/bugzilla/show_bug.cgi?id=19002
415 static cl::opt<bool>
416  ClWithComdat("asan-with-comdat",
417  cl::desc("Place ASan constructors in comdat sections"),
418  cl::Hidden, cl::init(true));
419 
421  "asan-destructor-kind",
422  cl::desc("Sets the ASan destructor kind. The default is to use the value "
423  "provided to the pass constructor"),
424  cl::values(clEnumValN(AsanDtorKind::None, "none", "No destructors"),
426  "Use global destructors")),
428 
429 // Debug flags.
430 
431 static cl::opt<int> ClDebug("asan-debug", cl::desc("debug"), cl::Hidden,
432  cl::init(0));
433 
434 static cl::opt<int> ClDebugStack("asan-debug-stack", cl::desc("debug stack"),
435  cl::Hidden, cl::init(0));
436 
437 static cl::opt<std::string> ClDebugFunc("asan-debug-func", cl::Hidden,
438  cl::desc("Debug func"));
439 
440 static cl::opt<int> ClDebugMin("asan-debug-min", cl::desc("Debug min inst"),
441  cl::Hidden, cl::init(-1));
442 
443 static cl::opt<int> ClDebugMax("asan-debug-max", cl::desc("Debug max inst"),
444  cl::Hidden, cl::init(-1));
445 
446 STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
447 STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
448 STATISTIC(NumOptimizedAccessesToGlobalVar,
449  "Number of optimized accesses to global vars");
450 STATISTIC(NumOptimizedAccessesToStackVar,
451  "Number of optimized accesses to stack vars");
452 
453 namespace {
454 
/// This struct defines the shadow mapping using the rule:
///   shadow = (mem >> Scale) ADD-or-OR Offset.
/// If InGlobal is true, then
///   extern char __asan_shadow[];
///   shadow = (mem >> Scale) + &__asan_shadow
struct ShadowMapping {
  int Scale;           // log2 of application bytes covered by one shadow byte
  uint64_t Offset;     // constant combined with the shifted address
  bool OrShadowOffset; // combine Offset with OR instead of ADD
  bool InGlobal;       // compute shadow via the __asan_shadow global instead
};
466 
467 } // end anonymous namespace
468 
/// Compute the ShadowMapping (scale, constant offset, and the
/// OrShadowOffset / InGlobal strategy flags) for the given target.
///
/// \param TargetTriple  triple of the module being instrumented.
/// \param LongSize      pointer size in bits (selects the 32- vs 64-bit table).
/// \param IsKasan       true when instrumenting for the kernel (KASAN).
static ShadowMapping getShadowMapping(const Triple &TargetTriple, int LongSize,
                                      bool IsKasan) {
  // Decode the triple once up front; the offset selection below keys off
  // these flags.
  bool IsAndroid = TargetTriple.isAndroid();
  bool IsIOS = TargetTriple.isiOS() || TargetTriple.isWatchOS() ||
               TargetTriple.isDriverKit();
  bool IsMacOS = TargetTriple.isMacOSX();
  bool IsFreeBSD = TargetTriple.isOSFreeBSD();
  bool IsNetBSD = TargetTriple.isOSNetBSD();
  bool IsPS = TargetTriple.isPS();
  bool IsLinux = TargetTriple.isOSLinux();
  bool IsPPC64 = TargetTriple.getArch() == Triple::ppc64 ||
                 TargetTriple.getArch() == Triple::ppc64le;
  bool IsSystemZ = TargetTriple.getArch() == Triple::systemz;
  bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64;
  bool IsMIPSN32ABI = TargetTriple.getEnvironment() == Triple::GNUABIN32;
  bool IsMIPS32 = TargetTriple.isMIPS32();
  bool IsMIPS64 = TargetTriple.isMIPS64();
  bool IsArmOrThumb = TargetTriple.isARM() || TargetTriple.isThumb();
  bool IsAArch64 = TargetTriple.getArch() == Triple::aarch64;
  bool IsLoongArch64 = TargetTriple.getArch() == Triple::loongarch64;
  bool IsRISCV64 = TargetTriple.getArch() == Triple::riscv64;
  bool IsWindows = TargetTriple.isOSWindows();
  bool IsFuchsia = TargetTriple.isOSFuchsia();
  bool IsEmscripten = TargetTriple.isOSEmscripten();
  bool IsAMDGPU = TargetTriple.isAMDGPU();

  ShadowMapping Mapping;

  // Scale defaults to kDefaultShadowScale; -asan-mapping-scale overrides it.
  Mapping.Scale = kDefaultShadowScale;
  if (ClMappingScale.getNumOccurrences() > 0) {
    Mapping.Scale = ClMappingScale;
  }

  if (LongSize == 32) {
    if (IsAndroid)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsMIPSN32ABI)
      Mapping.Offset = kMIPS_ShadowOffsetN32;
    else if (IsMIPS32)
      Mapping.Offset = kMIPS32_ShadowOffset32;
    else if (IsFreeBSD)
      Mapping.Offset = kFreeBSD_ShadowOffset32;
    else if (IsNetBSD)
      Mapping.Offset = kNetBSD_ShadowOffset32;
    else if (IsIOS)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsWindows)
      Mapping.Offset = kWindowsShadowOffset32;
    else if (IsEmscripten)
      Mapping.Offset = kEmscriptenShadowOffset;
    else
      Mapping.Offset = kDefaultShadowOffset32;
  } else { // LongSize == 64
    // Fuchsia is always PIE, which means that the beginning of the address
    // space is always available.
    if (IsFuchsia)
      Mapping.Offset = 0;
    else if (IsPPC64)
      Mapping.Offset = kPPC64_ShadowOffset64;
    else if (IsSystemZ)
      Mapping.Offset = kSystemZ_ShadowOffset64;
    else if (IsFreeBSD && IsAArch64)
      Mapping.Offset = kFreeBSDAArch64_ShadowOffset64;
    else if (IsFreeBSD && !IsMIPS64) {
      if (IsKasan)
        Mapping.Offset = kFreeBSDKasan_ShadowOffset64;
      else
        Mapping.Offset = kFreeBSD_ShadowOffset64;
    } else if (IsNetBSD) {
      if (IsKasan)
        Mapping.Offset = kNetBSDKasan_ShadowOffset64;
      else
        Mapping.Offset = kNetBSD_ShadowOffset64;
    } else if (IsPS)
      Mapping.Offset = kPS_ShadowOffset64;
    else if (IsLinux && IsX86_64) {
      if (IsKasan)
        Mapping.Offset = kLinuxKasan_ShadowOffset64;
      else
        // Offset is aligned so that OR-combining with the shifted address
        // stays valid for the chosen scale.
        Mapping.Offset = (kSmallX86_64ShadowOffsetBase &
                          (kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale));
    } else if (IsWindows && IsX86_64) {
      Mapping.Offset = kWindowsShadowOffset64;
    } else if (IsMIPS64)
      Mapping.Offset = kMIPS64_ShadowOffset64;
    else if (IsIOS)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsMacOS && IsAArch64)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsAArch64)
      Mapping.Offset = kAArch64_ShadowOffset64;
    else if (IsLoongArch64)
      Mapping.Offset = kLoongArch64_ShadowOffset64;
    else if (IsRISCV64)
      Mapping.Offset = kRISCV64_ShadowOffset64;
    else if (IsAMDGPU)
      Mapping.Offset = (kSmallX86_64ShadowOffsetBase &
                        (kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale));
    else
      Mapping.Offset = kDefaultShadowOffset64;
  }

  // -asan-force-dynamic-shadow: load the shadow base per function instead of
  // baking in a constant.
  if (ClForceDynamicShadow) {
    Mapping.Offset = kDynamicShadowSentinel;
  }

  // -asan-mapping-offset overrides whatever was chosen above.
  if (ClMappingOffset.getNumOccurrences() > 0) {
    Mapping.Offset = ClMappingOffset;
  }

  // OR-ing shadow offset is more efficient (at least on x86) if the offset
  // is a power of two, but on ppc64 and loongarch64 we have to use add since
  // the shadow offset is not necessarily 1/8-th of the address space. On
  // SystemZ, we could OR the constant in a single instruction, but it's more
  // efficient to load it once and use indexed addressing.
  Mapping.OrShadowOffset = !IsAArch64 && !IsPPC64 && !IsSystemZ && !IsPS &&
                           !IsRISCV64 && !IsLoongArch64 &&
                           !(Mapping.Offset & (Mapping.Offset - 1)) &&
                           Mapping.Offset != kDynamicShadowSentinel;
  bool IsAndroidWithIfuncSupport =
      IsAndroid && !TargetTriple.isAndroidVersionLT(21);
  Mapping.InGlobal = ClWithIfunc && IsAndroidWithIfuncSupport && IsArmOrThumb;

  return Mapping;
}
594 
595 namespace llvm {
596 void getAddressSanitizerParams(const Triple &TargetTriple, int LongSize,
597  bool IsKasan, uint64_t *ShadowBase,
598  int *MappingScale, bool *OrShadowOffset) {
599  auto Mapping = getShadowMapping(TargetTriple, LongSize, IsKasan);
600  *ShadowBase = Mapping.Offset;
601  *MappingScale = Mapping.Scale;
602  *OrShadowOffset = Mapping.OrShadowOffset;
603 }
604 
606  : Packed(Packed),
607  AccessSizeIndex((Packed >> kAccessSizeIndexShift) & kAccessSizeIndexMask),
608  IsWrite((Packed >> kIsWriteShift) & kIsWriteMask),
609  CompileKernel((Packed >> kCompileKernelShift) & kCompileKernelMask) {}
610 
/// Pack (IsWrite, CompileKernel, AccessSizeIndex) into the single Packed
/// word, and mirror the individual fields.
ASanAccessInfo::ASanAccessInfo(bool IsWrite, bool CompileKernel,
                               uint8_t AccessSizeIndex)
    // The three bit ranges are disjoint (see the k*Shift / k*Mask constants
    // above), so for in-range values '+' assembles them like a bitwise OR.
    : Packed((IsWrite << kIsWriteShift) +
             (CompileKernel << kCompileKernelShift) +
             (AccessSizeIndex << kAccessSizeIndexShift)),
      AccessSizeIndex(AccessSizeIndex), IsWrite(IsWrite),
      CompileKernel(CompileKernel) {}
618 
619 } // namespace llvm
620 
/// Redzone size (in bytes) used for stack objects and globals at the given
/// shadow-mapping scale.
///
/// \param MappingScale  log2 of application bytes per shadow byte.
/// \return at least 32 bytes; for scales 6 and 7 the redzone grows to 64 and
///         128 bytes respectively.
static uint64_t getRedzoneSizeForScale(int MappingScale) {
  // Redzone used for stack and globals is at least 32 bytes.
  // For scales 6 and 7, the redzone has to be 64 and 128 bytes respectively.
  // Do the shift in 64 bits: the function returns uint64_t, and the previous
  // 32-bit "1U << MappingScale" would be UB for MappingScale >= 32.
  return std::max<uint64_t>(32, uint64_t(1) << MappingScale);
}
626 
/// Priority used for the ASan module constructor/destructor; Emscripten is
/// special-cased (see the comment above kAsanModuleCtorName).
static uint64_t GetCtorAndDtorPriority(Triple &TargetTriple) {
  if (TargetTriple.isOSEmscripten()) {
    // NOTE(review): the return statement on this branch was lost in
    // extraction — confirm the returned constant against upstream.
  } else {
    // NOTE(review): the return statement on this branch was lost in
    // extraction — confirm the returned constant against upstream.
  }
}
634 
635 namespace {
636 
/// AddressSanitizer: instrument the code in module to find memory bugs.
struct AddressSanitizer {
  /// \param SSGI           optional Stack Safety analysis results (may be null).
  /// \param CompileKernel  instrument for KASAN; -asan-kernel overrides.
  /// \param Recover        continue-after-error mode; -asan-recover overrides.
  /// \param UseAfterScope  also honored when -asan-use-after-scope is set.
  /// \param UseAfterReturn detection mode; -asan-use-after-return overrides.
  AddressSanitizer(Module &M, const StackSafetyGlobalInfo *SSGI,
                   bool CompileKernel = false, bool Recover = false,
                   bool UseAfterScope = false,
                   AsanDetectStackUseAfterReturnMode UseAfterReturn =
                       AsanDetectStackUseAfterReturnMode::Runtime)
      // Explicitly-passed command-line flags win over constructor arguments.
      : CompileKernel(ClEnableKasan.getNumOccurrences() > 0 ? ClEnableKasan
                                                            : CompileKernel),
        Recover(ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover),
        UseAfterScope(UseAfterScope || ClUseAfterScope),
        UseAfterReturn(ClUseAfterReturn.getNumOccurrences() ? ClUseAfterReturn
                                                            : UseAfterReturn),
        SSGI(SSGI) {
    C = &(M.getContext());
    LongSize = M.getDataLayout().getPointerSizeInBits();
    IntptrTy = Type::getIntNTy(*C, LongSize);
    Int8PtrTy = Type::getInt8PtrTy(*C);
    Int32Ty = Type::getInt32Ty(*C);
    TargetTriple = Triple(M.getTargetTriple());

    Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel);

    assert(this->UseAfterReturn != AsanDetectStackUseAfterReturnMode::Invalid);
  }

  /// Total size in bytes of one alloca: element size times the (constant)
  /// array size for array allocations, otherwise just the element size.
  uint64_t getAllocaSizeInBytes(const AllocaInst &AI) const {
    uint64_t ArraySize = 1;
    if (AI.isArrayAllocation()) {
      const ConstantInt *CI = dyn_cast<ConstantInt>(AI.getArraySize());
      assert(CI && "non-constant array size");
      ArraySize = CI->getZExtValue();
    }
    Type *Ty = AI.getAllocatedType();
    uint64_t SizeInBytes =
        // NOTE(review): the initializer expression was lost in extraction
        // (presumably the DataLayout alloc size of Ty) — confirm against
        // upstream.
    return SizeInBytes * ArraySize;
  }

  /// Check if we want (and can) handle this alloca.
  bool isInterestingAlloca(const AllocaInst &AI);

  /// True if the access through Ptr in Inst should not be instrumented.
  bool ignoreAccess(Instruction *Inst, Value *Ptr);
  void getInterestingMemoryOperands(
      // NOTE(review): the parameter list was lost in extraction — confirm
      // against upstream.

  // Instrumentation entry points for individual memory operations.
  void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
                     InterestingMemoryOperand &O, bool UseCalls,
                     const DataLayout &DL);
  void instrumentPointerComparisonOrSubtraction(Instruction *I);
  void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
                         Value *Addr, uint32_t TypeSize, bool IsWrite,
                         Value *SizeArgument, bool UseCalls, uint32_t Exp);
  Instruction *instrumentAMDGPUAddress(Instruction *OrigIns,
                                       Instruction *InsertBefore, Value *Addr,
                                       uint32_t TypeSize, bool IsWrite,
                                       Value *SizeArgument);
  void instrumentUnusualSizeOrAlignment(Instruction *I,
                                        Instruction *InsertBefore, Value *Addr,
                                        uint32_t TypeSize, bool IsWrite,
                                        Value *SizeArgument, bool UseCalls,
                                        uint32_t Exp);
  Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
                           Value *ShadowValue, uint32_t TypeSize);
  Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr,
                                 bool IsWrite, size_t AccessSizeIndex,
                                 Value *SizeArgument, uint32_t Exp);
  void instrumentMemIntrinsic(MemIntrinsic *MI);
  Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
  bool suppressInstrumentationSiteForDebug(int &Instrumented);
  bool instrumentFunction(Function &F, const TargetLibraryInfo *TLI);
  bool maybeInsertAsanInitAtFunctionEntry(Function &F);
  bool maybeInsertDynamicShadowAtFunctionEntry(Function &F);
  void markEscapedLocalAllocas(Function &F);

private:
  friend struct FunctionStackPoisoner;

  void initializeCallbacks(Module &M);

  bool LooksLikeCodeInBug11395(Instruction *I);
  bool GlobalIsLinkerInitialized(GlobalVariable *G);
  bool isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis, Value *Addr,
                    uint64_t TypeSize) const;

  /// Helper to cleanup per-function state.
  struct FunctionStateRAII {
    AddressSanitizer *Pass;

    FunctionStateRAII(AddressSanitizer *Pass) : Pass(Pass) {
      assert(Pass->ProcessedAllocas.empty() &&
             "last pass forgot to clear cache");
      assert(!Pass->LocalDynamicShadow);
    }

    // Reset the per-function alloca cache and dynamic-shadow pointer when
    // the scope ends.
    ~FunctionStateRAII() {
      Pass->LocalDynamicShadow = nullptr;
      Pass->ProcessedAllocas.clear();
    }
  };

  LLVMContext *C;
  Triple TargetTriple;
  int LongSize;
  bool CompileKernel;
  bool Recover;
  bool UseAfterScope;
  AsanDetectStackUseAfterReturnMode UseAfterReturn;
  Type *IntptrTy;
  Type *Int8PtrTy;
  Type *Int32Ty;
  ShadowMapping Mapping;
  FunctionCallee AsanHandleNoReturnFunc;
  FunctionCallee AsanPtrCmpFunction, AsanPtrSubFunction;
  Constant *AsanShadowGlobal;

  // These arrays are indexed by AccessIsWrite, Experiment and log2(AccessSize).
  FunctionCallee AsanErrorCallback[2][2][kNumberOfAccessSizes];
  FunctionCallee AsanMemoryAccessCallback[2][2][kNumberOfAccessSizes];

  // These arrays are indexed by AccessIsWrite and Experiment.
  FunctionCallee AsanErrorCallbackSized[2][2];
  FunctionCallee AsanMemoryAccessCallbackSized[2][2];

  FunctionCallee AsanMemmove, AsanMemcpy, AsanMemset;
  Value *LocalDynamicShadow = nullptr;
  const StackSafetyGlobalInfo *SSGI;
  DenseMap<const AllocaInst *, bool> ProcessedAllocas;

  FunctionCallee AMDGPUAddressShared;
  FunctionCallee AMDGPUAddressPrivate;
};
769 
/// Module-level part of ASan: global-variable instrumentation and the
/// module constructor/destructor.
class ModuleAddressSanitizer {
public:
  /// Explicitly-passed command-line flags win over constructor arguments.
  ModuleAddressSanitizer(Module &M, bool CompileKernel = false,
                         bool Recover = false, bool UseGlobalsGC = true,
                         bool UseOdrIndicator = true,
                         AsanDtorKind DestructorKind = AsanDtorKind::Global)
      : CompileKernel(ClEnableKasan.getNumOccurrences() > 0 ? ClEnableKasan
                                                            : CompileKernel),
        Recover(ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover),
        UseGlobalsGC(UseGlobalsGC && ClUseGlobalsGC && !this->CompileKernel),
        // Enable aliases as they should have no downside with ODR indicators.
        UsePrivateAlias(ClUsePrivateAlias.getNumOccurrences() > 0
                            // NOTE(review): the '? <value>' branch of this
                            // ternary was lost in extraction — confirm
                            // against upstream.
                            : UseOdrIndicator),
        UseOdrIndicator(ClUseOdrIndicator.getNumOccurrences() > 0
                            // NOTE(review): the '? <value>' branch of this
                            // ternary was lost in extraction — confirm
                            // against upstream.
                            : UseOdrIndicator),
        // Not a typo: ClWithComdat is almost completely pointless without
        // ClUseGlobalsGC (because then it only works on modules without
        // globals, which are rare); it is a prerequisite for ClUseGlobalsGC;
        // and both suffer from gold PR19002 for which UseGlobalsGC constructor
        // argument is designed as workaround. Therefore, disable both
        // ClWithComdat and ClUseGlobalsGC unless the frontend says it's ok to
        // do globals-gc.
        UseCtorComdat(UseGlobalsGC && ClWithComdat && !this->CompileKernel),
        DestructorKind(DestructorKind) {
    C = &(M.getContext());
    int LongSize = M.getDataLayout().getPointerSizeInBits();
    IntptrTy = Type::getIntNTy(*C, LongSize);
    TargetTriple = Triple(M.getTargetTriple());
    Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel);

    // NOTE(review): the guarding condition for this override (presumably
    // checking that the flag was actually set to a valid kind) was lost in
    // extraction — confirm against upstream.
    this->DestructorKind = ClOverrideDestructorKind;
    assert(this->DestructorKind != AsanDtorKind::Invalid);
  }

  bool instrumentModule(Module &);

private:
  void initializeCallbacks(Module &M);

  // Global-variable instrumentation, with per-object-format specializations.
  bool InstrumentGlobals(IRBuilder<> &IRB, Module &M, bool *CtorComdat);
  void InstrumentGlobalsCOFF(IRBuilder<> &IRB, Module &M,
                             ArrayRef<GlobalVariable *> ExtendedGlobals,
                             ArrayRef<Constant *> MetadataInitializers);
  void InstrumentGlobalsELF(IRBuilder<> &IRB, Module &M,
                            ArrayRef<GlobalVariable *> ExtendedGlobals,
                            ArrayRef<Constant *> MetadataInitializers,
                            const std::string &UniqueModuleId);
  void InstrumentGlobalsMachO(IRBuilder<> &IRB, Module &M,
                              ArrayRef<GlobalVariable *> ExtendedGlobals,
                              ArrayRef<Constant *> MetadataInitializers);
  void
  InstrumentGlobalsWithMetadataArray(IRBuilder<> &IRB, Module &M,
                                     ArrayRef<GlobalVariable *> ExtendedGlobals,
                                     ArrayRef<Constant *> MetadataInitializers);

  GlobalVariable *CreateMetadataGlobal(Module &M, Constant *Initializer,
                                       StringRef OriginalName);
  void SetComdatForGlobalMetadata(GlobalVariable *G, GlobalVariable *Metadata,
                                  StringRef InternalSuffix);
  Instruction *CreateAsanModuleDtor(Module &M);

  const GlobalVariable *getExcludedAliasedGlobal(const GlobalAlias &GA) const;
  bool shouldInstrumentGlobal(GlobalVariable *G) const;
  bool ShouldUseMachOGlobalsSection() const;
  StringRef getGlobalMetadataSection() const;
  void poisonOneInitializer(Function &GlobalInit, GlobalValue *ModuleName);
  void createInitializerPoisonCalls(Module &M, GlobalValue *ModuleName);
  // Minimum redzone for globals, derived from the shadow scale.
  uint64_t getMinRedzoneSizeForGlobal() const {
    return getRedzoneSizeForScale(Mapping.Scale);
  }
  uint64_t getRedzoneSizeForGlobal(uint64_t SizeInBytes) const;
  int GetAsanVersion(const Module &M) const;

  bool CompileKernel;
  bool Recover;
  bool UseGlobalsGC;
  bool UsePrivateAlias;
  bool UseOdrIndicator;
  bool UseCtorComdat;
  AsanDtorKind DestructorKind;
  Type *IntptrTy;
  LLVMContext *C;
  Triple TargetTriple;
  ShadowMapping Mapping;
  FunctionCallee AsanPoisonGlobals;
  FunctionCallee AsanUnpoisonGlobals;
  FunctionCallee AsanRegisterGlobals;
  FunctionCallee AsanUnregisterGlobals;
  FunctionCallee AsanRegisterImageGlobals;
  FunctionCallee AsanUnregisterImageGlobals;
  FunctionCallee AsanRegisterElfGlobals;
  FunctionCallee AsanUnregisterElfGlobals;

  Function *AsanCtorFunction = nullptr;
  Function *AsanDtorFunction = nullptr;
};
869 
870 // Stack poisoning does not play well with exception handling.
871 // When an exception is thrown, we essentially bypass the code
872 // that unpoisones the stack. This is why the run-time library has
873 // to intercept __cxa_throw (as well as longjmp, etc) and unpoison the entire
874 // stack in the interceptor. This however does not work inside the
875 // actual function which catches the exception. Most likely because the
876 // compiler hoists the load of the shadow value somewhere too high.
877 // This causes asan to report a non-existing bug on 453.povray.
878 // It sounds like an LLVM bug.
// Per-function stack poisoning visitor: walks one function, collects its
// allocas, lifetime markers and exit instructions, then rewrites the stack
// frame with poisoned redzones (see processStaticAllocas /
// processDynamicAllocas).
struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
  Function &F;               // The function being instrumented.
  AddressSanitizer &ASan;    // Parent sanitizer: options, callbacks, mapping.
  DIBuilder DIB;             // Used to update debug info for rewritten allocas.
  LLVMContext *C;
  Type *IntptrTy;            // Pointer-sized integer type for this target.
  Type *IntptrPtrTy;         // Pointer to IntptrTy.
  ShadowMapping Mapping;     // Shadow scale/offset parameters.

  // NOTE(review): this paste appears to be missing two container declarations
  // here — the list of instrumentable static allocas (`AllocaVec`) and the
  // list of collected return-like instructions (`RetVec`), both of which are
  // used below — restore them from upstream before building.
  SmallVector<AllocaInst *, 16> StaticAllocasToMoveUp;

  // Runtime entry points for fake-stack allocation/free, one per size class.
  FunctionCallee AsanStackMallocFunc[kMaxAsanStackMallocSizeClass + 1],
      AsanStackFreeFunc[kMaxAsanStackMallocSizeClass + 1];
  // Shadow-setting helpers, indexed by the shadow byte value (0x00..0xff).
  FunctionCallee AsanSetShadowFunc[0x100] = {};
  FunctionCallee AsanPoisonStackMemoryFunc, AsanUnpoisonStackMemoryFunc;
  FunctionCallee AsanAllocaPoisonFunc, AsanAllocasUnpoisonFunc;

  // Stores a place and arguments of poisoning/unpoisoning call for alloca.
  struct AllocaPoisonCall {
    IntrinsicInst *InsBefore; // Lifetime intrinsic to instrument at.
    AllocaInst *AI;           // Alloca the lifetime marker refers to.
    uint64_t Size;            // Byte size taken from the lifetime marker.
    bool DoPoison;            // true for lifetime.end (poison),
                              // false for lifetime.start (unpoison).
  };
  SmallVector<AllocaPoisonCall, 8> DynamicAllocaPoisonCallVec;
  SmallVector<AllocaPoisonCall, 8> StaticAllocaPoisonCallVec;
  // Set when a lifetime marker could not be traced back to a single alloca;
  // use-after-scope poisoning is then disabled for safety (see runOnFunction).
  bool HasUntracedLifetimeIntrinsic = false;

  SmallVector<AllocaInst *, 1> DynamicAllocaVec;   // Non-static allocas.
  SmallVector<IntrinsicInst *, 1> StackRestoreVec; // llvm.stackrestore calls.
  AllocaInst *DynamicAllocaLayout = nullptr;
  IntrinsicInst *LocalEscapeCall = nullptr; // llvm.localescape, if present.

  bool HasInlineAsm = false;        // Saw (real) inline asm in this function.
  bool HasReturnsTwiceCall = false; // Saw a returns_twice (setjmp-like) call.
  bool PoisonStack;                 // Master switch for this function.

  FunctionStackPoisoner(Function &F, AddressSanitizer &ASan)
      : F(F), ASan(ASan), DIB(*F.getParent(), /*AllowUnresolved*/ false),
        C(ASan.C), IntptrTy(ASan.IntptrTy),
        IntptrPtrTy(PointerType::get(IntptrTy, 0)), Mapping(ASan.Mapping),
        // Stack poisoning is switched off entirely on AMDGPU targets.
        PoisonStack(ClStack &&
                    !Triple(F.getParent()->getTargetTriple()).isAMDGPU()) {}

  // Entry point; returns true iff the function was modified.
  bool runOnFunction() {
    if (!PoisonStack)
      return false;

    if (ClRedzoneByvalArgs)
      copyArgsPassedByValToAllocas();

    // Collect alloca, ret, lifetime instructions etc.
    for (BasicBlock *BB : depth_first(&F.getEntryBlock())) visit(*BB);

    if (AllocaVec.empty() && DynamicAllocaVec.empty()) return false;

    initializeCallbacks(*F.getParent());

    if (HasUntracedLifetimeIntrinsic) {
      // If there are lifetime intrinsics which couldn't be traced back to an
      // alloca, we may not know exactly when a variable enters scope, and
      // therefore should "fail safe" by not poisoning them.
      StaticAllocaPoisonCallVec.clear();
      DynamicAllocaPoisonCallVec.clear();
    }

    processDynamicAllocas();
    processStaticAllocas();

    if (ClDebugStack) {
      LLVM_DEBUG(dbgs() << F);
    }
    return true;
  }

  // Arguments marked with the "byval" attribute are implicitly copied without
  // using an alloca instruction. To produce redzones for those arguments, we
  // copy them a second time into memory allocated with an alloca instruction.
  void copyArgsPassedByValToAllocas();

  // Finds all Alloca instructions and puts
  // poisoned red zones around all of them.
  // Then unpoison everything back before the function returns.
  void processStaticAllocas();
  void processDynamicAllocas();

  void createDynamicAllocasInitStorage();

  // ----------------------- Visitors.
  /// Collect all Ret instructions, or the musttail call instruction if it
  /// precedes the return instruction.
  void visitReturnInst(ReturnInst &RI) {
    // NOTE(review): the `if` line that binds CI (presumably the block's
    // terminating musttail call, if any) is missing from this paste —
    // restore from upstream.
      RetVec.push_back(CI);
    else
      RetVec.push_back(&RI);
  }

  /// Collect all Resume instructions.
  void visitResumeInst(ResumeInst &RI) { RetVec.push_back(&RI); }

  /// Collect all CatchReturnInst instructions.
  void visitCleanupReturnInst(CleanupReturnInst &CRI) { RetVec.push_back(&CRI); }

  // Emit a runtime call unpoisoning every dynamic alloca allocated after
  // SavedStack, inserted immediately before InstBefore.
  void unpoisonDynamicAllocasBeforeInst(Instruction *InstBefore,
                                        Value *SavedStack) {
    IRBuilder<> IRB(InstBefore);
    Value *DynamicAreaPtr = IRB.CreatePtrToInt(SavedStack, IntptrTy);
    // When we insert _asan_allocas_unpoison before @llvm.stackrestore, we
    // need to adjust extracted SP to compute the address of the most recent
    // alloca. We have a special @llvm.get.dynamic.area.offset intrinsic for
    // this purpose.
    if (!isa<ReturnInst>(InstBefore)) {
      Function *DynamicAreaOffsetFunc = Intrinsic::getDeclaration(
          InstBefore->getModule(), Intrinsic::get_dynamic_area_offset,
          {IntptrTy});

      Value *DynamicAreaOffset = IRB.CreateCall(DynamicAreaOffsetFunc, {});

      DynamicAreaPtr = IRB.CreateAdd(IRB.CreatePtrToInt(SavedStack, IntptrTy),
                                     DynamicAreaOffset);
    }

    IRB.CreateCall(
        AsanAllocasUnpoisonFunc,
        {IRB.CreateLoad(IntptrTy, DynamicAllocaLayout), DynamicAreaPtr});
  }

  // Unpoison dynamic allocas redzones.
  void unpoisonDynamicAllocas() {
    // ...at every collected function exit...
    for (Instruction *Ret : RetVec)
      unpoisonDynamicAllocasBeforeInst(Ret, DynamicAllocaLayout);

    // ...and before every stackrestore, which releases stack memory early.
    for (Instruction *StackRestoreInst : StackRestoreVec)
      unpoisonDynamicAllocasBeforeInst(StackRestoreInst,
                                       StackRestoreInst->getOperand(0));
  }

  // Deploy and poison redzones around dynamic alloca call. To do this, we
  // should replace this call with another one with changed parameters and
  // replace all its uses with new address, so
  //   addr = alloca type, old_size, align
  // is replaced by
  //   new_size = (old_size + additional_size) * sizeof(type)
  //   tmp = alloca i8, new_size, max(align, 32)
  //   addr = tmp + 32 (first 32 bytes are for the left redzone).
  // Additional_size is added to make new memory allocation contain not only
  // requested memory, but also left, partial and right redzones.
  void handleDynamicAllocaCall(AllocaInst *AI);

  /// Collect Alloca instructions we want (and can) handle.
  void visitAllocaInst(AllocaInst &AI) {
    if (!ASan.isInterestingAlloca(AI)) {
      if (AI.isStaticAlloca()) {
        // Skip over allocas that are present *before* the first instrumented
        // alloca, we don't want to move those around.
        if (AllocaVec.empty())
          return;

        StaticAllocasToMoveUp.push_back(&AI);
      }
      return;
    }

    if (!AI.isStaticAlloca())
      DynamicAllocaVec.push_back(&AI);
    else
      AllocaVec.push_back(&AI);
  }

  /// Collect lifetime intrinsic calls to check for use-after-scope
  /// errors.
  void visitIntrinsicInst(IntrinsicInst &II) {
    // NOTE(review): the declaration of ID (the intrinsic ID of II) is
    // missing from this paste — restore from upstream.
    if (ID == Intrinsic::stackrestore) StackRestoreVec.push_back(&II);
    if (ID == Intrinsic::localescape) LocalEscapeCall = &II;
    if (!ASan.UseAfterScope)
      return;
    if (!II.isLifetimeStartOrEnd())
      return;
    // Found lifetime intrinsic, add ASan instrumentation if necessary.
    auto *Size = cast<ConstantInt>(II.getArgOperand(0));
    // If size argument is undefined, don't do anything.
    if (Size->isMinusOne()) return;
    // Check that size doesn't saturate uint64_t and can
    // be stored in IntptrTy.
    const uint64_t SizeValue = Size->getValue().getLimitedValue();
    if (SizeValue == ~0ULL ||
        !ConstantInt::isValueValidForType(IntptrTy, SizeValue))
      return;
    // Find alloca instruction that corresponds to llvm.lifetime argument.
    // Currently we can only handle lifetime markers pointing to the
    // beginning of the alloca.
    AllocaInst *AI = findAllocaForValue(II.getArgOperand(1), true);
    if (!AI) {
      HasUntracedLifetimeIntrinsic = true;
      return;
    }
    // We're interested only in allocas we can handle.
    if (!ASan.isInterestingAlloca(*AI))
      return;
    bool DoPoison = (ID == Intrinsic::lifetime_end);
    AllocaPoisonCall APC = {&II, AI, SizeValue, DoPoison};
    if (AI->isStaticAlloca())
      StaticAllocaPoisonCallVec.push_back(APC);
    else if (ClInstrumentDynamicAllocas)
      DynamicAllocaPoisonCallVec.push_back(APC);
  }

  // Record inline-asm and returns-twice calls; both constrain how the stack
  // frame may be rewritten.
  void visitCallBase(CallBase &CB) {
    if (CallInst *CI = dyn_cast<CallInst>(&CB)) {
      HasInlineAsm |= CI->isInlineAsm() && &CB != ASan.LocalDynamicShadow;
      HasReturnsTwiceCall |= CI->canReturnTwice();
    }
  }

  // ---------------------- Helpers.
  void initializeCallbacks(Module &M);

  // Copies bytes from ShadowBytes into shadow memory for indexes where
  // ShadowMask is not zero. If ShadowMask[i] is zero, we assume that
  // ShadowBytes[i] is constantly zero and doesn't need to be overwritten.
  void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
                    IRBuilder<> &IRB, Value *ShadowBase);
  void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
                    size_t Begin, size_t End, IRBuilder<> &IRB,
                    Value *ShadowBase);
  void copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
                          ArrayRef<uint8_t> ShadowBytes, size_t Begin,
                          size_t End, IRBuilder<> &IRB, Value *ShadowBase);

  void poisonAlloca(Value *V, uint64_t Size, IRBuilder<> &IRB, bool DoPoison);

  Value *createAllocaForLayout(IRBuilder<> &IRB, const ASanStackFrameLayout &L,
                               bool Dynamic);
  PHINode *createPHI(IRBuilder<> &IRB, Value *Cond, Value *ValueIfTrue,
                     Instruction *ThenTerm, Value *ValueIfFalse);
};
1119 
1120 } // end anonymous namespace
1121 
1122 void AddressSanitizerPass::printPipeline(
1123  raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
1124  static_cast<PassInfoMixin<AddressSanitizerPass> *>(this)->printPipeline(
1125  OS, MapClassName2PassName);
1126  OS << "<";
1127  if (Options.CompileKernel)
1128  OS << "kernel";
1129  OS << ">";
1130 }
1131 
// Constructor: stores the options verbatim; all real work happens in run().
// NOTE(review): the first line of this constructor's signature
// (`AddressSanitizerPass::AddressSanitizerPass(`) is missing from this
// paste — restore it from upstream before building.
    const AddressSanitizerOptions &Options, bool UseGlobalGC,
    bool UseOdrIndicator, AsanDtorKind DestructorKind)
    : Options(Options), UseGlobalGC(UseGlobalGC),
      UseOdrIndicator(UseOdrIndicator), DestructorKind(DestructorKind) {}
1137 
// NOTE(review): the signature of this pass entry point
// (`PreservedAnalyses AddressSanitizerPass::run(Module &M,
// ModuleAnalysisManager &MAM)`) is missing from this paste.
  // One module-level sanitizer handles globals and ctors/dtors; each function
  // gets its own function-level sanitizer instance below.
  ModuleAddressSanitizer ModuleSanitizer(M, Options.CompileKernel,
                                         Options.Recover, UseGlobalGC,
                                         UseOdrIndicator, DestructorKind);
  bool Modified = false;
  auto &FAM = MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
  const StackSafetyGlobalInfo *const SSGI =
  // NOTE(review): the initializer of SSGI is missing from this paste
  // (presumably a cached stack-safety analysis result) — verify upstream.
  for (Function &F : M) {
    AddressSanitizer FunctionSanitizer(M, SSGI, Options.CompileKernel,
                                       Options.Recover, Options.UseAfterScope,
                                       Options.UseAfterReturn);
    // NOTE(review): the declaration of TLI (used on the next line) is
    // missing from this paste.
    Modified |= FunctionSanitizer.instrumentFunction(F, &TLI);
  }
  Modified |= ModuleSanitizer.instrumentModule(M);
  if (!Modified)
    return PreservedAnalyses::all();

  // NOTE(review): the declaration of PA (a PreservedAnalyses value) is
  // missing from this paste.
  // GlobalsAA is considered stateless and does not get invalidated unless
  // explicitly invalidated; PreservedAnalyses::none() is not enough. Sanitizers
  // make changes that require GlobalsAA to be invalidated.
  PA.abandon<GlobalsAA>();
  return PA;
}
1165 
// NOTE(review): the signature line of this helper (a static function taking
// the access size in bits, `uint32_t TypeSize`) is missing from this paste.
  // Map an access size in bits to the runtime's size-class index:
  // log2 of the byte size, i.e. 8->0, 16->1, 32->2, 64->3, 128->4.
  size_t Res = countTrailingZeros(TypeSize / 8);
  // NOTE(review): an assertion bounding Res appears to be missing here in
  // this paste — verify against upstream.
  return Res;
}
1171 
/// Check if \p G has been created by a trusted compiler pass.
// NOTE(review): the signature line of this helper (a static predicate taking
// `GlobalVariable *G`) is missing from this paste — restore from upstream.
  // Do not instrument @llvm.global_ctors, @llvm.used, etc.
  if (G->getName().startswith("llvm.") ||
      // Do not instrument gcov counter arrays.
      G->getName().startswith("__llvm_gcov_ctr") ||
      // Do not instrument rtti proxy symbols for function sanitizer.
      G->getName().startswith("__llvm_rtti_proxy"))
    return true;

  // Do not instrument asan globals.
  if (G->getName().startswith(kAsanGenPrefix) ||
      G->getName().startswith(kSanCovGenPrefix) ||
      G->getName().startswith(kODRGenPrefix))
    return true;

  return false;
}
1190 
// NOTE(review): the signature line of this helper (a static predicate taking
// `Value *Addr`) is missing from this paste — restore from upstream.
  Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
  unsigned int AddrSpace = PtrTy->getPointerAddressSpace();
  // Address spaces 3 and 5 are excluded from instrumentation on AMDGPU.
  // NOTE(review): presumably these are the LDS/local (3) and private/scratch
  // (5) spaces — confirm against the AMDGPU address-space documentation.
  if (AddrSpace == 3 || AddrSpace == 5)
    return true;
  return false;
}
1198 
1199 Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
1200  // Shadow >> scale
1201  Shadow = IRB.CreateLShr(Shadow, Mapping.Scale);
1202  if (Mapping.Offset == 0) return Shadow;
1203  // (Shadow >> scale) | offset
1204  Value *ShadowBase;
1205  if (LocalDynamicShadow)
1206  ShadowBase = LocalDynamicShadow;
1207  else
1208  ShadowBase = ConstantInt::get(IntptrTy, Mapping.Offset);
1209  if (Mapping.OrShadowOffset)
1210  return IRB.CreateOr(Shadow, ShadowBase);
1211  else
1212  return IRB.CreateAdd(Shadow, ShadowBase);
1213 }
1214 
1215 // Instrument memset/memmove/memcpy
1216 void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
1217  IRBuilder<> IRB(MI);
1218  if (isa<MemTransferInst>(MI)) {
1219  IRB.CreateCall(
1220  isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy,
1221  {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
1222  IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()),
1223  IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
1224  } else if (isa<MemSetInst>(MI)) {
1225  IRB.CreateCall(
1226  AsanMemset,
1227  {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
1228  IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
1229  IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
1230  }
1231  MI->eraseFromParent();
1232 }
1233 
/// Check if we want (and can) handle this alloca.
bool AddressSanitizer::isInterestingAlloca(const AllocaInst &AI) {
  // Memoized: this predicate is queried repeatedly for the same alloca.
  auto PreviouslySeenAllocaInfo = ProcessedAllocas.find(&AI);

  if (PreviouslySeenAllocaInfo != ProcessedAllocas.end())
    return PreviouslySeenAllocaInfo->getSecond();

  bool IsInteresting =
      (AI.getAllocatedType()->isSized() &&
       // alloca() may be called with 0 size, ignore it.
       ((!AI.isStaticAlloca()) || getAllocaSizeInBytes(AI) > 0) &&
       // We are only interested in allocas not promotable to registers.
       // Promotable allocas are common under -O0.
       // NOTE(review): the promotability term of this conjunction is missing
       // from this paste — restore from upstream before building.
       // inalloca allocas are not treated as static, and we don't want
       // dynamic alloca instrumentation for them as well.
       !AI.isUsedWithInAlloca() &&
       // swifterror allocas are register promoted by ISel
       !AI.isSwiftError() &&
       // safe allocas are not interesting
       !(SSGI && SSGI->isSafe(AI)));

  ProcessedAllocas[&AI] = IsInteresting;
  return IsInteresting;
}
1259 
// Returns true when the access through Ptr should NOT be instrumented.
bool AddressSanitizer::ignoreAccess(Instruction *Inst, Value *Ptr) {
  // Instrument accesses from different address spaces only for AMDGPU.
  Type *PtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
  if (PtrTy->getPointerAddressSpace() != 0 &&
      !(TargetTriple.isAMDGPU() && !isUnsupportedAMDGPUAddrspace(Ptr)))
    return true;

  // Ignore swifterror addresses.
  // swifterror memory addresses are mem2reg promoted by instruction
  // selection. As such they cannot have regular uses like an instrumentation
  // function and it makes no sense to track them as memory.
  if (Ptr->isSwiftError())
    return true;

  // Treat memory accesses to promotable allocas as non-interesting since they
  // will not cause memory violations. This greatly speeds up the instrumented
  // executable at -O0.
  if (auto AI = dyn_cast_or_null<AllocaInst>(Ptr))
    if (ClSkipPromotableAllocas && !isInterestingAlloca(*AI))
      return true;

  if (SSGI != nullptr && SSGI->stackAccessIsSafe(*Inst) &&
      // NOTE(review): the final operand of this && chain is missing from
      // this paste — restore from upstream before building.
    return true;

  return false;
}
1287 
// Collect the memory operands of instruction I that should be instrumented,
// appending one descriptor per interesting access.
void AddressSanitizer::getInterestingMemoryOperands(
// NOTE(review): the remainder of this signature (the `Instruction *I`
// parameter and the `Interesting` out-vector) is missing from this paste —
// restore from upstream before building.
  // Do not instrument the load fetching the dynamic shadow address.
  if (LocalDynamicShadow == I)
    return;

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!ClInstrumentReads || ignoreAccess(I, LI->getPointerOperand()))
      return;
    Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
                             LI->getType(), LI->getAlign());
  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (!ClInstrumentWrites || ignoreAccess(I, SI->getPointerOperand()))
      return;
    Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,
                             SI->getValueOperand()->getType(), SI->getAlign());
  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
    if (!ClInstrumentAtomics || ignoreAccess(I, RMW->getPointerOperand()))
      return;
    Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,
                             RMW->getValOperand()->getType(), None);
  } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
    if (!ClInstrumentAtomics || ignoreAccess(I, XCHG->getPointerOperand()))
      return;
    Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
                             XCHG->getCompareOperand()->getType(), None);
  } else if (auto CI = dyn_cast<CallInst>(I)) {
    if (CI->getIntrinsicID() == Intrinsic::masked_load ||
        CI->getIntrinsicID() == Intrinsic::masked_store) {
      bool IsWrite = CI->getIntrinsicID() == Intrinsic::masked_store;
      // Masked store has an initial operand for the value.
      unsigned OpOffset = IsWrite ? 1 : 0;
      if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
        return;

      auto BasePtr = CI->getOperand(OpOffset);
      if (ignoreAccess(I, BasePtr))
        return;
      Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
      MaybeAlign Alignment = Align(1);
      // Otherwise no alignment guarantees. We probably got Undef.
      if (auto *Op = dyn_cast<ConstantInt>(CI->getOperand(1 + OpOffset)))
        Alignment = Op->getMaybeAlignValue();
      Value *Mask = CI->getOperand(2 + OpOffset);
      Interesting.emplace_back(I, OpOffset, IsWrite, Ty, Alignment, Mask);
    } else {
      // Ordinary calls: byval arguments are implicit reads of the pointee.
      for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
        if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
            ignoreAccess(I, CI->getArgOperand(ArgNo)))
          continue;
        Type *Ty = CI->getParamByValType(ArgNo);
        Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
      }
    }
  }
}
1344 
1345 static bool isPointerOperand(Value *V) {
1346  return V->getType()->isPointerTy() || isa<PtrToIntInst>(V);
1347 }
1348 
// This is a rough heuristic; it may cause both false positives and
// false negatives. The proper implementation requires cooperation with
// the frontend.
// NOTE(review): the signature line of this helper (a static predicate taking
// `Instruction *I`) is missing from this paste — restore from upstream.
  if (ICmpInst *Cmp = dyn_cast<ICmpInst>(I)) {
    // Only relational comparisons (<, <=, >, >=) are interesting; pointer
    // equality comparisons are always well-defined.
    if (!Cmp->isRelational())
      return false;
  } else {
    return false;
  }
  return isPointerOperand(I->getOperand(0)) &&
         isPointerOperand(I->getOperand(1));
}
1362 
// This is a rough heuristic; it may cause both false positives and
// false negatives. The proper implementation requires cooperation with
// the frontend.
// NOTE(review): the signature line of this helper (a static predicate taking
// `Instruction *I`) is missing from this paste — restore from upstream.
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
    // Only subtraction of two pointer-derived values is interesting.
    if (BO->getOpcode() != Instruction::Sub)
      return false;
  } else {
    return false;
  }
  return isPointerOperand(I->getOperand(0)) &&
         isPointerOperand(I->getOperand(1));
}
1376 
1377 bool AddressSanitizer::GlobalIsLinkerInitialized(GlobalVariable *G) {
1378  // If a global variable does not have dynamic initialization we don't
1379  // have to instrument it. However, if a global does not have initializer
1380  // at all, we assume it has dynamic initializer (in other TU).
1381  if (!G->hasInitializer())
1382  return false;
1383 
1384  if (G->hasSanitizerMetadata() && G->getSanitizerMetadata().IsDynInit)
1385  return false;
1386 
1387  return true;
1388 }
1389 
1390 void AddressSanitizer::instrumentPointerComparisonOrSubtraction(
1391  Instruction *I) {
1392  IRBuilder<> IRB(I);
1393  FunctionCallee F = isa<ICmpInst>(I) ? AsanPtrCmpFunction : AsanPtrSubFunction;
1394  Value *Param[2] = {I->getOperand(0), I->getOperand(1)};
1395  for (Value *&i : Param) {
1396  if (i->getType()->isPointerTy())
1397  i = IRB.CreatePointerCast(i, IntptrTy);
1398  }
1399  IRB.CreateCall(F, Param);
1400 }
1401 
1402 static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I,
1403  Instruction *InsertBefore, Value *Addr,
1404  MaybeAlign Alignment, unsigned Granularity,
1405  uint32_t TypeSize, bool IsWrite,
1406  Value *SizeArgument, bool UseCalls,
1407  uint32_t Exp) {
1408  // Instrument a 1-, 2-, 4-, 8-, or 16- byte access with one check
1409  // if the data is properly aligned.
1410  if ((TypeSize == 8 || TypeSize == 16 || TypeSize == 32 || TypeSize == 64 ||
1411  TypeSize == 128) &&
1412  (!Alignment || *Alignment >= Granularity || *Alignment >= TypeSize / 8))
1413  return Pass->instrumentAddress(I, InsertBefore, Addr, TypeSize, IsWrite,
1414  nullptr, UseCalls, Exp);
1415  Pass->instrumentUnusualSizeOrAlignment(I, InsertBefore, Addr, TypeSize,
1416  IsWrite, nullptr, UseCalls, Exp);
1417 }
1418 
// Instrument a masked vector load/store lane by lane: each potentially
// active element gets its own address check.
static void instrumentMaskedLoadOrStore(AddressSanitizer *Pass,
                                        const DataLayout &DL, Type *IntptrTy,
                                        Value *Mask, Instruction *I,
                                        Value *Addr, MaybeAlign Alignment,
                                        unsigned Granularity, Type *OpType,
                                        bool IsWrite, Value *SizeArgument,
                                        bool UseCalls, uint32_t Exp) {
  auto *VTy = cast<FixedVectorType>(OpType);
  uint64_t ElemTypeSize = DL.getTypeStoreSizeInBits(VTy->getScalarType());
  unsigned Num = VTy->getNumElements();
  auto Zero = ConstantInt::get(IntptrTy, 0);
  for (unsigned Idx = 0; Idx < Num; ++Idx) {
    Value *InstrumentedAddress = nullptr;
    Instruction *InsertBefore = I;
    if (auto *Vector = dyn_cast<ConstantVector>(Mask)) {
      // dyn_cast as we might get UndefValue
      if (auto *Masked = dyn_cast<ConstantInt>(Vector->getOperand(Idx))) {
        if (Masked->isZero())
          // Mask is constant false, so no instrumentation needed.
          continue;
        // If we have a true or undef value, fall through to doInstrumentAddress
        // with InsertBefore == I
      }
    } else {
      // Non-constant mask: guard the per-lane check behind a runtime test
      // of this lane's mask bit.
      IRBuilder<> IRB(I);
      Value *MaskElem = IRB.CreateExtractElement(Mask, Idx);
      Instruction *ThenTerm = SplitBlockAndInsertIfThen(MaskElem, I, false);
      InsertBefore = ThenTerm;
    }

    IRBuilder<> IRB(InsertBefore);
    // Compute the address of element Idx within the vector.
    InstrumentedAddress =
        IRB.CreateGEP(VTy, Addr, {Zero, ConstantInt::get(IntptrTy, Idx)});
    doInstrumentAddress(Pass, I, InsertBefore, InstrumentedAddress, Alignment,
                        Granularity, ElemTypeSize, IsWrite, SizeArgument,
                        UseCalls, Exp);
  }
}
1457 
// Instrument one interesting memory operand, unless an enabled optimization
// proves the access safe.
void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
                                     InterestingMemoryOperand &O, bool UseCalls,
                                     const DataLayout &DL) {
  Value *Addr = O.getPtr();

  // Optimization experiments.
  // The experiments can be used to evaluate potential optimizations that remove
  // instrumentation (assess false negatives). Instead of completely removing
  // some instrumentation, you set Exp to a non-zero value (mask of optimization
  // experiments that want to remove instrumentation of this instruction).
  // If Exp is non-zero, this pass will emit special calls into runtime
  // (e.g. __asan_report_exp_load1 instead of __asan_report_load1). These calls
  // make runtime terminate the program in a special way (with a different
  // exit status). Then you run the new compiler on a buggy corpus, collect
  // the special terminations (ideally, you don't see them at all -- no false
  // negatives) and make the decision on the optimization.
  // NOTE(review): the declaration of Exp (the experiment mask consumed
  // below) is missing from this paste — restore from upstream.

  if (ClOpt && ClOptGlobals) {
    // If initialization order checking is disabled, a simple access to a
    // dynamically initialized global is always valid.
    GlobalVariable *G = dyn_cast<GlobalVariable>(getUnderlyingObject(Addr));
    if (G && (!ClInitializers || GlobalIsLinkerInitialized(G)) &&
        isSafeAccess(ObjSizeVis, Addr, O.TypeSize)) {
      NumOptimizedAccessesToGlobalVar++;
      return;
    }
  }

  if (ClOpt && ClOptStack) {
    // A direct inbounds access to a stack variable is always valid.
    if (isa<AllocaInst>(getUnderlyingObject(Addr)) &&
        isSafeAccess(ObjSizeVis, Addr, O.TypeSize)) {
      NumOptimizedAccessesToStackVar++;
      return;
    }
  }

  // Statistics bookkeeping.
  if (O.IsWrite)
    NumInstrumentedWrites++;
  else
    NumInstrumentedReads++;

  unsigned Granularity = 1 << Mapping.Scale;
  if (O.MaybeMask) {
    // Masked vector access: check each (possibly) active lane separately.
    instrumentMaskedLoadOrStore(this, DL, IntptrTy, O.MaybeMask, O.getInsn(),
                                Addr, O.Alignment, Granularity, O.OpType,
                                O.IsWrite, nullptr, UseCalls, Exp);
  } else {
    doInstrumentAddress(this, O.getInsn(), O.getInsn(), Addr, O.Alignment,
                        Granularity, O.TypeSize, O.IsWrite, nullptr, UseCalls,
                        Exp);
  }
}
1512 
1513 Instruction *AddressSanitizer::generateCrashCode(Instruction *InsertBefore,
1514  Value *Addr, bool IsWrite,
1515  size_t AccessSizeIndex,
1516  Value *SizeArgument,
1517  uint32_t Exp) {
1518  IRBuilder<> IRB(InsertBefore);
1519  Value *ExpVal = Exp == 0 ? nullptr : ConstantInt::get(IRB.getInt32Ty(), Exp);
1520  CallInst *Call = nullptr;
1521  if (SizeArgument) {
1522  if (Exp == 0)
1523  Call = IRB.CreateCall(AsanErrorCallbackSized[IsWrite][0],
1524  {Addr, SizeArgument});
1525  else
1526  Call = IRB.CreateCall(AsanErrorCallbackSized[IsWrite][1],
1527  {Addr, SizeArgument, ExpVal});
1528  } else {
1529  if (Exp == 0)
1530  Call =
1531  IRB.CreateCall(AsanErrorCallback[IsWrite][0][AccessSizeIndex], Addr);
1532  else
1533  Call = IRB.CreateCall(AsanErrorCallback[IsWrite][1][AccessSizeIndex],
1534  {Addr, ExpVal});
1535  }
1536 
1537  Call->setCannotMerge();
1538  return Call;
1539 }
1540 
1541 Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
1542  Value *ShadowValue,
1543  uint32_t TypeSize) {
1544  size_t Granularity = static_cast<size_t>(1) << Mapping.Scale;
1545  // Addr & (Granularity - 1)
1546  Value *LastAccessedByte =
1547  IRB.CreateAnd(AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));
1548  // (Addr & (Granularity - 1)) + size - 1
1549  if (TypeSize / 8 > 1)
1550  LastAccessedByte = IRB.CreateAdd(
1551  LastAccessedByte, ConstantInt::get(IntptrTy, TypeSize / 8 - 1));
1552  // (uint8_t) ((Addr & (Granularity-1)) + size - 1)
1553  LastAccessedByte =
1554  IRB.CreateIntCast(LastAccessedByte, ShadowValue->getType(), false);
1555  // ((uint8_t) ((Addr & (Granularity-1)) + size - 1)) >= ShadowValue
1556  return IRB.CreateICmpSGE(LastAccessedByte, ShadowValue);
1557 }
1558 
// AMDGPU-specific pre-filtering of an access: returns nullptr to skip
// instrumentation entirely, or the insertion point to keep using.
Instruction *AddressSanitizer::instrumentAMDGPUAddress(
    Instruction *OrigIns, Instruction *InsertBefore, Value *Addr,
    uint32_t TypeSize, bool IsWrite, Value *SizeArgument) {
  // Do not instrument unsupported addrspaces.
  // NOTE(review): the condition guarding this early return is missing from
  // this paste — restore from upstream before building.
    return nullptr;
  Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
  // Follow host instrumentation for global and constant addresses.
  if (PtrTy->getPointerAddressSpace() != 0)
    return InsertBefore;
  // Instrument generic addresses in supported addressspaces.
  IRBuilder<> IRB(InsertBefore);
  Value *AddrLong = IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy());
  // Ask the runtime whether the generic pointer aliases shared or private
  // memory; only pointers mapping to neither take the host-style check.
  Value *IsShared = IRB.CreateCall(AMDGPUAddressShared, {AddrLong});
  Value *IsPrivate = IRB.CreateCall(AMDGPUAddressPrivate, {AddrLong});
  Value *IsSharedOrPrivate = IRB.CreateOr(IsShared, IsPrivate);
  Value *Cmp = IRB.CreateNot(IsSharedOrPrivate);
  // Branch so the shadow check only runs on the "neither" path.
  Value *AddrSpaceZeroLanding =
      SplitBlockAndInsertIfThen(Cmp, InsertBefore, false);
  InsertBefore = cast<Instruction>(AddrSpaceZeroLanding);
  return InsertBefore;
}
1581 
// Emit the check for one standard-size access: either a runtime call, the
// compact check intrinsic, or an inline shadow-byte test with a slow path.
void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
                                         Instruction *InsertBefore, Value *Addr,
                                         uint32_t TypeSize, bool IsWrite,
                                         Value *SizeArgument, bool UseCalls,
                                         uint32_t Exp) {
  // On AMDGPU, filter/route the access first; a null result means "skip".
  if (TargetTriple.isAMDGPU()) {
    InsertBefore = instrumentAMDGPUAddress(OrigIns, InsertBefore, Addr,
                                           TypeSize, IsWrite, SizeArgument);
    if (!InsertBefore)
      return;
  }

  IRBuilder<> IRB(InsertBefore);
  size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize);
  const ASanAccessInfo AccessInfo(IsWrite, CompileKernel, AccessSizeIndex);

  if (UseCalls && ClOptimizeCallbacks) {
    // Compact form: the access parameters are packed into one constant and
    // handed to the asan.check.memaccess intrinsic.
    const ASanAccessInfo AccessInfo(IsWrite, CompileKernel, AccessSizeIndex);
    Module *M = IRB.GetInsertBlock()->getParent()->getParent();
    IRB.CreateCall(
        Intrinsic::getDeclaration(M, Intrinsic::asan_check_memaccess),
        {IRB.CreatePointerCast(Addr, Int8PtrTy),
         ConstantInt::get(Int32Ty, AccessInfo.Packed)});
    return;
  }

  Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
  if (UseCalls) {
    // Outlined check: delegate to the per-size runtime callback.
    if (Exp == 0)
      IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex],
                     AddrLong);
    else
      IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][1][AccessSizeIndex],
                     {AddrLong, ConstantInt::get(IRB.getInt32Ty(), Exp)});
    return;
  }

  // Inline check: load the shadow byte(s) covering this address.
  Type *ShadowTy =
      IntegerType::get(*C, std::max(8U, TypeSize >> Mapping.Scale));
  Type *ShadowPtrTy = PointerType::get(ShadowTy, 0);
  Value *ShadowPtr = memToShadow(AddrLong, IRB);
  Value *ShadowValue =
      IRB.CreateLoad(ShadowTy, IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy));

  // Non-zero shadow means the access *may* be bad; the slow path decides.
  Value *Cmp = IRB.CreateIsNotNull(ShadowValue);
  size_t Granularity = 1ULL << Mapping.Scale;
  Instruction *CrashTerm = nullptr;

  if (ClAlwaysSlowPath || (TypeSize < 8 * Granularity)) {
    // We use branch weights for the slow path check, to indicate that the slow
    // path is rarely taken. This seems to be the case for SPEC benchmarks.
    // NOTE(review): the first line of the statement creating CheckTerm
    // (a SplitBlockAndInsertIfThen call taking the arguments below) is
    // missing from this paste — restore from upstream.
        Cmp, InsertBefore, false, MDBuilder(*C).createBranchWeights(1, 100000));
    assert(cast<BranchInst>(CheckTerm)->isUnconditional());
    BasicBlock *NextBB = CheckTerm->getSuccessor(0);
    IRB.SetInsertPoint(CheckTerm);
    Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeSize);
    if (Recover) {
      // Recoverable mode: report, then fall through.
      CrashTerm = SplitBlockAndInsertIfThen(Cmp2, CheckTerm, false);
    } else {
      // Non-recoverable: route the failing path to an unreachable block.
      BasicBlock *CrashBlock =
          BasicBlock::Create(*C, "", NextBB->getParent(), NextBB);
      CrashTerm = new UnreachableInst(*C, CrashBlock);
      BranchInst *NewTerm = BranchInst::Create(CrashBlock, NextBB, Cmp2);
      ReplaceInstWithInst(CheckTerm, NewTerm);
    }
  } else {
    CrashTerm = SplitBlockAndInsertIfThen(Cmp, InsertBefore, !Recover);
  }

  Instruction *Crash = generateCrashCode(CrashTerm, AddrLong, IsWrite,
                                         AccessSizeIndex, SizeArgument, Exp);
  // Attribute the report to the original access for debug info.
  Crash->setDebugLoc(OrigIns->getDebugLoc());
}
1656 
1657 // Instrument unusual size or unusual alignment.
1658 // We can not do it with a single check, so we do 1-byte check for the first
1659 // and the last bytes. We call __asan_report_*_n(addr, real_size) to be able
1660 // to report the actual access size.
1661 void AddressSanitizer::instrumentUnusualSizeOrAlignment(
1662  Instruction *I, Instruction *InsertBefore, Value *Addr, uint32_t TypeSize,
1663  bool IsWrite, Value *SizeArgument, bool UseCalls, uint32_t Exp) {
1664  IRBuilder<> IRB(InsertBefore);
1665  Value *Size = ConstantInt::get(IntptrTy, TypeSize / 8);
1666  Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
1667  if (UseCalls) {
1668  if (Exp == 0)
1669  IRB.CreateCall(AsanMemoryAccessCallbackSized[IsWrite][0],
1670  {AddrLong, Size});
1671  else
1672  IRB.CreateCall(AsanMemoryAccessCallbackSized[IsWrite][1],
1673  {AddrLong, Size, ConstantInt::get(IRB.getInt32Ty(), Exp)});
1674  } else {
1675  Value *LastByte = IRB.CreateIntToPtr(
1676  IRB.CreateAdd(AddrLong, ConstantInt::get(IntptrTy, TypeSize / 8 - 1)),
1677  Addr->getType());
1678  instrumentAddress(I, InsertBefore, Addr, 8, IsWrite, Size, false, Exp);
1679  instrumentAddress(I, InsertBefore, LastByte, 8, IsWrite, Size, false, Exp);
1680  }
1681 }
1682 
1683 void ModuleAddressSanitizer::poisonOneInitializer(Function &GlobalInit,
1685  // Set up the arguments to our poison/unpoison functions.
1686  IRBuilder<> IRB(&GlobalInit.front(),
1687  GlobalInit.front().getFirstInsertionPt());
1688 
1689  // Add a call to poison all external globals before the given function starts.
1690  Value *ModuleNameAddr = ConstantExpr::getPointerCast(ModuleName, IntptrTy);
1691  IRB.CreateCall(AsanPoisonGlobals, ModuleNameAddr);
1692 
1693  // Add calls to unpoison all globals before each return instruction.
1694  for (auto &BB : GlobalInit.getBasicBlockList())
1695  if (ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator()))
1696  CallInst::Create(AsanUnpoisonGlobals, "", RI);
1697 }
1698 
1699 void ModuleAddressSanitizer::createInitializerPoisonCalls(
1700  Module &M, GlobalValue *ModuleName) {
1701  GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors");
1702  if (!GV)
1703  return;
1704 
1705  ConstantArray *CA = dyn_cast<ConstantArray>(GV->getInitializer());
1706  if (!CA)
1707  return;
1708 
1709  for (Use &OP : CA->operands()) {
1710  if (isa<ConstantAggregateZero>(OP)) continue;
1711  ConstantStruct *CS = cast<ConstantStruct>(OP);
1712 
1713  // Must have a function or null ptr.
1714  if (Function *F = dyn_cast<Function>(CS->getOperand(1))) {
1715  if (F->getName() == kAsanModuleCtorName) continue;
1716  auto *Priority = cast<ConstantInt>(CS->getOperand(0));
1717  // Don't instrument CTORs that will run before asan.module_ctor.
1718  if (Priority->getLimitedValue() <= GetCtorAndDtorPriority(TargetTriple))
1719  continue;
1720  poisonOneInitializer(*F, ModuleName);
1721  }
1722  }
1723 }
1724 
1725 const GlobalVariable *
1726 ModuleAddressSanitizer::getExcludedAliasedGlobal(const GlobalAlias &GA) const {
1727  // In case this function should be expanded to include rules that do not just
1728  // apply when CompileKernel is true, either guard all existing rules with an
1729  // 'if (CompileKernel) { ... }' or be absolutely sure that all these rules
1730  // should also apply to user space.
1731  assert(CompileKernel && "Only expecting to be called when compiling kernel");
1732 
1733  const Constant *C = GA.getAliasee();
1734 
1735  // When compiling the kernel, globals that are aliased by symbols prefixed
1736  // by "__" are special and cannot be padded with a redzone.
1737  if (GA.getName().startswith("__"))
1738  return dyn_cast<GlobalVariable>(C->stripPointerCastsAndAliases());
1739 
1740  return nullptr;
1741 }
1742 
1743 bool ModuleAddressSanitizer::shouldInstrumentGlobal(GlobalVariable *G) const {
1744  Type *Ty = G->getValueType();
1745  LLVM_DEBUG(dbgs() << "GLOBAL: " << *G << "\n");
1746 
1747  if (G->hasSanitizerMetadata() && G->getSanitizerMetadata().NoAddress)
1748  return false;
1749  if (!Ty->isSized()) return false;
1750  if (!G->hasInitializer()) return false;
1751  // Globals in address space 1 and 4 are supported for AMDGPU.
1752  if (G->getAddressSpace() &&
1753  !(TargetTriple.isAMDGPU() && !isUnsupportedAMDGPUAddrspace(G)))
1754  return false;
1755  if (GlobalWasGeneratedByCompiler(G)) return false; // Our own globals.
1756  // Two problems with thread-locals:
1757  // - The address of the main thread's copy can't be computed at link-time.
1758  // - Need to poison all copies, not just the main thread's one.
1759  if (G->isThreadLocal()) return false;
1760  // For now, just ignore this Global if the alignment is large.
1761  if (G->getAlignment() > getMinRedzoneSizeForGlobal()) return false;
1762 
1763  // For non-COFF targets, only instrument globals known to be defined by this
1764  // TU.
1765  // FIXME: We can instrument comdat globals on ELF if we are using the
1766  // GC-friendly metadata scheme.
1767  if (!TargetTriple.isOSBinFormatCOFF()) {
1768  if (!G->hasExactDefinition() || G->hasComdat())
1769  return false;
1770  } else {
1771  // On COFF, don't instrument non-ODR linkages.
1772  if (G->isInterposable())
1773  return false;
1774  }
1775 
1776  // If a comdat is present, it must have a selection kind that implies ODR
1777  // semantics: no duplicates, any, or exact match.
1778  if (Comdat *C = G->getComdat()) {
1779  switch (C->getSelectionKind()) {
1780  case Comdat::Any:
1781  case Comdat::ExactMatch:
1782  case Comdat::NoDeduplicate:
1783  break;
1784  case Comdat::Largest:
1785  case Comdat::SameSize:
1786  return false;
1787  }
1788  }
1789 
1790  if (G->hasSection()) {
1791  // The kernel uses explicit sections for mostly special global variables
1792  // that we should not instrument. E.g. the kernel may rely on their layout
1793  // without redzones, or remove them at link time ("discard.*"), etc.
1794  if (CompileKernel)
1795  return false;
1796 
1797  StringRef Section = G->getSection();
1798 
1799  // Globals from llvm.metadata aren't emitted, do not instrument them.
1800  if (Section == "llvm.metadata") return false;
1801  // Do not instrument globals from special LLVM sections.
1802  if (Section.contains("__llvm") || Section.contains("__LLVM"))
1803  return false;
1804 
1805  // Do not instrument function pointers to initialization and termination
1806  // routines: dynamic linker will not properly handle redzones.
1807  if (Section.startswith(".preinit_array") ||
1808  Section.startswith(".init_array") ||
1809  Section.startswith(".fini_array")) {
1810  return false;
1811  }
1812 
1813  // Do not instrument user-defined sections (with names resembling
1814  // valid C identifiers)
1815  if (TargetTriple.isOSBinFormatELF()) {
1816  if (llvm::all_of(Section,
1817  [](char c) { return llvm::isAlnum(c) || c == '_'; }))
1818  return false;
1819  }
1820 
1821  // On COFF, if the section name contains '$', it is highly likely that the
1822  // user is using section sorting to create an array of globals similar to
1823  // the way initialization callbacks are registered in .init_array and
1824  // .CRT$XCU. The ATL also registers things in .ATL$__[azm]. Adding redzones
1825  // to such globals is counterproductive, because the intent is that they
1826  // will form an array, and out-of-bounds accesses are expected.
1827  // See https://github.com/google/sanitizers/issues/305
1828  // and http://msdn.microsoft.com/en-US/en-en/library/bb918180(v=vs.120).aspx
1829  if (TargetTriple.isOSBinFormatCOFF() && Section.contains('$')) {
1830  LLVM_DEBUG(dbgs() << "Ignoring global in sorted section (contains '$'): "
1831  << *G << "\n");
1832  return false;
1833  }
1834 
1835  if (TargetTriple.isOSBinFormatMachO()) {
1836  StringRef ParsedSegment, ParsedSection;
1837  unsigned TAA = 0, StubSize = 0;
1838  bool TAAParsed;
1840  Section, ParsedSegment, ParsedSection, TAA, TAAParsed, StubSize));
1841 
1842  // Ignore the globals from the __OBJC section. The ObjC runtime assumes
1843  // those conform to /usr/lib/objc/runtime.h, so we can't add redzones to
1844  // them.
1845  if (ParsedSegment == "__OBJC" ||
1846  (ParsedSegment == "__DATA" && ParsedSection.startswith("__objc_"))) {
1847  LLVM_DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G << "\n");
1848  return false;
1849  }
1850  // See https://github.com/google/sanitizers/issues/32
1851  // Constant CFString instances are compiled in the following way:
1852  // -- the string buffer is emitted into
1853  // __TEXT,__cstring,cstring_literals
1854  // -- the constant NSConstantString structure referencing that buffer
1855  // is placed into __DATA,__cfstring
1856  // Therefore there's no point in placing redzones into __DATA,__cfstring.
1857  // Moreover, it causes the linker to crash on OS X 10.7
1858  if (ParsedSegment == "__DATA" && ParsedSection == "__cfstring") {
1859  LLVM_DEBUG(dbgs() << "Ignoring CFString: " << *G << "\n");
1860  return false;
1861  }
1862  // The linker merges the contents of cstring_literals and removes the
1863  // trailing zeroes.
1864  if (ParsedSegment == "__TEXT" && (TAA & MachO::S_CSTRING_LITERALS)) {
1865  LLVM_DEBUG(dbgs() << "Ignoring a cstring literal: " << *G << "\n");
1866  return false;
1867  }
1868  }
1869  }
1870 
1871  if (CompileKernel) {
1872  // Globals that prefixed by "__" are special and cannot be padded with a
1873  // redzone.
1874  if (G->getName().startswith("__"))
1875  return false;
1876  }
1877 
1878  return true;
1879 }
1880 
1881 // On Mach-O platforms, we emit global metadata in a separate section of the
1882 // binary in order to allow the linker to properly dead strip. This is only
1883 // supported on recent versions of ld64.
1884 bool ModuleAddressSanitizer::ShouldUseMachOGlobalsSection() const {
1885  if (!TargetTriple.isOSBinFormatMachO())
1886  return false;
1887 
1888  if (TargetTriple.isMacOSX() && !TargetTriple.isMacOSXVersionLT(10, 11))
1889  return true;
1890  if (TargetTriple.isiOS() /* or tvOS */ && !TargetTriple.isOSVersionLT(9))
1891  return true;
1892  if (TargetTriple.isWatchOS() && !TargetTriple.isOSVersionLT(2))
1893  return true;
1894  if (TargetTriple.isDriverKit())
1895  return true;
1896 
1897  return false;
1898 }
1899 
1900 StringRef ModuleAddressSanitizer::getGlobalMetadataSection() const {
1901  switch (TargetTriple.getObjectFormat()) {
1902  case Triple::COFF: return ".ASAN$GL";
1903  case Triple::ELF: return "asan_globals";
1904  case Triple::MachO: return "__DATA,__asan_globals,regular";
1905  case Triple::Wasm:
1906  case Triple::GOFF:
1907  case Triple::SPIRV:
1908  case Triple::XCOFF:
1909  case Triple::DXContainer:
1911  "ModuleAddressSanitizer not implemented for object file format");
1913  break;
1914  }
1915  llvm_unreachable("unsupported object format");
1916 }
1917 
1918 void ModuleAddressSanitizer::initializeCallbacks(Module &M) {
1919  IRBuilder<> IRB(*C);
1920 
1921  // Declare our poisoning and unpoisoning functions.
1922  AsanPoisonGlobals =
1923  M.getOrInsertFunction(kAsanPoisonGlobalsName, IRB.getVoidTy(), IntptrTy);
1924  AsanUnpoisonGlobals =
1925  M.getOrInsertFunction(kAsanUnpoisonGlobalsName, IRB.getVoidTy());
1926 
1927  // Declare functions that register/unregister globals.
1928  AsanRegisterGlobals = M.getOrInsertFunction(
1929  kAsanRegisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy);
1930  AsanUnregisterGlobals = M.getOrInsertFunction(
1931  kAsanUnregisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy);
1932 
1933  // Declare the functions that find globals in a shared object and then invoke
1934  // the (un)register function on them.
1935  AsanRegisterImageGlobals = M.getOrInsertFunction(
1936  kAsanRegisterImageGlobalsName, IRB.getVoidTy(), IntptrTy);
1937  AsanUnregisterImageGlobals = M.getOrInsertFunction(
1938  kAsanUnregisterImageGlobalsName, IRB.getVoidTy(), IntptrTy);
1939 
1940  AsanRegisterElfGlobals =
1941  M.getOrInsertFunction(kAsanRegisterElfGlobalsName, IRB.getVoidTy(),
1942  IntptrTy, IntptrTy, IntptrTy);
1943  AsanUnregisterElfGlobals =
1944  M.getOrInsertFunction(kAsanUnregisterElfGlobalsName, IRB.getVoidTy(),
1945  IntptrTy, IntptrTy, IntptrTy);
1946 }
1947 
1948 // Put the metadata and the instrumented global in the same group. This ensures
1949 // that the metadata is discarded if the instrumented global is discarded.
1950 void ModuleAddressSanitizer::SetComdatForGlobalMetadata(
1951  GlobalVariable *G, GlobalVariable *Metadata, StringRef InternalSuffix) {
1952  Module &M = *G->getParent();
1953  Comdat *C = G->getComdat();
1954  if (!C) {
1955  if (!G->hasName()) {
1956  // If G is unnamed, it must be internal. Give it an artificial name
1957  // so we can put it in a comdat.
1958  assert(G->hasLocalLinkage());
1959  G->setName(Twine(kAsanGenPrefix) + "_anon_global");
1960  }
1961 
1962  if (!InternalSuffix.empty() && G->hasLocalLinkage()) {
1963  std::string Name = std::string(G->getName());
1964  Name += InternalSuffix;
1965  C = M.getOrInsertComdat(Name);
1966  } else {
1967  C = M.getOrInsertComdat(G->getName());
1968  }
1969 
1970  // Make this IMAGE_COMDAT_SELECT_NODUPLICATES on COFF. Also upgrade private
1971  // linkage to internal linkage so that a symbol table entry is emitted. This
1972  // is necessary in order to create the comdat group.
1973  if (TargetTriple.isOSBinFormatCOFF()) {
1974  C->setSelectionKind(Comdat::NoDeduplicate);
1975  if (G->hasPrivateLinkage())
1976  G->setLinkage(GlobalValue::InternalLinkage);
1977  }
1978  G->setComdat(C);
1979  }
1980 
1981  assert(G->hasComdat());
1982  Metadata->setComdat(G->getComdat());
1983 }
1984 
1985 // Create a separate metadata global and put it in the appropriate ASan
1986 // global registration section.
1988 ModuleAddressSanitizer::CreateMetadataGlobal(Module &M, Constant *Initializer,
1989  StringRef OriginalName) {
1990  auto Linkage = TargetTriple.isOSBinFormatMachO()
1994  M, Initializer->getType(), false, Linkage, Initializer,
1995  Twine("__asan_global_") + GlobalValue::dropLLVMManglingEscape(OriginalName));
1996  Metadata->setSection(getGlobalMetadataSection());
1997  return Metadata;
1998 }
1999 
2000 Instruction *ModuleAddressSanitizer::CreateAsanModuleDtor(Module &M) {
2001  AsanDtorFunction = Function::createWithDefaultAttr(
2004  AsanDtorFunction->addFnAttr(Attribute::NoUnwind);
2005  // Ensure Dtor cannot be discarded, even if in a comdat.
2006  appendToUsed(M, {AsanDtorFunction});
2007  BasicBlock *AsanDtorBB = BasicBlock::Create(*C, "", AsanDtorFunction);
2008 
2009  return ReturnInst::Create(*C, AsanDtorBB);
2010 }
2011 
2012 void ModuleAddressSanitizer::InstrumentGlobalsCOFF(
2013  IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals,
2014  ArrayRef<Constant *> MetadataInitializers) {
2015  assert(ExtendedGlobals.size() == MetadataInitializers.size());
2016  auto &DL = M.getDataLayout();
2017 
2018  SmallVector<GlobalValue *, 16> MetadataGlobals(ExtendedGlobals.size());
2019  for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
2020  Constant *Initializer = MetadataInitializers[i];
2021  GlobalVariable *G = ExtendedGlobals[i];
2023  CreateMetadataGlobal(M, Initializer, G->getName());
2024  MDNode *MD = MDNode::get(M.getContext(), ValueAsMetadata::get(G));
2025  Metadata->setMetadata(LLVMContext::MD_associated, MD);
2026  MetadataGlobals[i] = Metadata;
2027 
2028  // The MSVC linker always inserts padding when linking incrementally. We
2029  // cope with that by aligning each struct to its size, which must be a power
2030  // of two.
2031  unsigned SizeOfGlobalStruct = DL.getTypeAllocSize(Initializer->getType());
2032  assert(isPowerOf2_32(SizeOfGlobalStruct) &&
2033  "global metadata will not be padded appropriately");
2034  Metadata->setAlignment(assumeAligned(SizeOfGlobalStruct));
2035 
2036  SetComdatForGlobalMetadata(G, Metadata, "");
2037  }
2038 
2039  // Update llvm.compiler.used, adding the new metadata globals. This is
2040  // needed so that during LTO these variables stay alive.
2041  if (!MetadataGlobals.empty())
2042  appendToCompilerUsed(M, MetadataGlobals);
2043 }
2044 
2045 void ModuleAddressSanitizer::InstrumentGlobalsELF(
2046  IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals,
2047  ArrayRef<Constant *> MetadataInitializers,
2048  const std::string &UniqueModuleId) {
2049  assert(ExtendedGlobals.size() == MetadataInitializers.size());
2050 
2051  // Putting globals in a comdat changes the semantic and potentially cause
2052  // false negative odr violations at link time. If odr indicators are used, we
2053  // keep the comdat sections, as link time odr violations will be dectected on
2054  // the odr indicator symbols.
2055  bool UseComdatForGlobalsGC = UseOdrIndicator;
2056 
2057  SmallVector<GlobalValue *, 16> MetadataGlobals(ExtendedGlobals.size());
2058  for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
2059  GlobalVariable *G = ExtendedGlobals[i];
2061  CreateMetadataGlobal(M, MetadataInitializers[i], G->getName());
2062  MDNode *MD = MDNode::get(M.getContext(), ValueAsMetadata::get(G));
2063  Metadata->setMetadata(LLVMContext::MD_associated, MD);
2064  MetadataGlobals[i] = Metadata;
2065 
2066  if (UseComdatForGlobalsGC)
2067  SetComdatForGlobalMetadata(G, Metadata, UniqueModuleId);
2068  }
2069 
2070  // Update llvm.compiler.used, adding the new metadata globals. This is
2071  // needed so that during LTO these variables stay alive.
2072  if (!MetadataGlobals.empty())
2073  appendToCompilerUsed(M, MetadataGlobals);
2074 
2075  // RegisteredFlag serves two purposes. First, we can pass it to dladdr()
2076  // to look up the loaded image that contains it. Second, we can store in it
2077  // whether registration has already occurred, to prevent duplicate
2078  // registration.
2079  //
2080  // Common linkage ensures that there is only one global per shared library.
2081  GlobalVariable *RegisteredFlag = new GlobalVariable(
2082  M, IntptrTy, false, GlobalVariable::CommonLinkage,
2085 
2086  // Create start and stop symbols.
2087  GlobalVariable *StartELFMetadata = new GlobalVariable(
2088  M, IntptrTy, false, GlobalVariable::ExternalWeakLinkage, nullptr,
2089  "__start_" + getGlobalMetadataSection());
2091  GlobalVariable *StopELFMetadata = new GlobalVariable(
2092  M, IntptrTy, false, GlobalVariable::ExternalWeakLinkage, nullptr,
2093  "__stop_" + getGlobalMetadataSection());
2095 
2096  // Create a call to register the globals with the runtime.
2097  IRB.CreateCall(AsanRegisterElfGlobals,
2098  {IRB.CreatePointerCast(RegisteredFlag, IntptrTy),
2099  IRB.CreatePointerCast(StartELFMetadata, IntptrTy),
2100  IRB.CreatePointerCast(StopELFMetadata, IntptrTy)});
2101 
2102  // We also need to unregister globals at the end, e.g., when a shared library
2103  // gets closed.
2104  if (DestructorKind != AsanDtorKind::None) {
2105  IRBuilder<> IrbDtor(CreateAsanModuleDtor(M));
2106  IrbDtor.CreateCall(AsanUnregisterElfGlobals,
2107  {IRB.CreatePointerCast(RegisteredFlag, IntptrTy),
2108  IRB.CreatePointerCast(StartELFMetadata, IntptrTy),
2109  IRB.CreatePointerCast(StopELFMetadata, IntptrTy)});
2110  }
2111 }
2112 
2113 void ModuleAddressSanitizer::InstrumentGlobalsMachO(
2114  IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals,
2115  ArrayRef<Constant *> MetadataInitializers) {
2116  assert(ExtendedGlobals.size() == MetadataInitializers.size());
2117 
2118  // On recent Mach-O platforms, use a structure which binds the liveness of
2119  // the global variable to the metadata struct. Keep the list of "Liveness" GV
2120  // created to be added to llvm.compiler.used
2121  StructType *LivenessTy = StructType::get(IntptrTy, IntptrTy);
2122  SmallVector<GlobalValue *, 16> LivenessGlobals(ExtendedGlobals.size());
2123 
2124  for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
2125  Constant *Initializer = MetadataInitializers[i];
2126  GlobalVariable *G = ExtendedGlobals[i];
2128  CreateMetadataGlobal(M, Initializer, G->getName());
2129 
2130  // On recent Mach-O platforms, we emit the global metadata in a way that
2131  // allows the linker to properly strip dead globals.
2132  auto LivenessBinder =
2133  ConstantStruct::get(LivenessTy, Initializer->getAggregateElement(0u),
2135  GlobalVariable *Liveness = new GlobalVariable(
2136  M, LivenessTy, false, GlobalVariable::InternalLinkage, LivenessBinder,
2137  Twine("__asan_binder_") + G->getName());
2138  Liveness->setSection("__DATA,__asan_liveness,regular,live_support");
2139  LivenessGlobals[i] = Liveness;
2140  }
2141 
2142  // Update llvm.compiler.used, adding the new liveness globals. This is
2143  // needed so that during LTO these variables stay alive. The alternative
2144  // would be to have the linker handling the LTO symbols, but libLTO
2145  // current API does not expose access to the section for each symbol.
2146  if (!LivenessGlobals.empty())
2147  appendToCompilerUsed(M, LivenessGlobals);
2148 
2149  // RegisteredFlag serves two purposes. First, we can pass it to dladdr()
2150  // to look up the loaded image that contains it. Second, we can store in it
2151  // whether registration has already occurred, to prevent duplicate
2152  // registration.
2153  //
2154  // common linkage ensures that there is only one global per shared library.
2155  GlobalVariable *RegisteredFlag = new GlobalVariable(
2156  M, IntptrTy, false, GlobalVariable::CommonLinkage,
2159 
2160  IRB.CreateCall(AsanRegisterImageGlobals,
2161  {IRB.CreatePointerCast(RegisteredFlag, IntptrTy)});
2162 
2163  // We also need to unregister globals at the end, e.g., when a shared library
2164  // gets closed.
2165  if (DestructorKind != AsanDtorKind::None) {
2166  IRBuilder<> IrbDtor(CreateAsanModuleDtor(M));
2167  IrbDtor.CreateCall(AsanUnregisterImageGlobals,
2168  {IRB.CreatePointerCast(RegisteredFlag, IntptrTy)});
2169  }
2170 }
2171 
2172 void ModuleAddressSanitizer::InstrumentGlobalsWithMetadataArray(
2173  IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals,
2174  ArrayRef<Constant *> MetadataInitializers) {
2175  assert(ExtendedGlobals.size() == MetadataInitializers.size());
2176  unsigned N = ExtendedGlobals.size();
2177  assert(N > 0);
2178 
2179  // On platforms that don't have a custom metadata section, we emit an array
2180  // of global metadata structures.
2181  ArrayType *ArrayOfGlobalStructTy =
2182  ArrayType::get(MetadataInitializers[0]->getType(), N);
2183  auto AllGlobals = new GlobalVariable(
2184  M, ArrayOfGlobalStructTy, false, GlobalVariable::InternalLinkage,
2185  ConstantArray::get(ArrayOfGlobalStructTy, MetadataInitializers), "");
2186  if (Mapping.Scale > 3)
2187  AllGlobals->setAlignment(Align(1ULL << Mapping.Scale));
2188 
2189  IRB.CreateCall(AsanRegisterGlobals,
2190  {IRB.CreatePointerCast(AllGlobals, IntptrTy),
2191  ConstantInt::get(IntptrTy, N)});
2192 
2193  // We also need to unregister globals at the end, e.g., when a shared library
2194  // gets closed.
2195  if (DestructorKind != AsanDtorKind::None) {
2196  IRBuilder<> IrbDtor(CreateAsanModuleDtor(M));
2197  IrbDtor.CreateCall(AsanUnregisterGlobals,
2198  {IRB.CreatePointerCast(AllGlobals, IntptrTy),
2199  ConstantInt::get(IntptrTy, N)});
2200  }
2201 }
2202 
2203 // This function replaces all global variables with new variables that have
2204 // trailing redzones. It also creates a function that poisons
2205 // redzones and inserts this function into llvm.global_ctors.
2206 // Sets *CtorComdat to true if the global registration code emitted into the
2207 // asan constructor is comdat-compatible.
2208 bool ModuleAddressSanitizer::InstrumentGlobals(IRBuilder<> &IRB, Module &M,
2209  bool *CtorComdat) {
2210  *CtorComdat = false;
2211 
2212  // Build set of globals that are aliased by some GA, where
2213  // getExcludedAliasedGlobal(GA) returns the relevant GlobalVariable.
2214  SmallPtrSet<const GlobalVariable *, 16> AliasedGlobalExclusions;
2215  if (CompileKernel) {
2216  for (auto &GA : M.aliases()) {
2217  if (const GlobalVariable *GV = getExcludedAliasedGlobal(GA))
2218  AliasedGlobalExclusions.insert(GV);
2219  }
2220  }
2221 
2222  SmallVector<GlobalVariable *, 16> GlobalsToChange;
2223  for (auto &G : M.globals()) {
2224  if (!AliasedGlobalExclusions.count(&G) && shouldInstrumentGlobal(&G))
2225  GlobalsToChange.push_back(&G);
2226  }
2227 
2228  size_t n = GlobalsToChange.size();
2229  if (n == 0) {
2230  *CtorComdat = true;
2231  return false;
2232  }
2233 
2234  auto &DL = M.getDataLayout();
2235 
2236  // A global is described by a structure
2237  // size_t beg;
2238  // size_t size;
2239  // size_t size_with_redzone;
2240  // const char *name;
2241  // const char *module_name;
2242  // size_t has_dynamic_init;
2243  // size_t padding_for_windows_msvc_incremental_link;
2244  // size_t odr_indicator;
2245  // We initialize an array of such structures and pass it to a run-time call.
2246  StructType *GlobalStructTy =
2247  StructType::get(IntptrTy, IntptrTy, IntptrTy, IntptrTy, IntptrTy,
2248  IntptrTy, IntptrTy, IntptrTy);
2250  SmallVector<Constant *, 16> Initializers(n);
2251 
2252  bool HasDynamicallyInitializedGlobals = false;
2253 
2254  // We shouldn't merge same module names, as this string serves as unique
2255  // module ID in runtime.
2257  M, M.getModuleIdentifier(), /*AllowMerging*/ false, kAsanGenPrefix);
2258 
2259  for (size_t i = 0; i < n; i++) {
2260  GlobalVariable *G = GlobalsToChange[i];
2261 
2263  if (G->hasSanitizerMetadata())
2264  MD = G->getSanitizerMetadata();
2265 
2266  // The runtime library tries demangling symbol names in the descriptor but
2267  // functionality like __cxa_demangle may be unavailable (e.g.
2268  // -static-libstdc++). So we demangle the symbol names here.
2269  std::string NameForGlobal = G->getName().str();
2270  GlobalVariable *Name =
2271  createPrivateGlobalForString(M, llvm::demangle(NameForGlobal),
2272  /*AllowMerging*/ true, kAsanGenPrefix);
2273 
2274  Type *Ty = G->getValueType();
2275  const uint64_t SizeInBytes = DL.getTypeAllocSize(Ty);
2276  const uint64_t RightRedzoneSize = getRedzoneSizeForGlobal(SizeInBytes);
2277  Type *RightRedZoneTy = ArrayType::get(IRB.getInt8Ty(), RightRedzoneSize);
2278 
2279  StructType *NewTy = StructType::get(Ty, RightRedZoneTy);
2280  Constant *NewInitializer = ConstantStruct::get(
2281  NewTy, G->getInitializer(), Constant::getNullValue(RightRedZoneTy));
2282 
2283  // Create a new global variable with enough space for a redzone.
2284  GlobalValue::LinkageTypes Linkage = G->getLinkage();
2285  if (G->isConstant() && Linkage == GlobalValue::PrivateLinkage)
2287  GlobalVariable *NewGlobal = new GlobalVariable(
2288  M, NewTy, G->isConstant(), Linkage, NewInitializer, "", G,
2289  G->getThreadLocalMode(), G->getAddressSpace());
2290  NewGlobal->copyAttributesFrom(G);
2291  NewGlobal->setComdat(G->getComdat());
2292  NewGlobal->setAlignment(MaybeAlign(getMinRedzoneSizeForGlobal()));
2293  // Don't fold globals with redzones. ODR violation detector and redzone
2294  // poisoning implicitly creates a dependence on the global's address, so it
2295  // is no longer valid for it to be marked unnamed_addr.
2297 
2298  // Move null-terminated C strings to "__asan_cstring" section on Darwin.
2299  if (TargetTriple.isOSBinFormatMachO() && !G->hasSection() &&
2300  G->isConstant()) {
2301  auto Seq = dyn_cast<ConstantDataSequential>(G->getInitializer());
2302  if (Seq && Seq->isCString())
2303  NewGlobal->setSection("__TEXT,__asan_cstring,regular");
2304  }
2305 
2306  // Transfer the debug info and type metadata. The payload starts at offset
2307  // zero so we can copy the metadata over as is.
2308  NewGlobal->copyMetadata(G, 0);
2309 
2310  Value *Indices2[2];
2311  Indices2[0] = IRB.getInt32(0);
2312  Indices2[1] = IRB.getInt32(0);
2313 
2314  G->replaceAllUsesWith(
2315  ConstantExpr::getGetElementPtr(NewTy, NewGlobal, Indices2, true));
2316  NewGlobal->takeName(G);
2317  G->eraseFromParent();
2318  NewGlobals[i] = NewGlobal;
2319 
2320  Constant *ODRIndicator = ConstantExpr::getNullValue(IRB.getInt8PtrTy());
2321  GlobalValue *InstrumentedGlobal = NewGlobal;
2322 
2323  bool CanUsePrivateAliases =
2324  TargetTriple.isOSBinFormatELF() || TargetTriple.isOSBinFormatMachO() ||
2325  TargetTriple.isOSBinFormatWasm();
2326  if (CanUsePrivateAliases && UsePrivateAlias) {
2327  // Create local alias for NewGlobal to avoid crash on ODR between
2328  // instrumented and non-instrumented libraries.
2329  InstrumentedGlobal =
2331  }
2332 
2333  // ODR should not happen for local linkage.
2334  if (NewGlobal->hasLocalLinkage()) {
2335  ODRIndicator = ConstantExpr::getIntToPtr(ConstantInt::get(IntptrTy, -1),
2336  IRB.getInt8PtrTy());
2337  } else if (UseOdrIndicator) {
2338  // With local aliases, we need to provide another externally visible
2339  // symbol __odr_asan_XXX to detect ODR violation.
2340  auto *ODRIndicatorSym =
2341  new GlobalVariable(M, IRB.getInt8Ty(), false, Linkage,
2343  kODRGenPrefix + NameForGlobal, nullptr,
2344  NewGlobal->getThreadLocalMode());
2345 
2346  // Set meaningful attributes for indicator symbol.
2347  ODRIndicatorSym->setVisibility(NewGlobal->getVisibility());
2348  ODRIndicatorSym->setDLLStorageClass(NewGlobal->getDLLStorageClass());
2349  ODRIndicatorSym->setAlignment(Align(1));
2350  ODRIndicator = ODRIndicatorSym;
2351  }
2352 
2353  Constant *Initializer = ConstantStruct::get(
2354  GlobalStructTy,
2355  ConstantExpr::getPointerCast(InstrumentedGlobal, IntptrTy),
2356  ConstantInt::get(IntptrTy, SizeInBytes),
2357  ConstantInt::get(IntptrTy, SizeInBytes + RightRedzoneSize),
2358  ConstantExpr::getPointerCast(Name, IntptrTy),
2360  ConstantInt::get(IntptrTy, MD.IsDynInit),
2361  Constant::getNullValue(IntptrTy),
2362  ConstantExpr::getPointerCast(ODRIndicator, IntptrTy));
2363 
2364  if (ClInitializers && MD.IsDynInit)
2365  HasDynamicallyInitializedGlobals = true;
2366 
2367  LLVM_DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n");
2368 
2369  Initializers[i] = Initializer;
2370  }
2371 
2372  // Add instrumented globals to llvm.compiler.used list to avoid LTO from
2373  // ConstantMerge'ing them.
2374  SmallVector<GlobalValue *, 16> GlobalsToAddToUsedList;
2375  for (size_t i = 0; i < n; i++) {
2376  GlobalVariable *G = NewGlobals[i];
2377  if (G->getName().empty()) continue;
2378  GlobalsToAddToUsedList.push_back(G);
2379  }
2380  appendToCompilerUsed(M, ArrayRef<GlobalValue *>(GlobalsToAddToUsedList));
2381 
2382  std::string ELFUniqueModuleId =
2383  (UseGlobalsGC && TargetTriple.isOSBinFormatELF()) ? getUniqueModuleId(&M)
2384  : "";
2385 
2386  if (!ELFUniqueModuleId.empty()) {
2387  InstrumentGlobalsELF(IRB, M, NewGlobals, Initializers, ELFUniqueModuleId);
2388  *CtorComdat = true;
2389  } else if (UseGlobalsGC && TargetTriple.isOSBinFormatCOFF()) {
2390  InstrumentGlobalsCOFF(IRB, M, NewGlobals, Initializers);
2391  } else if (UseGlobalsGC && ShouldUseMachOGlobalsSection()) {
2392  InstrumentGlobalsMachO(IRB, M, NewGlobals, Initializers);
2393  } else {
2394  InstrumentGlobalsWithMetadataArray(IRB, M, NewGlobals, Initializers);
2395  }
2396 
2397  // Create calls for poisoning before initializers run and unpoisoning after.
2398  if (HasDynamicallyInitializedGlobals)
2399  createInitializerPoisonCalls(M, ModuleName);
2400 
2401  LLVM_DEBUG(dbgs() << M);
2402  return true;
2403 }
2404 
2405 uint64_t
2406 ModuleAddressSanitizer::getRedzoneSizeForGlobal(uint64_t SizeInBytes) const {
2407  constexpr uint64_t kMaxRZ = 1 << 18;
2408  const uint64_t MinRZ = getMinRedzoneSizeForGlobal();
2409 
2410  uint64_t RZ = 0;
2411  if (SizeInBytes <= MinRZ / 2) {
2412  // Reduce redzone size for small size objects, e.g. int, char[1]. MinRZ is
2413  // at least 32 bytes, optimize when SizeInBytes is less than or equal to
2414  // half of MinRZ.
2415  RZ = MinRZ - SizeInBytes;
2416  } else {
2417  // Calculate RZ, where MinRZ <= RZ <= MaxRZ, and RZ ~ 1/4 * SizeInBytes.
2418  RZ = std::clamp((SizeInBytes / MinRZ / 4) * MinRZ, MinRZ, kMaxRZ);
2419 
2420  // Round up to multiple of MinRZ.
2421  if (SizeInBytes % MinRZ)
2422  RZ += MinRZ - (SizeInBytes % MinRZ);
2423  }
2424 
2425  assert((RZ + SizeInBytes) % MinRZ == 0);
2426 
2427  return RZ;
2428 }
2429 
2430 int ModuleAddressSanitizer::GetAsanVersion(const Module &M) const {
2431  int LongSize = M.getDataLayout().getPointerSizeInBits();
2432  bool isAndroid = Triple(M.getTargetTriple()).isAndroid();
2433  int Version = 8;
2434  // 32-bit Android is one version ahead because of the switch to dynamic
2435  // shadow.
2436  Version += (LongSize == 32 && isAndroid);
2437  return Version;
2438 }
2439 
// Module-level entry point: create the module ctor (and lazily a dtor),
// instrument globals if requested, and register ctor/dtor with appropriate
// priorities and comdats. Always returns true (the module is modified).
bool ModuleAddressSanitizer::instrumentModule(Module &M) {
  initializeCallbacks(M);

  // Create a module constructor. A destructor is created lazily because not all
  // platforms, and not all modules need it.
  if (CompileKernel) {
    // The kernel always builds with its own runtime, and therefore does not
    // need the init and version check calls.
    AsanCtorFunction = createSanitizerCtor(M, kAsanModuleCtorName);
  } else {
    // Userspace: the ctor calls __asan_init and optionally a version-check
    // function whose name encodes the ABI version.
    std::string AsanVersion = std::to_string(GetAsanVersion(M));
    std::string VersionCheckName =
        ClInsertVersionCheck ? (kAsanVersionCheckNamePrefix + AsanVersion) : "";
    // NOTE(review): the call that creates the ctor/init pair appears to be
    // truncated in this copy of the file — confirm against upstream.
    std::tie(AsanCtorFunction, std::ignore) =
        kAsanInitName, /*InitArgTypes=*/{},
        /*InitArgs=*/{}, VersionCheckName);
  }

  bool CtorComdat = true;
  if (ClGlobals) {
    // Instrument globals from inside the module ctor, before its terminator.
    IRBuilder<> IRB(AsanCtorFunction->getEntryBlock().getTerminator());
    InstrumentGlobals(IRB, M, &CtorComdat);
  }

  const uint64_t Priority = GetCtorAndDtorPriority(TargetTriple);

  // Put the constructor and destructor in comdat if both
  // (1) global instrumentation is not TU-specific
  // (2) target is ELF.
  if (UseCtorComdat && TargetTriple.isOSBinFormatELF() && CtorComdat) {
    AsanCtorFunction->setComdat(M.getOrInsertComdat(kAsanModuleCtorName));
    appendToGlobalCtors(M, AsanCtorFunction, Priority, AsanCtorFunction);
    if (AsanDtorFunction) {
      AsanDtorFunction->setComdat(M.getOrInsertComdat(kAsanModuleDtorName));
      appendToGlobalDtors(M, AsanDtorFunction, Priority, AsanDtorFunction);
    }
  } else {
    // Non-ELF or TU-specific globals: register without a comdat key.
    appendToGlobalCtors(M, AsanCtorFunction, Priority);
    if (AsanDtorFunction)
      appendToGlobalDtors(M, AsanDtorFunction, Priority);
  }

  return true;
}
2485 
// Declare (getOrInsert) every runtime callback this pass may emit calls to:
// __asan_report_* error reporters, __asan_{load,store}* access callbacks,
// mem* interceptors, no-return and pointer-compare hooks.
void AddressSanitizer::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(*C);
  // Create __asan_report* callbacks.
  // IsWrite, TypeSize and Exp are encoded in the function name.
  for (int Exp = 0; Exp < 2; Exp++) {
    for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
      const std::string TypeStr = AccessIsWrite ? "store" : "load";
      const std::string ExpStr = Exp ? "exp_" : "";
      const std::string EndingStr = Recover ? "_noabort" : "";

      // Args2: (addr, size[, exp]) for the variable-size "_n"/"N" variants;
      // Args1: (addr[, exp]) for the fixed-size variants.
      SmallVector<Type *, 3> Args2 = {IntptrTy, IntptrTy};
      SmallVector<Type *, 2> Args1{1, IntptrTy};
      if (Exp) {
        Type *ExpType = Type::getInt32Ty(*C);
        Args2.push_back(ExpType);
        Args1.push_back(ExpType);
      }
      AsanErrorCallbackSized[AccessIsWrite][Exp] = M.getOrInsertFunction(
          kAsanReportErrorTemplate + ExpStr + TypeStr + "_n" + EndingStr,
          FunctionType::get(IRB.getVoidTy(), Args2, false));

      AsanMemoryAccessCallbackSized[AccessIsWrite][Exp] = M.getOrInsertFunction(
          ClMemoryAccessCallbackPrefix + ExpStr + TypeStr + "N" + EndingStr,
          FunctionType::get(IRB.getVoidTy(), Args2, false));

      // Fixed-size callbacks: one per power-of-two access size.
      for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
           AccessSizeIndex++) {
        const std::string Suffix = TypeStr + itostr(1ULL << AccessSizeIndex);
        AsanErrorCallback[AccessIsWrite][Exp][AccessSizeIndex] =
            M.getOrInsertFunction(
                kAsanReportErrorTemplate + ExpStr + Suffix + EndingStr,
                FunctionType::get(IRB.getVoidTy(), Args1, false));

        AsanMemoryAccessCallback[AccessIsWrite][Exp][AccessSizeIndex] =
            M.getOrInsertFunction(
                ClMemoryAccessCallbackPrefix + ExpStr + Suffix + EndingStr,
                FunctionType::get(IRB.getVoidTy(), Args1, false));
      }
    }
  }

  const std::string MemIntrinCallbackPrefix =
      (CompileKernel && !ClKasanMemIntrinCallbackPrefix)
          ? std::string("")
  // NOTE(review): the ':' branch of the conditional above appears to be
  // truncated in this copy of the file — confirm against upstream.
  AsanMemmove = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memmove",
                                      IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
                                      IRB.getInt8PtrTy(), IntptrTy);
  AsanMemcpy = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memcpy",
                                     IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
                                     IRB.getInt8PtrTy(), IntptrTy);
  AsanMemset = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memset",
                                     IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
                                     IRB.getInt32Ty(), IntptrTy);

  AsanHandleNoReturnFunc =
      M.getOrInsertFunction(kAsanHandleNoReturnName, IRB.getVoidTy());

  // Hooks for invalid pointer comparison / subtraction checking.
  AsanPtrCmpFunction =
      M.getOrInsertFunction(kAsanPtrCmp, IRB.getVoidTy(), IntptrTy, IntptrTy);
  AsanPtrSubFunction =
      M.getOrInsertFunction(kAsanPtrSub, IRB.getVoidTy(), IntptrTy, IntptrTy);
  if (Mapping.InGlobal)
    AsanShadowGlobal = M.getOrInsertGlobal("__asan_shadow",
                                           ArrayType::get(IRB.getInt8Ty(), 0));

  // NOTE(review): the argument lists of the next two declarations appear to
  // be truncated in this copy of the file — confirm against upstream.
  AMDGPUAddressShared = M.getOrInsertFunction(
  AMDGPUAddressPrivate = M.getOrInsertFunction(
}
2557 
2558 bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) {
2559  // For each NSObject descendant having a +load method, this method is invoked
2560  // by the ObjC runtime before any of the static constructors is called.
2561  // Therefore we need to instrument such methods with a call to __asan_init
2562  // at the beginning in order to initialize our runtime before any access to
2563  // the shadow memory.
2564  // We cannot just ignore these methods, because they may call other
2565  // instrumented functions.
2566  if (F.getName().find(" load]") != std::string::npos) {
2567  FunctionCallee AsanInitFunction =
2568  declareSanitizerInitFunction(*F.getParent(), kAsanInitName, {});
2569  IRBuilder<> IRB(&F.front(), F.front().begin());
2570  IRB.CreateCall(AsanInitFunction, {});
2571  return true;
2572  }
2573  return false;
2574 }
2575 
// Materializes the shadow base into LocalDynamicShadow at function entry when
// the mapping uses a dynamic (sentinel) offset. Returns true if code was added.
bool AddressSanitizer::maybeInsertDynamicShadowAtFunctionEntry(Function &F) {
  // Generate code only when dynamic addressing is needed.
  if (Mapping.Offset != kDynamicShadowSentinel)
    return false;

  IRBuilder<> IRB(&F.front().front());
  if (Mapping.InGlobal) {
    // NOTE(review): an inner conditional and the InlineAsm::get(...) call
    // appear to be truncated in this copy of the file — confirm upstream.
      // An empty inline asm with input reg == output reg.
      // An opaque pointer-to-int cast, basically.
          FunctionType::get(IntptrTy, {AsanShadowGlobal->getType()}, false),
          StringRef(""), StringRef("=r,0"),
          /*hasSideEffects=*/false);
      LocalDynamicShadow =
          IRB.CreateCall(Asm, {AsanShadowGlobal}, ".asan.shadow");
    } else {
      LocalDynamicShadow =
          IRB.CreatePointerCast(AsanShadowGlobal, IntptrTy, ".asan.shadow");
    }
  } else {
    // NOTE(review): the global's name/type arguments are truncated here —
    // confirm against upstream.
    Value *GlobalDynamicAddress = F.getParent()->getOrInsertGlobal(
    LocalDynamicShadow = IRB.CreateLoad(IntptrTy, GlobalDynamicAddress);
  }
  return true;
}
2603 
2604 void AddressSanitizer::markEscapedLocalAllocas(Function &F) {
2605  // Find the one possible call to llvm.localescape and pre-mark allocas passed
2606  // to it as uninteresting. This assumes we haven't started processing allocas
2607  // yet. This check is done up front because iterating the use list in
2608  // isInterestingAlloca would be algorithmically slower.
2609  assert(ProcessedAllocas.empty() && "must process localescape before allocas");
2610 
2611  // Try to get the declaration of llvm.localescape. If it's not in the module,
2612  // we can exit early.
2613  if (!F.getParent()->getFunction("llvm.localescape")) return;
2614 
2615  // Look for a call to llvm.localescape call in the entry block. It can't be in
2616  // any other block.
2617  for (Instruction &I : F.getEntryBlock()) {
2618  IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
2619  if (II && II->getIntrinsicID() == Intrinsic::localescape) {
2620  // We found a call. Mark all the allocas passed in as uninteresting.
2621  for (Value *Arg : II->args()) {
2622  AllocaInst *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
2623  assert(AI && AI->isStaticAlloca() &&
2624  "non-static alloca arg to localescape");
2625  ProcessedAllocas[AI] = false;
2626  }
2627  break;
2628  }
2629  }
2630 }
2631 
2632 bool AddressSanitizer::suppressInstrumentationSiteForDebug(int &Instrumented) {
2633  bool ShouldInstrument =
2634  ClDebugMin < 0 || ClDebugMax < 0 ||
2635  (Instrumented >= ClDebugMin && Instrumented <= ClDebugMax);
2636  Instrumented++;
2637  return !ShouldInstrument;
2638 }
2639 
// Per-function driver: collects interesting memory operands, mem intrinsics,
// no-return calls and pointer cmp/sub sites, then instruments them and runs
// the stack poisoner. Returns true if the function was modified.
bool AddressSanitizer::instrumentFunction(Function &F,
                                          const TargetLibraryInfo *TLI) {
  if (F.empty())
    return false;
  if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage) return false;
  if (!ClDebugFunc.empty() && ClDebugFunc == F.getName()) return false;
  // Never instrument the runtime's own functions.
  if (F.getName().startswith("__asan_")) return false;

  bool FunctionModified = false;

  // If needed, insert __asan_init before checking for SanitizeAddress attr.
  // This function needs to be called even if the function body is not
  // instrumented.
  if (maybeInsertAsanInitAtFunctionEntry(F))
    FunctionModified = true;

  // Leave if the function doesn't need instrumentation.
  if (!F.hasFnAttribute(Attribute::SanitizeAddress)) return FunctionModified;

  if (F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation))
    return FunctionModified;

  LLVM_DEBUG(dbgs() << "ASAN instrumenting:\n" << F << "\n");

  initializeCallbacks(*F.getParent());

  // Resets per-function state (e.g. LocalDynamicShadow) on scope exit.
  FunctionStateRAII CleanupObj(this);

  FunctionModified |= maybeInsertDynamicShadowAtFunctionEntry(F);

  // We can't instrument allocas used with llvm.localescape. Only static allocas
  // can be passed to that intrinsic.
  markEscapedLocalAllocas(F);

  // We want to instrument every address only once per basic block (unless there
  // are calls between uses).
  SmallPtrSet<Value *, 16> TempsToInstrument;
  SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
  SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
  SmallVector<Instruction *, 8> NoReturnCalls;
  // NOTE(review): the declaration of AllBlocks (used below) appears to be
  // truncated from this copy of the file — confirm against upstream.
  SmallVector<Instruction *, 16> PointerComparisonsOrSubtracts;

  // Fill the set of memory operations to instrument.
  for (auto &BB : F) {
    AllBlocks.push_back(&BB);
    TempsToInstrument.clear();
    int NumInsnsPerBB = 0;
    for (auto &Inst : BB) {
      if (LooksLikeCodeInBug11395(&Inst)) return false;
      // Skip instructions inserted by another instrumentation.
      if (Inst.hasMetadata(LLVMContext::MD_nosanitize))
        continue;
      SmallVector<InterestingMemoryOperand, 1> InterestingOperands;
      getInterestingMemoryOperands(&Inst, InterestingOperands);

      if (!InterestingOperands.empty()) {
        for (auto &Operand : InterestingOperands) {
          if (ClOpt && ClOptSameTemp) {
            Value *Ptr = Operand.getPtr();
            // If we have a mask, skip instrumentation if we've already
            // instrumented the full object. But don't add to TempsToInstrument
            // because we might get another load/store with a different mask.
            if (Operand.MaybeMask) {
              if (TempsToInstrument.count(Ptr))
                continue; // We've seen this (whole) temp in the current BB.
            } else {
              if (!TempsToInstrument.insert(Ptr).second)
                continue; // We've seen this temp in the current BB.
            }
          }
          OperandsToInstrument.push_back(Operand);
          NumInsnsPerBB++;
        }
      // NOTE(review): the rest of this else-if condition appears to be
      // truncated in this copy of the file — confirm against upstream.
      } else if (((ClInvalidPointerPairs || ClInvalidPointerCmp) &&
        PointerComparisonsOrSubtracts.push_back(&Inst);
      } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst)) {
        // ok, take it.
        IntrinToInstrument.push_back(MI);
        NumInsnsPerBB++;
      } else {
        if (auto *CB = dyn_cast<CallBase>(&Inst)) {
          // A call inside BB.
          TempsToInstrument.clear();
          if (CB->doesNotReturn())
            NoReturnCalls.push_back(CB);
        }
        // NOTE(review): the body of this if appears to be truncated here —
        // confirm against upstream.
        if (CallInst *CI = dyn_cast<CallInst>(&Inst))
      }
      // Cap the number of instrumented sites per basic block.
      if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB) break;
    }
  }

  // NOTE(review): the threshold operand of this comparison appears to be
  // truncated in this copy of the file — confirm against upstream.
  bool UseCalls = (ClInstrumentationWithCallsThreshold >= 0 &&
                   OperandsToInstrument.size() + IntrinToInstrument.size() >
  const DataLayout &DL = F.getParent()->getDataLayout();
  ObjectSizeOpts ObjSizeOpts;
  ObjSizeOpts.RoundToAlign = true;
  ObjectSizeOffsetVisitor ObjSizeVis(DL, TLI, F.getContext(), ObjSizeOpts);

  // Instrument.
  int NumInstrumented = 0;
  for (auto &Operand : OperandsToInstrument) {
    if (!suppressInstrumentationSiteForDebug(NumInstrumented))
      instrumentMop(ObjSizeVis, Operand, UseCalls,
                    F.getParent()->getDataLayout());
    FunctionModified = true;
  }
  for (auto *Inst : IntrinToInstrument) {
    if (!suppressInstrumentationSiteForDebug(NumInstrumented))
      instrumentMemIntrinsic(Inst);
    FunctionModified = true;
  }

  FunctionStackPoisoner FSP(F, *this);
  bool ChangedStack = FSP.runOnFunction();

  // We must unpoison the stack before NoReturn calls (throw, _exit, etc).
  // See e.g. https://github.com/google/sanitizers/issues/37
  for (auto *CI : NoReturnCalls) {
    IRBuilder<> IRB(CI);
    IRB.CreateCall(AsanHandleNoReturnFunc, {});
  }

  for (auto *Inst : PointerComparisonsOrSubtracts) {
    instrumentPointerComparisonOrSubtraction(Inst);
    FunctionModified = true;
  }

  if (ChangedStack || !NoReturnCalls.empty())
    FunctionModified = true;

  LLVM_DEBUG(dbgs() << "ASAN done instrumenting: " << FunctionModified << " "
                    << F << "\n");

  return FunctionModified;
}
2782 
2783 // Workaround for bug 11395: we don't want to instrument stack in functions
2784 // with large assembly blobs (32-bit only), otherwise reg alloc may crash.
2785 // FIXME: remove once the bug 11395 is fixed.
2786 bool AddressSanitizer::LooksLikeCodeInBug11395(Instruction *I) {
2787  if (LongSize != 32) return false;
2788  CallInst *CI = dyn_cast<CallInst>(I);
2789  if (!CI || !CI->isInlineAsm()) return false;
2790  if (CI->arg_size() <= 5)
2791  return false;
2792  // We have inline assembly with quite a few arguments.
2793  return true;
2794 }
2795 
// Declare the runtime callbacks the stack poisoner may emit: fake-stack
// malloc/free per size class, use-after-scope poison/unpoison, the
// __asan_set_shadow_XX family, and dynamic-alloca poison/unpoison.
void FunctionStackPoisoner::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(*C);
  if (ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always ||
      ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Runtime) {
    const char *MallocNameTemplate =
        ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always
    // NOTE(review): both branches of the conditional expression above appear
    // to be truncated in this copy of the file — confirm against upstream.
    for (int Index = 0; Index <= kMaxAsanStackMallocSizeClass; Index++) {
      std::string Suffix = itostr(Index);
      // One malloc/free pair per fake-stack size class, name-suffixed by the
      // class index.
      AsanStackMallocFunc[Index] = M.getOrInsertFunction(
          MallocNameTemplate + Suffix, IntptrTy, IntptrTy);
      AsanStackFreeFunc[Index] =
          M.getOrInsertFunction(kAsanStackFreeNameTemplate + Suffix,
                                IRB.getVoidTy(), IntptrTy, IntptrTy);
    }
  }
  if (ASan.UseAfterScope) {
    AsanPoisonStackMemoryFunc = M.getOrInsertFunction(
        kAsanPoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy);
    AsanUnpoisonStackMemoryFunc = M.getOrInsertFunction(
        kAsanUnpoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy);
  }

  // Declare __asan_set_shadow_XX for each shadow byte value the poisoner
  // writes (redzone markers and scope markers).
  for (size_t Val : {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0xf1, 0xf2,
                     0xf3, 0xf5, 0xf8}) {
    std::ostringstream Name;
    // NOTE(review): the line streaming the callback-name prefix into Name
    // appears to be missing in this copy of the file — confirm upstream.
    Name << std::setw(2) << std::setfill('0') << std::hex << Val;
    AsanSetShadowFunc[Val] =
        M.getOrInsertFunction(Name.str(), IRB.getVoidTy(), IntptrTy, IntptrTy);
  }

  AsanAllocaPoisonFunc = M.getOrInsertFunction(
      kAsanAllocaPoison, IRB.getVoidTy(), IntptrTy, IntptrTy);
  AsanAllocasUnpoisonFunc = M.getOrInsertFunction(
      kAsanAllocasUnpoison, IRB.getVoidTy(), IntptrTy, IntptrTy);
}
2834 
// Emit inline stores writing ShadowBytes[Begin, End) to shadow memory at
// ShadowBase + offset, coalescing adjacent bytes into the widest store that
// fits (up to 8 bytes, capped at the target pointer size).
void FunctionStackPoisoner::copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
                                               ArrayRef<uint8_t> ShadowBytes,
                                               size_t Begin, size_t End,
                                               IRBuilder<> &IRB,
                                               Value *ShadowBase) {
  if (Begin >= End)
    return;

  const size_t LargestStoreSizeInBytes =
      std::min<size_t>(sizeof(uint64_t), ASan.LongSize / 8);

  const bool IsLittleEndian = F.getParent()->getDataLayout().isLittleEndian();

  // Poison the given range in shadow using the largest store size, without
  // leading and trailing zeros in ShadowMask. Zeros never change, so they
  // need neither poisoning nor up-poisoning. Still we don't mind if some of
  // them get into a middle of a store.
  for (size_t i = Begin; i < End;) {
    if (!ShadowMask[i]) {
      assert(!ShadowBytes[i]);
      ++i;
      continue;
    }

    size_t StoreSizeInBytes = LargestStoreSizeInBytes;
    // Fit store size into the range.
    while (StoreSizeInBytes > End - i)
      StoreSizeInBytes /= 2;

    // Minimize store size by trimming trailing zeros.
    for (size_t j = StoreSizeInBytes - 1; j && !ShadowMask[i + j]; --j) {
      while (j <= StoreSizeInBytes / 2)
        StoreSizeInBytes /= 2;
    }

    // Pack the shadow bytes into one integer, honoring target endianness so
    // the store lays them out in memory in ascending-offset order.
    uint64_t Val = 0;
    for (size_t j = 0; j < StoreSizeInBytes; j++) {
      if (IsLittleEndian)
        Val |= (uint64_t)ShadowBytes[i + j] << (8 * j);
      else
        Val = (Val << 8) | ShadowBytes[i + j];
    }

    // Shadow addresses are integers here; cast to a pointer of the store's
    // width. Shadow memory carries no alignment guarantee, hence Align(1).
    Value *Ptr = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i));
    Value *Poison = IRB.getIntN(StoreSizeInBytes * 8, Val);
    IRB.CreateAlignedStore(
        Poison, IRB.CreateIntToPtr(Ptr, Poison->getType()->getPointerTo()),
        Align(1));

    i += StoreSizeInBytes;
  }
}
2887 
// Convenience overload: copy the entire ShadowMask/ShadowBytes range.
void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask,
                                         ArrayRef<uint8_t> ShadowBytes,
                                         IRBuilder<> &IRB, Value *ShadowBase) {
  copyToShadow(ShadowMask, ShadowBytes, 0, ShadowMask.size(), IRB, ShadowBase);
}
2893 
// Write ShadowBytes[Begin, End) to shadow memory. Runs of one value that are
// at least ClMaxInlinePoisoningSize long are emitted as a single call to the
// matching __asan_set_shadow_<v> helper; everything in between goes through
// copyToShadowInline.
void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask,
                                         ArrayRef<uint8_t> ShadowBytes,
                                         size_t Begin, size_t End,
                                         IRBuilder<> &IRB, Value *ShadowBase) {
  assert(ShadowMask.size() == ShadowBytes.size());
  // Everything before Done has already been emitted (inline or via a call).
  size_t Done = Begin;
  for (size_t i = Begin, j = Begin + 1; i < End; i = j++) {
    if (!ShadowMask[i]) {
      assert(!ShadowBytes[i]);
      continue;
    }
    uint8_t Val = ShadowBytes[i];
    // Only some shadow values have a runtime helper; others stay inline.
    if (!AsanSetShadowFunc[Val])
      continue;

    // Skip same values.
    for (; j < End && ShadowMask[j] && Val == ShadowBytes[j]; ++j) {
    }

    if (j - i >= ClMaxInlinePoisoningSize) {
      // Flush the pending [Done, i) prefix inline, then cover the identical
      // run [i, j) with a single runtime call.
      copyToShadowInline(ShadowMask, ShadowBytes, Done, i, IRB, ShadowBase);
      IRB.CreateCall(AsanSetShadowFunc[Val],
                     {IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i)),
                      ConstantInt::get(IntptrTy, j - i)});
      Done = j;
    }
  }

  // Emit whatever remains after the last runtime call.
  copyToShadowInline(ShadowMask, ShadowBytes, Done, End, IRB, ShadowBase);
}
2924 
2925 // Fake stack allocator (asan_fake_stack.h) has 11 size classes
2926 // for every power of 2 from kMinStackMallocSize to kMaxAsanStackMallocSizeClass
2927 static int StackMallocSizeClass(uint64_t LocalStackSize) {
2928  assert(LocalStackSize <= kMaxStackMallocSize);
2929  uint64_t MaxSize = kMinStackMallocSize;
2930  for (int i = 0;; i++, MaxSize *= 2)
2931  if (LocalStackSize <= MaxSize) return i;
2932  llvm_unreachable("impossible LocalStackSize");
2933 }
2934 
2935 void FunctionStackPoisoner::copyArgsPassedByValToAllocas() {
2936  Instruction *CopyInsertPoint = &F.front().front();
2937  if (CopyInsertPoint == ASan.LocalDynamicShadow) {
2938  // Insert after the dynamic shadow location is determined
2939  CopyInsertPoint = CopyInsertPoint->getNextNode();
2940  assert(CopyInsertPoint);
2941  }
2942  IRBuilder<> IRB(CopyInsertPoint);
2943  const DataLayout &DL = F.getParent()->getDataLayout();
2944  for (Argument &Arg : F.args()) {
2945  if (Arg.hasByValAttr()) {
2946  Type *Ty = Arg.getParamByValType();
2947  const Align Alignment =
2948  DL.getValueOrABITypeAlignment(Arg.getParamAlign(), Ty);
2949 
2950  AllocaInst *AI = IRB.CreateAlloca(
2951  Ty, nullptr,
2952  (Arg.hasName() ? Arg.getName() : "Arg" + Twine(Arg.getArgNo())) +
2953  ".byval");
2954  AI->setAlignment(Alignment);
2955  Arg.replaceAllUsesWith(AI);
2956 
2957  uint64_t AllocSize = DL.getTypeAllocSize(Ty);
2958  IRB.CreateMemCpy(AI, Alignment, &Arg, Alignment, AllocSize);
2959  }
2960  }
2961 }
2962 
2963 PHINode *FunctionStackPoisoner::createPHI(IRBuilder<> &IRB, Value *Cond,
2964  Value *ValueIfTrue,
2965  Instruction *ThenTerm,
2966  Value *ValueIfFalse) {
2967  PHINode *PHI = IRB.CreatePHI(IntptrTy, 2);
2968  BasicBlock *CondBlock = cast<Instruction>(Cond)->getParent();
2969  PHI->addIncoming(ValueIfFalse, CondBlock);
2970  BasicBlock *ThenBlock = ThenTerm->getParent();
2971  PHI->addIncoming(ValueIfTrue, ThenBlock);
2972  return PHI;
2973 }
2974 
// Reserve stack space for the combined ASan frame described by L and return
// its base address as an IntptrTy integer.
Value *FunctionStackPoisoner::createAllocaForLayout(
    IRBuilder<> &IRB, const ASanStackFrameLayout &L, bool Dynamic) {
  AllocaInst *Alloca;
  if (Dynamic) {
    // NOTE(review): the size operand of this alloca appears to be truncated
    // in this copy of the file — confirm against upstream.
    Alloca = IRB.CreateAlloca(IRB.getInt8Ty(),
                              "MyAlloca");
  } else {
    // Static frame: a fixed-size i8 array in the entry block.
    Alloca = IRB.CreateAlloca(ArrayType::get(IRB.getInt8Ty(), L.FrameSize),
                              nullptr, "MyAlloca");
    assert(Alloca->isStaticAlloca());
  }
  // ClRealignStack must be a power of two for use as an alignment.
  assert((ClRealignStack & (ClRealignStack - 1)) == 0);
  uint64_t FrameAlignment = std::max(L.FrameAlignment, uint64_t(ClRealignStack));
  Alloca->setAlignment(Align(FrameAlignment));
  return IRB.CreatePointerCast(Alloca, IntptrTy);
}
2992 
2993 void FunctionStackPoisoner::createDynamicAllocasInitStorage() {
2994  BasicBlock &FirstBB = *F.begin();
2995  IRBuilder<> IRB(dyn_cast<Instruction>(FirstBB.begin()));
2996  DynamicAllocaLayout = IRB.CreateAlloca(IntptrTy, nullptr);
2997  IRB.CreateStore(Constant::getNullValue(IntptrTy), DynamicAllocaLayout);
2998  DynamicAllocaLayout->setAlignment(Align(32));
2999 }
3000 
3001 void FunctionStackPoisoner::processDynamicAllocas() {
3002  if (!ClInstrumentDynamicAllocas || DynamicAllocaVec.empty()) {
3003  assert(DynamicAllocaPoisonCallVec.empty());
3004  return;
3005  }
3006 
3007  // Insert poison calls for lifetime intrinsics for dynamic allocas.
3008  for (const auto &APC : DynamicAllocaPoisonCallVec) {
3009  assert(APC.InsBefore);
3010  assert(APC.AI);
3011  assert(ASan.isInterestingAlloca(*APC.AI));
3012  assert(!APC.AI->isStaticAlloca());
3013 
3014  IRBuilder<> IRB(APC.InsBefore);
3015  poisonAlloca(APC.AI, APC.Size, IRB, APC.DoPoison);
3016  // Dynamic allocas will be unpoisoned unconditionally below in
3017  // unpoisonDynamicAllocas.
3018  // Flag that we need unpoison static allocas.
3019  }
3020 
3021  // Handle dynamic allocas.
3022  createDynamicAllocasInitStorage();
3023  for (auto &AI : DynamicAllocaVec)
3024  handleDynamicAllocaCall(AI);
3025  unpoisonDynamicAllocas();
3026 }
3027 
/// Collect instructions in the entry block after \p InsBefore which initialize
/// permanent storage for a function argument. These instructions must remain in
/// the entry block so that uninitialized values do not appear in backtraces. An
/// added benefit is that this conserves spill slots. This does not move stores
/// before instrumented / "interesting" allocas.
// NOTE(review): the first line of this function's signature (return type and
// name) appears to be truncated in this copy of the file — confirm upstream.
    AddressSanitizer &ASan, Instruction &InsBefore,
    SmallVectorImpl<Instruction *> &InitInsts) {
  Instruction *Start = InsBefore.getNextNonDebugInstruction();
  for (Instruction *It = Start; It; It = It->getNextNonDebugInstruction()) {
    // Argument initialization looks like:
    // 1) store <Argument>, <Alloca> OR
    // 2) <CastArgument> = cast <Argument> to ...
    //    store <CastArgument> to <Alloca>
    // Do not consider any other kind of instruction.
    //
    // Note: This covers all known cases, but may not be exhaustive. An
    // alternative to pattern-matching stores is to DFS over all Argument uses:
    // this might be more general, but is probably much more complicated.
    if (isa<AllocaInst>(It) || isa<CastInst>(It))
      continue;
    if (auto *Store = dyn_cast<StoreInst>(It)) {
      // The store destination must be an alloca that isn't interesting for
      // ASan to instrument. These are moved up before InsBefore, and they're
      // not interesting because allocas for arguments can be mem2reg'd.
      auto *Alloca = dyn_cast<AllocaInst>(Store->getPointerOperand());
      if (!Alloca || ASan.isInterestingAlloca(*Alloca))
        continue;

      Value *Val = Store->getValueOperand();
      bool IsDirectArgInit = isa<Argument>(Val);
      bool IsArgInitViaCast =
          isa<CastInst>(Val) &&
          isa<Argument>(cast<CastInst>(Val)->getOperand(0)) &&
          // Check that the cast appears directly before the store. Otherwise
          // moving the cast before InsBefore may break the IR.
          Val == It->getPrevNonDebugInstruction();
      bool IsArgInit = IsDirectArgInit || IsArgInitViaCast;
      if (!IsArgInit)
        continue;

      // Record the cast (if any) together with its store so both move as one.
      if (IsArgInitViaCast)
        InitInsts.push_back(cast<Instruction>(Val));
      InitInsts.push_back(Store);
      continue;
    }

    // Do not reorder past unknown instructions: argument initialization should
    // only involve casts and stores.
    return;
  }
}
3080 
3081 void FunctionStackPoisoner::processStaticAllocas() {
3082  if (AllocaVec.empty()) {
3083  assert(StaticAllocaPoisonCallVec.empty());
3084  return;
3085  }
3086 
3087  int StackMallocIdx = -1;
3088  DebugLoc EntryDebugLocation;
3089  if (auto SP = F.getSubprogram())
3090  EntryDebugLocation =
3091  DILocation::get(SP->getContext(), SP->getScopeLine(), 0, SP);
3092 
3093  Instruction *InsBefore = AllocaVec[0];
3094  IRBuilder<> IRB(InsBefore);
3095 
3096  // Make sure non-instrumented allocas stay in the entry block. Otherwise,
3097  // debug info is broken, because only entry-block allocas are treated as
3098  // regular stack slots.
3099  auto InsBeforeB = InsBefore->getParent();
3100  assert(InsBeforeB == &F.getEntryBlock());
3101  for (auto *AI : StaticAllocasToMoveUp)
3102  if (AI->getParent() == InsBeforeB)
3103  AI->moveBefore(InsBefore);
3104 
3105  // Move stores of arguments into entry-block allocas as well. This prevents
3106  // extra stack slots from being generated (to house the argument values until
3107  // they can be stored into the allocas). This also prevents uninitialized
3108  // values from being shown in backtraces.
3109  SmallVector<Instruction *, 8> ArgInitInsts;
3110  findStoresToUninstrumentedArgAllocas(ASan, *InsBefore, ArgInitInsts);
3111  for (Instruction *ArgInitInst : ArgInitInsts)
3112  ArgInitInst->moveBefore(InsBefore);
3113 
3114  // If we have a call to llvm.localescape, keep it in the entry block.
3115  if (LocalEscapeCall) LocalEscapeCall->moveBefore(InsBefore);
3116 
3118  SVD.reserve(AllocaVec.size());
3119  for (AllocaInst *AI : AllocaVec) {
3121  ASan.getAllocaSizeInBytes(*AI),
3122  0,
3123  AI->getAlign().value(),
3124  AI,
3125  0,
3126  0};
3127  SVD.push_back(D);
3128  }
3129 
3130  // Minimal header size (left redzone) is 4 pointers,
3131  // i.e. 32 bytes on 64-bit platforms and 16 bytes in 32-bit platforms.
3132  uint64_t Granularity = 1ULL << Mapping.Scale;
3133  uint64_t MinHeaderSize = std::max((uint64_t)ASan.LongSize / 2, Granularity);
3134  const ASanStackFrameLayout &L =
3135  ComputeASanStackFrameLayout(SVD, Granularity, MinHeaderSize);
3136 
3137  // Build AllocaToSVDMap for ASanStackVariableDescription lookup.
3139  for (auto &Desc : SVD)
3140  AllocaToSVDMap[Desc.AI] = &Desc;
3141 
3142  // Update SVD with information from lifetime intrinsics.
3143  for (const auto &APC : StaticAllocaPoisonCallVec) {
3144  assert(APC.InsBefore);
3145  assert(APC.AI);
3146  assert(ASan.isInterestingAlloca(*APC.AI));
3147  assert(APC.AI->isStaticAlloca());
3148 
3149  ASanStackVariableDescription &Desc = *AllocaToSVDMap[APC.AI];
3150  Desc.LifetimeSize = Desc.Size;
3151  if (const DILocation *FnLoc = EntryDebugLocation.get()) {
3152  if (const DILocation *LifetimeLoc = APC.InsBefore->getDebugLoc().get()) {
3153  if (LifetimeLoc->getFile() == FnLoc->getFile())
3154  if (unsigned Line = LifetimeLoc->getLine())
3155  Desc.Line = std::min(Desc.Line ? Desc.Line : Line, Line);
3156  }
3157  }
3158  }
3159 
3160  auto DescriptionString = ComputeASanStackFrameDescription(SVD);
3161  LLVM_DEBUG(dbgs() << DescriptionString << " --- " << L.FrameSize << "\n");
3162  uint64_t LocalStackSize = L.FrameSize;
3163  bool DoStackMalloc =
3164  ASan.UseAfterReturn != AsanDetectStackUseAfterReturnMode::Never &&
3165  !ASan.CompileKernel && LocalStackSize <= kMaxStackMallocSize;
3166  bool DoDynamicAlloca = ClDynamicAllocaStack;
3167  // Don't do dynamic alloca or stack malloc if:
3168  // 1) There is inline asm: too often it makes assumptions on which registers
3169  // are available.
3170  // 2) There is a returns_twice call (typically setjmp), which is
3171  // optimization-hostile, and doesn't play well with introduced indirect
3172  // register-relative calculation of local variable addresses.
3173  DoDynamicAlloca &= !HasInlineAsm && !HasReturnsTwiceCall;
3174  DoStackMalloc &= !HasInlineAsm && !HasReturnsTwiceCall;
3175 
3176  Value *StaticAlloca =
3177  DoDynamicAlloca ? nullptr : createAllocaForLayout(IRB, L, false);
3178 
3179  Value *FakeStack;
3180  Value *LocalStackBase;
3181  Value *LocalStackBaseAlloca;
3182  uint8_t DIExprFlags = DIExpression::ApplyOffset;
3183 
3184  if (DoStackMalloc) {
3185  LocalStackBaseAlloca =
3186  IRB.CreateAlloca(IntptrTy, nullptr, "asan_local_stack_base");
3187  if (ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Runtime) {
3188  // void *FakeStack = __asan_option_detect_stack_use_after_return
3189  // ? __asan_stack_malloc_N(LocalStackSize)
3190  // : nullptr;
3191  // void *LocalStackBase = (FakeStack) ? FakeStack :
3192  // alloca(LocalStackSize);
3193  Constant *OptionDetectUseAfterReturn = F.getParent()->getOrInsertGlobal(
3195  Value *UseAfterReturnIsEnabled = IRB.CreateICmpNE(
3196  IRB.CreateLoad(IRB.getInt32Ty(), OptionDetectUseAfterReturn),
3198  Instruction *Term =
3199  SplitBlockAndInsertIfThen(UseAfterReturnIsEnabled, InsBefore, false);
3200  IRBuilder<> IRBIf(Term);
3201  StackMallocIdx = StackMallocSizeClass(LocalStackSize);
3202  assert(StackMallocIdx <= kMaxAsanStackMallocSizeClass);
3203  Value *FakeStackValue =
3204  IRBIf.CreateCall(AsanStackMallocFunc[StackMallocIdx],
3205  ConstantInt::get(IntptrTy, LocalStackSize));
3206  IRB.SetInsertPoint(InsBefore);
3207  FakeStack = createPHI(IRB, UseAfterReturnIsEnabled, FakeStackValue, Term,
3208  ConstantInt::get(IntptrTy, 0));
3209  } else {
3210  // assert(ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode:Always)
3211  // void *FakeStack = __asan_stack_malloc_N(LocalStackSize);
3212  // void *LocalStackBase = (FakeStack) ? FakeStack :
3213  // alloca(LocalStackSize);
3214  StackMallocIdx = StackMallocSizeClass(LocalStackSize);
3215  FakeStack = IRB.CreateCall(AsanStackMallocFunc[StackMallocIdx],
3216  ConstantInt::get(IntptrTy, LocalStackSize));
3217  }
3218  Value *NoFakeStack =
3219  IRB.CreateICmpEQ(FakeStack, Constant::getNullValue(IntptrTy));
3220  Instruction *Term =
3221  SplitBlockAndInsertIfThen(NoFakeStack, InsBefore, false);
3222  IRBuilder<> IRBIf(Term);
3223  Value *AllocaValue =
3224  DoDynamicAlloca ? createAllocaForLayout(IRBIf, L, true) : StaticAlloca;
3225 
3226  IRB.SetInsertPoint(InsBefore);
3227  LocalStackBase = createPHI(IRB, NoFakeStack, AllocaValue, Term, FakeStack);
3228  IRB.CreateStore(LocalStackBase, LocalStackBaseAlloca);
3229  DIExprFlags |= DIExpression::DerefBefore;
3230  } else {
3231  // void *FakeStack = nullptr;
3232  // void *LocalStackBase = alloca(LocalStackSize);
3233  FakeStack = ConstantInt::get(IntptrTy, 0);
3234  LocalStackBase =
3235  DoDynamicAlloca ? createAllocaForLayout(IRB, L, true) : StaticAlloca;
3236  LocalStackBaseAlloca = LocalStackBase;
3237  }
3238 
3239  // It shouldn't matter whether we pass an `alloca` or a `ptrtoint` as the
3240  // dbg.declare address opereand, but passing a `ptrtoint` seems to confuse
3241  // later passes and can result in dropped variable coverage in debug info.
3242  Value *LocalStackBaseAllocaPtr =
3243  isa<PtrToIntInst>(LocalStackBaseAlloca)
3244  ? cast<PtrToIntInst>(LocalStackBaseAlloca)->getPointerOperand()
3245  : LocalStackBaseAlloca;
3246  assert(isa<AllocaInst>(LocalStackBaseAllocaPtr) &&
3247  "Variable descriptions relative to ASan stack base will be dropped");
3248 
3249  // Replace Alloca instructions with base+offset.
3250  for (const auto &Desc : SVD) {
3251  AllocaInst *AI = Desc.AI;
3252  replaceDbgDeclare(AI, LocalStackBaseAllocaPtr, DIB, DIExprFlags,
3253  Desc.Offset);
3254  Value *NewAllocaPtr = IRB.CreateIntToPtr(
3255  IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, Desc.Offset)),
3256  AI->getType());
3257  AI->replaceAllUsesWith(NewAllocaPtr);
3258  }
3259 
3260  // The left-most redzone has enough space for at least 4 pointers.
3261  // Write the Magic value to redzone[0].
3262  Value *BasePlus0 = IRB.CreateIntToPtr(LocalStackBase, IntptrPtrTy);
3264  BasePlus0);
3265  // Write the frame description constant to redzone[1].
3266  Value *BasePlus1 = IRB.CreateIntToPtr(
3267  IRB.CreateAdd(LocalStackBase,
3268  ConstantInt::get(IntptrTy, ASan.LongSize / 8)),
3269  IntptrPtrTy);
3270  GlobalVariable *StackDescriptionGlobal =
3271  createPrivateGlobalForString(*F.getParent(), DescriptionString,
3272  /*AllowMerging*/ true, kAsanGenPrefix);
3273  Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal, IntptrTy);
3274  IRB.CreateStore(Description, BasePlus1);
3275  // Write the PC to redzone[2].
3276  Value *BasePlus2 = IRB.CreateIntToPtr(
3277  IRB.CreateAdd(LocalStackBase,
3278  ConstantInt::get(IntptrTy, 2 * ASan.LongSize / 8)),
3279  IntptrPtrTy);
3280  IRB.CreateStore(IRB.CreatePointerCast(&F, IntptrTy), BasePlus2);
3281 
3282  const auto &ShadowAfterScope = GetShadowBytesAfterScope(SVD, L);
3283 
3284  // Poison the stack red zones at the entry.
3285  Value *ShadowBase = ASan.memToShadow(LocalStackBase, IRB);
3286  // As mask we must use most poisoned case: red zones and after scope.
3287  // As bytes we can use either the same or just red zones only.
3288  copyToShadow(ShadowAfterScope, ShadowAfterScope, IRB, ShadowBase);
3289 
3290  if (!StaticAllocaPoisonCallVec.empty()) {
3291  const auto &ShadowInScope = GetShadowBytes(SVD, L);
3292 
3293  // Poison static allocas near lifetime intrinsics.
3294  for (const auto &APC : StaticAllocaPoisonCallVec) {
3295  const ASanStackVariableDescription &Desc = *AllocaToSVDMap[APC.AI];
3296  assert(Desc.Offset % L.Granularity == 0);
3297  size_t Begin = Desc.Offset / L.Granularity;
3298  size_t End = Begin + (APC.Size + L.Granularity - 1) / L.Granularity;
3299 
3300  IRBuilder<> IRB(APC.InsBefore);
3301  copyToShadow(ShadowAfterScope,
3302  APC.DoPoison ? ShadowAfterScope : ShadowInScope, Begin, End,
3303  IRB, ShadowBase);
3304  }
3305  }
3306 
3307  SmallVector<uint8_t, 64> ShadowClean(ShadowAfterScope.size(), 0);
3308  SmallVector<uint8_t, 64> ShadowAfterReturn;
3309 
3310  // (Un)poison the stack before all ret instructions.
3311  for (Instruction *Ret : RetVec) {
3312  IRBuilder<> IRBRet(Ret);
3313  // Mark the current frame as retired.
3314  IRBRet.CreateStore(ConstantInt::get(IntptrTy, kRetiredStackFrameMagic),
3315  BasePlus0);
3316  if (DoStackMalloc) {
3317  assert(StackMallocIdx >= 0);
3318  // if FakeStack != 0 // LocalStackBase == FakeStack
3319  // // In use-after-return mode, poison the whole stack frame.
3320  // if StackMallocIdx <= 4
3321  // // For small sizes inline the whole thing:
3322  // memset(ShadowBase, kAsanStackAfterReturnMagic, ShadowSize);
3323  // **SavedFlagPtr(FakeStack) = 0
3324  // else
3325  // __asan_stack_free_N(FakeStack, LocalStackSize)
3326  // else
3327  // <This is not a fake stack; unpoison the redzones>
3328  Value *Cmp =
3329  IRBRet.CreateICmpNE(FakeStack, Constant::getNullValue(IntptrTy));
3330  Instruction *ThenTerm, *ElseTerm;
3331  SplitBlockAndInsertIfThenElse(Cmp, Ret, &ThenTerm, &ElseTerm);
3332 
3333  IRBuilder<> IRBPoison(ThenTerm);
3334  if (StackMallocIdx <= 4) {
3335  int ClassSize = kMinStackMallocSize << StackMallocIdx;
3336  ShadowAfterReturn.resize(ClassSize / L.Granularity,
3338  copyToShadow(ShadowAfterReturn, ShadowAfterReturn, IRBPoison,
3339  ShadowBase);
3340  Value *SavedFlagPtrPtr = IRBPoison.CreateAdd(
3341  FakeStack,
3342  ConstantInt::get(IntptrTy, ClassSize - ASan.LongSize / 8));
3343  Value *SavedFlagPtr = IRBPoison.CreateLoad(
3344  IntptrTy, IRBPoison.CreateIntToPtr(SavedFlagPtrPtr, IntptrPtrTy));
3345  IRBPoison.CreateStore(
3346  Constant::getNullValue(IRBPoison.getInt8Ty()),
3347  IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getInt8PtrTy()));
3348  } else {
3349  // For larger frames call __asan_stack_free_*.
3350  IRBPoison.CreateCall(
3351  AsanStackFreeFunc[StackMallocIdx],
3352  {FakeStack, ConstantInt::get(IntptrTy, LocalStackSize)});
3353  }
3354 
3355  IRBuilder<> IRBElse(ElseTerm);
3356  copyToShadow(ShadowAfterScope, ShadowClean, IRBElse, ShadowBase);
3357  } else {
3358  copyToShadow(ShadowAfterScope, ShadowClean, IRBRet, ShadowBase);
3359  }
3360  }
3361 
3362  // We are done. Remove the old unused alloca instructions.
3363  for (auto *AI : AllocaVec)
3364  AI->eraseFromParent();
3365 }
3366 
3367 void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size,
3368  IRBuilder<> &IRB, bool DoPoison) {
3369  // For now just insert the call to ASan runtime.
3370  Value *AddrArg = IRB.CreatePointerCast(V, IntptrTy);
3371  Value *SizeArg = ConstantInt::get(IntptrTy, Size);
3372  IRB.CreateCall(
3373  DoPoison ? AsanPoisonStackMemoryFunc : AsanUnpoisonStackMemoryFunc,
3374  {AddrArg, SizeArg});
3375 }
3376 
3377 // Handling llvm.lifetime intrinsics for a given %alloca:
3378 // (1) collect all llvm.lifetime.xxx(%size, %value) describing the alloca.
3379 // (2) if %size is constant, poison memory for llvm.lifetime.end (to detect
3380 // invalid accesses) and unpoison it for llvm.lifetime.start (the memory
3381 // could be poisoned by previous llvm.lifetime.end instruction, as the
3382 // variable may go in and out of scope several times, e.g. in loops).
3383 // (3) if we poisoned at least one %alloca in a function,
3384 // unpoison the whole stack frame at function exit.
3385 void FunctionStackPoisoner::handleDynamicAllocaCall(AllocaInst *AI) {
3386  IRBuilder<> IRB(AI);
3387 
3388  const Align Alignment = std::max(Align(kAllocaRzSize), AI->getAlign());
3389  const uint64_t AllocaRedzoneMask = kAllocaRzSize - 1;
3390 
3391  Value *Zero = Constant::getNullValue(IntptrTy);
3392  Value *AllocaRzSize = ConstantInt::get(IntptrTy, kAllocaRzSize);
3393  Value *AllocaRzMask = ConstantInt::get(IntptrTy, AllocaRedzoneMask);
3394 
3395  // Since we need to extend alloca with additional memory to locate
3396  // redzones, and OldSize is number of allocated blocks with
3397  // ElementSize size, get allocated memory size in bytes by
3398  // OldSize * ElementSize.
3399  const unsigned ElementSize =
3400  F.getParent()->getDataLayout().getTypeAllocSize(AI->getAllocatedType());
3401  Value *OldSize =
3402  IRB.CreateMul(IRB.CreateIntCast(AI->getArraySize(), IntptrTy, false),
3403  ConstantInt::get(IntptrTy, ElementSize));
3404 
3405  // PartialSize = OldSize % 32
3406  Value *PartialSize = IRB.CreateAnd(OldSize, AllocaRzMask);
3407 
3408  // Misalign = kAllocaRzSize - PartialSize;
3409  Value *Misalign = IRB.CreateSub(AllocaRzSize, PartialSize);
3410 
3411  // PartialPadding = Misalign != kAllocaRzSize ? Misalign : 0;
3412  Value *Cond = IRB.CreateICmpNE(Misalign, AllocaRzSize);
3413  Value *PartialPadding = IRB.CreateSelect(Cond, Misalign, Zero);
3414 
3415  // AdditionalChunkSize = Alignment + PartialPadding + kAllocaRzSize
3416  // Alignment is added to locate left redzone, PartialPadding for possible
3417  // partial redzone and kAllocaRzSize for right redzone respectively.
3418  Value *AdditionalChunkSize = IRB.CreateAdd(
3419  ConstantInt::get(IntptrTy, Alignment.value() + kAllocaRzSize),
3420  PartialPadding);
3421 
3422  Value *NewSize = IRB.CreateAdd(OldSize, AdditionalChunkSize);
3423 
3424  // Insert new alloca with new NewSize and Alignment params.
3425  AllocaInst *NewAlloca = IRB.CreateAlloca(IRB.getInt8Ty(), NewSize);
3426  NewAlloca->setAlignment(Alignment);
3427 
3428  // NewAddress = Address + Alignment
3429  Value *NewAddress =
3430  IRB.CreateAdd(IRB.CreatePtrToInt(NewAlloca, IntptrTy),
3431  ConstantInt::get(IntptrTy, Alignment.value()));
3432 
3433  // Insert __asan_alloca_poison call for new created alloca.
3434  IRB.CreateCall(AsanAllocaPoisonFunc, {NewAddress, OldSize});
3435 
3436  // Store the last alloca's address to DynamicAllocaLayout. We'll need this
3437  // for unpoisoning stuff.
3438  IRB.CreateStore(IRB.CreatePtrToInt(NewAlloca, IntptrTy), DynamicAllocaLayout);
3439 
3440  Value *NewAddressPtr = IRB.CreateIntToPtr(NewAddress, AI->getType());
3441 
3442  // Replace all uses of AddessReturnedByAlloca with NewAddressPtr.
3443  AI->replaceAllUsesWith(NewAddressPtr);
3444 
3445  // We are done. Erase old alloca from parent.
3446  AI->eraseFromParent();
3447 }
3448 
3449 // isSafeAccess returns true if Addr is always inbounds with respect to its
3450 // base object. For example, it is a field access or an array access with
3451 // constant inbounds index.
3452 bool AddressSanitizer::isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis,
3453  Value *Addr, uint64_t TypeSize) const {
3454  SizeOffsetType SizeOffset = ObjSizeVis.compute(Addr);
3455  if (!ObjSizeVis.bothKnown(SizeOffset)) return false;
3456  uint64_t Size = SizeOffset.first.getZExtValue();
3457  int64_t Offset = SizeOffset.second.getSExtValue();
3458  // Three checks are required to ensure safety:
3459  // . Offset >= 0 (since the offset is given from the base ptr)
3460  // . Size >= Offset (unsigned)
3461  // . Size - Offset >= NeededSize (unsigned)
3462  return Offset >= 0 && Size >= uint64_t(Offset) &&
3463  Size - uint64_t(Offset) >= TypeSize / 8;
3464 }
llvm::Check::Size
@ Size
Definition: FileCheck.h:77
llvm::MachO::S_CSTRING_LITERALS
@ S_CSTRING_LITERALS
S_CSTRING_LITERALS - Section with literal C strings.
Definition: MachO.h:131
llvm::Triple::DXContainer
@ DXContainer
Definition: Triple.h:282
i
i
Definition: README.txt:29
llvm::GlobalsAA
Analysis pass providing a never-invalidated alias analysis result.
Definition: GlobalsModRef.h:127
AddressSanitizer.h
llvm::PreservedAnalyses
A set of analyses that are preserved following a run of a transformation pass.
Definition: PassManager.h:152
llvm::M68kBeads::Term
@ Term
Definition: M68kBaseInfo.h:71
Instrumentation.h
llvm::Triple::riscv64
@ riscv64
Definition: Triple.h:76
llvm::SizeOffsetType
std::pair< APInt, APInt > SizeOffsetType
Definition: MemoryBuiltins.h:188
Int32Ty
IntegerType * Int32Ty
Definition: NVVMIntrRange.cpp:67
kRetiredStackFrameMagic
static const uintptr_t kRetiredStackFrameMagic
Definition: AddressSanitizer.cpp:128
llvm::Triple::Wasm
@ Wasm
Definition: Triple.h:287
ClInvalidPointerCmp
static cl::opt< bool > ClInvalidPointerCmp("asan-detect-invalid-pointer-cmp", cl::desc("Instrument <, <=, >, >= with pointer operands"), cl::Hidden, cl::init(false))
llvm::Argument
This class represents an incoming formal argument to a Function.
Definition: Argument.h:28
StackSafetyAnalysis.h
llvm::IRBuilderBase::CreateIntCast
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
Definition: IRBuilder.h:2054
llvm::Type::isSized
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition: Type.h:283
llvm::createPrivateGlobalForString
GlobalVariable * createPrivateGlobalForString(Module &M, StringRef Str, bool AllowMerging, const char *NamePrefix="")
Definition: Instrumentation.cpp:60
llvm::IRBuilderBase::getInt32Ty
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
Definition: IRBuilder.h:509
llvm::IRBuilderBase::CreateStore
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
Definition: IRBuilder.h:1695
llvm::IRBuilderBase::CreateICmpSGE
Value * CreateICmpSGE(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2127
MI
IRTranslator LLVM IR MI
Definition: IRTranslator.cpp:108
MathExtras.h
kDefaultShadowOffset64
static const uint64_t kDefaultShadowOffset64
Definition: AddressSanitizer.cpp:97
llvm::IRBuilderBase::SetInsertPoint
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition: IRBuilder.h:179
llvm::GlobalObject::setComdat
void setComdat(Comdat *C)
Definition: Globals.cpp:189
llvm
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
ClMemoryAccessCallbackPrefix
static cl::opt< std::string > ClMemoryAccessCallbackPrefix("asan-memory-access-callback-prefix", cl::desc("Prefix for memory access callbacks"), cl::Hidden, cl::init("__asan_"))
ClDebugMin
static cl::opt< int > ClDebugMin("asan-debug-min", cl::desc("Debug min inst"), cl::Hidden, cl::init(-1))
M
We currently emits eax Perhaps this is what we really should generate is Is imull three or four cycles eax eax The current instruction priority is based on pattern complexity The former is more complex because it folds a load so the latter will not be emitted Perhaps we should use AddedComplexity to give LEA32r a higher priority We should always try to match LEA first since the LEA matching code does some estimate to determine whether the match is profitable if we care more about code then imull is better It s two bytes shorter than movl leal On a Pentium M
Definition: README.txt:252
llvm::Instruction::getModule
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
Definition: Instruction.cpp:69
Pass
print lazy value Lazy Value Info Printer Pass
Definition: LazyValueInfo.cpp:1999
llvm::IRBuilderBase::getInt64Ty
IntegerType * getInt64Ty()
Fetch the type representing a 64-bit integer.
Definition: IRBuilder.h:514
Comdat.h
ClWithComdat
static cl::opt< bool > ClWithComdat("asan-with-comdat", cl::desc("Place ASan constructors in comdat sections"), cl::Hidden, cl::init(true))
llvm::ReturnInst
Return a value (possibly void), from a function.
Definition: Instructions.h:3052
ClInvalidPointerPairs
static cl::opt< bool > ClInvalidPointerPairs("asan-detect-invalid-pointer-pair", cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden, cl::init(false))
llvm::IRBuilderBase::CreateAlignedStore
StoreInst * CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, bool isVolatile=false)
Definition: IRBuilder.h:1718
llvm::StructType::get
static StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
Definition: Type.cpp:406
llvm::Triple::isMIPS32
bool isMIPS32() const
Tests whether the target is MIPS 32-bit (little and big endian).
Definition: Triple.h:854
llvm::StackSafetyGlobalInfo::isSafe
bool isSafe(const AllocaInst &AI) const
Definition: StackSafetyAnalysis.cpp:966
llvm::DataLayout
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:113
llvm::Intrinsic::getDeclaration
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:1481
Metadata.h
llvm::User::operands
op_range operands()
Definition: User.h:242
llvm::Comdat::ExactMatch
@ ExactMatch
The data referenced by the COMDAT must be the same.
Definition: Comdat.h:37
llvm::ConstantInt::getType
IntegerType * getType() const
getType - Specialize the getType() method to always return an IntegerType, which reduces the amount o...
Definition: Constants.h:173
PHI
Rewrite undef for PHI
Definition: AMDGPURewriteUndefForPHI.cpp:101
llvm::AllocaInst::getAlign
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:122
llvm::CallBase::doesNotReturn
bool doesNotReturn() const
Determine if the call cannot return.
Definition: InstrTypes.h:1883
kAsanOptionDetectUseAfterReturn
const char kAsanOptionDetectUseAfterReturn[]
Definition: AddressSanitizer.cpp:166
llvm::BasicBlock::getParent
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:104
IntrinsicInst.h
llvm::Type::isPointerTy
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:237
kPS_ShadowOffset64
static const uint64_t kPS_ShadowOffset64
Definition: AddressSanitizer.cpp:118
kDefaultShadowScale
static const uint64_t kDefaultShadowScale
Definition: AddressSanitizer.cpp:95
llvm::DIBuilder
Definition: DIBuilder.h:42
llvm::AnalysisManager::getResult
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:774
DebugInfoMetadata.h
isUnsupportedAMDGPUAddrspace
static bool isUnsupportedAMDGPUAddrspace(Value *Addr)
Definition: AddressSanitizer.cpp:1191
llvm::GlobalVariable::copyAttributesFrom
void copyAttributesFrom(const GlobalVariable *Src)
copyAttributesFrom - copy all additional attributes (those not needed to create a GlobalVariable) fro...
Definition: Globals.cpp:486
llvm::Function::getBasicBlockList
const BasicBlockListType & getBasicBlockList() const
Get the underlying elements of the Function...
Definition: Function.h:684
llvm::PassInfoMixin< AddressSanitizerPass >
llvm::Instruction::getNextNonDebugInstruction
const Instruction * getNextNonDebugInstruction(bool SkipPseudoOp=false) const
Return a pointer to the next non-debug instruction in the same basic block as 'this',...
Definition: Instruction.cpp:777
llvm::GlobalValue::dropLLVMManglingEscape
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1',...
Definition: GlobalValue.h:562
llvm::GlobalValue::HiddenVisibility
@ HiddenVisibility
The GV is hidden.
Definition: GlobalValue.h:64
kAsanStackMallocAlwaysNameTemplate
const char kAsanStackMallocAlwaysNameTemplate[]
Definition: AddressSanitizer.cpp:152
llvm::Function
Definition: Function.h:60
llvm::IRBuilderBase::CreatePtrToInt
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:1975
kNumberOfAccessSizes
static const size_t kNumberOfAccessSizes
Definition: AddressSanitizer.cpp:179
StringRef.h
llvm::ConstantStruct::get
static Constant * get(StructType *T, ArrayRef< Constant * > V)
Definition: Constants.cpp:1306
ClEnableKasan
static cl::opt< bool > ClEnableKasan("asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"), cl::Hidden, cl::init(false))
kAsanCtorAndDtorPriority
static const uint64_t kAsanCtorAndDtorPriority
Definition: AddressSanitizer.cpp:132
llvm::IntrinsicInst::getIntrinsicID
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
Definition: IntrinsicInst.h:53
llvm::kAsanStackUseAfterReturnMagic
static const int kAsanStackUseAfterReturnMagic
Definition: ASanStackFrameLayout.h:26
kNetBSDKasan_ShadowOffset64
static const uint64_t kNetBSDKasan_ShadowOffset64
Definition: AddressSanitizer.cpp:117
llvm::PointerType::get
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Definition: Type.cpp:727
llvm::Triple::isPS
bool isPS() const
Tests whether the target is the PS4 or PS5 platform.
Definition: Triple.h:720
llvm::GetShadowBytesAfterScope
SmallVector< uint8_t, 64 > GetShadowBytesAfterScope(const SmallVectorImpl< ASanStackVariableDescription > &Vars, const ASanStackFrameLayout &Layout)
Definition: ASanStackFrameLayout.cpp:133
llvm::Comdat::NoDeduplicate
@ NoDeduplicate
No deduplication is performed.
Definition: Comdat.h:39
llvm::AllocaInst::getType
PointerType * getType() const
Overload to return most specific pointer type.
Definition: Instructions.h:101
kMinStackMallocSize
static const size_t kMinStackMallocSize
Definition: AddressSanitizer.cpp:125
llvm::ilist_node_with_parent::getNextNode
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:289
ClMaxInsnsToInstrumentPerBB
static cl::opt< int > ClMaxInsnsToInstrumentPerBB("asan-max-ins-per-bb", cl::init(10000), cl::desc("maximal number of instructions to instrument in any given BB"), cl::Hidden)
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1199
Statistic.h
TypeSizeToSizeIndex
static size_t TypeSizeToSizeIndex(uint32_t TypeSize)
Definition: AddressSanitizer.cpp:1166
InlineAsm.h
llvm::CallBase::isInlineAsm
bool isInlineAsm() const
Check if this call is an inline asm statement.
Definition: InstrTypes.h:1465
llvm::AddressSanitizerPass::run
PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
Definition: AddressSanitizer.cpp:1138
llvm::InstVisitor::visitIntrinsicInst
RetTy visitIntrinsicInst(IntrinsicInst &I)
Definition: InstVisitor.h:219
llvm::GetShadowBytes
SmallVector< uint8_t, 64 > GetShadowBytes(const SmallVectorImpl< ASanStackVariableDescription > &Vars, const ASanStackFrameLayout &Layout)
Definition: ASanStackFrameLayout.cpp:115
llvm::createSanitizerCtor
Function * createSanitizerCtor(Module &M, StringRef CtorName)
Creates sanitizer constructor function.
Definition: ModuleUtils.cpp:125
ErrorHandling.h
llvm::GlobalValue::UnnamedAddr::None
@ None
ClDebugStack
static cl::opt< int > ClDebugStack("asan-debug-stack", cl::desc("debug stack"), cl::Hidden, cl::init(0))
llvm::Type::getPointerAddressSpace
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
Definition: DerivedTypes.h:729
llvm::IRBuilder<>
kLoongArch64_ShadowOffset64
static const uint64_t kLoongArch64_ShadowOffset64
Definition: AddressSanitizer.cpp:109
llvm::GlobalVariable
Definition: GlobalVariable.h:39
kNetBSD_ShadowOffset32
static const uint64_t kNetBSD_ShadowOffset32
Definition: AddressSanitizer.cpp:115
kMIPS64_ShadowOffset64
static const uint64_t kMIPS64_ShadowOffset64
Definition: AddressSanitizer.cpp:107
llvm::FunctionType::get
static FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
Definition: Type.cpp:361
llvm::GlobalAlias
Definition: GlobalAlias.h:28
ValueTracking.h
Local.h
kAsanRegisterElfGlobalsName
const char kAsanRegisterElfGlobalsName[]
Definition: AddressSanitizer.cpp:141
isInterestingPointerSubtraction
static bool isInterestingPointerSubtraction(Instruction *I)
Definition: AddressSanitizer.cpp:1366
llvm::InstVisitor::visitCallBase
RetTy visitCallBase(CallBase &I)
Definition: InstVisitor.h:267
llvm::IRBuilderBase::CreateOr
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1403
ClRedzoneByvalArgs
static cl::opt< bool > ClRedzoneByvalArgs("asan-redzone-byval-args", cl::desc("Create redzones for byval " "arguments (extra copy " "required)"), cl::Hidden, cl::init(true))
llvm::PreservedAnalyses::abandon
void abandon()
Mark an analysis as abandoned.
Definition: PassManager.h:206
llvm::Triple
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
llvm::InterestingMemoryOperand
Definition: AddressSanitizerCommon.h:25
FAM
FunctionAnalysisManager FAM
Definition: PassBuilderBindings.cpp:59
GlobalsModRef.h
ClForceExperiment
static cl::opt< uint32_t > ClForceExperiment("asan-force-experiment", cl::desc("Force optimization experiment (for testing)"), cl::Hidden, cl::init(0))
llvm::cl::Hidden
@ Hidden
Definition: CommandLine.h:140
kAMDGPUAddressSharedName
const char kAMDGPUAddressSharedName[]
Definition: AddressSanitizer.cpp:175
ClDynamicAllocaStack
static cl::opt< bool > ClDynamicAllocaStack("asan-stack-dynamic-alloca", cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden, cl::init(true))
kDynamicShadowSentinel
static const uint64_t kDynamicShadowSentinel
Definition: AddressSanitizer.cpp:98
llvm::Triple::isDriverKit
bool isDriverKit() const
Is this an Apple DriverKit triple.
Definition: Triple.h:512
llvm::DILocation
Debug location.
Definition: DebugInfoMetadata.h:1595
llvm::PreservedAnalyses::none
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
Definition: PassManager.h:155
ClInstrumentReads
static cl::opt< bool > ClInstrumentReads("asan-instrument-reads", cl::desc("instrument read instructions"), cl::Hidden, cl::init(true))
llvm::AsanDetectStackUseAfterReturnMode::Runtime
@ Runtime
Detect stack use after return if not disabled runtime with (ASAN_OPTIONS=detect_stack_use_after_retur...
llvm::Type
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
DenseMap.h
GetCtorAndDtorPriority
static uint64_t GetCtorAndDtorPriority(Triple &TargetTriple)
Definition: AddressSanitizer.cpp:627
Module.h
llvm::Triple::x86_64
@ x86_64
Definition: Triple.h:86
MemoryBuiltins.h
ClDebugMax
static cl::opt< int > ClDebugMax("asan-debug-max", cl::desc("Debug max inst"), cl::Hidden, cl::init(-1))
isInterestingPointerComparison
static bool isInterestingPointerComparison(Instruction *I)
Definition: AddressSanitizer.cpp:1352
llvm::ASanStackVariableDescription::AI
AllocaInst * AI
Definition: ASanStackFrameLayout.h:37
llvm::Triple::SPIRV
@ SPIRV
Definition: Triple.h:286
ClOptGlobals
static cl::opt< bool > ClOptGlobals("asan-opt-globals", cl::desc("Don't instrument scalar globals"), cl::Hidden, cl::init(true))
llvm::MemIntrinsic
This is the common base class for memset/memcpy/memmove.
Definition: IntrinsicInst.h:1041
ClRealignStack
static cl::opt< unsigned > ClRealignStack("asan-realign-stack", cl::desc("Realign stack to the value of this flag (power of two)"), cl::Hidden, cl::init(32))
Vector
So we should use XX3Form_Rcr to implement intrinsic Convert DP outs ins xscvdpsp No builtin are required Round &Convert QP DP(dword[1] is set to zero) No builtin are required Round to Quad Precision because you need to assign rounding mode in instruction Provide builtin(set f128:$vT,(int_ppc_vsx_xsrqpi f128:$vB))(set f128 yields< n x< ty > >< result > yields< ty >< result > No builtin are required Load Store Vector
Definition: README_P9.txt:497
kAsanAllocaPoison
const char kAsanAllocaPoison[]
Definition: AddressSanitizer.cpp:172
llvm::IRBuilderBase::getIntN
ConstantInt * getIntN(unsigned N, uint64_t C)
Get a constant N-bit value, zero extended or truncated from a 64-bit value.
Definition: IRBuilder.h:480
llvm::Triple::XCOFF
@ XCOFF
Definition: Triple.h:288
llvm::SmallPtrSet
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:450
llvm::Triple::isOSLinux
bool isOSLinux() const
Tests whether the OS is Linux.
Definition: Triple.h:636
llvm::ObjectSizeOpts::RoundToAlign
bool RoundToAlign
Whether to round the result up to the alignment of allocas, byval arguments, and global variables.
Definition: MemoryBuiltins.h:158
llvm::SPII::Store
@ Store
Definition: SparcInstrInfo.h:33
llvm::Comdat::Largest
@ Largest
The linker will choose the largest COMDAT.
Definition: Comdat.h:38
llvm::max
Expected< ExpressionValue > max(const ExpressionValue &Lhs, const ExpressionValue &Rhs)
Definition: FileCheck.cpp:337
kIsWriteMask
constexpr size_t kIsWriteMask
Definition: AddressSanitizer.cpp:189
llvm::Triple::isAMDGPU
bool isAMDGPU() const
Definition: Triple.h:768
llvm::ValueAsMetadata::get
static ValueAsMetadata * get(Value *V)
Definition: Metadata.cpp:393
llvm::MipsISD::Ret
@ Ret
Definition: MipsISelLowering.h:119
kAsanPoisonStackMemoryName
const char kAsanPoisonStackMemoryName[]
Definition: AddressSanitizer.cpp:159
llvm::ASanStackFrameLayout::Granularity
uint64_t Granularity
Definition: ASanStackFrameLayout.h:45
llvm::GlobalValue::setUnnamedAddr
void setUnnamedAddr(UnnamedAddr Val)
Definition: GlobalValue.h:227
llvm::GlobalValue::LinkageTypes
LinkageTypes
An enumeration for the kinds of linkage for global values.
Definition: GlobalValue.h:47
llvm::ArrayType
Class to represent array types.
Definition: DerivedTypes.h:357
llvm::isPowerOf2_32
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:458
llvm::Triple::isOSEmscripten
bool isOSEmscripten() const
Tests whether the OS is Emscripten.
Definition: Triple.h:656
ASanStackFrameLayout.h
kPPC64_ShadowOffset64
static const uint64_t kPPC64_ShadowOffset64
Definition: AddressSanitizer.cpp:103
llvm::IRBuilderBase::CreateAlloca
AllocaInst * CreateAlloca(Type *Ty, unsigned AddrSpace, Value *ArraySize=nullptr, const Twine &Name="")
Definition: IRBuilder.h:1665
Use.h
llvm::Type::getInt32Ty
static IntegerType * getInt32Ty(LLVMContext &C)
Definition: Type.cpp:239
llvm::IRBuilderBase::CreateIntToPtr
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:1980
LLVM_DEBUG
#define LLVM_DEBUG(X)
Definition: Debug.h:101
DepthFirstIterator.h
llvm::MDNode::get
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1400
F
#define F(x, y, z)
Definition: MD5.cpp:55
kAsanAllocasUnpoison
const char kAsanAllocasUnpoison[]
Definition: AddressSanitizer.cpp:173
llvm::ConstantExpr::getPointerCast
static Constant * getPointerCast(Constant *C, Type *Ty)
Create a BitCast, AddrSpaceCast, or a PtrToInt cast constant expression.
Definition: Constants.cpp:2014
llvm::BasicBlock
LLVM Basic Block Representation.
Definition: BasicBlock.h:55
llvm::appendToGlobalDtors
void appendToGlobalDtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Same as appendToGlobalCtors(), but for global dtors.
Definition: ModuleUtils.cpp:71
llvm::dbgs
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
llvm::ASanStackVariableDescription::Size
uint64_t Size
Definition: ASanStackFrameLayout.h:33
llvm::ARMBuildAttrs::Section
@ Section
Legacy Tags.
Definition: ARMBuildAttributes.h:82
Arg
amdgpu Simplify well known AMD library false FunctionCallee Value * Arg
Definition: AMDGPULibCalls.cpp:187
llvm::Triple::isMacOSX
bool isMacOSX() const
Is this a Mac OS X triple.
Definition: Triple.h:484
llvm::BitmaskEnumDetail::Mask
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:80
Instruction.h
kNetBSD_ShadowOffset64
static const uint64_t kNetBSD_ShadowOffset64
Definition: AddressSanitizer.cpp:116
ClUseOdrIndicator
static cl::opt< bool > ClUseOdrIndicator("asan-use-odr-indicator", cl::desc("Use odr indicators to improve ODR reporting"), cl::Hidden, cl::init(true))
CommandLine.h
llvm::ConstantInt
This is the shared class of boolean and integer constants.
Definition: Constants.h:79
llvm::AllocaInst::isStaticAlloca
bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Definition: Instructions.cpp:1507
kAsanUnregisterElfGlobalsName
const char kAsanUnregisterElfGlobalsName[]
Definition: AddressSanitizer.cpp:142
llvm::all_of
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1734
ClOverrideDestructorKind
static cl::opt< AsanDtorKind > ClOverrideDestructorKind("asan-destructor-kind", cl::desc("Sets the ASan destructor kind. The default is to use the value " "provided to the pass constructor"), cl::values(clEnumValN(AsanDtorKind::None, "none", "No destructors"), clEnumValN(AsanDtorKind::Global, "global", "Use global destructors")), cl::init(AsanDtorKind::Invalid), cl::Hidden)
llvm::ObjectSizeOffsetVisitor::bothKnown
static bool bothKnown(const SizeOffsetType &SizeOffset)
Definition: MemoryBuiltins.h:221
llvm::Triple::isAndroid
bool isAndroid() const
Tests whether the target is Android.
Definition: Triple.h:723
llvm::IRBuilderBase::CreateMul
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1267
ClForceDynamicShadow
static cl::opt< bool > ClForceDynamicShadow("asan-force-dynamic-shadow", cl::desc("Load shadow address into a local variable for each function"), cl::Hidden, cl::init(false))
llvm::Instruction::isLifetimeStartOrEnd
bool isLifetimeStartOrEnd() const LLVM_READONLY
Return true if the instruction is a llvm.lifetime.start or llvm.lifetime.end marker.
Definition: Instruction.cpp:755
GlobalValue.h
llvm::StringRef::startswith
bool startswith(StringRef Prefix) const
Definition: StringRef.h:260
kAsanHandleNoReturnName
const char kAsanHandleNoReturnName[]
Definition: AddressSanitizer.cpp:149
Constants.h
kAsanStackFreeNameTemplate
const char kAsanStackFreeNameTemplate[]
Definition: AddressSanitizer.cpp:154
llvm::AllocaInst::getAllocatedType
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:115
llvm::GlobalObject::setSection
void setSection(StringRef S)
Change the section for this global.
Definition: Globals.cpp:243
llvm::IRBuilderBase::CreateGEP
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", bool IsInBounds=false)
Definition: IRBuilder.h:1758
llvm::DIExpression::ApplyOffset
@ ApplyOffset
Definition: DebugInfoMetadata.h:2801
Intrinsics.h
C
(vector float) vec_cmpeq(*A, *B) C
Definition: README_ALTIVEC.txt:86
isPointerOperand
static bool isPointerOperand(Value *V)
Definition: AddressSanitizer.cpp:1345
llvm::ConstantExpr::getIntToPtr
static Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2188
llvm::GlobalValue::getThreadLocalMode
ThreadLocalMode getThreadLocalMode() const
Definition: GlobalValue.h:267
Twine.h
ClInstrumentationWithCallsThreshold
static cl::opt< int > ClInstrumentationWithCallsThreshold("asan-instrumentation-with-call-threshold", cl::desc("If the function being instrumented contains more than " "this number of memory accesses, use callbacks instead of " "inline checks (-1 means never use callbacks)."), cl::Hidden, cl::init(7000))
InstrTypes.h
doInstrumentAddress
static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I, Instruction *InsertBefore, Value *Addr, MaybeAlign Alignment, unsigned Granularity, uint32_t TypeSize, bool IsWrite, Value *SizeArgument, bool UseCalls, uint32_t Exp)
Definition: AddressSanitizer.cpp:1402
llvm::IRBuilderBase::CreateLoad
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Definition: IRBuilder.h:1682
llvm::BasicBlock::begin
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:306
MAM
ModuleAnalysisManager MAM
Definition: PassBuilderBindings.cpp:61
llvm::AsanDtorKind::None
@ None
Do not emit any destructors for ASan.
llvm::MDBuilder::createBranchWeights
MDNode * createBranchWeights(uint32_t TrueWeight, uint32_t FalseWeight)
Return metadata containing two branch weights.
Definition: MDBuilder.cpp:37
llvm::MCSectionMachO::ParseSectionSpecifier
static Error ParseSectionSpecifier(StringRef Spec, StringRef &Segment, StringRef &Section, unsigned &TAA, bool &TAAParsed, unsigned &StubSize)
Parse the section specifier indicated by "Spec".
Definition: MCSectionMachO.cpp:186
MCSectionMachO.h
llvm::CallInst::Create
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Definition: Instructions.h:1517
SI
@ SI
Definition: SIInstrInfo.cpp:7966
Param
Value * Param
Definition: NVPTXLowerArgs.cpp:165
llvm::ReplaceInstWithInst
void ReplaceInstWithInst(BasicBlock::InstListType &BIL, BasicBlock::iterator &BI, Instruction *I)
Replace the instruction specified by BI with the instruction specified by I.
Definition: BasicBlockUtils.cpp:559
TargetLibraryInfo.h
AddressSanitizerCommon.h
llvm::Comdat::Any
@ Any
The linker may choose any COMDAT.
Definition: Comdat.h:36
llvm::BasicBlock::getFirstInsertionPt
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
Definition: BasicBlock.cpp:246
false
Definition: StackSlotColoring.cpp:141
llvm::dwarf::Index
Index
Definition: Dwarf.h:472
llvm::MaybeAlign
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117
llvm::Triple::ppc64
@ ppc64
Definition: Triple.h:71
llvm::ConstantArray
ConstantArray - Constant Array Declarations.
Definition: Constants.h:410
ClInsertVersionCheck
static cl::opt< bool > ClInsertVersionCheck("asan-guard-against-version-mismatch", cl::desc("Guard against compiler/runtime version mismatch."), cl::Hidden, cl::init(true))
llvm::AsanDetectStackUseAfterReturnMode::Always
@ Always
Always detect stack use after return.
llvm::Instruction
Definition: Instruction.h:42
MDBuilder.h
llvm::AllocaInst::getArraySize
const Value * getArraySize() const
Get the number of elements allocated.
Definition: Instructions.h:97
llvm::IRBuilderBase::getInt8Ty
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
Definition: IRBuilder.h:499
llvm::appendToCompilerUsed
void appendToCompilerUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.compiler.used list.
Definition: ModuleUtils.cpp:111
llvm::report_fatal_error
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:145
Options
const char LLVMTargetMachineRef LLVMPassBuilderOptionsRef Options
Definition: PassBuilderBindings.cpp:48
llvm::STATISTIC
STATISTIC(NumFunctions, "Total number of functions")
GlobalWasGeneratedByCompiler
static bool GlobalWasGeneratedByCompiler(GlobalVariable *G)
Check if G has been created by a trusted compiler pass.
Definition: AddressSanitizer.cpp:1173
llvm::raw_ostream
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:53
LoopDeletionResult::Modified
@ Modified
kAsanVersionCheckNamePrefix
const char kAsanVersionCheckNamePrefix[]
Definition: AddressSanitizer.cpp:146
llvm::SmallVectorImpl::resize
void resize(size_type N)
Definition: SmallVector.h:642
llvm::codeview::EncodedFramePtrReg::BasePtr
@ BasePtr
llvm::Comdat::SameSize
@ SameSize
The data referenced by the COMDAT must be the same size.
Definition: Comdat.h:40
llvm::ConstantInt::get
static Constant * get(Type *Ty, uint64_t V, bool IsSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:879
llvm::StringRef::data
const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition: StringRef.h:131
kAsanRegisterImageGlobalsName
const char kAsanRegisterImageGlobalsName[]
Definition: AddressSanitizer.cpp:138
llvm::getUnderlyingObject
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments and pointer casts from the specified value,...
Definition: ValueTracking.cpp:4499
ClMaxInlinePoisoningSize
static cl::opt< uint32_t > ClMaxInlinePoisoningSize("asan-max-inline-poisoning-size", cl::desc("Inline shadow poisoning for blocks up to the given size in bytes."), cl::Hidden, cl::init(64))
DebugLoc.h
llvm::IndexedInstrProf::Version
const uint64_t Version
Definition: InstrProf.h:1056
llvm::GlobalValue::SanitizerMetadata
Definition: GlobalValue.h:313
SmallPtrSet.h
ClGlobals
static cl::opt< bool > ClGlobals("asan-globals", cl::desc("Handle global objects"), cl::Hidden, cl::init(true))
ClUseAfterScope
static cl::opt< bool > ClUseAfterScope("asan-use-after-scope", cl::desc("Check stack-use-after-scope"), cl::Hidden, cl::init(false))
Align
uint64_t Align
Definition: ELFObjHandler.cpp:82
llvm::GlobalValue::InternalLinkage
@ InternalLinkage
Rename collisions when linking (static functions).
Definition: GlobalValue.h:55
llvm::Align
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
llvm::Comdat
Definition: Comdat.h:33
llvm::MCID::Call
@ Call
Definition: MCInstrDesc.h:155
llvm::Metadata
Root of the metadata hierarchy.
Definition: Metadata.h:62
llvm::Triple::getArch
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition: Triple.h:354
c
the resulting code requires compare and branches when and if the revised code is with conditional branches instead of More there is a byte word extend before each where there should be only and the condition codes are not remembered when the same two values are compared twice More LSR enhancements i8 and i32 load store addressing modes are identical int int c
Definition: README.txt:418
llvm::Triple::GOFF
@ GOFF
Definition: Triple.h:284
kAllocaRzSize
static const uint64_t kAllocaRzSize
Definition: AddressSanitizer.cpp:181
instrumentMaskedLoadOrStore
static void instrumentMaskedLoadOrStore(AddressSanitizer *Pass, const DataLayout &DL, Type *IntptrTy, Value *Mask, Instruction *I, Value *Addr, MaybeAlign Alignment, unsigned Granularity, Type *OpType, bool IsWrite, Value *SizeArgument, bool UseCalls, uint32_t Exp)
Definition: AddressSanitizer.cpp:1419
llvm::InlineAsm::get
static InlineAsm * get(FunctionType *Ty, StringRef AsmString, StringRef Constraints, bool hasSideEffects, bool isAlignStack=false, AsmDialect asmDialect=AD_ATT, bool canThrow=false)
InlineAsm::get - Return the specified uniqued inline asm string.
Definition: InlineAsm.cpp:43
llvm::CallingConv::ID
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
ClInstrumentAtomics
static cl::opt< bool > ClInstrumentAtomics("asan-instrument-atomics", cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden, cl::init(true))
Type.h
llvm::IRBuilderBase::CreateAnd
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1381
llvm::demangle
std::string demangle(const std::string &MangledName)
Attempt to demangle a string using different demangling schemes.
Definition: Demangle.cpp:29
llvm::IRBuilderBase::CreatePointerCast
Value * CreatePointerCast(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2031
llvm::AllocaInst::isSwiftError
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
Definition: Instructions.h:147
kSystemZ_ShadowOffset64
static const uint64_t kSystemZ_ShadowOffset64
Definition: AddressSanitizer.cpp:104
llvm::Triple::isOSFuchsia
bool isOSFuchsia() const
Definition: Triple.h:547
kCompileKernelShift
constexpr size_t kCompileKernelShift
Definition: AddressSanitizer.cpp:184
llvm::InstVisitor::visitCleanupReturnInst
RetTy visitCleanupReturnInst(CleanupReturnInst &I)
Definition: InstVisitor.h:244
llvm::function_ref
An efficient, type-erasing, non-owning reference to a callable.
Definition: STLFunctionalExtras.h:36
llvm::StringRef::empty
constexpr bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:134
llvm::InlineAsm
Definition: InlineAsm.h:33
G
const DataFlowGraph & G
Definition: RDFGraph.cpp:200
llvm::Triple::ppc64le
@ ppc64le
Definition: Triple.h:72
BasicBlock.h
llvm::cl::opt< bool >
kODRGenPrefix
const char kODRGenPrefix[]
Definition: AddressSanitizer.cpp:156
llvm::Triple::isThumb
bool isThumb() const
Tests whether the target is Thumb (little and big endian).
Definition: Triple.h:773
llvm::Triple::ELF
@ ELF
Definition: Triple.h:283
llvm::RISCVFenceField::O
@ O
Definition: RISCVBaseInfo.h:264
ClInitializers
static cl::opt< bool > ClInitializers("asan-initialization-order", cl::desc("Handle C++ initializer order"), cl::Hidden, cl::init(true))
llvm::StoreInst
An instruction for storing to memory.
Definition: Instructions.h:298
llvm::GlobalValue
Definition: GlobalValue.h:44
kSmallX86_64ShadowOffsetAlignMask
static const uint64_t kSmallX86_64ShadowOffsetAlignMask
Definition: AddressSanitizer.cpp:101
llvm::GlobalVariable::getInitializer
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
Definition: GlobalVariable.h:135
llvm::Constant
This is an important base class in LLVM.
Definition: Constant.h:41
llvm::Instruction::getSuccessor
BasicBlock * getSuccessor(unsigned Idx) const LLVM_READONLY
Return the specified successor. This instruction must be a terminator.
Definition: Instruction.cpp:826
llvm::cl::values
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
Definition: CommandLine.h:705
llvm::Instruction::eraseFromParent
SymbolTableList< Instruction >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Definition: Instruction.cpp:81
llvm::AMDGPU::Hwreg::Offset
Offset
Definition: SIDefines.h:416
kMIPS_ShadowOffsetN32
static const uint64_t kMIPS_ShadowOffsetN32
Definition: AddressSanitizer.cpp:105
llvm::Triple::isARM
bool isARM() const
Tests whether the target is ARM (little and big endian).
Definition: Triple.h:778
kAsanRegisterGlobalsName
const char kAsanRegisterGlobalsName[]
Definition: AddressSanitizer.cpp:136
llvm::createSanitizerCtorAndInitFunctions
std::pair< Function *, FunctionCallee > createSanitizerCtorAndInitFunctions(Module &M, StringRef CtorName, StringRef InitName, ArrayRef< Type * > InitArgTypes, ArrayRef< Value * > InitArgs, StringRef VersionCheckName=StringRef())
Creates sanitizer constructor function, and calls sanitizer's init function from it.
Definition: ModuleUtils.cpp:138
llvm::ICmpInst
This instruction compares its operands according to the predicate given to the constructor.
Definition: Instructions.h:1186
Index
uint32_t Index
Definition: ELFObjHandler.cpp:83
uint64_t
llvm::Triple::isMIPS64
bool isMIPS64() const
Tests whether the target is MIPS 64-bit (little and big endian).
Definition: Triple.h:859
ClInstrumentByval
static cl::opt< bool > ClInstrumentByval("asan-instrument-byval", cl::desc("instrument byval call arguments"), cl::Hidden, cl::init(true))
llvm::GlobalValue::getVisibility
VisibilityTypes getVisibility() const
Definition: GlobalValue.h:244
D
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
llvm::StackSafetyGlobalAnalysis
This pass performs the global (interprocedural) stack safety analysis (new pass manager).
Definition: StackSafetyAnalysis.h:128
llvm::GlobalValue::getParent
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:652
kLinuxKasan_ShadowOffset64
static const uint64_t kLinuxKasan_ShadowOffset64
Definition: AddressSanitizer.cpp:102
Addr
uint64_t Addr
Definition: ELFObjHandler.cpp:79
llvm::Triple::isOSFreeBSD
bool isOSFreeBSD() const
Definition: Triple.h:543
kAsanInitName
const char kAsanInitName[]
Definition: AddressSanitizer.cpp:145
kRISCV64_ShadowOffset64
static const uint64_t kRISCV64_ShadowOffset64
Definition: AddressSanitizer.cpp:110
llvm::IRBuilderBase::getInt32
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Definition: IRBuilder.h:469
llvm::GlobalObject::copyMetadata
void copyMetadata(const GlobalObject *Src, unsigned Offset)
Copy metadata from Src, adjusting offsets by Offset.
Definition: Metadata.cpp:1551
llvm::LLVMContext
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
PromoteMemToReg.h
llvm::declareSanitizerInitFunction
FunctionCallee declareSanitizerInitFunction(Module &M, StringRef InitName, ArrayRef< Type * > InitArgTypes)
Definition: ModuleUtils.cpp:116
llvm::BranchInst::Create
static BranchInst * Create(BasicBlock *IfTrue, Instruction *InsertBefore=nullptr)
Definition: Instructions.h:3190
llvm::assumeAligned
Align assumeAligned(uint64_t Value)
Treats the value 0 as a 1, so Align is always at least 1.
Definition: Alignment.h:111
llvm::DenseMap
Definition: DenseMap.h:714
ClDebugFunc
static cl::opt< std::string > ClDebugFunc("asan-debug-func", cl::Hidden, cl::desc("Debug func"))
llvm::GlobalValue::getDLLStorageClass
DLLStorageClassTypes getDLLStorageClass() const
Definition: GlobalValue.h:271
I
#define I(x, y, z)
Definition: MD5.cpp:58
llvm::DebugLoc::get
DILocation * get() const
Get the underlying DILocation.
Definition: DebugLoc.cpp:20
llvm::IRBuilderBase::getInt8PtrTy
PointerType * getInt8PtrTy(unsigned AddrSpace=0)
Fetch the type representing a pointer to an 8-bit integer value.
Definition: IRBuilder.h:557
StringExtras.h
llvm::ObjectSizeOffsetVisitor
Evaluate the size and offset of an object pointed to by a Value* statically.
Definition: MemoryBuiltins.h:192
llvm::IRBuilderBase::CreateIsNotNull
Value * CreateIsNotNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg != 0.
Definition: IRBuilder.h:2405
llvm::ASanStackFrameLayout::FrameSize
uint64_t FrameSize
Definition: ASanStackFrameLayout.h:47
llvm::cl::init
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:447
llvm::appendToUsed
void appendToUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.used list.
Definition: ModuleUtils.cpp:107
llvm::IRBuilderBase::CreateSelect
Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
Definition: IRBuilder.cpp:1123
kMaxStackMallocSize
static const size_t kMaxStackMallocSize
Definition: AddressSanitizer.cpp:126
DIBuilder.h
ClAlwaysSlowPath
static cl::opt< bool > ClAlwaysSlowPath("asan-always-slow-path", cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden, cl::init(false))
llvm::PointerType
Class to represent pointers.
Definition: DerivedTypes.h:632
llvm::AsanDetectStackUseAfterReturnMode
AsanDetectStackUseAfterReturnMode
Mode of ASan detect stack use after return.
Definition: AddressSanitizerOptions.h:23
ClWithIfuncSuppressRemat
static cl::opt< bool > ClWithIfuncSuppressRemat("asan-with-ifunc-suppress-remat", cl::desc("Suppress rematerialization of dynamic shadow address by passing " "it through inline asm in prologue."), cl::Hidden, cl::init(true))
kDefaultShadowOffset32
static const uint64_t kDefaultShadowOffset32
Definition: AddressSanitizer.cpp:96
llvm::InstVisitor::visit
void visit(Iterator Start, Iterator End)
Definition: InstVisitor.h:87
ArrayRef.h
llvm::Instruction::setDebugLoc
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:356
llvm::IRBuilderBase::CreateAdd
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1233
ClSkipPromotableAllocas
static cl::opt< bool > ClSkipPromotableAllocas("asan-skip-promotable-allocas", cl::desc("Do not instrument promotable allocas"), cl::Hidden, cl::init(true))
llvm::AddressSanitizerPass::AddressSanitizerPass
AddressSanitizerPass(const AddressSanitizerOptions &Options, bool UseGlobalGC=true, bool UseOdrIndicator=true, AsanDtorKind DestructorKind=AsanDtorKind::Global)
Definition: AddressSanitizer.cpp:1132
kAsanEmscriptenCtorAndDtorPriority
static const uint64_t kAsanEmscriptenCtorAndDtorPriority
Definition: AddressSanitizer.cpp:134
IRBuilder.h
ClUseAfterReturn
static cl::opt< AsanDetectStackUseAfterReturnMode > ClUseAfterReturn("asan-use-after-return", cl::desc("Sets the mode of detection for stack-use-after-return."), cl::values(clEnumValN(AsanDetectStackUseAfterReturnMode::Never, "never", "Never detect stack use after return."), clEnumValN(AsanDetectStackUseAfterReturnMode::Runtime, "runtime", "Detect stack use after return if " "binary flag 'ASAN_OPTIONS=detect_stack_use_after_return' is set."), clEnumValN(AsanDetectStackUseAfterReturnMode::Always, "always", "Always detect stack use after return.")), cl::Hidden, cl::init(AsanDetectStackUseAfterReturnMode::Runtime))
kAsanUnregisterGlobalsName
const char kAsanUnregisterGlobalsName[]
Definition: AddressSanitizer.cpp:137
llvm::DIExpression::DerefBefore
@ DerefBefore
Definition: DebugInfoMetadata.h:2802
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
llvm::Instruction::hasMetadata
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
Definition: Instruction.h:258
kFreeBSDAArch64_ShadowOffset64
static const uint64_t kFreeBSDAArch64_ShadowOffset64
Definition: AddressSanitizer.cpp:113
ClInstrumentWrites
static cl::opt< bool > ClInstrumentWrites("asan-instrument-writes", cl::desc("instrument write instructions"), cl::Hidden, cl::init(true))
ModuleName
Definition: ItaniumDemangle.h:999
kAsanPoisonGlobalsName
const char kAsanPoisonGlobalsName[]
Definition: AddressSanitizer.cpp:143
llvm::GlobalValue::hasLocalLinkage
bool hasLocalLinkage() const
Definition: GlobalValue.h:523
ClMappingScale
static cl::opt< int > ClMappingScale("asan-mapping-scale", cl::desc("scale of asan shadow mapping"), cl::Hidden, cl::init(0))
llvm::IRBuilderBase::CreateExtractElement
Value * CreateExtractElement(Value *Vec, Value *Idx, const Twine &Name="")
Definition: IRBuilder.h:2316
Ptr
@ Ptr
Definition: TargetLibraryInfo.cpp:60
llvm::ASanStackVariableDescription
Definition: ASanStackFrameLayout.h:30
kAccessSizeIndexMask
constexpr size_t kAccessSizeIndexMask
Definition: AddressSanitizer.cpp:187
ClMappingOffset
static cl::opt< uint64_t > ClMappingOffset("asan-mapping-offset", cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"), cl::Hidden, cl::init(0))
llvm::ArrayType::get
static ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
Definition: Type.cpp:638
kEmscriptenShadowOffset
static const uint64_t kEmscriptenShadowOffset
Definition: AddressSanitizer.cpp:120
kAsanGenPrefix
const char kAsanGenPrefix[]
Definition: AddressSanitizer.cpp:155
llvm::AsanDetectStackUseAfterReturnMode::Never
@ Never
Never detect stack use after return.
kSanCovGenPrefix
const char kSanCovGenPrefix[]
Definition: AddressSanitizer.cpp:157
llvm::Module
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
llvm::MDNode
Metadata node.
Definition: Metadata.h:944
OP
#define OP(n)
Definition: regex2.h:73
llvm::SplitBlockAndInsertIfThenElse
void SplitBlockAndInsertIfThenElse(Value *Cond, Instruction *SplitBefore, Instruction **ThenTerm, Instruction **ElseTerm, MDNode *BranchWeights=nullptr)
SplitBlockAndInsertIfThenElse is similar to SplitBlockAndInsertIfThen, but also creates the ElseBlock...
Definition: BasicBlockUtils.cpp:1546
kAsanModuleCtorName
const char kAsanModuleCtorName[]
Definition: AddressSanitizer.cpp:130
llvm::Triple::GNUABIN32
@ GNUABIN32
Definition: Triple.h:231
llvm::ConstantStruct
Definition: Constants.h:442
llvm::SmallPtrSetImpl::count
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:383
ClOptSameTemp
static cl::opt< bool > ClOptSameTemp("asan-opt-same-temp", cl::desc("Instrument the same temp just once"), cl::Hidden, cl::init(true))
getType
static M68kRelType getType(unsigned Kind, MCSymbolRefExpr::VariantKind &Modifier, bool &IsPCRel)
Definition: M68kELFObjectWriter.cpp:48
Triple.h
llvm::InstVisitor::visitReturnInst
RetTy visitReturnInst(ReturnInst &I)
Definition: InstVisitor.h:226
llvm::Function::createWithDefaultAttr
static Function * createWithDefaultAttr(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Creates a function with some attributes recorded in llvm.module.flags applied.
Definition: Function.cpp:338
llvm::IRBuilderBase::CreatePHI
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Definition: IRBuilder.h:2254
kWindowsShadowOffset64
static const uint64_t kWindowsShadowOffset64
Definition: AddressSanitizer.cpp:123
kAsanGlobalsRegisteredFlagName
const char kAsanGlobalsRegisteredFlagName[]
Definition: AddressSanitizer.cpp:164
llvm::SmallPtrSetImplBase::clear
void clear()
Definition: SmallPtrSet.h:95
llvm::ASanStackFrameLayout::FrameAlignment
uint64_t FrameAlignment
Definition: ASanStackFrameLayout.h:46
Demangle.h
llvm::ArrayRef
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: APInt.h:32
kMIPS32_ShadowOffset32
static const uint64_t kMIPS32_ShadowOffset32
Definition: AddressSanitizer.cpp:106
llvm::BinaryOperator
Definition: InstrTypes.h:189
findStoresToUninstrumentedArgAllocas
static void findStoresToUninstrumentedArgAllocas(AddressSanitizer &ASan, Instruction &InsBefore, SmallVectorImpl< Instruction * > &InitInsts)
Collect instructions in the entry block after InsBefore which initialize permanent storage for a func...
Definition: AddressSanitizer.cpp:3033
llvm::min
Expected< ExpressionValue > min(const ExpressionValue &Lhs, const ExpressionValue &Rhs)
Definition: FileCheck.cpp:357
DataLayout.h
llvm::AddressSanitizerOptions::Recover
bool Recover
Definition: AddressSanitizer.h:25
llvm::codeview::ExportFlags::IsPrivate
@ IsPrivate
llvm::StructType
Class to represent struct types.
Definition: DerivedTypes.h:213
llvm::countTrailingZeros
unsigned countTrailingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition: MathExtras.h:152
Cond
SmallVector< MachineOperand, 4 > Cond
Definition: BasicBlockSections.cpp:138
kAsanUnregisterImageGlobalsName
const char kAsanUnregisterImageGlobalsName[]
Definition: AddressSanitizer.cpp:139
llvm::StringRef
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
this
Analysis the ScalarEvolution expression for r is this
Definition: README.txt:8
InstVisitor.h
llvm::logicalview::LVAttributeKind::Zero
@ Zero
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:143
llvm::Constant::getAggregateElement
Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
Definition: Constants.cpp:410
llvm::Value::getType
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
ClOptStack
static cl::opt< bool > ClOptStack("asan-opt-stack", cl::desc("Don't instrument scalar stack variables"), cl::Hidden, cl::init(false))
llvm::IRBuilderBase::CreateICmpEQ
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2099
llvm::cantFail
void cantFail(Error Err, const char *Msg=nullptr)
Report a fatal error if Err is a failure value.
Definition: Error.h:744
llvm::Value::replaceAllUsesWith
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:532
llvm::BasicBlock::Create
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition: BasicBlock.h:97
getParent
static const Function * getParent(const Value *V)
Definition: BasicAliasAnalysis.cpp:805
uint32_t
llvm::AllocaInst::isArrayAllocation
bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
Definition: Instructions.cpp:1498
DL
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Definition: AArch64SLSHardening.cpp:76
clEnumValN
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
Definition: CommandLine.h:680
llvm::InstVisitor
Base class for instruction visitors.
Definition: InstVisitor.h:78
llvm::ASanAccessInfo::ASanAccessInfo
ASanAccessInfo(int32_t Packed)
Definition: AddressSanitizer.cpp:605
llvm::ObjectSizeOpts
Various options to control the behavior of getObjectSize.
Definition: MemoryBuiltins.h:138
llvm::Triple::isiOS
bool isiOS() const
Is this an iOS triple.
Definition: Triple.h:493
llvm::maybeMarkSanitizerLibraryCallNoBuiltin
void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI, const TargetLibraryInfo *TLI)
Given a CallInst, check if it calls a string function known to CodeGen, and mark it with NoBuiltin if...
Definition: Local.cpp:3356
llvm::GlobalValue::AvailableExternallyLinkage
@ AvailableExternallyLinkage
Available for inspection, not emission.
Definition: GlobalValue.h:49
ClWithIfunc
static cl::opt< bool > ClWithIfunc("asan-with-ifunc", cl::desc("Access dynamic shadow through an ifunc global on " "platforms that support this"), cl::Hidden, cl::init(true))
llvm::ASanStackVariableDescription::Offset
size_t Offset
Definition: ASanStackFrameLayout.h:38
ClUsePrivateAlias
static cl::opt< bool > ClUsePrivateAlias("asan-use-private-alias", cl::desc("Use private aliases for global variables"), cl::Hidden, cl::init(true))
llvm::GlobalAlias::getAliasee
const Constant * getAliasee() const
Definition: GlobalAlias.h:84
llvm::Value::getName
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:308
llvm::LoadInst
An instruction for reading from memory.
Definition: Instructions.h:174
llvm::GlobalValue::CommonLinkage
@ CommonLinkage
Tentative definitions.
Definition: GlobalValue.h:58
llvm::InstVisitor::visitAllocaInst
RetTy visitAllocaInst(AllocaInst &I)
Definition: InstVisitor.h:168
llvm::Triple::isWatchOS
bool isWatchOS() const
Is this an Apple watchOS triple.
Definition: Triple.h:503
ClInstrumentDynamicAllocas
static cl::opt< bool > ClInstrumentDynamicAllocas("asan-instrument-dynamic-allocas", cl::desc("instrument dynamic allocas"), cl::Hidden, cl::init(true))
llvm::AtomicRMWInst
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:715
llvm::cl::Optional
@ Optional
Definition: CommandLine.h:116
Argument.h
llvm::ConstantInt::getZExtValue
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition: Constants.h:142
kAsanSetShadowPrefix
const char kAsanSetShadowPrefix[]
Definition: AddressSanitizer.cpp:158
kSmallX86_64ShadowOffsetBase
static const uint64_t kSmallX86_64ShadowOffsetBase
Definition: AddressSanitizer.cpp:100
llvm::Triple::isOSWindows
bool isOSWindows() const
Tests whether the OS is Windows.
Definition: Triple.h:582
llvm::depth_first
iterator_range< df_iterator< T > > depth_first(const T &G)
Definition: DepthFirstIterator.h:230
Attributes.h
runOnFunction
static bool runOnFunction(Function &F, bool PostInlining)
Definition: EntryExitInstrumenter.cpp:85
getShadowMapping
static ShadowMapping getShadowMapping(const Triple &TargetTriple, int LongSize, bool IsKasan)
Definition: AddressSanitizer.cpp:469
j
return j(j<< 16)
Constant.h
llvm::ASanStackVariableDescription::Line
unsigned Line
Definition: ASanStackFrameLayout.h:40
llvm::ResumeInst
Resume the propagation of an exception.
Definition: Instructions.h:4255
llvm::Twine
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
kWindowsShadowOffset32
static const uint64_t kWindowsShadowOffset32
Definition: AddressSanitizer.cpp:119
llvm::ASanAccessInfo
Definition: AddressSanitizer.h:53
llvm::GraphProgram::Name
Name
Definition: GraphWriter.h:50
ClStack
static cl::opt< bool > ClStack("asan-stack", cl::desc("Handle stack memory"), cl::Hidden, cl::init(true))
llvm::Constant::getNullValue
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:350
llvm::AArch64::RZ
@ RZ
Definition: AArch64ISelLowering.h:488
llvm::None
constexpr std::nullopt_t None
Definition: None.h:27
llvm::GlobalAlias::create
static GlobalAlias * create(Type *Ty, unsigned AddressSpace, LinkageTypes Linkage, const Twine &Name, Constant *Aliasee, Module *Parent)
If a parent module is specified, the alias is automatically inserted into the end of the specified mo...
Definition: Globals.cpp:511
kCompileKernelMask
constexpr size_t kCompileKernelMask
Definition: AddressSanitizer.cpp:185
get
Should compile to something r4 addze r3 instead we get
Definition: README.txt:24
kAMDGPUAddressPrivateName
const char kAMDGPUAddressPrivateName[]
Definition: AddressSanitizer.cpp:176
kAsanUnpoisonStackMemoryName
const char kAsanUnpoisonStackMemoryName[]
Definition: AddressSanitizer.cpp:160
llvm::AsanDtorKind
AsanDtorKind
Types of ASan module destructors supported.
Definition: AddressSanitizerOptions.h:16
llvm::IRBuilderBase::CreateMemCpy
CallInst * CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign, uint64_t Size, bool isVolatile=false, MDNode *TBAATag=nullptr, MDNode *TBAAStructTag=nullptr, MDNode *ScopeTag=nullptr, MDNode *NoAliasTag=nullptr)
Create and insert a memcpy between the specified pointers.
Definition: IRBuilder.h:625
llvm::AMDGPU::SendMsg::Op
Op
Definition: SIDefines.h:348
llvm::PreservedAnalyses::all
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: PassManager.h:158
llvm::X86::FirstMacroFusionInstKind::Cmp
@ Cmp
llvm::Align::value
uint64_t value() const
This is a hole in the type system and should not be abused.
Definition: Alignment.h:85
llvm::ASanStackVariableDescription::LifetimeSize
size_t LifetimeSize
Definition: ASanStackFrameLayout.h:34
llvm::CallBase::arg_size
unsigned arg_size() const
Definition: InstrTypes.h:1340
GlobalVariable.h
llvm::InstVisitor::visitResumeInst
RetTy visitResumeInst(ResumeInst &I)
Definition: InstVisitor.h:238
kFreeBSD_ShadowOffset64
static const uint64_t kFreeBSD_ShadowOffset64
Definition: AddressSanitizer.cpp:112
llvm::IRBuilderBase::GetInsertBlock
BasicBlock * GetInsertBlock() const
Definition: IRBuilder.h:173
llvm::TypeSize
Definition: TypeSize.h:435
kAArch64_ShadowOffset64
static const uint64_t kAArch64_ShadowOffset64
Definition: AddressSanitizer.cpp:108
Casting.h
llvm::Triple::MachO
@ MachO
Definition: Triple.h:285
Function.h
llvm::AsanDtorKind::Global
@ Global
Append to llvm.global_dtors.
llvm::Type::getPointerTo
PointerType * getPointerTo(unsigned AddrSpace=0) const
Return a pointer to the current type.
Definition: Type.cpp:774
llvm::TargetLibraryInfo
Provides information about what library functions are available for the current target.
Definition: TargetLibraryInfo.h:226
ClOptimizeCallbacks
static cl::opt< bool > ClOptimizeCallbacks("asan-optimize-callbacks", cl::desc("Optimize callbacks"), cl::Hidden, cl::init(false))
llvm::ReturnInst::Create
static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, Instruction *InsertBefore=nullptr)
Definition: Instructions.h:3079
llvm::getUniqueModuleId
std::string getUniqueModuleId(Module *M)
Produce a unique identifier for this module by taking the MD5 sum of the names of the module's strong...
Definition: ModuleUtils.cpp:210
llvm::ConstantArray::get
static Constant * get(ArrayType *T, ArrayRef< Constant * > V)
Definition: Constants.cpp:1241
llvm::Triple::loongarch64
@ loongarch64
Definition: Triple.h:62
llvm::ConstantExpr::getGetElementPtr
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, bool InBounds=false, Optional< unsigned > InRangeIndex=None, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
Definition: Constants.h:1218
llvm::SmallVectorImpl::clear
void clear()
Definition: SmallVector.h:614
llvm::Triple::systemz
@ systemz
Definition: Triple.h:80
kFreeBSDKasan_ShadowOffset64
static const uint64_t kFreeBSDKasan_ShadowOffset64
Definition: AddressSanitizer.cpp:114
kMaxAsanStackMallocSizeClass
static const int kMaxAsanStackMallocSizeClass
Definition: AddressSanitizer.cpp:150
llvm::CleanupReturnInst
Definition: Instructions.h:4663
kIsWriteShift
constexpr size_t kIsWriteShift
Definition: AddressSanitizer.cpp:188
ClOpt
static cl::opt< bool > ClOpt("asan-opt", cl::desc("Optimize instrumentation"), cl::Hidden, cl::init(true))
GlobalAlias.h
llvm::IntrinsicInst
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:46
ClInvalidPointerSub
static cl::opt< bool > ClInvalidPointerSub("asan-detect-invalid-pointer-sub", cl::desc("Instrument - operations with pointer operands"), cl::Hidden, cl::init(false))
llvm::MDBuilder
Definition: MDBuilder.h:36
llvm::ComputeASanStackFrameDescription
SmallString< 64 > ComputeASanStackFrameDescription(const SmallVectorImpl< ASanStackVariableDescription > &Vars)
Definition: ASanStackFrameLayout.cpp:96
llvm::Function::front
const BasicBlock & front() const
Definition: Function.h:714
kAccessSizeIndexShift
constexpr size_t kAccessSizeIndexShift
Definition: AddressSanitizer.cpp:186
llvm::AllocaInst::setAlignment
void setAlignment(Align Align)
Definition: Instructions.h:126
llvm::ObjectSizeOffsetVisitor::compute
SizeOffsetType compute(Value *V)
Definition: MemoryBuiltins.cpp:720
llvm::Triple::isAndroidVersionLT
bool isAndroidVersionLT(unsigned Major) const
Definition: Triple.h:725
llvm::Type::getVoidTy
static Type * getVoidTy(LLVMContext &C)
Definition: Type.cpp:222
llvm::IRBuilderBase::getInt1Ty
IntegerType * getInt1Ty()
Fetch the type representing a single bit.
Definition: IRBuilder.h:494
getRedzoneSizeForScale
static uint64_t getRedzoneSizeForScale(int MappingScale)
Definition: AddressSanitizer.cpp:621
AddressSanitizerOptions.h
llvm::Pass
Pass interface - Implemented by all 'passes'.
Definition: Pass.h:91
llvm::StackSafetyGlobalInfo::stackAccessIsSafe
bool stackAccessIsSafe(const Instruction &I) const
Definition: StackSafetyAnalysis.cpp:971
llvm::Triple::COFF
@ COFF
Definition: Triple.h:281
llvm::GlobalValue::PrivateLinkage
@ PrivateLinkage
Like Internal, but omit from symbol table.
Definition: GlobalValue.h:56
Instructions.h
Invalid
@ Invalid
Definition: AArch64ISelLowering.cpp:9722
llvm::getAddressSanitizerParams
void getAddressSanitizerParams(const Triple &TargetTriple, int LongSize, bool IsKasan, uint64_t *ShadowBase, int *MappingScale, bool *OrShadowOffset)
Definition: AddressSanitizer.cpp:596
llvm::AllocaInst::isUsedWithInAlloca
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
Definition: Instructions.h:137
kAsanUnpoisonGlobalsName
const char kAsanUnpoisonGlobalsName[]
Definition: AddressSanitizer.cpp:144
SmallVector.h
llvm::Instruction::getDebugLoc
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:359
llvm::Triple::isOSNetBSD
bool isOSNetBSD() const
Definition: Triple.h:535
llvm::IRBuilderBase::CreateLShr
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition: IRBuilder.h:1343
ModuleUtils.h
llvm::CallBase::getArgOperand
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1342
llvm::AddressSanitizerOptions::UseAfterScope
bool UseAfterScope
Definition: AddressSanitizer.h:26
N
#define N
kCurrentStackFrameMagic
static const uintptr_t kCurrentStackFrameMagic
Definition: AddressSanitizer.cpp:127
llvm::Instruction::getParent
const BasicBlock * getParent() const
Definition: Instruction.h:91
kFreeBSD_ShadowOffset32
static const uint64_t kFreeBSD_ShadowOffset32
Definition: AddressSanitizer.cpp:111
kAsanShadowMemoryDynamicAddress
const char kAsanShadowMemoryDynamicAddress[]
Definition: AddressSanitizer.cpp:169
llvm::to_string
std::string to_string(const T &Value)
Definition: ScopedPrinter.h:85
kAsanStackMallocNameTemplate
const char kAsanStackMallocNameTemplate[]
Definition: AddressSanitizer.cpp:151
llvm::ArrayRef::size
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:164
llvm::memtag::getAllocaSizeInBytes
uint64_t getAllocaSizeInBytes(const AllocaInst &AI)
Definition: MemoryTaggingSupport.cpp:175
llvm::FunctionCallee
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
Definition: DerivedTypes.h:165
llvm::codeview::CompileSym3Flags::Exp
@ Exp
llvm::PHINode
Definition: Instructions.h:2699
ClRecover
static cl::opt< bool > ClRecover("asan-recover", cl::desc("Enable recovery mode (continue-after-error)."), cl::Hidden, cl::init(false))
llvm::Triple::getEnvironment
EnvironmentType getEnvironment() const
Get the parsed environment type of this triple.
Definition: Triple.h:371
llvm::SmallVectorImpl
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:42
kAsanReportErrorTemplate
const char kAsanReportErrorTemplate[]
Definition: AddressSanitizer.cpp:135
llvm::IRBuilderBase::CreateNot
Value * CreateNot(Value *V, const Twine &Name="")
Definition: IRBuilder.h:1641
llvm::CallBase
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1175
llvm::Module::getDataLayout
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition: Module.cpp:399
DerivedTypes.h
llvm::StackSafetyGlobalInfo
Definition: StackSafetyAnalysis.h:58
llvm::AnalysisManager
A container for analyses that lazily runs them and caches their results.
Definition: InstructionSimplify.h:42
llvm::HexStyle::Asm
@ Asm
0ffh
Definition: MCInstPrinter.h:34
llvm::InnerAnalysisManagerProxy
An analysis over an "outer" IR unit that provides access to an analysis manager over an "inner" IR un...
Definition: PassManager.h:931
llvm::IntegerType::get
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:311
llvm::ComputeASanStackFrameLayout
ASanStackFrameLayout ComputeASanStackFrameLayout(SmallVectorImpl< ASanStackVariableDescription > &Vars, uint64_t Granularity, uint64_t MinHeaderSize)
Definition: ASanStackFrameLayout.cpp:53
llvm::CallInst
This class represents a function call, abstracting a target machine's calling convention.
Definition: Instructions.h:1474
BB
Common register allocation spilling lr str ldr sxth r3 ldr mla r4 can lr mov lr str ldr sxth r3 mla r4 and then merge mul and lr str ldr sxth r3 mla r4 It also increase the likelihood the store may become dead bb27 Successors according to LLVM BB
Definition: README.txt:39
llvm::IRBuilderBase::getVoidTy
Type * getVoidTy()
Fetch the type representing void.
Definition: IRBuilder.h:547
llvm::UnreachableInst
This function has undefined behavior.
Definition: Instructions.h:4771
MachO.h
llvm::IRBuilderBase::CreateICmpNE
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2103
InlinePriorityMode::Size
@ Size
LLVMContext.h
kAsanPtrSub
const char kAsanPtrSub[]
Definition: AddressSanitizer.cpp:148
llvm::IRBuilderBase::CreateSub
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1250
llvm::Value::takeName
void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:381
llvm::DebugLoc
A debug info location.
Definition: DebugLoc.h:33
llvm::SplitBlockAndInsertIfThen
Instruction * SplitBlockAndInsertIfThen(Value *Cond, Instruction *SplitBefore, bool Unreachable, MDNode *BranchWeights, DominatorTree *DT, LoopInfo *LI=nullptr, BasicBlock *ThenBlock=nullptr)
Split the containing block at the specified instruction - everything before SplitBefore stays in the ...
Definition: BasicBlockUtils.cpp:1525
kAsanPtrCmp
const char kAsanPtrCmp[]
Definition: AddressSanitizer.cpp:147
llvm::AllocaInst
an instruction to allocate memory on the stack
Definition: Instructions.h:59
llvm::appendToGlobalCtors
void appendToGlobalCtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Append F to the list of global ctors of module M with the given Priority.
Definition: ModuleUtils.cpp:67
llvm::User::getOperand
Value * getOperand(unsigned i) const
Definition: User.h:169
llvm::BasicBlock::getTerminatingMustTailCall
const CallInst * getTerminatingMustTailCall() const
Returns the call instruction marked 'musttail' prior to the terminating return instruction of this ba...
Definition: BasicBlock.cpp:151
llvm::cl::desc
Definition: CommandLine.h:413
llvm::GlobalObject::setAlignment
void setAlignment(MaybeAlign Align)
Definition: Globals.cpp:121
llvm::GlobalValue::ExternalWeakLinkage
@ ExternalWeakLinkage
ExternalWeak linkage description.
Definition: GlobalValue.h:57
llvm::BranchInst
Conditional or Unconditional Branch instruction.
Definition: Instructions.h:3134
raw_ostream.h
n
The same transformation can work with an even modulo with the addition of a and shrink the compare RHS by the same amount Unless the target supports that transformation probably isn t worthwhile The transformation can also easily be made to work with non zero equality for n
Definition: README.txt:685
llvm::AddressSanitizerOptions::UseAfterReturn
AsanDetectStackUseAfterReturnMode UseAfterReturn
Definition: AddressSanitizer.h:27
llvm::SmallVectorImpl::reserve
void reserve(size_type N)
Definition: SmallVector.h:667
llvm::AddressSanitizerOptions::CompileKernel
bool CompileKernel
Definition: AddressSanitizer.h:24
BasicBlockUtils.h
llvm::replaceDbgDeclare
bool replaceDbgDeclare(Value *Address, Value *NewAddress, DIBuilder &Builder, uint8_t DIExprFlags, int Offset)
Replaces llvm.dbg.declare instruction when the address it describes is replaced with a new value.
Definition: Local.cpp:1736
Value.h
llvm::AddressSanitizerPass::printPipeline
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
Definition: AddressSanitizer.cpp:1122
ClUseStackSafety
static cl::opt< bool > ClUseStackSafety("asan-use-stack-safety", cl::Hidden, cl::init(false), cl::Hidden, cl::desc("Use Stack Safety analysis results"), cl::Optional)
llvm::GlobalValue::setVisibility
void setVisibility(VisibilityTypes V)
Definition: GlobalValue.h:250
StackMallocSizeClass
static int StackMallocSizeClass(uint64_t LocalStackSize)
Definition: AddressSanitizer.cpp:2927
llvm::Value
LLVM Value Representation.
Definition: Value.h:74
llvm::AtomicCmpXchgInst
An instruction that atomically checks whether a specified value is in a memory location,...
Definition: Instructions.h:510
Debug.h
llvm::TargetLibraryAnalysis
Analysis pass providing the TargetLibraryInfo.
Definition: TargetLibraryInfo.h:450
llvm::ASanStackFrameLayout
Definition: ASanStackFrameLayout.h:44
llvm::AddressSanitizerOptions
Definition: AddressSanitizer.h:23
kAsanModuleDtorName
const char kAsanModuleDtorName[]
Definition: AddressSanitizer.cpp:131
llvm::Triple::aarch64
@ aarch64
Definition: Triple.h:51
llvm::IRBuilderBase::CreateCall
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args=None, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2269
llvm::isAllocaPromotable
bool isAllocaPromotable(const AllocaInst *AI)
Return true if this alloca is legal for promotion.
Definition: PromoteMemoryToRegister.cpp:63
llvm::CallBase::args
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
Definition: InstrTypes.h:1333
llvm::AsanDtorKind::Invalid
@ Invalid
Not a valid destructor Kind.
llvm::findAllocaForValue
AllocaInst * findAllocaForValue(Value *V, bool OffsetZero=false)
Returns unique alloca where the value comes from, or nullptr.
Definition: ValueTracking.cpp:4648
ClKasanMemIntrinCallbackPrefix
static cl::opt< bool > ClKasanMemIntrinCallbackPrefix("asan-kernel-mem-intrinsic-prefix", cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden, cl::init(false))
llvm::Instruction::moveBefore
void moveBefore(Instruction *MovePos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
Definition: Instruction.cpp:107
llvm::Use
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
llvm::SmallVectorImpl::emplace_back
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:941
llvm::GlobalValue::SanitizerMetadata::IsDynInit
unsigned IsDynInit
Definition: GlobalValue.h:348
llvm::SmallPtrSetImpl::insert
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:365
llvm::Intrinsic::ID
unsigned ID
Definition: TargetTransformInfo.h:39
llvm::DataLayout::getTypeAllocSize
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
Definition: DataLayout.h:506
ClUseGlobalsGC
static cl::opt< bool > ClUseGlobalsGC("asan-globals-live-support", cl::desc("Use linker features to support dead " "code stripping of globals"), cl::Hidden, cl::init(true))
llvm::Triple::UnknownObjectFormat
@ UnknownObjectFormat
Definition: Triple.h:279
ClDebug
static cl::opt< int > ClDebug("asan-debug", cl::desc("debug"), cl::Hidden, cl::init(0))