//===- AddressSanitizer.cpp - memory error detector -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, a basic address correctness
// checker.
// Details of the algorithm:
// https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm
//
// FIXME: This sanitizer does not yet handle scalable vectors
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Instrumentation/AddressSanitizer.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/StackSafetyAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Comdat.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/TargetParser/Triple.h"
#include "llvm/Transforms/Instrumentation/AddressSanitizerCommon.h"
#include "llvm/Transforms/Instrumentation/AddressSanitizerOptions.h"
#include "llvm/Transforms/Utils/ASanStackFrameLayout.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iomanip>
#include <limits>
#include <sstream>
#include <string>
#include <tuple>

using namespace llvm;

#define DEBUG_TYPE "asan"

static const uint64_t kDefaultShadowScale = 3;
static const uint64_t kDefaultShadowOffset32 = 1ULL << 29;
static const uint64_t kDefaultShadowOffset64 = 1ULL << 44;
static const uint64_t kDynamicShadowSentinel =
    std::numeric_limits<uint64_t>::max();
static const uint64_t kSmallX86_64ShadowOffsetBase = 0x7FFFFFFF; // < 2G.
static const uint64_t kSmallX86_64ShadowOffsetAlignMask = ~0xFFFULL;
static const uint64_t kLinuxKasan_ShadowOffset64 = 0xdffffc0000000000;
static const uint64_t kPPC64_ShadowOffset64 = 1ULL << 44;
static const uint64_t kSystemZ_ShadowOffset64 = 1ULL << 52;
static const uint64_t kMIPS_ShadowOffsetN32 = 1ULL << 29;
static const uint64_t kMIPS32_ShadowOffset32 = 0x0aaa0000;
static const uint64_t kMIPS64_ShadowOffset64 = 1ULL << 37;
static const uint64_t kAArch64_ShadowOffset64 = 1ULL << 36;
static const uint64_t kLoongArch64_ShadowOffset64 = 1ULL << 46;
static const uint64_t kRISCV64_ShadowOffset64 = kDynamicShadowSentinel;
static const uint64_t kFreeBSD_ShadowOffset32 = 1ULL << 30;
static const uint64_t kFreeBSD_ShadowOffset64 = 1ULL << 46;
static const uint64_t kFreeBSDAArch64_ShadowOffset64 = 1ULL << 47;
static const uint64_t kFreeBSDKasan_ShadowOffset64 = 0xdffff7c000000000;
static const uint64_t kNetBSD_ShadowOffset32 = 1ULL << 30;
static const uint64_t kNetBSD_ShadowOffset64 = 1ULL << 46;
static const uint64_t kNetBSDKasan_ShadowOffset64 = 0xdfff900000000000;
static const uint64_t kPS_ShadowOffset64 = 1ULL << 40;
static const uint64_t kWindowsShadowOffset32 = 3ULL << 28;
static const uint64_t kEmscriptenShadowOffset = 0;

// The shadow memory space is dynamically allocated.
static const uint64_t kWindowsShadowOffset64 = kDynamicShadowSentinel;

static const size_t kMinStackMallocSize = 1 << 6;  // 64B
static const size_t kMaxStackMallocSize = 1 << 16; // 64K
static const uintptr_t kCurrentStackFrameMagic = 0x41B58AB3;
static const uintptr_t kRetiredStackFrameMagic = 0x45E0360E;

const char kAsanModuleCtorName[] = "asan.module_ctor";
const char kAsanModuleDtorName[] = "asan.module_dtor";
static const uint64_t kAsanCtorAndDtorPriority = 1;
// On Emscripten, the system needs more than one priority for constructors.
static const uint64_t kAsanEmscriptenCtorAndDtorPriority = 50;
const char kAsanReportErrorTemplate[] = "__asan_report_";
const char kAsanRegisterGlobalsName[] = "__asan_register_globals";
const char kAsanUnregisterGlobalsName[] = "__asan_unregister_globals";
const char kAsanRegisterImageGlobalsName[] = "__asan_register_image_globals";
const char kAsanUnregisterImageGlobalsName[] =
    "__asan_unregister_image_globals";
const char kAsanRegisterElfGlobalsName[] = "__asan_register_elf_globals";
const char kAsanUnregisterElfGlobalsName[] = "__asan_unregister_elf_globals";
const char kAsanPoisonGlobalsName[] = "__asan_before_dynamic_init";
const char kAsanUnpoisonGlobalsName[] = "__asan_after_dynamic_init";
const char kAsanInitName[] = "__asan_init";
const char kAsanVersionCheckNamePrefix[] = "__asan_version_mismatch_check_v";
const char kAsanPtrCmp[] = "__sanitizer_ptr_cmp";
const char kAsanPtrSub[] = "__sanitizer_ptr_sub";
const char kAsanHandleNoReturnName[] = "__asan_handle_no_return";
static const int kMaxAsanStackMallocSizeClass = 10;
const char kAsanStackMallocNameTemplate[] = "__asan_stack_malloc_";
const char kAsanStackMallocAlwaysNameTemplate[] =
    "__asan_stack_malloc_always_";
const char kAsanStackFreeNameTemplate[] = "__asan_stack_free_";
const char kAsanGenPrefix[] = "___asan_gen_";
const char kODRGenPrefix[] = "__odr_asan_gen_";
const char kSanCovGenPrefix[] = "__sancov_gen_";
const char kAsanSetShadowPrefix[] = "__asan_set_shadow_";
const char kAsanPoisonStackMemoryName[] = "__asan_poison_stack_memory";
const char kAsanUnpoisonStackMemoryName[] = "__asan_unpoison_stack_memory";

// ASan version script has __asan_* wildcard. Triple underscore prevents a
// linker (gold) warning about attempting to export a local symbol.
const char kAsanGlobalsRegisteredFlagName[] = "___asan_globals_registered";

const char kAsanOptionDetectUseAfterReturn[] =
    "__asan_option_detect_stack_use_after_return";

const char kAsanShadowMemoryDynamicAddress[] =
    "__asan_shadow_memory_dynamic_address";

const char kAsanAllocaPoison[] = "__asan_alloca_poison";
const char kAsanAllocasUnpoison[] = "__asan_allocas_unpoison";

const char kAMDGPUAddressSharedName[] = "llvm.amdgcn.is.shared";
const char kAMDGPUAddressPrivateName[] = "llvm.amdgcn.is.private";
const char kAMDGPUBallotName[] = "llvm.amdgcn.ballot.i64";
const char kAMDGPUUnreachableName[] = "llvm.amdgcn.unreachable";

// Access sizes are powers of two: 1, 2, 4, 8, 16.
static const size_t kNumberOfAccessSizes = 5;

static const uint64_t kAllocaRzSize = 32;

// ASanAccessInfo implementation constants.
constexpr size_t kCompileKernelShift = 0;
constexpr size_t kCompileKernelMask = 0x1;
constexpr size_t kAccessSizeIndexShift = 1;
constexpr size_t kAccessSizeIndexMask = 0xf;
constexpr size_t kIsWriteShift = 5;
constexpr size_t kIsWriteMask = 0x1;

// Command-line flags.

static cl::opt<bool> ClEnableKasan(
    "asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"),
    cl::Hidden, cl::init(false));

201 "asan-recover",
202 cl::desc("Enable recovery mode (continue-after-error)."),
203 cl::Hidden, cl::init(false));
204
206 "asan-guard-against-version-mismatch",
207 cl::desc("Guard against compiler/runtime version mismatch."), cl::Hidden,
208 cl::init(true));
209
// This flag may need to be replaced with -f[no-]asan-reads.
static cl::opt<bool> ClInstrumentReads("asan-instrument-reads",
                                       cl::desc("instrument read instructions"),
                                       cl::Hidden, cl::init(true));

static cl::opt<bool> ClInstrumentWrites(
    "asan-instrument-writes", cl::desc("instrument write instructions"),
    cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClUseStackSafety("asan-use-stack-safety", cl::Hidden, cl::init(true),
                     cl::desc("Use Stack Safety analysis results"),
                     cl::Optional);

225 "asan-instrument-atomics",
226 cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
227 cl::init(true));
228
static cl::opt<bool>
    ClInstrumentByval("asan-instrument-byval",
                      cl::desc("instrument byval call arguments"), cl::Hidden,
                      cl::init(true));

235 "asan-always-slow-path",
236 cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden,
237 cl::init(false));
238
240 "asan-force-dynamic-shadow",
241 cl::desc("Load shadow address into a local variable for each function"),
242 cl::Hidden, cl::init(false));
243
static cl::opt<bool>
    ClWithIfunc("asan-with-ifunc",
                cl::desc("Access dynamic shadow through an ifunc global on "
                         "platforms that support this"),
                cl::Hidden, cl::init(true));

251 "asan-with-ifunc-suppress-remat",
252 cl::desc("Suppress rematerialization of dynamic shadow address by passing "
253 "it through inline asm in prologue."),
254 cl::Hidden, cl::init(true));
255
// This flag limits the number of instructions to be instrumented
// in any given BB. Normally, this should be set to unlimited (INT_MAX),
// but due to http://llvm.org/bugs/show_bug.cgi?id=12652 we temporarily
// set it to 10000.
static cl::opt<int> ClMaxInsnsToInstrumentPerBB(
    "asan-max-ins-per-bb", cl::init(10000),
    cl::desc("maximal number of instructions to instrument in any given BB"),
    cl::Hidden);

// This flag may need to be replaced with -f[no]asan-stack.
static cl::opt<bool> ClStack("asan-stack", cl::desc("Handle stack memory"),
                             cl::Hidden, cl::init(true));
static cl::opt<uint32_t> ClMaxInlinePoisoningSize(
    "asan-max-inline-poisoning-size",
    cl::desc(
        "Inline shadow poisoning for blocks up to the given size in bytes."),
    cl::Hidden, cl::init(64));

275 "asan-use-after-return",
276 cl::desc("Sets the mode of detection for stack-use-after-return."),
278 clEnumValN(AsanDetectStackUseAfterReturnMode::Never, "never",
279 "Never detect stack use after return."),
281 AsanDetectStackUseAfterReturnMode::Runtime, "runtime",
282 "Detect stack use after return if "
283 "binary flag 'ASAN_OPTIONS=detect_stack_use_after_return' is set."),
284 clEnumValN(AsanDetectStackUseAfterReturnMode::Always, "always",
285 "Always detect stack use after return.")),
286 cl::Hidden, cl::init(AsanDetectStackUseAfterReturnMode::Runtime));
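
// Editor's note (illustration, not part of the original source): the mode can
// be selected per compilation, e.g.
//   clang -fsanitize=address -mllvm -asan-use-after-return=always foo.c
// and, in "runtime" mode, is additionally gated when the binary runs via
//   ASAN_OPTIONS=detect_stack_use_after_return=1 ./a.out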

static cl::opt<bool> ClRedzoneByvalArgs("asan-redzone-byval-args",
                                        cl::desc("Create redzones for byval "
                                                 "arguments (extra copy "
                                                 "required)"),
                                        cl::Hidden, cl::init(true));

static cl::opt<bool> ClUseAfterScope("asan-use-after-scope",
                                     cl::desc("Check stack-use-after-scope"),
                                     cl::Hidden, cl::init(false));

// This flag may need to be replaced with -f[no]asan-globals.
static cl::opt<bool> ClGlobals("asan-globals",
                               cl::desc("Handle global objects"), cl::Hidden,
                               cl::init(true));

static cl::opt<bool> ClInitializers("asan-initialization-order",
                                    cl::desc("Handle C++ initializer order"),
                                    cl::Hidden, cl::init(true));

308 "asan-detect-invalid-pointer-pair",
309 cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden,
310 cl::init(false));
311
313 "asan-detect-invalid-pointer-cmp",
314 cl::desc("Instrument <, <=, >, >= with pointer operands"), cl::Hidden,
315 cl::init(false));
316
318 "asan-detect-invalid-pointer-sub",
319 cl::desc("Instrument - operations with pointer operands"), cl::Hidden,
320 cl::init(false));
321
323 "asan-realign-stack",
324 cl::desc("Realign stack to the value of this flag (power of two)"),
325 cl::Hidden, cl::init(32));
326
328 "asan-instrumentation-with-call-threshold",
329 cl::desc("If the function being instrumented contains more than "
330 "this number of memory accesses, use callbacks instead of "
331 "inline checks (-1 means never use callbacks)."),
332 cl::Hidden, cl::init(7000));
333
335 "asan-memory-access-callback-prefix",
336 cl::desc("Prefix for memory access callbacks"), cl::Hidden,
337 cl::init("__asan_"));
338
340 "asan-kernel-mem-intrinsic-prefix",
341 cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden,
342 cl::init(false));
343
static cl::opt<bool>
    ClInstrumentDynamicAllocas("asan-instrument-dynamic-allocas",
                               cl::desc("instrument dynamic allocas"),
                               cl::Hidden, cl::init(true));

static cl::opt<bool> ClSkipPromotableAllocas(
    "asan-skip-promotable-allocas",
    cl::desc("Do not instrument promotable allocas"), cl::Hidden,
    cl::init(true));

355 "asan-constructor-kind",
356 cl::desc("Sets the ASan constructor kind"),
357 cl::values(clEnumValN(AsanCtorKind::None, "none", "No constructors"),
358 clEnumValN(AsanCtorKind::Global, "global",
359 "Use global constructors")),
360 cl::init(AsanCtorKind::Global), cl::Hidden);
361// These flags allow to change the shadow mapping.
362// The shadow mapping looks like
363// Shadow = (Mem >> scale) + offset
364
static cl::opt<int> ClMappingScale("asan-mapping-scale",
                                   cl::desc("scale of asan shadow mapping"),
                                   cl::Hidden, cl::init(0));

static cl::opt<uint64_t>
    ClMappingOffset("asan-mapping-offset",
                    cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"),
                    cl::Hidden, cl::init(0));

// Optimization flags. Not user visible, used mostly for testing
// and benchmarking the tool.

static cl::opt<bool> ClOpt("asan-opt", cl::desc("Optimize instrumentation"),
                           cl::Hidden, cl::init(true));

static cl::opt<bool> ClOptimizeCallbacks("asan-optimize-callbacks",
                                         cl::desc("Optimize callbacks"),
                                         cl::Hidden, cl::init(false));

385 "asan-opt-same-temp", cl::desc("Instrument the same temp just once"),
386 cl::Hidden, cl::init(true));
387
388static cl::opt<bool> ClOptGlobals("asan-opt-globals",
389 cl::desc("Don't instrument scalar globals"),
390 cl::Hidden, cl::init(true));
391
393 "asan-opt-stack", cl::desc("Don't instrument scalar stack variables"),
394 cl::Hidden, cl::init(false));
395
397 "asan-stack-dynamic-alloca",
398 cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden,
399 cl::init(true));
400
402 "asan-force-experiment",
403 cl::desc("Force optimization experiment (for testing)"), cl::Hidden,
404 cl::init(0));
405
static cl::opt<bool>
    ClUsePrivateAlias("asan-use-private-alias",
                      cl::desc("Use private aliases for global variables"),
                      cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClUseOdrIndicator("asan-use-odr-indicator",
                      cl::desc("Use odr indicators to improve ODR reporting"),
                      cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClUseGlobalsGC("asan-globals-live-support",
                   cl::desc("Use linker features to support dead "
                            "code stripping of globals"),
                   cl::Hidden, cl::init(true));

// This is on by default even though there is a bug in gold:
// https://sourceware.org/bugzilla/show_bug.cgi?id=19002
static cl::opt<bool>
    ClWithComdat("asan-with-comdat",
                 cl::desc("Place ASan constructors in comdat sections"),
                 cl::Hidden, cl::init(true));

430 "asan-destructor-kind",
431 cl::desc("Sets the ASan destructor kind. The default is to use the value "
432 "provided to the pass constructor"),
433 cl::values(clEnumValN(AsanDtorKind::None, "none", "No destructors"),
434 clEnumValN(AsanDtorKind::Global, "global",
435 "Use global destructors")),
436 cl::init(AsanDtorKind::Invalid), cl::Hidden);
437
// Debug flags.

static cl::opt<int> ClDebug("asan-debug", cl::desc("debug"), cl::Hidden,
                            cl::init(0));

static cl::opt<int> ClDebugStack("asan-debug-stack", cl::desc("debug stack"),
                                 cl::Hidden, cl::init(0));

static cl::opt<std::string> ClDebugFunc("asan-debug-func", cl::Hidden,
                                        cl::desc("Debug func"));

static cl::opt<int> ClDebugMin("asan-debug-min", cl::desc("Debug min inst"),
                               cl::Hidden, cl::init(-1));

static cl::opt<int> ClDebugMax("asan-debug-max", cl::desc("Debug max inst"),
                               cl::Hidden, cl::init(-1));

STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
STATISTIC(NumOptimizedAccessesToGlobalVar,
          "Number of optimized accesses to global vars");
STATISTIC(NumOptimizedAccessesToStackVar,
          "Number of optimized accesses to stack vars");

namespace {

/// This struct defines the shadow mapping using the rule:
///   shadow = (mem >> Scale) ADD-or-OR Offset.
/// If InGlobal is true, then
///   extern char __asan_shadow[];
///   shadow = (mem >> Scale) + &__asan_shadow
struct ShadowMapping {
  int Scale;
  uint64_t Offset;
  bool OrShadowOffset;
  bool InGlobal;
};

} // end anonymous namespace

static ShadowMapping getShadowMapping(const Triple &TargetTriple, int LongSize,
                                      bool IsKasan) {
  bool IsAndroid = TargetTriple.isAndroid();
  bool IsIOS = TargetTriple.isiOS() || TargetTriple.isWatchOS() ||
               TargetTriple.isDriverKit();
  bool IsMacOS = TargetTriple.isMacOSX();
  bool IsFreeBSD = TargetTriple.isOSFreeBSD();
  bool IsNetBSD = TargetTriple.isOSNetBSD();
  bool IsPS = TargetTriple.isPS();
  bool IsLinux = TargetTriple.isOSLinux();
  bool IsPPC64 = TargetTriple.getArch() == Triple::ppc64 ||
                 TargetTriple.getArch() == Triple::ppc64le;
  bool IsSystemZ = TargetTriple.getArch() == Triple::systemz;
  bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64;
  bool IsMIPSN32ABI = TargetTriple.getEnvironment() == Triple::GNUABIN32;
  bool IsMIPS32 = TargetTriple.isMIPS32();
  bool IsMIPS64 = TargetTriple.isMIPS64();
  bool IsArmOrThumb = TargetTriple.isARM() || TargetTriple.isThumb();
  bool IsAArch64 = TargetTriple.getArch() == Triple::aarch64 ||
                   TargetTriple.getArch() == Triple::aarch64_be;
  bool IsLoongArch64 = TargetTriple.isLoongArch64();
  bool IsRISCV64 = TargetTriple.getArch() == Triple::riscv64;
  bool IsWindows = TargetTriple.isOSWindows();
  bool IsFuchsia = TargetTriple.isOSFuchsia();
  bool IsEmscripten = TargetTriple.isOSEmscripten();
  bool IsAMDGPU = TargetTriple.isAMDGPU();

  ShadowMapping Mapping;

  Mapping.Scale = kDefaultShadowScale;
  if (ClMappingScale.getNumOccurrences() > 0) {
    Mapping.Scale = ClMappingScale;
  }

  if (LongSize == 32) {
    if (IsAndroid)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsMIPSN32ABI)
      Mapping.Offset = kMIPS_ShadowOffsetN32;
    else if (IsMIPS32)
      Mapping.Offset = kMIPS32_ShadowOffset32;
    else if (IsFreeBSD)
      Mapping.Offset = kFreeBSD_ShadowOffset32;
    else if (IsNetBSD)
      Mapping.Offset = kNetBSD_ShadowOffset32;
    else if (IsIOS)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsWindows)
      Mapping.Offset = kWindowsShadowOffset32;
    else if (IsEmscripten)
      Mapping.Offset = kEmscriptenShadowOffset;
    else
      Mapping.Offset = kDefaultShadowOffset32;
  } else { // LongSize == 64
    // Fuchsia is always PIE, which means that the beginning of the address
    // space is always available.
    if (IsFuchsia)
      Mapping.Offset = 0;
    else if (IsPPC64)
      Mapping.Offset = kPPC64_ShadowOffset64;
    else if (IsSystemZ)
      Mapping.Offset = kSystemZ_ShadowOffset64;
    else if (IsFreeBSD && IsAArch64)
      Mapping.Offset = kFreeBSDAArch64_ShadowOffset64;
    else if (IsFreeBSD && !IsMIPS64) {
      if (IsKasan)
        Mapping.Offset = kFreeBSDKasan_ShadowOffset64;
      else
        Mapping.Offset = kFreeBSD_ShadowOffset64;
    } else if (IsNetBSD) {
      if (IsKasan)
        Mapping.Offset = kNetBSDKasan_ShadowOffset64;
      else
        Mapping.Offset = kNetBSD_ShadowOffset64;
    } else if (IsPS)
      Mapping.Offset = kPS_ShadowOffset64;
    else if (IsLinux && IsX86_64) {
      if (IsKasan)
        Mapping.Offset = kLinuxKasan_ShadowOffset64;
      else
        Mapping.Offset = (kSmallX86_64ShadowOffsetBase &
                          (kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale));
    } else if (IsWindows && IsX86_64) {
      Mapping.Offset = kWindowsShadowOffset64;
    } else if (IsMIPS64)
      Mapping.Offset = kMIPS64_ShadowOffset64;
    else if (IsIOS)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsMacOS && IsAArch64)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsAArch64)
      Mapping.Offset = kAArch64_ShadowOffset64;
    else if (IsLoongArch64)
      Mapping.Offset = kLoongArch64_ShadowOffset64;
    else if (IsRISCV64)
      Mapping.Offset = kRISCV64_ShadowOffset64;
    else if (IsAMDGPU)
      Mapping.Offset = (kSmallX86_64ShadowOffsetBase &
                        (kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale));
    else
      Mapping.Offset = kDefaultShadowOffset64;
  }

  if (ClForceDynamicShadow) {
    Mapping.Offset = kDynamicShadowSentinel;
  }

  if (ClMappingOffset.getNumOccurrences() > 0) {
    Mapping.Offset = ClMappingOffset;
  }

  // OR-ing the shadow offset is more efficient (at least on x86) if the
  // offset is a power of two, but on ppc64 and loongarch64 we have to use add
  // since the shadow offset is not necessarily 1/8-th of the address space.
  // On SystemZ, we could OR the constant in a single instruction, but it's
  // more efficient to load it once and use indexed addressing.
  Mapping.OrShadowOffset = !IsAArch64 && !IsPPC64 && !IsSystemZ && !IsPS &&
                           !IsRISCV64 && !IsLoongArch64 &&
                           !(Mapping.Offset & (Mapping.Offset - 1)) &&
                           Mapping.Offset != kDynamicShadowSentinel;
  bool IsAndroidWithIfuncSupport =
      IsAndroid && !TargetTriple.isAndroidVersionLT(21);
  Mapping.InGlobal = ClWithIfunc && IsAndroidWithIfuncSupport && IsArmOrThumb;

  return Mapping;
}
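
// Editor's sketch (not in the original source): with the default Scale = 3,
// the masked small-offset computation used above for Linux x86-64 (and
// AMDGPU) folds to the familiar 0x7fff8000 constant seen in ASan reports.
static_assert((kSmallX86_64ShadowOffsetBase &
               (kSmallX86_64ShadowOffsetAlignMask << kDefaultShadowScale)) ==
                  0x7fff8000,
              "default small x86-64 shadow offset");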

namespace llvm {
void getAddressSanitizerParams(const Triple &TargetTriple, int LongSize,
                               bool IsKasan, uint64_t *ShadowBase,
                               int *MappingScale, bool *OrShadowOffset) {
  auto Mapping = getShadowMapping(TargetTriple, LongSize, IsKasan);
  *ShadowBase = Mapping.Offset;
  *MappingScale = Mapping.Scale;
  *OrShadowOffset = Mapping.OrShadowOffset;
}

ASanAccessInfo::ASanAccessInfo(int32_t Packed)
    : Packed(Packed),
      AccessSizeIndex((Packed >> kAccessSizeIndexShift) & kAccessSizeIndexMask),
      IsWrite((Packed >> kIsWriteShift) & kIsWriteMask),
      CompileKernel((Packed >> kCompileKernelShift) & kCompileKernelMask) {}

ASanAccessInfo::ASanAccessInfo(bool IsWrite, bool CompileKernel,
                               uint8_t AccessSizeIndex)
    : Packed((IsWrite << kIsWriteShift) +
             (CompileKernel << kCompileKernelShift) +
             (AccessSizeIndex << kAccessSizeIndexShift)),
      AccessSizeIndex(AccessSizeIndex), IsWrite(IsWrite),
      CompileKernel(CompileKernel) {}

} // namespace llvm
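
// Editor's sketch (not in the original source): how the shift/mask constants
// defined near the top of this file pack one access descriptor. A 4-byte
// user-mode write round-trips through the encoding above as follows.
namespace {
constexpr int32_t kExamplePacked =
    (1 << kIsWriteShift) | (2 << kAccessSizeIndexShift); // 4-byte write
static_assert(((kExamplePacked >> kAccessSizeIndexShift) &
               kAccessSizeIndexMask) == 2,
              "AccessSizeIndex (log2 of the byte width) round-trips");
static_assert(((kExamplePacked >> kIsWriteShift) & kIsWriteMask) == 1,
              "IsWrite bit round-trips");
} // namespace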

static uint64_t getRedzoneSizeForScale(int MappingScale) {
  // Redzone used for stack and globals is at least 32 bytes.
  // For scales 6 and 7, the redzone has to be 64 and 128 bytes respectively.
  return std::max(32U, 1U << MappingScale);
}
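
// Editor's note (illustration, not from the original source): at the default
// scale of 3 this yields the minimum 32-byte redzone; larger scales grow it:
//   getRedzoneSizeForScale(3) == 32
//   getRedzoneSizeForScale(6) == 64
//   getRedzoneSizeForScale(7) == 128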

static uint64_t GetCtorAndDtorPriority(Triple &TargetTriple) {
  if (TargetTriple.isOSEmscripten()) {
    return kAsanEmscriptenCtorAndDtorPriority;
  } else {
    return kAsanCtorAndDtorPriority;
  }
}

namespace {
/// Helper RAII class to post-process inserted asan runtime calls during a
/// pass on a single Function. Upon end of scope, detects and applies the
/// required funclet OpBundle.
class RuntimeCallInserter {
  Function *OwnerFn = nullptr;
  bool TrackInsertedCalls = false;
  SmallVector<CallInst *> InsertedCalls;

public:
  RuntimeCallInserter(Function &Fn) : OwnerFn(&Fn) {
    if (Fn.hasPersonalityFn()) {
      auto Personality = classifyEHPersonality(Fn.getPersonalityFn());
      if (isScopedEHPersonality(Personality))
        TrackInsertedCalls = true;
    }
  }

  ~RuntimeCallInserter() {
    if (InsertedCalls.empty())
      return;
    assert(TrackInsertedCalls && "Calls were wrongly tracked");

    DenseMap<BasicBlock *, ColorVector> BlockColors = colorEHFunclets(*OwnerFn);
    for (CallInst *CI : InsertedCalls) {
      BasicBlock *BB = CI->getParent();
      assert(BB && "Instruction doesn't belong to a BasicBlock");
      assert(BB->getParent() == OwnerFn &&
             "Instruction doesn't belong to the expected Function!");

      ColorVector &Colors = BlockColors[BB];
      // funclet opbundles are only valid in monochromatic BBs.
      // Note that unreachable BBs are seen as colorless by colorEHFunclets()
      // and will be DCE'ed later.
      if (Colors.empty())
        continue;
      if (Colors.size() != 1) {
        OwnerFn->getContext().emitError(
            "Instruction's BasicBlock is not monochromatic");
        continue;
      }

      BasicBlock *Color = Colors.front();
      Instruction *EHPad = Color->getFirstNonPHI();

      if (EHPad && EHPad->isEHPad()) {
        // Replace CI with a clone with an added funclet OperandBundle
        OperandBundleDef OB("funclet", EHPad);
        auto *NewCall = CallBase::addOperandBundle(CI, LLVMContext::OB_funclet,
                                                   OB, CI->getIterator());
        NewCall->copyMetadata(*CI);
        CI->replaceAllUsesWith(NewCall);
        CI->eraseFromParent();
      }
    }
  }

  CallInst *createRuntimeCall(IRBuilder<> &IRB, FunctionCallee Callee,
                              ArrayRef<Value *> Args = {},
                              const Twine &Name = "") {
    assert(IRB.GetInsertBlock()->getParent() == OwnerFn);

    CallInst *Inst = IRB.CreateCall(Callee, Args, Name, nullptr);
    if (TrackInsertedCalls)
      InsertedCalls.push_back(Inst);
    return Inst;
  }
};

/// AddressSanitizer: instrument the code in module to find memory bugs.
struct AddressSanitizer {
  AddressSanitizer(Module &M, const StackSafetyGlobalInfo *SSGI,
                   int InstrumentationWithCallsThreshold,
                   uint32_t MaxInlinePoisoningSize, bool CompileKernel = false,
                   bool Recover = false, bool UseAfterScope = false,
                   AsanDetectStackUseAfterReturnMode UseAfterReturn =
                       AsanDetectStackUseAfterReturnMode::Runtime)
      : CompileKernel(ClEnableKasan.getNumOccurrences() > 0 ? ClEnableKasan
                                                            : CompileKernel),
        Recover(ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover),
        UseAfterScope(UseAfterScope || ClUseAfterScope),
        UseAfterReturn(ClUseAfterReturn.getNumOccurrences() ? ClUseAfterReturn
                                                            : UseAfterReturn),
        SSGI(SSGI),
        InstrumentationWithCallsThreshold(
            ClInstrumentationWithCallsThreshold.getNumOccurrences() > 0
                ? ClInstrumentationWithCallsThreshold
                : InstrumentationWithCallsThreshold),
        MaxInlinePoisoningSize(ClMaxInlinePoisoningSize.getNumOccurrences() > 0
                                   ? ClMaxInlinePoisoningSize
                                   : MaxInlinePoisoningSize) {
    C = &(M.getContext());
    DL = &M.getDataLayout();
    LongSize = M.getDataLayout().getPointerSizeInBits();
    IntptrTy = Type::getIntNTy(*C, LongSize);
    PtrTy = PointerType::getUnqual(*C);
    Int32Ty = Type::getInt32Ty(*C);
    TargetTriple = Triple(M.getTargetTriple());

    Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel);

    assert(this->UseAfterReturn != AsanDetectStackUseAfterReturnMode::Invalid);
  }

  TypeSize getAllocaSizeInBytes(const AllocaInst &AI) const {
    return *AI.getAllocationSize(AI.getDataLayout());
  }

  /// Check if we want (and can) handle this alloca.
  bool isInterestingAlloca(const AllocaInst &AI);

  bool ignoreAccess(Instruction *Inst, Value *Ptr);
  void getInterestingMemoryOperands(
      Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting);

  void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
                     InterestingMemoryOperand &O, bool UseCalls,
                     const DataLayout &DL, RuntimeCallInserter &RTCI);
  void instrumentPointerComparisonOrSubtraction(Instruction *I,
                                                RuntimeCallInserter &RTCI);
  void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
                         Value *Addr, MaybeAlign Alignment,
                         uint32_t TypeStoreSize, bool IsWrite,
                         Value *SizeArgument, bool UseCalls, uint32_t Exp,
                         RuntimeCallInserter &RTCI);
  Instruction *instrumentAMDGPUAddress(Instruction *OrigIns,
                                       Instruction *InsertBefore, Value *Addr,
                                       uint32_t TypeStoreSize, bool IsWrite,
                                       Value *SizeArgument);
  Instruction *genAMDGPUReportBlock(IRBuilder<> &IRB, Value *Cond,
                                    bool Recover);
  void instrumentUnusualSizeOrAlignment(Instruction *I,
                                        Instruction *InsertBefore, Value *Addr,
                                        TypeSize TypeStoreSize, bool IsWrite,
                                        Value *SizeArgument, bool UseCalls,
                                        uint32_t Exp,
                                        RuntimeCallInserter &RTCI);
  void instrumentMaskedLoadOrStore(AddressSanitizer *Pass, const DataLayout &DL,
                                   Type *IntptrTy, Value *Mask, Value *EVL,
                                   Value *Stride, Instruction *I, Value *Addr,
                                   MaybeAlign Alignment, unsigned Granularity,
                                   Type *OpType, bool IsWrite,
                                   Value *SizeArgument, bool UseCalls,
                                   uint32_t Exp, RuntimeCallInserter &RTCI);
  Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
                           Value *ShadowValue, uint32_t TypeStoreSize);
  Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr,
                                 bool IsWrite, size_t AccessSizeIndex,
                                 Value *SizeArgument, uint32_t Exp,
                                 RuntimeCallInserter &RTCI);
  void instrumentMemIntrinsic(MemIntrinsic *MI, RuntimeCallInserter &RTCI);
  Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
  bool suppressInstrumentationSiteForDebug(int &Instrumented);
  bool instrumentFunction(Function &F, const TargetLibraryInfo *TLI);
  bool maybeInsertAsanInitAtFunctionEntry(Function &F);
  bool maybeInsertDynamicShadowAtFunctionEntry(Function &F);
  void markEscapedLocalAllocas(Function &F);

private:
  friend struct FunctionStackPoisoner;

  void initializeCallbacks(Module &M, const TargetLibraryInfo *TLI);

  bool LooksLikeCodeInBug11395(Instruction *I);
  bool GlobalIsLinkerInitialized(GlobalVariable *G);
  bool isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis, Value *Addr,
                    TypeSize TypeStoreSize) const;

  /// Helper to cleanup per-function state.
  struct FunctionStateRAII {
    AddressSanitizer *Pass;

    FunctionStateRAII(AddressSanitizer *Pass) : Pass(Pass) {
      assert(Pass->ProcessedAllocas.empty() &&
             "last pass forgot to clear cache");
      assert(!Pass->LocalDynamicShadow);
    }

    ~FunctionStateRAII() {
      Pass->LocalDynamicShadow = nullptr;
      Pass->ProcessedAllocas.clear();
    }
  };

  LLVMContext *C;
  const DataLayout *DL;
  Triple TargetTriple;
  int LongSize;
  bool CompileKernel;
  bool Recover;
  bool UseAfterScope;
  AsanDetectStackUseAfterReturnMode UseAfterReturn;
  Type *IntptrTy;
  Type *Int32Ty;
  PointerType *PtrTy;
  ShadowMapping Mapping;
  FunctionCallee AsanHandleNoReturnFunc;
  FunctionCallee AsanPtrCmpFunction, AsanPtrSubFunction;
  Constant *AsanShadowGlobal;

  // These arrays are indexed by AccessIsWrite, Experiment and log2(AccessSize).
  FunctionCallee AsanErrorCallback[2][2][kNumberOfAccessSizes];
  FunctionCallee AsanMemoryAccessCallback[2][2][kNumberOfAccessSizes];

  // These arrays are indexed by AccessIsWrite and Experiment.
  FunctionCallee AsanErrorCallbackSized[2][2];
  FunctionCallee AsanMemoryAccessCallbackSized[2][2];

  FunctionCallee AsanMemmove, AsanMemcpy, AsanMemset;
  Value *LocalDynamicShadow = nullptr;
  const StackSafetyGlobalInfo *SSGI;
  DenseMap<const AllocaInst *, bool> ProcessedAllocas;

  FunctionCallee AMDGPUAddressShared;
  FunctionCallee AMDGPUAddressPrivate;
  int InstrumentationWithCallsThreshold;
  uint32_t MaxInlinePoisoningSize;
};

class ModuleAddressSanitizer {
public:
  ModuleAddressSanitizer(Module &M, bool InsertVersionCheck,
                         bool CompileKernel = false, bool Recover = false,
                         bool UseGlobalsGC = true, bool UseOdrIndicator = true,
                         AsanDtorKind DestructorKind = AsanDtorKind::Global,
                         AsanCtorKind ConstructorKind = AsanCtorKind::Global)
      : CompileKernel(ClEnableKasan.getNumOccurrences() > 0 ? ClEnableKasan
                                                            : CompileKernel),
        InsertVersionCheck(ClInsertVersionCheck.getNumOccurrences() > 0
                               ? ClInsertVersionCheck
                               : InsertVersionCheck),
        Recover(ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover),
        UseGlobalsGC(UseGlobalsGC && ClUseGlobalsGC && !this->CompileKernel),
        // Enable aliases as they should have no downside with ODR indicators.
        UsePrivateAlias(ClUsePrivateAlias.getNumOccurrences() > 0
                            ? ClUsePrivateAlias
                            : UseOdrIndicator),
        UseOdrIndicator(ClUseOdrIndicator.getNumOccurrences() > 0
                            ? ClUseOdrIndicator
                            : UseOdrIndicator),
        // Not a typo: ClWithComdat is almost completely pointless without
        // ClUseGlobalsGC (because then it only works on modules without
        // globals, which are rare); it is a prerequisite for ClUseGlobalsGC;
        // and both suffer from gold PR19002 for which the UseGlobalsGC
        // constructor argument is designed as a workaround. Therefore, disable
        // both ClWithComdat and ClUseGlobalsGC unless the frontend says it's
        // ok to do globals-gc.
        UseCtorComdat(UseGlobalsGC && ClWithComdat && !this->CompileKernel),
        DestructorKind(DestructorKind),
        ConstructorKind(ClConstructorKind.getNumOccurrences() > 0
                            ? ClConstructorKind
                            : ConstructorKind) {
    C = &(M.getContext());
    int LongSize = M.getDataLayout().getPointerSizeInBits();
    IntptrTy = Type::getIntNTy(*C, LongSize);
    PtrTy = PointerType::getUnqual(*C);
    TargetTriple = Triple(M.getTargetTriple());
    Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel);

    if (ClOverrideDestructorKind != AsanDtorKind::Invalid)
      this->DestructorKind = ClOverrideDestructorKind;
    assert(this->DestructorKind != AsanDtorKind::Invalid);
  }

  bool instrumentModule(Module &);

private:
  void initializeCallbacks(Module &M);

  void instrumentGlobals(IRBuilder<> &IRB, Module &M, bool *CtorComdat);
  void InstrumentGlobalsCOFF(IRBuilder<> &IRB, Module &M,
                             ArrayRef<GlobalVariable *> ExtendedGlobals,
                             ArrayRef<Constant *> MetadataInitializers);
  void instrumentGlobalsELF(IRBuilder<> &IRB, Module &M,
                            ArrayRef<GlobalVariable *> ExtendedGlobals,
                            ArrayRef<Constant *> MetadataInitializers,
                            const std::string &UniqueModuleId);
  void InstrumentGlobalsMachO(IRBuilder<> &IRB, Module &M,
                              ArrayRef<GlobalVariable *> ExtendedGlobals,
                              ArrayRef<Constant *> MetadataInitializers);
  void
  InstrumentGlobalsWithMetadataArray(IRBuilder<> &IRB, Module &M,
                                     ArrayRef<GlobalVariable *> ExtendedGlobals,
                                     ArrayRef<Constant *> MetadataInitializers);

  GlobalVariable *CreateMetadataGlobal(Module &M, Constant *Initializer,
                                       StringRef OriginalName);
  void SetComdatForGlobalMetadata(GlobalVariable *G, GlobalVariable *Metadata,
                                  StringRef InternalSuffix);
  Instruction *CreateAsanModuleDtor(Module &M);

  const GlobalVariable *getExcludedAliasedGlobal(const GlobalAlias &GA) const;
  bool shouldInstrumentGlobal(GlobalVariable *G) const;
  bool ShouldUseMachOGlobalsSection() const;
  StringRef getGlobalMetadataSection() const;
  void poisonOneInitializer(Function &GlobalInit, GlobalValue *ModuleName);
  void createInitializerPoisonCalls(Module &M, GlobalValue *ModuleName);
  uint64_t getMinRedzoneSizeForGlobal() const {
    return getRedzoneSizeForScale(Mapping.Scale);
  }
  uint64_t getRedzoneSizeForGlobal(uint64_t SizeInBytes) const;
  int GetAsanVersion(const Module &M) const;

  bool CompileKernel;
  bool InsertVersionCheck;
  bool Recover;
  bool UseGlobalsGC;
  bool UsePrivateAlias;
  bool UseOdrIndicator;
  bool UseCtorComdat;
  AsanDtorKind DestructorKind;
  AsanCtorKind ConstructorKind;
  Type *IntptrTy;
  PointerType *PtrTy;
  LLVMContext *C;
  Triple TargetTriple;
  ShadowMapping Mapping;
  FunctionCallee AsanPoisonGlobals;
  FunctionCallee AsanUnpoisonGlobals;
  FunctionCallee AsanRegisterGlobals;
  FunctionCallee AsanUnregisterGlobals;
  FunctionCallee AsanRegisterImageGlobals;
  FunctionCallee AsanUnregisterImageGlobals;
  FunctionCallee AsanRegisterElfGlobals;
  FunctionCallee AsanUnregisterElfGlobals;

  Function *AsanCtorFunction = nullptr;
  Function *AsanDtorFunction = nullptr;
};

// Stack poisoning does not play well with exception handling.
// When an exception is thrown, we essentially bypass the code
// that unpoisons the stack. This is why the run-time library has
// to intercept __cxa_throw (as well as longjmp, etc) and unpoison the entire
// stack in the interceptor. This however does not work inside the
// actual function which catches the exception, most likely because the
// compiler hoists the load of the shadow value somewhere too high.
// This causes asan to report a non-existing bug on 453.povray.
// It sounds like an LLVM bug.
struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
  Function &F;
  AddressSanitizer &ASan;
  RuntimeCallInserter &RTCI;
  DIBuilder DIB;
  LLVMContext *C;
  Type *IntptrTy;
  Type *IntptrPtrTy;
  ShadowMapping Mapping;

  SmallVector<AllocaInst *, 16> AllocaVec;
  SmallVector<AllocaInst *, 16> StaticAllocasToMoveUp;
  SmallVector<Instruction *, 8> RetVec;

  FunctionCallee AsanStackMallocFunc[kMaxAsanStackMallocSizeClass + 1],
      AsanStackFreeFunc[kMaxAsanStackMallocSizeClass + 1];
  FunctionCallee AsanSetShadowFunc[0x100] = {};
  FunctionCallee AsanPoisonStackMemoryFunc, AsanUnpoisonStackMemoryFunc;
  FunctionCallee AsanAllocaPoisonFunc, AsanAllocasUnpoisonFunc;

  // Stores the location and arguments of a poisoning/unpoisoning call for an
  // alloca.
  struct AllocaPoisonCall {
    IntrinsicInst *InsBefore;
    AllocaInst *AI;
    uint64_t Size;
    bool DoPoison;
  };
  SmallVector<AllocaPoisonCall, 8> DynamicAllocaPoisonCallVec;
  SmallVector<AllocaPoisonCall, 8> StaticAllocaPoisonCallVec;
  bool HasUntracedLifetimeIntrinsic = false;

  SmallVector<AllocaInst *, 1> DynamicAllocaVec;
  SmallVector<IntrinsicInst *, 1> StackRestoreVec;
  AllocaInst *DynamicAllocaLayout = nullptr;
  IntrinsicInst *LocalEscapeCall = nullptr;

  bool HasInlineAsm = false;
  bool HasReturnsTwiceCall = false;
  bool PoisonStack;

  FunctionStackPoisoner(Function &F, AddressSanitizer &ASan,
                        RuntimeCallInserter &RTCI)
      : F(F), ASan(ASan), RTCI(RTCI),
        DIB(*F.getParent(), /*AllowUnresolved*/ false), C(ASan.C),
        IntptrTy(ASan.IntptrTy), IntptrPtrTy(PointerType::get(IntptrTy, 0)),
        Mapping(ASan.Mapping),
        PoisonStack(ClStack &&
                    !Triple(F.getParent()->getTargetTriple()).isAMDGPU()) {}

  bool runOnFunction() {
    if (!PoisonStack)
      return false;

    if (ClRedzoneByvalArgs)
      copyArgsPassedByValToAllocas();

    // Collect alloca, ret, lifetime instructions etc.
    for (BasicBlock *BB : depth_first(&F.getEntryBlock())) visit(*BB);

    if (AllocaVec.empty() && DynamicAllocaVec.empty()) return false;

    initializeCallbacks(*F.getParent());

    if (HasUntracedLifetimeIntrinsic) {
      // If there are lifetime intrinsics which couldn't be traced back to an
      // alloca, we may not know exactly when a variable enters scope, and
      // therefore should "fail safe" by not poisoning them.
      StaticAllocaPoisonCallVec.clear();
      DynamicAllocaPoisonCallVec.clear();
    }

    processDynamicAllocas();
    processStaticAllocas();

    if (ClDebugStack) {
      LLVM_DEBUG(dbgs() << F);
    }
    return true;
  }

  // Arguments marked with the "byval" attribute are implicitly copied without
  // using an alloca instruction. To produce redzones for those arguments, we
  // copy them a second time into memory allocated with an alloca instruction.
  void copyArgsPassedByValToAllocas();

  // Finds all Alloca instructions and puts
  // poisoned red zones around all of them.
  // Then unpoison everything back before the function returns.
  void processStaticAllocas();
  void processDynamicAllocas();

  void createDynamicAllocasInitStorage();

  // ----------------------- Visitors.
  /// Collect all Ret instructions, or the musttail call instruction if it
  /// precedes the return instruction.
  void visitReturnInst(ReturnInst &RI) {
    if (CallInst *CI = RI.getParent()->getTerminatingMustTailCall())
      RetVec.push_back(CI);
    else
      RetVec.push_back(&RI);
  }

  /// Collect all Resume instructions.
  void visitResumeInst(ResumeInst &RI) { RetVec.push_back(&RI); }

  /// Collect all CleanupReturnInst instructions.
  void visitCleanupReturnInst(CleanupReturnInst &CRI) { RetVec.push_back(&CRI); }

  void unpoisonDynamicAllocasBeforeInst(Instruction *InstBefore,
                                        Value *SavedStack) {
    IRBuilder<> IRB(InstBefore);
    Value *DynamicAreaPtr = IRB.CreatePtrToInt(SavedStack, IntptrTy);
    // When we insert __asan_allocas_unpoison before @llvm.stackrestore, we
    // need to adjust the extracted SP to compute the address of the most
    // recent alloca. We have a special @llvm.get.dynamic.area.offset
    // intrinsic for this purpose.
    if (!isa<ReturnInst>(InstBefore)) {
      Function *DynamicAreaOffsetFunc = Intrinsic::getDeclaration(
          InstBefore->getModule(), Intrinsic::get_dynamic_area_offset,
          {IntptrTy});

      Value *DynamicAreaOffset = IRB.CreateCall(DynamicAreaOffsetFunc, {});

      DynamicAreaPtr = IRB.CreateAdd(IRB.CreatePtrToInt(SavedStack, IntptrTy),
                                     DynamicAreaOffset);
    }

    RTCI.createRuntimeCall(
        IRB, AsanAllocasUnpoisonFunc,
        {IRB.CreateLoad(IntptrTy, DynamicAllocaLayout), DynamicAreaPtr});
  }

  // Unpoison dynamic allocas redzones.
  void unpoisonDynamicAllocas() {
    for (Instruction *Ret : RetVec)
      unpoisonDynamicAllocasBeforeInst(Ret, DynamicAllocaLayout);

    for (Instruction *StackRestoreInst : StackRestoreVec)
      unpoisonDynamicAllocasBeforeInst(StackRestoreInst,
                                       StackRestoreInst->getOperand(0));
  }

  // Deploy and poison redzones around a dynamic alloca call. To do this, we
  // replace the call with another one with changed parameters and replace all
  // its uses with the new address, so that
  //   addr = alloca type, old_size, align
  // is replaced by
  //   new_size = (old_size + additional_size) * sizeof(type)
  //   tmp = alloca i8, new_size, max(align, 32)
  //   addr = tmp + 32 (the first 32 bytes are for the left redzone).
  // additional_size is added so the new allocation contains not only the
  // requested memory, but also the left, partial, and right redzones.
  void handleDynamicAllocaCall(AllocaInst *AI);
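
  // Editor's worked example (hypothetical numbers, not from the original
  // source): for
  //   %a = alloca i8, i64 100, align 16
  // the rewrite allocates the 100 requested bytes plus a 32-byte left redzone
  // and enough partial/right redzone to keep the total redzone-granularity
  // aligned, raises the alignment to max(16, 32) = 32, and rewrites all uses
  // of %a to tmp + 32 so the first 32 bytes form the left redzone.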

  /// Collect Alloca instructions we want (and can) handle.
  void visitAllocaInst(AllocaInst &AI) {
    // FIXME: Handle scalable vectors instead of ignoring them.
    const Type *AllocaType = AI.getAllocatedType();
    const auto *STy = dyn_cast<StructType>(AllocaType);
    if (!ASan.isInterestingAlloca(AI) || isa<ScalableVectorType>(AllocaType) ||
        (STy && STy->containsHomogeneousScalableVectorTypes())) {
      if (AI.isStaticAlloca()) {
        // Skip over allocas that are present *before* the first instrumented
        // alloca, we don't want to move those around.
        if (AllocaVec.empty())
          return;

        StaticAllocasToMoveUp.push_back(&AI);
      }
      return;
    }

    if (!AI.isStaticAlloca())
      DynamicAllocaVec.push_back(&AI);
    else
      AllocaVec.push_back(&AI);
  }

  /// Collect lifetime intrinsic calls to check for use-after-scope
  /// errors.
  void visitIntrinsicInst(IntrinsicInst &II) {
    Intrinsic::ID ID = II.getIntrinsicID();
    if (ID == Intrinsic::stackrestore) StackRestoreVec.push_back(&II);
    if (ID == Intrinsic::localescape) LocalEscapeCall = &II;
    if (!ASan.UseAfterScope)
      return;
    if (!II.isLifetimeStartOrEnd())
      return;
    // Found lifetime intrinsic, add ASan instrumentation if necessary.
    auto *Size = cast<ConstantInt>(II.getArgOperand(0));
    // If size argument is undefined, don't do anything.
    if (Size->isMinusOne()) return;
    // Check that size doesn't saturate uint64_t and can
    // be stored in IntptrTy.
    const uint64_t SizeValue = Size->getValue().getLimitedValue();
    if (SizeValue == ~0ULL ||
        !ConstantInt::isValueValidForType(IntptrTy, SizeValue))
      return;
    // Find alloca instruction that corresponds to llvm.lifetime argument.
    // Currently we can only handle lifetime markers pointing to the
    // beginning of the alloca.
    AllocaInst *AI = findAllocaForValue(II.getArgOperand(1), true);
    if (!AI) {
      HasUntracedLifetimeIntrinsic = true;
      return;
    }
    // We're interested only in allocas we can handle.
    if (!ASan.isInterestingAlloca(*AI))
      return;
    bool DoPoison = (ID == Intrinsic::lifetime_end);
    AllocaPoisonCall APC = {&II, AI, SizeValue, DoPoison};
    if (AI->isStaticAlloca())
      StaticAllocaPoisonCallVec.push_back(APC);
    else if (ClInstrumentDynamicAllocas)
      DynamicAllocaPoisonCallVec.push_back(APC);
  }

  void visitCallBase(CallBase &CB) {
    if (CallInst *CI = dyn_cast<CallInst>(&CB)) {
      HasInlineAsm |= CI->isInlineAsm() && &CB != ASan.LocalDynamicShadow;
      HasReturnsTwiceCall |= CI->canReturnTwice();
    }
  }

  // ---------------------- Helpers.
  void initializeCallbacks(Module &M);

  // Copies bytes from ShadowBytes into shadow memory for indexes where
  // ShadowMask is not zero. If ShadowMask[i] is zero, we assume that
  // ShadowBytes[i] is constantly zero and doesn't need to be overwritten.
  void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
                    IRBuilder<> &IRB, Value *ShadowBase);
  void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
                    size_t Begin, size_t End, IRBuilder<> &IRB,
                    Value *ShadowBase);
  void copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
                          ArrayRef<uint8_t> ShadowBytes, size_t Begin,
                          size_t End, IRBuilder<> &IRB, Value *ShadowBase);

  void poisonAlloca(Value *V, uint64_t Size, IRBuilder<> &IRB, bool DoPoison);

  Value *createAllocaForLayout(IRBuilder<> &IRB, const ASanStackFrameLayout &L,
                               bool Dynamic);
  PHINode *createPHI(IRBuilder<> &IRB, Value *Cond, Value *ValueIfTrue,
                     Instruction *ThenTerm, Value *ValueIfFalse);
};

} // end anonymous namespace

void AddressSanitizerPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<AddressSanitizerPass> *>(this)->printPipeline(
      OS, MapClassName2PassName);
  OS << '<';
  if (Options.CompileKernel)
    OS << "kernel";
  OS << '>';
}
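
// Editor's note (illustration, not from the original source): with
// Options.CompileKernel set this prints "asan<kernel>", matching the textual
// pipeline syntax accepted by the new pass manager, e.g.
//   opt -passes='asan<kernel>' input.ll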

AddressSanitizerPass::AddressSanitizerPass(
    const AddressSanitizerOptions &Options, bool UseGlobalGC,
    bool UseOdrIndicator, AsanDtorKind DestructorKind,
    AsanCtorKind ConstructorKind)
    : Options(Options), UseGlobalGC(UseGlobalGC),
      UseOdrIndicator(UseOdrIndicator), DestructorKind(DestructorKind),
      ConstructorKind(ConstructorKind) {}

PreservedAnalyses AddressSanitizerPass::run(Module &M,
                                            ModuleAnalysisManager &MAM) {
  // Return early if nosanitize_address module flag is present for the module.
  // This implies that the asan pass has already run before.
  if (checkIfAlreadyInstrumented(M, "nosanitize_address"))
    return PreservedAnalyses::all();

  ModuleAddressSanitizer ModuleSanitizer(
      M, Options.InsertVersionCheck, Options.CompileKernel, Options.Recover,
      UseGlobalGC, UseOdrIndicator, DestructorKind, ConstructorKind);
  bool Modified = false;
  auto &FAM = MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
  const StackSafetyGlobalInfo *const SSGI =
      ClUseStackSafety ? &MAM.getResult<StackSafetyGlobalAnalysis>(M) : nullptr;
  for (Function &F : M) {
    AddressSanitizer FunctionSanitizer(
        M, SSGI, Options.InstrumentationWithCallsThreshold,
        Options.MaxInlinePoisoningSize, Options.CompileKernel, Options.Recover,
        Options.UseAfterScope, Options.UseAfterReturn);
    const TargetLibraryInfo &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
    Modified |= FunctionSanitizer.instrumentFunction(F, &TLI);
  }
  Modified |= ModuleSanitizer.instrumentModule(M);
  if (!Modified)
    return PreservedAnalyses::all();

  PreservedAnalyses PA = PreservedAnalyses::none();
  // GlobalsAA is considered stateless and does not get invalidated unless
  // explicitly invalidated; PreservedAnalyses::none() is not enough. Sanitizers
  // make changes that require GlobalsAA to be invalidated.
  PA.abandon<GlobalsAA>();
  return PA;
}

static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
  size_t Res = llvm::countr_zero(TypeSize / 8);
  assert(Res < kNumberOfAccessSizes);
  return Res;
}
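
// Editor's note (illustration, not from the original source): TypeSize is in
// bits, so the five supported access widths map to indices
//   8 -> 0, 16 -> 1, 32 -> 2, 64 -> 3, 128 -> 4
// which select among the per-size callbacks such as
// __asan_report_{load,store}{1,2,4,8,16}.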

/// Check if \p G has been created by a trusted compiler pass.
static bool GlobalWasGeneratedByCompiler(GlobalVariable *G) {
  // Do not instrument @llvm.global_ctors, @llvm.used, etc.
  if (G->getName().starts_with("llvm.") ||
      // Do not instrument gcov counter arrays.
      G->getName().starts_with("__llvm_gcov_ctr") ||
      // Do not instrument rtti proxy symbols for function sanitizer.
      G->getName().starts_with("__llvm_rtti_proxy"))
    return true;

  // Do not instrument asan globals.
  if (G->getName().starts_with(kAsanGenPrefix) ||
      G->getName().starts_with(kSanCovGenPrefix) ||
      G->getName().starts_with(kODRGenPrefix))
    return true;

  return false;
}

static bool isUnsupportedAMDGPUAddrspace(Value *Addr) {
  Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
  unsigned int AddrSpace = PtrTy->getPointerAddressSpace();
  if (AddrSpace == 3 || AddrSpace == 5)
    return true;
  return false;
}

Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
  // Shadow >> scale
  Shadow = IRB.CreateLShr(Shadow, Mapping.Scale);
  if (Mapping.Offset == 0) return Shadow;
  // (Shadow >> scale) | offset
  Value *ShadowBase;
  if (LocalDynamicShadow)
    ShadowBase = LocalDynamicShadow;
  else
    ShadowBase = ConstantInt::get(IntptrTy, Mapping.Offset);
  if (Mapping.OrShadowOffset)
    return IRB.CreateOr(Shadow, ShadowBase);
  else
    return IRB.CreateAdd(Shadow, ShadowBase);
}
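
// Editor's sketch (not in the original source): the IR built above computes,
// in plain C++ terms and assuming the common Linux x86-64 user-space mapping
// (Scale = 3, Offset = 0x7fff8000, OrShadowOffset = false); the
// LLVM_ATTRIBUTE_UNUSED macro from llvm/Support/Compiler.h keeps this
// illustration from triggering unused-function warnings.
LLVM_ATTRIBUTE_UNUSED static uint64_t exampleMemToShadow(uint64_t Addr) {
  return (Addr >> 3) + 0x7fff8000; // one shadow byte covers 8 app bytes
}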

// Instrument memset/memmove/memcpy
void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI,
                                              RuntimeCallInserter &RTCI) {
  InstrumentationIRBuilder IRB(MI);
  if (isa<MemTransferInst>(MI)) {
    RTCI.createRuntimeCall(
        IRB, isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy,
        {IRB.CreateAddrSpaceCast(MI->getOperand(0), PtrTy),
         IRB.CreateAddrSpaceCast(MI->getOperand(1), PtrTy),
         IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
  } else if (isa<MemSetInst>(MI)) {
    RTCI.createRuntimeCall(
        IRB, AsanMemset,
        {IRB.CreateAddrSpaceCast(MI->getOperand(0), PtrTy),
         IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
         IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
  }
  MI->eraseFromParent();
}
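
// Editor's illustration (not from the original source): after this rewrite,
//   call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %src, i64 %n, i1 false)
// effectively becomes
//   call ptr @__asan_memcpy(ptr %dst, ptr %src, i64 %n)
// so the runtime can range-check both operands before performing the copy.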

/// Check if we want (and can) handle this alloca.
bool AddressSanitizer::isInterestingAlloca(const AllocaInst &AI) {
  auto PreviouslySeenAllocaInfo = ProcessedAllocas.find(&AI);

  if (PreviouslySeenAllocaInfo != ProcessedAllocas.end())
    return PreviouslySeenAllocaInfo->getSecond();

  bool IsInteresting =
      (AI.getAllocatedType()->isSized() &&
       // alloca() may be called with 0 size, ignore it.
       ((!AI.isStaticAlloca()) || !getAllocaSizeInBytes(AI).isZero()) &&
       // We are only interested in allocas not promotable to registers.
       // Promotable allocas are common under -O0.
       (!ClSkipPromotableAllocas || !isAllocaPromotable(&AI)) &&
       // inalloca allocas are not treated as static, and we don't want
       // dynamic alloca instrumentation for them as well.
       !AI.isUsedWithInAlloca() &&
       // swifterror allocas are register promoted by ISel
       !AI.isSwiftError() &&
       // safe allocas are not interesting
       !(SSGI && SSGI->isSafe(AI)));

  ProcessedAllocas[&AI] = IsInteresting;
  return IsInteresting;
}

bool AddressSanitizer::ignoreAccess(Instruction *Inst, Value *Ptr) {
  // Instrument accesses from different address spaces only for AMDGPU.
  Type *PtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
  if (PtrTy->getPointerAddressSpace() != 0 &&
      !(TargetTriple.isAMDGPU() && !isUnsupportedAMDGPUAddrspace(Ptr)))
    return true;

  // Ignore swifterror addresses.
  // swifterror memory addresses are mem2reg promoted by instruction
  // selection. As such they cannot have regular uses like an instrumentation
  // function and it makes no sense to track them as memory.
  if (Ptr->isSwiftError())
    return true;

  // Treat memory accesses to promotable allocas as non-interesting since they
  // will not cause memory violations. This greatly speeds up the instrumented
  // executable at -O0.
  if (auto AI = dyn_cast_or_null<AllocaInst>(Ptr))
    if (ClSkipPromotableAllocas && !isInterestingAlloca(*AI))
      return true;

  if (SSGI != nullptr && SSGI->stackAccessIsSafe(*Inst) &&
      findAllocaForValue(Ptr))
    return true;

  return false;
}

void AddressSanitizer::getInterestingMemoryOperands(
    Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting) {
  // Do not instrument the load fetching the dynamic shadow address.
  if (LocalDynamicShadow == I)
    return;

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!ClInstrumentReads || ignoreAccess(I, LI->getPointerOperand()))
      return;
    Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
                             LI->getType(), LI->getAlign());
  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (!ClInstrumentWrites || ignoreAccess(I, SI->getPointerOperand()))
      return;
    Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,
                             SI->getValueOperand()->getType(), SI->getAlign());
  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
    if (!ClInstrumentAtomics || ignoreAccess(I, RMW->getPointerOperand()))
      return;
    Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,
                             RMW->getValOperand()->getType(), std::nullopt);
  } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
    if (!ClInstrumentAtomics || ignoreAccess(I, XCHG->getPointerOperand()))
      return;
    Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
                             XCHG->getCompareOperand()->getType(),
                             std::nullopt);
  } else if (auto CI = dyn_cast<CallInst>(I)) {
    switch (CI->getIntrinsicID()) {
    case Intrinsic::masked_load:
    case Intrinsic::masked_store:
    case Intrinsic::masked_gather:
    case Intrinsic::masked_scatter: {
      bool IsWrite = CI->getType()->isVoidTy();
      // Masked store has an initial operand for the value.
      unsigned OpOffset = IsWrite ? 1 : 0;
      if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
        return;

      auto BasePtr = CI->getOperand(OpOffset);
      if (ignoreAccess(I, BasePtr))
        return;
      Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
      MaybeAlign Alignment = Align(1);
      // Otherwise no alignment guarantees. We probably got Undef.
      if (auto *Op = dyn_cast<ConstantInt>(CI->getOperand(1 + OpOffset)))
        Alignment = Op->getMaybeAlignValue();
      Value *Mask = CI->getOperand(2 + OpOffset);
      Interesting.emplace_back(I, OpOffset, IsWrite, Ty, Alignment, Mask);
      break;
    }
    case Intrinsic::masked_expandload:
    case Intrinsic::masked_compressstore: {
      bool IsWrite = CI->getIntrinsicID() == Intrinsic::masked_compressstore;
      unsigned OpOffset = IsWrite ? 1 : 0;
      if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
        return;
      auto BasePtr = CI->getOperand(OpOffset);
      if (ignoreAccess(I, BasePtr))
        return;
      MaybeAlign Alignment = BasePtr->getPointerAlignment(*DL);
      Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();

      IRBuilder IB(I);
      Value *Mask = CI->getOperand(1 + OpOffset);
      // Use the popcount of Mask as the effective vector length.
      Type *ExtTy = VectorType::get(IntptrTy, cast<VectorType>(Ty));
      Value *ExtMask = IB.CreateZExt(Mask, ExtTy);
      Value *EVL = IB.CreateAddReduce(ExtMask);
      Value *TrueMask = ConstantInt::get(Mask->getType(), 1);
      Interesting.emplace_back(I, OpOffset, IsWrite, Ty, Alignment, TrueMask,
                               EVL);
      break;
    }
    case Intrinsic::vp_load:
    case Intrinsic::vp_store:
    case Intrinsic::experimental_vp_strided_load:
    case Intrinsic::experimental_vp_strided_store: {
      auto *VPI = cast<VPIntrinsic>(CI);
      unsigned IID = CI->getIntrinsicID();
      bool IsWrite = CI->getType()->isVoidTy();
      if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
        return;
      unsigned PtrOpNo = *VPI->getMemoryPointerParamPos(IID);
      Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
      MaybeAlign Alignment = VPI->getOperand(PtrOpNo)->getPointerAlignment(*DL);
      Value *Stride = nullptr;
      if (IID == Intrinsic::experimental_vp_strided_store ||
          IID == Intrinsic::experimental_vp_strided_load) {
        Stride = VPI->getOperand(PtrOpNo + 1);
        // Use the pointer alignment as the element alignment if the stride is
        // a multiple of the pointer alignment. Otherwise, the element
        // alignment should be Align(1).
        unsigned PointerAlign = Alignment.valueOrOne().value();
        if (!isa<ConstantInt>(Stride) ||
            cast<ConstantInt>(Stride)->getZExtValue() % PointerAlign != 0)
          Alignment = Align(1);
      }
      Interesting.emplace_back(I, PtrOpNo, IsWrite, Ty, Alignment,
                               VPI->getMaskParam(), VPI->getVectorLengthParam(),
                               Stride);
      break;
    }
    case Intrinsic::vp_gather:
    case Intrinsic::vp_scatter: {
      auto *VPI = cast<VPIntrinsic>(CI);
      unsigned IID = CI->getIntrinsicID();
      bool IsWrite = IID == Intrinsic::vp_scatter;
      if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
        return;
      unsigned PtrOpNo = *VPI->getMemoryPointerParamPos(IID);
      Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
      MaybeAlign Alignment = VPI->getPointerAlignment();
      Interesting.emplace_back(I, PtrOpNo, IsWrite, Ty, Alignment,
                               VPI->getMaskParam(),
                               VPI->getVectorLengthParam());
      break;
    }
    default:
      for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
        if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
            ignoreAccess(I, CI->getArgOperand(ArgNo)))
          continue;
        Type *Ty = CI->getParamByValType(ArgNo);
        Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
      }
    }
  }
}

static bool isPointerOperand(Value *V) {
  return V->getType()->isPointerTy() || isa<PtrToIntInst>(V);
}
1542
1543// This is a rough heuristic; it may cause both false positives and
1544// false negatives. The proper implementation requires cooperation with
1545// the frontend.
1547 if (ICmpInst *Cmp = dyn_cast<ICmpInst>(I)) {
1548 if (!Cmp->isRelational())
1549 return false;
1550 } else {
1551 return false;
1552 }
1553 return isPointerOperand(I->getOperand(0)) &&
1554 isPointerOperand(I->getOperand(1));
1555}
1556
1557// This is a rough heuristic; it may cause both false positives and
1558// false negatives. The proper implementation requires cooperation with
1559 // the frontend.
1560static bool isInterestingPointerSubtraction(Instruction *I) {
1561 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
1562 if (BO->getOpcode() != Instruction::Sub)
1563 return false;
1564 } else {
1565 return false;
1566 }
1567 return isPointerOperand(I->getOperand(0)) &&
1568 isPointerOperand(I->getOperand(1));
1569}
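// For example, "p1 < p2" or "p1 - p2" where p1 and p2 point into different
// objects is invalid, and instrumenting such sites lets the runtime flag
// them. Equality comparisons are deliberately not matched (isRelational()
// excludes eq/ne), since comparing unrelated pointers for equality is well
// defined.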
1570
1571bool AddressSanitizer::GlobalIsLinkerInitialized(GlobalVariable *G) {
1572 // If a global variable does not have dynamic initialization we don't
1573 // have to instrument it. However, if a global does not have an initializer
1574 // at all, we assume it has a dynamic initializer (in another TU).
1575 if (!G->hasInitializer())
1576 return false;
1577
1578 if (G->hasSanitizerMetadata() && G->getSanitizerMetadata().IsDynInit)
1579 return false;
1580
1581 return true;
1582}
1583
1584void AddressSanitizer::instrumentPointerComparisonOrSubtraction(
1585 Instruction *I, RuntimeCallInserter &RTCI) {
1586 IRBuilder<> IRB(I);
1587 FunctionCallee F = isa<ICmpInst>(I) ? AsanPtrCmpFunction : AsanPtrSubFunction;
1588 Value *Param[2] = {I->getOperand(0), I->getOperand(1)};
1589 for (Value *&i : Param) {
1590 if (i->getType()->isPointerTy())
1591 i = IRB.CreatePointerCast(i, IntptrTy);
1592 }
1593 RTCI.createRuntimeCall(IRB, F, Param);
1594}
1595
1596static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I,
1597 Instruction *InsertBefore, Value *Addr,
1598 MaybeAlign Alignment, unsigned Granularity,
1599 TypeSize TypeStoreSize, bool IsWrite,
1600 Value *SizeArgument, bool UseCalls,
1601 uint32_t Exp, RuntimeCallInserter &RTCI) {
1602 // Instrument a 1-, 2-, 4-, 8-, or 16-byte access with one check
1603 // if the data is properly aligned.
1604 if (!TypeStoreSize.isScalable()) {
1605 const auto FixedSize = TypeStoreSize.getFixedValue();
1606 switch (FixedSize) {
1607 case 8:
1608 case 16:
1609 case 32:
1610 case 64:
1611 case 128:
1612 if (!Alignment || *Alignment >= Granularity ||
1613 *Alignment >= FixedSize / 8)
1614 return Pass->instrumentAddress(I, InsertBefore, Addr, Alignment,
1615 FixedSize, IsWrite, nullptr, UseCalls,
1616 Exp, RTCI);
1617 }
1618 }
1619 Pass->instrumentUnusualSizeOrAlignment(I, InsertBefore, Addr, TypeStoreSize,
1620 IsWrite, nullptr, UseCalls, Exp, RTCI);
1621}
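// For example, an 8-byte access with Align(8) (or any alignment at least the
// shadow granularity or the access size) cannot straddle two shadow granules,
// so the single-check fast path above suffices; an 8-byte access with
// Align(2) may cross a granule boundary and is routed to
// instrumentUnusualSizeOrAlignment instead.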
1622
1623void AddressSanitizer::instrumentMaskedLoadOrStore(
1624 AddressSanitizer *Pass, const DataLayout &DL, Type *IntptrTy, Value *Mask,
1625 Value *EVL, Value *Stride, Instruction *I, Value *Addr,
1626 MaybeAlign Alignment, unsigned Granularity, Type *OpType, bool IsWrite,
1627 Value *SizeArgument, bool UseCalls, uint32_t Exp,
1628 RuntimeCallInserter &RTCI) {
1629 auto *VTy = cast<VectorType>(OpType);
1630 TypeSize ElemTypeSize = DL.getTypeStoreSizeInBits(VTy->getScalarType());
1631 auto Zero = ConstantInt::get(IntptrTy, 0);
1632
1633 IRBuilder IB(I);
1634 Instruction *LoopInsertBefore = I;
1635 if (EVL) {
1636 // The end argument of SplitBlockAndInsertForEachLane is assumed to be
1637 // greater than zero, so we must check whether EVL is zero here.
1638 Type *EVLType = EVL->getType();
1639 Value *IsEVLZero = IB.CreateICmpNE(EVL, ConstantInt::get(EVLType, 0));
1640 LoopInsertBefore = SplitBlockAndInsertIfThen(IsEVLZero, I, false);
1641 IB.SetInsertPoint(LoopInsertBefore);
1642 // Cast EVL to IntptrTy.
1643 EVL = IB.CreateZExtOrTrunc(EVL, IntptrTy);
1644 // To avoid undefined behavior when extracting with an out-of-range index,
1645 // use the minimum of EVL and the element count as the trip count.
1646 Value *EC = IB.CreateElementCount(IntptrTy, VTy->getElementCount());
1647 EVL = IB.CreateBinaryIntrinsic(Intrinsic::umin, EVL, EC);
1648 } else {
1649 EVL = IB.CreateElementCount(IntptrTy, VTy->getElementCount());
1650 }
1651
1652 // Cast Stride to IntptrTy.
1653 if (Stride)
1654 Stride = IB.CreateZExtOrTrunc(Stride, IntptrTy);
1655
1656 SplitBlockAndInsertForEachLane(EVL, LoopInsertBefore,
1657 [&](IRBuilderBase &IRB, Value *Index) {
1658 Value *MaskElem = IRB.CreateExtractElement(Mask, Index);
1659 if (auto *MaskElemC = dyn_cast<ConstantInt>(MaskElem)) {
1660 if (MaskElemC->isZero())
1661 // No check
1662 return;
1663 // Unconditional check
1664 } else {
1665 // Conditional check
1666 Instruction *ThenTerm = SplitBlockAndInsertIfThen(
1667 MaskElem, &*IRB.GetInsertPoint(), false);
1668 IRB.SetInsertPoint(ThenTerm);
1669 }
1670
1671 Value *InstrumentedAddress;
1672 if (isa<VectorType>(Addr->getType())) {
1673 assert(
1674 cast<VectorType>(Addr->getType())->getElementType()->isPointerTy() &&
1675 "Expected vector of pointer.");
1676 InstrumentedAddress = IRB.CreateExtractElement(Addr, Index);
1677 } else if (Stride) {
1678 Index = IRB.CreateMul(Index, Stride);
1679 InstrumentedAddress = IRB.CreatePtrAdd(Addr, Index);
1680 } else {
1681 InstrumentedAddress = IRB.CreateGEP(VTy, Addr, {Zero, Index});
1682 }
1683 doInstrumentAddress(Pass, I, &*IRB.GetInsertPoint(), InstrumentedAddress,
1684 Alignment, Granularity, ElemTypeSize, IsWrite,
1685 SizeArgument, UseCalls, Exp, RTCI);
1686 });
1687}
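// Schematically, the expansion above turns one masked vector access into a
// guarded per-lane loop (a sketch, not literal IR):
//
//   for (Index = 0; Index != min(EVL, NumElements); ++Index)
//     if (extractelement(Mask, Index))
//       <scalar ASan check of the lane address>
//
// where the lane address is extractelement(Addr, Index) for gathers and
// scatters, Addr + Index * Stride for strided accesses, and &Addr[Index]
// for ordinary masked loads and stores.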
1688
1689void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
1690 InterestingMemoryOperand &O, bool UseCalls,
1691 const DataLayout &DL,
1692 RuntimeCallInserter &RTCI) {
1693 Value *Addr = O.getPtr();
1694
1695 // Optimization experiments.
1696 // The experiments can be used to evaluate potential optimizations that remove
1697 // instrumentation (assess false negatives). Instead of completely removing
1698 // some instrumentation, you set Exp to a non-zero value (mask of optimization
1699 // experiments that want to remove instrumentation of this instruction).
1700 // If Exp is non-zero, this pass will emit special calls into runtime
1701 // (e.g. __asan_report_exp_load1 instead of __asan_report_load1). These calls
1702 // make runtime terminate the program in a special way (with a different
1703 // exit status). Then you run the new compiler on a buggy corpus, collect
1704 // the special terminations (ideally, you don't see them at all -- no false
1705 // negatives) and make the decision on the optimization.
1706 uint32_t Exp = ClForceExperiment;
1707
1708 if (ClOpt && ClOptGlobals) {
1709 // If initialization order checking is disabled, a simple access to a
1710 // dynamically initialized global is always valid.
1711 GlobalVariable *G = dyn_cast<GlobalVariable>(getUnderlyingObject(Addr));
1712 if (G && (!ClInitializers || GlobalIsLinkerInitialized(G)) &&
1713 isSafeAccess(ObjSizeVis, Addr, O.TypeStoreSize)) {
1714 NumOptimizedAccessesToGlobalVar++;
1715 return;
1716 }
1717 }
1718
1719 if (ClOpt && ClOptStack) {
1720 // A direct inbounds access to a stack variable is always valid.
1721 if (isa<AllocaInst>(getUnderlyingObject(Addr)) &&
1722 isSafeAccess(ObjSizeVis, Addr, O.TypeStoreSize)) {
1723 NumOptimizedAccessesToStackVar++;
1724 return;
1725 }
1726 }
1727
1728 if (O.IsWrite)
1729 NumInstrumentedWrites++;
1730 else
1731 NumInstrumentedReads++;
1732
1733 unsigned Granularity = 1 << Mapping.Scale;
1734 if (O.MaybeMask) {
1735 instrumentMaskedLoadOrStore(this, DL, IntptrTy, O.MaybeMask, O.MaybeEVL,
1736 O.MaybeStride, O.getInsn(), Addr, O.Alignment,
1737 Granularity, O.OpType, O.IsWrite, nullptr,
1738 UseCalls, Exp, RTCI);
1739 } else {
1740 doInstrumentAddress(this, O.getInsn(), O.getInsn(), Addr, O.Alignment,
1741 Granularity, O.TypeStoreSize, O.IsWrite, nullptr,
1742 UseCalls, Exp, RTCI);
1743 }
1744}
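// For example, "static int g; ... g = 1;" is a statically in-bounds store to
// a linker-initialized global: with ClOpt and ClOptGlobals enabled it is
// proven safe by isSafeAccess, counted in NumOptimizedAccessesToGlobalVar,
// and left uninstrumented.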
1745
1746Instruction *AddressSanitizer::generateCrashCode(Instruction *InsertBefore,
1747 Value *Addr, bool IsWrite,
1748 size_t AccessSizeIndex,
1749 Value *SizeArgument,
1750 uint32_t Exp,
1751 RuntimeCallInserter &RTCI) {
1752 InstrumentationIRBuilder IRB(InsertBefore);
1753 Value *ExpVal = Exp == 0 ? nullptr : ConstantInt::get(IRB.getInt32Ty(), Exp);
1754 CallInst *Call = nullptr;
1755 if (SizeArgument) {
1756 if (Exp == 0)
1757 Call = RTCI.createRuntimeCall(IRB, AsanErrorCallbackSized[IsWrite][0],
1758 {Addr, SizeArgument});
1759 else
1760 Call = RTCI.createRuntimeCall(IRB, AsanErrorCallbackSized[IsWrite][1],
1761 {Addr, SizeArgument, ExpVal});
1762 } else {
1763 if (Exp == 0)
1764 Call = RTCI.createRuntimeCall(
1765 IRB, AsanErrorCallback[IsWrite][0][AccessSizeIndex], Addr);
1766 else
1767 Call = RTCI.createRuntimeCall(
1768 IRB, AsanErrorCallback[IsWrite][1][AccessSizeIndex], {Addr, ExpVal});
1769 }
1770
1771 Call->setCannotMerge();
1772 return Call;
1773}
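// The chosen callback encodes the access in its name: a plain 4-byte load
// aborts via __asan_report_load4(addr), a variable-sized store via
// __asan_report_store_n(addr, size), and when Exp != 0 the experimental
// variants (__asan_report_exp_load4, ...) take the experiment mask as an
// extra argument.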
1774
1775Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
1776 Value *ShadowValue,
1777 uint32_t TypeStoreSize) {
1778 size_t Granularity = static_cast<size_t>(1) << Mapping.Scale;
1779 // Addr & (Granularity - 1)
1780 Value *LastAccessedByte =
1781 IRB.CreateAnd(AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));
1782 // (Addr & (Granularity - 1)) + size - 1
1783 if (TypeStoreSize / 8 > 1)
1784 LastAccessedByte = IRB.CreateAdd(
1785 LastAccessedByte, ConstantInt::get(IntptrTy, TypeStoreSize / 8 - 1));
1786 // (uint8_t) ((Addr & (Granularity-1)) + size - 1)
1787 LastAccessedByte =
1788 IRB.CreateIntCast(LastAccessedByte, ShadowValue->getType(), false);
1789 // ((uint8_t) ((Addr & (Granularity-1)) + size - 1)) >= ShadowValue
1790 return IRB.CreateICmpSGE(LastAccessedByte, ShadowValue);
1791}
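// Worked example with the default Mapping.Scale == 3 (Granularity == 8): a
// shadow byte k in 1..7 means only the first k bytes of that granule are
// addressable. For a 4-byte access with Addr % 8 == 2, the value compared is
// 2 + 4 - 1 == 5, so the check fires exactly when k <= 5, i.e. whenever the
// access would run past the addressable prefix of the granule.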
1792
1793Instruction *AddressSanitizer::instrumentAMDGPUAddress(
1794 Instruction *OrigIns, Instruction *InsertBefore, Value *Addr,
1795 uint32_t TypeStoreSize, bool IsWrite, Value *SizeArgument) {
1796 // Do not instrument unsupported addrspaces.
1797 if (isUnsupportedAMDGPUAddrspace(Addr))
1798 return nullptr;
1799 Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
1800 // Follow host instrumentation for global and constant addresses.
1801 if (PtrTy->getPointerAddressSpace() != 0)
1802 return InsertBefore;
1803 // Instrument generic addresses in supported address spaces.
1804 IRBuilder<> IRB(InsertBefore);
1805 Value *IsShared = IRB.CreateCall(AMDGPUAddressShared, {Addr});
1806 Value *IsPrivate = IRB.CreateCall(AMDGPUAddressPrivate, {Addr});
1807 Value *IsSharedOrPrivate = IRB.CreateOr(IsShared, IsPrivate);
1808 Value *Cmp = IRB.CreateNot(IsSharedOrPrivate);
1809 Value *AddrSpaceZeroLanding =
1810 SplitBlockAndInsertIfThen(Cmp, InsertBefore, false);
1811 InsertBefore = cast<Instruction>(AddrSpaceZeroLanding);
1812 return InsertBefore;
1813}
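// Net effect on AMDGPU: shared (LDS) and private (scratch) pointers are not
// instrumented at all, non-zero address spaces keep the host-style check,
// and a generic-pointer access is only checked under a guard equivalent to
// "if (!is_shared(Addr) && !is_private(Addr))", since those memories have no
// shadow.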
1814
1815Instruction *AddressSanitizer::genAMDGPUReportBlock(IRBuilder<> &IRB,
1816 Value *Cond, bool Recover) {
1817 Module &M = *IRB.GetInsertBlock()->getModule();
1818 Value *ReportCond = Cond;
1819 if (!Recover) {
1820 auto Ballot = M.getOrInsertFunction(kAMDGPUBallotName, IRB.getInt64Ty(),
1821 IRB.getInt1Ty());
1822 ReportCond = IRB.CreateIsNotNull(IRB.CreateCall(Ballot, {Cond}));
1823 }
1824
1825 auto *Trm =
1826 SplitBlockAndInsertIfThen(ReportCond, &*IRB.GetInsertPoint(), false,
1827 MDBuilder(*C).createUnlikelyBranchWeights());
1828 Trm->getParent()->setName("asan.report");
1829
1830 if (Recover)
1831 return Trm;
1832
1833 Trm = SplitBlockAndInsertIfThen(Cond, Trm, false);
1834 IRB.SetInsertPoint(Trm);
1835 return IRB.CreateCall(
1836 M.getOrInsertFunction(kAMDGPUUnreachableName, IRB.getVoidTy()), {});
1837}
1838
1839void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
1840 Instruction *InsertBefore, Value *Addr,
1841 MaybeAlign Alignment,
1842 uint32_t TypeStoreSize, bool IsWrite,
1843 Value *SizeArgument, bool UseCalls,
1844 uint32_t Exp,
1845 RuntimeCallInserter &RTCI) {
1846 if (TargetTriple.isAMDGPU()) {
1847 InsertBefore = instrumentAMDGPUAddress(OrigIns, InsertBefore, Addr,
1848 TypeStoreSize, IsWrite, SizeArgument);
1849 if (!InsertBefore)
1850 return;
1851 }
1852
1853 InstrumentationIRBuilder IRB(InsertBefore);
1854 size_t AccessSizeIndex = TypeStoreSizeToSizeIndex(TypeStoreSize);
1855
1856 if (UseCalls && ClOptimizeCallbacks) {
1857 const ASanAccessInfo AccessInfo(IsWrite, CompileKernel, AccessSizeIndex);
1858 Module *M = IRB.GetInsertBlock()->getParent()->getParent();
1859 IRB.CreateCall(
1860 Intrinsic::getDeclaration(M, Intrinsic::asan_check_memaccess),
1861 {IRB.CreatePointerCast(Addr, PtrTy),
1862 ConstantInt::get(Int32Ty, AccessInfo.Packed)});
1863 return;
1864 }
1865
1866 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
1867 if (UseCalls) {
1868 if (Exp == 0)
1869 RTCI.createRuntimeCall(
1870 IRB, AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex], AddrLong);
1871 else
1872 RTCI.createRuntimeCall(
1873 IRB, AsanMemoryAccessCallback[IsWrite][1][AccessSizeIndex],
1874 {AddrLong, ConstantInt::get(IRB.getInt32Ty(), Exp)});
1875 return;
1876 }
1877
1878 Type *ShadowTy =
1879 IntegerType::get(*C, std::max(8U, TypeStoreSize >> Mapping.Scale));
1880 Type *ShadowPtrTy = PointerType::get(ShadowTy, 0);
1881 Value *ShadowPtr = memToShadow(AddrLong, IRB);
1882 const uint64_t ShadowAlign =
1883 std::max<uint64_t>(Alignment.valueOrOne().value() >> Mapping.Scale, 1);
1884 Value *ShadowValue = IRB.CreateAlignedLoad(
1885 ShadowTy, IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy), Align(ShadowAlign));
1886
1887 Value *Cmp = IRB.CreateIsNotNull(ShadowValue);
1888 size_t Granularity = 1ULL << Mapping.Scale;
1889 Instruction *CrashTerm = nullptr;
1890
1891 bool GenSlowPath = (ClAlwaysSlowPath || (TypeStoreSize < 8 * Granularity));
1892
1893 if (TargetTriple.isAMDGCN()) {
1894 if (GenSlowPath) {
1895 auto *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeStoreSize);
1896 Cmp = IRB.CreateAnd(Cmp, Cmp2);
1897 }
1898 CrashTerm = genAMDGPUReportBlock(IRB, Cmp, Recover);
1899 } else if (GenSlowPath) {
1900 // We use branch weights for the slow path check, to indicate that the slow
1901 // path is rarely taken. This seems to be the case for SPEC benchmarks.
1902 Instruction *CheckTerm = SplitBlockAndInsertIfThen(
1903 Cmp, InsertBefore, false, MDBuilder(*C).createUnlikelyBranchWeights());
1904 assert(cast<BranchInst>(CheckTerm)->isUnconditional());
1905 BasicBlock *NextBB = CheckTerm->getSuccessor(0);
1906 IRB.SetInsertPoint(CheckTerm);
1907 Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeStoreSize);
1908 if (Recover) {
1909 CrashTerm = SplitBlockAndInsertIfThen(Cmp2, CheckTerm, false);
1910 } else {
1911 BasicBlock *CrashBlock =
1912 BasicBlock::Create(*C, "", NextBB->getParent(), NextBB);
1913 CrashTerm = new UnreachableInst(*C, CrashBlock);
1914 BranchInst *NewTerm = BranchInst::Create(CrashBlock, NextBB, Cmp2);
1915 ReplaceInstWithInst(CheckTerm, NewTerm);
1916 }
1917 } else {
1918 CrashTerm = SplitBlockAndInsertIfThen(Cmp, InsertBefore, !Recover);
1919 }
1920
1921 Instruction *Crash = generateCrashCode(
1922 CrashTerm, AddrLong, IsWrite, AccessSizeIndex, SizeArgument, Exp, RTCI);
1923 if (OrigIns->getDebugLoc())
1924 Crash->setDebugLoc(OrigIns->getDebugLoc());
1925}
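// Putting the pieces together, the inline fast path built above is,
// schematically (not literal IR):
//
//   Shadow = *(int8_t *)((AddrLong >> Mapping.Scale) + Mapping.Offset);
//   if (Shadow != 0)                                     // unlikely branch
//     if (no slow path || (AddrLong & (Granularity - 1)) + Size - 1 >= Shadow)
//       report-and-maybe-abort;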
1926
1927// Instrument unusual size or unusual alignment.
1928 // We cannot do it with a single check, so we do a 1-byte check for the first
1929// and the last bytes. We call __asan_report_*_n(addr, real_size) to be able
1930// to report the actual access size.
1931void AddressSanitizer::instrumentUnusualSizeOrAlignment(
1932 Instruction *I, Instruction *InsertBefore, Value *Addr,
1933 TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls,
1934 uint32_t Exp, RuntimeCallInserter &RTCI) {
1935 InstrumentationIRBuilder IRB(InsertBefore);
1936 Value *NumBits = IRB.CreateTypeSize(IntptrTy, TypeStoreSize);
1937 Value *Size = IRB.CreateLShr(NumBits, ConstantInt::get(IntptrTy, 3));
1938
1939 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
1940 if (UseCalls) {
1941 if (Exp == 0)
1942 RTCI.createRuntimeCall(IRB, AsanMemoryAccessCallbackSized[IsWrite][0],
1943 {AddrLong, Size});
1944 else
1945 RTCI.createRuntimeCall(
1946 IRB, AsanMemoryAccessCallbackSized[IsWrite][1],
1947 {AddrLong, Size, ConstantInt::get(IRB.getInt32Ty(), Exp)});
1948 } else {
1949 Value *SizeMinusOne = IRB.CreateSub(Size, ConstantInt::get(IntptrTy, 1));
1950 Value *LastByte = IRB.CreateIntToPtr(
1951 IRB.CreateAdd(AddrLong, SizeMinusOne),
1952 Addr->getType());
1953 instrumentAddress(I, InsertBefore, Addr, {}, 8, IsWrite, Size, false, Exp,
1954 RTCI);
1955 instrumentAddress(I, InsertBefore, LastByte, {}, 8, IsWrite, Size, false,
1956 Exp, RTCI);
1957 }
1958}
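// For example, a 10-byte access (an x86_fp80 store, say) cannot be covered
// by one shadow check, so the code above emits 1-byte checks for Addr and
// Addr + 9, and a failing check reports through the *_n callbacks with the
// real access size (10) passed as the Size argument.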
1959
1960 void ModuleAddressSanitizer::poisonOneInitializer(Function &GlobalInit,
1961 GlobalValue *ModuleName) {
1962 // Set up the arguments to our poison/unpoison functions.
1963 IRBuilder<> IRB(&GlobalInit.front(),
1964 GlobalInit.front().getFirstInsertionPt());
1965
1966 // Add a call to poison all external globals before the given function starts.
1967 Value *ModuleNameAddr = ConstantExpr::getPointerCast(ModuleName, IntptrTy);
1968 IRB.CreateCall(AsanPoisonGlobals, ModuleNameAddr);
1969
1970 // Add calls to unpoison all globals before each return instruction.
1971 for (auto &BB : GlobalInit)
1972 if (ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator()))
1973 CallInst::Create(AsanUnpoisonGlobals, "", RI->getIterator());
1974}
1975
1976 void ModuleAddressSanitizer::createInitializerPoisonCalls(
1977 Module &M, GlobalValue *ModuleName) {
1978 GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors");
1979 if (!GV)
1980 return;
1981
1982 ConstantArray *CA = dyn_cast<ConstantArray>(GV->getInitializer());
1983 if (!CA)
1984 return;
1985
1986 for (Use &OP : CA->operands()) {
1987 if (isa<ConstantAggregateZero>(OP)) continue;
1988 ConstantStruct *CS = cast<ConstantStruct>(OP);
1989
1990 // Must have a function or null ptr.
1991 if (Function *F = dyn_cast<Function>(CS->getOperand(1))) {
1992 if (F->getName() == kAsanModuleCtorName) continue;
1993 auto *Priority = cast<ConstantInt>(CS->getOperand(0));
1994 // Don't instrument CTORs that will run before asan.module_ctor.
1995 if (Priority->getLimitedValue() <= GetCtorAndDtorPriority(TargetTriple))
1996 continue;
1997 poisonOneInitializer(*F, ModuleName);
1998 }
1999 }
2000}
2001
2002const GlobalVariable *
2003ModuleAddressSanitizer::getExcludedAliasedGlobal(const GlobalAlias &GA) const {
2004 // In case this function should be expanded to include rules that do not just
2005 // apply when CompileKernel is true, either guard all existing rules with an
2006 // 'if (CompileKernel) { ... }' or be absolutely sure that all these rules
2007 // should also apply to user space.
2008 assert(CompileKernel && "Only expecting to be called when compiling kernel");
2009
2010 const Constant *C = GA.getAliasee();
2011
2012 // When compiling the kernel, globals that are aliased by symbols prefixed
2013 // by "__" are special and cannot be padded with a redzone.
2014 if (GA.getName().starts_with("__"))
2015 return dyn_cast<GlobalVariable>(C->stripPointerCastsAndAliases());
2016
2017 return nullptr;
2018}
2019
2020bool ModuleAddressSanitizer::shouldInstrumentGlobal(GlobalVariable *G) const {
2021 Type *Ty = G->getValueType();
2022 LLVM_DEBUG(dbgs() << "GLOBAL: " << *G << "\n");
2023
2024 if (G->hasSanitizerMetadata() && G->getSanitizerMetadata().NoAddress)
2025 return false;
2026 if (!Ty->isSized()) return false;
2027 if (!G->hasInitializer()) return false;
2028 // Globals in address space 1 and 4 are supported for AMDGPU.
2029 if (G->getAddressSpace() &&
2030 !(TargetTriple.isAMDGPU() && !isUnsupportedAMDGPUAddrspace(G)))
2031 return false;
2032 if (GlobalWasGeneratedByCompiler(G)) return false; // Our own globals.
2033 // Two problems with thread-locals:
2034 // - The address of the main thread's copy can't be computed at link-time.
2035 // - Need to poison all copies, not just the main thread's one.
2036 if (G->isThreadLocal()) return false;
2037 // For now, just ignore this Global if the alignment is large.
2038 if (G->getAlign() && *G->getAlign() > getMinRedzoneSizeForGlobal()) return false;
2039
2040 // For non-COFF targets, only instrument globals known to be defined by this
2041 // TU.
2042 // FIXME: We can instrument comdat globals on ELF if we are using the
2043 // GC-friendly metadata scheme.
2044 if (!TargetTriple.isOSBinFormatCOFF()) {
2045 if (!G->hasExactDefinition() || G->hasComdat())
2046 return false;
2047 } else {
2048 // On COFF, don't instrument non-ODR linkages.
2049 if (G->isInterposable())
2050 return false;
2051 // If the global has AvailableExternally linkage, then it is not in this
2052 // module, which means it does not need to be instrumented.
2053 if (G->hasAvailableExternallyLinkage())
2054 return false;
2055 }
2056
2057 // If a comdat is present, it must have a selection kind that implies ODR
2058 // semantics: no duplicates, any, or exact match.
2059 if (Comdat *C = G->getComdat()) {
2060 switch (C->getSelectionKind()) {
2061 case Comdat::Any:
2062 case Comdat::ExactMatch:
2063 case Comdat::NoDeduplicate:
2064 break;
2065 case Comdat::Largest:
2066 case Comdat::SameSize:
2067 return false;
2068 }
2069 }
2070
2071 if (G->hasSection()) {
2072 // The kernel uses explicit sections for mostly special global variables
2073 // that we should not instrument. E.g. the kernel may rely on their layout
2074 // without redzones, or remove them at link time ("discard.*"), etc.
2075 if (CompileKernel)
2076 return false;
2077
2078 StringRef Section = G->getSection();
2079
2080 // Globals from llvm.metadata aren't emitted, do not instrument them.
2081 if (Section == "llvm.metadata") return false;
2082 // Do not instrument globals from special LLVM sections.
2083 if (Section.contains("__llvm") || Section.contains("__LLVM"))
2084 return false;
2085
2086 // Do not instrument function pointers to initialization and termination
2087 // routines: dynamic linker will not properly handle redzones.
2088 if (Section.starts_with(".preinit_array") ||
2089 Section.starts_with(".init_array") ||
2090 Section.starts_with(".fini_array")) {
2091 return false;
2092 }
2093
2094 // Do not instrument user-defined sections (with names resembling
2095 // valid C identifiers)
2096 if (TargetTriple.isOSBinFormatELF()) {
2097 if (llvm::all_of(Section,
2098 [](char c) { return llvm::isAlnum(c) || c == '_'; }))
2099 return false;
2100 }
2101
2102 // On COFF, if the section name contains '$', it is highly likely that the
2103 // user is using section sorting to create an array of globals similar to
2104 // the way initialization callbacks are registered in .init_array and
2105 // .CRT$XCU. The ATL also registers things in .ATL$__[azm]. Adding redzones
2106 // to such globals is counterproductive, because the intent is that they
2107 // will form an array, and out-of-bounds accesses are expected.
2108 // See https://github.com/google/sanitizers/issues/305
2109 // and http://msdn.microsoft.com/en-US/en-en/library/bb918180(v=vs.120).aspx
2110 if (TargetTriple.isOSBinFormatCOFF() && Section.contains('$')) {
2111 LLVM_DEBUG(dbgs() << "Ignoring global in sorted section (contains '$'): "
2112 << *G << "\n");
2113 return false;
2114 }
2115
2116 if (TargetTriple.isOSBinFormatMachO()) {
2117 StringRef ParsedSegment, ParsedSection;
2118 unsigned TAA = 0, StubSize = 0;
2119 bool TAAParsed;
2120 cantFail(MCSectionMachO::ParseSectionSpecifier(
2121 Section, ParsedSegment, ParsedSection, TAA, TAAParsed, StubSize));
2122
2123 // Ignore the globals from the __OBJC section. The ObjC runtime assumes
2124 // those conform to /usr/lib/objc/runtime.h, so we can't add redzones to
2125 // them.
2126 if (ParsedSegment == "__OBJC" ||
2127 (ParsedSegment == "__DATA" && ParsedSection.starts_with("__objc_"))) {
2128 LLVM_DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G << "\n");
2129 return false;
2130 }
2131 // See https://github.com/google/sanitizers/issues/32
2132 // Constant CFString instances are compiled in the following way:
2133 // -- the string buffer is emitted into
2134 // __TEXT,__cstring,cstring_literals
2135 // -- the constant NSConstantString structure referencing that buffer
2136 // is placed into __DATA,__cfstring
2137 // Therefore there's no point in placing redzones into __DATA,__cfstring.
2138 // Moreover, it causes the linker to crash on OS X 10.7
2139 if (ParsedSegment == "__DATA" && ParsedSection == "__cfstring") {
2140 LLVM_DEBUG(dbgs() << "Ignoring CFString: " << *G << "\n");
2141 return false;
2142 }
2143 // The linker merges the contents of cstring_literals and removes the
2144 // trailing zeroes.
2145 if (ParsedSegment == "__TEXT" && (TAA & MachO::S_CSTRING_LITERALS)) {
2146 LLVM_DEBUG(dbgs() << "Ignoring a cstring literal: " << *G << "\n");
2147 return false;
2148 }
2149 }
2150 }
2151
2152 if (CompileKernel) {
2153 // Globals that are prefixed by "__" are special and cannot be padded with a
2154 // redzone.
2155 if (G->getName().starts_with("__"))
2156 return false;
2157 }
2158
2159 return true;
2160}
2161
2162// On Mach-O platforms, we emit global metadata in a separate section of the
2163// binary in order to allow the linker to properly dead strip. This is only
2164// supported on recent versions of ld64.
2165bool ModuleAddressSanitizer::ShouldUseMachOGlobalsSection() const {
2166 if (!TargetTriple.isOSBinFormatMachO())
2167 return false;
2168
2169 if (TargetTriple.isMacOSX() && !TargetTriple.isMacOSXVersionLT(10, 11))
2170 return true;
2171 if (TargetTriple.isiOS() /* or tvOS */ && !TargetTriple.isOSVersionLT(9))
2172 return true;
2173 if (TargetTriple.isWatchOS() && !TargetTriple.isOSVersionLT(2))
2174 return true;
2175 if (TargetTriple.isDriverKit())
2176 return true;
2177 if (TargetTriple.isXROS())
2178 return true;
2179
2180 return false;
2181}
2182
2183StringRef ModuleAddressSanitizer::getGlobalMetadataSection() const {
2184 switch (TargetTriple.getObjectFormat()) {
2185 case Triple::COFF: return ".ASAN$GL";
2186 case Triple::ELF: return "asan_globals";
2187 case Triple::MachO: return "__DATA,__asan_globals,regular";
2188 case Triple::Wasm:
2189 case Triple::GOFF:
2190 case Triple::SPIRV:
2191 case Triple::XCOFF:
2192 case Triple::DXContainer:
2193 report_fatal_error(
2194 "ModuleAddressSanitizer not implemented for object file format");
2195 case Triple::UnknownObjectFormat:
2196 break;
2197 }
2198 llvm_unreachable("unsupported object format");
2199}
2200
2201void ModuleAddressSanitizer::initializeCallbacks(Module &M) {
2202 IRBuilder<> IRB(*C);
2203
2204 // Declare our poisoning and unpoisoning functions.
2205 AsanPoisonGlobals =
2206 M.getOrInsertFunction(kAsanPoisonGlobalsName, IRB.getVoidTy(), IntptrTy);
2207 AsanUnpoisonGlobals =
2208 M.getOrInsertFunction(kAsanUnpoisonGlobalsName, IRB.getVoidTy());
2209
2210 // Declare functions that register/unregister globals.
2211 AsanRegisterGlobals = M.getOrInsertFunction(
2212 kAsanRegisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy);
2213 AsanUnregisterGlobals = M.getOrInsertFunction(
2214 kAsanUnregisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy);
2215
2216 // Declare the functions that find globals in a shared object and then invoke
2217 // the (un)register function on them.
2218 AsanRegisterImageGlobals = M.getOrInsertFunction(
2219 kAsanRegisterImageGlobalsName, IRB.getVoidTy(), IntptrTy);
2220 AsanUnregisterImageGlobals = M.getOrInsertFunction(
2221 kAsanUnregisterImageGlobalsName, IRB.getVoidTy(), IntptrTy);
2222
2223 AsanRegisterElfGlobals =
2224 M.getOrInsertFunction(kAsanRegisterElfGlobalsName, IRB.getVoidTy(),
2225 IntptrTy, IntptrTy, IntptrTy);
2226 AsanUnregisterElfGlobals =
2227 M.getOrInsertFunction(kAsanUnregisterElfGlobalsName, IRB.getVoidTy(),
2228 IntptrTy, IntptrTy, IntptrTy);
2229}
2230
2231// Put the metadata and the instrumented global in the same group. This ensures
2232// that the metadata is discarded if the instrumented global is discarded.
2233void ModuleAddressSanitizer::SetComdatForGlobalMetadata(
2234 GlobalVariable *G, GlobalVariable *Metadata, StringRef InternalSuffix) {
2235 Module &M = *G->getParent();
2236 Comdat *C = G->getComdat();
2237 if (!C) {
2238 if (!G->hasName()) {
2239 // If G is unnamed, it must be internal. Give it an artificial name
2240 // so we can put it in a comdat.
2241 assert(G->hasLocalLinkage());
2242 G->setName(Twine(kAsanGenPrefix) + "_anon_global");
2243 }
2244
2245 if (!InternalSuffix.empty() && G->hasLocalLinkage()) {
2246 std::string Name = std::string(G->getName());
2247 Name += InternalSuffix;
2248 C = M.getOrInsertComdat(Name);
2249 } else {
2250 C = M.getOrInsertComdat(G->getName());
2251 }
2252
2253 // Make this IMAGE_COMDAT_SELECT_NODUPLICATES on COFF. Also upgrade private
2254 // linkage to internal linkage so that a symbol table entry is emitted. This
2255 // is necessary in order to create the comdat group.
2256 if (TargetTriple.isOSBinFormatCOFF()) {
2257 C->setSelectionKind(Comdat::NoDeduplicate);
2258 if (G->hasPrivateLinkage())
2259 G->setLinkage(GlobalValue::InternalLinkage);
2260 }
2261 G->setComdat(C);
2262 }
2263
2264 assert(G->hasComdat());
2265 Metadata->setComdat(G->getComdat());
2266}
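// For example, if an instrumented global g is in comdat "g", its
// __asan_global_g metadata variable joins the same comdat group, so whether
// the linker keeps one copy of the group or discards it entirely, the
// registration entry follows the global rather than dangling.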
2267
2268// Create a separate metadata global and put it in the appropriate ASan
2269 // global registration section.
2270GlobalVariable *
2271ModuleAddressSanitizer::CreateMetadataGlobal(Module &M, Constant *Initializer,
2272 StringRef OriginalName) {
2273 auto Linkage = TargetTriple.isOSBinFormatMachO()
2274 ? GlobalVariable::InternalLinkage
2275 : GlobalVariable::PrivateLinkage;
2276 GlobalVariable *Metadata = new GlobalVariable(
2277 M, Initializer->getType(), false, Linkage, Initializer,
2278 Twine("__asan_global_") + GlobalValue::dropLLVMManglingEscape(OriginalName));
2279 Metadata->setSection(getGlobalMetadataSection());
2280 // Place metadata in a large section for x86-64 ELF binaries to mitigate
2281 // relocation pressure.
2282 setGlobalVariableLargeSection(TargetTriple, *Metadata);
2283 return Metadata;
2284}
2285
2286Instruction *ModuleAddressSanitizer::CreateAsanModuleDtor(Module &M) {
2287 AsanDtorFunction = Function::createWithDefaultAttr(
2288 FunctionType::get(Type::getVoidTy(*C), false),
2289 GlobalValue::InternalLinkage, 0, kAsanModuleDtorName, &M);
2290 AsanDtorFunction->addFnAttr(Attribute::NoUnwind);
2291 // Ensure Dtor cannot be discarded, even if in a comdat.
2292 appendToUsed(M, {AsanDtorFunction});
2293 BasicBlock *AsanDtorBB = BasicBlock::Create(*C, "", AsanDtorFunction);
2294
2295 return ReturnInst::Create(*C, AsanDtorBB);
2296}
2297
2298void ModuleAddressSanitizer::InstrumentGlobalsCOFF(
2299 IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals,
2300 ArrayRef<Constant *> MetadataInitializers) {
2301 assert(ExtendedGlobals.size() == MetadataInitializers.size());
2302 auto &DL = M.getDataLayout();
2303
2304 SmallVector<GlobalValue *, 16> MetadataGlobals(ExtendedGlobals.size());
2305 for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
2306 Constant *Initializer = MetadataInitializers[i];
2307 GlobalVariable *G = ExtendedGlobals[i];
2308 GlobalVariable *Metadata =
2309 CreateMetadataGlobal(M, Initializer, G->getName());
2310 MDNode *MD = MDNode::get(M.getContext(), ValueAsMetadata::get(G));
2311 Metadata->setMetadata(LLVMContext::MD_associated, MD);
2312 MetadataGlobals[i] = Metadata;
2313
2314 // The MSVC linker always inserts padding when linking incrementally. We
2315 // cope with that by aligning each struct to its size, which must be a power
2316 // of two.
2317 unsigned SizeOfGlobalStruct = DL.getTypeAllocSize(Initializer->getType());
2318 assert(isPowerOf2_32(SizeOfGlobalStruct) &&
2319 "global metadata will not be padded appropriately");
2320 Metadata->setAlignment(assumeAligned(SizeOfGlobalStruct));
2321
2322 SetComdatForGlobalMetadata(G, Metadata, "");
2323 }
2324
2325 // Update llvm.compiler.used, adding the new metadata globals. This is
2326 // needed so that during LTO these variables stay alive.
2327 if (!MetadataGlobals.empty())
2328 appendToCompilerUsed(M, MetadataGlobals);
2329}
2330
2331void ModuleAddressSanitizer::instrumentGlobalsELF(
2332 IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals,
2333 ArrayRef<Constant *> MetadataInitializers,
2334 const std::string &UniqueModuleId) {
2335 assert(ExtendedGlobals.size() == MetadataInitializers.size());
2336
2337 // Putting globals in a comdat changes the semantics and can potentially cause
2338 // false negative ODR violations at link time. If ODR indicators are used, we
2339 // keep the comdat sections, as link-time ODR violations will be detected on
2340 // the ODR indicator symbols.
2341 bool UseComdatForGlobalsGC = UseOdrIndicator && !UniqueModuleId.empty();
2342
2343 SmallVector<GlobalValue *, 16> MetadataGlobals(ExtendedGlobals.size());
2344 for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
2345 GlobalVariable *G = ExtendedGlobals[i];
2346 GlobalVariable *Metadata =
2347 CreateMetadataGlobal(M, MetadataInitializers[i], G->getName());
2348 MDNode *MD = MDNode::get(M.getContext(), ValueAsMetadata::get(G));
2349 Metadata->setMetadata(LLVMContext::MD_associated, MD);
2350 MetadataGlobals[i] = Metadata;
2351
2352 if (UseComdatForGlobalsGC)
2353 SetComdatForGlobalMetadata(G, Metadata, UniqueModuleId);
2354 }
2355
2356 // Update llvm.compiler.used, adding the new metadata globals. This is
2357 // needed so that during LTO these variables stay alive.
2358 if (!MetadataGlobals.empty())
2359 appendToCompilerUsed(M, MetadataGlobals);
2360
2361 // RegisteredFlag serves two purposes. First, we can pass it to dladdr()
2362 // to look up the loaded image that contains it. Second, we can store in it
2363 // whether registration has already occurred, to prevent duplicate
2364 // registration.
2365 //
2366 // Common linkage ensures that there is only one global per shared library.
2367 GlobalVariable *RegisteredFlag = new GlobalVariable(
2368 M, IntptrTy, false, GlobalVariable::CommonLinkage,
2369 ConstantInt::get(IntptrTy, 0), kAsanGlobalsRegisteredFlagName);
2370 RegisteredFlag->setVisibility(GlobalVariable::HiddenVisibility);
2371
2372 // Create start and stop symbols.
2373 GlobalVariable *StartELFMetadata = new GlobalVariable(
2374 M, IntptrTy, false, GlobalVariable::ExternalWeakLinkage, nullptr,
2375 "__start_" + getGlobalMetadataSection());
2377 GlobalVariable *StopELFMetadata = new GlobalVariable(
2378 M, IntptrTy, false, GlobalVariable::ExternalWeakLinkage, nullptr,
2379 "__stop_" + getGlobalMetadataSection());
2381
2382 // Create a call to register the globals with the runtime.
2383 if (ConstructorKind == AsanCtorKind::Global)
2384 IRB.CreateCall(AsanRegisterElfGlobals,
2385 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy),
2386 IRB.CreatePointerCast(StartELFMetadata, IntptrTy),
2387 IRB.CreatePointerCast(StopELFMetadata, IntptrTy)});
2388
2389 // We also need to unregister globals at the end, e.g., when a shared library
2390 // gets closed.
2391 if (DestructorKind != AsanDtorKind::None && !MetadataGlobals.empty()) {
2392 IRBuilder<> IrbDtor(CreateAsanModuleDtor(M));
2393 IrbDtor.CreateCall(AsanUnregisterElfGlobals,
2394 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy),
2395 IRB.CreatePointerCast(StartELFMetadata, IntptrTy),
2396 IRB.CreatePointerCast(StopELFMetadata, IntptrTy)});
2397 }
2398}
2399
2400void ModuleAddressSanitizer::InstrumentGlobalsMachO(
2401 IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals,
2402 ArrayRef<Constant *> MetadataInitializers) {
2403 assert(ExtendedGlobals.size() == MetadataInitializers.size());
2404
2405 // On recent Mach-O platforms, use a structure which binds the liveness of
2406 // the global variable to the metadata struct. Keep the list of "Liveness" GVs
2407 // created so they can be added to llvm.compiler.used.
2408 StructType *LivenessTy = StructType::get(IntptrTy, IntptrTy);
2409 SmallVector<GlobalValue *, 16> LivenessGlobals(ExtendedGlobals.size());
2410
2411 for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
2412 Constant *Initializer = MetadataInitializers[i];
2413 GlobalVariable *G = ExtendedGlobals[i];
2414 GlobalVariable *Metadata =
2415 CreateMetadataGlobal(M, Initializer, G->getName());
2416
2417 // On recent Mach-O platforms, we emit the global metadata in a way that
2418 // allows the linker to properly strip dead globals.
2419 auto LivenessBinder =
2420 ConstantStruct::get(LivenessTy, Initializer->getAggregateElement(0u),
2421 Initializer->getAggregateElement(1u));
2422 GlobalVariable *Liveness = new GlobalVariable(
2423 M, LivenessTy, false, GlobalVariable::InternalLinkage, LivenessBinder,
2424 Twine("__asan_binder_") + G->getName());
2425 Liveness->setSection("__DATA,__asan_liveness,regular,live_support");
2426 LivenessGlobals[i] = Liveness;
2427 }
2428
2429 // Update llvm.compiler.used, adding the new liveness globals. This is
2430 // needed so that during LTO these variables stay alive. The alternative
2431 // would be to have the linker handle the LTO symbols, but libLTO's
2432 // current API does not expose access to the section for each symbol.
2433 if (!LivenessGlobals.empty())
2434 appendToCompilerUsed(M, LivenessGlobals);
2435
2436 // RegisteredFlag serves two purposes. First, we can pass it to dladdr()
2437 // to look up the loaded image that contains it. Second, we can store in it
2438 // whether registration has already occurred, to prevent duplicate
2439 // registration.
2440 //
2441 // Common linkage ensures that there is only one global per shared library.
2442 GlobalVariable *RegisteredFlag = new GlobalVariable(
2443 M, IntptrTy, false, GlobalVariable::CommonLinkage,
2444 ConstantInt::get(IntptrTy, 0), kAsanGlobalsRegisteredFlagName);
2445 RegisteredFlag->setVisibility(GlobalVariable::HiddenVisibility);
2446
2447 if (ConstructorKind == AsanCtorKind::Global)
2448 IRB.CreateCall(AsanRegisterImageGlobals,
2449 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy)});
2450
2451 // We also need to unregister globals at the end, e.g., when a shared library
2452 // gets closed.
2453 if (DestructorKind != AsanDtorKind::None) {
2454 IRBuilder<> IrbDtor(CreateAsanModuleDtor(M));
2455 IrbDtor.CreateCall(AsanUnregisterImageGlobals,
2456 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy)});
2457 }
2458}
2459
2460void ModuleAddressSanitizer::InstrumentGlobalsWithMetadataArray(
2461 IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals,
2462 ArrayRef<Constant *> MetadataInitializers) {
2463 assert(ExtendedGlobals.size() == MetadataInitializers.size());
2464 unsigned N = ExtendedGlobals.size();
2465 assert(N > 0);
2466
2467 // On platforms that don't have a custom metadata section, we emit an array
2468 // of global metadata structures.
2469 ArrayType *ArrayOfGlobalStructTy =
2470 ArrayType::get(MetadataInitializers[0]->getType(), N);
2471 auto AllGlobals = new GlobalVariable(
2472 M, ArrayOfGlobalStructTy, false, GlobalVariable::InternalLinkage,
2473 ConstantArray::get(ArrayOfGlobalStructTy, MetadataInitializers), "");
2474 if (Mapping.Scale > 3)
2475 AllGlobals->setAlignment(Align(1ULL << Mapping.Scale));
2476
2477 if (ConstructorKind == AsanCtorKind::Global)
2478 IRB.CreateCall(AsanRegisterGlobals,
2479 {IRB.CreatePointerCast(AllGlobals, IntptrTy),
2480 ConstantInt::get(IntptrTy, N)});
2481
2482 // We also need to unregister globals at the end, e.g., when a shared library
2483 // gets closed.
2484 if (DestructorKind != AsanDtorKind::None) {
2485 IRBuilder<> IrbDtor(CreateAsanModuleDtor(M));
2486 IrbDtor.CreateCall(AsanUnregisterGlobals,
2487 {IRB.CreatePointerCast(AllGlobals, IntptrTy),
2488 ConstantInt::get(IntptrTy, N)});
2489 }
2490}
2491
2492// This function replaces all global variables with new variables that have
2493// trailing redzones. It also creates a function that poisons
2494// redzones and inserts this function into llvm.global_ctors.
2495// Sets *CtorComdat to true if the global registration code emitted into the
2496// asan constructor is comdat-compatible.
2497void ModuleAddressSanitizer::instrumentGlobals(IRBuilder<> &IRB, Module &M,
2498 bool *CtorComdat) {
2499 // Build set of globals that are aliased by some GA, where
2500 // getExcludedAliasedGlobal(GA) returns the relevant GlobalVariable.
2501 SmallPtrSet<const GlobalVariable *, 16> AliasedGlobalExclusions;
2502 if (CompileKernel) {
2503 for (auto &GA : M.aliases()) {
2504 if (const GlobalVariable *GV = getExcludedAliasedGlobal(GA))
2505 AliasedGlobalExclusions.insert(GV);
2506 }
2507 }
2508
2509 SmallVector<GlobalVariable *, 16> GlobalsToChange;
2510 for (auto &G : M.globals()) {
2511 if (!AliasedGlobalExclusions.count(&G) && shouldInstrumentGlobal(&G))
2512 GlobalsToChange.push_back(&G);
2513 }
2514
2515 size_t n = GlobalsToChange.size();
2516 auto &DL = M.getDataLayout();
2517
2518 // A global is described by a structure
2519 // size_t beg;
2520 // size_t size;
2521 // size_t size_with_redzone;
2522 // const char *name;
2523 // const char *module_name;
2524 // size_t has_dynamic_init;
2525 // size_t padding_for_windows_msvc_incremental_link;
2526 // size_t odr_indicator;
2527 // We initialize an array of such structures and pass it to a run-time call.
2528 StructType *GlobalStructTy =
2529 StructType::get(IntptrTy, IntptrTy, IntptrTy, IntptrTy, IntptrTy,
2530 IntptrTy, IntptrTy, IntptrTy);
2531 SmallVector<GlobalVariable *, 16> NewGlobals(n);
2532 SmallVector<Constant *, 16> Initializers(n);
2533
2534 bool HasDynamicallyInitializedGlobals = false;
2535
2536 // We shouldn't merge same module names, as this string serves as a unique
2537 // module ID at run time.
2538 GlobalVariable *ModuleName =
2539 n != 0
2540 ? createPrivateGlobalForString(M, M.getModuleIdentifier(),
2541 /*AllowMerging*/ false, kAsanGenPrefix)
2542 : nullptr;
2543
2544 for (size_t i = 0; i < n; i++) {
2545 GlobalVariable *G = GlobalsToChange[i];
2546
2547 GlobalValue::SanitizerMetadata MD;
2548 if (G->hasSanitizerMetadata())
2549 MD = G->getSanitizerMetadata();
2550
2551 // The runtime library tries demangling symbol names in the descriptor but
2552 // functionality like __cxa_demangle may be unavailable (e.g.
2553 // -static-libstdc++). So we demangle the symbol names here.
2554 std::string NameForGlobal = G->getName().str();
2555 GlobalVariable *Name =
2556 createPrivateGlobalForString(M, llvm::demangle(NameForGlobal),
2557 /*AllowMerging*/ true, kAsanGenPrefix);
2558
2559 Type *Ty = G->getValueType();
2560 const uint64_t SizeInBytes = DL.getTypeAllocSize(Ty);
2561 const uint64_t RightRedzoneSize = getRedzoneSizeForGlobal(SizeInBytes);
2562 Type *RightRedZoneTy = ArrayType::get(IRB.getInt8Ty(), RightRedzoneSize);
2563
2564 StructType *NewTy = StructType::get(Ty, RightRedZoneTy);
2565 Constant *NewInitializer = ConstantStruct::get(
2566 NewTy, G->getInitializer(), Constant::getNullValue(RightRedZoneTy));
2567
2568 // Create a new global variable with enough space for a redzone.
2569 GlobalValue::LinkageTypes Linkage = G->getLinkage();
2570 if (G->isConstant() && Linkage == GlobalValue::PrivateLinkage)
2571 Linkage = GlobalValue::InternalLinkage;
2572 GlobalVariable *NewGlobal = new GlobalVariable(
2573 M, NewTy, G->isConstant(), Linkage, NewInitializer, "", G,
2574 G->getThreadLocalMode(), G->getAddressSpace());
2575 NewGlobal->copyAttributesFrom(G);
2576 NewGlobal->setComdat(G->getComdat());
2577 NewGlobal->setAlignment(Align(getMinRedzoneSizeForGlobal()));
2578 // Don't fold globals with redzones. ODR violation detector and redzone
2579 // poisoning implicitly creates a dependence on the global's address, so it
2580 // is no longer valid for it to be marked unnamed_addr.
2581 NewGlobal->setUnnamedAddr(GlobalValue::UnnamedAddr::None);
2582
2583 // Move null-terminated C strings to "__asan_cstring" section on Darwin.
2584 if (TargetTriple.isOSBinFormatMachO() && !G->hasSection() &&
2585 G->isConstant()) {
2586 auto Seq = dyn_cast<ConstantDataSequential>(G->getInitializer());
2587 if (Seq && Seq->isCString())
2588 NewGlobal->setSection("__TEXT,__asan_cstring,regular");
2589 }
2590
2591 // Transfer the debug info and type metadata. The payload starts at offset
2592 // zero so we can copy the metadata over as is.
2593 NewGlobal->copyMetadata(G, 0);
2594
2595 Value *Indices2[2];
2596 Indices2[0] = IRB.getInt32(0);
2597 Indices2[1] = IRB.getInt32(0);
2598
2599 G->replaceAllUsesWith(
2600 ConstantExpr::getGetElementPtr(NewTy, NewGlobal, Indices2, true));
2601 NewGlobal->takeName(G);
2602 G->eraseFromParent();
2603 NewGlobals[i] = NewGlobal;
2604
2605 Constant *ODRIndicator = ConstantPointerNull::get(PtrTy);
2606 GlobalValue *InstrumentedGlobal = NewGlobal;
2607
2608 bool CanUsePrivateAliases =
2609 TargetTriple.isOSBinFormatELF() || TargetTriple.isOSBinFormatMachO() ||
2610 TargetTriple.isOSBinFormatWasm();
2611 if (CanUsePrivateAliases && UsePrivateAlias) {
2612 // Create local alias for NewGlobal to avoid crash on ODR between
2613 // instrumented and non-instrumented libraries.
2614 InstrumentedGlobal =
2615 GlobalAlias::create(GlobalValue::PrivateLinkage, "", NewGlobal);
2616 }
2617
2618 // ODR should not happen for local linkage.
2619 if (NewGlobal->hasLocalLinkage()) {
2620 ODRIndicator =
2621 ConstantExpr::getIntToPtr(ConstantInt::get(IntptrTy, -1), PtrTy);
2622 } else if (UseOdrIndicator) {
2623 // With local aliases, we need to provide another externally visible
2624 // symbol __odr_asan_XXX to detect ODR violation.
2625 auto *ODRIndicatorSym =
2626 new GlobalVariable(M, IRB.getInt8Ty(), false, Linkage,
2627 ConstantInt::get(IRB.getInt8Ty(), 0),
2628 kODRGenPrefix + NameForGlobal, nullptr,
2629 NewGlobal->getThreadLocalMode());
2630
2631 // Set meaningful attributes for indicator symbol.
2632 ODRIndicatorSym->setVisibility(NewGlobal->getVisibility());
2633 ODRIndicatorSym->setDLLStorageClass(NewGlobal->getDLLStorageClass());
2634 ODRIndicatorSym->setAlignment(Align(1));
2635 ODRIndicator = ODRIndicatorSym;
2636 }
2637
2638 Constant *Initializer = ConstantStruct::get(
2639 GlobalStructTy,
2640 ConstantExpr::getPointerCast(InstrumentedGlobal, IntptrTy),
2641 ConstantInt::get(IntptrTy, SizeInBytes),
2642 ConstantInt::get(IntptrTy, SizeInBytes + RightRedzoneSize),
2643 ConstantExpr::getPointerCast(Name, IntptrTy),
2644 ConstantExpr::getPointerCast(ModuleName, IntptrTy),
2645 ConstantInt::get(IntptrTy, MD.IsDynInit),
2646 Constant::getNullValue(IntptrTy),
2647 ConstantExpr::getPointerCast(ODRIndicator, IntptrTy));
2648
2649 if (ClInitializers && MD.IsDynInit)
2650 HasDynamicallyInitializedGlobals = true;
2651
2652 LLVM_DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n");
2653
2654 Initializers[i] = Initializer;
2655 }
2656
2657 // Add instrumented globals to the llvm.compiler.used list to keep LTO from
2658 // ConstantMerge'ing them.
2659 SmallVector<GlobalValue *, 16> GlobalsToAddToUsedList;
2660 for (size_t i = 0; i < n; i++) {
2661 GlobalVariable *G = NewGlobals[i];
2662 if (G->getName().empty()) continue;
2663 GlobalsToAddToUsedList.push_back(G);
2664 }
2665 appendToCompilerUsed(M, ArrayRef<GlobalValue *>(GlobalsToAddToUsedList));
2666
2667 if (UseGlobalsGC && TargetTriple.isOSBinFormatELF()) {
2668 // Use COMDAT and register globals even if n == 0 to ensure that (a) the
2669 // linkage unit will only have one module constructor, and (b) the register
2670 // function will be called. The module destructor is not created when n ==
2671 // 0.
2672 *CtorComdat = true;
2673 instrumentGlobalsELF(IRB, M, NewGlobals, Initializers,
2674 getUniqueModuleId(&M));
2675 } else if (n == 0) {
2676 // When UseGlobalsGC is false, COMDAT can still be used if n == 0, because
2677 // all compile units will have identical module constructor/destructor.
2678 *CtorComdat = TargetTriple.isOSBinFormatELF();
2679 } else {
2680 *CtorComdat = false;
2681 if (UseGlobalsGC && TargetTriple.isOSBinFormatCOFF()) {
2682 InstrumentGlobalsCOFF(IRB, M, NewGlobals, Initializers);
2683 } else if (UseGlobalsGC && ShouldUseMachOGlobalsSection()) {
2684 InstrumentGlobalsMachO(IRB, M, NewGlobals, Initializers);
2685 } else {
2686 InstrumentGlobalsWithMetadataArray(IRB, M, NewGlobals, Initializers);
2687 }
2688 }
2689
2690 // Create calls for poisoning before initializers run and unpoisoning after.
2691 if (HasDynamicallyInitializedGlobals)
2692 createInitializerPoisonCalls(M, ModuleName);
2693
2694 LLVM_DEBUG(dbgs() << M);
2695}
2696
2697uint64_t
2698ModuleAddressSanitizer::getRedzoneSizeForGlobal(uint64_t SizeInBytes) const {
2699 constexpr uint64_t kMaxRZ = 1 << 18;
2700 const uint64_t MinRZ = getMinRedzoneSizeForGlobal();
2701
2702 uint64_t RZ = 0;
2703 if (SizeInBytes <= MinRZ / 2) {
2704 // Reduce the redzone size for small objects, e.g. int, char[1]. MinRZ is
2705 // at least 32 bytes; optimize when SizeInBytes is less than or equal to
2706 // half of MinRZ.
2707 RZ = MinRZ - SizeInBytes;
2708 } else {
2709 // Calculate RZ, where MinRZ <= RZ <= MaxRZ, and RZ ~ 1/4 * SizeInBytes.
2710 RZ = std::clamp((SizeInBytes / MinRZ / 4) * MinRZ, MinRZ, kMaxRZ);
2711
2712 // Round up to multiple of MinRZ.
2713 if (SizeInBytes % MinRZ)
2714 RZ += MinRZ - (SizeInBytes % MinRZ);
2715 }
2716
2717 assert((RZ + SizeInBytes) % MinRZ == 0);
2718
2719 return RZ;
2720}
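// Worked examples with MinRZ == 32 and kMaxRZ == 1 << 18: SizeInBytes == 4
// gives RZ == 28, so object plus redzone exactly fill one 32-byte unit;
// SizeInBytes == 1000 gives RZ == clamp((1000 / 32 / 4) * 32, 32, kMaxRZ)
// == 224, plus another 24 to make 1000 + RZ a multiple of 32, i.e. 248.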
2721
2722int ModuleAddressSanitizer::GetAsanVersion(const Module &M) const {
2723 int LongSize = M.getDataLayout().getPointerSizeInBits();
2724 bool isAndroid = Triple(M.getTargetTriple()).isAndroid();
2725 int Version = 8;
2726 // 32-bit Android is one version ahead because of the switch to dynamic
2727 // shadow.
2728 Version += (LongSize == 32 && isAndroid);
2729 return Version;
2730}
2731
2732bool ModuleAddressSanitizer::instrumentModule(Module &M) {
2733 initializeCallbacks(M);
2734
2735 // Create a module constructor. A destructor is created lazily because not
2736 // all platforms, and not all modules, need it.
2737 if (ConstructorKind == AsanCtorKind::Global) {
2738 if (CompileKernel) {
2739 // The kernel always builds with its own runtime, and therefore does not
2740 // need the init and version check calls.
2741 AsanCtorFunction = createSanitizerCtor(M, kAsanModuleCtorName);
2742 } else {
2743 std::string AsanVersion = std::to_string(GetAsanVersion(M));
2744 std::string VersionCheckName =
2745 InsertVersionCheck ? (kAsanVersionCheckNamePrefix + AsanVersion) : "";
2746 std::tie(AsanCtorFunction, std::ignore) =
2747 createSanitizerCtorAndInitFunctions(M, kAsanModuleCtorName,
2748 kAsanInitName, /*InitArgTypes=*/{},
2749 /*InitArgs=*/{}, VersionCheckName);
2750 }
2751 }
2752
2753 bool CtorComdat = true;
2754 if (ClGlobals) {
2755 assert(AsanCtorFunction || ConstructorKind == AsanCtorKind::None);
2756 if (AsanCtorFunction) {
2757 IRBuilder<> IRB(AsanCtorFunction->getEntryBlock().getTerminator());
2758 instrumentGlobals(IRB, M, &CtorComdat);
2759 } else {
2760 IRBuilder<> IRB(*C);
2761 instrumentGlobals(IRB, M, &CtorComdat);
2762 }
2763 }
2764
2765 const uint64_t Priority = GetCtorAndDtorPriority(TargetTriple);
2766
2767 // Put the constructor and destructor in comdat if both
2768 // (1) global instrumentation is not TU-specific
2769 // (2) target is ELF.
2770 if (UseCtorComdat && TargetTriple.isOSBinFormatELF() && CtorComdat) {
2771 if (AsanCtorFunction) {
2772 AsanCtorFunction->setComdat(M.getOrInsertComdat(kAsanModuleCtorName));
2773 appendToGlobalCtors(M, AsanCtorFunction, Priority, AsanCtorFunction);
2774 }
2775 if (AsanDtorFunction) {
2776 AsanDtorFunction->setComdat(M.getOrInsertComdat(kAsanModuleDtorName));
2777 appendToGlobalDtors(M, AsanDtorFunction, Priority, AsanDtorFunction);
2778 }
2779 } else {
2780 if (AsanCtorFunction)
2781 appendToGlobalCtors(M, AsanCtorFunction, Priority);
2782 if (AsanDtorFunction)
2783 appendToGlobalDtors(M, AsanDtorFunction, Priority);
2784 }
2785
2786 return true;
2787}
2788
2789void AddressSanitizer::initializeCallbacks(Module &M, const TargetLibraryInfo *TLI) {
2790 IRBuilder<> IRB(*C);
2791 // Create __asan_report* callbacks.
2792 // IsWrite, TypeSize and Exp are encoded in the function name.
2793 for (int Exp = 0; Exp < 2; Exp++) {
2794 for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
2795 const std::string TypeStr = AccessIsWrite ? "store" : "load";
2796 const std::string ExpStr = Exp ? "exp_" : "";
2797 const std::string EndingStr = Recover ? "_noabort" : "";
2798
2799 SmallVector<Type *, 3> Args2 = {IntptrTy, IntptrTy};
2800 SmallVector<Type *, 2> Args1{1, IntptrTy};
2801 AttributeList AL2;
2802 AttributeList AL1;
2803 if (Exp) {
2804 Type *ExpType = Type::getInt32Ty(*C);
2805 Args2.push_back(ExpType);
2806 Args1.push_back(ExpType);
2807 if (auto AK = TLI->getExtAttrForI32Param(false)) {
2808 AL2 = AL2.addParamAttribute(*C, 2, AK);
2809 AL1 = AL1.addParamAttribute(*C, 1, AK);
2810 }
2811 }
2812 AsanErrorCallbackSized[AccessIsWrite][Exp] = M.getOrInsertFunction(
2813 kAsanReportErrorTemplate + ExpStr + TypeStr + "_n" + EndingStr,
2814 FunctionType::get(IRB.getVoidTy(), Args2, false), AL2);
2815
2816 AsanMemoryAccessCallbackSized[AccessIsWrite][Exp] = M.getOrInsertFunction(
2817 ClMemoryAccessCallbackPrefix + ExpStr + TypeStr + "N" + EndingStr,
2818 FunctionType::get(IRB.getVoidTy(), Args2, false), AL2);
2819
2820 for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
2821 AccessSizeIndex++) {
2822 const std::string Suffix = TypeStr + itostr(1ULL << AccessSizeIndex);
2823 AsanErrorCallback[AccessIsWrite][Exp][AccessSizeIndex] =
2824 M.getOrInsertFunction(
2825 kAsanReportErrorTemplate + ExpStr + Suffix + EndingStr,
2826 FunctionType::get(IRB.getVoidTy(), Args1, false), AL1);
2827
2828 AsanMemoryAccessCallback[AccessIsWrite][Exp][AccessSizeIndex] =
2829 M.getOrInsertFunction(
2830 ClMemoryAccessCallbackPrefix + ExpStr + Suffix + EndingStr,
2831 FunctionType::get(IRB.getVoidTy(), Args1, false), AL1);
2832 }
2833 }
2834 }
2835
2836 const std::string MemIntrinCallbackPrefix =
2837 (CompileKernel && !ClKasanMemIntrinCallbackPrefix)
2838 ? std::string("")
2840 AsanMemmove = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memmove",
2841 PtrTy, PtrTy, PtrTy, IntptrTy);
2842 AsanMemcpy = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memcpy", PtrTy,
2843 PtrTy, PtrTy, IntptrTy);
2844 AsanMemset = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memset",
2845 TLI->getAttrList(C, {1}, /*Signed=*/false),
2846 PtrTy, PtrTy, IRB.getInt32Ty(), IntptrTy);
2847
2848 AsanHandleNoReturnFunc =
2849 M.getOrInsertFunction(kAsanHandleNoReturnName, IRB.getVoidTy());
2850
2851 AsanPtrCmpFunction =
2852 M.getOrInsertFunction(kAsanPtrCmp, IRB.getVoidTy(), IntptrTy, IntptrTy);
2853 AsanPtrSubFunction =
2854 M.getOrInsertFunction(kAsanPtrSub, IRB.getVoidTy(), IntptrTy, IntptrTy);
2855 if (Mapping.InGlobal)
2856 AsanShadowGlobal = M.getOrInsertGlobal("__asan_shadow",
2857 ArrayType::get(IRB.getInt8Ty(), 0));
2858
2859 AMDGPUAddressShared =
2860 M.getOrInsertFunction(kAMDGPUAddressSharedName, IRB.getInt1Ty(), PtrTy);
2861 AMDGPUAddressPrivate =
2862 M.getOrInsertFunction(kAMDGPUAddressPrivateName, IRB.getInt1Ty(), PtrTy);
2863}
2864
2865bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) {
2866 // For each NSObject descendant having a +load method, this method is invoked
2867 // by the ObjC runtime before any of the static constructors is called.
2868 // Therefore we need to instrument such methods with a call to __asan_init
2869 // at the beginning in order to initialize our runtime before any access to
2870 // the shadow memory.
2871 // We cannot just ignore these methods, because they may call other
2872 // instrumented functions.
2873 if (F.getName().contains(" load]")) {
2874 FunctionCallee AsanInitFunction =
2875 declareSanitizerInitFunction(*F.getParent(), kAsanInitName, {});
2876 IRBuilder<> IRB(&F.front(), F.front().begin());
2877 IRB.CreateCall(AsanInitFunction, {});
2878 return true;
2879 }
2880 return false;
2881}
2882
2883bool AddressSanitizer::maybeInsertDynamicShadowAtFunctionEntry(Function &F) {
2884 // Generate code only when dynamic addressing is needed.
2885 if (Mapping.Offset != kDynamicShadowSentinel)
2886 return false;
2887
2888 IRBuilder<> IRB(&F.front().front());
2889 if (Mapping.InGlobal) {
2890 if (ClWithIfuncSuffix) {
2891 // An empty inline asm with input reg == output reg.
2892 // An opaque pointer-to-int cast, basically.
2893 InlineAsm *Asm = InlineAsm::get(
2894 FunctionType::get(IntptrTy, {AsanShadowGlobal->getType()}, false),
2895 StringRef(""), StringRef("=r,0"),
2896 /*hasSideEffects=*/false);
2897 LocalDynamicShadow =
2898 IRB.CreateCall(Asm, {AsanShadowGlobal}, ".asan.shadow");
2899 } else {
2900 LocalDynamicShadow =
2901 IRB.CreatePointerCast(AsanShadowGlobal, IntptrTy, ".asan.shadow");
2902 }
2903 } else {
2904 Value *GlobalDynamicAddress = F.getParent()->getOrInsertGlobal(
2905 kAsanShadowMemoryDynamicAddress, IntptrTy);
2906 LocalDynamicShadow = IRB.CreateLoad(IntptrTy, GlobalDynamicAddress);
2907 }
2908 return true;
2909}
2910
2911void AddressSanitizer::markEscapedLocalAllocas(Function &F) {
2912 // Find the one possible call to llvm.localescape and pre-mark allocas passed
2913 // to it as uninteresting. This assumes we haven't started processing allocas
2914 // yet. This check is done up front because iterating the use list in
2915 // isInterestingAlloca would be algorithmically slower.
2916 assert(ProcessedAllocas.empty() && "must process localescape before allocas");
2917
2918 // Try to get the declaration of llvm.localescape. If it's not in the module,
2919 // we can exit early.
2920 if (!F.getParent()->getFunction("llvm.localescape")) return;
2921
2922 // Look for a call to llvm.localescape in the entry block. It can't be in
2923 // any other block.
2924 for (Instruction &I : F.getEntryBlock()) {
2925 IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
2926 if (II && II->getIntrinsicID() == Intrinsic::localescape) {
2927 // We found a call. Mark all the allocas passed in as uninteresting.
2928 for (Value *Arg : II->args()) {
2929 AllocaInst *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
2930 assert(AI && AI->isStaticAlloca() &&
2931 "non-static alloca arg to localescape");
2932 ProcessedAllocas[AI] = false;
2933 }
2934 break;
2935 }
2936 }
2937}
2938
2939bool AddressSanitizer::suppressInstrumentationSiteForDebug(int &Instrumented) {
2940 bool ShouldInstrument =
2941 ClDebugMin < 0 || ClDebugMax < 0 ||
2942 (Instrumented >= ClDebugMin && Instrumented <= ClDebugMax);
2943 Instrumented++;
2944 return !ShouldInstrument;
2945}
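// For example, with ClDebugMin == ClDebugMax == 7 only the eighth
// interesting site in each function is instrumented (the counter is
// zero-based), which makes it practical to bisect a miscompile or a false
// positive down to a single check.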
2946
2947bool AddressSanitizer::instrumentFunction(Function &F,
2948 const TargetLibraryInfo *TLI) {
2949 if (F.empty())
2950 return false;
2951 if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage) return false;
2952 if (!ClDebugFunc.empty() && ClDebugFunc == F.getName()) return false;
2953 if (F.getName().starts_with("__asan_")) return false;
2954 if (F.isPresplitCoroutine())
2955 return false;
2956
2957 bool FunctionModified = false;
2958
2959 // If needed, insert __asan_init before checking for SanitizeAddress attr.
2960 // This function needs to be called even if the function body is not
2961 // instrumented.
2962 if (maybeInsertAsanInitAtFunctionEntry(F))
2963 FunctionModified = true;
2964
2965 // Leave if the function doesn't need instrumentation.
2966 if (!F.hasFnAttribute(Attribute::SanitizeAddress)) return FunctionModified;
2967
2968 if (F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation))
2969 return FunctionModified;
2970
2971 LLVM_DEBUG(dbgs() << "ASAN instrumenting:\n" << F << "\n");
2972
2973 initializeCallbacks(*F.getParent(), TLI);
2974
2975 FunctionStateRAII CleanupObj(this);
2976
2977 RuntimeCallInserter RTCI(F);
2978
2979 FunctionModified |= maybeInsertDynamicShadowAtFunctionEntry(F);
2980
2981 // We can't instrument allocas used with llvm.localescape. Only static allocas
2982 // can be passed to that intrinsic.
2983 markEscapedLocalAllocas(F);
2984
2985 // We want to instrument every address only once per basic block (unless there
2986 // are calls between uses).
2987 SmallPtrSet<Value *, 16> TempsToInstrument;
2988 SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
2989 SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
2990 SmallVector<Instruction *, 8> NoReturnCalls;
2991 SmallVector<BasicBlock *, 16> AllBlocks;
2992 SmallVector<Instruction *, 16> PointerComparisonsOrSubtracts;
2993
2994 // Fill the set of memory operations to instrument.
2995 for (auto &BB : F) {
2996 AllBlocks.push_back(&BB);
2997 TempsToInstrument.clear();
2998 int NumInsnsPerBB = 0;
2999 for (auto &Inst : BB) {
3000 if (LooksLikeCodeInBug11395(&Inst)) return false;
3001 // Skip instructions inserted by another instrumentation.
3002 if (Inst.hasMetadata(LLVMContext::MD_nosanitize))
3003 continue;
3004 SmallVector<InterestingMemoryOperand, 1> InterestingOperands;
3005 getInterestingMemoryOperands(&Inst, InterestingOperands);
3006
3007 if (!InterestingOperands.empty()) {
3008 for (auto &Operand : InterestingOperands) {
3009 if (ClOpt && ClOptSameTemp) {
3010 Value *Ptr = Operand.getPtr();
3011 // If we have a mask, skip instrumentation if we've already
3012 // instrumented the full object. But don't add to TempsToInstrument
3013 // because we might get another load/store with a different mask.
3014 if (Operand.MaybeMask) {
3015 if (TempsToInstrument.count(Ptr))
3016 continue; // We've seen this (whole) temp in the current BB.
3017 } else {
3018 if (!TempsToInstrument.insert(Ptr).second)
3019 continue; // We've seen this temp in the current BB.
3020 }
3021 }
3022 OperandsToInstrument.push_back(Operand);
3023 NumInsnsPerBB++;
3024 }
3025 } else if (((ClInvalidPointerPairs || ClInvalidPointerCmp) &&
3026 isInterestingPointerComparison(&Inst)) ||
3027 ((ClInvalidPointerPairs || ClInvalidPointerSub) &&
3028 isInterestingPointerSubtraction(&Inst))) {
3029 PointerComparisonsOrSubtracts.push_back(&Inst);
3030 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst)) {
3031 // ok, take it.
3032 IntrinToInstrument.push_back(MI);
3033 NumInsnsPerBB++;
3034 } else {
3035 if (auto *CB = dyn_cast<CallBase>(&Inst)) {
3036 // A call inside BB.
3037 TempsToInstrument.clear();
3038 if (CB->doesNotReturn())
3039 NoReturnCalls.push_back(CB);
3040 }
3041 if (CallInst *CI = dyn_cast<CallInst>(&Inst))
3042 maybeMarkSanitizerLibraryCallNoBuiltin(CI, TLI);
3043 }
3044 if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB) break;
3045 }
3046 }
3047
3048 bool UseCalls = (InstrumentationWithCallsThreshold >= 0 &&
3049 OperandsToInstrument.size() + IntrinToInstrument.size() >
3050 (unsigned)InstrumentationWithCallsThreshold);
3051 const DataLayout &DL = F.getDataLayout();
3052 ObjectSizeOpts ObjSizeOpts;
3053 ObjSizeOpts.RoundToAlign = true;
3054 ObjectSizeOffsetVisitor ObjSizeVis(DL, TLI, F.getContext(), ObjSizeOpts);
3055
3056 // Instrument.
3057 int NumInstrumented = 0;
3058 for (auto &Operand : OperandsToInstrument) {
3059 if (!suppressInstrumentationSiteForDebug(NumInstrumented))
3060 instrumentMop(ObjSizeVis, Operand, UseCalls,
3061 F.getDataLayout(), RTCI);
3062 FunctionModified = true;
3063 }
3064 for (auto *Inst : IntrinToInstrument) {
3065 if (!suppressInstrumentationSiteForDebug(NumInstrumented))
3066 instrumentMemIntrinsic(Inst, RTCI);
3067 FunctionModified = true;
3068 }
3069
3070 FunctionStackPoisoner FSP(F, *this, RTCI);
3071 bool ChangedStack = FSP.runOnFunction();
3072
3073 // We must unpoison the stack before NoReturn calls (throw, _exit, etc).
3074 // See e.g. https://github.com/google/sanitizers/issues/37
3075 for (auto *CI : NoReturnCalls) {
3076 IRBuilder<> IRB(CI);
3077 RTCI.createRuntimeCall(IRB, AsanHandleNoReturnFunc, {});
3078 }
3079
3080 for (auto *Inst : PointerComparisonsOrSubtracts) {
3081 instrumentPointerComparisonOrSubtraction(Inst, RTCI);
3082 FunctionModified = true;
3083 }
3084
3085 if (ChangedStack || !NoReturnCalls.empty())
3086 FunctionModified = true;
3087
3088 LLVM_DEBUG(dbgs() << "ASAN done instrumenting: " << FunctionModified << " "
3089 << F << "\n");
3090
3091 return FunctionModified;
3092}
3093
3094// Workaround for bug 11395: we don't want to instrument stack in functions
3095// with large assembly blobs (32-bit only), otherwise reg alloc may crash.
3096 // FIXME: remove once bug 11395 is fixed.
3097bool AddressSanitizer::LooksLikeCodeInBug11395(Instruction *I) {
3098 if (LongSize != 32) return false;
3099 CallInst *CI = dyn_cast<CallInst>(I);
3100 if (!CI || !CI->isInlineAsm()) return false;
3101 if (CI->arg_size() <= 5)
3102 return false;
3103 // We have inline assembly with quite a few arguments.
3104 return true;
3105}
3106
3107void FunctionStackPoisoner::initializeCallbacks(Module &M) {
3108 IRBuilder<> IRB(*C);
3109 if (ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always ||
3110 ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Runtime) {
3111 const char *MallocNameTemplate =
3112 ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always
3113 ? kAsanStackMallocAlwaysNameTemplate
3114 : kAsanStackMallocNameTemplate;
3115 for (int Index = 0; Index <= kMaxAsanStackMallocSizeClass; Index++) {
3116 std::string Suffix = itostr(Index);
3117 AsanStackMallocFunc[Index] = M.getOrInsertFunction(
3118 MallocNameTemplate + Suffix, IntptrTy, IntptrTy);
3119 AsanStackFreeFunc[Index] =
3120 M.getOrInsertFunction(kAsanStackFreeNameTemplate + Suffix,
3121 IRB.getVoidTy(), IntptrTy, IntptrTy);
3122 }
3123 }
3124 if (ASan.UseAfterScope) {
3125 AsanPoisonStackMemoryFunc = M.getOrInsertFunction(
3126 kAsanPoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy);
3127 AsanUnpoisonStackMemoryFunc = M.getOrInsertFunction(
3128 kAsanUnpoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy);
3129 }
3130
3131 for (size_t Val : {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0xf1, 0xf2,
3132 0xf3, 0xf5, 0xf8}) {
3133 std::ostringstream Name;
3134 Name << kAsanSetShadowPrefix;
3135 Name << std::setw(2) << std::setfill('0') << std::hex << Val;
3136 AsanSetShadowFunc[Val] =
3137 M.getOrInsertFunction(Name.str(), IRB.getVoidTy(), IntptrTy, IntptrTy);
3138 }
3139
3140 AsanAllocaPoisonFunc = M.getOrInsertFunction(
3141 kAsanAllocaPoison, IRB.getVoidTy(), IntptrTy, IntptrTy);
3142 AsanAllocasUnpoisonFunc = M.getOrInsertFunction(
3143 kAsanAllocasUnpoison, IRB.getVoidTy(), IntptrTy, IntptrTy);
3144}
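// Illustrative sketch, not from the source: with use-after-return detection
// enabled, the first loop above declares one malloc/free pair per size class
// N in [0, kMaxAsanStackMallocSizeClass], with runtime signatures roughly
//
//   uptr __asan_stack_malloc_N(uptr size);  // or __asan_stack_malloc_always_N
//   void __asan_stack_free_N(uptr ptr, uptr size);
//
// and the second loop declares void __asan_set_shadow_XX(uptr addr, uptr n)
// for each of the shadow byte values listed (00..07, f1, f2, f3, f5, f8).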
3145
3146void FunctionStackPoisoner::copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
3147 ArrayRef<uint8_t> ShadowBytes,
3148 size_t Begin, size_t End,
3149 IRBuilder<> &IRB,
3150 Value *ShadowBase) {
3151 if (Begin >= End)
3152 return;
3153
3154 const size_t LargestStoreSizeInBytes =
3155 std::min<size_t>(sizeof(uint64_t), ASan.LongSize / 8);
3156
3157 const bool IsLittleEndian = F.getDataLayout().isLittleEndian();
3158
3159 // Poison the given range in shadow using the largest store size, without
3160 // leading and trailing zeros in ShadowMask. Zeros never change, so they need
3161 // neither poisoning nor unpoisoning. Still, we don't mind if some of them
3162 // end up in the middle of a store.
3163 for (size_t i = Begin; i < End;) {
3164 if (!ShadowMask[i]) {
3165 assert(!ShadowBytes[i]);
3166 ++i;
3167 continue;
3168 }
3169
3170 size_t StoreSizeInBytes = LargestStoreSizeInBytes;
3171 // Fit store size into the range.
3172 while (StoreSizeInBytes > End - i)
3173 StoreSizeInBytes /= 2;
3174
3175 // Minimize store size by trimming trailing zeros.
3176 for (size_t j = StoreSizeInBytes - 1; j && !ShadowMask[i + j]; --j) {
3177 while (j <= StoreSizeInBytes / 2)
3178 StoreSizeInBytes /= 2;
3179 }
3180
3181 uint64_t Val = 0;
3182 for (size_t j = 0; j < StoreSizeInBytes; j++) {
3183 if (IsLittleEndian)
3184 Val |= (uint64_t)ShadowBytes[i + j] << (8 * j);
3185 else
3186 Val = (Val << 8) | ShadowBytes[i + j];
3187 }
3188
3189 Value *Ptr = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i));
3190 Value *Poison = IRB.getIntN(StoreSizeInBytes * 8, Val);
3191 IRB.CreateAlignedStore(
3192 Poison, IRB.CreateIntToPtr(Ptr, PointerType::getUnqual(Poison->getType())),
3193 Align(1));
3194
3195 i += StoreSizeInBytes;
3196 }
3197}
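// Worked example, a sketch not in the source: on a little-endian 64-bit
// target with ShadowMask == ShadowBytes == {f1,f1,f1,f1,00,00,f8,f8}, the
// loop above emits a single 8-byte store of 0xf8f80000f1f1f1f1 at ShadowBase:
// interior zeros ride along inside the store, while a run that ends in zeros
// would instead have its store size shrunk by the trailing-zero trimming.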
3198
3199void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask,
3200 ArrayRef<uint8_t> ShadowBytes,
3201 IRBuilder<> &IRB, Value *ShadowBase) {
3202 copyToShadow(ShadowMask, ShadowBytes, 0, ShadowMask.size(), IRB, ShadowBase);
3203}
3204
3205void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask,
3206 ArrayRef<uint8_t> ShadowBytes,
3207 size_t Begin, size_t End,
3208 IRBuilder<> &IRB, Value *ShadowBase) {
3209 assert(ShadowMask.size() == ShadowBytes.size());
3210 size_t Done = Begin;
3211 for (size_t i = Begin, j = Begin + 1; i < End; i = j++) {
3212 if (!ShadowMask[i]) {
3213 assert(!ShadowBytes[i]);
3214 continue;
3215 }
3216 uint8_t Val = ShadowBytes[i];
3217 if (!AsanSetShadowFunc[Val])
3218 continue;
3219
3220 // Skip same values.
3221 for (; j < End && ShadowMask[j] && Val == ShadowBytes[j]; ++j) {
3222 }
3223
3224 if (j - i >= ASan.MaxInlinePoisoningSize) {
3225 copyToShadowInline(ShadowMask, ShadowBytes, Done, i, IRB, ShadowBase);
3226 RTCI.createRuntimeCall(
3227 IRB, AsanSetShadowFunc[Val],
3228 {IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i)),
3229 ConstantInt::get(IntptrTy, j - i)});
3230 Done = j;
3231 }
3232 }
3233
3234 copyToShadowInline(ShadowMask, ShadowBytes, Done, End, IRB, ShadowBase);
3235}
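// Worked example, a sketch not in the source: with MaxInlinePoisoningSize at
// its default of 64 (-asan-max-inline-poisoning-size), a run of 200 identical
// 0xf8 shadow bytes starting at offset i becomes one runtime call,
//
//   __asan_set_shadow_f8(ShadowBase + i, 200);
//
// while shorter or mixed stretches fall through to copyToShadowInline and are
// written with plain stores.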
3236
3237 // Fake stack allocator (asan_fake_stack.h) has 11 size classes, one for each
3238 // power of 2 from kMinStackMallocSize up to kMaxStackMallocSize.
3239static int StackMallocSizeClass(uint64_t LocalStackSize) {
3240 assert(LocalStackSize <= kMaxStackMallocSize);
3241 uint64_t MaxSize = kMinStackMallocSize;
3242 for (int i = 0;; i++, MaxSize *= 2)
3243 if (LocalStackSize <= MaxSize) return i;
3244 llvm_unreachable("impossible LocalStackSize");
3245}
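// Illustrative mapping, not from the source, assuming kMinStackMallocSize is
// 64: frames of at most 64 bytes map to class 0, 65..128 bytes to class 1,
// and so on doubling up to 32769..65536 bytes (kMaxStackMallocSize) for
// class 10, matching the 11 fake-stack size classes noted above.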
3246
3247void FunctionStackPoisoner::copyArgsPassedByValToAllocas() {
3248 Instruction *CopyInsertPoint = &F.front().front();
3249 if (CopyInsertPoint == ASan.LocalDynamicShadow) {
3250 // Insert after the dynamic shadow location is determined
3251 CopyInsertPoint = CopyInsertPoint->getNextNode();
3252 assert(CopyInsertPoint);
3253 }
3254 IRBuilder<> IRB(CopyInsertPoint);
3255 const DataLayout &DL = F.getDataLayout();
3256 for (Argument &Arg : F.args()) {
3257 if (Arg.hasByValAttr()) {
3258 Type *Ty = Arg.getParamByValType();
3259 const Align Alignment =
3260 DL.getValueOrABITypeAlignment(Arg.getParamAlign(), Ty);
3261
3262 AllocaInst *AI = IRB.CreateAlloca(
3263 Ty, nullptr,
3264 (Arg.hasName() ? Arg.getName() : "Arg" + Twine(Arg.getArgNo())) +
3265 ".byval");
3266 AI->setAlignment(Alignment);
3267 Arg.replaceAllUsesWith(AI);
3268
3269 uint64_t AllocSize = DL.getTypeAllocSize(Ty);
3270 IRB.CreateMemCpy(AI, Alignment, &Arg, Alignment, AllocSize);
3271 }
3272 }
3273}
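// Illustrative sketch, not part of the source: a byval argument such as
//
//   define void @f(ptr byval(%struct.S) align 8 %s)
//
// is rewritten by the loop above into roughly
//
//   %s.byval = alloca %struct.S, align 8
//   call void @llvm.memcpy.p0.p0.i64(ptr align 8 %s.byval, ptr align 8 %s,
//                                    i64 <sizeof %struct.S>, i1 false)
//
// with all uses of %s replaced by %s.byval, so the private copy rather than
// the caller-owned memory is what later receives redzones.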
3274
3275PHINode *FunctionStackPoisoner::createPHI(IRBuilder<> &IRB, Value *Cond,
3276 Value *ValueIfTrue,
3277 Instruction *ThenTerm,
3278 Value *ValueIfFalse) {
3279 PHINode *PHI = IRB.CreatePHI(IntptrTy, 2);
3280 BasicBlock *CondBlock = cast<Instruction>(Cond)->getParent();
3281 PHI->addIncoming(ValueIfFalse, CondBlock);
3282 BasicBlock *ThenBlock = ThenTerm->getParent();
3283 PHI->addIncoming(ValueIfTrue, ThenBlock);
3284 return PHI;
3285}
3286
3287Value *FunctionStackPoisoner::createAllocaForLayout(
3288 IRBuilder<> &IRB, const ASanStackFrameLayout &L, bool Dynamic) {
3289 AllocaInst *Alloca;
3290 if (Dynamic) {
3291 Alloca = IRB.CreateAlloca(IRB.getInt8Ty(),
3292 ConstantInt::get(IRB.getInt64Ty(), L.FrameSize),
3293 "MyAlloca");
3294 } else {
3295 Alloca = IRB.CreateAlloca(ArrayType::get(IRB.getInt8Ty(), L.FrameSize),
3296 nullptr, "MyAlloca");
3297 assert(Alloca->isStaticAlloca());
3298 }
3299 assert((ClRealignStack & (ClRealignStack - 1)) == 0);
3300 uint64_t FrameAlignment = std::max(L.FrameAlignment, uint64_t(ClRealignStack));
3301 Alloca->setAlignment(Align(FrameAlignment));
3302 return IRB.CreatePointerCast(Alloca, IntptrTy);
3303}
3304
3305void FunctionStackPoisoner::createDynamicAllocasInitStorage() {
3306 BasicBlock &FirstBB = *F.begin();
3307 IRBuilder<> IRB(dyn_cast<Instruction>(FirstBB.begin()));
3308 DynamicAllocaLayout = IRB.CreateAlloca(IntptrTy, nullptr);
3309 IRB.CreateStore(Constant::getNullValue(IntptrTy), DynamicAllocaLayout);
3310 DynamicAllocaLayout->setAlignment(Align(32));
3311}
3312
3313void FunctionStackPoisoner::processDynamicAllocas() {
3314 if (!ClInstrumentDynamicAllocas || DynamicAllocaVec.empty()) {
3315 assert(DynamicAllocaPoisonCallVec.empty());
3316 return;
3317 }
3318
3319 // Insert poison calls for lifetime intrinsics for dynamic allocas.
3320 for (const auto &APC : DynamicAllocaPoisonCallVec) {
3321 assert(APC.InsBefore);
3322 assert(APC.AI);
3323 assert(ASan.isInterestingAlloca(*APC.AI));
3324 assert(!APC.AI->isStaticAlloca());
3325
3326 IRBuilder<> IRB(APC.InsBefore);
3327 poisonAlloca(APC.AI, APC.Size, IRB, APC.DoPoison);
3328 // Dynamic allocas will be unpoisoned unconditionally below in
3329 // unpoisonDynamicAllocas.
3330 // Flag that we need to unpoison static allocas.
3331 }
3332
3333 // Handle dynamic allocas.
3334 createDynamicAllocasInitStorage();
3335 for (auto &AI : DynamicAllocaVec)
3336 handleDynamicAllocaCall(AI);
3337 unpoisonDynamicAllocas();
3338}
3339
3340/// Collect instructions in the entry block after \p InsBefore which initialize
3341/// permanent storage for a function argument. These instructions must remain in
3342/// the entry block so that uninitialized values do not appear in backtraces. An
3343/// added benefit is that this conserves spill slots. This does not move stores
3344/// before instrumented / "interesting" allocas.
3345 static void findStoresToUninstrumentedArgAllocas(
3346 AddressSanitizer &ASan, Instruction &InsBefore,
3347 SmallVectorImpl<Instruction *> &InitInsts) {
3348 Instruction *Start = InsBefore.getNextNonDebugInstruction();
3349 for (Instruction *It = Start; It; It = It->getNextNonDebugInstruction()) {
3350 // Argument initialization looks like:
3351 // 1) store <Argument>, <Alloca> OR
3352 // 2) <CastArgument> = cast <Argument> to ...
3353 // store <CastArgument> to <Alloca>
3354 // Do not consider any other kind of instruction.
3355 //
3356 // Note: This covers all known cases, but may not be exhaustive. An
3357 // alternative to pattern-matching stores is to DFS over all Argument uses:
3358 // this might be more general, but is probably much more complicated.
3359 if (isa<AllocaInst>(It) || isa<CastInst>(It))
3360 continue;
3361 if (auto *Store = dyn_cast<StoreInst>(It)) {
3362 // The store destination must be an alloca that isn't interesting for
3363 // ASan to instrument. These are moved up before InsBefore, and they're
3364 // not interesting because allocas for arguments can be mem2reg'd.
3365 auto *Alloca = dyn_cast<AllocaInst>(Store->getPointerOperand());
3366 if (!Alloca || ASan.isInterestingAlloca(*Alloca))
3367 continue;
3368
3369 Value *Val = Store->getValueOperand();
3370 bool IsDirectArgInit = isa<Argument>(Val);
3371 bool IsArgInitViaCast =
3372 isa<CastInst>(Val) &&
3373 isa<Argument>(cast<CastInst>(Val)->getOperand(0)) &&
3374 // Check that the cast appears directly before the store. Otherwise
3375 // moving the cast before InsBefore may break the IR.
3376 Val == It->getPrevNonDebugInstruction();
3377 bool IsArgInit = IsDirectArgInit || IsArgInitViaCast;
3378 if (!IsArgInit)
3379 continue;
3380
3381 if (IsArgInitViaCast)
3382 InitInsts.push_back(cast<Instruction>(Val));
3383 InitInsts.push_back(Store);
3384 continue;
3385 }
3386
3387 // Do not reorder past unknown instructions: argument initialization should
3388 // only involve casts and stores.
3389 return;
3390 }
3391}
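// Illustrative sketch, not from the source: the two patterns accepted above,
// in IR form (assuming %x.addr and %b.addr are uninstrumented allocas):
//
//   store i32 %x, ptr %x.addr        ; (1) direct argument store
//
//   %frombool = zext i1 %b to i8     ; (2) cast of an argument immediately
//   store i8 %frombool, ptr %b.addr  ;     followed by its store
//
// Any other instruction ends the scan, so stores are never collected from
// past unknown code.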
3392
3393void FunctionStackPoisoner::processStaticAllocas() {
3394 if (AllocaVec.empty()) {
3395 assert(StaticAllocaPoisonCallVec.empty());
3396 return;
3397 }
3398
3399 int StackMallocIdx = -1;
3400 DebugLoc EntryDebugLocation;
3401 if (auto SP = F.getSubprogram())
3402 EntryDebugLocation =
3403 DILocation::get(SP->getContext(), SP->getScopeLine(), 0, SP);
3404
3405 Instruction *InsBefore = AllocaVec[0];
3406 IRBuilder<> IRB(InsBefore);
3407
3408 // Make sure non-instrumented allocas stay in the entry block. Otherwise,
3409 // debug info is broken, because only entry-block allocas are treated as
3410 // regular stack slots.
3411 auto InsBeforeB = InsBefore->getParent();
3412 assert(InsBeforeB == &F.getEntryBlock());
3413 for (auto *AI : StaticAllocasToMoveUp)
3414 if (AI->getParent() == InsBeforeB)
3415 AI->moveBefore(InsBefore);
3416
3417 // Move stores of arguments into entry-block allocas as well. This prevents
3418 // extra stack slots from being generated (to house the argument values until
3419 // they can be stored into the allocas). This also prevents uninitialized
3420 // values from being shown in backtraces.
3421 SmallVector<Instruction *, 8> ArgInitInsts;
3422 findStoresToUninstrumentedArgAllocas(ASan, *InsBefore, ArgInitInsts);
3423 for (Instruction *ArgInitInst : ArgInitInsts)
3424 ArgInitInst->moveBefore(InsBefore);
3425
3426 // If we have a call to llvm.localescape, keep it in the entry block.
3427 if (LocalEscapeCall) LocalEscapeCall->moveBefore(InsBefore);
3428
3429 SmallVector<ASanStackVariableDescription, 16> SVD;
3430 SVD.reserve(AllocaVec.size());
3431 for (AllocaInst *AI : AllocaVec) {
3432 ASanStackVariableDescription D = {AI->getName().data(),
3433 ASan.getAllocaSizeInBytes(*AI),
3434 0,
3435 AI->getAlign().value(),
3436 AI,
3437 0,
3438 0};
3439 SVD.push_back(D);
3440 }
3441
3442 // Minimal header size (left redzone) is 4 pointers,
3443 // i.e. 32 bytes on 64-bit platforms and 16 bytes on 32-bit platforms.
3444 uint64_t Granularity = 1ULL << Mapping.Scale;
3445 uint64_t MinHeaderSize = std::max((uint64_t)ASan.LongSize / 2, Granularity);
3446 const ASanStackFrameLayout &L =
3447 ComputeASanStackFrameLayout(SVD, Granularity, MinHeaderSize);
3448
3449 // Build AllocaToSVDMap for ASanStackVariableDescription lookup.
3450 DenseMap<const AllocaInst *, ASanStackVariableDescription *> AllocaToSVDMap;
3451 for (auto &Desc : SVD)
3452 AllocaToSVDMap[Desc.AI] = &Desc;
3453
3454 // Update SVD with information from lifetime intrinsics.
3455 for (const auto &APC : StaticAllocaPoisonCallVec) {
3456 assert(APC.InsBefore);
3457 assert(APC.AI);
3458 assert(ASan.isInterestingAlloca(*APC.AI));
3459 assert(APC.AI->isStaticAlloca());
3460
3461 ASanStackVariableDescription &Desc = *AllocaToSVDMap[APC.AI];
3462 Desc.LifetimeSize = Desc.Size;
3463 if (const DILocation *FnLoc = EntryDebugLocation.get()) {
3464 if (const DILocation *LifetimeLoc = APC.InsBefore->getDebugLoc().get()) {
3465 if (LifetimeLoc->getFile() == FnLoc->getFile())
3466 if (unsigned Line = LifetimeLoc->getLine())
3467 Desc.Line = std::min(Desc.Line ? Desc.Line : Line, Line);
3468 }
3469 }
3470 }
3471
3472 auto DescriptionString = ComputeASanStackFrameDescription(SVD);
3473 LLVM_DEBUG(dbgs() << DescriptionString << " --- " << L.FrameSize << "\n");
3474 uint64_t LocalStackSize = L.FrameSize;
3475 bool DoStackMalloc =
3476 ASan.UseAfterReturn != AsanDetectStackUseAfterReturnMode::Never &&
3477 !ASan.CompileKernel && LocalStackSize <= kMaxStackMallocSize;
3478 bool DoDynamicAlloca = ClDynamicAllocaStack;
3479 // Don't do dynamic alloca or stack malloc if:
3480 // 1) There is inline asm: too often it makes assumptions on which registers
3481 // are available.
3482 // 2) There is a returns_twice call (typically setjmp), which is
3483 // optimization-hostile, and doesn't play well with introduced indirect
3484 // register-relative calculation of local variable addresses.
3485 DoDynamicAlloca &= !HasInlineAsm && !HasReturnsTwiceCall;
3486 DoStackMalloc &= !HasInlineAsm && !HasReturnsTwiceCall;
3487
3488 Value *StaticAlloca =
3489 DoDynamicAlloca ? nullptr : createAllocaForLayout(IRB, L, false);
3490
3491 Value *FakeStack;
3492 Value *LocalStackBase;
3493 Value *LocalStackBaseAlloca;
3494 uint8_t DIExprFlags = DIExpression::ApplyOffset;
3495
3496 if (DoStackMalloc) {
3497 LocalStackBaseAlloca =
3498 IRB.CreateAlloca(IntptrTy, nullptr, "asan_local_stack_base");
3499 if (ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Runtime) {
3500 // void *FakeStack = __asan_option_detect_stack_use_after_return
3501 // ? __asan_stack_malloc_N(LocalStackSize)
3502 // : nullptr;
3503 // void *LocalStackBase = (FakeStack) ? FakeStack :
3504 // alloca(LocalStackSize);
3505 Constant *OptionDetectUseAfterReturn = F.getParent()->getOrInsertGlobal(
3506 kAsanOptionDetectUseAfterReturn, IRB.getInt32Ty());
3507 Value *UseAfterReturnIsEnabled = IRB.CreateICmpNE(
3508 IRB.CreateLoad(IRB.getInt32Ty(), OptionDetectUseAfterReturn),
3509 Constant::getNullValue(IRB.getInt32Ty()));
3510 Instruction *Term =
3511 SplitBlockAndInsertIfThen(UseAfterReturnIsEnabled, InsBefore, false);
3512 IRBuilder<> IRBIf(Term);
3513 StackMallocIdx = StackMallocSizeClass(LocalStackSize);
3514 assert(StackMallocIdx <= kMaxAsanStackMallocSizeClass);
3515 Value *FakeStackValue =
3516 RTCI.createRuntimeCall(IRBIf, AsanStackMallocFunc[StackMallocIdx],
3517 ConstantInt::get(IntptrTy, LocalStackSize));
3518 IRB.SetInsertPoint(InsBefore);
3519 FakeStack = createPHI(IRB, UseAfterReturnIsEnabled, FakeStackValue, Term,
3520 ConstantInt::get(IntptrTy, 0));
3521 } else {
3522 // assert(ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always)
3523 // void *FakeStack = __asan_stack_malloc_N(LocalStackSize);
3524 // void *LocalStackBase = (FakeStack) ? FakeStack :
3525 // alloca(LocalStackSize);
3526 StackMallocIdx = StackMallocSizeClass(LocalStackSize);
3527 FakeStack =
3528 RTCI.createRuntimeCall(IRB, AsanStackMallocFunc[StackMallocIdx],
3529 ConstantInt::get(IntptrTy, LocalStackSize));
3530 }
3531 Value *NoFakeStack =
3532 IRB.CreateICmpEQ(FakeStack, Constant::getNullValue(IntptrTy));
3533 Instruction *Term =
3534 SplitBlockAndInsertIfThen(NoFakeStack, InsBefore, false);
3535 IRBuilder<> IRBIf(Term);
3536 Value *AllocaValue =
3537 DoDynamicAlloca ? createAllocaForLayout(IRBIf, L, true) : StaticAlloca;
3538
3539 IRB.SetInsertPoint(InsBefore);
3540 LocalStackBase = createPHI(IRB, NoFakeStack, AllocaValue, Term, FakeStack);
3541 IRB.CreateStore(LocalStackBase, LocalStackBaseAlloca);
3542 DIExprFlags |= DIExpression::DerefBefore;
3543 } else {
3544 // void *FakeStack = nullptr;
3545 // void *LocalStackBase = alloca(LocalStackSize);
3546 FakeStack = ConstantInt::get(IntptrTy, 0);
3547 LocalStackBase =
3548 DoDynamicAlloca ? createAllocaForLayout(IRB, L, true) : StaticAlloca;
3549 LocalStackBaseAlloca = LocalStackBase;
3550 }
3551
3552 // It shouldn't matter whether we pass an `alloca` or a `ptrtoint` as the
3553 // dbg.declare address operand, but passing a `ptrtoint` seems to confuse
3554 // later passes and can result in dropped variable coverage in debug info.
3555 Value *LocalStackBaseAllocaPtr =
3556 isa<PtrToIntInst>(LocalStackBaseAlloca)
3557 ? cast<PtrToIntInst>(LocalStackBaseAlloca)->getPointerOperand()
3558 : LocalStackBaseAlloca;
3559 assert(isa<AllocaInst>(LocalStackBaseAllocaPtr) &&
3560 "Variable descriptions relative to ASan stack base will be dropped");
3561
3562 // Replace Alloca instructions with base+offset.
3563 for (const auto &Desc : SVD) {
3564 AllocaInst *AI = Desc.AI;
3565 replaceDbgDeclare(AI, LocalStackBaseAllocaPtr, DIB, DIExprFlags,
3566 Desc.Offset);
3567 Value *NewAllocaPtr = IRB.CreateIntToPtr(
3568 IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, Desc.Offset)),
3569 AI->getType());
3570 AI->replaceAllUsesWith(NewAllocaPtr);
3571 }
3572
3573 // The left-most redzone has enough space for at least 4 pointers.
3574 // Write the Magic value to redzone[0].
3575 Value *BasePlus0 = IRB.CreateIntToPtr(LocalStackBase, IntptrPtrTy);
3576 IRB.CreateStore(ConstantInt::get(IntptrTy, kCurrentStackFrameMagic),
3577 BasePlus0);
3578 // Write the frame description constant to redzone[1].
3579 Value *BasePlus1 = IRB.CreateIntToPtr(
3580 IRB.CreateAdd(LocalStackBase,
3581 ConstantInt::get(IntptrTy, ASan.LongSize / 8)),
3582 IntptrPtrTy);
3583 GlobalVariable *StackDescriptionGlobal =
3584 createPrivateGlobalForString(*F.getParent(), DescriptionString,
3585 /*AllowMerging*/ true, kAsanGenPrefix);
3586 Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal, IntptrTy);
3587 IRB.CreateStore(Description, BasePlus1);
3588 // Write the PC to redzone[2].
3589 Value *BasePlus2 = IRB.CreateIntToPtr(
3590 IRB.CreateAdd(LocalStackBase,
3591 ConstantInt::get(IntptrTy, 2 * ASan.LongSize / 8)),
3592 IntptrPtrTy);
3593 IRB.CreateStore(IRB.CreatePointerCast(&F, IntptrTy), BasePlus2);
3594
3595 const auto &ShadowAfterScope = GetShadowBytesAfterScope(SVD, L);
3596
3597 // Poison the stack red zones at the entry.
3598 Value *ShadowBase = ASan.memToShadow(LocalStackBase, IRB);
3599 // As mask we must use most poisoned case: red zones and after scope.
3600 // As bytes we can use either the same or just red zones only.
3601 copyToShadow(ShadowAfterScope, ShadowAfterScope, IRB, ShadowBase);
3602
3603 if (!StaticAllocaPoisonCallVec.empty()) {
3604 const auto &ShadowInScope = GetShadowBytes(SVD, L);
3605
3606 // Poison static allocas near lifetime intrinsics.
3607 for (const auto &APC : StaticAllocaPoisonCallVec) {
3608 const ASanStackVariableDescription &Desc = *AllocaToSVDMap[APC.AI];
3609 assert(Desc.Offset % L.Granularity == 0);
3610 size_t Begin = Desc.Offset / L.Granularity;
3611 size_t End = Begin + (APC.Size + L.Granularity - 1) / L.Granularity;
3612
3613 IRBuilder<> IRB(APC.InsBefore);
3614 copyToShadow(ShadowAfterScope,
3615 APC.DoPoison ? ShadowAfterScope : ShadowInScope, Begin, End,
3616 IRB, ShadowBase);
3617 }
3618 }
3619
3620 SmallVector<uint8_t, 64> ShadowClean(ShadowAfterScope.size(), 0);
3621 SmallVector<uint8_t, 64> ShadowAfterReturn;
3622
3623 // (Un)poison the stack before all ret instructions.
3624 for (Instruction *Ret : RetVec) {
3625 IRBuilder<> IRBRet(Ret);
3626 // Mark the current frame as retired.
3627 IRBRet.CreateStore(ConstantInt::get(IntptrTy, kRetiredStackFrameMagic),
3628 BasePlus0);
3629 if (DoStackMalloc) {
3630 assert(StackMallocIdx >= 0);
3631 // if FakeStack != 0 // LocalStackBase == FakeStack
3632 // // In use-after-return mode, poison the whole stack frame.
3633 // if StackMallocIdx <= 4
3634 // // For small sizes inline the whole thing:
3635 // memset(ShadowBase, kAsanStackAfterReturnMagic, ShadowSize);
3636 // **SavedFlagPtr(FakeStack) = 0
3637 // else
3638 // __asan_stack_free_N(FakeStack, LocalStackSize)
3639 // else
3640 // <This is not a fake stack; unpoison the redzones>
3641 Value *Cmp =
3642 IRBRet.CreateICmpNE(FakeStack, Constant::getNullValue(IntptrTy));
3643 Instruction *ThenTerm, *ElseTerm;
3644 SplitBlockAndInsertIfThenElse(Cmp, Ret, &ThenTerm, &ElseTerm);
3645
3646 IRBuilder<> IRBPoison(ThenTerm);
3647 if (ASan.MaxInlinePoisoningSize != 0 && StackMallocIdx <= 4) {
3648 int ClassSize = kMinStackMallocSize << StackMallocIdx;
3649 ShadowAfterReturn.resize(ClassSize / L.Granularity,
3650 kAsanStackUseAfterReturnMagic);
3651 copyToShadow(ShadowAfterReturn, ShadowAfterReturn, IRBPoison,
3652 ShadowBase);
3653 Value *SavedFlagPtrPtr = IRBPoison.CreateAdd(
3654 FakeStack,
3655 ConstantInt::get(IntptrTy, ClassSize - ASan.LongSize / 8));
3656 Value *SavedFlagPtr = IRBPoison.CreateLoad(
3657 IntptrTy, IRBPoison.CreateIntToPtr(SavedFlagPtrPtr, IntptrPtrTy));
3658 IRBPoison.CreateStore(
3659 Constant::getNullValue(IRBPoison.getInt8Ty()),
3660 IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getPtrTy()));
3661 } else {
3662 // For larger frames call __asan_stack_free_*.
3663 RTCI.createRuntimeCall(
3664 IRBPoison, AsanStackFreeFunc[StackMallocIdx],
3665 {FakeStack, ConstantInt::get(IntptrTy, LocalStackSize)});
3666 }
3667
3668 IRBuilder<> IRBElse(ElseTerm);
3669 copyToShadow(ShadowAfterScope, ShadowClean, IRBElse, ShadowBase);
3670 } else {
3671 copyToShadow(ShadowAfterScope, ShadowClean, IRBRet, ShadowBase);
3672 }
3673 }
3674
3675 // We are done. Remove the old unused alloca instructions.
3676 for (auto *AI : AllocaVec)
3677 AI->eraseFromParent();
3678}
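// Illustrative frame sketch, not from the source: on a 64-bit target the
// rewritten frame begins with a left redzone of at least 4 pointers holding
//
//   base + 0  : kCurrentStackFrameMagic
//   base + 8  : pointer to the frame description string
//   base + 16 : pointer to the function (for symbolizing reports)
//
// followed by the locals at the offsets ComputeASanStackFrameLayout chose,
// separated and followed by redzones poisoned via copyToShadow above.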
3679
3680void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size,
3681 IRBuilder<> &IRB, bool DoPoison) {
3682 // For now just insert the call to ASan runtime.
3683 Value *AddrArg = IRB.CreatePointerCast(V, IntptrTy);
3684 Value *SizeArg = ConstantInt::get(IntptrTy, Size);
3685 RTCI.createRuntimeCall(
3686 IRB, DoPoison ? AsanPoisonStackMemoryFunc : AsanUnpoisonStackMemoryFunc,
3687 {AddrArg, SizeArg});
3688}
3689
3690// Handling llvm.lifetime intrinsics for a given %alloca:
3691// (1) collect all llvm.lifetime.xxx(%size, %value) describing the alloca.
3692// (2) if %size is constant, poison memory for llvm.lifetime.end (to detect
3693// invalid accesses) and unpoison it for llvm.lifetime.start (the memory
3694// could be poisoned by previous llvm.lifetime.end instruction, as the
3695// variable may go in and out of scope several times, e.g. in loops).
3696// (3) if we poisoned at least one %alloca in a function,
3697// unpoison the whole stack frame at function exit.
3698void FunctionStackPoisoner::handleDynamicAllocaCall(AllocaInst *AI) {
3699 IRBuilder<> IRB(AI);
3700
3701 const Align Alignment = std::max(Align(kAllocaRzSize), AI->getAlign());
3702 const uint64_t AllocaRedzoneMask = kAllocaRzSize - 1;
3703
3704 Value *Zero = Constant::getNullValue(IntptrTy);
3705 Value *AllocaRzSize = ConstantInt::get(IntptrTy, kAllocaRzSize);
3706 Value *AllocaRzMask = ConstantInt::get(IntptrTy, AllocaRedzoneMask);
3707
3708 // Since we need to extend the alloca with additional memory to place the
3709 // redzones, and the alloca allocates a number of elements of ElementSize
3710 // bytes each, compute the allocated memory size in bytes (OldSize) as
3711 // element count * ElementSize.
3712 const unsigned ElementSize =
3713 F.getDataLayout().getTypeAllocSize(AI->getAllocatedType());
3714 Value *OldSize =
3715 IRB.CreateMul(IRB.CreateIntCast(AI->getArraySize(), IntptrTy, false),
3716 ConstantInt::get(IntptrTy, ElementSize));
3717
3718 // PartialSize = OldSize % 32
3719 Value *PartialSize = IRB.CreateAnd(OldSize, AllocaRzMask);
3720
3721 // Misalign = kAllocaRzSize - PartialSize;
3722 Value *Misalign = IRB.CreateSub(AllocaRzSize, PartialSize);
3723
3724 // PartialPadding = Misalign != kAllocaRzSize ? Misalign : 0;
3725 Value *Cond = IRB.CreateICmpNE(Misalign, AllocaRzSize);
3726 Value *PartialPadding = IRB.CreateSelect(Cond, Misalign, Zero);
3727
3728 // AdditionalChunkSize = Alignment + PartialPadding + kAllocaRzSize
3729 // Alignment is added to locate left redzone, PartialPadding for possible
3730 // partial redzone and kAllocaRzSize for right redzone respectively.
3731 Value *AdditionalChunkSize = IRB.CreateAdd(
3732 ConstantInt::get(IntptrTy, Alignment.value() + kAllocaRzSize),
3733 PartialPadding);
3734
3735 Value *NewSize = IRB.CreateAdd(OldSize, AdditionalChunkSize);
3736
3737 // Insert new alloca with new NewSize and Alignment params.
3738 AllocaInst *NewAlloca = IRB.CreateAlloca(IRB.getInt8Ty(), NewSize);
3739 NewAlloca->setAlignment(Alignment);
3740
3741 // NewAddress = Address + Alignment
3742 Value *NewAddress =
3743 IRB.CreateAdd(IRB.CreatePtrToInt(NewAlloca, IntptrTy),
3744 ConstantInt::get(IntptrTy, Alignment.value()));
3745
3746 // Insert __asan_alloca_poison call for new created alloca.
3747 RTCI.createRuntimeCall(IRB, AsanAllocaPoisonFunc, {NewAddress, OldSize});
3748
3749 // Store the last alloca's address to DynamicAllocaLayout. We'll need this
3750 // to unpoison the dynamic allocas at function exit.
3751 IRB.CreateStore(IRB.CreatePtrToInt(NewAlloca, IntptrTy), DynamicAllocaLayout);
3752
3753 Value *NewAddressPtr = IRB.CreateIntToPtr(NewAddress, AI->getType());
3754
3755 // Replace all uses of AddressReturnedByAlloca with NewAddressPtr.
3756 AI->replaceAllUsesWith(NewAddressPtr);
3757
3758 // We are done. Erase old alloca from parent.
3759 AI->eraseFromParent();
3760}
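// Worked example, a sketch not in the source: for alloca(40) with
// kAllocaRzSize == 32 and a 32-byte Alignment, the arithmetic above yields
// PartialSize = 40 & 31 = 8, Misalign = 24, PartialPadding = 24, and
// NewSize = 40 + (32 + 32 + 24) = 128: a 32-byte left redzone, 40 payload
// bytes, 24 bytes of partial-redzone padding, and a 32-byte right redzone,
// with NewAddress = NewAlloca + 32 pointing at the payload.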
3761
3762// isSafeAccess returns true if Addr is always inbounds with respect to its
3763// base object. For example, it is a field access or an array access with
3764// constant inbounds index.
3765bool AddressSanitizer::isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis,
3766 Value *Addr, TypeSize TypeStoreSize) const {
3767 if (TypeStoreSize.isScalable())
3768 // TODO: We can use vscale_range to convert a scalable value to an
3769 // upper bound on the access size.
3770 return false;
3771
3772 SizeOffsetAPInt SizeOffset = ObjSizeVis.compute(Addr);
3773 if (!SizeOffset.bothKnown())
3774 return false;
3775
3776 uint64_t Size = SizeOffset.Size.getZExtValue();
3777 int64_t Offset = SizeOffset.Offset.getSExtValue();
3778
3779 // Three checks are required to ensure safety:
3780 // . Offset >= 0 (since the offset is given from the base ptr)
3781 // . Size >= Offset (unsigned)
3782 // . Size - Offset >= NeededSize (unsigned)
3783 return Offset >= 0 && Size >= uint64_t(Offset) &&
3784 Size - uint64_t(Offset) >= TypeStoreSize / 8;
3785}
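// Worked example, a sketch not in the source: for
//
//   struct S { int a; int b; } Obj;   // Size = 8 bytes
//   ... = Obj.b;                      // Offset = 4, 4-byte load
//
// ObjectSizeOffsetVisitor reports (Size, Offset) = (8, 4) and the three
// checks above all pass (4 >= 0, 8 >= 4, 8 - 4 >= 4), so the access is
// statically in bounds and callers may omit its shadow check.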
@ Poison
static cl::opt< bool > ClUseStackSafety("stack-tagging-use-stack-safety", cl::Hidden, cl::init(true), cl::desc("Use Stack Safety analysis results"))
Rewrite undef for PHI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static void findStoresToUninstrumentedArgAllocas(AddressSanitizer &ASan, Instruction &InsBefore, SmallVectorImpl< Instruction * > &InitInsts)
Collect instructions in the entry block after InsBefore which initialize permanent storage for a func...
static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I, Instruction *InsertBefore, Value *Addr, MaybeAlign Alignment, unsigned Granularity, TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls, uint32_t Exp, RuntimeCallInserter &RTCI)
static const uint64_t kDefaultShadowScale
const char kAMDGPUUnreachableName[]
constexpr size_t kAccessSizeIndexMask
static cl::opt< int > ClDebugMin("asan-debug-min", cl::desc("Debug min inst"), cl::Hidden, cl::init(-1))
static cl::opt< bool > ClUsePrivateAlias("asan-use-private-alias", cl::desc("Use private aliases for global variables"), cl::Hidden, cl::init(true))
static const uint64_t kPS_ShadowOffset64
static const uint64_t kFreeBSD_ShadowOffset32
constexpr size_t kIsWriteShift
static const uint64_t kSmallX86_64ShadowOffsetAlignMask
static bool isInterestingPointerSubtraction(Instruction *I)
const char kAMDGPUAddressSharedName[]
const char kAsanStackFreeNameTemplate[]
constexpr size_t kCompileKernelMask
static cl::opt< bool > ClForceDynamicShadow("asan-force-dynamic-shadow", cl::desc("Load shadow address into a local variable for each function"), cl::Hidden, cl::init(false))
const char kAsanOptionDetectUseAfterReturn[]
static cl::opt< std::string > ClMemoryAccessCallbackPrefix("asan-memory-access-callback-prefix", cl::desc("Prefix for memory access callbacks"), cl::Hidden, cl::init("__asan_"))
static const uint64_t kRISCV64_ShadowOffset64
static cl::opt< bool > ClInsertVersionCheck("asan-guard-against-version-mismatch", cl::desc("Guard against compiler/runtime version mismatch."), cl::Hidden, cl::init(true))
const char kAsanSetShadowPrefix[]
static cl::opt< AsanDtorKind > ClOverrideDestructorKind("asan-destructor-kind", cl::desc("Sets the ASan destructor kind. The default is to use the value " "provided to the pass constructor"), cl::values(clEnumValN(AsanDtorKind::None, "none", "No destructors"), clEnumValN(AsanDtorKind::Global, "global", "Use global destructors")), cl::init(AsanDtorKind::Invalid), cl::Hidden)
static cl::opt< bool > ClInstrumentWrites("asan-instrument-writes", cl::desc("instrument write instructions"), cl::Hidden, cl::init(true))
const char kAsanPtrCmp[]
static uint64_t GetCtorAndDtorPriority(Triple &TargetTriple)
const char kAsanStackMallocNameTemplate[]
static cl::opt< bool > ClInstrumentByval("asan-instrument-byval", cl::desc("instrument byval call arguments"), cl::Hidden, cl::init(true))
const char kAsanInitName[]
static cl::opt< bool > ClGlobals("asan-globals", cl::desc("Handle global objects"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClRedzoneByvalArgs("asan-redzone-byval-args", cl::desc("Create redzones for byval " "arguments (extra copy " "required)"), cl::Hidden, cl::init(true))
static const uint64_t kWindowsShadowOffset64
static const uint64_t kEmscriptenShadowOffset
const char kAsanGenPrefix[]
constexpr size_t kIsWriteMask
static uint64_t getRedzoneSizeForScale(int MappingScale)
static const uint64_t kDefaultShadowOffset64
static cl::opt< bool > ClOptimizeCallbacks("asan-optimize-callbacks", cl::desc("Optimize callbacks"), cl::Hidden, cl::init(false))
const char kAsanUnregisterGlobalsName[]
static const uint64_t kAsanCtorAndDtorPriority
const char kAsanUnpoisonGlobalsName[]
static cl::opt< bool > ClWithIfuncSuppressRemat("asan-with-ifunc-suppress-remat", cl::desc("Suppress rematerialization of dynamic shadow address by passing " "it through inline asm in prologue."), cl::Hidden, cl::init(true))
static cl::opt< int > ClDebugStack("asan-debug-stack", cl::desc("debug stack"), cl::Hidden, cl::init(0))
const char kAsanUnregisterElfGlobalsName[]
static bool isUnsupportedAMDGPUAddrspace(Value *Addr)
const char kAsanRegisterImageGlobalsName[]
static cl::opt< bool > ClOpt("asan-opt", cl::desc("Optimize instrumentation"), cl::Hidden, cl::init(true))
static const uint64_t kAllocaRzSize
const char kODRGenPrefix[]
static const uint64_t kSystemZ_ShadowOffset64
static const uint64_t kDefaultShadowOffset32
const char kAsanShadowMemoryDynamicAddress[]
static cl::opt< bool > ClUseOdrIndicator("asan-use-odr-indicator", cl::desc("Use odr indicators to improve ODR reporting"), cl::Hidden, cl::init(true))
static bool GlobalWasGeneratedByCompiler(GlobalVariable *G)
Check if G has been created by a trusted compiler pass.
const char kAsanStackMallocAlwaysNameTemplate[]
static cl::opt< bool > ClInvalidPointerCmp("asan-detect-invalid-pointer-cmp", cl::desc("Instrument <, <=, >, >= with pointer operands"), cl::Hidden, cl::init(false))
static const uint64_t kAsanEmscriptenCtorAndDtorPriority
static cl::opt< int > ClInstrumentationWithCallsThreshold("asan-instrumentation-with-call-threshold", cl::desc("If the function being instrumented contains more than " "this number of memory accesses, use callbacks instead of " "inline checks (-1 means never use callbacks)."), cl::Hidden, cl::init(7000))
static cl::opt< int > ClDebugMax("asan-debug-max", cl::desc("Debug max inst"), cl::Hidden, cl::init(-1))
static cl::opt< bool > ClInvalidPointerSub("asan-detect-invalid-pointer-sub", cl::desc("Instrument - operations with pointer operands"), cl::Hidden, cl::init(false))
static const uint64_t kFreeBSD_ShadowOffset64
static cl::opt< uint32_t > ClForceExperiment("asan-force-experiment", cl::desc("Force optimization experiment (for testing)"), cl::Hidden, cl::init(0))
const char kSanCovGenPrefix[]
static const uint64_t kFreeBSDKasan_ShadowOffset64
const char kAsanModuleDtorName[]
static const uint64_t kDynamicShadowSentinel
static bool isInterestingPointerComparison(Instruction *I)
static cl::opt< bool > ClStack("asan-stack", cl::desc("Handle stack memory"), cl::Hidden, cl::init(true))
static const uint64_t kMIPS64_ShadowOffset64
static const uint64_t kLinuxKasan_ShadowOffset64
static int StackMallocSizeClass(uint64_t LocalStackSize)
static cl::opt< uint32_t > ClMaxInlinePoisoningSize("asan-max-inline-poisoning-size", cl::desc("Inline shadow poisoning for blocks up to the given size in bytes."), cl::Hidden, cl::init(64))
static cl::opt< bool > ClInstrumentAtomics("asan-instrument-atomics", cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClUseAfterScope("asan-use-after-scope", cl::desc("Check stack-use-after-scope"), cl::Hidden, cl::init(false))
constexpr size_t kAccessSizeIndexShift
static cl::opt< int > ClMappingScale("asan-mapping-scale", cl::desc("scale of asan shadow mapping"), cl::Hidden, cl::init(0))
const char kAsanPoisonStackMemoryName[]
static cl::opt< bool > ClEnableKasan("asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"), cl::Hidden, cl::init(false))
static cl::opt< std::string > ClDebugFunc("asan-debug-func", cl::Hidden, cl::desc("Debug func"))
static cl::opt< bool > ClUseGlobalsGC("asan-globals-live-support", cl::desc("Use linker features to support dead " "code stripping of globals"), cl::Hidden, cl::init(true))
static const size_t kNumberOfAccessSizes
const char kAsanUnpoisonStackMemoryName[]
static const uint64_t kLoongArch64_ShadowOffset64
const char kAsanRegisterGlobalsName[]
static cl::opt< bool > ClInstrumentDynamicAllocas("asan-instrument-dynamic-allocas", cl::desc("instrument dynamic allocas"), cl::Hidden, cl::init(true))
const char kAsanModuleCtorName[]
const char kAsanGlobalsRegisteredFlagName[]
static const size_t kMaxStackMallocSize
static cl::opt< bool > ClRecover("asan-recover", cl::desc("Enable recovery mode (continue-after-error)."), cl::Hidden, cl::init(false))
static cl::opt< bool > ClOptSameTemp("asan-opt-same-temp", cl::desc("Instrument the same temp just once"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClDynamicAllocaStack("asan-stack-dynamic-alloca", cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClOptStack("asan-opt-stack", cl::desc("Don't instrument scalar stack variables"), cl::Hidden, cl::init(false))
static const uint64_t kMIPS_ShadowOffsetN32
const char kAsanUnregisterImageGlobalsName[]
static cl::opt< AsanDetectStackUseAfterReturnMode > ClUseAfterReturn("asan-use-after-return", cl::desc("Sets the mode of detection for stack-use-after-return."), cl::values(clEnumValN(AsanDetectStackUseAfterReturnMode::Never, "never", "Never detect stack use after return."), clEnumValN(AsanDetectStackUseAfterReturnMode::Runtime, "runtime", "Detect stack use after return if " "binary flag 'ASAN_OPTIONS=detect_stack_use_after_return' is set."), clEnumValN(AsanDetectStackUseAfterReturnMode::Always, "always", "Always detect stack use after return.")), cl::Hidden, cl::init(AsanDetectStackUseAfterReturnMode::Runtime))
static cl::opt< bool > ClOptGlobals("asan-opt-globals", cl::desc("Don't instrument scalar globals"), cl::Hidden, cl::init(true))
static const uintptr_t kCurrentStackFrameMagic
static ShadowMapping getShadowMapping(const Triple &TargetTriple, int LongSize, bool IsKasan)
static const uint64_t kPPC64_ShadowOffset64
static cl::opt< AsanCtorKind > ClConstructorKind("asan-constructor-kind", cl::desc("Sets the ASan constructor kind"), cl::values(clEnumValN(AsanCtorKind::None, "none", "No constructors"), clEnumValN(AsanCtorKind::Global, "global", "Use global constructors")), cl::init(AsanCtorKind::Global), cl::Hidden)
static const int kMaxAsanStackMallocSizeClass
static const uint64_t kMIPS32_ShadowOffset32
static cl::opt< bool > ClAlwaysSlowPath("asan-always-slow-path", cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden, cl::init(false))
static const uint64_t kNetBSD_ShadowOffset32
static const uint64_t kFreeBSDAArch64_ShadowOffset64
static const uint64_t kSmallX86_64ShadowOffsetBase
static cl::opt< bool > ClInitializers("asan-initialization-order", cl::desc("Handle C++ initializer order"), cl::Hidden, cl::init(true))
static const uint64_t kNetBSD_ShadowOffset64
const char kAsanPtrSub[]
static cl::opt< unsigned > ClRealignStack("asan-realign-stack", cl::desc("Realign stack to the value of this flag (power of two)"), cl::Hidden, cl::init(32))
static const uint64_t kWindowsShadowOffset32
static cl::opt< bool > ClInstrumentReads("asan-instrument-reads", cl::desc("instrument read instructions"), cl::Hidden, cl::init(true))
static size_t TypeStoreSizeToSizeIndex(uint32_t TypeSize)
const char kAsanAllocaPoison[]
constexpr size_t kCompileKernelShift
static cl::opt< bool > ClWithIfunc("asan-with-ifunc", cl::desc("Access dynamic shadow through an ifunc global on " "platforms that support this"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClKasanMemIntrinCallbackPrefix("asan-kernel-mem-intrinsic-prefix", cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden, cl::init(false))
const char kAsanVersionCheckNamePrefix[]
const char kAMDGPUAddressPrivateName[]
static const uint64_t kNetBSDKasan_ShadowOffset64
const char kAMDGPUBallotName[]
const char kAsanRegisterElfGlobalsName[]
static cl::opt< uint64_t > ClMappingOffset("asan-mapping-offset", cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"), cl::Hidden, cl::init(0))
const char kAsanReportErrorTemplate[]
static cl::opt< bool > ClWithComdat("asan-with-comdat", cl::desc("Place ASan constructors in comdat sections"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClSkipPromotableAllocas("asan-skip-promotable-allocas", cl::desc("Do not instrument promotable allocas"), cl::Hidden, cl::init(true))
static cl::opt< int > ClMaxInsnsToInstrumentPerBB("asan-max-ins-per-bb", cl::init(10000), cl::desc("maximal number of instructions to instrument in any given BB"), cl::Hidden)
static const uintptr_t kRetiredStackFrameMagic
static cl::opt< bool > ClUseStackSafety("asan-use-stack-safety", cl::Hidden, cl::init(true), cl::Hidden, cl::desc("Use Stack Safety analysis results"), cl::Optional)
const char kAsanPoisonGlobalsName[]
const char kAsanHandleNoReturnName[]
static const size_t kMinStackMallocSize
static cl::opt< int > ClDebug("asan-debug", cl::desc("debug"), cl::Hidden, cl::init(0))
const char kAsanAllocasUnpoison[]
static const uint64_t kAArch64_ShadowOffset64
static cl::opt< bool > ClInvalidPointerPairs("asan-detect-invalid-pointer-pair", cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden, cl::init(false))
This file contains the simple types necessary to represent the attributes associated with functions a...
static bool isPointerOperand(Value *I, User *U)
static const Function * getParent(const Value *V)
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
Definition: CommandLine.h:686
This file contains the declarations for the subclasses of Constant, which represent the different fla...
#define LLVM_DEBUG(X)
Definition: Debug.h:101
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
uint64_t Addr
std::string Name
uint64_t Size
bool End
Definition: ELF_riscv.cpp:480
static bool runOnFunction(Function &F, bool PostInlining)
This is the interface for a simple mod/ref and alias analysis over globals.
IRTranslator LLVM IR MI
This defines the Use class.
static LVOptions Options
Definition: LVOptions.cpp:25
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
#define G(x, y, z)
Definition: MD5.cpp:56
This file contains the declarations for metadata subclasses.
Module.h This file contains the declarations for the Module class.
uint64_t IntrinsicInst * II
FunctionAnalysisManager FAM
ModuleAnalysisManager MAM
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
raw_pwrite_stream & OS
#define OP(OPC)
Definition: SandboxIR.h:612
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:167
This file contains some functions that are useful when dealing with strings.
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:40
uint64_t getZExtValue() const
Get zero extended value.
Definition: APInt.h:1500
int64_t getSExtValue() const
Get sign extended value.
Definition: APInt.h:1522
AddressSanitizerPass(const AddressSanitizerOptions &Options, bool UseGlobalGC=true, bool UseOdrIndicator=true, AsanDtorKind DestructorKind=AsanDtorKind::Global, AsanCtorKind ConstructorKind=AsanCtorKind::Global)
PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
an instruction to allocate memory on the stack
Definition: Instructions.h:61
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
Definition: Instructions.h:147
bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:122
PointerType * getType() const
Overload to return most specific pointer type.
Definition: Instructions.h:97
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:115
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
Definition: Instructions.h:137
std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
void setAlignment(Align Align)
Definition: Instructions.h:126
const Value * getArraySize() const
Get the number of elements allocated.
Definition: Instructions.h:93
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:253
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:405
This class represents an incoming formal argument to a Function.
Definition: Argument.h:31
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
static ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
Definition: Type.cpp:635
An instruction that atomically checks whether a specified value is in a memory location,...
Definition: Instructions.h:495
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:696
AttributeList addParamAttribute(LLVMContext &C, unsigned ArgNo, Attribute::AttrKind Kind) const
Add an argument attribute to the list.
Definition: Attributes.h:606
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:448
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
Definition: BasicBlock.cpp:416
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition: BasicBlock.h:212
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:219
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:239
const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr if the function does no...
Definition: BasicBlock.cpp:292
Conditional or Unconditional Branch instruction.
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1236
bool isInlineAsm() const
Check if this call is an inline asm statement.
Definition: InstrTypes.h:1532
static CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
bool doesNotReturn() const
Determine if the call cannot return.
Definition: InstrTypes.h:2008
unsigned arg_size() const
Definition: InstrTypes.h:1408
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
@ Largest
The linker will choose the largest COMDAT.
Definition: Comdat.h:38
@ SameSize
The data referenced by the COMDAT must be the same size.
Definition: Comdat.h:40
@ Any
The linker may choose any COMDAT.
Definition: Comdat.h:36
@ NoDeduplicate
No deduplication is performed.
Definition: Comdat.h:39
@ ExactMatch
The data referenced by the COMDAT must be the same.
Definition: Comdat.h:37
ConstantArray - Constant Array Declarations.
Definition: Constants.h:424
static Constant * get(ArrayType *T, ArrayRef< Constant * > V)
Definition: Constants.cpp:1292
static Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2281
static Constant * getPointerCast(Constant *C, Type *Ty)
Create a BitCast, AddrSpaceCast, or a PtrToInt cast constant expression.
Definition: Constants.cpp:2227
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, GEPNoWrapFlags NW=GEPNoWrapFlags::none(), std::optional< ConstantRange > InRange=std::nullopt, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
Definition: Constants.h:1253
static bool isValueValidForType(Type *Ty, uint64_t V)
This static method returns true if the type Ty is big enough to represent the value V.
Definition: Constants.cpp:1575
static ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
Definition: Constants.cpp:1800
static Constant * get(StructType *T, ArrayRef< Constant * > V)
Definition: Constants.cpp:1357
This is an important base class in LLVM.
Definition: Constant.h:42
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:370
Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
Definition: Constants.cpp:432
Debug location.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
A debug info location.
Definition: DebugLoc.h:33
DILocation * get() const
Get the underlying DILocation.
Definition: DebugLoc.cpp:20
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
Definition: DerivedTypes.h:168
static FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
const BasicBlock & front() const
Definition: Function.h:858
static Function * createWithDefaultAttr(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Creates a function with some attributes recorded in llvm.module.flags and the LLVMContext applied.
Definition: Function.cpp:401
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition: Function.h:903
Constant * getPersonalityFn() const
Get the personality function associated with this function.
Definition: Function.cpp:1993
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:380
const Constant * getAliasee() const
Definition: GlobalAlias.h:84
static GlobalAlias * create(Type *Ty, unsigned AddressSpace, LinkageTypes Linkage, const Twine &Name, Constant *Aliasee, Module *Parent)
If a parent module is specified, the alias is automatically inserted into the end of the specified mo...
Definition: Globals.cpp:550
void setAlignment(Align Align)
Sets the alignment attribute of the GlobalObject.
Definition: Globals.cpp:137
void copyMetadata(const GlobalObject *Src, unsigned Offset)
Copy metadata from Src, adjusting offsets by Offset.
Definition: Metadata.cpp:1762
void setComdat(Comdat *C)
Definition: Globals.cpp:206
void setSection(StringRef S)
Change the section for this global.
Definition: Globals.cpp:267
VisibilityTypes getVisibility() const
Definition: GlobalValue.h:248
void setUnnamedAddr(UnnamedAddr Val)
Definition: GlobalValue.h:231
bool hasLocalLinkage() const
Definition: GlobalValue.h:528
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1',...
Definition: GlobalValue.h:567
ThreadLocalMode getThreadLocalMode() const
Definition: GlobalValue.h:271
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:656
@ HiddenVisibility
The GV is hidden.
Definition: GlobalValue.h:68
void setVisibility(VisibilityTypes V)
Definition: GlobalValue.h:254
LinkageTypes
An enumeration for the kinds of linkage for global values.
Definition: GlobalValue.h:51
@ PrivateLinkage
Like Internal, but omit from symbol table.
Definition: GlobalValue.h:60
@ CommonLinkage
Tentative definitions.
Definition: GlobalValue.h:62
@ InternalLinkage
Rename collisions when linking (static functions).
Definition: GlobalValue.h:59
@ AvailableExternallyLinkage
Available for inspection, not emission.
Definition: GlobalValue.h:53
@ ExternalWeakLinkage
ExternalWeak linkage description.
Definition: GlobalValue.h:61
DLLStorageClassTypes getDLLStorageClass() const
Definition: GlobalValue.h:275
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
void copyAttributesFrom(const GlobalVariable *Src)
copyAttributesFrom - copy all additional attributes (those not needed to create a GlobalVariable) from the GlobalVariable Src to this one.
Definition: Globals.cpp:514
Analysis pass providing a never-invalidated alias analysis result.
This instruction compares its operands according to the predicate given to the constructor.
Common base class shared among various IRBuilders.
Definition: IRBuilder.h:91
AllocaInst * CreateAlloca(Type *Ty, unsigned AddrSpace, Value *ArraySize=nullptr, const Twine &Name="")
Definition: IRBuilder.h:1790
IntegerType * getInt1Ty()
Fetch the type representing a single bit.
Definition: IRBuilder.h:508
Value * CreateExtractElement(Value *Vec, Value *Idx, const Twine &Name="")
Definition: IRBuilder.h:2480
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
Definition: IRBuilder.h:1824
Value * CreatePointerCast(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2190
Value * CreateICmpSGE(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2289
Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
Definition: IRBuilder.cpp:1091
BasicBlock::iterator GetInsertPoint() const
Definition: IRBuilder.h:172
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2142
Value * CreateTypeSize(Type *DstType, TypeSize Size)
Create an expression which evaluates to the number of units in Size at runtime.
Definition: IRBuilder.cpp:105
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition: IRBuilder.h:1454
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
Definition: IRBuilder.h:523
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition: IRBuilder.h:1996
BasicBlock * GetInsertBlock() const
Definition: IRBuilder.h:171
IntegerType * getInt64Ty()
Fetch the type representing a 64-bit integer.
Definition: IRBuilder.h:528
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2265
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition: IRBuilder.h:1883
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Definition: IRBuilder.h:483
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Definition: IRBuilder.h:2417
Value * CreateNot(Value *V, const Twine &Name="")
Definition: IRBuilder.h:1766
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2261
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1361
ConstantInt * getIntN(unsigned N, uint64_t C)
Get a constant N-bit value, zero extended or truncated from a 64-bit value.
Definition: IRBuilder.h:494
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool' for the isVolatile parameter.
Definition: IRBuilder.h:1807
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1492
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
Definition: IRBuilder.h:1820
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1344
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2137
Value * CreateIsNotNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg != 0.
Definition: IRBuilder.h:2569
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1514
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
Definition: IRBuilder.h:2216
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition: IRBuilder.h:177
Type * getVoidTy()
Fetch the type representing void.
Definition: IRBuilder.h:561
StoreInst * CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, bool isVolatile=false)
Definition: IRBuilder.h:1843
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args=std::nullopt, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2432
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
Definition: IRBuilder.h:513
CallInst * CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign, uint64_t Size, bool isVolatile=false, MDNode *TBAATag=nullptr, MDNode *TBAAStructTag=nullptr, MDNode *ScopeTag=nullptr, MDNode *NoAliasTag=nullptr)
Create and insert a memcpy between the specified pointers.
Definition: IRBuilder.h:656
Value * CreateAddrSpaceCast(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2152
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1378
This provides a uniform API for creating instructions and inserting them into a basic block: either at the end of a BasicBlock, or at a specific iterator location in a block.
Definition: IRBuilder.h:2686
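The IRBuilder entries above are the building blocks of the pass's inline checks. A minimal sketch of the classic Shadow = (Addr >> Scale) + Offset computation they combine into, assuming a 64-bit target; emitShadowIsPoisoned and the ShadowOffset/ShadowScale parameters are illustrative, not taken from this file:

#include "llvm/IR/Constants.h"
#include "llvm/IR/IRBuilder.h"
using namespace llvm;

static Value *emitShadowIsPoisoned(IRBuilder<> &IRB, Value *Addr,
                                   uint64_t ShadowOffset,
                                   unsigned ShadowScale) { // hypothetical helper
  Type *IntptrTy = IRB.getInt64Ty(); // assumes 64-bit pointers
  Value *AddrInt = IRB.CreatePtrToInt(Addr, IntptrTy);
  Value *ShadowInt = IRB.CreateAdd(IRB.CreateLShr(AddrInt, ShadowScale),
                                   ConstantInt::get(IntptrTy, ShadowOffset));
  Value *ShadowPtr =
      IRB.CreateIntToPtr(ShadowInt, PointerType::getUnqual(IRB.getInt8Ty()));
  Value *ShadowByte = IRB.CreateLoad(IRB.getInt8Ty(), ShadowPtr, "shadow");
  // A non-zero shadow byte means part of the granule is unaddressable.
  return IRB.CreateICmpNE(ShadowByte, ConstantInt::get(IRB.getInt8Ty(), 0));
}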
static InlineAsm * get(FunctionType *Ty, StringRef AsmString, StringRef Constraints, bool hasSideEffects, bool isAlignStack=false, AsmDialect asmDialect=AD_ATT, bool canThrow=false)
InlineAsm::get - Return the specified uniqued inline asm string.
Definition: InlineAsm.cpp:43
An analysis over an "outer" IR unit that provides access to an analysis manager over an "inner" IR unit.
Definition: PassManager.h:563
Base class for instruction visitors.
Definition: InstVisitor.h:78
RetTy visitCallBase(CallBase &I)
Definition: InstVisitor.h:267
RetTy visitCleanupReturnInst(CleanupReturnInst &I)
Definition: InstVisitor.h:244
RetTy visitIntrinsicInst(IntrinsicInst &I)
Definition: InstVisitor.h:219
void visit(Iterator Start, Iterator End)
Definition: InstVisitor.h:87
RetTy visitReturnInst(ReturnInst &I)
Definition: InstVisitor.h:226
RetTy visitAllocaInst(AllocaInst &I)
Definition: InstVisitor.h:168
RetTy visitResumeInst(ResumeInst &I)
Definition: InstVisitor.h:238
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:466
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not have a module.
Definition: Instruction.cpp:66
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
Definition: Instruction.h:363
bool isEHPad() const
Return true if the instruction is a variety of EH-block.
Definition: Instruction.h:824
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Definition: Instruction.cpp:92
BasicBlock * getSuccessor(unsigned Idx) const LLVM_READONLY
Return the specified successor. This instruction must be a terminator.
const Instruction * getNextNonDebugInstruction(bool SkipPseudoOp=false) const
Return a pointer to the next non-debug instruction in the same basic block as 'this', or nullptr if no such instruction exists.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:463
const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
Definition: Instruction.cpp:74
void moveBefore(Instruction *MovePos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos lives in, right before MovePos.
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:266
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:48
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
void emitError(uint64_t LocCookie, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location information.
An instruction for reading from memory.
Definition: Instructions.h:174
static Error ParseSectionSpecifier(StringRef Spec, StringRef &Segment, StringRef &Section, unsigned &TAA, bool &TAAParsed, unsigned &StubSize)
Parse the section specifier indicated by "Spec".
MDNode * createUnlikelyBranchWeights()
Return metadata containing two branch weights, with significant bias towards the false destination.
Definition: MDBuilder.cpp:47
Metadata node.
Definition: Metadata.h:1069
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1542
This is the common base class for memset/memcpy/memmove.
Root of the metadata hierarchy.
Definition: Metadata.h:62
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
Evaluate the size and offset of an object pointed to by a Value* statically.
SizeOffsetAPInt compute(Value *V)
A container for an operand bundle being viewed as a set of values rather than a set of uses.
Definition: InstrTypes.h:1189
Pass interface - Implemented by all 'passes'.
Definition: Pass.h:94
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address space zero).
Definition: DerivedTypes.h:662
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:111
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
Definition: Analysis.h:114
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: Analysis.h:117
void abandon()
Mark an analysis as abandoned.
Definition: Analysis.h:164
Resume the propagation of an exception.
Return a value (possibly void), from a function.
static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, InsertPosition InsertBefore=nullptr)
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:436
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:368
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:503
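A minimal sketch of the insert/count idiom these SmallPtrSet members support, here deduplicating instructions visited during one pass; markFirstVisit is a hypothetical helper:

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

static bool markFirstVisit(SmallPtrSet<const Instruction *, 16> &Seen,
                           const Instruction *I) { // hypothetical helper
  // insert() returns {iterator, bool}; the bool is true only on the
  // first insertion, so repeat visits are filtered out cheaply.
  return Seen.insert(I).second;
}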
bool empty() const
Definition: SmallVector.h:95
size_t size() const
Definition: SmallVector.h:92
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' parameter.
Definition: SmallVector.h:587
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:951
void reserve(size_type N)
Definition: SmallVector.h:677
void resize(size_type N)
Definition: SmallVector.h:652
void push_back(const T &Elt)
Definition: SmallVector.h:427
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1210
This pass performs the global (interprocedural) stack safety analysis (new pass manager).
bool stackAccessIsSafe(const Instruction &I) const
bool isSafe(const AllocaInst &AI) const
An instruction for storing to memory.
Definition: Instructions.h:290
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Definition: StringRef.h:50
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition: StringRef.h:250
constexpr bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:134
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition: StringRef.h:131
Class to represent struct types.
Definition: DerivedTypes.h:216
static StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
Definition: Type.cpp:361
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
AttributeList getAttrList(LLVMContext *C, ArrayRef< unsigned > ArgNos, bool Signed, bool Ret=false, AttributeList AL=AttributeList()) const
TinyPtrVector - This class is specialized for cases where there are normally 0 or 1 element in a vector, but is general enough to go beyond that when required.
Definition: TinyPtrVector.h:29
EltTy front() const
bool empty() const
unsigned size() const
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
bool isAndroidVersionLT(unsigned Major) const
Definition: Triple.h:771
bool isThumb() const
Tests whether the target is Thumb (little and big endian).
Definition: Triple.h:852
bool isDriverKit() const
Is this an Apple DriverKit triple.
Definition: Triple.h:553
bool isOSNetBSD() const
Definition: Triple.h:576
bool isAndroid() const
Tests whether the target is Android.
Definition: Triple.h:769
bool isMIPS64() const
Tests whether the target is MIPS 64-bit (little and big endian).
Definition: Triple.h:943
@ aarch64_be
Definition: Triple.h:52
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition: Triple.h:373
bool isLoongArch64() const
Tests whether the target is 64-bit LoongArch.
Definition: Triple.h:932
EnvironmentType getEnvironment() const
Get the parsed environment type of this triple.
Definition: Triple.h:390
bool isMIPS32() const
Tests whether the target is MIPS 32-bit (little and big endian).
Definition: Triple.h:938
bool isOSWindows() const
Tests whether the OS is Windows.
Definition: Triple.h:624
@ DXContainer
Definition: Triple.h:301
@ UnknownObjectFormat
Definition: Triple.h:298
bool isARM() const
Tests whether the target is ARM (little and big endian).
Definition: Triple.h:857
bool isOSLinux() const
Tests whether the OS is Linux.
Definition: Triple.h:678
bool isAMDGPU() const
Definition: Triple.h:847
bool isMacOSX() const
Is this a Mac OS X triple.
Definition: Triple.h:522
bool isOSFreeBSD() const
Definition: Triple.h:584
bool isOSEmscripten() const
Tests whether the OS is Emscripten.
Definition: Triple.h:698
bool isWatchOS() const
Is this an Apple watchOS triple.
Definition: Triple.h:541
bool isiOS() const
Is this an iOS triple.
Definition: Triple.h:531
bool isPS() const
Tests whether the target is the PS4 or PS5 platform.
Definition: Triple.h:766
bool isOSFuchsia() const
Definition: Triple.h:588
Twine - A lightweight data structure for efficiently representing the concatenation of temporary values as strings.
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
static Type * getVoidTy(LLVMContext &C)
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition: Type.h:298
static IntegerType * getInt32Ty(LLVMContext &C)
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
op_range operands()
Definition: User.h:242
Value * getOperand(unsigned i) const
Definition: User.h:169
static ValueAsMetadata * get(Value *V)
Definition: Metadata.cpp:501
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:534
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:383
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Definition: Type.cpp:664
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:202
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:171
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition: ilist_node.h:32
self_iterator getIterator()
Definition: ilist_node.h:132
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:353
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
This file contains the declaration of the Comdat class, which represents a single COMDAT in LLVM.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
void getInterestingMemoryOperands(Module &M, Instruction *I, SmallVectorImpl< InterestingMemoryOperand > &Interesting)
Collect the instruction's memory operands that need to be instrumented.
void instrumentAddress(Module &M, IRBuilder<> &IRB, Instruction *OrigIns, Instruction *InsertBefore, Value *Addr, MaybeAlign Alignment, uint32_t TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls, bool Recover, int AsanScale, int AsanOffset)
Instrument the memory operand Addr.
uint64_t getRedzoneSizeForGlobal(int AsanScale, uint64_t SizeInBytes)
Given SizeInBytes of the value to be instrumented, returns the corresponding redzone size.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:121
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=std::nullopt)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:1539
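A minimal sketch of Intrinsic::getDeclaration in use; llvm.donothing takes no overload types, so the Tys list is left at its default. The helper name callDoNothing is illustrative:

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
using namespace llvm;

static void callDoNothing(Module &M, IRBuilder<> &IRB) { // hypothetical helper
  // Creates the declaration on first use and reuses it afterwards.
  Function *F = Intrinsic::getDeclaration(&M, Intrinsic::donothing);
  IRB.CreateCall(F);
}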
@ S_CSTRING_LITERALS
S_CSTRING_LITERALS - Section with literal C strings.
Definition: MachO.h:131
@ OB
OB - OneByte - Set if this instruction has a one byte opcode.
Definition: X86BaseInfo.h:732
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to the ValuesClass constructor.
Definition: CommandLine.h:711
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
uint64_t getAllocaSizeInBytes(const AllocaInst &AI)
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
void ReplaceInstWithInst(BasicBlock *BB, BasicBlock::iterator &BI, Instruction *I)
Replace the instruction specified by BI with the instruction specified by I.
@ Offset
Definition: DWP.cpp:480
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1722
SmallVector< uint8_t, 64 > GetShadowBytesAfterScope(const SmallVectorImpl< ASanStackVariableDescription > &Vars, const ASanStackFrameLayout &Layout)
AllocaInst * findAllocaForValue(Value *V, bool OffsetZero=false)
Returns the unique alloca the value comes from, or nullptr.
@ Done
Definition: Threading.h:61
Function * createSanitizerCtor(Module &M, StringRef CtorName)
Creates sanitizer constructor function.
AsanDetectStackUseAfterReturnMode
Mode of ASan detect stack use after return.
@ Always
Always detect stack use after return.
@ Never
Never detect stack use after return.
@ Runtime
Detect stack use after return unless it is disabled at runtime (ASAN_OPTIONS=detect_stack_use_after_return=0).
GlobalVariable * createPrivateGlobalForString(Module &M, StringRef Str, bool AllowMerging, const char *NamePrefix="")
DenseMap< BasicBlock *, ColorVector > colorEHFunclets(Function &F)
If an EH funclet personality is in use (see isFuncletEHPersonality), this will recompute which blocks are in which funclet.
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments, pointer casts, or llvm.threadlocal.address from the specified value, returning the original object being addressed.
Op::Description Desc
bool isAllocaPromotable(const AllocaInst *AI)
Return true if this alloca is legal for promotion.
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch, catchpad/ret, and cleanuppad/ret.
SmallString< 64 > ComputeASanStackFrameDescription(const SmallVectorImpl< ASanStackVariableDescription > &Vars)
SmallVector< uint8_t, 64 > GetShadowBytes(const SmallVectorImpl< ASanStackVariableDescription > &Vars, const ASanStackFrameLayout &Layout)
int countr_zero(T Val)
Count the number of zero bits from the least significant bit up to the most significant, stopping at the first 1.
Definition: bit.h:215
FunctionCallee declareSanitizerInitFunction(Module &M, StringRef InitName, ArrayRef< Type * > InitArgTypes, bool Weak=false)
std::string getUniqueModuleId(Module *M)
Produce a unique identifier for this module by taking the MD5 sum of the names of the module's strong external symbols that are not comdat members.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:291
std::pair< Function *, FunctionCallee > createSanitizerCtorAndInitFunctions(Module &M, StringRef CtorName, StringRef InitName, ArrayRef< Type * > InitArgTypes, ArrayRef< Value * > InitArgs, StringRef VersionCheckName=StringRef(), bool Weak=false)
Creates sanitizer constructor function, and calls sanitizer's init function from it.
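A minimal sketch of the module-constructor pattern this helper enables, paired with appendToGlobalCtors (listed further below); the "my.module_ctor" and "__my_init" names are illustrative assumptions:

#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
using namespace llvm;

static void installModuleCtor(Module &M) { // hypothetical helper
  // Emits a ctor that simply calls @__my_init with no arguments.
  auto [Ctor, InitFn] = createSanitizerCtorAndInitFunctions(
      M, "my.module_ctor", "__my_init", /*InitArgTypes=*/{},
      /*InitArgs=*/{});
  (void)InitFn;
  appendToGlobalCtors(M, Ctor, /*Priority=*/1);
}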
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
void SplitBlockAndInsertIfThenElse(Value *Cond, BasicBlock::iterator SplitBefore, Instruction **ThenTerm, Instruction **ElseTerm, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr)
SplitBlockAndInsertIfThenElse is similar to SplitBlockAndInsertIfThen, but also creates the ElseBlock.
void SplitBlockAndInsertForEachLane(ElementCount EC, Type *IndexTy, Instruction *InsertBefore, std::function< void(IRBuilderBase &, Value *)> Func)
Utility function for performing a given action on each lane of a vector with EC elements.
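A minimal sketch of per-lane expansion with SplitBlockAndInsertForEachLane, extracting each pointer of a vector-of-pointers so it can be checked one lane at a time; instrumentVectorLanes is a hypothetical helper:

#include "llvm/IR/IRBuilder.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
using namespace llvm;

static void instrumentVectorLanes(VectorType *VTy, Value *VecOfPtrs,
                                  Instruction *InsertBefore) { // hypothetical
  Type *IndexTy = Type::getInt64Ty(InsertBefore->getContext());
  SplitBlockAndInsertForEachLane(
      VTy->getElementCount(), IndexTy, InsertBefore,
      [&](IRBuilderBase &IRB, Value *Index) {
        // One scalar pointer per lane; a real pass would check it here.
        Value *Ptr = IRB.CreateExtractElement(VecOfPtrs, Index);
        (void)Ptr;
      });
}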
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
AsanDtorKind
Types of ASan module destructors supported.
@ None
Do not emit any destructors for ASan.
ASanStackFrameLayout ComputeASanStackFrameLayout(SmallVectorImpl< ASanStackVariableDescription > &Vars, uint64_t Granularity, uint64_t MinHeaderSize)
void cantFail(Error Err, const char *Msg=nullptr)
Report a fatal error if Err is a failure value.
Definition: Error.h:756
void appendToCompilerUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.compiler.used list.
static const int kAsanStackUseAfterReturnMagic
void setGlobalVariableLargeSection(const Triple &TargetTriple, GlobalVariable &GV)
@ Dynamic
Denotes mode unknown at compile time.
void appendToGlobalCtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Append F to the list of global ctors of module M with the given Priority.
Definition: ModuleUtils.cpp:74
Align assumeAligned(uint64_t Value)
Treats the value 0 as a 1, so Align is always at least 1.
Definition: Alignment.h:111
iterator_range< df_iterator< T > > depth_first(const T &G)
Instruction * SplitBlockAndInsertIfThen(Value *Cond, BasicBlock::iterator SplitBefore, bool Unreachable, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, BasicBlock *ThenBlock=nullptr)
Split the containing block at the specified instruction - everything before SplitBefore stays in the old basic block, and the rest of the instructions in the BB are moved to a new block.
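A minimal sketch pairing SplitBlockAndInsertIfThen with MDBuilder::createUnlikelyBranchWeights (listed earlier) so the report path is laid out cold; Cmp is an already-computed i1, and emitColdReport/ReportFn are hypothetical names:

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
using namespace llvm;

static void emitColdReport(Value *Cmp, Instruction *InsertBefore,
                           FunctionCallee ReportFn) { // hypothetical callee
  MDBuilder MDB(Cmp->getContext());
  // The 'then' block becomes the rarely-taken side of the new branch.
  Instruction *ThenTerm = SplitBlockAndInsertIfThen(
      Cmp, InsertBefore->getIterator(), /*Unreachable=*/false,
      MDB.createUnlikelyBranchWeights());
  IRBuilder<> IRB(ThenTerm);
  IRB.CreateCall(ReportFn);
}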
AsanCtorKind
Types of ASan module constructors supported.
void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI, const TargetLibraryInfo *TLI)
Given a CallInst, check if it calls a string function known to CodeGen, and mark it with NoBuiltin if so.
Definition: Local.cpp:4103
void appendToUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.used list.
void appendToGlobalDtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Same as appendToGlobalCtors(), but for global dtors.
Definition: ModuleUtils.cpp:78
bool checkIfAlreadyInstrumented(Module &M, StringRef Flag)
Check if the module has the flag attached; if not, add the flag.
void getAddressSanitizerParams(const Triple &TargetTriple, int LongSize, bool IsKasan, uint64_t *ShadowBase, int *MappingScale, bool *OrShadowOffset)
std::string demangle(std::string_view MangledName)
Attempt to demangle a string using different demangling schemes.
Definition: Demangle.cpp:20
bool replaceDbgDeclare(Value *Address, Value *NewAddress, DIBuilder &Builder, uint8_t DIExprFlags, int Offset)
Replaces llvm.dbg.declare instruction when the address it describes is replaced with a new value.
Definition: Local.cpp:2132
#define N
ASanAccessInfo(int32_t Packed)
AsanDetectStackUseAfterReturnMode UseAfterReturn
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
uint64_t value() const
This is a hole in the type system and should not be abused.
Definition: Alignment.h:85
Description of the encoding of one expression Op.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
Definition: Alignment.h:141
Various options to control the behavior of getObjectSize.
bool RoundToAlign
Whether to round the result up to the alignment of allocas, byval arguments, and global variables.
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition: PassManager.h:69
SizeOffsetAPInt - Used by ObjectSizeOffsetVisitor, which works with APInts.
bool bothKnown() const