Bug Summary

File: projects/compiler-rt/lib/hwasan/../sanitizer_common/sanitizer_allocator_primary32.h
Warning: line 200, column 20
Division by zero
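
What the path below shows, in short: GetBlockBegin() computes the chunk size via ClassIdToSize(GetSizeClass(p)); SizeClassMap::Size() returns zero for class 0 (step 12), that zero propagates through ClassIdToSize() (step 14) into 'size' (step 16), and the 32-bit division 'offset / (u32)size' at line 200 then divides by zero (step 17). A minimal, self-contained C++ sketch of that arithmetic follows; the stand-in functions and the kMinSize value are assumptions for illustration, not the sanitizer sources.

#include <cstdint>
#include <cstdio>

using uptr = uintptr_t;
using u32  = uint32_t;

// Hypothetical stand-ins; kMinSize == 16 is an assumption for illustration.
static uptr Size(uptr class_id) { return 16 * class_id; }            // step 12: Size(0) == 0
static uptr ClassIdToSize(uptr class_id) { return Size(class_id); }  // step 14: returns zero

int main() {
  uptr beg = 0x100000;                  // region begin
  uptr mem = 0x100040;                  // pointer inside that region
  uptr class_id = 0;                    // the region's size class is 0
  uptr size = ClassIdToSize(class_id);  // step 16: 'size' initialized to 0
  u32 offset = static_cast<u32>(mem - beg);
  if (size == 0) {                      // without a guard like this, the next line is undefined
    std::printf("size == 0: the division at line 200 would divide by zero\n");
    return 1;
  }
  u32 n = offset / static_cast<u32>(size);  // step 17: the flagged 32-bit division
  std::printf("chunk index %u\n", n);
  return 0;
}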

Annotated Source Code


clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name hwasan_allocator.cc -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -ffreestanding -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-8/lib/clang/8.0.0 -D HWASAN_WITH_INTERCEPTORS=1 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-8~svn350071/build-llvm/projects/compiler-rt/lib/hwasan -I /build/llvm-toolchain-snapshot-8~svn350071/projects/compiler-rt/lib/hwasan -I /build/llvm-toolchain-snapshot-8~svn350071/build-llvm/include -I /build/llvm-toolchain-snapshot-8~svn350071/include -I /build/llvm-toolchain-snapshot-8~svn350071/projects/compiler-rt/lib/hwasan/.. -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/include/clang/8.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-8/lib/clang/8.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O3 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -Wno-unused-parameter -Wno-variadic-macros -Wno-non-virtual-dtor -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-8~svn350071/build-llvm/projects/compiler-rt/lib/hwasan -fdebug-prefix-map=/build/llvm-toolchain-snapshot-8~svn350071=. -ferror-limit 19 -fmessage-length 0 -fvisibility hidden -fvisibility-inlines-hidden -fno-builtin -fno-rtti -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-12-27-042839-1215-1 -x c++ /build/llvm-toolchain-snapshot-8~svn350071/projects/compiler-rt/lib/hwasan/hwasan_allocator.cc -faddrsig

/build/llvm-toolchain-snapshot-8~svn350071/projects/compiler-rt/lib/hwasan/hwasan_allocator.cc

1//===-- hwasan_allocator.cc ------------------------- ---------------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file is a part of HWAddressSanitizer.
11//
12// HWAddressSanitizer allocator.
13//===----------------------------------------------------------------------===//
14
15#include "sanitizer_common/sanitizer_atomic.h"
16#include "sanitizer_common/sanitizer_errno.h"
17#include "sanitizer_common/sanitizer_stackdepot.h"
18#include "hwasan.h"
19#include "hwasan_allocator.h"
20#include "hwasan_mapping.h"
21#include "hwasan_thread.h"
22#include "hwasan_report.h"
23
24namespace __hwasan {
25
26static Allocator allocator;
27static AllocatorCache fallback_allocator_cache;
28static SpinMutex fallback_mutex;
29static atomic_uint8_t hwasan_allocator_tagging_enabled;
30
31static const tag_t kFallbackAllocTag = 0xBB;
32static const tag_t kFallbackFreeTag = 0xBC;
33
34enum RightAlignMode {
35 kRightAlignNever,
36 kRightAlignSometimes,
37 kRightAlignAlways
38};
39
40// These two variables are initialized from flags()->malloc_align_right
41// in HwasanAllocatorInit and are never changed afterwards.
42static RightAlignMode right_align_mode = kRightAlignNever;
43static bool right_align_8 = false;
44
45// Initialized in HwasanAllocatorInit, and never changed.
46static ALIGNED(16) u8 tail_magic[kShadowAlignment];
47
48bool HwasanChunkView::IsAllocated() const {
49 return metadata_ && metadata_->alloc_context_id && metadata_->requested_size;
50}
51
52// Aligns the 'addr' right to the granule boundary.
53static uptr AlignRight(uptr addr, uptr requested_size) {
54 uptr tail_size = requested_size % kShadowAlignment;
55 if (!tail_size) return addr;
56 if (right_align_8)
57 return tail_size > 8 ? addr : addr + 8;
58 return addr + kShadowAlignment - tail_size;
59}
60
61uptr HwasanChunkView::Beg() const {
62 if (metadata_ && metadata_->right_aligned)
63 return AlignRight(block_, metadata_->requested_size);
64 return block_;
65}
66uptr HwasanChunkView::End() const {
67 return Beg() + UsedSize();
68}
69uptr HwasanChunkView::UsedSize() const {
70 return metadata_->requested_size;
71}
72u32 HwasanChunkView::GetAllocStackId() const {
73 return metadata_->alloc_context_id;
74}
75
76uptr HwasanChunkView::ActualSize() const {
77 return allocator.GetActuallyAllocatedSize(reinterpret_cast<void *>(block_));
78}
79
80bool HwasanChunkView::FromSmallHeap() const {
81 return allocator.FromPrimary(reinterpret_cast<void *>(block_));
82}
83
84void GetAllocatorStats(AllocatorStatCounters s) {
85 allocator.GetStats(s);
86}
87
88void HwasanAllocatorInit() {
89 atomic_store_relaxed(&hwasan_allocator_tagging_enabled,
90 !flags()->disable_allocator_tagging);
91 SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
92 allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
93 switch (flags()->malloc_align_right) {
94 case 0: break;
95 case 1:
96 right_align_mode = kRightAlignSometimes;
97 right_align_8 = false;
98 break;
99 case 2:
100 right_align_mode = kRightAlignAlways;
101 right_align_8 = false;
102 break;
103 case 8:
104 right_align_mode = kRightAlignSometimes;
105 right_align_8 = true;
106 break;
107 case 9:
108 right_align_mode = kRightAlignAlways;
109 right_align_8 = true;
110 break;
111 default:
112 Report("ERROR: unsupported value of malloc_align_right flag: %d\n",
113 flags()->malloc_align_right);
114 Die();
115 }
116 for (uptr i = 0; i < kShadowAlignment; i++)
117 tail_magic[i] = GetCurrentThread()->GenerateRandomTag();
118}
119
120void AllocatorSwallowThreadLocalCache(AllocatorCache *cache) {
121 allocator.SwallowCache(cache);
122}
123
124static uptr TaggedSize(uptr size) {
125 if (!size) size = 1;
126 uptr new_size = RoundUpTo(size, kShadowAlignment);
127 CHECK_GE(new_size, size);
128 return new_size;
129}
130
131static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
132 bool zeroise) {
133 if (orig_size > kMaxAllowedMallocSize) {
134 if (AllocatorMayReturnNull()) {
135 Report("WARNING: HWAddressSanitizer failed to allocate 0x%zx bytes\n",
136 orig_size);
137 return nullptr;
138 }
139 ReportAllocationSizeTooBig(orig_size, kMaxAllowedMallocSize, stack);
140 }
141
142 alignment = Max(alignment, kShadowAlignment);
143 uptr size = TaggedSize(orig_size);
144 Thread *t = GetCurrentThread();
145 void *allocated;
146 if (t) {
147 allocated = allocator.Allocate(t->allocator_cache(), size, alignment);
148 } else {
149 SpinMutexLock l(&fallback_mutex);
150 AllocatorCache *cache = &fallback_allocator_cache;
151 allocated = allocator.Allocate(cache, size, alignment);
152 }
153 if (UNLIKELY(!allocated)) {
154 SetAllocatorOutOfMemory();
155 if (AllocatorMayReturnNull())
156 return nullptr;
157 ReportOutOfMemory(size, stack);
158 }
159 Metadata *meta =
160 reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
161 meta->requested_size = static_cast<u32>(orig_size);
162 meta->alloc_context_id = StackDepotPut(*stack);
163 meta->right_aligned = false;
164 if (zeroise) {
165 internal_memset(allocated, 0, size);
166 } else if (flags()->max_malloc_fill_size > 0) {
167 uptr fill_size = Min(size, (uptr)flags()->max_malloc_fill_size);
168 internal_memset(allocated, flags()->malloc_fill_byte, fill_size);
169 }
170 if (!right_align_mode)
171 internal_memcpy(reinterpret_cast<u8 *>(allocated) + orig_size, tail_magic,
172 size - orig_size);
173
174 void *user_ptr = allocated;
175 if (flags()->tag_in_malloc &&
176 atomic_load_relaxed(&hwasan_allocator_tagging_enabled))
177 user_ptr = (void *)TagMemoryAligned(
178 (uptr)user_ptr, size, t ? t->GenerateRandomTag() : kFallbackAllocTag);
179
180 if ((orig_size % kShadowAlignment) && (alignment <= kShadowAlignment) &&
181 right_align_mode) {
182 uptr as_uptr = reinterpret_cast<uptr>(user_ptr);
183 if (right_align_mode == kRightAlignAlways ||
184 GetTagFromPointer(as_uptr) & 1) { // use a tag bit as a random bit.
185 user_ptr = reinterpret_cast<void *>(AlignRight(as_uptr, orig_size));
186 meta->right_aligned = 1;
187 }
188 }
189
190 HWASAN_MALLOC_HOOK(user_ptr, size);
191 return user_ptr;
192}
193
194static bool PointerAndMemoryTagsMatch(void *tagged_ptr) {
195 CHECK(tagged_ptr);
196 tag_t ptr_tag = GetTagFromPointer(reinterpret_cast<uptr>(tagged_ptr));
197 tag_t mem_tag = *reinterpret_cast<tag_t *>(
198 MemToShadow(reinterpret_cast<uptr>(UntagPtr(tagged_ptr))));
199 return ptr_tag == mem_tag;
200}
201
202void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
203 CHECK(tagged_ptr);
204 HWASAN_FREE_HOOK(tagged_ptr);
205
206 if (!PointerAndMemoryTagsMatch(tagged_ptr))
207 ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
208
209 void *untagged_ptr = UntagPtr(tagged_ptr);
210 void *aligned_ptr = reinterpret_cast<void *>(
211 RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
212 Metadata *meta =
213 reinterpret_cast<Metadata *>(allocator.GetMetaData(aligned_ptr));
214 uptr orig_size = meta->requested_size;
215 u32 free_context_id = StackDepotPut(*stack);
216 u32 alloc_context_id = meta->alloc_context_id;
217
218 // Check tail magic.
219 uptr tagged_size = TaggedSize(orig_size);
220 if (flags()->free_checks_tail_magic && !right_align_mode && orig_size) {
221 uptr tail_size = tagged_size - orig_size;
222 CHECK_LT(tail_size, kShadowAlignment);
223 void *tail_beg = reinterpret_cast<void *>(
224 reinterpret_cast<uptr>(aligned_ptr) + orig_size);
225 if (tail_size && internal_memcmp(tail_beg, tail_magic, tail_size))
226 ReportTailOverwritten(stack, reinterpret_cast<uptr>(tagged_ptr),
227 orig_size, tail_size, tail_magic);
228 }
229
230 meta->requested_size = 0;
231 meta->alloc_context_id = 0;
232 // This memory will not be reused by anyone else, so we are free to keep it
233 // poisoned.
234 Thread *t = GetCurrentThread();
235 if (flags()->max_free_fill_size > 0) {
236 uptr fill_size =
237 Min(TaggedSize(orig_size), (uptr)flags()->max_free_fill_size);
238 internal_memset(aligned_ptr, flags()->free_fill_byte, fill_size);
239 }
240 if (flags()->tag_in_free &&
241 atomic_load_relaxed(&hwasan_allocator_tagging_enabled))
242 TagMemoryAligned(reinterpret_cast<uptr>(aligned_ptr), TaggedSize(orig_size),
243 t ? t->GenerateRandomTag() : kFallbackFreeTag);
244 if (t) {
245 allocator.Deallocate(t->allocator_cache(), aligned_ptr);
246 if (auto *ha = t->heap_allocations())
247 ha->push({reinterpret_cast<uptr>(tagged_ptr), alloc_context_id,
248 free_context_id, static_cast<u32>(orig_size)});
249 } else {
250 SpinMutexLock l(&fallback_mutex);
251 AllocatorCache *cache = &fallback_allocator_cache;
252 allocator.Deallocate(cache, aligned_ptr);
253 }
254}
255
256void *HwasanReallocate(StackTrace *stack, void *tagged_ptr_old, uptr new_size,
257 uptr alignment) {
258 if (!PointerAndMemoryTagsMatch(tagged_ptr_old))
259 ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr_old));
260
261 void *tagged_ptr_new =
262 HwasanAllocate(stack, new_size, alignment, false /*zeroise*/);
263 if (tagged_ptr_old && tagged_ptr_new) {
264 void *untagged_ptr_old = UntagPtr(tagged_ptr_old);
265 Metadata *meta =
266 reinterpret_cast<Metadata *>(allocator.GetMetaData(untagged_ptr_old));
267 internal_memcpy(UntagPtr(tagged_ptr_new), untagged_ptr_old,
268 Min(new_size, static_cast<uptr>(meta->requested_size)));
269 HwasanDeallocate(stack, tagged_ptr_old);
270 }
271 return tagged_ptr_new;
272}
273
274void *HwasanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
275 if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
276 if (AllocatorMayReturnNull())
277 return nullptr;
278 ReportCallocOverflow(nmemb, size, stack);
279 }
280 return HwasanAllocate(stack, nmemb * size, sizeof(u64), true);
281}
282
283HwasanChunkView FindHeapChunkByAddress(uptr address) {
284 void *block = allocator.GetBlockBegin(reinterpret_cast<void*>(address));
1. Calling 'CombinedAllocator::GetBlockBegin'
285 if (!block)
286 return HwasanChunkView();
287 Metadata *metadata =
288 reinterpret_cast<Metadata*>(allocator.GetMetaData(block));
289 return HwasanChunkView(reinterpret_cast<uptr>(block), metadata);
290}
291
292static uptr AllocationSize(const void *tagged_ptr) {
293 const void *untagged_ptr = UntagPtr(tagged_ptr);
294 if (!untagged_ptr) return 0;
295 const void *beg = allocator.GetBlockBegin(untagged_ptr);
296 Metadata *b = (Metadata *)allocator.GetMetaData(untagged_ptr);
297 if (b->right_aligned) {
298 if (beg != reinterpret_cast<void *>(RoundDownTo(
299 reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment)))
300 return 0;
301 } else {
302 if (beg != untagged_ptr) return 0;
303 }
304 return b->requested_size;
305}
306
307void *hwasan_malloc(uptr size, StackTrace *stack) {
308 return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
309}
310
311void *hwasan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
312 return SetErrnoOnNull(HwasanCalloc(stack, nmemb, size));
313}
314
315void *hwasan_realloc(void *ptr, uptr size, StackTrace *stack) {
316 if (!ptr)
317 return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
318 if (size == 0) {
319 HwasanDeallocate(stack, ptr);
320 return nullptr;
321 }
322 return SetErrnoOnNull(HwasanReallocate(stack, ptr, size, sizeof(u64)));
323}
324
325void *hwasan_valloc(uptr size, StackTrace *stack) {
326 return SetErrnoOnNull(
327 HwasanAllocate(stack, size, GetPageSizeCached(), false));
328}
329
330void *hwasan_pvalloc(uptr size, StackTrace *stack) {
331 uptr PageSize = GetPageSizeCached();
332 if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
333 errno = errno_ENOMEM;
334 if (AllocatorMayReturnNull())
335 return nullptr;
336 ReportPvallocOverflow(size, stack);
337 }
338 // pvalloc(0) should allocate one page.
339 size = size ? RoundUpTo(size, PageSize) : PageSize;
340 return SetErrnoOnNull(HwasanAllocate(stack, size, PageSize, false));
341}
342
343void *hwasan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
344 if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
345 errno = errno_EINVAL;
346 if (AllocatorMayReturnNull())
347 return nullptr;
348 ReportInvalidAlignedAllocAlignment(size, alignment, stack);
349 }
350 return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
351}
352
353void *hwasan_memalign(uptr alignment, uptr size, StackTrace *stack) {
354 if (UNLIKELY(!IsPowerOfTwo(alignment))) {
355 errno = errno_EINVAL;
356 if (AllocatorMayReturnNull())
357 return nullptr;
358 ReportInvalidAllocationAlignment(alignment, stack);
359 }
360 return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
361}
362
363int hwasan_posix_memalign(void **memptr, uptr alignment, uptr size,
364 StackTrace *stack) {
365 if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
366 if (AllocatorMayReturnNull())
367 return errno_EINVAL;
368 ReportInvalidPosixMemalignAlignment(alignment, stack);
369 }
370 void *ptr = HwasanAllocate(stack, size, alignment, false);
371 if (UNLIKELY(!ptr))
372 // OOM error is already taken care of by HwasanAllocate.
373 return errno_ENOMEM;
374 CHECK(IsAligned((uptr)ptr, alignment));
375 *memptr = ptr;
376 return 0;
377}
378
379} // namespace __hwasan
380
381using namespace __hwasan;
382
383void __hwasan_enable_allocator_tagging() {
384 atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 1);
385}
386
387void __hwasan_disable_allocator_tagging() {
388 atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 0);
389}
390
391uptr __sanitizer_get_current_allocated_bytes() {
392 uptr stats[AllocatorStatCount];
393 allocator.GetStats(stats);
394 return stats[AllocatorStatAllocated];
395}
396
397uptr __sanitizer_get_heap_size() {
398 uptr stats[AllocatorStatCount];
399 allocator.GetStats(stats);
400 return stats[AllocatorStatMapped];
401}
402
403uptr __sanitizer_get_free_bytes() { return 1; }
404
405uptr __sanitizer_get_unmapped_bytes() { return 1; }
406
407uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
408
409int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }
410
411uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }
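
The allocation path above rounds every request up to a whole tagging granule (TaggedSize) and, when right alignment is enabled, shifts the user pointer so the object ends on a granule boundary (AlignRight). A short worked example of that arithmetic, re-typed from the listing under the assumption kShadowAlignment == 16 (illustrative, not the hwasan headers):

#include <cstdint>
#include <cstdio>

using uptr = uintptr_t;

constexpr uptr kShadowAlignment = 16;  // assumption: one tagging granule

static uptr RoundUpTo(uptr x, uptr a) { return (x + a - 1) & ~(a - 1); }

// Same logic as TaggedSize() above.
static uptr TaggedSize(uptr size) {
  if (!size) size = 1;
  return RoundUpTo(size, kShadowAlignment);
}

// Same logic as AlignRight() above, with right_align_8 == false.
static uptr AlignRight(uptr addr, uptr requested_size) {
  uptr tail_size = requested_size % kShadowAlignment;
  if (!tail_size) return addr;
  return addr + kShadowAlignment - tail_size;
}

int main() {
  uptr addr = 0x1000, requested = 13;
  // 13 bytes occupy one 16-byte granule; the 3 spare bytes are either filled
  // with tail_magic (left-aligned case) or skipped by moving the user pointer
  // to 0x1003 so the object ends exactly on the granule boundary.
  std::printf("TaggedSize(13)        = %zu\n", static_cast<size_t>(TaggedSize(requested)));          // 16
  std::printf("AlignRight(0x1000,13) = 0x%zx\n", static_cast<size_t>(AlignRight(addr, requested)));  // 0x1003
  return 0;
}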

/build/llvm-toolchain-snapshot-8~svn350071/projects/compiler-rt/lib/hwasan/../sanitizer_common/sanitizer_allocator_combined.h

1//===-- sanitizer_allocator_combined.h --------------------------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// Part of the Sanitizer Allocator.
11//
12//===----------------------------------------------------------------------===//
13#ifndef SANITIZER_ALLOCATOR_H
14#error This file must be included inside sanitizer_allocator.h
15#endif
16
17// This class implements a complete memory allocator by using two
18// internal allocators:
19// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
20// When allocating 2^x bytes it should return 2^x aligned chunk.
21// PrimaryAllocator is used via a local AllocatorCache.
22// SecondaryAllocator can allocate anything, but is not efficient.
23template <class PrimaryAllocator, class AllocatorCache,
24 class SecondaryAllocator,
25 typename AddressSpaceViewTy = LocalAddressSpaceView> // NOLINT
26class CombinedAllocator {
27 public:
28 using AddressSpaceView = AddressSpaceViewTy;
29 static_assert(is_same<AddressSpaceView,
30 typename PrimaryAllocator::AddressSpaceView>::value,
31 "PrimaryAllocator is using wrong AddressSpaceView");
32 static_assert(is_same<AddressSpaceView,
33 typename SecondaryAllocator::AddressSpaceView>::value,
34 "SecondaryAllocator is using wrong AddressSpaceView");
35
36 void InitLinkerInitialized(s32 release_to_os_interval_ms) {
37 primary_.Init(release_to_os_interval_ms);
38 secondary_.InitLinkerInitialized();
39 stats_.InitLinkerInitialized();
40 }
41
42 void Init(s32 release_to_os_interval_ms) {
43 primary_.Init(release_to_os_interval_ms);
44 secondary_.Init();
45 stats_.Init();
46 }
47
48 void *Allocate(AllocatorCache *cache, uptr size, uptr alignment) {
49 // Returning 0 on malloc(0) may break a lot of code.
50 if (size == 0)
51 size = 1;
52 if (size + alignment < size) {
53 Report("WARNING: %s: CombinedAllocator allocation overflow: "
54 "0x%zx bytes with 0x%zx alignment requested\n",
55 SanitizerToolName, size, alignment);
56 return nullptr;
57 }
58 uptr original_size = size;
59 // If alignment requirements are to be fulfilled by the frontend allocator
60 // rather than by the primary or secondary, passing an alignment lower than
61 // or equal to 8 will prevent any further rounding up, as well as the later
62 // alignment check.
63 if (alignment > 8)
64 size = RoundUpTo(size, alignment);
65 // The primary allocator should return a 2^x aligned allocation when
66 // requested 2^x bytes, hence using the rounded up 'size' when being
67 // serviced by the primary (this is no longer true when the primary is
68 // using a non-fixed base address). The secondary takes care of the
69 // alignment without such requirement, and allocating 'size' would use
70 // extraneous memory, so we employ 'original_size'.
71 void *res;
72 if (primary_.CanAllocate(size, alignment))
73 res = cache->Allocate(&primary_, primary_.ClassID(size));
74 else
75 res = secondary_.Allocate(&stats_, original_size, alignment);
76 if (alignment > 8)
77 CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
78 return res;
79 }
80
81 s32 ReleaseToOSIntervalMs() const {
82 return primary_.ReleaseToOSIntervalMs();
83 }
84
85 void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
86 primary_.SetReleaseToOSIntervalMs(release_to_os_interval_ms);
87 }
88
89 void ForceReleaseToOS() {
90 primary_.ForceReleaseToOS();
91 }
92
93 void Deallocate(AllocatorCache *cache, void *p) {
94 if (!p) return;
95 if (primary_.PointerIsMine(p))
96 cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
97 else
98 secondary_.Deallocate(&stats_, p);
99 }
100
101 void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
102 uptr alignment) {
103 if (!p)
104 return Allocate(cache, new_size, alignment);
105 if (!new_size) {
106 Deallocate(cache, p);
107 return nullptr;
108 }
109 CHECK(PointerIsMine(p));
110 uptr old_size = GetActuallyAllocatedSize(p);
111 uptr memcpy_size = Min(new_size, old_size);
112 void *new_p = Allocate(cache, new_size, alignment);
113 if (new_p)
114 internal_memcpy(new_p, p, memcpy_size);
115 Deallocate(cache, p);
116 return new_p;
117 }
118
119 bool PointerIsMine(void *p) {
120 if (primary_.PointerIsMine(p))
121 return true;
122 return secondary_.PointerIsMine(p);
123 }
124
125 bool FromPrimary(void *p) {
126 return primary_.PointerIsMine(p);
127 }
128
129 void *GetMetaData(const void *p) {
130 if (primary_.PointerIsMine(p))
131 return primary_.GetMetaData(p);
132 return secondary_.GetMetaData(p);
133 }
134
135 void *GetBlockBegin(const void *p) {
136 if (primary_.PointerIsMine(p))
2. Taking true branch
137 return primary_.GetBlockBegin(p);
3. Calling 'SizeClassAllocator32::GetBlockBegin'
138 return secondary_.GetBlockBegin(p);
139 }
140
141 // This function does the same as GetBlockBegin, but is much faster.
142 // Must be called with the allocator locked.
143 void *GetBlockBeginFastLocked(void *p) {
144 if (primary_.PointerIsMine(p))
145 return primary_.GetBlockBegin(p);
146 return secondary_.GetBlockBeginFastLocked(p);
147 }
148
149 uptr GetActuallyAllocatedSize(void *p) {
150 if (primary_.PointerIsMine(p))
151 return primary_.GetActuallyAllocatedSize(p);
152 return secondary_.GetActuallyAllocatedSize(p);
153 }
154
155 uptr TotalMemoryUsed() {
156 return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
157 }
158
159 void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }
160
161 void InitCache(AllocatorCache *cache) {
162 cache->Init(&stats_);
163 }
164
165 void DestroyCache(AllocatorCache *cache) {
166 cache->Destroy(&primary_, &stats_);
167 }
168
169 void SwallowCache(AllocatorCache *cache) {
170 cache->Drain(&primary_);
171 }
172
173 void GetStats(AllocatorStatCounters s) const {
174 stats_.Get(s);
175 }
176
177 void PrintStats() {
178 primary_.PrintStats();
179 secondary_.PrintStats();
180 }
181
182 // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
183 // introspection API.
184 void ForceLock() {
185 primary_.ForceLock();
186 secondary_.ForceLock();
187 }
188
189 void ForceUnlock() {
190 secondary_.ForceUnlock();
191 primary_.ForceUnlock();
192 }
193
194 // Iterate over all existing chunks.
195 // The allocator must be locked when calling this function.
196 void ForEachChunk(ForEachChunkCallback callback, void *arg) {
197 primary_.ForEachChunk(callback, arg);
198 secondary_.ForEachChunk(callback, arg);
199 }
200
201 private:
202 PrimaryAllocator primary_;
203 SecondaryAllocator secondary_;
204 AllocatorGlobalStats stats_;
205};

/build/llvm-toolchain-snapshot-8~svn350071/projects/compiler-rt/lib/hwasan/../sanitizer_common/sanitizer_allocator_primary32.h

1//===-- sanitizer_allocator_primary32.h -------------------------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// Part of the Sanitizer Allocator.
11//
12//===----------------------------------------------------------------------===//
13#ifndef SANITIZER_ALLOCATOR_H
14#error This file must be included inside sanitizer_allocator.h
15#endif
16
17template<class SizeClassAllocator> struct SizeClassAllocator32LocalCache;
18
19// SizeClassAllocator32 -- allocator for 32-bit address space.
20// This allocator can theoretically be used on 64-bit arch, but there it is less
21// efficient than SizeClassAllocator64.
22//
23// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
24// be returned by MmapOrDie().
25//
26// Region:
27// a result of a single call to MmapAlignedOrDieOnFatalError(kRegionSize,
28// kRegionSize).
29// Since the regions are aligned by kRegionSize, there are exactly
30// kNumPossibleRegions possible regions in the address space and so we keep
31// a ByteMap possible_regions to store the size classes of each Region.
32// 0 size class means the region is not used by the allocator.
33//
34// One Region is used to allocate chunks of a single size class.
35// A Region looks like this:
36// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
37//
38// In order to avoid false sharing the objects of this class should be
39// cache-line aligned.
40
41struct SizeClassAllocator32FlagMasks { // Bit masks.
42 enum {
43 kRandomShuffleChunks = 1,
44 kUseSeparateSizeClassForBatch = 2,
45 };
46};
47
48template <class Params>
49class SizeClassAllocator32 {
50 public:
51 using AddressSpaceView = typename Params::AddressSpaceView;
52 static const uptr kSpaceBeg = Params::kSpaceBeg;
53 static const u64 kSpaceSize = Params::kSpaceSize;
54 static const uptr kMetadataSize = Params::kMetadataSize;
55 typedef typename Params::SizeClassMap SizeClassMap;
56 static const uptr kRegionSizeLog = Params::kRegionSizeLog;
57 typedef typename Params::ByteMap ByteMap;
58 typedef typename Params::MapUnmapCallback MapUnmapCallback;
59
60 static_assert(
61 is_same<typename ByteMap::AddressSpaceView, AddressSpaceView>::value,
62 "AddressSpaceView type mismatch");
63
64 static const bool kRandomShuffleChunks = Params::kFlags &
65 SizeClassAllocator32FlagMasks::kRandomShuffleChunks;
66 static const bool kUseSeparateSizeClassForBatch = Params::kFlags &
67 SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;
68
69 struct TransferBatch {
70 static const uptr kMaxNumCached = SizeClassMap::kMaxNumCachedHint - 2;
71 void SetFromArray(void *batch[], uptr count) {
72 DCHECK_LE(count, kMaxNumCached);
73 count_ = count;
74 for (uptr i = 0; i < count; i++)
75 batch_[i] = batch[i];
76 }
77 uptr Count() const { return count_; }
78 void Clear() { count_ = 0; }
79 void Add(void *ptr) {
80 batch_[count_++] = ptr;
81 DCHECK_LE(count_, kMaxNumCached);
82 }
83 void CopyToArray(void *to_batch[]) const {
84 for (uptr i = 0, n = Count(); i < n; i++)
85 to_batch[i] = batch_[i];
86 }
87
88 // How much memory do we need for a batch containing n elements.
89 static uptr AllocationSizeRequiredForNElements(uptr n) {
90 return sizeof(uptr) * 2 + sizeof(void *) * n;
91 }
92 static uptr MaxCached(uptr size) {
93 return Min(kMaxNumCached, SizeClassMap::MaxCachedHint(size));
94 }
95
96 TransferBatch *next;
97
98 private:
99 uptr count_;
100 void *batch_[kMaxNumCached];
101 };
102
103 static const uptr kBatchSize = sizeof(TransferBatch);
104 COMPILER_CHECK((kBatchSize & (kBatchSize - 1)) == 0);
105 COMPILER_CHECK(kBatchSize == SizeClassMap::kMaxNumCachedHint * sizeof(uptr));
106
107 static uptr ClassIdToSize(uptr class_id) {
108 return (class_id == SizeClassMap::kBatchClassID) ?
8. '?' condition is false
14. Returning zero
109 kBatchSize : SizeClassMap::Size(class_id);
9. Calling 'SizeClassMap::Size'
13. Returning from 'SizeClassMap::Size'
110 }
111
112 typedef SizeClassAllocator32<Params> ThisT;
113 typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache;
114
115 void Init(s32 release_to_os_interval_ms) {
116 possible_regions.Init();
117 internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
118 }
119
120 s32 ReleaseToOSIntervalMs() const {
121 return kReleaseToOSIntervalNever;
122 }
123
124 void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
125 // This is empty here. Currently only implemented in 64-bit allocator.
126 }
127
128 void ForceReleaseToOS() {
129 // Currently implemented in 64-bit allocator only.
130 }
131
132 void *MapWithCallback(uptr size) {
133 void *res = MmapOrDie(size, PrimaryAllocatorName);
134 MapUnmapCallback().OnMap((uptr)res, size);
135 return res;
136 }
137
138 void UnmapWithCallback(uptr beg, uptr size) {
139 MapUnmapCallback().OnUnmap(beg, size);
140 UnmapOrDie(reinterpret_cast<void *>(beg), size);
141 }
142
143 static bool CanAllocate(uptr size, uptr alignment) {
144 return size <= SizeClassMap::kMaxSize &&
145 alignment <= SizeClassMap::kMaxSize;
146 }
147
148 void *GetMetaData(const void *p) {
149 CHECK(PointerIsMine(p));
150 uptr mem = reinterpret_cast<uptr>(p);
151 uptr beg = ComputeRegionBeg(mem);
152 uptr size = ClassIdToSize(GetSizeClass(p));
153 u32 offset = mem - beg;
154 uptr n = offset / (u32)size; // 32-bit division
155 uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
156 return reinterpret_cast<void*>(meta);
157 }
158
159 NOINLINE TransferBatch *AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
160 uptr class_id) {
161 DCHECK_LT(class_id, kNumClasses);
162 SizeClassInfo *sci = GetSizeClassInfo(class_id);
163 SpinMutexLock l(&sci->mutex);
164 if (sci->free_list.empty()) {
165 if (UNLIKELY(!PopulateFreeList(stat, c, sci, class_id)))
166 return nullptr;
167 DCHECK(!sci->free_list.empty());
168 }
169 TransferBatch *b = sci->free_list.front();
170 sci->free_list.pop_front();
171 return b;
172 }
173
174 NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id,
175 TransferBatch *b) {
176 DCHECK_LT(class_id, kNumClasses);
177 CHECK_GT(b->Count(), 0);
178 SizeClassInfo *sci = GetSizeClassInfo(class_id);
179 SpinMutexLock l(&sci->mutex);
180 sci->free_list.push_front(b);
181 }
182
183 bool PointerIsMine(const void *p) {
184 uptr mem = reinterpret_cast<uptr>(p);
185 if (mem < kSpaceBeg || mem >= kSpaceBeg + kSpaceSize)
186 return false;
187 return GetSizeClass(p) != 0;
188 }
189
190 uptr GetSizeClass(const void *p) {
191 return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
192 }
193
194 void *GetBlockBegin(const void *p) {
195 CHECK(PointerIsMine(p));
4. Assuming 'v1' is not equal to 'v2'
5. Taking false branch
6. Loop condition is false. Exiting loop
196 uptr mem = reinterpret_cast<uptr>(p);
197 uptr beg = ComputeRegionBeg(mem);
198 uptr size = ClassIdToSize(GetSizeClass(p));
7. Calling 'SizeClassAllocator32::ClassIdToSize'
15. Returning from 'SizeClassAllocator32::ClassIdToSize'
16. 'size' initialized to 0
199 u32 offset = mem - beg;
200 u32 n = offset / (u32)size; // 32-bit division
17. Division by zero
201 uptr res = beg + (n * (u32)size);
202 return reinterpret_cast<void*>(res);
203 }
204
205 uptr GetActuallyAllocatedSize(void *p) {
206 CHECK(PointerIsMine(p));
207 return ClassIdToSize(GetSizeClass(p));
208 }
209
210 uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
211
212 uptr TotalMemoryUsed() {
213 // No need to lock here.
214 uptr res = 0;
215 for (uptr i = 0; i < kNumPossibleRegions; i++)
216 if (possible_regions[i])
217 res += kRegionSize;
218 return res;
219 }
220
221 void TestOnlyUnmap() {
222 for (uptr i = 0; i < kNumPossibleRegions; i++)
223 if (possible_regions[i])
224 UnmapWithCallback((i * kRegionSize), kRegionSize);
225 }
226
227 // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
228 // introspection API.
229 void ForceLock() {
230 for (uptr i = 0; i < kNumClasses; i++) {
231 GetSizeClassInfo(i)->mutex.Lock();
232 }
233 }
234
235 void ForceUnlock() {
236 for (int i = kNumClasses - 1; i >= 0; i--) {
237 GetSizeClassInfo(i)->mutex.Unlock();
238 }
239 }
240
241 // Iterate over all existing chunks.
242 // The allocator must be locked when calling this function.
243 void ForEachChunk(ForEachChunkCallback callback, void *arg) {
244 for (uptr region = 0; region < kNumPossibleRegions; region++)
245 if (possible_regions[region]) {
246 uptr chunk_size = ClassIdToSize(possible_regions[region]);
247 uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
248 uptr region_beg = region * kRegionSize;
249 for (uptr chunk = region_beg;
250 chunk < region_beg + max_chunks_in_region * chunk_size;
251 chunk += chunk_size) {
252 // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
253 callback(chunk, arg);
254 }
255 }
256 }
257
258 void PrintStats() {}
259
260 static uptr AdditionalSize() { return 0; }
261
262 typedef SizeClassMap SizeClassMapT;
263 static const uptr kNumClasses = SizeClassMap::kNumClasses;
264
265 private:
266 static const uptr kRegionSize = 1 << kRegionSizeLog;
267 static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;
268
269 struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) SizeClassInfo {
270 StaticSpinMutex mutex;
271 IntrusiveList<TransferBatch> free_list;
272 u32 rand_state;
273 };
274 COMPILER_CHECK(sizeof(SizeClassInfo) % kCacheLineSize == 0);
275
276 uptr ComputeRegionId(uptr mem) {
277 const uptr res = mem >> kRegionSizeLog;
278 CHECK_LT(res, kNumPossibleRegions);
279 return res;
280 }
281
282 uptr ComputeRegionBeg(uptr mem) {
283 return mem & ~(kRegionSize - 1);
284 }
285
286 uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
287 DCHECK_LT(class_id, kNumClasses);
288 const uptr res = reinterpret_cast<uptr>(MmapAlignedOrDieOnFatalError(
289 kRegionSize, kRegionSize, PrimaryAllocatorName));
290 if (UNLIKELY(!res))
291 return 0;
292 MapUnmapCallback().OnMap(res, kRegionSize);
293 stat->Add(AllocatorStatMapped, kRegionSize);
294 CHECK(IsAligned(res, kRegionSize));
295 possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
296 return res;
297 }
298
299 SizeClassInfo *GetSizeClassInfo(uptr class_id) {
300 DCHECK_LT(class_id, kNumClasses);
301 return &size_class_info_array[class_id];
302 }
303
304 bool PopulateBatches(AllocatorCache *c, SizeClassInfo *sci, uptr class_id,
305 TransferBatch **current_batch, uptr max_count,
306 uptr *pointers_array, uptr count) {
307 // If using a separate class for batches, we do not need to shuffle it.
308 if (kRandomShuffleChunks && (!kUseSeparateSizeClassForBatch ||
309 class_id != SizeClassMap::kBatchClassID))
310 RandomShuffle(pointers_array, count, &sci->rand_state);
311 TransferBatch *b = *current_batch;
312 for (uptr i = 0; i < count; i++) {
313 if (!b) {
314 b = c->CreateBatch(class_id, this, (TransferBatch*)pointers_array[i]);
315 if (UNLIKELY(!b))
316 return false;
317 b->Clear();
318 }
319 b->Add((void*)pointers_array[i]);
320 if (b->Count() == max_count) {
321 sci->free_list.push_back(b);
322 b = nullptr;
323 }
324 }
325 *current_batch = b;
326 return true;
327 }
328
329 bool PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
330 SizeClassInfo *sci, uptr class_id) {
331 const uptr region = AllocateRegion(stat, class_id);
332 if (UNLIKELY(!region))
333 return false;
334 if (kRandomShuffleChunks)
335 if (UNLIKELY(sci->rand_state == 0))
336 // The random state is initialized from ASLR (PIE) and time.
337 sci->rand_state = reinterpret_cast<uptr>(sci) ^ NanoTime();
338 const uptr size = ClassIdToSize(class_id);
339 const uptr n_chunks = kRegionSize / (size + kMetadataSize);
340 const uptr max_count = TransferBatch::MaxCached(size);
341 DCHECK_GT(max_count, 0);
342 TransferBatch *b = nullptr;
343 constexpr uptr kShuffleArraySize = 48;
344 uptr shuffle_array[kShuffleArraySize];
345 uptr count = 0;
346 for (uptr i = region; i < region + n_chunks * size; i += size) {
347 shuffle_array[count++] = i;
348 if (count == kShuffleArraySize) {
349 if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count,
350 shuffle_array, count)))
351 return false;
352 count = 0;
353 }
354 }
355 if (count) {
356 if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count,
357 shuffle_array, count)))
358 return false;
359 }
360 if (b) {
361 CHECK_GT(b->Count(), 0);
362 sci->free_list.push_back(b);
363 }
364 return true;
365 }
366
367 ByteMap possible_regions;
368 SizeClassInfo size_class_info_array[kNumClasses];
369};
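
The region layout described in the header comment above (UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1) is what the index arithmetic in GetBlockBegin() and GetMetaData() relies on: the chunk index n = (p - region_beg) / chunk_size selects both the chunk start and the metadata slot counted backwards from the region end. A small sketch with assumed example parameters (the region size, metadata size, and chunk size are illustrative, not the hwasan configuration):

#include <cstdint>
#include <cstdio>

using uptr = uintptr_t;
using u32  = uint32_t;

// Assumed example parameters, purely for illustration.
constexpr uptr kRegionSizeLog = 20;                // 1 MiB regions
constexpr uptr kRegionSize    = 1 << kRegionSizeLog;
constexpr uptr kMetadataSize  = 16;

int main() {
  uptr beg        = 0x40000000;                // region begin (kRegionSize-aligned)
  uptr chunk_size = 48;                        // size of this region's class
  uptr p          = beg + 5 * chunk_size + 7;  // a pointer into the sixth chunk

  u32 offset = static_cast<u32>(p - beg);
  u32 n      = offset / static_cast<u32>(chunk_size);          // chunk index, as in GetBlockBegin()
  uptr block = beg + n * chunk_size;                           // chunk start
  uptr meta  = (beg + kRegionSize) - (n + 1) * kMetadataSize;  // metadata slot, as in GetMetaData()

  std::printf("chunk %u: block 0x%zx, metadata 0x%zx\n",
              n, static_cast<size_t>(block), static_cast<size_t>(meta));
  return 0;
}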

/build/llvm-toolchain-snapshot-8~svn350071/projects/compiler-rt/lib/hwasan/../sanitizer_common/sanitizer_allocator_size_class_map.h

1//===-- sanitizer_allocator_size_class_map.h --------------------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// Part of the Sanitizer Allocator.
11//
12//===----------------------------------------------------------------------===//
13#ifndef SANITIZER_ALLOCATOR_H
14#error This file must be included inside sanitizer_allocator.h
15#endif
16
17// SizeClassMap maps allocation sizes into size classes and back.
18// Class 0 always corresponds to size 0.
19// The other sizes are controlled by the template parameters:
20// kMinSizeLog: defines the class 1 as 2^kMinSizeLog.
21// kMaxSizeLog: defines the last class as 2^kMaxSizeLog.
22// kMidSizeLog: the classes starting from 1 increase with step
23// 2^kMinSizeLog until 2^kMidSizeLog.
24// kNumBits: the number of non-zero bits in sizes after 2^kMidSizeLog.
25// E.g. with kNumBits==3 all size classes after 2^kMidSizeLog
26// look like 0b1xx0..0, where x is either 0 or 1.
27//
28// Example: kNumBits=3, kMinSizeLog=4, kMidSizeLog=8, kMaxSizeLog=17:
29//
30// Classes 1 - 16 correspond to sizes 16 to 256 (size = class_id * 16).
31// Next 4 classes: 256 + i * 64 (i = 1 to 4).
32// Next 4 classes: 512 + i * 128 (i = 1 to 4).
33// ...
34// Next 4 classes: 2^k + i * 2^(k-2) (i = 1 to 4).
35// Last class corresponds to kMaxSize = 1 << kMaxSizeLog.
36//
37// This structure of the size class map gives us:
38// - Efficient table-free class-to-size and size-to-class functions.
39// - Difference between two consecutive size classes is between 14% and 25%
40//
41// This class also gives a hint to a thread-caching allocator about the amount
42// of chunks that need to be cached per-thread:
43// - kMaxNumCachedHint is a hint for maximal number of chunks per size class.
44// The actual number is computed in TransferBatch.
45// - (1 << kMaxBytesCachedLog) is the maximal number of bytes per size class.
46//
47// Part of output of SizeClassMap::Print():
48// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0
49// c01 => s: 16 diff: +16 00% l 4 cached: 256 4096; id 1
50// c02 => s: 32 diff: +16 100% l 5 cached: 256 8192; id 2
51// c03 => s: 48 diff: +16 50% l 5 cached: 256 12288; id 3
52// c04 => s: 64 diff: +16 33% l 6 cached: 256 16384; id 4
53// c05 => s: 80 diff: +16 25% l 6 cached: 256 20480; id 5
54// c06 => s: 96 diff: +16 20% l 6 cached: 256 24576; id 6
55// c07 => s: 112 diff: +16 16% l 6 cached: 256 28672; id 7
56//
57// c08 => s: 128 diff: +16 14% l 7 cached: 256 32768; id 8
58// c09 => s: 144 diff: +16 12% l 7 cached: 256 36864; id 9
59// c10 => s: 160 diff: +16 11% l 7 cached: 256 40960; id 10
60// c11 => s: 176 diff: +16 10% l 7 cached: 256 45056; id 11
61// c12 => s: 192 diff: +16 09% l 7 cached: 256 49152; id 12
62// c13 => s: 208 diff: +16 08% l 7 cached: 256 53248; id 13
63// c14 => s: 224 diff: +16 07% l 7 cached: 256 57344; id 14
64// c15 => s: 240 diff: +16 07% l 7 cached: 256 61440; id 15
65//
66// c16 => s: 256 diff: +16 06% l 8 cached: 256 65536; id 16
67// c17 => s: 320 diff: +64 25% l 8 cached: 204 65280; id 17
68// c18 => s: 384 diff: +64 20% l 8 cached: 170 65280; id 18
69// c19 => s: 448 diff: +64 16% l 8 cached: 146 65408; id 19
70//
71// c20 => s: 512 diff: +64 14% l 9 cached: 128 65536; id 20
72// c21 => s: 640 diff: +128 25% l 9 cached: 102 65280; id 21
73// c22 => s: 768 diff: +128 20% l 9 cached: 85 65280; id 22
74// c23 => s: 896 diff: +128 16% l 9 cached: 73 65408; id 23
75//
76// c24 => s: 1024 diff: +128 14% l 10 cached: 64 65536; id 24
77// c25 => s: 1280 diff: +256 25% l 10 cached: 51 65280; id 25
78// c26 => s: 1536 diff: +256 20% l 10 cached: 42 64512; id 26
79// c27 => s: 1792 diff: +256 16% l 10 cached: 36 64512; id 27
80//
81// ...
82//
83// c48 => s: 65536 diff: +8192 14% l 16 cached: 1 65536; id 48
84// c49 => s: 81920 diff: +16384 25% l 16 cached: 1 81920; id 49
85// c50 => s: 98304 diff: +16384 20% l 16 cached: 1 98304; id 50
86// c51 => s: 114688 diff: +16384 16% l 16 cached: 1 114688; id 51
87//
88// c52 => s: 131072 diff: +16384 14% l 17 cached: 1 131072; id 52
89//
90//
91// Another example (kNumBits=2):
92// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0
93// c01 => s: 32 diff: +32 00% l 5 cached: 64 2048; id 1
94// c02 => s: 64 diff: +32 100% l 6 cached: 64 4096; id 2
95// c03 => s: 96 diff: +32 50% l 6 cached: 64 6144; id 3
96// c04 => s: 128 diff: +32 33% l 7 cached: 64 8192; id 4
97// c05 => s: 160 diff: +32 25% l 7 cached: 64 10240; id 5
98// c06 => s: 192 diff: +32 20% l 7 cached: 64 12288; id 6
99// c07 => s: 224 diff: +32 16% l 7 cached: 64 14336; id 7
100// c08 => s: 256 diff: +32 14% l 8 cached: 64 16384; id 8
101// c09 => s: 384 diff: +128 50% l 8 cached: 42 16128; id 9
102// c10 => s: 512 diff: +128 33% l 9 cached: 32 16384; id 10
103// c11 => s: 768 diff: +256 50% l 9 cached: 21 16128; id 11
104// c12 => s: 1024 diff: +256 33% l 10 cached: 16 16384; id 12
105// c13 => s: 1536 diff: +512 50% l 10 cached: 10 15360; id 13
106// c14 => s: 2048 diff: +512 33% l 11 cached: 8 16384; id 14
107// c15 => s: 3072 diff: +1024 50% l 11 cached: 5 15360; id 15
108// c16 => s: 4096 diff: +1024 33% l 12 cached: 4 16384; id 16
109// c17 => s: 6144 diff: +2048 50% l 12 cached: 2 12288; id 17
110// c18 => s: 8192 diff: +2048 33% l 13 cached: 2 16384; id 18
111// c19 => s: 12288 diff: +4096 50% l 13 cached: 1 12288; id 19
112// c20 => s: 16384 diff: +4096 33% l 14 cached: 1 16384; id 20
113// c21 => s: 24576 diff: +8192 50% l 14 cached: 1 24576; id 21
114// c22 => s: 32768 diff: +8192 33% l 15 cached: 1 32768; id 22
115// c23 => s: 49152 diff: +16384 50% l 15 cached: 1 49152; id 23
116// c24 => s: 65536 diff: +16384 33% l 16 cached: 1 65536; id 24
117// c25 => s: 98304 diff: +32768 50% l 16 cached: 1 98304; id 25
118// c26 => s: 131072 diff: +32768 33% l 17 cached: 1 131072; id 26
119
120template <uptr kNumBits, uptr kMinSizeLog, uptr kMidSizeLog, uptr kMaxSizeLog,
121 uptr kMaxNumCachedHintT, uptr kMaxBytesCachedLog>
122class SizeClassMap {
123 static const uptr kMinSize = 1 << kMinSizeLog;
124 static const uptr kMidSize = 1 << kMidSizeLog;
125 static const uptr kMidClass = kMidSize / kMinSize;
126 static const uptr S = kNumBits - 1;
127 static const uptr M = (1 << S) - 1;
128
129 public:
130 // kMaxNumCachedHintT is a power of two. It serves as a hint
131 // for the size of TransferBatch, the actual size could be a bit smaller.
132 static const uptr kMaxNumCachedHint = kMaxNumCachedHintT;
133 COMPILER_CHECK((kMaxNumCachedHint & (kMaxNumCachedHint - 1)) == 0);
134
135 static const uptr kMaxSize = 1UL << kMaxSizeLog;
136 static const uptr kNumClasses =
137 kMidClass + ((kMaxSizeLog - kMidSizeLog) << S) + 1 + 1;
138 static const uptr kLargestClassID = kNumClasses - 2;
139 static const uptr kBatchClassID = kNumClasses - 1;
140 COMPILER_CHECK(kNumClasses >= 16 && kNumClasses <= 256);
141 static const uptr kNumClassesRounded =
142 kNumClasses <= 32 ? 32 :
143 kNumClasses <= 64 ? 64 :
144 kNumClasses <= 128 ? 128 : 256;
145
146 static uptr Size(uptr class_id) {
147 // Estimate the result for kBatchClassID because this class does not know
148 // the exact size of TransferBatch. It's OK since we are using the actual
149 // sizeof(TransferBatch) where it matters.
150 if (UNLIKELY(class_id == kBatchClassID))
10. Taking false branch
151 return kMaxNumCachedHint * sizeof(uptr);
152 if (class_id <= kMidClass)
11. Taking true branch
153 return kMinSize * class_id;
12. Returning zero
154 class_id -= kMidClass;
155 uptr t = kMidSize << (class_id >> S);
156 return t + (t >> S) * (class_id & M);
157 }
158
159 static uptr ClassID(uptr size) {
160 if (UNLIKELY(size > kMaxSize))
161 return 0;
162 if (size <= kMidSize)
163 return (size + kMinSize - 1) >> kMinSizeLog;
164 const uptr l = MostSignificantSetBitIndex(size);
165 const uptr hbits = (size >> (l - S)) & M;
166 const uptr lbits = size & ((1U << (l - S)) - 1);
167 const uptr l1 = l - kMidSizeLog;
168 return kMidClass + (l1 << S) + hbits + (lbits > 0);
169 }
170
171 static uptr MaxCachedHint(uptr size) {
172 DCHECK_LE(size, kMaxSize);
173 if (UNLIKELY(size == 0))
174 return 0;
175 uptr n;
176 // Force a 32-bit division if the template parameters allow for it.
177 if (kMaxBytesCachedLog > 31 || kMaxSizeLog > 31)
178 n = (1UL << kMaxBytesCachedLog) / size;
179 else
180 n = (1U << kMaxBytesCachedLog) / static_cast<u32>(size);
181 return Max<uptr>(1U, Min(kMaxNumCachedHint, n));
182 }
183
184 static void Print() {
185 uptr prev_s = 0;
186 uptr total_cached = 0;
187 for (uptr i = 0; i < kNumClasses; i++) {
188 uptr s = Size(i);
189 if (s >= kMidSize / 2 && (s & (s - 1)) == 0)
190 Printf("\n");
191 uptr d = s - prev_s;
192 uptr p = prev_s ? (d * 100 / prev_s) : 0;
193 uptr l = s ? MostSignificantSetBitIndex(s) : 0;
194 uptr cached = MaxCachedHint(s) * s;
195 if (i == kBatchClassID)
196 d = p = l = 0;
197 Printf("c%02zd => s: %zd diff: +%zd %02zd%% l %zd "
198 "cached: %zd %zd; id %zd\n",
199 i, Size(i), d, p, l, MaxCachedHint(s), cached, ClassID(s));
200 total_cached += cached;
201 prev_s = s;
202 }
203 Printf("Total cached: %zd\n", total_cached);
204 }
205
206 static void Validate() {
207 for (uptr c = 1; c < kNumClasses; c++) {
208 // Printf("Validate: c%zd\n", c);
209 uptr s = Size(c);
210 CHECK_NE(s, 0U);
211 if (c == kBatchClassID)
212 continue;
213 CHECK_EQ(ClassID(s), c);
214 if (c < kLargestClassID)
215 CHECK_EQ(ClassID(s + 1), c + 1);
216 CHECK_EQ(ClassID(s - 1), c);
217 CHECK_GT(Size(c), Size(c - 1));
218 }
219 CHECK_EQ(ClassID(kMaxSize + 1), 0);
220
221 for (uptr s = 1; s <= kMaxSize; s++) {
222 uptr c = ClassID(s);
223 // Printf("s%zd => c%zd\n", s, c);
224 CHECK_LT(c, kNumClasses);
225 CHECK_GE(Size(c), s);
226 if (c > 0)
227 CHECK_LT(Size(c - 1), s);
228 }
229 }
230};
231
232typedef SizeClassMap<3, 4, 8, 17, 128, 16> DefaultSizeClassMap;
233typedef SizeClassMap<3, 4, 8, 17, 64, 14> CompactSizeClassMap;
234typedef SizeClassMap<2, 5, 9, 16, 64, 14> VeryCompactSizeClassMap;
235
236// The following SizeClassMap only holds a very small number of cached entries,
237// allowing for denser per-class arrays, a smaller memory footprint and usually
238// better performance in threaded environments.
239typedef SizeClassMap<3, 4, 8, 17, 8, 10> DenseSizeClassMap;
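
The class-to-size mapping documented at the top of this header can be re-derived by hand. The sketch below re-implements Size() for the DefaultSizeClassMap parameters (kNumBits=3, kMinSizeLog=4, kMidSizeLog=8, kMaxSizeLog=17), ignoring the kBatchClassID special case, and reproduces the Print() excerpt from the comment; note Size(0) == 0 for class 0, which is exactly the zero that reaches the division flagged in this report.

#include <cstdint>
#include <cstdio>
#include <initializer_list>

using uptr = uintptr_t;

constexpr uptr kNumBits = 3, kMinSizeLog = 4, kMidSizeLog = 8;
constexpr uptr kMinSize  = 1 << kMinSizeLog;      // 16
constexpr uptr kMidSize  = 1 << kMidSizeLog;      // 256
constexpr uptr kMidClass = kMidSize / kMinSize;   // 16
constexpr uptr S = kNumBits - 1;                  // 2
constexpr uptr M = (1 << S) - 1;                  // 3

// Same formula as SizeClassMap::Size(), minus the kBatchClassID branch.
static uptr Size(uptr class_id) {
  if (class_id <= kMidClass) return kMinSize * class_id;  // Size(0) == 0
  class_id -= kMidClass;
  uptr t = kMidSize << (class_id >> S);
  return t + (t >> S) * (class_id & M);
}

int main() {
  // Matches the Print() excerpt: c01 => 16, c16 => 256, c17 => 320, c20 => 512, c24 => 1024.
  for (uptr c : {0, 1, 16, 17, 20, 24})
    std::printf("c%02zu => s: %zu\n", static_cast<size_t>(c), static_cast<size_t>(Size(c)));
  return 0;
}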