#pragma clang diagnostic ignored "-Wunused-macros"
#pragma clang diagnostic ignored "-Wunused-function"
#if __has_warning("-Wreserved-identifier")
#pragma clang diagnostic ignored "-Wreserved-identifier"
#endif
#if __has_warning("-Wstatic-in-inline")
#pragma clang diagnostic ignored "-Wstatic-in-inline"
#endif
#elif defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wunused-macros"
#pragma GCC diagnostic ignored "-Wunused-function"
#endif

#if !defined(__has_builtin)
#define __has_builtin(b) 0
#endif
#if defined(__GNUC__) || defined(__clang__)

#if __has_builtin(__builtin_memcpy_inline)
#define _rpmalloc_memcpy_const(x, y, s) __builtin_memcpy_inline(x, y, s)
#else
#define _rpmalloc_memcpy_const(x, y, s)                                       \
  do {                                                                        \
    _Static_assert(__builtin_choose_expr(__builtin_constant_p(s), 1, 0),      \
                   "len must be a constant integer");                         \
    memcpy(x, y, s);                                                          \
  } while (0)
#endif

#if __has_builtin(__builtin_memset_inline)
#define _rpmalloc_memset_const(x, y, s) __builtin_memset_inline(x, y, s)
#else
#define _rpmalloc_memset_const(x, y, s)                                       \
  do {                                                                        \
    _Static_assert(__builtin_choose_expr(__builtin_constant_p(s), 1, 0),      \
                   "len must be a constant integer");                         \
    memset(x, y, s);                                                          \
  } while (0)
#endif

#else

#define _rpmalloc_memcpy_const(x, y, s) memcpy(x, y, s)
#define _rpmalloc_memset_const(x, y, s) memset(x, y, s)

#endif
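// Illustrative sketch (not part of the upstream rpmalloc source): the
// *_const helpers are meant for copies whose length is a compile-time
// constant, so the compiler can expand them inline. <string.h> is assumed
// to be available for the fallback path; the caller below is hypothetical.
#include <string.h>
static void example_copy_fixed_header(void *dst, const void *src) {
  // 16 is a literal, so __builtin_constant_p(16) holds and the _Static_assert
  // in the fallback macro is satisfied; a runtime length must use memcpy().
  _rpmalloc_memcpy_const(dst, src, 16);
}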
#if __has_builtin(__builtin_assume)
#define rpmalloc_assume(cond) __builtin_assume(cond)
#elif defined(__GNUC__)
#define rpmalloc_assume(cond)                                                 \
  do {                                                                        \
    if (!__builtin_expect(cond, 0))                                           \
      __builtin_unreachable();                                                \
  } while (0)
#elif defined(_MSC_VER)
#define rpmalloc_assume(cond) __assume(cond)
#else
#define rpmalloc_assume(cond) 0
#endif
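// Illustrative sketch (not part of the upstream rpmalloc source):
// rpmalloc_assume() promises the optimizer that a condition always holds,
// letting it drop the corresponding runtime checks. Hypothetical helper:
static size_t example_block_index(size_t offset, size_t block_size) {
  rpmalloc_assume(block_size > 0); // the division below can assume non-zero
  return offset / block_size;
}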
#ifndef HEAP_ARRAY_SIZE
//! Size of heap hashmap
#define HEAP_ARRAY_SIZE 47
#endif
#ifndef ENABLE_THREAD_CACHE
//! Enable per-thread cache
#define ENABLE_THREAD_CACHE 1
#endif
#ifndef ENABLE_GLOBAL_CACHE
#define ENABLE_GLOBAL_CACHE 1
#endif
#ifndef ENABLE_VALIDATE_ARGS
#define ENABLE_VALIDATE_ARGS 0
#endif
#ifndef ENABLE_STATISTICS
#define ENABLE_STATISTICS 0
#endif
#ifndef ENABLE_ASSERTS
#define ENABLE_ASSERTS 0
#endif
#ifndef ENABLE_OVERRIDE
#define ENABLE_OVERRIDE 0
#endif
#ifndef ENABLE_PRELOAD
#define ENABLE_PRELOAD 0
#endif
#ifndef DISABLE_UNMAP
#define DISABLE_UNMAP 0
#endif
#ifndef ENABLE_UNLIMITED_CACHE
//! Enable unlimited global cache (no unmapping until finalization)
#define ENABLE_UNLIMITED_CACHE 0
#endif
#ifndef ENABLE_ADAPTIVE_THREAD_CACHE
#define ENABLE_ADAPTIVE_THREAD_CACHE 0
#endif
#ifndef DEFAULT_SPAN_MAP_COUNT
//! Default number of spans to map in call to map more virtual memory
//! (default values yield 4MiB here)
#define DEFAULT_SPAN_MAP_COUNT 64
#endif
#ifndef GLOBAL_CACHE_MULTIPLIER
//! Multiplier for global cache
#define GLOBAL_CACHE_MULTIPLIER 8
#endif
#if DISABLE_UNMAP && !ENABLE_GLOBAL_CACHE
#error Must use global cache if unmap is disabled
#endif

#if DISABLE_UNMAP
#undef ENABLE_UNLIMITED_CACHE
#define ENABLE_UNLIMITED_CACHE 1
#endif

#if !ENABLE_GLOBAL_CACHE
#undef ENABLE_UNLIMITED_CACHE
#define ENABLE_UNLIMITED_CACHE 0
#endif

#if !ENABLE_THREAD_CACHE
#undef ENABLE_ADAPTIVE_THREAD_CACHE
#define ENABLE_ADAPTIVE_THREAD_CACHE 0
#endif
#if defined(_WIN32) || defined(__WIN32__) || defined(_WIN64)
#define PLATFORM_WINDOWS 1
#define PLATFORM_POSIX 0
#else
#define PLATFORM_WINDOWS 0
#define PLATFORM_POSIX 1
#endif

#if defined(_MSC_VER) && !defined(__clang__)
#pragma warning(disable : 5105)
#define FORCEINLINE inline __forceinline
#define _Static_assert static_assert
#else
#define FORCEINLINE inline __attribute__((__always_inline__))
#endif

#if PLATFORM_WINDOWS
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#include <windows.h>
#if ENABLE_VALIDATE_ARGS
#include <intsafe.h>
#endif
#if defined(__linux__) || defined(__ANDROID__)
#include <sys/prctl.h>
#if !defined(PR_SET_VMA)
#define PR_SET_VMA 0x53564d41
#define PR_SET_VMA_ANON_NAME 0
#endif
#endif
#if defined(__APPLE__)
#include <TargetConditionals.h>
#if !TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR
#include <mach/mach_vm.h>
#include <mach/vm_statistics.h>
#endif
#endif
#if defined(__HAIKU__) || defined(__TINYC__)
#include <pthread.h>
#endif

#if defined(_WIN32) && (!defined(BUILD_DYNAMIC_LINK) || !BUILD_DYNAMIC_LINK)
#include <fibersapi.h>
#endif

#include <sys/sysctl.h>
#define MAP_HUGETLB MAP_ALIGNED_SUPER

extern int madvise(caddr_t, size_t, int);

#ifndef MAP_UNINITIALIZED
#define MAP_UNINITIALIZED 0
#endif
#if defined(_MSC_VER) && !defined(_DEBUG)
#define _DEBUG
#endif
#include <assert.h>
#define RPMALLOC_TOSTRING_M(x) #x
#define RPMALLOC_TOSTRING(x) RPMALLOC_TOSTRING_M(x)
#define rpmalloc_assert(truth, message)                                       \
  do {                                                                        \
    if (!(truth)) {                                                           \
      if (_memory_config.error_callback) {                                    \
        _memory_config.error_callback(message " (" RPMALLOC_TOSTRING(         \
            truth) ") at " __FILE__ ":" RPMALLOC_TOSTRING(__LINE__));         \
      } else {                                                                \
        assert((truth) && message);                                           \
      }                                                                       \
    }                                                                         \
  } while (0)
#else
#define rpmalloc_assert(truth, message)                                       \
  do {                                                                        \
  } while (0)
#endif
#if defined(_MSC_VER) && !defined(__clang__)

typedef volatile long atomic32_t;
typedef volatile long long atomic64_t;
typedef volatile void *atomicptr_t;

static FORCEINLINE int32_t atomic_load32(atomic32_t *src) {
  return (int32_t)InterlockedOr(src, 0);
}
static FORCEINLINE void atomic_store32(atomic32_t *dst, int32_t val) {
  InterlockedExchange(dst, val);
}
static FORCEINLINE int32_t atomic_incr32(atomic32_t *val) {
  return (int32_t)InterlockedIncrement(val);
}
static FORCEINLINE int32_t atomic_decr32(atomic32_t *val) {
  return (int32_t)InterlockedDecrement(val);
}
static FORCEINLINE int32_t atomic_add32(atomic32_t *val, int32_t add) {
  return (int32_t)InterlockedExchangeAdd(val, add) + add;
}
static FORCEINLINE int atomic_cas32_acquire(atomic32_t *dst, int32_t val,
                                            int32_t ref) {
  return (InterlockedCompareExchange(dst, val, ref) == ref) ? 1 : 0;
}
static FORCEINLINE void atomic_store32_release(atomic32_t *dst, int32_t val) {
  InterlockedExchange(dst, val);
}
static FORCEINLINE int64_t atomic_load64(atomic64_t *src) {
  return (int64_t)InterlockedOr64(src, 0);
}
static FORCEINLINE int64_t atomic_add64(atomic64_t *val, int64_t add) {
  return (int64_t)InterlockedExchangeAdd64(val, add) + add;
}
static FORCEINLINE void *atomic_load_ptr(atomicptr_t *src) {
  return InterlockedCompareExchangePointer(src, 0, 0);
}
static FORCEINLINE void atomic_store_ptr(atomicptr_t *dst, void *val) {
  InterlockedExchangePointer(dst, val);
}
static FORCEINLINE void atomic_store_ptr_release(atomicptr_t *dst, void *val) {
  InterlockedExchangePointer(dst, val);
}
static FORCEINLINE void *atomic_exchange_ptr_acquire(atomicptr_t *dst,
                                                     void *val) {
  return (void *)InterlockedExchangePointer((void *volatile *)dst, val);
}
static FORCEINLINE int atomic_cas_ptr(atomicptr_t *dst, void *val, void *ref) {
  return (InterlockedCompareExchangePointer((void *volatile *)dst, val, ref) ==
          ref)
             ? 1
             : 0;
}

#define EXPECTED(x) (x)
#define UNEXPECTED(x) (x)
#include <stdatomic.h>

typedef volatile _Atomic(int32_t) atomic32_t;
typedef volatile _Atomic(int64_t) atomic64_t;
typedef volatile _Atomic(void *) atomicptr_t;

static FORCEINLINE int32_t atomic_load32(atomic32_t *src) {
  return atomic_load_explicit(src, memory_order_relaxed);
}
static FORCEINLINE void atomic_store32(atomic32_t *dst, int32_t val) {
  atomic_store_explicit(dst, val, memory_order_relaxed);
}
static FORCEINLINE int32_t atomic_incr32(atomic32_t *val) {
  return atomic_fetch_add_explicit(val, 1, memory_order_relaxed) + 1;
}
static FORCEINLINE int32_t atomic_decr32(atomic32_t *val) {
  return atomic_fetch_add_explicit(val, -1, memory_order_relaxed) - 1;
}
static FORCEINLINE int32_t atomic_add32(atomic32_t *val, int32_t add) {
  return atomic_fetch_add_explicit(val, add, memory_order_relaxed) + add;
}
static FORCEINLINE int atomic_cas32_acquire(atomic32_t *dst, int32_t val,
                                            int32_t ref) {
  return atomic_compare_exchange_weak_explicit(
      dst, &ref, val, memory_order_acquire, memory_order_relaxed);
}
static FORCEINLINE void atomic_store32_release(atomic32_t *dst, int32_t val) {
  atomic_store_explicit(dst, val, memory_order_release);
}
static FORCEINLINE int64_t atomic_load64(atomic64_t *val) {
  return atomic_load_explicit(val, memory_order_relaxed);
}
static FORCEINLINE int64_t atomic_add64(atomic64_t *val, int64_t add) {
  return atomic_fetch_add_explicit(val, add, memory_order_relaxed) + add;
}
static FORCEINLINE void *atomic_load_ptr(atomicptr_t *src) {
  return atomic_load_explicit(src, memory_order_relaxed);
}
static FORCEINLINE void atomic_store_ptr(atomicptr_t *dst, void *val) {
  atomic_store_explicit(dst, val, memory_order_relaxed);
}
static FORCEINLINE void atomic_store_ptr_release(atomicptr_t *dst, void *val) {
  atomic_store_explicit(dst, val, memory_order_release);
}
static FORCEINLINE void *atomic_exchange_ptr_acquire(atomicptr_t *dst,
                                                     void *val) {
  return atomic_exchange_explicit(dst, val, memory_order_acquire);
}
static FORCEINLINE int atomic_cas_ptr(atomicptr_t *dst, void *val, void *ref) {
  return atomic_compare_exchange_weak_explicit(
      dst, &ref, val, memory_order_relaxed, memory_order_relaxed);
}

#define EXPECTED(x) __builtin_expect((x), 1)
#define UNEXPECTED(x) __builtin_expect((x), 0)
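// Illustrative sketch (not part of the upstream rpmalloc source): the
// wrappers above give one atomics API over MSVC interlocked ops and C11
// <stdatomic.h>. Typical use is plain relaxed counters, e.g. a hypothetical
// live-heap counter:
static atomic32_t example_active_heaps;
static void example_heap_created(void) { atomic_incr32(&example_active_heaps); }
static void example_heap_destroyed(void) { atomic_decr32(&example_active_heaps); }
static int32_t example_active_heap_count(void) {
  return atomic_load32(&example_active_heaps);
}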
397#define _rpmalloc_stat_inc(counter) atomic_incr32(counter)
398#define _rpmalloc_stat_dec(counter) atomic_decr32(counter)
399#define _rpmalloc_stat_add(counter, value) \
400 atomic_add32(counter, (int32_t)(value))
401#define _rpmalloc_stat_add64(counter, value) \
402 atomic_add64(counter, (int64_t)(value))
403#define _rpmalloc_stat_add_peak(counter, value, peak) \
405 int32_t _cur_count = atomic_add32(counter, (int32_t)(value)); \
406 if (_cur_count > (peak)) \
409#define _rpmalloc_stat_sub(counter, value) \
410 atomic_add32(counter, -(int32_t)(value))
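// Illustrative sketch (not part of the upstream rpmalloc source): the
// statistics macros expand to atomic updates when ENABLE_STATISTICS is 1 and
// to no-ops otherwise, so call sites stay free of #if guards. Hypothetical
// counter pair tracked with _rpmalloc_stat_add_peak:
#if ENABLE_STATISTICS
static atomic32_t example_mapped_pages;
static int32_t example_mapped_pages_peak;
#endif
static void example_track_map(size_t page_count) {
  (void)page_count;
  // adds to the counter and records a new high-water mark when exceeded
  _rpmalloc_stat_add_peak(&example_mapped_pages, page_count,
                          example_mapped_pages_peak);
}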
411#define _rpmalloc_stat_inc_alloc(heap, class_idx) \
413 int32_t alloc_current = \
414 atomic_incr32(&heap->size_class_use[class_idx].alloc_current); \
415 if (alloc_current > heap->size_class_use[class_idx].alloc_peak) \
416 heap->size_class_use[class_idx].alloc_peak = alloc_current; \
417 atomic_incr32(&heap->size_class_use[class_idx].alloc_total); \
419#define _rpmalloc_stat_inc_free(heap, class_idx) \
421 atomic_decr32(&heap->size_class_use[class_idx].alloc_current); \
422 atomic_incr32(&heap->size_class_use[class_idx].free_total); \
425#define _rpmalloc_stat_inc(counter) \
428#define _rpmalloc_stat_dec(counter) \
431#define _rpmalloc_stat_add(counter, value) \
434#define _rpmalloc_stat_add64(counter, value) \
437#define _rpmalloc_stat_add_peak(counter, value, peak) \
440#define _rpmalloc_stat_sub(counter, value) \
443#define _rpmalloc_stat_inc_alloc(heap, class_idx) \
446#define _rpmalloc_stat_inc_free(heap, class_idx) \
456#define SMALL_GRANULARITY 16
458#define SMALL_GRANULARITY_SHIFT 4
460#define SMALL_CLASS_COUNT 65
462#define SMALL_SIZE_LIMIT (SMALL_GRANULARITY * (SMALL_CLASS_COUNT - 1))
464#define MEDIUM_GRANULARITY 512
466#define MEDIUM_GRANULARITY_SHIFT 9
468#define MEDIUM_CLASS_COUNT 61
470#define SIZE_CLASS_COUNT (SMALL_CLASS_COUNT + MEDIUM_CLASS_COUNT)
472#define LARGE_CLASS_COUNT 63
474#define MEDIUM_SIZE_LIMIT \
475 (SMALL_SIZE_LIMIT + (MEDIUM_GRANULARITY * MEDIUM_CLASS_COUNT))
477#define LARGE_SIZE_LIMIT \
478 ((LARGE_CLASS_COUNT * _memory_span_size) - SPAN_HEADER_SIZE)
481#define SPAN_HEADER_SIZE 128
483#define MAX_THREAD_SPAN_CACHE 400
485#define THREAD_SPAN_CACHE_TRANSFER 64
488#define MAX_THREAD_SPAN_LARGE_CACHE 100
490#define THREAD_SPAN_LARGE_CACHE_TRANSFER 6
493 "Small granularity must be power of two");
495 "Span header size must be power of two");
497#if ENABLE_VALIDATE_ARGS
500#define MAX_ALLOC_SIZE (((size_t) - 1) - _memory_span_size)
503#define pointer_offset(ptr, ofs) (void *)((char *)(ptr) + (ptrdiff_t)(ofs))
504#define pointer_diff(first, second) \
505 (ptrdiff_t)((const char *)(first) - (const char *)(second))
507#define INVALID_POINTER ((void *)((uintptr_t) - 1))
509#define SIZE_CLASS_LARGE SIZE_CLASS_COUNT
510#define SIZE_CLASS_HUGE ((uint32_t) - 1)
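// Illustrative sketch (not part of the upstream rpmalloc source): small
// requests are served from SMALL_CLASS_COUNT classes spaced SMALL_GRANULARITY
// (16) bytes apart up to SMALL_SIZE_LIMIT (16 * 64 = 1024 bytes), after which
// medium classes step in MEDIUM_GRANULARITY increments. A hedged
// approximation of the small size-to-class mapping:
static size_t example_small_class_index(size_t size) {
  // round the request up to the 16-byte granularity
  return (size + (SMALL_GRANULARITY - 1)) >> SMALL_GRANULARITY_SHIFT;
}
// e.g. example_small_class_index(1)  -> 1 (16-byte block)
//      example_small_class_index(24) -> 2 (32-byte block)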
532#define SPAN_FLAG_MASTER 1U
534#define SPAN_FLAG_SUBSPAN 2U
536#define SPAN_FLAG_ALIGNED_BLOCKS 4U
538#define SPAN_FLAG_UNMAPPED_MASTER 8U
540#if ENABLE_ADAPTIVE_THREAD_CACHE || ENABLE_STATISTICS
548 atomic32_t spans_deferred;
550 atomic32_t spans_to_global;
552 atomic32_t spans_from_global;
554 atomic32_t spans_to_cache;
556 atomic32_t spans_from_cache;
558 atomic32_t spans_to_reserved;
560 atomic32_t spans_from_reserved;
562 atomic32_t spans_map_calls;
565typedef struct span_use_t span_use_t;
569struct size_class_use_t {
571 atomic32_t alloc_current;
575 atomic32_t alloc_total;
577 atomic32_t free_total;
579 atomic32_t spans_current;
583 atomic32_t spans_to_cache;
585 atomic32_t spans_from_cache;
587 atomic32_t spans_from_reserved;
589 atomic32_t spans_map_calls;
592typedef struct size_class_use_t size_class_use_t;
675#if ENABLE_THREAD_CACHE
701#if ENABLE_THREAD_CACHE
705#if RPMALLOC_FIRST_CLASS_HEAPS
713#if ENABLE_ADAPTIVE_THREAD_CACHE || ENABLE_STATISTICS
721 atomic64_t thread_to_global;
723 atomic64_t global_to_thread;
_Static_assert(sizeof(size_class_t) == 8, "Size class size mismatch");
747 size_t extract_count;
762#define _memory_default_span_size (64 * 1024)
763#define _memory_default_span_size_shift 16
764#define _memory_default_span_mask (~((uintptr_t)(_memory_span_size - 1)))
778#if RPMALLOC_CONFIGURABLE
787#define _memory_span_size _memory_default_span_size
788#define _memory_span_size_shift _memory_default_span_size_shift
789#define _memory_span_mask _memory_default_span_mask
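// Illustrative sketch (not part of the upstream rpmalloc source): spans are
// mapped at span-size-aligned addresses, so the owning span_t header of any
// block can be recovered by masking the block pointer with _memory_span_mask;
// the deallocation path relies on exactly this property.
static span_t *example_span_from_block(void *block) {
  return (span_t *)((uintptr_t)block & _memory_span_mask);
}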
803#if ENABLE_GLOBAL_CACHE
819#if RPMALLOC_FIRST_CLASS_HEAPS
821static heap_t *_memory_first_class_orphan_heaps;
825static atomic64_t _allocation_counter;
827static atomic64_t _deallocation_counter;
829static atomic32_t _memory_active_heaps;
831static atomic32_t _mapped_pages;
833static int32_t _mapped_pages_peak;
835static atomic32_t _master_spans;
837static atomic32_t _unmapped_master_spans;
839static atomic32_t _mapped_total;
841static atomic32_t _unmapped_total;
843static atomic32_t _mapped_pages_os;
845static atomic32_t _huge_pages_current;
847static int32_t _huge_pages_peak;
857#if ((defined(__APPLE__) || defined(__HAIKU__)) && ENABLE_PRELOAD) || \
859static pthread_key_t _memory_thread_heap;
862#define _Thread_local __declspec(thread)
866#define TLS_MODEL __attribute__((tls_model("initial-exec")))
870#if !defined(__clang__) && defined(__GNUC__)
871#define _Thread_local __thread
878#if (defined(__APPLE__) || defined(__HAIKU__)) && ENABLE_PRELOAD
879 return pthread_getspecific(_memory_thread_heap);
881 return _memory_thread_heap;
  return (uintptr_t)((void *)NtCurrentTeb());
#elif (defined(__GNUC__) || defined(__clang__)) && !defined(__CYGWIN__)
  uintptr_t tid;
#if defined(__i386__)
  __asm__("movl %%gs:0, %0" : "=r"(tid) : :);
#elif defined(__x86_64__)
#if defined(__MACH__)
  __asm__("movq %%gs:0, %0" : "=r"(tid) : :);
#else
  __asm__("movq %%fs:0, %0" : "=r"(tid) : :);
#endif
#elif defined(__arm__)
  __asm__ volatile("mrc p15, 0, %0, c13, c0, 3" : "=r"(tid));
#elif defined(__aarch64__)
#if defined(__APPLE__)
  __asm__ volatile("mrs %0, tpidrro_el0" : "=r"(tid));
#else
  __asm__ volatile("mrs %0, tpidr_el0" : "=r"(tid));
#endif
#else
#error This platform needs implementation of get_thread_id()
#endif
  return tid;
#else
#error This platform needs implementation of get_thread_id()
#endif
932#if ((defined(__APPLE__) || defined(__HAIKU__)) && ENABLE_PRELOAD) || \
934 pthread_setspecific(_memory_thread_heap, heap);
936 _memory_thread_heap = heap;
#elif defined(__x86_64__) || defined(__i386__)
  __asm__ volatile("pause" ::: "memory");
#elif defined(__aarch64__) || (defined(__arm__) && __ARM_ARCH >= 7)
  __asm__ volatile("yield" ::: "memory");
#elif defined(__powerpc__) || defined(__powerpc64__)
  __asm__ volatile("or 27,27,27");
#elif defined(__sparc__)
  __asm__ volatile("rd %ccr, %g0 \n\trd %ccr, %g0 \n\trd %ccr, %g0");
#else
  struct timespec ts = {0};
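// Illustrative sketch (not part of the upstream rpmalloc source):
// _rpmalloc_spin() is the wait primitive used while contending on the global
// lock; a hedged version of that acquire/release pattern looks like this:
static void example_with_global_lock(void) {
  while (!atomic_cas32_acquire(&_memory_global_lock, 1, 0))
    _rpmalloc_spin();
  // ... touch globally reserved spans here ...
  atomic_store32_release(&_memory_global_lock, 0);
}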
971#if defined(_WIN32) && (!defined(BUILD_DYNAMIC_LINK) || !BUILD_DYNAMIC_LINK)
973static void NTAPI RPMallocTlsOnThreadExit(PVOID module, DWORD reason,
976 case DLL_PROCESS_ATTACH:
978 case DLL_PROCESS_DETACH:
981 case DLL_THREAD_ATTACH:
983 case DLL_THREAD_DETACH:
990#pragma comment(linker, "/INCLUDE:_tls_used")
991#pragma comment(linker, "/INCLUDE:rpmalloc_tls_thread_exit_callback")
996#pragma const_seg(".CRT$XLY")
998extern const PIMAGE_TLS_CALLBACK rpmalloc_tls_thread_exit_callback;
999const PIMAGE_TLS_CALLBACK rpmalloc_tls_thread_exit_callback =
1000 RPMallocTlsOnThreadExit;
1005#pragma comment(linker, "/INCLUDE:__tls_used")
1006#pragma comment(linker, "/INCLUDE:_rpmalloc_tls_thread_exit_callback")
1008#pragma data_seg(".CRT$XLY")
1010PIMAGE_TLS_CALLBACK rpmalloc_tls_thread_exit_callback = RPMallocTlsOnThreadExit;
static void NTAPI _rpmalloc_thread_destructor(void *value) {

#if defined(__linux__) || defined(__ANDROID__)
  if (address == MAP_FAILED || !name)
  (void)prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, (uintptr_t)address, size,
  (void)sizeof(address);
1060 _mapped_pages_peak);
1076 "Invalid unmap size");
1097 void *ptr = VirtualAlloc(0, size + padding,
1099 MEM_RESERVE | MEM_COMMIT,
1111 int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZED;
1112#if defined(__APPLE__) && !TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR
1113 int fd = (int)VM_MAKE_TAG(240U);
1115 fd |= VM_FLAGS_SUPERPAGE_SIZE_2MB;
1116 void *ptr = mmap(0, size + padding, PROT_READ | PROT_WRITE, flags, fd, 0);
1117#elif defined(MAP_HUGETLB)
1118 void *ptr = mmap(0, size + padding,
1119 PROT_READ | PROT_WRITE | PROT_MAX(PROT_READ | PROT_WRITE),
1121#if defined(MADV_HUGEPAGE)
1126 ptr = mmap(0, size + padding, PROT_READ | PROT_WRITE, flags, -1, 0);
1127 if (ptr && ptr != MAP_FAILED) {
1128 int prm = madvise(ptr, size + padding, MADV_HUGEPAGE);
1135#elif defined(MAP_ALIGNED)
  const size_t align =
      (sizeof(size_t) * 8) - (size_t)(__builtin_clzl(size - 1));
1139 mmap(0, size + padding, PROT_READ | PROT_WRITE,
1141#elif defined(MAP_ALIGN)
1143 void *ptr = mmap(base, size + padding, PROT_READ | PROT_WRITE,
1146 void *ptr = mmap(0, size + padding, PROT_READ | PROT_WRITE, flags, -1, 0);
1148 if ((ptr == MAP_FAILED) || !ptr) {
  } else if (errno != ENOMEM) {
1154 "Failed to map virtual memory block");
1164 "Internal failure in padding");
1165 rpmalloc_assert(final_padding <= padding,
"Internal failure in padding");
1168 *offset = final_padding >> 3;
1172 "Internal failure in padding");
1181 "Invalid unmap size");
1183 if (release && offset) {
1194 if (!VirtualFree(address, release ? 0 : size,
1195 release ? MEM_RELEASE : MEM_DECOMMIT)) {
1200 if (munmap(address, release)) {
1204#if defined(MADV_FREE_REUSABLE)
1206 while ((ret = madvise(address, size, MADV_FREE_REUSABLE)) == -1 &&
1209 if ((ret == -1) && (errno != 0)) {
1210#elif defined(MADV_DONTNEED)
1211 if (madvise(address, size, MADV_DONTNEED)) {
1212#elif defined(MADV_PAGEOUT)
1213 if (madvise(address, size, MADV_PAGEOUT)) {
1214#elif defined(MADV_FREE)
1215 if (madvise(address, size, MADV_FREE)) {
1217 if (posix_madvise(address, size, POSIX_MADV_DONTNEED)) {
1250 size_t reserve_span_count) {
1265 (*head)->prev = span;
1282 if (*head == span) {
    prev_span->next = next_span;
    next_span->prev = prev_span;
1305 size_t reserve_span_count);
1311 size_t span_count) {
1313 "Span master pointer and/or flag mismatch");
1314 if (subspan != master) {
1327 size_t span_count) {
1352 return request_count;
1357 size_t span_count,
size_t align_offset) {
1370 size_t span_count) {
1375 size_t align_offset = 0;
1384 if (aligned_span_count > span_count) {
1387 size_t reserved_count = aligned_span_count - span_count;
1397 "Global spin lock not held as expected");
1419 if (span_count <= heap->spans_reserved)
1422 int use_global_reserve =
1425 if (use_global_reserve) {
1430 size_t reserve_count =
1436 if (reserve_count > span_count) {
1441 reserve_count - span_count);
1450 if (use_global_reserve)
1460 "Span flag corrupted");
1463 "Span flag corrupted");
1472 "Span flag corrupted");
1495 "Span flag corrupted");
1512 "Invalid span size class");
1514#if ENABLE_ADAPTIVE_THREAD_CACHE || ENABLE_STATISTICS
                                         void *page_start, void *block_start,
  *first_block = block_start;
  if (block_count > 1) {
    if (page_end < block_end)
      block_end = page_end;
    while (next_block < block_end) {
      *((void **)free_block) = next_block;
      free_block = next_block;
    *((void **)free_block) = 0;
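// Illustrative sketch (not part of the upstream rpmalloc source): the free
// list is threaded through the blocks themselves - the first word of every
// free block stores the pointer to the next free block, so popping a block
// needs no separate metadata:
static void *example_free_list_pop(void **list) {
  void *block = *list;
  if (block)
    *list = *((void **)block); // follow the link stored inside the block
  return block;
}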
#if RPMALLOC_FIRST_CLASS_HEAPS
                  "Span free list corrupted");
  if (span == class_span) {
    void *last_block = 0;
    *((void **)last_block) = free_list;
1668#if ENABLE_GLOBAL_CACHE
static void _rpmalloc_global_cache_finalize(global_cache_t *cache) {
  for (size_t ispan = 0; ispan < cache->count; ++ispan)

static void _rpmalloc_global_cache_insert_spans(span_t **span,
  const size_t cache_limit =
  size_t insert_count = count;
#if ENABLE_STATISTICS
  cache->insert_count += count;
  if ((cache->count + insert_count) > cache_limit)
    insert_count = cache_limit - cache->count;
  memcpy(cache->span + cache->count, span, sizeof(span_t *) * insert_count);
  while (insert_count < count) {
    span_t *current_span = span[insert_count++];
  for (size_t ispan = insert_count; ispan < count; ++ispan) {
    span_t *current_span = span[ispan];
    current_span->next = keep;
    keep = current_span;
  for (; islot < cache->count; ++islot) {
    cache->span[islot] = keep;
  if (islot == cache->count)

static size_t _rpmalloc_global_cache_extract_spans(span_t **span,
  size_t extract_count = 0;
#if ENABLE_STATISTICS
  cache->extract_count += count;
    size_t want = count - extract_count;
    if (want > cache->count)
      want = cache->count;
    memcpy(span + extract_count, cache->span + (cache->count - want),
           sizeof(span_t *) * want);
    extract_count += want;
  while ((extract_count < count) && cache->overflow) {
    span[extract_count++] = current_span;
  for (size_t ispan = 0; ispan < extract_count; ++ispan) {
                  "Global cache span count mismatch");
  return extract_count;
1825 size_t reserve_span_count) {
1844#if RPMALLOC_FIRST_CLASS_HEAPS
1850 if (single_span && !*single_span)
1851 *single_span = span;
1859 "Span size class invalid");
1862#if RPMALLOC_FIRST_CLASS_HEAPS
1868 if (!idx && single_span && !*single_span)
1869 *single_span = span;
1899#if ENABLE_THREAD_CACHE
      span_cache = &heap->span_cache;
      span_cache = (span_cache_t *)(heap->span_large_cache + (iclass - 1));
    for (size_t ispan = 0; ispan < span_cache->count; ++ispan)
    span_cache->count = 0;
  if (list_heap == heap) {
#if ENABLE_THREAD_CACHE
  if (span_count == 1) {
    span_cache->span[span_cache->count++] = span;
    const size_t remain_count =
#if ENABLE_GLOBAL_CACHE
      _rpmalloc_global_cache_insert_spans(span_cache->span + remain_count,
      span_cache->count = remain_count;
    size_t cache_idx = span_count - 2;
    span_cache->span[span_cache->count++] = span;
    const size_t cache_limit =
    if (span_cache->count == cache_limit) {
      const size_t transfer_limit = 2 + (cache_limit >> 2);
      const size_t transfer_count =
      const size_t remain_count = cache_limit - transfer_count;
#if ENABLE_GLOBAL_CACHE
      _rpmalloc_global_cache_insert_spans(span_cache->span + remain_count,
                                          span_count, transfer_count);
      for (size_t ispan = 0; ispan < transfer_count; ++ispan)
      span_cache->count = remain_count;
                                                      size_t span_count) {
#if ENABLE_THREAD_CACHE
  if (span_count == 1)
    span_cache = &heap->span_cache;
    span_cache = (span_cache_t *)(heap->span_large_cache + (span_count - 2));
  if (span_cache->count) {
    return span_cache->span[--span_cache->count];

                                                      size_t span_count) {
  if (span_count == 1) {
                                                      size_t span_count) {
                                                      size_t span_count) {
#if ENABLE_GLOBAL_CACHE
#if ENABLE_THREAD_CACHE
  size_t wanted_count;
  if (span_count == 1) {
    span_cache = &heap->span_cache;
    span_cache = (span_cache_t *)(heap->span_large_cache + (span_count - 2));
  span_cache->count = _rpmalloc_global_cache_extract_spans(
      span_cache->span, span_count, wanted_count);
  if (span_cache->count) {
    return span_cache->span[--span_cache->count];
  size_t count = _rpmalloc_global_cache_extract_spans(&span, span_count, 1);
  (void)sizeof(span_count);
  (void)sizeof(span_count);
  (void)sizeof(class_idx);
2084#if ENABLE_ADAPTIVE_THREAD_CACHE || ENABLE_STATISTICS
2088 if (current_count > (
uint32_t)atomic_load32(&heap->span_use[idx].high))
2089 atomic_store32(&heap->span_use[idx].high, (int32_t)current_count);
2091 heap->size_class_use[class_idx].spans_peak);
2100 size_t span_count,
uint32_t class_idx) {
2102#if ENABLE_THREAD_CACHE
2103 if (heap_size_class && heap_size_class->
cache) {
2104 span = heap_size_class->
cache;
2105 heap_size_class->
cache =
2106 (heap->span_cache.count
2107 ? heap->span_cache.span[--heap->span_cache.count]
2113 (void)
sizeof(class_idx);
2115 size_t base_span_count = span_count;
2116 size_t limit_span_count =
2117 (span_count > 2) ? (span_count + (span_count >> 1)) : span_count;
2146 }
while (span_count <= limit_span_count);
2167#if RPMALLOC_FIRST_CLASS_HEAPS
2171 (void)
sizeof(first_class);
2184 size_t heap_size =
sizeof(
heap_t);
2185 size_t aligned_heap_size = 16 * ((heap_size + 15) / 16);
2186 size_t request_heap_count = 16;
2187 size_t heap_span_count = ((aligned_heap_size * request_heap_count) +
2191 size_t span_count = heap_span_count;
2203 size_t possible_heap_count =
2204 (block_size -
sizeof(
span_t)) / aligned_heap_size;
2205 if (possible_heap_count >= (request_heap_count * 16))
2206 request_heap_count *= 16;
2207 else if (possible_heap_count < request_heap_count)
2208 request_heap_count = possible_heap_count;
2209 heap_span_count = ((aligned_heap_size * request_heap_count) +
2214 size_t align_offset = 0;
2229 size_t num_heaps = remain_size / aligned_heap_size;
2230 if (num_heaps < request_heap_count)
2231 num_heaps = request_heap_count;
2234 while (num_heaps > 1) {
2242 if (span_count > heap_span_count) {
2244 size_t remain_count = span_count - heap_span_count;
2245 size_t reserve_count =
2252 if (remain_count > reserve_count) {
2256 reserve_count = remain_count - reserve_count;
2265 heap_t *heap = *heap_list;
2275 if (first_class == 0)
2277#if RPMALLOC_FIRST_CLASS_HEAPS
2290 int release_cache) {
2296 if (release_cache || heap->
finalize) {
2297#if ENABLE_THREAD_CACHE
2301 span_cache = &heap->span_cache;
2303 span_cache = (
span_cache_t *)(heap->span_large_cache + (iclass - 1));
2304 if (!span_cache->
count)
2306#if ENABLE_GLOBAL_CACHE
2308 for (
size_t ispan = 0; ispan < span_cache->
count; ++ispan)
2316 _rpmalloc_global_cache_insert_spans(span_cache->
span, iclass + 1,
2320 for (
size_t ispan = 0; ispan < span_cache->
count; ++ispan)
2323 span_cache->
count = 0;
2331#if ENABLE_STATISTICS
2334 "Still active heaps during finalization");
2381#if RPMALLOC_FIRST_CLASS_HEAPS
2382 list = &heap->full_span[iclass];
2394#if ENABLE_THREAD_CACHE
2398 span_cache = &heap->span_cache;
2400 span_cache = (
span_cache_t *)(heap->span_large_cache + (iclass - 1));
2401 for (
size_t ispan = 0; ispan < span_cache->
count; ++ispan)
2403 span_cache->
count = 0;
2407 "Heaps still active during finalization");
  void *block = *list;
  *list = *((void **)block);
2431 "Span block count corrupted");
2433 "Internal failure");
2453 "Span block count corrupted");
2469#if RPMALLOC_FIRST_CLASS_HEAPS
2540#if RPMALLOC_FIRST_CLASS_HEAPS
2556 size_t align_offset = 0;
2569#if RPMALLOC_FIRST_CLASS_HEAPS
2594#if ENABLE_VALIDATE_ARGS
2595 if ((size + alignment) < size) {
2599 if (alignment & (alignment - 1)) {
2614 "Failed alignment calculation");
2615 if (multiple_size <= (size + alignment))
2620 size_t align_mask = alignment - 1;
2623 if ((uintptr_t)ptr & align_mask) {
    ptr = (void *)(((uintptr_t)ptr & ~(uintptr_t)align_mask) + alignment);
2641 if (alignment & align_mask) {
2657 if (extra_pages > num_pages)
2658 num_pages = 1 + extra_pages;
2660 size_t original_pages = num_pages;
2662 if (limit_pages < (original_pages * 2))
2663 limit_pages = original_pages * 2;
2665 size_t mapped_size, align_offset;
2679 if ((uintptr_t)ptr & align_mask)
      ptr = (void *)(((uintptr_t)ptr & ~(uintptr_t)align_mask) + alignment);
2687 if (num_pages > limit_pages) {
2701#if RPMALLOC_FIRST_CLASS_HEAPS
2724 "Internal failure");
2728#if RPMALLOC_FIRST_CLASS_HEAPS
  *((void **)block) = free_list;
  int all_deferred_free = (free_count == span->block_count);
2785 if (all_deferred_free) {
2802#if RPMALLOC_FIRST_CLASS_HEAPS
2821 "Span flag corrupted");
2824 "Span flag corrupted");
2827#if RPMALLOC_FIRST_CLASS_HEAPS
2841#if RPMALLOC_FIRST_CLASS_HEAPS
2844#if ENABLE_ADAPTIVE_THREAD_CACHE || ENABLE_STATISTICS
2851#if ENABLE_THREAD_CACHE
2852 const int set_as_reserved =
2853 ((span->
span_count > 1) && (heap->span_cache.count == 0) &&
2856 const int set_as_reserved =
2859 if (set_as_reserved) {
2872 "Master span count corrupted");
2884#if RPMALLOC_FIRST_CLASS_HEAPS
2898#if RPMALLOC_FIRST_CLASS_HEAPS
                                  size_t oldsize, unsigned int flags) {
      memmove(block, p, oldsize);
    if ((current_spans >= num_spans) && (total_size >= (oldsize / 2))) {
        memmove(block, p, oldsize);
    if ((current_pages >= num_pages) && (num_pages >= (current_pages / 2))) {
        memmove(block, p, oldsize);
  size_t lower_bound = oldsize + (oldsize >> 2) + (oldsize >> 3);
      (size > lower_bound) ? size : ((size > oldsize) ? lower_bound : size);
    memcpy(block, p, oldsize < new_size ? oldsize : new_size);
                                          size_t alignment, size_t size,
                                          size_t oldsize, unsigned int flags) {
  if ((usablesize >= size) && !((uintptr_t)ptr & (alignment - 1))) {
    if (no_alloc || (size >= (usablesize / 2)))
    oldsize = usablesize;
    memcpy(block, ptr, oldsize < size ? oldsize : size);
3079 size_t prevclass = iclass;
3080 while (prevclass > 0) {
  SYSTEM_INFO system_info;
  memset(&system_info, 0, sizeof(system_info));
  GetSystemInfo(&system_info);
#if RPMALLOC_CONFIGURABLE
#if defined(__linux__)
  size_t huge_page_size = 0;
  FILE *meminfo = fopen("/proc/meminfo", "r");
  while (!huge_page_size && fgets(line, sizeof(line) - 1, meminfo)) {
    line[sizeof(line) - 1] = 0;
    if (strstr(line, "Hugepagesize:"))
      huge_page_size = (size_t)strtol(line + 13, 0, 10) * 1024;
  if (huge_page_size) {
#elif defined(__FreeBSD__)
  size_t sz = sizeof(rc);
  if (sysctlbyname("vm.pmap.pg_ps_enabled", &rc, &sz, NULL, 0) == 0 &&
    static size_t defsize = 2 * 1024 * 1024;
    size_t sizes[4] = {0};
    if ((nsize = getpagesizes(sizes, 4)) >= 2) {
      for (size_t csize = sizes[nsize]; nsize >= 0 && csize;
           --nsize, csize = sizes[nsize]) {
                        "Invalid page size");
        if (defsize < csize) {
3184#elif defined(__APPLE__) || defined(__NetBSD__)
3199 size_t large_page_minimum = GetLargePageMinimum();
3200 if (large_page_minimum)
3201 OpenProcessToken(GetCurrentProcess(),
3202 TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, &token);
3205 if (LookupPrivilegeValue(0, SE_LOCK_MEMORY_NAME, &luid)) {
3206 TOKEN_PRIVILEGES token_privileges;
3207 memset(&token_privileges, 0,
sizeof(token_privileges));
3208 token_privileges.PrivilegeCount = 1;
3209 token_privileges.Privileges[0].Luid = luid;
3210 token_privileges.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
3211 if (AdjustTokenPrivileges(token, FALSE, &token_privileges, 0, 0, 0)) {
3212 if (GetLastError() == ERROR_SUCCESS)
3227 size_t min_span_size = 256;
3228 size_t max_page_size;
3229#if UINTPTR_MAX > 0xFFFFFFFF
3230 max_page_size = 4096ULL * 1024ULL * 1024ULL;
3232 max_page_size = 4 * 1024 * 1024;
3240 while (page_size_bit != 1) {
3242 page_size_bit >>= 1;
3246#if RPMALLOC_CONFIGURABLE
3253 if (span_size > (256 * 1024))
3254 span_size = (256 * 1024);
3282#if ((defined(__APPLE__) || defined(__HAIKU__)) && ENABLE_PRELOAD) || \
3287#if defined(_WIN32) && (!defined(BUILD_DYNAMIC_LINK) || !BUILD_DYNAMIC_LINK)
3288 fls_key = FlsAlloc(&_rpmalloc_thread_destructor);
3316#if RPMALLOC_FIRST_CLASS_HEAPS
3317 _memory_first_class_orphan_heaps = 0;
3319#if ENABLE_STATISTICS
3322 _mapped_pages_peak = 0;
3328 _huge_pages_peak = 0;
3365#if ENABLE_GLOBAL_CACHE
3368 _rpmalloc_global_cache_finalize(&_memory_span_cache[iclass]);
3371#if (defined(__APPLE__) || defined(__HAIKU__)) && ENABLE_PRELOAD
3372 pthread_key_delete(_memory_thread_heap);
3374#if defined(_WIN32) && (!defined(BUILD_DYNAMIC_LINK) || !BUILD_DYNAMIC_LINK)
3378#if ENABLE_STATISTICS
3381 rpmalloc_assert(atomic_load32(&_mapped_pages) == 0,
"Memory leak detected");
3383 "Memory leak detected");
3396#if defined(_WIN32) && (!defined(BUILD_DYNAMIC_LINK) || !BUILD_DYNAMIC_LINK)
3397 FlsSetValue(fls_key, heap);
3409#if defined(_WIN32) && (!defined(BUILD_DYNAMIC_LINK) || !BUILD_DYNAMIC_LINK)
3410 FlsSetValue(fls_key, 0);
3423#if ENABLE_VALIDATE_ARGS
3424 if (size >= MAX_ALLOC_SIZE) {
3437#if ENABLE_VALIDATE_ARGS
3439 int err = SizeTMult(num, size, &total);
3440 if ((err != S_OK) || (total >= MAX_ALLOC_SIZE)) {
3445 int err = __builtin_umull_overflow(num, size, &total);
3446 if (err || (total >= MAX_ALLOC_SIZE)) {
3457 memset(
block, 0, total);
3462#if ENABLE_VALIDATE_ARGS
3463 if (size >= MAX_ALLOC_SIZE) {
3473 size_t size,
size_t oldsize,
3474 unsigned int flags) {
3475#if ENABLE_VALIDATE_ARGS
3494#if ENABLE_VALIDATE_ARGS
3496 int err = SizeTMult(num, size, &total);
3497 if ((err != S_OK) || (total >= MAX_ALLOC_SIZE)) {
3502 int err = __builtin_umull_overflow(num, size, &total);
3503 if (err || (total >= MAX_ALLOC_SIZE)) {
  memset(block, 0, total);
  return *memptr ? 0 : ENOMEM;
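// Illustrative usage sketch (not part of the upstream rpmalloc source):
// rpposix_memalign follows the posix_memalign contract - alignment must be a
// power of two and a multiple of sizeof(void *), the block is returned
// through memptr, and the return value is 0 or an errno value.
static int example_posix_memalign_usage(void) {
  void *p = 0;
  int err = rpposix_memalign(&p, 64, 1024); // 1 KiB block, 64-byte aligned
  if (err == 0)
    rpfree(p);
  return err;
}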
3551 free_count += (block_count - span->
used_count);
3557#if ENABLE_THREAD_CACHE
3561 span_cache = &heap->span_cache;
3563 span_cache = (
span_cache_t *)(heap->span_large_cache + (iclass - 1));
3575#if ENABLE_STATISTICS
3580 stats->span_use[iclass].current =
3581 (size_t)atomic_load32(&heap->span_use[iclass].current);
3582 stats->span_use[iclass].peak =
3583 (size_t)atomic_load32(&heap->span_use[iclass].high);
3584 stats->span_use[iclass].to_global =
3585 (size_t)atomic_load32(&heap->span_use[iclass].spans_to_global);
3586 stats->span_use[iclass].from_global =
3587 (size_t)atomic_load32(&heap->span_use[iclass].spans_from_global);
3588 stats->span_use[iclass].to_cache =
3589 (size_t)atomic_load32(&heap->span_use[iclass].spans_to_cache);
3590 stats->span_use[iclass].from_cache =
3591 (size_t)atomic_load32(&heap->span_use[iclass].spans_from_cache);
3592 stats->span_use[iclass].to_reserved =
3593 (size_t)atomic_load32(&heap->span_use[iclass].spans_to_reserved);
3594 stats->span_use[iclass].from_reserved =
3595 (size_t)atomic_load32(&heap->span_use[iclass].spans_from_reserved);
3596 stats->span_use[iclass].map_calls =
3597 (size_t)atomic_load32(&heap->span_use[iclass].spans_map_calls);
3600 stats->size_use[iclass].alloc_current =
3601 (size_t)atomic_load32(&heap->size_class_use[iclass].alloc_current);
3602 stats->size_use[iclass].alloc_peak =
3603 (size_t)heap->size_class_use[iclass].alloc_peak;
3604 stats->size_use[iclass].alloc_total =
3605 (size_t)atomic_load32(&heap->size_class_use[iclass].alloc_total);
3606 stats->size_use[iclass].free_total =
3607 (size_t)atomic_load32(&heap->size_class_use[iclass].free_total);
3608 stats->size_use[iclass].spans_to_cache =
3609 (size_t)atomic_load32(&heap->size_class_use[iclass].spans_to_cache);
3610 stats->size_use[iclass].spans_from_cache =
3611 (size_t)atomic_load32(&heap->size_class_use[iclass].spans_from_cache);
3612 stats->size_use[iclass].spans_from_reserved = (size_t)atomic_load32(
3613 &heap->size_class_use[iclass].spans_from_reserved);
3614 stats->size_use[iclass].map_calls =
3615 (size_t)atomic_load32(&heap->size_class_use[iclass].spans_map_calls);
3622#if ENABLE_STATISTICS
3625 stats->mapped_total =
3627 stats->unmapped_total =
3633#if ENABLE_GLOBAL_CACHE
3639#if ENABLE_UNLIMITED_CACHE
3641 while (current_span) {
3643 current_span = current_span->
next;
3652#if ENABLE_STATISTICS
static void _memory_heap_dump_statistics(heap_t *heap, void *file) {
  fprintf(file, "Heap %d stats:\n", heap->id);
  fprintf(file, "Class CurAlloc PeakAlloc TotAlloc TotFree BlkSize "
                "BlkCount SpansCur SpansPeak PeakAllocMiB ToCacheMiB "
                "FromCacheMiB FromReserveMiB MmapCalls\n");
    if (!atomic_load32(&heap->size_class_use[iclass].alloc_total))
        "%3u: %10u %10u %10u %10u %8u %8u %8d %9d %13zu %11zu %12zu %14zu "
        atomic_load32(&heap->size_class_use[iclass].alloc_current),
        heap->size_class_use[iclass].alloc_peak,
        atomic_load32(&heap->size_class_use[iclass].alloc_total),
        atomic_load32(&heap->size_class_use[iclass].free_total),
        atomic_load32(&heap->size_class_use[iclass].spans_current),
        heap->size_class_use[iclass].spans_peak,
        ((size_t)heap->size_class_use[iclass].alloc_peak *
            (size_t)(1024 * 1024),
        ((size_t)atomic_load32(&heap->size_class_use[iclass].spans_to_cache) *
            (size_t)(1024 * 1024),
        ((size_t)atomic_load32(&heap->size_class_use[iclass].spans_from_cache) *
            (size_t)(1024 * 1024),
        ((size_t)atomic_load32(
             &heap->size_class_use[iclass].spans_from_reserved) *
            (size_t)(1024 * 1024),
        atomic_load32(&heap->size_class_use[iclass].spans_map_calls));
  fprintf(file, "Spans Current Peak Deferred PeakMiB Cached ToCacheMiB "
                "FromCacheMiB ToReserveMiB FromReserveMiB ToGlobalMiB "
                "FromGlobalMiB MmapCalls\n");
    if (!atomic_load32(&heap->span_use[iclass].high) &&
        !atomic_load32(&heap->span_use[iclass].spans_map_calls))
        "%4u: %8d %8u %8u %8zu %7u %11zu %12zu %12zu %14zu %11zu %13zu %10u\n",
        (uint32_t)(iclass + 1), atomic_load32(&heap->span_use[iclass].current),
        atomic_load32(&heap->span_use[iclass].high),
        atomic_load32(&heap->span_use[iclass].spans_deferred),
        ((size_t)atomic_load32(&heap->span_use[iclass].high) *
            (size_t)(1024 * 1024),
        (unsigned int)(!iclass ? heap->span_cache.count
                               : heap->span_large_cache[iclass - 1].count),
        ((size_t)atomic_load32(&heap->span_use[iclass].spans_to_cache) *
            (size_t)(1024 * 1024),
        ((size_t)atomic_load32(&heap->span_use[iclass].spans_from_cache) *
            (size_t)(1024 * 1024),
        0, (size_t)0, (size_t)0,
        ((size_t)atomic_load32(&heap->span_use[iclass].spans_to_reserved) *
            (size_t)(1024 * 1024),
        ((size_t)atomic_load32(&heap->span_use[iclass].spans_from_reserved) *
            (size_t)(1024 * 1024),
        ((size_t)atomic_load32(&heap->span_use[iclass].spans_to_global) *
            (size_t)(1024 * 1024),
        ((size_t)atomic_load32(&heap->span_use[iclass].spans_from_global) *
            (size_t)(1024 * 1024),
        atomic_load32(&heap->span_use[iclass].spans_map_calls));
  fprintf(file, "ThreadToGlobalMiB GlobalToThreadMiB\n");
          file, "%17zu %17zu\n",
          (size_t)atomic_load64(&heap->thread_to_global) / (size_t)(1024 * 1024),
          (size_t)atomic_load64(&heap->global_to_thread) / (size_t)(1024 * 1024));

#if ENABLE_STATISTICS
    if (!atomic_load32(&heap->size_class_use[iclass].alloc_total)) {
          !atomic_load32(&heap->size_class_use[iclass].free_total),
          "Heap statistics counter mismatch");
          !atomic_load32(&heap->size_class_use[iclass].spans_map_calls),
          "Heap statistics counter mismatch");
    if (!atomic_load32(&heap->span_use[iclass].high) &&
        !atomic_load32(&heap->span_use[iclass].spans_map_calls))
    _memory_heap_dump_statistics(heap, file);

  fprintf(file, "Global stats:\n");
  size_t huge_current =
  fprintf(file, "HugeCurrentMiB HugePeakMiB\n");
  fprintf(file, "%14zu %11zu\n", huge_current / (size_t)(1024 * 1024),
          huge_peak / (size_t)(1024 * 1024));

#if ENABLE_GLOBAL_CACHE
  fprintf(file, "GlobalCacheMiB\n");
    size_t global_overflow_cache = 0;
    if (global_cache || global_overflow_cache || cache->insert_count ||
        cache->extract_count)
              "%4zu: %8zuMiB (%8zuMiB overflow) %14zu insert %14zu extract\n",
              iclass + 1, global_cache / (size_t)(1024 * 1024),
              global_overflow_cache / (size_t)(1024 * 1024),
              cache->insert_count, cache->extract_count);

  size_t mapped_total =
  size_t unmapped_total =
          "MappedMiB MappedOSMiB MappedPeakMiB MappedTotalMiB UnmappedTotalMiB\n");
  fprintf(file, "%9zu %11zu %13zu %14zu %16zu\n",
          mapped / (size_t)(1024 * 1024), mapped_os / (size_t)(1024 * 1024),
          mapped_peak / (size_t)(1024 * 1024),
          mapped_total / (size_t)(1024 * 1024),
          unmapped_total / (size_t)(1024 * 1024));

  fprintf(file, "\n");
  int64_t deallocated = atomic_load64(&_deallocation_counter);
  fprintf(file, "Allocation count: %lli\n", allocated);
  fprintf(file, "Deallocation count: %lli\n", deallocated);
  fprintf(file, "Current allocations: %lli\n", (allocated - deallocated));
  fprintf(file, "Master spans: %d\n", atomic_load32(&_master_spans));
  fprintf(file, "Dangling master spans: %d\n",
          atomic_load32(&_unmapped_master_spans));
#if RPMALLOC_FIRST_CLASS_HEAPS

extern inline rpmalloc_heap_t *rpmalloc_heap_acquire(void) {
extern inline void rpmalloc_heap_release(rpmalloc_heap_t *heap) {
rpmalloc_heap_alloc(rpmalloc_heap_t *heap, size_t size) {
#if ENABLE_VALIDATE_ARGS
  if (size >= MAX_ALLOC_SIZE) {
rpmalloc_heap_aligned_alloc(rpmalloc_heap_t *heap, size_t alignment,
#if ENABLE_VALIDATE_ARGS
  if (size >= MAX_ALLOC_SIZE) {
rpmalloc_heap_calloc(rpmalloc_heap_t *heap, size_t num, size_t size) {
  return rpmalloc_heap_aligned_calloc(heap, 0, num, size);
rpmalloc_heap_aligned_calloc(rpmalloc_heap_t *heap, size_t alignment,
                             size_t num, size_t size) {
#if ENABLE_VALIDATE_ARGS
  int err = SizeTMult(num, size, &total);
  if ((err != S_OK) || (total >= MAX_ALLOC_SIZE)) {
  int err = __builtin_umull_overflow(num, size, &total);
  if (err || (total >= MAX_ALLOC_SIZE)) {
  memset(block, 0, total);
rpmalloc_heap_realloc(rpmalloc_heap_t *heap, void *ptr, size_t size,
                      unsigned int flags) {
#if ENABLE_VALIDATE_ARGS
  if (size >= MAX_ALLOC_SIZE) {
rpmalloc_heap_aligned_realloc(rpmalloc_heap_t *heap, void *ptr,
                              size_t alignment, size_t size,
                              unsigned int flags) {
#if ENABLE_VALIDATE_ARGS
extern inline void rpmalloc_heap_free(rpmalloc_heap_t *heap, void *ptr) {
extern inline void rpmalloc_heap_free_all(rpmalloc_heap_t *heap) {
    span = heap->size_class[iclass].partial_span;
      next_span = span->next;
    heap->size_class[iclass].partial_span = 0;
    span = heap->full_span[iclass];
      next_span = span->next;
      heap->size_class[iclass].cache = 0;
  memset(heap->size_class, 0, sizeof(heap->size_class));
  memset(heap->full_span, 0, sizeof(heap->full_span));
  span = heap->large_huge_span;
    next_span = span->next;
  heap->large_huge_span = 0;
  heap->full_span_count = 0;
#if ENABLE_THREAD_CACHE
      span_cache = &heap->span_cache;
      span_cache = (span_cache_t *)(heap->span_large_cache + (iclass - 1));
    if (!span_cache->count)
#if ENABLE_GLOBAL_CACHE
    _rpmalloc_global_cache_insert_spans(span_cache->span, iclass + 1,
    for (size_t ispan = 0; ispan < span_cache->count; ++ispan)
    span_cache->count = 0;
#if ENABLE_STATISTICS
extern inline void rpmalloc_heap_thread_set_current(rpmalloc_heap_t *heap) {
  if (prev_heap != heap) {
      rpmalloc_heap_release(prev_heap);
extern inline rpmalloc_heap_t *rpmalloc_get_heap_for_ptr(void *ptr) {

#if ENABLE_PRELOAD || ENABLE_OVERRIDE
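// Illustrative usage sketch (not part of the upstream rpmalloc source): with
// RPMALLOC_FIRST_CLASS_HEAPS enabled, a heap acts as an explicit arena -
// everything allocated from it can be dropped in one call.
#if RPMALLOC_FIRST_CLASS_HEAPS
static void example_first_class_heap_usage(void) {
  rpmalloc_heap_t *heap = rpmalloc_heap_acquire();
  void *a = rpmalloc_heap_alloc(heap, 128);
  void *b = rpmalloc_heap_aligned_alloc(heap, 64, 4096);
  (void)a;
  (void)b;
  rpmalloc_heap_free_all(heap); // releases a and b in one sweep
  rpmalloc_heap_release(heap);  // the heap handle is invalid after this
}
#endif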