Allocator.h
//===- Allocator.h - Simple memory allocation abstraction -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file defines the BumpPtrAllocator interface. BumpPtrAllocator conforms
/// to the LLVM "Allocator" concept and is similar to MallocAllocator, but
/// objects cannot be deallocated. Their lifetime is tied to the lifetime of the
/// allocator.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_ALLOCATOR_H
#define LLVM_SUPPORT_ALLOCATOR_H

#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/AllocatorBase.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/MemAlloc.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <type_traits>
#include <utility>

namespace llvm {

namespace detail {

// We call out to an external function to actually print the message as the
// printing code uses Allocator.h in its implementation.
void printBumpPtrAllocatorStats(unsigned NumSlabs, size_t BytesAllocated,
                                size_t TotalMemory);

} // end namespace detail

/// Allocate memory in an ever growing pool, as if by bump-pointer.
///
/// This isn't strictly a bump-pointer allocator as it uses backing slabs of
/// memory rather than relying on a boundless contiguous heap. However, it has
/// bump-pointer semantics in that it is a monotonically growing pool of memory
/// where every allocation is found by merely allocating the next N bytes in
/// the slab, or the next N bytes in the next slab.
///
/// Note that this also has a threshold for forcing allocations above a certain
/// size into their own slab.
///
/// The BumpPtrAllocatorImpl template defaults to using a MallocAllocator
/// object, which wraps malloc, to allocate memory, but it can be changed to
/// use a custom allocator.
///
/// The GrowthDelay specifies after how many allocated slabs the allocator
/// increases the size of the slabs.
template <typename AllocatorT = MallocAllocator, size_t SlabSize = 4096,
          size_t SizeThreshold = SlabSize, size_t GrowthDelay = 128>
class BumpPtrAllocatorImpl
    : public AllocatorBase<BumpPtrAllocatorImpl<AllocatorT, SlabSize,
                                                SizeThreshold, GrowthDelay>>,
      private AllocatorT {
public:
  static_assert(SizeThreshold <= SlabSize,
                "The SizeThreshold must be at most the SlabSize to ensure "
                "that objects larger than a slab go into their own memory "
                "allocation.");
  static_assert(GrowthDelay > 0,
                "GrowthDelay must be at least 1 which already increases the "
                "slab size after each allocated slab.");

  BumpPtrAllocatorImpl() = default;

  template <typename T>
  BumpPtrAllocatorImpl(T &&Allocator)
      : AllocatorT(std::forward<T &&>(Allocator)) {}

  // Manually implement a move constructor as we must clear the old allocator's
  // slabs as a matter of correctness.
  BumpPtrAllocatorImpl(BumpPtrAllocatorImpl &&Old)
      : AllocatorT(static_cast<AllocatorT &&>(Old)), CurPtr(Old.CurPtr),
        End(Old.End), Slabs(std::move(Old.Slabs)),
        CustomSizedSlabs(std::move(Old.CustomSizedSlabs)),
        BytesAllocated(Old.BytesAllocated), RedZoneSize(Old.RedZoneSize) {
    Old.CurPtr = Old.End = nullptr;
    Old.BytesAllocated = 0;
    Old.Slabs.clear();
    Old.CustomSizedSlabs.clear();
  }

  ~BumpPtrAllocatorImpl() {
    DeallocateSlabs(Slabs.begin(), Slabs.end());
    DeallocateCustomSizedSlabs();
  }

  BumpPtrAllocatorImpl &operator=(BumpPtrAllocatorImpl &&RHS) {
    DeallocateSlabs(Slabs.begin(), Slabs.end());
    DeallocateCustomSizedSlabs();

    CurPtr = RHS.CurPtr;
    End = RHS.End;
    BytesAllocated = RHS.BytesAllocated;
    RedZoneSize = RHS.RedZoneSize;
    Slabs = std::move(RHS.Slabs);
    CustomSizedSlabs = std::move(RHS.CustomSizedSlabs);
    AllocatorT::operator=(static_cast<AllocatorT &&>(RHS));

    RHS.CurPtr = RHS.End = nullptr;
    RHS.BytesAllocated = 0;
    RHS.Slabs.clear();
    RHS.CustomSizedSlabs.clear();
    return *this;
  }

  /// Deallocate all but the current slab and reset the current pointer
  /// to the beginning of it, freeing all memory allocated so far.
  void Reset() {
    // Deallocate all but the first slab, and deallocate all custom-sized slabs.
    DeallocateCustomSizedSlabs();
    CustomSizedSlabs.clear();

    if (Slabs.empty())
      return;

    // Reset the state.
    BytesAllocated = 0;
    CurPtr = (char *)Slabs.front();
    End = CurPtr + SlabSize;

    __asan_poison_memory_region(*Slabs.begin(), computeSlabSize(0));
    DeallocateSlabs(std::next(Slabs.begin()), Slabs.end());
    Slabs.erase(std::next(Slabs.begin()), Slabs.end());
  }

  /// Allocate space at the specified alignment.
  LLVM_ATTRIBUTE_RETURNS_NONNULL LLVM_ATTRIBUTE_RETURNS_NOALIAS void *
  Allocate(size_t Size, Align Alignment) {
    // Keep track of how many bytes we've allocated.
    BytesAllocated += Size;

    size_t Adjustment = offsetToAlignedAddr(CurPtr, Alignment);
    assert(Adjustment + Size >= Size && "Adjustment + Size must not overflow");

    size_t SizeToAllocate = Size;
#if LLVM_ADDRESS_SANITIZER_BUILD
    // Add trailing bytes as a "red zone" under ASan.
    SizeToAllocate += RedZoneSize;
#endif

    // Check if we have enough space.
    if (Adjustment + SizeToAllocate <= size_t(End - CurPtr)) {
      char *AlignedPtr = CurPtr + Adjustment;
      CurPtr = AlignedPtr + SizeToAllocate;
      // Update the allocation point of this memory block in MemorySanitizer.
      // Without this, MemorySanitizer messages for values originated from here
      // will point to the allocation of the entire slab.
      __msan_allocated_memory(AlignedPtr, Size);
      // Similarly, tell ASan about this space.
      __asan_unpoison_memory_region(AlignedPtr, Size);
      return AlignedPtr;
    }

    // If Size is really big, allocate a separate slab for it.
    size_t PaddedSize = SizeToAllocate + Alignment.value() - 1;
    if (PaddedSize > SizeThreshold) {
      void *NewSlab =
          AllocatorT::Allocate(PaddedSize, alignof(std::max_align_t));
      // We own the new slab and don't want anyone reading anything other than
      // pieces returned from this method. So poison the whole slab.
      __asan_poison_memory_region(NewSlab, PaddedSize);
      CustomSizedSlabs.push_back(std::make_pair(NewSlab, PaddedSize));

      uintptr_t AlignedAddr = alignAddr(NewSlab, Alignment);
      assert(AlignedAddr + Size <= (uintptr_t)NewSlab + PaddedSize);
      char *AlignedPtr = (char*)AlignedAddr;
      __msan_allocated_memory(AlignedPtr, Size);
      __asan_unpoison_memory_region(AlignedPtr, Size);
      return AlignedPtr;
    }

    // Otherwise, start a new slab and try again.
    StartNewSlab();
    uintptr_t AlignedAddr = alignAddr(CurPtr, Alignment);
    assert(AlignedAddr + SizeToAllocate <= (uintptr_t)End &&
           "Unable to allocate memory!");
    char *AlignedPtr = (char*)AlignedAddr;
    CurPtr = AlignedPtr + SizeToAllocate;
    __msan_allocated_memory(AlignedPtr, Size);
    __asan_unpoison_memory_region(AlignedPtr, Size);
    return AlignedPtr;
  }
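
  // For illustration of the fast path above: if CurPtr happened to sit at
  // address 0x1004 and Alignment were 16, offsetToAlignedAddr would yield an
  // Adjustment of 12, the returned pointer would be 0x1010, and CurPtr would
  // advance to 0x1010 + SizeToAllocate. The slower paths are only taken when
  // fewer than Adjustment + SizeToAllocate bytes remain in the current slab.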

  inline LLVM_ATTRIBUTE_RETURNS_NONNULL LLVM_ATTRIBUTE_RETURNS_NOALIAS void *
  Allocate(size_t Size, size_t Alignment) {
    assert(Alignment > 0 && "0-byte alignment is not allowed. Use 1 instead.");
    return Allocate(Size, Align(Alignment));
  }

  // Pull in base class overloads.
  using AllocatorBase<BumpPtrAllocatorImpl>::Allocate;

  // Bump pointer allocators are expected to never free their storage; and
  // clients expect pointers to remain valid for non-dereferencing uses even
  // after deallocation.
  void Deallocate(const void *Ptr, size_t Size, size_t /*Alignment*/) {
    __asan_poison_memory_region(Ptr, Size);
  }

  // Pull in base class overloads.
  using AllocatorBase<BumpPtrAllocatorImpl>::Deallocate;

  size_t GetNumSlabs() const { return Slabs.size() + CustomSizedSlabs.size(); }

  /// \return An index uniquely and reproducibly identifying
  /// an input pointer \p Ptr in the given allocator.
  /// The returned value is negative iff the object is inside a custom-size
  /// slab.
  /// Returns an empty optional if the pointer is not found in the allocator.
  llvm::Optional<int64_t> identifyObject(const void *Ptr) {
    const char *P = static_cast<const char *>(Ptr);
    int64_t InSlabIdx = 0;
    for (size_t Idx = 0, E = Slabs.size(); Idx < E; Idx++) {
      const char *S = static_cast<const char *>(Slabs[Idx]);
      if (P >= S && P < S + computeSlabSize(Idx))
        return InSlabIdx + static_cast<int64_t>(P - S);
      InSlabIdx += static_cast<int64_t>(computeSlabSize(Idx));
    }

    // Use negative index to denote custom sized slabs.
    int64_t InCustomSizedSlabIdx = -1;
    for (size_t Idx = 0, E = CustomSizedSlabs.size(); Idx < E; Idx++) {
      const char *S = static_cast<const char *>(CustomSizedSlabs[Idx].first);
      size_t Size = CustomSizedSlabs[Idx].second;
      if (P >= S && P < S + Size)
        return InCustomSizedSlabIdx - static_cast<int64_t>(P - S);
      InCustomSizedSlabIdx -= static_cast<int64_t>(Size);
    }
    return None;
  }
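
  // For illustration, with the default 4096-byte slabs and no slab growth yet:
  // a pointer 100 bytes into the second regular slab maps to 4096 + 100 = 4196,
  // while a pointer 5 bytes into the first custom-sized slab maps to
  // -1 - 5 = -6. Regular slabs therefore always produce non-negative indices
  // and custom-sized slabs always produce negative ones.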

  /// A wrapper around identifyObject that additionally asserts that
  /// the object is indeed within the allocator.
  /// \return An index uniquely and reproducibly identifying
  /// an input pointer \p Ptr in the given allocator.
  int64_t identifyKnownObject(const void *Ptr) {
    Optional<int64_t> Out = identifyObject(Ptr);
    assert(Out && "Wrong allocator used");
    return *Out;
  }

  /// A wrapper around identifyKnownObject. Accepts type information
  /// about the object and produces a smaller identifier by relying on
  /// the alignment information. Note that sub-classes may have different
  /// alignment, so the most base class should be passed as template parameter
  /// in order to obtain correct results. For that reason automatic template
  /// parameter deduction is disabled.
  /// \return An index uniquely and reproducibly identifying
  /// an input pointer \p Ptr in the given allocator. This identifier is
  /// different from the ones produced by identifyObject and
  /// identifyAlignedObject.
  template <typename T>
  int64_t identifyKnownAlignedObject(const void *Ptr) {
    int64_t Out = identifyKnownObject(Ptr);
    assert(Out % alignof(T) == 0 && "Wrong alignment information");
    return Out / alignof(T);
  }
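
  // For illustration: if identifyKnownObject(Ptr) returns 64 and alignof(T)
  // is 8, the smaller identifier returned here is 64 / 8 = 8.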

  size_t getTotalMemory() const {
    size_t TotalMemory = 0;
    for (auto I = Slabs.begin(), E = Slabs.end(); I != E; ++I)
      TotalMemory += computeSlabSize(std::distance(Slabs.begin(), I));
    for (const auto &PtrAndSize : CustomSizedSlabs)
      TotalMemory += PtrAndSize.second;
    return TotalMemory;
  }

  size_t getBytesAllocated() const { return BytesAllocated; }

  void setRedZoneSize(size_t NewSize) {
    RedZoneSize = NewSize;
  }

  void PrintStats() const {
    detail::printBumpPtrAllocatorStats(Slabs.size(), BytesAllocated,
                                       getTotalMemory());
  }

private:
  /// The current pointer into the current slab.
  ///
  /// This points to the next free byte in the slab.
  char *CurPtr = nullptr;

  /// The end of the current slab.
  char *End = nullptr;

  /// The slabs allocated so far.
  SmallVector<void *, 4> Slabs;

  /// Custom-sized slabs allocated for too-large allocation requests.
  SmallVector<std::pair<void *, size_t>, 0> CustomSizedSlabs;

  /// How many bytes we've allocated.
  ///
  /// Used so that we can compute how much space was wasted.
  size_t BytesAllocated = 0;

  /// The number of bytes to put between allocations when running under
  /// a sanitizer.
  size_t RedZoneSize = 1;

  static size_t computeSlabSize(unsigned SlabIdx) {
    // Scale the actual allocated slab size based on the number of slabs
    // allocated. Every GrowthDelay slabs allocated, we double
    // the allocated size to reduce allocation frequency, but saturate at
    // multiplying the slab size by 2^30.
    return SlabSize *
           ((size_t)1 << std::min<size_t>(30, SlabIdx / GrowthDelay));
  }
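
  // For illustration, with the default SlabSize = 4096 and GrowthDelay = 128:
  // slabs 0-127 are 4096 bytes each, slabs 128-255 are 8192 bytes, slab 256
  // starts a run of 16384-byte slabs, and the scaling factor is capped at
  // 2^30.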

  /// Allocate a new slab and move the bump pointers over into the new
  /// slab, modifying CurPtr and End.
  void StartNewSlab() {
    size_t AllocatedSlabSize = computeSlabSize(Slabs.size());

    void *NewSlab =
        AllocatorT::Allocate(AllocatedSlabSize, alignof(std::max_align_t));
    // We own the new slab and don't want anyone reading anything other than
    // pieces returned from this method. So poison the whole slab.
    __asan_poison_memory_region(NewSlab, AllocatedSlabSize);

    Slabs.push_back(NewSlab);
    CurPtr = (char *)(NewSlab);
    End = ((char *)NewSlab) + AllocatedSlabSize;
  }

  /// Deallocate a sequence of slabs.
  void DeallocateSlabs(SmallVectorImpl<void *>::iterator I,
                       SmallVectorImpl<void *>::iterator E) {
    for (; I != E; ++I) {
      size_t AllocatedSlabSize =
          computeSlabSize(std::distance(Slabs.begin(), I));
      AllocatorT::Deallocate(*I, AllocatedSlabSize, alignof(std::max_align_t));
    }
  }

  /// Deallocate all memory for custom sized slabs.
  void DeallocateCustomSizedSlabs() {
    for (auto &PtrAndSize : CustomSizedSlabs) {
      void *Ptr = PtrAndSize.first;
      size_t Size = PtrAndSize.second;
      AllocatorT::Deallocate(Ptr, Size, alignof(std::max_align_t));
    }
  }

  template <typename T> friend class SpecificBumpPtrAllocator;
};

/// The standard BumpPtrAllocator which just uses the default template
/// parameters.
typedef BumpPtrAllocatorImpl<> BumpPtrAllocator;

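// A minimal usage sketch (names other than the allocator's are illustrative):
// allocations are cheap pointer bumps, there is no per-object free, and all
// memory is reclaimed when the allocator is destroyed or Reset().
//
//   llvm::BumpPtrAllocator Alloc;
//   int *I = Alloc.Allocate<int>();     // uninitialized storage for one int
//   void *Buf = Alloc.Allocate(128, 8); // 128 bytes, 8-byte aligned
//   Alloc.Reset();                      // recycle all slabs at once
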
/// A BumpPtrAllocator that allows only elements of a specific type to be
/// allocated.
///
/// This allows calling the destructor in DestroyAll() and when the allocator is
/// destroyed.
template <typename T> class SpecificBumpPtrAllocator {
  BumpPtrAllocator Allocator;

public:
  SpecificBumpPtrAllocator() {
    // Because SpecificBumpPtrAllocator walks the memory to call destructors,
    // it can't have red zones between allocations.
    Allocator.setRedZoneSize(0);
  }
  SpecificBumpPtrAllocator(SpecificBumpPtrAllocator &&Old)
      : Allocator(std::move(Old.Allocator)) {}
  ~SpecificBumpPtrAllocator() { DestroyAll(); }

  SpecificBumpPtrAllocator &operator=(SpecificBumpPtrAllocator &&RHS) {
    Allocator = std::move(RHS.Allocator);
    return *this;
  }

  /// Call the destructor of each allocated object and deallocate all but the
  /// current slab and reset the current pointer to the beginning of it, freeing
  /// all memory allocated so far.
  void DestroyAll() {
    auto DestroyElements = [](char *Begin, char *End) {
      assert(Begin == (char *)alignAddr(Begin, Align::Of<T>()));
      for (char *Ptr = Begin; Ptr + sizeof(T) <= End; Ptr += sizeof(T))
        reinterpret_cast<T *>(Ptr)->~T();
    };

    for (auto I = Allocator.Slabs.begin(), E = Allocator.Slabs.end(); I != E;
         ++I) {
      size_t AllocatedSlabSize = BumpPtrAllocator::computeSlabSize(
          std::distance(Allocator.Slabs.begin(), I));
      char *Begin = (char *)alignAddr(*I, Align::Of<T>());
      char *End = *I == Allocator.Slabs.back() ? Allocator.CurPtr
                                               : (char *)*I + AllocatedSlabSize;

      DestroyElements(Begin, End);
    }

    for (auto &PtrAndSize : Allocator.CustomSizedSlabs) {
      void *Ptr = PtrAndSize.first;
      size_t Size = PtrAndSize.second;
      DestroyElements((char *)alignAddr(Ptr, Align::Of<T>()),
                      (char *)Ptr + Size);
    }

    Allocator.Reset();
  }

  /// Allocate space for an array of objects without constructing them.
  T *Allocate(size_t num = 1) { return Allocator.Allocate<T>(num); }
};
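
// A minimal usage sketch (Node is a placeholder type): Allocate() only
// reserves properly aligned storage, so objects are typically constructed
// with placement new, and DestroyAll() later runs ~Node() on every allocated
// slot before the memory is recycled.
//
//   llvm::SpecificBumpPtrAllocator<Node> NodeAlloc;
//   Node *N = new (NodeAlloc.Allocate()) Node();
//   NodeAlloc.DestroyAll(); // also happens automatically on destruction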

} // end namespace llvm

template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold,
          size_t GrowthDelay>
void *
operator new(size_t Size,
             llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize, SizeThreshold,
                                        GrowthDelay> &Allocator) {
  return Allocator.Allocate(Size, std::min((size_t)llvm::NextPowerOf2(Size),
                                           alignof(std::max_align_t)));
}

template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold,
          size_t GrowthDelay>
void operator delete(void *,
                     llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize,
                                                SizeThreshold, GrowthDelay> &) {
}
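
// A minimal usage sketch of the operator new overload above (Widget is a
// placeholder type): the object's storage comes from the bump allocator, so
// there is no matching delete-expression; a non-trivial destructor has to be
// invoked explicitly before the allocator releases the memory.
//
//   llvm::BumpPtrAllocator Alloc;
//   Widget *W = new (Alloc) Widget();
//   W->~Widget(); // only needed if ~Widget() has observable effects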

#endif // LLVM_SUPPORT_ALLOCATOR_H