//===- SectionMemoryManager.cpp - Memory manager for MCJIT/RtDyld *- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the section-based memory manager used by the MCJIT
// execution engine and RuntimeDyld.
//
//===----------------------------------------------------------------------===//

#include "llvm/ExecutionEngine/SectionMemoryManager.h"
#include "llvm/Config/config.h"
#include "llvm/Support/Process.h"

namespace llvm {

bool SectionMemoryManager::hasSpace(const MemoryGroup &MemGroup,
                                    uintptr_t Size) const {
  for (const FreeMemBlock &FreeMB : MemGroup.FreeMem) {
    if (FreeMB.Free.allocatedSize() >= Size)
      return true;
  }
  return false;
}

void SectionMemoryManager::reserveAllocationSpace(
    uintptr_t CodeSize, Align CodeAlign, uintptr_t RODataSize,
    Align RODataAlign, uintptr_t RWDataSize, Align RWDataAlign) {
  if (CodeSize == 0 && RODataSize == 0 && RWDataSize == 0)
    return;

  static const size_t PageSize = sys::Process::getPageSizeEstimate();

  // Code alignment needs to be at least the stub alignment - however, we
  // don't have an easy way to get that here so as a workaround, we assume
  // it's 8, which is the largest value I observed across all platforms.
  constexpr uint64_t StubAlign = 8;
  CodeAlign = Align(std::max(CodeAlign.value(), StubAlign));
  RODataAlign = Align(std::max(RODataAlign.value(), StubAlign));
  RWDataAlign = Align(std::max(RWDataAlign.value(), StubAlign));

  // Get space required for each section. Use the same calculation as
  // allocateSection because we need to be able to satisfy it.
  uint64_t RequiredCodeSize = alignTo(CodeSize, CodeAlign) + CodeAlign.value();
  uint64_t RequiredRODataSize =
      alignTo(RODataSize, RODataAlign) + RODataAlign.value();
  uint64_t RequiredRWDataSize =
      alignTo(RWDataSize, RWDataAlign) + RWDataAlign.value();
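  // Illustrative example (numbers not from the source): RODataSize = 100 with
  // RODataAlign = 8 reserves alignTo(100, 8) + 8 = 112 bytes. The extra
  // alignment's worth of slack matches allocateSection's own sizing, which
  // must still fit the request after rounding the block's base address up.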

  if (hasSpace(CodeMem, RequiredCodeSize) &&
      hasSpace(RODataMem, RequiredRODataSize) &&
      hasSpace(RWDataMem, RequiredRWDataSize)) {
    // Sufficient space in contiguous block already available.
    return;
  }

  // MemoryManager does not have functions for releasing memory after it's
  // allocated. Normally it tries to use any excess blocks that were allocated
  // due to page alignment, but if we have insufficient free memory for the
  // request this can lead to allocating disparate memory that can violate the
  // ARM ABI. Clear free memory so only the new allocations are used, but do
  // not release allocated memory as it may still be in-use.
  CodeMem.FreeMem.clear();
  RODataMem.FreeMem.clear();
  RWDataMem.FreeMem.clear();

  // Round up to the nearest page size. Blocks must be page-aligned.
  RequiredCodeSize = alignTo(RequiredCodeSize, PageSize);
  RequiredRODataSize = alignTo(RequiredRODataSize, PageSize);
  RequiredRWDataSize = alignTo(RequiredRWDataSize, PageSize);
  uint64_t RequiredSize =
      RequiredCodeSize + RequiredRODataSize + RequiredRWDataSize;

  std::error_code ec;
  sys::MemoryBlock MB = MMapper->allocateMappedMemory(
      AllocationPurpose::RWData, RequiredSize, nullptr,
      sys::Memory::MF_READ | sys::Memory::MF_WRITE, ec);
  if (ec) {
    return;
  }
  // CodeMem will arbitrarily own this MemoryBlock to handle cleanup.
  CodeMem.AllocatedMem.push_back(MB);
  uintptr_t Addr = (uintptr_t)MB.base();
  FreeMemBlock FreeMB;
  FreeMB.PendingPrefixIndex = (unsigned)-1;

  if (CodeSize > 0) {
    assert(isAddrAligned(CodeAlign, (void *)Addr));
    FreeMB.Free = sys::MemoryBlock((void *)Addr, RequiredCodeSize);
    CodeMem.FreeMem.push_back(FreeMB);
    Addr += RequiredCodeSize;
  }

  if (RODataSize > 0) {
    assert(isAddrAligned(RODataAlign, (void *)Addr));
    FreeMB.Free = sys::MemoryBlock((void *)Addr, RequiredRODataSize);
    RODataMem.FreeMem.push_back(FreeMB);
    Addr += RequiredRODataSize;
  }

  if (RWDataSize > 0) {
    assert(isAddrAligned(RWDataAlign, (void *)Addr));
    FreeMB.Free = sys::MemoryBlock((void *)Addr, RequiredRWDataSize);
    RWDataMem.FreeMem.push_back(FreeMB);
  }
}

uint8_t *SectionMemoryManager::allocateDataSection(uintptr_t Size,
                                                   unsigned Alignment,
                                                   unsigned SectionID,
                                                   StringRef SectionName,
                                                   bool IsReadOnly) {
  if (IsReadOnly)
    return allocateSection(SectionMemoryManager::AllocationPurpose::ROData,
                           Size, Alignment);
  return allocateSection(SectionMemoryManager::AllocationPurpose::RWData, Size,
                         Alignment);
}

uint8_t *SectionMemoryManager::allocateCodeSection(uintptr_t Size,
                                                   unsigned Alignment,
                                                   unsigned SectionID,
                                                   StringRef SectionName) {
  return allocateSection(SectionMemoryManager::AllocationPurpose::Code, Size,
                         Alignment);
}

uint8_t *SectionMemoryManager::allocateSection(
    SectionMemoryManager::AllocationPurpose Purpose, uintptr_t Size,
    unsigned Alignment) {
  if (!Alignment)
    Alignment = 16;

  assert(!(Alignment & (Alignment - 1)) && "Alignment must be a power of two.");

  uintptr_t RequiredSize = Alignment * ((Size + Alignment - 1) / Alignment + 1);
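  // Illustrative example (numbers not from the source): Size = 100 with
  // Alignment = 16 gives RequiredSize = 16 * ((100 + 15) / 16 + 1) = 128,
  // i.e. one extra Alignment's worth of slack so the block can still hold
  // Size bytes after its base address is rounded up below.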
  uintptr_t Addr = 0;

  MemoryGroup &MemGroup = [&]() -> MemoryGroup & {
    switch (Purpose) {
    case AllocationPurpose::Code:
      return CodeMem;
    case AllocationPurpose::ROData:
      return RODataMem;
    case AllocationPurpose::RWData:
      return RWDataMem;
    }
    llvm_unreachable("Unknown SectionMemoryManager::AllocationPurpose");
  }();

  // Look in the list of free memory regions and use a block there if one
  // is available.
  for (FreeMemBlock &FreeMB : MemGroup.FreeMem) {
    if (FreeMB.Free.allocatedSize() >= RequiredSize) {
      Addr = (uintptr_t)FreeMB.Free.base();
      uintptr_t EndOfBlock = Addr + FreeMB.Free.allocatedSize();
      // Align the address.
      Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);
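      // For example, with Alignment = 16 this computes (Addr + 15) & ~15,
      // rounding Addr up to the next 16-byte boundary (a no-op if Addr is
      // already aligned).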

      if (FreeMB.PendingPrefixIndex == (unsigned)-1) {
        // The part of the block we're giving out to the user is now pending
        MemGroup.PendingMem.push_back(sys::MemoryBlock((void *)Addr, Size));

        // Remember this pending block, such that future allocations can just
        // modify it rather than creating a new one
        FreeMB.PendingPrefixIndex = MemGroup.PendingMem.size() - 1;
      } else {
        sys::MemoryBlock &PendingMB =
            MemGroup.PendingMem[FreeMB.PendingPrefixIndex];
        PendingMB = sys::MemoryBlock(PendingMB.base(),
                                     Addr + Size - (uintptr_t)PendingMB.base());
      }

      // Remember how much free space is now left in this block
      FreeMB.Free =
          sys::MemoryBlock((void *)(Addr + Size), EndOfBlock - Addr - Size);
      return (uint8_t *)Addr;
    }
  }

  // No pre-allocated free block was large enough. Allocate a new memory region.
  // Note that all sections get allocated as read-write. The permissions will
  // be updated later based on memory group.
  //
  // FIXME: It would be useful to define a default allocation size (or add
  // it as a constructor parameter) to minimize the number of allocations.
  //
  // FIXME: Initialize the Near member for each memory group to avoid
  // interleaving.
  std::error_code ec;
  sys::MemoryBlock MB = MMapper->allocateMappedMemory(
      Purpose, RequiredSize, &MemGroup.Near,
      sys::Memory::MF_READ | sys::Memory::MF_WRITE, ec);
  if (ec) {
    // FIXME: Add error propagation to the interface.
    return nullptr;
  }

  // Save this address as the basis for our next request
  MemGroup.Near = MB;

  // Copy the address to all the other groups, if they have not
  // been initialized.
  if (CodeMem.Near.base() == nullptr)
    CodeMem.Near = MB;
  if (RODataMem.Near.base() == nullptr)
    RODataMem.Near = MB;
  if (RWDataMem.Near.base() == nullptr)
    RWDataMem.Near = MB;

  // Remember that we allocated this memory
  MemGroup.AllocatedMem.push_back(MB);
  Addr = (uintptr_t)MB.base();
  uintptr_t EndOfBlock = Addr + MB.allocatedSize();

  // Align the address.
  Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);

  // The part of the block we're giving out to the user is now pending
  MemGroup.PendingMem.push_back(sys::MemoryBlock((void *)Addr, Size));

  // allocateMappedMemory may allocate much more memory than we need. In
  // this case, we store the unused memory as a free memory block.
  unsigned FreeSize = EndOfBlock - Addr - Size;
  if (FreeSize > 16) {
    FreeMemBlock FreeMB;
    FreeMB.Free = sys::MemoryBlock((void *)(Addr + Size), FreeSize);
    FreeMB.PendingPrefixIndex = (unsigned)-1;
    MemGroup.FreeMem.push_back(FreeMB);
  }

  // Return aligned address
  return (uint8_t *)Addr;
}
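
// Illustrative walk-through (not from the source): a first allocation of
// Size = 100 with Alignment = 16 on an empty group requests RequiredSize =
// 128, receives a fresh page-granular mapping from the mapper, records the
// first aligned 100 bytes in PendingMem (awaiting the permission change in
// finalizeMemory), and files the remainder of the mapping as a FreeMemBlock
// that later allocations can carve up without another system allocation.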

bool SectionMemoryManager::finalizeMemory(std::string *ErrMsg) {
  // FIXME: Should in-progress permissions be reverted if an error occurs?
  std::error_code ec;

  // Make code memory executable.
  ec = applyMemoryGroupPermissions(CodeMem,
                                   sys::Memory::MF_READ | sys::Memory::MF_EXEC);
  if (ec) {
    if (ErrMsg) {
      *ErrMsg = ec.message();
    }
    return true;
  }

  // Make read-only data memory read-only.
  ec = applyMemoryGroupPermissions(RODataMem, sys::Memory::MF_READ);
  if (ec) {
    if (ErrMsg) {
      *ErrMsg = ec.message();
    }
    return true;
  }

  // Read-write data memory already has the correct permissions.

  // Some platforms with separate data cache and instruction cache require
  // explicit cache flush, otherwise JIT code manipulations (like resolved
  // relocations) will get to the data cache but not to the instruction cache.
  invalidateInstructionCache();

  return false;
}

static sys::MemoryBlock trimBlockToPageSize(sys::MemoryBlock M) {
  static const size_t PageSize = sys::Process::getPageSizeEstimate();

  size_t StartOverlap =
      (PageSize - ((uintptr_t)M.base() % PageSize)) % PageSize;

  size_t TrimmedSize = M.allocatedSize();
  TrimmedSize -= StartOverlap;
  TrimmedSize -= TrimmedSize % PageSize;

  sys::MemoryBlock Trimmed((void *)((uintptr_t)M.base() + StartOverlap),
                           TrimmedSize);

  assert(((uintptr_t)Trimmed.base() % PageSize) == 0);
  assert((Trimmed.allocatedSize() % PageSize) == 0);
  assert(M.base() <= Trimmed.base() &&
         Trimmed.allocatedSize() <= M.allocatedSize());

  return Trimmed;
}
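
// Illustrative example (numbers not from the source): with 4096-byte pages,
// a block at base 0x10100 of size 0x2100 has StartOverlap = 0xF00, so the
// trimmed block starts at 0x11000 and keeps 0x1000 bytes. Only whole pages
// fully covered by the original block survive the trim.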

std::error_code
SectionMemoryManager::applyMemoryGroupPermissions(MemoryGroup &MemGroup,
                                                  unsigned Permissions) {
  for (sys::MemoryBlock &MB : MemGroup.PendingMem)
    if (std::error_code EC = MMapper->protectMappedMemory(MB, Permissions))
      return EC;

  MemGroup.PendingMem.clear();

  // Now go through free blocks and trim any of them that don't span the entire
  // page because one of the pending blocks may have overlapped it.
  for (FreeMemBlock &FreeMB : MemGroup.FreeMem) {
    FreeMB.Free = trimBlockToPageSize(FreeMB.Free);
    // We cleared the PendingMem list, so all these pointers are now invalid
    FreeMB.PendingPrefixIndex = (unsigned)-1;
  }

  // Remove all blocks which are now empty
  erase_if(MemGroup.FreeMem, [](FreeMemBlock &FreeMB) {
    return FreeMB.Free.allocatedSize() == 0;
  });

  return std::error_code();
}

void SectionMemoryManager::invalidateInstructionCache() {
  for (sys::MemoryBlock &Block : CodeMem.PendingMem)
    sys::Memory::InvalidateInstructionCache(Block.base(),
                                            Block.allocatedSize());
}

SectionMemoryManager::~SectionMemoryManager() {
  for (MemoryGroup *Group : {&CodeMem, &RWDataMem, &RODataMem}) {
    for (sys::MemoryBlock &Block : Group->AllocatedMem)
      MMapper->releaseMappedMemory(Block);
  }
}

SectionMemoryManager::MemoryMapper::~MemoryMapper() = default;

void SectionMemoryManager::anchor() {}

namespace {
// Trivial implementation of SectionMemoryManager::MemoryMapper that just calls
// into sys::Memory.
class DefaultMMapper final : public SectionMemoryManager::MemoryMapper {
public:
  sys::MemoryBlock
  allocateMappedMemory(SectionMemoryManager::AllocationPurpose Purpose,
                       size_t NumBytes, const sys::MemoryBlock *const NearBlock,
                       unsigned Flags, std::error_code &EC) override {
    return sys::Memory::allocateMappedMemory(NumBytes, NearBlock, Flags, EC);
  }

  std::error_code protectMappedMemory(const sys::MemoryBlock &Block,
                                      unsigned Flags) override {
    return sys::Memory::protectMappedMemory(Block, Flags);
  }

  std::error_code releaseMappedMemory(sys::MemoryBlock &M) override {
    return sys::Memory::releaseMappedMemory(M);
  }
};
} // namespace

SectionMemoryManager::SectionMemoryManager(MemoryMapper *UnownedMM,
                                           bool ReserveAlloc)
    : MMapper(UnownedMM), OwnedMMapper(nullptr),
      ReserveAllocation(ReserveAlloc) {
  if (!MMapper) {
    OwnedMMapper = std::make_unique<DefaultMMapper>();
    MMapper = OwnedMMapper.get();
  }
}

} // namespace llvm
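
// A minimal usage sketch (illustrative, not part of this file): client code
// typically constructs a SectionMemoryManager and hands ownership to the JIT,
// which then drives the allocate*Section() calls, applies relocations, and
// invokes finalizeMemory() before running the emitted code. For example,
// with MCJIT:
//
//   #include "llvm/ExecutionEngine/ExecutionEngine.h"
//   #include "llvm/ExecutionEngine/SectionMemoryManager.h"
//
//   // `TheModule` is assumed to be a std::unique_ptr<llvm::Module> the
//   // caller created elsewhere.
//   llvm::EngineBuilder Builder(std::move(TheModule));
//   Builder.setMCJITMemoryManager(
//       std::make_unique<llvm::SectionMemoryManager>());
//   llvm::ExecutionEngine *EE = Builder.create();
//   // EE->finalizeObject() ends up calling finalizeMemory() on the manager,
//   // flipping code pages to read/execute and RO data pages to read-only.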