#if defined(HAVE_CONFIG_H)
...
#endif
...
#include <sys/resource.h>
...
/** Align up to power of 2 */
static inline size_t align_up(size_t x, size_t align)
{
    return (x + align - 1) & ~(align - 1);
}
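As a quick illustration of the rounding arithmetic above, here is a standalone sketch (not part of the original file; align_up_demo and the chosen sizes are illustrative assumptions). The mask trick requires align to be a power of two:

#include <cassert>
#include <cstddef>

// Same arithmetic as align_up above, repeated so the sketch is self-contained.
static inline size_t align_up_demo(size_t x, size_t align)
{
    return (x + align - 1) & ~(align - 1);
}

int main()
{
    assert(align_up_demo(13, 16) == 16);    // rounds up to the next multiple of 16
    assert(align_up_demo(32, 16) == 32);    // already-aligned values are unchanged
    assert(align_up_demo(1, 4096) == 4096); // page-size rounding as used by the allocators below
    return 0;
}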
Arena::Arena(void *base_in, size_t size_in, size_t alignment_in):
    base(static_cast<char*>(base_in)), end(static_cast<char*>(base_in) + size_in), alignment(alignment_in)
{ ... }

void* Arena::alloc(size_t size)
{
    ...
    // Create the used chunk, taking its space from the end of the best-fit free chunk
    const size_t size_remaining = size_ptr_it->first - size;
    auto allocated = chunks_used.emplace(size_ptr_it->second + size_remaining, size).first;
    if (size_ptr_it->first == size) {
        ... // the free chunk is consumed entirely
    } else {
        ... // some space remains; re-register the shrunken free chunk
        chunks_free_end.emplace(size_ptr_it->second + size_remaining, it_remaining);
    }
    ...
    return reinterpret_cast<void*>(allocated->first);
}
void Arena::free(void *ptr)
{
    ...
    // Remove chunk from the used map
    auto i = chunks_used.find(static_cast<char*>(ptr));
    if (i == chunks_used.end()) {
        throw std::runtime_error("Arena: invalid or double free");
    }
    std::pair<char*, size_t> freed = *i;
    ...
    // Coalesce with the free chunk that ends where this one begins, if any
    freed.first -= prev->second->first;
    freed.second += prev->second->first;
    ...
    // Coalesce with the free chunk that begins where this one ends, if any
    auto next = chunks_free.find(freed.first + freed.second);
    ...
    freed.second += next->second->first;
    ...
}
Arena::Stats Arena::stats() const
{
    ...
    for (const auto& chunk: chunks_used)
        r.used += chunk.second;
    for (const auto& chunk: chunks_free)
        r.free += chunk.second->first;
    r.total = r.used + r.free;
    ...
}
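To make the best-fit and coalescing behaviour above concrete, here is a small hand-written sketch (not from the original file) that drives an Arena over an ordinary, unlocked buffer; the buffer size, alignment and assertions are illustrative assumptions only:

#include <cassert>

void arena_demo()
{
    static char backing[4096];                 // ordinary memory, only for illustration
    Arena arena(backing, sizeof(backing), 16); // 16-byte minimum chunk alignment
    void* a = arena.alloc(100);                // internally rounded up to 112 bytes
    void* b = arena.alloc(200);                // carved from the remaining free chunk
    assert(a != nullptr && b != nullptr);
    arena.free(a);
    arena.free(b);                             // neighbouring free chunks are coalesced again
    Arena::Stats s = arena.stats();
    assert(s.used == 0 && s.free == s.total);  // everything is back in one free region
}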
// Debug aid: dump the chunk layout of this arena
static void printchunk(void* base, size_t sz, bool used) {
    std::cout <<
        "0x" << std::hex << std::setw(16) << std::setfill('0') << base <<
        " 0x" << std::hex << std::setw(16) << std::setfill('0') << sz <<
        " 0x" << used << std::endl;
}
void Arena::walk() const
{
    for (const auto& chunk: chunks_used)
        printchunk(chunk.first, chunk.second, true);
    std::cout << std::endl;
    for (const auto& chunk: chunks_free)
        printchunk(chunk.first, chunk.second->first, false);
    std::cout << std::endl;
}
/** LockedPageAllocator specialized for Windows. */
class Win32LockedPageAllocator: public LockedPageAllocator
{
public:
    Win32LockedPageAllocator();
    void* AllocateLocked(size_t len, bool *lockingSuccess) override;
    void FreeLocked(void* addr, size_t len) override;
    size_t GetLimit() override;
private:
    size_t page_size;
};
Win32LockedPageAllocator::Win32LockedPageAllocator()
{
    // Determine the system page size
    SYSTEM_INFO sSysInfo;
    GetSystemInfo(&sSysInfo);
    page_size = sSysInfo.dwPageSize;
}
void *Win32LockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess)
{
    ...
    void *addr = VirtualAlloc(nullptr, len, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
    if (addr) {
        // VirtualLock keeps the pages out of the pagefile in practice, but does not guarantee it
        *lockingSuccess = VirtualLock(const_cast<void*>(addr), len) != 0;
    }
    return addr;
}
void Win32LockedPageAllocator::FreeLocked(void* addr, size_t len)
{
    ... // the buffer is wiped with memory_cleanse() before unlocking
    VirtualUnlock(const_cast<void*>(addr), len);
}
size_t Win32LockedPageAllocator::GetLimit()
{
    // No clear way to query a per-process locking limit on Windows
    return std::numeric_limits<size_t>::max();
}
/** LockedPageAllocator specialized for OSes that don't try to be special snowflakes. */
class PosixLockedPageAllocator: public LockedPageAllocator
{
    ...
    void FreeLocked(void* addr, size_t len) override;
    ...
};
PosixLockedPageAllocator::PosixLockedPageAllocator()
{
#if defined(PAGESIZE) // defined in limits.h
    page_size = PAGESIZE;
#else // assume some POSIX OS
    page_size = sysconf(_SC_PAGESIZE);
#endif
}
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON // some systems still only define the older MAP_ANON
#endif
void *PosixLockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess)
{
    ...
    addr = mmap(nullptr, len, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    if (addr == MAP_FAILED) {
        return nullptr;
    }
    *lockingSuccess = mlock(addr, len) == 0;
#if defined(MADV_DONTDUMP) // Linux
    madvise(addr, len, MADV_DONTDUMP); // exclude locked pages from core dumps
#elif defined(MADV_NOCORE) // FreeBSD
    madvise(addr, len, MADV_NOCORE);
#endif
    ...
}
size_t PosixLockedPageAllocator::GetLimit()
{
#ifdef RLIMIT_MEMLOCK
    struct rlimit rlim;
    if (getrlimit(RLIMIT_MEMLOCK, &rlim) == 0) {
        if (rlim.rlim_cur != RLIM_INFINITY) {
            return rlim.rlim_cur;
        }
    }
#endif
    return std::numeric_limits<size_t>::max();
}
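When mlock() fails it is usually because RLIMIT_MEMLOCK is low; the following standalone sketch (mine, not part of the file) prints the same limit that GetLimit() consults, which can help diagnose locking failures:

#include <sys/resource.h>
#include <cstdio>

int main()
{
    struct rlimit rlim;
    if (getrlimit(RLIMIT_MEMLOCK, &rlim) == 0) {
        if (rlim.rlim_cur == RLIM_INFINITY)
            std::printf("RLIMIT_MEMLOCK: unlimited\n");
        else
            std::printf("RLIMIT_MEMLOCK: %llu bytes\n",
                        static_cast<unsigned long long>(rlim.rlim_cur));
    }
    return 0;
}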
LockedPool::LockedPool(std::unique_ptr<LockedPageAllocator> allocator_in, LockingFailed_Callback lf_cb_in):
    allocator(std::move(allocator_in)), lf_cb(lf_cb_in), cumulative_bytes_locked(0)
{
}
void* LockedPool::alloc(size_t size)
{
    std::lock_guard<std::mutex> lock(mutex);
    ...
    // Try each existing arena first
    for (auto &arena: arenas) {
        void *addr = arena.alloc(size);
        if (addr) return addr;
    }
    // Otherwise create a new arena and allocate from it
    ...
    return arenas.back().alloc(size);
}
void LockedPool::free(void *ptr)
{
    std::lock_guard<std::mutex> lock(mutex);
    // Linear search for the arena that owns this address
    for (auto &arena: arenas) {
        if (arena.addressInArena(ptr)) {
            arena.free(ptr);
            return;
        }
    }
    throw std::runtime_error("LockedPool: invalid address not pointing to any arena");
}
LockedPool::Stats LockedPool::stats() const
{
    std::lock_guard<std::mutex> lock(mutex);
    // Sum the per-arena statistics
    for (const auto &arena: arenas) {
        ...
    }
    ...
}
bool LockedPool::new_arena(size_t size, size_t align)
{
    ... // cap the first arena at the process locking limit so it can be locked in full
    size = std::min(size, limit);
    void *addr = allocator->AllocateLocked(size, &locked);
    ...
}
LockedPool::LockedPageArena::LockedPageArena(LockedPageAllocator *allocator_in, void *base_in, size_t size_in, size_t align_in):
    Arena(base_in, size_in, align_in), base(base_in), size(size_in), allocator(allocator_in)
{
}
void LockedPoolManager::CreateInstance()
{
#ifdef WIN32
    std::unique_ptr<LockedPageAllocator> allocator(new Win32LockedPageAllocator());
#else
    std::unique_ptr<LockedPageAllocator> allocator(new PosixLockedPageAllocator());
#endif
    ...
}
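Finally, a sketch of how client code might obtain locked memory through the singleton pool. This is my own illustration, not from the file; it assumes the usual singleton accessor LockedPoolManager::Instance() and uses memory_cleanse() (listed in the cross-reference below) to wipe the buffer before returning it:

#include <cstring>

void secret_key_demo()
{
    LockedPoolManager& pool = LockedPoolManager::Instance(); // assumed accessor
    void* key = pool.alloc(32);       // 32 bytes of (ideally) non-swappable memory
    if (!key) return;                 // allocation can fail, e.g. for sizes above ARENA_SIZE
    std::memset(key, 0xAB, 32);       // stand-in for writing secret material
    memory_cleanse(key, 32);          // wipe the secret before releasing the chunk
    pool.free(key);
}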
Referenced declarations (cross-reference of the entities used above, with their brief descriptions):

LockedPageAllocator: OS-dependent allocation and deallocation of locked/pinned memory pages.
    virtual void* AllocateLocked(size_t len, bool *lockingSuccess) = 0: Allocate and lock memory pages.
    virtual void FreeLocked(void *addr, size_t len) = 0: Unlock and free memory pages.
    virtual size_t GetLimit() = 0: Get the total limit on the amount of memory that may be locked by this process, in bytes.

Win32LockedPageAllocator:
    Win32LockedPageAllocator()

PosixLockedPageAllocator: LockedPageAllocator specialized for OSes that don't try to be special snowflakes.
    PosixLockedPageAllocator()

Both platform allocators override:
    void* AllocateLocked(size_t len, bool *lockingSuccess) override: Allocate and lock memory pages.
    void FreeLocked(void *addr, size_t len) override: Unlock and free memory pages.
    size_t GetLimit() override: Get the total limit on the amount of memory that may be locked by this process, in bytes.

Arena:
    Arena(void *base, size_t size, size_t alignment)
    void* alloc(size_t size): Allocate size bytes from this arena.
    void free(void *ptr): Free a previously allocated chunk of memory.
    Stats stats() const: Get arena usage statistics.
    char* base: Base address of arena.
    size_t alignment: Minimum chunk alignment.
    SizeToChunkSortedMap size_to_free_chunk: Map to enable O(log(n)) best-fit allocation, as it's sorted by size.
    ChunkToSizeMap chunks_free: Map from begin of free chunk to its node in size_to_free_chunk.
    ChunkToSizeMap chunks_free_end: Map from end of free chunk to its node in size_to_free_chunk.
    std::unordered_map<char*, size_t> chunks_used: Map from begin of used chunk to its size.

LockedPool: Pool for locked memory chunks.
    LockedPool(std::unique_ptr<LockedPageAllocator> allocator, LockingFailed_Callback lf_cb_in = nullptr): Create a new LockedPool.
    void* alloc(size_t size): Allocate size bytes from this arena.
    void free(void *ptr): Free a previously allocated chunk of memory.
    Stats stats() const: Get pool usage statistics.
    bool new_arena(size_t size, size_t align)
    LockedPageArena(LockedPageAllocator *alloc_in, void *base_in, size_t size, size_t align)
    static const size_t ARENA_SIZE: Size of one arena of locked memory.
    static const size_t ARENA_ALIGN: Chunk alignment.
    std::list<LockedPageArena> arenas
    std::unique_ptr<LockedPageAllocator> allocator
    LockingFailed_Callback lf_cb
    size_t cumulative_bytes_locked
    std::mutex mutex: Mutex protects access to this pool's data structures, including arenas.

LockedPoolManager: Singleton class to keep track of locked (ie, non-swappable) memory, for use in std::allocator templates.
    LockedPoolManager(std::unique_ptr<LockedPageAllocator> allocator)
    static void CreateInstance(): Create a new LockedPoolManager specialized to the OS.
    static bool LockingFailed(): Called when locking fails, warn the user here.
    static LockedPoolManager* _instance

Free functions:
    static size_t align_up(size_t x, size_t align): Align up to power of 2.
    void memory_cleanse(void *ptr, size_t len): Secure overwrite a buffer (possibly containing secret data) with zero-bytes.