#include <sys/resource.h>
static inline size_t align_up(size_t x, size_t align)
{
    return (x + align - 1) & ~(align - 1);
}
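As a quick illustration (not part of the original file; the values are chosen only for the example), align_up() rounds a request up to the next multiple of a power-of-two alignment:

// Worked example for align_up(); the definition above is repeated here only so
// the snippet compiles on its own. Assumes align is a power of two.
#include <cassert>
#include <cstddef>

static inline size_t align_up(size_t x, size_t align)
{
    return (x + align - 1) & ~(align - 1);
}

int main()
{
    assert(align_up(1, 16) == 16);      // a 1-byte request still occupies one 16-byte unit
    assert(align_up(16, 16) == 16);     // already-aligned sizes are unchanged
    assert(align_up(1000, 16) == 1008); // 1000 rounds up to the next multiple of 16
}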
Arena::Arena(void *base_in, size_t size_in, size_t alignment_in):
    base(base_in), end(static_cast<char*>(base_in) + size_in), alignment(alignment_in)
// Arena::alloc: carve the used chunk from the end of the chosen free chunk
const size_t size_remaining = size_ptr_it->first - size;
char* const free_chunk = static_cast<char*>(size_ptr_it->second);
auto allocated = chunks_used.emplace(free_chunk + size_remaining, size).first;
// ...
if (size_ptr_it->first == size) {
    // whole free chunk is used up
    // ...
}
// ...
return allocated->first;
// Arena::free: remove the chunk from chunks_used, then coalesce with free neighbours
auto i = chunks_used.find(ptr);
if (i == chunks_used.end()) {
    throw std::runtime_error("Arena: invalid or double free");
}
auto freed = std::make_pair(static_cast<char*>(i->first), i->second);
// ...
// Coalesce with the free chunk that ends where the freed chunk begins
auto prev = chunks_free_end.find(freed.first);
if (prev != chunks_free_end.end()) {
    freed.first -= prev->second->first;
    freed.second += prev->second->first;
    // ...
}
// Coalesce with the free chunk that begins where the freed chunk ends
auto next = chunks_free.find(freed.first + freed.second);
if (next != chunks_free.end()) {
    freed.second += next->second->first;
    // ...
}
// Arena::stats: sum used and free bytes over all chunks
for (const auto& chunk: chunks_used)
    r.used += chunk.second;
for (const auto& chunk: chunks_free)
    r.free += chunk.second->first;
r.total = r.used + r.free;
static void printchunk(void* base, size_t sz, bool used) {
    std::cout <<
        "0x" << std::hex << std::setw(16) << std::setfill('0') << base <<
        " 0x" << std::hex << std::setw(16) << std::setfill('0') << sz <<
        " 0x" << used << std::endl;
}
void Arena::walk() const
{
    for (const auto& chunk: chunks_used)
        printchunk(chunk.first, chunk.second, true);
    std::cout << std::endl;
    for (const auto& chunk: chunks_free)
        printchunk(chunk.first, chunk.second->first, false);
    std::cout << std::endl;
}
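To make the Arena interface concrete, here is a hedged usage sketch rather than code from this file. It assumes only the public interface documented in the reference list below (the constructor, alloc, free and stats) and, purely for illustration, backs the arena with ordinary heap memory instead of locked pages; the header path support/lockedpool.h is an assumption.

// Illustrative only: an Arena over plain, non-locked heap memory.
#include <support/lockedpool.h>
#include <cassert>
#include <new>

void arena_demo()
{
    constexpr size_t kSize = 4096, kAlign = 16;
    void* backing = ::operator new(kSize);

    Arena arena(backing, kSize, kAlign);   // starts as one free chunk covering the region
    void* a = arena.alloc(100);            // request is rounded up to a multiple of kAlign
    void* b = arena.alloc(200);
    assert(a && b);

    Arena::Stats s = arena.stats();
    assert(s.used + s.free == s.total);    // every byte is accounted for

    arena.free(a);                         // out-of-order frees are fine; neighbours coalesce
    arena.free(b);

    ::operator delete(backing);            // the Arena does not own its backing memory
}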
// Win32LockedPageAllocator: LockedPageAllocator implementation for Windows
Win32LockedPageAllocator();
void FreeLocked(void* addr, size_t len) override;
Win32LockedPageAllocator::Win32LockedPageAllocator()
{
    // Determine the system page size
    SYSTEM_INFO sSysInfo;
    GetSystemInfo(&sSysInfo);
    page_size = sSysInfo.dwPageSize;
}
void *Win32LockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess)
{
    len = align_up(len, page_size);
    void *addr = VirtualAlloc(nullptr, len, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
    if (addr) {
        // VirtualLock keeps the pages out of the pagefile in practice, but is not a hard guarantee
        *lockingSuccess = VirtualLock(const_cast<void*>(addr), len) != 0;
    }
    return addr;
}
void Win32LockedPageAllocator::FreeLocked(void* addr, size_t len)
{
    len = align_up(len, page_size);
    memory_cleanse(addr, len);
    VirtualUnlock(const_cast<void*>(addr), len);
}
size_t Win32LockedPageAllocator::GetLimit()
{
    size_t min, max;
    if (GetProcessWorkingSetSize(GetCurrentProcess(), &min, &max) != 0) {
        return min; // VirtualLock is bounded by the minimum working set size
    }
    return std::numeric_limits<size_t>::max();
}
// PosixLockedPageAllocator: LockedPageAllocator implementation for POSIX systems
void FreeLocked(void* addr, size_t len) override;
// PosixLockedPageAllocator::AllocateLocked
addr = mmap(nullptr, len, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
if (addr == MAP_FAILED) {
    return nullptr;
}
*lockingSuccess = mlock(addr, len) == 0;
// Keep the locked pages out of core dumps where the platform supports it
#if defined(MADV_DONTDUMP) // Linux
madvise(addr, len, MADV_DONTDUMP);
#elif defined(MADV_NOCORE) // FreeBSD
madvise(addr, len, MADV_NOCORE);
#endif
// PosixLockedPageAllocator::GetLimit: respect RLIMIT_MEMLOCK when it is set
struct rlimit rlim;
if (getrlimit(RLIMIT_MEMLOCK, &rlim) == 0) {
    if (rlim.rlim_cur != RLIM_INFINITY) {
        return rlim.rlim_cur;
    }
}
return std::numeric_limits<size_t>::max();
LockedPool::LockedPool(std::unique_ptr<LockedPageAllocator> allocator_in, LockingFailed_Callback lf_cb_in)
    : allocator(std::move(allocator_in)), lf_cb(lf_cb_in)
// LockedPool::alloc: try each existing arena, then grow the pool with a new one
std::lock_guard<std::mutex> lock(mutex);
for (auto &arena: arenas) {
    void *addr = arena.alloc(size);
    if (addr) {
        return addr;
    }
}
// No existing arena could satisfy the request, so create a new one
if (new_arena(ARENA_SIZE, ARENA_ALIGN)) {
    return arenas.back().alloc(size);
}
// LockedPool::free: linear search for the arena that owns the address
std::lock_guard<std::mutex> lock(mutex);
for (auto &arena: arenas) {
    if (arena.addressInArena(ptr)) {
        arena.free(ptr);
        return;
    }
}
throw std::runtime_error("LockedPool: invalid address not pointing to any arena");
// LockedPool::stats: aggregate per-arena statistics under the pool mutex
std::lock_guard<std::mutex> lock(mutex);
for (const auto &arena: arenas) {
    // ...
}
// LockedPool::new_arena: cap the first arena at the process locking limit, then allocate
size = std::min(size, limit);
// ...
void *addr = allocator->AllocateLocked(size, &locked);
LockedPool::LockedPageArena::LockedPageArena(LockedPageAllocator *allocator_in, void *base_in, size_t size_in, size_t align_in):
    Arena(base_in, size_in, align_in), base(base_in), size(size_in), allocator(allocator_in)
// LockedPoolManager::CreateInstance: pick the page allocator for the current OS
std::unique_ptr<LockedPageAllocator> allocator(new Win32LockedPageAllocator());
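Beyond the built-in Win32 and POSIX implementations, the LockedPageAllocator interface documented below can be implemented independently, which is also a convenient way to exercise a LockedPool without touching OS locking limits. The following is a hedged sketch, not code from this file: the class name TestLockedPageAllocator, the malloc backing, and the callback body are illustrative assumptions; only the LockedPageAllocator and LockedPool interfaces are taken from support/lockedpool.h.

// Hypothetical test allocator: hands out ordinary heap pages and reports that
// locking failed, so the pool's locking-failed callback gets exercised.
#include <support/lockedpool.h>
#include <cstdlib>
#include <limits>
#include <memory>

class TestLockedPageAllocator : public LockedPageAllocator
{
public:
    void* AllocateLocked(size_t len, bool* lockingSuccess) override
    {
        *lockingSuccess = false;                   // we never mlock/VirtualLock anything
        return std::malloc(len);
    }
    void FreeLocked(void* addr, size_t len) override
    {
        (void)len;
        std::free(addr);
    }
    size_t GetLimit() override
    {
        return std::numeric_limits<size_t>::max(); // pretend there is no locking limit
    }
};

void pool_demo()
{
    // A callback returning true means "consider the user warned and continue".
    LockedPool pool(std::make_unique<TestLockedPageAllocator>(), [] { return true; });
    void* p = pool.alloc(64);
    if (p) {
        pool.free(p);
    }
}

In the real pool, new_arena() only counts the allocation toward cumulative_bytes_locked when lockingSuccess comes back true; otherwise it consults the locking-failed callback before deciding whether to keep the arena.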
void * base
Base address of arena.
size_t alignment
Minimum chunk alignment.
ChunkToSizeMap chunks_free_end
Map from end of free chunk to its node in size_to_free_chunk.
std::unordered_map< void *, size_t > chunks_used
Map from begin of used chunk to its size.
void * alloc(size_t size)
Allocate size bytes from this arena.
SizeToChunkSortedMap size_to_free_chunk
Map to enable O(log(n)) best-fit allocation, as it's sorted by size.
Arena(void *base, size_t size, size_t alignment)
Stats stats() const
Get arena usage statistics.
ChunkToSizeMap chunks_free
Map from begin of free chunk to its node in size_to_free_chunk.
void free(void *ptr)
Free a previously allocated chunk of memory.
OS-dependent allocation and deallocation of locked/pinned memory pages.
virtual void * AllocateLocked(size_t len, bool *lockingSuccess)=0
Allocate and lock memory pages.
virtual void FreeLocked(void *addr, size_t len)=0
Unlock and free memory pages.
virtual size_t GetLimit()=0
Get the total limit on the amount of memory that may be locked by this process, in bytes.
LockedPageArena(LockedPageAllocator *alloc_in, void *base_in, size_t size, size_t align)
Pool for locked memory chunks.
void free(void *ptr)
Free a previously allocated chunk of memory.
Stats stats() const
Get pool usage statistics.
std::unique_ptr< LockedPageAllocator > allocator
void * alloc(size_t size)
Allocate size bytes from this arena.
size_t cumulative_bytes_locked
LockedPool(std::unique_ptr< LockedPageAllocator > allocator, LockingFailed_Callback lf_cb_in=nullptr)
Create a new LockedPool.
LockingFailed_Callback lf_cb
std::list< LockedPageArena > arenas
bool new_arena(size_t size, size_t align)
static const size_t ARENA_ALIGN
Chunk alignment.
static const size_t ARENA_SIZE
Size of one arena of locked memory.
std::mutex mutex
Mutex protects access to this pool's data structures, including arenas.
Singleton class to keep track of locked (i.e., non-swappable) memory, for use in std::allocator templates (a usage sketch follows at the end of this list).
LockedPoolManager(std::unique_ptr< LockedPageAllocator > allocator)
static bool LockingFailed()
Called when locking fails, warn the user here.
static LockedPoolManager * _instance
static void CreateInstance()
Create a new LockedPoolManager specialized to the OS.
LockedPageAllocator specialized for OSes that don't try to be special snowflakes.
void * AllocateLocked(size_t len, bool *lockingSuccess) override
Allocate and lock memory pages.
void FreeLocked(void *addr, size_t len) override
Unlock and free memory pages.
size_t GetLimit() override
Get the total limit on the amount of memory that may be locked by this process, in bytes.
PosixLockedPageAllocator()
void memory_cleanse(void *ptr, size_t len)
Securely overwrite a buffer (possibly containing secret data) with zero-bytes.
static size_t align_up(size_t x, size_t align)
Align up to power of 2.
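The LockedPoolManager entries above describe the process-wide singleton; the usual way to reach it is through its static accessor, which is how a secure allocator would typically obtain locked memory. A minimal sketch, assuming the Instance() accessor declared in support/lockedpool.h; the helper names are illustrative:

// Hedged sketch: routing allocations through the singleton pool.
#include <support/lockedpool.h>

void* secure_allocate(size_t n)
{
    // The first call creates the OS-specific pool via CreateInstance().
    return LockedPoolManager::Instance().alloc(n);
}

void secure_deallocate(void* p)
{
    LockedPoolManager::Instance().free(p);
}

Note that alloc() returns nullptr when it cannot satisfy the request (a zero-byte allocation, or one larger than a single arena), so a real allocator wraps this in an out-of-memory check.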