#if defined(HAVE_CONFIG_H)
#include <config/bitcoin-config.h>
#endif
#include <sys/resource.h> // for getrlimit
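
// Round x up to the next multiple of align; align must be a power of two.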
static inline size_t align_up(size_t x, size_t align)
{
    return (x + align - 1) & ~(align - 1);
}
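// For example, align_up(13, 16) == 16 and align_up(32, 16) == 32.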
Arena::Arena(void *base_in, size_t size_in, size_t alignment_in):
    base(static_cast<char*>(base_in)), end(static_cast<char*>(base_in) + size_in), alignment(alignment_in)
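
// From Arena::alloc: the used chunk is carved from the end of the chosen free chunk,
// so any remainder stays at the chunk's original start address.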
const size_t size_remaining = size_ptr_it->first - size;
auto allocated = chunks_used.emplace(size_ptr_it->second + size_remaining, size).first;
chunks_free_end.erase(size_ptr_it->second + size_ptr_it->first);
if (size_ptr_it->first == size) {
    // Whole free chunk is used up
    chunks_free.erase(size_ptr_it->second);
} else {
    // Some space is left over; re-index the shrunken free chunk
    auto it_remaining = size_to_free_chunk.emplace(size_remaining, size_ptr_it->second);
    chunks_free[size_ptr_it->second] = it_remaining;
    chunks_free_end.emplace(size_ptr_it->second + size_remaining, it_remaining);
}
size_to_free_chunk.erase(size_ptr_it);
return reinterpret_cast<void*>(allocated->first);
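
// From Arena::free: only pointers currently tracked in the used-chunk map may be freed.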
auto i = chunks_used.find(static_cast<char*>(ptr));
if (i == chunks_used.end()) {
    throw std::runtime_error("Arena: invalid or double free");
}
std::pair<char*, size_t> freed = *i;
chunks_used.erase(i);
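
// Grow the freed block backwards over the adjacent preceding free chunk (prev), if any.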
freed.first -= prev->second->first;
freed.second += prev->second->first;
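
// Then absorb the free chunk that begins exactly where the freed block ends.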
auto next = chunks_free.find(freed.first + freed.second);
if (next != chunks_free.end()) {
    freed.second += next->second->first;
}
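
// From Arena::stats: tally the bytes held in used and free chunks.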
for (const auto& chunk: chunks_used)
    r.used += chunk.second;
for (const auto& chunk: chunks_free)
    r.free += chunk.second->first;
r.total = r.used + r.free;
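
// Debug helpers: print each chunk's base address, size, and whether it is in use.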
static void printchunk(void* base, size_t sz, bool used) {
    std::cout <<
        "0x" << std::hex << std::setw(16) << std::setfill('0') << base <<
        " 0x" << std::hex << std::setw(16) << std::setfill('0') << sz <<
        " 0x" << used << std::endl;
}

void Arena::walk() const
{
    for (const auto& chunk: chunks_used)
        printchunk(chunk.first, chunk.second, true);
    std::cout << std::endl;
    for (const auto& chunk: chunks_free)
        printchunk(chunk.first, chunk.second->first, false);
    std::cout << std::endl;
}
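
// Illustrative sketch only (not part of this file): an Arena can be exercised
// against an ordinary heap buffer; requested sizes are rounded up to the arena's
// alignment, so alloc(100) with 16-byte alignment returns a 112-byte chunk.
//
//   std::vector<char> buf(4096);
//   Arena arena(buf.data(), buf.size(), 16);
//   void* p = arena.alloc(100);
//   arena.free(p);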
Win32LockedPageAllocator();
void FreeLocked(void* addr, size_t len) override;

Win32LockedPageAllocator::Win32LockedPageAllocator()
{
    // Determine the system page size in bytes
    SYSTEM_INFO sSysInfo;
    GetSystemInfo(&sSysInfo);
    page_size = sSysInfo.dwPageSize;
}
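
// Reserve and commit page-granular memory, then try to lock it in RAM;
// *lockingSuccess reports whether VirtualLock succeeded.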
void *Win32LockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess)
{
    len = align_up(len, page_size);
    void *addr = VirtualAlloc(nullptr, len, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
    if (addr) {
        // VirtualLock discourages paging these bytes to disk, but does not guarantee it
        *lockingSuccess = VirtualLock(const_cast<void*>(addr), len) != 0;
    }
    return addr;
}

void Win32LockedPageAllocator::FreeLocked(void* addr, size_t len)
{
    len = align_up(len, page_size);
    memory_cleanse(addr, len);
    VirtualUnlock(const_cast<void*>(addr), len);
}

size_t Win32LockedPageAllocator::GetLimit()
{
    return std::numeric_limits<size_t>::max();
}
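
// POSIX page allocator: anonymous mmap plus mlock, with madvise used where
// available to keep the locked pages out of core dumps.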
void FreeLocked(void* addr, size_t len) override;

#if defined(PAGESIZE) // defined in limits.h
    page_size = PAGESIZE;
#else // assume some POSIX OS
    page_size = sysconf(_SC_PAGESIZE);
#endif

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

addr = mmap(nullptr, len, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
if (addr == MAP_FAILED) {
    return nullptr;
}
*lockingSuccess = mlock(addr, len) == 0;
#if defined(MADV_DONTDUMP) // Linux
madvise(addr, len, MADV_DONTDUMP);
#elif defined(MADV_NOCORE) // FreeBSD
madvise(addr, len, MADV_NOCORE);
#endif
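
// The soft RLIMIT_MEMLOCK limit bounds how much memory mlock() may pin.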
#ifdef RLIMIT_MEMLOCK
    struct rlimit rlim;
    if (getrlimit(RLIMIT_MEMLOCK, &rlim) == 0) {
        if (rlim.rlim_cur != RLIM_INFINITY) {
            return rlim.rlim_cur;
        }
    }
#endif
    return std::numeric_limits<size_t>::max();
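
// LockedPool: a pool of arenas backed by memory obtained from the LockedPageAllocator.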
LockedPool::LockedPool(std::unique_ptr<LockedPageAllocator> allocator_in, LockingFailed_Callback lf_cb_in):
    allocator(std::move(allocator_in)), lf_cb(lf_cb_in), cumulative_bytes_locked(0)

std::lock_guard<std::mutex> lock(mutex);
for (auto &arena: arenas) {
    void *addr = arena.alloc(size);
    if (addr) {
        return addr;
    }
}
// No existing arena had room; create a new one
if (new_arena(ARENA_SIZE, ARENA_ALIGN)) {
    return arenas.back().alloc(size);
}
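
// LockedPool::free: a linear scan finds the arena that owns the pointer.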
std::lock_guard<std::mutex> lock(mutex);
for (auto &arena: arenas) {
    if (arena.addressInArena(ptr)) {
        arena.free(ptr);
        return;
    }
}
throw std::runtime_error("LockedPool: invalid address not pointing to any arena");
std::lock_guard<std::mutex> lock(mutex);
for (const auto &arena: arenas) {
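
// When creating a new arena, cap its size by the allocator's locked-memory limit.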
size = std::min(size, limit);
void *addr = allocator->AllocateLocked(size, &locked);

LockedPool::LockedPageArena::LockedPageArena(LockedPageAllocator *allocator_in, void *base_in, size_t size_in, size_t align_in):
    Arena(base_in, size_in, align_in), base(base_in), size(size_in), allocator(allocator_in)
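
// Construct the platform-specific page allocator (Win32 branch shown).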
std::unique_ptr<LockedPageAllocator> allocator(new Win32LockedPageAllocator());