#ifndef BITCOIN_SUPPORT_LOCKEDPOOL_H
#define BITCOIN_SUPPORT_LOCKEDPOOL_H

#include <map>
#include <memory>
#include <mutex>
#include <unordered_map>

/** OS-dependent allocation and deallocation of locked/pinned memory pages. */
class LockedPageAllocator
{
public:
    virtual ~LockedPageAllocator() {}
    /** Allocate and lock memory pages. */
    virtual void* AllocateLocked(size_t len, bool *lockingSuccess) = 0;
    /** Unlock and free memory pages. */
    virtual void FreeLocked(void* addr, size_t len) = 0;
    /** Total limit on memory that may be locked by this process, in bytes. */
    virtual size_t GetLimit() = 0;
};
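For orientation only, here is a minimal sketch of what a concrete allocator behind this interface could look like on a POSIX system. The class name PosixLockedPageAllocator, the mmap/mlock choices, and the unlimited GetLimit are assumptions of the sketch, not part of this header.

#include <sys/mman.h>
#include <cstdint>
#include <cstring>

// Hypothetical POSIX-style allocator: map anonymous pages, then try to pin them.
class PosixLockedPageAllocator : public LockedPageAllocator
{
public:
    void* AllocateLocked(size_t len, bool *lockingSuccess) override
    {
        void *addr = mmap(nullptr, len, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (addr == MAP_FAILED) return nullptr;
        // Locking can fail (e.g. RLIMIT_MEMLOCK); report it, but still hand out the memory.
        *lockingSuccess = mlock(addr, len) == 0;
        return addr;
    }
    void FreeLocked(void* addr, size_t len) override
    {
        std::memset(addr, 0, len); // wipe potentially sensitive contents before unmapping
        munlock(addr, len);
        munmap(addr, len);
    }
    size_t GetLimit() override
    {
        return SIZE_MAX; // this sketch assumes no per-process locking limit
    }
};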
/** An arena manages a contiguous region of memory by dividing it into chunks. */
class Arena
{
public:
    Arena(void *base, size_t size, size_t alignment);
    /** Allocate size bytes from this arena. */
    void* alloc(size_t size);
    /** Return whether a pointer points inside this arena. */
    bool addressInArena(void *ptr) const;
private:
    typedef std::multimap<size_t, char*> SizeToChunkSortedMap;
    typedef std::unordered_map<char*, SizeToChunkSortedMap::const_iterator> ChunkToSizeMap;
    // further chunk bookkeeping members are documented in the member list below
};
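Arena keeps its free chunks in a size-sorted std::multimap (see size_to_free_chunk in the member list below), which is what makes O(log(n)) best-fit allocation possible. The fragment below only illustrates that lookup; the function name and the simplified map layout are invented here and are not the actual Arena::alloc.

#include <cstddef>
#include <map>

// Best-fit lookup: the first entry with key >= size is the smallest chunk that fits.
char* best_fit(std::multimap<size_t, char*>& size_to_free_chunk, size_t size)
{
    auto it = size_to_free_chunk.lower_bound(size);
    if (it == size_to_free_chunk.end())
        return nullptr;                 // no free chunk is large enough
    char* chunk = it->second;
    size_to_free_chunk.erase(it);       // a real allocator would re-insert any remainder
    return chunk;
}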
/** Pool for locked memory chunks. */
class LockedPool
{
public:
    /** Size of one arena of locked memory. */
    static const size_t ARENA_SIZE = 256*1024;
    /** Chunk alignment. */
    static const size_t ARENA_ALIGN = 16;
    /** Callback when allocation succeeds but locking fails. */
    typedef bool (*LockingFailed_Callback)();

    explicit LockedPool(std::unique_ptr<LockedPageAllocator> allocator, LockingFailed_Callback lf_cb_in = nullptr);

    /** Allocate size bytes from this pool. */
    void* alloc(size_t size);
    /** Free a previously allocated chunk of memory. */
    void free(void *ptr);
private:
    bool new_arena(size_t size, size_t align);
};
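A usage sketch, assuming an allocator implementation such as the hypothetical PosixLockedPageAllocator above. The callback name WarnOnLockFailure and the wipe-before-free step are choices made for this example; the header itself only fixes the constructor and the alloc/free signatures.

#include <cstring>
#include <memory>

// Example callback: in this sketch, returning true lets an allocation proceed
// even though locking the new arena failed.
static bool WarnOnLockFailure() { return true; }

void example()
{
    LockedPool pool(std::make_unique<PosixLockedPageAllocator>(), &WarnOnLockFailure);

    void* key = pool.alloc(32);      // carved out of a 256*1024-byte locked arena
    if (key) {
        // ... keep secret material in the locked chunk ...
        std::memset(key, 0, 32);     // wipe before handing the chunk back
        pool.free(key);
    }
}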
/** Singleton class to keep track of locked (ie, non-swappable) memory, for use in std::allocator templates. */
class LockedPoolManager : public LockedPool
{
public:
    /** Return the current instance, or create it once. */
    static LockedPoolManager& Instance();
private:
    /** Create a new LockedPoolManager specialized to the OS. */
    static void CreateInstance();
    /** Called when locking fails; warn the user here. */
    static bool LockingFailed();
    static LockedPoolManager* _instance;
    static std::once_flag init_flag;
};

#endif // BITCOIN_SUPPORT_LOCKEDPOOL_H
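Given init_flag, _instance, and CreateInstance() above, the singleton accessor can be a one-time, thread-safe initialization. The body below is a sketch consistent with those members rather than a quotation of the implementation, and the call sites at the end are illustrative.

// Inside LockedPoolManager, Instance() can be defined along these lines:
static LockedPoolManager& Instance()
{
    std::call_once(LockedPoolManager::init_flag, LockedPoolManager::CreateInstance);
    return *LockedPoolManager::_instance;
}

void use_singleton()
{
    // Every caller shares the same process-wide locked pool.
    void* secret = LockedPoolManager::Instance().alloc(64);
    // ... store key material in the locked chunk ...
    LockedPoolManager::Instance().free(secret);
}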
The members documented for this header, grouped by class:

class LockedPageAllocator
    OS-dependent allocation and deallocation of locked/pinned memory pages.
virtual ~LockedPageAllocator()
virtual void * AllocateLocked(size_t len, bool *lockingSuccess)=0
    Allocate and lock memory pages.
virtual void FreeLocked(void *addr, size_t len)=0
    Unlock and free memory pages.
virtual size_t GetLimit()=0
    Get the total limit on the amount of memory that may be locked by this process, in bytes.

class Arena
    An arena manages a contiguous region of memory by dividing it into chunks.
std::multimap< size_t, char * > SizeToChunkSortedMap
std::unordered_map< char *, SizeToChunkSortedMap::const_iterator > ChunkToSizeMap
SizeToChunkSortedMap size_to_free_chunk
    Map to enable O(log(n)) best-fit allocation, as it's sorted by size.
ChunkToSizeMap chunks_free
    Map from begin of free chunk to its node in size_to_free_chunk.
ChunkToSizeMap chunks_free_end
    Map from end of free chunk to its node in size_to_free_chunk.
std::unordered_map< char *, size_t > chunks_used
    Map from begin of used chunk to its size.
char * base
    Base address of arena.
char * end
    End address of arena.
size_t alignment
    Minimum chunk alignment.
bool addressInArena(void *ptr) const
    Return whether a pointer points inside this arena.
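chunks_free is keyed by where a free chunk begins and chunks_free_end by where it ends, which is exactly what a free operation needs to merge a released chunk with free neighbours on both sides. The fragment below is a simplified, hypothetical illustration of that merge; it uses plain begin-to-size and end-to-begin maps in place of the iterator-valued maps above, and the function name is invented.

#include <cstddef>
#include <unordered_map>

// Merge a freed chunk [ptr, ptr+size) with adjacent free chunks, then record it.
void insert_free_chunk(char* ptr, size_t size,
                       std::unordered_map<char*, size_t>& begin_to_size,
                       std::unordered_map<char*, char*>& end_to_begin)
{
    // Left neighbour: a free chunk that ends exactly where this one begins.
    auto left = end_to_begin.find(ptr);
    if (left != end_to_begin.end()) {
        char* lbegin = left->second;
        size += begin_to_size[lbegin];
        begin_to_size.erase(lbegin);
        end_to_begin.erase(left);
        ptr = lbegin;
    }
    // Right neighbour: a free chunk that begins exactly where this one ends.
    auto right = begin_to_size.find(ptr + size);
    if (right != begin_to_size.end()) {
        end_to_begin.erase(right->first + right->second);
        size += right->second;
        begin_to_size.erase(right);
    }
    begin_to_size[ptr] = size;
    end_to_begin[ptr + size] = ptr;
}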
class LockedPool
    Pool for locked memory chunks.
std::unique_ptr< LockedPageAllocator > allocator
std::list< LockedPageArena > arenas
LockingFailed_Callback lf_cb
size_t cumulative_bytes_locked
std::mutex mutex
    Mutex protects access to this pool's data structures, including arenas.

class LockedPool::LockedPageArena
    Create an arena from locked pages.
LockedPageAllocator * allocator

class LockedPoolManager
    Singleton class to keep track of locked (ie, non-swappable) memory, for use in std::allocator templates.
static LockedPoolManager & Instance()
    Return the current instance, or create it once.
static void CreateInstance()
    Create a new LockedPoolManager specialized to the OS.
static LockedPoolManager * _instance
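To connect the pool-level members (mutex, arenas) with the Arena-level addressInArena(), here is a hedged sketch of how a pool like this can locate the arena that owns a pointer before freeing a chunk there. The function name is invented, and in the real pool this lookup would run under the mutex documented above.

#include <list>

// Find the arena that owns `ptr`; the pool would then free the chunk in that arena.
Arena* owning_arena(std::list<Arena>& arenas, void* ptr)
{
    for (Arena& arena : arenas) {
        if (arena.addressInArena(ptr))
            return &arena;
    }
    return nullptr; // not a pointer that came from this pool
}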