Bitcoin Core 31.99.0
P2P Digital Currency
pool.h
Go to the documentation of this file.
1// Copyright (c) 2022-present The Bitcoin Core developers
2// Distributed under the MIT software license, see the accompanying
3// file COPYING or http://www.opensource.org/licenses/mit-license.php.
4
5#ifndef BITCOIN_SUPPORT_ALLOCATORS_POOL_H
6#define BITCOIN_SUPPORT_ALLOCATORS_POOL_H
7
8#include <array>
9#include <cassert>
10#include <cstddef>
11#include <list>
12#include <memory>
13#include <new>
14#include <type_traits>
15#include <utility>
16
17#include <util/check.h>
18#include <util/overflow.h>
19
72template <std::size_t MAX_BLOCK_SIZE_BYTES, std::size_t ALIGN_BYTES>
73class PoolResource final
74{
75 static_assert(ALIGN_BYTES > 0, "ALIGN_BYTES must be nonzero");
76 static_assert((ALIGN_BYTES & (ALIGN_BYTES - 1)) == 0, "ALIGN_BYTES must be a power of two");
77
81 struct ListNode {
83
84 explicit ListNode(ListNode* next) : m_next(next) {}
85 };
86 static_assert(std::is_trivially_destructible_v<ListNode>, "Make sure we don't need to manually call a destructor");
87
91 static constexpr std::size_t ELEM_ALIGN_BYTES = std::max(alignof(ListNode), ALIGN_BYTES);
92 static_assert((ELEM_ALIGN_BYTES & (ELEM_ALIGN_BYTES - 1)) == 0, "ELEM_ALIGN_BYTES must be a power of two");
93 static_assert(sizeof(ListNode) <= ELEM_ALIGN_BYTES, "Units of size ELEM_SIZE_ALIGN need to be able to store a ListNode");
94 static_assert((MAX_BLOCK_SIZE_BYTES & (ELEM_ALIGN_BYTES - 1)) == 0, "MAX_BLOCK_SIZE_BYTES needs to be a multiple of the alignment.");
95
99 const size_t m_chunk_size_bytes;
100
104 std::list<std::byte*> m_allocated_chunks{};
105
110 std::array<ListNode*, MAX_BLOCK_SIZE_BYTES / ELEM_ALIGN_BYTES + 1> m_free_lists{};
111
115 std::byte* m_available_memory_it = nullptr;
116
123 std::byte* m_available_memory_end = nullptr;
124
129 [[nodiscard]] static constexpr std::size_t NumElemAlignBytes(std::size_t bytes)
130 {
131 return CeilDiv(bytes, ELEM_ALIGN_BYTES) + (bytes == 0);
132 }
133
137 [[nodiscard]] static constexpr bool IsFreeListUsable(std::size_t bytes, std::size_t alignment)
138 {
139 return alignment <= ELEM_ALIGN_BYTES && bytes <= MAX_BLOCK_SIZE_BYTES;
140 }
141
146 {
147 node = new (p) ListNode{node};
148 }
149
157 {
158 // if there is still any available memory left, put it into the freelist.
159 size_t remaining_available_bytes = m_available_memory_end - m_available_memory_it;
160 if (0 != remaining_available_bytes) {
164 }
165
166 void* storage = ::operator new (m_chunk_size_bytes, std::align_val_t{ELEM_ALIGN_BYTES});
167 m_available_memory_it = new (storage) std::byte[m_chunk_size_bytes];
171 }
172
176 friend class PoolResourceTester;
177
178public:
183 explicit PoolResource(std::size_t chunk_size_bytes)
185 {
186 assert(m_chunk_size_bytes >= MAX_BLOCK_SIZE_BYTES);
188 }
189
194
198 PoolResource(const PoolResource&) = delete;
202
207 {
208 for (std::byte* chunk : m_allocated_chunks) {
209 std::destroy(chunk, chunk + m_chunk_size_bytes);
210 ::operator delete ((void*)chunk, std::align_val_t{ELEM_ALIGN_BYTES});
212 }
213 }
214
219 void* Allocate(std::size_t bytes, std::size_t alignment)
220 {
221 if (IsFreeListUsable(bytes, alignment)) {
222 const std::size_t num_alignments = NumElemAlignBytes(bytes);
223 if (nullptr != m_free_lists[num_alignments]) {
224 // we've already got data in the pool's freelist, unlink one element and return the pointer
225 // to the unlinked memory. Since FreeList is trivially destructible we can just treat it as
226 // uninitialized memory.
227 ASAN_UNPOISON_MEMORY_REGION(m_free_lists[num_alignments], sizeof(ListNode));
228 auto* next{m_free_lists[num_alignments]->m_next};
229 ASAN_POISON_MEMORY_REGION(m_free_lists[num_alignments], sizeof(ListNode));
230 ASAN_UNPOISON_MEMORY_REGION(m_free_lists[num_alignments], bytes);
231 return std::exchange(m_free_lists[num_alignments], next);
232 }
233
234 // freelist is empty: get one allocation from allocated chunk memory.
235 const std::ptrdiff_t round_bytes = static_cast<std::ptrdiff_t>(num_alignments * ELEM_ALIGN_BYTES);
236 if (round_bytes > m_available_memory_end - m_available_memory_it) {
237 // slow path, only happens when a new chunk needs to be allocated
239 }
240
241 // Make sure we use the right amount of bytes for that freelist (might be rounded up),
243 return std::exchange(m_available_memory_it, m_available_memory_it + round_bytes);
244 }
245
246 // Can't use the pool => use operator new()
247 return ::operator new (bytes, std::align_val_t{alignment});
248 }
249
253 void Deallocate(void* p, std::size_t bytes, std::size_t alignment) noexcept
254 {
255 if (IsFreeListUsable(bytes, alignment)) {
256 const std::size_t num_alignments = NumElemAlignBytes(bytes);
257 // put the memory block into the linked list. We can placement construct the FreeList
258 // into the memory since we can be sure the alignment is correct.
260 PlacementAddToList(p, m_free_lists[num_alignments]);
261 ASAN_POISON_MEMORY_REGION(p, std::max(bytes, sizeof(ListNode)));
262 } else {
263 // Can't use the pool => forward deallocation to ::operator delete().
264 ::operator delete (p, std::align_val_t{alignment});
265 }
266 }
267
271 [[nodiscard]] std::size_t NumAllocatedChunks() const
272 {
273 return m_allocated_chunks.size();
274 }
275
279 [[nodiscard]] size_t ChunkSizeBytes() const
280 {
281 return m_chunk_size_bytes;
282 }
283};
284
285
289template <class T, std::size_t MAX_BLOCK_SIZE_BYTES, std::size_t ALIGN_BYTES = alignof(T)>
291{
293
294 template <typename U, std::size_t M, std::size_t A>
295 friend class PoolAllocator;
296
297public:
298 using value_type = T;
300
306 {
307 }
308
309 PoolAllocator(const PoolAllocator& other) noexcept = default;
310 PoolAllocator& operator=(const PoolAllocator& other) noexcept = default;
311
312 template <class U>
314 : m_resource(other.resource())
315 {
316 }
317
322 template <typename U>
323 struct rebind {
325 };
326
330 T* allocate(size_t n)
331 {
332 return static_cast<T*>(m_resource->Allocate(n * sizeof(T), alignof(T)));
333 }
334
338 void deallocate(T* p, size_t n) noexcept
339 {
340 m_resource->Deallocate(p, n * sizeof(T), alignof(T));
341 }
342
343 ResourceType* resource() const noexcept
344 {
345 return m_resource;
346 }
347};
348
349template <class T1, class T2, std::size_t MAX_BLOCK_SIZE_BYTES, std::size_t ALIGN_BYTES>
352{
353 return a.resource() == b.resource();
354}
355
356#endif // BITCOIN_SUPPORT_ALLOCATORS_POOL_H
#define ASAN_POISON_MEMORY_REGION(addr, size)
Definition: check.h:140
#define ASAN_UNPOISON_MEMORY_REGION(addr, size)
Definition: check.h:141
Forwards all allocations/deallocations to the PoolResource.
Definition: pool.h:291
PoolResource< MAX_BLOCK_SIZE_BYTES, ALIGN_BYTES > * m_resource
Definition: pool.h:292
ResourceType * resource() const noexcept
Definition: pool.h:343
T value_type
Definition: pool.h:298
PoolAllocator & operator=(const PoolAllocator &other) noexcept=default
void deallocate(T *p, size_t n) noexcept
Forwards each call to the resource.
Definition: pool.h:338
PoolAllocator(const PoolAllocator &other) noexcept=default
PoolAllocator(ResourceType *resource) noexcept
Not explicit so we can easily construct it with the correct resource.
Definition: pool.h:304
T * allocate(size_t n)
Forwards each call to the resource.
Definition: pool.h:330
PoolAllocator(const PoolAllocator< U, MAX_BLOCK_SIZE_BYTES, ALIGN_BYTES > &other) noexcept
Definition: pool.h:313
A memory resource similar to std::pmr::unsynchronized_pool_resource, but optimized for node-based con...
Definition: pool.h:74
PoolResource & operator=(const PoolResource &)=delete
void PlacementAddToList(void *p, ListNode *&node)
Replaces node with placement constructed ListNode that points to the previous node.
Definition: pool.h:145
static constexpr std::size_t NumElemAlignBytes(std::size_t bytes)
How many multiple of ELEM_ALIGN_BYTES are necessary to fit bytes.
Definition: pool.h:129
PoolResource(const PoolResource &)=delete
Disable copy & move semantics, these are not supported for the resource.
size_t ChunkSizeBytes() const
Size in bytes to allocate per chunk, currently hardcoded to a fixed size.
Definition: pool.h:279
std::array< ListNode *, MAX_BLOCK_SIZE_BYTES/ELEM_ALIGN_BYTES+1 > m_free_lists
Single linked lists of all data that came from deallocating.
Definition: pool.h:110
std::byte * m_available_memory_it
Points to the beginning of available memory for carving out allocations.
Definition: pool.h:115
PoolResource & operator=(PoolResource &&)=delete
std::size_t NumAllocatedChunks() const
Number of allocated chunks.
Definition: pool.h:271
static constexpr bool IsFreeListUsable(std::size_t bytes, std::size_t alignment)
True when it is possible to make use of the freelist.
Definition: pool.h:137
void Deallocate(void *p, std::size_t bytes, std::size_t alignment) noexcept
Returns a block to the freelists, or deletes the block when it did not come from the chunks.
Definition: pool.h:253
std::byte * m_available_memory_end
Points to the end of available memory for carving out allocations.
Definition: pool.h:123
PoolResource(std::size_t chunk_size_bytes)
Construct a new PoolResource object which allocates the first chunk.
Definition: pool.h:183
~PoolResource()
Deallocates all memory associated with the memory resource.
Definition: pool.h:206
static constexpr std::size_t ELEM_ALIGN_BYTES
Internal alignment value.
Definition: pool.h:91
const size_t m_chunk_size_bytes
Size in bytes to allocate per chunk.
Definition: pool.h:99
std::list< std::byte * > m_allocated_chunks
Contains all allocated pools of memory, used to free the data in the destructor.
Definition: pool.h:104
PoolResource()
Construct a new Pool Resource object, defaults to 2^18=262144 chunk size.
Definition: pool.h:193
PoolResource(PoolResource &&)=delete
void * Allocate(std::size_t bytes, std::size_t alignment)
Allocates a block of bytes.
Definition: pool.h:219
void AllocateChunk()
Allocate one full memory chunk which will be used to carve out allocations.
Definition: pool.h:156
Helper to get access to private parts of PoolResource.
Definition: messages.h:21
constexpr auto CeilDiv(const Dividend dividend, const Divisor divisor)
Integer ceiling division (for unsigned values).
Definition: overflow.h:70
bool operator==(const PoolAllocator< T1, MAX_BLOCK_SIZE_BYTES, ALIGN_BYTES > &a, const PoolAllocator< T2, MAX_BLOCK_SIZE_BYTES, ALIGN_BYTES > &b) noexcept
Definition: pool.h:350
The rebind struct here is mandatory because we use non type template arguments for PoolAllocator.
Definition: pool.h:323
In-place linked list of the allocations, used for the freelist.
Definition: pool.h:81
ListNode * m_next
Definition: pool.h:82
ListNode(ListNode *next)
Definition: pool.h:84
assert(!tx.IsCoinBase())