template <std::size_t MAX_BLOCK_SIZE_BYTES, std::size_t ALIGN_BYTES>
class PoolResourceFuzzer
{
    FuzzedDataProvider& m_provider;
    PoolResource<MAX_BLOCK_SIZE_BYTES, ALIGN_BYTES> m_test_resource;
    uint64_t m_sequence{0};
    size_t m_total_allocated{};
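
    // Bookkeeping for one live allocation: the span returned by the pool, the alignment it was
    // requested with, and the RNG seed used to fill it (so its contents can be verified later).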
    struct Entry {
        Span<std::byte> span;
        size_t alignment;
        uint64_t seed;

        Entry(Span<std::byte> s, size_t a, uint64_t se) : span(s), alignment(a), seed(se) {}
    };

    std::vector<Entry> m_entries;

public:
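    // Construct the pool under test with a fuzz-chosen chunk size between MAX_BLOCK_SIZE_BYTES and 256 KiB.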
    PoolResourceFuzzer(FuzzedDataProvider& provider)
        : m_provider{provider},
          m_test_resource{provider.ConsumeIntegralInRange<size_t>(MAX_BLOCK_SIZE_BYTES, 262144)}
    {
    }
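
    // Allocate `size` bytes with the given alignment from the pool, record the allocation,
    // and fill it with deterministic content for later verification.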
    void Allocate(size_t size, size_t alignment)
    {
        assert(size > 0);
        assert(alignment > 0);
        assert((alignment & (alignment - 1)) == 0); // alignment must be a power of two
        assert((size & (alignment - 1)) == 0);      // size must be a multiple of alignment

        auto span = Span(static_cast<std::byte*>(m_test_resource.Allocate(size, alignment)), size);
        m_total_allocated += size;

        auto ptr_val = reinterpret_cast<std::uintptr_t>(span.data());
        assert((ptr_val & (alignment - 1)) == 0); // the returned pointer honors the requested alignment

        uint64_t seed = m_sequence++;
        RandomContentFill(m_entries.emplace_back(span, alignment, seed));
    }
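
    // Fuzz-chosen allocation: a power-of-two alignment of up to 128 bytes and a size that is a
    // multiple of that alignment (just under 128 KiB at most), capped so that no more than
    // roughly 16 MiB is outstanding at once.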
    void Allocate()
    {
        if (m_total_allocated > 0x1000000) return;

        size_t alignment_bits = m_provider.ConsumeIntegralInRange<size_t>(0, 7);
        size_t alignment = size_t{1} << alignment_bits;
        size_t size_bits = m_provider.ConsumeIntegralInRange<size_t>(0, 16 - alignment_bits);
        size_t size = m_provider.ConsumeIntegralInRange<size_t>(size_t{1} << size_bits, (size_t{1} << (size_bits + 1)) - 1U) << alignment_bits;
        Allocate(size, alignment);
    }
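
    // Entry contents are generated deterministically from entry.seed via fillrand(), so that
    // Deallocate() can later regenerate and compare them, catching overlapping or corrupted
    // blocks handed out by the pool.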
    void RandomContentFill(Entry& entry)
    {
        InsecureRandomContext{entry.seed}.fillrand(entry.span);
    }

    void RandomContentCheck(const Entry& entry)
    {
        std::vector<std::byte> expect(entry.span.size());
        InsecureRandomContext{entry.seed}.fillrand(expect);
        assert(std::ranges::equal(entry.span, expect));
    }
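
    // Check that the block is still correctly aligned and its contents are intact, then hand it
    // back to the pool.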
    void Deallocate(const Entry& entry)
    {
        auto ptr_val = reinterpret_cast<std::uintptr_t>(entry.span.data());
        assert((ptr_val & (entry.alignment - 1)) == 0);
        RandomContentCheck(entry);
        m_total_allocated -= entry.span.size();
        m_test_resource.Deallocate(entry.span.data(), entry.span.size(), entry.alignment);
    }
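
    // Deallocate a fuzz-chosen live entry and drop it from the bookkeeping via swap-and-pop.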
    void Deallocate()
    {
        if (m_entries.empty()) {
            return;
        }

        size_t idx = m_provider.ConsumeIntegralInRange<size_t>(0, m_entries.size() - 1);
        Deallocate(m_entries[idx]);
        if (idx != m_entries.size() - 1) {
            m_entries[idx] = std::move(m_entries.back());
        }
        m_entries.pop_back();
    }
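
    // Return every outstanding block to the pool, then verify the pool's internal accounting.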
    void Clear()
    {
        while (!m_entries.empty()) {
            Deallocate();
        }

        PoolResourceTester::CheckAllDataAccountedFor(m_test_resource);
    }
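
    // Drive the harness: interleave fuzz-chosen allocations and deallocations, then tear down.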
    void Fuzz()
    {
        LIMITED_WHILE(m_provider.ConsumeBool(), 10000)
        {
            CallOneOf(
                m_provider,
                [&] { Allocate(); },
                [&] { Deallocate(); });
        }
        Clear();
    }
};

FUZZ_TARGET(pool_resource)
{
    FuzzedDataProvider provider(buffer.data(), buffer.size());

    CallOneOf(
        provider,
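        // Pools with a 128-byte maximum block size and increasing alignment: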
        [&] { PoolResourceFuzzer<128, 1>{provider}.Fuzz(); },
        [&] { PoolResourceFuzzer<128, 2>{provider}.Fuzz(); },
        [&] { PoolResourceFuzzer<128, 4>{provider}.Fuzz(); },
        [&] { PoolResourceFuzzer<128, 8>{provider}.Fuzz(); },
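
        // Degenerate pools where the maximum block size equals the alignment: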
        [&] { PoolResourceFuzzer<8, 8>{provider}.Fuzz(); },
        [&] { PoolResourceFuzzer<16, 16>{provider}.Fuzz(); },
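
        // Larger blocks with max_align_t alignment and an over-aligned (64-byte) case: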
        [&] { PoolResourceFuzzer<256, alignof(max_align_t)>{provider}.Fuzz(); },
        [&] { PoolResourceFuzzer<256, 64>{provider}.Fuzz(); });
}

// Referenced helpers:
// - PoolResource<MAX_BLOCK_SIZE_BYTES, ALIGN_BYTES>: a memory resource similar to
//   std::pmr::unsynchronized_pool_resource, but optimized for node-based containers.
//   - void* Allocate(std::size_t bytes, std::size_t alignment): allocates a block of bytes.
//   - void Deallocate(void* p, std::size_t bytes, std::size_t alignment) noexcept: returns a block
//     to the freelists, or deletes the block when it did not come from the chunks.
// - PoolResourceTester::CheckAllDataAccountedFor(const PoolResource<MAX_BLOCK_SIZE_BYTES, ALIGN_BYTES>& resource):
//   once all blocks are given back to the resource, tests that the freelists are consistent.
// - void fillrand(Span<std::byte> span) noexcept: fills a Span with random bytes.
// - Span<T> (with deduction guide Span(T*, EndOrSize) -> Span<T>): an object that can refer to a
//   contiguous sequence of objects.
// - LIMITED_WHILE(condition, limit): can be used to limit a theoretically unbounded loop.
// - size_t CallOneOf(FuzzedDataProvider& fuzzed_data_provider, Callables... callables): invokes one
//   of the given callables, chosen from the fuzz input.
// - FUZZ_TARGET(pool_resource): registers this harness as the `pool_resource` fuzz target.
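
// For orientation, a minimal sketch (illustrative only, not part of this file) of using the
// PoolResource API described above directly. The chunk size, block size, and alignment below are
// arbitrary choices that merely respect the harness's asserts (alignment a power of two, size a
// multiple of alignment):
//
//   PoolResource<256, 8> resource{262144};                    // pool backed by 256 KiB chunks
//   void* p{resource.Allocate(/*bytes=*/64, /*alignment=*/8)};
//   // ... use the 64-byte block ...
//   resource.Deallocate(p, /*bytes=*/64, /*alignment=*/8);
//   PoolResourceTester::CheckAllDataAccountedFor(resource);   // all blocks returned, freelists consistent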