Bitcoin Core 29.99.0
txorphanage.cpp
1// Copyright (c) The Bitcoin Core developers
2// Distributed under the MIT software license, see the accompanying
3// file COPYING or http://www.opensource.org/licenses/mit-license.php.
4
5#include <bench/bench.h>
6#include <consensus/amount.h>
7#include <net.h>
8#include <policy/policy.h>
10#include <pubkey.h>
11#include <script/sign.h>
13#include <node/txorphanage.h>
14#include <util/check.h>
16
17#include <cstdint>
18#include <memory>
19
20static constexpr node::TxOrphanage::Usage TINY_TX_WEIGHT{240};
21static constexpr int64_t APPROX_WEIGHT_PER_INPUT{200};
22
23// Creates a transaction with num_inputs inputs and 1 output, padded to target_weight. Use this function to maximize m_outpoint_to_orphan_it operations.
24// If num_inputs is 0, we maximize the number of inputs.
25static CTransactionRef MakeTransactionBulkedTo(unsigned int num_inputs, int64_t target_weight, FastRandomContext& det_rand)
26{
27    CMutableTransaction tx;
28 assert(target_weight >= 40 + APPROX_WEIGHT_PER_INPUT);
29 if (!num_inputs) num_inputs = (target_weight - 40) / APPROX_WEIGHT_PER_INPUT;
30 for (unsigned int i = 0; i < num_inputs; ++i) {
31 tx.vin.emplace_back(Txid::FromUint256(det_rand.rand256()), 0);
32 }
33 assert(GetTransactionWeight(*MakeTransactionRef(tx)) <= target_weight);
34
35 tx.vout.resize(1);
36
37 // If necessary, pad the transaction to the target weight.
38 if (GetTransactionWeight(*MakeTransactionRef(tx)) < target_weight - 4) {
39 BulkTransaction(tx, target_weight);
40 }
41 return MakeTransactionRef(tx);
42}
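As a quick cross-check of the padding arithmetic in MakeTransactionBulkedTo (a standalone sketch, not part of the benchmark: the 40-weight overhead and the 164 weight units per bare input are back-of-the-envelope figures, and the 4'000 target is an arbitrary example), the input-count formula behaves as follows:

#include <cassert>
#include <cstdint>

int main()
{
    constexpr int64_t APPROX_WEIGHT_PER_INPUT{200}; // same reservation per input as above
    constexpr int64_t OVERHEAD_WEIGHT{40};          // version, locktime, compact-size counts
    constexpr int64_t target_weight{4'000};         // arbitrary example target

    // Same formula as MakeTransactionBulkedTo when num_inputs == 0.
    const int64_t num_inputs{(target_weight - OVERHEAD_WEIGHT) / APPROX_WEIGHT_PER_INPUT};
    assert(num_inputs == 19);

    // A bare non-witness input serializes to 36 (outpoint) + 1 (script length)
    // + 4 (nSequence) = 41 bytes, i.e. 164 weight units, so reserving 200 per
    // input keeps the unpadded transaction at or below the target and leaves
    // BulkTransaction to make up the difference.
    assert(num_inputs * 164 + OVERHEAD_WEIGHT <= target_weight);
    return 0;
}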
43
44// Constructs a transaction using a subset of inputs[start_input : start_input + num_inputs] up to the weight_limit.
45static CTransactionRef MakeTransactionSpendingUpTo(const std::vector<CTxIn>& inputs, unsigned int start_input, unsigned int num_inputs, int64_t weight_limit)
46{
47    CMutableTransaction tx;
48 for (unsigned int i{start_input}; i < start_input + num_inputs; ++i) {
49 if (GetTransactionWeight(*MakeTransactionRef(tx)) + APPROX_WEIGHT_PER_INPUT >= weight_limit) break;
50 tx.vin.emplace_back(inputs.at(i % inputs.size()));
51 }
52 assert(tx.vin.size() > 0);
53 return MakeTransactionRef(tx);
54}
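A rough standalone model of the early-break condition in MakeTransactionSpendingUpTo (a sketch; the 164 weight units per bare input and 40 weight units of fixed overhead are assumptions, and the 400'000 limit is just MAX_STANDARD_TX_WEIGHT used as an example):

#include <cstdint>
#include <iostream>

int main()
{
    constexpr int64_t APPROX_WEIGHT_PER_INPUT{200};  // headroom reserved per input, as above
    constexpr int64_t ACTUAL_WEIGHT_PER_INPUT{164};  // a bare non-witness input: 41 bytes * 4
    constexpr int64_t OVERHEAD_WEIGHT{40};           // empty tx: version, locktime, counts
    constexpr int64_t weight_limit{400'000};         // e.g. MAX_STANDARD_TX_WEIGHT

    int64_t weight{OVERHEAD_WEIGHT};
    int64_t num_inputs{0};
    // Mirrors the loop's break condition: stop as soon as reserving one more
    // input would reach the weight limit.
    while (weight + APPROX_WEIGHT_PER_INPUT < weight_limit) {
        weight += ACTUAL_WEIGHT_PER_INPUT;
        ++num_inputs;
    }
    std::cout << num_inputs << " inputs fit under a " << weight_limit << " weight limit\n";
    return 0;
}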
55static void OrphanageSinglePeerEviction(benchmark::Bench& bench)
56{
57 FastRandomContext det_rand{true};
58
59    // Fill up announcement slots with tiny txns, followed by a single large one
60 unsigned int NUM_TINY_TRANSACTIONS((node::DEFAULT_MAX_ORPHANAGE_LATENCY_SCORE));
61
62 // Construct transactions to submit to orphanage: 1-in-1-out tiny transactions
63 std::vector<CTransactionRef> tiny_txs;
64 tiny_txs.reserve(NUM_TINY_TRANSACTIONS);
65 for (unsigned int i{0}; i < NUM_TINY_TRANSACTIONS; ++i) {
66 tiny_txs.emplace_back(MakeTransactionBulkedTo(1, TINY_TX_WEIGHT, det_rand));
67 }
68 auto large_tx = MakeTransactionBulkedTo(1, MAX_STANDARD_TX_WEIGHT, det_rand);
70
71 const auto orphanage{node::MakeTxOrphanage(/*max_global_latency_score=*/node::DEFAULT_MAX_ORPHANAGE_LATENCY_SCORE, /*reserved_peer_usage=*/node::DEFAULT_RESERVED_ORPHAN_WEIGHT_PER_PEER)};
72
73 // Populate the orphanage. To maximize the number of evictions, first fill up with tiny transactions, then add a huge one.
74 NodeId peer{0};
75 // Add tiny transactions until we are just about to hit the memory limit, up to the max number of announcements.
76 // We use the same tiny transactions for all peers to minimize their contribution to the usage limit.
77 int64_t total_weight_to_add{0};
78 for (unsigned int txindex{0}; txindex < NUM_TINY_TRANSACTIONS; ++txindex) {
79 const auto& tx{tiny_txs.at(txindex)};
80
81 total_weight_to_add += GetTransactionWeight(*tx);
82 if (total_weight_to_add > orphanage->MaxGlobalUsage()) break;
83
84 assert(orphanage->AddTx(tx, peer));
85
86        // Sanity check: we should always exit this loop by hitting the weight limit, not by running out of transactions.
87 assert(txindex < NUM_TINY_TRANSACTIONS - 1);
88 }
89
90 // In the real world, we always trim after each new tx.
91 // If we need to trim already, that means the benchmark is not representative of what LimitOrphans may do in a single call.
92 assert(orphanage->TotalOrphanUsage() <= orphanage->MaxGlobalUsage());
93 assert(orphanage->TotalLatencyScore() <= orphanage->MaxGlobalLatencyScore());
94 assert(orphanage->TotalOrphanUsage() + TINY_TX_WEIGHT > orphanage->MaxGlobalUsage());
95
96    bench.epochs(1).epochIterations(1).run([&]() NO_THREAD_SAFETY_ANALYSIS {
97 // Lastly, add the large transaction.
98 const auto num_announcements_before_trim{orphanage->CountAnnouncements()};
99 assert(orphanage->AddTx(large_tx, peer));
100
101 // If there are multiple peers, note that they all have the same DoS score. We will evict only 1 item at a time for each new DoSiest peer.
102 const auto num_announcements_after_trim{orphanage->CountAnnouncements()};
103 const auto num_evicted{num_announcements_before_trim - num_announcements_after_trim};
104
105 // The number of evictions is the same regardless of the number of peers. In both cases, we can exceed the
106 // usage limit using 1 maximally-sized transaction.
108 });
109}
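Back-of-the-envelope math for the single-peer case (a sketch, not part of the benchmark; it assumes TINY_TX_WEIGHT is 240 weight units and uses MAX_STANDARD_TX_WEIGHT = 400'000 from policy.h): the orphanage is filled to within one tiny transaction of its usage limit, so the single oversized AddTx must make LimitOrphans reclaim roughly the large transaction's entire weight, one tiny orphan at a time.

#include <cstdint>
#include <iostream>

int main()
{
    constexpr int64_t TINY_TX_WEIGHT{240};             // assumed value of the file's constant
    constexpr int64_t MAX_STANDARD_TX_WEIGHT{400'000}; // policy.h
    // Usage exceeds the limit by at least the large tx's weight minus the
    // remaining headroom (which is smaller than one tiny tx), and each
    // eviction reclaims only one tiny transaction's worth of weight.
    const int64_t excess{MAX_STANDARD_TX_WEIGHT - TINY_TX_WEIGHT};
    const int64_t min_evictions{(excess + TINY_TX_WEIGHT - 1) / TINY_TX_WEIGHT};
    std::cout << "one AddTx call forces at least " << min_evictions << " evictions\n";
    return 0;
}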
110static void OrphanageMultiPeerEviction(benchmark::Bench& bench)
111{
112    // Best number is the smallest that still satisfies NUM_UNIQUE_TXNS <= NUM_PEERS * 2, i.e. just above sqrt(DEFAULT_MAX_ORPHANAGE_LATENCY_SCORE / 2)
113 static constexpr unsigned int NUM_PEERS{39};
114 // All peers will have the same transactions. We want to be just under the weight limit, so divide the max usage limit by the number of unique transactions.
115 static constexpr node::TxOrphanage::Count NUM_UNIQUE_TXNS{node::DEFAULT_MAX_ORPHANAGE_LATENCY_SCORE / NUM_PEERS};
116 static constexpr node::TxOrphanage::Usage TOTAL_USAGE_LIMIT{node::DEFAULT_RESERVED_ORPHAN_WEIGHT_PER_PEER * NUM_PEERS};
117 // Subtract 4 because BulkTransaction rounds up and we must avoid going over the weight limit early.
118 static constexpr node::TxOrphanage::Usage LARGE_TX_WEIGHT{TOTAL_USAGE_LIMIT / NUM_UNIQUE_TXNS - 4};
119 static_assert(LARGE_TX_WEIGHT >= TINY_TX_WEIGHT * 2, "Tx is too small, increase NUM_PEERS");
120 // The orphanage does not permit any transactions larger than 400'000, so this test will not work if the large tx is much larger.
121 static_assert(LARGE_TX_WEIGHT <= MAX_STANDARD_TX_WEIGHT, "Tx is too large, decrease NUM_PEERS");
122
123 FastRandomContext det_rand{true};
124 // Construct large transactions
125 std::vector<CTransactionRef> shared_txs;
126 shared_txs.reserve(NUM_UNIQUE_TXNS);
127 for (unsigned int i{0}; i < NUM_UNIQUE_TXNS; ++i) {
128 shared_txs.emplace_back(MakeTransactionBulkedTo(9, LARGE_TX_WEIGHT, det_rand));
129 }
130 std::vector<size_t> indexes;
131 indexes.resize(NUM_UNIQUE_TXNS);
132 std::iota(indexes.begin(), indexes.end(), 0);
133
134 const auto orphanage{node::MakeTxOrphanage(/*max_global_latency_score=*/node::DEFAULT_MAX_ORPHANAGE_LATENCY_SCORE, /*reserved_peer_usage=*/node::DEFAULT_RESERVED_ORPHAN_WEIGHT_PER_PEER)};
135 // Every peer sends the same transactions, all from shared_txs.
136    // Each peer has 1 or 2 assigned transactions, which must occupy its last and second-to-last positions.
137    // The assignments ensure that every transaction is among some peer's last 2 transactions, and thus remains in the orphanage until the end of LimitOrphans.
138 static_assert(NUM_UNIQUE_TXNS <= NUM_PEERS * 2);
139
140 // We need each peer to send some transactions so that the global limit (which is a function of the number of peers providing at least 1 announcement) rises.
141 for (unsigned int i{0}; i < NUM_UNIQUE_TXNS; ++i) {
142 for (NodeId peer{0}; peer < NUM_PEERS; ++peer) {
143 const CTransactionRef& reserved_last_tx{shared_txs.at(peer)};
144 CTransactionRef reserved_second_to_last_tx{peer < NUM_UNIQUE_TXNS - NUM_PEERS ? shared_txs.at(peer + NUM_PEERS) : nullptr};
145
146 const auto& tx{shared_txs.at(indexes.at(i))};
147 if (tx == reserved_last_tx) {
148 // Skip
149 } else if (reserved_second_to_last_tx && tx == reserved_second_to_last_tx) {
150 // Skip
151 } else {
152 orphanage->AddTx(tx, peer);
153 }
154 }
155 }
156
157 // Now add the final reserved transactions.
158 for (NodeId peer{0}; peer < NUM_PEERS; ++peer) {
159 const CTransactionRef& reserved_last_tx{shared_txs.at(peer)};
160 CTransactionRef reserved_second_to_last_tx{peer < NUM_UNIQUE_TXNS - NUM_PEERS ? shared_txs.at(peer + NUM_PEERS) : nullptr};
161 // Add the final reserved transactions.
162 if (reserved_second_to_last_tx) {
163 orphanage->AddTx(reserved_second_to_last_tx, peer);
164 }
165 orphanage->AddTx(reserved_last_tx, peer);
166 }
167
168 assert(orphanage->CountAnnouncements() == NUM_PEERS * NUM_UNIQUE_TXNS);
169 const auto total_usage{orphanage->TotalOrphanUsage()};
170 const auto max_usage{orphanage->MaxGlobalUsage()};
171 assert(max_usage - total_usage <= LARGE_TX_WEIGHT);
172 assert(orphanage->TotalLatencyScore() <= orphanage->MaxGlobalLatencyScore());
173
174    auto last_tx = MakeTransactionBulkedTo(0, max_usage - total_usage + 1, det_rand);
175
176    bench.epochs(1).epochIterations(1).run([&]() NO_THREAD_SAFETY_ANALYSIS {
175
177 const auto num_announcements_before_trim{orphanage->CountAnnouncements()};
178 // There is a small gap between the total usage and the max usage. Add a transaction to fill it.
179 assert(orphanage->AddTx(last_tx, 0));
180
181 // If there are multiple peers, note that they all have the same DoS score. We will evict only 1 item at a time for each new DoSiest peer.
182 const auto num_evicted{num_announcements_before_trim - orphanage->CountAnnouncements() + 1};
183 // The trimming happens as a round robin. In the first NUM_UNIQUE_TXNS - 2 rounds for each peer, only duplicates are evicted.
184 // Once each peer has 2 transactions left, it's possible to select a peer whose oldest transaction is unique.
185 assert(num_evicted >= (NUM_UNIQUE_TXNS - 2) * NUM_PEERS);
186 });
187}
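The eviction floor asserted above can be reproduced with plain arithmetic (a sketch; it assumes DEFAULT_MAX_ORPHANAGE_LATENCY_SCORE is 3000, consistent with the "3000 announcements" comment in OrphanageEraseAll below): with 39 peers all announcing the same 76 transactions, the round-robin trim can only drop duplicate announcements until each peer is down to its two reserved ones.

#include <cassert>

int main()
{
    constexpr unsigned NUM_PEERS{39};
    constexpr unsigned MAX_LATENCY_SCORE{3'000};                        // assumed default
    constexpr unsigned NUM_UNIQUE_TXNS{MAX_LATENCY_SCORE / NUM_PEERS};  // 76

    // Every unique transaction must fit into some peer's last two slots.
    static_assert(NUM_UNIQUE_TXNS <= NUM_PEERS * 2, "not enough reserved slots");

    // Announcements before the final AddTx: every peer announced every tx.
    constexpr unsigned total_announcements{NUM_PEERS * NUM_UNIQUE_TXNS}; // 2964

    // Until each peer is down to its two reserved announcements, trimming only
    // removes duplicates, so at least this many evictions happen first.
    constexpr unsigned duplicate_only_evictions{(NUM_UNIQUE_TXNS - 2) * NUM_PEERS};
    assert(duplicate_only_evictions == 2'886);
    assert(duplicate_only_evictions < total_announcements);
    return 0;
}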
188
189static void OrphanageEraseAll(benchmark::Bench& bench, bool block_or_disconnect)
190{
191 FastRandomContext det_rand{true};
192 const auto orphanage{node::MakeTxOrphanage(/*max_global_latency_score=*/node::DEFAULT_MAX_ORPHANAGE_LATENCY_SCORE, /*reserved_peer_usage=*/node::DEFAULT_RESERVED_ORPHAN_WEIGHT_PER_PEER)};
193 // This is an unrealistically large number of inputs for a block, as there is almost no room given to witness data,
194 // outputs, and overhead for individual transactions. The entire block is 1 transaction with 20,000 inputs.
195 constexpr unsigned int NUM_BLOCK_INPUTS{MAX_BLOCK_WEIGHT / APPROX_WEIGHT_PER_INPUT};
196 const auto block_tx{MakeTransactionBulkedTo(NUM_BLOCK_INPUTS, MAX_BLOCK_WEIGHT - 4000, det_rand)};
197 CBlock block;
198 block.vtx.push_back(block_tx);
199
200 // Transactions with 9 inputs maximize the computation / LatencyScore ratio.
201 constexpr unsigned int INPUTS_PER_TX{9};
202 constexpr unsigned int NUM_PEERS{125};
203 constexpr unsigned int NUM_TXNS_PER_PEER = node::DEFAULT_MAX_ORPHANAGE_LATENCY_SCORE / NUM_PEERS;
204
205 // Divide the block's inputs evenly among the peers.
206 constexpr unsigned int INPUTS_PER_PEER = NUM_BLOCK_INPUTS / NUM_PEERS;
207 static_assert(INPUTS_PER_PEER > 0);
208    // All the block inputs are spent by the orphanage transactions. Each peer is assigned 160 of them.
209    // Each peer has 24 transactions spending 9 inputs each, so jumping by 7 ensures we cover all of the inputs.
210 static_assert(7 * NUM_TXNS_PER_PEER + INPUTS_PER_TX - 1 >= INPUTS_PER_PEER);
211
212 for (NodeId peer{0}; peer < NUM_PEERS; ++peer) {
213 int64_t weight_left_for_peer{node::DEFAULT_RESERVED_ORPHAN_WEIGHT_PER_PEER};
214 for (unsigned int txnum{0}; txnum < node::DEFAULT_MAX_ORPHANAGE_LATENCY_SCORE / NUM_PEERS; ++txnum) {
215 // Transactions must be unique since they use different (though overlapping) inputs.
216 const unsigned int start_input = peer * INPUTS_PER_PEER + txnum * 7;
217
218 // Note that we shouldn't be able to hit the weight limit with these small transactions.
219 const int64_t weight_limit{std::min<int64_t>(weight_left_for_peer, MAX_STANDARD_TX_WEIGHT)};
220 auto ptx = MakeTransactionSpendingUpTo(block_tx->vin, /*start_input=*/start_input, /*num_inputs=*/INPUTS_PER_TX, /*weight_limit=*/weight_limit);
221
223 assert(!orphanage->HaveTx(ptx->GetWitnessHash()));
224 assert(orphanage->AddTx(ptx, peer));
225
226 weight_left_for_peer -= GetTransactionWeight(*ptx);
227 if (weight_left_for_peer < TINY_TX_WEIGHT * 2) break;
228 }
229 }
230
231 // If these fail, it means this benchmark is not realistic because the orphanage would have been trimmed already.
232 assert(orphanage->TotalLatencyScore() <= orphanage->MaxGlobalLatencyScore());
233 assert(orphanage->TotalOrphanUsage() <= orphanage->MaxGlobalUsage());
234
235 // 3000 announcements (and unique transactions) in the orphanage.
236 // They spend a total of 27,000 inputs (20,000 unique ones)
237 assert(orphanage->CountAnnouncements() == NUM_PEERS * NUM_TXNS_PER_PEER);
238 assert(orphanage->TotalLatencyScore() == orphanage->CountAnnouncements());
239
240    bench.epochs(1).epochIterations(1).run([&]() NO_THREAD_SAFETY_ANALYSIS {
241 if (block_or_disconnect) {
242 // Erase everything through EraseForBlock.
243 // Every tx conflicts with this block.
244 orphanage->EraseForBlock(block);
245 assert(orphanage->CountAnnouncements() == 0);
246 } else {
247 // Erase everything through EraseForPeer.
248 for (NodeId peer{0}; peer < NUM_PEERS; ++peer) {
249 orphanage->EraseForPeer(peer);
250 }
251 assert(orphanage->CountAnnouncements() == 0);
252 }
253 });
254}
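The input-coverage claim in the comments above can be checked independently (a sketch; it assumes DEFAULT_MAX_ORPHANAGE_LATENCY_SCORE is 3000, i.e. 24 transactions per peer): with a stride of 7 and 9 inputs per transaction, consecutive transactions overlap, so every one of a peer's 160 assigned block inputs is spent by at least one orphan.

#include <cassert>
#include <vector>

int main()
{
    constexpr unsigned NUM_BLOCK_INPUTS{4'000'000 / 200};     // MAX_BLOCK_WEIGHT / APPROX_WEIGHT_PER_INPUT
    constexpr unsigned NUM_PEERS{125};
    constexpr unsigned NUM_TXNS_PER_PEER{3'000 / NUM_PEERS};  // assumes the default latency score
    constexpr unsigned INPUTS_PER_TX{9};
    constexpr unsigned INPUTS_PER_PEER{NUM_BLOCK_INPUTS / NUM_PEERS}; // 160

    std::vector<bool> covered(INPUTS_PER_PEER, false);
    for (unsigned txnum{0}; txnum < NUM_TXNS_PER_PEER; ++txnum) {
        for (unsigned i{0}; i < INPUTS_PER_TX; ++i) {
            const unsigned offset{txnum * 7 + i};
            if (offset < INPUTS_PER_PEER) covered.at(offset) = true;
        }
    }
    // Every assigned input ends up spent by some orphan transaction.
    for (const bool spent : covered) assert(spent);
    return 0;
}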
255
256static void OrphanageEraseForBlock(benchmark::Bench& bench)
257{
258 OrphanageEraseAll(bench, /*block_or_disconnect=*/true);
259}
260static void OrphanageEraseForPeer(benchmark::Bench& bench)
261{
262 OrphanageEraseAll(bench, /*block_or_disconnect=*/false);
263}
264
265BENCHMARK(OrphanageSinglePeerEviction, benchmark::PriorityLevel::LOW);
266BENCHMARK(OrphanageMultiPeerEviction, benchmark::PriorityLevel::LOW);
267BENCHMARK(OrphanageEraseForBlock, benchmark::PriorityLevel::LOW);
268BENCHMARK(OrphanageEraseForPeer, benchmark::PriorityLevel::LOW);
static CTransactionRef MakeTransactionSpendingUpTo(const std::vector< CTxIn > &inputs, unsigned int start_input, unsigned int num_inputs, int64_t weight_limit)
Definition: txorphanage.cpp:45
static void OrphanageEraseForPeer(benchmark::Bench &bench)
Definition: txorphanage.cpp:260
static void OrphanageEraseForBlock(benchmark::Bench &bench)
Definition: txorphanage.cpp:256
static CTransactionRef MakeTransactionBulkedTo(unsigned int num_inputs, int64_t target_weight, FastRandomContext &det_rand)
Definition: txorphanage.cpp:25
static void OrphanageEraseAll(benchmark::Bench &bench, bool block_or_disconnect)
Definition: txorphanage.cpp:189
static constexpr node::TxOrphanage::Usage TINY_TX_WEIGHT
Definition: txorphanage.cpp:20
BENCHMARK(OrphanageSinglePeerEviction, benchmark::PriorityLevel::LOW)
static constexpr int64_t APPROX_WEIGHT_PER_INPUT
Definition: txorphanage.cpp:21
static void OrphanageSinglePeerEviction(benchmark::Bench &bench)
Definition: txorphanage.cpp:55
static void OrphanageMultiPeerEviction(benchmark::Bench &bench)
Definition: txorphanage.cpp:110
CBlock
Definition: block.h:69
std::vector< CTransactionRef > vtx
Definition: block.h:72
FastRandomContext
Fast randomness source.
Definition: random.h:386
uint256 rand256() noexcept
generate a random uint256.
Definition: random.h:317
ankerl::nanobench::Bench
Main entry point to nanobench's benchmarking facility.
Definition: nanobench.h:627
Bench & run(char const *benchmarkName, Op &&op)
Repeatedly calls op() based on the configuration, and performs measurements.
Definition: nanobench.h:1234
Bench & epochs(size_t numEpochs) noexcept
Controls number of epochs, the number of measurements to perform.
Bench & epochIterations(uint64_t numIters) noexcept
Sets exactly the number of iterations for each epoch.
unsigned int Count
Definition: txorphanage.h:41
static transaction_identifier FromUint256(const uint256 &id)
static int32_t GetTransactionWeight(const CTransaction &tx)
Definition: validation.h:132
static const unsigned int MAX_BLOCK_WEIGHT
The maximum allowed weight for a block, see BIP 141 (network rule)
Definition: consensus.h:15
@ LOW
Definition: bench.h:47
std::unique_ptr< TxOrphanage > MakeTxOrphanage() noexcept
Create a new TxOrphanage instance.
static constexpr unsigned int DEFAULT_MAX_ORPHANAGE_LATENCY_SCORE
Default value for TxOrphanage::m_max_global_latency_score.
Definition: txorphanage.h:23
static constexpr int64_t DEFAULT_RESERVED_ORPHAN_WEIGHT_PER_PEER
Default value for TxOrphanage::m_reserved_usage_per_peer.
Definition: txorphanage.h:20
int64_t NodeId
Definition: net.h:98
static constexpr int32_t MAX_STANDARD_TX_WEIGHT
The maximum weight for transactions we're willing to relay/mine.
Definition: policy.h:34
static CTransactionRef MakeTransactionRef(Tx &&txIn)
Definition: transaction.h:424
std::shared_ptr< const CTransaction > CTransactionRef
Definition: transaction.h:423
CMutableTransaction
A mutable version of CTransaction.
Definition: transaction.h:378
std::vector< CTxOut > vout
Definition: transaction.h:380
std::vector< CTxIn > vin
Definition: transaction.h:379
#define NO_THREAD_SAFETY_ANALYSIS
Definition: threadsafety.h:53
void BulkTransaction(CMutableTransaction &tx, int32_t target_weight)
assert(!tx.IsCoinBase())