// NOTE: Doxygen page header from the documentation export (not part of the
// original translation unit): Bitcoin Core 28.99.0 — P2P Digital Currency —
// txrequest_tests.cpp — "Go to the documentation of this file."
1// Copyright (c) 2020-2021 The Bitcoin Core developers
2// Distributed under the MIT software license, see the accompanying
3// file COPYING or http://www.opensource.org/licenses/mit-license.php.
4
5
#include <txrequest.h>
#include <uint256.h>

#include <test/util/random.h>
#include <test/util/setup_common.h>

#include <algorithm>
#include <chrono>
#include <functional>
#include <map>
#include <set>
#include <utility>
#include <vector>

#include <boost/test/unit_test.hpp>
17
namespace {

class Scenario;

/** Test fixture: provides the RNG (via BasicTestingSetup's m_rng) plus one
 *  builder method per test kind. Each builder appends one test's worth of
 *  delayed actions to a Scenario; TestInterleavedScenarios() runs them all. */
struct TxRequestTest : BasicTestingSetup {
    // Random delay in [1us, ~8.4s].
    std::chrono::microseconds RandomTime8s();
    // Random delay in [1us, ~1.1y].
    std::chrono::microseconds RandomTime1y();
    void BuildSingleTest(Scenario& scenario, int config);
    void BuildPriorityTest(Scenario& scenario, int config);
    void BuildBigPriorityTest(Scenario& scenario, int peers);
    void BuildRequestOrderTest(Scenario& scenario, int config);
    void BuildWtxidTest(Scenario& scenario, int config);
    void BuildTimeBackwardsTest(Scenario& scenario);
    void BuildWeirdRequestsTest(Scenario& scenario);
    // Build many scenarios and run all their actions, sorted by time.
    void TestInterleavedScenarios();
};
34
// Sentinel timestamps used throughout the tests.
constexpr std::chrono::microseconds MIN_TIME = std::chrono::microseconds::min();
constexpr std::chrono::microseconds MAX_TIME = std::chrono::microseconds::max();
constexpr std::chrono::microseconds MICROSECOND = std::chrono::microseconds{1};
constexpr std::chrono::microseconds NO_TIME = std::chrono::microseconds{0};

/** An Action is a function to call at a particular (simulated) timestamp. */
using Action = std::pair<std::chrono::microseconds, std::function<void()>>;
42
/** Shared state for an interleaved set of Scenarios: one tracker, one action
 *  list, and bookkeeping so distinct Scenarios never reuse peers/txhashes. */
struct Runner
{
    /** The TxRequestTracker being tested. */
    TxRequestTracker txrequest;

    /** List of (timestamp, action) pairs; not necessarily in time order. */
    std::vector<Action> actions;

    /** Which NodeIds have been handed out already (see Scenario::NewPeer). */
    std::set<NodeId> peerset;

    /** Which txhashes have been handed out already (see Scenario::NewTxHash). */
    std::set<uint256> txhashset;

    /** (peer, gtxid) expirations reported by GetRequestable that have not yet
     *  been consumed by a CheckExpired call; must be empty at the end. */
    std::multiset<std::pair<NodeId, GenTxid>> expired;
};
66
67std::chrono::microseconds TxRequestTest::RandomTime8s() { return std::chrono::microseconds{1 + m_rng.randbits(23)}; }
68std::chrono::microseconds TxRequestTest::RandomTime1y() { return std::chrono::microseconds{1 + m_rng.randbits(45)}; }
69
/** A proposed test scenario.
 *
 * A Scenario does not execute anything directly: each operation is recorded as
 * a (timestamp, closure) Action in the shared Runner, stamped with the
 * Scenario's own virtual time m_now. TestInterleavedScenarios() later sorts
 * the actions of many Scenarios chronologically and runs them interleaved
 * against the Runner's single TxRequestTracker.
 */
class Scenario
{
    FastRandomContext& m_rng;
    Runner& m_runner;                 // Shared state all scheduled actions operate on.
    std::chrono::microseconds m_now;  // This Scenario's current virtual time.
    std::string m_testname;           // Label included in check-failure messages.

public:
    Scenario(FastRandomContext& rng, Runner& runner, std::chrono::microseconds starttime) : m_rng(rng), m_runner(runner), m_now(starttime) {}

    /** Set a name for the current test, to be reported in its error messages. */
    void SetTestName(std::string testname)
    {
        m_testname = std::move(testname);
    }

    /** Advance this Scenario's time; this affects the timestamps all
     *  subsequently scheduled actions get. Time never goes backwards (the
     *  negative-offset parameter of Check() simulates that instead). */
    void AdvanceTime(std::chrono::microseconds amount)
    {
        assert(amount.count() >= 0);
        m_now += amount;
    }

    /** Schedule a ForgetTxHash call at this Scenario's current time. */
    void ForgetTxHash(const uint256& txhash)
    {
        // Capture the Runner by reference (not through `this`): the lambda
        // runs after the Scenario object has gone out of scope.
        auto& runner = m_runner;
        runner.actions.emplace_back(m_now, [=,&runner]() {
            runner.txrequest.ForgetTxHash(txhash);
            runner.txrequest.SanityCheck();
        });
    }

    /** Schedule a ReceivedInv call at this Scenario's current time. */
    void ReceivedInv(NodeId peer, const GenTxid& gtxid, bool pref, std::chrono::microseconds reqtime)
    {
        auto& runner = m_runner;
        runner.actions.emplace_back(m_now, [=,&runner]() {
            runner.txrequest.ReceivedInv(peer, gtxid, pref, reqtime);
            runner.txrequest.SanityCheck();
        });
    }

    /** Schedule a DisconnectedPeer call at this Scenario's current time. */
    void DisconnectedPeer(NodeId peer)
    {
        auto& runner = m_runner;
        runner.actions.emplace_back(m_now, [=,&runner]() {
            runner.txrequest.DisconnectedPeer(peer);
            runner.txrequest.SanityCheck();
        });
    }

    /** Schedule a RequestedTx call at this Scenario's current time. */
    void RequestedTx(NodeId peer, const uint256& txhash, std::chrono::microseconds exptime)
    {
        auto& runner = m_runner;
        runner.actions.emplace_back(m_now, [=,&runner]() {
            runner.txrequest.RequestedTx(peer, txhash, exptime);
            runner.txrequest.SanityCheck();
        });
    }

    /** Schedule a ReceivedResponse call at this Scenario's current time. */
    void ReceivedResponse(NodeId peer, const uint256& txhash)
    {
        auto& runner = m_runner;
        runner.actions.emplace_back(m_now, [=,&runner]() {
            runner.txrequest.ReceivedResponse(peer, txhash);
            runner.txrequest.SanityCheck();
        });
    }

    /** Schedule checks of the tracker's state for one peer at the Scenario's
     *  current time (plus an optional non-positive offset, simulating the
     *  clock going backwards). Expirations reported by GetRequestable are
     *  accumulated in Runner::expired for later CheckExpired calls.
     *
     * @param peer       Peer whose state is inspected.
     * @param expected   Expected return value of GetRequestable.
     * @param candidates Expected number of CANDIDATE announcements for peer.
     * @param inflight   Expected number of REQUESTED announcements for peer.
     * @param completed  Expected number of COMPLETED announcements for peer.
     * @param checkname  Label reported in failure messages.
     * @param offset     Offset applied to the current time (must be <= 0).
     */
    void Check(NodeId peer, const std::vector<GenTxid>& expected, size_t candidates, size_t inflight,
        size_t completed, const std::string& checkname,
        std::chrono::microseconds offset = std::chrono::microseconds{0})
    {
        const auto comment = m_testname + " " + checkname;
        auto& runner = m_runner;
        const auto now = m_now;
        assert(offset.count() <= 0);
        runner.actions.emplace_back(m_now, [=,&runner]() {
            std::vector<std::pair<NodeId, GenTxid>> expired_now;
            auto ret = runner.txrequest.GetRequestable(peer, now + offset, &expired_now);
            for (const auto& entry : expired_now) runner.expired.insert(entry);
            runner.txrequest.SanityCheck();
            runner.txrequest.PostGetRequestableSanityCheck(now + offset);
            size_t total = candidates + inflight + completed;
            size_t real_total = runner.txrequest.Count(peer);
            size_t real_candidates = runner.txrequest.CountCandidates(peer);
            size_t real_inflight = runner.txrequest.CountInFlight(peer);
            BOOST_CHECK_MESSAGE(real_total == total, strprintf("[%s] total %i (%i expected)", comment, real_total, total));
            BOOST_CHECK_MESSAGE(real_inflight == inflight, strprintf("[%s] inflight %i (%i expected)", comment, real_inflight, inflight));
            BOOST_CHECK_MESSAGE(real_candidates == candidates, strprintf("[%s] candidates %i (%i expected)", comment, real_candidates, candidates));
            BOOST_CHECK_MESSAGE(ret == expected, strprintf("[%s] mismatching requestables", comment));
        });
    }

    /** Schedule a check that an expiration of gtxid by peer was reported by an
     *  earlier Check() call; consumes one matching entry from Runner::expired. */
    void CheckExpired(NodeId peer, GenTxid gtxid)
    {
        const auto& testname = m_testname;
        auto& runner = m_runner;
        runner.actions.emplace_back(m_now, [=,&runner]() {
            auto it = runner.expired.find(std::pair<NodeId, GenTxid>{peer, gtxid});
            BOOST_CHECK_MESSAGE(it != runner.expired.end(), "[" + testname + "] missing expiration");
            if (it != runner.expired.end()) runner.expired.erase(it);
        });
    }

    /** Generate a fresh random txhash satisfying the given priority orderings.
     *
     * For each vector in `orders`, the txhash's ComputePriority (with
     * preferred=true) must be strictly decreasing over the listed peers, so
     * those peers will be picked in exactly that order among equal-preference
     * announcers. The txhash is also guaranteed unique within this Runner.
     * Executes immediately (at scheduling time); only rng/txhashset are touched. */
    uint256 NewTxHash(const std::vector<std::vector<NodeId>>& orders = {})
    {
        uint256 ret;
        bool ok;
        do {
            ret = m_rng.rand256();
            ok = true;
            for (const auto& order : orders) {
                for (size_t pos = 1; pos < order.size(); ++pos) {
                    uint64_t prio_prev = m_runner.txrequest.ComputePriority(ret, order[pos - 1], true);
                    uint64_t prio_cur = m_runner.txrequest.ComputePriority(ret, order[pos], true);
                    if (prio_prev <= prio_cur) {
                        ok = false;
                        break;
                    }
                }
                if (!ok) break;
            }
            if (ok) {
                // Reserve the hash so no other Scenario can pick it.
                ok = m_runner.txhashset.insert(ret).second;
            }
        } while(!ok);
        return ret;
    }

    /** Generate a random GenTxid — txid or wtxid with 50/50 probability — whose
     *  txhash satisfies the given orderings (see NewTxHash). */
    GenTxid NewGTxid(const std::vector<std::vector<NodeId>>& orders = {})
    {
        return m_rng.randbool() ? GenTxid::Wtxid(NewTxHash(orders)) : GenTxid::Txid(NewTxHash(orders));
    }

    /** Generate a new random NodeId, unique within this Runner (so scenarios
     *  running interleaved never share peers). */
    NodeId NewPeer()
    {
        bool ok;
        NodeId ret;
        do {
            ret = m_rng.randbits(63);
            ok = m_runner.peerset.insert(ret).second;
        } while(!ok);
        return ret;
    }

    /** This Scenario's current virtual time. */
    std::chrono::microseconds Now() const { return m_now; }
};
257
/** Add to scenario a test involving a single transaction announced by a single peer.
 *
 * config&1:  announcement is immediately requestable (reqtime=MIN_TIME) vs delayed.
 * config&2:  announcement is preferred.
 * config>>3: 0 = never requested; 1 = requested, then allowed to expire;
 *            2 = requested, neither expired nor answered; 3 = requested and answered.
 * config&4:  cleanup via peer disconnect (vs ForgetTxHash), for paths that reach it.
 */
void TxRequestTest::BuildSingleTest(Scenario& scenario, int config)
{
    auto peer = scenario.NewPeer();
    auto gtxid = scenario.NewGTxid();
    bool immediate = config & 1;
    bool preferred = config & 2;
    auto delay = immediate ? NO_TIME : RandomTime8s();

    scenario.SetTestName(strprintf("Single(config=%i)", config));

    // Receive an announcement, either immediately requestable or delayed.
    scenario.ReceivedInv(peer, gtxid, preferred, immediate ? MIN_TIME : scenario.Now() + delay);
    if (immediate) {
        scenario.Check(peer, {gtxid}, 1, 0, 0, "s1");
    } else {
        // Not requestable until reqtime: check 1us before and exactly at it.
        scenario.Check(peer, {}, 1, 0, 0, "s2");
        scenario.AdvanceTime(delay - MICROSECOND);
        scenario.Check(peer, {}, 1, 0, 0, "s3");
        scenario.AdvanceTime(MICROSECOND);
        scenario.Check(peer, {gtxid}, 1, 0, 0, "s4");
    }

    if (config >> 3) { // We'll request the transaction
        scenario.AdvanceTime(RandomTime8s());
        auto expiry = RandomTime8s();
        scenario.Check(peer, {gtxid}, 1, 0, 0, "s5");
        scenario.RequestedTx(peer, gtxid.GetHash(), scenario.Now() + expiry);
        // Now REQUESTED: no longer returned by GetRequestable.
        scenario.Check(peer, {}, 0, 1, 0, "s6");

        if ((config >> 3) == 1) { // The request will time out
            scenario.AdvanceTime(expiry - MICROSECOND);
            scenario.Check(peer, {}, 0, 1, 0, "s7");
            scenario.AdvanceTime(MICROSECOND);
            // At expiry the announcement disappears entirely (it was the only one
            // for this txhash), and the expiration must have been reported.
            scenario.Check(peer, {}, 0, 0, 0, "s8");
            scenario.CheckExpired(peer, gtxid);
            return;
        } else {
            // Wait some random time strictly before expiry.
            scenario.AdvanceTime(std::chrono::microseconds{m_rng.randrange(expiry.count())});
            scenario.Check(peer, {}, 0, 1, 0, "s9");
            if ((config >> 3) == 3) { // A response will arrive for the transaction
                scenario.ReceivedResponse(peer, gtxid.GetHash());
                scenario.Check(peer, {}, 0, 0, 0, "s10");
                return;
            }
        }
    }

    if (config & 4) { // The peer will go offline
        scenario.DisconnectedPeer(peer);
    } else { // The transaction is no longer needed
        scenario.ForgetTxHash(gtxid.GetHash());
    }
    // Either way, nothing remains.
    scenario.Check(peer, {}, 0, 0, 0, "s11");
}
316
322void TxRequestTest::BuildPriorityTest(Scenario& scenario, int config)
323{
324 scenario.SetTestName(strprintf("Priority(config=%i)", config));
325
326 // Two peers. They will announce in order {peer1, peer2}.
327 auto peer1 = scenario.NewPeer(), peer2 = scenario.NewPeer();
328 // Construct a transaction that under random rules would be preferred by peer2 or peer1,
329 // depending on configuration.
330 bool prio1 = config & 1;
331 auto gtxid = prio1 ? scenario.NewGTxid({{peer1, peer2}}) : scenario.NewGTxid({{peer2, peer1}});
332 bool pref1 = config & 2, pref2 = config & 4;
333
334 scenario.ReceivedInv(peer1, gtxid, pref1, MIN_TIME);
335 scenario.Check(peer1, {gtxid}, 1, 0, 0, "p1");
336 if (m_rng.randbool()) {
337 scenario.AdvanceTime(RandomTime8s());
338 scenario.Check(peer1, {gtxid}, 1, 0, 0, "p2");
339 }
340
341 scenario.ReceivedInv(peer2, gtxid, pref2, MIN_TIME);
342 bool stage2_prio =
343 // At this point, peer2 will be given priority if:
344 // - It is preferred and peer1 is not
345 (pref2 && !pref1) ||
346 // - They're in the same preference class,
347 // and the randomized priority favors peer2 over peer1.
348 (pref1 == pref2 && !prio1);
349 NodeId priopeer = stage2_prio ? peer2 : peer1, otherpeer = stage2_prio ? peer1 : peer2;
350 scenario.Check(otherpeer, {}, 1, 0, 0, "p3");
351 scenario.Check(priopeer, {gtxid}, 1, 0, 0, "p4");
352 if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s());
353 scenario.Check(otherpeer, {}, 1, 0, 0, "p5");
354 scenario.Check(priopeer, {gtxid}, 1, 0, 0, "p6");
355
356 // We possibly request from the selected peer.
357 if (config & 8) {
358 scenario.RequestedTx(priopeer, gtxid.GetHash(), MAX_TIME);
359 scenario.Check(priopeer, {}, 0, 1, 0, "p7");
360 scenario.Check(otherpeer, {}, 1, 0, 0, "p8");
361 if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s());
362 }
363
364 // The peer which was selected (or requested from) now goes offline, or a NOTFOUND is received from them.
365 if (config & 16) {
366 scenario.DisconnectedPeer(priopeer);
367 } else {
368 scenario.ReceivedResponse(priopeer, gtxid.GetHash());
369 }
370 if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s());
371 scenario.Check(priopeer, {}, 0, 0, !(config & 16), "p8");
372 scenario.Check(otherpeer, {gtxid}, 1, 0, 0, "p9");
373 if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s());
374
375 // Now the other peer goes offline.
376 scenario.DisconnectedPeer(otherpeer);
377 if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s());
378 scenario.Check(peer1, {}, 0, 0, 0, "p10");
379 scenario.Check(peer2, {}, 0, 0, 0, "p11");
380}
381
/** Add to scenario a test with `peers` peers all announcing one transaction,
 *  verifying that the to-be-requested-from peer is always the best remaining
 *  one: preferred peers before non-preferred ones, and within each class,
 *  ordered by the txhash's randomized priority. */
void TxRequestTest::BuildBigPriorityTest(Scenario& scenario, int peers)
{
    scenario.SetTestName(strprintf("BigPriority(peers=%i)", peers));

    // We will have N peers announce the same transaction.
    std::map<NodeId, bool> preferred;
    std::vector<NodeId> pref_peers, npref_peers;
    int num_pref = m_rng.randrange(peers + 1) ; // Some preferred, ...
    int num_npref = peers - num_pref; // some not preferred.
    for (int i = 0; i < num_pref; ++i) {
        pref_peers.push_back(scenario.NewPeer());
        preferred[pref_peers.back()] = true;
    }
    for (int i = 0; i < num_npref; ++i) {
        npref_peers.push_back(scenario.NewPeer());
        preferred[npref_peers.back()] = false;
    }
    // Make a list of all peers, in order of intended request order (concatenation of pref_peers and npref_peers).
    std::vector<NodeId> request_order;
    request_order.reserve(num_pref + num_npref);
    for (int i = 0; i < num_pref; ++i) request_order.push_back(pref_peers[i]);
    for (int i = 0; i < num_npref; ++i) request_order.push_back(npref_peers[i]);

    // Determine the announcement order randomly.
    std::vector<NodeId> announce_order = request_order;
    std::shuffle(announce_order.begin(), announce_order.end(), m_rng);

    // Find a gtxid whose txhash prioritization is consistent with the required ordering within pref_peers and
    // within npref_peers.
    auto gtxid = scenario.NewGTxid({pref_peers, npref_peers});

    // Decide reqtimes in opposite order of the expected request order. This means that as time passes we expect the
    // to-be-requested-from-peer will change every time a subsequent reqtime is passed.
    std::map<NodeId, std::chrono::microseconds> reqtimes;
    auto reqtime = scenario.Now();
    for (int i = peers - 1; i >= 0; --i) {
        reqtime += RandomTime8s();
        reqtimes[request_order[i]] = reqtime;
    }

    // Actually announce from all peers simultaneously (but in announce_order).
    for (const auto peer : announce_order) {
        scenario.ReceivedInv(peer, gtxid, preferred[peer], reqtimes[peer]);
    }
    // Initially nothing is due yet: every peer has one CANDIDATE and no requestables.
    for (const auto peer : announce_order) {
        scenario.Check(peer, {}, 1, 0, 0, "b1");
    }

    // Let time pass and observe the to-be-requested-from peer change, from nonpreferred to preferred, and from
    // high priority to low priority within each class.
    for (int i = peers - 1; i >= 0; --i) {
        scenario.AdvanceTime(reqtimes[request_order[i]] - scenario.Now() - MICROSECOND);
        scenario.Check(request_order[i], {}, 1, 0, 0, "b2");
        scenario.AdvanceTime(MICROSECOND);
        scenario.Check(request_order[i], {gtxid}, 1, 0, 0, "b3");
    }

    // Peers now in random order go offline, or send NOTFOUNDs. At every point in time the new to-be-requested-from
    // peer should be the best remaining one, so verify this after every response.
    for (int i = 0; i < peers; ++i) {
        if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s());
        const int pos = m_rng.randrange(request_order.size());
        const auto peer = request_order[pos];
        request_order.erase(request_order.begin() + pos);
        if (m_rng.randbool()) {
            scenario.DisconnectedPeer(peer);
            scenario.Check(peer, {}, 0, 0, 0, "b4");
        } else {
            scenario.ReceivedResponse(peer, gtxid.GetHash());
            // A NOTFOUND leaves a COMPLETED entry, unless this was the last
            // announcement for the txhash (then everything is forgotten).
            scenario.Check(peer, {}, 0, 0, request_order.size() > 0, "b5");
        }
        if (request_order.size()) {
            scenario.Check(request_order[0], {gtxid}, 1, 0, 0, "b6");
        }
    }

    // Everything is gone in the end.
    for (const auto peer : announce_order) {
        scenario.Check(peer, {}, 0, 0, 0, "b7");
    }
}
465
471void TxRequestTest::BuildRequestOrderTest(Scenario& scenario, int config)
472{
473 scenario.SetTestName(strprintf("RequestOrder(config=%i)", config));
474
475 auto peer = scenario.NewPeer();
476 auto gtxid1 = scenario.NewGTxid();
477 auto gtxid2 = scenario.NewGTxid();
478
479 auto reqtime2 = scenario.Now() + RandomTime8s();
480 auto reqtime1 = reqtime2 + RandomTime8s();
481
482 scenario.ReceivedInv(peer, gtxid1, config & 1, reqtime1);
483 // Simulate time going backwards by giving the second announcement an earlier reqtime.
484 scenario.ReceivedInv(peer, gtxid2, config & 2, reqtime2);
485
486 scenario.AdvanceTime(reqtime2 - MICROSECOND - scenario.Now());
487 scenario.Check(peer, {}, 2, 0, 0, "o1");
488 scenario.AdvanceTime(MICROSECOND);
489 scenario.Check(peer, {gtxid2}, 2, 0, 0, "o2");
490 scenario.AdvanceTime(reqtime1 - MICROSECOND - scenario.Now());
491 scenario.Check(peer, {gtxid2}, 2, 0, 0, "o3");
492 scenario.AdvanceTime(MICROSECOND);
493 // Even with time going backwards in between announcements, the return value of GetRequestable is in
494 // announcement order.
495 scenario.Check(peer, {gtxid1, gtxid2}, 2, 0, 0, "o4");
496
497 scenario.DisconnectedPeer(peer);
498 scenario.Check(peer, {}, 0, 0, 0, "o5");
499}
500
/** Add to scenario a test with one txid announcement and one wtxid announcement
 *  for the same txhash, made by two different peers.
 *
 * config&1: the txid announcement happens first (vs the wtxid one).
 * config&2: the txid-announcing peer is the preferred one (vs the wtxid peer).
 */
void TxRequestTest::BuildWtxidTest(Scenario& scenario, int config)
{
    scenario.SetTestName(strprintf("Wtxid(config=%i)", config));

    auto peerT = scenario.NewPeer();
    auto peerW = scenario.NewPeer();
    auto txhash = scenario.NewTxHash();
    auto txid{GenTxid::Txid(txhash)};
    auto wtxid{GenTxid::Wtxid(txhash)};

    // Each announcement is randomly either immediately requestable or delayed.
    auto reqtimeT = m_rng.randbool() ? MIN_TIME : scenario.Now() + RandomTime8s();
    auto reqtimeW = m_rng.randbool() ? MIN_TIME : scenario.Now() + RandomTime8s();

    // Announce txid first or wtxid first.
    if (config & 1) {
        scenario.ReceivedInv(peerT, txid, config & 2, reqtimeT);
        if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s());
        scenario.ReceivedInv(peerW, wtxid, !(config & 2), reqtimeW);
    } else {
        scenario.ReceivedInv(peerW, wtxid, !(config & 2), reqtimeW);
        if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s());
        scenario.ReceivedInv(peerT, txid, config & 2, reqtimeT);
    }

    // Let time pass if needed, and check that the preferred announcement (txid or wtxid)
    // is correctly to-be-requested (and with the correct wtxidness).
    auto max_reqtime = std::max(reqtimeT, reqtimeW);
    if (max_reqtime > scenario.Now()) scenario.AdvanceTime(max_reqtime - scenario.Now());
    if (config & 2) {
        scenario.Check(peerT, {txid}, 1, 0, 0, "w1");
        scenario.Check(peerW, {}, 1, 0, 0, "w2");
    } else {
        scenario.Check(peerT, {}, 1, 0, 0, "w3");
        scenario.Check(peerW, {wtxid}, 1, 0, 0, "w4");
    }

    // Let the preferred announcement be requested. It's not going to be delivered.
    auto expiry = RandomTime8s();
    if (config & 2) {
        scenario.RequestedTx(peerT, txid.GetHash(), scenario.Now() + expiry);
        scenario.Check(peerT, {}, 0, 1, 0, "w5");
        scenario.Check(peerW, {}, 1, 0, 0, "w6");
    } else {
        scenario.RequestedTx(peerW, wtxid.GetHash(), scenario.Now() + expiry);
        scenario.Check(peerT, {}, 1, 0, 0, "w7");
        scenario.Check(peerW, {}, 0, 1, 0, "w8");
    }

    // After reaching expiration time of the preferred announcement, verify that the
    // remaining one is requestable
    scenario.AdvanceTime(expiry);
    if (config & 2) {
        scenario.Check(peerT, {}, 0, 0, 1, "w9");
        scenario.Check(peerW, {wtxid}, 1, 0, 0, "w10");
        scenario.CheckExpired(peerT, txid);
    } else {
        scenario.Check(peerT, {txid}, 1, 0, 0, "w11");
        scenario.Check(peerW, {}, 0, 0, 1, "w12");
        scenario.CheckExpired(peerW, wtxid);
    }

    // If a good transaction with either that hash as wtxid or txid arrives, both
    // announcements are gone.
    if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s());
    scenario.ForgetTxHash(txhash);
    scenario.Check(peerT, {}, 0, 0, 0, "w13");
    scenario.Check(peerW, {}, 0, 0, 0, "w14");
}
574
/** Add to scenario a test exercising clock jumps backwards (through Check()'s
 *  negative offset): requestability reverts, but expirations are not undone. */
void TxRequestTest::BuildTimeBackwardsTest(Scenario& scenario)
{
    auto peer1 = scenario.NewPeer();
    auto peer2 = scenario.NewPeer();
    // txhash priority favors peer1 over peer2.
    auto gtxid = scenario.NewGTxid({{peer1, peer2}});

    // Announce from peer2.
    auto reqtime = scenario.Now() + RandomTime8s();
    scenario.ReceivedInv(peer2, gtxid, true, reqtime);
    scenario.Check(peer2, {}, 1, 0, 0, "r1");
    scenario.AdvanceTime(reqtime - scenario.Now());
    scenario.Check(peer2, {gtxid}, 1, 0, 0, "r2");
    // Check that if the clock goes backwards by 1us, the transaction would stop being requested.
    scenario.Check(peer2, {}, 1, 0, 0, "r3", -MICROSECOND);
    // But it reverts to being requested if time goes forward again.
    scenario.Check(peer2, {gtxid}, 1, 0, 0, "r4");

    // Announce from peer1.
    if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s());
    // peer1's announcement only becomes requestable at MAX_TIME, i.e. never.
    scenario.ReceivedInv(peer1, gtxid, true, MAX_TIME);
    scenario.Check(peer2, {gtxid}, 1, 0, 0, "r5");
    scenario.Check(peer1, {}, 1, 0, 0, "r6");

    // Request from peer1.
    if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s());
    auto expiry = scenario.Now() + RandomTime8s();
    scenario.RequestedTx(peer1, gtxid.GetHash(), expiry);
    scenario.Check(peer1, {}, 0, 1, 0, "r7");
    scenario.Check(peer2, {}, 1, 0, 0, "r8");

    // Expiration passes.
    scenario.AdvanceTime(expiry - scenario.Now());
    scenario.Check(peer1, {}, 0, 0, 1, "r9");
    scenario.Check(peer2, {gtxid}, 1, 0, 0, "r10"); // Request goes back to peer2.
    scenario.CheckExpired(peer1, gtxid);
    scenario.Check(peer1, {}, 0, 0, 1, "r11", -MICROSECOND); // Going back does not unexpire.
    scenario.Check(peer2, {gtxid}, 1, 0, 0, "r12", -MICROSECOND);

    // Peer2 goes offline, meaning no viable announcements remain.
    if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s());
    scenario.DisconnectedPeer(peer2);
    scenario.Check(peer1, {}, 0, 0, 0, "r13");
    scenario.Check(peer2, {}, 0, 0, 0, "r14");
}
620
/** Add to scenario a test exercising "weird" RequestedTx calls that well-behaved
 *  p2p code would not normally make — requesting a tx the peer never announced
 *  as CANDIDATE, double-requesting, requesting a COMPLETED announcement — and
 *  verify they have the documented (mostly no-op) effects. */
void TxRequestTest::BuildWeirdRequestsTest(Scenario& scenario)
{
    auto peer1 = scenario.NewPeer();
    auto peer2 = scenario.NewPeer();
    // gtxid1's priority favors peer1; gtxid2's favors peer2.
    auto gtxid1 = scenario.NewGTxid({{peer1, peer2}});
    auto gtxid2 = scenario.NewGTxid({{peer2, peer1}});

    // Announce gtxid1 by peer1.
    scenario.ReceivedInv(peer1, gtxid1, true, MIN_TIME);
    scenario.Check(peer1, {gtxid1}, 1, 0, 0, "q1");

    // Announce gtxid2 by peer2.
    if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s());
    scenario.ReceivedInv(peer2, gtxid2, true, MIN_TIME);
    scenario.Check(peer1, {gtxid1}, 1, 0, 0, "q2");
    scenario.Check(peer2, {gtxid2}, 1, 0, 0, "q3");

    // We request gtxid2 from *peer1* - no effect.
    if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s());
    scenario.RequestedTx(peer1, gtxid2.GetHash(), MAX_TIME);
    scenario.Check(peer1, {gtxid1}, 1, 0, 0, "q4");
    scenario.Check(peer2, {gtxid2}, 1, 0, 0, "q5");

    // Now request gtxid1 from peer1 - marks it as REQUESTED.
    if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s());
    auto expiryA = scenario.Now() + RandomTime8s();
    scenario.RequestedTx(peer1, gtxid1.GetHash(), expiryA);
    scenario.Check(peer1, {}, 0, 1, 0, "q6");
    scenario.Check(peer2, {gtxid2}, 1, 0, 0, "q7");

    // Request it a second time - nothing happens, as it's already REQUESTED.
    auto expiryB = expiryA + RandomTime8s();
    scenario.RequestedTx(peer1, gtxid1.GetHash(), expiryB);
    scenario.Check(peer1, {}, 0, 1, 0, "q8");
    scenario.Check(peer2, {gtxid2}, 1, 0, 0, "q9");

    // Also announce gtxid1 from peer2 now, so that the txhash isn't forgotten when the peer1 request expires.
    scenario.ReceivedInv(peer2, gtxid1, true, MIN_TIME);
    scenario.Check(peer1, {}, 0, 1, 0, "q10");
    scenario.Check(peer2, {gtxid2}, 2, 0, 0, "q11");

    // When reaching expiryA, it expires (not expiryB, which is later).
    scenario.AdvanceTime(expiryA - scenario.Now());
    scenario.Check(peer1, {}, 0, 0, 1, "q12");
    scenario.Check(peer2, {gtxid2, gtxid1}, 2, 0, 0, "q13");
    scenario.CheckExpired(peer1, gtxid1);

    // Requesting it yet again from peer1 doesn't do anything, as it's already COMPLETED.
    if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s());
    scenario.RequestedTx(peer1, gtxid1.GetHash(), MAX_TIME);
    scenario.Check(peer1, {}, 0, 0, 1, "q14");
    scenario.Check(peer2, {gtxid2, gtxid1}, 2, 0, 0, "q15");

    // Now announce gtxid2 from peer1.
    if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s());
    scenario.ReceivedInv(peer1, gtxid2, true, MIN_TIME);
    scenario.Check(peer1, {}, 1, 0, 1, "q16");
    scenario.Check(peer2, {gtxid2, gtxid1}, 2, 0, 0, "q17");

    // And request it from peer1 (weird as peer2 has the preference).
    if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s());
    scenario.RequestedTx(peer1, gtxid2.GetHash(), MAX_TIME);
    scenario.Check(peer1, {}, 0, 1, 1, "q18");
    scenario.Check(peer2, {gtxid1}, 2, 0, 0, "q19");

    // If peer2 now (normally) requests gtxid2, the existing request by peer1 becomes COMPLETED.
    if (m_rng.randbool()) scenario.AdvanceTime(RandomTime8s());
    scenario.RequestedTx(peer2, gtxid2.GetHash(), MAX_TIME);
    scenario.Check(peer1, {}, 0, 0, 2, "q20");
    scenario.Check(peer2, {gtxid1}, 1, 1, 0, "q21");

    // If peer2 goes offline, no viable announcements remain.
    scenario.DisconnectedPeer(peer2);
    scenario.Check(peer1, {}, 0, 0, 0, "q22");
    scenario.Check(peer2, {}, 0, 0, 0, "q23");
}
698
699void TxRequestTest::TestInterleavedScenarios()
700{
701 // Create a list of functions which add tests to scenarios.
702 std::vector<std::function<void(Scenario&)>> builders;
703 // Add instances of every test, for every configuration.
704 for (int n = 0; n < 64; ++n) {
705 builders.emplace_back([this, n](Scenario& scenario) { BuildWtxidTest(scenario, n); });
706 builders.emplace_back([this, n](Scenario& scenario) { BuildRequestOrderTest(scenario, n & 3); });
707 builders.emplace_back([this, n](Scenario& scenario) { BuildSingleTest(scenario, n & 31); });
708 builders.emplace_back([this, n](Scenario& scenario) { BuildPriorityTest(scenario, n & 31); });
709 builders.emplace_back([this, n](Scenario& scenario) { BuildBigPriorityTest(scenario, (n & 7) + 1); });
710 builders.emplace_back([this](Scenario& scenario) { BuildTimeBackwardsTest(scenario); });
711 builders.emplace_back([this](Scenario& scenario) { BuildWeirdRequestsTest(scenario); });
712 }
713 // Randomly shuffle all those functions.
714 std::shuffle(builders.begin(), builders.end(), m_rng);
715
716 Runner runner;
717 auto starttime = RandomTime1y();
718 // Construct many scenarios, and run (up to) 10 randomly-chosen tests consecutively in each.
719 while (builders.size()) {
720 // Introduce some variation in the start time of each scenario, so they don't all start off
721 // concurrently, but get a more random interleaving.
722 auto scenario_start = starttime + RandomTime8s() + RandomTime8s() + RandomTime8s();
723 Scenario scenario(m_rng, runner, scenario_start);
724 for (int j = 0; builders.size() && j < 10; ++j) {
725 builders.back()(scenario);
726 builders.pop_back();
727 }
728 }
729 // Sort all the actions from all those scenarios chronologically, resulting in the actions from
730 // distinct scenarios to become interleaved. Use stable_sort so that actions from one scenario
731 // aren't reordered w.r.t. each other.
732 std::stable_sort(runner.actions.begin(), runner.actions.end(), [](const Action& a1, const Action& a2) {
733 return a1.first < a2.first;
734 });
735
736 // Run all actions from all scenarios, in order.
737 for (auto& action : runner.actions) {
738 action.second();
739 }
740
741 BOOST_CHECK_EQUAL(runner.txrequest.Size(), 0U);
742 BOOST_CHECK(runner.expired.empty());
743}
744
745} // namespace
746
747BOOST_FIXTURE_TEST_SUITE(txrequest_tests, TxRequestTest)
748
750{
751 for (int i = 0; i < 5; ++i) {
752 TestInterleavedScenarios();
753 }
754}
755
/* Doxygen cross-reference residue appended by the documentation export; not
 * part of the original translation unit. Retained verbatim for provenance:
 *
 * int ret
 * Fast randomness source.                        Definition: random.h:377
 * A generic txid reference (txid or wtxid).     Definition: transaction.h:428
 * static GenTxid Wtxid(const uint256 &hash)     Definition: transaction.h:435
 * const uint256 & GetHash() const LIFETIMEBOUND Definition: transaction.h:437
 * static GenTxid Txid(const uint256 &hash)      Definition: transaction.h:434
 * I randrange(I range) noexcept
 *   Generate a random integer in the range [0..range), with range > 0.
 *                                               Definition: random.h:254
 * uint256 rand256() noexcept
 *   generate a random uint256.                  Definition: random.h:308
 * bool randbool() noexcept
 *   Generate a random boolean.                  Definition: random.h:316
 * uint64_t randbits(int bits) noexcept
 *   Generate a random (bits)-bit integer.       Definition: random.h:204
 * Data structure to keep track of, and schedule, transaction downloads from peers.
 *                                               Definition: txrequest.h:96
 * 256-bit opaque blob.                          Definition: uint256.h:190
 * BOOST_FIXTURE_TEST_SUITE(cuckoocache_tests, BasicTestingSetup)
 *   Test Suite for CuckooCache.
 * BOOST_AUTO_TEST_SUITE_END()
 * int64_t NodeId                                Definition: net.h:97
 * #define BOOST_CHECK_EQUAL(v1, v2)             Definition: object.cpp:18
 * #define BOOST_CHECK(expr)                     Definition: object.cpp:17
 * Basic testing setup.                          Definition: setup_common.h:63
 * T Now()
 *   Return the current time point cast to the given precision.
 *                                               Definition: time.h:93
 * #define strprintf
 *   Format arguments and return the string or write to given std::ostream
 *   (see tinyformat::format doc for...)         Definition: tinyformat.h:1165
 * BOOST_AUTO_TEST_CASE(TxRequestTest)
 * assert(!tx.IsCoinBase())
 */