// Bitcoin Core 31.99.0 — P2P Digital Currency
// cluster_linearize.h — Doxygen documentation-page header (extraction artifact; not part of the code).
1// Copyright (c) The Bitcoin Core developers
2// Distributed under the MIT software license, see the accompanying
3// file COPYING or http://www.opensource.org/licenses/mit-license.php.
4
5#ifndef BITCOIN_CLUSTER_LINEARIZE_H
6#define BITCOIN_CLUSTER_LINEARIZE_H
7
8#include <algorithm>
9#include <cstdint>
10#include <numeric>
11#include <optional>
12#include <utility>
13#include <vector>
14
15#include <attributes.h>
16#include <memusage.h>
17#include <random.h>
18#include <span.h>
19#include <util/feefrac.h>
20#include <util/vecdeque.h>
21
23
25using DepGraphIndex = uint32_t;
26
29template<typename SetType>
31{
33 struct Entry
34 {
38 SetType ancestors;
40 SetType descendants;
41
43 friend bool operator==(const Entry&, const Entry&) noexcept = default;
44
46 Entry() noexcept = default;
48 Entry(const FeeFrac& f, const SetType& a, const SetType& d) noexcept : feerate(f), ancestors(a), descendants(d) {}
49 };
50
    /** Data for each transaction. Indexed by DepGraphIndex; only positions in m_used hold
     *  meaningful data. */
    std::vector<Entry> entries;

    /** Which positions in entries correspond to existing transactions. */
    SetType m_used;

public:
    /** Equality operator (primarily for testing purposes). */
    friend bool operator==(const DepGraph& a, const DepGraph& b) noexcept
    {
        if (a.m_used != b.m_used) return false;
        // Only compare the used positions within the entries vector.
        for (auto idx : a.m_used) {
            if (a.entries[idx] != b.entries[idx]) return false;
        }
        return true;
    }

    // Default constructors.
    DepGraph() noexcept = default;
    DepGraph(const DepGraph&) noexcept = default;
    DepGraph(DepGraph&&) noexcept = default;
    DepGraph& operator=(const DepGraph&) noexcept = default;
    DepGraph& operator=(DepGraph&&) noexcept = default;
75
    /** Construct a DepGraph equivalent to depgraph, but with its transactions moved to the
     *  positions given by mapping (mapping[i] is the new position of old position i), and with
     *  position range pos_range (which must be exact: no unused positions at the end). */
    DepGraph(const DepGraph<SetType>& depgraph, std::span<const DepGraphIndex> mapping, DepGraphIndex pos_range) noexcept : entries(pos_range)
    {
        Assume(mapping.size() == depgraph.PositionRange());
        Assume((pos_range == 0) == (depgraph.TxCount() == 0));
        for (DepGraphIndex i : depgraph.Positions()) {
            auto new_idx = mapping[i];
            Assume(new_idx < pos_range);
            // Add transaction.
            entries[new_idx].ancestors = SetType::Singleton(new_idx);
            entries[new_idx].descendants = SetType::Singleton(new_idx);
            m_used.Set(new_idx);
            // Fill in fee and size.
            entries[new_idx].feerate = depgraph.entries[i].feerate;
        }
        for (DepGraphIndex i : depgraph.Positions()) {
            // Fill in dependencies by mapping direct parents.
            SetType parents;
            for (auto j : depgraph.GetReducedParents(i)) parents.Set(mapping[j]);
            AddDependencies(parents, mapping[i]);
        }
        // Verify that the provided pos_range was correct (no unused positions at the end).
        Assume(m_used.None() ? (pos_range == 0) : (pos_range == m_used.Last() + 1));
    }
114
    /** Get the set of transaction positions currently in use. */
    const SetType& Positions() const noexcept { return m_used; }
    /** Get the position range (a bound one higher than the highest used position). */
    DepGraphIndex PositionRange() const noexcept { return entries.size(); }
    /** Get the number of transactions in the graph. */
    auto TxCount() const noexcept { return m_used.Count(); }
    /** Get the feerate (fee and size) of a given transaction (read-only). */
    const FeeFrac& FeeRate(DepGraphIndex i) const noexcept { return entries[i].feerate; }
    /** Get the mutable feerate of a given transaction. */
    FeeFrac& FeeRate(DepGraphIndex i) noexcept { return entries[i].feerate; }
    /** Get the ancestor set of a given transaction (itself included). */
    const SetType& Ancestors(DepGraphIndex i) const noexcept { return entries[i].ancestors; }
    /** Get the descendant set of a given transaction (itself included). */
    const SetType& Descendants(DepGraphIndex i) const noexcept { return entries[i].descendants; }
129
135 DepGraphIndex AddTransaction(const FeeFrac& feefrac) noexcept
136 {
137 static constexpr auto ALL_POSITIONS = SetType::Fill(SetType::Size());
138 auto available = ALL_POSITIONS - m_used;
139 Assume(available.Any());
140 DepGraphIndex new_idx = available.First();
141 if (new_idx == entries.size()) {
142 entries.emplace_back(feefrac, SetType::Singleton(new_idx), SetType::Singleton(new_idx));
143 } else {
144 entries[new_idx] = Entry(feefrac, SetType::Singleton(new_idx), SetType::Singleton(new_idx));
145 }
146 m_used.Set(new_idx);
147 return new_idx;
148 }
149
    /** Remove the set del of transactions from this graph. Their positions become unused and may
     *  be reassigned by later AddTransaction calls. */
    void RemoveTransactions(const SetType& del) noexcept
    {
        m_used -= del;
        // Remove now-unused trailing entries.
        while (!entries.empty() && !m_used[entries.size() - 1]) {
            entries.pop_back();
        }
        // Remove the deleted transactions from ancestors/descendants of other transactions. Note
        // that the deleted positions will retain old feerate and dependency information. This does
        // not matter as they will be overwritten by AddTransaction if they get used again.
        for (auto& entry : entries) {
            entry.ancestors &= m_used;
            entry.descendants &= m_used;
        }
    }
174
    /** Make all transactions in parents ancestors of child, propagating the change through all
     *  affected ancestor and descendant sets. */
    void AddDependencies(const SetType& parents, DepGraphIndex child) noexcept
    {
        Assume(m_used[child]);
        Assume(parents.IsSubsetOf(m_used));
        // Compute the ancestors of parents that are not already ancestors of child.
        SetType par_anc;
        for (auto par : parents - Ancestors(child)) {
            par_anc |= Ancestors(par);
        }
        par_anc -= Ancestors(child);
        // Bail out if there are no such ancestors.
        if (par_anc.None()) return;
        // To each such ancestor, add as descendants the descendants of the child.
        const auto& chl_des = entries[child].descendants;
        for (auto anc_of_par : par_anc) {
            entries[anc_of_par].descendants |= chl_des;
        }
        // To each descendant of the child, add those ancestors.
        for (auto dec_of_chl : Descendants(child)) {
            entries[dec_of_chl].ancestors |= par_anc;
        }
    }
201
    /** Compute the reduced set of direct parents of transaction i: those ancestors that are not
     *  also reachable through another ancestor. */
    SetType GetReducedParents(DepGraphIndex i) const noexcept
    {
        // Start with all ancestors except the transaction itself.
        SetType parents = Ancestors(i);
        parents.Reset(i);
        for (auto parent : parents) {
            // Skip positions that an earlier iteration has already removed.
            if (parents[parent]) {
                // Remove this parent's own ancestors, but keep the parent itself.
                parents -= Ancestors(parent);
                parents.Set(parent);
            }
        }
        return parents;
    }
222
    /** Compute the reduced set of direct children of transaction i: those descendants that are
     *  not also reachable through another descendant. */
    SetType GetReducedChildren(DepGraphIndex i) const noexcept
    {
        // Start with all descendants except the transaction itself.
        SetType children = Descendants(i);
        children.Reset(i);
        for (auto child : children) {
            // Skip positions that an earlier iteration has already removed.
            if (children[child]) {
                // Remove this child's own descendants, but keep the child itself.
                children -= Descendants(child);
                children.Set(child);
            }
        }
        return children;
    }
243
248 FeeFrac FeeRate(const SetType& elems) const noexcept
249 {
250 FeeFrac ret;
251 for (auto pos : elems) ret += entries[pos].feerate;
252 return ret;
253 }
254
    /** Compute the connected component within the subset todo that contains transaction tx, by
     *  repeatedly expanding with members' ancestors and descendants until a fixed point. */
    SetType GetConnectedComponent(const SetType& todo, DepGraphIndex tx) const noexcept
    {
        Assume(todo[tx]);
        Assume(todo.IsSubsetOf(m_used));
        auto to_add = SetType::Singleton(tx);
        SetType ret;
        do {
            SetType old = ret;
            for (auto add : to_add) {
                ret |= Descendants(add);
                ret |= Ancestors(add);
            }
            // Restrict the expansion to the requested subset.
            ret &= todo;
            to_add = ret - old;
        } while (to_add.Any());
        return ret;
    }
282
290 SetType FindConnectedComponent(const SetType& todo) const noexcept
291 {
292 if (todo.None()) return todo;
293 return GetConnectedComponent(todo, todo.First());
294 }
295
    /** Determine whether the given subset forms a single connected component in this graph. */
    bool IsConnected(const SetType& subset) const noexcept
    {
        return FindConnectedComponent(subset) == subset;
    }

    /** Determine whether the entire graph is connected. */
    bool IsConnected() const noexcept { return IsConnected(m_used); }
310
    /** Append the positions in select to list, in a topologically-valid order: ancestors before
     *  descendants (sorted by ancestor-set size, ties broken by position). */
    void AppendTopo(std::vector<DepGraphIndex>& list, const SetType& select) const noexcept
    {
        DepGraphIndex old_len = list.size();
        for (auto i : select) list.push_back(i);
        std::sort(list.begin() + old_len, list.end(), [&](DepGraphIndex a, DepGraphIndex b) noexcept {
            const auto a_anc_count = entries[a].ancestors.Count();
            const auto b_anc_count = entries[b].ancestors.Count();
            if (a_anc_count != b_anc_count) return a_anc_count < b_anc_count;
            return a < b;
        });
    }
326
328 bool IsAcyclic() const noexcept
329 {
330 for (auto i : Positions()) {
331 if ((Ancestors(i) & Descendants(i)) != SetType::Singleton(i)) {
332 return false;
333 }
334 }
335 return true;
336 }
337
338 unsigned CountDependencies() const noexcept
339 {
340 unsigned ret = 0;
341 for (auto i : Positions()) {
342 ret += GetReducedParents(i).Count();
343 }
344 return ret;
345 }
346
    /** Release any excess capacity held by the entries vector. */
    void Compact() noexcept
    {
        entries.shrink_to_fit();
    }
352
353 size_t DynamicMemoryUsage() const noexcept
354 {
356 }
357};
358
360template<typename SetType>
362{
367
369 SetInfo() noexcept = default;
370
372 SetInfo(const SetType& txn, const FeeFrac& fr) noexcept : transactions(txn), feerate(fr) {}
373
375 explicit SetInfo(const DepGraph<SetType>& depgraph, DepGraphIndex pos) noexcept :
376 transactions(SetType::Singleton(pos)), feerate(depgraph.FeeRate(pos)) {}
377
379 explicit SetInfo(const DepGraph<SetType>& depgraph, const SetType& txn) noexcept :
380 transactions(txn), feerate(depgraph.FeeRate(txn)) {}
381
383 void Set(const DepGraph<SetType>& depgraph, DepGraphIndex pos) noexcept
384 {
385 Assume(!transactions[pos]);
386 transactions.Set(pos);
387 feerate += depgraph.FeeRate(pos);
388 }
389
391 SetInfo& operator|=(const SetInfo& other) noexcept
392 {
393 Assume(!transactions.Overlaps(other.transactions));
394 transactions |= other.transactions;
395 feerate += other.feerate;
396 return *this;
397 }
398
400 SetInfo& operator-=(const SetInfo& other) noexcept
401 {
402 Assume(other.transactions.IsSubsetOf(transactions));
403 transactions -= other.transactions;
404 feerate -= other.feerate;
405 return *this;
406 }
407
409 SetInfo operator-(const SetInfo& other) const noexcept
410 {
411 Assume(other.transactions.IsSubsetOf(transactions));
412 return {transactions - other.transactions, feerate - other.feerate};
413 }
414
416 friend void swap(SetInfo& a, SetInfo& b) noexcept
417 {
418 swap(a.transactions, b.transactions);
419 swap(a.feerate, b.feerate);
420 }
421
423 friend bool operator==(const SetInfo&, const SetInfo&) noexcept = default;
424};
425
427template<typename SetType>
428std::vector<SetInfo<SetType>> ChunkLinearizationInfo(const DepGraph<SetType>& depgraph, std::span<const DepGraphIndex> linearization) noexcept
429{
430 std::vector<SetInfo<SetType>> ret;
431 for (DepGraphIndex i : linearization) {
433 SetInfo<SetType> new_chunk(depgraph, i);
434 // As long as the new chunk has a higher feerate than the last chunk so far, absorb it.
435 while (!ret.empty() && new_chunk.feerate >> ret.back().feerate) {
436 new_chunk |= ret.back();
437 ret.pop_back();
438 }
439 // Actually move that new chunk into the chunking.
440 ret.emplace_back(std::move(new_chunk));
441 }
442 return ret;
443}
444
447template<typename SetType>
448std::vector<FeeFrac> ChunkLinearization(const DepGraph<SetType>& depgraph, std::span<const DepGraphIndex> linearization) noexcept
449{
450 std::vector<FeeFrac> ret;
451 for (DepGraphIndex i : linearization) {
453 auto new_chunk = depgraph.FeeRate(i);
454 // As long as the new chunk has a higher feerate than the last chunk so far, absorb it.
455 while (!ret.empty() && new_chunk >> ret.back()) {
456 new_chunk += ret.back();
457 ret.pop_back();
458 }
459 // Actually move that new chunk into the chunking.
460 ret.push_back(std::move(new_chunk));
461 }
462 return ret;
463}
464
// NOTE(review): the "concept <name> =" line appears to have been lost in extraction here; the
// visible constraint requires F to be invocable with two Arg values and to return
// std::strong_ordering — confirm the concept's name against upstream.
template<typename F, typename Arg>
    std::regular_invocable<F, Arg, Arg> &&
    std::is_same_v<std::invoke_result_t<F, Arg, Arg>, std::strong_ordering>;

/** Default three-way comparison used for ordering transaction indices. */
using IndexTxOrder = std::compare_three_way;
474
/** Default cost model for the spanning-forest linearization algorithm.
 *
 *  Accumulates an approximate cost (in abstract units) of the work performed by the various
 *  operations, so callers can budget how much computation to spend.
 *  NOTE(review): the "class SFLDefaultCostModel" header line was missing from the extracted
 *  source; the name is restored from its use as default CostModel template argument below. */
class SFLDefaultCostModel
{
    /** Accumulated cost so far, in abstract cost units. */
    uint64_t m_cost{0};

public:
    inline void InitializeBegin() noexcept {}
    inline void InitializeEnd(int num_txns, int num_deps) noexcept
    {
        // Cost of initialization.
        m_cost += 39 * num_txns;
        // Cost of producing linearization at the end.
        m_cost += 48 * num_txns + 4 * num_deps;
    }
    inline void GetLinearizationBegin() noexcept {}
    inline void GetLinearizationEnd(int num_txns, int num_deps) noexcept
    {
        // Note that we account for the cost of the final linearization at the beginning (see
        // InitializeEnd), because the cost budget decision needs to be made before calling
        // GetLinearization.
        // This function exists here to allow overriding it easily for benchmark purposes.
    }
    inline void MakeTopologicalBegin() noexcept {}
    inline void MakeTopologicalEnd(int num_chunks, int num_steps) noexcept
    {
        m_cost += 20 * num_chunks + 28 * num_steps;
    }
    inline void StartOptimizingBegin() noexcept {}
    inline void StartOptimizingEnd(int num_chunks) noexcept { m_cost += 13 * num_chunks; }
    inline void ActivateBegin() noexcept {}
    inline void ActivateEnd(int num_deps) noexcept { m_cost += 10 * num_deps + 1; }
    inline void DeactivateBegin() noexcept {}
    inline void DeactivateEnd(int num_deps) noexcept { m_cost += 11 * num_deps + 8; }
    inline void MergeChunksBegin() noexcept {}
    inline void MergeChunksMid(int num_txns) noexcept { m_cost += 2 * num_txns; }
    inline void MergeChunksEnd(int num_steps) noexcept { m_cost += 3 * num_steps + 5; }
    inline void PickMergeCandidateBegin() noexcept {}
    inline void PickMergeCandidateEnd(int num_steps) noexcept { m_cost += 8 * num_steps; }
    inline void PickChunkToOptimizeBegin() noexcept {}
    inline void PickChunkToOptimizeEnd(int num_steps) noexcept { m_cost += num_steps + 4; }
    inline void PickDependencyToSplitBegin() noexcept {}
    inline void PickDependencyToSplitEnd(int num_txns) noexcept { m_cost += 8 * num_txns + 9; }
    inline void StartMinimizingBegin() noexcept {}
    inline void StartMinimizingEnd(int num_chunks) noexcept { m_cost += 18 * num_chunks; }
    inline void MinimizeStepBegin() noexcept {}
    inline void MinimizeStepMid(int num_txns) noexcept { m_cost += 11 * num_txns + 11; }
    inline void MinimizeStepEnd(bool split) noexcept { m_cost += 17 * split + 7; }

    /** Retrieve the total accumulated cost. */
    inline uint64_t GetCost() const noexcept { return m_cost; }
};
545
721template<typename SetType, typename CostModel = SFLDefaultCostModel>
723{
724private:
727
732 using SetIdx = std::conditional_t<(SetType::Size() <= 0xff),
733 uint8_t,
734 std::conditional_t<(SetType::Size() <= 0xffff),
735 uint16_t,
736 uint32_t>>;
738 static constexpr SetIdx INVALID_SET_IDX = SetIdx(-1);
739
    /** Per-transaction algorithm state. */
    struct TxData {
        /** For each active child dependency of this transaction, the m_set_info index of that
         *  dependency's top set. */
        std::array<SetIdx, SetType::Size()> dep_top_idx;
        /** The (reduced) parents of this transaction. */
        SetType parents;
        /** The (reduced) children of this transaction. */
        SetType children;
        // NOTE(review): members referenced elsewhere in this class (chunk_idx, active_children)
        // appear to have been lost in extraction — confirm against upstream.
    };

    /** TxData for every transaction, indexed by transaction index. */
    std::vector<TxData> m_tx_data;
    /** Set information (transactions plus feerate) for each chunk / dependency top set. */
    std::vector<SetInfo<SetType>> m_set_info;
    /** For each set, the transactions reachable through parents (first) and through children
     *  (second), excluding the set's own transactions. */
    std::vector<std::pair<SetType, SetType>> m_reachable;

    // NOTE(review): further member declarations referenced by the functions below (the RNG
    // m_rng, the depgraph reference m_depgraph, m_transaction_idxs, m_chunk_idxs,
    // m_suboptimal_idxs, m_suboptimal_chunks) appear to have been lost in extraction — confirm
    // against upstream.

    /** Cost model tracking how much work the algorithm has performed. */
    CostModel m_cost;
787
789 TxIdx PickRandomTx(const SetType& tx_idxs) noexcept
790 {
791 Assume(tx_idxs.Any());
792 unsigned pos = m_rng.randrange<unsigned>(tx_idxs.Count());
793 for (auto tx_idx : tx_idxs) {
794 if (pos == 0) return tx_idx;
795 --pos;
796 }
797 Assume(false);
798 return TxIdx(-1);
799 }
800
804 std::pair<SetType, SetType> GetReachable(const SetType& tx_idxs) const noexcept
805 {
806 SetType parents, children;
807 for (auto tx_idx : tx_idxs) {
808 const auto& tx_data = m_tx_data[tx_idx];
809 parents |= tx_data.parents;
810 children |= tx_data.children;
811 }
812 return {parents - tx_idxs, children - tx_idxs};
813 }
814
    /** Activate the inactive dependency parent_idx -> child_idx, merging the chunk containing
     *  the parent into the chunk containing the child. The parent's old chunk set becomes the
     *  top set of the newly activated dependency. Returns the merged chunk's set index. */
    SetIdx Activate(TxIdx parent_idx, TxIdx child_idx) noexcept
    {
        m_cost.ActivateBegin();
        // Gather and check information about the parent and child transactions.
        auto& parent_data = m_tx_data[parent_idx];
        auto& child_data = m_tx_data[child_idx];
        Assume(parent_data.children[child_idx]);
        Assume(!parent_data.active_children[child_idx]);
        // Get the set index of the chunks the parent and child are currently in. The parent chunk
        // will become the top set of the newly activated dependency, while the child chunk will be
        // grown to become the merged chunk.
        auto parent_chunk_idx = parent_data.chunk_idx;
        auto child_chunk_idx = child_data.chunk_idx;
        Assume(parent_chunk_idx != child_chunk_idx);
        Assume(m_chunk_idxs[parent_chunk_idx]);
        Assume(m_chunk_idxs[child_chunk_idx]);
        auto& top_info = m_set_info[parent_chunk_idx];
        auto& bottom_info = m_set_info[child_chunk_idx];

        // Consider the following example:
        //
        //     A         A      There are two chunks, ABC and DEF, and the inactive E->C dependency
        //    / \       / \     is activated, resulting in a single chunk ABCDEF.
        //   B   C     B   C
        //       :  ==>    |    Dependency | top set before | top set after | change
        //   D   E     D   E    B->A       | AC             | ACDEF         | +DEF
        //    \ /       \ /     C->A       | AB             | AB            |
        //     F         F      F->D       | D              | D             |
        //                      F->E       | E              | ABCE          | +ABC
        //
        // The common pattern here is that any dependency which has the parent or child of the
        // dependency being activated (E->C here) in its top set, will have the opposite part added
        // to it. This is true for B->A and F->E, but not for C->A and F->D.
        //
        // Traverse the old parent chunk top_info (ABC in example), and add bottom_info (DEF) to
        // every dependency's top set which has the parent (C) in it. At the same time, change the
        // chunk_idx for each to be child_chunk_idx, which becomes the set for the merged chunk.
        for (auto tx_idx : top_info.transactions) {
            auto& tx_data = m_tx_data[tx_idx];
            tx_data.chunk_idx = child_chunk_idx;
            for (auto dep_child_idx : tx_data.active_children) {
                auto& dep_top_info = m_set_info[tx_data.dep_top_idx[dep_child_idx]];
                if (dep_top_info.transactions[parent_idx]) dep_top_info |= bottom_info;
            }
        }
        // Traverse the old child chunk bottom_info (DEF in example), and add top_info (ABC) to
        // every dependency's top set which has the child (E) in it.
        for (auto tx_idx : bottom_info.transactions) {
            auto& tx_data = m_tx_data[tx_idx];
            for (auto dep_child_idx : tx_data.active_children) {
                auto& dep_top_info = m_set_info[tx_data.dep_top_idx[dep_child_idx]];
                if (dep_top_info.transactions[child_idx]) dep_top_info |= top_info;
            }
        }
        // Merge top_info into bottom_info, which becomes the merged chunk.
        bottom_info |= top_info;
        // Compute merged sets of reachable transactions from the new chunk, based on the input
        // chunks' reachable sets.
        m_reachable[child_chunk_idx].first |= m_reachable[parent_chunk_idx].first;
        m_reachable[child_chunk_idx].second |= m_reachable[parent_chunk_idx].second;
        m_reachable[child_chunk_idx].first -= bottom_info.transactions;
        m_reachable[child_chunk_idx].second -= bottom_info.transactions;
        // Make parent chunk the set for the new active dependency.
        parent_data.dep_top_idx[child_idx] = parent_chunk_idx;
        parent_data.active_children.Set(child_idx);
        m_chunk_idxs.Reset(parent_chunk_idx);
        // Return the newly merged chunk.
        m_cost.ActivateEnd(/*num_deps=*/bottom_info.transactions.Count() - 1);
        return child_chunk_idx;
    }
887
    /** Deactivate the active dependency parent_idx -> child_idx, splitting its chunk into a new
     *  top chunk (the dependency's old top set, containing the parent) and a new bottom chunk
     *  (the remainder, containing the child). Returns {parent_chunk_idx, child_chunk_idx}. */
    std::pair<SetIdx, SetIdx> Deactivate(TxIdx parent_idx, TxIdx child_idx) noexcept
    {
        m_cost.DeactivateBegin();
        // Gather and check information about the parent transactions.
        auto& parent_data = m_tx_data[parent_idx];
        Assume(parent_data.children[child_idx]);
        Assume(parent_data.active_children[child_idx]);
        // Get the top set of the active dependency (which will become the parent chunk) and the
        // chunk set the transactions are currently in (which will become the bottom chunk).
        auto parent_chunk_idx = parent_data.dep_top_idx[child_idx];
        auto child_chunk_idx = parent_data.chunk_idx;
        Assume(parent_chunk_idx != child_chunk_idx);
        Assume(m_chunk_idxs[child_chunk_idx]);
        Assume(!m_chunk_idxs[parent_chunk_idx]); // top set, not a chunk
        auto& top_info = m_set_info[parent_chunk_idx];
        auto& bottom_info = m_set_info[child_chunk_idx];

        // Remove the active dependency.
        parent_data.active_children.Reset(child_idx);
        m_chunk_idxs.Set(parent_chunk_idx);
        auto ntx = bottom_info.transactions.Count();
        // Subtract the top_info from the bottom_info, as it will become the child chunk.
        bottom_info -= top_info;
        // See the comment above in Activate(). We perform the opposite operations here, removing
        // instead of adding. Simultaneously, aggregate the top/bottom's union of parents/children.
        SetType top_parents, top_children;
        for (auto tx_idx : top_info.transactions) {
            auto& tx_data = m_tx_data[tx_idx];
            tx_data.chunk_idx = parent_chunk_idx;
            top_parents |= tx_data.parents;
            top_children |= tx_data.children;
            for (auto dep_child_idx : tx_data.active_children) {
                auto& dep_top_info = m_set_info[tx_data.dep_top_idx[dep_child_idx]];
                if (dep_top_info.transactions[parent_idx]) dep_top_info -= bottom_info;
            }
        }
        SetType bottom_parents, bottom_children;
        for (auto tx_idx : bottom_info.transactions) {
            auto& tx_data = m_tx_data[tx_idx];
            bottom_parents |= tx_data.parents;
            bottom_children |= tx_data.children;
            for (auto dep_child_idx : tx_data.active_children) {
                auto& dep_top_info = m_set_info[tx_data.dep_top_idx[dep_child_idx]];
                if (dep_top_info.transactions[child_idx]) dep_top_info -= top_info;
            }
        }
        // Compute the new sets of reachable transactions for each new chunk, based on the
        // top/bottom parents and children computed above.
        m_reachable[parent_chunk_idx].first = top_parents - top_info.transactions;
        m_reachable[parent_chunk_idx].second = top_children - top_info.transactions;
        m_reachable[child_chunk_idx].first = bottom_parents - bottom_info.transactions;
        m_reachable[child_chunk_idx].second = bottom_children - bottom_info.transactions;
        // Return the two new set idxs.
        m_cost.DeactivateEnd(/*num_deps=*/ntx - 1);
        return {parent_chunk_idx, child_chunk_idx};
    }
946
    /** Merge two chunks (top_idx above bottom_idx) by activating one uniformly random dependency
     *  among those going from the top chunk to the bottom chunk (at least one must exist).
     *  Returns the merged chunk's set index. */
    SetIdx MergeChunks(SetIdx top_idx, SetIdx bottom_idx) noexcept
    {
        m_cost.MergeChunksBegin();
        Assume(m_chunk_idxs[top_idx]);
        Assume(m_chunk_idxs[bottom_idx]);
        auto& top_chunk_info = m_set_info[top_idx];
        auto& bottom_chunk_info = m_set_info[bottom_idx];
        // Count the number of dependencies between bottom_chunk and top_chunk.
        unsigned num_deps{0};
        for (auto tx_idx : top_chunk_info.transactions) {
            auto& tx_data = m_tx_data[tx_idx];
            num_deps += (tx_data.children & bottom_chunk_info.transactions).Count();
        }
        m_cost.MergeChunksMid(/*num_txns=*/top_chunk_info.transactions.Count());
        Assume(num_deps > 0);
        // Uniformly randomly pick one of them and activate it.
        unsigned pick = m_rng.randrange(num_deps);
        unsigned num_steps = 0;
        for (auto tx_idx : top_chunk_info.transactions) {
            ++num_steps;
            auto& tx_data = m_tx_data[tx_idx];
            auto intersect = tx_data.children & bottom_chunk_info.transactions;
            auto count = intersect.Count();
            if (pick < count) {
                // The picked dependency has this transaction as parent.
                for (auto child_idx : intersect) {
                    if (pick == 0) {
                        m_cost.MergeChunksEnd(/*num_steps=*/num_steps);
                        return Activate(tx_idx, child_idx);
                    }
                    --pick;
                }
                // Unreachable: pick < count guarantees a hit in the loop above.
                Assume(false);
                break;
            }
            pick -= count;
        }
        // Unreachable: pick < num_deps guarantees some dependency is selected.
        Assume(false);
        return INVALID_SET_IDX;
    }
988
991 template<bool DownWard>
992 SetIdx MergeChunksDirected(SetIdx chunk_idx, SetIdx merge_chunk_idx) noexcept
993 {
994 if constexpr (DownWard) {
995 return MergeChunks(chunk_idx, merge_chunk_idx);
996 } else {
997 return MergeChunks(merge_chunk_idx, chunk_idx);
998 }
999 }
1000
1002 template<bool DownWard>
1004 {
1005 m_cost.PickMergeCandidateBegin();
1007 Assume(m_chunk_idxs[chunk_idx]);
1008 auto& chunk_info = m_set_info[chunk_idx];
1009 // Iterate over all chunks reachable from this one. For those depended-on chunks,
1010 // remember the highest-feerate (if DownWard) or lowest-feerate (if !DownWard) one.
1011 // If multiple equal-feerate candidate chunks to merge with exist, pick a random one
1012 // among them.
1013
1017 FeeFrac best_other_chunk_feerate = chunk_info.feerate;
1019 SetIdx best_other_chunk_idx = INVALID_SET_IDX;
1022 uint64_t best_other_chunk_tiebreak{0};
1023
1025 auto todo = DownWard ? m_reachable[chunk_idx].second : m_reachable[chunk_idx].first;
1026 unsigned steps = 0;
1027 while (todo.Any()) {
1028 ++steps;
1029 // Find a chunk for a transaction in todo, and remove all its transactions from todo.
1030 auto reached_chunk_idx = m_tx_data[todo.First()].chunk_idx;
1031 auto& reached_chunk_info = m_set_info[reached_chunk_idx];
1032 todo -= reached_chunk_info.transactions;
1033 // See if it has an acceptable feerate.
1034 auto cmp = DownWard ? FeeRateCompare(best_other_chunk_feerate, reached_chunk_info.feerate)
1035 : FeeRateCompare(reached_chunk_info.feerate, best_other_chunk_feerate);
1036 if (cmp > 0) continue;
1037 uint64_t tiebreak = m_rng.rand64();
1038 if (cmp < 0 || tiebreak >= best_other_chunk_tiebreak) {
1039 best_other_chunk_feerate = reached_chunk_info.feerate;
1040 best_other_chunk_idx = reached_chunk_idx;
1041 best_other_chunk_tiebreak = tiebreak;
1042 }
1043 }
1044 Assume(steps <= m_set_info.size());
1045
1046 m_cost.PickMergeCandidateEnd(/*num_steps=*/steps);
1047 return best_other_chunk_idx;
1048 }
1049
1052 template<bool DownWard>
1053 SetIdx MergeStep(SetIdx chunk_idx) noexcept
1054 {
1055 auto merge_chunk_idx = PickMergeCandidate<DownWard>(chunk_idx);
1056 if (merge_chunk_idx == INVALID_SET_IDX) return INVALID_SET_IDX;
1057 chunk_idx = MergeChunksDirected<DownWard>(chunk_idx, merge_chunk_idx);
1058 Assume(chunk_idx != INVALID_SET_IDX);
1059 return chunk_idx;
1060 }
1061
    /** Repeatedly merge chunk_idx in the direction given by DownWard until no merge candidate
     *  remains, then queue the resulting chunk as potentially suboptimal. */
    template<bool DownWard>
    void MergeSequence(SetIdx chunk_idx) noexcept
    {
        Assume(m_chunk_idxs[chunk_idx]);
        while (true) {
            auto merged_chunk_idx = MergeStep<DownWard>(chunk_idx);
            if (merged_chunk_idx == INVALID_SET_IDX) break;
            chunk_idx = merged_chunk_idx;
        }
        // Add the chunk to the queue of improvable chunks, if it wasn't already there.
        if (!m_suboptimal_idxs[chunk_idx]) {
            m_suboptimal_idxs.Set(chunk_idx);
            m_suboptimal_chunks.push_back(chunk_idx);
        }
    }
1078
    /** Improve the current state by deactivating the dependency parent_idx -> child_idx, then
     *  restoring topological validity through targeted merges, queueing the resulting chunks as
     *  potentially suboptimal. */
    void Improve(TxIdx parent_idx, TxIdx child_idx) noexcept
    {
        // Deactivate the specified dependency, splitting it into two new chunks: a top containing
        // the parent, and a bottom containing the child. The top should have a higher feerate.
        auto [parent_chunk_idx, child_chunk_idx] = Deactivate(parent_idx, child_idx);

        // At this point we have exactly two chunks which may violate topology constraints (the
        // parent chunk and child chunk that were produced by deactivation). We can fix
        // these using just merge sequences, one upwards and one downwards, avoiding the need for a
        // full MakeTopological.
        const auto& parent_reachable = m_reachable[parent_chunk_idx].first;
        const auto& child_chunk_txn = m_set_info[child_chunk_idx].transactions;
        if (parent_reachable.Overlaps(child_chunk_txn)) {
            // The parent chunk has a dependency on a transaction in the child chunk. In this case,
            // the parent needs to merge back with the child chunk (a self-merge), and no other
            // merges are needed. Special-case this, so the overhead of PickMergeCandidate and
            // MergeSequence can be avoided.

            // In the self-merge, the roles reverse: the parent chunk (from the split) depends
            // on the child chunk, so child_chunk_idx is the "top" and parent_chunk_idx is the
            // "bottom" for MergeChunks.
            auto merged_chunk_idx = MergeChunks(child_chunk_idx, parent_chunk_idx);
            if (!m_suboptimal_idxs[merged_chunk_idx]) {
                m_suboptimal_idxs.Set(merged_chunk_idx);
                m_suboptimal_chunks.push_back(merged_chunk_idx);
            }
        } else {
            // Merge the top chunk with lower-feerate chunks it depends on.
            MergeSequence<false>(parent_chunk_idx);
            // Merge the bottom chunk with higher-feerate chunks that depend on it.
            MergeSequence<true>(child_chunk_idx);
        }
    }
1114
1117 {
1118 m_cost.PickChunkToOptimizeBegin();
1119 unsigned steps{0};
1120 while (!m_suboptimal_chunks.empty()) {
1121 ++steps;
1122 // Pop an entry from the potentially-suboptimal chunk queue.
1123 SetIdx chunk_idx = m_suboptimal_chunks.front();
1124 Assume(m_suboptimal_idxs[chunk_idx]);
1125 m_suboptimal_idxs.Reset(chunk_idx);
1127 if (m_chunk_idxs[chunk_idx]) {
1128 m_cost.PickChunkToOptimizeEnd(/*num_steps=*/steps);
1129 return chunk_idx;
1130 }
1131 // If what was popped is not currently a chunk, continue. This may
1132 // happen when a split chunk merges in Improve() with one or more existing chunks that
1133 // are themselves on the suboptimal queue already.
1134 }
1135 m_cost.PickChunkToOptimizeEnd(/*num_steps=*/steps);
1136 return INVALID_SET_IDX;
1137 }
1138
    /** Within chunk chunk_idx, pick a uniformly random eligible dependency to split (one whose
     *  top set has a strictly higher feerate than the chunk itself). Returns the pair
     *  {parent, child}, or {TxIdx(-1), TxIdx(-1)} when no dependency is eligible. */
    std::pair<TxIdx, TxIdx> PickDependencyToSplit(SetIdx chunk_idx) noexcept
    {
        m_cost.PickDependencyToSplitBegin();
        Assume(m_chunk_idxs[chunk_idx]);
        auto& chunk_info = m_set_info[chunk_idx];

        // Remember the best dependency {par, chl} seen so far.
        std::pair<TxIdx, TxIdx> candidate_dep = {TxIdx(-1), TxIdx(-1)};
        uint64_t candidate_tiebreak = 0;
        // Iterate over all transactions.
        for (auto tx_idx : chunk_info.transactions) {
            const auto& tx_data = m_tx_data[tx_idx];
            // Iterate over all active child dependencies of the transaction.
            for (auto child_idx : tx_data.active_children) {
                auto& dep_top_info = m_set_info[tx_data.dep_top_idx[child_idx]];
                // Skip if this dependency is ineligible (the top chunk that would be created
                // does not have higher feerate than the chunk it is currently part of).
                auto cmp = FeeRateCompare(dep_top_info.feerate, chunk_info.feerate);
                if (cmp <= 0) continue;
                // Generate a random tiebreak for this dependency, and reject it if its tiebreak
                // is worse than the best so far. This means that among all eligible
                // dependencies, a uniformly random one will be chosen.
                uint64_t tiebreak = m_rng.rand64();
                if (tiebreak < candidate_tiebreak) continue;
                // Remember this as our (new) candidate dependency.
                candidate_dep = {tx_idx, child_idx};
                candidate_tiebreak = tiebreak;
            }
        }
        m_cost.PickDependencyToSplitEnd(/*num_txns=*/chunk_info.transactions.Count());
        return candidate_dep;
    }
1172
public:
    /** Construct a SpanningForestState for the given depgraph: every transaction starts as its
     *  own singleton chunk, with all dependencies inactive.
     *  NOTE(review): the declarations of the members initialized here (m_rng, m_depgraph) appear
     *  to have been lost in extraction — confirm against upstream. */
    explicit SpanningForestState(const DepGraph<SetType>& depgraph LIFETIMEBOUND, uint64_t rng_seed, const CostModel& cost = CostModel{}) noexcept :
        m_rng(rng_seed), m_depgraph(depgraph), m_cost(cost)
    {
        m_cost.InitializeBegin();
        m_transaction_idxs = depgraph.Positions();
        auto num_transactions = m_transaction_idxs.Count();
        m_tx_data.resize(depgraph.PositionRange());
        m_set_info.resize(num_transactions);
        m_reachable.resize(num_transactions);
        size_t num_chunks = 0;
        size_t num_deps = 0;
        for (auto tx_idx : m_transaction_idxs) {
            // Fill in transaction data.
            auto& tx_data = m_tx_data[tx_idx];
            tx_data.parents = depgraph.GetReducedParents(tx_idx);
            for (auto parent_idx : tx_data.parents) {
                m_tx_data[parent_idx].children.Set(tx_idx);
            }
            num_deps += tx_data.parents.Count();
            // Create a singleton chunk for it.
            tx_data.chunk_idx = num_chunks;
            m_set_info[num_chunks++] = SetInfo(depgraph, tx_idx);
        }
        // Set the reachable transactions for each chunk to the transactions' parents and children.
        for (SetIdx chunk_idx = 0; chunk_idx < num_transactions; ++chunk_idx) {
            auto& tx_data = m_tx_data[m_set_info[chunk_idx].transactions.First()];
            m_reachable[chunk_idx].first = tx_data.parents;
            m_reachable[chunk_idx].second = tx_data.children;
        }
        Assume(num_chunks == num_transactions);
        // Mark all chunk sets as chunks.
        m_chunk_idxs = SetType::Fill(num_chunks);
        m_cost.InitializeEnd(/*num_txns=*/num_chunks, /*num_deps=*/num_deps);
    }
1210
1214 void LoadLinearization(std::span<const DepGraphIndex> old_linearization) noexcept
1215 {
1216 // Add transactions one by one, in order of existing linearization.
1217 for (DepGraphIndex tx_idx : old_linearization) {
1218 auto chunk_idx = m_tx_data[tx_idx].chunk_idx;
1219 // Merge the chunk upwards, as long as merging succeeds.
1220 while (true) {
1221 chunk_idx = MergeStep<false>(chunk_idx);
1222 if (chunk_idx == INVALID_SET_IDX) break;
1223 }
1224 }
1225 }
1226
    /** Merge chunks until the state is topologically valid (every chunk can appear intact in
     *  some topological linearization). */
    void MakeTopological() noexcept
    {
        m_cost.MakeTopologicalBegin();
        // Randomly chosen initial merge-direction preference.
        unsigned init_dir = m_rng.randbool();
        // Chunks that have already been the result of a merge.
        SetType merged_chunks;
        // Mark chunks as suboptimal.
        for (auto chunk_idx : m_chunk_idxs) {
            // NOTE(review): statements appear to have been lost in extraction here (presumably
            // setting chunk_idx in m_suboptimal_idxs, pushing it onto m_suboptimal_chunks, and
            // drawing a random index j); the loop below references a variable j that is not
            // declared in the visible code — confirm against upstream.
            // Randomize the initial order of suboptimal chunks in the queue.
            if (j != m_suboptimal_chunks.size() - 1) {
            // NOTE(review): the swap statement inside this branch appears to have been lost in
            // extraction — confirm against upstream.
            }
        }
        unsigned chunks = m_chunk_idxs.Count();
        unsigned steps = 0;
        while (!m_suboptimal_chunks.empty()) {
            ++steps;
            // Pop an entry from the potentially-suboptimal chunk queue.
            SetIdx chunk_idx = m_suboptimal_chunks.front();
            // NOTE(review): the statement removing the popped entry from the queue appears to
            // have been lost in extraction here — confirm against upstream.
            Assume(m_suboptimal_idxs[chunk_idx]);
            m_suboptimal_idxs.Reset(chunk_idx);
            // If what was popped is not currently a chunk, continue. This may
            // happen when it was merged with something else since being added.
            if (!m_chunk_idxs[chunk_idx]) continue;
            // Choose the merge directions to attempt (bit 1 = upwards, bit 2 = downwards).
            unsigned direction = merged_chunks[chunk_idx] ? 3 : init_dir + 1;
            // Randomize whether the upward or downward attempt comes first.
            int flip = m_rng.randbool();
            for (int i = 0; i < 2; ++i) {
                if (i ^ flip) {
                    if (!(direction & 1)) continue;
                    // Attempt to merge the chunk upwards.
                    auto result_up = MergeStep<false>(chunk_idx);
                    if (result_up != INVALID_SET_IDX) {
                        if (!m_suboptimal_idxs[result_up]) {
                            m_suboptimal_idxs.Set(result_up);
                            m_suboptimal_chunks.push_back(result_up);
                        }
                        merged_chunks.Set(result_up);
                        break;
                    }
                } else {
                    if (!(direction & 2)) continue;
                    // Attempt to merge the chunk downwards.
                    auto result_down = MergeStep<true>(chunk_idx);
                    if (result_down != INVALID_SET_IDX) {
                        if (!m_suboptimal_idxs[result_down]) {
                            m_suboptimal_idxs.Set(result_down);
                            m_suboptimal_chunks.push_back(result_down);
                        }
                        merged_chunks.Set(result_down);
                        break;
                    }
                }
            }
        }
        m_cost.MakeTopologicalEnd(/*num_chunks=*/chunks, /*num_steps=*/steps);
    }
1296
// Prepare the optimization phase: queue all current chunks as potentially suboptimal,
// in randomized order, and notify the cost model.
// NOTE(review): this listing elides original lines 1301, 1303, 1307 and 1309 (presumably
// a comment, the m_suboptimal_idxs update, the definition of the random index `j`, and the
// queue-entry swap). Consult the full repository source before modifying this function.
1298 void StartOptimizing() noexcept
1299 {
1300 m_cost.StartOptimizingBegin();
1302 // Mark chunks suboptimal.
1304 for (auto chunk_idx : m_chunk_idxs) {
1305 m_suboptimal_chunks.push_back(chunk_idx);
1306 // Randomize the initial order of suboptimal chunks in the queue.
// NOTE(review): `j` is defined in an elided line; the swap body below is elided too.
1308 if (j != m_suboptimal_chunks.size() - 1) {
1310 }
1311 }
1312 m_cost.StartOptimizingEnd(/*num_chunks=*/m_suboptimal_chunks.size());
1313 }
1314
1316 bool OptimizeStep() noexcept
1317 {
1318 auto chunk_idx = PickChunkToOptimize();
1319 if (chunk_idx == INVALID_SET_IDX) {
1320 // No improvable chunk was found, we are done.
1321 return false;
1322 }
1323 auto [parent_idx, child_idx] = PickDependencyToSplit(chunk_idx);
1324 if (parent_idx == TxIdx(-1)) {
1325 // Nothing to improve in chunk_idx. Need to continue with other chunks, if any.
1326 return !m_suboptimal_chunks.empty();
1327 }
1328 // Deactivate the found dependency and then make the state topological again with a
1329 // sequence of merges.
1330 Improve(parent_idx, child_idx);
1331 return true;
1332 }
1333
// Prepare the minimization phase: queue every chunk as potentially non-minimal, each with a
// randomly chosen pivot transaction and a random initial direction bit, in randomized order.
// NOTE(review): this listing elides original lines 1339-1340, 1347 and 1349 (presumably
// comments, the definition of the random index `j`, and the queue-entry swap). Consult the
// full repository source before modifying this function.
1336 void StartMinimizing() noexcept
1337 {
1338 m_cost.StartMinimizingBegin();
1341 // Gather all chunks, and for each, add it with a random pivot in it, and a random initial
1342 // direction, to m_nonminimal_chunks.
1343 for (auto chunk_idx : m_chunk_idxs) {
1344 TxIdx pivot_idx = PickRandomTx(m_set_info[chunk_idx].transactions);
1345 m_nonminimal_chunks.emplace_back(chunk_idx, pivot_idx, m_rng.randbits<1>());
1346 // Randomize the initial order of nonminimal chunks in the queue.
// NOTE(review): `j` is defined in an elided line; the swap body below is elided too.
1348 if (j != m_nonminimal_chunks.size() - 1) {
1350 }
1351 }
1352 m_cost.StartMinimizingEnd(/*num_chunks=*/m_nonminimal_chunks.size());
1353 }
1354
// Perform one chunk-minimization step: pop a potentially-non-minimal chunk and try to split
// it along a dependency whose top set has the same feerate as the whole chunk, steering the
// split so a chosen pivot transaction ends up on a designated side.
// Returns false only when the queue is empty (all chunks known minimal).
// NOTE(review): this listing elides original lines 1363, 1365, 1367 and 1441 — at least 1363
// (presumably the m_nonminimal_chunks pop) and 1441 (presumably a randomized swap of the two
// just-queued entries) are code. Consult the full repository source before modifying.
1356 bool MinimizeStep() noexcept
1357 {
1358 // If the queue of potentially-non-minimal chunks is empty, we are done.
1359 if (m_nonminimal_chunks.empty()) return false;
1360 m_cost.MinimizeStepBegin();
1361 // Pop an entry from the potentially-non-minimal chunk queue.
1362 auto [chunk_idx, pivot_idx, flags] = m_nonminimal_chunks.front();
1364 auto& chunk_info = m_set_info[chunk_idx];
// flags bit 0: which side the pivot should end up on after a split.
1366 bool move_pivot_down = flags & 1;
// flags bit 1: whether the opposite direction was already tried for this chunk.
1368 bool second_stage = flags & 2;
1369
1370 // Find a random dependency whose top and bottom set feerates are equal, and which has
1371 // pivot in bottom set (if move_pivot_down) or in top set (if !move_pivot_down).
1372 std::pair<TxIdx, TxIdx> candidate_dep;
// 0 means "no candidate yet"; real tiebreaks are forced odd (| 1) so they are nonzero.
1373 uint64_t candidate_tiebreak{0};
1374 bool have_any = false;
1375 // Iterate over all transactions.
1376 for (auto tx_idx : chunk_info.transactions) {
1377 const auto& tx_data = m_tx_data[tx_idx];
1378 // Iterate over all active child dependencies of the transaction.
1379 for (auto child_idx : tx_data.active_children) {
1380 const auto& dep_top_info = m_set_info[tx_data.dep_top_idx[child_idx]];
1381 // Skip if this dependency does not have equal top and bottom set feerates. Note
1382 // that the top cannot have higher feerate than the bottom, or OptimizeSteps would
1383 // have dealt with it.
1384 if (dep_top_info.feerate << chunk_info.feerate) continue;
1385 have_any = true;
1386 // Skip if this dependency does not have pivot in the right place.
1387 if (move_pivot_down == dep_top_info.transactions[pivot_idx]) continue;
1388 // Remember this as our chosen dependency if it has a better tiebreak.
1389 uint64_t tiebreak = m_rng.rand64() | 1;
1390 if (tiebreak > candidate_tiebreak) {
1391 candidate_tiebreak = tiebreak;
1392 candidate_dep = {tx_idx, child_idx};
1393 }
1394 }
1395 }
1396 m_cost.MinimizeStepMid(/*num_txns=*/chunk_info.transactions.Count());
1397 // If no dependencies have equal top and bottom set feerate, this chunk is minimal.
1398 if (!have_any) return true;
1399 // If all found dependencies have the pivot in the wrong place, try moving it in the other
1400 // direction. If this was the second stage already, we are done.
1401 if (candidate_tiebreak == 0) {
1402 // Switch to other direction, and to second phase.
1403 flags ^= 3;
1404 if (!second_stage) m_nonminimal_chunks.emplace_back(chunk_idx, pivot_idx, flags);
1405 return true;
1406 }
1407
1408 // Otherwise, deactivate the dependency that was found.
1409 auto [parent_chunk_idx, child_chunk_idx] = Deactivate(candidate_dep.first, candidate_dep.second);
1410 // Determine if there is a dependency from the new bottom to the new top (opposite from the
1411 // dependency that was just deactivated).
1412 auto& parent_reachable = m_reachable[parent_chunk_idx].first;
1413 auto& child_chunk_txn = m_set_info[child_chunk_idx].transactions;
1414 if (parent_reachable.Overlaps(child_chunk_txn)) {
1415 // A self-merge is needed. Note that the child_chunk_idx is the top, and
1416 // parent_chunk_idx is the bottom, because we activate a dependency in the reverse
1417 // direction compared to the deactivation above.
1418 auto merged_chunk_idx = MergeChunks(child_chunk_idx, parent_chunk_idx);
1419 // Re-insert the chunk into the queue, in the same direction. Note that the chunk_idx
1420 // will have changed.
1421 m_nonminimal_chunks.emplace_back(merged_chunk_idx, pivot_idx, flags);
1422 m_cost.MinimizeStepEnd(/*split=*/false);
1423 } else {
1424 // No self-merge happens, and thus we have found a way to split the chunk. Create two
1425 // smaller chunks, and add them to the queue. The one that contains the current pivot
1426 // gets to continue with it in the same direction, to minimize the number of times we
1427 // alternate direction. If we were in the second phase already, the newly created chunk
1428 // inherits that too, because we know no split with the pivot on the other side is
1429 // possible already. The new chunk without the current pivot gets a new randomly-chosen
1430 // one.
1431 if (move_pivot_down) {
1432 auto parent_pivot_idx = PickRandomTx(m_set_info[parent_chunk_idx].transactions);
1433 m_nonminimal_chunks.emplace_back(parent_chunk_idx, parent_pivot_idx, m_rng.randbits<1>());
1434 m_nonminimal_chunks.emplace_back(child_chunk_idx, pivot_idx, flags);
1435 } else {
1436 auto child_pivot_idx = PickRandomTx(m_set_info[child_chunk_idx].transactions);
1437 m_nonminimal_chunks.emplace_back(parent_chunk_idx, pivot_idx, flags);
1438 m_nonminimal_chunks.emplace_back(child_chunk_idx, child_pivot_idx, m_rng.randbits<1>());
1439 }
// NOTE(review): the body of this conditional (line 1441) is elided in this listing;
// presumably it swaps the order of the two entries just queued — confirm against source.
1440 if (m_rng.randbool()) {
1442 }
1443 m_cost.MinimizeStepEnd(/*split=*/true);
1444 }
1445 return true;
1446 }
1447
// Extract the final linearization implied by the current chunking.
//
// Chunks are emitted best-first (highest feerate; ties broken by larger size, then by
// fallback_order), gated by inter-chunk dependency counts so the output stays topological.
// Within each chunk, transactions are emitted via a similar heap-based selection.
// NOTE(review): this listing elides several original lines (1467, 1470-1472, 1474-1475,
// 1477-1478, 1480-1481, 1493, 1505-1506, 1527-1528), which appear to be comments — verify
// against the repository source before editing.
1464 std::vector<DepGraphIndex> GetLinearization(const StrongComparator<DepGraphIndex> auto& fallback_order) noexcept
1465 {
1466 m_cost.GetLinearizationBegin();
// The linearization being built.
1468 std::vector<DepGraphIndex> ret;
1469 ret.reserve(m_set_info.size());
// Heap of chunks with no unmet out-of-chunk dependencies, each paired with its
// fallback_order-maximal transaction (used purely for deterministic tie-breaking).
1473 std::vector<std::pair<SetIdx, TxIdx>> ready_chunks;
// Per-chunk count of not-yet-emitted dependencies on transactions outside the chunk.
1476 std::vector<TxIdx> chunk_deps(m_set_info.size(), 0);
// Per-transaction count of not-yet-emitted parent transactions.
1479 std::vector<TxIdx> tx_deps(m_tx_data.size(), 0);
// Heap of transactions within the chunk being emitted whose parents are all emitted.
1482 std::vector<TxIdx> ready_tx;
1483 // Populate chunk_deps and tx_deps.
1484 unsigned num_deps{0};
1485 for (TxIdx chl_idx : m_transaction_idxs) {
1486 const auto& chl_data = m_tx_data[chl_idx];
1487 tx_deps[chl_idx] = chl_data.parents.Count();
1488 num_deps += tx_deps[chl_idx];
1489 auto chl_chunk_idx = chl_data.chunk_idx;
1490 auto& chl_chunk_info = m_set_info[chl_chunk_idx];
1491 chunk_deps[chl_chunk_idx] += (chl_data.parents - chl_chunk_info.transactions).Count();
1492 }
// Helper: the transaction within a chunk that is maximal according to fallback_order.
1494 auto max_fallback_fn = [&](SetIdx chunk_idx) noexcept {
1495 auto& chunk = m_set_info[chunk_idx].transactions;
1496 auto it = chunk.begin();
1497 DepGraphIndex ret = *it;
1498 ++it;
1499 while (it != chunk.end()) {
1500 if (fallback_order(*it, ret) > 0) ret = *it;
1501 ++it;
1502 }
1503 return ret;
1504 };
// Heap comparator for transactions: increasing feerate, then decreasing size, then by
// fallback_order, so the heap's top is the preferred transaction to emit next.
1507 auto tx_cmp_fn = [&](const auto& a, const auto& b) noexcept {
1508 // Bail out for identical transactions.
1509 if (a == b) return false;
1510 // First sort by increasing transaction feerate.
1511 auto& a_feerate = m_depgraph.FeeRate(a);
1512 auto& b_feerate = m_depgraph.FeeRate(b);
1513 auto feerate_cmp = FeeRateCompare(a_feerate, b_feerate);
1514 if (feerate_cmp != 0) return feerate_cmp < 0;
1515 // Then by decreasing transaction size.
1516 if (a_feerate.size != b_feerate.size) {
1517 return a_feerate.size > b_feerate.size;
1518 }
1519 // Tie-break by decreasing fallback_order.
1520 auto fallback_cmp = fallback_order(a, b);
1521 if (fallback_cmp != 0) return fallback_cmp > 0;
1522 // This should not be hit, because fallback_order defines a strong ordering.
1523 Assume(false);
1524 return a < b;
1525 };
1526 // Construct a heap with all chunks that have no out-of-chunk dependencies.
// Heap comparator for chunks, mirroring tx_cmp_fn at the chunk level.
1529 auto chunk_cmp_fn = [&](const auto& a, const auto& b) noexcept {
1530 // Bail out for identical chunks.
1531 if (a.first == b.first) return false;
1532 // First sort by increasing chunk feerate.
1533 auto& chunk_feerate_a = m_set_info[a.first].feerate;
1534 auto& chunk_feerate_b = m_set_info[b.first].feerate;
1535 auto feerate_cmp = FeeRateCompare(chunk_feerate_a, chunk_feerate_b);
1536 if (feerate_cmp != 0) return feerate_cmp < 0;
1537 // Then by decreasing chunk size.
1538 if (chunk_feerate_a.size != chunk_feerate_b.size) {
1539 return chunk_feerate_a.size > chunk_feerate_b.size;
1540 }
1541 // Tie-break by decreasing fallback_order.
1542 auto fallback_cmp = fallback_order(a.second, b.second);
1543 if (fallback_cmp != 0) return fallback_cmp > 0;
1544 // This should not be hit, because fallback_order defines a strong ordering.
1545 Assume(false);
1546 return a.second < b.second;
1547 };
1548 // Construct a heap with all chunks that have no out-of-chunk dependencies.
1549 for (SetIdx chunk_idx : m_chunk_idxs) {
1550 if (chunk_deps[chunk_idx] == 0) {
1551 ready_chunks.emplace_back(chunk_idx, max_fallback_fn(chunk_idx));
1552 }
1553 }
1554 std::make_heap(ready_chunks.begin(), ready_chunks.end(), chunk_cmp_fn);
1555 // Pop chunks off the heap.
1556 while (!ready_chunks.empty()) {
1557 auto [chunk_idx, _rnd] = ready_chunks.front();
1558 std::pop_heap(ready_chunks.begin(), ready_chunks.end(), chunk_cmp_fn);
1559 ready_chunks.pop_back();
1560 Assume(chunk_deps[chunk_idx] == 0);
1561 const auto& chunk_txn = m_set_info[chunk_idx].transactions;
1562 // Build heap of all includable transactions in chunk.
1563 Assume(ready_tx.empty());
1564 for (TxIdx tx_idx : chunk_txn) {
1565 if (tx_deps[tx_idx] == 0) ready_tx.push_back(tx_idx);
1566 }
1567 Assume(!ready_tx.empty());
1568 std::make_heap(ready_tx.begin(), ready_tx.end(), tx_cmp_fn);
1569 // Pick transactions from the ready heap, append them to linearization, and decrement
1570 // dependency counts.
1571 while (!ready_tx.empty()) {
1572 // Pop an element from the tx_ready heap.
1573 auto tx_idx = ready_tx.front();
1574 std::pop_heap(ready_tx.begin(), ready_tx.end(), tx_cmp_fn);
1575 ready_tx.pop_back();
1576 // Append to linearization.
1577 ret.push_back(tx_idx);
1578 // Decrement dependency counts.
1579 auto& tx_data = m_tx_data[tx_idx];
1580 for (TxIdx chl_idx : tx_data.children) {
1581 auto& chl_data = m_tx_data[chl_idx];
1582 // Decrement tx dependency count.
1583 Assume(tx_deps[chl_idx] > 0);
1584 if (--tx_deps[chl_idx] == 0 && chunk_txn[chl_idx]) {
1585 // Child tx has no dependencies left, and is in this chunk. Add it to the tx heap.
1586 ready_tx.push_back(chl_idx);
1587 std::push_heap(ready_tx.begin(), ready_tx.end(), tx_cmp_fn);
1588 }
1589 // Decrement chunk dependency count if this is out-of-chunk dependency.
1590 if (chl_data.chunk_idx != chunk_idx) {
1591 Assume(chunk_deps[chl_data.chunk_idx] > 0);
1592 if (--chunk_deps[chl_data.chunk_idx] == 0) {
1593 // Child chunk has no dependencies left. Add it to the chunk heap.
1594 ready_chunks.emplace_back(chl_data.chunk_idx, max_fallback_fn(chl_data.chunk_idx));
1595 std::push_heap(ready_chunks.begin(), ready_chunks.end(), chunk_cmp_fn);
1596 }
1597 }
1598 }
1599 }
1600 }
1601 Assume(ret.size() == m_set_info.size());
1602 m_cost.GetLinearizationEnd(/*num_txns=*/m_set_info.size(), /*num_deps=*/num_deps);
1603 return ret;
1604 }
1605
1619 std::vector<FeeFrac> GetDiagram() const noexcept
1620 {
1621 std::vector<FeeFrac> ret;
1622 for (auto chunk_idx : m_chunk_idxs) {
1623 ret.push_back(m_set_info[chunk_idx].feerate);
1624 }
1625 std::sort(ret.begin(), ret.end(), std::greater{});
1626 return ret;
1627 }
1628
1630 uint64_t GetCost() const noexcept { return m_cost.GetCost(); }
1631
// Consistency check: asserts every internal invariant of the state (dependency bookkeeping,
// chunk partitioning, feerates, reachability caches, and both work queues). Assert-based,
// so presumably intended for tests/fuzzing — TODO confirm callers.
1633 void SanityCheck() const
1634 {
1635 //
1636 // Verify dependency parent/child information, and build list of (active) dependencies.
1637 //
1638 std::vector<std::pair<TxIdx, TxIdx>> expected_dependencies;
1639 std::vector<std::pair<TxIdx, TxIdx>> all_dependencies;
1640 std::vector<std::pair<TxIdx, TxIdx>> active_dependencies;
// Ground truth: the reduced dependencies according to the dependency graph itself.
1641 for (auto parent_idx : m_depgraph.Positions()) {
1642 for (auto child_idx : m_depgraph.GetReducedChildren(parent_idx)) {
1643 expected_dependencies.emplace_back(parent_idx, child_idx);
1644 }
1645 }
// What this data structure believes: children (and the active subset) per transaction.
1646 for (auto tx_idx : m_transaction_idxs) {
1647 for (auto child_idx : m_tx_data[tx_idx].children) {
1648 all_dependencies.emplace_back(tx_idx, child_idx);
1649 if (m_tx_data[tx_idx].active_children[child_idx]) {
1650 active_dependencies.emplace_back(tx_idx, child_idx);
1651 }
1652 }
1653 }
1654 std::sort(expected_dependencies.begin(), expected_dependencies.end());
1655 std::sort(all_dependencies.begin(), all_dependencies.end());
1656 assert(expected_dependencies == all_dependencies);
1657
1658 //
1659 // Verify the chunks against the list of active dependencies
1660 //
1661 SetType chunk_cover;
1662 for (auto chunk_idx : m_chunk_idxs) {
1663 const auto& chunk_info = m_set_info[chunk_idx];
1664 // Verify that transactions in the chunk point back to it. This guarantees
1665 // that chunks are non-overlapping.
1666 for (auto tx_idx : chunk_info.transactions) {
1667 assert(m_tx_data[tx_idx].chunk_idx == chunk_idx);
1668 }
1669 assert(!chunk_cover.Overlaps(chunk_info.transactions));
1670 chunk_cover |= chunk_info.transactions;
1671 // Verify the chunk's transaction set: start from an arbitrary chunk transaction,
1672 // and for every active dependency, if it contains the parent or child, add the
1673 // other. It must have exactly N-1 active dependencies in it, guaranteeing it is
1674 // acyclic.
1675 assert(chunk_info.transactions.Any());
1676 SetType expected_chunk = SetType::Singleton(chunk_info.transactions.First());
// Transitive closure over active dependencies: repeat until no more are pulled in.
1677 while (true) {
1678 auto old = expected_chunk;
1679 size_t active_dep_count{0};
1680 for (const auto& [par, chl] : active_dependencies) {
1681 if (expected_chunk[par] || expected_chunk[chl]) {
1682 expected_chunk.Set(par);
1683 expected_chunk.Set(chl);
1684 ++active_dep_count;
1685 }
1686 }
1687 if (old == expected_chunk) {
// N transactions connected by N-1 dependencies: a spanning tree, hence acyclic.
1688 assert(expected_chunk.Count() == active_dep_count + 1);
1689 break;
1690 }
1691 }
1692 assert(chunk_info.transactions == expected_chunk);
1693 // Verify the chunk's feerate.
1694 assert(chunk_info.feerate == m_depgraph.FeeRate(chunk_info.transactions));
1695 // Verify the chunk's reachable transactions.
1696 assert(m_reachable[chunk_idx] == GetReachable(expected_chunk));
1697 // Verify that the chunk's reachable transactions don't include its own transactions.
1698 assert(!m_reachable[chunk_idx].first.Overlaps(chunk_info.transactions));
1699 assert(!m_reachable[chunk_idx].second.Overlaps(chunk_info.transactions));
1700 }
1701 // Verify that together, the chunks cover all transactions.
1702 assert(chunk_cover == m_depgraph.Positions());
1703
1704 //
1705 // Verify transaction data.
1706 //
1707 assert(m_transaction_idxs == m_depgraph.Positions());
1708 for (auto tx_idx : m_transaction_idxs) {
1709 const auto& tx_data = m_tx_data[tx_idx];
1710 // Verify it has a valid chunk index, and that chunk includes this transaction.
1711 assert(m_chunk_idxs[tx_data.chunk_idx]);
1712 assert(m_set_info[tx_data.chunk_idx].transactions[tx_idx]);
1713 // Verify parents/children.
1714 assert(tx_data.parents == m_depgraph.GetReducedParents(tx_idx));
1715 assert(tx_data.children == m_depgraph.GetReducedChildren(tx_idx));
1716 // Verify active_children is a subset of children.
1717 assert(tx_data.active_children.IsSubsetOf(tx_data.children));
1718 // Verify each active child's dep_top_idx points to a valid non-chunk set.
1719 for (auto child_idx : tx_data.active_children) {
1720 assert(tx_data.dep_top_idx[child_idx] < m_set_info.size());
1721 assert(!m_chunk_idxs[tx_data.dep_top_idx[child_idx]]);
1722 }
1723 }
1724
1725 //
1726 // Verify active dependencies' top sets.
1727 //
1728 for (const auto& [par_idx, chl_idx] : active_dependencies) {
1729 // Verify the top set's transactions: it must contain the parent, and for every
1730 // active dependency, except the chl_idx->par_idx dependency itself, if it contains the
1731 // parent or child, it must contain both. It must have exactly N-1 active dependencies
1732 // in it, guaranteeing it is acyclic.
1733 SetType expected_top = SetType::Singleton(par_idx);
1734 while (true) {
1735 auto old = expected_top;
1736 size_t active_dep_count{0};
1737 for (const auto& [par2_idx, chl2_idx] : active_dependencies) {
1738 if (par_idx == par2_idx && chl_idx == chl2_idx) continue;
1739 if (expected_top[par2_idx] || expected_top[chl2_idx]) {
1740 expected_top.Set(par2_idx);
1741 expected_top.Set(chl2_idx);
1742 ++active_dep_count;
1743 }
1744 }
1745 if (old == expected_top) {
1746 assert(expected_top.Count() == active_dep_count + 1);
1747 break;
1748 }
1749 }
// The child must lie strictly below the dependency's top set.
1750 assert(!expected_top[chl_idx]);
1751 auto& dep_top_info = m_set_info[m_tx_data[par_idx].dep_top_idx[chl_idx]];
1752 assert(dep_top_info.transactions == expected_top);
1753 // Verify the top set's feerate.
1754 assert(dep_top_info.feerate == m_depgraph.FeeRate(dep_top_info.transactions));
1755 }
1756
1757 //
1758 // Verify m_suboptimal_chunks.
1759 //
// The queue must contain no duplicates, and must agree exactly with m_suboptimal_idxs.
1760 SetType suboptimal_idxs;
1761 for (size_t i = 0; i < m_suboptimal_chunks.size(); ++i) {
1762 auto chunk_idx = m_suboptimal_chunks[i];
1763 assert(!suboptimal_idxs[chunk_idx]);
1764 suboptimal_idxs.Set(chunk_idx);
1765 }
1766 assert(m_suboptimal_idxs == suboptimal_idxs);
1767
1768 //
1769 // Verify m_nonminimal_chunks.
1770 //
// Each entry's pivot must live in its chunk; no chunk appears twice; only real chunks.
1771 SetType nonminimal_idxs;
1772 for (size_t i = 0; i < m_nonminimal_chunks.size(); ++i) {
1773 auto [chunk_idx, pivot, flags] = m_nonminimal_chunks[i];
1774 assert(m_tx_data[pivot].chunk_idx == chunk_idx);
1775 assert(!nonminimal_idxs[chunk_idx]);
1776 nonminimal_idxs.Set(chunk_idx);
1777 }
1778 assert(nonminimal_idxs.IsSubsetOf(m_chunk_idxs));
1779 }
1780};
1781
// Linearize a dependency graph using the spanning-forest state machine above.
//
// @param depgraph          The transaction cluster to linearize.
// @param max_cost          Budget for the internal cost model; optimization/minimization
//                          loops stop once forest.GetCost() reaches it.
// @param rng_seed          Seed for the randomized algorithm.
// @param fallback_order    Strong comparator used for deterministic tie-breaking.
// @param old_linearization Optional existing linearization to bootstrap from.
// @param is_topological    Whether old_linearization is already topologically valid.
// @return {linearization, optimal, cost}: the produced linearization, whether minimization
//         completed (MinimizeStep exhausted its queue) within budget, and the cost consumed.
// NOTE(review): line 1811 is elided from this listing (presumably a comment).
1802template<typename SetType>
1803std::tuple<std::vector<DepGraphIndex>, bool, uint64_t> Linearize(
1804 const DepGraph<SetType>& depgraph,
1805 uint64_t max_cost,
1806 uint64_t rng_seed,
1807 const StrongComparator<DepGraphIndex> auto& fallback_order,
1808 std::span<const DepGraphIndex> old_linearization = {},
1809 bool is_topological = true) noexcept
1810{
1812 SpanningForestState forest(depgraph, rng_seed);
// Seed the state from the given linearization if any, fixing up topology when needed;
// otherwise build a topological state from scratch.
1813 if (!old_linearization.empty()) {
1814 forest.LoadLinearization(old_linearization);
1815 if (!is_topological) forest.MakeTopological();
1816 } else {
1817 forest.MakeTopological();
1818 }
1819 // Make improvement steps to it until we hit the max_iterations limit, or an optimal result
1820 // is found.
1821 if (forest.GetCost() < max_cost) {
1822 forest.StartOptimizing();
1823 do {
1824 if (!forest.OptimizeStep()) break;
1825 } while (forest.GetCost() < max_cost);
1826 }
1827 // Make chunk minimization steps until we hit the max_iterations limit, or all chunks are
1828 // minimal.
1829 bool optimal = false;
1830 if (forest.GetCost() < max_cost) {
1831 forest.StartMinimizing();
1832 do {
1833 if (!forest.MinimizeStep()) {
1834 optimal = true;
1835 break;
1836 }
1837 } while (forest.GetCost() < max_cost);
1838 }
1839 return {forest.GetLinearization(fallback_order), optimal, forest.GetCost()};
1840}
1841
// Improve a given topological linearization in place using two bubble-sort-like passes
// (backward, then forward) over group merges/swaps; the result is never worse than the input.
// NOTE(review): this listing elides the original doc comments at lines 1842-1857 and the
// per-field comments at 1896, 1898, 1902, 1905-1906, 1912, 1914-1915, 1917, 1919 and 1921;
// restored summaries below are inferred from usage — verify against the repository source.
1858template<typename SetType>
1859void PostLinearize(const DepGraph<SetType>& depgraph, std::span<DepGraphIndex> linearization)
1860{
1861 // This algorithm performs a number of passes (currently 2); the even ones operate from back to
1862 // front, the odd ones from front to back. Each results in an equal-or-better linearization
1863 // than the one started from.
1864 // - One pass in either direction guarantees that the resulting chunks are connected.
1865 // - Each direction corresponds to one shape of tree being linearized optimally (forward passes
1866 // guarantee this for graphs where each transaction has at most one child; backward passes
1867 // guarantee this for graphs where each transaction has at most one parent).
1868 // - Starting with a backward pass guarantees the moved-tree property.
1869 //
1870 // During an odd (forward) pass, the high-level operation is:
1871 // - Start with an empty list of groups L=[].
1872 // - For every transaction i in the old linearization, from front to back:
1873 // - Append a new group C=[i], containing just i, to the back of L.
1874 // - While L has at least one group before C, and the group immediately before C has feerate
1875 // lower than C:
1876 // - If C depends on P:
1877 // - Merge P into C, making C the concatenation of P+C, continuing with the combined C.
1878 // - Otherwise:
1879 // - Swap P with C, continuing with the now-moved C.
1880 // - The output linearization is the concatenation of the groups in L.
1881 //
1882 // During even (backward) passes, i iterates from the back to the front of the existing
1883 // linearization, and new groups are prepended instead of appended to the list L. To enable
1884 // more code reuse, both passes append groups, but during even passes the meanings of
1885 // parent/child, and of high/low feerate are reversed, and the final concatenation is reversed
1886 // on output.
1887 //
1888 // In the implementation below, the groups are represented by singly-linked lists (pointing
1889 // from the back to the front), which are themselves organized in a singly-linked circular
1890 // list (each group pointing to its predecessor, with a special sentinel group at the front
1891 // that points back to the last group).
1892 //
1893 // Information about transaction t is stored in entries[t + 1], while the sentinel is in
1894 // entries[0].
1895
// entries index of the sentinel group (entries[0]); its feerate stays empty.
1897 static constexpr DepGraphIndex SENTINEL{0};
// prev_tx value marking the first transaction of a group (no predecessor).
1899 static constexpr DepGraphIndex NO_PREV_TX{0};
1900
1901
// Per-transaction (and sentinel) bookkeeping used during a pass.
1903 struct TxEntry
1904 {
// entries index of the previous transaction within this group (NO_PREV_TX if first).
1907 DepGraphIndex prev_tx;
1908
1909 // The fields below are only used for transactions that are the last one in a group
1910 // (referred to as tail transactions below).
1911
// entries index of the first transaction of this group.
1913 DepGraphIndex first_tx;
// entries index of the tail of the previous group (SENTINEL when at the front).
1916 DepGraphIndex prev_group;
// Set of transactions belonging to this group.
1918 SetType group;
// Union of ancestors (descendants during even passes) of the group's transactions.
1920 SetType deps;
// Combined feerate of the group (fee negated during even passes).
1922 FeeFrac feerate;
1923 };
1924
1925 // As an example, consider the state corresponding to the linearization [1,0,3,2], with
1926 // groups [1,0,3] and [2], in an odd pass. The linked lists would be:
1927 //
1928 // +-----+
1929 // 0<-P-- | 0 S | ---\ Legend:
1930 // +-----+ |
1931 // ^ | - digit in box: entries index
1932 // /--------------F---------+ G | (note: one more than tx value)
1933 // v \ | | - S: sentinel group
1934 // +-----+ +-----+ +-----+ | (empty feerate)
1935 // 0<-P-- | 2 | <--P-- | 1 | <--P-- | 4 T | | - T: tail transaction, contains
1936 // +-----+ +-----+ +-----+ | fields beyond prev_tx.
1937 // ^ | - P: prev_tx reference
1938 // G G - F: first_tx reference
1939 // | | - G: prev_group reference
1940 // +-----+ |
1941 // 0<-P-- | 3 T | <--/
1942 // +-----+
1943 // ^ |
1944 // \-F-/
1945 //
1946 // During an even pass, the diagram above would correspond to linearization [2,3,0,1], with
1947 // groups [2] and [3,0,1].
1948
1949 std::vector<TxEntry> entries(depgraph.PositionRange() + 1);
1950
1951 // Perform two passes over the linearization.
1952 for (int pass = 0; pass < 2; ++pass) {
1953 int rev = !(pass & 1);
1954 // Construct a sentinel group, identifying the start of the list.
1955 entries[SENTINEL].prev_group = SENTINEL;
1956 Assume(entries[SENTINEL].feerate.IsEmpty());
1957
1958 // Iterate over all elements in the existing linearization.
1959 for (DepGraphIndex i = 0; i < linearization.size(); ++i) {
1960 // Even passes are from back to front; odd passes from front to back.
1961 DepGraphIndex idx = linearization[rev ? linearization.size() - 1 - i : i];
1962 // Construct a new group containing just idx. In even passes, the meaning of
1963 // parent/child and high/low feerate are swapped.
1964 DepGraphIndex cur_group = idx + 1;
1965 entries[cur_group].group = SetType::Singleton(idx);
1966 entries[cur_group].deps = rev ? depgraph.Descendants(idx): depgraph.Ancestors(idx);
1967 entries[cur_group].feerate = depgraph.FeeRate(idx);
1968 if (rev) entries[cur_group].feerate.fee = -entries[cur_group].feerate.fee;
1969 entries[cur_group].prev_tx = NO_PREV_TX; // No previous transaction in group.
1970 entries[cur_group].first_tx = cur_group; // Transaction itself is first of group.
1971 // Insert the new group at the back of the groups linked list.
1972 entries[cur_group].prev_group = entries[SENTINEL].prev_group;
1973 entries[SENTINEL].prev_group = cur_group;
1974
1975 // Start merge/swap cycle.
1976 DepGraphIndex next_group = SENTINEL; // We inserted at the end, so next group is sentinel.
1977 DepGraphIndex prev_group = entries[cur_group].prev_group;
1978 // Continue as long as the current group has higher feerate than the previous one.
1979 while (entries[cur_group].feerate >> entries[prev_group].feerate) {
1980 // prev_group/cur_group/next_group refer to (the last transactions of) 3
1981 // consecutive entries in groups list.
1982 Assume(cur_group == entries[next_group].prev_group);
1983 Assume(prev_group == entries[cur_group].prev_group);
1984 // The sentinel has empty feerate, which is neither higher or lower than other
1985 // feerates. Thus, the while loop we are in here guarantees that cur_group and
1986 // prev_group are not the sentinel.
1987 Assume(cur_group != SENTINEL);
1988 Assume(prev_group != SENTINEL);
1989 if (entries[cur_group].deps.Overlaps(entries[prev_group].group)) {
1990 // There is a dependency between cur_group and prev_group; merge prev_group
1991 // into cur_group. The group/deps/feerate fields of prev_group remain unchanged
1992 // but become unused.
1993 entries[cur_group].group |= entries[prev_group].group;
1994 entries[cur_group].deps |= entries[prev_group].deps;
1995 entries[cur_group].feerate += entries[prev_group].feerate;
1996 // Make the first of the current group point to the tail of the previous group.
1997 entries[entries[cur_group].first_tx].prev_tx = prev_group;
1998 // The first of the previous group becomes the first of the newly-merged group.
1999 entries[cur_group].first_tx = entries[prev_group].first_tx;
2000 // The previous group becomes whatever group was before the former one.
2001 prev_group = entries[prev_group].prev_group;
2002 entries[cur_group].prev_group = prev_group;
2003 } else {
2004 // There is no dependency between cur_group and prev_group; swap them.
2005 DepGraphIndex preprev_group = entries[prev_group].prev_group;
2006 // If PP, P, C, N were the old preprev, prev, cur, next groups, then the new
2007 // layout becomes [PP, C, P, N]. Update prev_groups to reflect that order.
2008 entries[next_group].prev_group = prev_group;
2009 entries[prev_group].prev_group = cur_group;
2010 entries[cur_group].prev_group = preprev_group;
2011 // The current group remains the same, but the groups before/after it have
2012 // changed.
2013 next_group = prev_group;
2014 prev_group = preprev_group;
2015 }
2016 }
2017 }
2018
2019 // Convert the entries back to linearization (overwriting the existing one).
2020 DepGraphIndex cur_group = entries[0].prev_group;
2021 DepGraphIndex done = 0;
2022 while (cur_group != SENTINEL) {
2023 DepGraphIndex cur_tx = cur_group;
2024 // Traverse the transactions of cur_group (from back to front), and write them in the
2025 // same order during odd passes, and reversed (front to back) in even passes.
2026 if (rev) {
2027 do {
2028 *(linearization.begin() + (done++)) = cur_tx - 1;
2029 cur_tx = entries[cur_tx].prev_tx;
2030 } while (cur_tx != NO_PREV_TX);
2031 } else {
2032 do {
2033 *(linearization.end() - (++done)) = cur_tx - 1;
2034 cur_tx = entries[cur_tx].prev_tx;
2035 } while (cur_tx != NO_PREV_TX);
2036 }
2037 cur_group = entries[cur_group].prev_group;
2038 }
2039 Assume(done == linearization.size());
2040 }
2041}
2042
2043} // namespace cluster_linearize
2044
2045#endif // BITCOIN_CLUSTER_LINEARIZE_H
#define LIFETIMEBOUND
Definition: attributes.h:16
int ret
int flags
Definition: bitcoin-tx.cpp:529
#define Assume(val)
Assume is the identity function.
Definition: check.h:125
xoroshiro128++ PRNG.
Definition: random.h:425
constexpr uint64_t rand64() noexcept
Definition: random.h:448
I randrange(I range) noexcept
Generate a random integer in the range [0..range), with range > 0.
Definition: random.h:254
bool randbool() noexcept
Generate a random boolean.
Definition: random.h:325
uint64_t randbits(int bits) noexcept
Generate a random (bits)-bit integer.
Definition: random.h:204
bool empty() const noexcept
Test whether the contents of this deque is empty.
Definition: vecdeque.h:310
void clear() noexcept
Resize the deque to be size 0.
Definition: vecdeque.h:126
void push_back(T &&elem)
Move-construct a new element at the end of the deque.
Definition: vecdeque.h:227
void pop_front()
Remove the first element of the deque.
Definition: vecdeque.h:250
size_t size() const noexcept
Get the number of elements in this deque.
Definition: vecdeque.h:312
void emplace_back(Args &&... args)
Construct a new element at the end of the deque.
Definition: vecdeque.h:219
T & front() noexcept
Get a mutable reference to the first element of the deque.
Definition: vecdeque.h:268
void reserve(size_t capacity)
Increase the capacity to capacity.
Definition: vecdeque.h:206
T & back() noexcept
Get a mutable reference to the last element of the deque.
Definition: vecdeque.h:282
Data structure that holds a transaction graph's preprocessed data (fee, size, ancestors,...
unsigned CountDependencies() const noexcept
const SetType & Ancestors(DepGraphIndex i) const noexcept
Get the ancestors of a given transaction i.
FeeFrac & FeeRate(DepGraphIndex i) noexcept
Get the mutable feerate of a given transaction i.
SetType GetReducedChildren(DepGraphIndex i) const noexcept
Compute the (reduced) set of children of node i in this graph.
SetType GetReducedParents(DepGraphIndex i) const noexcept
Compute the (reduced) set of parents of node i in this graph.
void AppendTopo(std::vector< DepGraphIndex > &list, const SetType &select) const noexcept
Append the entries of select to list in a topologically valid order.
const FeeFrac & FeeRate(DepGraphIndex i) const noexcept
Get the feerate of a given transaction i.
SetType GetConnectedComponent(const SetType &todo, DepGraphIndex tx) const noexcept
Get the connected component within the subset "todo" that contains tx (which must be in todo).
bool IsConnected() const noexcept
Determine if this entire graph is connected.
bool IsConnected(const SetType &subset) const noexcept
Determine if a subset is connected.
DepGraphIndex PositionRange() const noexcept
Get the range of positions in this DepGraph.
SetType FindConnectedComponent(const SetType &todo) const noexcept
Find some connected component within the subset "todo" of this graph.
DepGraphIndex AddTransaction(const FeeFrac &feefrac) noexcept
Add a new unconnected transaction to this transaction graph (in the first available position),...
void RemoveTransactions(const SetType &del) noexcept
Remove the specified positions from this DepGraph.
auto TxCount() const noexcept
Get the number of transactions in the graph.
std::vector< Entry > entries
Data for each transaction.
const SetType & Descendants(DepGraphIndex i) const noexcept
Get the descendants of a given transaction i.
friend bool operator==(const DepGraph &a, const DepGraph &b) noexcept
Equality operator (primarily for testing purposes).
size_t DynamicMemoryUsage() const noexcept
DepGraph() noexcept=default
SetType m_used
Which positions are used.
const SetType & Positions() const noexcept
Get the set of transactions positions in use.
void Compact() noexcept
Reduce memory usage if possible.
FeeFrac FeeRate(const SetType &elems) const noexcept
Compute the aggregate feerate of a set of nodes in this graph.
void AddDependencies(const SetType &parents, DepGraphIndex child) noexcept
Modify this transaction graph, adding multiple parents to a specified child.
bool IsAcyclic() const noexcept
Check if this graph is acyclic.
A default cost model for SFL for SetType=BitSet<64>, based on benchmarks.
void PickChunkToOptimizeEnd(int num_steps) noexcept
void PickMergeCandidateEnd(int num_steps) noexcept
void MakeTopologicalEnd(int num_chunks, int num_steps) noexcept
void StartOptimizingEnd(int num_chunks) noexcept
void MergeChunksMid(int num_txns) noexcept
void StartMinimizingEnd(int num_chunks) noexcept
void MergeChunksEnd(int num_steps) noexcept
void InitializeEnd(int num_txns, int num_deps) noexcept
void PickDependencyToSplitEnd(int num_txns) noexcept
void GetLinearizationEnd(int num_txns, int num_deps) noexcept
void MinimizeStepMid(int num_txns) noexcept
void DeactivateEnd(int num_deps) noexcept
void ActivateEnd(int num_deps) noexcept
void MinimizeStepEnd(bool split) noexcept
Class to represent the internal state of the spanning-forest linearization (SFL) algorithm.
std::pair< TxIdx, TxIdx > PickDependencyToSplit(SetIdx chunk_idx) noexcept
Find a (parent, child) dependency to deactivate in chunk_idx, or (-1, -1) if none.
void StartMinimizing() noexcept
Initialize data structure for minimizing the chunks.
SetIdx PickChunkToOptimize() noexcept
Determine the next chunk to optimize, or INVALID_SET_IDX if none.
std::vector< DepGraphIndex > GetLinearization(const StrongComparator< DepGraphIndex > auto &fallback_order) noexcept
Construct a topologically-valid linearization from the current forest state.
SetIdx MergeChunksDirected(SetIdx chunk_idx, SetIdx merge_chunk_idx) noexcept
Activate a dependency from chunk_idx to merge_chunk_idx (if !DownWard), or a dependency from merge_ch...
SpanningForestState(const DepGraph< SetType > &depgraph LIFETIMEBOUND, uint64_t rng_seed, const CostModel &cost=CostModel{}) noexcept
Construct a spanning forest for the given DepGraph, with every transaction in its own chunk (not topo...
static constexpr SetIdx INVALID_SET_IDX
An invalid SetIdx.
bool MinimizeStep() noexcept
Try to reduce a chunk's size.
std::vector< TxData > m_tx_data
Information about each transaction (and chunks).
std::vector< FeeFrac > GetDiagram() const noexcept
Get the diagram for the current state, which must be topological.
CostModel m_cost
Accounting for the cost of this computation.
SetIdx MergeChunks(SetIdx top_idx, SetIdx bottom_idx) noexcept
Activate a dependency from the bottom set to the top set, which must exist.
void LoadLinearization(std::span< const DepGraphIndex > old_linearization) noexcept
Load an existing linearization.
SetIdx MergeStep(SetIdx chunk_idx) noexcept
Perform an upward or downward merge step, on the specified chunk.
std::vector< SetInfo< SetType > > m_set_info
Information about each set (chunk, or active dependency top set).
SetIdx PickMergeCandidate(SetIdx chunk_idx) noexcept
Determine which chunk to merge chunk_idx with, or INVALID_SET_IDX if none.
SetIdx Activate(TxIdx parent_idx, TxIdx child_idx) noexcept
Make the inactive dependency from child to parent, which must not be in the same chunk already,...
TxIdx PickRandomTx(const SetType &tx_idxs) noexcept
Pick a random transaction within a set (which must be non-empty).
bool OptimizeStep() noexcept
Try to improve the forest.
void StartOptimizing() noexcept
Initialize the data structure for optimization.
std::pair< SetType, SetType > GetReachable(const SetType &tx_idxs) const noexcept
Find the set of out-of-chunk transactions reachable from tx_idxs, both in upwards and downwards direc...
std::vector< std::pair< SetType, SetType > > m_reachable
For each chunk, indexed by SetIdx, the set of out-of-chunk reachable transactions,...
void Improve(TxIdx parent_idx, TxIdx child_idx) noexcept
Split a chunk, and then merge the resulting two chunks to make the graph topological again.
SetType m_transaction_idxs
The set of all TxIdx's of transactions in the cluster indexing into m_tx_data.
InsecureRandomContext m_rng
Internal RNG.
DepGraphIndex TxIdx
Data type to represent indexing into m_tx_data.
const DepGraph< SetType > & m_depgraph
The DepGraph we are trying to linearize.
VecDeque< std::tuple< SetIdx, TxIdx, unsigned > > m_nonminimal_chunks
A FIFO of chunk indexes with a pivot transaction in them, and a flag to indicate their status:
std::conditional_t<(SetType::Size()<=0xff), uint8_t, std::conditional_t<(SetType::Size()<=0xffff), uint16_t, uint32_t > > SetIdx
Data type to represent indexing into m_set_info.
void MergeSequence(SetIdx chunk_idx) noexcept
Perform an upward or downward merge sequence on the specified chunk.
void MakeTopological() noexcept
Make state topological.
std::pair< SetIdx, SetIdx > Deactivate(TxIdx parent_idx, TxIdx child_idx) noexcept
Make a specified active dependency inactive.
uint64_t GetCost() const noexcept
Determine how much work was performed so far.
SetType m_chunk_idxs
The set of all chunk SetIdx's.
VecDeque< SetIdx > m_suboptimal_chunks
A FIFO of chunk SetIdxs for chunks that may be improved still.
SetType m_suboptimal_idxs
The set of all SetIdx's that appear in m_suboptimal_chunks.
void SanityCheck() const
Verify internal consistency of the data structure.
Concept for function objects that return std::strong_ordering when invoked with two Args.
std::vector< FeeFrac > ChunkLinearization(const DepGraph< SetType > &depgraph, std::span< const DepGraphIndex > linearization) noexcept
Compute the feerates of the chunks of linearization.
std::tuple< std::vector< DepGraphIndex >, bool, uint64_t > Linearize(const DepGraph< SetType > &depgraph, uint64_t max_cost, uint64_t rng_seed, const StrongComparator< DepGraphIndex > auto &fallback_order, std::span< const DepGraphIndex > old_linearization={}, bool is_topological=true) noexcept
Find or improve a linearization for a cluster.
std::compare_three_way IndexTxOrder
Simple default transaction ordering function for SpanningForestState::GetLinearization() and Lineariz...
uint32_t DepGraphIndex
Data type to represent transaction indices in DepGraphs and the clusters they represent.
void PostLinearize(const DepGraph< SetType > &depgraph, std::span< DepGraphIndex > linearization)
Improve a given linearization.
std::vector< SetInfo< SetType > > ChunkLinearizationInfo(const DepGraph< SetType > &depgraph, std::span< const DepGraphIndex > linearization) noexcept
Compute the chunks of linearization as SetInfos.
static size_t DynamicUsage(const int8_t &v)
Dynamic memory usage for built-in types is zero.
Definition: memusage.h:31
Definition: common.h:29
static std::vector< std::string > split(const std::string &str, const std::string &delims=" \t")
Definition: subprocess.h:311
Data structure storing a fee and size, ordered by increasing fee/size.
Definition: feefrac.h:41
int64_t fee
Definition: feefrac.h:108
bool IsEmpty() const noexcept
Check if this is empty (size and fee are 0).
Definition: feefrac.h:121
Information about a single transaction.
SetType descendants
All descendants of the transaction (including itself).
friend bool operator==(const Entry &, const Entry &) noexcept=default
Equality operator (primarily for testing purposes).
Entry() noexcept=default
Construct an empty entry.
FeeFrac feerate
Fee and size of transaction itself.
SetType ancestors
All ancestors of the transaction (including itself).
A set of transactions together with their aggregate feerate.
SetInfo(const DepGraph< SetType > &depgraph, DepGraphIndex pos) noexcept
Construct a SetInfo for a given transaction in a depgraph.
SetInfo operator-(const SetInfo &other) const noexcept
Compute the difference between this and other SetInfo (which must be a subset).
FeeFrac feerate
Their combined fee and size.
SetInfo() noexcept=default
Construct a SetInfo for the empty set.
void Set(const DepGraph< SetType > &depgraph, DepGraphIndex pos) noexcept
Add a transaction to this SetInfo (which must not yet be in it).
SetType transactions
The transactions in the set.
friend void swap(SetInfo &a, SetInfo &b) noexcept
Swap two SetInfo objects.
SetInfo(const DepGraph< SetType > &depgraph, const SetType &txn) noexcept
Construct a SetInfo for a set of transactions in a depgraph.
SetInfo & operator|=(const SetInfo &other) noexcept
Add the transactions of other to this SetInfo (no overlap allowed).
SetInfo & operator-=(const SetInfo &other) noexcept
Remove the transactions of other from this SetInfo (which must be a subset).
friend bool operator==(const SetInfo &, const SetInfo &) noexcept=default
Permit equality testing.
Structure with information about a single transaction.
SetType active_children
The set of child transactions reachable through an active dependency.
SetType children
The set of child transactions of this transaction.
SetIdx chunk_idx
Which chunk this transaction belongs to.
std::array< SetIdx, SetType::Size()> dep_top_idx
The top set for every active child dependency this transaction has, indexed by child TxIdx.
SetType parents
The set of parent transactions of this transaction.
static int count
assert(!tx.IsCoinBase())