Bitcoin Core 31.99.0
P2P Digital Currency
cluster_linearize.h
Go to the documentation of this file.
1// Copyright (c) The Bitcoin Core developers
2// Distributed under the MIT software license, see the accompanying
3// file COPYING or http://www.opensource.org/licenses/mit-license.php.
4
5#ifndef BITCOIN_CLUSTER_LINEARIZE_H
6#define BITCOIN_CLUSTER_LINEARIZE_H
7
8#include <algorithm>
9#include <cstdint>
10#include <numeric>
11#include <optional>
12#include <ranges>
13#include <utility>
14#include <vector>
15
16#include <attributes.h>
17#include <memusage.h>
18#include <random.h>
19#include <span.h>
20#include <util/feefrac.h>
21#include <util/vecdeque.h>
22
24
/** Index type for transaction positions within a DepGraph. */
26using DepGraphIndex = uint32_t;
27
/** Data structure that holds a transaction graph's preprocessed data (fee, size, ancestors,
 *  descendants), with used positions tracked in m_used.
 *  NOTE(review): the class declaration line itself is not visible in this excerpt. */
30template<typename SetType>
32{
/** Per-position data: feerate plus closed ancestor/descendant sets (each includes the
 *  transaction itself). */
34 struct Entry
35 {
/** All ancestors of this transaction (including itself). */
39 SetType ancestors;
/** All descendants of this transaction (including itself). */
41 SetType descendants;
42
44 friend bool operator==(const Entry&, const Entry&) noexcept = default;
45
47 Entry() noexcept = default;
// NOTE(review): a FeeFrac feerate member is initialized here but its declaration line is
// not visible in this excerpt — presumably `FeeFrac feerate;` above; confirm upstream.
49 Entry(const FeeFrac& f, const SetType& a, const SetType& d) noexcept : feerate(f), ancestors(a), descendants(d) {}
50 };
51
/** Backing storage; only positions in m_used carry meaningful data. */
53 std::vector<Entry> entries;
54
/** The set of positions currently occupied by transactions. */
56 SetType m_used;
57
58public:
60 friend bool operator==(const DepGraph& a, const DepGraph& b) noexcept
61 {
62 if (a.m_used != b.m_used) return false;
63 // Only compare the used positions within the entries vector.
64 for (auto idx : a.m_used) {
65 if (a.entries[idx] != b.entries[idx]) return false;
66 }
67 return true;
68 }
69
70 // Default constructors.
// The compiler-generated copy/move operations suffice: both members (the entries vector and
// the m_used set) are value types.
71 DepGraph() noexcept = default;
72 DepGraph(const DepGraph&) noexcept = default;
73 DepGraph(DepGraph&&) noexcept = default;
74 DepGraph& operator=(const DepGraph&) noexcept = default;
75 DepGraph& operator=(DepGraph&&) noexcept = default;
76
/** Construct a DepGraph equivalent to depgraph, but with positions remapped: old position i
 *  moves to mapping[i]. mapping must cover depgraph's full PositionRange(), and pos_range
 *  must equal one more than the highest new position (0 when empty); both are checked with
 *  Assume. Feerates are copied first, then dependencies are re-added via the remapped
 *  reduced parents. */
92 DepGraph(const DepGraph<SetType>& depgraph, std::span<const DepGraphIndex> mapping, DepGraphIndex pos_range) noexcept : entries(pos_range)
93 {
94 Assume(mapping.size() == depgraph.PositionRange());
95 Assume((pos_range == 0) == (depgraph.TxCount() == 0));
96 for (DepGraphIndex i : depgraph.Positions()) {
97 auto new_idx = mapping[i];
98 Assume(new_idx < pos_range);
99 // Add transaction.
100 entries[new_idx].ancestors = SetType::Singleton(new_idx);
101 entries[new_idx].descendants = SetType::Singleton(new_idx);
102 m_used.Set(new_idx);
103 // Fill in fee and size.
104 entries[new_idx].feerate = depgraph.entries[i].feerate;
105 }
106 for (DepGraphIndex i : depgraph.Positions()) {
107 // Fill in dependencies by mapping direct parents.
108 SetType parents;
109 for (auto j : depgraph.GetReducedParents(i)) parents.Set(mapping[j]);
110 AddDependencies(parents, mapping[i]);
111 }
112 // Verify that the provided pos_range was correct (no unused positions at the end).
113 Assume(m_used.None() ? (pos_range == 0) : (pos_range == m_used.Last() + 1));
114 }
115
/** Get the set of positions currently occupied by transactions. */
117 const SetType& Positions() const noexcept { return m_used; }
/** One-past-the-highest position in use (size of the backing entries vector). */
119 DepGraphIndex PositionRange() const noexcept { return entries.size(); }
/** Number of transactions in the graph. */
121 auto TxCount() const noexcept { return m_used.Count(); }
/** Read-only access to transaction i's feerate. */
123 const FeeFrac& FeeRate(DepGraphIndex i) const noexcept { return entries[i].feerate; }
/** Mutable access to transaction i's feerate. */
125 FeeFrac& FeeRate(DepGraphIndex i) noexcept { return entries[i].feerate; }
/** Ancestors of transaction i (including i itself). */
127 const SetType& Ancestors(DepGraphIndex i) const noexcept { return entries[i].ancestors; }
/** Descendants of transaction i (including i itself). */
129 const SetType& Descendants(DepGraphIndex i) const noexcept { return entries[i].descendants; }
130
136 DepGraphIndex AddTransaction(const FeeFrac& feefrac) noexcept
137 {
138 static constexpr auto ALL_POSITIONS = SetType::Fill(SetType::Size());
139 auto available = ALL_POSITIONS - m_used;
140 Assume(available.Any());
141 DepGraphIndex new_idx = available.First();
142 if (new_idx == entries.size()) {
143 entries.emplace_back(feefrac, SetType::Singleton(new_idx), SetType::Singleton(new_idx));
144 } else {
145 entries[new_idx] = Entry(feefrac, SetType::Singleton(new_idx), SetType::Singleton(new_idx));
146 }
147 m_used.Set(new_idx);
148 return new_idx;
149 }
150
160 void RemoveTransactions(const SetType& del) noexcept
161 {
162 m_used -= del;
163 // Remove now-unused trailing entries.
164 while (!entries.empty() && !m_used[entries.size() - 1]) {
165 entries.pop_back();
166 }
167 // Remove the deleted transactions from ancestors/descendants of other transactions. Note
168 // that the deleted positions will retain old feerate and dependency information. This does
169 // not matter as they will be overwritten by AddTransaction if they get used again.
170 for (auto& entry : entries) {
171 entry.ancestors &= m_used;
172 entry.descendants &= m_used;
173 }
174 }
175
/** Add dependencies: make every transaction in parents an ancestor of child. Both child and
 *  all of parents must already be present (checked with Assume). Ancestor/descendant sets of
 *  all affected transactions are updated transitively. */
180 void AddDependencies(const SetType& parents, DepGraphIndex child) noexcept
181 {
182 Assume(m_used[child]);
183 Assume(parents.IsSubsetOf(m_used));
184 // Compute the ancestors of parents that are not already ancestors of child.
185 SetType par_anc;
186 for (auto par : parents - Ancestors(child)) {
187 par_anc |= Ancestors(par);
188 }
189 par_anc -= Ancestors(child);
190 // Bail out if there are no such ancestors.
191 if (par_anc.None()) return;
192 // To each such ancestor, add as descendants the descendants of the child.
193 const auto& chl_des = entries[child].descendants;
194 for (auto anc_of_par : par_anc) {
195 entries[anc_of_par].descendants |= chl_des;
196 }
197 // To each descendant of the child, add those ancestors.
198 for (auto dec_of_chl : Descendants(child)) {
199 entries[dec_of_chl].ancestors |= par_anc;
200 }
201 }
202
/** Compute the reduced (direct) parents of transaction i: ancestors of i, excluding i itself
 *  and excluding any ancestor that is itself an ancestor of another remaining parent.
 *  NOTE: the loop mutates `parents` while iterating it; the `parents[parent]` re-check skips
 *  elements already eliminated earlier in the same pass. */
211 SetType GetReducedParents(DepGraphIndex i) const noexcept
212 {
213 SetType parents = Ancestors(i);
214 parents.Reset(i);
215 for (auto parent : parents) {
216 if (parents[parent]) {
217 parents -= Ancestors(parent);
218 parents.Set(parent);
219 }
220 }
221 return parents;
222 }
223
/** Compute the reduced (direct) children of transaction i: descendants of i, excluding i
 *  itself and excluding any descendant that is itself a descendant of another remaining
 *  child. Mirror image of GetReducedParents; the same mutate-while-iterating pattern with a
 *  `children[child]` re-check applies. */
232 SetType GetReducedChildren(DepGraphIndex i) const noexcept
233 {
234 SetType children = Descendants(i);
235 children.Reset(i);
236 for (auto child : children) {
237 if (children[child]) {
238 children -= Descendants(child);
239 children.Set(child);
240 }
241 }
242 return children;
243 }
244
249 FeeFrac FeeRate(const SetType& elems) const noexcept
250 {
251 FeeFrac ret;
252 for (auto pos : elems) ret += entries[pos].feerate;
253 return ret;
254 }
255
/** Find the connected component within todo that contains tx (tx must be in todo, and todo
 *  must be a subset of the used positions; both checked with Assume). Repeatedly expands a
 *  frontier with the ancestors and descendants of newly reached transactions, restricted to
 *  todo, until no new transactions are added. */
266 SetType GetConnectedComponent(const SetType& todo, DepGraphIndex tx) const noexcept
267 {
268 Assume(todo[tx]);
269 Assume(todo.IsSubsetOf(m_used));
270 auto to_add = SetType::Singleton(tx);
271 SetType ret;
272 do {
273 SetType old = ret;
274 for (auto add : to_add) {
275 ret |= Descendants(add);
276 ret |= Ancestors(add);
277 }
278 ret &= todo;
279 to_add = ret - old;
280 } while (to_add.Any());
281 return ret;
282 }
283
291 SetType FindConnectedComponent(const SetType& todo) const noexcept
292 {
293 if (todo.None()) return todo;
294 return GetConnectedComponent(todo, todo.First());
295 }
296
/** Determine whether subset forms a single connected component (its first element's
 *  component covers all of it). An empty subset compares equal to its (empty) component, so
 *  it counts as connected. */
301 bool IsConnected(const SetType& subset) const noexcept
302 {
303 return FindConnectedComponent(subset) == subset;
304 }
305
/** Determine whether the entire graph is connected. */
310 bool IsConnected() const noexcept { return IsConnected(m_used); }
311
316 void AppendTopo(std::vector<DepGraphIndex>& list, const SetType& select) const noexcept
317 {
318 DepGraphIndex old_len = list.size();
319 for (auto i : select) list.push_back(i);
320 std::ranges::sort(std::span{list}.subspan(old_len), [&](DepGraphIndex a, DepGraphIndex b) noexcept {
321 const auto a_anc_count = entries[a].ancestors.Count();
322 const auto b_anc_count = entries[b].ancestors.Count();
323 if (a_anc_count != b_anc_count) return a_anc_count < b_anc_count;
324 return a < b;
325 });
326 }
327
329 bool IsAcyclic() const noexcept
330 {
331 for (auto i : Positions()) {
332 if ((Ancestors(i) & Descendants(i)) != SetType::Singleton(i)) {
333 return false;
334 }
335 }
336 return true;
337 }
338
339 unsigned CountDependencies() const noexcept
340 {
341 unsigned ret = 0;
342 for (auto i : Positions()) {
343 ret += GetReducedParents(i).Count();
344 }
345 return ret;
346 }
347
/** Release excess capacity held by the backing entries vector. */
349 void Compact() noexcept
350 {
351 entries.shrink_to_fit();
352 }
353
/** Dynamically allocated memory usage of this object.
 *  NOTE(review): the function body is not visible in this excerpt — presumably
 *  memusage::DynamicUsage(entries); confirm upstream. */
354 size_t DynamicMemoryUsage() const noexcept
355 {
357 }
358};
359
/** A set of transactions together with their aggregate feerate.
 *  NOTE(review): the struct declaration line and the member declarations (transactions,
 *  feerate) are not visible in this excerpt. */
361template<typename SetType>
363{
368
/** Construct with empty set and empty (zero) feerate. */
370 SetInfo() noexcept = default;
371
/** Construct from a given transaction set and precomputed feerate. */
373 SetInfo(const SetType& txn, const FeeFrac& fr) noexcept : transactions(txn), feerate(fr) {}
374
/** Construct as the singleton set {pos}, with that transaction's feerate. */
376 explicit SetInfo(const DepGraph<SetType>& depgraph, DepGraphIndex pos) noexcept :
377 transactions(SetType::Singleton(pos)), feerate(depgraph.FeeRate(pos)) {}
378
/** Construct from a transaction set, computing its aggregate feerate from depgraph. */
380 explicit SetInfo(const DepGraph<SetType>& depgraph, const SetType& txn) noexcept :
381 transactions(txn), feerate(depgraph.FeeRate(txn)) {}
382
384 void Set(const DepGraph<SetType>& depgraph, DepGraphIndex pos) noexcept
385 {
386 Assume(!transactions[pos]);
387 transactions.Set(pos);
388 feerate += depgraph.FeeRate(pos);
389 }
390
/** Add a disjoint SetInfo to this one (overlap is forbidden, checked with Assume). */
392 SetInfo& operator|=(const SetInfo& other) noexcept
393 {
394 Assume(!transactions.Overlaps(other.transactions));
395 transactions |= other.transactions;
396 feerate += other.feerate;
397 return *this;
398 }
399
/** Remove a subset SetInfo from this one (other must be a subset, checked with Assume). */
401 SetInfo& operator-=(const SetInfo& other) noexcept
402 {
403 Assume(other.transactions.IsSubsetOf(transactions));
404 transactions -= other.transactions;
405 feerate -= other.feerate;
406 return *this;
407 }
408
/** Compute the difference with a subset SetInfo, leaving this one unmodified. */
410 SetInfo operator-(const SetInfo& other) const noexcept
411 {
412 Assume(other.transactions.IsSubsetOf(transactions));
413 return {transactions - other.transactions, feerate - other.feerate};
414 }
415
/** Swap two SetInfos (member-wise, via ADL swap on the members). */
417 friend void swap(SetInfo& a, SetInfo& b) noexcept
418 {
419 swap(a.transactions, b.transactions);
420 swap(a.feerate, b.feerate);
421 }
422
/** Member-wise equality of transaction set and feerate. */
424 friend bool operator==(const SetInfo&, const SetInfo&) noexcept = default;
425};
426
428template<typename SetType>
429std::vector<SetInfo<SetType>> ChunkLinearizationInfo(const DepGraph<SetType>& depgraph, std::span<const DepGraphIndex> linearization) noexcept
430{
431 std::vector<SetInfo<SetType>> ret;
432 for (DepGraphIndex i : linearization) {
434 SetInfo<SetType> new_chunk(depgraph, i);
435 // As long as the new chunk has a higher feerate than the last chunk so far, absorb it.
436 while (!ret.empty() && ByRatio{new_chunk.feerate} > ByRatio{ret.back().feerate}) {
437 new_chunk |= ret.back();
438 ret.pop_back();
439 }
440 // Actually move that new chunk into the chunking.
441 ret.emplace_back(std::move(new_chunk));
442 }
443 return ret;
444}
445
448template<typename SetType>
449std::vector<FeeFrac> ChunkLinearization(const DepGraph<SetType>& depgraph, std::span<const DepGraphIndex> linearization) noexcept
450{
451 std::vector<FeeFrac> ret;
452 for (DepGraphIndex i : linearization) {
454 auto new_chunk = depgraph.FeeRate(i);
455 // As long as the new chunk has a higher feerate than the last chunk so far, absorb it.
456 while (!ret.empty() && ByRatio{new_chunk} > ByRatio{ret.back()}) {
457 new_chunk += ret.back();
458 ret.pop_back();
459 }
460 // Actually move that new chunk into the chunking.
461 ret.push_back(std::move(new_chunk));
462 }
463 return ret;
464}
465
/** Concept for a callable that produces a strong ordering between two Arg values.
 *  NOTE(review): the `concept NAME =` line is not visible in this excerpt; confirm its name
 *  upstream. */
467template<typename F, typename Arg>
469 std::regular_invocable<F, Arg, Arg> &&
470 std::is_same_v<std::invoke_result_t<F, Arg, Arg>, std::strong_ordering>;
471
/** Default transaction ordering: three-way comparison by index value. */
474using IndexTxOrder = std::compare_three_way;
475
/** Default cost model for the spanning-forest linearization algorithm: each operation's
 *  Begin/End callback adds a weighted amount to an abstract cost counter, retrievable via
 *  GetCost(). NOTE(review): the class declaration line is not visible in this excerpt, and
 *  the constant weights' provenance (presumably benchmark-calibrated) should be confirmed
 *  upstream. */
498{
/** Accumulated abstract cost. */
499 uint64_t m_cost{0};
500
501public:
502 inline void InitializeBegin() noexcept {}
503 inline void InitializeEnd(int num_txns, int num_deps) noexcept
504 {
505 // Cost of initialization.
506 m_cost += 39 * num_txns;
507 // Cost of producing linearization at the end.
508 m_cost += 48 * num_txns + 4 * num_deps;
509 }
510 inline void GetLinearizationBegin() noexcept {}
511 inline void GetLinearizationEnd(int num_txns, int num_deps) noexcept
512 {
513 // Note that we account for the cost of the final linearization at the beginning (see
514 // InitializeEnd), because the cost budget decision needs to be made before calling
515 // GetLinearization.
516 // This function exists here to allow overriding it easily for benchmark purposes.
517 }
518 inline void MakeTopologicalBegin() noexcept {}
519 inline void MakeTopologicalEnd(int num_chunks, int num_steps) noexcept
520 {
521 m_cost += 20 * num_chunks + 28 * num_steps;
522 }
523 inline void StartOptimizingBegin() noexcept {}
524 inline void StartOptimizingEnd(int num_chunks) noexcept { m_cost += 13 * num_chunks; }
525 inline void ActivateBegin() noexcept {}
526 inline void ActivateEnd(int num_deps) noexcept { m_cost += 10 * num_deps + 1; }
527 inline void DeactivateBegin() noexcept {}
528 inline void DeactivateEnd(int num_deps) noexcept { m_cost += 11 * num_deps + 8; }
529 inline void MergeChunksBegin() noexcept {}
530 inline void MergeChunksMid(int num_txns) noexcept { m_cost += 2 * num_txns; }
531 inline void MergeChunksEnd(int num_steps) noexcept { m_cost += 3 * num_steps + 5; }
532 inline void PickMergeCandidateBegin() noexcept {}
533 inline void PickMergeCandidateEnd(int num_steps) noexcept { m_cost += 8 * num_steps; }
534 inline void PickChunkToOptimizeBegin() noexcept {}
535 inline void PickChunkToOptimizeEnd(int num_steps) noexcept { m_cost += num_steps + 4; }
536 inline void PickDependencyToSplitBegin() noexcept {}
537 inline void PickDependencyToSplitEnd(int num_txns) noexcept { m_cost += 8 * num_txns + 9; }
538 inline void StartMinimizingBegin() noexcept {}
539 inline void StartMinimizingEnd(int num_chunks) noexcept { m_cost += 18 * num_chunks; }
540 inline void MinimizeStepBegin() noexcept {}
541 inline void MinimizeStepMid(int num_txns) noexcept { m_cost += 11 * num_txns + 11; }
542 inline void MinimizeStepEnd(bool split) noexcept { m_cost += 17 * split + 7; }
543
/** Retrieve the accumulated abstract cost. */
544 inline uint64_t GetCost() const noexcept { return m_cost; }
545};
546
/** State of the spanning-forest linearization algorithm over a DepGraph.
 *  NOTE(review): the class declaration line and several member declarations (m_rng,
 *  m_depgraph, m_transaction_idxs, m_chunk_idxs, m_suboptimal_idxs, m_suboptimal_chunks,
 *  TxIdx, and TxData members chunk_idx / active_children — all referenced below) are not
 *  visible in this excerpt. */
722template<typename SetType, typename CostModel = SFLDefaultCostModel>
724{
725private:
728
// Smallest unsigned integer type able to index every per-set slot for the configured
// SetType::Size().
733 using SetIdx = std::conditional_t<(SetType::Size() <= 0xff),
734 uint8_t,
735 std::conditional_t<(SetType::Size() <= 0xffff),
736 uint16_t,
737 uint32_t>>;
/** Sentinel SetIdx (all bits set) meaning "no set". */
739 static constexpr SetIdx INVALID_SET_IDX = SetIdx(-1);
740
/** Per-transaction data. */
742 struct TxData {
// For each active child dependency, the SetIdx of that dependency's top set (see
// Activate/Deactivate below).
745 std::array<SetIdx, SetType::Size()> dep_top_idx;
/** Direct (reduced) parents of this transaction. */
747 SetType parents;
/** Direct (reduced) children of this transaction. */
749 SetType children;
754 };
755
/** Per-transaction data, indexed by transaction position. */
767 std::vector<TxData> m_tx_data;
/** Storage for transaction sets plus their aggregate feerates, indexed by SetIdx. */
769 std::vector<SetInfo<SetType>> m_set_info;
// Per set: union of parents (.first) and children (.second) of its transactions, excluding
// the set's own transactions (see GetReachable and the constructor).
772 std::vector<std::pair<SetType, SetType>> m_reachable;
782
785
/** The cost-model instance used for accounting. */
787 CostModel m_cost;
788
790 TxIdx PickRandomTx(const SetType& tx_idxs) noexcept
791 {
792 Assume(tx_idxs.Any());
793 unsigned pos = m_rng.randrange<unsigned>(tx_idxs.Count());
794 for (auto tx_idx : tx_idxs) {
795 if (pos == 0) return tx_idx;
796 --pos;
797 }
798 Assume(false);
799 return TxIdx(-1);
800 }
801
805 std::pair<SetType, SetType> GetReachable(const SetType& tx_idxs) const noexcept
806 {
807 SetType parents, children;
808 for (auto tx_idx : tx_idxs) {
809 const auto& tx_data = m_tx_data[tx_idx];
810 parents |= tx_data.parents;
811 children |= tx_data.children;
812 }
813 return {parents - tx_idxs, children - tx_idxs};
814 }
815
/** Activate the currently-inactive dependency parent_idx -> child_idx, merging the chunks
 *  that contain the parent and the child. The former parent chunk's set becomes the new
 *  active dependency's top set; the child chunk's set grows into the merged chunk, whose
 *  SetIdx is returned. */
818 SetIdx Activate(TxIdx parent_idx, TxIdx child_idx) noexcept
819 {
820 m_cost.ActivateBegin();
821 // Gather and check information about the parent and child transactions.
822 auto& parent_data = m_tx_data[parent_idx];
823 auto& child_data = m_tx_data[child_idx];
824 Assume(parent_data.children[child_idx]);
825 Assume(!parent_data.active_children[child_idx]);
826 // Get the set index of the chunks the parent and child are currently in. The parent chunk
827 // will become the top set of the newly activated dependency, while the child chunk will be
828 // grown to become the merged chunk.
829 auto parent_chunk_idx = parent_data.chunk_idx;
830 auto child_chunk_idx = child_data.chunk_idx;
831 Assume(parent_chunk_idx != child_chunk_idx);
832 Assume(m_chunk_idxs[parent_chunk_idx]);
833 Assume(m_chunk_idxs[child_chunk_idx]);
834 auto& top_info = m_set_info[parent_chunk_idx];
835 auto& bottom_info = m_set_info[child_chunk_idx];
836
837 // Consider the following example:
838 //
839 // A A There are two chunks, ABC and DEF, and the inactive E->C dependency
840 // / \ / \ is activated, resulting in a single chunk ABCDEF.
841 // B C B C
842 // : ==> | Dependency | top set before | top set after | change
843 // D E D E B->A | AC | ACDEF | +DEF
844 // \ / \ / C->A | AB | AB |
845 // F F F->D | D | D |
846 // F->E | E | ABCE | +ABC
847 //
848 // The common pattern here is that any dependency which has the parent or child of the
849 // dependency being activated (E->C here) in its top set, will have the opposite part added
850 // to it. This is true for B->A and F->E, but not for C->A and F->D.
851 //
852 // Traverse the old parent chunk top_info (ABC in example), and add bottom_info (DEF) to
853 // every dependency's top set which has the parent (C) in it. At the same time, change the
854 // chunk_idx for each to be child_chunk_idx, which becomes the set for the merged chunk.
855 for (auto tx_idx : top_info.transactions) {
856 auto& tx_data = m_tx_data[tx_idx];
857 tx_data.chunk_idx = child_chunk_idx;
858 for (auto dep_child_idx : tx_data.active_children) {
859 auto& dep_top_info = m_set_info[tx_data.dep_top_idx[dep_child_idx]];
860 if (dep_top_info.transactions[parent_idx]) dep_top_info |= bottom_info;
861 }
862 }
863 // Traverse the old child chunk bottom_info (DEF in example), and add top_info (ABC) to
864 // every dependency's top set which has the child (E) in it.
865 for (auto tx_idx : bottom_info.transactions) {
866 auto& tx_data = m_tx_data[tx_idx];
867 for (auto dep_child_idx : tx_data.active_children) {
868 auto& dep_top_info = m_set_info[tx_data.dep_top_idx[dep_child_idx]];
869 if (dep_top_info.transactions[child_idx]) dep_top_info |= top_info;
870 }
871 }
872 // Merge top_info into bottom_info, which becomes the merged chunk.
873 bottom_info |= top_info;
874 // Compute merged sets of reachable transactions from the new chunk, based on the input
875 // chunks' reachable sets.
876 m_reachable[child_chunk_idx].first |= m_reachable[parent_chunk_idx].first;
877 m_reachable[child_chunk_idx].second |= m_reachable[parent_chunk_idx].second;
878 m_reachable[child_chunk_idx].first -= bottom_info.transactions;
879 m_reachable[child_chunk_idx].second -= bottom_info.transactions;
880 // Make parent chunk the set for the new active dependency.
881 parent_data.dep_top_idx[child_idx] = parent_chunk_idx;
882 parent_data.active_children.Set(child_idx);
883 m_chunk_idxs.Reset(parent_chunk_idx);
884 // Return the newly merged chunk.
885 m_cost.ActivateEnd(/*num_deps=*/bottom_info.transactions.Count() - 1);
886 return child_chunk_idx;
887 }
888
/** Deactivate the currently-active dependency parent_idx -> child_idx, splitting the chunk
 *  that contains both into a top part (the dependency's former top set, containing the
 *  parent) and a bottom part (the remainder, containing the child). Returns the pair
 *  {parent_chunk_idx, child_chunk_idx} of the two resulting chunks. */
891 std::pair<SetIdx, SetIdx> Deactivate(TxIdx parent_idx, TxIdx child_idx) noexcept
892 {
893 m_cost.DeactivateBegin();
894 // Gather and check information about the parent transactions.
895 auto& parent_data = m_tx_data[parent_idx];
896 Assume(parent_data.children[child_idx]);
897 Assume(parent_data.active_children[child_idx]);
898 // Get the top set of the active dependency (which will become the parent chunk) and the
899 // chunk set the transactions are currently in (which will become the bottom chunk).
900 auto parent_chunk_idx = parent_data.dep_top_idx[child_idx];
901 auto child_chunk_idx = parent_data.chunk_idx;
902 Assume(parent_chunk_idx != child_chunk_idx);
903 Assume(m_chunk_idxs[child_chunk_idx]);
904 Assume(!m_chunk_idxs[parent_chunk_idx]); // top set, not a chunk
905 auto& top_info = m_set_info[parent_chunk_idx];
906 auto& bottom_info = m_set_info[child_chunk_idx];
907
908 // Remove the active dependency.
909 parent_data.active_children.Reset(child_idx);
910 m_chunk_idxs.Set(parent_chunk_idx);
911 auto ntx = bottom_info.transactions.Count();
912 // Subtract the top_info from the bottom_info, as it will become the child chunk.
913 bottom_info -= top_info;
914 // See the comment above in Activate(). We perform the opposite operations here, removing
915 // instead of adding. Simultaneously, aggregate the top/bottom's union of parents/children.
916 SetType top_parents, top_children;
917 for (auto tx_idx : top_info.transactions) {
918 auto& tx_data = m_tx_data[tx_idx];
919 tx_data.chunk_idx = parent_chunk_idx;
920 top_parents |= tx_data.parents;
921 top_children |= tx_data.children;
922 for (auto dep_child_idx : tx_data.active_children) {
923 auto& dep_top_info = m_set_info[tx_data.dep_top_idx[dep_child_idx]];
924 if (dep_top_info.transactions[parent_idx]) dep_top_info -= bottom_info;
925 }
926 }
927 SetType bottom_parents, bottom_children;
928 for (auto tx_idx : bottom_info.transactions) {
929 auto& tx_data = m_tx_data[tx_idx];
930 bottom_parents |= tx_data.parents;
931 bottom_children |= tx_data.children;
932 for (auto dep_child_idx : tx_data.active_children) {
933 auto& dep_top_info = m_set_info[tx_data.dep_top_idx[dep_child_idx]];
934 if (dep_top_info.transactions[child_idx]) dep_top_info -= top_info;
935 }
936 }
937 // Compute the new sets of reachable transactions for each new chunk, based on the
938 // top/bottom parents and children computed above.
939 m_reachable[parent_chunk_idx].first = top_parents - top_info.transactions;
940 m_reachable[parent_chunk_idx].second = top_children - top_info.transactions;
941 m_reachable[child_chunk_idx].first = bottom_parents - bottom_info.transactions;
942 m_reachable[child_chunk_idx].second = bottom_children - bottom_info.transactions;
943 // Return the two new set idxs.
944 m_cost.DeactivateEnd(/*num_deps=*/ntx - 1);
945 return {parent_chunk_idx, child_chunk_idx};
946 }
947
/** Merge the chunks with set indexes top_idx and bottom_idx, between which at least one
 *  (inactive) dependency from top to bottom must exist (checked with Assume). A uniformly
 *  random such dependency is activated; returns the merged chunk's SetIdx. */
950 SetIdx MergeChunks(SetIdx top_idx, SetIdx bottom_idx) noexcept
951 {
952 m_cost.MergeChunksBegin();
953 Assume(m_chunk_idxs[top_idx]);
954 Assume(m_chunk_idxs[bottom_idx]);
955 auto& top_chunk_info = m_set_info[top_idx];
956 auto& bottom_chunk_info = m_set_info[bottom_idx];
957 // Count the number of dependencies between bottom_chunk and top_chunk.
958 unsigned num_deps{0};
959 for (auto tx_idx : top_chunk_info.transactions) {
960 auto& tx_data = m_tx_data[tx_idx];
961 num_deps += (tx_data.children & bottom_chunk_info.transactions).Count();
962 }
963 m_cost.MergeChunksMid(/*num_txns=*/top_chunk_info.transactions.Count());
964 Assume(num_deps > 0);
965 // Uniformly randomly pick one of them and activate it.
966 unsigned pick = m_rng.randrange(num_deps);
967 unsigned num_steps = 0;
968 for (auto tx_idx : top_chunk_info.transactions) {
969 ++num_steps;
970 auto& tx_data = m_tx_data[tx_idx];
971 auto intersect = tx_data.children & bottom_chunk_info.transactions;
972 auto count = intersect.Count();
973 if (pick < count) {
974 for (auto child_idx : intersect) {
975 if (pick == 0) {
976 m_cost.MergeChunksEnd(/*num_steps=*/num_steps);
977 return Activate(tx_idx, child_idx);
978 }
979 --pick;
980 }
// Unreachable: pick < count guarantees a hit within intersect.
981 Assume(false);
982 break;
983 }
984 pick -= count;
985 }
// Unreachable: pick < num_deps guarantees a hit in the loop above.
986 Assume(false);
987 return INVALID_SET_IDX;
988 }
989
992 template<bool DownWard>
993 SetIdx MergeChunksDirected(SetIdx chunk_idx, SetIdx merge_chunk_idx) noexcept
994 {
995 if constexpr (DownWard) {
996 return MergeChunks(chunk_idx, merge_chunk_idx);
997 } else {
998 return MergeChunks(merge_chunk_idx, chunk_idx);
999 }
1000 }
1001
/** Among the chunks reachable from chunk_idx (through children if DownWard, through parents
 *  otherwise), pick the one with the highest (DownWard) or lowest (!DownWard) feerate that
 *  is at least as good a merge partner as chunk_idx's own feerate, with a uniformly random
 *  tiebreak among equal-feerate candidates. Returns INVALID_SET_IDX if none qualifies.
 *  NOTE(review): the function signature line (presumably
 *  `SetIdx PickMergeCandidate(SetIdx chunk_idx) noexcept`) is not visible in this excerpt. */
1003 template<bool DownWard>
1005 {
1006 m_cost.PickMergeCandidateBegin();
1008 Assume(m_chunk_idxs[chunk_idx]);
1009 auto& chunk_info = m_set_info[chunk_idx];
1010 // Iterate over all chunks reachable from this one. For those depended-on chunks,
1011 // remember the highest-feerate (if DownWard) or lowest-feerate (if !DownWard) one.
1012 // If multiple equal-feerate candidate chunks to merge with exist, pick a random one
1013 // among them.
1014
// Start the search threshold at the chunk's own feerate; candidates must compare at least
// as favorably as this.
1018 FeeFrac best_other_chunk_feerate = chunk_info.feerate;
1020 SetIdx best_other_chunk_idx = INVALID_SET_IDX;
// Tiebreak of the best candidate so far (0 so the first equal-feerate candidate wins).
1023 uint64_t best_other_chunk_tiebreak{0};
1024
1026 auto todo = DownWard ? m_reachable[chunk_idx].second : m_reachable[chunk_idx].first;
1027 unsigned steps = 0;
1028 while (todo.Any()) {
1029 ++steps;
1030 // Find a chunk for a transaction in todo, and remove all its transactions from todo.
1031 auto reached_chunk_idx = m_tx_data[todo.First()].chunk_idx;
1032 auto& reached_chunk_info = m_set_info[reached_chunk_idx];
1033 todo -= reached_chunk_info.transactions;
1034 // See if it has an acceptable feerate.
1035 auto cmp = DownWard ? ByRatio{best_other_chunk_feerate} <=> ByRatio{reached_chunk_info.feerate}
1036 : ByRatio{reached_chunk_info.feerate} <=> ByRatio{best_other_chunk_feerate};
1037 if (cmp > 0) continue;
1038 uint64_t tiebreak = m_rng.rand64();
1039 if (cmp < 0 || tiebreak >= best_other_chunk_tiebreak) {
1040 best_other_chunk_feerate = reached_chunk_info.feerate;
1041 best_other_chunk_idx = reached_chunk_idx;
1042 best_other_chunk_tiebreak = tiebreak;
1043 }
1044 }
1045 Assume(steps <= m_set_info.size());
1046
1047 m_cost.PickMergeCandidateEnd(/*num_steps=*/steps);
1048 return best_other_chunk_idx;
1049 }
1050
1053 template<bool DownWard>
1054 SetIdx MergeStep(SetIdx chunk_idx) noexcept
1055 {
1056 auto merge_chunk_idx = PickMergeCandidate<DownWard>(chunk_idx);
1057 if (merge_chunk_idx == INVALID_SET_IDX) return INVALID_SET_IDX;
1058 chunk_idx = MergeChunksDirected<DownWard>(chunk_idx, merge_chunk_idx);
1059 Assume(chunk_idx != INVALID_SET_IDX);
1060 return chunk_idx;
1061 }
1062
1064 template<bool DownWard>
1065 void MergeSequence(SetIdx chunk_idx) noexcept
1066 {
1067 Assume(m_chunk_idxs[chunk_idx]);
1068 while (true) {
1069 auto merged_chunk_idx = MergeStep<DownWard>(chunk_idx);
1070 if (merged_chunk_idx == INVALID_SET_IDX) break;
1071 chunk_idx = merged_chunk_idx;
1072 }
1073 // Add the chunk to the queue of improvable chunks, if it wasn't already there.
1074 if (!m_suboptimal_idxs[chunk_idx]) {
1075 m_suboptimal_idxs.Set(chunk_idx);
1076 m_suboptimal_chunks.push_back(chunk_idx);
1077 }
1078 }
1079
/** Attempt to improve the linearization by deactivating the (active) dependency
 *  parent_idx -> child_idx, then restoring topological validity of the two resulting chunks
 *  via a direct self-merge or via one upward plus one downward merge sequence. */
1082 void Improve(TxIdx parent_idx, TxIdx child_idx) noexcept
1083 {
1084 // Deactivate the specified dependency, splitting it into two new chunks: a top containing
1085 // the parent, and a bottom containing the child. The top should have a higher feerate.
1086 auto [parent_chunk_idx, child_chunk_idx] = Deactivate(parent_idx, child_idx);
1087
1088 // At this point we have exactly two chunks which may violate topology constraints (the
1089 // parent chunk and child chunk that were produced by deactivation). We can fix
1090 // these using just merge sequences, one upwards and one downwards, avoiding the need for a
1091 // full MakeTopological.
1092 const auto& parent_reachable = m_reachable[parent_chunk_idx].first;
1093 const auto& child_chunk_txn = m_set_info[child_chunk_idx].transactions;
1094 if (parent_reachable.Overlaps(child_chunk_txn)) {
1095 // The parent chunk has a dependency on a transaction in the child chunk. In this case,
1096 // the parent needs to merge back with the child chunk (a self-merge), and no other
1097 // merges are needed. Special-case this, so the overhead of PickMergeCandidate and
1098 // MergeSequence can be avoided.
1099
1100 // In the self-merge, the roles reverse: the parent chunk (from the split) depends
1101 // on the child chunk, so child_chunk_idx is the "top" and parent_chunk_idx is the
1102 // "bottom" for MergeChunks.
1103 auto merged_chunk_idx = MergeChunks(child_chunk_idx, parent_chunk_idx);
// Queue the merged chunk for further optimization, unless already queued.
1104 if (!m_suboptimal_idxs[merged_chunk_idx]) {
1105 m_suboptimal_idxs.Set(merged_chunk_idx);
1106 m_suboptimal_chunks.push_back(merged_chunk_idx);
1107 }
1108 } else {
1109 // Merge the top chunk with lower-feerate chunks it depends on.
1110 MergeSequence<false>(parent_chunk_idx);
1111 // Merge the bottom chunk with higher-feerate chunks that depend on it.
1112 MergeSequence<true>(child_chunk_idx);
1113 }
1114 }
1115
/** Pop entries off the potentially-suboptimal chunk queue until one is found that is still
 *  an actual chunk, and return its SetIdx; INVALID_SET_IDX when the queue empties first.
 *  NOTE(review): the function signature line and the line that actually removes the front
 *  entry from m_suboptimal_chunks (between the Reset and the m_chunk_idxs check) are not
 *  visible in this excerpt; confirm upstream. */
1118 {
1119 m_cost.PickChunkToOptimizeBegin();
1120 unsigned steps{0};
1121 while (!m_suboptimal_chunks.empty()) {
1122 ++steps;
1123 // Pop an entry from the potentially-suboptimal chunk queue.
1124 SetIdx chunk_idx = m_suboptimal_chunks.front();
1125 Assume(m_suboptimal_idxs[chunk_idx]);
1126 m_suboptimal_idxs.Reset(chunk_idx);
1128 if (m_chunk_idxs[chunk_idx]) {
1129 m_cost.PickChunkToOptimizeEnd(/*num_steps=*/steps);
1130 return chunk_idx;
1131 }
1132 // If what was popped is not currently a chunk, continue. This may
1133 // happen when a split chunk merges in Improve() with one or more existing chunks that
1134 // are themselves on the suboptimal queue already.
1135 }
1136 m_cost.PickChunkToOptimizeEnd(/*num_steps=*/steps);
1137 return INVALID_SET_IDX;
1138 }
1139
/** Within the chunk chunk_idx, pick a uniformly random active dependency whose top set has a
 *  strictly higher feerate than the chunk itself (i.e., whose deactivation is eligible as an
 *  improvement). Returns {TxIdx(-1), TxIdx(-1)} when no eligible dependency exists. */
1141 std::pair<TxIdx, TxIdx> PickDependencyToSplit(SetIdx chunk_idx) noexcept
1142 {
1143 m_cost.PickDependencyToSplitBegin();
1144 Assume(m_chunk_idxs[chunk_idx]);
1145 auto& chunk_info = m_set_info[chunk_idx];
1146
1147 // Remember the best dependency {par, chl} seen so far.
1148 std::pair<TxIdx, TxIdx> candidate_dep = {TxIdx(-1), TxIdx(-1)};
1149 uint64_t candidate_tiebreak = 0;
1150 // Iterate over all transactions.
1151 for (auto tx_idx : chunk_info.transactions) {
1152 const auto& tx_data = m_tx_data[tx_idx];
1153 // Iterate over all active child dependencies of the transaction.
1154 for (auto child_idx : tx_data.active_children) {
1155 auto& dep_top_info = m_set_info[tx_data.dep_top_idx[child_idx]];
1156 // Skip if this dependency is ineligible (the top chunk that would be created
1157 // does not have higher feerate than the chunk it is currently part of).
1158 auto cmp = ByRatio{dep_top_info.feerate} <=> ByRatio{chunk_info.feerate};
1159 if (cmp <= 0) continue;
1160 // Generate a random tiebreak for this dependency, and reject it if its tiebreak
1161 // is worse than the best so far. This means that among all eligible
1162 // dependencies, a uniformly random one will be chosen.
1163 uint64_t tiebreak = m_rng.rand64();
1164 if (tiebreak < candidate_tiebreak) continue;
1165 // Remember this as our (new) candidate dependency.
1166 candidate_dep = {tx_idx, child_idx};
1167 candidate_tiebreak = tiebreak;
1168 }
1169 }
1170 m_cost.PickDependencyToSplitEnd(/*num_txns=*/chunk_info.transactions.Count());
1171 return candidate_dep;
1172 }
1173
1174public:
/** Construct a spanning-forest state for the given dependency graph, with every transaction
 *  in its own singleton chunk. rng_seed seeds the internal RNG; cost is the cost-model
 *  instance used for accounting. The depgraph reference is retained (LIFETIMEBOUND): it must
 *  outlive this object. */
1177 explicit SpanningForestState(const DepGraph<SetType>& depgraph LIFETIMEBOUND, uint64_t rng_seed, const CostModel& cost = CostModel{}) noexcept :
1178 m_rng(rng_seed), m_depgraph(depgraph), m_cost(cost)
1179 {
1180 m_cost.InitializeBegin();
1181 m_transaction_idxs = depgraph.Positions();
1182 auto num_transactions = m_transaction_idxs.Count();
1183 m_tx_data.resize(depgraph.PositionRange());
1184 m_set_info.resize(num_transactions);
1185 m_reachable.resize(num_transactions);
1186 size_t num_chunks = 0;
1187 size_t num_deps = 0;
1188 for (auto tx_idx : m_transaction_idxs) {
1189 // Fill in transaction data.
1190 auto& tx_data = m_tx_data[tx_idx];
1191 tx_data.parents = depgraph.GetReducedParents(tx_idx);
// Register this transaction as a child of each of its direct parents.
1192 for (auto parent_idx : tx_data.parents) {
1193 m_tx_data[parent_idx].children.Set(tx_idx);
1194 }
1195 num_deps += tx_data.parents.Count();
1196 // Create a singleton chunk for it.
1197 tx_data.chunk_idx = num_chunks;
1198 m_set_info[num_chunks++] = SetInfo(depgraph, tx_idx);
1199 }
1200 // Set the reachable transactions for each chunk to the transactions' parents and children.
1201 for (SetIdx chunk_idx = 0; chunk_idx < num_transactions; ++chunk_idx) {
1202 auto& tx_data = m_tx_data[m_set_info[chunk_idx].transactions.First()];
1203 m_reachable[chunk_idx].first = tx_data.parents;
1204 m_reachable[chunk_idx].second = tx_data.children;
1205 }
1206 Assume(num_chunks == num_transactions);
1207 // Mark all chunk sets as chunks.
1208 m_chunk_idxs = SetType::Fill(num_chunks);
1209 m_cost.InitializeEnd(/*num_txns=*/num_chunks, /*num_deps=*/num_deps);
1210 }
1211
1215 void LoadLinearization(std::span<const DepGraphIndex> old_linearization) noexcept
1216 {
1217 // Add transactions one by one, in order of existing linearization.
1218 for (DepGraphIndex tx_idx : old_linearization) {
1219 auto chunk_idx = m_tx_data[tx_idx].chunk_idx;
1220 // Merge the chunk upwards, as long as merging succeeds.
1221 while (true) {
1222 chunk_idx = MergeStep<false>(chunk_idx);
1223 if (chunk_idx == INVALID_SET_IDX) break;
1224 }
1225 }
1226 }
1227
/** Merge chunks, in randomized order and directions, until no more merges apply: each queued
 *  chunk attempts an upward and/or downward merge step (order randomized by `flip`), and any
 *  merge result is re-queued.
 *  NOTE(review): several lines of this function are not visible in this excerpt — the code
 *  that populates/shuffles m_suboptimal_chunks (including the variable `j`) and the line
 *  that removes the front queue entry after reading it; confirm upstream. */
1229 void MakeTopological() noexcept
1230 {
1231 m_cost.MakeTopologicalBegin();
// Randomly choose which single merge direction chunks try initially (see `direction`).
1238 unsigned init_dir = m_rng.randbool();
// Chunks that resulted from a merge; these later try both directions.
1241 SetType merged_chunks;
1242 // Mark chunks as suboptimal.
1244 for (auto chunk_idx : m_chunk_idxs) {
1246 // Randomize the initial order of suboptimal chunks in the queue.
1248 if (j != m_suboptimal_chunks.size() - 1) {
1250 }
1251 }
1252 unsigned chunks = m_chunk_idxs.Count();
1253 unsigned steps = 0;
1254 while (!m_suboptimal_chunks.empty()) {
1255 ++steps;
1256 // Pop an entry from the potentially-suboptimal chunk queue.
1257 SetIdx chunk_idx = m_suboptimal_chunks.front();
1259 Assume(m_suboptimal_idxs[chunk_idx]);
1260 m_suboptimal_idxs.Reset(chunk_idx);
1261 // If what was popped is not currently a chunk, continue. This may
1262 // happen when it was merged with something else since being added.
1263 if (!m_chunk_idxs[chunk_idx]) continue;
// Direction bitmask: bit 1 = try upward, bit 2 = try downward; merge results try both.
1265 unsigned direction = merged_chunks[chunk_idx] ? 3 : init_dir + 1;
1266 int flip = m_rng.randbool();
1267 for (int i = 0; i < 2; ++i) {
1268 if (i ^ flip) {
1269 if (!(direction & 1)) continue;
1270 // Attempt to merge the chunk upwards.
1271 auto result_up = MergeStep<false>(chunk_idx);
1272 if (result_up != INVALID_SET_IDX) {
1273 if (!m_suboptimal_idxs[result_up]) {
1274 m_suboptimal_idxs.Set(result_up);
1275 m_suboptimal_chunks.push_back(result_up);
1276 }
1277 merged_chunks.Set(result_up);
1278 break;
1279 }
1280 } else {
1281 if (!(direction & 2)) continue;
1282 // Attempt to merge the chunk downwards.
1283 auto result_down = MergeStep<true>(chunk_idx);
1284 if (result_down != INVALID_SET_IDX) {
1285 if (!m_suboptimal_idxs[result_down]) {
1286 m_suboptimal_idxs.Set(result_down);
1287 m_suboptimal_chunks.push_back(result_down);
1288 }
1289 merged_chunks.Set(result_down);
1290 break;
1291 }
1292 }
1293 }
1294 }
1295 m_cost.MakeTopologicalEnd(/*num_chunks=*/chunks, /*num_steps=*/steps);
1296 }
1297
//! Queue every current chunk (in randomized order) as potentially suboptimal,
//! so that subsequent OptimizeStep() invocations will consider it.
1299 void StartOptimizing() noexcept
1300 {
1301 m_cost.StartOptimizingBegin();
1303 // Mark chunks suboptimal.
1305 for (auto chunk_idx : m_chunk_idxs) {
1306 m_suboptimal_chunks.push_back(chunk_idx);
1307 // Randomize the initial order of suboptimal chunks in the queue.
1309 if (j != m_suboptimal_chunks.size() - 1) {
1311 }
1312 }
1313 m_cost.StartOptimizingEnd(/*num_chunks=*/m_suboptimal_chunks.size());
1314 }
1315
1317 bool OptimizeStep() noexcept
1318 {
1319 auto chunk_idx = PickChunkToOptimize();
1320 if (chunk_idx == INVALID_SET_IDX) {
1321 // No improvable chunk was found, we are done.
1322 return false;
1323 }
1324 auto [parent_idx, child_idx] = PickDependencyToSplit(chunk_idx);
1325 if (parent_idx == TxIdx(-1)) {
1326 // Nothing to improve in chunk_idx. Need to continue with other chunks, if any.
1327 return !m_suboptimal_chunks.empty();
1328 }
1329 // Deactivate the found dependency and then make the state topological again with a
1330 // sequence of merges.
1331 Improve(parent_idx, child_idx);
1332 return true;
1333 }
1334
//! Queue every current chunk (in randomized order, each paired with a random
//! pivot transaction and a random initial direction bit) as potentially
//! non-minimal, so that subsequent MinimizeStep() invocations will consider it.
1337 void StartMinimizing() noexcept
1338 {
1339 m_cost.StartMinimizingBegin();
1342 // Gather all chunks, and for each, add it with a random pivot in it, and a random initial
1343 // direction, to m_nonminimal_chunks.
1344 for (auto chunk_idx : m_chunk_idxs) {
1345 TxIdx pivot_idx = PickRandomTx(m_set_info[chunk_idx].transactions);
1346 m_nonminimal_chunks.emplace_back(chunk_idx, pivot_idx, m_rng.randbits<1>());
1347 // Randomize the initial order of nonminimal chunks in the queue.
1349 if (j != m_nonminimal_chunks.size() - 1) {
1351 }
1352 }
1353 m_cost.StartMinimizingEnd(/*num_chunks=*/m_nonminimal_chunks.size());
1354 }
1355
//! Perform one chunk-minimization step. Returns false exactly when the queue
//! of potentially-non-minimal chunks is empty (nothing left to do); returns
//! true otherwise, including when the processed chunk turns out to be minimal.
1357 bool MinimizeStep() noexcept
1358 {
1359 // If the queue of potentially-non-minimal chunks is empty, we are done.
1360 if (m_nonminimal_chunks.empty()) return false;
1361 m_cost.MinimizeStepBegin();
1362 // Pop an entry from the potentially-non-minimal chunk queue.
1363 auto [chunk_idx, pivot_idx, flags] = m_nonminimal_chunks.front();
1365 auto& chunk_info = m_set_info[chunk_idx];
// flags bit 0: whether the pivot should end up in the bottom (child) part.
1367 bool move_pivot_down = flags & 1;
// flags bit 1: whether the opposite direction was already tried (second stage).
1369 bool second_stage = flags & 2;
1370
1371 // Find a random dependency whose top and bottom set feerates are equal, and which has
1372 // pivot in bottom set (if move_pivot_down) or in top set (if !move_pivot_down).
1373 std::pair<TxIdx, TxIdx> candidate_dep;
// 0 means "no acceptable candidate seen yet" (real tiebreaks are forced odd below).
1374 uint64_t candidate_tiebreak{0};
1375 bool have_any = false;
1376 // Iterate over all transactions.
1377 for (auto tx_idx : chunk_info.transactions) {
1378 const auto& tx_data = m_tx_data[tx_idx];
1379 // Iterate over all active child dependencies of the transaction.
1380 for (auto child_idx : tx_data.active_children) {
1381 const auto& dep_top_info = m_set_info[tx_data.dep_top_idx[child_idx]];
1382 // Skip if this dependency does not have equal top and bottom set feerates. Note
1383 // that the top cannot have higher feerate than the bottom, or OptimizeSteps would
1384 // have dealt with it.
1385 if (ByRatio{dep_top_info.feerate} < ByRatio{chunk_info.feerate}) continue;
1386 have_any = true;
1387 // Skip if this dependency does not have pivot in the right place.
1388 if (move_pivot_down == dep_top_info.transactions[pivot_idx]) continue;
1389 // Remember this as our chosen dependency if it has a better tiebreak.
1390 uint64_t tiebreak = m_rng.rand64() | 1;
1391 if (tiebreak > candidate_tiebreak) {
1392 candidate_tiebreak = tiebreak;
1393 candidate_dep = {tx_idx, child_idx};
1394 }
1395 }
1396 }
1397 m_cost.MinimizeStepMid(/*num_txns=*/chunk_info.transactions.Count());
1398 // If no dependencies have equal top and bottom set feerate, this chunk is minimal.
1399 if (!have_any) return true;
1400 // If all found dependencies have the pivot in the wrong place, try moving it in the other
1401 // direction. If this was the second stage already, we are done.
1402 if (candidate_tiebreak == 0) {
1403 // Switch to other direction, and to second phase.
1404 flags ^= 3;
1405 if (!second_stage) m_nonminimal_chunks.emplace_back(chunk_idx, pivot_idx, flags);
1406 return true;
1407 }
1408
1409 // Otherwise, deactivate the dependency that was found.
1410 auto [parent_chunk_idx, child_chunk_idx] = Deactivate(candidate_dep.first, candidate_dep.second);
1411 // Determine if there is a dependency from the new bottom to the new top (opposite from the
1412 // dependency that was just deactivated).
1413 auto& parent_reachable = m_reachable[parent_chunk_idx].first;
1414 auto& child_chunk_txn = m_set_info[child_chunk_idx].transactions;
1415 if (parent_reachable.Overlaps(child_chunk_txn)) {
1416 // A self-merge is needed. Note that the child_chunk_idx is the top, and
1417 // parent_chunk_idx is the bottom, because we activate a dependency in the reverse
1418 // direction compared to the deactivation above.
1419 auto merged_chunk_idx = MergeChunks(child_chunk_idx, parent_chunk_idx);
1420 // Re-insert the chunk into the queue, in the same direction. Note that the chunk_idx
1421 // will have changed.
1422 m_nonminimal_chunks.emplace_back(merged_chunk_idx, pivot_idx, flags);
1423 m_cost.MinimizeStepEnd(/*split=*/false);
1424 } else {
1425 // No self-merge happens, and thus we have found a way to split the chunk. Create two
1426 // smaller chunks, and add them to the queue. The one that contains the current pivot
1427 // gets to continue with it in the same direction, to minimize the number of times we
1428 // alternate direction. If we were in the second phase already, the newly created chunk
1429 // inherits that too, because we know no split with the pivot on the other side is
1430 // possible already. The new chunk without the current pivot gets a new randomly-chosen
1431 // one.
1432 if (move_pivot_down) {
1433 auto parent_pivot_idx = PickRandomTx(m_set_info[parent_chunk_idx].transactions);
1434 m_nonminimal_chunks.emplace_back(parent_chunk_idx, parent_pivot_idx, m_rng.randbits<1>());
1435 m_nonminimal_chunks.emplace_back(child_chunk_idx, pivot_idx, flags);
1436 } else {
1437 auto child_pivot_idx = PickRandomTx(m_set_info[child_chunk_idx].transactions);
1438 m_nonminimal_chunks.emplace_back(parent_chunk_idx, pivot_idx, flags);
1439 m_nonminimal_chunks.emplace_back(child_chunk_idx, child_pivot_idx, m_rng.randbits<1>());
1440 }
1441 if (m_rng.randbool()) {
1443 }
1444 m_cost.MinimizeStepEnd(/*split=*/true);
1445 }
1446 return true;
1447 }
1448
//! Produce the final linearization from the current chunk state.
//!
//! Chunks whose out-of-chunk dependencies are all satisfied are kept in a heap
//! and emitted best-first (the comparators below order by feerate, then size,
//! then fallback_order, so the heap's top is the most preferred candidate).
//! Within a chunk, transactions are emitted through a second heap using
//! analogous per-transaction ordering. Dependency counters (chunk_deps,
//! tx_deps) are decremented as transactions are appended, which feeds newly
//! ready chunks/transactions into the heaps.
//!
//! @param fallback_order  Strong comparator over DepGraphIndex used as the
//!                        final tie-breaker (both between chunks and between
//!                        transactions).
//! @return A topologically valid ordering of all transactions.
1465 std::vector<DepGraphIndex> GetLinearization(const StrongComparator<DepGraphIndex> auto& fallback_order) noexcept
1466 {
1467 m_cost.GetLinearizationBegin();
1469 std::vector<DepGraphIndex> ret;
1470 ret.reserve(m_set_info.size());
1474 std::vector<std::pair<SetIdx, TxIdx>> ready_chunks;
1477 std::vector<TxIdx> chunk_deps(m_set_info.size(), 0);
1480 std::vector<TxIdx> tx_deps(m_tx_data.size(), 0);
1483 std::vector<TxIdx> ready_tx;
1484 // Populate chunk_deps and tx_deps.
1485 unsigned num_deps{0};
1486 for (TxIdx chl_idx : m_transaction_idxs) {
1487 const auto& chl_data = m_tx_data[chl_idx];
1488 tx_deps[chl_idx] = chl_data.parents.Count();
1489 num_deps += tx_deps[chl_idx];
1490 auto chl_chunk_idx = chl_data.chunk_idx;
1491 auto& chl_chunk_info = m_set_info[chl_chunk_idx];
// Only dependencies on transactions outside the chunk count towards chunk_deps.
1492 chunk_deps[chl_chunk_idx] += (chl_data.parents - chl_chunk_info.transactions).Count();
1493 }
// Compute the maximum (under fallback_order) transaction within a chunk; it is
// stored alongside the chunk in ready_chunks and used for tie-breaking.
1495 auto max_fallback_fn = [&](SetIdx chunk_idx) noexcept {
1496 auto& chunk = m_set_info[chunk_idx].transactions;
1497 auto it = chunk.begin();
1498 DepGraphIndex ret = *it;
1499 ++it;
1500 while (it != chunk.end()) {
1501 if (fallback_order(*it, ret) > 0) ret = *it;
1502 ++it;
1503 }
1504 return ret;
1505 };
// Comparator for the per-chunk transaction heap (heap top = preferred tx).
1508 auto tx_cmp_fn = [&](const auto& a, const auto& b) noexcept {
1509 // Bail out for identical transactions.
1510 if (a == b) return false;
1511 // First sort by increasing transaction feerate.
1512 auto& a_feerate = m_depgraph.FeeRate(a);
1513 auto& b_feerate = m_depgraph.FeeRate(b);
1514 auto feerate_cmp = ByRatio{a_feerate} <=> ByRatio{b_feerate};
1515 if (feerate_cmp != 0) return feerate_cmp < 0;
1516 // Then by decreasing transaction size.
1517 if (a_feerate.size != b_feerate.size) {
1518 return a_feerate.size > b_feerate.size;
1519 }
1520 // Tie-break by decreasing fallback_order.
1521 auto fallback_cmp = fallback_order(a, b);
1522 if (fallback_cmp != 0) return fallback_cmp > 0;
1523 // This should not be hit, because fallback_order defines a strong ordering.
1524 Assume(false);
1525 return a < b;
1526 };
1527 // Construct a heap with all chunks that have no out-of-chunk dependencies.
1530 auto chunk_cmp_fn = [&](const auto& a, const auto& b) noexcept {
1531 // Bail out for identical chunks.
1532 if (a.first == b.first) return false;
1533 // First sort by increasing chunk feerate.
1534 auto& chunk_feerate_a = m_set_info[a.first].feerate;
1535 auto& chunk_feerate_b = m_set_info[b.first].feerate;
1536 auto feerate_cmp = ByRatio{chunk_feerate_a} <=> ByRatio{chunk_feerate_b};
1537 if (feerate_cmp != 0) return feerate_cmp < 0;
1538 // Then by decreasing chunk size.
1539 if (chunk_feerate_a.size != chunk_feerate_b.size) {
1540 return chunk_feerate_a.size > chunk_feerate_b.size;
1541 }
1542 // Tie-break by decreasing fallback_order.
1543 auto fallback_cmp = fallback_order(a.second, b.second);
1544 if (fallback_cmp != 0) return fallback_cmp > 0;
1545 // This should not be hit, because fallback_order defines a strong ordering.
1546 Assume(false);
1547 return a.second < b.second;
1548 };
1549 // Construct a heap with all chunks that have no out-of-chunk dependencies.
1550 for (SetIdx chunk_idx : m_chunk_idxs) {
1551 if (chunk_deps[chunk_idx] == 0) {
1552 ready_chunks.emplace_back(chunk_idx, max_fallback_fn(chunk_idx));
1553 }
1554 }
1555 std::make_heap(ready_chunks.begin(), ready_chunks.end(), chunk_cmp_fn);
1556 // Pop chunks off the heap.
1557 while (!ready_chunks.empty()) {
1558 auto [chunk_idx, _rnd] = ready_chunks.front();
1559 std::pop_heap(ready_chunks.begin(), ready_chunks.end(), chunk_cmp_fn);
1560 ready_chunks.pop_back();
1561 Assume(chunk_deps[chunk_idx] == 0);
1562 const auto& chunk_txn = m_set_info[chunk_idx].transactions;
1563 // Build heap of all includable transactions in chunk.
1564 Assume(ready_tx.empty());
1565 for (TxIdx tx_idx : chunk_txn) {
1566 if (tx_deps[tx_idx] == 0) ready_tx.push_back(tx_idx);
1567 }
1568 Assume(!ready_tx.empty());
1569 std::make_heap(ready_tx.begin(), ready_tx.end(), tx_cmp_fn);
1570 // Pick transactions from the ready heap, append them to linearization, and decrement
1571 // dependency counts.
1572 while (!ready_tx.empty()) {
1573 // Pop an element from the tx_ready heap.
1574 auto tx_idx = ready_tx.front();
1575 std::pop_heap(ready_tx.begin(), ready_tx.end(), tx_cmp_fn);
1576 ready_tx.pop_back();
1577 // Append to linearization.
1578 ret.push_back(tx_idx);
1579 // Decrement dependency counts.
1580 auto& tx_data = m_tx_data[tx_idx];
1581 for (TxIdx chl_idx : tx_data.children) {
1582 auto& chl_data = m_tx_data[chl_idx];
1583 // Decrement tx dependency count.
1584 Assume(tx_deps[chl_idx] > 0);
1585 if (--tx_deps[chl_idx] == 0 && chunk_txn[chl_idx]) {
1586 // Child tx has no dependencies left, and is in this chunk. Add it to the tx heap.
1587 ready_tx.push_back(chl_idx);
1588 std::push_heap(ready_tx.begin(), ready_tx.end(), tx_cmp_fn);
1589 }
1590 // Decrement chunk dependency count if this is out-of-chunk dependency.
1591 if (chl_data.chunk_idx != chunk_idx) {
1592 Assume(chunk_deps[chl_data.chunk_idx] > 0);
1593 if (--chunk_deps[chl_data.chunk_idx] == 0) {
1594 // Child chunk has no dependencies left. Add it to the chunk heap.
1595 ready_chunks.emplace_back(chl_data.chunk_idx, max_fallback_fn(chl_data.chunk_idx));
1596 std::push_heap(ready_chunks.begin(), ready_chunks.end(), chunk_cmp_fn);
1597 }
1598 }
1599 }
1600 }
1601 }
// Every transaction must have been emitted exactly once.
1602 Assume(ret.size() == m_set_info.size());
1603 m_cost.GetLinearizationEnd(/*num_txns=*/m_set_info.size(), /*num_deps=*/num_deps);
1604 return ret;
1605 }
1606
1620 std::vector<FeeFrac> GetDiagram() const noexcept
1621 {
1622 std::vector<FeeFrac> ret;
1623 for (auto chunk_idx : m_chunk_idxs) {
1624 ret.push_back(m_set_info[chunk_idx].feerate);
1625 }
1626 std::ranges::sort(ret, std::greater<ByRatioNegSize<FeeFrac>>{});
1627 return ret;
1628 }
1629
//! Return the current value of the internal cost tracker (accumulated by the
//! various *Begin/*End calls above).
1631 uint64_t GetCost() const noexcept { return m_cost.GetCost(); }
1632
//! Verify all internal invariants (dependency bookkeeping, chunk partition,
//! per-dependency top sets, and the suboptimal/nonminimal queues) against
//! m_depgraph, asserting on any violation. Intended for testing only.
1634 void SanityCheck() const
1635 {
1636 //
1637 // Verify dependency parent/child information, and build list of (active) dependencies.
1638 //
1639 std::vector<std::pair<TxIdx, TxIdx>> expected_dependencies;
1640 std::vector<std::pair<TxIdx, TxIdx>> all_dependencies;
1641 std::vector<std::pair<TxIdx, TxIdx>> active_dependencies;
1642 for (auto parent_idx : m_depgraph.Positions()) {
1643 for (auto child_idx : m_depgraph.GetReducedChildren(parent_idx)) {
1644 expected_dependencies.emplace_back(parent_idx, child_idx);
1645 }
1646 }
1647 for (auto tx_idx : m_transaction_idxs) {
1648 for (auto child_idx : m_tx_data[tx_idx].children) {
1649 all_dependencies.emplace_back(tx_idx, child_idx);
1650 if (m_tx_data[tx_idx].active_children[child_idx]) {
1651 active_dependencies.emplace_back(tx_idx, child_idx);
1652 }
1653 }
1654 }
// The dependencies stored per transaction must match those in the depgraph.
1655 std::ranges::sort(expected_dependencies);
1656 std::ranges::sort(all_dependencies);
1657 assert(expected_dependencies == all_dependencies);
1658
1659 //
1660 // Verify the chunks against the list of active dependencies
1661 //
1662 SetType chunk_cover;
1663 for (auto chunk_idx : m_chunk_idxs) {
1664 const auto& chunk_info = m_set_info[chunk_idx];
1665 // Verify that transactions in the chunk point back to it. This guarantees
1666 // that chunks are non-overlapping.
1667 for (auto tx_idx : chunk_info.transactions) {
1668 assert(m_tx_data[tx_idx].chunk_idx == chunk_idx);
1669 }
1670 assert(!chunk_cover.Overlaps(chunk_info.transactions));
1671 chunk_cover |= chunk_info.transactions;
1672 // Verify the chunk's transaction set: start from an arbitrary chunk transaction,
1673 // and for every active dependency, if it contains the parent or child, add the
1674 // other. It must have exactly N-1 active dependencies in it, guaranteeing it is
1675 // acyclic.
1676 assert(chunk_info.transactions.Any());
1677 SetType expected_chunk = SetType::Singleton(chunk_info.transactions.First());
1678 while (true) {
1679 auto old = expected_chunk;
1680 size_t active_dep_count{0};
1681 for (const auto& [par, chl] : active_dependencies) {
1682 if (expected_chunk[par] || expected_chunk[chl]) {
1683 expected_chunk.Set(par);
1684 expected_chunk.Set(chl);
1685 ++active_dep_count;
1686 }
1687 }
// Iterate until the closure stops growing (fixed point reached).
1688 if (old == expected_chunk) {
1689 assert(expected_chunk.Count() == active_dep_count + 1);
1690 break;
1691 }
1692 }
1693 assert(chunk_info.transactions == expected_chunk);
1694 // Verify the chunk's feerate.
1695 assert(chunk_info.feerate == m_depgraph.FeeRate(chunk_info.transactions));
1696 // Verify the chunk's reachable transactions.
1697 assert(m_reachable[chunk_idx] == GetReachable(expected_chunk));
1698 // Verify that the chunk's reachable transactions don't include its own transactions.
1699 assert(!m_reachable[chunk_idx].first.Overlaps(chunk_info.transactions));
1700 assert(!m_reachable[chunk_idx].second.Overlaps(chunk_info.transactions));
1701 }
1702 // Verify that together, the chunks cover all transactions.
1703 assert(chunk_cover == m_depgraph.Positions());
1704
1705 //
1706 // Verify transaction data.
1707 //
1708 assert(m_transaction_idxs == m_depgraph.Positions());
1709 for (auto tx_idx : m_transaction_idxs) {
1710 const auto& tx_data = m_tx_data[tx_idx];
1711 // Verify it has a valid chunk index, and that chunk includes this transaction.
1712 assert(m_chunk_idxs[tx_data.chunk_idx]);
1713 assert(m_set_info[tx_data.chunk_idx].transactions[tx_idx]);
1714 // Verify parents/children.
1715 assert(tx_data.parents == m_depgraph.GetReducedParents(tx_idx));
1716 assert(tx_data.children == m_depgraph.GetReducedChildren(tx_idx));
1717 // Verify active_children is a subset of children.
1718 assert(tx_data.active_children.IsSubsetOf(tx_data.children));
1719 // Verify each active child's dep_top_idx points to a valid non-chunk set.
1720 for (auto child_idx : tx_data.active_children) {
1721 assert(tx_data.dep_top_idx[child_idx] < m_set_info.size());
1722 assert(!m_chunk_idxs[tx_data.dep_top_idx[child_idx]]);
1723 }
1724 }
1725
1726 //
1727 // Verify active dependencies' top sets.
1728 //
1729 for (const auto& [par_idx, chl_idx] : active_dependencies) {
1730 // Verify the top set's transactions: it must contain the parent, and for every
1731 // active dependency, except the chl_idx->par_idx dependency itself, if it contains the
1732 // parent or child, it must contain both. It must have exactly N-1 active dependencies
1733 // in it, guaranteeing it is acyclic.
1734 SetType expected_top = SetType::Singleton(par_idx);
1735 while (true) {
1736 auto old = expected_top;
1737 size_t active_dep_count{0};
1738 for (const auto& [par2_idx, chl2_idx] : active_dependencies) {
1739 if (par_idx == par2_idx && chl_idx == chl2_idx) continue;
1740 if (expected_top[par2_idx] || expected_top[chl2_idx]) {
1741 expected_top.Set(par2_idx);
1742 expected_top.Set(chl2_idx);
1743 ++active_dep_count;
1744 }
1745 }
// Iterate until the closure stops growing (fixed point reached).
1746 if (old == expected_top) {
1747 assert(expected_top.Count() == active_dep_count + 1);
1748 break;
1749 }
1750 }
1751 assert(!expected_top[chl_idx]);
1752 auto& dep_top_info = m_set_info[m_tx_data[par_idx].dep_top_idx[chl_idx]];
1753 assert(dep_top_info.transactions == expected_top);
1754 // Verify the top set's feerate.
1755 assert(dep_top_info.feerate == m_depgraph.FeeRate(dep_top_info.transactions));
1756 }
1757
1758 //
1759 // Verify m_suboptimal_chunks.
1760 //
// The queue must contain no duplicates, and match the m_suboptimal_idxs set.
1761 SetType suboptimal_idxs;
1762 for (size_t i = 0; i < m_suboptimal_chunks.size(); ++i) {
1763 auto chunk_idx = m_suboptimal_chunks[i];
1764 assert(!suboptimal_idxs[chunk_idx]);
1765 suboptimal_idxs.Set(chunk_idx);
1766 }
1767 assert(m_suboptimal_idxs == suboptimal_idxs);
1768
1769 //
1770 // Verify m_nonminimal_chunks.
1771 //
// Entries must reference distinct current chunks, with pivots inside them.
1772 SetType nonminimal_idxs;
1773 for (size_t i = 0; i < m_nonminimal_chunks.size(); ++i) {
1774 auto [chunk_idx, pivot, flags] = m_nonminimal_chunks[i];
1775 assert(m_tx_data[pivot].chunk_idx == chunk_idx);
1776 assert(!nonminimal_idxs[chunk_idx]);
1777 nonminimal_idxs.Set(chunk_idx);
1778 }
1779 assert(nonminimal_idxs.IsSubsetOf(m_chunk_idxs));
1780 }
1781};
1782
/** Compute a linearization of depgraph using the spanning-forest algorithm.
 *
 * @param depgraph          The transaction graph to linearize.
 * @param max_cost          Stop optimizing/minimizing once the internal cost
 *                          measure reaches this bound.
 * @param rng_seed          Seed for the algorithm's internal randomness.
 * @param fallback_order    Strong comparator over DepGraphIndex used as the
 *                          final tie-breaker in the output order.
 * @param old_linearization Optional existing linearization to start from.
 * @param is_topological    Whether old_linearization is known to be
 *                          topologically valid (ignored when it is empty).
 * @return A tuple of (the linearization, whether an optimal result was
 *         reached before hitting max_cost, the cost actually consumed).
 */
1803template<typename SetType>
1804std::tuple<std::vector<DepGraphIndex>, bool, uint64_t> Linearize(
1805 const DepGraph<SetType>& depgraph,
1806 uint64_t max_cost,
1807 uint64_t rng_seed,
1808 const StrongComparator<DepGraphIndex> auto& fallback_order,
1809 std::span<const DepGraphIndex> old_linearization = {},
1810 bool is_topological = true) noexcept
1811{
1813 SpanningForestState forest(depgraph, rng_seed);
1814 if (!old_linearization.empty()) {
1815 forest.LoadLinearization(old_linearization);
1816 if (!is_topological) forest.MakeTopological();
1817 } else {
1818 forest.MakeTopological();
1819 }
1820 // Make improvement steps to it until we hit the max_iterations limit, or an optimal result
1821 // is found.
1822 if (forest.GetCost() < max_cost) {
1823 forest.StartOptimizing();
1824 do {
1825 if (!forest.OptimizeStep()) break;
1826 } while (forest.GetCost() < max_cost);
1827 }
1828 // Make chunk minimization steps until we hit the max_iterations limit, or all chunks are
1829 // minimal.
1830 bool optimal = false;
1831 if (forest.GetCost() < max_cost) {
1832 forest.StartMinimizing();
1833 do {
1834 if (!forest.MinimizeStep()) {
// MinimizeStep() returning false means all chunks are minimal: optimal result.
1835 optimal = true;
1836 break;
1837 }
1838 } while (forest.GetCost() < max_cost);
1839 }
1840 return {forest.GetLinearization(fallback_order), optimal, forest.GetCost()};
1841}
1842
/** Improve a given linearization in place, without making it worse, using the
 *  two-pass group merge/swap scheme described in the comments below. */
1859template<typename SetType>
1860void PostLinearize(const DepGraph<SetType>& depgraph, std::span<DepGraphIndex> linearization)
1861{
1862 // This algorithm performs a number of passes (currently 2); the even ones operate from back to
1863 // front, the odd ones from front to back. Each results in an equal-or-better linearization
1864 // than the one started from.
1865 // - One pass in either direction guarantees that the resulting chunks are connected.
1866 // - Each direction corresponds to one shape of tree being linearized optimally (forward passes
1867 // guarantee this for graphs where each transaction has at most one child; backward passes
1868 // guarantee this for graphs where each transaction has at most one parent).
1869 // - Starting with a backward pass guarantees the moved-tree property.
1870 //
1871 // During an odd (forward) pass, the high-level operation is:
1872 // - Start with an empty list of groups L=[].
1873 // - For every transaction i in the old linearization, from front to back:
1874 // - Append a new group C=[i], containing just i, to the back of L.
1875 // - While L has at least one group before C, and the group immediately before C has feerate
1876 // lower than C:
1877 // - If C depends on P:
1878 // - Merge P into C, making C the concatenation of P+C, continuing with the combined C.
1879 // - Otherwise:
1880 // - Swap P with C, continuing with the now-moved C.
1881 // - The output linearization is the concatenation of the groups in L.
1882 //
1883 // During even (backward) passes, i iterates from the back to the front of the existing
1884 // linearization, and new groups are prepended instead of appended to the list L. To enable
1885 // more code reuse, both passes append groups, but during even passes the meanings of
1886 // parent/child, and of high/low feerate are reversed, and the final concatenation is reversed
1887 // on output.
1888 //
1889 // In the implementation below, the groups are represented by singly-linked lists (pointing
1890 // from the back to the front), which are themselves organized in a singly-linked circular
1891 // list (each group pointing to its predecessor, with a special sentinel group at the front
1892 // that points back to the last group).
1893 //
1894 // Information about transaction t is stored in entries[t + 1], while the sentinel is in
1895 // entries[0].
1896
1898 static constexpr DepGraphIndex SENTINEL{0};
1900 static constexpr DepGraphIndex NO_PREV_TX{0};
1901
1902
1904 struct TxEntry
1905 {
// entries index of the previous transaction in this group (NO_PREV_TX if this
// is the group's first transaction).
1908 DepGraphIndex prev_tx;
1909
1910 // The fields below are only used for transactions that are the last one in a group
1911 // (referred to as tail transactions below).
1912
// entries index of this group's first transaction.
1914 DepGraphIndex first_tx;
// entries index of the tail transaction of the previous group in the circular
// group list.
1917 DepGraphIndex prev_group;
// Set of transactions in this group.
1919 SetType group;
// Union over the group of each transaction's ancestors (odd passes) or
// descendants (even passes); used to detect dependencies between groups.
1921 SetType deps;
// Combined feerate of this group (fee negated during even passes).
1923 FeeFrac feerate;
1924 };
1925
1926 // As an example, consider the state corresponding to the linearization [1,0,3,2], with
1927 // groups [1,0,3] and [2], in an odd pass. The linked lists would be:
1928 //
1929 // +-----+
1930 // 0<-P-- | 0 S | ---\ Legend:
1931 // +-----+ |
1932 // ^ | - digit in box: entries index
1933 // /--------------F---------+ G | (note: one more than tx value)
1934 // v \ | | - S: sentinel group
1935 // +-----+ +-----+ +-----+ | (empty feerate)
1936 // 0<-P-- | 2 | <--P-- | 1 | <--P-- | 4 T | | - T: tail transaction, contains
1937 // +-----+ +-----+ +-----+ | fields beyond prev_tv.
1938 // ^ | - P: prev_tx reference
1939 // G G - F: first_tx reference
1940 // | | - G: prev_group reference
1941 // +-----+ |
1942 // 0<-P-- | 3 T | <--/
1943 // +-----+
1944 // ^ |
1945 // \-F-/
1946 //
1947 // During an even pass, the diagram above would correspond to linearization [2,3,0,1], with
1948 // groups [2] and [3,0,1].
1949
1950 std::vector<TxEntry> entries(depgraph.PositionRange() + 1);
1951
1952 // Perform two passes over the linearization.
1953 for (int pass = 0; pass < 2; ++pass) {
1954 int rev = !(pass & 1);
1955 // Construct a sentinel group, identifying the start of the list.
1956 entries[SENTINEL].prev_group = SENTINEL;
1957 Assume(entries[SENTINEL].feerate.IsEmpty());
1958
1959 // Iterate over all elements in the existing linearization.
1960 for (DepGraphIndex i = 0; i < linearization.size(); ++i) {
1961 // Even passes are from back to front; odd passes from front to back.
1962 DepGraphIndex idx = linearization[rev ? linearization.size() - 1 - i : i];
1963 // Construct a new group containing just idx. In even passes, the meaning of
1964 // parent/child and high/low feerate are swapped.
1965 DepGraphIndex cur_group = idx + 1;
1966 entries[cur_group].group = SetType::Singleton(idx);
1967 entries[cur_group].deps = rev ? depgraph.Descendants(idx): depgraph.Ancestors(idx);
1968 entries[cur_group].feerate = depgraph.FeeRate(idx);
// Negating the fee reverses the feerate ordering for backward passes.
1969 if (rev) entries[cur_group].feerate.fee = -entries[cur_group].feerate.fee;
1970 entries[cur_group].prev_tx = NO_PREV_TX; // No previous transaction in group.
1971 entries[cur_group].first_tx = cur_group; // Transaction itself is first of group.
1972 // Insert the new group at the back of the groups linked list.
1973 entries[cur_group].prev_group = entries[SENTINEL].prev_group;
1974 entries[SENTINEL].prev_group = cur_group;
1975
1976 // Start merge/swap cycle.
1977 DepGraphIndex next_group = SENTINEL; // We inserted at the end, so next group is sentinel.
1978 DepGraphIndex prev_group = entries[cur_group].prev_group;
1979 // Continue as long as the current group has higher feerate than the previous one.
1980 while (ByRatio{entries[cur_group].feerate} > ByRatio{entries[prev_group].feerate}) {
1981 // prev_group/cur_group/next_group refer to (the last transactions of) 3
1982 // consecutive entries in groups list.
1983 Assume(cur_group == entries[next_group].prev_group);
1984 Assume(prev_group == entries[cur_group].prev_group);
1985 // The sentinel has empty feerate, which is neither higher or lower than other
1986 // feerates. Thus, the while loop we are in here guarantees that cur_group and
1987 // prev_group are not the sentinel.
1988 Assume(cur_group != SENTINEL);
1989 Assume(prev_group != SENTINEL);
1990 if (entries[cur_group].deps.Overlaps(entries[prev_group].group)) {
1991 // There is a dependency between cur_group and prev_group; merge prev_group
1992 // into cur_group. The group/deps/feerate fields of prev_group remain unchanged
1993 // but become unused.
1994 entries[cur_group].group |= entries[prev_group].group;
1995 entries[cur_group].deps |= entries[prev_group].deps;
1996 entries[cur_group].feerate += entries[prev_group].feerate;
1997 // Make the first of the current group point to the tail of the previous group.
1998 entries[entries[cur_group].first_tx].prev_tx = prev_group;
1999 // The first of the previous group becomes the first of the newly-merged group.
2000 entries[cur_group].first_tx = entries[prev_group].first_tx;
2001 // The previous group becomes whatever group was before the former one.
2002 prev_group = entries[prev_group].prev_group;
2003 entries[cur_group].prev_group = prev_group;
2004 } else {
2005 // There is no dependency between cur_group and prev_group; swap them.
2006 DepGraphIndex preprev_group = entries[prev_group].prev_group;
2007 // If PP, P, C, N were the old preprev, prev, cur, next groups, then the new
2008 // layout becomes [PP, C, P, N]. Update prev_groups to reflect that order.
2009 entries[next_group].prev_group = prev_group;
2010 entries[prev_group].prev_group = cur_group;
2011 entries[cur_group].prev_group = preprev_group;
2012 // The current group remains the same, but the groups before/after it have
2013 // changed.
2014 next_group = prev_group;
2015 prev_group = preprev_group;
2016 }
2017 }
2018 }
2019
2020 // Convert the entries back to linearization (overwriting the existing one).
2021 DepGraphIndex cur_group = entries[0].prev_group;
2022 DepGraphIndex done = 0;
2023 while (cur_group != SENTINEL) {
2024 DepGraphIndex cur_tx = cur_group;
2025 // Traverse the transactions of cur_group (from back to front), and write them in the
2026 // same order during odd passes, and reversed (front to back) in even passes.
2027 if (rev) {
2028 do {
// cur_tx is an entries index; subtract 1 to recover the transaction index.
2029 *(linearization.begin() + (done++)) = cur_tx - 1;
2030 cur_tx = entries[cur_tx].prev_tx;
2031 } while (cur_tx != NO_PREV_TX);
2032 } else {
2033 do {
2034 *(linearization.end() - (++done)) = cur_tx - 1;
2035 cur_tx = entries[cur_tx].prev_tx;
2036 } while (cur_tx != NO_PREV_TX);
2037 }
2038 cur_group = entries[cur_group].prev_group;
2039 }
2040 Assume(done == linearization.size());
2041 }
2042}
2043
2044} // namespace cluster_linearize
2045
2046#endif // BITCOIN_CLUSTER_LINEARIZE_H
#define LIFETIMEBOUND
Definition: attributes.h:16
int ret
int flags
Definition: bitcoin-tx.cpp:530
#define Assume(val)
Assume is the identity function.
Definition: check.h:128
Wrapper around FeeFrac & derived types, which adds a feerate-based ordering which treats equal-feerat...
Definition: feefrac.h:219
Wrapper around FeeFrac & derived types, which adds a total ordering which first sorts by feerate and ...
Definition: feefrac.h:290
xoroshiro128++ PRNG.
Definition: random.h:425
constexpr uint64_t rand64() noexcept
Definition: random.h:448
I randrange(I range) noexcept
Generate a random integer in the range [0..range), with range > 0.
Definition: random.h:254
bool randbool() noexcept
Generate a random boolean.
Definition: random.h:325
uint64_t randbits(int bits) noexcept
Generate a random (bits)-bit integer.
Definition: random.h:204
bool empty() const noexcept
Test whether the contents of this deque is empty.
Definition: vecdeque.h:310
void clear() noexcept
Resize the deque to be size 0.
Definition: vecdeque.h:126
void push_back(T &&elem)
Move-construct a new element at the end of the deque.
Definition: vecdeque.h:227
void pop_front()
Remove the first element of the deque.
Definition: vecdeque.h:250
size_t size() const noexcept
Get the number of elements in this deque.
Definition: vecdeque.h:312
void emplace_back(Args &&... args)
Construct a new element at the end of the deque.
Definition: vecdeque.h:219
T & front() noexcept
Get a mutable reference to the first element of the deque.
Definition: vecdeque.h:268
void reserve(size_t capacity)
Increase the capacity to capacity.
Definition: vecdeque.h:206
T & back() noexcept
Get a mutable reference to the last element of the deque.
Definition: vecdeque.h:282
Data structure that holds a transaction graph's preprocessed data (fee, size, ancestors,...
unsigned CountDependencies() const noexcept
const SetType & Ancestors(DepGraphIndex i) const noexcept
Get the ancestors of a given transaction i.
FeeFrac & FeeRate(DepGraphIndex i) noexcept
Get the mutable feerate of a given transaction i.
SetType GetReducedChildren(DepGraphIndex i) const noexcept
Compute the (reduced) set of children of node i in this graph.
SetType GetReducedParents(DepGraphIndex i) const noexcept
Compute the (reduced) set of parents of node i in this graph.
void AppendTopo(std::vector< DepGraphIndex > &list, const SetType &select) const noexcept
Append the entries of select to list in a topologically valid order.
const FeeFrac & FeeRate(DepGraphIndex i) const noexcept
Get the feerate of a given transaction i.
SetType GetConnectedComponent(const SetType &todo, DepGraphIndex tx) const noexcept
Get the connected component within the subset "todo" that contains tx (which must be in todo).
bool IsConnected() const noexcept
Determine if this entire graph is connected.
bool IsConnected(const SetType &subset) const noexcept
Determine if a subset is connected.
DepGraphIndex PositionRange() const noexcept
Get the range of positions in this DepGraph.
SetType FindConnectedComponent(const SetType &todo) const noexcept
Find some connected component within the subset "todo" of this graph.
DepGraphIndex AddTransaction(const FeeFrac &feefrac) noexcept
Add a new unconnected transaction to this transaction graph (in the first available position),...
void RemoveTransactions(const SetType &del) noexcept
Remove the specified positions from this DepGraph.
auto TxCount() const noexcept
Get the number of transactions in the graph.
std::vector< Entry > entries
Data for each transaction.
const SetType & Descendants(DepGraphIndex i) const noexcept
Get the descendants of a given transaction i.
friend bool operator==(const DepGraph &a, const DepGraph &b) noexcept
Equality operator (primarily for testing purposes).
size_t DynamicMemoryUsage() const noexcept
DepGraph() noexcept=default
SetType m_used
Which positions are used.
const SetType & Positions() const noexcept
Get the set of transactions positions in use.
void Compact() noexcept
Reduce memory usage if possible.
FeeFrac FeeRate(const SetType &elems) const noexcept
Compute the aggregate feerate of a set of nodes in this graph.
void AddDependencies(const SetType &parents, DepGraphIndex child) noexcept
Modify this transaction graph, adding multiple parents to a specified child.
bool IsAcyclic() const noexcept
Check if this graph is acyclic.
A default cost model for SFL for SetType=BitSet<64>, based on benchmarks.
void PickChunkToOptimizeEnd(int num_steps) noexcept
void PickMergeCandidateEnd(int num_steps) noexcept
void MakeTopologicalEnd(int num_chunks, int num_steps) noexcept
void StartOptimizingEnd(int num_chunks) noexcept
void MergeChunksMid(int num_txns) noexcept
void StartMinimizingEnd(int num_chunks) noexcept
void MergeChunksEnd(int num_steps) noexcept
void InitializeEnd(int num_txns, int num_deps) noexcept
void PickDependencyToSplitEnd(int num_txns) noexcept
void GetLinearizationEnd(int num_txns, int num_deps) noexcept
void MinimizeStepMid(int num_txns) noexcept
void DeactivateEnd(int num_deps) noexcept
void ActivateEnd(int num_deps) noexcept
void MinimizeStepEnd(bool split) noexcept
Class to represent the internal state of the spanning-forest linearization (SFL) algorithm.
std::pair< TxIdx, TxIdx > PickDependencyToSplit(SetIdx chunk_idx) noexcept
Find a (parent, child) dependency to deactivate in chunk_idx, or (-1, -1) if none.
void StartMinimizing() noexcept
Initialize data structure for minimizing the chunks.
SetIdx PickChunkToOptimize() noexcept
Determine the next chunk to optimize, or INVALID_SET_IDX if none.
std::vector< DepGraphIndex > GetLinearization(const StrongComparator< DepGraphIndex > auto &fallback_order) noexcept
Construct a topologically-valid linearization from the current forest state.
SetIdx MergeChunksDirected(SetIdx chunk_idx, SetIdx merge_chunk_idx) noexcept
Activate a dependency from chunk_idx to merge_chunk_idx (if !DownWard), or a dependency from merge_ch...
SpanningForestState(const DepGraph< SetType > &depgraph LIFETIMEBOUND, uint64_t rng_seed, const CostModel &cost=CostModel{}) noexcept
Construct a spanning forest for the given DepGraph, with every transaction in its own chunk (not topo...
static constexpr SetIdx INVALID_SET_IDX
An invalid SetIdx.
bool MinimizeStep() noexcept
Try to reduce a chunk's size.
std::vector< TxData > m_tx_data
Information about each transaction (and chunks).
std::vector< FeeFrac > GetDiagram() const noexcept
Get the diagram for the current state, which must be topological.
CostModel m_cost
Accounting for the cost of this computation.
SetIdx MergeChunks(SetIdx top_idx, SetIdx bottom_idx) noexcept
Activate a dependency from the bottom set to the top set, which must exist.
void LoadLinearization(std::span< const DepGraphIndex > old_linearization) noexcept
Load an existing linearization.
SetIdx MergeStep(SetIdx chunk_idx) noexcept
Perform an upward or downward merge step, on the specified chunk.
std::vector< SetInfo< SetType > > m_set_info
Information about each set (chunk, or active dependency top set).
SetIdx PickMergeCandidate(SetIdx chunk_idx) noexcept
Determine which chunk to merge chunk_idx with, or INVALID_SET_IDX if none.
SetIdx Activate(TxIdx parent_idx, TxIdx child_idx) noexcept
Make the inactive dependency from child to parent, which must not be in the same chunk already,...
TxIdx PickRandomTx(const SetType &tx_idxs) noexcept
Pick a random transaction within a set (which must be non-empty).
bool OptimizeStep() noexcept
Try to improve the forest.
void StartOptimizing() noexcept
Initialize the data structure for optimization.
std::pair< SetType, SetType > GetReachable(const SetType &tx_idxs) const noexcept
Find the set of out-of-chunk transactions reachable from tx_idxs, both in upwards and downwards direc...
std::vector< std::pair< SetType, SetType > > m_reachable
For each chunk, indexed by SetIdx, the set of out-of-chunk reachable transactions,...
void Improve(TxIdx parent_idx, TxIdx child_idx) noexcept
Split a chunk, and then merge the resulting two chunks to make the graph topological again.
SetType m_transaction_idxs
The set of all TxIdx's of transactions in the cluster indexing into m_tx_data.
InsecureRandomContext m_rng
Internal RNG.
DepGraphIndex TxIdx
Data type to represent indexing into m_tx_data.
const DepGraph< SetType > & m_depgraph
The DepGraph we are trying to linearize.
VecDeque< std::tuple< SetIdx, TxIdx, unsigned > > m_nonminimal_chunks
A FIFO of chunk indexes with a pivot transaction in them, and a flag to indicate their status:
std::conditional_t<(SetType::Size()<=0xff), uint8_t, std::conditional_t<(SetType::Size()<=0xffff), uint16_t, uint32_t > > SetIdx
Data type to represent indexing into m_set_info.
void MergeSequence(SetIdx chunk_idx) noexcept
Perform an upward or downward merge sequence on the specified chunk.
void MakeTopological() noexcept
Make state topological.
std::pair< SetIdx, SetIdx > Deactivate(TxIdx parent_idx, TxIdx child_idx) noexcept
Make a specified active dependency inactive.
uint64_t GetCost() const noexcept
Determine how much work was performed so far.
SetType m_chunk_idxs
The set of all chunk SetIdx's.
VecDeque< SetIdx > m_suboptimal_chunks
A FIFO of chunk SetIdxs for chunks that may be improved still.
SetType m_suboptimal_idxs
The set of all SetIdx's that appear in m_suboptimal_chunks.
void SanityCheck() const
Verify internal consistency of the data structure.
Concept for function objects that return std::strong_ordering when invoked with two Args.
std::vector< FeeFrac > ChunkLinearization(const DepGraph< SetType > &depgraph, std::span< const DepGraphIndex > linearization) noexcept
Compute the feerates of the chunks of linearization.
std::tuple< std::vector< DepGraphIndex >, bool, uint64_t > Linearize(const DepGraph< SetType > &depgraph, uint64_t max_cost, uint64_t rng_seed, const StrongComparator< DepGraphIndex > auto &fallback_order, std::span< const DepGraphIndex > old_linearization={}, bool is_topological=true) noexcept
Find or improve a linearization for a cluster.
std::compare_three_way IndexTxOrder
Simple default transaction ordering function for SpanningForestState::GetLinearization() and Lineariz...
uint32_t DepGraphIndex
Data type to represent transaction indices in DepGraphs and the clusters they represent.
void PostLinearize(const DepGraph< SetType > &depgraph, std::span< DepGraphIndex > linearization)
Improve a given linearization.
std::vector< SetInfo< SetType > > ChunkLinearizationInfo(const DepGraph< SetType > &depgraph, std::span< const DepGraphIndex > linearization) noexcept
Compute the chunks of linearization as SetInfos.
static size_t DynamicUsage(const int8_t &v)
Dynamic memory usage for built-in types is zero.
Definition: memusage.h:31
Definition: common.h:30
static std::vector< std::string > split(const std::string &str, const std::string &delims=" \t")
Definition: subprocess.h:311
Data structure storing a fee and size.
Definition: feefrac.h:22
int64_t fee
Definition: feefrac.h:89
bool IsEmpty() const noexcept
Check if this is empty (size and fee are 0).
Definition: feefrac.h:102
Information about a single transaction.
SetType descendants
All descendants of the transaction (including itself).
friend bool operator==(const Entry &, const Entry &) noexcept=default
Equality operator (primarily for testing purposes).
Entry() noexcept=default
Construct an empty entry.
FeeFrac feerate
Fee and size of transaction itself.
SetType ancestors
All ancestors of the transaction (including itself).
A set of transactions together with their aggregate feerate.
SetInfo(const DepGraph< SetType > &depgraph, DepGraphIndex pos) noexcept
Construct a SetInfo for a given transaction in a depgraph.
SetInfo operator-(const SetInfo &other) const noexcept
Compute the difference between this and other SetInfo (which must be a subset).
FeeFrac feerate
Their combined fee and size.
SetInfo() noexcept=default
Construct a SetInfo for the empty set.
void Set(const DepGraph< SetType > &depgraph, DepGraphIndex pos) noexcept
Add a transaction to this SetInfo (which must not yet be in it).
SetType transactions
The transactions in the set.
friend void swap(SetInfo &a, SetInfo &b) noexcept
Swap two SetInfo objects.
SetInfo(const DepGraph< SetType > &depgraph, const SetType &txn) noexcept
Construct a SetInfo for a set of transactions in a depgraph.
SetInfo & operator|=(const SetInfo &other) noexcept
Add the transactions of other to this SetInfo (no overlap allowed).
SetInfo & operator-=(const SetInfo &other) noexcept
Remove the transactions of other from this SetInfo (which must be a subset).
friend bool operator==(const SetInfo &, const SetInfo &) noexcept=default
Permit equality testing.
Structure with information about a single transaction.
SetType active_children
The set of child transactions reachable through an active dependency.
SetType children
The set of child transactions of this transaction.
SetIdx chunk_idx
Which chunk this transaction belongs to.
std::array< SetIdx, SetType::Size()> dep_top_idx
The top set for every active child dependency this transaction has, indexed by child TxIdx.
SetType parents
The set of parent transactions of this transaction.
static int count
assert(!tx.IsCoinBase())