Bitcoin Core 30.99.0
P2P Digital Currency
cluster_linearize.h
Go to the documentation of this file.
1// Copyright (c) The Bitcoin Core developers
2// Distributed under the MIT software license, see the accompanying
3// file COPYING or http://www.opensource.org/licenses/mit-license.php.
4
5#ifndef BITCOIN_CLUSTER_LINEARIZE_H
6#define BITCOIN_CLUSTER_LINEARIZE_H
7
8#include <algorithm>
9#include <cstdint>
10#include <numeric>
11#include <optional>
12#include <utility>
13#include <vector>
14
15#include <attributes.h>
16#include <memusage.h>
17#include <random.h>
18#include <span.h>
19#include <util/feefrac.h>
20#include <util/vecdeque.h>
21
23
25using DepGraphIndex = uint32_t;
26
29template<typename SetType>
31{
33 struct Entry
34 {
38 SetType ancestors;
40 SetType descendants;
41
43 friend bool operator==(const Entry&, const Entry&) noexcept = default;
44
46 Entry() noexcept = default;
48 Entry(const FeeFrac& f, const SetType& a, const SetType& d) noexcept : feerate(f), ancestors(a), descendants(d) {}
49 };
50
52 std::vector<Entry> entries;
53
55 SetType m_used;
56
57public:
59 friend bool operator==(const DepGraph& a, const DepGraph& b) noexcept
60 {
61 if (a.m_used != b.m_used) return false;
62 // Only compare the used positions within the entries vector.
63 for (auto idx : a.m_used) {
64 if (a.entries[idx] != b.entries[idx]) return false;
65 }
66 return true;
67 }
68
69 // Default constructors.
70 DepGraph() noexcept = default;
71 DepGraph(const DepGraph&) noexcept = default;
72 DepGraph(DepGraph&&) noexcept = default;
73 DepGraph& operator=(const DepGraph&) noexcept = default;
74 DepGraph& operator=(DepGraph&&) noexcept = default;
75
    /** Construct a copy of depgraph with its transactions remapped to new positions:
     *  the transaction at old position i is placed at position mapping[i]. mapping must
     *  cover depgraph's entire position range, map the used positions into
     *  [0, pos_range) without collisions (not checked), and leave no unused positions
     *  at the end of the new range. */
    DepGraph(const DepGraph<SetType>& depgraph, std::span<const DepGraphIndex> mapping, DepGraphIndex pos_range) noexcept : entries(pos_range)
    {
        Assume(mapping.size() == depgraph.PositionRange());
        Assume((pos_range == 0) == (depgraph.TxCount() == 0));
        for (DepGraphIndex i : depgraph.Positions()) {
            auto new_idx = mapping[i];
            Assume(new_idx < pos_range);
            // Add transaction: initially its own sole ancestor and descendant.
            entries[new_idx].ancestors = SetType::Singleton(new_idx);
            entries[new_idx].descendants = SetType::Singleton(new_idx);
            m_used.Set(new_idx);
            // Fill in fee and size.
            entries[new_idx].feerate = depgraph.entries[i].feerate;
        }
        for (DepGraphIndex i : depgraph.Positions()) {
            // Fill in dependencies by mapping direct parents.
            SetType parents;
            for (auto j : depgraph.GetReducedParents(i)) parents.Set(mapping[j]);
            AddDependencies(parents, mapping[i]);
        }
        // Verify that the provided pos_range was correct (no unused positions at the end).
        Assume(m_used.None() ? (pos_range == 0) : (pos_range == m_used.Last() + 1));
    }
114
    /** Get the set of positions currently occupied by transactions. */
    const SetType& Positions() const noexcept { return m_used; }
    /** Get the current position range (size of the entries vector; one past the highest
     *  position that may be in use). */
    DepGraphIndex PositionRange() const noexcept { return entries.size(); }
    /** Get the number of transactions in the graph. */
    auto TxCount() const noexcept { return m_used.Count(); }
    /** Get the feerate of the transaction at position i. */
    const FeeFrac& FeeRate(DepGraphIndex i) const noexcept { return entries[i].feerate; }
    /** Get a mutable reference to the feerate of the transaction at position i. */
    FeeFrac& FeeRate(DepGraphIndex i) noexcept { return entries[i].feerate; }
    /** Get the ancestor set of the transaction at position i (includes i itself). */
    const SetType& Ancestors(DepGraphIndex i) const noexcept { return entries[i].ancestors; }
    /** Get the descendant set of the transaction at position i (includes i itself). */
    const SetType& Descendants(DepGraphIndex i) const noexcept { return entries[i].descendants; }
129
135 DepGraphIndex AddTransaction(const FeeFrac& feefrac) noexcept
136 {
137 static constexpr auto ALL_POSITIONS = SetType::Fill(SetType::Size());
138 auto available = ALL_POSITIONS - m_used;
139 Assume(available.Any());
140 DepGraphIndex new_idx = available.First();
141 if (new_idx == entries.size()) {
142 entries.emplace_back(feefrac, SetType::Singleton(new_idx), SetType::Singleton(new_idx));
143 } else {
144 entries[new_idx] = Entry(feefrac, SetType::Singleton(new_idx), SetType::Singleton(new_idx));
145 }
146 m_used.Set(new_idx);
147 return new_idx;
148 }
149
159 void RemoveTransactions(const SetType& del) noexcept
160 {
161 m_used -= del;
162 // Remove now-unused trailing entries.
163 while (!entries.empty() && !m_used[entries.size() - 1]) {
164 entries.pop_back();
165 }
166 // Remove the deleted transactions from ancestors/descendants of other transactions. Note
167 // that the deleted positions will retain old feerate and dependency information. This does
168 // not matter as they will be overwritten by AddTransaction if they get used again.
169 for (auto& entry : entries) {
170 entry.ancestors &= m_used;
171 entry.descendants &= m_used;
172 }
173 }
174
    /** Record that child depends on each transaction in parents, updating the
     *  ancestor/descendant sets of all affected transactions. child and all of parents
     *  must already be present in the graph. */
    void AddDependencies(const SetType& parents, DepGraphIndex child) noexcept
    {
        Assume(m_used[child]);
        Assume(parents.IsSubsetOf(m_used));
        // Compute the ancestors of parents that are not already ancestors of child.
        SetType par_anc;
        for (auto par : parents - Ancestors(child)) {
            par_anc |= Ancestors(par);
        }
        par_anc -= Ancestors(child);
        // Bail out if there are no such ancestors (nothing changes in that case).
        if (par_anc.None()) return;
        // To each such ancestor, add as descendants the descendants of the child.
        const auto& chl_des = entries[child].descendants;
        for (auto anc_of_par : par_anc) {
            entries[anc_of_par].descendants |= chl_des;
        }
        // To each descendant of the child, add those ancestors.
        for (auto dec_of_chl : Descendants(child)) {
            entries[dec_of_chl].ancestors |= par_anc;
        }
    }
201
    /** Compute the minimal set of direct parents of transaction i: its ancestors
     *  (excluding i itself), with every transaction that is also an ancestor of another
     *  remaining parent removed. */
    SetType GetReducedParents(DepGraphIndex i) const noexcept
    {
        SetType parents = Ancestors(i);
        parents.Reset(i);
        for (auto parent : parents) {
            // Skip entries already eliminated by an earlier iteration.
            // NOTE(review): this relies on iterating `parents` being safe while it is
            // mutated (e.g. the iterator working on a snapshot) — confirm against the
            // SetType implementation.
            if (parents[parent]) {
                parents -= Ancestors(parent);
                parents.Set(parent);
            }
        }
        return parents;
    }
222
    /** Compute the minimal set of direct children of transaction i: its descendants
     *  (excluding i itself), with every transaction that is also a descendant of another
     *  remaining child removed. Mirror image of GetReducedParents. */
    SetType GetReducedChildren(DepGraphIndex i) const noexcept
    {
        SetType children = Descendants(i);
        children.Reset(i);
        for (auto child : children) {
            // Skip entries already eliminated by an earlier iteration (same iteration
            // subtlety as in GetReducedParents).
            if (children[child]) {
                children -= Descendants(child);
                children.Set(child);
            }
        }
        return children;
    }
243
248 FeeFrac FeeRate(const SetType& elems) const noexcept
249 {
250 FeeFrac ret;
251 for (auto pos : elems) ret += entries[pos].feerate;
252 return ret;
253 }
254
265 SetType GetConnectedComponent(const SetType& todo, DepGraphIndex tx) const noexcept
266 {
267 Assume(todo[tx]);
268 Assume(todo.IsSubsetOf(m_used));
269 auto to_add = SetType::Singleton(tx);
270 SetType ret;
271 do {
272 SetType old = ret;
273 for (auto add : to_add) {
274 ret |= Descendants(add);
275 ret |= Ancestors(add);
276 }
277 ret &= todo;
278 to_add = ret - old;
279 } while (to_add.Any());
280 return ret;
281 }
282
290 SetType FindConnectedComponent(const SetType& todo) const noexcept
291 {
292 if (todo.None()) return todo;
293 return GetConnectedComponent(todo, todo.First());
294 }
295
300 bool IsConnected(const SetType& subset) const noexcept
301 {
302 return FindConnectedComponent(subset) == subset;
303 }
304
309 bool IsConnected() const noexcept { return IsConnected(m_used); }
310
315 void AppendTopo(std::vector<DepGraphIndex>& list, const SetType& select) const noexcept
316 {
317 DepGraphIndex old_len = list.size();
318 for (auto i : select) list.push_back(i);
319 std::sort(list.begin() + old_len, list.end(), [&](DepGraphIndex a, DepGraphIndex b) noexcept {
320 const auto a_anc_count = entries[a].ancestors.Count();
321 const auto b_anc_count = entries[b].ancestors.Count();
322 if (a_anc_count != b_anc_count) return a_anc_count < b_anc_count;
323 return a < b;
324 });
325 }
326
328 bool IsAcyclic() const noexcept
329 {
330 for (auto i : Positions()) {
331 if ((Ancestors(i) & Descendants(i)) != SetType::Singleton(i)) {
332 return false;
333 }
334 }
335 return true;
336 }
337
338 unsigned CountDependencies() const noexcept
339 {
340 unsigned ret = 0;
341 for (auto i : Positions()) {
342 ret += GetReducedParents(i).Count();
343 }
344 return ret;
345 }
346
    /** Release unused capacity held by the entries vector. */
    void Compact() noexcept
    {
        entries.shrink_to_fit();
    }

    size_t DynamicMemoryUsage() const noexcept
    {
        // NOTE(review): the body of this function is missing from this extract —
        // presumably a memusage-based computation over `entries` (the file includes
        // <memusage.h>); confirm against the original header.
    }
357};
358
360template<typename SetType>
362{
367
    /** Construct a SetInfo for the empty set (default-initialized feerate). */
    SetInfo() noexcept = default;

    /** Construct a SetInfo for a specified set and feerate. */
    SetInfo(const SetType& txn, const FeeFrac& fr) noexcept : transactions(txn), feerate(fr) {}

    /** Construct a SetInfo for the single transaction at position pos in depgraph. */
    explicit SetInfo(const DepGraph<SetType>& depgraph, DepGraphIndex pos) noexcept :
        transactions(SetType::Singleton(pos)), feerate(depgraph.FeeRate(pos)) {}

    /** Construct a SetInfo for a given set of transactions in depgraph. */
    explicit SetInfo(const DepGraph<SetType>& depgraph, const SetType& txn) noexcept :
        transactions(txn), feerate(depgraph.FeeRate(txn)) {}
381
    /** Add the transaction at position pos in depgraph to this SetInfo; pos must not
     *  already be in the set. */
    void Set(const DepGraph<SetType>& depgraph, DepGraphIndex pos) noexcept
    {
        Assume(!transactions[pos]);
        transactions.Set(pos);
        feerate += depgraph.FeeRate(pos);
    }

    /** Add the contents of another SetInfo; the two sets must be disjoint. */
    SetInfo& operator|=(const SetInfo& other) noexcept
    {
        Assume(!transactions.Overlaps(other.transactions));
        transactions |= other.transactions;
        feerate += other.feerate;
        return *this;
    }

    /** Remove the contents of another SetInfo; other must be a subset of this. */
    SetInfo& operator-=(const SetInfo& other) noexcept
    {
        Assume(other.transactions.IsSubsetOf(transactions));
        transactions -= other.transactions;
        feerate -= other.feerate;
        return *this;
    }

    /** Construct the SetInfo for the difference of this and other (which must be a
     *  subset of this). */
    SetInfo operator-(const SetInfo& other) const noexcept
    {
        Assume(other.transactions.IsSubsetOf(transactions));
        return {transactions - other.transactions, feerate - other.feerate};
    }

    /** Swap two SetInfo objects. */
    friend void swap(SetInfo& a, SetInfo& b) noexcept
    {
        swap(a.transactions, b.transactions);
        swap(a.feerate, b.feerate);
    }

    /** Permit equality testing. */
    friend bool operator==(const SetInfo&, const SetInfo&) noexcept = default;
424};
425
427template<typename SetType>
428std::vector<SetInfo<SetType>> ChunkLinearizationInfo(const DepGraph<SetType>& depgraph, std::span<const DepGraphIndex> linearization) noexcept
429{
430 std::vector<SetInfo<SetType>> ret;
431 for (DepGraphIndex i : linearization) {
433 SetInfo<SetType> new_chunk(depgraph, i);
434 // As long as the new chunk has a higher feerate than the last chunk so far, absorb it.
435 while (!ret.empty() && new_chunk.feerate >> ret.back().feerate) {
436 new_chunk |= ret.back();
437 ret.pop_back();
438 }
439 // Actually move that new chunk into the chunking.
440 ret.emplace_back(std::move(new_chunk));
441 }
442 return ret;
443}
444
447template<typename SetType>
448std::vector<FeeFrac> ChunkLinearization(const DepGraph<SetType>& depgraph, std::span<const DepGraphIndex> linearization) noexcept
449{
450 std::vector<FeeFrac> ret;
451 for (DepGraphIndex i : linearization) {
453 auto new_chunk = depgraph.FeeRate(i);
454 // As long as the new chunk has a higher feerate than the last chunk so far, absorb it.
455 while (!ret.empty() && new_chunk >> ret.back()) {
456 new_chunk += ret.back();
457 ret.pop_back();
458 }
459 // Actually move that new chunk into the chunking.
460 ret.push_back(std::move(new_chunk));
461 }
462 return ret;
463}
464
466template<typename F, typename Arg>
468 std::regular_invocable<F, Arg, Arg> &&
469 std::is_same_v<std::invoke_result_t<F, Arg, Arg>, std::strong_ordering>;
470
473using IndexTxOrder = std::compare_three_way;
474
497{
    //! Accumulated cost, in abstract cost units.
    uint64_t m_cost{0};

public:
    // Each XxxBegin()/XxxEnd() pair brackets the corresponding operation in the
    // spanning-forest linearizer; the End() call charges a cost linear in the reported
    // work parameters. NOTE(review): the multiplier constants below are presumably
    // benchmark-derived weights — confirm against upstream before changing them.
    inline void InitializeBegin() noexcept {}
    inline void InitializeEnd(int num_txns, int num_deps) noexcept
    {
        // Cost of initialization.
        m_cost += 39 * num_txns;
        // Cost of producing linearization at the end (charged up front, see below).
        m_cost += 48 * num_txns + 4 * num_deps;
    }
    inline void GetLinearizationBegin() noexcept {}
    inline void GetLinearizationEnd(int num_txns, int num_deps) noexcept
    {
        // Note that we account for the cost of the final linearization at the beginning (see
        // InitializeEnd), because the cost budget decision needs to be made before calling
        // GetLinearization.
        // This function exists here to allow overriding it easily for benchmark purposes.
    }
    inline void MakeTopologicalBegin() noexcept {}
    inline void MakeTopologicalEnd(int num_chunks, int num_steps) noexcept
    {
        m_cost += 20 * num_chunks + 28 * num_steps;
    }
    inline void StartOptimizingBegin() noexcept {}
    inline void StartOptimizingEnd(int num_chunks) noexcept { m_cost += 13 * num_chunks; }
    inline void ActivateBegin() noexcept {}
    inline void ActivateEnd(int num_deps) noexcept { m_cost += 10 * num_deps + 1; }
    inline void DeactivateBegin() noexcept {}
    inline void DeactivateEnd(int num_deps) noexcept { m_cost += 11 * num_deps + 8; }
    inline void MergeChunksBegin() noexcept {}
    inline void MergeChunksMid(int num_txns) noexcept { m_cost += 2 * num_txns; }
    inline void MergeChunksEnd(int num_steps) noexcept { m_cost += 3 * num_steps + 5; }
    inline void PickMergeCandidateBegin() noexcept {}
    inline void PickMergeCandidateEnd(int num_steps) noexcept { m_cost += 8 * num_steps; }
    inline void PickChunkToOptimizeBegin() noexcept {}
    inline void PickChunkToOptimizeEnd(int num_steps) noexcept { m_cost += num_steps + 4; }
    inline void PickDependencyToSplitBegin() noexcept {}
    inline void PickDependencyToSplitEnd(int num_txns) noexcept { m_cost += 8 * num_txns + 9; }
    inline void StartMinimizingBegin() noexcept {}
    inline void StartMinimizingEnd(int num_chunks) noexcept { m_cost += 18 * num_chunks; }
    inline void MinimizeStepBegin() noexcept {}
    inline void MinimizeStepMid(int num_txns) noexcept { m_cost += 11 * num_txns + 11; }
    inline void MinimizeStepEnd(bool split) noexcept { m_cost += 17 * split + 7; }

    /** Retrieve the total accumulated cost so far. */
    inline uint64_t GetCost() const noexcept { return m_cost; }
544};
545
717template<typename SetType, typename CostModel = SFLDefaultCostModel>
719{
720private:
723
728 using SetIdx = std::conditional_t<(SetType::Size() <= 0xff),
729 uint8_t,
730 std::conditional_t<(SetType::Size() <= 0xffff),
731 uint16_t,
732 uint32_t>>;
734 static constexpr SetIdx INVALID_SET_IDX = SetIdx(-1);
735
737 struct TxData {
740 std::array<SetIdx, SetType::Size()> dep_top_idx;
742 SetType parents;
744 SetType children;
749 };
750
762 std::vector<TxData> m_tx_data;
764 std::vector<SetInfo<SetType>> m_set_info;
767 std::vector<std::pair<SetType, SetType>> m_reachable;
777
780
782 CostModel m_cost;
783
785 TxIdx PickRandomTx(const SetType& tx_idxs) noexcept
786 {
787 Assume(tx_idxs.Any());
788 unsigned pos = m_rng.randrange<unsigned>(tx_idxs.Count());
789 for (auto tx_idx : tx_idxs) {
790 if (pos == 0) return tx_idx;
791 --pos;
792 }
793 Assume(false);
794 return TxIdx(-1);
795 }
796
800 std::pair<SetType, SetType> GetReachable(const SetType& tx_idxs) const noexcept
801 {
802 SetType parents, children;
803 for (auto tx_idx : tx_idxs) {
804 const auto& tx_data = m_tx_data[tx_idx];
805 parents |= tx_data.parents;
806 children |= tx_data.children;
807 }
808 return {parents - tx_idxs, children - tx_idxs};
809 }
810
    /** Activate the currently inactive dependency parent_idx -> child_idx, merging the
     *  chunks containing the parent and the child. The parent's old chunk set becomes
     *  the top set recorded for the newly active dependency; the child's old chunk set
     *  is grown into the merged chunk, whose set index is returned. */
    SetIdx Activate(TxIdx parent_idx, TxIdx child_idx) noexcept
    {
        m_cost.ActivateBegin();
        // Gather and check information about the parent and child transactions.
        auto& parent_data = m_tx_data[parent_idx];
        auto& child_data = m_tx_data[child_idx];
        Assume(parent_data.children[child_idx]);
        Assume(!parent_data.active_children[child_idx]);
        // Get the set index of the chunks the parent and child are currently in. The parent chunk
        // will become the top set of the newly activated dependency, while the child chunk will be
        // grown to become the merged chunk.
        auto parent_chunk_idx = parent_data.chunk_idx;
        auto child_chunk_idx = child_data.chunk_idx;
        Assume(parent_chunk_idx != child_chunk_idx);
        Assume(m_chunk_idxs[parent_chunk_idx]);
        Assume(m_chunk_idxs[child_chunk_idx]);
        auto& top_info = m_set_info[parent_chunk_idx];
        auto& bottom_info = m_set_info[child_chunk_idx];

        // Consider the following example:
        //
        //    A     A     There are two chunks, ABC and DEF, and the inactive E->C
        //   / \   / \    dependency is activated, resulting in a single chunk ABCDEF.
        //  B   C B   C
        //  :     |  ==>  Dependency | top set before | top set after | change
        //  D   E D   E     B->A     |      AC        |     ACDEF     |  +DEF
        //   \ /   \ /      C->A     |      AB        |      AB       |
        //    F     F       F->D     |      D         |      D        |
        //                  F->E     |      E         |      ABCE     |  +ABC
        //
        // The common pattern here is that any dependency which has the parent or child of the
        // dependency being activated (E->C here) in its top set, will have the opposite part added
        // to it. This is true for B->A and F->E, but not for C->A and F->D.
        //
        // Traverse the old parent chunk top_info (ABC in example), and add bottom_info (DEF) to
        // every dependency's top set which has the parent (C) in it. At the same time, change the
        // chunk_idx for each to be child_chunk_idx, which becomes the set for the merged chunk.
        for (auto tx_idx : top_info.transactions) {
            auto& tx_data = m_tx_data[tx_idx];
            tx_data.chunk_idx = child_chunk_idx;
            for (auto dep_child_idx : tx_data.active_children) {
                auto& dep_top_info = m_set_info[tx_data.dep_top_idx[dep_child_idx]];
                if (dep_top_info.transactions[parent_idx]) dep_top_info |= bottom_info;
            }
        }
        // Traverse the old child chunk bottom_info (DEF in example), and add top_info (ABC) to
        // every dependency's top set which has the child (E) in it.
        for (auto tx_idx : bottom_info.transactions) {
            auto& tx_data = m_tx_data[tx_idx];
            for (auto dep_child_idx : tx_data.active_children) {
                auto& dep_top_info = m_set_info[tx_data.dep_top_idx[dep_child_idx]];
                if (dep_top_info.transactions[child_idx]) dep_top_info |= top_info;
            }
        }
        // Merge top_info into bottom_info, which becomes the merged chunk.
        bottom_info |= top_info;
        // Compute merged sets of reachable transactions from the new chunk, based on the input
        // chunks' reachable sets.
        m_reachable[child_chunk_idx].first |= m_reachable[parent_chunk_idx].first;
        m_reachable[child_chunk_idx].second |= m_reachable[parent_chunk_idx].second;
        m_reachable[child_chunk_idx].first -= bottom_info.transactions;
        m_reachable[child_chunk_idx].second -= bottom_info.transactions;
        // Make parent chunk the set for the new active dependency.
        parent_data.dep_top_idx[child_idx] = parent_chunk_idx;
        parent_data.active_children.Set(child_idx);
        m_chunk_idxs.Reset(parent_chunk_idx);
        // Return the newly merged chunk.
        m_cost.ActivateEnd(/*num_deps=*/bottom_info.transactions.Count() - 1);
        return child_chunk_idx;
    }
883
    /** Deactivate the currently active dependency parent_idx -> child_idx, splitting the
     *  chunk that contains it: the dependency's top set becomes the parent chunk, the
     *  remainder becomes the child chunk. Returns {parent_chunk_idx, child_chunk_idx}. */
    std::pair<SetIdx, SetIdx> Deactivate(TxIdx parent_idx, TxIdx child_idx) noexcept
    {
        m_cost.DeactivateBegin();
        // Gather and check information about the parent transactions.
        auto& parent_data = m_tx_data[parent_idx];
        Assume(parent_data.children[child_idx]);
        Assume(parent_data.active_children[child_idx]);
        // Get the top set of the active dependency (which will become the parent chunk) and the
        // chunk set the transactions are currently in (which will become the bottom chunk).
        auto parent_chunk_idx = parent_data.dep_top_idx[child_idx];
        auto child_chunk_idx = parent_data.chunk_idx;
        Assume(parent_chunk_idx != child_chunk_idx);
        Assume(m_chunk_idxs[child_chunk_idx]);
        Assume(!m_chunk_idxs[parent_chunk_idx]); // top set, not a chunk
        auto& top_info = m_set_info[parent_chunk_idx];
        auto& bottom_info = m_set_info[child_chunk_idx];

        // Remove the active dependency.
        parent_data.active_children.Reset(child_idx);
        m_chunk_idxs.Set(parent_chunk_idx);
        auto ntx = bottom_info.transactions.Count();
        // Subtract the top_info from the bottom_info, as it will become the child chunk.
        bottom_info -= top_info;
        // See the comment above in Activate(). We perform the opposite operations here, removing
        // instead of adding. Simultaneously, aggregate the top/bottom's union of parents/children.
        SetType top_parents, top_children;
        for (auto tx_idx : top_info.transactions) {
            auto& tx_data = m_tx_data[tx_idx];
            tx_data.chunk_idx = parent_chunk_idx;
            top_parents |= tx_data.parents;
            top_children |= tx_data.children;
            for (auto dep_child_idx : tx_data.active_children) {
                auto& dep_top_info = m_set_info[tx_data.dep_top_idx[dep_child_idx]];
                if (dep_top_info.transactions[parent_idx]) dep_top_info -= bottom_info;
            }
        }
        SetType bottom_parents, bottom_children;
        for (auto tx_idx : bottom_info.transactions) {
            auto& tx_data = m_tx_data[tx_idx];
            bottom_parents |= tx_data.parents;
            bottom_children |= tx_data.children;
            for (auto dep_child_idx : tx_data.active_children) {
                auto& dep_top_info = m_set_info[tx_data.dep_top_idx[dep_child_idx]];
                if (dep_top_info.transactions[child_idx]) dep_top_info -= top_info;
            }
        }
        // Compute the new sets of reachable transactions for each new chunk, based on the
        // top/bottom parents and children computed above.
        m_reachable[parent_chunk_idx].first = top_parents - top_info.transactions;
        m_reachable[parent_chunk_idx].second = top_children - top_info.transactions;
        m_reachable[child_chunk_idx].first = bottom_parents - bottom_info.transactions;
        m_reachable[child_chunk_idx].second = bottom_children - bottom_info.transactions;
        // Return the two new set idxs.
        m_cost.DeactivateEnd(/*num_deps=*/ntx - 1);
        return {parent_chunk_idx, child_chunk_idx};
    }
942
945 SetIdx MergeChunks(SetIdx top_idx, SetIdx bottom_idx) noexcept
946 {
947 m_cost.MergeChunksBegin();
948 Assume(m_chunk_idxs[top_idx]);
949 Assume(m_chunk_idxs[bottom_idx]);
950 auto& top_chunk_info = m_set_info[top_idx];
951 auto& bottom_chunk_info = m_set_info[bottom_idx];
952 // Count the number of dependencies between bottom_chunk and top_chunk.
953 unsigned num_deps{0};
954 for (auto tx_idx : top_chunk_info.transactions) {
955 auto& tx_data = m_tx_data[tx_idx];
956 num_deps += (tx_data.children & bottom_chunk_info.transactions).Count();
957 }
958 m_cost.MergeChunksMid(/*num_txns=*/top_chunk_info.transactions.Count());
959 Assume(num_deps > 0);
960 // Uniformly randomly pick one of them and activate it.
961 unsigned pick = m_rng.randrange(num_deps);
962 unsigned num_steps = 0;
963 for (auto tx_idx : top_chunk_info.transactions) {
964 ++num_steps;
965 auto& tx_data = m_tx_data[tx_idx];
966 auto intersect = tx_data.children & bottom_chunk_info.transactions;
967 auto count = intersect.Count();
968 if (pick < count) {
969 for (auto child_idx : intersect) {
970 if (pick == 0) {
971 m_cost.MergeChunksEnd(/*num_steps=*/num_steps);
972 return Activate(tx_idx, child_idx);
973 }
974 --pick;
975 }
976 Assume(false);
977 break;
978 }
979 pick -= count;
980 }
981 Assume(false);
982 return INVALID_SET_IDX;
983 }
984
987 template<bool DownWard>
988 SetIdx MergeChunksDirected(SetIdx chunk_idx, SetIdx merge_chunk_idx) noexcept
989 {
990 if constexpr (DownWard) {
991 return MergeChunks(chunk_idx, merge_chunk_idx);
992 } else {
993 return MergeChunks(merge_chunk_idx, chunk_idx);
994 }
995 }
996
    /** Among the chunks reachable from chunk_idx (below it if DownWard, above it
     *  otherwise), find the best one to merge with, or INVALID_SET_IDX if none
     *  qualifies. */
    template<bool DownWard>
    SetIdx PickMergeCandidate(SetIdx chunk_idx) noexcept
    {
        m_cost.PickMergeCandidateBegin();
        Assume(m_chunk_idxs[chunk_idx]);
        auto& chunk_info = m_set_info[chunk_idx];
        // Iterate over all chunks reachable from this one. For those depended-on chunks,
        // remember the highest-feerate (if DownWard) or lowest-feerate (if !DownWard) one.
        // If multiple equal-feerate candidate chunks to merge with exist, pick a random one
        // among them.

        // Feerate of the best candidate so far, initialized to this chunk's own feerate;
        // candidates must match or beat it in the direction required by DownWard.
        FeeFrac best_other_chunk_feerate = chunk_info.feerate;
        // Set index of the best candidate so far (INVALID_SET_IDX means none found yet).
        SetIdx best_other_chunk_idx = INVALID_SET_IDX;
        // Random tiebreak value of the best candidate so far; used to pick uniformly
        // among equal-feerate candidates.
        uint64_t best_other_chunk_tiebreak{0};

        // Transactions reachable from this chunk whose chunks still need inspecting.
        auto todo = DownWard ? m_reachable[chunk_idx].second : m_reachable[chunk_idx].first;
        unsigned steps = 0;
        while (todo.Any()) {
            ++steps;
            // Find a chunk for a transaction in todo, and remove all its transactions from todo.
            auto reached_chunk_idx = m_tx_data[todo.First()].chunk_idx;
            auto& reached_chunk_info = m_set_info[reached_chunk_idx];
            todo -= reached_chunk_info.transactions;
            // See if it has an acceptable feerate.
            auto cmp = DownWard ? FeeRateCompare(best_other_chunk_feerate, reached_chunk_info.feerate)
                                : FeeRateCompare(reached_chunk_info.feerate, best_other_chunk_feerate);
            if (cmp > 0) continue;
            uint64_t tiebreak = m_rng.rand64();
            if (cmp < 0 || tiebreak >= best_other_chunk_tiebreak) {
                best_other_chunk_feerate = reached_chunk_info.feerate;
                best_other_chunk_idx = reached_chunk_idx;
                best_other_chunk_tiebreak = tiebreak;
            }
        }
        Assume(steps <= m_set_info.size());

        m_cost.PickMergeCandidateEnd(/*num_steps=*/steps);
        return best_other_chunk_idx;
    }
1045
1048 template<bool DownWard>
1049 SetIdx MergeStep(SetIdx chunk_idx) noexcept
1050 {
1051 auto merge_chunk_idx = PickMergeCandidate<DownWard>(chunk_idx);
1052 if (merge_chunk_idx == INVALID_SET_IDX) return INVALID_SET_IDX;
1053 chunk_idx = MergeChunksDirected<DownWard>(chunk_idx, merge_chunk_idx);
1054 Assume(chunk_idx != INVALID_SET_IDX);
1055 return chunk_idx;
1056 }
1057
1059 template<bool DownWard>
1060 void MergeSequence(SetIdx chunk_idx) noexcept
1061 {
1062 Assume(m_chunk_idxs[chunk_idx]);
1063 while (true) {
1064 auto merged_chunk_idx = MergeStep<DownWard>(chunk_idx);
1065 if (merged_chunk_idx == INVALID_SET_IDX) break;
1066 chunk_idx = merged_chunk_idx;
1067 }
1068 // Add the chunk to the queue of improvable chunks, if it wasn't already there.
1069 if (!m_suboptimal_idxs[chunk_idx]) {
1070 m_suboptimal_idxs.Set(chunk_idx);
1071 m_suboptimal_chunks.push_back(chunk_idx);
1072 }
1073 }
1074
    /** Attempt to improve the linearization by deactivating the active dependency
     *  parent_idx -> child_idx (splitting its chunk in two), then restoring topological
     *  validity through merges, queuing resulting chunks for further optimization. */
    void Improve(TxIdx parent_idx, TxIdx child_idx) noexcept
    {
        // Deactivate the specified dependency, splitting it into two new chunks: a top containing
        // the parent, and a bottom containing the child. The top should have a higher feerate.
        auto [parent_chunk_idx, child_chunk_idx] = Deactivate(parent_idx, child_idx);

        // At this point we have exactly two chunks which may violate topology constraints (the
        // parent chunk and child chunk that were produced by deactivation). We can fix
        // these using just merge sequences, one upwards and one downwards, avoiding the need for a
        // full MakeTopological.
        const auto& parent_reachable = m_reachable[parent_chunk_idx].first;
        const auto& child_chunk_txn = m_set_info[child_chunk_idx].transactions;
        if (parent_reachable.Overlaps(child_chunk_txn)) {
            // The parent chunk has a dependency on a transaction in the child chunk. In this case,
            // the parent needs to merge back with the child chunk (a self-merge), and no other
            // merges are needed. Special-case this, so the overhead of PickMergeCandidate and
            // MergeSequence can be avoided.

            // In the self-merge, the roles reverse: the parent chunk (from the split) depends
            // on the child chunk, so child_chunk_idx is the "top" and parent_chunk_idx is the
            // "bottom" for MergeChunks.
            auto merged_chunk_idx = MergeChunks(child_chunk_idx, parent_chunk_idx);
            // Queue the merged chunk for further optimization if not already queued.
            if (!m_suboptimal_idxs[merged_chunk_idx]) {
                m_suboptimal_idxs.Set(merged_chunk_idx);
                m_suboptimal_chunks.push_back(merged_chunk_idx);
            }
        } else {
            // Merge the top chunk with lower-feerate chunks it depends on.
            MergeSequence<false>(parent_chunk_idx);
            // Merge the bottom chunk with higher-feerate chunks that depend on it.
            MergeSequence<true>(child_chunk_idx);
        }
    }
1110
1113 {
1114 m_cost.PickChunkToOptimizeBegin();
1115 unsigned steps{0};
1116 while (!m_suboptimal_chunks.empty()) {
1117 ++steps;
1118 // Pop an entry from the potentially-suboptimal chunk queue.
1119 SetIdx chunk_idx = m_suboptimal_chunks.front();
1120 Assume(m_suboptimal_idxs[chunk_idx]);
1121 m_suboptimal_idxs.Reset(chunk_idx);
1123 if (m_chunk_idxs[chunk_idx]) {
1124 m_cost.PickChunkToOptimizeEnd(/*num_steps=*/steps);
1125 return chunk_idx;
1126 }
1127 // If what was popped is not currently a chunk, continue. This may
1128 // happen when a split chunk merges in Improve() with one or more existing chunks that
1129 // are themselves on the suboptimal queue already.
1130 }
1131 m_cost.PickChunkToOptimizeEnd(/*num_steps=*/steps);
1132 return INVALID_SET_IDX;
1133 }
1134
    /** Among the active dependencies inside chunk chunk_idx whose top set has a strictly
     *  higher feerate than the chunk itself, pick one uniformly at random. Returns
     *  {TxIdx(-1), TxIdx(-1)} if no such dependency exists. */
    std::pair<TxIdx, TxIdx> PickDependencyToSplit(SetIdx chunk_idx) noexcept
    {
        m_cost.PickDependencyToSplitBegin();
        Assume(m_chunk_idxs[chunk_idx]);
        auto& chunk_info = m_set_info[chunk_idx];

        // Remember the best dependency {par, chl} seen so far.
        std::pair<TxIdx, TxIdx> candidate_dep = {TxIdx(-1), TxIdx(-1)};
        uint64_t candidate_tiebreak = 0;
        // Iterate over all transactions.
        for (auto tx_idx : chunk_info.transactions) {
            const auto& tx_data = m_tx_data[tx_idx];
            // Iterate over all active child dependencies of the transaction.
            for (auto child_idx : tx_data.active_children) {
                auto& dep_top_info = m_set_info[tx_data.dep_top_idx[child_idx]];
                // Skip if this dependency is ineligible (the top chunk that would be created
                // does not have higher feerate than the chunk it is currently part of).
                auto cmp = FeeRateCompare(dep_top_info.feerate, chunk_info.feerate);
                if (cmp <= 0) continue;
                // Generate a random tiebreak for this dependency, and reject it if its tiebreak
                // is worse than the best so far. This means that among all eligible
                // dependencies, a uniformly random one will be chosen.
                uint64_t tiebreak = m_rng.rand64();
                if (tiebreak < candidate_tiebreak) continue;
                // Remember this as our (new) candidate dependency.
                candidate_dep = {tx_idx, child_idx};
                candidate_tiebreak = tiebreak;
            }
        }
        m_cost.PickDependencyToSplitEnd(/*num_txns=*/chunk_info.transactions.Count());
        return candidate_dep;
    }
1168
1169public:
    /** Construct a spanning-forest linearization state for depgraph: every transaction
     *  starts as its own singleton chunk, with no dependencies active. depgraph must
     *  outlive this object (LIFETIMEBOUND). rng_seed seeds the deterministic RNG used
     *  for random choices; cost is the cost-accounting model to charge work to. */
    explicit SpanningForestState(const DepGraph<SetType>& depgraph LIFETIMEBOUND, uint64_t rng_seed, const CostModel& cost = CostModel{}) noexcept :
        m_rng(rng_seed), m_depgraph(depgraph), m_cost(cost)
    {
        m_cost.InitializeBegin();
        m_transaction_idxs = depgraph.Positions();
        auto num_transactions = m_transaction_idxs.Count();
        m_tx_data.resize(depgraph.PositionRange());
        m_set_info.resize(num_transactions);
        m_reachable.resize(num_transactions);
        size_t num_chunks = 0;
        size_t num_deps = 0;
        for (auto tx_idx : m_transaction_idxs) {
            // Fill in transaction data: direct (reduced) parents, and the reverse
            // (children) links on those parents.
            auto& tx_data = m_tx_data[tx_idx];
            tx_data.parents = depgraph.GetReducedParents(tx_idx);
            for (auto parent_idx : tx_data.parents) {
                m_tx_data[parent_idx].children.Set(tx_idx);
            }
            num_deps += tx_data.parents.Count();
            // Create a singleton chunk for it.
            tx_data.chunk_idx = num_chunks;
            m_set_info[num_chunks++] = SetInfo(depgraph, tx_idx);
        }
        // Set the reachable transactions for each chunk to the transactions' parents and children.
        for (SetIdx chunk_idx = 0; chunk_idx < num_transactions; ++chunk_idx) {
            auto& tx_data = m_tx_data[m_set_info[chunk_idx].transactions.First()];
            m_reachable[chunk_idx].first = tx_data.parents;
            m_reachable[chunk_idx].second = tx_data.children;
        }
        Assume(num_chunks == num_transactions);
        // Mark all chunk sets as chunks.
        m_chunk_idxs = SetType::Fill(num_chunks);
        m_cost.InitializeEnd(/*num_txns=*/num_chunks, /*num_deps=*/num_deps);
    }
1206
1210 void LoadLinearization(std::span<const DepGraphIndex> old_linearization) noexcept
1211 {
1212 // Add transactions one by one, in order of existing linearization.
1213 for (DepGraphIndex tx_idx : old_linearization) {
1214 auto chunk_idx = m_tx_data[tx_idx].chunk_idx;
1215 // Merge the chunk upwards, as long as merging succeeds.
1216 while (true) {
1217 chunk_idx = MergeStep<false>(chunk_idx);
1218 if (chunk_idx == INVALID_SET_IDX) break;
1219 }
1220 }
1221 }
1222
    /** Repeatedly merge chunks until the forest state is topologically valid. */
    void MakeTopological() noexcept
    {
        m_cost.MakeTopologicalBegin();
        // Random initial merge direction, so runs are not biased towards
        // always attempting upward (or downward) merges first.
        unsigned init_dir = m_rng.randbool();
        // Chunks that resulted from a merge; those retry both directions.
        SetType merged_chunks;
        // Mark chunks as suboptimal.
        // NOTE(review): this listing appears to be missing lines here (the
        // push of chunk_idx onto m_suboptimal_chunks / m_suboptimal_idxs and
        // the random index j used to shuffle the queue) -- verify against
        // the upstream source before relying on this excerpt.
        for (auto chunk_idx : m_chunk_idxs) {
            // Randomize the initial order of suboptimal chunks in the queue.
            if (j != m_suboptimal_chunks.size() - 1) {
            }
        }
        unsigned chunks = m_chunk_idxs.Count();
        unsigned steps = 0;
        while (!m_suboptimal_chunks.empty()) {
            ++steps;
            // Pop an entry from the potentially-suboptimal chunk queue.
            SetIdx chunk_idx = m_suboptimal_chunks.front();
            // NOTE(review): a pop_front() call appears to have been dropped
            // from this listing here.
            Assume(m_suboptimal_idxs[chunk_idx]);
            m_suboptimal_idxs.Reset(chunk_idx);
            // If what was popped is not currently a chunk, continue. This may
            // happen when it was merged with something else since being added.
            if (!m_chunk_idxs[chunk_idx]) continue;
            // Bit 0 of direction enables upward merges, bit 1 downward ones;
            // chunks produced by an earlier merge try both directions.
            unsigned direction = merged_chunks[chunk_idx] ? 3 : init_dir + 1;
            // Randomly pick which of the two directions is attempted first.
            int flip = m_rng.randbool();
            for (int i = 0; i < 2; ++i) {
                if (i ^ flip) {
                    if (!(direction & 1)) continue;
                    // Attempt to merge the chunk upwards.
                    auto result_up = MergeStep<false>(chunk_idx);
                    if (result_up != INVALID_SET_IDX) {
                        // Queue the merge result, unless already queued.
                        if (!m_suboptimal_idxs[result_up]) {
                            m_suboptimal_idxs.Set(result_up);
                            m_suboptimal_chunks.push_back(result_up);
                        }
                        merged_chunks.Set(result_up);
                        break;
                    }
                } else {
                    if (!(direction & 2)) continue;
                    // Attempt to merge the chunk downwards.
                    auto result_down = MergeStep<true>(chunk_idx);
                    if (result_down != INVALID_SET_IDX) {
                        // Queue the merge result, unless already queued.
                        if (!m_suboptimal_idxs[result_down]) {
                            m_suboptimal_idxs.Set(result_down);
                            m_suboptimal_chunks.push_back(result_down);
                        }
                        merged_chunks.Set(result_down);
                        break;
                    }
                }
            }
        }
        m_cost.MakeTopologicalEnd(/*num_chunks=*/chunks, /*num_steps=*/steps);
    }
1292
    /** Queue all current chunks for optimization, in randomized order. */
    void StartOptimizing() noexcept
    {
        m_cost.StartOptimizingBegin();
        // Mark chunks suboptimal.
        // NOTE(review): lines appear to be missing from this listing (the
        // m_suboptimal_idxs.Set() call and the random index j plus the swap
        // that shuffles the queue) -- verify against the upstream source.
        for (auto chunk_idx : m_chunk_idxs) {
            m_suboptimal_chunks.push_back(chunk_idx);
            // Randomize the initial order of suboptimal chunks in the queue.
            if (j != m_suboptimal_chunks.size() - 1) {
            }
        }
        m_cost.StartOptimizingEnd(/*num_chunks=*/m_suboptimal_chunks.size());
    }
1310
1312 bool OptimizeStep() noexcept
1313 {
1314 auto chunk_idx = PickChunkToOptimize();
1315 if (chunk_idx == INVALID_SET_IDX) {
1316 // No improvable chunk was found, we are done.
1317 return false;
1318 }
1319 auto [parent_idx, child_idx] = PickDependencyToSplit(chunk_idx);
1320 if (parent_idx == TxIdx(-1)) {
1321 // Nothing to improve in chunk_idx. Need to continue with other chunks, if any.
1322 return !m_suboptimal_chunks.empty();
1323 }
1324 // Deactivate the found dependency and then make the state topological again with a
1325 // sequence of merges.
1326 Improve(parent_idx, child_idx);
1327 return true;
1328 }
1329
    /** Queue all current chunks for minimization, each with a random pivot
     *  transaction and random initial direction, in randomized order. */
    void StartMinimizing() noexcept
    {
        m_cost.StartMinimizingBegin();
        // Gather all chunks, and for each, add it with a random pivot in it, and a random initial
        // direction, to m_nonminimal_chunks.
        for (auto chunk_idx : m_chunk_idxs) {
            TxIdx pivot_idx = PickRandomTx(m_set_info[chunk_idx].transactions);
            m_nonminimal_chunks.emplace_back(chunk_idx, pivot_idx, m_rng.randbits<1>());
            // Randomize the initial order of nonminimal chunks in the queue.
            // NOTE(review): the definition of j and the swap line appear to
            // be missing from this listing -- verify against the upstream
            // source.
            if (j != m_nonminimal_chunks.size() - 1) {
            }
        }
        m_cost.StartMinimizingEnd(/*num_chunks=*/m_nonminimal_chunks.size());
    }
1350
    /** Perform one chunk-minimization step.
     *
     * @return false when the queue of potentially-non-minimal chunks is empty
     *         (every chunk is minimal); true otherwise.
     */
    bool MinimizeStep() noexcept
    {
        // If the queue of potentially-non-minimal chunks is empty, we are done.
        if (m_nonminimal_chunks.empty()) return false;
        m_cost.MinimizeStepBegin();
        // Pop an entry from the potentially-non-minimal chunk queue.
        auto [chunk_idx, pivot_idx, flags] = m_nonminimal_chunks.front();
        // NOTE(review): the pop_front() call appears to be missing from this
        // listing here -- verify against the upstream source.
        auto& chunk_info = m_set_info[chunk_idx];
        // Flag bit 0: whether the pivot should end up in the bottom part.
        bool move_pivot_down = flags & 1;
        // Flag bit 1: whether the direction was already flipped once (second stage).
        bool second_stage = flags & 2;

        // Find a random dependency whose top and bottom set feerates are equal, and which has
        // pivot in bottom set (if move_pivot_down) or in top set (if !move_pivot_down).
        std::pair<TxIdx, TxIdx> candidate_dep;
        uint64_t candidate_tiebreak{0};
        bool have_any = false;
        // Iterate over all transactions.
        for (auto tx_idx : chunk_info.transactions) {
            const auto& tx_data = m_tx_data[tx_idx];
            // Iterate over all active child dependencies of the transaction.
            for (auto child_idx : tx_data.active_children) {
                const auto& dep_top_info = m_set_info[tx_data.dep_top_idx[child_idx]];
                // Skip if this dependency does not have equal top and bottom set feerates. Note
                // that the top cannot have higher feerate than the bottom, or OptimizeSteps would
                // have dealt with it. (FeeFrac's operator<< means "strictly
                // lower feerate than"; it is not a bit shift.)
                if (dep_top_info.feerate << chunk_info.feerate) continue;
                have_any = true;
                // Skip if this dependency does not have pivot in the right place.
                if (move_pivot_down == dep_top_info.transactions[pivot_idx]) continue;
                // Remember this as our chosen dependency if it has a better tiebreak.
                // The "| 1" makes every real candidate's tiebreak nonzero, so
                // candidate_tiebreak == 0 below means "no usable candidate".
                uint64_t tiebreak = m_rng.rand64() | 1;
                if (tiebreak > candidate_tiebreak) {
                    candidate_tiebreak = tiebreak;
                    candidate_dep = {tx_idx, child_idx};
                }
            }
        }
        m_cost.MinimizeStepMid(/*num_txns=*/chunk_info.transactions.Count());
        // If no dependencies have equal top and bottom set feerate, this chunk is minimal.
        if (!have_any) return true;
        // If all found dependencies have the pivot in the wrong place, try moving it in the other
        // direction. If this was the second stage already, we are done.
        if (candidate_tiebreak == 0) {
            // Switch to other direction, and to second phase.
            flags ^= 3;
            if (!second_stage) m_nonminimal_chunks.emplace_back(chunk_idx, pivot_idx, flags);
            return true;
        }

        // Otherwise, deactivate the dependency that was found.
        auto [parent_chunk_idx, child_chunk_idx] = Deactivate(candidate_dep.first, candidate_dep.second);
        // Determine if there is a dependency from the new bottom to the new top (opposite from the
        // dependency that was just deactivated).
        auto& parent_reachable = m_reachable[parent_chunk_idx].first;
        auto& child_chunk_txn = m_set_info[child_chunk_idx].transactions;
        if (parent_reachable.Overlaps(child_chunk_txn)) {
            // A self-merge is needed. Note that the child_chunk_idx is the top, and
            // parent_chunk_idx is the bottom, because we activate a dependency in the reverse
            // direction compared to the deactivation above.
            auto merged_chunk_idx = MergeChunks(child_chunk_idx, parent_chunk_idx);
            // Re-insert the chunk into the queue, in the same direction. Note that the chunk_idx
            // will have changed.
            m_nonminimal_chunks.emplace_back(merged_chunk_idx, pivot_idx, flags);
            m_cost.MinimizeStepEnd(/*split=*/false);
        } else {
            // No self-merge happens, and thus we have found a way to split the chunk. Create two
            // smaller chunks, and add them to the queue. The one that contains the current pivot
            // gets to continue with it in the same direction, to minimize the number of times we
            // alternate direction. If we were in the second phase already, the newly created chunk
            // inherits that too, because we know no split with the pivot on the other side is
            // possible already. The new chunk without the current pivot gets a new randomly-chosen
            // one.
            if (move_pivot_down) {
                auto parent_pivot_idx = PickRandomTx(m_set_info[parent_chunk_idx].transactions);
                m_nonminimal_chunks.emplace_back(parent_chunk_idx, parent_pivot_idx, m_rng.randbits<1>());
                m_nonminimal_chunks.emplace_back(child_chunk_idx, pivot_idx, flags);
            } else {
                auto child_pivot_idx = PickRandomTx(m_set_info[child_chunk_idx].transactions);
                m_nonminimal_chunks.emplace_back(parent_chunk_idx, pivot_idx, flags);
                m_nonminimal_chunks.emplace_back(child_chunk_idx, child_pivot_idx, m_rng.randbits<1>());
            }
            if (m_rng.randbool()) {
                // NOTE(review): the body of this branch (presumably swapping
                // the two queue entries just added) is missing from this
                // listing -- verify against the upstream source.
            }
            m_cost.MinimizeStepEnd(/*split=*/true);
        }
        return true;
    }
1443
    /** Extract the linearization implied by the current forest state.
     *
     * Chunks are output from highest to lowest feerate, subject to
     * inter-chunk dependencies; within a chunk, transactions are output
     * highest-feerate first, subject to intra-chunk dependencies. Both
     * selections use a heap whose comparator orders "output later" items as
     * smaller, so the heap front is the next element to emit. fallback_order
     * must define a strong (total) ordering on transaction indices; it is the
     * final tie-breaker.
     *
     * @return one DepGraphIndex per transaction, in linearization order.
     */
    std::vector<DepGraphIndex> GetLinearization(const StrongComparator<DepGraphIndex> auto& fallback_order) noexcept
    {
        m_cost.GetLinearizationBegin();
        std::vector<DepGraphIndex> ret;
        ret.reserve(m_set_info.size());
        // Heap of chunks whose out-of-chunk dependencies are all met, paired
        // with their fallback_order-maximal transaction (used to tie-break).
        std::vector<std::pair<SetIdx, TxIdx>> ready_chunks;
        // Per chunk: number of not-yet-emitted out-of-chunk parent dependencies.
        std::vector<TxIdx> chunk_deps(m_set_info.size(), 0);
        // Per transaction: number of not-yet-emitted parent dependencies.
        std::vector<TxIdx> tx_deps(m_tx_data.size(), 0);
        // Heap of transactions in the current chunk whose parents are all emitted.
        std::vector<TxIdx> ready_tx;
        // Populate chunk_deps and tx_deps.
        unsigned num_deps{0};
        for (TxIdx chl_idx : m_transaction_idxs) {
            const auto& chl_data = m_tx_data[chl_idx];
            tx_deps[chl_idx] = chl_data.parents.Count();
            num_deps += tx_deps[chl_idx];
            auto chl_chunk_idx = chl_data.chunk_idx;
            auto& chl_chunk_info = m_set_info[chl_chunk_idx];
            chunk_deps[chl_chunk_idx] += (chl_data.parents - chl_chunk_info.transactions).Count();
        }
        // Find the fallback_order-maximal transaction within a chunk.
        auto max_fallback_fn = [&](SetIdx chunk_idx) noexcept {
            auto& chunk = m_set_info[chunk_idx].transactions;
            auto it = chunk.begin();
            DepGraphIndex ret = *it;
            ++it;
            while (it != chunk.end()) {
                if (fallback_order(*it, ret) > 0) ret = *it;
                ++it;
            }
            return ret;
        };
        // Comparator for the ready_tx heap ("smaller" means emitted later).
        auto tx_cmp_fn = [&](const auto& a, const auto& b) noexcept {
            // Bail out for identical transactions.
            if (a == b) return false;
            // First sort by increasing transaction feerate.
            auto& a_feerate = m_depgraph.FeeRate(a);
            auto& b_feerate = m_depgraph.FeeRate(b);
            auto feerate_cmp = FeeRateCompare(a_feerate, b_feerate);
            if (feerate_cmp != 0) return feerate_cmp < 0;
            // Then by decreasing transaction size.
            if (a_feerate.size != b_feerate.size) {
                return a_feerate.size > b_feerate.size;
            }
            // Tie-break by decreasing fallback_order.
            auto fallback_cmp = fallback_order(a, b);
            if (fallback_cmp != 0) return fallback_cmp > 0;
            // This should not be hit, because fallback_order defines a strong ordering.
            Assume(false);
            return a < b;
        };
        // Comparator for the ready_chunks heap ("smaller" means emitted later).
        auto chunk_cmp_fn = [&](const auto& a, const auto& b) noexcept {
            // Bail out for identical chunks.
            if (a.first == b.first) return false;
            // First sort by increasing chunk feerate.
            auto& chunk_feerate_a = m_set_info[a.first].feerate;
            auto& chunk_feerate_b = m_set_info[b.first].feerate;
            auto feerate_cmp = FeeRateCompare(chunk_feerate_a, chunk_feerate_b);
            if (feerate_cmp != 0) return feerate_cmp < 0;
            // Then by decreasing chunk size.
            if (chunk_feerate_a.size != chunk_feerate_b.size) {
                return chunk_feerate_a.size > chunk_feerate_b.size;
            }
            // Tie-break by decreasing fallback_order.
            auto fallback_cmp = fallback_order(a.second, b.second);
            if (fallback_cmp != 0) return fallback_cmp > 0;
            // This should not be hit, because fallback_order defines a strong ordering.
            Assume(false);
            return a.second < b.second;
        };
        // Construct a heap with all chunks that have no out-of-chunk dependencies.
        for (SetIdx chunk_idx : m_chunk_idxs) {
            if (chunk_deps[chunk_idx] == 0) {
                ready_chunks.emplace_back(chunk_idx, max_fallback_fn(chunk_idx));
            }
        }
        std::make_heap(ready_chunks.begin(), ready_chunks.end(), chunk_cmp_fn);
        // Pop chunks off the heap.
        while (!ready_chunks.empty()) {
            auto [chunk_idx, _rnd] = ready_chunks.front();
            std::pop_heap(ready_chunks.begin(), ready_chunks.end(), chunk_cmp_fn);
            ready_chunks.pop_back();
            Assume(chunk_deps[chunk_idx] == 0);
            const auto& chunk_txn = m_set_info[chunk_idx].transactions;
            // Build heap of all includable transactions in chunk.
            Assume(ready_tx.empty());
            for (TxIdx tx_idx : chunk_txn) {
                if (tx_deps[tx_idx] == 0) ready_tx.push_back(tx_idx);
            }
            Assume(!ready_tx.empty());
            std::make_heap(ready_tx.begin(), ready_tx.end(), tx_cmp_fn);
            // Pick transactions from the ready heap, append them to linearization, and decrement
            // dependency counts.
            while (!ready_tx.empty()) {
                // Pop an element from the tx_ready heap.
                auto tx_idx = ready_tx.front();
                std::pop_heap(ready_tx.begin(), ready_tx.end(), tx_cmp_fn);
                ready_tx.pop_back();
                // Append to linearization.
                ret.push_back(tx_idx);
                // Decrement dependency counts.
                auto& tx_data = m_tx_data[tx_idx];
                for (TxIdx chl_idx : tx_data.children) {
                    auto& chl_data = m_tx_data[chl_idx];
                    // Decrement tx dependency count.
                    Assume(tx_deps[chl_idx] > 0);
                    if (--tx_deps[chl_idx] == 0 && chunk_txn[chl_idx]) {
                        // Child tx has no dependencies left, and is in this chunk. Add it to the tx heap.
                        ready_tx.push_back(chl_idx);
                        std::push_heap(ready_tx.begin(), ready_tx.end(), tx_cmp_fn);
                    }
                    // Decrement chunk dependency count if this is out-of-chunk dependency.
                    if (chl_data.chunk_idx != chunk_idx) {
                        Assume(chunk_deps[chl_data.chunk_idx] > 0);
                        if (--chunk_deps[chl_data.chunk_idx] == 0) {
                            // Child chunk has no dependencies left. Add it to the chunk heap.
                            ready_chunks.emplace_back(chl_data.chunk_idx, max_fallback_fn(chl_data.chunk_idx));
                            std::push_heap(ready_chunks.begin(), ready_chunks.end(), chunk_cmp_fn);
                        }
                    }
                }
            }
        }
        Assume(ret.size() == m_set_info.size());
        m_cost.GetLinearizationEnd(/*num_txns=*/m_set_info.size(), /*num_deps=*/num_deps);
        return ret;
    }
1601
1615 std::vector<FeeFrac> GetDiagram() const noexcept
1616 {
1617 std::vector<FeeFrac> ret;
1618 for (auto chunk_idx : m_chunk_idxs) {
1619 ret.push_back(m_set_info[chunk_idx].feerate);
1620 }
1621 std::sort(ret.begin(), ret.end(), std::greater{});
1622 return ret;
1623 }
1624
    /** Total cost charged by the cost model for all work performed so far. */
    uint64_t GetCost() const noexcept { return m_cost.GetCost(); }
1627
    /** Check full internal consistency; intended for tests only. Uses assert
     *  (not Assume), so any violation aborts regardless of build settings.
     *  Rebuilds the expected chunk/top-set state from the dependency graph
     *  and the active dependencies, and compares it to the stored state. */
    void SanityCheck() const
    {
        //
        // Verify dependency parent/child information, and build list of (active) dependencies.
        //
        std::vector<std::pair<TxIdx, TxIdx>> expected_dependencies;
        std::vector<std::pair<TxIdx, TxIdx>> all_dependencies;
        std::vector<std::pair<TxIdx, TxIdx>> active_dependencies;
        for (auto parent_idx : m_depgraph.Positions()) {
            for (auto child_idx : m_depgraph.GetReducedChildren(parent_idx)) {
                expected_dependencies.emplace_back(parent_idx, child_idx);
            }
        }
        for (auto tx_idx : m_transaction_idxs) {
            for (auto child_idx : m_tx_data[tx_idx].children) {
                all_dependencies.emplace_back(tx_idx, child_idx);
                if (m_tx_data[tx_idx].active_children[child_idx]) {
                    active_dependencies.emplace_back(tx_idx, child_idx);
                }
            }
        }
        // The stored dependencies must match those computed from the graph.
        std::sort(expected_dependencies.begin(), expected_dependencies.end());
        std::sort(all_dependencies.begin(), all_dependencies.end());
        assert(expected_dependencies == all_dependencies);

        //
        // Verify the chunks against the list of active dependencies
        //
        SetType chunk_cover;
        for (auto chunk_idx : m_chunk_idxs) {
            const auto& chunk_info = m_set_info[chunk_idx];
            // Verify that transactions in the chunk point back to it. This guarantees
            // that chunks are non-overlapping.
            for (auto tx_idx : chunk_info.transactions) {
                assert(m_tx_data[tx_idx].chunk_idx == chunk_idx);
            }
            assert(!chunk_cover.Overlaps(chunk_info.transactions));
            chunk_cover |= chunk_info.transactions;
            // Verify the chunk's transaction set: start from an arbitrary chunk transaction,
            // and for every active dependency, if it contains the parent or child, add the
            // other. It must have exactly N-1 active dependencies in it, guaranteeing it is
            // acyclic.
            assert(chunk_info.transactions.Any());
            SetType expected_chunk = SetType::Singleton(chunk_info.transactions.First());
            while (true) {
                auto old = expected_chunk;
                size_t active_dep_count{0};
                for (const auto& [par, chl] : active_dependencies) {
                    if (expected_chunk[par] || expected_chunk[chl]) {
                        expected_chunk.Set(par);
                        expected_chunk.Set(chl);
                        ++active_dep_count;
                    }
                }
                if (old == expected_chunk) {
                    // Closure reached: N transactions joined by N-1 active
                    // dependencies form a spanning tree (connected, acyclic).
                    assert(expected_chunk.Count() == active_dep_count + 1);
                    break;
                }
            }
            assert(chunk_info.transactions == expected_chunk);
            // Verify the chunk's feerate.
            assert(chunk_info.feerate == m_depgraph.FeeRate(chunk_info.transactions));
            // Verify the chunk's reachable transactions.
            assert(m_reachable[chunk_idx] == GetReachable(expected_chunk));
            // Verify that the chunk's reachable transactions don't include its own transactions.
            assert(!m_reachable[chunk_idx].first.Overlaps(chunk_info.transactions));
            assert(!m_reachable[chunk_idx].second.Overlaps(chunk_info.transactions));
        }
        // Verify that together, the chunks cover all transactions.
        assert(chunk_cover == m_depgraph.Positions());

        //
        // Verify transaction data.
        //
        assert(m_transaction_idxs == m_depgraph.Positions());
        for (auto tx_idx : m_transaction_idxs) {
            const auto& tx_data = m_tx_data[tx_idx];
            // Verify it has a valid chunk index, and that chunk includes this transaction.
            assert(m_chunk_idxs[tx_data.chunk_idx]);
            assert(m_set_info[tx_data.chunk_idx].transactions[tx_idx]);
            // Verify parents/children.
            assert(tx_data.parents == m_depgraph.GetReducedParents(tx_idx));
            assert(tx_data.children == m_depgraph.GetReducedChildren(tx_idx));
            // Verify active_children is a subset of children.
            assert(tx_data.active_children.IsSubsetOf(tx_data.children));
            // Verify each active child's dep_top_idx points to a valid non-chunk set.
            for (auto child_idx : tx_data.active_children) {
                assert(tx_data.dep_top_idx[child_idx] < m_set_info.size());
                assert(!m_chunk_idxs[tx_data.dep_top_idx[child_idx]]);
            }
        }

        //
        // Verify active dependencies' top sets.
        //
        for (const auto& [par_idx, chl_idx] : active_dependencies) {
            // Verify the top set's transactions: it must contain the parent, and for every
            // active dependency, except the chl_idx->par_idx dependency itself, if it contains the
            // parent or child, it must contain both. It must have exactly N-1 active dependencies
            // in it, guaranteeing it is acyclic.
            SetType expected_top = SetType::Singleton(par_idx);
            while (true) {
                auto old = expected_top;
                size_t active_dep_count{0};
                for (const auto& [par2_idx, chl2_idx] : active_dependencies) {
                    if (par_idx == par2_idx && chl_idx == chl2_idx) continue;
                    if (expected_top[par2_idx] || expected_top[chl2_idx]) {
                        expected_top.Set(par2_idx);
                        expected_top.Set(chl2_idx);
                        ++active_dep_count;
                    }
                }
                if (old == expected_top) {
                    assert(expected_top.Count() == active_dep_count + 1);
                    break;
                }
            }
            assert(!expected_top[chl_idx]);
            auto& dep_top_info = m_set_info[m_tx_data[par_idx].dep_top_idx[chl_idx]];
            assert(dep_top_info.transactions == expected_top);
            // Verify the top set's feerate.
            assert(dep_top_info.feerate == m_depgraph.FeeRate(dep_top_info.transactions));
        }

        //
        // Verify m_suboptimal_chunks.
        //
        SetType suboptimal_idxs;
        for (size_t i = 0; i < m_suboptimal_chunks.size(); ++i) {
            auto chunk_idx = m_suboptimal_chunks[i];
            // No duplicate entries in the queue.
            assert(!suboptimal_idxs[chunk_idx]);
            suboptimal_idxs.Set(chunk_idx);
        }
        assert(m_suboptimal_idxs == suboptimal_idxs);

        //
        // Verify m_nonminimal_chunks.
        //
        SetType nonminimal_idxs;
        for (size_t i = 0; i < m_nonminimal_chunks.size(); ++i) {
            auto [chunk_idx, pivot, flags] = m_nonminimal_chunks[i];
            // The pivot must live inside the chunk it is queued with.
            assert(m_tx_data[pivot].chunk_idx == chunk_idx);
            // No duplicate entries in the queue.
            assert(!nonminimal_idxs[chunk_idx]);
            nonminimal_idxs.Set(chunk_idx);
        }
        assert(nonminimal_idxs.IsSubsetOf(m_chunk_idxs));
    }
1776};
1777
1798template<typename SetType>
1799std::tuple<std::vector<DepGraphIndex>, bool, uint64_t> Linearize(
1800 const DepGraph<SetType>& depgraph,
1801 uint64_t max_cost,
1802 uint64_t rng_seed,
1803 const StrongComparator<DepGraphIndex> auto& fallback_order,
1804 std::span<const DepGraphIndex> old_linearization = {},
1805 bool is_topological = true) noexcept
1806{
1808 SpanningForestState forest(depgraph, rng_seed);
1809 if (!old_linearization.empty()) {
1810 forest.LoadLinearization(old_linearization);
1811 if (!is_topological) forest.MakeTopological();
1812 } else {
1813 forest.MakeTopological();
1814 }
1815 // Make improvement steps to it until we hit the max_iterations limit, or an optimal result
1816 // is found.
1817 if (forest.GetCost() < max_cost) {
1818 forest.StartOptimizing();
1819 do {
1820 if (!forest.OptimizeStep()) break;
1821 } while (forest.GetCost() < max_cost);
1822 }
1823 // Make chunk minimization steps until we hit the max_iterations limit, or all chunks are
1824 // minimal.
1825 bool optimal = false;
1826 if (forest.GetCost() < max_cost) {
1827 forest.StartMinimizing();
1828 do {
1829 if (!forest.MinimizeStep()) {
1830 optimal = true;
1831 break;
1832 }
1833 } while (forest.GetCost() < max_cost);
1834 }
1835 return {forest.GetLinearization(fallback_order), optimal, forest.GetCost()};
1836}
1837
/** Improve a given linearization in place.
 *
 * @param[in]     depgraph      Dependency graph of the cluster being linearized.
 * @param[in,out] linearization On input, an existing linearization for depgraph;
 *                              on output, an equal-or-better one for the same graph.
 */
template<typename SetType>
void PostLinearize(const DepGraph<SetType>& depgraph, std::span<DepGraphIndex> linearization)
{
    // This algorithm performs a number of passes (currently 2); the even ones operate from back to
    // front, the odd ones from front to back. Each results in an equal-or-better linearization
    // than the one started from.
    // - One pass in either direction guarantees that the resulting chunks are connected.
    // - Each direction corresponds to one shape of tree being linearized optimally (forward passes
    //   guarantee this for graphs where each transaction has at most one child; backward passes
    //   guarantee this for graphs where each transaction has at most one parent).
    // - Starting with a backward pass guarantees the moved-tree property.
    //
    // During an odd (forward) pass, the high-level operation is:
    // - Start with an empty list of groups L=[].
    // - For every transaction i in the old linearization, from front to back:
    //   - Append a new group C=[i], containing just i, to the back of L.
    //   - While L has at least one group before C, and the group immediately before C has feerate
    //     lower than C:
    //     - If C depends on P:
    //       - Merge P into C, making C the concatenation of P+C, continuing with the combined C.
    //     - Otherwise:
    //       - Swap P with C, continuing with the now-moved C.
    // - The output linearization is the concatenation of the groups in L.
    //
    // During even (backward) passes, i iterates from the back to the front of the existing
    // linearization, and new groups are prepended instead of appended to the list L. To enable
    // more code reuse, both passes append groups, but during even passes the meanings of
    // parent/child, and of high/low feerate are reversed, and the final concatenation is reversed
    // on output.
    //
    // In the implementation below, the groups are represented by singly-linked lists (pointing
    // from the back to the front), which are themselves organized in a singly-linked circular
    // list (each group pointing to its predecessor, with a special sentinel group at the front
    // that points back to the last group).
    //
    // Information about transaction t is stored in entries[t + 1], while the sentinel is in
    // entries[0].

    /** Index of the sentinel group's entry (entries[0]). */
    static constexpr DepGraphIndex SENTINEL{0};
    /** Value of TxEntry::prev_tx marking a group's first transaction. */
    static constexpr DepGraphIndex NO_PREV_TX{0};


    /** Per-transaction bookkeeping; entries[0] doubles as the sentinel group. */
    struct TxEntry
    {
        /** Index (into entries) of the previous transaction in this group, or
         *  NO_PREV_TX if this is the group's first transaction. */
        DepGraphIndex prev_tx;

        // The fields below are only used for transactions that are the last one in a group
        // (referred to as tail transactions below).

        /** Index (into entries) of the first transaction of this group. */
        DepGraphIndex first_tx;
        /** Index (into entries) of the tail of the previous group; the
         *  sentinel's prev_group points at the last group (circular list). */
        DepGraphIndex prev_group;
        /** All transactions in this group. */
        SetType group;
        /** All dependencies of this group (ancestors in odd passes;
         *  descendants in even ones, where meanings are reversed). */
        SetType deps;
        /** Combined fee and size of this group; fee is negated during even
         *  passes so feerate comparisons are effectively reversed. */
        FeeFrac feerate;
    };

    // As an example, consider the state corresponding to the linearization [1,0,3,2], with
    // groups [1,0,3] and [2], in an odd pass. The linked lists would be:
    //
    //                                        +-----+
    //                                0<-P-   | 0 S |  ---\     Legend:
    //                                        +-----+     |
    //                                           ^        |     - digit in box: entries index
    //             /--------------F---------+    G        |       (note: one more than tx value)
    //             v                        \    |        |     - S: sentinel group
    //          +-----+        +-----+      +-----+       |       (empty feerate)
    //   0<-P-  | 2   | <--P-- | 1   | <--P-- | 4 T |     |     - T: tail transaction, contains
    //          +-----+        +-----+      +-----+       |       fields beyond prev_tx.
    //                            ^                       |     - P: prev_tx reference
    //                            G                       G     - F: first_tx reference
    //                            |                       |     - G: prev_group reference
    //                         +-----+                    |
    //                  0<-P-  | 3 T |  <--/
    //                         +-----+
    //                          ^   |
    //                          \-F-/
    //
    // During an even pass, the diagram above would correspond to linearization [2,3,0,1], with
    // groups [2] and [3,0,1].

    std::vector<TxEntry> entries(depgraph.PositionRange() + 1);

    // Perform two passes over the linearization.
    for (int pass = 0; pass < 2; ++pass) {
        int rev = !(pass & 1);
        // Construct a sentinel group, identifying the start of the list.
        entries[SENTINEL].prev_group = SENTINEL;
        Assume(entries[SENTINEL].feerate.IsEmpty());

        // Iterate over all elements in the existing linearization.
        for (DepGraphIndex i = 0; i < linearization.size(); ++i) {
            // Even passes are from back to front; odd passes from front to back.
            DepGraphIndex idx = linearization[rev ? linearization.size() - 1 - i : i];
            // Construct a new group containing just idx. In even passes, the meaning of
            // parent/child and high/low feerate are swapped.
            DepGraphIndex cur_group = idx + 1;
            entries[cur_group].group = SetType::Singleton(idx);
            entries[cur_group].deps = rev ? depgraph.Descendants(idx): depgraph.Ancestors(idx);
            entries[cur_group].feerate = depgraph.FeeRate(idx);
            if (rev) entries[cur_group].feerate.fee = -entries[cur_group].feerate.fee;
            entries[cur_group].prev_tx = NO_PREV_TX; // No previous transaction in group.
            entries[cur_group].first_tx = cur_group; // Transaction itself is first of group.
            // Insert the new group at the back of the groups linked list.
            entries[cur_group].prev_group = entries[SENTINEL].prev_group;
            entries[SENTINEL].prev_group = cur_group;

            // Start merge/swap cycle.
            DepGraphIndex next_group = SENTINEL; // We inserted at the end, so next group is sentinel.
            DepGraphIndex prev_group = entries[cur_group].prev_group;
            // Continue as long as the current group has higher feerate than the previous one.
            // (FeeFrac's operator>> means "strictly higher feerate than".)
            while (entries[cur_group].feerate >> entries[prev_group].feerate) {
                // prev_group/cur_group/next_group refer to (the last transactions of) 3
                // consecutive entries in groups list.
                Assume(cur_group == entries[next_group].prev_group);
                Assume(prev_group == entries[cur_group].prev_group);
                // The sentinel has empty feerate, which is neither higher or lower than other
                // feerates. Thus, the while loop we are in here guarantees that cur_group and
                // prev_group are not the sentinel.
                Assume(cur_group != SENTINEL);
                Assume(prev_group != SENTINEL);
                if (entries[cur_group].deps.Overlaps(entries[prev_group].group)) {
                    // There is a dependency between cur_group and prev_group; merge prev_group
                    // into cur_group. The group/deps/feerate fields of prev_group remain unchanged
                    // but become unused.
                    entries[cur_group].group |= entries[prev_group].group;
                    entries[cur_group].deps |= entries[prev_group].deps;
                    entries[cur_group].feerate += entries[prev_group].feerate;
                    // Make the first of the current group point to the tail of the previous group.
                    entries[entries[cur_group].first_tx].prev_tx = prev_group;
                    // The first of the previous group becomes the first of the newly-merged group.
                    entries[cur_group].first_tx = entries[prev_group].first_tx;
                    // The previous group becomes whatever group was before the former one.
                    prev_group = entries[prev_group].prev_group;
                    entries[cur_group].prev_group = prev_group;
                } else {
                    // There is no dependency between cur_group and prev_group; swap them.
                    DepGraphIndex preprev_group = entries[prev_group].prev_group;
                    // If PP, P, C, N were the old preprev, prev, cur, next groups, then the new
                    // layout becomes [PP, C, P, N]. Update prev_groups to reflect that order.
                    entries[next_group].prev_group = prev_group;
                    entries[prev_group].prev_group = cur_group;
                    entries[cur_group].prev_group = preprev_group;
                    // The current group remains the same, but the groups before/after it have
                    // changed.
                    next_group = prev_group;
                    prev_group = preprev_group;
                }
            }
        }

        // Convert the entries back to linearization (overwriting the existing one).
        DepGraphIndex cur_group = entries[0].prev_group;
        DepGraphIndex done = 0;
        while (cur_group != SENTINEL) {
            DepGraphIndex cur_tx = cur_group;
            // Traverse the transactions of cur_group (from back to front), and write them in the
            // same order during odd passes, and reversed (front to back) in even passes.
            if (rev) {
                do {
                    *(linearization.begin() + (done++)) = cur_tx - 1;
                    cur_tx = entries[cur_tx].prev_tx;
                } while (cur_tx != NO_PREV_TX);
            } else {
                do {
                    *(linearization.end() - (++done)) = cur_tx - 1;
                    cur_tx = entries[cur_tx].prev_tx;
                } while (cur_tx != NO_PREV_TX);
            }
            cur_group = entries[cur_group].prev_group;
        }
        Assume(done == linearization.size());
    }
}
2038
2039} // namespace cluster_linearize
2040
2041#endif // BITCOIN_CLUSTER_LINEARIZE_H
#define LIFETIMEBOUND
Definition: attributes.h:16
int ret
int flags
Definition: bitcoin-tx.cpp:529
#define Assume(val)
Assume is the identity function.
Definition: check.h:125
xoroshiro128++ PRNG.
Definition: random.h:425
constexpr uint64_t rand64() noexcept
Definition: random.h:448
I randrange(I range) noexcept
Generate a random integer in the range [0..range), with range > 0.
Definition: random.h:254
bool randbool() noexcept
Generate a random boolean.
Definition: random.h:325
uint64_t randbits(int bits) noexcept
Generate a random (bits)-bit integer.
Definition: random.h:204
bool empty() const noexcept
Test whether the contents of this deque is empty.
Definition: vecdeque.h:310
void clear() noexcept
Resize the deque to be size 0.
Definition: vecdeque.h:126
void push_back(T &&elem)
Move-construct a new element at the end of the deque.
Definition: vecdeque.h:227
void pop_front()
Remove the first element of the deque.
Definition: vecdeque.h:250
size_t size() const noexcept
Get the number of elements in this deque.
Definition: vecdeque.h:312
void emplace_back(Args &&... args)
Construct a new element at the end of the deque.
Definition: vecdeque.h:219
T & front() noexcept
Get a mutable reference to the first element of the deque.
Definition: vecdeque.h:268
void reserve(size_t capacity)
Increase the capacity to capacity.
Definition: vecdeque.h:206
T & back() noexcept
Get a mutable reference to the last element of the deque.
Definition: vecdeque.h:282
Data structure that holds a transaction graph's preprocessed data (fee, size, ancestors,...
unsigned CountDependencies() const noexcept
const SetType & Ancestors(DepGraphIndex i) const noexcept
Get the ancestors of a given transaction i.
FeeFrac & FeeRate(DepGraphIndex i) noexcept
Get the mutable feerate of a given transaction i.
SetType GetReducedChildren(DepGraphIndex i) const noexcept
Compute the (reduced) set of children of node i in this graph.
SetType GetReducedParents(DepGraphIndex i) const noexcept
Compute the (reduced) set of parents of node i in this graph.
void AppendTopo(std::vector< DepGraphIndex > &list, const SetType &select) const noexcept
Append the entries of select to list in a topologically valid order.
const FeeFrac & FeeRate(DepGraphIndex i) const noexcept
Get the feerate of a given transaction i.
SetType GetConnectedComponent(const SetType &todo, DepGraphIndex tx) const noexcept
Get the connected component within the subset "todo" that contains tx (which must be in todo).
bool IsConnected() const noexcept
Determine if this entire graph is connected.
bool IsConnected(const SetType &subset) const noexcept
Determine if a subset is connected.
DepGraphIndex PositionRange() const noexcept
Get the range of positions in this DepGraph.
SetType FindConnectedComponent(const SetType &todo) const noexcept
Find some connected component within the subset "todo" of this graph.
DepGraphIndex AddTransaction(const FeeFrac &feefrac) noexcept
Add a new unconnected transaction to this transaction graph (in the first available position),...
void RemoveTransactions(const SetType &del) noexcept
Remove the specified positions from this DepGraph.
auto TxCount() const noexcept
Get the number of transactions in the graph.
std::vector< Entry > entries
Data for each transaction.
const SetType & Descendants(DepGraphIndex i) const noexcept
Get the descendants of a given transaction i.
friend bool operator==(const DepGraph &a, const DepGraph &b) noexcept
Equality operator (primarily for testing purposes).
size_t DynamicMemoryUsage() const noexcept
DepGraph() noexcept=default
SetType m_used
Which positions are used.
const SetType & Positions() const noexcept
Get the set of transactions positions in use.
void Compact() noexcept
Reduce memory usage if possible.
FeeFrac FeeRate(const SetType &elems) const noexcept
Compute the aggregate feerate of a set of nodes in this graph.
void AddDependencies(const SetType &parents, DepGraphIndex child) noexcept
Modify this transaction graph, adding multiple parents to a specified child.
bool IsAcyclic() const noexcept
Check if this graph is acyclic.
A default cost model for SFL for SetType=BitSet<64>, based on benchmarks.
void PickChunkToOptimizeEnd(int num_steps) noexcept
void PickMergeCandidateEnd(int num_steps) noexcept
void MakeTopologicalEnd(int num_chunks, int num_steps) noexcept
void StartOptimizingEnd(int num_chunks) noexcept
void MergeChunksMid(int num_txns) noexcept
void StartMinimizingEnd(int num_chunks) noexcept
void MergeChunksEnd(int num_steps) noexcept
void InitializeEnd(int num_txns, int num_deps) noexcept
void PickDependencyToSplitEnd(int num_txns) noexcept
void GetLinearizationEnd(int num_txns, int num_deps) noexcept
void MinimizeStepMid(int num_txns) noexcept
void DeactivateEnd(int num_deps) noexcept
void ActivateEnd(int num_deps) noexcept
void MinimizeStepEnd(bool split) noexcept
Class to represent the internal state of the spanning-forest linearization (SFL) algorithm.
std::pair< TxIdx, TxIdx > PickDependencyToSplit(SetIdx chunk_idx) noexcept
Find a (parent, child) dependency to deactivate in chunk_idx, or (-1, -1) if none.
void StartMinimizing() noexcept
Initialize data structure for minimizing the chunks.
SetIdx PickChunkToOptimize() noexcept
Determine the next chunk to optimize, or INVALID_SET_IDX if none.
std::vector< DepGraphIndex > GetLinearization(const StrongComparator< DepGraphIndex > auto &fallback_order) noexcept
Construct a topologically-valid linearization from the current forest state.
SetIdx MergeChunksDirected(SetIdx chunk_idx, SetIdx merge_chunk_idx) noexcept
Activate a dependency from chunk_idx to merge_chunk_idx (if !DownWard), or a dependency from merge_ch...
SpanningForestState(const DepGraph< SetType > &depgraph LIFETIMEBOUND, uint64_t rng_seed, const CostModel &cost=CostModel{}) noexcept
Construct a spanning forest for the given DepGraph, with every transaction in its own chunk (not topo...
static constexpr SetIdx INVALID_SET_IDX
An invalid SetIdx.
bool MinimizeStep() noexcept
Try to reduce a chunk's size.
std::vector< TxData > m_tx_data
Information about each transaction (and chunks).
std::vector< FeeFrac > GetDiagram() const noexcept
Get the diagram for the current state, which must be topological.
CostModel m_cost
Accounting for the cost of this computation.
SetIdx MergeChunks(SetIdx top_idx, SetIdx bottom_idx) noexcept
Activate a dependency from the bottom set to the top set, which must exist.
void LoadLinearization(std::span< const DepGraphIndex > old_linearization) noexcept
Load an existing linearization.
SetIdx MergeStep(SetIdx chunk_idx) noexcept
Perform an upward or downward merge step, on the specified chunk.
std::vector< SetInfo< SetType > > m_set_info
Information about each set (chunk, or active dependency top set).
SetIdx PickMergeCandidate(SetIdx chunk_idx) noexcept
Determine which chunk to merge chunk_idx with, or INVALID_SET_IDX if none.
SetIdx Activate(TxIdx parent_idx, TxIdx child_idx) noexcept
Make the inactive dependency from child to parent, which must not be in the same chunk already,...
TxIdx PickRandomTx(const SetType &tx_idxs) noexcept
Pick a random transaction within a set (which must be non-empty).
bool OptimizeStep() noexcept
Try to improve the forest.
void StartOptimizing() noexcept
Initialize the data structure for optimization.
std::pair< SetType, SetType > GetReachable(const SetType &tx_idxs) const noexcept
Find the set of out-of-chunk transactions reachable from tx_idxs, both in upwards and downwards direc...
std::vector< std::pair< SetType, SetType > > m_reachable
For each chunk, indexed by SetIdx, the set of out-of-chunk reachable transactions,...
void Improve(TxIdx parent_idx, TxIdx child_idx) noexcept
Split a chunk, and then merge the resulting two chunks to make the graph topological again.
SetType m_transaction_idxs
The set of all TxIdx's of transactions in the cluster indexing into m_tx_data.
InsecureRandomContext m_rng
Internal RNG.
DepGraphIndex TxIdx
Data type to represent indexing into m_tx_data.
const DepGraph< SetType > & m_depgraph
The DepGraph we are trying to linearize.
VecDeque< std::tuple< SetIdx, TxIdx, unsigned > > m_nonminimal_chunks
A FIFO of chunk indexes with a pivot transaction in them, and a flag to indicate their status:
std::conditional_t<(SetType::Size()<=0xff), uint8_t, std::conditional_t<(SetType::Size()<=0xffff), uint16_t, uint32_t > > SetIdx
Data type to represent indexing into m_set_info.
void MergeSequence(SetIdx chunk_idx) noexcept
Perform an upward or downward merge sequence on the specified chunk.
void MakeTopological() noexcept
Make state topological.
std::pair< SetIdx, SetIdx > Deactivate(TxIdx parent_idx, TxIdx child_idx) noexcept
Make a specified active dependency inactive.
uint64_t GetCost() const noexcept
Determine how much work was performed so far.
SetType m_chunk_idxs
The set of all chunk SetIdx's.
VecDeque< SetIdx > m_suboptimal_chunks
A FIFO of chunk SetIdxs for chunks that may be improved still.
SetType m_suboptimal_idxs
The set of all SetIdx's that appear in m_suboptimal_chunks.
void SanityCheck() const
Verify internal consistency of the data structure.
Concept for function objects that return std::strong_ordering when invoked with two Args.
std::vector< FeeFrac > ChunkLinearization(const DepGraph< SetType > &depgraph, std::span< const DepGraphIndex > linearization) noexcept
Compute the feerates of the chunks of linearization.
std::tuple< std::vector< DepGraphIndex >, bool, uint64_t > Linearize(const DepGraph< SetType > &depgraph, uint64_t max_cost, uint64_t rng_seed, const StrongComparator< DepGraphIndex > auto &fallback_order, std::span< const DepGraphIndex > old_linearization={}, bool is_topological=true) noexcept
Find or improve a linearization for a cluster.
std::compare_three_way IndexTxOrder
Simple default transaction ordering function for SpanningForestState::GetLinearization() and Lineariz...
uint32_t DepGraphIndex
Data type to represent transaction indices in DepGraphs and the clusters they represent.
void PostLinearize(const DepGraph< SetType > &depgraph, std::span< DepGraphIndex > linearization)
Improve a given linearization.
std::vector< SetInfo< SetType > > ChunkLinearizationInfo(const DepGraph< SetType > &depgraph, std::span< const DepGraphIndex > linearization) noexcept
Compute the chunks of linearization as SetInfos.
static size_t DynamicUsage(const int8_t &v)
Dynamic memory usage for built-in types is zero.
Definition: memusage.h:31
Definition: common.h:29
static std::vector< std::string > split(const std::string &str, const std::string &delims=" \t")
Definition: subprocess.h:315
Data structure storing a fee and size, ordered by increasing fee/size.
Definition: feefrac.h:40
int64_t fee
Definition: feefrac.h:107
bool IsEmpty() const noexcept
Check if this is empty (size and fee are 0).
Definition: feefrac.h:120
Information about a single transaction.
SetType descendants
All descendants of the transaction (including itself).
friend bool operator==(const Entry &, const Entry &) noexcept=default
Equality operator (primarily for testing purposes).
Entry() noexcept=default
Construct an empty entry.
FeeFrac feerate
Fee and size of transaction itself.
SetType ancestors
All ancestors of the transaction (including itself).
A set of transactions together with their aggregate feerate.
SetInfo(const DepGraph< SetType > &depgraph, DepGraphIndex pos) noexcept
Construct a SetInfo for a given transaction in a depgraph.
SetInfo operator-(const SetInfo &other) const noexcept
Compute the difference between this and other SetInfo (which must be a subset).
FeeFrac feerate
Their combined fee and size.
SetInfo() noexcept=default
Construct a SetInfo for the empty set.
void Set(const DepGraph< SetType > &depgraph, DepGraphIndex pos) noexcept
Add a transaction to this SetInfo (which must not yet be in it).
SetType transactions
The transactions in the set.
friend void swap(SetInfo &a, SetInfo &b) noexcept
Swap two SetInfo objects.
SetInfo(const DepGraph< SetType > &depgraph, const SetType &txn) noexcept
Construct a SetInfo for a set of transactions in a depgraph.
SetInfo & operator|=(const SetInfo &other) noexcept
Add the transactions of other to this SetInfo (no overlap allowed).
SetInfo & operator-=(const SetInfo &other) noexcept
Remove the transactions of other from this SetInfo (which must be a subset).
friend bool operator==(const SetInfo &, const SetInfo &) noexcept=default
Permit equality testing.
Structure with information about a single transaction.
SetType active_children
The set of child transactions reachable through an active dependency.
SetType children
The set of child transactions of this transaction.
SetIdx chunk_idx
Which chunk this transaction belongs to.
std::array< SetIdx, SetType::Size()> dep_top_idx
The top set for every active child dependency this transaction has, indexed by child TxIdx.
SetType parents
The set of parent transactions of this transaction.
static int count
assert(!tx.IsCoinBase())