Bitcoin Core  22.99.0
P2P Digital Currency
validation.cpp
Go to the documentation of this file.
1 // Copyright (c) 2009-2010 Satoshi Nakamoto
2 // Copyright (c) 2009-2020 The Bitcoin Core developers
3 // Distributed under the MIT software license, see the accompanying
4 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
5 
6 #include <validation.h>
7 
8 #include <arith_uint256.h>
9 #include <chain.h>
10 #include <chainparams.h>
11 #include <checkqueue.h>
12 #include <consensus/amount.h>
13 #include <consensus/consensus.h>
14 #include <consensus/merkle.h>
15 #include <consensus/tx_check.h>
16 #include <consensus/tx_verify.h>
17 #include <consensus/validation.h>
18 #include <cuckoocache.h>
19 #include <deploymentstatus.h>
20 #include <flatfile.h>
21 #include <hash.h>
22 #include <index/blockfilterindex.h>
23 #include <logging.h>
24 #include <logging/timer.h>
25 #include <node/blockstorage.h>
26 #include <node/coinstats.h>
27 #include <node/ui_interface.h>
28 #include <node/utxo_snapshot.h>
29 #include <policy/policy.h>
30 #include <policy/rbf.h>
31 #include <policy/settings.h>
32 #include <pow.h>
33 #include <primitives/block.h>
34 #include <primitives/transaction.h>
35 #include <random.h>
36 #include <reverse_iterator.h>
37 #include <script/script.h>
38 #include <script/sigcache.h>
39 #include <shutdown.h>
40 #include <signet.h>
41 #include <timedata.h>
42 #include <tinyformat.h>
43 #include <txdb.h>
44 #include <txmempool.h>
45 #include <uint256.h>
46 #include <undo.h>
47 #include <util/check.h> // For NDEBUG compile time check
48 #include <util/hasher.h>
49 #include <util/moneystr.h>
50 #include <util/rbf.h>
51 #include <util/strencodings.h>
52 #include <util/system.h>
53 #include <util/trace.h>
54 #include <util/translation.h>
55 #include <validationinterface.h>
56 #include <warnings.h>
57 
58 #include <numeric>
59 #include <optional>
60 #include <string>
61 
62 #include <boost/algorithm/string/replace.hpp>
63 
// Conversion factors for reporting durations in fractions of a second;
// presumably used for bench/timing log output -- usage not in this chunk.
64 #define MICRO 0.000001
65 #define MILLI 0.001
66 
// Maximum virtual size (vbytes) of a transaction that may use the CPFP
// descendant-limit carve-out in MemPoolAccept::PreChecks.
72 static const unsigned int EXTRA_DESCENDANT_TX_SIZE_LIMIT = 10000;
// Cap on the disconnected-block transaction pool kept around during reorgs;
// NOTE(review): units (tx count vs. kB) are not visible in this chunk -- confirm.
74 static const unsigned int MAX_DISCONNECTED_TX_POOL_SIZE = 20000;
// Interval for periodic chainstate writes; presumably consulted by the
// flush logic, which is not in this chunk -- confirm.
76 static constexpr std::chrono::hours DATABASE_WRITE_INTERVAL{1};
// Interval for periodic full database flushes; presumably consulted by the
// flush logic, which is not in this chunk -- confirm.
78 static constexpr std::chrono::hours DATABASE_FLUSH_INTERVAL{24};
// If the chain tip's block time is older than this, the node is not treated
// as "current" for fee estimation (used below to gate fee estimation on tip
// freshness).
80 static constexpr std::chrono::hours MAX_FEE_ESTIMATION_TIP_AGE{3};
// Human-readable descriptions of the -checklevel verification levels, one
// entry per level plus a trailing note; as the strings themselves state,
// each level is cumulative and includes the checks of the previous levels.
81 const std::vector<std::string> CHECKLEVEL_DOC {
82  "level 0 reads the blocks from disk",
83  "level 1 verifies block validity",
84  "level 2 verifies undo data",
85  "level 3 checks disconnection of tip blocks",
86  "level 4 tries to reconnect the blocks",
87  "each level includes the checks of the previous levels",
88 };
89 
91  // First sort by most total work, ...
92  if (pa->nChainWork > pb->nChainWork) return false;
93  if (pa->nChainWork < pb->nChainWork) return true;
94 
95  // ... then by earliest time received, ...
96  if (pa->nSequenceId < pb->nSequenceId) return false;
97  if (pa->nSequenceId > pb->nSequenceId) return true;
98 
99  // Use pointer address as tie breaker (should only happen with blocks
100  // loaded from disk, as those all have id 0).
101  if (pa < pb) return false;
102  if (pa > pb) return true;
103 
104  // Identical blocks.
105  return false;
106 }
107 
119 
// Condition variable; presumably signalled when the best block changes --
// the notify/wait sites are not in this chunk, so confirm before relying on it.
122 std::condition_variable g_best_block_cv;
// Whether to reject non-standard transactions from the mempool (enforced in
// MemPoolAccept::PreChecks via the IsStandardTx / AreInputsStandard /
// IsWitnessStandard checks).
125 bool fRequireStandard = true;
// Toggle for extra block-index consistency checking; the checking code is
// not in this chunk.
126 bool fCheckBlockIndex = false;
129 
132 
134 
135 // Internal stuff
136 namespace {
// Block index with the most work among blocks known to be invalid;
// presumably maintained where blocks fail validation -- that code is not in
// this chunk.
137  CBlockIndex* pindexBestInvalid = nullptr;
138 } // namespace
139 
140 // Internal stuff from blockstorage ...
// Presumably defined in node/blockstorage (included above); referenced here
// only until the remaining block-storage logic is moved out (see TODO below).
142 extern std::vector<CBlockFileInfo> vinfoBlockFile;
143 extern int nLastBlockFile;
144 extern bool fCheckForPruning;
145 extern std::set<CBlockIndex*> setDirtyBlockIndex;
146 extern std::set<int> setDirtyFileInfo;
147 void FlushBlockFile(bool fFinalize = false, bool finalize_undo = false);
148 // ... TODO move fully to blockstorage
149 
151 {
153  BlockMap::const_iterator it = m_block_index.find(hash);
154  return it == m_block_index.end() ? nullptr : it->second;
155 }
156 
158 {
160 
161  // Find the latest block common to locator and chain - we expect that
162  // locator.vHave is sorted descending by height.
163  for (const uint256& hash : locator.vHave) {
164  CBlockIndex* pindex = LookupBlockIndex(hash);
165  if (pindex) {
166  if (chain.Contains(pindex))
167  return pindex;
168  if (pindex->GetAncestor(chain.Height()) == chain.Tip()) {
169  return chain.Tip();
170  }
171  }
172  }
173  return chain.Genesis();
174 }
175 
176 bool CheckInputScripts(const CTransaction& tx, TxValidationState& state,
177  const CCoinsViewCache& inputs, unsigned int flags, bool cacheSigStore,
178  bool cacheFullScriptStore, PrecomputedTransactionData& txdata,
179  std::vector<CScriptCheck>* pvChecks = nullptr)
181 
// Check whether tx would be "final" (its nLockTime satisfied) in the block
// built on top of active_chain_tip: evaluates at height tip->nHeight + 1,
// and -- when LOCKTIME_MEDIAN_TIME_PAST is set in flags -- against the tip's
// median-time-past (BIP113) instead of the adjusted wall-clock time.
// Negative flag values are clamped to 0 (see comment below).
182 bool CheckFinalTx(const CBlockIndex* active_chain_tip, const CTransaction &tx, int flags)
183 {
185  assert(active_chain_tip); // TODO: Make active_chain_tip a reference
186 
187  // By convention a negative value for flags indicates that the
188  // current network-enforced consensus rules should be used. In
189  // a future soft-fork scenario that would mean checking which
190  // rules would be enforced for the next block and setting the
191  // appropriate flags. At the present time no soft-forks are
192  // scheduled, so no flags are set.
193  flags = std::max(flags, 0);
194 
195  // CheckFinalTx() uses active_chain_tip.Height()+1 to evaluate
196  // nLockTime because when IsFinalTx() is called within
197  // AcceptBlock(), the height of the block *being*
198  // evaluated is what is used. Thus if we want to know if a
199  // transaction can be part of the *next* block, we need to call
200  // IsFinalTx() with one more than active_chain_tip.Height().
201  const int nBlockHeight = active_chain_tip->nHeight + 1;
202 
203  // BIP113 requires that time-locked transactions have nLockTime set to
204  // less than the median time of the previous block they're contained in.
205  // When the next block is created its previous block will be the current
206  // chain tip, so we use that to calculate the median time passed to
207  // IsFinalTx() if LOCKTIME_MEDIAN_TIME_PAST is set.
208  const int64_t nBlockTime = (flags & LOCKTIME_MEDIAN_TIME_PAST)
209  ? active_chain_tip->GetMedianTimePast()
210  : GetAdjustedTime();
211 
212  return IsFinalTx(tx, nBlockHeight, nBlockTime);
213 }
214 
// Return whether previously computed LockPoints are still valid on the given
// chain. LockPoints with relative lock times record the highest-height input
// block in maxInputBlock; they remain valid only while that block is still on
// active_chain. LockPoints without relative lock times (maxInputBlock unset)
// are chain-independent and always valid.
215 bool TestLockPointValidity(CChain& active_chain, const LockPoints* lp)
216 {
218  assert(lp);
219  // If there are relative lock times then the maxInputBlock will be set
220  // If there are no relative lock times, the LockPoints don't depend on the chain
221  if (lp->maxInputBlock) {
222  // Check whether active_chain is an extension of the block at which the LockPoints
223  // calculation was valid. If not LockPoints are no longer valid
224  if (!active_chain.Contains(lp->maxInputBlock)) {
225  return false;
226  }
227  }
228 
229  // LockPoints still valid
230  return true;
231 }
232 
234  const CCoinsView& coins_view,
235  const CTransaction& tx,
236  int flags,
237  LockPoints* lp,
238  bool useExistingLockPoints)
239 {
240  assert(tip != nullptr);
241 
242  CBlockIndex index;
243  index.pprev = tip;
244  // CheckSequenceLocks() uses active_chainstate.m_chain.Height()+1 to evaluate
245  // height based locks because when SequenceLocks() is called within
246  // ConnectBlock(), the height of the block *being*
247  // evaluated is what is used.
248  // Thus if we want to know if a transaction can be part of the
249  // *next* block, we need to use one more than active_chainstate.m_chain.Height()
250  index.nHeight = tip->nHeight + 1;
251 
252  std::pair<int, int64_t> lockPair;
253  if (useExistingLockPoints) {
254  assert(lp);
255  lockPair.first = lp->height;
256  lockPair.second = lp->time;
257  }
258  else {
259  std::vector<int> prevheights;
260  prevheights.resize(tx.vin.size());
261  for (size_t txinIndex = 0; txinIndex < tx.vin.size(); txinIndex++) {
262  const CTxIn& txin = tx.vin[txinIndex];
263  Coin coin;
264  if (!coins_view.GetCoin(txin.prevout, coin)) {
265  return error("%s: Missing input", __func__);
266  }
267  if (coin.nHeight == MEMPOOL_HEIGHT) {
268  // Assume all mempool transaction confirm in the next block
269  prevheights[txinIndex] = tip->nHeight + 1;
270  } else {
271  prevheights[txinIndex] = coin.nHeight;
272  }
273  }
274  lockPair = CalculateSequenceLocks(tx, flags, prevheights, index);
275  if (lp) {
276  lp->height = lockPair.first;
277  lp->time = lockPair.second;
278  // Also store the hash of the block with the highest height of
279  // all the blocks which have sequence locked prevouts.
280  // This hash needs to still be on the chain
281  // for these LockPoint calculations to be valid
282  // Note: It is impossible to correctly calculate a maxInputBlock
283  // if any of the sequence locked inputs depend on unconfirmed txs,
284  // except in the special case where the relative lock time/height
285  // is 0, which is equivalent to no sequence lock. Since we assume
286  // input height of tip+1 for mempool txs and test the resulting
287  // lockPair from CalculateSequenceLocks against tip+1. We know
288  // EvaluateSequenceLocks will fail if there was a non-zero sequence
289  // lock on a mempool input, so we can use the return value of
290  // CheckSequenceLocks to indicate the LockPoints validity
291  int maxInputHeight = 0;
292  for (const int height : prevheights) {
293  // Can ignore mempool inputs since we'll fail if they had non-zero locks
294  if (height != tip->nHeight+1) {
295  maxInputHeight = std::max(maxInputHeight, height);
296  }
297  }
298  lp->maxInputBlock = tip->GetAncestor(maxInputHeight);
299  }
300  }
301  return EvaluateSequenceLocks(index, lockPair);
302 }
303 
304 // Returns the script flags which should be checked for a given block
305 static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consensus::Params& chainparams);
306 
307 static void LimitMempoolSize(CTxMemPool& pool, CCoinsViewCache& coins_cache, size_t limit, std::chrono::seconds age)
309 {
310  int expired = pool.Expire(GetTime<std::chrono::seconds>() - age);
311  if (expired != 0) {
312  LogPrint(BCLog::MEMPOOL, "Expired %i transactions from the memory pool\n", expired);
313  }
314 
315  std::vector<COutPoint> vNoSpendsRemaining;
316  pool.TrimToSize(limit, &vNoSpendsRemaining);
317  for (const COutPoint& removed : vNoSpendsRemaining)
318  coins_cache.Uncache(removed);
319 }
320 
322 {
324  if (active_chainstate.IsInitialBlockDownload())
325  return false;
326  if (active_chainstate.m_chain.Tip()->GetBlockTime() < count_seconds(GetTime<std::chrono::seconds>() - MAX_FEE_ESTIMATION_TIP_AGE))
327  return false;
328  if (active_chainstate.m_chain.Height() < pindexBestHeader->nHeight - 1)
329  return false;
330  return true;
331 }
332 
334  DisconnectedBlockTransactions& disconnectpool,
335  bool fAddToMempool)
336 {
337  if (!m_mempool) return;
338 
341  std::vector<uint256> vHashUpdate;
342  // disconnectpool's insertion_order index sorts the entries from
343  // oldest to newest, but the oldest entry will be the last tx from the
344  // latest mined block that was disconnected.
345  // Iterate disconnectpool in reverse, so that we add transactions
346  // back to the mempool starting with the earliest transaction that had
347  // been previously seen in a block.
348  auto it = disconnectpool.queuedTx.get<insertion_order>().rbegin();
349  while (it != disconnectpool.queuedTx.get<insertion_order>().rend()) {
350  // ignore validation errors in resurrected transactions
351  if (!fAddToMempool || (*it)->IsCoinBase() ||
353  *this, *m_mempool, *it, true /* bypass_limits */).m_result_type !=
355  // If the transaction doesn't make it in to the mempool, remove any
356  // transactions that depend on it (which would now be orphans).
358  } else if (m_mempool->exists(GenTxid::Txid((*it)->GetHash()))) {
359  vHashUpdate.push_back((*it)->GetHash());
360  }
361  ++it;
362  }
363  disconnectpool.queuedTx.clear();
364  // AcceptToMemoryPool/addUnchecked all assume that new mempool entries have
365  // no in-mempool children, which is generally not true when adding
366  // previously-confirmed transactions back to the mempool.
367  // UpdateTransactionsFromBlock finds descendants of any transactions in
368  // the disconnectpool that were added back and cleans up the mempool state.
370 
371  // We also need to remove any now-immature transactions
373  // Re-limit mempool size, in case we added any transactions
375  *m_mempool,
376  this->CoinsTip(),
377  gArgs.GetIntArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000,
378  std::chrono::hours{gArgs.GetIntArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY)});
379 }
380 
387  const CCoinsViewCache& view, const CTxMemPool& pool,
388  unsigned int flags, PrecomputedTransactionData& txdata, CCoinsViewCache& coins_tip)
390 {
392  AssertLockHeld(pool.cs);
393 
394  assert(!tx.IsCoinBase());
395  for (const CTxIn& txin : tx.vin) {
396  const Coin& coin = view.AccessCoin(txin.prevout);
397 
398  // This coin was checked in PreChecks and MemPoolAccept
399  // has been holding cs_main since then.
400  Assume(!coin.IsSpent());
401  if (coin.IsSpent()) return false;
402 
403  // If the Coin is available, there are 2 possibilities:
404  // it is available in our current ChainstateActive UTXO set,
405  // or it's a UTXO provided by a transaction in our mempool.
406  // Ensure the scriptPubKeys in Coins from CoinsView are correct.
407  const CTransactionRef& txFrom = pool.get(txin.prevout.hash);
408  if (txFrom) {
409  assert(txFrom->GetHash() == txin.prevout.hash);
410  assert(txFrom->vout.size() > txin.prevout.n);
411  assert(txFrom->vout[txin.prevout.n] == coin.out);
412  } else {
413  const Coin& coinFromUTXOSet = coins_tip.AccessCoin(txin.prevout);
414  assert(!coinFromUTXOSet.IsSpent());
415  assert(coinFromUTXOSet.out == coin.out);
416  }
417  }
418 
419  // Call CheckInputScripts() to cache signature and script validity against current tip consensus rules.
420  return CheckInputScripts(tx, state, view, flags, /* cacheSigStore= */ true, /* cacheFullScriptStore= */ true, txdata);
421 }
422 
423 namespace {
424 
// Encapsulates the stateful workflow of validating transactions for mempool
// acceptance: PreChecks -> (replacement / package checks) -> script checks ->
// Finalize. Entry points are AcceptSingleTransaction and
// AcceptMultipleTransactions below.
425 class MemPoolAccept
426 {
427 public:
428  explicit MemPoolAccept(CTxMemPool& mempool, CChainState& active_chainstate) : m_pool(mempool), m_view(&m_dummy), m_viewmempool(&active_chainstate.CoinsTip(), m_pool), m_active_chainstate(active_chainstate),
429  m_limit_ancestors(gArgs.GetIntArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT)),
430  m_limit_ancestor_size(gArgs.GetIntArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT)*1000),
431  m_limit_descendants(gArgs.GetIntArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT)),
432  m_limit_descendant_size(gArgs.GetIntArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT)*1000) {
433  }
434 
435  // We put the arguments we're handed into a struct, so we can pass them
436  // around easier.
437  struct ATMPArgs {
438  const CChainParams& m_chainparams;
439  const int64_t m_accept_time;
440  const bool m_bypass_limits;
441  /*
442  * Return any outpoints which were not previously present in the coins
443  * cache, but were added as a result of validating the tx for mempool
444  * acceptance. This allows the caller to optionally remove the cache
445  * additions if the associated transaction ends up being rejected by
446  * the mempool.
447  */
448  std::vector<COutPoint>& m_coins_to_uncache;
449  const bool m_test_accept;
 // When true, a mempool conflict may be considered for BIP125 replacement;
 // when false, any conflict with a mempool transaction is rejected outright
 // (see PreChecks: "bip125-replacement-disallowed").
453  const bool m_allow_bip125_replacement;
454 
 // Arguments for accepting a single transaction (replacement allowed).
456  static ATMPArgs SingleAccept(const CChainParams& chainparams, int64_t accept_time,
457  bool bypass_limits, std::vector<COutPoint>& coins_to_uncache,
458  bool test_accept) {
459  return ATMPArgs{/* m_chainparams */ chainparams,
460  /* m_accept_time */ accept_time,
461  /* m_bypass_limits */ bypass_limits,
462  /* m_coins_to_uncache */ coins_to_uncache,
463  /* m_test_accept */ test_accept,
464  /* m_allow_bip125_replacement */ true,
465  };
466  }
467 
 // Arguments for a test-only package validation: limits enforced,
 // test_accept forced on, replacement disallowed.
469  static ATMPArgs PackageTestAccept(const CChainParams& chainparams, int64_t accept_time,
470  std::vector<COutPoint>& coins_to_uncache) {
471  return ATMPArgs{/* m_chainparams */ chainparams,
472  /* m_accept_time */ accept_time,
473  /* m_bypass_limits */ false,
474  /* m_coins_to_uncache */ coins_to_uncache,
475  /* m_test_accept */ true,
476  /* m_allow_bip125_replacement */ false,
477  };
478  }
479 
480  // No default ctor to avoid exposing details to clients and allowing the possibility of
481  // mixing up the order of the arguments. Use static functions above instead.
482  ATMPArgs() = delete;
483  };
484 
485  // Single transaction acceptance
486  MempoolAcceptResult AcceptSingleTransaction(const CTransactionRef& ptx, ATMPArgs& args) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
487 
493  PackageMempoolAcceptResult AcceptMultipleTransactions(const std::vector<CTransactionRef>& txns, ATMPArgs& args) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
494 
495 private:
496  // All the intermediate state that gets passed between the various levels
497  // of checking a given transaction.
498  struct Workspace {
499  explicit Workspace(const CTransactionRef& ptx) : m_ptx(ptx), m_hash(ptx->GetHash()) {}
 // Txids of mempool transactions this tx directly conflicts with
 // (populated in PreChecks from m_pool.GetConflictTx()).
501  std::set<uint256> m_conflicts;
 // Mempool iterators for the entries in m_conflicts
 // (m_pool.GetIterSet(ws.m_conflicts) in PreChecks).
503  CTxMemPool::setEntries m_iters_conflicting;
 // NOTE(review): presumably the full set of entries (conflicts plus their
 // descendants) to be evicted on replacement -- the code filling this is
 // not in this chunk; confirm against ReplacementChecks/Finalize.
506  CTxMemPool::setEntries m_all_conflicting;
 // In-mempool ancestors of this tx, filled by
 // m_pool.CalculateMemPoolAncestors() in PreChecks.
508  CTxMemPool::setEntries m_ancestors;
 // Candidate mempool entry constructed in PreChecks (carries fee, accept
 // time, sigop cost and lock points).
511  std::unique_ptr<CTxMemPoolEntry> m_entry;
 // NOTE(review): presumably the transactions removed as replacements --
 // the code filling this is not in this chunk; confirm.
515  std::list<CTransactionRef> m_replaced_transactions;
516 
 // Virtual size of the tx (entry->GetTxSize(), set in PreChecks).
519  int64_t m_vsize;
 // Fees paid by the tx itself (set by Consensus::CheckTxInputs).
521  CAmount m_base_fees;
 // m_base_fees plus any PrioritiseTransaction delta (m_pool.ApplyDelta).
523  CAmount m_modified_fees;
 // Aggregate fees/size of entries being replaced; presumably accumulated
 // during replacement checks (not visible in this chunk) -- confirm.
525  CAmount m_conflicting_fees{0};
527  size_t m_conflicting_size{0};
528 
529  const CTransactionRef& m_ptx;
 // Txid of m_ptx, cached at construction.
531  const uint256& m_hash;
532  TxValidationState m_state;
 // Precomputed transaction data shared between the script-check passes
 // (see the CheckInputScripts() calls).
535  PrecomputedTransactionData m_precomputed_txdata;
536  };
537 
538  // Run the policy checks on a given transaction, excluding any script checks.
539  // Looks up inputs, calculates feerate, considers replacement, evaluates
540  // package limits, etc. As this function can be invoked for "free" by a peer,
541  // only tests that are fast should be done here (to avoid CPU DoS).
542  bool PreChecks(ATMPArgs& args, Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
543 
544  // Run checks for mempool replace-by-fee.
545  bool ReplacementChecks(Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
546 
547  // Enforce package mempool ancestor/descendant limits (distinct from individual
548  // ancestor/descendant limits done in PreChecks).
549  bool PackageMempoolChecks(const std::vector<CTransactionRef>& txns,
550  PackageValidationState& package_state) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
551 
552  // Run the script checks using our policy flags. As this can be slow, we should
553  // only invoke this on transactions that have otherwise passed policy checks.
554  bool PolicyScriptChecks(const ATMPArgs& args, Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
555 
556  // Re-run the script checks, using consensus flags, and try to cache the
557  // result in the scriptcache. This should be done after
558  // PolicyScriptChecks(). This requires that all inputs either be in our
559  // utxo set or in the mempool.
560  bool ConsensusScriptChecks(const ATMPArgs& args, Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
561 
562  // Try to add the transaction to the mempool, removing any conflicts first.
563  // Returns true if the transaction is in the mempool after any size
564  // limiting is performed, false otherwise.
565  bool Finalize(const ATMPArgs& args, Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
566 
567  // Compare a package's feerate against minimum allowed.
568  bool CheckFeeRate(size_t package_size, CAmount package_fee, TxValidationState& state) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs)
569  {
570  CAmount mempoolRejectFee = m_pool.GetMinFee(gArgs.GetIntArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFee(package_size);
571  if (mempoolRejectFee > 0 && package_fee < mempoolRejectFee) {
572  return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "mempool min fee not met", strprintf("%d < %d", package_fee, mempoolRejectFee));
573  }
574 
575  if (package_fee < ::minRelayTxFee.GetFee(package_size)) {
576  return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "min relay fee not met", strprintf("%d < %d", package_fee, ::minRelayTxFee.GetFee(package_size)));
577  }
578  return true;
579  }
580 
581 private:
582  CTxMemPool& m_pool;
 // m_view is the coins view used during validation; its backend is switched
 // between m_viewmempool (tip + mempool coins) and m_dummy once all inputs
 // have been cached (see PreChecks).
583  CCoinsViewCache m_view;
584  CCoinsViewMemPool m_viewmempool;
585  CCoinsView m_dummy;
586 
587  CChainState& m_active_chainstate;
588 
589  // The package limits in effect at the time of invocation.
590  const size_t m_limit_ancestors;
591  const size_t m_limit_ancestor_size;
592  // These may be modified while evaluating a transaction (eg to account for
593  // in-mempool conflicts; see below).
594  size_t m_limit_descendants;
595  size_t m_limit_descendant_size;
596 
 // Set at the end of PreChecks: whether this acceptance is a BIP125
 // replacement (the tx had mempool conflicts).
598  bool m_rbf{false};
599 };
600 
601 bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws)
602 {
603  const CTransactionRef& ptx = ws.m_ptx;
604  const CTransaction& tx = *ws.m_ptx;
605  const uint256& hash = ws.m_hash;
606 
607  // Copy/alias what we need out of args
608  const int64_t nAcceptTime = args.m_accept_time;
609  const bool bypass_limits = args.m_bypass_limits;
610  std::vector<COutPoint>& coins_to_uncache = args.m_coins_to_uncache;
611 
612  // Alias what we need out of ws
613  TxValidationState& state = ws.m_state;
614  std::unique_ptr<CTxMemPoolEntry>& entry = ws.m_entry;
615 
616  if (!CheckTransaction(tx, state)) {
617  return false; // state filled in by CheckTransaction
618  }
619 
620  // Coinbase is only valid in a block, not as a loose transaction
621  if (tx.IsCoinBase())
622  return state.Invalid(TxValidationResult::TX_CONSENSUS, "coinbase");
623 
624  // Rather not work on nonstandard transactions (unless -testnet/-regtest)
625  std::string reason;
626  if (fRequireStandard && !IsStandardTx(tx, reason))
627  return state.Invalid(TxValidationResult::TX_NOT_STANDARD, reason);
628 
629  // Do not work on transactions that are too small.
630  // A transaction with 1 segwit input and 1 P2WPHK output has non-witness size of 82 bytes.
631  // Transactions smaller than this are not relayed to mitigate CVE-2017-12842 by not relaying
632  // 64-byte transactions.
634  return state.Invalid(TxValidationResult::TX_NOT_STANDARD, "tx-size-small");
635 
636  // Only accept nLockTime-using transactions that can be mined in the next
637  // block; we don't want our mempool filled up with transactions that can't
638  // be mined yet.
639  if (!CheckFinalTx(m_active_chainstate.m_chain.Tip(), tx, STANDARD_LOCKTIME_VERIFY_FLAGS))
640  return state.Invalid(TxValidationResult::TX_PREMATURE_SPEND, "non-final");
641 
642  if (m_pool.exists(GenTxid::Wtxid(tx.GetWitnessHash()))) {
643  // Exact transaction already exists in the mempool.
644  return state.Invalid(TxValidationResult::TX_CONFLICT, "txn-already-in-mempool");
645  } else if (m_pool.exists(GenTxid::Txid(tx.GetHash()))) {
646  // Transaction with the same non-witness data but different witness (same txid, different
647  // wtxid) already exists in the mempool.
648  return state.Invalid(TxValidationResult::TX_CONFLICT, "txn-same-nonwitness-data-in-mempool");
649  }
650 
651  // Check for conflicts with in-memory transactions
652  for (const CTxIn &txin : tx.vin)
653  {
654  const CTransaction* ptxConflicting = m_pool.GetConflictTx(txin.prevout);
655  if (ptxConflicting) {
656  if (!args.m_allow_bip125_replacement) {
657  // Transaction conflicts with a mempool tx, but we're not allowing replacements.
658  return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "bip125-replacement-disallowed");
659  }
660  if (!ws.m_conflicts.count(ptxConflicting->GetHash()))
661  {
662  // Transactions that don't explicitly signal replaceability are
663  // *not* replaceable with the current logic, even if one of their
664  // unconfirmed ancestors signals replaceability. This diverges
665  // from BIP125's inherited signaling description (see CVE-2021-31876).
666  // Applications relying on first-seen mempool behavior should
667  // check all unconfirmed ancestors; otherwise an opt-in ancestor
668  // might be replaced, causing removal of this descendant.
669  if (!SignalsOptInRBF(*ptxConflicting)) {
670  return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "txn-mempool-conflict");
671  }
672 
673  ws.m_conflicts.insert(ptxConflicting->GetHash());
674  }
675  }
676  }
677 
678  LockPoints lp;
679  m_view.SetBackend(m_viewmempool);
680 
681  const CCoinsViewCache& coins_cache = m_active_chainstate.CoinsTip();
682  // do all inputs exist?
683  for (const CTxIn& txin : tx.vin) {
684  if (!coins_cache.HaveCoinInCache(txin.prevout)) {
685  coins_to_uncache.push_back(txin.prevout);
686  }
687 
688  // Note: this call may add txin.prevout to the coins cache
689  // (coins_cache.cacheCoins) by way of FetchCoin(). It should be removed
690  // later (via coins_to_uncache) if this tx turns out to be invalid.
691  if (!m_view.HaveCoin(txin.prevout)) {
692  // Are inputs missing because we already have the tx?
693  for (size_t out = 0; out < tx.vout.size(); out++) {
694  // Optimistically just do efficient check of cache for outputs
695  if (coins_cache.HaveCoinInCache(COutPoint(hash, out))) {
696  return state.Invalid(TxValidationResult::TX_CONFLICT, "txn-already-known");
697  }
698  }
699  // Otherwise assume this might be an orphan tx for which we just haven't seen parents yet
700  return state.Invalid(TxValidationResult::TX_MISSING_INPUTS, "bad-txns-inputs-missingorspent");
701  }
702  }
703 
704  // This is const, but calls into the back end CoinsViews. The CCoinsViewDB at the bottom of the
705  // hierarchy brings the best block into scope. See CCoinsViewDB::GetBestBlock().
706  m_view.GetBestBlock();
707 
708  // we have all inputs cached now, so switch back to dummy (to protect
709  // against bugs where we pull more inputs from disk that miss being added
710  // to coins_to_uncache)
711  m_view.SetBackend(m_dummy);
712 
713  // Only accept BIP68 sequence locked transactions that can be mined in the next
714  // block; we don't want our mempool filled up with transactions that can't
715  // be mined yet.
716  // Pass in m_view which has all of the relevant inputs cached. Note that, since m_view's
717  // backend was removed, it no longer pulls coins from the mempool.
718  if (!CheckSequenceLocks(m_active_chainstate.m_chain.Tip(), m_view, tx, STANDARD_LOCKTIME_VERIFY_FLAGS, &lp))
719  return state.Invalid(TxValidationResult::TX_PREMATURE_SPEND, "non-BIP68-final");
720 
721  if (!Consensus::CheckTxInputs(tx, state, m_view, m_active_chainstate.m_blockman.GetSpendHeight(m_view), ws.m_base_fees)) {
722  return false; // state filled in by CheckTxInputs
723  }
724 
725  // Check for non-standard pay-to-script-hash in inputs
726  if (fRequireStandard && !AreInputsStandard(tx, m_view)) {
727  return state.Invalid(TxValidationResult::TX_INPUTS_NOT_STANDARD, "bad-txns-nonstandard-inputs");
728  }
729 
730  // Check for non-standard witnesses.
731  if (tx.HasWitness() && fRequireStandard && !IsWitnessStandard(tx, m_view))
732  return state.Invalid(TxValidationResult::TX_WITNESS_MUTATED, "bad-witness-nonstandard");
733 
734  int64_t nSigOpsCost = GetTransactionSigOpCost(tx, m_view, STANDARD_SCRIPT_VERIFY_FLAGS);
735 
736  // ws.m_modified_fees includes any fee deltas from PrioritiseTransaction
737  ws.m_modified_fees = ws.m_base_fees;
738  m_pool.ApplyDelta(hash, ws.m_modified_fees);
739 
740  // Keep track of transactions that spend a coinbase, which we re-scan
741  // during reorgs to ensure COINBASE_MATURITY is still met.
742  bool fSpendsCoinbase = false;
743  for (const CTxIn &txin : tx.vin) {
744  const Coin &coin = m_view.AccessCoin(txin.prevout);
745  if (coin.IsCoinBase()) {
746  fSpendsCoinbase = true;
747  break;
748  }
749  }
750 
751  entry.reset(new CTxMemPoolEntry(ptx, ws.m_base_fees, nAcceptTime, m_active_chainstate.m_chain.Height(),
752  fSpendsCoinbase, nSigOpsCost, lp));
753  ws.m_vsize = entry->GetTxSize();
754 
755  if (nSigOpsCost > MAX_STANDARD_TX_SIGOPS_COST)
756  return state.Invalid(TxValidationResult::TX_NOT_STANDARD, "bad-txns-too-many-sigops",
757  strprintf("%d", nSigOpsCost));
758 
759  // No transactions are allowed below minRelayTxFee except from disconnected
760  // blocks
761  if (!bypass_limits && !CheckFeeRate(ws.m_vsize, ws.m_modified_fees, state)) return false;
762 
763  ws.m_iters_conflicting = m_pool.GetIterSet(ws.m_conflicts);
764  // Calculate in-mempool ancestors, up to a limit.
765  if (ws.m_conflicts.size() == 1) {
766  // In general, when we receive an RBF transaction with mempool conflicts, we want to know whether we
767  // would meet the chain limits after the conflicts have been removed. However, there isn't a practical
768  // way to do this short of calculating the ancestor and descendant sets with an overlay cache of
769  // changed mempool entries. Due to both implementation and runtime complexity concerns, this isn't
770  // very realistic, thus we only ensure a limited set of transactions are RBF'able despite mempool
771  // conflicts here. Importantly, we need to ensure that some transactions which were accepted using
772  // the below carve-out are able to be RBF'ed, without impacting the security the carve-out provides
773  // for off-chain contract systems (see link in the comment below).
774  //
775  // Specifically, the subset of RBF transactions which we allow despite chain limits are those which
776  // conflict directly with exactly one other transaction (but may evict children of said transaction),
777  // and which are not adding any new mempool dependencies. Note that the "no new mempool dependencies"
778  // check is accomplished later, so we don't bother doing anything about it here, but if BIP 125 is
779  // amended, we may need to move that check to here instead of removing it wholesale.
780  //
781  // Such transactions are clearly not merging any existing packages, so we are only concerned with
782  // ensuring that (a) no package is growing past the package size (not count) limits and (b) we are
783  // not allowing something to effectively use the (below) carve-out spot when it shouldn't be allowed
784  // to.
785  //
786  // To check these we first check if we meet the RBF criteria, above, and increment the descendant
787  // limits by the direct conflict and its descendants (as these are recalculated in
788  // CalculateMempoolAncestors by assuming the new transaction being added is a new descendant, with no
789  // removals, of each parent's existing dependent set). The ancestor count limits are unmodified (as
790  // the ancestor limits should be the same for both our new transaction and any conflicts).
791  // We don't bother incrementing m_limit_descendants by the full removal count as that limit never comes
792  // into force here (as we're only adding a single transaction).
793  assert(ws.m_iters_conflicting.size() == 1);
794  CTxMemPool::txiter conflict = *ws.m_iters_conflicting.begin();
795 
796  m_limit_descendants += 1;
797  m_limit_descendant_size += conflict->GetSizeWithDescendants();
798  }
799 
800  std::string errString;
801  if (!m_pool.CalculateMemPoolAncestors(*entry, ws.m_ancestors, m_limit_ancestors, m_limit_ancestor_size, m_limit_descendants, m_limit_descendant_size, errString)) {
802  ws.m_ancestors.clear();
803  // If CalculateMemPoolAncestors fails second time, we want the original error string.
804  std::string dummy_err_string;
805  // Contracting/payment channels CPFP carve-out:
806  // If the new transaction is relatively small (up to 40k weight)
807  // and has at most one ancestor (ie ancestor limit of 2, including
808  // the new transaction), allow it if its parent has exactly the
809  // descendant limit descendants.
810  //
811  // This allows protocols which rely on distrusting counterparties
812  // being able to broadcast descendants of an unconfirmed transaction
813  // to be secure by simply only having two immediately-spendable
814  // outputs - one for each counterparty. For more info on the uses for
815  // this, see https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2018-November/016518.html
816  if (ws.m_vsize > EXTRA_DESCENDANT_TX_SIZE_LIMIT ||
817  !m_pool.CalculateMemPoolAncestors(*entry, ws.m_ancestors, 2, m_limit_ancestor_size, m_limit_descendants + 1, m_limit_descendant_size + EXTRA_DESCENDANT_TX_SIZE_LIMIT, dummy_err_string)) {
818  return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "too-long-mempool-chain", errString);
819  }
820  }
821 
822  // A transaction that spends outputs that would be replaced by it is invalid. Now
823  // that we have the set of all ancestors we can detect this
824  // pathological case by making sure ws.m_conflicts and ws.m_ancestors don't
825  // intersect.
826  if (const auto err_string{EntriesAndTxidsDisjoint(ws.m_ancestors, ws.m_conflicts, hash)}) {
827  // We classify this as a consensus error because a transaction depending on something it
828  // conflicts with would be inconsistent.
829  return state.Invalid(TxValidationResult::TX_CONSENSUS, "bad-txns-spends-conflicting-tx", *err_string);
830  }
831 
832  m_rbf = !ws.m_conflicts.empty();
833  return true;
834 }
835 
836 bool MemPoolAccept::ReplacementChecks(Workspace& ws)
837 {
839  AssertLockHeld(m_pool.cs);
840 
841  const CTransaction& tx = *ws.m_ptx;
842  const uint256& hash = ws.m_hash;
843  TxValidationState& state = ws.m_state;
844 
845  CFeeRate newFeeRate(ws.m_modified_fees, ws.m_vsize);
846  // It's possible that the replacement pays more fees than its direct conflicts but not more
847  // than all conflicts (i.e. the direct conflicts have high-fee descendants). However, if the
848  // replacement doesn't pay more fees than its direct conflicts, then we can be sure it's not
849  // more economically rational to mine. Before we go digging through the mempool for all
850  // transactions that would need to be removed (direct conflicts and all descendants), check
851  // that the replacement transaction pays more than its direct conflicts.
852  if (const auto err_string{PaysMoreThanConflicts(ws.m_iters_conflicting, newFeeRate, hash)}) {
853  return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "insufficient fee", *err_string);
854  }
855 
856  // Calculate all conflicting entries and enforce BIP125 Rule #5.
857  if (const auto err_string{GetEntriesForConflicts(tx, m_pool, ws.m_iters_conflicting, ws.m_all_conflicting)}) {
859  "too many potential replacements", *err_string);
860  }
861  // Enforce BIP125 Rule #2.
862  if (const auto err_string{HasNoNewUnconfirmed(tx, m_pool, ws.m_iters_conflicting)}) {
864  "replacement-adds-unconfirmed", *err_string);
865  }
866  // Check if it's economically rational to mine this transaction rather than the ones it
867  // replaces and pays for its own relay fees. Enforce BIP125 Rules #3 and #4.
868  for (CTxMemPool::txiter it : ws.m_all_conflicting) {
869  ws.m_conflicting_fees += it->GetModifiedFee();
870  ws.m_conflicting_size += it->GetTxSize();
871  }
872  if (const auto err_string{PaysForRBF(ws.m_conflicting_fees, ws.m_modified_fees, ws.m_vsize,
873  ::incrementalRelayFee, hash)}) {
874  return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "insufficient fee", *err_string);
875  }
876  return true;
877 }
878 
// Enforce the mempool ancestor/descendant count and size limits for a whole
// package at once via CTxMemPool::CheckPackageLimits(). A violation is
// reported as a single package-wide PCKG_POLICY error rather than being
// attributed to any individual transaction.
bool MemPoolAccept::PackageMempoolChecks(const std::vector<CTransactionRef>& txns,
                                         PackageValidationState& package_state)
{
    AssertLockHeld(m_pool.cs);

    std::string err_string;
    if (!m_pool.CheckPackageLimits(txns, m_limit_ancestors, m_limit_ancestor_size, m_limit_descendants,
                                   m_limit_descendant_size, err_string)) {
        // This is a package-wide error, separate from an individual transaction error.
        return package_state.Invalid(PackageValidationResult::PCKG_POLICY, "package-mempool-limits", err_string);
    }
    return true;
}
893 
894 bool MemPoolAccept::PolicyScriptChecks(const ATMPArgs& args, Workspace& ws)
895 {
896  const CTransaction& tx = *ws.m_ptx;
897  TxValidationState& state = ws.m_state;
898 
899  constexpr unsigned int scriptVerifyFlags = STANDARD_SCRIPT_VERIFY_FLAGS;
900 
901  // Check input scripts and signatures.
902  // This is done last to help prevent CPU exhaustion denial-of-service attacks.
903  if (!CheckInputScripts(tx, state, m_view, scriptVerifyFlags, true, false, ws.m_precomputed_txdata)) {
904  // SCRIPT_VERIFY_CLEANSTACK requires SCRIPT_VERIFY_WITNESS, so we
905  // need to turn both off, and compare against just turning off CLEANSTACK
906  // to see if the failure is specifically due to witness validation.
907  TxValidationState state_dummy; // Want reported failures to be from first CheckInputScripts
908  if (!tx.HasWitness() && CheckInputScripts(tx, state_dummy, m_view, scriptVerifyFlags & ~(SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_CLEANSTACK), true, false, ws.m_precomputed_txdata) &&
909  !CheckInputScripts(tx, state_dummy, m_view, scriptVerifyFlags & ~SCRIPT_VERIFY_CLEANSTACK, true, false, ws.m_precomputed_txdata)) {
910  // Only the witness is missing, so the transaction itself may be fine.
912  state.GetRejectReason(), state.GetDebugMessage());
913  }
914  return false; // state filled in by CheckInputScripts
915  }
916 
917  return true;
918 }
919 
920 bool MemPoolAccept::ConsensusScriptChecks(const ATMPArgs& args, Workspace& ws)
921 {
922  const CTransaction& tx = *ws.m_ptx;
923  const uint256& hash = ws.m_hash;
924  TxValidationState& state = ws.m_state;
925  const CChainParams& chainparams = args.m_chainparams;
926 
927  // Check again against the current block tip's script verification
928  // flags to cache our script execution flags. This is, of course,
929  // useless if the next block has different script flags from the
930  // previous one, but because the cache tracks script flags for us it
931  // will auto-invalidate and we'll just have a few blocks of extra
932  // misses on soft-fork activation.
933  //
934  // This is also useful in case of bugs in the standard flags that cause
935  // transactions to pass as valid when they're actually invalid. For
936  // instance the STRICTENC flag was incorrectly allowing certain
937  // CHECKSIG NOT scripts to pass, even though they were invalid.
938  //
939  // There is a similar check in CreateNewBlock() to prevent creating
940  // invalid blocks (using TestBlockValidity), however allowing such
941  // transactions into the mempool can be exploited as a DoS attack.
942  unsigned int currentBlockScriptVerifyFlags = GetBlockScriptFlags(m_active_chainstate.m_chain.Tip(), chainparams.GetConsensus());
943  if (!CheckInputsFromMempoolAndCache(tx, state, m_view, m_pool, currentBlockScriptVerifyFlags,
944  ws.m_precomputed_txdata, m_active_chainstate.CoinsTip())) {
945  return error("%s: BUG! PLEASE REPORT THIS! CheckInputScripts failed against latest-block but not STANDARD flags %s, %s",
946  __func__, hash.ToString(), state.ToString());
947  }
948 
949  return true;
950 }
951 
952 bool MemPoolAccept::Finalize(const ATMPArgs& args, Workspace& ws)
953 {
954  const CTransaction& tx = *ws.m_ptx;
955  const uint256& hash = ws.m_hash;
956  TxValidationState& state = ws.m_state;
957  const bool bypass_limits = args.m_bypass_limits;
958 
959  std::unique_ptr<CTxMemPoolEntry>& entry = ws.m_entry;
960 
961  // Remove conflicting transactions from the mempool
962  for (CTxMemPool::txiter it : ws.m_all_conflicting)
963  {
964  LogPrint(BCLog::MEMPOOL, "replacing tx %s with %s for %s additional fees, %d delta bytes\n",
965  it->GetTx().GetHash().ToString(),
966  hash.ToString(),
967  FormatMoney(ws.m_modified_fees - ws.m_conflicting_fees),
968  (int)entry->GetTxSize() - (int)ws.m_conflicting_size);
969  ws.m_replaced_transactions.push_back(it->GetSharedTx());
970  }
971  m_pool.RemoveStaged(ws.m_all_conflicting, false, MemPoolRemovalReason::REPLACED);
972 
973  // This transaction should only count for fee estimation if:
974  // - it's not being re-added during a reorg which bypasses typical mempool fee limits
975  // - the node is not behind
976  // - the transaction is not dependent on any other transactions in the mempool
977  bool validForFeeEstimation = !bypass_limits && IsCurrentForFeeEstimation(m_active_chainstate) && m_pool.HasNoInputsOf(tx);
978 
979  // Store transaction in memory
980  m_pool.addUnchecked(*entry, ws.m_ancestors, validForFeeEstimation);
981 
982  // trim mempool and check if tx was trimmed
983  if (!bypass_limits) {
984  LimitMempoolSize(m_pool, m_active_chainstate.CoinsTip(), gArgs.GetIntArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, std::chrono::hours{gArgs.GetIntArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY)});
985  if (!m_pool.exists(GenTxid::Txid(hash)))
986  return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "mempool full");
987  }
988  return true;
989 }
990 
// Drive the full single-transaction acceptance pipeline:
// PreChecks -> (ReplacementChecks if conflicts) -> PolicyScriptChecks ->
// ConsensusScriptChecks -> Finalize, short-circuiting on the first failure.
MempoolAcceptResult MemPoolAccept::AcceptSingleTransaction(const CTransactionRef& ptx, ATMPArgs& args)
{
    LOCK(m_pool.cs); // mempool "read lock" (held through GetMainSignals().TransactionAddedToMempool())

    Workspace ws(ptx);

    // Cheap contextual/policy checks; also gathers conflicts and sets m_rbf.
    if (!PreChecks(args, ws)) return MempoolAcceptResult::Failure(ws.m_state);

    // BIP125 replacement rules only apply when PreChecks found conflicts.
    if (m_rbf && !ReplacementChecks(ws)) return MempoolAcceptResult::Failure(ws.m_state);

    // Perform the inexpensive checks first and avoid hashing and signature verification unless
    // those checks pass, to mitigate CPU exhaustion denial-of-service attacks.
    if (!PolicyScriptChecks(args, ws)) return MempoolAcceptResult::Failure(ws.m_state);

    if (!ConsensusScriptChecks(args, ws)) return MempoolAcceptResult::Failure(ws.m_state);

    // Tx was accepted, but not added
    if (args.m_test_accept) {
        return MempoolAcceptResult::Success(std::move(ws.m_replaced_transactions), ws.m_vsize, ws.m_base_fees);
    }

    // Commit: evict replaced transactions, insert, and trim the mempool.
    if (!Finalize(args, ws)) return MempoolAcceptResult::Failure(ws.m_state);

    GetMainSignals().TransactionAddedToMempool(ptx, m_pool.GetAndIncrementSequence());

    return MempoolAcceptResult::Success(std::move(ws.m_replaced_transactions), ws.m_vsize, ws.m_base_fees);
}
1019 
// Validate a package of transactions together (dry-run only; see
// ProcessNewPackage). Runs all PreChecks first, then package-wide limits,
// then per-transaction script checks, failing fast on the first error.
PackageMempoolAcceptResult MemPoolAccept::AcceptMultipleTransactions(const std::vector<CTransactionRef>& txns, ATMPArgs& args)
{

    // These context-free package limits can be done before taking the mempool lock.
    PackageValidationState package_state;
    if (!CheckPackage(txns, package_state)) return PackageMempoolAcceptResult(package_state, {});

    // One Workspace per transaction, in package order.
    std::vector<Workspace> workspaces{};
    workspaces.reserve(txns.size());
    std::transform(txns.cbegin(), txns.cend(), std::back_inserter(workspaces),
                   [](const auto& tx) { return Workspace(tx); });
    // Keyed by wtxid; only holds entries for transactions that reached a verdict.
    std::map<const uint256, const MempoolAcceptResult> results;

    LOCK(m_pool.cs);

    // Do all PreChecks first and fail fast to avoid running expensive script checks when unnecessary.
    for (Workspace& ws : workspaces) {
        if (!PreChecks(args, ws)) {
            package_state.Invalid(PackageValidationResult::PCKG_TX, "transaction failed");
            // Exit early to avoid doing pointless work. Update the failed tx result; the rest are unfinished.
            results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state));
            return PackageMempoolAcceptResult(package_state, std::move(results));
        }
        // Make the coins created by this transaction available for subsequent transactions in the
        // package to spend. Since we already checked conflicts in the package and we don't allow
        // replacements, we don't need to track the coins spent. Note that this logic will need to be
        // updated if package replace-by-fee is allowed in the future.
        assert(!args.m_allow_bip125_replacement);
        m_viewmempool.PackageAddTransaction(ws.m_ptx);
    }

    // Apply package mempool ancestor/descendant limits. Skip if there is only one transaction,
    // because it's unnecessary. Also, CPFP carve out can increase the limit for individual
    // transactions, but this exemption is not extended to packages in CheckPackageLimits().
    // NOTE(review): err_string is unused here — PackageMempoolChecks reports via
    // package_state; consider removing the local.
    std::string err_string;
    if (txns.size() > 1 && !PackageMempoolChecks(txns, package_state)) {
        return PackageMempoolAcceptResult(package_state, std::move(results));
    }

    for (Workspace& ws : workspaces) {
        if (!PolicyScriptChecks(args, ws)) {
            // Exit early to avoid doing pointless work. Update the failed tx result; the rest are unfinished.
            package_state.Invalid(PackageValidationResult::PCKG_TX, "transaction failed");
            results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state));
            return PackageMempoolAcceptResult(package_state, std::move(results));
        }
        if (args.m_test_accept) {
            // When test_accept=true, transactions that pass PolicyScriptChecks are valid because there are
            // no further mempool checks (passing PolicyScriptChecks implies passing ConsensusScriptChecks).
            results.emplace(ws.m_ptx->GetWitnessHash(),
                            MempoolAcceptResult::Success(std::move(ws.m_replaced_transactions),
                                                         ws.m_vsize, ws.m_base_fees));
        }
    }

    return PackageMempoolAcceptResult(package_state, std::move(results));
}
1078 
1079 } // anon namespace
1080 
1083  CChainState& active_chainstate,
1084  const CTransactionRef &tx, int64_t nAcceptTime,
1085  bool bypass_limits, bool test_accept)
1087 {
1088  std::vector<COutPoint> coins_to_uncache;
1089  auto args = MemPoolAccept::ATMPArgs::SingleAccept(chainparams, nAcceptTime, bypass_limits, coins_to_uncache, test_accept);
1090  const MempoolAcceptResult result = MemPoolAccept(pool, active_chainstate).AcceptSingleTransaction(tx, args);
1092  // Remove coins that were not present in the coins cache before calling
1093  // AcceptSingleTransaction(); this is to prevent memory DoS in case we receive a large
1094  // number of invalid transactions that attempt to overrun the in-memory coins cache
1095  // (`CCoinsViewCache::cacheCoins`).
1096 
1097  for (const COutPoint& hashTx : coins_to_uncache)
1098  active_chainstate.CoinsTip().Uncache(hashTx);
1099  }
1100  // After we've (potentially) uncached entries, ensure our coins cache is still within its size limits
1101  BlockValidationState state_dummy;
1102  active_chainstate.FlushStateToDisk(state_dummy, FlushStateMode::PERIODIC);
1103  return result;
1104 }
1105 
1107  bool bypass_limits, bool test_accept)
1108 {
1109  return AcceptToMemoryPoolWithTime(Params(), pool, active_chainstate, tx, GetTime(), bypass_limits, test_accept);
1110 }
1111 
1113  const Package& package, bool test_accept)
1114 {
1116  assert(test_accept); // Only allow package accept dry-runs (testmempoolaccept RPC).
1117  assert(!package.empty());
1118  assert(std::all_of(package.cbegin(), package.cend(), [](const auto& tx){return tx != nullptr;}));
1119 
1120  std::vector<COutPoint> coins_to_uncache;
1121  const CChainParams& chainparams = Params();
1122  auto args = MemPoolAccept::ATMPArgs::PackageTestAccept(chainparams, GetTime(), coins_to_uncache);
1123  const PackageMempoolAcceptResult result = MemPoolAccept(pool, active_chainstate).AcceptMultipleTransactions(package, args);
1124 
1125  // Uncache coins pertaining to transactions that were not submitted to the mempool.
1126  for (const COutPoint& hashTx : coins_to_uncache) {
1127  active_chainstate.CoinsTip().Uncache(hashTx);
1128  }
1129  return result;
1130 }
1131 
1132 CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams)
1133 {
1134  int halvings = nHeight / consensusParams.nSubsidyHalvingInterval;
1135  // Force block reward to zero when right shift is undefined.
1136  if (halvings >= 64)
1137  return 0;
1138 
1139  CAmount nSubsidy = 50 * COIN;
1140  // Subsidy is cut in half every 210,000 blocks which will occur approximately every 4 years.
1141  nSubsidy >>= halvings;
1142  return nSubsidy;
1143 }
1144 
1146  std::string ldb_name,
1147  size_t cache_size_bytes,
1148  bool in_memory,
1149  bool should_wipe) : m_dbview(
1150  gArgs.GetDataDirNet() / ldb_name, cache_size_bytes, in_memory, should_wipe),
1151  m_catcherview(&m_dbview) {}
1152 
// Create the in-memory coins cache on top of the error-catcher view. Kept
// separate from the constructor so the cache can be (re)created on demand.
void CoinsViews::InitCache()
{
    m_cacheview = std::make_unique<CCoinsViewCache>(&m_catcherview);
}
1157 
1159  CTxMemPool* mempool,
1160  BlockManager& blockman,
1161  ChainstateManager& chainman,
1162  std::optional<uint256> from_snapshot_blockhash)
1163  : m_mempool(mempool),
1164  m_params(::Params()),
1165  m_blockman(blockman),
1166  m_chainman(chainman),
1167  m_from_snapshot_blockhash(from_snapshot_blockhash) {}
1168 
1170  size_t cache_size_bytes,
1171  bool in_memory,
1172  bool should_wipe,
1173  std::string leveldb_name)
1174 {
1176  leveldb_name += "_" + m_from_snapshot_blockhash->ToString();
1177  }
1178 
1179  m_coins_views = std::make_unique<CoinsViews>(
1180  leveldb_name, cache_size_bytes, in_memory, should_wipe);
1181 }
1182 
// Record the configured coins-tip cache budget and create the in-memory cache
// layer. InitCoinsDB() must already have been called (hence the assert).
void CChainState::InitCoinsCache(size_t cache_size_bytes)
{
    assert(m_coins_views != nullptr);
    m_coinstip_cache_size_bytes = cache_size_bytes;
    m_coins_views->InitCache();
}
1189 
1190 // Note that though this is marked const, we may end up modifying `m_cached_finished_ibd`, which
1191 // is a performance-related implementation detail. This function must be marked
1192 // `const` so that `CValidationInterface` clients (which are given a `const CChainState*`)
1193 // can call it.
1194 //
1195 bool CChainState::IsInitialBlockDownload() const
1196 {
1197  // Optimization: pre-test latch before taking the lock.
1198  if (m_cached_finished_ibd.load(std::memory_order_relaxed))
1199  return false;
1200 
1201  LOCK(cs_main);
1202  if (m_cached_finished_ibd.load(std::memory_order_relaxed))
1203  return false;
1204  if (fImporting || fReindex)
1205  return true;
1206  if (m_chain.Tip() == nullptr)
1207  return true;
1209  return true;
1210  if (m_chain.Tip()->GetBlockTime() < (GetTime() - nMaxTipAge))
1211  return true;
1212  LogPrintf("Leaving InitialBlockDownload (latching to false)\n");
1213  m_cached_finished_ibd.store(true, std::memory_order_relaxed);
1214  return false;
1215 }
1216 
1217 static void AlertNotify(const std::string& strMessage)
1218 {
1219  uiInterface.NotifyAlertChanged();
1220 #if HAVE_SYSTEM
1221  std::string strCmd = gArgs.GetArg("-alertnotify", "");
1222  if (strCmd.empty()) return;
1223 
1224  // Alert text should be plain ascii coming from a trusted source, but to
1225  // be safe we first strip anything not in safeChars, then add single quotes around
1226  // the whole string before passing it to the shell:
1227  std::string singleQuote("'");
1228  std::string safeStatus = SanitizeString(strMessage);
1229  safeStatus = singleQuote+safeStatus+singleQuote;
1230  boost::replace_all(strCmd, "%s", safeStatus);
1231 
1232  std::thread t(runCommand, strCmd);
1233  t.detach(); // thread runs free
1234 #endif
1235 }
1236 
1238 {
1240 
1241  // Before we get past initial download, we cannot reliably alert about forks
1242  // (we assume we don't get stuck on a fork before finishing our initial sync)
1243  if (IsInitialBlockDownload()) {
1244  return;
1245  }
1246 
1247  if (pindexBestInvalid && pindexBestInvalid->nChainWork > m_chain.Tip()->nChainWork + (GetBlockProof(*m_chain.Tip()) * 6)) {
1248  LogPrintf("%s: Warning: Found invalid chain at least ~6 blocks longer than our best chain.\nChain state database corruption likely.\n", __func__);
1250  } else {
1252  }
1253 }
1254 
1255 // Called both upon regular invalid block discovery *and* InvalidateBlock
1257 {
1258  if (!pindexBestInvalid || pindexNew->nChainWork > pindexBestInvalid->nChainWork)
1259  pindexBestInvalid = pindexNew;
1260  if (pindexBestHeader != nullptr && pindexBestHeader->GetAncestor(pindexNew->nHeight) == pindexNew) {
1262  }
1263 
1264  LogPrintf("%s: invalid block=%s height=%d log2_work=%f date=%s\n", __func__,
1265  pindexNew->GetBlockHash().ToString(), pindexNew->nHeight,
1266  log(pindexNew->nChainWork.getdouble())/log(2.0), FormatISO8601DateTime(pindexNew->GetBlockTime()));
1267  CBlockIndex *tip = m_chain.Tip();
1268  assert (tip);
1269  LogPrintf("%s: current best=%s height=%d log2_work=%f date=%s\n", __func__,
1270  tip->GetBlockHash().ToString(), m_chain.Height(), log(tip->nChainWork.getdouble())/log(2.0),
1273 }
1274 
1275 // Same as InvalidChainFound, above, except not called directly from InvalidateBlock,
1276 // which does its own setBlockIndexCandidates management.
1278 {
1280  pindex->nStatus |= BLOCK_FAILED_VALID;
1281  m_blockman.m_failed_blocks.insert(pindex);
1282  setDirtyBlockIndex.insert(pindex);
1283  setBlockIndexCandidates.erase(pindex);
1284  InvalidChainFound(pindex);
1285  }
1286 }
1287 
1288 void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, CTxUndo &txundo, int nHeight)
1289 {
1290  // mark inputs spent
1291  if (!tx.IsCoinBase()) {
1292  txundo.vprevout.reserve(tx.vin.size());
1293  for (const CTxIn &txin : tx.vin) {
1294  txundo.vprevout.emplace_back();
1295  bool is_spent = inputs.SpendCoin(txin.prevout, &txundo.vprevout.back());
1296  assert(is_spent);
1297  }
1298  }
1299  // add outputs
1300  AddCoins(inputs, tx, nHeight);
1301 }
1302 
1304  const CScript &scriptSig = ptxTo->vin[nIn].scriptSig;
1305  const CScriptWitness *witness = &ptxTo->vin[nIn].scriptWitness;
1307 }
1308 
{
    // Height a transaction would have if included in the next block, i.e. the
    // height at which coins from this view would be spent (tip height + 1).
    CBlockIndex* pindexPrev = LookupBlockIndex(inputs.GetBestBlock());
    // NOTE(review): pindexPrev is dereferenced unchecked — assumes the view's
    // best block hash is always present in the block index; confirm callers
    // guarantee this.
    return pindexPrev->nHeight + 1;
}
1315 
1316 
1319 
    // Setup the salted hasher
    // We want the nonce to be 64 bytes long to force the hasher to process
    // this chunk, which makes later hash computations more efficient. We
    // just write our 32-byte entropy twice to fill the 64 bytes.
    // nMaxCacheSize is unsigned. If -maxsigcachesize is set to zero,
    // setup_bytes creates the minimum possible cache (2 elements).
    // The "/2" halves -maxsigcachesize for this cache (the log below reports it
    // as "%zu/2 requested"); presumably the other half is budgeted to the
    // signature cache — confirm against sigcache setup.
    size_t nMaxCacheSize = std::min(std::max((int64_t)0, gArgs.GetIntArg("-maxsigcachesize", DEFAULT_MAX_SIG_CACHE_SIZE) / 2), MAX_MAX_SIG_CACHE_SIZE) * ((size_t) 1 << 20);
    size_t nElems = g_scriptExecutionCache.setup_bytes(nMaxCacheSize);
    LogPrintf("Using %zu MiB out of %zu/2 requested for script execution cache, able to store %zu elements\n",
              (nElems*sizeof(uint256)) >>20, (nMaxCacheSize*2)>>20, nElems);
}
1335 
1356  const CCoinsViewCache& inputs, unsigned int flags, bool cacheSigStore,
1357  bool cacheFullScriptStore, PrecomputedTransactionData& txdata,
1358  std::vector<CScriptCheck>* pvChecks)
1359 {
1360  if (tx.IsCoinBase()) return true;
1361 
1362  if (pvChecks) {
1363  pvChecks->reserve(tx.vin.size());
1364  }
1365 
1366  // First check if script executions have been cached with the same
1367  // flags. Note that this assumes that the inputs provided are
1368  // correct (ie that the transaction hash which is in tx's prevouts
1369  // properly commits to the scriptPubKey in the inputs view of that
1370  // transaction).
1371  uint256 hashCacheEntry;
1373  hasher.Write(tx.GetWitnessHash().begin(), 32).Write((unsigned char*)&flags, sizeof(flags)).Finalize(hashCacheEntry.begin());
1374  AssertLockHeld(cs_main); //TODO: Remove this requirement by making CuckooCache not require external locks
1375  if (g_scriptExecutionCache.contains(hashCacheEntry, !cacheFullScriptStore)) {
1376  return true;
1377  }
1378 
1379  if (!txdata.m_spent_outputs_ready) {
1380  std::vector<CTxOut> spent_outputs;
1381  spent_outputs.reserve(tx.vin.size());
1382 
1383  for (const auto& txin : tx.vin) {
1384  const COutPoint& prevout = txin.prevout;
1385  const Coin& coin = inputs.AccessCoin(prevout);
1386  assert(!coin.IsSpent());
1387  spent_outputs.emplace_back(coin.out);
1388  }
1389  txdata.Init(tx, std::move(spent_outputs));
1390  }
1391  assert(txdata.m_spent_outputs.size() == tx.vin.size());
1392 
1393  for (unsigned int i = 0; i < tx.vin.size(); i++) {
1394 
1395  // We very carefully only pass in things to CScriptCheck which
1396  // are clearly committed to by tx' witness hash. This provides
1397  // a sanity check that our caching is not introducing consensus
1398  // failures through additional data in, eg, the coins being
1399  // spent being checked as a part of CScriptCheck.
1400 
1401  // Verify signature
1402  CScriptCheck check(txdata.m_spent_outputs[i], tx, i, flags, cacheSigStore, &txdata);
1403  if (pvChecks) {
1404  pvChecks->push_back(CScriptCheck());
1405  check.swap(pvChecks->back());
1406  } else if (!check()) {
1408  // Check whether the failure was caused by a
1409  // non-mandatory script verification check, such as
1410  // non-standard DER encodings or non-null dummy
1411  // arguments; if so, ensure we return NOT_STANDARD
1412  // instead of CONSENSUS to avoid downstream users
1413  // splitting the network between upgraded and
1414  // non-upgraded nodes by banning CONSENSUS-failing
1415  // data providers.
1416  CScriptCheck check2(txdata.m_spent_outputs[i], tx, i,
1417  flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, cacheSigStore, &txdata);
1418  if (check2())
1419  return state.Invalid(TxValidationResult::TX_NOT_STANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check.GetScriptError())));
1420  }
1421  // MANDATORY flag failures correspond to
1422  // TxValidationResult::TX_CONSENSUS. Because CONSENSUS
1423  // failures are the most serious case of validation
1424  // failures, we may need to consider using
1425  // RECENT_CONSENSUS_CHANGE for any script failure that
1426  // could be due to non-upgraded nodes which we may want to
1427  // support, to avoid splitting the network (but this
1428  // depends on the details of how net_processing handles
1429  // such errors).
1430  return state.Invalid(TxValidationResult::TX_CONSENSUS, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(check.GetScriptError())));
1431  }
1432  }
1433 
1434  if (cacheFullScriptStore && !pvChecks) {
1435  // We executed all of the provided scripts, and were told to
1436  // cache the result. Do so now.
1437  g_scriptExecutionCache.insert(hashCacheEntry);
1438  }
1439 
1440  return true;
1441 }
1442 
// Overload that also records the abort message as an Error on the supplied
// validation state; always returns false (the value of state.Error()) so it
// can be used directly in `return AbortNode(state, ...)` call sites.
bool AbortNode(BlockValidationState& state, const std::string& strMessage, const bilingual_str& userMessage)
{
    AbortNode(strMessage, userMessage);
    return state.Error(strMessage);
}
1448 
1456 int ApplyTxInUndo(Coin&& undo, CCoinsViewCache& view, const COutPoint& out)
1457 {
1458  bool fClean = true;
1459 
1460  if (view.HaveCoin(out)) fClean = false; // overwriting transaction output
1461 
1462  if (undo.nHeight == 0) {
1463  // Missing undo metadata (height and coinbase). Older versions included this
1464  // information only in undo records for the last spend of a transactions'
1465  // outputs. This implies that it must be present for some other output of the same tx.
1466  const Coin& alternate = AccessByTxid(view, out.hash);
1467  if (!alternate.IsSpent()) {
1468  undo.nHeight = alternate.nHeight;
1469  undo.fCoinBase = alternate.fCoinBase;
1470  } else {
1471  return DISCONNECT_FAILED; // adding output for transaction without known metadata
1472  }
1473  }
1474  // If the coin already exists as an unspent coin in the cache, then the
1475  // possible_overwrite parameter to AddCoin must be set to true. We have
1476  // already checked whether an unspent coin exists above using HaveCoin, so
1477  // we don't need to guess. When fClean is false, an unspent coin already
1478  // existed and it is an overwrite.
1479  view.AddCoin(out, std::move(undo), !fClean);
1480 
1481  return fClean ? DISCONNECT_OK : DISCONNECT_UNCLEAN;
1482 }
1483 
1487 {
1488  bool fClean = true;
1489 
1490  CBlockUndo blockUndo;
1491  if (!UndoReadFromDisk(blockUndo, pindex)) {
1492  error("DisconnectBlock(): failure reading undo data");
1493  return DISCONNECT_FAILED;
1494  }
1495 
1496  if (blockUndo.vtxundo.size() + 1 != block.vtx.size()) {
1497  error("DisconnectBlock(): block and undo data inconsistent");
1498  return DISCONNECT_FAILED;
1499  }
1500 
1501  // undo transactions in reverse order
1502  for (int i = block.vtx.size() - 1; i >= 0; i--) {
1503  const CTransaction &tx = *(block.vtx[i]);
1504  uint256 hash = tx.GetHash();
1505  bool is_coinbase = tx.IsCoinBase();
1506 
1507  // Check that all outputs are available and match the outputs in the block itself
1508  // exactly.
1509  for (size_t o = 0; o < tx.vout.size(); o++) {
1510  if (!tx.vout[o].scriptPubKey.IsUnspendable()) {
1511  COutPoint out(hash, o);
1512  Coin coin;
1513  bool is_spent = view.SpendCoin(out, &coin);
1514  if (!is_spent || tx.vout[o] != coin.out || pindex->nHeight != coin.nHeight || is_coinbase != coin.fCoinBase) {
1515  fClean = false; // transaction output mismatch
1516  }
1517  }
1518  }
1519 
1520  // restore inputs
1521  if (i > 0) { // not coinbases
1522  CTxUndo &txundo = blockUndo.vtxundo[i-1];
1523  if (txundo.vprevout.size() != tx.vin.size()) {
1524  error("DisconnectBlock(): transaction and undo data inconsistent");
1525  return DISCONNECT_FAILED;
1526  }
1527  for (unsigned int j = tx.vin.size(); j-- > 0;) {
1528  const COutPoint &out = tx.vin[j].prevout;
1529  int res = ApplyTxInUndo(std::move(txundo.vprevout[j]), view, out);
1530  if (res == DISCONNECT_FAILED) return DISCONNECT_FAILED;
1531  fClean = fClean && res != DISCONNECT_UNCLEAN;
1532  }
1533  // At this point, all of txundo.vprevout should have been moved out.
1534  }
1535  }
1536 
1537  // move best block pointer to prevout block
1538  view.SetBestBlock(pindex->pprev->GetBlockHash());
1539 
1540  return fClean ? DISCONNECT_OK : DISCONNECT_UNCLEAN;
1541 }
1542 
1544 
// Launch `threads_num` worker threads servicing the global script-check queue.
void StartScriptCheckWorkerThreads(int threads_num)
{
    scriptcheckqueue.StartWorkerThreads(threads_num);
}
1549 
1551 {
1552  scriptcheckqueue.StopWorkerThreads();
1553 }
1554 
1559 {
1560 private:
1561  int bit;
1562 
1563 public:
1564  explicit WarningBitsConditionChecker(int bitIn) : bit(bitIn) {}
1565 
1566  int64_t BeginTime(const Consensus::Params& params) const override { return 0; }
1567  int64_t EndTime(const Consensus::Params& params) const override { return std::numeric_limits<int64_t>::max(); }
1568  int Period(const Consensus::Params& params) const override { return params.nMinerConfirmationWindow; }
1569  int Threshold(const Consensus::Params& params) const override { return params.nRuleChangeActivationThreshold; }
1570 
1571  bool Condition(const CBlockIndex* pindex, const Consensus::Params& params) const override
1572  {
1573  return pindex->nHeight >= params.MinBIP9WarningHeight &&
1575  ((pindex->nVersion >> bit) & 1) != 0 &&
1576  ((g_versionbitscache.ComputeBlockVersion(pindex->pprev, params) >> bit) & 1) == 0;
1577  }
1578 };
1579 
1581 
1582 static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consensus::Params& consensusparams)
1583 {
1584  unsigned int flags = SCRIPT_VERIFY_NONE;
1585 
1586  // BIP16 didn't become active until Apr 1 2012 (on mainnet, and
1587  // retroactively applied to testnet)
1588  // However, only one historical block violated the P2SH rules (on both
1589  // mainnet and testnet), so for simplicity, always leave P2SH
1590  // on except for the one violating block.
1591  if (consensusparams.BIP16Exception.IsNull() || // no bip16 exception on this chain
1592  pindex->phashBlock == nullptr || // this is a new candidate block, eg from TestBlockValidity()
1593  *pindex->phashBlock != consensusparams.BIP16Exception) // this block isn't the historical exception
1594  {
1595  // Enforce WITNESS rules whenever P2SH is in effect
1597  }
1598 
1599  // Enforce the DERSIG (BIP66) rule
1600  if (DeploymentActiveAt(*pindex, consensusparams, Consensus::DEPLOYMENT_DERSIG)) {
1602  }
1603 
1604  // Enforce CHECKLOCKTIMEVERIFY (BIP65)
1605  if (DeploymentActiveAt(*pindex, consensusparams, Consensus::DEPLOYMENT_CLTV)) {
1607  }
1608 
1609  // Enforce CHECKSEQUENCEVERIFY (BIP112)
1610  if (DeploymentActiveAt(*pindex, consensusparams, Consensus::DEPLOYMENT_CSV)) {
1612  }
1613 
1614  // Enforce Taproot (BIP340-BIP342)
1615  if (DeploymentActiveAt(*pindex, consensusparams, Consensus::DEPLOYMENT_TAPROOT)) {
1617  }
1618 
1619  // Enforce BIP147 NULLDUMMY (activated simultaneously with segwit)
1620  if (DeploymentActiveAt(*pindex, consensusparams, Consensus::DEPLOYMENT_SEGWIT)) {
1622  }
1623 
1624  return flags;
1625 }
1626 
1627 
1628 
// Cumulative benchmark accumulators (microseconds) and block counter for the
// BCLog::BENCH log lines emitted from ConnectBlock(); nBlocksTotal is
// incremented once per connected block there.
static int64_t nTimeCheck = 0;   // sanity / assumevalid checks
static int64_t nTimeForks = 0;   // deployment ("fork") flag computation
static int64_t nTimeVerify = 0;  // input script verification
static int64_t nTimeConnect = 0; // per-transaction connect loop
static int64_t nTimeIndex = 0;   // view / index bookkeeping
static int64_t nTimeTotal = 0;
static int64_t nBlocksTotal = 0; // blocks connected since startup
1636 
                               CCoinsViewCache& view, bool fJustCheck)
{
    // Apply the effects of `block` on top of the coins `view`, which must
    // represent the state after this block's parent. Returns false (with
    // `state` set) on any consensus failure. If fJustCheck is true, the
    // block is validated but nothing is written to disk or marked valid.
    //
    // NOTE(review): the opening signature line(s) of this function (likely
    // `bool CChainState::ConnectBlock(const CBlock& block, BlockValidationState& state, CBlockIndex* pindex,`
    // plus an AssertLockHeld) were lost in extraction — confirm against the
    // original file.
    assert(pindex);
    assert(*pindex->phashBlock == block.GetHash());
    int64_t nTimeStart = GetTimeMicros();

    // Check it again in case a previous version let a bad block in
    // NOTE: We don't currently (re-)invoke ContextualCheckBlock() or
    // ContextualCheckBlockHeader() here. This means that if we add a new
    // consensus rule that is enforced in one of those two functions, then we
    // may have let in a block that violates the rule prior to updating the
    // software, and we would NOT be enforcing the rule here. Fully solving
    // upgrade from one software version to the next after a consensus rule
    // change is potentially tricky and issue-specific (see NeedsRedownload()
    // for one approach that was used for BIP 141 deployment).
    // Also, currently the rule against blocks more than 2 hours in the future
    // is enforced in ContextualCheckBlockHeader(); we wouldn't want to
    // re-enforce that rule here (at least until we make it impossible for
    // GetAdjustedTime() to go backward).
    if (!CheckBlock(block, state, m_params.GetConsensus(), !fJustCheck, !fJustCheck)) {
        // NOTE(review): a guard line such as
        //   if (state.GetResult() == BlockValidationResult::BLOCK_MUTATED) {
        // appears to have been lost here — the brace structure below is
        // unbalanced without it. Confirm against the original file.
            // We don't write down blocks to disk if they may have been
            // corrupted, so this should be impossible unless we're having hardware
            // problems.
            return AbortNode(state, "Corrupt block found indicating potential hardware failure; shutting down");
        }
        return error("%s: Consensus::CheckBlock: %s", __func__, state.ToString());
    }

    // verify that the view's current state corresponds to the previous block
    uint256 hashPrevBlock = pindex->pprev == nullptr ? uint256() : pindex->pprev->GetBlockHash();
    assert(hashPrevBlock == view.GetBestBlock());

    nBlocksTotal++;

    // Special case for the genesis block, skipping connection of its transactions
    // (its coinbase is unspendable)
    if (block.GetHash() == m_params.GetConsensus().hashGenesisBlock) {
        if (!fJustCheck)
            view.SetBestBlock(pindex->GetBlockHash());
        return true;
    }

    bool fScriptChecks = true;
    if (!hashAssumeValid.IsNull()) {
        // We've been configured with the hash of a block which has been externally verified to have a valid history.
        // A suitable default value is included with the software and updated from time to time. Because validity
        // relative to a piece of software is an objective fact these defaults can be easily reviewed.
        // This setting doesn't force the selection of any particular chain but makes validating some faster by
        // effectively caching the result of part of the verification.
        BlockMap::const_iterator it = m_blockman.m_block_index.find(hashAssumeValid);
        if (it != m_blockman.m_block_index.end()) {
            if (it->second->GetAncestor(pindex->nHeight) == pindex &&
                pindexBestHeader->GetAncestor(pindex->nHeight) == pindex &&
                // NOTE(review): the final clause of this condition (likely a
                // `pindexBestHeader->nChainWork >= nMinimumChainWork) {` test,
                // per the nMinimumChainWork comment below) appears to have
                // been lost in extraction — confirm.
                // This block is a member of the assumed verified chain and an ancestor of the best header.
                // Script verification is skipped when connecting blocks under the
                // assumevalid block. Assuming the assumevalid block is valid this
                // is safe because block merkle hashes are still computed and checked,
                // Of course, if an assumed valid block is invalid due to false scriptSigs
                // this optimization would allow an invalid chain to be accepted.
                // The equivalent time check discourages hash power from extorting the network via DOS attack
                // into accepting an invalid block through telling users they must manually set assumevalid.
                // Requiring a software change or burying the invalid block, regardless of the setting, makes
                // it hard to hide the implication of the demand. This also avoids having release candidates
                // that are hardly doing any signature verification at all in testing without having to
                // artificially set the default assumed verified block further back.
                // The test against nMinimumChainWork prevents the skipping when denied access to any chain at
                // least as good as the expected chain.
                fScriptChecks = (GetBlockProofEquivalentTime(*pindexBestHeader, *pindex, *pindexBestHeader, m_params.GetConsensus()) <= 60 * 60 * 24 * 7 * 2);
            }
        }
    }

    int64_t nTime1 = GetTimeMicros(); nTimeCheck += nTime1 - nTimeStart;
    LogPrint(BCLog::BENCH, "    - Sanity checks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime1 - nTimeStart), nTimeCheck * MICRO, nTimeCheck * MILLI / nBlocksTotal);

    // Do not allow blocks that contain transactions which 'overwrite' older transactions,
    // unless those are already completely spent.
    // If such overwrites are allowed, coinbases and transactions depending upon those
    // can be duplicated to remove the ability to spend the first instance -- even after
    // being sent to another address.
    // See BIP30, CVE-2012-1909, and http://r6.ca/blog/20120206T005236Z.html for more information.
    // This rule was originally applied to all blocks with a timestamp after March 15, 2012, 0:00 UTC.
    // Now that the whole chain is irreversibly beyond that time it is applied to all blocks except the
    // two in the chain that violate it. This prevents exploiting the issue against nodes during their
    // initial block download.
    bool fEnforceBIP30 = !((pindex->nHeight==91842 && pindex->GetBlockHash() == uint256S("0x00000000000a4d0a398161ffc163c503763b1f4360639393e0e4c8e300e0caec")) ||
                           (pindex->nHeight==91880 && pindex->GetBlockHash() == uint256S("0x00000000000743f190a18c5577a3c2d2a1f610ae9601ac046a38084ccb7cd721")));

    // Once BIP34 activated it was not possible to create new duplicate coinbases and thus other than starting
    // with the 2 existing duplicate coinbase pairs, not possible to create overwriting txs. But by the
    // time BIP34 activated, in each of the existing pairs the duplicate coinbase had overwritten the first
    // before the first had been spent. Since those coinbases are sufficiently buried it's no longer possible to create further
    // duplicate transactions descending from the known pairs either.
    // If we're on the known chain at height greater than where BIP34 activated, we can save the db accesses needed for the BIP30 check.

    // BIP34 requires that a block at height X (block X) has its coinbase
    // scriptSig start with a CScriptNum of X (indicated height X). The above
    // logic of no longer requiring BIP30 once BIP34 activates is flawed in the
    // case that there is a block X before the BIP34 height of 227,931 which has
    // an indicated height Y where Y is greater than X. The coinbase for block
    // X would also be a valid coinbase for block Y, which could be a BIP30
    // violation. An exhaustive search of all mainnet coinbases before the
    // BIP34 height which have an indicated height greater than the block height
    // reveals many occurrences. The 3 lowest indicated heights found are
    // 209,921, 490,897, and 1,983,702 and thus coinbases for blocks at these 3
    // heights would be the first opportunity for BIP30 to be violated.

    // The search reveals a great many blocks which have an indicated height
    // greater than 1,983,702, so we simply remove the optimization to skip
    // BIP30 checking for blocks at height 1,983,702 or higher. Before we reach
    // that block in another 25 years or so, we should take advantage of a
    // future consensus change to do a new and improved version of BIP34 that
    // will actually prevent ever creating any duplicate coinbases in the
    // future.
    static constexpr int BIP34_IMPLIES_BIP30_LIMIT = 1983702;

    // There is no potential to create a duplicate coinbase at block 209,921
    // because this is still before the BIP34 height and so explicit BIP30
    // checking is still active.

    // The final case is block 176,684 which has an indicated height of
    // 490,897. Unfortunately, this issue was not discovered until about 2 weeks
    // before block 490,897 so there was not much opportunity to address this
    // case other than to carefully analyze it and determine it would not be a
    // problem. Block 490,897 was, in fact, mined with a different coinbase than
    // block 176,684, but it is important to note that even if it hadn't been or
    // is remined on an alternate fork with a duplicate coinbase, we would still
    // not run into a BIP30 violation. This is because the coinbase for 176,684
    // is spent in block 185,956 in transaction
    // d4f7fbbf92f4a3014a230b2dc70b8058d02eb36ac06b4a0736d9d60eaa9e8781. This
    // spending transaction can't be duplicated because it also spends coinbase
    // 0328dd85c331237f18e781d692c92de57649529bd5edf1d01036daea32ffde29. This
    // coinbase has an indicated height of over 4.2 billion, and wouldn't be
    // duplicatable until that height, and it's currently impossible to create a
    // chain that long. Nevertheless we may wish to consider a future soft fork
    // which retroactively prevents block 490,897 from creating a duplicate
    // coinbase. The two historical BIP30 violations often provide a confusing
    // edge case when manipulating the UTXO and it would be simpler not to have
    // another edge case to deal with.

    // testnet3 has no blocks before the BIP34 height with indicated heights
    // post BIP34 before approximately height 486,000,000 and presumably will
    // be reset before it reaches block 1,983,702 and starts doing unnecessary
    // BIP30 checking again.
    assert(pindex->pprev);
    CBlockIndex* pindexBIP34height = pindex->pprev->GetAncestor(m_params.GetConsensus().BIP34Height);
    //Only continue to enforce if we're below BIP34 activation height or the block hash at that height doesn't correspond.
    fEnforceBIP30 = fEnforceBIP30 && (!pindexBIP34height || !(pindexBIP34height->GetBlockHash() == m_params.GetConsensus().BIP34Hash));

    // TODO: Remove BIP30 checking from block height 1,983,702 on, once we have a
    // consensus change that ensures coinbases at those heights can not
    // duplicate earlier coinbases.
    if (fEnforceBIP30 || pindex->nHeight >= BIP34_IMPLIES_BIP30_LIMIT) {
        for (const auto& tx : block.vtx) {
            for (size_t o = 0; o < tx->vout.size(); o++) {
                if (view.HaveCoin(COutPoint(tx->GetHash(), o))) {
                    LogPrintf("ERROR: ConnectBlock(): tried to overwrite transaction\n");
                    return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-BIP30");
                }
            }
        }
    }

    // Enforce BIP68 (sequence locks)
    int nLockTimeFlags = 0;
    // NOTE(review): the enclosing condition (likely
    //   if (DeploymentActiveAt(*pindex, ..., Consensus::DEPLOYMENT_CSV)) {
    // ) appears to have been lost before this assignment — the stray closing
    // brace below suggests it. Confirm against the original file.
        nLockTimeFlags |= LOCKTIME_VERIFY_SEQUENCE;
    }

    // Get the script flags for this block
    unsigned int flags = GetBlockScriptFlags(pindex, m_params.GetConsensus());

    int64_t nTime2 = GetTimeMicros(); nTimeForks += nTime2 - nTime1;
    LogPrint(BCLog::BENCH, "    - Fork checks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime2 - nTime1), nTimeForks * MICRO, nTimeForks * MILLI / nBlocksTotal);

    CBlockUndo blockundo;

    // Precomputed transaction data pointers must not be invalidated
    // until after `control` has run the script checks (potentially
    // in multiple threads). Preallocate the vector size so a new allocation
    // doesn't invalidate pointers into the vector, and keep txsdata in scope
    // for as long as `control`.
    CCheckQueueControl<CScriptCheck> control(fScriptChecks && g_parallel_script_checks ? &scriptcheckqueue : nullptr);
    std::vector<PrecomputedTransactionData> txsdata(block.vtx.size());

    std::vector<int> prevheights;
    CAmount nFees = 0;
    int nInputs = 0;
    int64_t nSigOpsCost = 0;
    blockundo.vtxundo.reserve(block.vtx.size() - 1);
    for (unsigned int i = 0; i < block.vtx.size(); i++)
    {
        const CTransaction &tx = *(block.vtx[i]);

        nInputs += tx.vin.size();

        if (!tx.IsCoinBase())
        {
            CAmount txfee = 0;
            TxValidationState tx_state;
            if (!Consensus::CheckTxInputs(tx, tx_state, view, pindex->nHeight, txfee)) {
                // Any transaction validation failure in ConnectBlock is a block consensus failure
                // NOTE(review): the first line of the state.Invalid(...) call
                // (e.g. `state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,`)
                // appears to be missing before this continuation — confirm.
                    tx_state.GetRejectReason(), tx_state.GetDebugMessage());
                return error("%s: Consensus::CheckTxInputs: %s, %s", __func__, tx.GetHash().ToString(), state.ToString());
            }
            nFees += txfee;
            if (!MoneyRange(nFees)) {
                LogPrintf("ERROR: %s: accumulated fee in the block out of range.\n", __func__);
                return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-accumulated-fee-outofrange");
            }

            // Check that transaction is BIP68 final
            // BIP68 lock checks (as opposed to nLockTime checks) must
            // be in ConnectBlock because they require the UTXO set
            prevheights.resize(tx.vin.size());
            for (size_t j = 0; j < tx.vin.size(); j++) {
                prevheights[j] = view.AccessCoin(tx.vin[j].prevout).nHeight;
            }

            if (!SequenceLocks(tx, nLockTimeFlags, prevheights, *pindex)) {
                LogPrintf("ERROR: %s: contains a non-BIP68-final transaction\n", __func__);
                return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-nonfinal");
            }
        }

        // GetTransactionSigOpCost counts 3 types of sigops:
        // * legacy (always)
        // * p2sh (when P2SH enabled in flags and excludes coinbase)
        // * witness (when witness enabled in flags and excludes coinbase)
        nSigOpsCost += GetTransactionSigOpCost(tx, view, flags);
        if (nSigOpsCost > MAX_BLOCK_SIGOPS_COST) {
            LogPrintf("ERROR: ConnectBlock(): too many sigops\n");
            return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-sigops");
        }

        if (!tx.IsCoinBase())
        {
            std::vector<CScriptCheck> vChecks;
            bool fCacheResults = fJustCheck; /* Don't cache results if we're actually connecting blocks (still consult the cache, though) */
            TxValidationState tx_state;
            if (fScriptChecks && !CheckInputScripts(tx, tx_state, view, flags, fCacheResults, fCacheResults, txsdata[i], g_parallel_script_checks ? &vChecks : nullptr)) {
                // Any transaction validation failure in ConnectBlock is a block consensus failure
                // NOTE(review): as above, the first line of the
                // state.Invalid(...) call appears to be missing here — confirm.
                    tx_state.GetRejectReason(), tx_state.GetDebugMessage());
                return error("ConnectBlock(): CheckInputScripts on %s failed with %s",
                    tx.GetHash().ToString(), state.ToString());
            }
            control.Add(vChecks);
        }

        // Undo data is recorded for every transaction except the coinbase
        // (blockundo.vtxundo has block.vtx.size() - 1 entries).
        CTxUndo undoDummy;
        if (i > 0) {
            blockundo.vtxundo.push_back(CTxUndo());
        }
        UpdateCoins(tx, view, i == 0 ? undoDummy : blockundo.vtxundo.back(), pindex->nHeight);
    }
    int64_t nTime3 = GetTimeMicros(); nTimeConnect += nTime3 - nTime2;
    LogPrint(BCLog::BENCH, "      - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs (%.2fms/blk)]\n", (unsigned)block.vtx.size(), MILLI * (nTime3 - nTime2), MILLI * (nTime3 - nTime2) / block.vtx.size(), nInputs <= 1 ? 0 : MILLI * (nTime3 - nTime2) / (nInputs-1), nTimeConnect * MICRO, nTimeConnect * MILLI / nBlocksTotal);

    CAmount blockReward = nFees + GetBlockSubsidy(pindex->nHeight, m_params.GetConsensus());
    if (block.vtx[0]->GetValueOut() > blockReward) {
        LogPrintf("ERROR: ConnectBlock(): coinbase pays too much (actual=%d vs limit=%d)\n", block.vtx[0]->GetValueOut(), blockReward);
        return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-amount");
    }

    // Wait for the parallel script-check workers to finish.
    if (!control.Wait()) {
        LogPrintf("ERROR: %s: CheckQueue failed\n", __func__);
        return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "block-validation-failed");
    }
    int64_t nTime4 = GetTimeMicros(); nTimeVerify += nTime4 - nTime2;
    LogPrint(BCLog::BENCH, "    - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs (%.2fms/blk)]\n", nInputs - 1, MILLI * (nTime4 - nTime2), nInputs <= 1 ? 0 : MILLI * (nTime4 - nTime2) / (nInputs-1), nTimeVerify * MICRO, nTimeVerify * MILLI / nBlocksTotal);

    if (fJustCheck)
        return true;

    if (!WriteUndoDataForBlock(blockundo, state, pindex, m_params)) {
        return false;
    }

    if (!pindex->IsValid(BLOCK_VALID_SCRIPTS)) {
        // NOTE(review): the statement raising this index entry's validity
        // level (e.g. `pindex->RaiseValidity(BLOCK_VALID_SCRIPTS);`) appears
        // to have been lost here — inserting into setDirtyBlockIndex alone
        // would not record the new validity. Confirm.
        setDirtyBlockIndex.insert(pindex);
    }

    assert(pindex->phashBlock);
    // add this block to the view's block chain
    view.SetBestBlock(pindex->GetBlockHash());

    int64_t nTime5 = GetTimeMicros(); nTimeIndex += nTime5 - nTime4;
    LogPrint(BCLog::BENCH, "    - Index writing: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime5 - nTime4), nTimeIndex * MICRO, nTimeIndex * MILLI / nBlocksTotal);

    TRACE6(validation, block_connected,
        block.GetHash().data(),
        pindex->nHeight,
        block.vtx.size(),
        nInputs,
        nSigOpsCost,
        GetTimeMicros() - nTimeStart // in microseconds (µs)
    );

    return true;
}
1947 
1948 CoinsCacheSizeState CChainState::GetCoinsCacheSizeState()
1949 {
1950  return this->GetCoinsCacheSizeState(
1952  gArgs.GetIntArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000);
1953 }
1954 
1955 CoinsCacheSizeState CChainState::GetCoinsCacheSizeState(
1956  size_t max_coins_cache_size_bytes,
1957  size_t max_mempool_size_bytes)
1958 {
1959  const int64_t nMempoolUsage = m_mempool ? m_mempool->DynamicMemoryUsage() : 0;
1960  int64_t cacheSize = CoinsTip().DynamicMemoryUsage();
1961  int64_t nTotalSpace =
1962  max_coins_cache_size_bytes + std::max<int64_t>(max_mempool_size_bytes - nMempoolUsage, 0);
1963 
1965  static constexpr int64_t MAX_BLOCK_COINSDB_USAGE_BYTES = 10 * 1024 * 1024; // 10MB
1966  int64_t large_threshold =
1967  std::max((9 * nTotalSpace) / 10, nTotalSpace - MAX_BLOCK_COINSDB_USAGE_BYTES);
1968 
1969  if (cacheSize > nTotalSpace) {
1970  LogPrintf("Cache size (%s) exceeds total space (%s)\n", cacheSize, nTotalSpace);
1972  } else if (cacheSize > large_threshold) {
1974  }
1975  return CoinsCacheSizeState::OK;
1976 }
1977 
    BlockValidationState &state,
    FlushStateMode mode,
    int nManualPruneHeight)
{
    // Flush block files, the block index and (when warranted) the coins cache
    // to disk, optionally pruning old block files first. Returns false only on
    // a fatal error (disk full, database write failure).
    //
    // NOTE(review): the first line of this definition (likely
    // `bool CChainState::FlushStateToDisk(`) was lost in extraction — confirm
    // against the original file.
    LOCK(cs_main);
    assert(this->CanFlushToDisk());
    // Timestamps of the last block-index write and last full coins flush;
    // static so the periodic intervals persist across calls.
    static std::chrono::microseconds nLastWrite{0};
    static std::chrono::microseconds nLastFlush{0};
    std::set<int> setFilesToPrune;
    bool full_flush_completed = false;

    const size_t coins_count = CoinsTip().GetCacheSize();
    const size_t coins_mem_usage = CoinsTip().DynamicMemoryUsage();

    try {
    {
        bool fFlushForPrune = false;
        bool fDoFullFlush = false;

        CoinsCacheSizeState cache_state = GetCoinsCacheSizeState();
        // NOTE(review): a lock acquisition (e.g. LOCK(cs_LastBlockFile);)
        // appears to have been lost here — confirm against the original file.
        if (fPruneMode && (fCheckForPruning || nManualPruneHeight > 0) && !fReindex) {
            // make sure we don't prune above the blockfilterindexes bestblocks
            // pruning is height-based
            int last_prune = m_chain.Height(); // last height we can prune
            // NOTE(review): the opening of this callback (e.g.
            // ForEachBlockFilterIndex([&](BlockFilterIndex& index) {) appears
            // to have been lost before the next line — confirm.
                last_prune = std::max(1, std::min(last_prune, index.GetSummary().best_block_height));
            });

            if (nManualPruneHeight > 0) {
                LOG_TIME_MILLIS_WITH_CATEGORY("find files to prune (manual)", BCLog::BENCH);

                m_blockman.FindFilesToPruneManual(setFilesToPrune, std::min(last_prune, nManualPruneHeight), m_chain.Height());
            } else {
                LOG_TIME_MILLIS_WITH_CATEGORY("find files to prune", BCLog::BENCH);

                m_blockman.FindFilesToPrune(setFilesToPrune, m_params.PruneAfterHeight(), m_chain.Height(), last_prune, IsInitialBlockDownload());
                fCheckForPruning = false;
            }
            if (!setFilesToPrune.empty()) {
                fFlushForPrune = true;
                if (!fHavePruned) {
                    // Persist the fact that we have pruned so startup checks know.
                    m_blockman.m_block_tree_db->WriteFlag("prunedblockfiles", true);
                    fHavePruned = true;
                }
            }
        }
        const auto nNow = GetTime<std::chrono::microseconds>();
        // Avoid writing/flushing immediately after startup.
        if (nLastWrite.count() == 0) {
            nLastWrite = nNow;
        }
        if (nLastFlush.count() == 0) {
            nLastFlush = nNow;
        }
        // The cache is large and we're within 10% and 10 MiB of the limit, but we have time now (not in the middle of a block processing).
        bool fCacheLarge = mode == FlushStateMode::PERIODIC && cache_state >= CoinsCacheSizeState::LARGE;
        // The cache is over the limit, we have to write now.
        bool fCacheCritical = mode == FlushStateMode::IF_NEEDED && cache_state >= CoinsCacheSizeState::CRITICAL;
        // It's been a while since we wrote the block index to disk. Do this frequently, so we don't need to redownload after a crash.
        bool fPeriodicWrite = mode == FlushStateMode::PERIODIC && nNow > nLastWrite + DATABASE_WRITE_INTERVAL;
        // It's been very long since we flushed the cache. Do this infrequently, to optimize cache usage.
        bool fPeriodicFlush = mode == FlushStateMode::PERIODIC && nNow > nLastFlush + DATABASE_FLUSH_INTERVAL;
        // Combine all conditions that result in a full cache flush.
        fDoFullFlush = (mode == FlushStateMode::ALWAYS) || fCacheLarge || fCacheCritical || fPeriodicFlush || fFlushForPrune;
        // Write blocks and block index to disk.
        if (fDoFullFlush || fPeriodicWrite) {
            // Ensure we can write block index
            // NOTE(review): the disk-space check condition (e.g.
            // if (!CheckDiskSpace(gArgs.GetBlocksDirPath())) {) appears to
            // have been lost before this return — the stray closing brace
            // below suggests it. Confirm against the original file.
                return AbortNode(state, "Disk space is too low!", _("Disk space is too low!"));
            }
            {
                LOG_TIME_MILLIS_WITH_CATEGORY("write block and undo data to disk", BCLog::BENCH);

                // First make sure all block and undo data is flushed to disk.
                FlushBlockFile();
            }

            // Then update all block file information (which may refer to block and undo files).
            {
                LOG_TIME_MILLIS_WITH_CATEGORY("write block index to disk", BCLog::BENCH);

                std::vector<std::pair<int, const CBlockFileInfo*> > vFiles;
                vFiles.reserve(setDirtyFileInfo.size());
                for (std::set<int>::iterator it = setDirtyFileInfo.begin(); it != setDirtyFileInfo.end(); ) {
                    vFiles.push_back(std::make_pair(*it, &vinfoBlockFile[*it]));
                    setDirtyFileInfo.erase(it++);
                }
                std::vector<const CBlockIndex*> vBlocks;
                vBlocks.reserve(setDirtyBlockIndex.size());
                for (std::set<CBlockIndex*>::iterator it = setDirtyBlockIndex.begin(); it != setDirtyBlockIndex.end(); ) {
                    vBlocks.push_back(*it);
                    setDirtyBlockIndex.erase(it++);
                }
                if (!m_blockman.m_block_tree_db->WriteBatchSync(vFiles, nLastBlockFile, vBlocks)) {
                    return AbortNode(state, "Failed to write to block index database");
                }
            }
            // Finally remove any pruned files
            if (fFlushForPrune) {
                LOG_TIME_MILLIS_WITH_CATEGORY("unlink pruned files", BCLog::BENCH);

                UnlinkPrunedFiles(setFilesToPrune);
            }
            nLastWrite = nNow;
        }
        // Flush best chain related state. This can only be done if the blocks / block index write was also done.
        if (fDoFullFlush && !CoinsTip().GetBestBlock().IsNull()) {
            LOG_TIME_MILLIS_WITH_CATEGORY(strprintf("write coins cache to disk (%d coins, %.2fkB)",
                coins_count, coins_mem_usage / 1000), BCLog::BENCH);

            // Typical Coin structures on disk are around 48 bytes in size.
            // Pushing a new one to the database can cause it to be written
            // twice (once in the log, and once in the tables). This is already
            // an overestimation, as most will delete an existing entry or
            // overwrite one. Still, use a conservative safety factor of 2.
            if (!CheckDiskSpace(gArgs.GetDataDirNet(), 48 * 2 * 2 * CoinsTip().GetCacheSize())) {
                return AbortNode(state, "Disk space is too low!", _("Disk space is too low!"));
            }
            // Flush the chainstate (which may refer to block index entries).
            if (!CoinsTip().Flush())
                return AbortNode(state, "Failed to write to coin database");
            nLastFlush = nNow;
            full_flush_completed = true;
        }
        TRACE6(utxocache, flush,
               (int64_t)(GetTimeMicros() - nNow.count()), // in microseconds (µs)
               (u_int32_t)mode,
               (u_int64_t)coins_count,
               (u_int64_t)coins_mem_usage,
               (bool)fFlushForPrune,
               (bool)fDoFullFlush);
    }
    if (full_flush_completed) {
        // Update best block in wallet (so we can detect restored wallets).
        // NOTE(review): the notification call (likely
        // GetMainSignals().ChainStateFlushed(m_chain.GetLocator());) appears
        // to have been lost here — the if-body is otherwise empty. Confirm.
    }
    } catch (const std::runtime_error& e) {
        return AbortNode(state, std::string("System error while flushing: ") + e.what());
    }
    return true;
}
2121 
2123 {
2124  BlockValidationState state;
2125  if (!this->FlushStateToDisk(state, FlushStateMode::ALWAYS)) {
2126  LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString());
2127  }
2128 }
2129 
2131 {
2132  BlockValidationState state;
2133  fCheckForPruning = true;
2134  if (!this->FlushStateToDisk(state, FlushStateMode::NONE)) {
2135  LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString());
2136  }
2137 }
2138 
2139 static void DoWarning(const bilingual_str& warning)
2140 {
2141  static bool fWarned = false;
2142  SetMiscWarning(warning);
2143  if (!fWarned) {
2144  AlertNotify(warning.original);
2145  fWarned = true;
2146  }
2147 }
2148 
2150 static void AppendWarning(bilingual_str& res, const bilingual_str& warn)
2151 {
2152  if (!res.empty()) res += Untranslated(", ");
2153  res += warn;
2154 }
2155 
//! Emit the standard "new best=..." tip log line for `tip`, prefixed with
//! `prefix` and the calling function's name, appending `warning_messages`
//! (if any) as a warning='...' suffix.
static void UpdateTipLog(
    const CCoinsViewCache& coins_tip,
    const CBlockIndex* tip,
    const CChainParams& params,
    const std::string& func_name,
    const std::string& prefix,
    const std::string& warning_messages) EXCLUSIVE_LOCKS_REQUIRED(::cs_main)
{
    // NOTE(review): an AssertLockHeld(::cs_main) line appears to have been
    // lost here (cs_main is required per the annotation above) — confirm.
    LogPrintf("%s%s: new best=%s height=%d version=0x%08x log2_work=%f tx=%lu date='%s' progress=%f cache=%.1fMiB(%utxo)%s\n",
        prefix, func_name,
        tip->GetBlockHash().ToString(), tip->nHeight, tip->nVersion,
        log(tip->nChainWork.getdouble()) / log(2.0), (unsigned long)tip->nChainTx,
        FormatISO8601DateTime(tip->GetBlockTime()),
        GuessVerificationProgress(params.TxData(), tip),
        coins_tip.DynamicMemoryUsage() * (1.0 / (1 << 20)),
        coins_tip.GetCacheSize(),
        !warning_messages.empty() ? strprintf(" warning='%s'", warning_messages) : "");
}
2176 
//! Perform per-new-tip bookkeeping: log the tip, and (on the active
//! chainstate only) notify waiters of g_best_block and scan the versionbits
//! warning bits for unknown softfork activations.
void CChainState::UpdateTip(const CBlockIndex* pindexNew)
{
    const auto& coins_tip = this->CoinsTip();

    // The remainder of the function isn't relevant if we are not acting on
    // the active chainstate, so return if need be.
    if (this != &m_chainman.ActiveChainstate()) {
        // Only log every so often so that we don't bury log messages at the tip.
        constexpr int BACKGROUND_LOG_INTERVAL = 2000;
        if (pindexNew->nHeight % BACKGROUND_LOG_INTERVAL == 0) {
            UpdateTipLog(coins_tip, pindexNew, m_params, __func__, "[background validation] ", "");
        }
        return;
    }

    // New best block
    if (m_mempool) {
        // NOTE(review): the mempool notification (likely
        // m_mempool->AddTransactionsUpdated(1);) appears to have been lost
        // here — the if-body is empty. Confirm against the original file.
    }

    {
        // NOTE(review): a lock acquisition guarding g_best_block (likely
        // LOCK(g_best_block_mutex);) appears to have been lost here — confirm.
        g_best_block = pindexNew->GetBlockHash();
        g_best_block_cv.notify_all();
    }

    // Scan each unknown version bit; warn hard (DoWarning) once a rule is
    // ACTIVE, and only collect a log-line warning while it is LOCKED_IN.
    bilingual_str warning_messages;
    if (!this->IsInitialBlockDownload()) {
        const CBlockIndex* pindex = pindexNew;
        for (int bit = 0; bit < VERSIONBITS_NUM_BITS; bit++) {
            WarningBitsConditionChecker checker(bit);
            ThresholdState state = checker.GetStateFor(pindex, m_params.GetConsensus(), warningcache[bit]);
            if (state == ThresholdState::ACTIVE || state == ThresholdState::LOCKED_IN) {
                const bilingual_str warning = strprintf(_("Unknown new rules activated (versionbit %i)"), bit);
                if (state == ThresholdState::ACTIVE) {
                    DoWarning(warning);
                } else {
                    AppendWarning(warning_messages, warning);
                }
            }
        }
    }
    UpdateTipLog(coins_tip, pindexNew, m_params, __func__, "", warning_messages.original);
}
2221 
{
    // Disconnect the current chain tip: undo its transactions against the
    // coins view, flush if needed, optionally stash the block's transactions
    // in `disconnectpool` for re-adding to the mempool after a reorg, then
    // move the chain tip back to the parent and notify listeners.
    //
    // NOTE(review): this function's signature and doc comment (likely
    // `bool CChainState::DisconnectTip(BlockValidationState& state,
    //  DisconnectedBlockTransactions* disconnectpool)` plus AssertLockHeld
    // lines) were lost in extraction — confirm against the original file.
    CBlockIndex *pindexDelete = m_chain.Tip();
    assert(pindexDelete);
    // Read block from disk.
    std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
    CBlock& block = *pblock;
    if (!ReadBlockFromDisk(block, pindexDelete, m_params.GetConsensus())) {
        return error("DisconnectTip(): Failed to read block");
    }
    // Apply the block atomically to the chain state.
    int64_t nStart = GetTimeMicros();
    {
        CCoinsViewCache view(&CoinsTip());
        assert(view.GetBestBlock() == pindexDelete->GetBlockHash());
        if (DisconnectBlock(block, pindexDelete, view) != DISCONNECT_OK)
            return error("DisconnectTip(): DisconnectBlock %s failed", pindexDelete->GetBlockHash().ToString());
        bool flushed = view.Flush();
        assert(flushed);
    }
    LogPrint(BCLog::BENCH, "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart) * MILLI);
    // Write the chain state to disk, if necessary.
    // NOTE(review): the flush condition (likely
    // if (!FlushStateToDisk(state, FlushStateMode::IF_NEEDED)) {) appears to
    // have been lost before this return — the stray closing brace suggests
    // it. Confirm against the original file.
        return false;
    }

    if (disconnectpool && m_mempool) {
        // Save transactions to re-add to mempool at end of reorg
        for (auto it = block.vtx.rbegin(); it != block.vtx.rend(); ++it) {
            disconnectpool->addTransaction(*it);
        }
        while (disconnectpool->DynamicMemoryUsage() > MAX_DISCONNECTED_TX_POOL_SIZE * 1000) {
            // Drop the earliest entry, and remove its children from the mempool.
            auto it = disconnectpool->queuedTx.get<insertion_order>().begin();
            // NOTE(review): the mempool eviction call (likely
            // m_mempool->removeRecursive(**it, MemPoolRemovalReason::REORG);)
            // appears to have been lost here — confirm.
            disconnectpool->removeEntry(it);
        }
    }

    m_chain.SetTip(pindexDelete->pprev);

    UpdateTip(pindexDelete->pprev);
    // Let wallets know transactions went from 1-confirmed to
    // 0-confirmed or conflicted:
    GetMainSignals().BlockDisconnected(pblock, pindexDelete);
    return true;
}
2282 
// Cumulative benchmark accumulators (microseconds) for tip connection,
// reported via the BCLog::BENCH log lines in ConnectTip().
static int64_t nTimeReadFromDisk = 0; // loading the block from disk
static int64_t nTimeConnectTotal = 0; // ConnectBlock() total
static int64_t nTimeFlush = 0;        // flushing the per-block coins view
static int64_t nTimeChainState = 0;   // writing chainstate to disk
static int64_t nTimePostConnect = 0;
2288 
    CBlockIndex* pindex = nullptr;        //!< Index entry of the connected block (null while the entry is the sentinel)
    std::shared_ptr<const CBlock> pblock; //!< The connected block itself, kept alive for later signal delivery
};
private:
    //! One entry per connected block, plus a trailing sentinel entry that the
    //! next BlockConnected() call fills in.
    std::vector<PerBlockConnectTrace> blocksConnected;

public:
    //! Start with a single empty sentinel entry.
    explicit ConnectTrace() : blocksConnected(1) {}
2307 
2308  void BlockConnected(CBlockIndex* pindex, std::shared_ptr<const CBlock> pblock) {
2309  assert(!blocksConnected.back().pindex);
2310  assert(pindex);
2311  assert(pblock);
2312  blocksConnected.back().pindex = pindex;
2313  blocksConnected.back().pblock = std::move(pblock);
2314  blocksConnected.emplace_back();
2315  }
2316 
2317  std::vector<PerBlockConnectTrace>& GetBlocksConnected() {
2318  // We always keep one extra block at the end of our list because
2319  // blocks are added after all the conflicted transactions have
2320  // been filled in. Thus, the last entry should always be an empty
2321  // one waiting for the transactions from the next block. We pop
2322  // the last entry here to make sure the list we return is sane.
2323  assert(!blocksConnected.back().pindex);
2324  blocksConnected.pop_back();
2325  return blocksConnected;
2326  }
2327 };
2328 
/**
 * Connect a single new block to m_chain. pindexNew must be the direct
 * successor of the current tip. The block is read from disk unless pblock
 * is provided. On success the block is recorded in connectTrace (so callers
 * can fire BlockConnected signals) and conflicting mempool transactions are
 * removed; on consensus failure, state is set and InvalidBlockFound() is
 * called; on a system error (disk read / flush), false is returned via
 * AbortNode or error().
 */
2335 bool CChainState::ConnectTip(BlockValidationState& state, CBlockIndex* pindexNew, const std::shared_ptr<const CBlock>& pblock, ConnectTrace& connectTrace, DisconnectedBlockTransactions& disconnectpool)
2336 {
2339 
2340  assert(pindexNew->pprev == m_chain.Tip());
2341  // Read block from disk.
2342  int64_t nTime1 = GetTimeMicros();
2343  std::shared_ptr<const CBlock> pthisBlock;
2344  if (!pblock) {
2345  std::shared_ptr<CBlock> pblockNew = std::make_shared<CBlock>();
2346  if (!ReadBlockFromDisk(*pblockNew, pindexNew, m_params.GetConsensus())) {
2347  return AbortNode(state, "Failed to read block");
2348  }
2349  pthisBlock = pblockNew;
2350  } else {
2351  pthisBlock = pblock;
2352  }
2353  const CBlock& blockConnecting = *pthisBlock;
2354  // Apply the block atomically to the chain state.
2355  int64_t nTime2 = GetTimeMicros(); nTimeReadFromDisk += nTime2 - nTime1;
2356  int64_t nTime3;
2357  LogPrint(BCLog::BENCH, "  - Load block from disk: %.2fms [%.2fs]\n", (nTime2 - nTime1) * MILLI, nTimeReadFromDisk * MICRO);
2358  {
// Stage the block's UTXO changes in a temporary view layered on the tip
// cache; they only reach CoinsTip() via the Flush() below on success.
2359  CCoinsViewCache view(&CoinsTip());
2360  bool rv = ConnectBlock(blockConnecting, state, pindexNew, view);
// BlockChecked is signalled regardless of the result, so listeners (e.g.
// the peer manager) can learn about invalid blocks too.
2361  GetMainSignals().BlockChecked(blockConnecting, state);
2362  if (!rv) {
2363  if (state.IsInvalid())
2364  InvalidBlockFound(pindexNew, state);
2365  return error("%s: ConnectBlock %s failed, %s", __func__, pindexNew->GetBlockHash().ToString(), state.ToString());
2366  }
2367  nTime3 = GetTimeMicros(); nTimeConnectTotal += nTime3 - nTime2;
2368  assert(nBlocksTotal > 0);
2369  LogPrint(BCLog::BENCH, "  - Connect total: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime3 - nTime2) * MILLI, nTimeConnectTotal * MICRO, nTimeConnectTotal * MILLI / nBlocksTotal);
2370  bool flushed = view.Flush();
2371  assert(flushed);
2372  }
2373  int64_t nTime4 = GetTimeMicros(); nTimeFlush += nTime4 - nTime3;
2374  LogPrint(BCLog::BENCH, "  - Flush: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime4 - nTime3) * MILLI, nTimeFlush * MICRO, nTimeFlush * MILLI / nBlocksTotal);
2375  // Write the chain state to disk, if necessary.
// NOTE(review): the condition line guarding this early return (a
// FlushStateToDisk call, per the comment above) is elided in this listing —
// confirm against the full source.
2377  return false;
2378  }
2379  int64_t nTime5 = GetTimeMicros(); nTimeChainState += nTime5 - nTime4;
2380  LogPrint(BCLog::BENCH, "  - Writing chainstate: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime5 - nTime4) * MILLI, nTimeChainState * MICRO, nTimeChainState * MILLI / nBlocksTotal);
2381  // Remove conflicting transactions from the mempool.;
2382  if (m_mempool) {
2383  m_mempool->removeForBlock(blockConnecting.vtx, pindexNew->nHeight);
2384  disconnectpool.removeForBlock(blockConnecting.vtx);
2385  }
2386  // Update m_chain & related variables.
2387  m_chain.SetTip(pindexNew);
2388  UpdateTip(pindexNew);
2389 
2390  int64_t nTime6 = GetTimeMicros(); nTimePostConnect += nTime6 - nTime5; nTimeTotal += nTime6 - nTime1;
2391  LogPrint(BCLog::BENCH, "  - Connect postprocess: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime6 - nTime5) * MILLI, nTimePostConnect * MICRO, nTimePostConnect * MILLI / nBlocksTotal);
2392  LogPrint(BCLog::BENCH, "- Connect block: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime6 - nTime1) * MILLI, nTimeTotal * MICRO, nTimeTotal * MILLI / nBlocksTotal);
2393 
2394  connectTrace.BlockConnected(pindexNew, std::move(pthisBlock));
2395  return true;
2396 }
2397 
// Find the best-work candidate tip whose chain back to the active chain is
// fully usable (no invalid ancestors, no missing block data). Unusable
// candidates are pruned from setBlockIndexCandidates as a side effect;
// returns nullptr when no candidate remains.
// NOTE(review): the function signature line is elided in this listing.
2403  do {
2404  CBlockIndex *pindexNew = nullptr;
2405 
2406  // Find the best candidate header.
2407  {
// setBlockIndexCandidates is ordered by increasing work, so the best
// candidate is the last element (rbegin).
2408  std::set<CBlockIndex*, CBlockIndexWorkComparator>::reverse_iterator it = setBlockIndexCandidates.rbegin();
2409  if (it == setBlockIndexCandidates.rend())
2410  return nullptr;
2411  pindexNew = *it;
2412  }
2413 
2414  // Check whether all blocks on the path between the currently active chain and the candidate are valid.
2415  // Just going until the active chain is an optimization, as we know all blocks in it are valid already.
2416  CBlockIndex *pindexTest = pindexNew;
2417  bool fInvalidAncestor = false;
2418  while (pindexTest && !m_chain.Contains(pindexTest)) {
2419  assert(pindexTest->HaveTxsDownloaded() || pindexTest->nHeight == 0);
2420 
2421  // Pruned nodes may have entries in setBlockIndexCandidates for
2422  // which block files have been deleted. Remove those as candidates
2423  // for the most work chain if we come across them; we can't switch
2424  // to a chain unless we have all the non-active-chain parent blocks.
2425  bool fFailedChain = pindexTest->nStatus & BLOCK_FAILED_MASK;
2426  bool fMissingData = !(pindexTest->nStatus & BLOCK_HAVE_DATA);
2427  if (fFailedChain || fMissingData) {
2428  // Candidate chain is not usable (either invalid or missing data)
2429  if (fFailedChain && (pindexBestInvalid == nullptr || pindexNew->nChainWork > pindexBestInvalid->nChainWork))
2430  pindexBestInvalid = pindexNew;
2431  CBlockIndex *pindexFailed = pindexNew;
2432  // Remove the entire chain from the set.
// Walk down from the candidate to the first bad block, marking (or
// re-queueing) each descendant along the way.
2433  while (pindexTest != pindexFailed) {
2434  if (fFailedChain) {
2435  pindexFailed->nStatus |= BLOCK_FAILED_CHILD;
2436  } else if (fMissingData) {
2437  // If we're missing data, then add back to m_blocks_unlinked,
2438  // so that if the block arrives in the future we can try adding
2439  // to setBlockIndexCandidates again.
// NOTE(review): the m_blocks_unlinked.insert(...) call line is elided in
// this listing; only its argument line remains below.
2441  std::make_pair(pindexFailed->pprev, pindexFailed));
2442  }
2443  setBlockIndexCandidates.erase(pindexFailed);
2444  pindexFailed = pindexFailed->pprev;
2445  }
2446  setBlockIndexCandidates.erase(pindexTest);
2447  fInvalidAncestor = true;
2448  break;
2449  }
2450  pindexTest = pindexTest->pprev;
2451  }
2452  if (!fInvalidAncestor)
2453  return pindexNew;
2454  } while(true);
2455 }
2456 
// Remove from setBlockIndexCandidates every entry with strictly less work
// than the current tip; the tip itself (or a better successor) always stays.
// NOTE(review): the function signature line is elided in this listing.
2459  // Note that we can't delete the current block itself, as we may need to return to it later in case a
2460  // reorganization to a better block fails.
2461  std::set<CBlockIndex*, CBlockIndexWorkComparator>::iterator it = setBlockIndexCandidates.begin();
2462  while (it != setBlockIndexCandidates.end() && setBlockIndexCandidates.value_comp()(*it, m_chain.Tip())) {
2463  setBlockIndexCandidates.erase(it++);
2464  }
2465  // Either the current tip or a successor of it we're working towards is left in setBlockIndexCandidates.
2466  assert(!setBlockIndexCandidates.empty());
2467 }
2468 
/**
 * Try to make some progress towards making pindexMostWork the active chain:
 * disconnect active blocks back to the fork point, then connect up to 32 of
 * the new chain's blocks. fInvalidFound is set if a block on the new chain
 * turned out to be invalid. Returns false only on a system error (failed
 * disconnect/connect for non-consensus reasons); the mempool is kept
 * consistent with the resulting tip in every exit path.
 */
2475 bool CChainState::ActivateBestChainStep(BlockValidationState& state, CBlockIndex* pindexMostWork, const std::shared_ptr<const CBlock>& pblock, bool& fInvalidFound, ConnectTrace& connectTrace)
2476 {
2479 
2480  const CBlockIndex* pindexOldTip = m_chain.Tip();
2481  const CBlockIndex* pindexFork = m_chain.FindFork(pindexMostWork);
2482 
2483  // Disconnect active blocks which are no longer in the best chain.
2484  bool fBlocksDisconnected = false;
2485  DisconnectedBlockTransactions disconnectpool;
2486  while (m_chain.Tip() && m_chain.Tip() != pindexFork) {
2487  if (!DisconnectTip(state, &disconnectpool)) {
2488  // This is likely a fatal error, but keep the mempool consistent,
2489  // just in case. Only remove from the mempool in this case.
2490  MaybeUpdateMempoolForReorg(disconnectpool, false);
2491 
2492  // If we're unable to disconnect a block during normal operation,
2493  // then that is a failure of our local system -- we should abort
2494  // rather than stay on a less work chain.
2495  AbortNode(state, "Failed to disconnect block; see debug.log for details");
2496  return false;
2497  }
2498  fBlocksDisconnected = true;
2499  }
2500 
2501  // Build list of new blocks to connect (in descending height order).
2502  std::vector<CBlockIndex*> vpindexToConnect;
2503  bool fContinue = true;
2504  int nHeight = pindexFork ? pindexFork->nHeight : -1;
2505  while (fContinue && nHeight != pindexMostWork->nHeight) {
2506  // Don't iterate the entire list of potential improvements toward the best tip, as we likely only need
2507  // a few blocks along the way.
2508  int nTargetHeight = std::min(nHeight + 32, pindexMostWork->nHeight);
2509  vpindexToConnect.clear();
2510  vpindexToConnect.reserve(nTargetHeight - nHeight);
// Walk back from the batch's highest block to the fork height; the vector
// ends up in descending height order and is connected in reverse below.
2511  CBlockIndex* pindexIter = pindexMostWork->GetAncestor(nTargetHeight);
2512  while (pindexIter && pindexIter->nHeight != nHeight) {
2513  vpindexToConnect.push_back(pindexIter);
2514  pindexIter = pindexIter->pprev;
2515  }
2516  nHeight = nTargetHeight;
2517 
2518  // Connect new blocks.
// pblock is only forwarded for the block it actually corresponds to
// (the most-work tip); all other blocks are read from disk by ConnectTip.
2519  for (CBlockIndex* pindexConnect : reverse_iterate(vpindexToConnect)) {
2520  if (!ConnectTip(state, pindexConnect, pindexConnect == pindexMostWork ? pblock : std::shared_ptr<const CBlock>(), connectTrace, disconnectpool)) {
2521  if (state.IsInvalid()) {
2522  // The block violates a consensus rule.
// NOTE(review): a line (2523) is elided here in this listing — the visible
// brace structure of this if/else is incomplete; confirm against the full
// source before editing.
2524  InvalidChainFound(vpindexToConnect.front());
2525  }
2526  state = BlockValidationState();
2527  fInvalidFound = true;
2528  fContinue = false;
2529  break;
2530  } else {
2531  // A system error occurred (disk space, database error, ...).
2532  // Make the mempool consistent with the current tip, just in case
2533  // any observers try to use it before shutdown.
2534  MaybeUpdateMempoolForReorg(disconnectpool, false);
2535  return false;
2536  }
2537  } else {
2539  if (!pindexOldTip || m_chain.Tip()->nChainWork > pindexOldTip->nChainWork) {
2540  // We're in a better position than we were. Return temporarily to release the lock.
2541  fContinue = false;
2542  break;
2543  }
2544  }
2545  }
2546  }
2547 
2548  if (fBlocksDisconnected) {
2549  // If any blocks were disconnected, disconnectpool may be non empty. Add
2550  // any disconnected transactions back to the mempool.
2551  MaybeUpdateMempoolForReorg(disconnectpool, true);
2552  }
2553  if (m_mempool) m_mempool->check(this->CoinsTip(), this->m_chain.Height() + 1);
2554 
2556 
2557  return true;
2558 }
2559 
// NOTE(review): the signature and entire body of this function (original
// lines 2560-2564) are elided in this listing; only the braces remain.
// Confirm against the full source — nothing can be documented from here.
2561 {
2565 }
2566 
// Notify the UI when the best-known header (pindexBestHeader) has changed
// since the previous call. Reads shared state under cs_main but fires the
// notification after releasing it. Returns whether a notification was sent.
// NOTE(review): the function signature line is elided in this listing.
2568  bool fNotify = false;
2569  bool fInitialBlockDownload = false;
// Function-local static: remembers the header tip seen by the last call so
// repeated calls with an unchanged tip stay silent.
2570  static CBlockIndex* pindexHeaderOld = nullptr;
2571  CBlockIndex* pindexHeader = nullptr;
2572  {
2573  LOCK(cs_main);
2574  pindexHeader = pindexBestHeader;
2575 
2576  if (pindexHeader != pindexHeaderOld) {
2577  fNotify = true;
2578  fInitialBlockDownload = chainstate.IsInitialBlockDownload();
2579  pindexHeaderOld = pindexHeader;
2580  }
2581  }
2582  // Send block tip changed notifications without cs_main
2583  if (fNotify) {
2584  uiInterface.NotifyHeaderTip(GetSynchronizationState(fInitialBlockDownload), pindexHeader);
2585  }
2586  return fNotify;
2587 }
2588 
// Throttle: when more than 10 validation-interface callbacks are pending,
// block until the queue drains, so reindex/IBD cannot grow it unboundedly.
// NOTE(review): the function signature (line 2589-2590) and the body of the
// if (presumably a SyncWithValidationInterfaceQueue() call, line 2593) are
// elided in this listing — confirm against the full source.
2591 
2592  if (GetMainSignals().CallbacksPending() > 10) {
2594  }
2595 }
2596 
/**
 * Make the best-known valid chain the active chain. Works in batches via
 * ActivateBestChainStep, periodically releasing cs_main so other threads
 * (and the validation callback queue) can make progress. pblock, if given,
 * is used as a cached copy of the block being activated. Returns false only
 * on a system error.
 */
2597 bool CChainState::ActivateBestChain(BlockValidationState& state, std::shared_ptr<const CBlock> pblock)
2598 {
2599  // Note that while we're often called here from ProcessNewBlock, this is
2600  // far from a guarantee. Things in the P2P/RPC will often end up calling
2601  // us in the middle of ProcessNewBlock - do not assume pblock is set
2602  // sanely for performance or correctness!
2604 
2605  // ABC maintains a fair degree of expensive-to-calculate internal state
2606  // because this function periodically releases cs_main so that it does not lock up other threads for too long
2607  // during large connects - and to allow for e.g. the callback queue to drain
2608  // we use m_cs_chainstate to enforce mutual exclusion so that only one caller may execute this function at a time
2610 
2611  CBlockIndex *pindexMostWork = nullptr;
2612  CBlockIndex *pindexNewTip = nullptr;
2613  int nStopAtHeight = gArgs.GetIntArg("-stopatheight", DEFAULT_STOPATHEIGHT);
2614  do {
2615  // Block until the validation queue drains. This should largely
2616  // never happen in normal operation, however may happen during
2617  // reindex, causing memory blowup if we run too far ahead.
2618  // Note that if a validationinterface callback ends up calling
2619  // ActivateBestChain this may lead to a deadlock! We should
2620  // probably have a DEBUG_LOCKORDER test for this in the future.
2622 
2623  {
2624  LOCK(cs_main);
2625  // Lock transaction pool for at least as long as it takes for connectTrace to be consumed
2626  LOCK(MempoolMutex());
2627  CBlockIndex* starting_tip = m_chain.Tip();
2628  bool blocks_connected = false;
2629  do {
2630  // We absolutely may not unlock cs_main until we've made forward progress
2631  // (with the exception of shutdown due to hardware issues, low disk space, etc).
2632  ConnectTrace connectTrace; // Destructed before cs_main is unlocked
2633 
// pindexMostWork is cached across iterations and only recomputed when
// unknown or after an invalid block invalidated the previous target.
2634  if (pindexMostWork == nullptr) {
2635  pindexMostWork = FindMostWorkChain();
2636  }
2637 
2638  // Whether we have anything to do at all.
2639  if (pindexMostWork == nullptr || pindexMostWork == m_chain.Tip()) {
2640  break;
2641  }
2642 
2643  bool fInvalidFound = false;
2644  std::shared_ptr<const CBlock> nullBlockPtr;
// Only pass pblock through when it is actually the block being connected
// at the most-work tip; otherwise ConnectTip reads blocks from disk.
2645  if (!ActivateBestChainStep(state, pindexMostWork, pblock && pblock->GetHash() == pindexMostWork->GetBlockHash() ? pblock : nullBlockPtr, fInvalidFound, connectTrace)) {
2646  // A system error occurred
2647  return false;
2648  }
2649  blocks_connected = true;
2650 
2651  if (fInvalidFound) {
2652  // Wipe cache, we may need another branch now.
2653  pindexMostWork = nullptr;
2654  }
2655  pindexNewTip = m_chain.Tip();
2656 
2657  for (const PerBlockConnectTrace& trace : connectTrace.GetBlocksConnected()) {
2658  assert(trace.pblock && trace.pindex);
2659  GetMainSignals().BlockConnected(trace.pblock, trace.pindex);
2660  }
2661  } while (!m_chain.Tip() || (starting_tip && CBlockIndexWorkComparator()(m_chain.Tip(), starting_tip)));
2662  if (!blocks_connected) return true;
2663 
2664  const CBlockIndex* pindexFork = m_chain.FindFork(starting_tip);
2665  bool fInitialDownload = IsInitialBlockDownload();
2666 
2667  // Notify external listeners about the new tip.
2668  // Enqueue while holding cs_main to ensure that UpdatedBlockTip is called in the order in which blocks are connected
2669  if (pindexFork != pindexNewTip) {
2670  // Notify ValidationInterface subscribers
2671  GetMainSignals().UpdatedBlockTip(pindexNewTip, pindexFork, fInitialDownload);
2672 
2673  // Always notify the UI if a new block tip was connected
2674  uiInterface.NotifyBlockTip(GetSynchronizationState(fInitialDownload), pindexNewTip);
2675  }
2676  }
2677  // When we reach this point, we switched to a new tip (stored in pindexNewTip).
2678 
2679  if (nStopAtHeight && pindexNewTip && pindexNewTip->nHeight >= nStopAtHeight) StartShutdown();
2680 
2681  // We check shutdown only after giving ActivateBestChainStep a chance to run once so that we
2682  // never shutdown before connecting the genesis block during LoadChainTip(). Previously this
2683  // caused an assert() failure during shutdown in such cases as the UTXO DB flushing checks
2684  // that the best block hash is non-null.
2685  if (ShutdownRequested()) break;
2686  } while (pindexNewTip != pindexMostWork);
2687  CheckBlockIndex();
2688 
2689  // Write changes periodically to disk, after relay.
// NOTE(review): the condition line guarding this early return (a periodic
// FlushStateToDisk call, per the comment above) is elided in this listing —
// confirm against the full source.
2691  return false;
2692  }
2693 
2694  return true;
2695 }
2696 
// Mark a block as "precious": give it priority over other blocks with equal
// work by adjusting its sequence id, then re-run chain activation so it can
// become the tip. A no-op when the block has less work than the current tip.
// NOTE(review): the function signature line (2697) and several statement
// lines (2705, 2707, 2709, 2711, 2715, 2719) are elided in this listing —
// confirm against the full source before editing.
2698 {
2699  {
2700  LOCK(cs_main);
2701  if (pindex->nChainWork < m_chain.Tip()->nChainWork) {
2702  // Nothing to do, this block is not at the tip.
2703  return true;
2704  }
2706  // The chain has been extended since the last call, reset the counter.
2708  }
// Remove and (below) conditionally re-insert the block so its candidate-set
// ordering reflects the updated sequence id.
2710  setBlockIndexCandidates.erase(pindex);
2712  if (nBlockReverseSequenceId > std::numeric_limits<int32_t>::min()) {
2713  // We can't keep reducing the counter if somebody really wants to
2714  // call preciousblock 2**31-1 times on the same set of tips...
2716  }
2717  if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && pindex->HaveTxsDownloaded()) {
2718  setBlockIndexCandidates.insert(pindex);
2720  }
2721  }
2722 
2723  return ActivateBestChain(state, std::shared_ptr<const CBlock>());
2724 }
2725 
// Mark a block (and, implicitly, all of its descendants) as invalid and
// disconnect the active chain back past it, keeping the block index and
// setBlockIndexCandidates consistent and notifying the UI of the new tip.
// Returns false on a failed disconnect or if the block could not be removed
// from the active chain.
// NOTE(review): the function signature line (2726) is elided in this listing.
2727 {
2728  // Genesis block can't be invalidated
2729  assert(pindex);
2730  if (pindex->nHeight == 0) return false;
2731 
2732  CBlockIndex* to_mark_failed = pindex;
2733  bool pindex_was_in_chain = false;
2734  int disconnected = 0;
2735 
2736  // We do not allow ActivateBestChain() to run while InvalidateBlock() is
2737  // running, as that could cause the tip to change while we disconnect
2738  // blocks.
2740 
2741  // We'll be acquiring and releasing cs_main below, to allow the validation
2742  // callbacks to run. However, we should keep the block index in a
2743  // consistent state as we disconnect blocks -- in particular we need to
2744  // add equal-work blocks to setBlockIndexCandidates as we disconnect.
2745  // To avoid walking the block index repeatedly in search of candidates,
2746  // build a map once so that we can look up candidate blocks by chain
2747  // work as we go.
2748  std::multimap<const arith_uint256, CBlockIndex *> candidate_blocks_by_work;
2749 
2750  {
2751  LOCK(cs_main);
2752  for (const auto& entry : m_blockman.m_block_index) {
2753  CBlockIndex *candidate = entry.second;
2754  // We don't need to put anything in our active chain into the
2755  // multimap, because those candidates will be found and considered
2756  // as we disconnect.
2757  // Instead, consider only non-active-chain blocks that have at
2758  // least as much work as where we expect the new tip to end up.
2759  if (!m_chain.Contains(candidate) &&
2760  !CBlockIndexWorkComparator()(candidate, pindex->pprev) &&
2761  candidate->IsValid(BLOCK_VALID_TRANSACTIONS) &&
2762  candidate->HaveTxsDownloaded()) {
2763  candidate_blocks_by_work.insert(std::make_pair(candidate->nChainWork, candidate));
2764  }
2765  }
2766  }
2767 
2768  // Disconnect (descendants of) pindex, and mark them invalid.
// One loop iteration per disconnected block; cs_main is re-acquired each
// time so validation callbacks can run between iterations.
2769  while (true) {
2770  if (ShutdownRequested()) break;
2771 
2772  // Make sure the queue of validation callbacks doesn't grow unboundedly.
2774 
2775  LOCK(cs_main);
2776  // Lock for as long as disconnectpool is in scope to make sure MaybeUpdateMempoolForReorg is
2777  // called after DisconnectTip without unlocking in between
2778  LOCK(MempoolMutex());
2779  if (!m_chain.Contains(pindex)) break;
2780  pindex_was_in_chain = true;
2781  CBlockIndex *invalid_walk_tip = m_chain.Tip();
2782 
2783  // ActivateBestChain considers blocks already in m_chain
2784  // unconditionally valid already, so force disconnect away from it.
2785  DisconnectedBlockTransactions disconnectpool;
2786  bool ret = DisconnectTip(state, &disconnectpool);
2787  // DisconnectTip will add transactions to disconnectpool.
2788  // Adjust the mempool to be consistent with the new tip, adding
2789  // transactions back to the mempool if disconnecting was successful,
2790  // and we're not doing a very deep invalidation (in which case
2791  // keeping the mempool up to date is probably futile anyway).
2792  MaybeUpdateMempoolForReorg(disconnectpool, /* fAddToMempool = */ (++disconnected <= 10) && ret);
2793  if (!ret) return false;
2794  assert(invalid_walk_tip->pprev == m_chain.Tip());
2795 
2796  // We immediately mark the disconnected blocks as invalid.
2797  // This prevents a case where pruned nodes may fail to invalidateblock
2798  // and be left unable to start as they have no tip candidates (as there
2799  // are no blocks that meet the "have data and are not invalid per
2800  // nStatus" criteria for inclusion in setBlockIndexCandidates).
2801  invalid_walk_tip->nStatus |= BLOCK_FAILED_VALID;
2802  setDirtyBlockIndex.insert(invalid_walk_tip);
2803  setBlockIndexCandidates.erase(invalid_walk_tip);
2804  setBlockIndexCandidates.insert(invalid_walk_tip->pprev);
2805  if (invalid_walk_tip->pprev == to_mark_failed && (to_mark_failed->nStatus & BLOCK_FAILED_VALID)) {
2806  // We only want to mark the last disconnected block as BLOCK_FAILED_VALID; its children
2807  // need to be BLOCK_FAILED_CHILD instead.
2808  to_mark_failed->nStatus = (to_mark_failed->nStatus ^ BLOCK_FAILED_VALID) | BLOCK_FAILED_CHILD;
2809  setDirtyBlockIndex.insert(to_mark_failed);
2810  }
2811 
2812  // Add any equal or more work headers to setBlockIndexCandidates
2813  auto candidate_it = candidate_blocks_by_work.lower_bound(invalid_walk_tip->pprev->nChainWork);
2814  while (candidate_it != candidate_blocks_by_work.end()) {
2815  if (!CBlockIndexWorkComparator()(candidate_it->second, invalid_walk_tip->pprev)) {
2816  setBlockIndexCandidates.insert(candidate_it->second);
2817  candidate_it = candidate_blocks_by_work.erase(candidate_it);
2818  } else {
2819  ++candidate_it;
2820  }
2821  }
2822 
2823  // Track the last disconnected block, so we can correct its BLOCK_FAILED_CHILD status in future
2824  // iterations, or, if it's the last one, call InvalidChainFound on it.
2825  to_mark_failed = invalid_walk_tip;
2826  }
2827 
2828  CheckBlockIndex();
2829 
2830  {
2831  LOCK(cs_main);
2832  if (m_chain.Contains(to_mark_failed)) {
2833  // If the to-be-marked invalid block is in the active chain, something is interfering and we can't proceed.
2834  return false;
2835  }
2836 
2837  // Mark pindex (or the last disconnected block) as invalid, even when it never was in the main chain
2838  to_mark_failed->nStatus |= BLOCK_FAILED_VALID;
2839  setDirtyBlockIndex.insert(to_mark_failed);
2840  setBlockIndexCandidates.erase(to_mark_failed);
2841  m_blockman.m_failed_blocks.insert(to_mark_failed);
2842 
2843  // If any new blocks somehow arrived while we were disconnecting
2844  // (above), then the pre-calculation of what should go into
2845  // setBlockIndexCandidates may have missed entries. This would
2846  // technically be an inconsistency in the block index, but if we clean
2847  // it up here, this should be an essentially unobservable error.
2848  // Loop back over all block index entries and add any missing entries
2849  // to setBlockIndexCandidates.
2850  BlockMap::iterator it = m_blockman.m_block_index.begin();
2851  while (it != m_blockman.m_block_index.end()) {
2852  if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->HaveTxsDownloaded() && !setBlockIndexCandidates.value_comp()(it->second, m_chain.Tip())) {
2853  setBlockIndexCandidates.insert(it->second);
2854  }
2855  it++;
2856  }
2857 
2858  InvalidChainFound(to_mark_failed);
2859  }
2860 
2861  // Only notify about a new block tip if the active chain was modified.
2862  if (pindex_was_in_chain) {
2863  uiInterface.NotifyBlockTip(GetSynchronizationState(IsInitialBlockDownload()), to_mark_failed->pprev);
2864  }
2865  return true;
2866 }
2867 
// Clear the BLOCK_FAILED_* flags from pindex, all of its descendants, and
// all of its ancestors, re-inserting newly eligible blocks into
// setBlockIndexCandidates. Used by the reconsiderblock RPC path.
// NOTE(review): the function signature line is elided in this listing.
2870 
2871  int nHeight = pindex->nHeight;
2872 
2873  // Remove the invalidity flag from this block and all its descendants.
// A block index entry is a descendant of pindex iff its ancestor at
// pindex's height is pindex itself (GetAncestor walks the skip list).
2874  BlockMap::iterator it = m_blockman.m_block_index.begin();
2875  while (it != m_blockman.m_block_index.end()) {
2876  if (!it->second->IsValid() && it->second->GetAncestor(nHeight) == pindex) {
2877  it->second->nStatus &= ~BLOCK_FAILED_MASK;
2878  setDirtyBlockIndex.insert(it->second);
2879  if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->HaveTxsDownloaded() && setBlockIndexCandidates.value_comp()(m_chain.Tip(), it->second)) {
2880  setBlockIndexCandidates.insert(it->second);
2881  }
2882  if (it->second == pindexBestInvalid) {
2883  // Reset invalid block marker if it was pointing to one of those.
2884  pindexBestInvalid = nullptr;
2885  }
2886  m_blockman.m_failed_blocks.erase(it->second);
2887  }
2888  it++;
2889  }
2890 
2891  // Remove the invalidity flag from all ancestors too.
2892  while (pindex != nullptr) {
2893  if (pindex->nStatus & BLOCK_FAILED_MASK) {
2894  pindex->nStatus &= ~BLOCK_FAILED_MASK;
2895  setDirtyBlockIndex.insert(pindex);
2896  m_blockman.m_failed_blocks.erase(pindex);
2897  }
2898  pindex = pindex->pprev;
2899  }
2900 }
2901 
// Create (or return the existing) CBlockIndex entry for a block header:
// link it to its parent, compute height/time-max/chain-work, raise its
// validity to BLOCK_VALID_TREE, and update pindexBestHeader. The new entry
// is marked dirty so it gets persisted.
// NOTE(review): the function signature line (2902) is elided in this listing.
2903 {
2905 
2906  // Check for duplicate
2907  uint256 hash = block.GetHash();
2908  BlockMap::iterator it = m_block_index.find(hash);
2909  if (it != m_block_index.end())
2910  return it->second;
2911 
2912  // Construct new block index object
2913  CBlockIndex* pindexNew = new CBlockIndex(block);
2914  // We assign the sequence id to blocks only when the full data is available,
2915  // to avoid miners withholding blocks but broadcasting headers, to get a
2916  // competitive advantage.
2917  pindexNew->nSequenceId = 0;
// The map owns the hash; point the index entry's phashBlock at the map key
// so the hash is stored only once.
2918  BlockMap::iterator mi = m_block_index.insert(std::make_pair(hash, pindexNew)).first;
2919  pindexNew->phashBlock = &((*mi).first);
2920  BlockMap::iterator miPrev = m_block_index.find(block.hashPrevBlock);
2921  if (miPrev != m_block_index.end())
2922  {
2923  pindexNew->pprev = (*miPrev).second;
2924  pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
2925  pindexNew->BuildSkip();
2926  }
2927  pindexNew->nTimeMax = (pindexNew->pprev ? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime) : pindexNew->nTime);
2928  pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew);
2929  pindexNew->RaiseValidity(BLOCK_VALID_TREE);
2930  if (pindexBestHeader == nullptr || pindexBestHeader->nChainWork < pindexNew->nChainWork)
2931  pindexBestHeader = pindexNew;
2932 
2933  setDirtyBlockIndex.insert(pindexNew);
2934 
2935  return pindexNew;
2936 }
2937 
/**
 * Record that the full transaction data for a block was received and stored
 * at the given file position. If all of the block's ancestors also have
 * their transactions, propagate nChainTx / sequence ids down through any
 * previously unlinked descendants and add eligible blocks to
 * setBlockIndexCandidates; otherwise queue the block in m_blocks_unlinked.
 */
2939 void CChainState::ReceivedBlockTransactions(const CBlock& block, CBlockIndex* pindexNew, const FlatFilePos& pos)
2940 {
2941  pindexNew->nTx = block.vtx.size();
2942  pindexNew->nChainTx = 0;
2943  pindexNew->nFile = pos.nFile;
2944  pindexNew->nDataPos = pos.nPos;
2945  pindexNew->nUndoPos = 0;
2946  pindexNew->nStatus |= BLOCK_HAVE_DATA;
// NOTE(review): the condition line guarding the BLOCK_OPT_WITNESS flag (line
// 2947) and line 2950 are elided in this listing — confirm against the full
// source before editing.
2948  pindexNew->nStatus |= BLOCK_OPT_WITNESS;
2949  }
2951  setDirtyBlockIndex.insert(pindexNew);
2952 
2953  if (pindexNew->pprev == nullptr || pindexNew->pprev->HaveTxsDownloaded()) {
2954  // If pindexNew is the genesis block or all parents are BLOCK_VALID_TRANSACTIONS.
2955  std::deque<CBlockIndex*> queue;
2956  queue.push_back(pindexNew);
2957 
2958  // Recursively process any descendant blocks that now may be eligible to be connected.
// Breadth-first walk: each processed block may unlink further descendants
// from m_blocks_unlinked, which are appended to the queue.
2959  while (!queue.empty()) {
2960  CBlockIndex *pindex = queue.front();
2961  queue.pop_front();
2962  pindex->nChainTx = (pindex->pprev ? pindex->pprev->nChainTx : 0) + pindex->nTx;
2963  pindex->nSequenceId = nBlockSequenceId++;
2964  if (m_chain.Tip() == nullptr || !setBlockIndexCandidates.value_comp()(pindex, m_chain.Tip())) {
2965  setBlockIndexCandidates.insert(pindex);
2966  }
2967  std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = m_blockman.m_blocks_unlinked.equal_range(pindex);
2968  while (range.first != range.second) {
2969  std::multimap<CBlockIndex*, CBlockIndex*>::iterator it = range.first;
2970  queue.push_back(it->second);
2971  range.first++;
2972  m_blockman.m_blocks_unlinked.erase(it);
2973  }
2974  }
2975  } else {
2976  if (pindexNew->pprev && pindexNew->pprev->IsValid(BLOCK_VALID_TREE)) {
2977  m_blockman.m_blocks_unlinked.insert(std::make_pair(pindexNew->pprev, pindexNew));
2978  }
2979  }
2980 }
2981 
2982 static bool CheckBlockHeader(const CBlockHeader& block, BlockValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW = true)
2983 {
2984  // Check proof of work matches claimed amount
2985  if (fCheckPOW && !CheckProofOfWork(block.GetHash(), block.nBits, consensusParams))
2986  return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, "high-hash", "proof of work failed");
2987 
2988  return true;
2989 }
2990 
/**
 * Context-independent block validity checks: header PoW, signet solution,
 * merkle root (including CVE-2012-2459 malleability), coinbase placement,
 * per-transaction checks (including CVE-2018-17144 duplicate inputs) and
 * legacy sigop limits. Caches success in block.fChecked when both PoW and
 * merkle-root checking were requested.
 */
2991 bool CheckBlock(const CBlock& block, BlockValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW, bool fCheckMerkleRoot)
2992 {
2993  // These are checks that are independent of context.
2994 
2995  if (block.fChecked)
2996  return true;
2997 
2998  // Check that the header is valid (particularly PoW). This is mostly
2999  // redundant with the call in AcceptBlockHeader.
3000  if (!CheckBlockHeader(block, state, consensusParams, fCheckPOW))
3001  return false;
3002 
3003  // Signet only: check block solution
3004  if (consensusParams.signet_blocks && fCheckPOW && !CheckSignetBlockSolution(block, consensusParams)) {
3005  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-signet-blksig", "signet block signature validation failure");
3006  }
3007 
3008  // Check the merkle root.
3009  if (fCheckMerkleRoot) {
3010  bool mutated;
3011  uint256 hashMerkleRoot2 = BlockMerkleRoot(block, &mutated);
3012  if (block.hashMerkleRoot != hashMerkleRoot2)
3013  return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-txnmrklroot", "hashMerkleRoot mismatch");
3014 
3015  // Check for merkle tree malleability (CVE-2012-2459): repeating sequences
3016  // of transactions in a block without affecting the merkle root of a block,
3017  // while still invalidating it.
3018  if (mutated)
3019  return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-txns-duplicate", "duplicate transaction");
3020  }
3021 
3022  // All potential-corruption validation must be done before we do any
3023  // transaction validation, as otherwise we may mark the header as invalid
3024  // because we receive the wrong transactions for it.
3025  // Note that witness malleability is checked in ContextualCheckBlock, so no
3026  // checks that use witness data may be performed here.
3027 
3028  // Size limits
// NOTE(review): the size-limit condition line (3029) guarding this return is
// elided in this listing — confirm against the full source.
3030  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-length", "size limits failed");
3031 
3032  // First transaction must be coinbase, the rest must not be
3033  if (block.vtx.empty() || !block.vtx[0]->IsCoinBase())
3034  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-missing", "first tx is not coinbase");
3035  for (unsigned int i = 1; i < block.vtx.size(); i++)
3036  if (block.vtx[i]->IsCoinBase())
3037  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-multiple", "more than one coinbase");
3038 
3039  // Check transactions
3040  // Must check for duplicate inputs (see CVE-2018-17144)
3041  for (const auto& tx : block.vtx) {
3042  TxValidationState tx_state;
3043  if (!CheckTransaction(*tx, tx_state)) {
3044  // CheckBlock() does context-free validation checks. The only
3045  // possible failures are consensus failures.
// NOTE(review): the state.Invalid(...) call lines (3046-3047) preceding this
// argument are elided in this listing — confirm against the full source.
3048  strprintf("Transaction check failed (tx hash %s) %s", tx->GetHash().ToString(), tx_state.GetDebugMessage()));
3049  }
3050  }
// Legacy (non-witness) sigop counting; the bound check line is elided below.
3051  unsigned int nSigOps = 0;
3052  for (const auto& tx : block.vtx)
3053  {
3054  nSigOps += GetLegacySigOpCount(*tx);
3055  }
3057  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-sigops", "out-of-bounds SigOpCount");
3058 
3059  if (fCheckPOW && fCheckMerkleRoot)
3060  block.fChecked = true;
3061 
3062  return true;
3063 }
3064 
3065 void UpdateUncommittedBlockStructures(CBlock& block, const CBlockIndex* pindexPrev, const Consensus::Params& consensusParams)
3066 {
3067  int commitpos = GetWitnessCommitmentIndex(block);
3068  static const std::vector<unsigned char> nonce(32, 0x00);
3069  if (commitpos != NO_WITNESS_COMMITMENT && DeploymentActiveAfter(pindexPrev, consensusParams, Consensus::DEPLOYMENT_SEGWIT) && !block.vtx[0]->HasWitness()) {
3070  CMutableTransaction tx(*block.vtx[0]);
3071  tx.vin[0].scriptWitness.stack.resize(1);
3072  tx.vin[0].scriptWitness.stack[0] = nonce;
3073  block.vtx[0] = MakeTransactionRef(std::move(tx));
3074  }
3075 }
3076 
/**
 * Add a segwit commitment output to the block's coinbase if one is not
 * already present: an OP_RETURN output whose payload is the 4-byte
 * 0xaa21a9ed tag followed by SHA256d(witness merkle root || 32 zero bytes).
 * Returns the commitment script bytes (empty if one already existed), and in
 * all cases runs UpdateUncommittedBlockStructures to set the witness nonce.
 */
3077 std::vector<unsigned char> GenerateCoinbaseCommitment(CBlock& block, const CBlockIndex* pindexPrev, const Consensus::Params& consensusParams)
3078 {
3079  std::vector<unsigned char> commitment;
3080  int commitpos = GetWitnessCommitmentIndex(block);
// ret doubles as the all-zero witness reserved value hashed into the
// commitment below.
3081  std::vector<unsigned char> ret(32, 0x00);
3082  if (commitpos == NO_WITNESS_COMMITMENT) {
3083  uint256 witnessroot = BlockWitnessMerkleRoot(block, nullptr);
3084  CHash256().Write(witnessroot).Write(ret).Finalize(witnessroot);
3085  CTxOut out;
3086  out.nValue = 0;
// NOTE(review): the scriptPubKey sizing line (3087, presumably a resize to
// the commitment script length) is elided in this listing — confirm against
// the full source.
3088  out.scriptPubKey[0] = OP_RETURN;
3089  out.scriptPubKey[1] = 0x24;
3090  out.scriptPubKey[2] = 0xaa;
3091  out.scriptPubKey[3] = 0x21;
3092  out.scriptPubKey[4] = 0xa9;
3093  out.scriptPubKey[5] = 0xed;
3094  memcpy(&out.scriptPubKey[6], witnessroot.begin(), 32);
3095  commitment = std::vector<unsigned char>(out.scriptPubKey.begin(), out.scriptPubKey.end());
3096  CMutableTransaction tx(*block.vtx[0]);
3097  tx.vout.push_back(out);
3098  block.vtx[0] = MakeTransactionRef(std::move(tx));
3099  }
3100  UpdateUncommittedBlockStructures(block, pindexPrev, consensusParams);
3101  return commitment;
3102 }
3103 
// Return the block index entry for the highest checkpoint that exists in our
// block index, by scanning the checkpoint map from highest to lowest; returns
// nullptr when none of the checkpointed blocks is known.
// NOTE(review): the function signature line (3104) is elided in this listing.
3105 {
3106  const MapCheckpoints& checkpoints = data.mapCheckpoints;
3107 
3108  for (const MapCheckpoints::value_type& i : reverse_iterate(checkpoints))
3109  {
3110  const uint256& hash = i.second;
3111  CBlockIndex* pindex = LookupBlockIndex(hash);
3112  if (pindex) {
3113  return pindex;
3114  }
3115  }
3116  return nullptr;
3117 }
3118 
/** Contextual (chain-dependent) validity checks for a block header.
 *
 *  Validates, in order: the proof-of-work target (nBits), checkpoint height,
 *  the timestamp lower bound (must exceed the previous block's median time
 *  past) and upper bound (adjusted time plus MAX_FUTURE_BLOCK_TIME), and the
 *  minimum nVersion demanded by active softforks.
 *
 *  @param block         header to validate
 *  @param state         out: reason for rejection, if any
 *  @param blockman      block index manager, used for checkpoint lookup
 *  @param params        chain parameters (consensus rules and checkpoints)
 *  @param pindexPrev    previous block's index entry; must be non-null
 *  @param nAdjustedTime current network-adjusted time
 *  @return true iff all contextual header checks pass
 */
static bool ContextualCheckBlockHeader(const CBlockHeader& block, BlockValidationState& state, BlockManager& blockman, const CChainParams& params, const CBlockIndex* pindexPrev, int64_t nAdjustedTime) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    assert(pindexPrev != nullptr);
    const int nHeight = pindexPrev->nHeight + 1;

    // Check proof of work
    const Consensus::Params& consensusParams = params.GetConsensus();
    if (block.nBits != GetNextWorkRequired(pindexPrev, &block, consensusParams))
        return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, "bad-diffbits", "incorrect proof of work");

    // Check against checkpoints
    if (fCheckpointsEnabled) {
        // Don't accept any forks from the main chain prior to last checkpoint.
        // GetLastCheckpoint finds the last checkpoint in MapCheckpoints that's in our
        // BlockIndex().
        CBlockIndex* pcheckpoint = blockman.GetLastCheckpoint(params.Checkpoints());
        if (pcheckpoint && nHeight < pcheckpoint->nHeight) {
            LogPrintf("ERROR: %s: forked chain older than last checkpoint (height %d)\n", __func__, nHeight);
            return state.Invalid(BlockValidationResult::BLOCK_CHECKPOINT, "bad-fork-prior-to-checkpoint");
        }
    }

    // Check timestamp against prev: must be strictly greater than the
    // previous block's median time past.
    if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast())
        return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, "time-too-old", "block's timestamp is too early");

    // Check timestamp: reject headers too far in the future. Note this uses
    // BLOCK_TIME_FUTURE, a failure class distinct from permanent invalidity.
    if (block.GetBlockTime() > nAdjustedTime + MAX_FUTURE_BLOCK_TIME)
        return state.Invalid(BlockValidationResult::BLOCK_TIME_FUTURE, "time-too-new", "block timestamp too far in the future");

    // Reject blocks with outdated version once the corresponding deployment is active
    if ((block.nVersion < 2 && DeploymentActiveAfter(pindexPrev, consensusParams, Consensus::DEPLOYMENT_HEIGHTINCB)) ||
        (block.nVersion < 3 && DeploymentActiveAfter(pindexPrev, consensusParams, Consensus::DEPLOYMENT_DERSIG)) ||
        (block.nVersion < 4 && DeploymentActiveAfter(pindexPrev, consensusParams, Consensus::DEPLOYMENT_CLTV))) {
        return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, strprintf("bad-version(0x%08x)", block.nVersion),
                             strprintf("rejected nVersion=0x%08x block", block.nVersion));
    }

    return true;
}
3168 
/** Contextual (chain-dependent) validity checks on a block's transactions.
 *
 *  Header checks live in ContextualCheckBlockHeader; this function enforces:
 *  transaction finality (with the median-time-past locktime cutoff when CSV
 *  is active), the serialized-height-in-coinbase rule, witness commitment
 *  correctness, and the block weight limit.
 *
 *  @param pindexPrev previous block's index entry; null only for the genesis
 *                    block (nHeight == 0).
 */
static bool ContextualCheckBlock(const CBlock& block, BlockValidationState& state, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev)
{
    const int nHeight = pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1;

    // Enforce BIP113 (Median Time Past).
    int nLockTimeFlags = 0;
    if (DeploymentActiveAfter(pindexPrev, consensusParams, Consensus::DEPLOYMENT_CSV)) {
        assert(pindexPrev != nullptr);
        nLockTimeFlags |= LOCKTIME_MEDIAN_TIME_PAST;
    }

    // Locktime cutoff: median time past when BIP113 applies, else the block's
    // own timestamp.
    int64_t nLockTimeCutoff = (nLockTimeFlags & LOCKTIME_MEDIAN_TIME_PAST)
                              ? pindexPrev->GetMedianTimePast()
                              : block.GetBlockTime();

    // Check that all transactions are finalized
    for (const auto& tx : block.vtx) {
        if (!IsFinalTx(*tx, nHeight, nLockTimeCutoff)) {
            return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-nonfinal", "non-final transaction");
        }
    }

    // Enforce rule that the coinbase starts with serialized block height
    if (DeploymentActiveAfter(pindexPrev, consensusParams, Consensus::DEPLOYMENT_HEIGHTINCB))
    {
        CScript expect = CScript() << nHeight;
        if (block.vtx[0]->vin[0].scriptSig.size() < expect.size() ||
            !std::equal(expect.begin(), expect.end(), block.vtx[0]->vin[0].scriptSig.begin())) {
            return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-height", "block height mismatch in coinbase");
        }
    }

    // Validation for witness commitments.
    // * We compute the witness hash (which is the hash including witnesses) of all the block's transactions, except the
    //   coinbase (where 0x0000....0000 is used instead).
    // * The coinbase scriptWitness is a stack of a single 32-byte vector, containing a witness reserved value (unconstrained).
    // * We build a merkle tree with all those witness hashes as leaves (similar to the hashMerkleRoot in the block header).
    // * There must be at least one output whose scriptPubKey is a single 36-byte push, the first 4 bytes of which are
    //   {0xaa, 0x21, 0xa9, 0xed}, and the following 32 bytes are SHA256^2(witness root, witness reserved value). In case there are
    //   multiple, the last one is used.
    bool fHaveWitness = false;
    if (DeploymentActiveAfter(pindexPrev, consensusParams, Consensus::DEPLOYMENT_SEGWIT)) {
        int commitpos = GetWitnessCommitmentIndex(block);
        if (commitpos != NO_WITNESS_COMMITMENT) {
            bool malleated = false;
            uint256 hashWitness = BlockWitnessMerkleRoot(block, &malleated);
            // The malleation check is ignored; as the transaction tree itself
            // already does not permit it, it is impossible to trigger in the
            // witness tree.
            if (block.vtx[0]->vin[0].scriptWitness.stack.size() != 1 || block.vtx[0]->vin[0].scriptWitness.stack[0].size() != 32) {
                return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-witness-nonce-size", strprintf("%s : invalid witness reserved value size", __func__));
            }
            CHash256().Write(hashWitness).Write(block.vtx[0]->vin[0].scriptWitness.stack[0]).Finalize(hashWitness);
            if (memcmp(hashWitness.begin(), &block.vtx[0]->vout[commitpos].scriptPubKey[6], 32)) {
                return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-witness-merkle-match", strprintf("%s : witness merkle commitment mismatch", __func__));
            }
            fHaveWitness = true;
        }
    }

    // No witness data is allowed in blocks that don't commit to witness data, as this would otherwise leave room for spam
    if (!fHaveWitness) {
        for (const auto& tx : block.vtx) {
            if (tx->HasWitness()) {
                return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "unexpected-witness", strprintf("%s : unexpected witness data found", __func__));
            }
        }
    }

    // After the coinbase witness reserved value and commitment are verified,
    // we can check if the block weight passes (before we've checked the
    // coinbase witness, it would be possible for the weight to be too
    // large by filling up the coinbase witness, which doesn't change
    // the block hash, so we couldn't mark the block as permanently
    // failed).
    if (GetBlockWeight(block) > MAX_BLOCK_WEIGHT) {
        return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-weight", strprintf("%s : weight limit failed", __func__));
    }

    return true;
}
3256 
/** Validate a block header and add it to the block index.
 *
 *  Returns true if the header was already known or is newly accepted; on
 *  success *ppindex (if provided) points at the header's index entry. Marks
 *  descendants of known-invalid blocks as BLOCK_FAILED_CHILD.
 */
bool BlockManager::AcceptBlockHeader(const CBlockHeader& block, BlockValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex)
{
    // NOTE(review): upstream v22 has AssertLockHeld(cs_main) here (line 3259);
    // it appears dropped in this copy -- confirm against the repository.
    // Check for duplicate
    uint256 hash = block.GetHash();
    BlockMap::iterator miSelf = m_block_index.find(hash);
    if (hash != chainparams.GetConsensus().hashGenesisBlock) {
        if (miSelf != m_block_index.end()) {
            // Block header is already known.
            CBlockIndex* pindex = miSelf->second;
            if (ppindex)
                *ppindex = pindex;
            if (pindex->nStatus & BLOCK_FAILED_MASK) {
                LogPrint(BCLog::VALIDATION, "%s: block %s is marked invalid\n", __func__, hash.ToString());
                return state.Invalid(BlockValidationResult::BLOCK_CACHED_INVALID, "duplicate");
            }
            return true;
        }

        // Context-free header checks (PoW encoding, etc.).
        if (!CheckBlockHeader(block, state, chainparams.GetConsensus())) {
            LogPrint(BCLog::VALIDATION, "%s: Consensus::CheckBlockHeader: %s, %s\n", __func__, hash.ToString(), state.ToString());
            return false;
        }

        // Get prev block index
        CBlockIndex* pindexPrev = nullptr;
        BlockMap::iterator mi = m_block_index.find(block.hashPrevBlock);
        if (mi == m_block_index.end()) {
            LogPrint(BCLog::VALIDATION, "%s: %s prev block not found\n", __func__, hash.ToString());
            return state.Invalid(BlockValidationResult::BLOCK_MISSING_PREV, "prev-blk-not-found");
        }
        pindexPrev = (*mi).second;
        if (pindexPrev->nStatus & BLOCK_FAILED_MASK) {
            LogPrint(BCLog::VALIDATION, "%s: %s prev block invalid\n", __func__, hash.ToString());
            return state.Invalid(BlockValidationResult::BLOCK_INVALID_PREV, "bad-prevblk");
        }
        if (!ContextualCheckBlockHeader(block, state, *this, chainparams, pindexPrev, GetAdjustedTime())) {
            LogPrint(BCLog::VALIDATION, "%s: Consensus::ContextualCheckBlockHeader: %s, %s\n", __func__, hash.ToString(), state.ToString());
            return false;
        }

        /* Determine if this block descends from any block which has been found
         * invalid (m_failed_blocks), then mark pindexPrev and any blocks between
         * them as failed. For example:
         *
         *                D3
         *              /
         *      B2 - C2
         *    /         \
         *  A             D2 - E2 - F2
         *    \
         *      B1 - C1 - D1 - E1
         *
         * In the case that we attempted to reorg from E1 to F2, only to find
         * C2 to be invalid, we would mark D2, E2, and F2 as BLOCK_FAILED_CHILD
         * but NOT D3 (it was not in any of our candidate sets at the time).
         *
         * In any case D3 will also be marked as BLOCK_FAILED_CHILD at restart
         * in LoadBlockIndex.
         */
        if (!pindexPrev->IsValid(BLOCK_VALID_SCRIPTS)) {
            // The above does not mean "invalid": it checks if the previous block
            // hasn't been validated up to BLOCK_VALID_SCRIPTS. This is a performance
            // optimization, in the common case of adding a new block to the tip,
            // we don't need to iterate over the failed blocks list.
            for (const CBlockIndex* failedit : m_failed_blocks) {
                if (pindexPrev->GetAncestor(failedit->nHeight) == failedit) {
                    assert(failedit->nStatus & BLOCK_FAILED_VALID);
                    CBlockIndex* invalid_walk = pindexPrev;
                    while (invalid_walk != failedit) {
                        invalid_walk->nStatus |= BLOCK_FAILED_CHILD;
                        setDirtyBlockIndex.insert(invalid_walk);
                        invalid_walk = invalid_walk->pprev;
                    }
                    LogPrint(BCLog::VALIDATION, "%s: %s prev block invalid\n", __func__, hash.ToString());
                    return state.Invalid(BlockValidationResult::BLOCK_INVALID_PREV, "bad-prevblk");
                }
            }
        }
    }
    CBlockIndex* pindex = AddToBlockIndex(block);

    if (ppindex)
        *ppindex = pindex;

    return true;
}
3344 
3345 // Exposed wrapper for AcceptBlockHeader
3346 bool ChainstateManager::ProcessNewBlockHeaders(const std::vector<CBlockHeader>& headers, BlockValidationState& state, const CChainParams& chainparams, const CBlockIndex** ppindex)
3347 {
3349  {
3350  LOCK(cs_main);
3351  for (const CBlockHeader& header : headers) {
3352  CBlockIndex *pindex = nullptr; // Use a temp pindex instead of ppindex to avoid a const_cast
3353  bool accepted = m_blockman.AcceptBlockHeader(
3354  header, state, chainparams, &pindex);
3356 
3357  if (!accepted) {
3358  return false;
3359  }
3360  if (ppindex) {
3361  *ppindex = pindex;
3362  }
3363  }
3364  }
3366  if (ActiveChainstate().IsInitialBlockDownload() && ppindex && *ppindex) {
3367  LogPrintf("Synchronizing blockheaders, height: %d (~%.2f%%)\n", (*ppindex)->nHeight, 100.0/((*ppindex)->nHeight+(GetAdjustedTime() - (*ppindex)->GetBlockTime()) / Params().GetConsensus().nPowTargetSpacing) * (*ppindex)->nHeight);
3368  }
3369  }
3370  return true;
3371 }
3372 
3374 bool CChainState::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, BlockValidationState& state, CBlockIndex** ppindex, bool fRequested, const FlatFilePos* dbp, bool* fNewBlock)
3375 {
3376  const CBlock& block = *pblock;
3377 
3378  if (fNewBlock) *fNewBlock = false;
3380 
3381  CBlockIndex *pindexDummy = nullptr;
3382  CBlockIndex *&pindex = ppindex ? *ppindex : pindexDummy;
3383 
3384  bool accepted_header = m_blockman.AcceptBlockHeader(block, state, m_params, &pindex);
3385  CheckBlockIndex();
3386 
3387  if (!accepted_header)
3388  return false;
3389 
3390  // Try to process all requested blocks that we don't have, but only
3391  // process an unrequested block if it's new and has enough work to
3392  // advance our tip, and isn't too many blocks ahead.
3393  bool fAlreadyHave = pindex->nStatus & BLOCK_HAVE_DATA;
3394  bool fHasMoreOrSameWork = (m_chain.Tip() ? pindex->nChainWork >= m_chain.Tip()->nChainWork : true);
3395  // Blocks that are too out-of-order needlessly limit the effectiveness of
3396  // pruning, because pruning will not delete block files that contain any
3397  // blocks which are too close in height to the tip. Apply this test
3398  // regardless of whether pruning is enabled; it should generally be safe to
3399  // not process unrequested blocks.
3400  bool fTooFarAhead = (pindex->nHeight > int(m_chain.Height() + MIN_BLOCKS_TO_KEEP));
3401 
3402  // TODO: Decouple this function from the block download logic by removing fRequested
3403  // This requires some new chain data structure to efficiently look up if a
3404  // block is in a chain leading to a candidate for best tip, despite not
3405  // being such a candidate itself.
3406 
3407  // TODO: deal better with return value and error conditions for duplicate
3408  // and unrequested blocks.
3409  if (fAlreadyHave) return true;
3410  if (!fRequested) { // If we didn't ask for it:
3411  if (pindex->nTx != 0) return true; // This is a previously-processed block that was pruned
3412  if (!fHasMoreOrSameWork) return true; // Don't process less-work chains
3413  if (fTooFarAhead) return true; // Block height is too high
3414 
3415  // Protect against DoS attacks from low-work chains.
3416  // If our tip is behind, a peer could try to send us
3417  // low-work blocks on a fake chain that we would never
3418  // request; don't process these.
3419  if (pindex->nChainWork < nMinimumChainWork) return true;
3420  }
3421 
3422  if (!CheckBlock(block, state, m_params.GetConsensus()) ||
3423  !ContextualCheckBlock(block, state, m_params.GetConsensus(), pindex->pprev)) {
3424  if (state.IsInvalid() && state.GetResult() != BlockValidationResult::BLOCK_MUTATED) {
3425  pindex->nStatus |= BLOCK_FAILED_VALID;
3426  setDirtyBlockIndex.insert(pindex);
3427  }
3428  return error("%s: %s", __func__, state.ToString());
3429  }
3430 
3431  // Header is valid/has work, merkle tree and segwit merkle tree are good...RELAY NOW
3432  // (but if it does not build on our best tip, let the SendMessages loop relay it)
3433  if (!IsInitialBlockDownload() && m_chain.Tip() == pindex->pprev)
3434  GetMainSignals().NewPoWValidBlock(pindex, pblock);
3435 
3436  // Write block to history file
3437  if (fNewBlock) *fNewBlock = true;
3438  try {
3439  FlatFilePos blockPos = SaveBlockToDisk(block, pindex->nHeight, m_chain, m_params, dbp);
3440  if (blockPos.IsNull()) {
3441  state.Error(strprintf("%s: Failed to find position to write new block to disk", __func__));
3442  return false;
3443  }
3444  ReceivedBlockTransactions(block, pindex, blockPos);
3445  } catch (const std::runtime_error& e) {
3446  return AbortNode(state, std::string("System error: ") + e.what());
3447  }
3448 
3450 
3451  CheckBlockIndex();
3452 
3453  return true;
3454 }
3455 
3456 bool ChainstateManager::ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<const CBlock>& block, bool force_processing, bool* new_block)
3457 {
3459 
3460  {
3461  CBlockIndex *pindex = nullptr;
3462  if (new_block) *new_block = false;
3463  BlockValidationState state;
3464 
3465  // CheckBlock() does not support multi-threaded block validation because CBlock::fChecked can cause data race.
3466  // Therefore, the following critical section must include the CheckBlock() call as well.
3467  LOCK(cs_main);
3468 
3469  // Skipping AcceptBlock() for CheckBlock() failures means that we will never mark a block as invalid if
3470  // CheckBlock() fails. This is protective against consensus failure if there are any unknown forms of block
3471  // malleability that cause CheckBlock() to fail; see e.g. CVE-2012-2459 and
3472  // https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2019-February/016697.html. Because CheckBlock() is
3473  // not very expensive, the anti-DoS benefits of caching failure (of a definitely-invalid block) are not substantial.
3474  bool ret = CheckBlock(*block, state, chainparams.GetConsensus());
3475  if (ret) {
3476  // Store to disk
3477  ret = ActiveChainstate().AcceptBlock(block, state, &pindex, force_processing, nullptr, new_block);
3478  }
3479  if (!ret) {
3480  GetMainSignals().BlockChecked(*block, state);
3481  return error("%s: AcceptBlock FAILED (%s)", __func__, state.ToString());
3482  }
3483  }
3484 
3486 
3487  BlockValidationState state; // Only used to report errors, not invalidity - ignore it
3488  if (!ActiveChainstate().ActivateBestChain(state, block)) {
3489  return error("%s: ActivateBestChain failed (%s)", __func__, state.ToString());
3490  }
3491 
3492  return true;
3493 }
3494 
3496 {
3497  CChainState& active_chainstate = ActiveChainstate();
3498  if (!active_chainstate.m_mempool) {
3499  TxValidationState state;
3500  state.Invalid(TxValidationResult::TX_NO_MEMPOOL, "no-mempool");
3501  return MempoolAcceptResult::Failure(state);
3502  }
3503  auto result = AcceptToMemoryPool(active_chainstate, *active_chainstate.m_mempool, tx, /*bypass_limits=*/ false, test_accept);
3504  active_chainstate.m_mempool->check(active_chainstate.CoinsTip(), active_chainstate.m_chain.Height() + 1);
3505  return result;
3506 }
3507 
3509  const CChainParams& chainparams,
3510  CChainState& chainstate,
3511  const CBlock& block,
3512  CBlockIndex* pindexPrev,
3513  bool fCheckPOW,
3514  bool fCheckMerkleRoot)
3515 {
3517  assert(pindexPrev && pindexPrev == chainstate.m_chain.Tip());
3518  CCoinsViewCache viewNew(&chainstate.CoinsTip());
3519  uint256 block_hash(block.GetHash());
3520  CBlockIndex indexDummy(block);
3521  indexDummy.pprev = pindexPrev;
3522  indexDummy.nHeight = pindexPrev->nHeight + 1;
3523  indexDummy.phashBlock = &block_hash;
3524 
3525  // NOTE: CheckBlockHeader is called by CheckBlock
3526  if (!ContextualCheckBlockHeader(block, state, chainstate.m_blockman, chainparams, pindexPrev, GetAdjustedTime()))
3527  return error("%s: Consensus::ContextualCheckBlockHeader: %s", __func__, state.ToString());
3528  if (!CheckBlock(block, state, chainparams.GetConsensus(), fCheckPOW, fCheckMerkleRoot))
3529  return error("%s: Consensus::CheckBlock: %s", __func__, state.ToString());
3530  if (!ContextualCheckBlock(block, state, chainparams.GetConsensus(), pindexPrev))
3531  return error("%s: Consensus::ContextualCheckBlock: %s", __func__, state.ToString());
3532  if (!chainstate.ConnectBlock(block, state, &indexDummy, viewNew, true)) {
3533  return false;
3534  }
3535  assert(state.IsValid());
3536 
3537  return true;
3538 }
3539 
/** Mark one block file (and its undo file) as pruned: clear the HAVE_DATA /
 *  HAVE_UNDO flags and file positions of every index entry stored in it, and
 *  reset the file's info record. Does not delete the file itself.
 *  NOTE(review): upstream v22 asserts cs_main is held at the top of this
 *  function; the line appears dropped in this copy -- confirm against the repo. */
void BlockManager::PruneOneBlockFile(const int fileNumber)
{
    for (const auto& entry : m_block_index) {
        CBlockIndex* pindex = entry.second;
        if (pindex->nFile == fileNumber) {
            pindex->nStatus &= ~BLOCK_HAVE_DATA;
            pindex->nStatus &= ~BLOCK_HAVE_UNDO;
            pindex->nFile = 0;
            pindex->nDataPos = 0;
            pindex->nUndoPos = 0;
            setDirtyBlockIndex.insert(pindex);

            // Prune from m_blocks_unlinked -- any block we prune would have
            // to be downloaded again in order to consider its chain, at which
            // point it would be considered as a candidate for
            // m_blocks_unlinked or setBlockIndexCandidates.
            auto range = m_blocks_unlinked.equal_range(pindex->pprev);
            while (range.first != range.second) {
                std::multimap<CBlockIndex *, CBlockIndex *>::iterator _it = range.first;
                range.first++; // advance before a possible erase invalidates _it
                if (_it->second == pindex) {
                    m_blocks_unlinked.erase(_it);
                }
            }
        }
    }

    vinfoBlockFile[fileNumber].SetNull();
    setDirtyFileInfo.insert(fileNumber);
}
3577 
3578 void BlockManager::FindFilesToPruneManual(std::set<int>& setFilesToPrune, int nManualPruneHeight, int chain_tip_height)
3579 {
3580  assert(fPruneMode && nManualPruneHeight > 0);
3581 
3583  if (chain_tip_height < 0) {
3584  return;
3585  }
3586 
3587  // last block to prune is the lesser of (user-specified height, MIN_BLOCKS_TO_KEEP from the tip)
3588  unsigned int nLastBlockWeCanPrune = std::min((unsigned)nManualPruneHeight, chain_tip_height - MIN_BLOCKS_TO_KEEP);
3589  int count = 0;
3590  for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
3591  if (vinfoBlockFile[fileNumber].nSize == 0 || vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune) {
3592  continue;
3593  }
3594  PruneOneBlockFile(fileNumber);
3595  setFilesToPrune.insert(fileNumber);
3596  count++;
3597  }
3598  LogPrintf("Prune (Manual): prune_height=%d removed %d blk/rev pairs\n", nLastBlockWeCanPrune, count);
3599 }
3600 
3601 /* This function is called from the RPC code for pruneblockchain */
3602 void PruneBlockFilesManual(CChainState& active_chainstate, int nManualPruneHeight)
3603 {
3604  BlockValidationState state;
3605  if (!active_chainstate.FlushStateToDisk(
3606  state, FlushStateMode::NONE, nManualPruneHeight)) {
3607  LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString());
3608  }
3609 }
3610 
/** Select block files to prune automatically so total blk/rev disk usage
 *  drops below nPruneTarget.
 *  @param setFilesToPrune   out: file numbers whose blk/rev files can be unlinked
 *  @param nPruneAfterHeight never prune while the tip is at or below this height
 *  @param chain_tip_height  current tip height (<0 means no tip yet)
 *  @param prune_height      highest block height that may be pruned
 *  @param is_ibd            in initial block download; use a larger buffer
 *  NOTE(review): upstream v22 takes LOCK2(cs_main, cs_LastBlockFile) at the
 *  top; the line appears dropped in this copy -- confirm against the repo. */
void BlockManager::FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight, int chain_tip_height, int prune_height, bool is_ibd)
{
    if (chain_tip_height < 0 || nPruneTarget == 0) {
        return;
    }
    if ((uint64_t)chain_tip_height <= nPruneAfterHeight) {
        return;
    }

    // Highest prunable height: keep at least MIN_BLOCKS_TO_KEEP blocks below the tip.
    unsigned int nLastBlockWeCanPrune = std::min(prune_height, chain_tip_height - static_cast<int>(MIN_BLOCKS_TO_KEEP));
    uint64_t nCurrentUsage = CalculateCurrentUsage();
    // We don't check to prune until after we've allocated new space for files
    // So we should leave a buffer under our target to account for another allocation
    // before the next pruning.
    uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
    uint64_t nBytesToPrune;
    int count = 0;

    if (nCurrentUsage + nBuffer >= nPruneTarget) {
        // On a prune event, the chainstate DB is flushed.
        // To avoid excessive prune events negating the benefit of high dbcache
        // values, we should not prune too rapidly.
        // So when pruning in IBD, increase the buffer a bit to avoid a re-prune too soon.
        if (is_ibd) {
            // Since this is only relevant during IBD, we use a fixed 10%
            nBuffer += nPruneTarget / 10;
        }

        for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
            nBytesToPrune = vinfoBlockFile[fileNumber].nSize + vinfoBlockFile[fileNumber].nUndoSize;

            if (vinfoBlockFile[fileNumber].nSize == 0) {
                continue;
            }

            if (nCurrentUsage + nBuffer < nPruneTarget) {  // are we below our target?
                break;
            }

            // don't prune files that could have a block within MIN_BLOCKS_TO_KEEP of the main chain's tip but keep scanning
            if (vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune) {
                continue;
            }

            PruneOneBlockFile(fileNumber);
            // Queue up the files for removal
            setFilesToPrune.insert(fileNumber);
            nCurrentUsage -= nBytesToPrune;
            count++;
        }
    }

    LogPrint(BCLog::PRUNE, "Prune: target=%dMiB actual=%dMiB diff=%dMiB max_prune_height=%d removed %d blk/rev pairs\n",
             nPruneTarget/1024/1024, nCurrentUsage/1024/1024,
             ((int64_t)nPruneTarget - (int64_t)nCurrentUsage)/1024/1024,
             nLastBlockWeCanPrune, count);
}
3669 
3671 {
3673 
3674  if (hash.IsNull())
3675  return nullptr;
3676 
3677  // Return existing
3678  BlockMap::iterator mi = m_block_index.find(hash);
3679  if (mi != m_block_index.end())
3680  return (*mi).second;
3681 
3682  // Create new
3683  CBlockIndex* pindexNew = new CBlockIndex();
3684  mi = m_block_index.insert(std::make_pair(hash, pindexNew)).first;
3685  pindexNew->phashBlock = &((*mi).first);
3686 
3687  return pindexNew;
3688 }
3689 
3691  const Consensus::Params& consensus_params,
3692  std::set<CBlockIndex*, CBlockIndexWorkComparator>& block_index_candidates)
3693 {
3694  if (!m_block_tree_db->LoadBlockIndexGuts(consensus_params, [this](const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return this->InsertBlockIndex(hash); })) {
3695  return false;
3696  }
3697 
3698  // Calculate nChainWork
3699  std::vector<std::pair<int, CBlockIndex*> > vSortedByHeight;
3700  vSortedByHeight.reserve(m_block_index.size());
3701  for (const std::pair<const uint256, CBlockIndex*>& item : m_block_index)
3702  {
3703  CBlockIndex* pindex = item.second;
3704  vSortedByHeight.push_back(std::make_pair(pindex->nHeight, pindex));
3705  }
3706  sort(vSortedByHeight.begin(), vSortedByHeight.end());
3707  for (const std::pair<int, CBlockIndex*>& item : vSortedByHeight)
3708  {
3709  if (ShutdownRequested()) return false;
3710  CBlockIndex* pindex = item.second;
3711  pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex);
3712  pindex->nTimeMax = (pindex->pprev ? std::max(pindex->pprev->nTimeMax, pindex->nTime) : pindex->nTime);
3713  // We can link the chain of blocks for which we've received transactions at some point.
3714  // Pruned nodes may have deleted the block.
3715  if (pindex->nTx > 0) {
3716  if (pindex->pprev) {
3717  if (pindex->pprev->HaveTxsDownloaded()) {
3718  pindex->nChainTx = pindex->pprev->nChainTx + pindex->nTx;
3719  } else {
3720  pindex->nChainTx = 0;
3721  m_blocks_unlinked.insert(std::make_pair(pindex->pprev, pindex));
3722  }
3723  } else {
3724  pindex->nChainTx = pindex->nTx;
3725  }
3726  }
3727  if (!(pindex->nStatus & BLOCK_FAILED_MASK) && pindex->pprev && (pindex->pprev->nStatus & BLOCK_FAILED_MASK)) {
3728  pindex->nStatus |= BLOCK_FAILED_CHILD;
3729  setDirtyBlockIndex.insert(pindex);
3730  }
3731  if (pindex->IsAssumedValid() ||
3732  (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) &&
3733  (pindex->HaveTxsDownloaded() || pindex->pprev == nullptr))) {
3734  block_index_candidates.insert(pindex);
3735  }
3736  if (pindex->nStatus & BLOCK_FAILED_MASK && (!pindexBestInvalid || pindex->nChainWork > pindexBestInvalid->nChainWork))
3737  pindexBestInvalid = pindex;
3738  if (pindex->pprev)
3739  pindex->BuildSkip();
3740  if (pindex->IsValid(BLOCK_VALID_TREE) && (pindexBestHeader == nullptr || CBlockIndexWorkComparator()(pindexBestHeader, pindex)))
3741  pindexBestHeader = pindex;
3742  }
3743 
3744  return true;
3745 }
3746 
    // NOTE(review): the enclosing `void BlockManager::Unload() {` line appears
    // dropped in this copy of the file -- confirm against the repository.
    // Release all block index state: clear the bookkeeping sets, then delete
    // every heap-allocated CBlockIndex before clearing the map that owns them.
    m_failed_blocks.clear();
    m_blocks_unlinked.clear();

    for (const BlockMap::value_type& entry : m_block_index) {
        delete entry.second;
    }

    m_block_index.clear();
}
3757 
/** Load the block index and block-file metadata from the block tree DB.
 *  Also verifies that every blk file referenced by the index can be opened,
 *  and restores the "have we ever pruned" and "reindexing" flags. */
bool BlockManager::LoadBlockIndexDB(std::set<CBlockIndex*, CBlockIndexWorkComparator>& setBlockIndexCandidates)
{
    if (!LoadBlockIndex(
            ::Params().GetConsensus(),
            setBlockIndexCandidates)) {
        return false;
    }

    // Load block file info
    m_block_tree_db->ReadLastBlockFile(nLastBlockFile);
    vinfoBlockFile.resize(nLastBlockFile + 1);
    LogPrintf("%s: last block file = %i\n", __func__, nLastBlockFile);
    for (int nFile = 0; nFile <= nLastBlockFile; nFile++) {
        m_block_tree_db->ReadBlockFileInfo(nFile, vinfoBlockFile[nFile]);
    }
    LogPrintf("%s: last block file info: %s\n", __func__, vinfoBlockFile[nLastBlockFile].ToString());
    // Pick up any file-info records past the recorded last file (can exist if
    // the recorded last-file value is stale).
    for (int nFile = nLastBlockFile + 1; true; nFile++) {
        CBlockFileInfo info;
        if (m_block_tree_db->ReadBlockFileInfo(nFile, info)) {
            vinfoBlockFile.push_back(info);
        } else {
            break;
        }
    }

    // Check presence of blk files
    LogPrintf("Checking all blk files are present...\n");
    std::set<int> setBlkDataFiles;
    for (const std::pair<const uint256, CBlockIndex*>& item : m_block_index) {
        CBlockIndex* pindex = item.second;
        if (pindex->nStatus & BLOCK_HAVE_DATA) {
            setBlkDataFiles.insert(pindex->nFile);
        }
    }
    for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++)
    {
        FlatFilePos pos(*it, 0);
        if (CAutoFile(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION).IsNull()) {
            return false;
        }
    }

    // Check whether we have ever pruned block & undo files
    m_block_tree_db->ReadFlag("prunedblockfiles", fHavePruned);
    if (fHavePruned)
        LogPrintf("LoadBlockIndexDB(): Block files have previously been pruned\n");

    // Check whether we need to continue reindexing
    bool fReindexing = false;
    m_block_tree_db->ReadReindexing(fReindexing);
    if(fReindexing) fReindex = true;

    return true;
}
3812 
3814 {
3815  if (!m_mempool) return;
3816  if (args.GetBoolArg("-persistmempool", DEFAULT_PERSIST_MEMPOOL)) {
3817  ::LoadMempool(*m_mempool, *this);
3818  }
3820 }
3821 
3823 {
3825  const CCoinsViewCache& coins_cache = CoinsTip();
3826  assert(!coins_cache.GetBestBlock().IsNull()); // Never called when the coins view is empty
3827  const CBlockIndex* tip = m_chain.Tip();
3828 
3829  if (tip && tip->GetBlockHash() == coins_cache.GetBestBlock()) {
3830  return true;
3831  }
3832 
3833  // Load pointer to end of best chain
3834  CBlockIndex* pindex = m_blockman.LookupBlockIndex(coins_cache.GetBestBlock());
3835  if (!pindex) {
3836  return false;
3837  }
3838  m_chain.SetTip(pindex);
3840 
3841  tip = m_chain.Tip();
3842  LogPrintf("Loaded best chain: hashBestChain=%s height=%d date=%s progress=%f\n",
3843  tip->GetBlockHash().ToString(),
3844  m_chain.Height(),
3847  return true;
3848 }
3849 
3851 {
3852  uiInterface.ShowProgress(_("Verifying blocks…").translated, 0, false);
3853 }
3854 
3856 {
3857  uiInterface.ShowProgress("", 100, false);
3858 }
3859 
3861  CChainState& chainstate,
3862  const CChainParams& chainparams,
3863  CCoinsView& coinsview,
3864  int nCheckLevel, int nCheckDepth)
3865 {
3867 
3868  if (chainstate.m_chain.Tip() == nullptr || chainstate.m_chain.Tip()->pprev == nullptr)
3869  return true;
3870 
3871  // Verify blocks in the best chain
3872  if (nCheckDepth <= 0 || nCheckDepth > chainstate.m_chain.Height())
3873  nCheckDepth = chainstate.m_chain.Height();
3874  nCheckLevel = std::max(0, std::min(4, nCheckLevel));
3875  LogPrintf("Verifying last %i blocks at level %i\n", nCheckDepth, nCheckLevel);
3876  CCoinsViewCache coins(&coinsview);
3877  CBlockIndex* pindex;
3878  CBlockIndex* pindexFailure = nullptr;
3879  int nGoodTransactions = 0;
3880  BlockValidationState state;
3881  int reportDone = 0;
3882  LogPrintf("[0%%]..."); /* Continued */
3883 
3884  const bool is_snapshot_cs{!chainstate.m_from_snapshot_blockhash};
3885 
3886  for (pindex = chainstate.m_chain.Tip(); pindex && pindex->pprev; pindex = pindex->pprev) {
3887  const int percentageDone = std::max(1, std::min(99, (int)(((double)(chainstate.m_chain.Height() - pindex->nHeight)) / (double)nCheckDepth * (nCheckLevel >= 4 ? 50 : 100))));
3888  if (reportDone < percentageDone/10) {
3889  // report every 10% step
3890  LogPrintf("[%d%%]...", percentageDone); /* Continued */
3891  reportDone = percentageDone/10;
3892  }
3893  uiInterface.ShowProgress(_("Verifying blocks…").translated, percentageDone, false);
3894  if (pindex->nHeight <= chainstate.m_chain.Height()-nCheckDepth)
3895  break;
3896  if ((fPruneMode || is_snapshot_cs) && !(pindex->nStatus & BLOCK_HAVE_DATA)) {
3897  // If pruning or running under an assumeutxo snapshot, only go
3898  // back as far as we have data.
3899  LogPrintf("VerifyDB(): block verification stopping at height %d (pruning, no data)\n", pindex->nHeight);
3900  break;
3901  }
3902  CBlock block;
3903  // check level 0: read from disk
3904  if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus()))
3905  return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
3906  // check level 1: verify block validity
3907  if (nCheckLevel >= 1 && !CheckBlock(block, state, chainparams.GetConsensus()))
3908  return error("%s: *** found bad block at %d, hash=%s (%s)\n", __func__,
3909  pindex->nHeight, pindex->GetBlockHash().ToString(), state.ToString());
3910  // check level 2: verify undo validity
3911  if (nCheckLevel >= 2 && pindex) {
3912  CBlockUndo undo;
3913  if (!pindex->GetUndoPos().IsNull()) {
3914  if (!UndoReadFromDisk(undo, pindex)) {
3915  return error("VerifyDB(): *** found bad undo data at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
3916  }
3917  }
3918  }
3919  // check level 3: check for inconsistencies during memory-only disconnect of tip blocks
3920  size_t curr_coins_usage = coins.DynamicMemoryUsage() + chainstate.CoinsTip().DynamicMemoryUsage();
3921 
3922  if (nCheckLevel >= 3 && curr_coins_usage <= chainstate.m_coinstip_cache_size_bytes) {
3923  assert(coins.GetBestBlock() == pindex->GetBlockHash());
3924  DisconnectResult res = chainstate.DisconnectBlock(block, pindex, coins);
3925  if (res == DISCONNECT_FAILED) {
3926  return error("VerifyDB(): *** irrecoverable inconsistency in block data at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
3927  }
3928  if (res == DISCONNECT_UNCLEAN) {
3929  nGoodTransactions = 0;
3930  pindexFailure = pindex;
3931  } else {
3932  nGoodTransactions += block.vtx.size();
3933  }
3934  }
3935  if (ShutdownRequested()) return true;
3936  }
3937  if (pindexFailure)
3938  return error("VerifyDB(): *** coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", chainstate.m_chain.Height() - pindexFailure->nHeight + 1, nGoodTransactions);
3939 
3940  // store block count as we move pindex at check level >= 4
3941  int block_count = chainstate.m_chain.Height() - pindex->nHeight;
3942 
3943  // check level 4: try reconnecting blocks
3944  if (nCheckLevel >= 4) {
3945  while (pindex != chainstate.m_chain.Tip()) {
3946  const int percentageDone = std::max(1, std::min(99, 100 - (int)(((double)(chainstate.m_chain.Height() - pindex->nHeight)) / (double)nCheckDepth * 50)));
3947  if (reportDone < percentageDone/10) {
3948  // report every 10% step
3949  LogPrintf("[%d%%]...", percentageDone); /* Continued */
3950  reportDone = percentageDone/10;
3951  }
3952  uiInterface.ShowProgress(_("Verifying blocks…").translated, percentageDone, false);
3953  pindex = chainstate.m_chain.Next(pindex);
3954  CBlock block;
3955  if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus()))
3956  return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
3957  if (!chainstate.ConnectBlock(block, state, pindex, coins)) {
3958  return error("VerifyDB(): *** found unconnectable block at %d, hash=%s (%s)", pindex->nHeight, pindex->GetBlockHash().ToString(), state.ToString());
3959  }
3960  if (ShutdownRequested()) return true;
3961  }
3962  }
3963 
3964  LogPrintf("[DONE].\n");
3965  LogPrintf("No coin database inconsistencies in last %i blocks (%i transactions)\n", block_count, nGoodTransactions);
3966 
3967  return true;
3968 }
3969 
3972 {
3973  // TODO: merge with ConnectBlock
3974  CBlock block;
3975  if (!ReadBlockFromDisk(block, pindex, m_params.GetConsensus())) {
3976  return error("ReplayBlock(): ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
3977  }
3978 
3979  for (const CTransactionRef& tx : block.vtx) {
3980  if (!tx->IsCoinBase()) {
3981  for (const CTxIn &txin : tx->vin) {
3982  inputs.SpendCoin(txin.prevout);
3983  }
3984  }
3985  // Pass check = true as every addition may be an overwrite.
3986  AddCoins(inputs, *tx, pindex->nHeight, true);
3987  }
3988  return true;
3989 }
3990 
3992 {
3993  LOCK(cs_main);
3994 
3995  CCoinsView& db = this->CoinsDB();
3996  CCoinsViewCache cache(&db);
3997 
3998  std::vector<uint256> hashHeads = db.GetHeadBlocks();
3999  if (hashHeads.empty()) return true; // We're already in a consistent state.
4000  if (hashHeads.size() != 2) return error("ReplayBlocks(): unknown inconsistent state");
4001 
4002  uiInterface.ShowProgress(_("Replaying blocks…").translated, 0, false);
4003  LogPrintf("Replaying blocks\n");
4004 
4005  const CBlockIndex* pindexOld = nullptr; // Old tip during the interrupted flush.
4006  const CBlockIndex* pindexNew; // New tip during the interrupted flush.
4007  const CBlockIndex* pindexFork = nullptr; // Latest block common to both the old and the new tip.
4008 
4009  if (m_blockman.m_block_index.count(hashHeads[0]) == 0) {
4010  return error("ReplayBlocks(): reorganization to unknown block requested");
4011  }
4012  pindexNew = m_blockman.m_block_index[hashHeads[0]];
4013 
4014  if (!hashHeads[1].IsNull()) { // The old tip is allowed to be 0, indicating it's the first flush.
4015  if (m_blockman.m_block_index.count(hashHeads[1]) == 0) {
4016  return error("ReplayBlocks(): reorganization from unknown block requested");
4017  }
4018  pindexOld = m_blockman.m_block_index[hashHeads[1]];
4019  pindexFork = LastCommonAncestor(pindexOld, pindexNew);
4020  assert(pindexFork != nullptr);
4021  }
4022 
4023  // Rollback along the old branch.
4024  while (pindexOld != pindexFork) {
4025  if (pindexOld->nHeight > 0) { // Never disconnect the genesis block.
4026  CBlock block;
4027  if (!ReadBlockFromDisk(block, pindexOld, m_params.GetConsensus())) {
4028  return error("RollbackBlock(): ReadBlockFromDisk() failed at %d, hash=%s", pindexOld->nHeight, pindexOld->GetBlockHash().ToString());
4029  }
4030  LogPrintf("Rolling back %s (%i)\n", pindexOld->GetBlockHash().ToString(), pindexOld->nHeight);
4031  DisconnectResult res = DisconnectBlock(block, pindexOld, cache);
4032  if (res == DISCONNECT_FAILED) {
4033  return error("RollbackBlock(): DisconnectBlock failed at %d, hash=%s", pindexOld->nHeight, pindexOld->GetBlockHash().ToString());
4034  }
4035  // If DISCONNECT_UNCLEAN is returned, it means a non-existing UTXO was deleted, or an existing UTXO was
4036  // overwritten. It corresponds to cases where the block-to-be-disconnect never had all its operations
4037  // applied to the UTXO set. However, as both writing a UTXO and deleting a UTXO are idempotent operations,
4038  // the result is still a version of the UTXO set with the effects of that block undone.
4039  }
4040  pindexOld = pindexOld->pprev;
4041  }
4042 
4043  // Roll forward from the forking point to the new tip.
4044  int nForkHeight = pindexFork ? pindexFork->nHeight : 0;
4045  for (int nHeight = nForkHeight + 1; nHeight <= pindexNew->nHeight; ++nHeight) {
4046  const CBlockIndex* pindex = pindexNew->GetAncestor(nHeight);
4047  LogPrintf("Rolling forward %s (%i)\n", pindex->GetBlockHash().ToString(), nHeight);
4048  uiInterface.ShowProgress(_("Replaying blocks…").translated, (int) ((nHeight - nForkHeight) * 100.0 / (pindexNew->nHeight - nForkHeight)) , false);
4049  if (!RollforwardBlock(pindex, cache)) return false;
4050  }
4051 
4052  cache.SetBestBlock(pindexNew->GetBlockHash());
4053  cache.Flush();
4054  uiInterface.ShowProgress("", 100, false);
4055  return true;
4056 }
4057 
4059 {
4061 
4062  // At and above m_params.SegwitHeight, segwit consensus rules must be validated
4063  CBlockIndex* block{m_chain.Tip()};
4064 
4065  while (block != nullptr && DeploymentActiveAt(*block, m_params.GetConsensus(), Consensus::DEPLOYMENT_SEGWIT)) {
4066  if (!(block->nStatus & BLOCK_OPT_WITNESS)) {
4067  // block is insufficiently validated for a segwit client
4068  return true;
4069  }
4070  block = block->pprev;
4071  }
4072 
4073  return false;
4074 }
4075 
4077  nBlockSequenceId = 1;
4078  setBlockIndexCandidates.clear();
4079 }
4080 
4081 // May NOT be used after any connections are up as much
4082 // of the peer-processing logic assumes a consistent
4083 // block index state
4085 {
4086  LOCK(cs_main);
4087  chainman.Unload();
4088  pindexBestInvalid = nullptr;
4089  pindexBestHeader = nullptr;
4090  if (mempool) mempool->clear();
4091  vinfoBlockFile.clear();
4092  nLastBlockFile = 0;
4093  setDirtyBlockIndex.clear();
4094  setDirtyFileInfo.clear();
4096  for (int b = 0; b < VERSIONBITS_NUM_BITS; b++) {
4097  warningcache[b].clear();
4098  }
4099  fHavePruned = false;
4100 }
4101 
4103 {
4105  // Load block index from databases
4106  bool needs_init = fReindex;
4107  if (!fReindex) {
4108  bool ret = m_blockman.LoadBlockIndexDB(ActiveChainstate().setBlockIndexCandidates);
4109  if (!ret) return false;
4110  needs_init = m_blockman.m_block_index.empty();
4111  }
4112 
4113  if (needs_init) {
4114  // Everything here is for *new* reindex/DBs. Thus, though
4115  // LoadBlockIndexDB may have set fReindex if we shut down
4116  // mid-reindex previously, we don't check fReindex and
4117  // instead only check it prior to LoadBlockIndexDB to set
4118  // needs_init.
4119 
4120  LogPrintf("Initializing databases...\n");
4121  }
4122  return true;
4123 }
4124 
4126 {
4127  LOCK(cs_main);
4128 
4129  // Check whether we're already initialized by checking for genesis in
4130  // m_blockman.m_block_index. Note that we can't use m_chain here, since it is
4131  // set based on the coins db, not the block index db, which is the only
4132  // thing loaded at this point.
4133  if (m_blockman.m_block_index.count(m_params.GenesisBlock().GetHash()))
4134  return true;
4135 
4136  try {
4137  const CBlock& block = m_params.GenesisBlock();
4138  FlatFilePos blockPos = SaveBlockToDisk(block, 0, m_chain, m_params, nullptr);
4139  if (blockPos.IsNull())
4140  return error("%s: writing genesis block to disk failed", __func__);
4141  CBlockIndex *pindex = m_blockman.AddToBlockIndex(block);
4142  ReceivedBlockTransactions(block, pindex, blockPos);
4143  } catch (const std::runtime_error& e) {
4144  return error("%s: failed to write genesis block: %s", __func__, e.what());
4145  }
4146 
4147  return true;
4148 }
4149 
4150 void CChainState::LoadExternalBlockFile(FILE* fileIn, FlatFilePos* dbp)
4151 {
4152  // Map of disk positions for blocks with unknown parent (only used for reindex)
4153  static std::multimap<uint256, FlatFilePos> mapBlocksUnknownParent;
4154  int64_t nStart = GetTimeMillis();
4155 
4156  int nLoaded = 0;
4157  try {
4158  // This takes over fileIn and calls fclose() on it in the CBufferedFile destructor
4160  uint64_t nRewind = blkdat.GetPos();
4161  while (!blkdat.eof()) {
4162  if (ShutdownRequested()) return;
4163 
4164  blkdat.SetPos(nRewind);
4165  nRewind++; // start one byte further next time, in case of failure
4166  blkdat.SetLimit(); // remove former limit
4167  unsigned int nSize = 0;
4168  try {
4169  // locate a header
4170  unsigned char buf[CMessageHeader::MESSAGE_START_SIZE];
4171  blkdat.FindByte(m_params.MessageStart()[0]);
4172  nRewind = blkdat.GetPos()+1;
4173  blkdat >> buf;
4175  continue;
4176  }
4177  // read size
4178  blkdat >> nSize;
4179  if (nSize < 80 || nSize > MAX_BLOCK_SERIALIZED_SIZE)
4180  continue;
4181  } catch (const std::exception&) {
4182  // no valid block header found; don't complain
4183  break;
4184  }
4185  try {
4186  // read block
4187  uint64_t nBlockPos = blkdat.GetPos();
4188  if (dbp)
4189  dbp->nPos = nBlockPos;
4190  blkdat.SetLimit(nBlockPos + nSize);
4191  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
4192  CBlock& block = *pblock;
4193  blkdat >> block;
4194  nRewind = blkdat.GetPos();
4195 
4196  uint256 hash = block.GetHash();
4197  {
4198  LOCK(cs_main);
4199  // detect out of order blocks, and store them for later
4200  if (hash != m_params.GetConsensus().hashGenesisBlock && !m_blockman.LookupBlockIndex(block.hashPrevBlock)) {
4201  LogPrint(BCLog::REINDEX, "%s: Out of order block %s, parent %s not known\n", __func__, hash.ToString(),
4202  block.hashPrevBlock.ToString());
4203  if (dbp)
4204  mapBlocksUnknownParent.insert(std::make_pair(block.hashPrevBlock, *dbp));
4205  continue;
4206  }
4207 
4208  // process in case the block isn't known yet
4209  CBlockIndex* pindex = m_blockman.LookupBlockIndex(hash);
4210  if (!pindex || (pindex->nStatus & BLOCK_HAVE_DATA) == 0) {
4211  BlockValidationState state;
4212  if (AcceptBlock(pblock, state, nullptr, true, dbp, nullptr)) {
4213  nLoaded++;
4214  }
4215  if (state.IsError()) {
4216  break;
4217  }
4218  } else if (hash != m_params.GetConsensus().hashGenesisBlock && pindex->nHeight % 1000 == 0) {
4219  LogPrint(BCLog::REINDEX, "Block Import: already had block %s at height %d\n", hash.ToString(), pindex->nHeight);
4220  }
4221  }
4222 
4223  // Activate the genesis block so normal node progress can continue
4224  if (hash == m_params.GetConsensus().hashGenesisBlock) {
4225  BlockValidationState state;
4226  if (!ActivateBestChain(state, nullptr)) {
4227  break;
4228  }
4229  }
4230 
4231  NotifyHeaderTip(*this);
4232 
4233  // Recursively process earlier encountered successors of this block
4234  std::deque<uint256> queue;
4235  queue.push_back(hash);
4236  while (!queue.empty()) {
4237  uint256 head = queue.front();
4238  queue.pop_front();
4239  std::pair<std::multimap<uint256, FlatFilePos>::iterator, std::multimap<uint256, FlatFilePos>::iterator> range = mapBlocksUnknownParent.equal_range(head);
4240  while (range.first != range.second) {
4241  std::multimap<uint256, FlatFilePos>::iterator it = range.first;
4242  std::shared_ptr<CBlock> pblockrecursive = std::make_shared<CBlock>();
4243  if (ReadBlockFromDisk(*pblockrecursive, it->second, m_params.GetConsensus())) {
4244  LogPrint(BCLog::REINDEX, "%s: Processing out of order child %s of %s\n", __func__, pblockrecursive->GetHash().ToString(),
4245  head.ToString());
4246  LOCK(cs_main);
4247  BlockValidationState dummy;
4248  if (AcceptBlock(pblockrecursive, dummy, nullptr, true, &it->second, nullptr)) {
4249  nLoaded++;
4250  queue.push_back(pblockrecursive->GetHash());
4251  }
4252  }
4253  range.first++;
4254  mapBlocksUnknownParent.erase(it);
4255  NotifyHeaderTip(*this);
4256  }
4257  }
4258  } catch (const std::exception& e) {
4259  LogPrintf("%s: Deserialize or I/O error - %s\n", __func__, e.what());
4260  }
4261  }
4262  } catch (const std::runtime_error& e) {
4263  AbortNode(std::string("System error: ") + e.what());
4264  }
4265  LogPrintf("Loaded %i blocks from external file in %dms\n", nLoaded, GetTimeMillis() - nStart);
4266 }
4267 
4269 {
4270  if (!fCheckBlockIndex) {
4271  return;
4272  }
4273 
4274  LOCK(cs_main);
4275 
4276  // During a reindex, we read the genesis block and call CheckBlockIndex before ActivateBestChain,
4277  // so we have the genesis block in m_blockman.m_block_index but no active chain. (A few of the
4278  // tests when iterating the block tree require that m_chain has been initialized.)
4279  if (m_chain.Height() < 0) {
4280  assert(m_blockman.m_block_index.size() <= 1);
4281  return;
4282  }
4283 
4284  // Build forward-pointing map of the entire block tree.
4285  std::multimap<CBlockIndex*,CBlockIndex*> forward;
4286  for (const std::pair<const uint256, CBlockIndex*>& entry : m_blockman.m_block_index) {
4287  forward.insert(std::make_pair(entry.second->pprev, entry.second));
4288  }
4289 
4290  assert(forward.size() == m_blockman.m_block_index.size());
4291 
4292  std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeGenesis = forward.equal_range(nullptr);
4293  CBlockIndex *pindex = rangeGenesis.first->second;
4294  rangeGenesis.first++;
4295  assert(rangeGenesis.first == rangeGenesis.second); // There is only one index entry with parent nullptr.
4296 
4297  // Iterate over the entire block tree, using depth-first search.
4298  // Along the way, remember whether there are blocks on the path from genesis
4299  // block being explored which are the first to have certain properties.
4300  size_t nNodes = 0;
4301  int nHeight = 0;
4302  CBlockIndex* pindexFirstInvalid = nullptr; // Oldest ancestor of pindex which is invalid.
4303  CBlockIndex* pindexFirstMissing = nullptr; // Oldest ancestor of pindex which does not have BLOCK_HAVE_DATA.
4304  CBlockIndex* pindexFirstNeverProcessed = nullptr; // Oldest ancestor of pindex for which nTx == 0.
4305  CBlockIndex* pindexFirstNotTreeValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_TREE (regardless of being valid or not).
4306  CBlockIndex* pindexFirstNotTransactionsValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS (regardless of being valid or not).
4307  CBlockIndex* pindexFirstNotChainValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN (regardless of being valid or not).
4308  CBlockIndex* pindexFirstNotScriptsValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS (regardless of being valid or not).
4309  while (pindex != nullptr) {
4310  nNodes++;
4311  if (pindexFirstInvalid == nullptr && pindex->nStatus & BLOCK_FAILED_VALID) pindexFirstInvalid = pindex;
4312  // Assumed-valid index entries will not have data since we haven't downloaded the
4313  // full block yet.
4314  if (pindexFirstMissing == nullptr && !(pindex->nStatus & BLOCK_HAVE_DATA) && !pindex->IsAssumedValid()) {
4315  pindexFirstMissing = pindex;
4316  }
4317  if (pindexFirstNeverProcessed == nullptr && pindex->nTx == 0) pindexFirstNeverProcessed = pindex;
4318  if (pindex->pprev != nullptr && pindexFirstNotTreeValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TREE) pindexFirstNotTreeValid = pindex;
4319 
4320  if (pindex->pprev != nullptr && !pindex->IsAssumedValid()) {
4321  // Skip validity flag checks for BLOCK_ASSUMED_VALID index entries, since these
4322  // *_VALID_MASK flags will not be present for index entries we are temporarily assuming
4323  // valid.
4324  if (pindexFirstNotTransactionsValid == nullptr &&
4326  pindexFirstNotTransactionsValid = pindex;
4327  }
4328 
4329  if (pindexFirstNotChainValid == nullptr &&
4330  (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_CHAIN) {
4331  pindexFirstNotChainValid = pindex;
4332  }
4333 
4334  if (pindexFirstNotScriptsValid == nullptr &&
4336  pindexFirstNotScriptsValid = pindex;
4337  }
4338  }
4339 
4340  // Begin: actual consistency checks.
4341  if (pindex->pprev == nullptr) {
4342  // Genesis block checks.
4343  assert(pindex->GetBlockHash() == m_params.GetConsensus().hashGenesisBlock); // Genesis block's hash must match.
4344  assert(pindex == m_chain.Genesis()); // The current active chain's genesis block must be this block.
4345  }
4346  if (!pindex->HaveTxsDownloaded()) assert(pindex->nSequenceId <= 0); // nSequenceId can't be set positive for blocks that aren't linked (negative is used for preciousblock)
4347  // VALID_TRANSACTIONS is equivalent to nTx > 0 for all nodes (whether or not pruning has occurred).
4348  // HAVE_DATA is only equivalent to nTx > 0 (or VALID_TRANSACTIONS) if no pruning has occurred.
4349  // Unless these indexes are assumed valid and pending block download on a
4350  // background chainstate.
4351  if (!fHavePruned && !pindex->IsAssumedValid()) {
4352  // If we've never pruned, then HAVE_DATA should be equivalent to nTx > 0
4353  assert(!(pindex->nStatus & BLOCK_HAVE_DATA) == (pindex->nTx == 0));
4354  assert(pindexFirstMissing == pindexFirstNeverProcessed);
4355  } else {
4356  // If we have pruned, then we can only say that HAVE_DATA implies nTx > 0
4357  if (pindex->nStatus & BLOCK_HAVE_DATA) assert(pindex->nTx > 0);
4358  }
4359  if (pindex->nStatus & BLOCK_HAVE_UNDO) assert(pindex->nStatus & BLOCK_HAVE_DATA);
4360  if (pindex->IsAssumedValid()) {
4361  // Assumed-valid blocks should have some nTx value.
4362  assert(pindex->nTx > 0);
4363  // Assumed-valid blocks should connect to the main chain.
4365  } else {
4366  // Otherwise there should only be an nTx value if we have
4367  // actually seen a block's transactions.
4368  assert(((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TRANSACTIONS) == (pindex->nTx > 0)); // This is pruning-independent.
4369  }
4370  // All parents having had data (at some point) is equivalent to all parents being VALID_TRANSACTIONS, which is equivalent to HaveTxsDownloaded().
4371  assert((pindexFirstNeverProcessed == nullptr) == pindex->HaveTxsDownloaded());
4372  assert((pindexFirstNotTransactionsValid == nullptr) == pindex->HaveTxsDownloaded());
4373  assert(pindex->nHeight == nHeight); // nHeight must be consistent.
4374  assert(pindex->pprev == nullptr || pindex->nChainWork >= pindex->pprev->nChainWork); // For every block except the genesis block, the chainwork must be larger than the parent's.
4375  assert(nHeight < 2 || (pindex->pskip && (pindex->pskip->nHeight < nHeight))); // The pskip pointer must point back for all but the first 2 blocks.
4376  assert(pindexFirstNotTreeValid == nullptr); // All m_blockman.m_block_index entries must at least be TREE valid
4377  if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TREE) assert(pindexFirstNotTreeValid == nullptr); // TREE valid implies all parents are TREE valid
4378  if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_CHAIN) assert(pindexFirstNotChainValid == nullptr); // CHAIN valid implies all parents are CHAIN valid
4379  if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_SCRIPTS) assert(pindexFirstNotScriptsValid == nullptr); // SCRIPTS valid implies all parents are SCRIPTS valid
4380  if (pindexFirstInvalid == nullptr) {
4381  // Checks for not-invalid blocks.
4382  assert((pindex->nStatus & BLOCK_FAILED_MASK) == 0); // The failed mask cannot be set for blocks without invalid parents.
4383  }
4384  if (!CBlockIndexWorkComparator()(pindex, m_chain.Tip()) && pindexFirstNeverProcessed == nullptr) {
4385  if (pindexFirstInvalid == nullptr) {
4386  const bool is_active = this == &m_chainman.ActiveChainstate();
4387 
4388  // If this block sorts at least as good as the current tip and
4389  // is valid and we have all data for its parents, it must be in
4390  // setBlockIndexCandidates. m_chain.Tip() must also be there
4391  // even if some data has been pruned.
4392  //
4393  // Don't perform this check for the background chainstate since
4394  // its setBlockIndexCandidates shouldn't have some entries (i.e. those past the
4395  // snapshot block) which do exist in the block index for the active chainstate.
4396  if (is_active && (pindexFirstMissing == nullptr || pindex == m_chain.Tip())) {
4397  assert(setBlockIndexCandidates.count(pindex));
4398  }
4399  // If some parent is missing, then it could be that this block was in
4400  // setBlockIndexCandidates but had to be removed because of the missing data.
4401  // In this case it must be in m_blocks_unlinked -- see test below.
4402  }
4403  } else { // If this block sorts worse than the current tip or some ancestor's block has never been seen, it cannot be in setBlockIndexCandidates.
4404  assert(setBlockIndexCandidates.count(pindex) == 0);
4405  }
4406  // Check whether this block is in m_blocks_unlinked.
4407  std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeUnlinked = m_blockman.m_blocks_unlinked.equal_range(pindex->pprev);
4408  bool foundInUnlinked = false;
4409  while (rangeUnlinked.first != rangeUnlinked.second) {
4410  assert(rangeUnlinked.first->first == pindex->pprev);
4411  if (rangeUnlinked.first->second == pindex) {
4412  foundInUnlinked = true;
4413  break;
4414  }
4415  rangeUnlinked.first++;
4416  }
4417  if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed != nullptr && pindexFirstInvalid == nullptr) {
4418  // If this block has block data available, some parent was never received, and has no invalid parents, it must be in m_blocks_unlinked.
4419  assert(foundInUnlinked);
4420  }
4421  if (!(pindex->nStatus & BLOCK_HAVE_DATA)) assert(!foundInUnlinked); // Can't be in m_blocks_unlinked if we don't HAVE_DATA
4422  if (pindexFirstMissing == nullptr) assert(!foundInUnlinked); // We aren't missing data for any parent -- cannot be in m_blocks_unlinked.
4423  if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed == nullptr && pindexFirstMissing != nullptr) {
4424  // We HAVE_DATA for this block, have received data for all parents at some point, but we're currently missing data for some parent.
4425  assert(fHavePruned); // We must have pruned.
4426  // This block may have entered m_blocks_unlinked if:
4427  // - it has a descendant that at some point had more work than the
4428  // tip, and
4429  // - we tried switching to that descendant but were missing
4430  // data for some intermediate block between m_chain and the
4431  // tip.
4432  // So if this block is itself better than m_chain.Tip() and it wasn't in
4433  // setBlockIndexCandidates, then it must be in m_blocks_unlinked.
4434  if (!CBlockIndexWorkComparator()(pindex, m_chain.Tip()) && setBlockIndexCandidates.count(pindex) == 0) {
4435  if (pindexFirstInvalid == nullptr) {
4436  assert(foundInUnlinked);
4437  }
4438  }
4439  }
4440  // assert(pindex->GetBlockHash() == pindex->GetBlockHeader().GetHash()); // Perhaps too slow
4441  // End: actual consistency checks.
4442 
4443  // Try descending into the first subnode.
4444  std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> range = forward.equal_range(pindex);
4445  if (range.first != range.second) {
4446  // A subnode was found.
4447  pindex = range.first->second;
4448  nHeight++;
4449  continue;
4450  }
4451  // This is a leaf node.
4452  // Move upwards until we reach a node of which we have not yet visited the last child.
4453  while (pindex) {
4454  // We are going to either move to a parent or a sibling of pindex.
4455  // If pindex was the first with a certain property, unset the corresponding variable.
4456  if (pindex == pindexFirstInvalid) pindexFirstInvalid = nullptr;
4457  if (pindex == pindexFirstMissing) pindexFirstMissing = nullptr;
4458  if (pindex == pindexFirstNeverProcessed) pindexFirstNeverProcessed = nullptr;
4459  if (pindex == pindexFirstNotTreeValid) pindexFirstNotTreeValid = nullptr;
4460  if (pindex == pindexFirstNotTransactionsValid) pindexFirstNotTransactionsValid = nullptr;
4461  if (pindex == pindexFirstNotChainValid) pindexFirstNotChainValid = nullptr;
4462  if (pindex == pindexFirstNotScriptsValid) pindexFirstNotScriptsValid = nullptr;
4463  // Find our parent.
4464  CBlockIndex* pindexPar = pindex->pprev;
4465  // Find which child we just visited.
4466  std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangePar = forward.equal_range(pindexPar);
4467  while (rangePar.first->second != pindex) {
4468  assert(rangePar.first != rangePar.second); // Our parent must have at least the node we're coming from as child.
4469  rangePar.first++;
4470  }
4471  // Proceed to the next one.
4472  rangePar.first++;
4473  if (rangePar.first != rangePar.second) {
4474  // Move to the sibling.
4475  pindex = rangePar.first->second;
4476  break;
4477  } else {
4478  // Move up further.
4479  pindex = pindexPar;
4480  nHeight--;
4481  continue;
4482  }
4483  }
4484  }
4485 
4486  // Check that we actually traversed the entire map.
4487  assert(nNodes == forward.size());
4488 }
4489 
4490 std::string CChainState::ToString()
4491 {
4492  CBlockIndex* tip = m_chain.Tip();
4493  return strprintf("Chainstate [%s] @ height %d (%s)",
4494  m_from_snapshot_blockhash ? "snapshot" : "ibd",
4495  tip ? tip->nHeight : -1, tip ? tip->GetBlockHash().ToString() : "null");
4496 }
4497 
4498 bool CChainState::ResizeCoinsCaches(size_t coinstip_size, size_t coinsdb_size)
4499 {
4500  if (coinstip_size == m_coinstip_cache_size_bytes &&
4501  coinsdb_size == m_coinsdb_cache_size_bytes) {
4502  // Cache sizes are unchanged, no need to continue.
4503  return true;
4504  }
4505  size_t old_coinstip_size = m_coinstip_cache_size_bytes;
4506  m_coinstip_cache_size_bytes = coinstip_size;
4507  m_coinsdb_cache_size_bytes = coinsdb_size;
4508  CoinsDB().ResizeCache(coinsdb_size);
4509 
4510  LogPrintf("[%s] resized coinsdb cache to %.1f MiB\n",
4511  this->ToString(), coinsdb_size * (1.0 / 1024 / 1024));
4512  LogPrintf("[%s] resized coinstip cache to %.1f MiB\n",
4513  this->ToString(), coinstip_size * (1.0 / 1024 / 1024));
4514 
4515  BlockValidationState state;
4516  bool ret;
4517 
4518  if (coinstip_size > old_coinstip_size) {
4519  // Likely no need to flush if cache sizes have grown.
4521  } else {
4522  // Otherwise, flush state to disk and deallocate the in-memory coins map.
4525  }
4526  return ret;
4527 }
4528 
/** Version number of the on-disk mempool.dat serialization format. */
static const uint64_t MEMPOOL_DUMP_VERSION = 1;
4530 
4531 bool LoadMempool(CTxMemPool& pool, CChainState& active_chainstate, FopenFn mockable_fopen_function)
4532 {
4533  const CChainParams& chainparams = Params();
4534  int64_t nExpiryTimeout = gArgs.GetIntArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60;
4535  FILE* filestr{mockable_fopen_function(gArgs.GetDataDirNet() / "mempool.dat", "rb")};
4536  CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);
4537  if (file.IsNull()) {
4538  LogPrintf("Failed to open mempool file from disk. Continuing anyway.\n");
4539  return false;
4540  }
4541 
4542  int64_t count = 0;
4543  int64_t expired = 0;
4544  int64_t failed = 0;
4545  int64_t already_there = 0;
4546  int64_t unbroadcast = 0;
4547  int64_t nNow = GetTime();
4548 
4549  try {
4550  uint64_t version;
4551  file >> version;
4552  if (version != MEMPOOL_DUMP_VERSION) {
4553  return false;
4554  }
4555  uint64_t num;
4556  file >> num;
4557  while (num--) {
4558  CTransactionRef tx;
4559  int64_t nTime;
4560  int64_t nFeeDelta;
4561  file >> tx;
4562  file >> nTime;
4563  file >> nFeeDelta;
4564 
4565  CAmount amountdelta = nFeeDelta;
4566  if (amountdelta) {
4567  pool.PrioritiseTransaction(tx->GetHash(), amountdelta);
4568  }
4569  if (nTime > nNow - nExpiryTimeout) {
4570  LOCK(cs_main);
4571  if (AcceptToMemoryPoolWithTime(chainparams, pool, active_chainstate, tx, nTime, false /* bypass_limits */,
4572  false /* test_accept */).m_result_type == MempoolAcceptResult::ResultType::VALID) {
4573  ++count;
4574  } else {
4575  // mempool may contain the transaction already, e.g. from
4576  // wallet(s) having loaded it while we were processing
4577  // mempool transactions; consider these as valid, instead of
4578  // failed, but mark them as 'already there'
4579  if (pool.exists(GenTxid::Txid(tx->GetHash()))) {
4580  ++already_there;
4581  } else {
4582  ++failed;
4583  }
4584  }
4585  } else {
4586  ++expired;
4587  }
4588  if (ShutdownRequested())
4589  return false;
4590  }
4591  std::map<uint256, CAmount> mapDeltas;
4592  file >> mapDeltas;
4593 
4594  for (const auto& i : mapDeltas) {
4595  pool.PrioritiseTransaction(i.first, i.second);
4596  }
4597 
4598  std::set<uint256> unbroadcast_txids;
4599  file >> unbroadcast_txids;
4600  unbroadcast = unbroadcast_txids.size();
4601  for (const auto& txid : unbroadcast_txids) {
4602  // Ensure transactions were accepted to mempool then add to
4603  // unbroadcast set.
4604  if (pool.get(txid) != nullptr) pool.AddUnbroadcastTx(txid);
4605  }
4606  } catch (const std::exception& e) {
4607  LogPrintf("Failed to deserialize mempool data on disk: %s. Continuing anyway.\n", e.what());
4608  return false;
4609  }
4610 
4611  LogPrintf("Imported mempool transactions from disk: %i succeeded, %i failed, %i expired, %i already there, %i waiting for initial broadcast\n", count, failed, expired, already_there, unbroadcast);
4612  return true;
4613 }
4614 
4615 bool DumpMempool(const CTxMemPool& pool, FopenFn mockable_fopen_function, bool skip_file_commit)
4616 {
4617  int64_t start = GetTimeMicros();
4618 
4619  std::map<uint256, CAmount> mapDeltas;
4620  std::vector<TxMempoolInfo> vinfo;
4621  std::set<uint256> unbroadcast_txids;
4622 
4623  static Mutex dump_mutex;
4624  LOCK(dump_mutex);
4625 
4626  {
4627  LOCK(pool.cs);
4628  for (const auto &i : pool.mapDeltas) {
4629  mapDeltas[i.first] = i.second;
4630  }
4631  vinfo = pool.infoAll();
4632  unbroadcast_txids = pool.GetUnbroadcastTxs();
4633  }
4634 
4635  int64_t mid = GetTimeMicros();
4636 
4637  try {
4638  FILE* filestr{mockable_fopen_function(gArgs.GetDataDirNet() / "mempool.dat.new", "wb")};
4639  if (!filestr) {
4640  return false;
4641  }
4642 
4643  CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);
4644 
4645  uint64_t version = MEMPOOL_DUMP_VERSION;
4646  file << version;
4647 
4648  file << (uint64_t)vinfo.size();
4649  for (const auto& i : vinfo) {
4650  file << *(i.tx);
4651  file << int64_t{count_seconds(i.m_time)};
4652  file << int64_t{i.nFeeDelta};
4653  mapDeltas.erase(i.tx->GetHash());
4654  }
4655 
4656  file << mapDeltas;
4657 
4658  LogPrintf("Writing %d unbroadcast transactions to disk.\n", unbroadcast_txids.size());
4659  file << unbroadcast_txids;
4660 
4661  if (!skip_file_commit && !FileCommit(file.Get()))
4662  throw std::runtime_error("FileCommit failed");
4663  file.fclose();
4664  if (!RenameOver(gArgs.GetDataDirNet() / "mempool.dat.new", gArgs.GetDataDirNet() / "mempool.dat")) {
4665  throw std::runtime_error("Rename failed");
4666  }
4667  int64_t last = GetTimeMicros();
4668  LogPrintf("Dumped mempool: %gs to copy, %gs to dump\n", (mid-start)*MICRO, (last-mid)*MICRO);
4669  } catch (const std::exception& e) {
4670  LogPrintf("Failed to dump mempool: %s. Continuing anyway.\n", e.what());
4671  return false;
4672  }
4673  return true;
4674 }
4675 
4678 double GuessVerificationProgress(const ChainTxData& data, const CBlockIndex *pindex) {
4679  if (pindex == nullptr)
4680  return 0.0;
4681 
4682  int64_t nNow = time(nullptr);
4683 
4684  double fTxTotal;
4685 
4686  if (pindex->nChainTx <= data.nTxCount) {
4687  fTxTotal = data.nTxCount + (nNow - data.nTime) * data.dTxRate;
4688  } else {
4689  fTxTotal = pindex->nChainTx + (nNow - pindex->GetBlockTime()) * data.dTxRate;
4690  }
4691 
4692  return std::min<double>(pindex->nChainTx / fTxTotal, 1.0);
4693 }
4694 
4695 std::optional<uint256> ChainstateManager::SnapshotBlockhash() const
4696 {
4697  LOCK(::cs_main);
4698  if (m_active_chainstate && m_active_chainstate->m_from_snapshot_blockhash) {
4699  // If a snapshot chainstate exists, it will always be our active.
4700  return m_active_chainstate->m_from_snapshot_blockhash;
4701  }
4702  return std::nullopt;
4703 }
4704 
4705 std::vector<CChainState*> ChainstateManager::GetAll()
4706 {
4707  LOCK(::cs_main);
4708  std::vector<CChainState*> out;
4709 
4710  if (!IsSnapshotValidated() && m_ibd_chainstate) {
4711  out.push_back(m_ibd_chainstate.get());
4712  }
4713 
4714  if (m_snapshot_chainstate) {
4715  out.push_back(m_snapshot_chainstate.get());
4716  }
4717 
4718  return out;
4719 }
4720 
4721 CChainState& ChainstateManager::InitializeChainstate(
4722  CTxMemPool* mempool, const std::optional<uint256>& snapshot_blockhash)
4723 {
4724  bool is_snapshot = snapshot_blockhash.has_value();
4725  std::unique_ptr<CChainState>& to_modify =
4726  is_snapshot ? m_snapshot_chainstate : m_ibd_chainstate;
4727 
4728  if (to_modify) {
4729  throw std::logic_error("should not be overwriting a chainstate");
4730  }
4731  to_modify.reset(new CChainState(mempool, m_blockman, *this, snapshot_blockhash));
4732 
4733  // Snapshot chainstates and initial IBD chaintates always become active.
4734  if (is_snapshot || (!is_snapshot && !m_active_chainstate)) {
4735  LogPrintf("Switching active chainstate to %s\n", to_modify->ToString());
4736  m_active_chainstate = to_modify.get();
4737  } else {
4738  throw std::logic_error("unexpected chainstate activation");
4739  }
4740 
4741  return *to_modify;
4742 }
4743 
4745  const int height, const CChainParams& chainparams)
4746 {
4747  const MapAssumeutxo& valid_assumeutxos_map = chainparams.Assumeutxo();
4748  const auto assumeutxo_found = valid_assumeutxos_map.find(height);
4749 
4750  if (assumeutxo_found != valid_assumeutxos_map.end()) {
4751  return &assumeutxo_found->second;
4752  }
4753  return nullptr;
4754 }
4755 
4757  CAutoFile& coins_file,
4758  const SnapshotMetadata& metadata,
4759  bool in_memory)
4760 {
4761  uint256 base_blockhash = metadata.m_base_blockhash;
4762 
4763  if (this->SnapshotBlockhash()) {
4764  LogPrintf("[snapshot] can't activate a snapshot-based chainstate more than once\n");
4765  return false;
4766  }
4767 
4768  int64_t current_coinsdb_cache_size{0};
4769  int64_t current_coinstip_cache_size{0};
4770 
4771  // Cache percentages to allocate to each chainstate.
4772  //
4773  // These particular percentages don't matter so much since they will only be
4774  // relevant during snapshot activation; caches are rebalanced at the conclusion of
4775  // this function. We want to give (essentially) all available cache capacity to the
4776  // snapshot to aid the bulk load later in this function.
4777  static constexpr double IBD_CACHE_PERC = 0.01;
4778  static constexpr double SNAPSHOT_CACHE_PERC = 0.99;
4779 
4780  {
4781  LOCK(::cs_main);
4782  // Resize the coins caches to ensure we're not exceeding memory limits.
4783  //
4784  // Allocate the majority of the cache to the incoming snapshot chainstate, since
4785  // (optimistically) getting to its tip will be the top priority. We'll need to call
4786  // `MaybeRebalanceCaches()` once we're done with this function to ensure
4787  // the right allocation (including the possibility that no snapshot was activated
4788  // and that we should restore the active chainstate caches to their original size).
4789  //
4790  current_coinsdb_cache_size = this->ActiveChainstate().m_coinsdb_cache_size_bytes;
4791  current_coinstip_cache_size = this->ActiveChainstate().m_coinstip_cache_size_bytes;
4792 
4793  // Temporarily resize the active coins cache to make room for the newly-created
4794  // snapshot chain.
4795  this->ActiveChainstate().ResizeCoinsCaches(
4796  static_cast<size_t>(current_coinstip_cache_size * IBD_CACHE_PERC),
4797  static_cast<size_t>(current_coinsdb_cache_size * IBD_CACHE_PERC));
4798  }
4799 
4800  auto snapshot_chainstate = WITH_LOCK(::cs_main,
4801  return std::make_unique<CChainState>(
4802  /* mempool */ nullptr, m_blockman, *this, base_blockhash));
4803 
4804  {
4805  LOCK(::cs_main);
4806  snapshot_chainstate->InitCoinsDB(
4807  static_cast<size_t>(current_coinsdb_cache_size * SNAPSHOT_CACHE_PERC),
4808  in_memory, false, "chainstate");
4809  snapshot_chainstate->InitCoinsCache(
4810  static_cast<size_t>(current_coinstip_cache_size * SNAPSHOT_CACHE_PERC));
4811  }
4812 
4813  const bool snapshot_ok = this->PopulateAndValidateSnapshot(
4814  *snapshot_chainstate, coins_file, metadata);
4815 
4816  if (!snapshot_ok) {
4817  WITH_LOCK(::cs_main, this->MaybeRebalanceCaches());
4818  return false;
4819  }
4820 
4821  {
4822  LOCK(::cs_main);
4823  assert(!m_snapshot_chainstate);
4824  m_snapshot_chainstate.swap(snapshot_chainstate);
4825  const bool chaintip_loaded = m_snapshot_chainstate->LoadChainTip();
4826  assert(chaintip_loaded);
4827 
4828  m_active_chainstate = m_snapshot_chainstate.get();
4829 
4830  LogPrintf("[snapshot] successfully activated snapshot %s\n", base_blockhash.ToString());
4831  LogPrintf("[snapshot] (%.2f MB)\n",
4832  m_snapshot_chainstate->CoinsTip().DynamicMemoryUsage() / (1000 * 1000));
4833 
4834  this->MaybeRebalanceCaches();
4835  }
4836  return true;
4837 }
4838 
4840  CChainState& snapshot_chainstate,
4841  CAutoFile& coins_file,
4842  const SnapshotMetadata& metadata)
4843 {
4844  // It's okay to release cs_main before we're done using `coins_cache` because we know
4845  // that nothing else will be referencing the newly created snapshot_chainstate yet.
4846  CCoinsViewCache& coins_cache = *WITH_LOCK(::cs_main, return &snapshot_chainstate.CoinsTip());
4847 
4848  uint256 base_blockhash = metadata.m_base_blockhash;
4849 
4850  CBlockIndex* snapshot_start_block = WITH_LOCK(::cs_main, return m_blockman.LookupBlockIndex(base_blockhash));
4851 
4852  if (!snapshot_start_block) {
4853  // Needed for GetUTXOStats and ExpectedAssumeutxo to determine the height and to avoid a crash when base_blockhash.IsNull()
4854  LogPrintf("[snapshot] Did not find snapshot start blockheader %s\n",
4855  base_blockhash.ToString());
4856  return false;
4857  }
4858 
4859  int base_height = snapshot_start_block->nHeight;
4860  auto maybe_au_data = ExpectedAssumeutxo(base_height, ::Params());
4861 
4862  if (!maybe_au_data) {
4863  LogPrintf("[snapshot] assumeutxo height in snapshot metadata not recognized " /* Continued */
4864  "(%d) - refusing to load snapshot\n", base_height);
4865  return false;
4866  }
4867 
4868  const AssumeutxoData& au_data = *maybe_au_data;
4869 
4870  COutPoint outpoint;
4871  Coin coin;
4872  const uint64_t coins_count = metadata.m_coins_count;
4873  uint64_t coins_left = metadata.m_coins_count;
4874 
4875  LogPrintf("[snapshot] loading coins from snapshot %s\n", base_blockhash.ToString());
4876  int64_t flush_now{0};
4877  int64_t coins_processed{0};
4878 
4879  while (coins_left > 0) {
4880  try {
4881  coins_file >> outpoint;
4882  coins_file >> coin;
4883  } catch (const std::ios_base::failure&) {
4884  LogPrintf("[snapshot] bad snapshot format or truncated snapshot after deserializing %d coins\n",
4885  coins_count - coins_left);
4886  return false;
4887  }
4888  if (coin.nHeight > base_height ||
4889  outpoint.n >= std::numeric_limits<decltype(outpoint.n)>::max() // Avoid integer wrap-around in coinstats.cpp:ApplyHash
4890  ) {
4891  LogPrintf("[snapshot] bad snapshot data after deserializing %d coins\n",
4892  coins_count - coins_left);
4893  return false;
4894  }
4895 
4896  coins_cache.EmplaceCoinInternalDANGER(std::move(outpoint), std::move(coin));
4897 
4898  --coins_left;
4899  ++coins_processed;
4900 
4901  if (coins_processed % 1000000 == 0) {
4902  LogPrintf("[snapshot] %d coins loaded (%.2f%%, %.2f MB)\n",
4903  coins_processed,
4904  static_cast<float>(coins_processed) * 100 / static_cast<float>(coins_count),
4905  coins_cache.DynamicMemoryUsage() / (1000 * 1000));
4906  }
4907 
4908  // Batch write and flush (if we need to) every so often.
4909  //
4910  // If our average Coin size is roughly 41 bytes, checking every 120,000 coins
4911  // means <5MB of memory imprecision.
4912  if (coins_processed % 120000 == 0) {
4913  if (ShutdownRequested()) {
4914  return false;
4915  }
4916 
4917  const auto snapshot_cache_state = WITH_LOCK(::cs_main,
4918  return snapshot_chainstate.GetCoinsCacheSizeState());
4919 
4920  if (snapshot_cache_state >=
4922  LogPrintf("[snapshot] flushing coins cache (%.2f MB)... ", /* Continued */
4923  coins_cache.DynamicMemoryUsage() / (1000 * 1000));
4924  flush_now = GetTimeMillis();
4925 
4926  // This is a hack - we don't know what the actual best block is, but that
4927  // doesn't matter for the purposes of flushing the cache here. We'll set this
4928  // to its correct value (`base_blockhash`) below after the coins are loaded.
4929  coins_cache.SetBestBlock(GetRandHash());
4930 
4931  coins_cache.Flush();
4932  LogPrintf("done (%.2fms)\n", GetTimeMillis() - flush_now);
4933  }
4934  }
4935  }
4936 
4937  // Important that we set this. This and the coins_cache accesses above are
4938  // sort of a layer violation, but either we reach into the innards of
4939  // CCoinsViewCache here or we have to invert some of the CChainState to
4940  // embed them in a snapshot-activation-specific CCoinsViewCache bulk load
4941  // method.
4942  coins_cache.SetBestBlock(base_blockhash);
4943 
4944  bool out_of_coins{false};
4945  try {
4946  coins_file >> outpoint;
4947  } catch (const std::ios_base::failure&) {
4948  // We expect an exception since we should be out of coins.
4949  out_of_coins = true;
4950  }
4951  if (!out_of_coins) {
4952  LogPrintf("[snapshot] bad snapshot - coins left over after deserializing %d coins\n",
4953  coins_count);
4954  return false;
4955  }
4956 
4957  LogPrintf("[snapshot] loaded %d (%.2f MB) coins from snapshot %s\n",
4958  coins_count,
4959  coins_cache.DynamicMemoryUsage() / (1000 * 1000),
4960  base_blockhash.ToString());
4961 
4962  LogPrintf("[snapshot] flushing snapshot chainstate to disk\n");
4963  // No need to acquire cs_main since this chainstate isn't being used yet.
4964  coins_cache.Flush(); // TODO: if #17487 is merged, add erase=false here for better performance.
4965 
4966  assert(coins_cache.GetBestBlock() == base_blockhash);
4967 
4969  auto breakpoint_fnc = [] { /* TODO insert breakpoint here? */ };
4970 
4971  // As above, okay to immediately release cs_main here since no other context knows
4972  // about the snapshot_chainstate.
4973  CCoinsViewDB* snapshot_coinsdb = WITH_LOCK(::cs_main, return &snapshot_chainstate.CoinsDB());
4974 
4975  if (!GetUTXOStats(snapshot_coinsdb, WITH_LOCK(::cs_main, return std::ref(m_blockman)), stats, breakpoint_fnc)) {
4976  LogPrintf("[snapshot] failed to generate coins stats\n");
4977  return false;
4978  }
4979 
4980  // Assert that the deserialized chainstate contents match the expected assumeutxo value.
4981  if (AssumeutxoHash{stats.hashSerialized} != au_data.hash_serialized) {
4982  LogPrintf("[snapshot] bad snapshot content hash: expected %s, got %s\n",
4983  au_data.hash_serialized.ToString(), stats.hashSerialized.ToString());
4984  return false;
4985  }
4986 
4987  snapshot_chainstate.m_chain.SetTip(snapshot_start_block);
4988 
4989  // The remainder of this function requires modifying data protected by cs_main.
4990  LOCK(::cs_main);
4991 
4992  // Fake various pieces of CBlockIndex state:
4993  CBlockIndex* index = nullptr;
4994  for (int i = 0; i <= snapshot_chainstate.m_chain.Height(); ++i) {
4995  index = snapshot_chainstate.m_chain[i];
4996 
4997  // Fake nTx so that LoadBlockIndex() loads assumed-valid CBlockIndex
4998  // entries (among other things)
4999  if (!index->nTx) {
5000  index->nTx = 1;
5001  }
5002  // Fake nChainTx so that GuessVerificationProgress reports accurately
5003  index->nChainTx = index->pprev ? index->pprev->nChainTx + index->nTx : 1;
5004 
5005  // Mark unvalidated block index entries beneath the snapshot base block as assumed-valid.
5006  if (!index->IsValid(BLOCK_VALID_SCRIPTS)) {
5007  // This flag will be removed once the block is fully validated by a
5008  // background chainstate.
5009  index->nStatus |= BLOCK_ASSUMED_VALID;
5010  }
5011 
5012  // Fake BLOCK_OPT_WITNESS so that CChainState::NeedsRedownload()
5013  // won't ask to rewind the entire assumed-valid chain on startup.
5014  if (index->pprev && DeploymentActiveAt(*index, ::Params().GetConsensus(), Consensus::DEPLOYMENT_SEGWIT)) {
5015  index->nStatus |= BLOCK_OPT_WITNESS;
5016  }
5017 
5018  setDirtyBlockIndex.insert(index);
5019  // Changes to the block index will be flushed to disk after this call
5020  // returns in `ActivateSnapshot()`, when `MaybeRebalanceCaches()` is
5021  // called, since we've added a snapshot chainstate and therefore will
5022  // have to downsize the IBD chainstate, which will result in a call to
5023  // `FlushStateToDisk(ALWAYS)`.
5024  }
5025 
5026  assert(index);
5027  index->nChainTx = au_data.nChainTx;
5028  snapshot_chainstate.setBlockIndexCandidates.insert(snapshot_start_block);
5029 
5030  LogPrintf("[snapshot] validated snapshot (%.2f MB)\n",
5031  coins_cache.DynamicMemoryUsage() / (1000 * 1000));
5032  return true;
5033 }
5034 
5036 {
5037  LOCK(::cs_main);
5038  assert(m_active_chainstate);
5039  return *m_active_chainstate;
5040 }
5041 
5043 {
5044  LOCK(::cs_main);
5045  return m_snapshot_chainstate && m_active_chainstate == m_snapshot_chainstate.get();
5046 }
5047 
5048 void ChainstateManager::Unload()
5049 {
5050  for (CChainState* chainstate : this->GetAll()) {
5051  chainstate->m_chain.SetTip(nullptr);
5052  chainstate->UnloadBlockIndex();
5053  }
5054 
5055  m_blockman.Unload();
5056 }
5057 
5058 void ChainstateManager::Reset()
5059 {
5060  LOCK(::cs_main);
5061  m_ibd_chainstate.reset();
5062  m_snapshot_chainstate.reset();
5063  m_active_chainstate = nullptr;
5064  m_snapshot_validated = false;
5065 }
5066 
5067 void ChainstateManager::MaybeRebalanceCaches()
5068 {
5069  if (m_ibd_chainstate && !m_snapshot_chainstate) {
5070  LogPrintf("[snapshot] allocating all cache to the IBD chainstate\n");
5071  // Allocate everything to the IBD chainstate.
5072  m_ibd_chainstate->ResizeCoinsCaches(m_total_coinstip_cache, m_total_coinsdb_cache);
5073  }
5074  else if (m_snapshot_chainstate && !m_ibd_chainstate) {
5075  LogPrintf("[snapshot] allocating all cache to the snapshot chainstate\n");
5076  // Allocate everything to the snapshot chainstate.
5077  m_snapshot_chainstate->ResizeCoinsCaches(m_total_coinstip_cache, m_total_coinsdb_cache);
5078  }
5079  else if (m_ibd_chainstate && m_snapshot_chainstate) {
5080  // If both chainstates exist, determine who needs more cache based on IBD status.
5081  //
5082  // Note: shrink caches first so that we don't inadvertently overwhelm available memory.
5083  if (m_snapshot_chainstate->IsInitialBlockDownload()) {
5084  m_ibd_chainstate->ResizeCoinsCaches(
5086  m_snapshot_chainstate->ResizeCoinsCaches(