Bitcoin Core 22.99.0
P2P Digital Currency
validation.cpp
Go to the documentation of this file.
1 // Copyright (c) 2009-2010 Satoshi Nakamoto
2 // Copyright (c) 2009-2020 The Bitcoin Core developers
3 // Distributed under the MIT software license, see the accompanying
4 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
5 
6 #include <validation.h>
7 
8 #include <arith_uint256.h>
9 #include <chain.h>
10 #include <chainparams.h>
11 #include <checkqueue.h>
12 #include <consensus/consensus.h>
13 #include <consensus/merkle.h>
14 #include <consensus/tx_check.h>
15 #include <consensus/tx_verify.h>
16 #include <consensus/validation.h>
17 #include <cuckoocache.h>
18 #include <deploymentstatus.h>
19 #include <flatfile.h>
20 #include <hash.h>
21 #include <index/blockfilterindex.h>
22 #include <logging.h>
23 #include <logging/timer.h>
24 #include <node/blockstorage.h>
25 #include <node/coinstats.h>
26 #include <node/ui_interface.h>
27 #include <policy/policy.h>
28 #include <policy/settings.h>
29 #include <pow.h>
30 #include <primitives/block.h>
31 #include <primitives/transaction.h>
32 #include <random.h>
33 #include <reverse_iterator.h>
34 #include <script/script.h>
35 #include <script/sigcache.h>
36 #include <shutdown.h>
37 #include <signet.h>
38 #include <timedata.h>
39 #include <tinyformat.h>
40 #include <txdb.h>
41 #include <txmempool.h>
42 #include <uint256.h>
43 #include <undo.h>
44 #include <util/check.h> // For NDEBUG compile time check
45 #include <util/hasher.h>
46 #include <util/moneystr.h>
47 #include <util/rbf.h>
48 #include <util/strencodings.h>
49 #include <util/system.h>
50 #include <util/trace.h>
51 #include <util/translation.h>
52 #include <validationinterface.h>
53 #include <warnings.h>
54 
55 #include <numeric>
56 #include <optional>
57 #include <string>
58 
59 #include <boost/algorithm/string/replace.hpp>
60 
//! Unit-prefix scale factors: micro (1e-6) and milli (1e-3).
//! Declared as typed, scoped constants instead of macros so they obey C++
//! scoping and show up in the debugger (Core Guidelines ES.31: avoid macro
//! constants). Values and usage are unchanged.
static constexpr double MICRO{0.000001};
static constexpr double MILLI{0.001};
63 
//! Extra size (bytes) by which the in-mempool descendant size limit may be
//! exceeded for the CPFP carve-out (see its use with CalculateMemPoolAncestors
//! in MemPoolAccept::PreChecks below).
static const unsigned int EXTRA_DESCENDANT_TX_SIZE_LIMIT = 10000;
//! Limit for the pool of transactions taken from disconnected blocks —
//! NOTE(review): units (count vs. kB of memory usage) are not visible in this
//! chunk; confirm against DisconnectedBlockTransactions before relying on it.
static const unsigned int MAX_DISCONNECTED_TX_POOL_SIZE = 20000;
//! Periodic database write interval — presumably how often chainstate is
//! written to disk; confirm at the flush call site (not visible here).
static constexpr std::chrono::hours DATABASE_WRITE_INTERVAL{1};
//! Periodic full database flush interval — presumably a less frequent, fuller
//! flush than DATABASE_WRITE_INTERVAL; confirm at the flush call site.
static constexpr std::chrono::hours DATABASE_FLUSH_INTERVAL{24};
//! Maximum age of the chain tip for fee estimation to still be updated
//! (compared against the tip's block time in IsCurrentForFeeEstimation).
static constexpr std::chrono::hours MAX_FEE_ESTIMATION_TIP_AGE{3};
//! Human-readable descriptions of the -checklevel verification levels,
//! indexed by level.
const std::vector<std::string> CHECKLEVEL_DOC {
    "level 0 reads the blocks from disk",
    "level 1 verifies block validity",
    "level 2 verifies undo data",
    "level 3 checks disconnection of tip blocks",
    "level 4 tries to reconnect the blocks",
    "each level includes the checks of the previous levels",
};
86 
88  // First sort by most total work, ...
89  if (pa->nChainWork > pb->nChainWork) return false;
90  if (pa->nChainWork < pb->nChainWork) return true;
91 
92  // ... then by earliest time received, ...
93  if (pa->nSequenceId < pb->nSequenceId) return false;
94  if (pa->nSequenceId > pb->nSequenceId) return true;
95 
96  // Use pointer address as tie breaker (should only happen with blocks
97  // loaded from disk, as those all have id 0).
98  if (pa < pb) return false;
99  if (pa > pb) return true;
100 
101  // Identical blocks.
102  return false;
103 }
104 
116 
// Presumably signalled when the best block changes — NOTE(review): the
// condition variable's associated mutex and state are declared on lines that
// were dropped from this listing; confirm the pairing before waiting on it.
std::condition_variable g_best_block_cv;
// Whether to require transactions to pass the standardness checks
// (IsStandardTx / AreInputsStandard / IsWitnessStandard, gated on this flag in
// MemPoolAccept::PreChecks). Per the comment there, relaxed on
// -testnet/-regtest.
bool fRequireStandard = true;
// Presumably enables extra block-index consistency checking (default off) —
// the use sites are not visible in this chunk; confirm against -checkblockindex.
bool fCheckBlockIndex = false;
126 
129 
131 
132 // Internal stuff
// Internal stuff
namespace {
    // Candidate invalid-chain tip tracked by this file — NOTE(review): based
    // on the name this appears to be the most-work block index entry known to
    // be invalid, but the maintaining code is not visible in this chunk;
    // confirm before relying on that semantics.
    CBlockIndex* pindexBestInvalid = nullptr;
} // namespace
136 
// Internal stuff from blockstorage ...
// Externs reaching into block-storage state while that code is being migrated
// out of validation — presumably defined in node/blockstorage.cpp (this file
// includes node/blockstorage.h); confirm before moving.
extern std::vector<CBlockFileInfo> vinfoBlockFile; // per-block-file metadata
extern int nLastBlockFile;                         // index of the block file currently appended to — TODO confirm
extern bool fCheckForPruning;
extern std::set<CBlockIndex*> setDirtyBlockIndex;  // block-index entries with unflushed changes — TODO confirm
extern std::set<int> setDirtyFileInfo;             // block-file numbers with unflushed metadata — TODO confirm
// Flush the current block file; the flags presumably finalize the block file
// and/or its undo file — semantics live in blockstorage, not visible here.
void FlushBlockFile(bool fFinalize = false, bool finalize_undo = false);
// ... TODO move fully to blockstorage
146 
148 {
150  BlockMap::const_iterator it = m_block_index.find(hash);
151  return it == m_block_index.end() ? nullptr : it->second;
152 }
153 
155 {
157 
158  // Find the latest block common to locator and chain - we expect that
159  // locator.vHave is sorted descending by height.
160  for (const uint256& hash : locator.vHave) {
161  CBlockIndex* pindex = LookupBlockIndex(hash);
162  if (pindex) {
163  if (chain.Contains(pindex))
164  return pindex;
165  if (pindex->GetAncestor(chain.Height()) == chain.Tip()) {
166  return chain.Tip();
167  }
168  }
169  }
170  return chain.Genesis();
171 }
172 
173 bool CheckInputScripts(const CTransaction& tx, TxValidationState& state,
174  const CCoinsViewCache& inputs, unsigned int flags, bool cacheSigStore,
175  bool cacheFullScriptStore, PrecomputedTransactionData& txdata,
176  std::vector<CScriptCheck>* pvChecks = nullptr)
178 
179 bool CheckFinalTx(const CBlockIndex* active_chain_tip, const CTransaction &tx, int flags)
180 {
182  assert(active_chain_tip); // TODO: Make active_chain_tip a reference
183 
184  // By convention a negative value for flags indicates that the
185  // current network-enforced consensus rules should be used. In
186  // a future soft-fork scenario that would mean checking which
187  // rules would be enforced for the next block and setting the
188  // appropriate flags. At the present time no soft-forks are
189  // scheduled, so no flags are set.
190  flags = std::max(flags, 0);
191 
192  // CheckFinalTx() uses active_chain_tip.Height()+1 to evaluate
193  // nLockTime because when IsFinalTx() is called within
194  // CBlock::AcceptBlock(), the height of the block *being*
195  // evaluated is what is used. Thus if we want to know if a
196  // transaction can be part of the *next* block, we need to call
197  // IsFinalTx() with one more than active_chain_tip.Height().
198  const int nBlockHeight = active_chain_tip->nHeight + 1;
199 
200  // BIP113 requires that time-locked transactions have nLockTime set to
201  // less than the median time of the previous block they're contained in.
202  // When the next block is created its previous block will be the current
203  // chain tip, so we use that to calculate the median time passed to
204  // IsFinalTx() if LOCKTIME_MEDIAN_TIME_PAST is set.
205  const int64_t nBlockTime = (flags & LOCKTIME_MEDIAN_TIME_PAST)
206  ? active_chain_tip->GetMedianTimePast()
207  : GetAdjustedTime();
208 
209  return IsFinalTx(tx, nBlockHeight, nBlockTime);
210 }
211 
212 bool TestLockPointValidity(CChain& active_chain, const LockPoints* lp)
213 {
215  assert(lp);
216  // If there are relative lock times then the maxInputBlock will be set
217  // If there are no relative lock times, the LockPoints don't depend on the chain
218  if (lp->maxInputBlock) {
219  // Check whether ::ChainActive() is an extension of the block at which the LockPoints
220  // calculation was valid. If not LockPoints are no longer valid
221  if (!active_chain.Contains(lp->maxInputBlock)) {
222  return false;
223  }
224  }
225 
226  // LockPoints still valid
227  return true;
228 }
229 
231  const CCoinsView& coins_view,
232  const CTransaction& tx,
233  int flags,
234  LockPoints* lp,
235  bool useExistingLockPoints)
236 {
237  assert(tip != nullptr);
238 
239  CBlockIndex index;
240  index.pprev = tip;
241  // CheckSequenceLocks() uses active_chainstate.m_chain.Height()+1 to evaluate
242  // height based locks because when SequenceLocks() is called within
243  // ConnectBlock(), the height of the block *being*
244  // evaluated is what is used.
245  // Thus if we want to know if a transaction can be part of the
246  // *next* block, we need to use one more than active_chainstate.m_chain.Height()
247  index.nHeight = tip->nHeight + 1;
248 
249  std::pair<int, int64_t> lockPair;
250  if (useExistingLockPoints) {
251  assert(lp);
252  lockPair.first = lp->height;
253  lockPair.second = lp->time;
254  }
255  else {
256  std::vector<int> prevheights;
257  prevheights.resize(tx.vin.size());
258  for (size_t txinIndex = 0; txinIndex < tx.vin.size(); txinIndex++) {
259  const CTxIn& txin = tx.vin[txinIndex];
260  Coin coin;
261  if (!coins_view.GetCoin(txin.prevout, coin)) {
262  return error("%s: Missing input", __func__);
263  }
264  if (coin.nHeight == MEMPOOL_HEIGHT) {
265  // Assume all mempool transaction confirm in the next block
266  prevheights[txinIndex] = tip->nHeight + 1;
267  } else {
268  prevheights[txinIndex] = coin.nHeight;
269  }
270  }
271  lockPair = CalculateSequenceLocks(tx, flags, prevheights, index);
272  if (lp) {
273  lp->height = lockPair.first;
274  lp->time = lockPair.second;
275  // Also store the hash of the block with the highest height of
276  // all the blocks which have sequence locked prevouts.
277  // This hash needs to still be on the chain
278  // for these LockPoint calculations to be valid
279  // Note: It is impossible to correctly calculate a maxInputBlock
280  // if any of the sequence locked inputs depend on unconfirmed txs,
281  // except in the special case where the relative lock time/height
282  // is 0, which is equivalent to no sequence lock. Since we assume
283  // input height of tip+1 for mempool txs and test the resulting
284  // lockPair from CalculateSequenceLocks against tip+1. We know
285  // EvaluateSequenceLocks will fail if there was a non-zero sequence
286  // lock on a mempool input, so we can use the return value of
287  // CheckSequenceLocks to indicate the LockPoints validity
288  int maxInputHeight = 0;
289  for (const int height : prevheights) {
290  // Can ignore mempool inputs since we'll fail if they had non-zero locks
291  if (height != tip->nHeight+1) {
292  maxInputHeight = std::max(maxInputHeight, height);
293  }
294  }
295  lp->maxInputBlock = tip->GetAncestor(maxInputHeight);
296  }
297  }
298  return EvaluateSequenceLocks(index, lockPair);
299 }
300 
// Returns the script flags which should be checked for a given block.
// Forward declaration: being `static`, the definition must live later in this
// translation unit.
static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consensus::Params& chainparams);
303 
304 static void LimitMempoolSize(CTxMemPool& pool, CCoinsViewCache& coins_cache, size_t limit, std::chrono::seconds age)
306 {
307  int expired = pool.Expire(GetTime<std::chrono::seconds>() - age);
308  if (expired != 0) {
309  LogPrint(BCLog::MEMPOOL, "Expired %i transactions from the memory pool\n", expired);
310  }
311 
312  std::vector<COutPoint> vNoSpendsRemaining;
313  pool.TrimToSize(limit, &vNoSpendsRemaining);
314  for (const COutPoint& removed : vNoSpendsRemaining)
315  coins_cache.Uncache(removed);
316 }
317 
319 {
321  if (active_chainstate.IsInitialBlockDownload())
322  return false;
323  if (active_chainstate.m_chain.Tip()->GetBlockTime() < count_seconds(GetTime<std::chrono::seconds>() - MAX_FEE_ESTIMATION_TIP_AGE))
324  return false;
325  if (active_chainstate.m_chain.Height() < pindexBestHeader->nHeight - 1)
326  return false;
327  return true;
328 }
329 
331  DisconnectedBlockTransactions& disconnectpool,
332  bool fAddToMempool)
333 {
334  if (!m_mempool) return;
335 
338  std::vector<uint256> vHashUpdate;
339  // disconnectpool's insertion_order index sorts the entries from
340  // oldest to newest, but the oldest entry will be the last tx from the
341  // latest mined block that was disconnected.
342  // Iterate disconnectpool in reverse, so that we add transactions
343  // back to the mempool starting with the earliest transaction that had
344  // been previously seen in a block.
345  auto it = disconnectpool.queuedTx.get<insertion_order>().rbegin();
346  while (it != disconnectpool.queuedTx.get<insertion_order>().rend()) {
347  // ignore validation errors in resurrected transactions
348  if (!fAddToMempool || (*it)->IsCoinBase() ||
350  *this, *m_mempool, *it, true /* bypass_limits */).m_result_type !=
352  // If the transaction doesn't make it in to the mempool, remove any
353  // transactions that depend on it (which would now be orphans).
355  } else if (m_mempool->exists((*it)->GetHash())) {
356  vHashUpdate.push_back((*it)->GetHash());
357  }
358  ++it;
359  }
360  disconnectpool.queuedTx.clear();
361  // AcceptToMemoryPool/addUnchecked all assume that new mempool entries have
362  // no in-mempool children, which is generally not true when adding
363  // previously-confirmed transactions back to the mempool.
364  // UpdateTransactionsFromBlock finds descendants of any transactions in
365  // the disconnectpool that were added back and cleans up the mempool state.
367 
368  // We also need to remove any now-immature transactions
370  // Re-limit mempool size, in case we added any transactions
372  *m_mempool,
373  this->CoinsTip(),
374  gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000,
375  std::chrono::hours{gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY)});
376 }
377 
384  const CCoinsViewCache& view, const CTxMemPool& pool,
385  unsigned int flags, PrecomputedTransactionData& txdata, CCoinsViewCache& coins_tip)
387 {
389  AssertLockHeld(pool.cs);
390 
391  assert(!tx.IsCoinBase());
392  for (const CTxIn& txin : tx.vin) {
393  const Coin& coin = view.AccessCoin(txin.prevout);
394 
395  // This coin was checked in PreChecks and MemPoolAccept
396  // has been holding cs_main since then.
397  Assume(!coin.IsSpent());
398  if (coin.IsSpent()) return false;
399 
400  // If the Coin is available, there are 2 possibilities:
401  // it is available in our current ChainstateActive UTXO set,
402  // or it's a UTXO provided by a transaction in our mempool.
403  // Ensure the scriptPubKeys in Coins from CoinsView are correct.
404  const CTransactionRef& txFrom = pool.get(txin.prevout.hash);
405  if (txFrom) {
406  assert(txFrom->GetHash() == txin.prevout.hash);
407  assert(txFrom->vout.size() > txin.prevout.n);
408  assert(txFrom->vout[txin.prevout.n] == coin.out);
409  } else {
410  const Coin& coinFromUTXOSet = coins_tip.AccessCoin(txin.prevout);
411  assert(!coinFromUTXOSet.IsSpent());
412  assert(coinFromUTXOSet.out == coin.out);
413  }
414  }
415 
416  // Call CheckInputScripts() to cache signature and script validity against current tip consensus rules.
417  return CheckInputScripts(tx, state, view, flags, /* cacheSigStore = */ true, /* cacheFullSciptStore = */ true, txdata);
418 }
419 
420 namespace {
421 
// Encapsulates the state and stages of accepting transactions into the
// mempool: fast policy checks (PreChecks), policy-flag script checks
// (PolicyScriptChecks), consensus-flag script checks (ConsensusScriptChecks),
// and final insertion (Finalize).
class MemPoolAccept
{
public:
    explicit MemPoolAccept(CTxMemPool& mempool, CChainState& active_chainstate) : m_pool(mempool), m_view(&m_dummy), m_viewmempool(&active_chainstate.CoinsTip(), m_pool), m_active_chainstate(active_chainstate),
        m_limit_ancestors(gArgs.GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT)),
        m_limit_ancestor_size(gArgs.GetArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT)*1000),
        m_limit_descendants(gArgs.GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT)),
        m_limit_descendant_size(gArgs.GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT)*1000) {
    }

    // We put the arguments we're handed into a struct, so we can pass them
    // around easier.
    struct ATMPArgs {
        const CChainParams& m_chainparams;
        //! Timestamp recorded as the new mempool entry's acceptance time
        //! (passed to the CTxMemPoolEntry constructor in PreChecks).
        const int64_t m_accept_time;
        //! When true, skip the fee-rate limits (CheckFeeRate is bypassed in
        //! PreChecks) — used for transactions re-added from disconnected blocks.
        const bool m_bypass_limits;
        /*
         * Return any outpoints which were not previously present in the coins
         * cache, but were added as a result of validating the tx for mempool
         * acceptance. This allows the caller to optionally remove the cache
         * additions if the associated transaction ends up being rejected by
         * the mempool.
         */
        std::vector<COutPoint>& m_coins_to_uncache;
        //! Validate-only mode — NOTE(review): not referenced in the portion of
        //! this file visible here; presumably skips Finalize. Confirm.
        const bool m_test_accept;
        //! Whether a transaction conflicting with mempool entries may attempt
        //! BIP125 replacement; when false PreChecks rejects such conflicts
        //! with "bip125-replacement-disallowed".
        const bool m_allow_bip125_replacement{true};
    };

    // Single transaction acceptance
    MempoolAcceptResult AcceptSingleTransaction(const CTransactionRef& ptx, ATMPArgs& args) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    // Multiple (package) transaction acceptance.
    PackageMempoolAcceptResult AcceptMultipleTransactions(const std::vector<CTransactionRef>& txns, ATMPArgs& args) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

private:
    // All the intermediate state that gets passed between the various levels
    // of checking a given transaction.
    struct Workspace {
        explicit Workspace(const CTransactionRef& ptx) : m_ptx(ptx), m_hash(ptx->GetHash()) {}
        //! Txids of mempool transactions that conflict with (spend an output
        //! also spent by) this transaction; filled in PreChecks.
        std::set<uint256> m_conflicts;
        //! Mempool entries that would be removed if this replacement succeeds.
        CTxMemPool::setEntries m_all_conflicting;
        //! In-mempool ancestors of this transaction (from
        //! CalculateMemPoolAncestors in PreChecks).
        CTxMemPool::setEntries m_ancestors;
        //! Mempool entry constructed for this transaction in PreChecks.
        std::unique_ptr<CTxMemPoolEntry> m_entry;
        std::list<CTransactionRef> m_replaced_transactions;

        //! True when this acceptance replaces existing mempool transactions
        //! (set from setConflicts being non-empty in PreChecks).
        bool m_replacement_transaction;
        //! Fees paid by the transaction itself (from Consensus::CheckTxInputs).
        CAmount m_base_fees;
        //! m_base_fees plus any PrioritiseTransaction deltas
        //! (m_pool.ApplyDelta in PreChecks).
        CAmount m_modified_fees;
        //! Accumulated fees of the transactions being replaced.
        CAmount m_conflicting_fees;
        //! Accumulated size of the transactions being replaced.
        size_t m_conflicting_size;

        const CTransactionRef& m_ptx;
        //! Txid of m_ptx, cached at construction.
        const uint256& m_hash;
        //! Validation result/failure reason for this transaction.
        TxValidationState m_state;
    };

    // Run the policy checks on a given transaction, excluding any script checks.
    // Looks up inputs, calculates feerate, considers replacement, evaluates
    // package limits, etc. As this function can be invoked for "free" by a peer,
    // only tests that are fast should be done here (to avoid CPU DoS).
    bool PreChecks(ATMPArgs& args, Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);

    // Run the script checks using our policy flags. As this can be slow, we should
    // only invoke this on transactions that have otherwise passed policy checks.
    bool PolicyScriptChecks(const ATMPArgs& args, Workspace& ws, PrecomputedTransactionData& txdata) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);

    // Re-run the script checks, using consensus flags, and try to cache the
    // result in the scriptcache. This should be done after
    // PolicyScriptChecks(). This requires that all inputs either be in our
    // utxo set or in the mempool.
    bool ConsensusScriptChecks(const ATMPArgs& args, Workspace& ws, PrecomputedTransactionData &txdata) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);

    // Try to add the transaction to the mempool, removing any conflicts first.
    // Returns true if the transaction is in the mempool after any size
    // limiting is performed, false otherwise.
    bool Finalize(const ATMPArgs& args, Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);

    // Compare a package's feerate against minimum allowed: first the dynamic
    // mempool minimum fee (from -maxmempool trimming), then the static
    // -minrelaytxfee floor.
    bool CheckFeeRate(size_t package_size, CAmount package_fee, TxValidationState& state) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs)
    {
        CAmount mempoolRejectFee = m_pool.GetMinFee(gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFee(package_size);
        if (mempoolRejectFee > 0 && package_fee < mempoolRejectFee) {
            return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "mempool min fee not met", strprintf("%d < %d", package_fee, mempoolRejectFee));
        }

        if (package_fee < ::minRelayTxFee.GetFee(package_size)) {
            return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "min relay fee not met", strprintf("%d < %d", package_fee, ::minRelayTxFee.GetFee(package_size)));
        }
        return true;
    }

private:
    CTxMemPool& m_pool;
    //! Coins view used during validation. Initially backed by m_dummy; PreChecks
    //! temporarily switches its backend to m_viewmempool to fetch inputs, then
    //! back to m_dummy once all inputs are cached.
    CCoinsViewCache m_view;
    //! Coins view constructed over the chainstate's CoinsTip and the mempool.
    CCoinsViewMemPool m_viewmempool;
    //! Empty backing view used to cut m_view off from further backend fetches
    //! (protects against pulling inputs that would miss coins_to_uncache).
    CCoinsView m_dummy;

    CChainState& m_active_chainstate;

    // The package limits in effect at the time of invocation.
    const size_t m_limit_ancestors;
    const size_t m_limit_ancestor_size;
    // These may be modified while evaluating a transaction (eg to account for
    // in-mempool conflicts; see below).
    size_t m_limit_descendants;
    size_t m_limit_descendant_size;
};
536 
537 bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws)
538 {
539  const CTransactionRef& ptx = ws.m_ptx;
540  const CTransaction& tx = *ws.m_ptx;
541  const uint256& hash = ws.m_hash;
542 
543  // Copy/alias what we need out of args
544  const int64_t nAcceptTime = args.m_accept_time;
545  const bool bypass_limits = args.m_bypass_limits;
546  std::vector<COutPoint>& coins_to_uncache = args.m_coins_to_uncache;
547 
548  // Alias what we need out of ws
549  TxValidationState& state = ws.m_state;
550  std::set<uint256>& setConflicts = ws.m_conflicts;
551  CTxMemPool::setEntries& allConflicting = ws.m_all_conflicting;
552  CTxMemPool::setEntries& setAncestors = ws.m_ancestors;
553  std::unique_ptr<CTxMemPoolEntry>& entry = ws.m_entry;
554  bool& fReplacementTransaction = ws.m_replacement_transaction;
555  CAmount& nModifiedFees = ws.m_modified_fees;
556  CAmount& nConflictingFees = ws.m_conflicting_fees;
557  size_t& nConflictingSize = ws.m_conflicting_size;
558 
559  if (!CheckTransaction(tx, state)) {
560  return false; // state filled in by CheckTransaction
561  }
562 
563  // Coinbase is only valid in a block, not as a loose transaction
564  if (tx.IsCoinBase())
565  return state.Invalid(TxValidationResult::TX_CONSENSUS, "coinbase");
566 
567  // Rather not work on nonstandard transactions (unless -testnet/-regtest)
568  std::string reason;
569  if (fRequireStandard && !IsStandardTx(tx, reason))
570  return state.Invalid(TxValidationResult::TX_NOT_STANDARD, reason);
571 
572  // Do not work on transactions that are too small.
573  // A transaction with 1 segwit input and 1 P2WPHK output has non-witness size of 82 bytes.
574  // Transactions smaller than this are not relayed to mitigate CVE-2017-12842 by not relaying
575  // 64-byte transactions.
577  return state.Invalid(TxValidationResult::TX_NOT_STANDARD, "tx-size-small");
578 
579  // Only accept nLockTime-using transactions that can be mined in the next
580  // block; we don't want our mempool filled up with transactions that can't
581  // be mined yet.
582  if (!CheckFinalTx(m_active_chainstate.m_chain.Tip(), tx, STANDARD_LOCKTIME_VERIFY_FLAGS))
583  return state.Invalid(TxValidationResult::TX_PREMATURE_SPEND, "non-final");
584 
585  if (m_pool.exists(GenTxid(true, tx.GetWitnessHash()))) {
586  // Exact transaction already exists in the mempool.
587  return state.Invalid(TxValidationResult::TX_CONFLICT, "txn-already-in-mempool");
588  } else if (m_pool.exists(GenTxid(false, tx.GetHash()))) {
589  // Transaction with the same non-witness data but different witness (same txid, different
590  // wtxid) already exists in the mempool.
591  return state.Invalid(TxValidationResult::TX_CONFLICT, "txn-same-nonwitness-data-in-mempool");
592  }
593 
594  // Check for conflicts with in-memory transactions
595  for (const CTxIn &txin : tx.vin)
596  {
597  const CTransaction* ptxConflicting = m_pool.GetConflictTx(txin.prevout);
598  if (ptxConflicting) {
599  if (!args.m_allow_bip125_replacement) {
600  // Transaction conflicts with a mempool tx, but we're not allowing replacements.
601  return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "bip125-replacement-disallowed");
602  }
603  if (!setConflicts.count(ptxConflicting->GetHash()))
604  {
605  // Allow opt-out of transaction replacement by setting
606  // nSequence > MAX_BIP125_RBF_SEQUENCE (SEQUENCE_FINAL-2) on all inputs.
607  //
608  // SEQUENCE_FINAL-1 is picked to still allow use of nLockTime by
609  // non-replaceable transactions. All inputs rather than just one
610  // is for the sake of multi-party protocols, where we don't
611  // want a single party to be able to disable replacement.
612  //
613  // Transactions that don't explicitly signal replaceability are
614  // *not* replaceable with the current logic, even if one of their
615  // unconfirmed ancestors signals replaceability. This diverges
616  // from BIP125's inherited signaling description (see CVE-2021-31876).
617  // Applications relying on first-seen mempool behavior should
618  // check all unconfirmed ancestors; otherwise an opt-in ancestor
619  // might be replaced, causing removal of this descendant.
620  bool fReplacementOptOut = true;
621  for (const CTxIn &_txin : ptxConflicting->vin)
622  {
623  if (_txin.nSequence <= MAX_BIP125_RBF_SEQUENCE)
624  {
625  fReplacementOptOut = false;
626  break;
627  }
628  }
629  if (fReplacementOptOut) {
630  return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "txn-mempool-conflict");
631  }
632 
633  setConflicts.insert(ptxConflicting->GetHash());
634  }
635  }
636  }
637 
638  LockPoints lp;
639  m_view.SetBackend(m_viewmempool);
640 
641  const CCoinsViewCache& coins_cache = m_active_chainstate.CoinsTip();
642  // do all inputs exist?
643  for (const CTxIn& txin : tx.vin) {
644  if (!coins_cache.HaveCoinInCache(txin.prevout)) {
645  coins_to_uncache.push_back(txin.prevout);
646  }
647 
648  // Note: this call may add txin.prevout to the coins cache
649  // (coins_cache.cacheCoins) by way of FetchCoin(). It should be removed
650  // later (via coins_to_uncache) if this tx turns out to be invalid.
651  if (!m_view.HaveCoin(txin.prevout)) {
652  // Are inputs missing because we already have the tx?
653  for (size_t out = 0; out < tx.vout.size(); out++) {
654  // Optimistically just do efficient check of cache for outputs
655  if (coins_cache.HaveCoinInCache(COutPoint(hash, out))) {
656  return state.Invalid(TxValidationResult::TX_CONFLICT, "txn-already-known");
657  }
658  }
659  // Otherwise assume this might be an orphan tx for which we just haven't seen parents yet
660  return state.Invalid(TxValidationResult::TX_MISSING_INPUTS, "bad-txns-inputs-missingorspent");
661  }
662  }
663 
664  // This is const, but calls into the back end CoinsViews. The CCoinsViewDB at the bottom of the
665  // hierarchy brings the best block into scope. See CCoinsViewDB::GetBestBlock().
666  m_view.GetBestBlock();
667 
668  // we have all inputs cached now, so switch back to dummy (to protect
669  // against bugs where we pull more inputs from disk that miss being added
670  // to coins_to_uncache)
671  m_view.SetBackend(m_dummy);
672 
673  // Only accept BIP68 sequence locked transactions that can be mined in the next
674  // block; we don't want our mempool filled up with transactions that can't
675  // be mined yet.
676  // Pass in m_view which has all of the relevant inputs cached. Note that, since m_view's
677  // backend was removed, it no longer pulls coins from the mempool.
678  if (!CheckSequenceLocks(m_active_chainstate.m_chain.Tip(), m_view, tx, STANDARD_LOCKTIME_VERIFY_FLAGS, &lp))
679  return state.Invalid(TxValidationResult::TX_PREMATURE_SPEND, "non-BIP68-final");
680 
681  if (!Consensus::CheckTxInputs(tx, state, m_view, m_active_chainstate.m_blockman.GetSpendHeight(m_view), ws.m_base_fees)) {
682  return false; // state filled in by CheckTxInputs
683  }
684 
685  // Check for non-standard pay-to-script-hash in inputs
686  const bool taproot_active = DeploymentActiveAfter(m_active_chainstate.m_chain.Tip(), args.m_chainparams.GetConsensus(), Consensus::DEPLOYMENT_TAPROOT);
687  if (fRequireStandard && !AreInputsStandard(tx, m_view, taproot_active)) {
688  return state.Invalid(TxValidationResult::TX_INPUTS_NOT_STANDARD, "bad-txns-nonstandard-inputs");
689  }
690 
691  // Check for non-standard witnesses.
692  if (tx.HasWitness() && fRequireStandard && !IsWitnessStandard(tx, m_view))
693  return state.Invalid(TxValidationResult::TX_WITNESS_MUTATED, "bad-witness-nonstandard");
694 
695  int64_t nSigOpsCost = GetTransactionSigOpCost(tx, m_view, STANDARD_SCRIPT_VERIFY_FLAGS);
696 
697  // nModifiedFees includes any fee deltas from PrioritiseTransaction
698  nModifiedFees = ws.m_base_fees;
699  m_pool.ApplyDelta(hash, nModifiedFees);
700 
701  // Keep track of transactions that spend a coinbase, which we re-scan
702  // during reorgs to ensure COINBASE_MATURITY is still met.
703  bool fSpendsCoinbase = false;
704  for (const CTxIn &txin : tx.vin) {
705  const Coin &coin = m_view.AccessCoin(txin.prevout);
706  if (coin.IsCoinBase()) {
707  fSpendsCoinbase = true;
708  break;
709  }
710  }
711 
712  entry.reset(new CTxMemPoolEntry(ptx, ws.m_base_fees, nAcceptTime, m_active_chainstate.m_chain.Height(),
713  fSpendsCoinbase, nSigOpsCost, lp));
714  unsigned int nSize = entry->GetTxSize();
715 
716  if (nSigOpsCost > MAX_STANDARD_TX_SIGOPS_COST)
717  return state.Invalid(TxValidationResult::TX_NOT_STANDARD, "bad-txns-too-many-sigops",
718  strprintf("%d", nSigOpsCost));
719 
720  // No transactions are allowed below minRelayTxFee except from disconnected
721  // blocks
722  if (!bypass_limits && !CheckFeeRate(nSize, nModifiedFees, state)) return false;
723 
724  const CTxMemPool::setEntries setIterConflicting = m_pool.GetIterSet(setConflicts);
725  // Calculate in-mempool ancestors, up to a limit.
726  if (setConflicts.size() == 1) {
727  // In general, when we receive an RBF transaction with mempool conflicts, we want to know whether we
728  // would meet the chain limits after the conflicts have been removed. However, there isn't a practical
729  // way to do this short of calculating the ancestor and descendant sets with an overlay cache of
730  // changed mempool entries. Due to both implementation and runtime complexity concerns, this isn't
731  // very realistic, thus we only ensure a limited set of transactions are RBF'able despite mempool
732  // conflicts here. Importantly, we need to ensure that some transactions which were accepted using
733  // the below carve-out are able to be RBF'ed, without impacting the security the carve-out provides
734  // for off-chain contract systems (see link in the comment below).
735  //
736  // Specifically, the subset of RBF transactions which we allow despite chain limits are those which
737  // conflict directly with exactly one other transaction (but may evict children of said transaction),
738  // and which are not adding any new mempool dependencies. Note that the "no new mempool dependencies"
739  // check is accomplished later, so we don't bother doing anything about it here, but if BIP 125 is
740  // amended, we may need to move that check to here instead of removing it wholesale.
741  //
742  // Such transactions are clearly not merging any existing packages, so we are only concerned with
743  // ensuring that (a) no package is growing past the package size (not count) limits and (b) we are
744  // not allowing something to effectively use the (below) carve-out spot when it shouldn't be allowed
745  // to.
746  //
747  // To check these we first check if we meet the RBF criteria, above, and increment the descendant
748  // limits by the direct conflict and its descendants (as these are recalculated in
749  // CalculateMempoolAncestors by assuming the new transaction being added is a new descendant, with no
750  // removals, of each parent's existing dependent set). The ancestor count limits are unmodified (as
751  // the ancestor limits should be the same for both our new transaction and any conflicts).
752  // We don't bother incrementing m_limit_descendants by the full removal count as that limit never comes
753  // into force here (as we're only adding a single transaction).
754  assert(setIterConflicting.size() == 1);
755  CTxMemPool::txiter conflict = *setIterConflicting.begin();
756 
757  m_limit_descendants += 1;
758  m_limit_descendant_size += conflict->GetSizeWithDescendants();
759  }
760 
761  std::string errString;
762  if (!m_pool.CalculateMemPoolAncestors(*entry, setAncestors, m_limit_ancestors, m_limit_ancestor_size, m_limit_descendants, m_limit_descendant_size, errString)) {
763  setAncestors.clear();
764  // If CalculateMemPoolAncestors fails second time, we want the original error string.
765  std::string dummy_err_string;
766  // Contracting/payment channels CPFP carve-out:
767  // If the new transaction is relatively small (up to 40k weight)
768  // and has at most one ancestor (ie ancestor limit of 2, including
769  // the new transaction), allow it if its parent has exactly the
770  // descendant limit descendants.
771  //
772  // This allows protocols which rely on distrusting counterparties
773  // being able to broadcast descendants of an unconfirmed transaction
774  // to be secure by simply only having two immediately-spendable
775  // outputs - one for each counterparty. For more info on the uses for
776  // this, see https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2018-November/016518.html
777  if (nSize > EXTRA_DESCENDANT_TX_SIZE_LIMIT ||
778  !m_pool.CalculateMemPoolAncestors(*entry, setAncestors, 2, m_limit_ancestor_size, m_limit_descendants + 1, m_limit_descendant_size + EXTRA_DESCENDANT_TX_SIZE_LIMIT, dummy_err_string)) {
779  return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "too-long-mempool-chain", errString);
780  }
781  }
782 
783  // A transaction that spends outputs that would be replaced by it is invalid. Now
784  // that we have the set of all ancestors we can detect this
785  // pathological case by making sure setConflicts and setAncestors don't
786  // intersect.
787  for (CTxMemPool::txiter ancestorIt : setAncestors)
788  {
789  const uint256 &hashAncestor = ancestorIt->GetTx().GetHash();
790  if (setConflicts.count(hashAncestor))
791  {
792  return state.Invalid(TxValidationResult::TX_CONSENSUS, "bad-txns-spends-conflicting-tx",
793  strprintf("%s spends conflicting transaction %s",
794  hash.ToString(),
795  hashAncestor.ToString()));
796  }
797  }
798 
799  // Check if it's economically rational to mine this transaction rather
800  // than the ones it replaces.
801  nConflictingFees = 0;
802  nConflictingSize = 0;
803  uint64_t nConflictingCount = 0;
804 
805  // If we don't hold the lock allConflicting might be incomplete; the
806  // subsequent RemoveStaged() and addUnchecked() calls don't guarantee
807  // mempool consistency for us.
808  fReplacementTransaction = setConflicts.size();
809  if (fReplacementTransaction)
810  {
811  CFeeRate newFeeRate(nModifiedFees, nSize);
812  std::set<uint256> setConflictsParents;
813  const int maxDescendantsToVisit = 100;
814  for (const auto& mi : setIterConflicting) {
815  // Don't allow the replacement to reduce the feerate of the
816  // mempool.
817  //
818  // We usually don't want to accept replacements with lower
819  // feerates than what they replaced as that would lower the
820  // feerate of the next block. Requiring that the feerate always
821  // be increased is also an easy-to-reason about way to prevent
822  // DoS attacks via replacements.
823  //
824  // We only consider the feerates of transactions being directly
825  // replaced, not their indirect descendants. While that does
826  // mean high feerate children are ignored when deciding whether
827  // or not to replace, we do require the replacement to pay more
828  // overall fees too, mitigating most cases.
829  CFeeRate oldFeeRate(mi->GetModifiedFee(), mi->GetTxSize());
830  if (newFeeRate <= oldFeeRate)
831  {
832  return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "insufficient fee",
833  strprintf("rejecting replacement %s; new feerate %s <= old feerate %s",
834  hash.ToString(),
835  newFeeRate.ToString(),
836  oldFeeRate.ToString()));
837  }
838 
839  for (const CTxIn &txin : mi->GetTx().vin)
840  {
841  setConflictsParents.insert(txin.prevout.hash);
842  }
843 
844  nConflictingCount += mi->GetCountWithDescendants();
845  }
846  // This potentially overestimates the number of actual descendants
847  // but we just want to be conservative to avoid doing too much
848  // work.
849  if (nConflictingCount <= maxDescendantsToVisit) {
850  // If not too many to replace, then calculate the set of
851  // transactions that would have to be evicted
852  for (CTxMemPool::txiter it : setIterConflicting) {
853  m_pool.CalculateDescendants(it, allConflicting);
854  }
855  for (CTxMemPool::txiter it : allConflicting) {
856  nConflictingFees += it->GetModifiedFee();
857  nConflictingSize += it->GetTxSize();
858  }
859  } else {
860  return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "too many potential replacements",
861  strprintf("rejecting replacement %s; too many potential replacements (%d > %d)\n",
862  hash.ToString(),
863  nConflictingCount,
864  maxDescendantsToVisit));
865  }
866 
867  for (unsigned int j = 0; j < tx.vin.size(); j++)
868  {
869  // We don't want to accept replacements that require low
870  // feerate junk to be mined first. Ideally we'd keep track of
871  // the ancestor feerates and make the decision based on that,
872  // but for now requiring all new inputs to be confirmed works.
873  //
874  // Note that if you relax this to make RBF a little more useful,
875  // this may break the CalculateMempoolAncestors RBF relaxation,
876  // above. See the comment above the first CalculateMempoolAncestors
877  // call for more info.
878  if (!setConflictsParents.count(tx.vin[j].prevout.hash))
879  {
880  // Rather than check the UTXO set - potentially expensive -
881  // it's cheaper to just check if the new input refers to a
882  // tx that's in the mempool.
883  if (m_pool.exists(tx.vin[j].prevout.hash)) {
884  return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "replacement-adds-unconfirmed",
885  strprintf("replacement %s adds unconfirmed input, idx %d",
886  hash.ToString(), j));
887  }
888  }
889  }
890 
891  // The replacement must pay greater fees than the transactions it
892  // replaces - if we did the bandwidth used by those conflicting
893  // transactions would not be paid for.
894  if (nModifiedFees < nConflictingFees)
895  {
896  return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "insufficient fee",
897  strprintf("rejecting replacement %s, less fees than conflicting txs; %s < %s",
898  hash.ToString(), FormatMoney(nModifiedFees), FormatMoney(nConflictingFees)));
899  }
900 
901  // Finally in addition to paying more fees than the conflicts the
902  // new transaction must pay for its own bandwidth.
903  CAmount nDeltaFees = nModifiedFees - nConflictingFees;
904  if (nDeltaFees < ::incrementalRelayFee.GetFee(nSize))
905  {
906  return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "insufficient fee",
907  strprintf("rejecting replacement %s, not enough additional fees to relay; %s < %s",
908  hash.ToString(),
909  FormatMoney(nDeltaFees),
911  }
912  }
913  return true;
914 }
915 
916 bool MemPoolAccept::PolicyScriptChecks(const ATMPArgs& args, Workspace& ws, PrecomputedTransactionData& txdata)
917 {
918  const CTransaction& tx = *ws.m_ptx;
919  TxValidationState& state = ws.m_state;
920 
921  constexpr unsigned int scriptVerifyFlags = STANDARD_SCRIPT_VERIFY_FLAGS;
922 
923  // Check input scripts and signatures.
924  // This is done last to help prevent CPU exhaustion denial-of-service attacks.
925  if (!CheckInputScripts(tx, state, m_view, scriptVerifyFlags, true, false, txdata)) {
926  // SCRIPT_VERIFY_CLEANSTACK requires SCRIPT_VERIFY_WITNESS, so we
927  // need to turn both off, and compare against just turning off CLEANSTACK
928  // to see if the failure is specifically due to witness validation.
929  TxValidationState state_dummy; // Want reported failures to be from first CheckInputScripts
930  if (!tx.HasWitness() && CheckInputScripts(tx, state_dummy, m_view, scriptVerifyFlags & ~(SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_CLEANSTACK), true, false, txdata) &&
931  !CheckInputScripts(tx, state_dummy, m_view, scriptVerifyFlags & ~SCRIPT_VERIFY_CLEANSTACK, true, false, txdata)) {
932  // Only the witness is missing, so the transaction itself may be fine.
934  state.GetRejectReason(), state.GetDebugMessage());
935  }
936  return false; // state filled in by CheckInputScripts
937  }
938 
939  return true;
940 }
941 
942 bool MemPoolAccept::ConsensusScriptChecks(const ATMPArgs& args, Workspace& ws, PrecomputedTransactionData& txdata)
943 {
944  const CTransaction& tx = *ws.m_ptx;
945  const uint256& hash = ws.m_hash;
946  TxValidationState& state = ws.m_state;
947  const CChainParams& chainparams = args.m_chainparams;
948 
949  // Check again against the current block tip's script verification
950  // flags to cache our script execution flags. This is, of course,
951  // useless if the next block has different script flags from the
952  // previous one, but because the cache tracks script flags for us it
953  // will auto-invalidate and we'll just have a few blocks of extra
954  // misses on soft-fork activation.
955  //
956  // This is also useful in case of bugs in the standard flags that cause
957  // transactions to pass as valid when they're actually invalid. For
958  // instance the STRICTENC flag was incorrectly allowing certain
959  // CHECKSIG NOT scripts to pass, even though they were invalid.
960  //
961  // There is a similar check in CreateNewBlock() to prevent creating
962  // invalid blocks (using TestBlockValidity), however allowing such
963  // transactions into the mempool can be exploited as a DoS attack.
964  unsigned int currentBlockScriptVerifyFlags = GetBlockScriptFlags(m_active_chainstate.m_chain.Tip(), chainparams.GetConsensus());
965  if (!CheckInputsFromMempoolAndCache(tx, state, m_view, m_pool, currentBlockScriptVerifyFlags, txdata, m_active_chainstate.CoinsTip())) {
966  return error("%s: BUG! PLEASE REPORT THIS! CheckInputScripts failed against latest-block but not STANDARD flags %s, %s",
967  __func__, hash.ToString(), state.ToString());
968  }
969 
970  return true;
971 }
972 
973 bool MemPoolAccept::Finalize(const ATMPArgs& args, Workspace& ws)
974 {
975  const CTransaction& tx = *ws.m_ptx;
976  const uint256& hash = ws.m_hash;
977  TxValidationState& state = ws.m_state;
978  const bool bypass_limits = args.m_bypass_limits;
979 
980  CTxMemPool::setEntries& allConflicting = ws.m_all_conflicting;
981  CTxMemPool::setEntries& setAncestors = ws.m_ancestors;
982  const CAmount& nModifiedFees = ws.m_modified_fees;
983  const CAmount& nConflictingFees = ws.m_conflicting_fees;
984  const size_t& nConflictingSize = ws.m_conflicting_size;
985  const bool fReplacementTransaction = ws.m_replacement_transaction;
986  std::unique_ptr<CTxMemPoolEntry>& entry = ws.m_entry;
987 
988  // Remove conflicting transactions from the mempool
989  for (CTxMemPool::txiter it : allConflicting)
990  {
991  LogPrint(BCLog::MEMPOOL, "replacing tx %s with %s for %s additional fees, %d delta bytes\n",
992  it->GetTx().GetHash().ToString(),
993  hash.ToString(),
994  FormatMoney(nModifiedFees - nConflictingFees),
995  (int)entry->GetTxSize() - (int)nConflictingSize);
996  ws.m_replaced_transactions.push_back(it->GetSharedTx());
997  }
998  m_pool.RemoveStaged(allConflicting, false, MemPoolRemovalReason::REPLACED);
999 
1000  // This transaction should only count for fee estimation if:
1001  // - it isn't a BIP 125 replacement transaction (may not be widely supported)
1002  // - it's not being re-added during a reorg which bypasses typical mempool fee limits
1003  // - the node is not behind
1004  // - the transaction is not dependent on any other transactions in the mempool
1005  bool validForFeeEstimation = !fReplacementTransaction && !bypass_limits && IsCurrentForFeeEstimation(m_active_chainstate) && m_pool.HasNoInputsOf(tx);
1006 
1007  // Store transaction in memory
1008  m_pool.addUnchecked(*entry, setAncestors, validForFeeEstimation);
1009 
1010  // trim mempool and check if tx was trimmed
1011  if (!bypass_limits) {
1012  LimitMempoolSize(m_pool, m_active_chainstate.CoinsTip(), gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, std::chrono::hours{gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY)});
1013  if (!m_pool.exists(hash))
1014  return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "mempool full");
1015  }
1016  return true;
1017 }
1018 
1019 MempoolAcceptResult MemPoolAccept::AcceptSingleTransaction(const CTransactionRef& ptx, ATMPArgs& args)
1020 {
1022  LOCK(m_pool.cs); // mempool "read lock" (held through GetMainSignals().TransactionAddedToMempool())
1023 
1024  Workspace ws(ptx);
1025 
1026  if (!PreChecks(args, ws)) return MempoolAcceptResult::Failure(ws.m_state);
1027 
1028  // Only compute the precomputed transaction data if we need to verify
1029  // scripts (ie, other policy checks pass). We perform the inexpensive
1030  // checks first and avoid hashing and signature verification unless those
1031  // checks pass, to mitigate CPU exhaustion denial-of-service attacks.
1033 
1034  if (!PolicyScriptChecks(args, ws, txdata)) return MempoolAcceptResult::Failure(ws.m_state);
1035 
1036  if (!ConsensusScriptChecks(args, ws, txdata)) return MempoolAcceptResult::Failure(ws.m_state);
1037 
1038  // Tx was accepted, but not added
1039  if (args.m_test_accept) {
1040  return MempoolAcceptResult::Success(std::move(ws.m_replaced_transactions), ws.m_base_fees);
1041  }
1042 
1043  if (!Finalize(args, ws)) return MempoolAcceptResult::Failure(ws.m_state);
1044 
1045  GetMainSignals().TransactionAddedToMempool(ptx, m_pool.GetAndIncrementSequence());
1046 
1047  return MempoolAcceptResult::Success(std::move(ws.m_replaced_transactions), ws.m_base_fees);
1048 }
1049 
1050 PackageMempoolAcceptResult MemPoolAccept::AcceptMultipleTransactions(const std::vector<CTransactionRef>& txns, ATMPArgs& args)
1051 {
1053 
1054  // These context-free package limits can be done before taking the mempool lock.
1055  PackageValidationState package_state;
1056  if (!CheckPackage(txns, package_state)) return PackageMempoolAcceptResult(package_state, {});
1057 
1058  std::vector<Workspace> workspaces{};
1059  workspaces.reserve(txns.size());
1060  std::transform(txns.cbegin(), txns.cend(), std::back_inserter(workspaces),
1061  [](const auto& tx) { return Workspace(tx); });
1062  std::map<const uint256, const MempoolAcceptResult> results;
1063 
1064  LOCK(m_pool.cs);
1065 
1066  // Do all PreChecks first and fail fast to avoid running expensive script checks when unnecessary.
1067  for (Workspace& ws : workspaces) {
1068  if (!PreChecks(args, ws)) {
1069  package_state.Invalid(PackageValidationResult::PCKG_TX, "transaction failed");
1070  // Exit early to avoid doing pointless work. Update the failed tx result; the rest are unfinished.
1071  results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state));
1072  return PackageMempoolAcceptResult(package_state, std::move(results));
1073  }
1074  // Make the coins created by this transaction available for subsequent transactions in the
1075  // package to spend. Since we already checked conflicts in the package and we don't allow
1076  // replacements, we don't need to track the coins spent. Note that this logic will need to be
1077  // updated if package replace-by-fee is allowed in the future.
1078  assert(!args.m_allow_bip125_replacement);
1079  m_viewmempool.PackageAddTransaction(ws.m_ptx);
1080  }
1081 
1082  for (Workspace& ws : workspaces) {
1084  if (!PolicyScriptChecks(args, ws, txdata)) {
1085  // Exit early to avoid doing pointless work. Update the failed tx result; the rest are unfinished.
1086  package_state.Invalid(PackageValidationResult::PCKG_TX, "transaction failed");
1087  results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state));
1088  return PackageMempoolAcceptResult(package_state, std::move(results));
1089  }
1090  if (args.m_test_accept) {
1091  // When test_accept=true, transactions that pass PolicyScriptChecks are valid because there are
1092  // no further mempool checks (passing PolicyScriptChecks implies passing ConsensusScriptChecks).
1093  results.emplace(ws.m_ptx->GetWitnessHash(),
1094  MempoolAcceptResult::Success(std::move(ws.m_replaced_transactions), ws.m_base_fees));
1095  }
1096  }
1097 
1098  return PackageMempoolAcceptResult(package_state, std::move(results));
1099 }
1100 
1101 } // anon namespace
1102 
1105  CChainState& active_chainstate,
1106  const CTransactionRef &tx, int64_t nAcceptTime,
1107  bool bypass_limits, bool test_accept)
1109 {
1110  std::vector<COutPoint> coins_to_uncache;
1111  MemPoolAccept::ATMPArgs args { chainparams, nAcceptTime, bypass_limits, coins_to_uncache,
1112  test_accept, /* m_allow_bip125_replacement */ true };
1113 
1114  const MempoolAcceptResult result = MemPoolAccept(pool, active_chainstate).AcceptSingleTransaction(tx, args);
1116  // Remove coins that were not present in the coins cache before calling
1117  // AcceptSingleTransaction(); this is to prevent memory DoS in case we receive a large
1118  // number of invalid transactions that attempt to overrun the in-memory coins cache
1119  // (`CCoinsViewCache::cacheCoins`).
1120 
1121  for (const COutPoint& hashTx : coins_to_uncache)
1122  active_chainstate.CoinsTip().Uncache(hashTx);
1123  }
1124  // After we've (potentially) uncached entries, ensure our coins cache is still within its size limits
1125  BlockValidationState state_dummy;
1126  active_chainstate.FlushStateToDisk(state_dummy, FlushStateMode::PERIODIC);
1127  return result;
1128 }
1129 
1131  bool bypass_limits, bool test_accept)
1132 {
1133  return AcceptToMemoryPoolWithTime(Params(), pool, active_chainstate, tx, GetTime(), bypass_limits, test_accept);
1134 }
1135 
1137  const Package& package, bool test_accept)
1138 {
1140  assert(test_accept); // Only allow package accept dry-runs (testmempoolaccept RPC).
1141  assert(!package.empty());
1142  assert(std::all_of(package.cbegin(), package.cend(), [](const auto& tx){return tx != nullptr;}));
1143 
1144  std::vector<COutPoint> coins_to_uncache;
1145  const CChainParams& chainparams = Params();
1146  MemPoolAccept::ATMPArgs args { chainparams, GetTime(), /* bypass_limits */ false, coins_to_uncache,
1147  test_accept, /* m_allow_bip125_replacement */ false };
1148  const PackageMempoolAcceptResult result = MemPoolAccept(pool, active_chainstate).AcceptMultipleTransactions(package, args);
1149 
1150  // Uncache coins pertaining to transactions that were not submitted to the mempool.
1151  for (const COutPoint& hashTx : coins_to_uncache) {
1152  active_chainstate.CoinsTip().Uncache(hashTx);
1153  }
1154  return result;
1155 }
1156 
1157 CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams)
1158 {
1159  int halvings = nHeight / consensusParams.nSubsidyHalvingInterval;
1160  // Force block reward to zero when right shift is undefined.
1161  if (halvings >= 64)
1162  return 0;
1163 
1164  CAmount nSubsidy = 50 * COIN;
1165  // Subsidy is cut in half every 210,000 blocks which will occur approximately every 4 years.
1166  nSubsidy >>= halvings;
1167  return nSubsidy;
1168 }
1169 
1171  std::string ldb_name,
1172  size_t cache_size_bytes,
1173  bool in_memory,
1174  bool should_wipe) : m_dbview(
1175  gArgs.GetDataDirNet() / ldb_name, cache_size_bytes, in_memory, should_wipe),
1176  m_catcherview(&m_dbview) {}
1177 
1178 void CoinsViews::InitCache()
1179 {
1180  m_cacheview = std::make_unique<CCoinsViewCache>(&m_catcherview);
1181 }
1182 
1183 CChainState::CChainState(CTxMemPool* mempool, BlockManager& blockman, std::optional<uint256> from_snapshot_blockhash)
1184  : m_mempool(mempool),
1185  m_params(::Params()),
1186  m_blockman(blockman),
1187  m_from_snapshot_blockhash(from_snapshot_blockhash) {}
1188 
1190  size_t cache_size_bytes,
1191  bool in_memory,
1192  bool should_wipe,
1193  std::string leveldb_name)
1194 {
1196  leveldb_name += "_" + m_from_snapshot_blockhash->ToString();
1197  }
1198 
1199  m_coins_views = std::make_unique<CoinsViews>(
1200  leveldb_name, cache_size_bytes, in_memory, should_wipe);
1201 }
1202 
1203 void CChainState::InitCoinsCache(size_t cache_size_bytes)
1204 {
1205  assert(m_coins_views != nullptr);
1206  m_coinstip_cache_size_bytes = cache_size_bytes;
1207  m_coins_views->InitCache();
1208 }
1209 
1210 // Note that though this is marked const, we may end up modifying `m_cached_finished_ibd`, which
1211 // is a performance-related implementation detail. This function must be marked
1212 // `const` so that `CValidationInterface` clients (which are given a `const CChainState*`)
1213 // can call it.
1214 //
1216 {
1217  // Optimization: pre-test latch before taking the lock.
1218  if (m_cached_finished_ibd.load(std::memory_order_relaxed))
1219  return false;
1220 
1221  LOCK(cs_main);
1222  if (m_cached_finished_ibd.load(std::memory_order_relaxed))
1223  return false;
1224  if (fImporting || fReindex)
1225  return true;
1226  if (m_chain.Tip() == nullptr)
1227  return true;
1229  return true;
1230  if (m_chain.Tip()->GetBlockTime() < (GetTime() - nMaxTipAge))
1231  return true;
1232  LogPrintf("Leaving InitialBlockDownload (latching to false)\n");
1233  m_cached_finished_ibd.store(true, std::memory_order_relaxed);
1234  return false;
1235 }
1236 
1237 static void AlertNotify(const std::string& strMessage)
1238 {
1239  uiInterface.NotifyAlertChanged();
1240 #if HAVE_SYSTEM
1241  std::string strCmd = gArgs.GetArg("-alertnotify", "");
1242  if (strCmd.empty()) return;
1243 
1244  // Alert text should be plain ascii coming from a trusted source, but to
1245  // be safe we first strip anything not in safeChars, then add single quotes around
1246  // the whole string before passing it to the shell:
1247  std::string singleQuote("'");
1248  std::string safeStatus = SanitizeString(strMessage);
1249  safeStatus = singleQuote+safeStatus+singleQuote;
1250  boost::replace_all(strCmd, "%s", safeStatus);
1251 
1252  std::thread t(runCommand, strCmd);
1253  t.detach(); // thread runs free
1254 #endif
1255 }
1256 
1258 {
1260 
1261  // Before we get past initial download, we cannot reliably alert about forks
1262  // (we assume we don't get stuck on a fork before finishing our initial sync)
1263  if (IsInitialBlockDownload()) {
1264  return;
1265  }
1266 
1267  if (pindexBestInvalid && pindexBestInvalid->nChainWork > m_chain.Tip()->nChainWork + (GetBlockProof(*m_chain.Tip()) * 6)) {
1268  LogPrintf("%s: Warning: Found invalid chain at least ~6 blocks longer than our best chain.\nChain state database corruption likely.\n", __func__);
1270  } else {
1272  }
1273 }
1274 
1275 // Called both upon regular invalid block discovery *and* InvalidateBlock
1277 {
1278  if (!pindexBestInvalid || pindexNew->nChainWork > pindexBestInvalid->nChainWork)
1279  pindexBestInvalid = pindexNew;
1280  if (pindexBestHeader != nullptr && pindexBestHeader->GetAncestor(pindexNew->nHeight) == pindexNew) {
1282  }
1283 
1284  LogPrintf("%s: invalid block=%s height=%d log2_work=%f date=%s\n", __func__,
1285  pindexNew->GetBlockHash().ToString(), pindexNew->nHeight,
1286  log(pindexNew->nChainWork.getdouble())/log(2.0), FormatISO8601DateTime(pindexNew->GetBlockTime()));
1287  CBlockIndex *tip = m_chain.Tip();
1288  assert (tip);
1289  LogPrintf("%s: current best=%s height=%d log2_work=%f date=%s\n", __func__,
1290  tip->GetBlockHash().ToString(), m_chain.Height(), log(tip->nChainWork.getdouble())/log(2.0),
1293 }
1294 
1295 // Same as InvalidChainFound, above, except not called directly from InvalidateBlock,
1296 // which does its own setBlockIndexCandidates management.
1298 {
1300  pindex->nStatus |= BLOCK_FAILED_VALID;
1301  m_blockman.m_failed_blocks.insert(pindex);
1302  setDirtyBlockIndex.insert(pindex);
1303  setBlockIndexCandidates.erase(pindex);
1304  InvalidChainFound(pindex);
1305  }
1306 }
1307 
1308 void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, CTxUndo &txundo, int nHeight)
1309 {
1310  // mark inputs spent
1311  if (!tx.IsCoinBase()) {
1312  txundo.vprevout.reserve(tx.vin.size());
1313  for (const CTxIn &txin : tx.vin) {
1314  txundo.vprevout.emplace_back();
1315  bool is_spent = inputs.SpendCoin(txin.prevout, &txundo.vprevout.back());
1316  assert(is_spent);
1317  }
1318  }
1319  // add outputs
1320  AddCoins(inputs, tx, nHeight);
1321 }
1322 
1323 void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, int nHeight)
1324 {
1325  CTxUndo txundo;
1326  UpdateCoins(tx, inputs, txundo, nHeight);
1327 }
1328 
1330  const CScript &scriptSig = ptxTo->vin[nIn].scriptSig;
1331  const CScriptWitness *witness = &ptxTo->vin[nIn].scriptWitness;
1333 }
1334 
1336 {
1338  CBlockIndex* pindexPrev = LookupBlockIndex(inputs.GetBestBlock());
1339  return pindexPrev->nHeight + 1;
1340 }
1341 
1342 
1345 
1347  // Setup the salted hasher
1349  // We want the nonce to be 64 bytes long to force the hasher to process
1350  // this chunk, which makes later hash computations more efficient. We
1351  // just write our 32-byte entropy twice to fill the 64 bytes.
1354  // nMaxCacheSize is unsigned. If -maxsigcachesize is set to zero,
1355  // setup_bytes creates the minimum possible cache (2 elements).
1356  size_t nMaxCacheSize = std::min(std::max((int64_t)0, gArgs.GetArg("-maxsigcachesize", DEFAULT_MAX_SIG_CACHE_SIZE) / 2), MAX_MAX_SIG_CACHE_SIZE) * ((size_t) 1 << 20);
1357  size_t nElems = g_scriptExecutionCache.setup_bytes(nMaxCacheSize);
1358  LogPrintf("Using %zu MiB out of %zu/2 requested for script execution cache, able to store %zu elements\n",
1359  (nElems*sizeof(uint256)) >>20, (nMaxCacheSize*2)>>20, nElems);
1360 }
1361 
1382  const CCoinsViewCache& inputs, unsigned int flags, bool cacheSigStore,
1383  bool cacheFullScriptStore, PrecomputedTransactionData& txdata,
1384  std::vector<CScriptCheck>* pvChecks)
1385 {
1386  if (tx.IsCoinBase()) return true;
1387 
1388  if (pvChecks) {
1389  pvChecks->reserve(tx.vin.size());
1390  }
1391 
1392  // First check if script executions have been cached with the same
1393  // flags. Note that this assumes that the inputs provided are
1394  // correct (ie that the transaction hash which is in tx's prevouts
1395  // properly commits to the scriptPubKey in the inputs view of that
1396  // transaction).
1397  uint256 hashCacheEntry;
1399  hasher.Write(tx.GetWitnessHash().begin(), 32).Write((unsigned char*)&flags, sizeof(flags)).Finalize(hashCacheEntry.begin());
1400  AssertLockHeld(cs_main); //TODO: Remove this requirement by making CuckooCache not require external locks
1401  if (g_scriptExecutionCache.contains(hashCacheEntry, !cacheFullScriptStore)) {
1402  return true;
1403  }
1404 
1405  if (!txdata.m_spent_outputs_ready) {
1406  std::vector<CTxOut> spent_outputs;
1407  spent_outputs.reserve(tx.vin.size());
1408 
1409  for (const auto& txin : tx.vin) {
1410  const COutPoint& prevout = txin.prevout;
1411  const Coin& coin = inputs.AccessCoin(prevout);
1412  assert(!coin.IsSpent());
1413  spent_outputs.emplace_back(coin.out);
1414  }
1415  txdata.Init(tx, std::move(spent_outputs));
1416  }
1417  assert(txdata.m_spent_outputs.size() == tx.vin.size());
1418 
1419  for (unsigned int i = 0; i < tx.vin.size(); i++) {
1420 
1421  // We very carefully only pass in things to CScriptCheck which
1422  // are clearly committed to by tx' witness hash. This provides
1423  // a sanity check that our caching is not introducing consensus
1424  // failures through additional data in, eg, the coins being
1425  // spent being checked as a part of CScriptCheck.
1426 
1427  // Verify signature
1428  CScriptCheck check(txdata.m_spent_outputs[i], tx, i, flags, cacheSigStore, &txdata);
1429  if (pvChecks) {
1430  pvChecks->push_back(CScriptCheck());
1431  check.swap(pvChecks->back());
1432  } else if (!check()) {
1434  // Check whether the failure was caused by a
1435  // non-mandatory script verification check, such as
1436  // non-standard DER encodings or non-null dummy
1437  // arguments; if so, ensure we return NOT_STANDARD
1438  // instead of CONSENSUS to avoid downstream users
1439  // splitting the network between upgraded and
1440  // non-upgraded nodes by banning CONSENSUS-failing
1441  // data providers.
1442  CScriptCheck check2(txdata.m_spent_outputs[i], tx, i,
1443  flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, cacheSigStore, &txdata);
1444  if (check2())
1445  return state.Invalid(TxValidationResult::TX_NOT_STANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check.GetScriptError())));
1446  }
1447  // MANDATORY flag failures correspond to
1448  // TxValidationResult::TX_CONSENSUS. Because CONSENSUS
1449  // failures are the most serious case of validation
1450  // failures, we may need to consider using
1451  // RECENT_CONSENSUS_CHANGE for any script failure that
1452  // could be due to non-upgraded nodes which we may want to
1453  // support, to avoid splitting the network (but this
1454  // depends on the details of how net_processing handles
1455  // such errors).
1456  return state.Invalid(TxValidationResult::TX_CONSENSUS, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(check.GetScriptError())));
1457  }
1458  }
1459 
1460  if (cacheFullScriptStore && !pvChecks) {
1461  // We executed all of the provided scripts, and were told to
1462  // cache the result. Do so now.
1463  g_scriptExecutionCache.insert(hashCacheEntry);
1464  }
1465 
1466  return true;
1467 }
1468 
1469 bool AbortNode(BlockValidationState& state, const std::string& strMessage, const bilingual_str& userMessage)
1470 {
1471  AbortNode(strMessage, userMessage);
1472  return state.Error(strMessage);
1473 }
1474 
1482 int ApplyTxInUndo(Coin&& undo, CCoinsViewCache& view, const COutPoint& out)
1483 {
1484  bool fClean = true;
1485 
1486  if (view.HaveCoin(out)) fClean = false; // overwriting transaction output
1487 
1488  if (undo.nHeight == 0) {
1489  // Missing undo metadata (height and coinbase). Older versions included this
1490  // information only in undo records for the last spend of a transactions'
1491  // outputs. This implies that it must be present for some other output of the same tx.
1492  const Coin& alternate = AccessByTxid(view, out.hash);
1493  if (!alternate.IsSpent()) {
1494  undo.nHeight = alternate.nHeight;
1495  undo.fCoinBase = alternate.fCoinBase;
1496  } else {
1497  return DISCONNECT_FAILED; // adding output for transaction without known metadata
1498  }
1499  }
1500  // If the coin already exists as an unspent coin in the cache, then the
1501  // possible_overwrite parameter to AddCoin must be set to true. We have
1502  // already checked whether an unspent coin exists above using HaveCoin, so
1503  // we don't need to guess. When fClean is false, an unspent coin already
1504  // existed and it is an overwrite.
1505  view.AddCoin(out, std::move(undo), !fClean);
1506 
1507  return fClean ? DISCONNECT_OK : DISCONNECT_UNCLEAN;
1508 }
1509 
1513 {
1514  bool fClean = true;
1515 
1516  CBlockUndo blockUndo;
1517  if (!UndoReadFromDisk(blockUndo, pindex)) {
1518  error("DisconnectBlock(): failure reading undo data");
1519  return DISCONNECT_FAILED;
1520  }
1521 
1522  if (blockUndo.vtxundo.size() + 1 != block.vtx.size()) {
1523  error("DisconnectBlock(): block and undo data inconsistent");
1524  return DISCONNECT_FAILED;
1525  }
1526 
1527  // undo transactions in reverse order
1528  for (int i = block.vtx.size() - 1; i >= 0; i--) {
1529  const CTransaction &tx = *(block.vtx[i]);
1530  uint256 hash = tx.GetHash();
1531  bool is_coinbase = tx.IsCoinBase();
1532 
1533  // Check that all outputs are available and match the outputs in the block itself
1534  // exactly.
1535  for (size_t o = 0; o < tx.vout.size(); o++) {
1536  if (!tx.vout[o].scriptPubKey.IsUnspendable()) {
1537  COutPoint out(hash, o);
1538  Coin coin;
1539  bool is_spent = view.SpendCoin(out, &coin);
1540  if (!is_spent || tx.vout[o] != coin.out || pindex->nHeight != coin.nHeight || is_coinbase != coin.fCoinBase) {
1541  fClean = false; // transaction output mismatch
1542  }
1543  }
1544  }
1545 
1546  // restore inputs
1547  if (i > 0) { // not coinbases
1548  CTxUndo &txundo = blockUndo.vtxundo[i-1];
1549  if (txundo.vprevout.size() != tx.vin.size()) {
1550  error("DisconnectBlock(): transaction and undo data inconsistent");
1551  return DISCONNECT_FAILED;
1552  }
1553  for (unsigned int j = tx.vin.size(); j-- > 0;) {
1554  const COutPoint &out = tx.vin[j].prevout;
1555  int res = ApplyTxInUndo(std::move(txundo.vprevout[j]), view, out);
1556  if (res == DISCONNECT_FAILED) return DISCONNECT_FAILED;
1557  fClean = fClean && res != DISCONNECT_UNCLEAN;
1558  }
1559  // At this point, all of txundo.vprevout should have been moved out.
1560  }
1561  }
1562 
1563  // move best block pointer to prevout block
1564  view.SetBestBlock(pindex->pprev->GetBlockHash());
1565 
1566  return fClean ? DISCONNECT_OK : DISCONNECT_UNCLEAN;
1567 }
1568 
1570 
1571 void StartScriptCheckWorkerThreads(int threads_num)
1572 {
1573  scriptcheckqueue.StartWorkerThreads(threads_num);
1574 }
1575 
1577 {
1578  scriptcheckqueue.StopWorkerThreads();
1579 }
1580 
1585 {
1586 private:
1587  int bit;
1588 
1589 public:
1590  explicit WarningBitsConditionChecker(int bitIn) : bit(bitIn) {}
1591 
1592  int64_t BeginTime(const Consensus::Params& params) const override { return 0; }
1593  int64_t EndTime(const Consensus::Params& params) const override { return std::numeric_limits<int64_t>::max(); }
1594  int Period(const Consensus::Params& params) const override { return params.nMinerConfirmationWindow; }
1595  int Threshold(const Consensus::Params& params) const override { return params.nRuleChangeActivationThreshold; }
1596 
1597  bool Condition(const CBlockIndex* pindex, const Consensus::Params& params) const override
1598  {
1599  return pindex->nHeight >= params.MinBIP9WarningHeight &&
1601  ((pindex->nVersion >> bit) & 1) != 0 &&
1602  ((g_versionbitscache.ComputeBlockVersion(pindex->pprev, params) >> bit) & 1) == 0;
1603  }
1604 };
1605 
1607 
1608 static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consensus::Params& consensusparams)
1609 {
1610  unsigned int flags = SCRIPT_VERIFY_NONE;
1611 
1612  // BIP16 didn't become active until Apr 1 2012 (on mainnet, and
1613  // retroactively applied to testnet)
1614  // However, only one historical block violated the P2SH rules (on both
1615  // mainnet and testnet), so for simplicity, always leave P2SH
1616  // on except for the one violating block.
1617  if (consensusparams.BIP16Exception.IsNull() || // no bip16 exception on this chain
1618  pindex->phashBlock == nullptr || // this is a new candidate block, eg from TestBlockValidity()
1619  *pindex->phashBlock != consensusparams.BIP16Exception) // this block isn't the historical exception
1620  {
1621  // Enforce WITNESS rules whenever P2SH is in effect
1623  }
1624 
1625  // Enforce the DERSIG (BIP66) rule
1626  if (DeploymentActiveAt(*pindex, consensusparams, Consensus::DEPLOYMENT_DERSIG)) {
1628  }
1629 
1630  // Enforce CHECKLOCKTIMEVERIFY (BIP65)
1631  if (DeploymentActiveAt(*pindex, consensusparams, Consensus::DEPLOYMENT_CLTV)) {
1633  }
1634 
1635  // Enforce CHECKSEQUENCEVERIFY (BIP112)
1636  if (DeploymentActiveAt(*pindex, consensusparams, Consensus::DEPLOYMENT_CSV)) {
1638  }
1639 
1640  // Enforce Taproot (BIP340-BIP342)
1641  if (DeploymentActiveAt(*pindex, consensusparams, Consensus::DEPLOYMENT_TAPROOT)) {
1643  }
1644 
1645  // Enforce BIP147 NULLDUMMY (activated simultaneously with segwit)
1646  if (DeploymentActiveAt(*pindex, consensusparams, Consensus::DEPLOYMENT_SEGWIT)) {
1648  }
1649 
1650  return flags;
1651 }
1652 
1653 
1654 
// Cumulative wall-clock totals (microseconds) for the BCLog::BENCH timing
// output emitted by ConnectBlock(); each counter accumulates one validation
// phase across all blocks connected in this process.
static int64_t nTimeCheck = 0;     // CheckBlock() sanity re-checks
static int64_t nTimeForks = 0;     // fork/deployment flag computation
static int64_t nTimeVerify = 0;    // input/script verification
static int64_t nTimeConnect = 0;   // per-transaction connect loop
static int64_t nTimeIndex = 0;     // undo/index writing
static int64_t nTimeCallbacks = 0; // validation-interface callbacks
static int64_t nTimeTotal = 0;     // overall total (not updated in the code shown here — confirm usage)
static int64_t nBlocksTotal = 0;   // blocks connected so far; divisor for per-block averages
1663 
1668  CCoinsViewCache& view, bool fJustCheck)
1669 {
1671  assert(pindex);
1672  assert(*pindex->phashBlock == block.GetHash());
1673  int64_t nTimeStart = GetTimeMicros();
1674 
1675  // Check it again in case a previous version let a bad block in
1676  // NOTE: We don't currently (re-)invoke ContextualCheckBlock() or
1677  // ContextualCheckBlockHeader() here. This means that if we add a new
1678  // consensus rule that is enforced in one of those two functions, then we
1679  // may have let in a block that violates the rule prior to updating the
1680  // software, and we would NOT be enforcing the rule here. Fully solving
1681  // upgrade from one software version to the next after a consensus rule
1682  // change is potentially tricky and issue-specific (see NeedsRedownload()
1683  // for one approach that was used for BIP 141 deployment).
1684  // Also, currently the rule against blocks more than 2 hours in the future
1685  // is enforced in ContextualCheckBlockHeader(); we wouldn't want to
1686  // re-enforce that rule here (at least until we make it impossible for
1687  // GetAdjustedTime() to go backward).
1688  if (!CheckBlock(block, state, m_params.GetConsensus(), !fJustCheck, !fJustCheck)) {
1690  // We don't write down blocks to disk if they may have been
1691  // corrupted, so this should be impossible unless we're having hardware
1692  // problems.
1693  return AbortNode(state, "Corrupt block found indicating potential hardware failure; shutting down");
1694  }
1695  return error("%s: Consensus::CheckBlock: %s", __func__, state.ToString());
1696  }
1697 
1698  // verify that the view's current state corresponds to the previous block
1699  uint256 hashPrevBlock = pindex->pprev == nullptr ? uint256() : pindex->pprev->GetBlockHash();
1700  assert(hashPrevBlock == view.GetBestBlock());
1701 
1702  nBlocksTotal++;
1703 
1704  // Special case for the genesis block, skipping connection of its transactions
1705  // (its coinbase is unspendable)
1706  if (block.GetHash() == m_params.GetConsensus().hashGenesisBlock) {
1707  if (!fJustCheck)
1708  view.SetBestBlock(pindex->GetBlockHash());
1709  return true;
1710  }
1711 
1712  bool fScriptChecks = true;
1713  if (!hashAssumeValid.IsNull()) {
1714  // We've been configured with the hash of a block which has been externally verified to have a valid history.
1715  // A suitable default value is included with the software and updated from time to time. Because validity
1716  // relative to a piece of software is an objective fact these defaults can be easily reviewed.
1717  // This setting doesn't force the selection of any particular chain but makes validating some faster by
1718  // effectively caching the result of part of the verification.
1719  BlockMap::const_iterator it = m_blockman.m_block_index.find(hashAssumeValid);
1720  if (it != m_blockman.m_block_index.end()) {
1721  if (it->second->GetAncestor(pindex->nHeight) == pindex &&
1722  pindexBestHeader->GetAncestor(pindex->nHeight) == pindex &&
1724  // This block is a member of the assumed verified chain and an ancestor of the best header.
1725  // Script verification is skipped when connecting blocks under the
1726  // assumevalid block. Assuming the assumevalid block is valid this
1727  // is safe because block merkle hashes are still computed and checked,
1728  // Of course, if an assumed valid block is invalid due to false scriptSigs
1729  // this optimization would allow an invalid chain to be accepted.
1730  // The equivalent time check discourages hash power from extorting the network via DOS attack
1731  // into accepting an invalid block through telling users they must manually set assumevalid.
1732  // Requiring a software change or burying the invalid block, regardless of the setting, makes
1733  // it hard to hide the implication of the demand. This also avoids having release candidates
1734  // that are hardly doing any signature verification at all in testing without having to
1735  // artificially set the default assumed verified block further back.
1736  // The test against nMinimumChainWork prevents the skipping when denied access to any chain at
1737  // least as good as the expected chain.
1738  fScriptChecks = (GetBlockProofEquivalentTime(*pindexBestHeader, *pindex, *pindexBestHeader, m_params.GetConsensus()) <= 60 * 60 * 24 * 7 * 2);
1739  }
1740  }
1741  }
1742 
1743  int64_t nTime1 = GetTimeMicros(); nTimeCheck += nTime1 - nTimeStart;
1744  LogPrint(BCLog::BENCH, " - Sanity checks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime1 - nTimeStart), nTimeCheck * MICRO, nTimeCheck * MILLI / nBlocksTotal);
1745 
1746  // Do not allow blocks that contain transactions which 'overwrite' older transactions,
1747  // unless those are already completely spent.
1748  // If such overwrites are allowed, coinbases and transactions depending upon those
1749  // can be duplicated to remove the ability to spend the first instance -- even after
1750  // being sent to another address.
1751  // See BIP30, CVE-2012-1909, and http://r6.ca/blog/20120206T005236Z.html for more information.
1752  // This logic is not necessary for memory pool transactions, as AcceptToMemoryPool
1753  // already refuses previously-known transaction ids entirely.
1754  // This rule was originally applied to all blocks with a timestamp after March 15, 2012, 0:00 UTC.
1755  // Now that the whole chain is irreversibly beyond that time it is applied to all blocks except the
1756  // two in the chain that violate it. This prevents exploiting the issue against nodes during their
1757  // initial block download.
1758  bool fEnforceBIP30 = !((pindex->nHeight==91842 && pindex->GetBlockHash() == uint256S("0x00000000000a4d0a398161ffc163c503763b1f4360639393e0e4c8e300e0caec")) ||
1759  (pindex->nHeight==91880 && pindex->GetBlockHash() == uint256S("0x00000000000743f190a18c5577a3c2d2a1f610ae9601ac046a38084ccb7cd721")));
1760 
1761  // Once BIP34 activated it was not possible to create new duplicate coinbases and thus other than starting
1762  // with the 2 existing duplicate coinbase pairs, not possible to create overwriting txs. But by the
1763  // time BIP34 activated, in each of the existing pairs the duplicate coinbase had overwritten the first
1764  // before the first had been spent. Since those coinbases are sufficiently buried it's no longer possible to create further
1765  // duplicate transactions descending from the known pairs either.
1766  // If we're on the known chain at height greater than where BIP34 activated, we can save the db accesses needed for the BIP30 check.
1767 
1768  // BIP34 requires that a block at height X (block X) has its coinbase
1769  // scriptSig start with a CScriptNum of X (indicated height X). The above
1770  // logic of no longer requiring BIP30 once BIP34 activates is flawed in the
1771  // case that there is a block X before the BIP34 height of 227,931 which has
1772  // an indicated height Y where Y is greater than X. The coinbase for block
1773  // X would also be a valid coinbase for block Y, which could be a BIP30
1774  // violation. An exhaustive search of all mainnet coinbases before the
1775  // BIP34 height which have an indicated height greater than the block height
1776  // reveals many occurrences. The 3 lowest indicated heights found are
1777  // 209,921, 490,897, and 1,983,702 and thus coinbases for blocks at these 3
1778  // heights would be the first opportunity for BIP30 to be violated.
1779 
1780  // The search reveals a great many blocks which have an indicated height
1781  // greater than 1,983,702, so we simply remove the optimization to skip
1782  // BIP30 checking for blocks at height 1,983,702 or higher. Before we reach
1783  // that block in another 25 years or so, we should take advantage of a
1784  // future consensus change to do a new and improved version of BIP34 that
1785  // will actually prevent ever creating any duplicate coinbases in the
1786  // future.
1787  static constexpr int BIP34_IMPLIES_BIP30_LIMIT = 1983702;
1788 
1789  // There is no potential to create a duplicate coinbase at block 209,921
1790  // because this is still before the BIP34 height and so explicit BIP30
1791  // checking is still active.
1792 
1793  // The final case is block 176,684 which has an indicated height of
1794  // 490,897. Unfortunately, this issue was not discovered until about 2 weeks
1795  // before block 490,897 so there was not much opportunity to address this
1796  // case other than to carefully analyze it and determine it would not be a
1797  // problem. Block 490,897 was, in fact, mined with a different coinbase than
1798  // block 176,684, but it is important to note that even if it hadn't been or
1799  // is remined on an alternate fork with a duplicate coinbase, we would still
1800  // not run into a BIP30 violation. This is because the coinbase for 176,684
1801  // is spent in block 185,956 in transaction
1802  // d4f7fbbf92f4a3014a230b2dc70b8058d02eb36ac06b4a0736d9d60eaa9e8781. This
1803  // spending transaction can't be duplicated because it also spends coinbase
1804  // 0328dd85c331237f18e781d692c92de57649529bd5edf1d01036daea32ffde29. This
1805  // coinbase has an indicated height of over 4.2 billion, and wouldn't be
1806  // duplicatable until that height, and it's currently impossible to create a
1807  // chain that long. Nevertheless we may wish to consider a future soft fork
1808  // which retroactively prevents block 490,897 from creating a duplicate
1809  // coinbase. The two historical BIP30 violations often provide a confusing
1810  // edge case when manipulating the UTXO and it would be simpler not to have
1811  // another edge case to deal with.
1812 
1813  // testnet3 has no blocks before the BIP34 height with indicated heights
1814  // post BIP34 before approximately height 486,000,000 and presumably will
1815  // be reset before it reaches block 1,983,702 and starts doing unnecessary
1816  // BIP30 checking again.
1817  assert(pindex->pprev);
1818  CBlockIndex* pindexBIP34height = pindex->pprev->GetAncestor(m_params.GetConsensus().BIP34Height);
1819  //Only continue to enforce if we're below BIP34 activation height or the block hash at that height doesn't correspond.
1820  fEnforceBIP30 = fEnforceBIP30 && (!pindexBIP34height || !(pindexBIP34height->GetBlockHash() == m_params.GetConsensus().BIP34Hash));
1821 
1822  // TODO: Remove BIP30 checking from block height 1,983,702 on, once we have a
1823  // consensus change that ensures coinbases at those heights can not
1824  // duplicate earlier coinbases.
1825  if (fEnforceBIP30 || pindex->nHeight >= BIP34_IMPLIES_BIP30_LIMIT) {
1826  for (const auto& tx : block.vtx) {
1827  for (size_t o = 0; o < tx->vout.size(); o++) {
1828  if (view.HaveCoin(COutPoint(tx->GetHash(), o))) {
1829  LogPrintf("ERROR: ConnectBlock(): tried to overwrite transaction\n");
1830  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-BIP30");
1831  }
1832  }
1833  }
1834  }
1835 
1836  // Enforce BIP68 (sequence locks)
1837  int nLockTimeFlags = 0;
1839  nLockTimeFlags |= LOCKTIME_VERIFY_SEQUENCE;
1840  }
1841 
1842  // Get the script flags for this block
1843  unsigned int flags = GetBlockScriptFlags(pindex, m_params.GetConsensus());
1844 
1845  int64_t nTime2 = GetTimeMicros(); nTimeForks += nTime2 - nTime1;
1846  LogPrint(BCLog::BENCH, " - Fork checks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime2 - nTime1), nTimeForks * MICRO, nTimeForks * MILLI / nBlocksTotal);
1847 
1848  CBlockUndo blockundo;
1849 
1850  // Precomputed transaction data pointers must not be invalidated
1851  // until after `control` has run the script checks (potentially
1852  // in multiple threads). Preallocate the vector size so a new allocation
1853  // doesn't invalidate pointers into the vector, and keep txsdata in scope
1854  // for as long as `control`.
1855  CCheckQueueControl<CScriptCheck> control(fScriptChecks && g_parallel_script_checks ? &scriptcheckqueue : nullptr);
1856  std::vector<PrecomputedTransactionData> txsdata(block.vtx.size());
1857 
1858  std::vector<int> prevheights;
1859  CAmount nFees = 0;
1860  int nInputs = 0;
1861  int64_t nSigOpsCost = 0;
1862  blockundo.vtxundo.reserve(block.vtx.size() - 1);
1863  for (unsigned int i = 0; i < block.vtx.size(); i++)
1864  {
1865  const CTransaction &tx = *(block.vtx[i]);
1866 
1867  nInputs += tx.vin.size();
1868 
1869  if (!tx.IsCoinBase())
1870  {
1871  CAmount txfee = 0;
1872  TxValidationState tx_state;
1873  if (!Consensus::CheckTxInputs(tx, tx_state, view, pindex->nHeight, txfee)) {
1874  // Any transaction validation failure in ConnectBlock is a block consensus failure
1876  tx_state.GetRejectReason(), tx_state.GetDebugMessage());
1877  return error("%s: Consensus::CheckTxInputs: %s, %s", __func__, tx.GetHash().ToString(), state.ToString());
1878  }
1879  nFees += txfee;
1880  if (!MoneyRange(nFees)) {
1881  LogPrintf("ERROR: %s: accumulated fee in the block out of range.\n", __func__);
1882  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-accumulated-fee-outofrange");
1883  }
1884 
1885  // Check that transaction is BIP68 final
1886  // BIP68 lock checks (as opposed to nLockTime checks) must
1887  // be in ConnectBlock because they require the UTXO set
1888  prevheights.resize(tx.vin.size());
1889  for (size_t j = 0; j < tx.vin.size(); j++) {
1890  prevheights[j] = view.AccessCoin(tx.vin[j].prevout).nHeight;
1891  }
1892 
1893  if (!SequenceLocks(tx, nLockTimeFlags, prevheights, *pindex)) {
1894  LogPrintf("ERROR: %s: contains a non-BIP68-final transaction\n", __func__);
1895  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-nonfinal");
1896  }
1897  }
1898 
1899  // GetTransactionSigOpCost counts 3 types of sigops:
1900  // * legacy (always)
1901  // * p2sh (when P2SH enabled in flags and excludes coinbase)
1902  // * witness (when witness enabled in flags and excludes coinbase)
1903  nSigOpsCost += GetTransactionSigOpCost(tx, view, flags);
1904  if (nSigOpsCost > MAX_BLOCK_SIGOPS_COST) {
1905  LogPrintf("ERROR: ConnectBlock(): too many sigops\n");
1906  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-sigops");
1907  }
1908 
1909  if (!tx.IsCoinBase())
1910  {
1911  std::vector<CScriptCheck> vChecks;
1912  bool fCacheResults = fJustCheck; /* Don't cache results if we're actually connecting blocks (still consult the cache, though) */
1913  TxValidationState tx_state;
1914  if (fScriptChecks && !CheckInputScripts(tx, tx_state, view, flags, fCacheResults, fCacheResults, txsdata[i], g_parallel_script_checks ? &vChecks : nullptr)) {
1915  // Any transaction validation failure in ConnectBlock is a block consensus failure
1917  tx_state.GetRejectReason(), tx_state.GetDebugMessage());
1918  return error("ConnectBlock(): CheckInputScripts on %s failed with %s",
1919  tx.GetHash().ToString(), state.ToString());
1920  }
1921  control.Add(vChecks);
1922  }
1923 
1924  CTxUndo undoDummy;
1925  if (i > 0) {
1926  blockundo.vtxundo.push_back(CTxUndo());
1927  }
1928  UpdateCoins(tx, view, i == 0 ? undoDummy : blockundo.vtxundo.back(), pindex->nHeight);
1929  }
1930  int64_t nTime3 = GetTimeMicros(); nTimeConnect += nTime3 - nTime2;
1931  LogPrint(BCLog::BENCH, " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs (%.2fms/blk)]\n", (unsigned)block.vtx.size(), MILLI * (nTime3 - nTime2), MILLI * (nTime3 - nTime2) / block.vtx.size(), nInputs <= 1 ? 0 : MILLI * (nTime3 - nTime2) / (nInputs-1), nTimeConnect * MICRO, nTimeConnect * MILLI / nBlocksTotal);
1932 
1933  CAmount blockReward = nFees + GetBlockSubsidy(pindex->nHeight, m_params.GetConsensus());
1934  if (block.vtx[0]->GetValueOut() > blockReward) {
1935  LogPrintf("ERROR: ConnectBlock(): coinbase pays too much (actual=%d vs limit=%d)\n", block.vtx[0]->GetValueOut(), blockReward);
1936  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-amount");
1937  }
1938 
1939  if (!control.Wait()) {
1940  LogPrintf("ERROR: %s: CheckQueue failed\n", __func__);
1941  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "block-validation-failed");
1942  }
1943  int64_t nTime4 = GetTimeMicros(); nTimeVerify += nTime4 - nTime2;
1944  LogPrint(BCLog::BENCH, " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs (%.2fms/blk)]\n", nInputs - 1, MILLI * (nTime4 - nTime2), nInputs <= 1 ? 0 : MILLI * (nTime4 - nTime2) / (nInputs-1), nTimeVerify * MICRO, nTimeVerify * MILLI / nBlocksTotal);
1945 
1946  if (fJustCheck)
1947  return true;
1948 
1949  if (!WriteUndoDataForBlock(blockundo, state, pindex, m_params)) {
1950  return false;
1951  }
1952 
1953  if (!pindex->IsValid(BLOCK_VALID_SCRIPTS)) {
1955  setDirtyBlockIndex.insert(pindex);
1956  }
1957 
1958  assert(pindex->phashBlock);
1959  // add this block to the view's block chain
1960  view.SetBestBlock(pindex->GetBlockHash());
1961 
1962  int64_t nTime5 = GetTimeMicros(); nTimeIndex += nTime5 - nTime4;
1963  LogPrint(BCLog::BENCH, " - Index writing: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime5 - nTime4), nTimeIndex * MICRO, nTimeIndex * MILLI / nBlocksTotal);
1964 
1965  int64_t nTime6 = GetTimeMicros(); nTimeCallbacks += nTime6 - nTime5;
1966  LogPrint(BCLog::BENCH, " - Callbacks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime6 - nTime5), nTimeCallbacks * MICRO, nTimeCallbacks * MILLI / nBlocksTotal);
1967 
1968  TRACE7(validation, block_connected,
1969  block.GetHash().ToString().c_str(),
1970  pindex->nHeight,
1971  block.vtx.size(),
1972  nInputs,
1973  nSigOpsCost,
1974  GetTimeMicros() - nTimeStart, // in microseconds (µs)
1975  block.GetHash().data()
1976  );
1977 
1978  return true;
1979 }
1980 
1981 CoinsCacheSizeState CChainState::GetCoinsCacheSizeState()
1982 {
1983  return this->GetCoinsCacheSizeState(
1985  gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000);
1986 }
1987 
1988 CoinsCacheSizeState CChainState::GetCoinsCacheSizeState(
1989  size_t max_coins_cache_size_bytes,
1990  size_t max_mempool_size_bytes)
1991 {
1992  const int64_t nMempoolUsage = m_mempool ? m_mempool->DynamicMemoryUsage() : 0;
1993  int64_t cacheSize = CoinsTip().DynamicMemoryUsage();
1994  int64_t nTotalSpace =
1995  max_coins_cache_size_bytes + std::max<int64_t>(max_mempool_size_bytes - nMempoolUsage, 0);
1996 
1998  static constexpr int64_t MAX_BLOCK_COINSDB_USAGE_BYTES = 10 * 1024 * 1024; // 10MB
1999  int64_t large_threshold =
2000  std::max((9 * nTotalSpace) / 10, nTotalSpace - MAX_BLOCK_COINSDB_USAGE_BYTES);
2001 
2002  if (cacheSize > nTotalSpace) {
2003  LogPrintf("Cache size (%s) exceeds total space (%s)\n", cacheSize, nTotalSpace);
2005  } else if (cacheSize > large_threshold) {
2007  }
2008  return CoinsCacheSizeState::OK;
2009 }
2010 
2012  BlockValidationState &state,
2013  FlushStateMode mode,
2014  int nManualPruneHeight)
2015 {
2016  LOCK(cs_main);
2017  assert(this->CanFlushToDisk());
2018  static std::chrono::microseconds nLastWrite{0};
2019  static std::chrono::microseconds nLastFlush{0};
2020  std::set<int> setFilesToPrune;
2021  bool full_flush_completed = false;
2022 
2023  const size_t coins_count = CoinsTip().GetCacheSize();
2024  const size_t coins_mem_usage = CoinsTip().DynamicMemoryUsage();
2025 
2026  try {
2027  {
2028  bool fFlushForPrune = false;
2029  bool fDoFullFlush = false;
2030 
2031  CoinsCacheSizeState cache_state = GetCoinsCacheSizeState();
2033  if (fPruneMode && (fCheckForPruning || nManualPruneHeight > 0) && !fReindex) {
2034  // make sure we don't prune above the blockfilterindexes bestblocks
2035  // pruning is height-based
2036  int last_prune = m_chain.Height(); // last height we can prune
2038  last_prune = std::max(1, std::min(last_prune, index.GetSummary().best_block_height));
2039  });
2040 
2041  if (nManualPruneHeight > 0) {
2042  LOG_TIME_MILLIS_WITH_CATEGORY("find files to prune (manual)", BCLog::BENCH);
2043 
2044  m_blockman.FindFilesToPruneManual(setFilesToPrune, std::min(last_prune, nManualPruneHeight), m_chain.Height());
2045  } else {
2046  LOG_TIME_MILLIS_WITH_CATEGORY("find files to prune", BCLog::BENCH);
2047 
2049  fCheckForPruning = false;
2050  }
2051  if (!setFilesToPrune.empty()) {
2052  fFlushForPrune = true;
2053  if (!fHavePruned) {
2054  m_blockman.m_block_tree_db->WriteFlag("prunedblockfiles", true);
2055  fHavePruned = true;
2056  }
2057  }
2058  }
2059  const auto nNow = GetTime<std::chrono::microseconds>();
2060  // Avoid writing/flushing immediately after startup.
2061  if (nLastWrite.count() == 0) {
2062  nLastWrite = nNow;
2063  }
2064  if (nLastFlush.count() == 0) {
2065  nLastFlush = nNow;
2066  }
2067  // The cache is large and we're within 10% and 10 MiB of the limit, but we have time now (not in the middle of a block processing).
2068  bool fCacheLarge = mode == FlushStateMode::PERIODIC && cache_state >= CoinsCacheSizeState::LARGE;
2069  // The cache is over the limit, we have to write now.
2070  bool fCacheCritical = mode == FlushStateMode::IF_NEEDED && cache_state >= CoinsCacheSizeState::CRITICAL;
2071  // It's been a while since we wrote the block index to disk. Do this frequently, so we don't need to redownload after a crash.
2072  bool fPeriodicWrite = mode == FlushStateMode::PERIODIC && nNow > nLastWrite + DATABASE_WRITE_INTERVAL;
2073  // It's been very long since we flushed the cache. Do this infrequently, to optimize cache usage.
2074  bool fPeriodicFlush = mode == FlushStateMode::PERIODIC && nNow > nLastFlush + DATABASE_FLUSH_INTERVAL;
2075  // Combine all conditions that result in a full cache flush.
2076  fDoFullFlush = (mode == FlushStateMode::ALWAYS) || fCacheLarge || fCacheCritical || fPeriodicFlush || fFlushForPrune;
2077  // Write blocks and block index to disk.
2078  if (fDoFullFlush || fPeriodicWrite) {
2079  // Depend on nMinDiskSpace to ensure we can write block index
2081  return AbortNode(state, "Disk space is too low!", _("Disk space is too low!"));
2082  }
2083  {
2084  LOG_TIME_MILLIS_WITH_CATEGORY("write block and undo data to disk", BCLog::BENCH);
2085 
2086  // First make sure all block and undo data is flushed to disk.
2087  FlushBlockFile();
2088  }
2089 
2090  // Then update all block file information (which may refer to block and undo files).
2091  {
2092  LOG_TIME_MILLIS_WITH_CATEGORY("write block index to disk", BCLog::BENCH);
2093 
2094  std::vector<std::pair<int, const CBlockFileInfo*> > vFiles;
2095  vFiles.reserve(setDirtyFileInfo.size());
2096  for (std::set<int>::iterator it = setDirtyFileInfo.begin(); it != setDirtyFileInfo.end(); ) {
2097  vFiles.push_back(std::make_pair(*it, &vinfoBlockFile[*it]));
2098  setDirtyFileInfo.erase(it++);
2099  }
2100  std::vector<const CBlockIndex*> vBlocks;
2101  vBlocks.reserve(setDirtyBlockIndex.size());
2102  for (std::set<CBlockIndex*>::iterator it = setDirtyBlockIndex.begin(); it != setDirtyBlockIndex.end(); ) {
2103  vBlocks.push_back(*it);
2104  setDirtyBlockIndex.erase(it++);
2105  }
2106  if (!m_blockman.m_block_tree_db->WriteBatchSync(vFiles, nLastBlockFile, vBlocks)) {
2107  return AbortNode(state, "Failed to write to block index database");
2108  }
2109  }
2110  // Finally remove any pruned files
2111  if (fFlushForPrune) {
2112  LOG_TIME_MILLIS_WITH_CATEGORY("unlink pruned files", BCLog::BENCH);
2113 
2114  UnlinkPrunedFiles(setFilesToPrune);
2115  }
2116  nLastWrite = nNow;
2117  }
2118  // Flush best chain related state. This can only be done if the blocks / block index write was also done.
2119  if (fDoFullFlush && !CoinsTip().GetBestBlock().IsNull()) {
2120  LOG_TIME_SECONDS(strprintf("write coins cache to disk (%d coins, %.2fkB)",
2121  coins_count, coins_mem_usage / 1000));
2122 
2123  // Typical Coin structures on disk are around 48 bytes in size.
2124  // Pushing a new one to the database can cause it to be written
2125  // twice (once in the log, and once in the tables). This is already
2126  // an overestimation, as most will delete an existing entry or
2127  // overwrite one. Still, use a conservative safety factor of 2.
2128  if (!CheckDiskSpace(gArgs.GetDataDirNet(), 48 * 2 * 2 * CoinsTip().GetCacheSize())) {
2129  return AbortNode(state, "Disk space is too low!", _("Disk space is too low!"));
2130  }
2131  // Flush the chainstate (which may refer to block index entries).
2132  if (!CoinsTip().Flush())
2133  return AbortNode(state, "Failed to write to coin database");
2134  nLastFlush = nNow;
2135  full_flush_completed = true;
2136  }
2137  }
2138  if (full_flush_completed) {
2139  // Update best block in wallet (so we can detect restored wallets).
2141  }
2142  } catch (const std::runtime_error& e) {
2143  return AbortNode(state, std::string("System error while flushing: ") + e.what());
2144  }
2145  return true;
2146 }
2147 
2149 {
2150  BlockValidationState state;
2151  if (!this->FlushStateToDisk(state, FlushStateMode::ALWAYS)) {
2152  LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString());
2153  }
2154 }
2155 
2157 {
2158  BlockValidationState state;
2159  fCheckForPruning = true;
2160  if (!this->FlushStateToDisk(state, FlushStateMode::NONE)) {
2161  LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString());
2162  }
2163 }
2164 
2165 static void DoWarning(const bilingual_str& warning)
2166 {
2167  static bool fWarned = false;
2168  SetMiscWarning(warning);
2169  if (!fWarned) {
2170  AlertNotify(warning.original);
2171  fWarned = true;
2172  }
2173 }
2174 
2176 static void AppendWarning(bilingual_str& res, const bilingual_str& warn)
2177 {
2178  if (!res.empty()) res += Untranslated(", ");
2179  res += warn;
2180 }
2181 
2182 void CChainState::UpdateTip(const CBlockIndex* pindexNew)
2183 {
2184  // New best block
2185  if (m_mempool) {
2187  }
2188 
2189  {
2191  g_best_block = pindexNew->GetBlockHash();
2192  g_best_block_cv.notify_all();
2193  }
2194 
2195  bilingual_str warning_messages;
2196  if (!this->IsInitialBlockDownload()) {
2197  const CBlockIndex* pindex = pindexNew;
2198  for (int bit = 0; bit < VERSIONBITS_NUM_BITS; bit++) {
2199  WarningBitsConditionChecker checker(bit);
2200  ThresholdState state = checker.GetStateFor(pindex, m_params.GetConsensus(), warningcache[bit]);
2201  if (state == ThresholdState::ACTIVE || state == ThresholdState::LOCKED_IN) {
2202  const bilingual_str warning = strprintf(_("Unknown new rules activated (versionbit %i)"), bit);
2203  if (state == ThresholdState::ACTIVE) {
2204  DoWarning(warning);
2205  } else {
2206  AppendWarning(warning_messages, warning);
2207  }
2208  }
2209  }
2210  }
2211  LogPrintf("%s: new best=%s height=%d version=0x%08x log2_work=%f tx=%lu date='%s' progress=%f cache=%.1fMiB(%utxo)%s\n", __func__,
2212  pindexNew->GetBlockHash().ToString(), pindexNew->nHeight, pindexNew->nVersion,
2213  log(pindexNew->nChainWork.getdouble())/log(2.0), (unsigned long)pindexNew->nChainTx,
2214  FormatISO8601DateTime(pindexNew->GetBlockTime()),
2215  GuessVerificationProgress(m_params.TxData(), pindexNew), this->CoinsTip().DynamicMemoryUsage() * (1.0 / (1<<20)), this->CoinsTip().GetCacheSize(),
2216  !warning_messages.empty() ? strprintf(" warning='%s'", warning_messages.original) : "");
2217 }
2218 
2230 {
2233 
2234  CBlockIndex *pindexDelete = m_chain.Tip();
2235  assert(pindexDelete);
2236  // Read block from disk.
2237  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
2238  CBlock& block = *pblock;
2239  if (!ReadBlockFromDisk(block, pindexDelete, m_params.GetConsensus())) {
2240  return error("DisconnectTip(): Failed to read block");
2241  }
2242  // Apply the block atomically to the chain state.
2243  int64_t nStart = GetTimeMicros();
2244  {
2245  CCoinsViewCache view(&CoinsTip());
2246  assert(view.GetBestBlock() == pindexDelete->GetBlockHash());
2247  if (DisconnectBlock(block, pindexDelete, view) != DISCONNECT_OK)
2248  return error("DisconnectTip(): DisconnectBlock %s failed", pindexDelete->GetBlockHash().ToString());
2249  bool flushed = view.Flush();
2250  assert(flushed);
2251  }
2252  LogPrint(BCLog::BENCH, "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart) * MILLI);
2253  // Write the chain state to disk, if necessary.
2255  return false;
2256  }
2257 
2258  if (disconnectpool && m_mempool) {
2259  // Save transactions to re-add to mempool at end of reorg
2260  for (auto it = block.vtx.rbegin(); it != block.vtx.rend(); ++it) {
2261  disconnectpool->addTransaction(*it);
2262  }
2263  while (disconnectpool->DynamicMemoryUsage() > MAX_DISCONNECTED_TX_POOL_SIZE * 1000) {
2264  // Drop the earliest entry, and remove its children from the mempool.
2265  auto it = disconnectpool->queuedTx.get<insertion_order>().begin();
2267  disconnectpool->removeEntry(it);
2268  }
2269  }
2270 
2271  m_chain.SetTip(pindexDelete->pprev);
2272 
2273  UpdateTip(pindexDelete->pprev);
2274  // Let wallets know transactions went from 1-confirmed to
2275  // 0-confirmed or conflicted:
2276  GetMainSignals().BlockDisconnected(pblock, pindexDelete);
2277  return true;
2278 }
2279 
// Cumulative BCLog::BENCH timing totals (microseconds) for the tip
// connect/disconnect path; nTimeReadFromDisk is updated in ConnectTip(),
// the others in code continuing beyond this excerpt — confirm against the
// full file.
static int64_t nTimeReadFromDisk = 0; // loading blocks from disk
static int64_t nTimeConnectTotal = 0; // total block connection time
static int64_t nTimeFlush = 0;        // flushing the per-block coins view
static int64_t nTimeChainState = 0;   // writing chain state to disk
static int64_t nTimePostConnect = 0;  // post-connect processing/callbacks
2285 
2287  CBlockIndex* pindex = nullptr;
2288  std::shared_ptr<const CBlock> pblock;
2290 };
// Interior of the ConnectTrace helper class (class header lines are missing
// from this extraction). It accumulates (pindex, pblock) pairs for each block
// connected during one ActivateBestChainStep, keeping one trailing empty
// sentinel entry at all times (see GetBlocksConnected).
2299 private:
2300  std::vector<PerBlockConnectTrace> blocksConnected;
2301 
2302 public:
     // Start with the single empty sentinel entry that BlockConnected fills in.
2303  explicit ConnectTrace() : blocksConnected(1) {}
2304 
     // Record a newly connected block. The current sentinel entry must still be
     // empty; it is populated and a fresh sentinel is appended.
2305  void BlockConnected(CBlockIndex* pindex, std::shared_ptr<const CBlock> pblock) {
2306  assert(!blocksConnected.back().pindex);
2307  assert(pindex);
2308  assert(pblock);
2309  blocksConnected.back().pindex = pindex;
2310  blocksConnected.back().pblock = std::move(pblock);
2311  blocksConnected.emplace_back();
2312  }
2313 
     // Return all completed entries, dropping the trailing sentinel first.
     // May only be called once per ConnectTrace instance (it pops the sentinel).
2314  std::vector<PerBlockConnectTrace>& GetBlocksConnected() {
2315  // We always keep one extra block at the end of our list because
2316  // blocks are added after all the conflicted transactions have
2317  // been filled in. Thus, the last entry should always be an empty
2318  // one waiting for the transactions from the next block. We pop
2319  // the last entry here to make sure the list we return is sane.
2320  assert(!blocksConnected.back().pindex);
2321  blocksConnected.pop_back();
2322  return blocksConnected;
2323  }
2324 };
2325 
/**
 * Connect a single new block to m_chain. pindexNew must be the direct
 * successor of the current tip. If pblock is null the block is read from
 * disk instead. On success the connected block is handed to connectTrace
 * and confirmed transactions are removed from the mempool/disconnectpool.
 * Returns false on system error (disk read/flush failure) or invalid block.
 */
2332 bool CChainState::ConnectTip(BlockValidationState& state, CBlockIndex* pindexNew, const std::shared_ptr<const CBlock>& pblock, ConnectTrace& connectTrace, DisconnectedBlockTransactions& disconnectpool)
2333 {
2336 
2337  assert(pindexNew->pprev == m_chain.Tip());
2338  // Read block from disk.
2339  int64_t nTime1 = GetTimeMicros();
2340  std::shared_ptr<const CBlock> pthisBlock;
2341  if (!pblock) {
2342  std::shared_ptr<CBlock> pblockNew = std::make_shared<CBlock>();
2343  if (!ReadBlockFromDisk(*pblockNew, pindexNew, m_params.GetConsensus())) {
2344  return AbortNode(state, "Failed to read block");
2345  }
2346  pthisBlock = pblockNew;
2347  } else {
2348  pthisBlock = pblock;
2349  }
2350  const CBlock& blockConnecting = *pthisBlock;
2351  // Apply the block atomically to the chain state.
2352  int64_t nTime2 = GetTimeMicros(); nTimeReadFromDisk += nTime2 - nTime1;
2353  int64_t nTime3;
2354  LogPrint(BCLog::BENCH, " - Load block from disk: %.2fms [%.2fs]\n", (nTime2 - nTime1) * MILLI, nTimeReadFromDisk * MICRO);
2355  {
2356  CCoinsViewCache view(&CoinsTip());
2357  bool rv = ConnectBlock(blockConnecting, state, pindexNew, view);
     // Notify listeners of the validation verdict before bailing out, so they
     // see the failure state as well as success.
2358  GetMainSignals().BlockChecked(blockConnecting, state);
2359  if (!rv) {
2360  if (state.IsInvalid())
2361  InvalidBlockFound(pindexNew, state);
2362  return error("%s: ConnectBlock %s failed, %s", __func__, pindexNew->GetBlockHash().ToString(), state.ToString());
2363  }
2364  nTime3 = GetTimeMicros(); nTimeConnectTotal += nTime3 - nTime2;
2365  assert(nBlocksTotal > 0);
2366  LogPrint(BCLog::BENCH, " - Connect total: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime3 - nTime2) * MILLI, nTimeConnectTotal * MICRO, nTimeConnectTotal * MILLI / nBlocksTotal);
2367  bool flushed = view.Flush();
2368  assert(flushed);
2369  }
2370  int64_t nTime4 = GetTimeMicros(); nTimeFlush += nTime4 - nTime3;
2371  LogPrint(BCLog::BENCH, " - Flush: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime4 - nTime3) * MILLI, nTimeFlush * MICRO, nTimeFlush * MILLI / nBlocksTotal);
2372  // Write the chain state to disk, if necessary.
     // NOTE(review): the guarding condition for this early return (presumably a
     // failed FlushStateToDisk call, original line 2373) is missing from this
     // extraction — confirm against upstream validation.cpp.
2374  return false;
2375  }
2376  int64_t nTime5 = GetTimeMicros(); nTimeChainState += nTime5 - nTime4;
2377  LogPrint(BCLog::BENCH, " - Writing chainstate: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime5 - nTime4) * MILLI, nTimeChainState * MICRO, nTimeChainState * MILLI / nBlocksTotal);
2378  // Remove conflicting transactions from the mempool.;
2379  if (m_mempool) {
2380  m_mempool->removeForBlock(blockConnecting.vtx, pindexNew->nHeight);
2381  disconnectpool.removeForBlock(blockConnecting.vtx);
2382  }
2383  // Update m_chain & related variables.
2384  m_chain.SetTip(pindexNew);
2385  UpdateTip(pindexNew);
2386 
2387  int64_t nTime6 = GetTimeMicros(); nTimePostConnect += nTime6 - nTime5; nTimeTotal += nTime6 - nTime1;
2388  LogPrint(BCLog::BENCH, " - Connect postprocess: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime6 - nTime5) * MILLI, nTimePostConnect * MICRO, nTimePostConnect * MILLI / nBlocksTotal);
2389  LogPrint(BCLog::BENCH, "- Connect block: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime6 - nTime1) * MILLI, nTimeTotal * MICRO, nTimeTotal * MILLI / nBlocksTotal);
2390 
2391  connectTrace.BlockConnected(pindexNew, std::move(pthisBlock));
2392  return true;
2393 }
2394 
     // Body of CChainState::FindMostWorkChain (the signature lies above this
     // visible chunk). Repeatedly takes the most-work entry from
     // setBlockIndexCandidates and verifies every block between it and the
     // active chain is usable; unusable chains are pruned from the candidate
     // set and the search repeats. Returns nullptr when no candidates remain.
2400  do {
2401  CBlockIndex *pindexNew = nullptr;
2402 
2403  // Find the best candidate header.
2404  {
2405  std::set<CBlockIndex*, CBlockIndexWorkComparator>::reverse_iterator it = setBlockIndexCandidates.rbegin();
2406  if (it == setBlockIndexCandidates.rend())
2407  return nullptr;
2408  pindexNew = *it;
2409  }
2410 
2411  // Check whether all blocks on the path between the currently active chain and the candidate are valid.
2412  // Just going until the active chain is an optimization, as we know all blocks in it are valid already.
2413  CBlockIndex *pindexTest = pindexNew;
2414  bool fInvalidAncestor = false;
2415  while (pindexTest && !m_chain.Contains(pindexTest)) {
2416  assert(pindexTest->HaveTxsDownloaded() || pindexTest->nHeight == 0);
2417 
2418  // Pruned nodes may have entries in setBlockIndexCandidates for
2419  // which block files have been deleted. Remove those as candidates
2420  // for the most work chain if we come across them; we can't switch
2421  // to a chain unless we have all the non-active-chain parent blocks.
2422  bool fFailedChain = pindexTest->nStatus & BLOCK_FAILED_MASK;
2423  bool fMissingData = !(pindexTest->nStatus & BLOCK_HAVE_DATA);
2424  if (fFailedChain || fMissingData) {
2425  // Candidate chain is not usable (either invalid or missing data)
2426  if (fFailedChain && (pindexBestInvalid == nullptr || pindexNew->nChainWork > pindexBestInvalid->nChainWork))
2427  pindexBestInvalid = pindexNew;
2428  CBlockIndex *pindexFailed = pindexNew;
2429  // Remove the entire chain from the set.
2430  while (pindexTest != pindexFailed) {
2431  if (fFailedChain) {
2432  pindexFailed->nStatus |= BLOCK_FAILED_CHILD;
2433  } else if (fMissingData) {
2434  // If we're missing data, then add back to m_blocks_unlinked,
2435  // so that if the block arrives in the future we can try adding
2436  // to setBlockIndexCandidates again.
     // NOTE(review): the call that receives this argument (presumably an
     // m_blocks_unlinked.insert on original line 2437) is missing from this
     // extraction — confirm against upstream.
2438  std::make_pair(pindexFailed->pprev, pindexFailed));
2439  }
2440  setBlockIndexCandidates.erase(pindexFailed);
2441  pindexFailed = pindexFailed->pprev;
2442  }
2443  setBlockIndexCandidates.erase(pindexTest);
2444  fInvalidAncestor = true;
2445  break;
2446  }
2447  pindexTest = pindexTest->pprev;
2448  }
2449  if (!fInvalidAncestor)
2450  return pindexNew;
2451  } while(true);
2452 }
2453 
     // Body of CChainState::PruneBlockIndexCandidates (signature is above this
     // visible chunk): drop every candidate with strictly less work than the
     // current tip, leaving at least the tip (or a successor) in the set.
2456  // Note that we can't delete the current block itself, as we may need to return to it later in case a
2457  // reorganization to a better block fails.
2458  std::set<CBlockIndex*, CBlockIndexWorkComparator>::iterator it = setBlockIndexCandidates.begin();
2459  while (it != setBlockIndexCandidates.end() && setBlockIndexCandidates.value_comp()(*it, m_chain.Tip())) {
2460  setBlockIndexCandidates.erase(it++);
2461  }
2462  // Either the current tip or a successor of it we're working towards is left in setBlockIndexCandidates.
2463  assert(!setBlockIndexCandidates.empty());
2464 }
2465 
/**
 * Try to make some progress towards making pindexMostWork the active chain:
 * disconnect active blocks down to the fork point, then connect up to 32
 * blocks towards the target. pblock is used for the target block itself when
 * available. Sets fInvalidFound when an invalid block was encountered, and
 * returns false only on a system error (the caller must then abort).
 */
2472 bool CChainState::ActivateBestChainStep(BlockValidationState& state, CBlockIndex* pindexMostWork, const std::shared_ptr<const CBlock>& pblock, bool& fInvalidFound, ConnectTrace& connectTrace)
2473 {
2476 
2477  const CBlockIndex* pindexOldTip = m_chain.Tip();
2478  const CBlockIndex* pindexFork = m_chain.FindFork(pindexMostWork);
2479 
2480  // Disconnect active blocks which are no longer in the best chain.
2481  bool fBlocksDisconnected = false;
2482  DisconnectedBlockTransactions disconnectpool;
2483  while (m_chain.Tip() && m_chain.Tip() != pindexFork) {
2484  if (!DisconnectTip(state, &disconnectpool)) {
2485  // This is likely a fatal error, but keep the mempool consistent,
2486  // just in case. Only remove from the mempool in this case.
2487  MaybeUpdateMempoolForReorg(disconnectpool, false);
2488 
2489  // If we're unable to disconnect a block during normal operation,
2490  // then that is a failure of our local system -- we should abort
2491  // rather than stay on a less work chain.
2492  AbortNode(state, "Failed to disconnect block; see debug.log for details");
2493  return false;
2494  }
2495  fBlocksDisconnected = true;
2496  }
2497 
2498  // Build list of new blocks to connect (in descending height order).
2499  std::vector<CBlockIndex*> vpindexToConnect;
2500  bool fContinue = true;
2501  int nHeight = pindexFork ? pindexFork->nHeight : -1;
2502  while (fContinue && nHeight != pindexMostWork->nHeight) {
2503  // Don't iterate the entire list of potential improvements toward the best tip, as we likely only need
2504  // a few blocks along the way.
2505  int nTargetHeight = std::min(nHeight + 32, pindexMostWork->nHeight);
2506  vpindexToConnect.clear();
2507  vpindexToConnect.reserve(nTargetHeight - nHeight);
2508  CBlockIndex* pindexIter = pindexMostWork->GetAncestor(nTargetHeight);
2509  while (pindexIter && pindexIter->nHeight != nHeight) {
2510  vpindexToConnect.push_back(pindexIter);
2511  pindexIter = pindexIter->pprev;
2512  }
2513  nHeight = nTargetHeight;
2514 
2515  // Connect new blocks.
2516  for (CBlockIndex* pindexConnect : reverse_iterate(vpindexToConnect)) {
2517  if (!ConnectTip(state, pindexConnect, pindexConnect == pindexMostWork ? pblock : std::shared_ptr<const CBlock>(), connectTrace, disconnectpool)) {
2518  if (state.IsInvalid()) {
2519  // The block violates a consensus rule.
     // NOTE(review): an enclosing condition (original line 2520, presumably
     // excluding BLOCK_MUTATED results) is missing from this extraction —
     // confirm against upstream before relying on this branch's structure.
2521  InvalidChainFound(vpindexToConnect.front());
2522  }
2523  state = BlockValidationState();
2524  fInvalidFound = true;
2525  fContinue = false;
2526  break;
2527  } else {
2528  // A system error occurred (disk space, database error, ...).
2529  // Make the mempool consistent with the current tip, just in case
2530  // any observers try to use it before shutdown.
2531  MaybeUpdateMempoolForReorg(disconnectpool, false);
2532  return false;
2533  }
2534  } else {
2536  if (!pindexOldTip || m_chain.Tip()->nChainWork > pindexOldTip->nChainWork) {
2537  // We're in a better position than we were. Return temporarily to release the lock.
2538  fContinue = false;
2539  break;
2540  }
2541  }
2542  }
2543  }
2544 
2545  if (fBlocksDisconnected) {
2546  // If any blocks were disconnected, disconnectpool may be non empty. Add
2547  // any disconnected transactions back to the mempool.
2548  MaybeUpdateMempoolForReorg(disconnectpool, true);
2549  }
2550  if (m_mempool) m_mempool->check(*this);
2551 
2553 
2554  return true;
2555 }
2556 
2558 {
2562 }
2563 
     // Body of NotifyHeaderTip (signature is above this visible chunk): if the
     // best-known header changed since the last call, fire the UI notification
     // outside of cs_main and report whether a notification was sent.
2565  bool fNotify = false;
2566  bool fInitialBlockDownload = false;
     // Static: remembers the header tip seen by the previous invocation so we
     // only notify on change.
2567  static CBlockIndex* pindexHeaderOld = nullptr;
2568  CBlockIndex* pindexHeader = nullptr;
2569  {
2570  LOCK(cs_main);
2571  pindexHeader = pindexBestHeader;
2572 
2573  if (pindexHeader != pindexHeaderOld) {
2574  fNotify = true;
2575  fInitialBlockDownload = chainstate.IsInitialBlockDownload();
2576  pindexHeaderOld = pindexHeader;
2577  }
2578  }
2579  // Send block tip changed notifications without cs_main
2580  if (fNotify) {
2581  uiInterface.NotifyHeaderTip(GetSynchronizationState(fInitialBlockDownload), pindexHeader);
2582  }
2583  return fNotify;
2584 }
2585 
     // Body of LimitValidationInterfaceQueue (signature above this chunk):
     // when more than 10 validation-interface callbacks are queued, throttle.
     // NOTE(review): the statement inside the if (original line 2590,
     // presumably a SyncWithValidationInterfaceQueue() call) is missing from
     // this extraction — confirm against upstream.
2588 
2589  if (GetMainSignals().CallbacksPending() > 10) {
2591  }
2592 }
2593 
/**
 * Make the most-work chain active, calling ActivateBestChainStep repeatedly
 * and periodically releasing cs_main so callbacks and other threads can run.
 * pblock is an optional hint for the block being processed. Returns false on
 * a system error. Honors -stopatheight and shutdown requests between steps.
 */
2594 bool CChainState::ActivateBestChain(BlockValidationState& state, std::shared_ptr<const CBlock> pblock)
2595 {
2596  // Note that while we're often called here from ProcessNewBlock, this is
2597  // far from a guarantee. Things in the P2P/RPC will often end up calling
2598  // us in the middle of ProcessNewBlock - do not assume pblock is set
2599  // sanely for performance or correctness!
2601 
2602  // ABC maintains a fair degree of expensive-to-calculate internal state
2603  // because this function periodically releases cs_main so that it does not lock up other threads for too long
2604  // during large connects - and to allow for e.g. the callback queue to drain
2605  // we use m_cs_chainstate to enforce mutual exclusion so that only one caller may execute this function at a time
2607 
2608  CBlockIndex *pindexMostWork = nullptr;
2609  CBlockIndex *pindexNewTip = nullptr;
2610  int nStopAtHeight = gArgs.GetArg("-stopatheight", DEFAULT_STOPATHEIGHT);
2611  do {
2612  // Block until the validation queue drains. This should largely
2613  // never happen in normal operation, however may happen during
2614  // reindex, causing memory blowup if we run too far ahead.
2615  // Note that if a validationinterface callback ends up calling
2616  // ActivateBestChain this may lead to a deadlock! We should
2617  // probably have a DEBUG_LOCKORDER test for this in the future.
     // NOTE(review): the statement implementing the wait described above
     // (original line 2618, presumably LimitValidationInterfaceQueue()) is
     // missing from this extraction.
2619 
2620  {
2621  LOCK(cs_main);
2622  // Lock transaction pool for at least as long as it takes for connectTrace to be consumed
2623  LOCK(MempoolMutex());
2624  CBlockIndex* starting_tip = m_chain.Tip();
2625  bool blocks_connected = false;
2626  do {
2627  // We absolutely may not unlock cs_main until we've made forward progress
2628  // (with the exception of shutdown due to hardware issues, low disk space, etc).
2629  ConnectTrace connectTrace; // Destructed before cs_main is unlocked
2630 
2631  if (pindexMostWork == nullptr) {
2632  pindexMostWork = FindMostWorkChain();
2633  }
2634 
2635  // Whether we have anything to do at all.
2636  if (pindexMostWork == nullptr || pindexMostWork == m_chain.Tip()) {
2637  break;
2638  }
2639 
2640  bool fInvalidFound = false;
2641  std::shared_ptr<const CBlock> nullBlockPtr;
2642  if (!ActivateBestChainStep(state, pindexMostWork, pblock && pblock->GetHash() == pindexMostWork->GetBlockHash() ? pblock : nullBlockPtr, fInvalidFound, connectTrace)) {
2643  // A system error occurred
2644  return false;
2645  }
2646  blocks_connected = true;
2647 
2648  if (fInvalidFound) {
2649  // Wipe cache, we may need another branch now.
2650  pindexMostWork = nullptr;
2651  }
2652  pindexNewTip = m_chain.Tip();
2653 
2654  for (const PerBlockConnectTrace& trace : connectTrace.GetBlocksConnected()) {
2655  assert(trace.pblock && trace.pindex);
2656  GetMainSignals().BlockConnected(trace.pblock, trace.pindex);
2657  }
2658  } while (!m_chain.Tip() || (starting_tip && CBlockIndexWorkComparator()(m_chain.Tip(), starting_tip)));
2659  if (!blocks_connected) return true;
2660 
2661  const CBlockIndex* pindexFork = m_chain.FindFork(starting_tip);
2662  bool fInitialDownload = IsInitialBlockDownload();
2663 
2664  // Notify external listeners about the new tip.
2665  // Enqueue while holding cs_main to ensure that UpdatedBlockTip is called in the order in which blocks are connected
2666  if (pindexFork != pindexNewTip) {
2667  // Notify ValidationInterface subscribers
2668  GetMainSignals().UpdatedBlockTip(pindexNewTip, pindexFork, fInitialDownload);
2669 
2670  // Always notify the UI if a new block tip was connected
2671  uiInterface.NotifyBlockTip(GetSynchronizationState(fInitialDownload), pindexNewTip);
2672  }
2673  }
2674  // When we reach this point, we switched to a new tip (stored in pindexNewTip).
2675 
2676  if (nStopAtHeight && pindexNewTip && pindexNewTip->nHeight >= nStopAtHeight) StartShutdown();
2677 
2678  // We check shutdown only after giving ActivateBestChainStep a chance to run once so that we
2679  // never shutdown before connecting the genesis block during LoadChainTip(). Previously this
2680  // caused an assert() failure during shutdown in such cases as the UTXO DB flushing checks
2681  // that the best block hash is non-null.
2682  if (ShutdownRequested()) break;
2683  } while (pindexNewTip != pindexMostWork);
2684  CheckBlockIndex();
2685 
2686  // Write changes periodically to disk, after relay.
     // NOTE(review): the guarding condition for this early return (original
     // line 2687, presumably a failed FlushStateToDisk call) is missing from
     // this extraction — confirm against upstream.
2688  return false;
2689  }
2690 
2691  return true;
2692 }
2693 
// Body of PreciousBlock (signature is above this visible chunk): give pindex
// precedence over other blocks of equal work by re-inserting it into
// setBlockIndexCandidates with a lowered sequence id, then re-run
// ActivateBestChain. Several interior lines (2702, 2704, 2706, 2708, 2712,
// 2716) are missing from this extraction.
2695 {
2696  {
2697  LOCK(cs_main);
2698  if (pindex->nChainWork < m_chain.Tip()->nChainWork) {
2699  // Nothing to do, this block is not at the tip.
2700  return true;
2701  }
2703  // The chain has been extended since the last call, reset the counter.
2705  }
2707  setBlockIndexCandidates.erase(pindex);
2709  if (nBlockReverseSequenceId > std::numeric_limits<int32_t>::min()) {
2710  // We can't keep reducing the counter if somebody really wants to
2711  // call preciousblock 2**31-1 times on the same set of tips...
2713  }
2714  if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && pindex->HaveTxsDownloaded()) {
2715  setBlockIndexCandidates.insert(pindex);
2717  }
2718  }
2719 
2720  return ActivateBestChain(state, std::shared_ptr<const CBlock>());
2721 }
2722 
// Body of InvalidateBlock (signature is above this visible chunk): mark
// pindex and all of its descendants invalid, disconnecting them from the
// active chain one tip at a time, while keeping setBlockIndexCandidates and
// the mempool consistent. Returns false on a failed disconnect or if the
// block remains in the active chain.
2724 {
2725  // Genesis block can't be invalidated
2726  assert(pindex);
2727  if (pindex->nHeight == 0) return false;
2728 
2729  CBlockIndex* to_mark_failed = pindex;
2730  bool pindex_was_in_chain = false;
2731  int disconnected = 0;
2732 
2733  // We do not allow ActivateBestChain() to run while InvalidateBlock() is
2734  // running, as that could cause the tip to change while we disconnect
2735  // blocks.
2737 
2738  // We'll be acquiring and releasing cs_main below, to allow the validation
2739  // callbacks to run. However, we should keep the block index in a
2740  // consistent state as we disconnect blocks -- in particular we need to
2741  // add equal-work blocks to setBlockIndexCandidates as we disconnect.
2742  // To avoid walking the block index repeatedly in search of candidates,
2743  // build a map once so that we can look up candidate blocks by chain
2744  // work as we go.
2745  std::multimap<const arith_uint256, CBlockIndex *> candidate_blocks_by_work;
2746 
2747  {
2748  LOCK(cs_main);
2749  for (const auto& entry : m_blockman.m_block_index) {
2750  CBlockIndex *candidate = entry.second;
2751  // We don't need to put anything in our active chain into the
2752  // multimap, because those candidates will be found and considered
2753  // as we disconnect.
2754  // Instead, consider only non-active-chain blocks that have at
2755  // least as much work as where we expect the new tip to end up.
2756  if (!m_chain.Contains(candidate) &&
2757  !CBlockIndexWorkComparator()(candidate, pindex->pprev) &&
2758  candidate->IsValid(BLOCK_VALID_TRANSACTIONS) &&
2759  candidate->HaveTxsDownloaded()) {
2760  candidate_blocks_by_work.insert(std::make_pair(candidate->nChainWork, candidate));
2761  }
2762  }
2763  }
2764 
2765  // Disconnect (descendants of) pindex, and mark them invalid.
2766  while (true) {
2767  if (ShutdownRequested()) break;
2768 
2769  // Make sure the queue of validation callbacks doesn't grow unboundedly.
     // NOTE(review): the statement implementing the comment above (original
     // line 2770, presumably LimitValidationInterfaceQueue()) is missing from
     // this extraction.
2771 
2772  LOCK(cs_main);
2773  // Lock for as long as disconnectpool is in scope to make sure MaybeUpdateMempoolForReorg is
2774  // called after DisconnectTip without unlocking in between
2775  LOCK(MempoolMutex());
2776  if (!m_chain.Contains(pindex)) break;
2777  pindex_was_in_chain = true;
2778  CBlockIndex *invalid_walk_tip = m_chain.Tip();
2779 
2780  // ActivateBestChain considers blocks already in m_chain
2781  // unconditionally valid already, so force disconnect away from it.
2782  DisconnectedBlockTransactions disconnectpool;
2783  bool ret = DisconnectTip(state, &disconnectpool);
2784  // DisconnectTip will add transactions to disconnectpool.
2785  // Adjust the mempool to be consistent with the new tip, adding
2786  // transactions back to the mempool if disconnecting was successful,
2787  // and we're not doing a very deep invalidation (in which case
2788  // keeping the mempool up to date is probably futile anyway).
2789  MaybeUpdateMempoolForReorg(disconnectpool, /* fAddToMempool = */ (++disconnected <= 10) && ret);
2790  if (!ret) return false;
2791  assert(invalid_walk_tip->pprev == m_chain.Tip());
2792 
2793  // We immediately mark the disconnected blocks as invalid.
2794  // This prevents a case where pruned nodes may fail to invalidateblock
2795  // and be left unable to start as they have no tip candidates (as there
2796  // are no blocks that meet the "have data and are not invalid per
2797  // nStatus" criteria for inclusion in setBlockIndexCandidates).
2798  invalid_walk_tip->nStatus |= BLOCK_FAILED_VALID;
2799  setDirtyBlockIndex.insert(invalid_walk_tip);
2800  setBlockIndexCandidates.erase(invalid_walk_tip);
2801  setBlockIndexCandidates.insert(invalid_walk_tip->pprev);
2802  if (invalid_walk_tip->pprev == to_mark_failed && (to_mark_failed->nStatus & BLOCK_FAILED_VALID)) {
2803  // We only want to mark the last disconnected block as BLOCK_FAILED_VALID; its children
2804  // need to be BLOCK_FAILED_CHILD instead.
2805  to_mark_failed->nStatus = (to_mark_failed->nStatus ^ BLOCK_FAILED_VALID) | BLOCK_FAILED_CHILD;
2806  setDirtyBlockIndex.insert(to_mark_failed);
2807  }
2808 
2809  // Add any equal or more work headers to setBlockIndexCandidates
2810  auto candidate_it = candidate_blocks_by_work.lower_bound(invalid_walk_tip->pprev->nChainWork);
2811  while (candidate_it != candidate_blocks_by_work.end()) {
2812  if (!CBlockIndexWorkComparator()(candidate_it->second, invalid_walk_tip->pprev)) {
2813  setBlockIndexCandidates.insert(candidate_it->second);
2814  candidate_it = candidate_blocks_by_work.erase(candidate_it);
2815  } else {
2816  ++candidate_it;
2817  }
2818  }
2819 
2820  // Track the last disconnected block, so we can correct its BLOCK_FAILED_CHILD status in future
2821  // iterations, or, if it's the last one, call InvalidChainFound on it.
2822  to_mark_failed = invalid_walk_tip;
2823  }
2824 
2825  CheckBlockIndex();
2826 
2827  {
2828  LOCK(cs_main);
2829  if (m_chain.Contains(to_mark_failed)) {
2830  // If the to-be-marked invalid block is in the active chain, something is interfering and we can't proceed.
2831  return false;
2832  }
2833 
2834  // Mark pindex (or the last disconnected block) as invalid, even when it never was in the main chain
2835  to_mark_failed->nStatus |= BLOCK_FAILED_VALID;
2836  setDirtyBlockIndex.insert(to_mark_failed);
2837  setBlockIndexCandidates.erase(to_mark_failed);
2838  m_blockman.m_failed_blocks.insert(to_mark_failed);
2839 
2840  // If any new blocks somehow arrived while we were disconnecting
2841  // (above), then the pre-calculation of what should go into
2842  // setBlockIndexCandidates may have missed entries. This would
2843  // technically be an inconsistency in the block index, but if we clean
2844  // it up here, this should be an essentially unobservable error.
2845  // Loop back over all block index entries and add any missing entries
2846  // to setBlockIndexCandidates.
2847  BlockMap::iterator it = m_blockman.m_block_index.begin();
2848  while (it != m_blockman.m_block_index.end()) {
2849  if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->HaveTxsDownloaded() && !setBlockIndexCandidates.value_comp()(it->second, m_chain.Tip())) {
2850  setBlockIndexCandidates.insert(it->second);
2851  }
2852  it++;
2853  }
2854 
2855  InvalidChainFound(to_mark_failed);
2856  }
2857 
2858  // Only notify about a new block tip if the active chain was modified.
2859  if (pindex_was_in_chain) {
2860  uiInterface.NotifyBlockTip(GetSynchronizationState(IsInitialBlockDownload()), to_mark_failed->pprev);
2861  }
2862  return true;
2863 }
2864 
     // Body of ResetBlockFailureFlags (signature is above this visible chunk):
     // clear the BLOCK_FAILED_* flags from pindex, all of its descendants, and
     // all of its ancestors, re-adding newly eligible blocks to
     // setBlockIndexCandidates.
2867 
2868  int nHeight = pindex->nHeight;
2869 
2870  // Remove the invalidity flag from this block and all its descendants.
2871  BlockMap::iterator it = m_blockman.m_block_index.begin();
2872  while (it != m_blockman.m_block_index.end()) {
2873  if (!it->second->IsValid() && it->second->GetAncestor(nHeight) == pindex) {
2874  it->second->nStatus &= ~BLOCK_FAILED_MASK;
2875  setDirtyBlockIndex.insert(it->second);
2876  if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->HaveTxsDownloaded() && setBlockIndexCandidates.value_comp()(m_chain.Tip(), it->second)) {
2877  setBlockIndexCandidates.insert(it->second);
2878  }
2879  if (it->second == pindexBestInvalid) {
2880  // Reset invalid block marker if it was pointing to one of those.
2881  pindexBestInvalid = nullptr;
2882  }
2883  m_blockman.m_failed_blocks.erase(it->second);
2884  }
2885  it++;
2886  }
2887 
2888  // Remove the invalidity flag from all ancestors too.
2889  while (pindex != nullptr) {
2890  if (pindex->nStatus & BLOCK_FAILED_MASK) {
2891  pindex->nStatus &= ~BLOCK_FAILED_MASK;
2892  setDirtyBlockIndex.insert(pindex);
2893  m_blockman.m_failed_blocks.erase(pindex);
2894  }
2895  pindex = pindex->pprev;
2896  }
2897 }
2898 
// Body of BlockManager::AddToBlockIndex (signature is above this visible
// chunk): create (or find) the CBlockIndex entry for a header, link it to its
// parent, compute cumulative work, and update pindexBestHeader.
2900 {
2902 
2903  // Check for duplicate
2904  uint256 hash = block.GetHash();
2905  BlockMap::iterator it = m_block_index.find(hash);
2906  if (it != m_block_index.end())
2907  return it->second;
2908 
2909  // Construct new block index object
2910  CBlockIndex* pindexNew = new CBlockIndex(block);
2911  // We assign the sequence id to blocks only when the full data is available,
2912  // to avoid miners withholding blocks but broadcasting headers, to get a
2913  // competitive advantage.
2914  pindexNew->nSequenceId = 0;
     // The map owns the hash; the index entry points at the map's key so the
     // hash is stored only once.
2915  BlockMap::iterator mi = m_block_index.insert(std::make_pair(hash, pindexNew)).first;
2916  pindexNew->phashBlock = &((*mi).first);
2917  BlockMap::iterator miPrev = m_block_index.find(block.hashPrevBlock);
2918  if (miPrev != m_block_index.end())
2919  {
2920  pindexNew->pprev = (*miPrev).second;
2921  pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
2922  pindexNew->BuildSkip();
2923  }
2924  pindexNew->nTimeMax = (pindexNew->pprev ? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime) : pindexNew->nTime);
2925  pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew);
2926  pindexNew->RaiseValidity(BLOCK_VALID_TREE);
2927  if (pindexBestHeader == nullptr || pindexBestHeader->nChainWork < pindexNew->nChainWork)
2928  pindexBestHeader = pindexNew;
2929 
2930  setDirtyBlockIndex.insert(pindexNew);
2931 
2932  return pindexNew;
2933 }
2934 
/**
 * Record that the full transaction data for a block was received and stored
 * at the given file position. If all ancestors also have data, propagate
 * nChainTx down to descendants (via m_blocks_unlinked) and add eligible
 * blocks to setBlockIndexCandidates; otherwise queue the block as unlinked.
 */
2936 void CChainState::ReceivedBlockTransactions(const CBlock& block, CBlockIndex* pindexNew, const FlatFilePos& pos)
2937 {
2938  pindexNew->nTx = block.vtx.size();
2939  pindexNew->nChainTx = 0;
2940  pindexNew->nFile = pos.nFile;
2941  pindexNew->nDataPos = pos.nPos;
2942  pindexNew->nUndoPos = 0;
2943  pindexNew->nStatus |= BLOCK_HAVE_DATA;
     // NOTE(review): the condition guarding BLOCK_OPT_WITNESS (original line
     // 2944) and the RaiseValidity call (2947) are missing from this
     // extraction — confirm against upstream.
2945  pindexNew->nStatus |= BLOCK_OPT_WITNESS;
2946  }
2948  setDirtyBlockIndex.insert(pindexNew);
2949 
2950  if (pindexNew->pprev == nullptr || pindexNew->pprev->HaveTxsDownloaded()) {
2951  // If pindexNew is the genesis block or all parents are BLOCK_VALID_TRANSACTIONS.
2952  std::deque<CBlockIndex*> queue;
2953  queue.push_back(pindexNew);
2954 
2955  // Recursively process any descendant blocks that now may be eligible to be connected.
2956  while (!queue.empty()) {
2957  CBlockIndex *pindex = queue.front();
2958  queue.pop_front();
2959  pindex->nChainTx = (pindex->pprev ? pindex->pprev->nChainTx : 0) + pindex->nTx;
2960  {
2962  pindex->nSequenceId = nBlockSequenceId++;
2963  }
2964  if (m_chain.Tip() == nullptr || !setBlockIndexCandidates.value_comp()(pindex, m_chain.Tip())) {
2965  setBlockIndexCandidates.insert(pindex);
2966  }
2967  std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = m_blockman.m_blocks_unlinked.equal_range(pindex);
2968  while (range.first != range.second) {
2969  std::multimap<CBlockIndex*, CBlockIndex*>::iterator it = range.first;
2970  queue.push_back(it->second);
2971  range.first++;
2972  m_blockman.m_blocks_unlinked.erase(it);
2973  }
2974  }
2975  } else {
2976  if (pindexNew->pprev && pindexNew->pprev->IsValid(BLOCK_VALID_TREE)) {
2977  m_blockman.m_blocks_unlinked.insert(std::make_pair(pindexNew->pprev, pindexNew));
2978  }
2979  }
2980 }
2981 
2982 static bool CheckBlockHeader(const CBlockHeader& block, BlockValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW = true)
2983 {
2984  // Check proof of work matches claimed amount
2985  if (fCheckPOW && !CheckProofOfWork(block.GetHash(), block.nBits, consensusParams))
2986  return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, "high-hash", "proof of work failed");
2987 
2988  return true;
2989 }
2990 
/**
 * Context-free block validity checks: header PoW, signet solution, merkle
 * root (including CVE-2012-2459 malleability), coinbase placement,
 * per-transaction checks (CVE-2018-17144 duplicate inputs) and legacy sigop
 * count. Results are cached in block.fChecked when the full check ran.
 */
2991 bool CheckBlock(const CBlock& block, BlockValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW, bool fCheckMerkleRoot)
2992 {
2993  // These are checks that are independent of context.
2994 
2995  if (block.fChecked)
2996  return true;
2997 
2998  // Check that the header is valid (particularly PoW). This is mostly
2999  // redundant with the call in AcceptBlockHeader.
3000  if (!CheckBlockHeader(block, state, consensusParams, fCheckPOW))
3001  return false;
3002 
3003  // Signet only: check block solution
3004  if (consensusParams.signet_blocks && fCheckPOW && !CheckSignetBlockSolution(block, consensusParams)) {
3005  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-signet-blksig", "signet block signature validation failure");
3006  }
3007 
3008  // Check the merkle root.
3009  if (fCheckMerkleRoot) {
3010  bool mutated;
3011  uint256 hashMerkleRoot2 = BlockMerkleRoot(block, &mutated);
3012  if (block.hashMerkleRoot != hashMerkleRoot2)
3013  return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-txnmrklroot", "hashMerkleRoot mismatch");
3014 
3015  // Check for merkle tree malleability (CVE-2012-2459): repeating sequences
3016  // of transactions in a block without affecting the merkle root of a block,
3017  // while still invalidating it.
3018  if (mutated)
3019  return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-txns-duplicate", "duplicate transaction");
3020  }
3021 
3022  // All potential-corruption validation must be done before we do any
3023  // transaction validation, as otherwise we may mark the header as invalid
3024  // because we receive the wrong transactions for it.
3025  // Note that witness malleability is checked in ContextualCheckBlock, so no
3026  // checks that use witness data may be performed here.
3027 
3028  // Size limits
     // NOTE(review): the size-limit condition itself (original line 3029) is
     // missing from this extraction — confirm against upstream.
3030  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-length", "size limits failed");
3031 
3032  // First transaction must be coinbase, the rest must not be
3033  if (block.vtx.empty() || !block.vtx[0]->IsCoinBase())
3034  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-missing", "first tx is not coinbase");
3035  for (unsigned int i = 1; i < block.vtx.size(); i++)
3036  if (block.vtx[i]->IsCoinBase())
3037  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-multiple", "more than one coinbase");
3038 
3039  // Check transactions
3040  // Must check for duplicate inputs (see CVE-2018-17144)
3041  for (const auto& tx : block.vtx) {
3042  TxValidationState tx_state;
3043  if (!CheckTransaction(*tx, tx_state)) {
3044  // CheckBlock() does context-free validation checks. The only
3045  // possible failures are consensus failures.
     // NOTE(review): the start of the state.Invalid(...) call (original lines
     // 3046-3047) is missing from this extraction.
3048  strprintf("Transaction check failed (tx hash %s) %s", tx->GetHash().ToString(), tx_state.GetDebugMessage()));
3049  }
3050  }
3051  unsigned int nSigOps = 0;
3052  for (const auto& tx : block.vtx)
3053  {
3054  nSigOps += GetLegacySigOpCount(*tx);
3055  }
     // NOTE(review): the sigop-limit condition (original line 3056) is missing
     // from this extraction.
3057  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-sigops", "out-of-bounds SigOpCount");
3058 
3059  if (fCheckPOW && fCheckMerkleRoot)
3060  block.fChecked = true;
3061 
3062  return true;
3063 }
3064 
3065 void UpdateUncommittedBlockStructures(CBlock& block, const CBlockIndex* pindexPrev, const Consensus::Params& consensusParams)
3066 {
3067  int commitpos = GetWitnessCommitmentIndex(block);
3068  static const std::vector<unsigned char> nonce(32, 0x00);
3069  if (commitpos != NO_WITNESS_COMMITMENT && DeploymentActiveAfter(pindexPrev, consensusParams, Consensus::DEPLOYMENT_SEGWIT) && !block.vtx[0]->HasWitness()) {
3070  CMutableTransaction tx(*block.vtx[0]);
3071  tx.vin[0].scriptWitness.stack.resize(1);
3072  tx.vin[0].scriptWitness.stack[0] = nonce;
3073  block.vtx[0] = MakeTransactionRef(std::move(tx));
3074  }
3075 }
3076 
/**
 * Add a BIP141 witness commitment output to the block's coinbase if one is
 * not already present: an OP_RETURN output whose payload is the 4-byte
 * 0xaa21a9ed header followed by SHA256d(witness merkle root || 32 zero
 * bytes). Returns the raw commitment script bytes (empty if one existed).
 */
3077 std::vector<unsigned char> GenerateCoinbaseCommitment(CBlock& block, const CBlockIndex* pindexPrev, const Consensus::Params& consensusParams)
3078 {
3079  std::vector<unsigned char> commitment;
3080  int commitpos = GetWitnessCommitmentIndex(block);
     // ret doubles as the all-zero witness nonce hashed into the commitment.
3081  std::vector<unsigned char> ret(32, 0x00);
3082  if (commitpos == NO_WITNESS_COMMITMENT) {
3083  uint256 witnessroot = BlockWitnessMerkleRoot(block, nullptr);
3084  CHash256().Write(witnessroot).Write(ret).Finalize(witnessroot);
3085  CTxOut out;
3086  out.nValue = 0;
     // NOTE(review): the scriptPubKey resize to the commitment length
     // (original line 3087) is missing from this extraction — confirm against
     // upstream before relying on the byte writes below.
3088  out.scriptPubKey[0] = OP_RETURN;
3089  out.scriptPubKey[1] = 0x24;
3090  out.scriptPubKey[2] = 0xaa;
3091  out.scriptPubKey[3] = 0x21;
3092  out.scriptPubKey[4] = 0xa9;
3093  out.scriptPubKey[5] = 0xed;
3094  memcpy(&out.scriptPubKey[6], witnessroot.begin(), 32);
3095  commitment = std::vector<unsigned char>(out.scriptPubKey.begin(), out.scriptPubKey.end());
3096  CMutableTransaction tx(*block.vtx[0]);
3097  tx.vout.push_back(out);
3098  block.vtx[0] = MakeTransactionRef(std::move(tx));
3099  }
3100  UpdateUncommittedBlockStructures(block, pindexPrev, consensusParams);
3101  return commitment;
3102 }
3103 
{
    // Walk the checkpoint map from highest to lowest height and return the
    // index entry of the first checkpointed hash present in our block index.
    const MapCheckpoints& checkpoints = data.mapCheckpoints;

    for (const MapCheckpoints::value_type& i : reverse_iterate(checkpoints))
    {
        const uint256& hash = i.second;
        CBlockIndex* pindex = LookupBlockIndex(hash);
        if (pindex) {
            return pindex;
        }
    }
    // None of the checkpointed blocks is known to our index.
    return nullptr;
}
3118 
/** Context-dependent validity checks for a block header: difficulty (nBits),
 *  checkpoints, timestamp bounds, and minimum version. "Context" here means
 *  only the chain of previous headers (pindexPrev), never the UTXO set.
 *  Returns false with a reason recorded in state on failure.
 */
static bool ContextualCheckBlockHeader(const CBlockHeader& block, BlockValidationState& state, BlockManager& blockman, const CChainParams& params, const CBlockIndex* pindexPrev, int64_t nAdjustedTime) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    assert(pindexPrev != nullptr);
    const int nHeight = pindexPrev->nHeight + 1;

    // Check proof of work: nBits must match the required difficulty at this height.
    const Consensus::Params& consensusParams = params.GetConsensus();
    if (block.nBits != GetNextWorkRequired(pindexPrev, &block, consensusParams))
        return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, "bad-diffbits", "incorrect proof of work");

    // Check against checkpoints
    if (fCheckpointsEnabled) {
        // Don't accept any forks from the main chain prior to last checkpoint.
        // GetLastCheckpoint finds the last checkpoint in MapCheckpoints that's in our
        // BlockIndex().
        CBlockIndex* pcheckpoint = blockman.GetLastCheckpoint(params.Checkpoints());
        if (pcheckpoint && nHeight < pcheckpoint->nHeight) {
            LogPrintf("ERROR: %s: forked chain older than last checkpoint (height %d)\n", __func__, nHeight);
            return state.Invalid(BlockValidationResult::BLOCK_CHECKPOINT, "bad-fork-prior-to-checkpoint");
        }
    }

    // Check timestamp against prev: must be strictly after the previous
    // block's median time past.
    if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast())
        return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, "time-too-old", "block's timestamp is too early");

    // Check timestamp: not more than MAX_FUTURE_BLOCK_TIME past our adjusted time.
    if (block.GetBlockTime() > nAdjustedTime + MAX_FUTURE_BLOCK_TIME)
        return state.Invalid(BlockValidationResult::BLOCK_TIME_FUTURE, "time-too-new", "block timestamp too far in the future");

    // Reject blocks with outdated version once the corresponding deployment
    // (height-in-coinbase, DER signatures, CheckLockTimeVerify) is active.
    if ((block.nVersion < 2 && DeploymentActiveAfter(pindexPrev, consensusParams, Consensus::DEPLOYMENT_HEIGHTINCB)) ||
        (block.nVersion < 3 && DeploymentActiveAfter(pindexPrev, consensusParams, Consensus::DEPLOYMENT_DERSIG)) ||
        (block.nVersion < 4 && DeploymentActiveAfter(pindexPrev, consensusParams, Consensus::DEPLOYMENT_CLTV))) {
        return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, strprintf("bad-version(0x%08x)", block.nVersion),
                             strprintf("rejected nVersion=0x%08x block", block.nVersion));
    }

    return true;
}
3168 
/** Context-dependent validity checks for a block that need the previous block
 *  but not the UTXO set: transaction finality (incl. BIP113 median time past),
 *  the coinbase-height rule, the segwit witness commitment, and the block
 *  weight limit.
 */
static bool ContextualCheckBlock(const CBlock& block, BlockValidationState& state, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev)
{
    // pindexPrev == nullptr only for the genesis block (height 0).
    const int nHeight = pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1;

    // Enforce BIP113 (Median Time Past).
    int nLockTimeFlags = 0;
    if (DeploymentActiveAfter(pindexPrev, consensusParams, Consensus::DEPLOYMENT_CSV)) {
        assert(pindexPrev != nullptr);
        nLockTimeFlags |= LOCKTIME_MEDIAN_TIME_PAST;
    }

    // Locktime cutoff: the previous block's median time past once BIP113
    // applies, otherwise the block's own timestamp.
    int64_t nLockTimeCutoff = (nLockTimeFlags & LOCKTIME_MEDIAN_TIME_PAST)
                              ? pindexPrev->GetMedianTimePast()
                              : block.GetBlockTime();

    // Check that all transactions are finalized
    for (const auto& tx : block.vtx) {
        if (!IsFinalTx(*tx, nHeight, nLockTimeCutoff)) {
            return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-nonfinal", "non-final transaction");
        }
    }

    // Enforce rule that the coinbase starts with serialized block height
    if (DeploymentActiveAfter(pindexPrev, consensusParams, Consensus::DEPLOYMENT_HEIGHTINCB))
    {
        // The coinbase scriptSig must begin with exactly the push of nHeight;
        // a longer scriptSig is fine as long as the prefix matches.
        CScript expect = CScript() << nHeight;
        if (block.vtx[0]->vin[0].scriptSig.size() < expect.size() ||
            !std::equal(expect.begin(), expect.end(), block.vtx[0]->vin[0].scriptSig.begin())) {
            return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-height", "block height mismatch in coinbase");
        }
    }

    // Validation for witness commitments.
    // * We compute the witness hash (which is the hash including witnesses) of all the block's transactions, except the
    //   coinbase (where 0x0000....0000 is used instead).
    // * The coinbase scriptWitness is a stack of a single 32-byte vector, containing a witness reserved value (unconstrained).
    // * We build a merkle tree with all those witness hashes as leaves (similar to the hashMerkleRoot in the block header).
    // * There must be at least one output whose scriptPubKey is a single 36-byte push, the first 4 bytes of which are
    //   {0xaa, 0x21, 0xa9, 0xed}, and the following 32 bytes are SHA256^2(witness root, witness reserved value). In case there are
    //   multiple, the last one is used.
    bool fHaveWitness = false;
    if (DeploymentActiveAfter(pindexPrev, consensusParams, Consensus::DEPLOYMENT_SEGWIT)) {
        int commitpos = GetWitnessCommitmentIndex(block);
        if (commitpos != NO_WITNESS_COMMITMENT) {
            bool malleated = false;
            uint256 hashWitness = BlockWitnessMerkleRoot(block, &malleated);
            // The malleation check is ignored; as the transaction tree itself
            // already does not permit it, it is impossible to trigger in the
            // witness tree.
            if (block.vtx[0]->vin[0].scriptWitness.stack.size() != 1 || block.vtx[0]->vin[0].scriptWitness.stack[0].size() != 32) {
                return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-witness-nonce-size", strprintf("%s : invalid witness reserved value size", __func__));
            }
            CHash256().Write(hashWitness).Write(block.vtx[0]->vin[0].scriptWitness.stack[0]).Finalize(hashWitness);
            // Compare against the 32 bytes following the 6-byte commitment header.
            if (memcmp(hashWitness.begin(), &block.vtx[0]->vout[commitpos].scriptPubKey[6], 32)) {
                return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-witness-merkle-match", strprintf("%s : witness merkle commitment mismatch", __func__));
            }
            fHaveWitness = true;
        }
    }

    // No witness data is allowed in blocks that don't commit to witness data, as this would otherwise leave room for spam
    if (!fHaveWitness) {
        for (const auto& tx : block.vtx) {
            if (tx->HasWitness()) {
                return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "unexpected-witness", strprintf("%s : unexpected witness data found", __func__));
            }
        }
    }

    // After the coinbase witness reserved value and commitment are verified,
    // we can check if the block weight passes (before we've checked the
    // coinbase witness, it would be possible for the weight to be too
    // large by filling up the coinbase witness, which doesn't change
    // the block hash, so we couldn't mark the block as permanently
    // failed).
    if (GetBlockWeight(block) > MAX_BLOCK_WEIGHT) {
        return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-weight", strprintf("%s : weight limit failed", __func__));
    }

    return true;
}
3256 
/** Validate a header in isolation and in context, then add it to the block
 *  index. Sets *ppindex to the (existing or newly created) index entry.
 *  Known headers short-circuit: already-valid ones return true, ones marked
 *  failed return false with BLOCK_CACHED_INVALID.
 */
bool BlockManager::AcceptBlockHeader(const CBlockHeader& block, BlockValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex)
{
    // Check for duplicate
    uint256 hash = block.GetHash();
    BlockMap::iterator miSelf = m_block_index.find(hash);
    // The genesis block header bypasses all checks below and is added directly.
    if (hash != chainparams.GetConsensus().hashGenesisBlock) {
        if (miSelf != m_block_index.end()) {
            // Block header is already known.
            CBlockIndex* pindex = miSelf->second;
            if (ppindex)
                *ppindex = pindex;
            if (pindex->nStatus & BLOCK_FAILED_MASK) {
                LogPrintf("ERROR: %s: block %s is marked invalid\n", __func__, hash.ToString());
                return state.Invalid(BlockValidationResult::BLOCK_CACHED_INVALID, "duplicate");
            }
            return true;
        }

        // Context-free header checks.
        if (!CheckBlockHeader(block, state, chainparams.GetConsensus())) {
            LogPrint(BCLog::VALIDATION, "%s: Consensus::CheckBlockHeader: %s, %s\n", __func__, hash.ToString(), state.ToString());
            return false;
        }

        // Get prev block index
        CBlockIndex* pindexPrev = nullptr;
        BlockMap::iterator mi = m_block_index.find(block.hashPrevBlock);
        if (mi == m_block_index.end()) {
            LogPrintf("ERROR: %s: prev block not found\n", __func__);
            return state.Invalid(BlockValidationResult::BLOCK_MISSING_PREV, "prev-blk-not-found");
        }
        pindexPrev = (*mi).second;
        if (pindexPrev->nStatus & BLOCK_FAILED_MASK) {
            LogPrintf("ERROR: %s: prev block invalid\n", __func__);
            return state.Invalid(BlockValidationResult::BLOCK_INVALID_PREV, "bad-prevblk");
        }
        if (!ContextualCheckBlockHeader(block, state, *this, chainparams, pindexPrev, GetAdjustedTime()))
            return error("%s: Consensus::ContextualCheckBlockHeader: %s, %s", __func__, hash.ToString(), state.ToString());

        /* Determine if this block descends from any block which has been found
         * invalid (m_failed_blocks), then mark pindexPrev and any blocks between
         * them as failed. For example:
         *
         *                D3
         *              /
         *      B2 - C2
         *    /         \
         *  A             D2 - E2 - F2
         *    \
         *      B1 - C1 - D1 - E1
         *
         * In the case that we attempted to reorg from E1 to F2, only to find
         * C2 to be invalid, we would mark D2, E2, and F2 as BLOCK_FAILED_CHILD
         * but NOT D3 (it was not in any of our candidate sets at the time).
         *
         * In any case D3 will also be marked as BLOCK_FAILED_CHILD at restart
         * in LoadBlockIndex.
         */
        if (!pindexPrev->IsValid(BLOCK_VALID_SCRIPTS)) {
            // The above does not mean "invalid": it checks if the previous block
            // hasn't been validated up to BLOCK_VALID_SCRIPTS. This is a performance
            // optimization, in the common case of adding a new block to the tip,
            // we don't need to iterate over the failed blocks list.
            for (const CBlockIndex* failedit : m_failed_blocks) {
                if (pindexPrev->GetAncestor(failedit->nHeight) == failedit) {
                    assert(failedit->nStatus & BLOCK_FAILED_VALID);
                    // Mark every block between pindexPrev and the known-failed
                    // ancestor (exclusive) as a failed child.
                    CBlockIndex* invalid_walk = pindexPrev;
                    while (invalid_walk != failedit) {
                        invalid_walk->nStatus |= BLOCK_FAILED_CHILD;
                        setDirtyBlockIndex.insert(invalid_walk);
                        invalid_walk = invalid_walk->pprev;
                    }
                    LogPrintf("ERROR: %s: prev block invalid\n", __func__);
                    return state.Invalid(BlockValidationResult::BLOCK_INVALID_PREV, "bad-prevblk");
                }
            }
        }
    }
    CBlockIndex* pindex = AddToBlockIndex(block);

    if (ppindex)
        *ppindex = pindex;

    return true;
}
3342 
// Exposed wrapper for AcceptBlockHeader
// Accepts each header in order under cs_main; stops at the first failure.
// On success *ppindex points at the index entry of the last header accepted.
bool ChainstateManager::ProcessNewBlockHeaders(const std::vector<CBlockHeader>& headers, BlockValidationState& state, const CChainParams& chainparams, const CBlockIndex** ppindex)
{
    {
        LOCK(cs_main);
        for (const CBlockHeader& header : headers) {
            CBlockIndex *pindex = nullptr; // Use a temp pindex instead of ppindex to avoid a const_cast
            bool accepted = m_blockman.AcceptBlockHeader(
                header, state, chainparams, &pindex);

            if (!accepted) {
                return false;
            }
            if (ppindex) {
                *ppindex = pindex;
            }
        }
    }
    // During IBD, report header-sync progress estimated from the last
    // header's height and how far its timestamp lags behind the current time.
    if (ActiveChainstate().IsInitialBlockDownload() && ppindex && *ppindex) {
        LogPrintf("Synchronizing blockheaders, height: %d (~%.2f%%)\n", (*ppindex)->nHeight, 100.0/((*ppindex)->nHeight+(GetAdjustedTime() - (*ppindex)->GetBlockTime()) / Params().GetConsensus().nPowTargetSpacing) * (*ppindex)->nHeight);
    }
    }
    return true;
}
3370 
/** Store a full block to disk if it passes validity checks.
 *  Requested blocks are always processed; unrequested blocks only if they are
 *  new, have at least as much work as the tip, are not too far ahead of it,
 *  and meet the minimum chain work. Sets *fNewBlock when the block's
 *  transactions were not previously stored.
 */
bool CChainState::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, BlockValidationState& state, CBlockIndex** ppindex, bool fRequested, const FlatFilePos* dbp, bool* fNewBlock)
{
    const CBlock& block = *pblock;

    if (fNewBlock) *fNewBlock = false;

    CBlockIndex *pindexDummy = nullptr;
    CBlockIndex *&pindex = ppindex ? *ppindex : pindexDummy;

    bool accepted_header = m_blockman.AcceptBlockHeader(block, state, m_params, &pindex);
    CheckBlockIndex();

    if (!accepted_header)
        return false;

    // Try to process all requested blocks that we don't have, but only
    // process an unrequested block if it's new and has enough work to
    // advance our tip, and isn't too many blocks ahead.
    bool fAlreadyHave = pindex->nStatus & BLOCK_HAVE_DATA;
    bool fHasMoreOrSameWork = (m_chain.Tip() ? pindex->nChainWork >= m_chain.Tip()->nChainWork : true);
    // Blocks that are too out-of-order needlessly limit the effectiveness of
    // pruning, because pruning will not delete block files that contain any
    // blocks which are too close in height to the tip. Apply this test
    // regardless of whether pruning is enabled; it should generally be safe to
    // not process unrequested blocks.
    bool fTooFarAhead = (pindex->nHeight > int(m_chain.Height() + MIN_BLOCKS_TO_KEEP));

    // TODO: Decouple this function from the block download logic by removing fRequested
    // This requires some new chain data structure to efficiently look up if a
    // block is in a chain leading to a candidate for best tip, despite not
    // being such a candidate itself.

    // TODO: deal better with return value and error conditions for duplicate
    // and unrequested blocks.
    if (fAlreadyHave) return true;
    if (!fRequested) { // If we didn't ask for it:
        if (pindex->nTx != 0) return true; // This is a previously-processed block that was pruned
        if (!fHasMoreOrSameWork) return true; // Don't process less-work chains
        if (fTooFarAhead) return true; // Block height is too high

        // Protect against DoS attacks from low-work chains.
        // If our tip is behind, a peer could try to send us
        // low-work blocks on a fake chain that we would never
        // request; don't process these.
        if (pindex->nChainWork < nMinimumChainWork) return true;
    }

    if (!CheckBlock(block, state, m_params.GetConsensus()) ||
        !ContextualCheckBlock(block, state, m_params.GetConsensus(), pindex->pprev)) {
        // Cache the failure unless the block may merely be mutated: an
        // uncorrupted copy of a BLOCK_MUTATED block could still be valid.
        if (state.IsInvalid() && state.GetResult() != BlockValidationResult::BLOCK_MUTATED) {
            pindex->nStatus |= BLOCK_FAILED_VALID;
            setDirtyBlockIndex.insert(pindex);
        }
        return error("%s: %s", __func__, state.ToString());
    }

    // Header is valid/has work, merkle tree and segwit merkle tree are good...RELAY NOW
    // (but if it does not build on our best tip, let the SendMessages loop relay it)
    if (!IsInitialBlockDownload() && m_chain.Tip() == pindex->pprev)
        GetMainSignals().NewPoWValidBlock(pindex, pblock);

    // Write block to history file
    if (fNewBlock) *fNewBlock = true;
    try {
        FlatFilePos blockPos = SaveBlockToDisk(block, pindex->nHeight, m_chain, m_params, dbp);
        if (blockPos.IsNull()) {
            state.Error(strprintf("%s: Failed to find position to write new block to disk", __func__));
            return false;
        }
        ReceivedBlockTransactions(block, pindex, blockPos);
    } catch (const std::runtime_error& e) {
        // Disk errors are fatal: abort rather than continue with a
        // possibly inconsistent block store.
        return AbortNode(state, std::string("System error: ") + e.what());
    }

    CheckBlockIndex();

    return true;
}
3453 
/** Process a block received from the network or a miner: sanity-check it,
 *  store it to disk via AcceptBlock() under cs_main, then (outside the lock)
 *  try to activate the best chain. *new_block is set when the block's data
 *  was newly stored.
 */
bool ChainstateManager::ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<const CBlock>& block, bool force_processing, bool* new_block)
{
    {
        CBlockIndex *pindex = nullptr;
        if (new_block) *new_block = false;
        BlockValidationState state;

        // CheckBlock() does not support multi-threaded block validation because CBlock::fChecked can cause data race.
        // Therefore, the following critical section must include the CheckBlock() call as well.
        LOCK(cs_main);

        // Skipping AcceptBlock() for CheckBlock() failures means that we will never mark a block as invalid if
        // CheckBlock() fails. This is protective against consensus failure if there are any unknown forms of block
        // malleability that cause CheckBlock() to fail; see e.g. CVE-2012-2459 and
        // https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2019-February/016697.html. Because CheckBlock() is
        // not very expensive, the anti-DoS benefits of caching failure (of a definitely-invalid block) are not substantial.
        bool ret = CheckBlock(*block, state, chainparams.GetConsensus());
        if (ret) {
            // Store to disk
            ret = ActiveChainstate().AcceptBlock(block, state, &pindex, force_processing, nullptr, new_block);
        }
        if (!ret) {
            // Notify listeners (e.g. the miner) that the block was rejected.
            GetMainSignals().BlockChecked(*block, state);
            return error("%s: AcceptBlock FAILED (%s)", __func__, state.ToString());
        }
    }

    BlockValidationState state; // Only used to report errors, not invalidity - ignore it
    if (!ActiveChainstate().ActivateBestChain(state, block)) {
        return error("%s: ActivateBestChain failed (%s)", __func__, state.ToString());
    }

    return true;
}
3492 
    const CChainParams& chainparams,
    CChainState& chainstate,
    const CBlock& block,
    CBlockIndex* pindexPrev,
    bool fCheckPOW,
    bool fCheckMerkleRoot)
{
    // Dry-run validation of a block on top of the current tip: run all
    // header/contextual/block checks and connect it into a throwaway coins
    // view without mutating any chain state.
    assert(pindexPrev && pindexPrev == chainstate.m_chain.Tip());
    CCoinsViewCache viewNew(&chainstate.CoinsTip());
    uint256 block_hash(block.GetHash());
    // Temporary index entry representing the block at height tip+1.
    CBlockIndex indexDummy(block);
    indexDummy.pprev = pindexPrev;
    indexDummy.nHeight = pindexPrev->nHeight + 1;
    indexDummy.phashBlock = &block_hash;

    // NOTE: CheckBlockHeader is called by CheckBlock
    if (!ContextualCheckBlockHeader(block, state, chainstate.m_blockman, chainparams, pindexPrev, GetAdjustedTime()))
        return error("%s: Consensus::ContextualCheckBlockHeader: %s", __func__, state.ToString());
    if (!CheckBlock(block, state, chainparams.GetConsensus(), fCheckPOW, fCheckMerkleRoot))
        return error("%s: Consensus::CheckBlock: %s", __func__, state.ToString());
    if (!ContextualCheckBlock(block, state, chainparams.GetConsensus(), pindexPrev))
        return error("%s: Consensus::ContextualCheckBlock: %s", __func__, state.ToString());
    // Last argument (fJustCheck=true): validate scripts and UTXO updates but
    // do not write anything to disk.
    if (!chainstate.ConnectBlock(block, state, &indexDummy, viewNew, true)) {
        return false;
    }
    assert(state.IsValid());

    return true;
}
3524 
/** Mark one block file and all blocks stored in it as pruned: clear their
 *  have-data/have-undo flags and file positions, detach them from
 *  m_blocks_unlinked, and reset the file's metadata.
 */
void BlockManager::PruneOneBlockFile(const int fileNumber)
{
    for (const auto& entry : m_block_index) {
        CBlockIndex* pindex = entry.second;
        if (pindex->nFile == fileNumber) {
            pindex->nStatus &= ~BLOCK_HAVE_DATA;
            pindex->nStatus &= ~BLOCK_HAVE_UNDO;
            pindex->nFile = 0;
            pindex->nDataPos = 0;
            pindex->nUndoPos = 0;
            setDirtyBlockIndex.insert(pindex);

            // Prune from m_blocks_unlinked -- any block we prune would have
            // to be downloaded again in order to consider its chain, at which
            // point it would be considered as a candidate for
            // m_blocks_unlinked or setBlockIndexCandidates.
            auto range = m_blocks_unlinked.equal_range(pindex->pprev);
            while (range.first != range.second) {
                // Advance before a possible erase so the iterator stays valid.
                std::multimap<CBlockIndex *, CBlockIndex *>::iterator _it = range.first;
                range.first++;
                if (_it->second == pindex) {
                    m_blocks_unlinked.erase(_it);
                }
            }
        }
    }

    // Reset the file's metadata and queue the change for a DB write.
    vinfoBlockFile[fileNumber].SetNull();
    setDirtyFileInfo.insert(fileNumber);
}
3562 
3563 void BlockManager::FindFilesToPruneManual(std::set<int>& setFilesToPrune, int nManualPruneHeight, int chain_tip_height)
3564 {
3565  assert(fPruneMode && nManualPruneHeight > 0);
3566 
3568  if (chain_tip_height < 0) {
3569  return;
3570  }
3571 
3572  // last block to prune is the lesser of (user-specified height, MIN_BLOCKS_TO_KEEP from the tip)
3573  unsigned int nLastBlockWeCanPrune = std::min((unsigned)nManualPruneHeight, chain_tip_height - MIN_BLOCKS_TO_KEEP);
3574  int count = 0;
3575  for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
3576  if (vinfoBlockFile[fileNumber].nSize == 0 || vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune) {
3577  continue;
3578  }
3579  PruneOneBlockFile(fileNumber);
3580  setFilesToPrune.insert(fileNumber);
3581  count++;
3582  }
3583  LogPrintf("Prune (Manual): prune_height=%d removed %d blk/rev pairs\n", nLastBlockWeCanPrune, count);
3584 }
3585 
3586 /* This function is called from the RPC code for pruneblockchain */
3587 void PruneBlockFilesManual(CChainState& active_chainstate, int nManualPruneHeight)
3588 {
3589  BlockValidationState state;
3590  if (!active_chainstate.FlushStateToDisk(
3591  state, FlushStateMode::NONE, nManualPruneHeight)) {
3592  LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString());
3593  }
3594 }
3595 
/** Select block/undo files to delete so disk usage drops back under
 *  nPruneTarget. Files containing blocks above prune_height or within
 *  MIN_BLOCKS_TO_KEEP of the tip are never selected.
 */
void BlockManager::FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight, int chain_tip_height, int prune_height, bool is_ibd)
{
    // Nothing to do without a chain or when automatic pruning is disabled.
    if (chain_tip_height < 0 || nPruneTarget == 0) {
        return;
    }
    if ((uint64_t)chain_tip_height <= nPruneAfterHeight) {
        return;
    }

    unsigned int nLastBlockWeCanPrune = std::min(prune_height, chain_tip_height - static_cast<int>(MIN_BLOCKS_TO_KEEP));
    uint64_t nCurrentUsage = CalculateCurrentUsage();
    // We don't check to prune until after we've allocated new space for files
    // So we should leave a buffer under our target to account for another allocation
    // before the next pruning.
    uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
    uint64_t nBytesToPrune;
    int count = 0;

    if (nCurrentUsage + nBuffer >= nPruneTarget) {
        // On a prune event, the chainstate DB is flushed.
        // To avoid excessive prune events negating the benefit of high dbcache
        // values, we should not prune too rapidly.
        // So when pruning in IBD, increase the buffer a bit to avoid a re-prune too soon.
        if (is_ibd) {
            // Since this is only relevant during IBD, we use a fixed 10%
            nBuffer += nPruneTarget / 10;
        }

        for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
            nBytesToPrune = vinfoBlockFile[fileNumber].nSize + vinfoBlockFile[fileNumber].nUndoSize;

            // Skip files that hold no block data.
            if (vinfoBlockFile[fileNumber].nSize == 0) {
                continue;
            }

            if (nCurrentUsage + nBuffer < nPruneTarget) { // are we below our target?
                break;
            }

            // don't prune files that could have a block within MIN_BLOCKS_TO_KEEP of the main chain's tip but keep scanning
            if (vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune) {
                continue;
            }

            PruneOneBlockFile(fileNumber);
            // Queue up the files for removal
            setFilesToPrune.insert(fileNumber);
            nCurrentUsage -= nBytesToPrune;
            count++;
        }
    }

    LogPrint(BCLog::PRUNE, "Prune: target=%dMiB actual=%dMiB diff=%dMiB max_prune_height=%d removed %d blk/rev pairs\n",
             nPruneTarget/1024/1024, nCurrentUsage/1024/1024,
             ((int64_t)nPruneTarget - (int64_t)nCurrentUsage)/1024/1024,
             nLastBlockWeCanPrune, count);
}
3654 
{
    // A null hash represents "no block" (e.g. the prev-hash of genesis).
    if (hash.IsNull())
        return nullptr;

    // Return existing entry for this hash, if any.
    BlockMap::iterator mi = m_block_index.find(hash);
    if (mi != m_block_index.end())
        return (*mi).second;

    // Create new
    CBlockIndex* pindexNew = new CBlockIndex();
    mi = m_block_index.insert(std::make_pair(hash, pindexNew)).first;
    // Point the entry at the map's key so the hash is stored only once.
    pindexNew->phashBlock = &((*mi).first);

    return pindexNew;
}
3674 
    const Consensus::Params& consensus_params,
    std::set<CBlockIndex*, CBlockIndexWorkComparator>& block_index_candidates)
{
    // Load the raw block index entries from the block tree DB, creating a
    // CBlockIndex for each stored header via InsertBlockIndex.
    if (!m_block_tree_db->LoadBlockIndexGuts(consensus_params, [this](const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return this->InsertBlockIndex(hash); })) {
        return false;
    }

    // Calculate nChainWork
    std::vector<std::pair<int, CBlockIndex*> > vSortedByHeight;
    vSortedByHeight.reserve(m_block_index.size());
    for (const std::pair<const uint256, CBlockIndex*>& item : m_block_index)
    {
        CBlockIndex* pindex = item.second;
        vSortedByHeight.push_back(std::make_pair(pindex->nHeight, pindex));
    }
    // Process in ascending height order so a parent's nChainWork/nChainTx
    // is computed before any of its children are visited.
    sort(vSortedByHeight.begin(), vSortedByHeight.end());
    for (const std::pair<int, CBlockIndex*>& item : vSortedByHeight)
    {
        if (ShutdownRequested()) return false;
        CBlockIndex* pindex = item.second;
        pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex);
        pindex->nTimeMax = (pindex->pprev ? std::max(pindex->pprev->nTimeMax, pindex->nTime) : pindex->nTime);
        // We can link the chain of blocks for which we've received transactions at some point.
        // Pruned nodes may have deleted the block.
        if (pindex->nTx > 0) {
            if (pindex->pprev) {
                if (pindex->pprev->HaveTxsDownloaded()) {
                    pindex->nChainTx = pindex->pprev->nChainTx + pindex->nTx;
                } else {
                    // Parent's transactions are missing: remember this block
                    // so it can be linked when the parent's data arrives.
                    pindex->nChainTx = 0;
                    m_blocks_unlinked.insert(std::make_pair(pindex->pprev, pindex));
                }
            } else {
                pindex->nChainTx = pindex->nTx;
            }
        }
        // Propagate failure status from a failed parent to its descendants.
        if (!(pindex->nStatus & BLOCK_FAILED_MASK) && pindex->pprev && (pindex->pprev->nStatus & BLOCK_FAILED_MASK)) {
            pindex->nStatus |= BLOCK_FAILED_CHILD;
            setDirtyBlockIndex.insert(pindex);
        }
        // Blocks with transactions fully available (back to genesis) are
        // candidates for the best chain.
        if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && (pindex->HaveTxsDownloaded() || pindex->pprev == nullptr)) {
            block_index_candidates.insert(pindex);
        }
        if (pindex->nStatus & BLOCK_FAILED_MASK && (!pindexBestInvalid || pindex->nChainWork > pindexBestInvalid->nChainWork))
            pindexBestInvalid = pindex;
        if (pindex->pprev)
            pindex->BuildSkip();
        if (pindex->IsValid(BLOCK_VALID_TREE) && (pindexBestHeader == nullptr || CBlockIndexWorkComparator()(pindexBestHeader, pindex)))
            pindexBestHeader = pindex;
    }

    return true;
}
3729 
    m_failed_blocks.clear();
    m_blocks_unlinked.clear();

    // Free every CBlockIndex owned by the index map before clearing it.
    for (const BlockMap::value_type& entry : m_block_index) {
        delete entry.second;
    }

    m_block_index.clear();
}
3740 
/** Load the block index and block-file metadata from disk: fill
 *  setBlockIndexCandidates, read per-file info, verify that all blk files
 *  referenced by the index can be opened, and read the prune/reindex flags.
 */
bool BlockManager::LoadBlockIndexDB(std::set<CBlockIndex*, CBlockIndexWorkComparator>& setBlockIndexCandidates)
{
    if (!LoadBlockIndex(
            ::Params().GetConsensus(),
            setBlockIndexCandidates)) {
        return false;
    }

    // Load block file info
    m_block_tree_db->ReadLastBlockFile(nLastBlockFile);
    vinfoBlockFile.resize(nLastBlockFile + 1);
    LogPrintf("%s: last block file = %i\n", __func__, nLastBlockFile);
    for (int nFile = 0; nFile <= nLastBlockFile; nFile++) {
        m_block_tree_db->ReadBlockFileInfo(nFile, vinfoBlockFile[nFile]);
    }
    LogPrintf("%s: last block file info: %s\n", __func__, vinfoBlockFile[nLastBlockFile].ToString());
    // Also pick up any file info records past nLastBlockFile (presumably
    // left over from an unclean shutdown -- TODO confirm against writers).
    for (int nFile = nLastBlockFile + 1; true; nFile++) {
        CBlockFileInfo info;
        if (m_block_tree_db->ReadBlockFileInfo(nFile, info)) {
            vinfoBlockFile.push_back(info);
        } else {
            break;
        }
    }

    // Check presence of blk files
    LogPrintf("Checking all blk files are present...\n");
    std::set<int> setBlkDataFiles;
    for (const std::pair<const uint256, CBlockIndex*>& item : m_block_index) {
        CBlockIndex* pindex = item.second;
        if (pindex->nStatus & BLOCK_HAVE_DATA) {
            setBlkDataFiles.insert(pindex->nFile);
        }
    }
    // Fail startup if any referenced block file cannot be opened.
    for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++)
    {
        FlatFilePos pos(*it, 0);
        if (CAutoFile(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION).IsNull()) {
            return false;
        }
    }

    // Check whether we have ever pruned block & undo files
    m_block_tree_db->ReadFlag("prunedblockfiles", fHavePruned);
    if (fHavePruned)
        LogPrintf("LoadBlockIndexDB(): Block files have previously been pruned\n");

    // Check whether we need to continue reindexing
    bool fReindexing = false;
    m_block_tree_db->ReadReindexing(fReindexing);
    if(fReindexing) fReindex = true;

    return true;
}
3795 
{
    // No-op when this chainstate has no mempool attached.
    if (!m_mempool) return;
    // Load the persisted mempool only when -persistmempool is enabled.
    if (args.GetArg("-persistmempool", DEFAULT_PERSIST_MEMPOOL)) {
        ::LoadMempool(*m_mempool, *this);
    }
}
3804 
{
    // Point m_chain at the block index entry matching the coins database's
    // recorded best block.
    const CCoinsViewCache& coins_cache = CoinsTip();
    assert(!coins_cache.GetBestBlock().IsNull()); // Never called when the coins view is empty
    const CBlockIndex* tip = m_chain.Tip();

    // Fast path: the chain already points at the coins view's best block.
    if (tip && tip->GetBlockHash() == coins_cache.GetBestBlock()) {
        return true;
    }

    // Load pointer to end of best chain
    CBlockIndex* pindex = m_blockman.LookupBlockIndex(coins_cache.GetBestBlock());
    if (!pindex) {
        // The coins DB references a block we don't have an index entry for.
        return false;
    }
    m_chain.SetTip(pindex);

    tip = m_chain.Tip();
    LogPrintf("Loaded best chain: hashBestChain=%s height=%d date=%s progress=%f\n",
        tip->GetBlockHash().ToString(),
        m_chain.Height(),
    return true;
}
3832 
{
    // Signal verification start to the UI: 0% progress, not cancellable.
    uiInterface.ShowProgress(_("Verifying blocks…").translated, 0, false);
}
3837 
{
    // Clear the verification progress indicator (100%, empty title).
    uiInterface.ShowProgress("", 100, false);
}
3842 
    CChainState& chainstate,
    const CChainParams& chainparams,
    CCoinsView& coinsview,
    int nCheckLevel, int nCheckDepth)
{
    // Nothing to verify until at least one block beyond genesis is connected.
    if (chainstate.m_chain.Tip() == nullptr || chainstate.m_chain.Tip()->pprev == nullptr)
        return true;

    // Verify blocks in the best chain
    if (nCheckDepth <= 0 || nCheckDepth > chainstate.m_chain.Height())
        nCheckDepth = chainstate.m_chain.Height();
    // Clamp the requested check level to the supported range [0, 4].
    nCheckLevel = std::max(0, std::min(4, nCheckLevel));
    LogPrintf("Verifying last %i blocks at level %i\n", nCheckDepth, nCheckLevel);
    // Scratch cache layered over the supplied view, so the level-3/4
    // disconnect/reconnect checks never modify the underlying coins db.
    CCoinsViewCache coins(&coinsview);
    CBlockIndex* pindex;
    CBlockIndex* pindexFailure = nullptr; // deepest block where a level-3 disconnect was unclean
    int nGoodTransactions = 0;            // transactions verified in blocks below any unclean one
    BlockValidationState state;
    int reportDone = 0;
    LogPrintf("[0%%]..."); /* Continued */

    // NOTE(review): the name suggests "this is a snapshot chainstate", but
    // the expression is true when m_from_snapshot_blockhash is *empty* —
    // verify the intended polarity against upstream.
    const bool is_snapshot_cs{!chainstate.m_from_snapshot_blockhash};

    for (pindex = chainstate.m_chain.Tip(); pindex && pindex->pprev; pindex = pindex->pprev) {
        // Progress across the checked range; level 4 reserves the second half
        // of the progress bar for the reconnect pass below (hence the 50).
        const int percentageDone = std::max(1, std::min(99, (int)(((double)(chainstate.m_chain.Height() - pindex->nHeight)) / (double)nCheckDepth * (nCheckLevel >= 4 ? 50 : 100))));
        if (reportDone < percentageDone/10) {
            // report every 10% step
            LogPrintf("[%d%%]...", percentageDone); /* Continued */
            reportDone = percentageDone/10;
        }
        uiInterface.ShowProgress(_("Verifying blocks…").translated, percentageDone, false);
        if (pindex->nHeight <= chainstate.m_chain.Height()-nCheckDepth)
            break;
        if ((fPruneMode || is_snapshot_cs) && !(pindex->nStatus & BLOCK_HAVE_DATA)) {
            // If pruning or running under an assumeutxo snapshot, only go
            // back as far as we have data.
            LogPrintf("VerifyDB(): block verification stopping at height %d (pruning, no data)\n", pindex->nHeight);
            break;
        }
        CBlock block;
        // check level 0: read from disk
        if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus()))
            return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
        // check level 1: verify block validity
        if (nCheckLevel >= 1 && !CheckBlock(block, state, chainparams.GetConsensus()))
            return error("%s: *** found bad block at %d, hash=%s (%s)\n", __func__,
                         pindex->nHeight, pindex->GetBlockHash().ToString(), state.ToString());
        // check level 2: verify undo validity
        if (nCheckLevel >= 2 && pindex) {
            CBlockUndo undo;
            if (!pindex->GetUndoPos().IsNull()) {
                if (!UndoReadFromDisk(undo, pindex)) {
                    return error("VerifyDB(): *** found bad undo data at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
                }
            }
        }
        // check level 3: check for inconsistencies during memory-only disconnect of tip blocks
        // Only attempt the in-memory disconnect while the combined cache
        // usage still fits within the configured coinstip budget.
        size_t curr_coins_usage = coins.DynamicMemoryUsage() + chainstate.CoinsTip().DynamicMemoryUsage();

        if (nCheckLevel >= 3 && curr_coins_usage <= chainstate.m_coinstip_cache_size_bytes) {
            assert(coins.GetBestBlock() == pindex->GetBlockHash());
            DisconnectResult res = chainstate.DisconnectBlock(block, pindex, coins);
            if (res == DISCONNECT_FAILED) {
                return error("VerifyDB(): *** irrecoverable inconsistency in block data at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
            }
            if (res == DISCONNECT_UNCLEAN) {
                // Unclean: reset the good-transaction count and remember the
                // deepest unclean block for the summary error below.
                nGoodTransactions = 0;
                pindexFailure = pindex;
            } else {
                nGoodTransactions += block.vtx.size();
            }
        }
        // A user-requested shutdown is not a verification failure.
        if (ShutdownRequested()) return true;
    }
    if (pindexFailure)
        return error("VerifyDB(): *** coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", chainstate.m_chain.Height() - pindexFailure->nHeight + 1, nGoodTransactions);

    // store block count as we move pindex at check level >= 4
    int block_count = chainstate.m_chain.Height() - pindex->nHeight;

    // check level 4: try reconnecting blocks
    if (nCheckLevel >= 4) {
        while (pindex != chainstate.m_chain.Tip()) {
            // Second half of the progress bar (50..99) for the reconnect pass.
            const int percentageDone = std::max(1, std::min(99, 100 - (int)(((double)(chainstate.m_chain.Height() - pindex->nHeight)) / (double)nCheckDepth * 50)));
            if (reportDone < percentageDone/10) {
                // report every 10% step
                LogPrintf("[%d%%]...", percentageDone); /* Continued */
                reportDone = percentageDone/10;
            }
            uiInterface.ShowProgress(_("Verifying blocks…").translated, percentageDone, false);
            pindex = chainstate.m_chain.Next(pindex);
            CBlock block;
            if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus()))
                return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
            if (!chainstate.ConnectBlock(block, state, pindex, coins)) {
                return error("VerifyDB(): *** found unconnectable block at %d, hash=%s (%s)", pindex->nHeight, pindex->GetBlockHash().ToString(), state.ToString());
            }
            if (ShutdownRequested()) return true;
        }
    }

    LogPrintf("[DONE].\n");
    LogPrintf("No coin database inconsistencies in last %i blocks (%i transactions)\n", block_count, nGoodTransactions);

    return true;
}
3952 
{
    // TODO: merge with ConnectBlock
    CBlock block;
    if (!ReadBlockFromDisk(block, pindex, m_params.GetConsensus())) {
        return error("ReplayBlock(): ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
    }

    // Re-apply the block's effects on the UTXO view: spend every input of
    // each non-coinbase transaction, then (re-)create the outputs.
    for (const CTransactionRef& tx : block.vtx) {
        if (!tx->IsCoinBase()) {
            for (const CTxIn &txin : tx->vin) {
                inputs.SpendCoin(txin.prevout);
            }
        }
        // Pass check = true as every addition may be an overwrite.
        AddCoins(inputs, *tx, pindex->nHeight, true);
    }
    return true;
}
3973 
{
    LOCK(cs_main);

    CCoinsView& db = this->CoinsDB();
    CCoinsViewCache cache(&db);

    // A non-empty head-blocks vector means a previous flush was interrupted;
    // it records the two tips the on-disk UTXO set currently lies between
    // ([0] = new tip being flushed to, [1] = old tip flushed from).
    std::vector<uint256> hashHeads = db.GetHeadBlocks();
    if (hashHeads.empty()) return true; // We're already in a consistent state.
    if (hashHeads.size() != 2) return error("ReplayBlocks(): unknown inconsistent state");

    uiInterface.ShowProgress(_("Replaying blocks…").translated, 0, false);
    LogPrintf("Replaying blocks\n");

    const CBlockIndex* pindexOld = nullptr;  // Old tip during the interrupted flush.
    const CBlockIndex* pindexNew;            // New tip during the interrupted flush.
    const CBlockIndex* pindexFork = nullptr; // Latest block common to both the old and the new tip.

    if (m_blockman.m_block_index.count(hashHeads[0]) == 0) {
        return error("ReplayBlocks(): reorganization to unknown block requested");
    }
    pindexNew = m_blockman.m_block_index[hashHeads[0]];

    if (!hashHeads[1].IsNull()) { // The old tip is allowed to be 0, indicating it's the first flush.
        if (m_blockman.m_block_index.count(hashHeads[1]) == 0) {
            return error("ReplayBlocks(): reorganization from unknown block requested");
        }
        pindexOld = m_blockman.m_block_index[hashHeads[1]];
        pindexFork = LastCommonAncestor(pindexOld, pindexNew);
        assert(pindexFork != nullptr);
    }

    // Rollback along the old branch.
    while (pindexOld != pindexFork) {
        if (pindexOld->nHeight > 0) { // Never disconnect the genesis block.
            CBlock block;
            if (!ReadBlockFromDisk(block, pindexOld, m_params.GetConsensus())) {
                return error("RollbackBlock(): ReadBlockFromDisk() failed at %d, hash=%s", pindexOld->nHeight, pindexOld->GetBlockHash().ToString());
            }
            LogPrintf("Rolling back %s (%i)\n", pindexOld->GetBlockHash().ToString(), pindexOld->nHeight);
            DisconnectResult res = DisconnectBlock(block, pindexOld, cache);
            if (res == DISCONNECT_FAILED) {
                return error("RollbackBlock(): DisconnectBlock failed at %d, hash=%s", pindexOld->nHeight, pindexOld->GetBlockHash().ToString());
            }
            // If DISCONNECT_UNCLEAN is returned, it means a non-existing UTXO was deleted, or an existing UTXO was
            // overwritten. It corresponds to cases where the block-to-be-disconnect never had all its operations
            // applied to the UTXO set. However, as both writing a UTXO and deleting a UTXO are idempotent operations,
            // the result is still a version of the UTXO set with the effects of that block undone.
        }
        pindexOld = pindexOld->pprev;
    }

    // Roll forward from the forking point to the new tip.
    int nForkHeight = pindexFork ? pindexFork->nHeight : 0;
    for (int nHeight = nForkHeight + 1; nHeight <= pindexNew->nHeight; ++nHeight) {
        const CBlockIndex* pindex = pindexNew->GetAncestor(nHeight);
        LogPrintf("Rolling forward %s (%i)\n", pindex->GetBlockHash().ToString(), nHeight);
        uiInterface.ShowProgress(_("Replaying blocks…").translated, (int) ((nHeight - nForkHeight) * 100.0 / (pindexNew->nHeight - nForkHeight)) , false);
        if (!RollforwardBlock(pindex, cache)) return false;
    }

    // Record the now-consistent best block and persist the repaired UTXO set.
    cache.SetBestBlock(pindexNew->GetBlockHash());
    cache.Flush();
    uiInterface.ShowProgress("", 100, false);
    return true;
}
4040 
{
    // At and above m_params.SegwitHeight, segwit consensus rules must be validated
    CBlockIndex* block{m_chain.Tip()};

    // Walk back from the tip through every block where the segwit deployment
    // is active; if any of them was connected without the BLOCK_OPT_WITNESS
    // flag, its witness data was never validated, so signal a redownload.
    while (block != nullptr && DeploymentActiveAt(*block, m_params.GetConsensus(), Consensus::DEPLOYMENT_SEGWIT)) {
        if (!(block->nStatus & BLOCK_OPT_WITNESS)) {
            // block is insufficiently validated for a segwit client
            return true;
        }
        block = block->pprev;
    }

    return false;
}
4058 
    // Reset the block-arrival sequence counter and drop all candidate tips;
    // both are rebuilt when the block index is loaded again.
    nBlockSequenceId = 1;
    setBlockIndexCandidates.clear();
}
4063 
// May NOT be used after any connections are up as much
// of the peer-processing logic assumes a consistent
// block index state
{
    LOCK(cs_main);
    // Unload the chainstates managed by the ChainstateManager first.
    chainman.Unload();
    // Clear global best-header/best-invalid pointers into the (now gone) index.
    pindexBestInvalid = nullptr;
    pindexBestHeader = nullptr;
    if (mempool) mempool->clear();
    // Reset block-file bookkeeping and the dirty-state tracking sets.
    vinfoBlockFile.clear();
    nLastBlockFile = 0;
    setDirtyBlockIndex.clear();
    setDirtyFileInfo.clear();

    // Drop the cached per-bit versionbits warning state.
    for (int b = 0; b < VERSIONBITS_NUM_BITS; b++) {
        warningcache[b].clear();
    }
    fHavePruned = false;
}
4084 
{
    // Load block index from databases
    // A reindex rebuilds the index from block files, so the database is only
    // consulted when -reindex is not in effect.
    bool needs_init = fReindex;
    if (!fReindex) {
        bool ret = m_blockman.LoadBlockIndexDB(ActiveChainstate().setBlockIndexCandidates);
        if (!ret) return false;
        // An empty index (fresh datadir) also requires initialization.
        needs_init = m_blockman.m_block_index.empty();
    }

    if (needs_init) {
        // Everything here is for *new* reindex/DBs. Thus, though
        // LoadBlockIndexDB may have set fReindex if we shut down
        // mid-reindex previously, we don't check fReindex and
        // instead only check it prior to LoadBlockIndexDB to set
        // needs_init.

        LogPrintf("Initializing databases...\n");
    }
    return true;
}
4107 
4109 {
4110  LOCK(cs_main);
4111 
4112  // Check whether we're already initialized by checking for genesis in
4113  // m_blockman.m_block_index. Note that we can't use m_chain here, since it is
4114  // set based on the coins db, not the block index db, which is the only
4115  // thing loaded at this point.
4116  if (m_blockman.m_block_index.count(m_params.GenesisBlock().GetHash()))
4117  return true;
4118 
4119  try {
4120  const CBlock& block = m_params.GenesisBlock();
4121  FlatFilePos blockPos = SaveBlockToDisk(block, 0, m_chain, m_params, nullptr);
4122  if (blockPos.IsNull())
4123  return error("%s: writing genesis block to disk failed", __func__);
4124  CBlockIndex *pindex = m_blockman.AddToBlockIndex(block);
4125  ReceivedBlockTransactions(block, pindex, blockPos);
4126  } catch (const std::runtime_error& e) {
4127  return error("%s: failed to write genesis block: %s", __func__, e.what());
4128  }
4129 
4130  return true;
4131 }
4132 
void CChainState::LoadExternalBlockFile(FILE* fileIn, FlatFilePos* dbp)
{
    // Map of disk positions for blocks with unknown parent (only used for reindex)
    static std::multimap<uint256, FlatFilePos> mapBlocksUnknownParent;
    int64_t nStart = GetTimeMillis();

    int nLoaded = 0; // number of blocks accepted from this file
    try {
        // This takes over fileIn and calls fclose() on it in the CBufferedFile destructor

        uint64_t nRewind = blkdat.GetPos();
        // Scan the file for block records framed by the network magic bytes.
        while (!blkdat.eof()) {
            if (ShutdownRequested()) return;

            blkdat.SetPos(nRewind);
            nRewind++; // start one byte further next time, in case of failure
            blkdat.SetLimit(); // remove former limit
            unsigned int nSize = 0;
            try {
                // locate a header
                unsigned char buf[CMessageHeader::MESSAGE_START_SIZE];
                // Skip ahead to the next occurrence of the first magic byte,
                // then read the full message-start sequence.
                blkdat.FindByte(m_params.MessageStart()[0]);
                nRewind = blkdat.GetPos()+1;
                blkdat >> buf;
                    continue;
                }
                // read size
                blkdat >> nSize;
                // Reject sizes smaller than a bare header or larger than the
                // maximum serialized block.
                if (nSize < 80 || nSize > MAX_BLOCK_SERIALIZED_SIZE)
                    continue;
            } catch (const std::exception&) {
                // no valid block header found; don't complain
                break;
            }
            try {
                // read block
                uint64_t nBlockPos = blkdat.GetPos();
                if (dbp)
                    dbp->nPos = nBlockPos;
                blkdat.SetLimit(nBlockPos + nSize);
                std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
                CBlock& block = *pblock;
                blkdat >> block;
                nRewind = blkdat.GetPos();

                uint256 hash = block.GetHash();
                {
                    LOCK(cs_main);
                    // detect out of order blocks, and store them for later
                    if (hash != m_params.GetConsensus().hashGenesisBlock && !m_blockman.LookupBlockIndex(block.hashPrevBlock)) {
                        LogPrint(BCLog::REINDEX, "%s: Out of order block %s, parent %s not known\n", __func__, hash.ToString(),
                                 block.hashPrevBlock.ToString());
                        if (dbp)
                            mapBlocksUnknownParent.insert(std::make_pair(block.hashPrevBlock, *dbp));
                        continue;
                    }

                    // process in case the block isn't known yet
                    CBlockIndex* pindex = m_blockman.LookupBlockIndex(hash);
                    if (!pindex || (pindex->nStatus & BLOCK_HAVE_DATA) == 0) {
                        BlockValidationState state;
                        if (AcceptBlock(pblock, state, nullptr, true, dbp, nullptr)) {
                            nLoaded++;
                        }
                        if (state.IsError()) {
                            break;
                        }
                    } else if (hash != m_params.GetConsensus().hashGenesisBlock && pindex->nHeight % 1000 == 0) {
                        LogPrint(BCLog::REINDEX, "Block Import: already had block %s at height %d\n", hash.ToString(), pindex->nHeight);
                    }
                }

                // Activate the genesis block so normal node progress can continue
                if (hash == m_params.GetConsensus().hashGenesisBlock) {
                    BlockValidationState state;
                    if (!ActivateBestChain(state, nullptr)) {
                        break;
                    }
                }

                NotifyHeaderTip(*this);

                // Recursively process earlier encountered successors of this block
                std::deque<uint256> queue;
                queue.push_back(hash);
                while (!queue.empty()) {
                    uint256 head = queue.front();
                    queue.pop_front();
                    // All out-of-order blocks whose parent is 'head'.
                    std::pair<std::multimap<uint256, FlatFilePos>::iterator, std::multimap<uint256, FlatFilePos>::iterator> range = mapBlocksUnknownParent.equal_range(head);
                    while (range.first != range.second) {
                        std::multimap<uint256, FlatFilePos>::iterator it = range.first;
                        std::shared_ptr<CBlock> pblockrecursive = std::make_shared<CBlock>();
                        if (ReadBlockFromDisk(*pblockrecursive, it->second, m_params.GetConsensus())) {
                            LogPrint(BCLog::REINDEX, "%s: Processing out of order child %s of %s\n", __func__, pblockrecursive->GetHash().ToString(),
                                     head.ToString());
                            LOCK(cs_main);
                            BlockValidationState dummy;
                            if (AcceptBlock(pblockrecursive, dummy, nullptr, true, &it->second, nullptr)) {
                                nLoaded++;
                                // A newly accepted child may itself unlock
                                // further queued descendants.
                                queue.push_back(pblockrecursive->GetHash());
                            }
                        }
                        range.first++;
                        mapBlocksUnknownParent.erase(it);
                        NotifyHeaderTip(*this);
                    }
                }
            } catch (const std::exception& e) {
                LogPrintf("%s: Deserialize or I/O error - %s\n", __func__, e.what());
            }
        }
    } catch (const std::runtime_error& e) {
        AbortNode(std::string("System error: ") + e.what());
    }
    LogPrintf("Loaded %i blocks from external file in %dms\n", nLoaded, GetTimeMillis() - nStart);
}
4250 
{
    // Expensive self-consistency check of the entire block index against the
    // active chain and related structures; only runs when fCheckBlockIndex
    // is enabled.
    if (!fCheckBlockIndex) {
        return;
    }

    LOCK(cs_main);

    // During a reindex, we read the genesis block and call CheckBlockIndex before ActivateBestChain,
    // so we have the genesis block in m_blockman.m_block_index but no active chain. (A few of the
    // tests when iterating the block tree require that m_chain has been initialized.)
    if (m_chain.Height() < 0) {
        assert(m_blockman.m_block_index.size() <= 1);
        return;
    }

    // Build forward-pointing map of the entire block tree.
    std::multimap<CBlockIndex*,CBlockIndex*> forward;
    for (const std::pair<const uint256, CBlockIndex*>& entry : m_blockman.m_block_index) {
        forward.insert(std::make_pair(entry.second->pprev, entry.second));
    }

    assert(forward.size() == m_blockman.m_block_index.size());

    // The genesis block is the unique entry with a null parent.
    std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeGenesis = forward.equal_range(nullptr);
    CBlockIndex *pindex = rangeGenesis.first->second;
    rangeGenesis.first++;
    assert(rangeGenesis.first == rangeGenesis.second); // There is only one index entry with parent nullptr.

    // Iterate over the entire block tree, using depth-first search.
    // Along the way, remember whether there are blocks on the path from genesis
    // block being explored which are the first to have certain properties.
    size_t nNodes = 0;
    int nHeight = 0;
    CBlockIndex* pindexFirstInvalid = nullptr; // Oldest ancestor of pindex which is invalid.
    CBlockIndex* pindexFirstMissing = nullptr; // Oldest ancestor of pindex which does not have BLOCK_HAVE_DATA.
    CBlockIndex* pindexFirstNeverProcessed = nullptr; // Oldest ancestor of pindex for which nTx == 0.
    CBlockIndex* pindexFirstNotTreeValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_TREE (regardless of being valid or not).
    CBlockIndex* pindexFirstNotTransactionsValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS (regardless of being valid or not).
    CBlockIndex* pindexFirstNotChainValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN (regardless of being valid or not).
    CBlockIndex* pindexFirstNotScriptsValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS (regardless of being valid or not).
    while (pindex != nullptr) {
        nNodes++;
        // Update the "first ancestor with property X" trackers for this path.
        if (pindexFirstInvalid == nullptr && pindex->nStatus & BLOCK_FAILED_VALID) pindexFirstInvalid = pindex;
        if (pindexFirstMissing == nullptr && !(pindex->nStatus & BLOCK_HAVE_DATA)) pindexFirstMissing = pindex;
        if (pindexFirstNeverProcessed == nullptr && pindex->nTx == 0) pindexFirstNeverProcessed = pindex;
        if (pindex->pprev != nullptr && pindexFirstNotTreeValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TREE) pindexFirstNotTreeValid = pindex;
        if (pindex->pprev != nullptr && pindexFirstNotTransactionsValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TRANSACTIONS) pindexFirstNotTransactionsValid = pindex;
        if (pindex->pprev != nullptr && pindexFirstNotChainValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_CHAIN) pindexFirstNotChainValid = pindex;
        if (pindex->pprev != nullptr && pindexFirstNotScriptsValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_SCRIPTS) pindexFirstNotScriptsValid = pindex;

        // Begin: actual consistency checks.
        if (pindex->pprev == nullptr) {
            // Genesis block checks.
            assert(pindex->GetBlockHash() == m_params.GetConsensus().hashGenesisBlock); // Genesis block's hash must match.
            assert(pindex == m_chain.Genesis()); // The current active chain's genesis block must be this block.
        }
        if (!pindex->HaveTxsDownloaded()) assert(pindex->nSequenceId <= 0); // nSequenceId can't be set positive for blocks that aren't linked (negative is used for preciousblock)
        // VALID_TRANSACTIONS is equivalent to nTx > 0 for all nodes (whether or not pruning has occurred).
        // HAVE_DATA is only equivalent to nTx > 0 (or VALID_TRANSACTIONS) if no pruning has occurred.
        if (!fHavePruned) {
            // If we've never pruned, then HAVE_DATA should be equivalent to nTx > 0
            assert(!(pindex->nStatus & BLOCK_HAVE_DATA) == (pindex->nTx == 0));
            assert(pindexFirstMissing == pindexFirstNeverProcessed);
        } else {
            // If we have pruned, then we can only say that HAVE_DATA implies nTx > 0
            if (pindex->nStatus & BLOCK_HAVE_DATA) assert(pindex->nTx > 0);
        }
        if (pindex->nStatus & BLOCK_HAVE_UNDO) assert(pindex->nStatus & BLOCK_HAVE_DATA);
        assert(((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TRANSACTIONS) == (pindex->nTx > 0)); // This is pruning-independent.
        // All parents having had data (at some point) is equivalent to all parents being VALID_TRANSACTIONS, which is equivalent to HaveTxsDownloaded().
        assert((pindexFirstNeverProcessed == nullptr) == pindex->HaveTxsDownloaded());
        assert((pindexFirstNotTransactionsValid == nullptr) == pindex->HaveTxsDownloaded());
        assert(pindex->nHeight == nHeight); // nHeight must be consistent.
        assert(pindex->pprev == nullptr || pindex->nChainWork >= pindex->pprev->nChainWork); // For every block except the genesis block, the chainwork must be larger than the parent's.
        assert(nHeight < 2 || (pindex->pskip && (pindex->pskip->nHeight < nHeight))); // The pskip pointer must point back for all but the first 2 blocks.
        assert(pindexFirstNotTreeValid == nullptr); // All m_blockman.m_block_index entries must at least be TREE valid
        if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TREE) assert(pindexFirstNotTreeValid == nullptr); // TREE valid implies all parents are TREE valid
        if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_CHAIN) assert(pindexFirstNotChainValid == nullptr); // CHAIN valid implies all parents are CHAIN valid
        if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_SCRIPTS) assert(pindexFirstNotScriptsValid == nullptr); // SCRIPTS valid implies all parents are SCRIPTS valid
        if (pindexFirstInvalid == nullptr) {
            // Checks for not-invalid blocks.
            assert((pindex->nStatus & BLOCK_FAILED_MASK) == 0); // The failed mask cannot be set for blocks without invalid parents.
        }
        if (!CBlockIndexWorkComparator()(pindex, m_chain.Tip()) && pindexFirstNeverProcessed == nullptr) {
            if (pindexFirstInvalid == nullptr) {
                // If this block sorts at least as good as the current tip and
                // is valid and we have all data for its parents, it must be in
                // setBlockIndexCandidates. m_chain.Tip() must also be there
                // even if some data has been pruned.
                if (pindexFirstMissing == nullptr || pindex == m_chain.Tip()) {
                    assert(setBlockIndexCandidates.count(pindex));
                }
                // If some parent is missing, then it could be that this block was in
                // setBlockIndexCandidates but had to be removed because of the missing data.
                // In this case it must be in m_blocks_unlinked -- see test below.
            }
        } else { // If this block sorts worse than the current tip or some ancestor's block has never been seen, it cannot be in setBlockIndexCandidates.
            assert(setBlockIndexCandidates.count(pindex) == 0);
        }
        // Check whether this block is in m_blocks_unlinked.
        std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeUnlinked = m_blockman.m_blocks_unlinked.equal_range(pindex->pprev);
        bool foundInUnlinked = false;
        while (rangeUnlinked.first != rangeUnlinked.second) {
            assert(rangeUnlinked.first->first == pindex->pprev);
            if (rangeUnlinked.first->second == pindex) {
                foundInUnlinked = true;
                break;
            }
            rangeUnlinked.first++;
        }
        if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed != nullptr && pindexFirstInvalid == nullptr) {
            // If this block has block data available, some parent was never received, and has no invalid parents, it must be in m_blocks_unlinked.
            assert(foundInUnlinked);
        }
        if (!(pindex->nStatus & BLOCK_HAVE_DATA)) assert(!foundInUnlinked); // Can't be in m_blocks_unlinked if we don't HAVE_DATA
        if (pindexFirstMissing == nullptr) assert(!foundInUnlinked); // We aren't missing data for any parent -- cannot be in m_blocks_unlinked.
        if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed == nullptr && pindexFirstMissing != nullptr) {
            // We HAVE_DATA for this block, have received data for all parents at some point, but we're currently missing data for some parent.
            assert(fHavePruned); // We must have pruned.
            // This block may have entered m_blocks_unlinked if:
            //  - it has a descendant that at some point had more work than the
            //    tip, and
            //  - we tried switching to that descendant but were missing
            //    data for some intermediate block between m_chain and the
            //    tip.
            // So if this block is itself better than m_chain.Tip() and it wasn't in
            // setBlockIndexCandidates, then it must be in m_blocks_unlinked.
            if (!CBlockIndexWorkComparator()(pindex, m_chain.Tip()) && setBlockIndexCandidates.count(pindex) == 0) {
                if (pindexFirstInvalid == nullptr) {
                    assert(foundInUnlinked);
                }
            }
        }
        // assert(pindex->GetBlockHash() == pindex->GetBlockHeader().GetHash()); // Perhaps too slow
        // End: actual consistency checks.

        // Try descending into the first subnode.
        std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> range = forward.equal_range(pindex);
        if (range.first != range.second) {
            // A subnode was found.
            pindex = range.first->second;
            nHeight++;
            continue;
        }
        // This is a leaf node.
        // Move upwards until we reach a node of which we have not yet visited the last child.
        while (pindex) {
            // We are going to either move to a parent or a sibling of pindex.
            // If pindex was the first with a certain property, unset the corresponding variable.
            if (pindex == pindexFirstInvalid) pindexFirstInvalid = nullptr;
            if (pindex == pindexFirstMissing) pindexFirstMissing = nullptr;
            if (pindex == pindexFirstNeverProcessed) pindexFirstNeverProcessed = nullptr;
            if (pindex == pindexFirstNotTreeValid) pindexFirstNotTreeValid = nullptr;
            if (pindex == pindexFirstNotTransactionsValid) pindexFirstNotTransactionsValid = nullptr;
            if (pindex == pindexFirstNotChainValid) pindexFirstNotChainValid = nullptr;
            if (pindex == pindexFirstNotScriptsValid) pindexFirstNotScriptsValid = nullptr;
            // Find our parent.
            CBlockIndex* pindexPar = pindex->pprev;
            // Find which child we just visited.
            std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangePar = forward.equal_range(pindexPar);
            while (rangePar.first->second != pindex) {
                assert(rangePar.first != rangePar.second); // Our parent must have at least the node we're coming from as child.
                rangePar.first++;
            }
            // Proceed to the next one.
            rangePar.first++;
            if (rangePar.first != rangePar.second) {
                // Move to the sibling.
                pindex = rangePar.first->second;
                break;
            } else {
                // Move up further.
                pindex = pindexPar;
                nHeight--;
                continue;
            }
        }
    }

    // Check that we actually traversed the entire map.
    assert(nNodes == forward.size());
}
4434 
4435 std::string CChainState::ToString()
4436 {
4437  CBlockIndex* tip = m_chain.Tip();
4438  return strprintf("Chainstate [%s] @ height %d (%s)",
4439  m_from_snapshot_blockhash ? "snapshot" : "ibd",
4440  tip ? tip->nHeight : -1, tip ? tip->GetBlockHash().ToString() : "null");
4441 }
4442 
bool CChainState::ResizeCoinsCaches(size_t coinstip_size, size_t coinsdb_size)
{
    if (coinstip_size == m_coinstip_cache_size_bytes &&
        coinsdb_size == m_coinsdb_cache_size_bytes) {
        // Cache sizes are unchanged, no need to continue.
        return true;
    }
    // Remember the previous in-memory (coinstip) budget so we can tell below
    // whether that cache grew or shrank.
    size_t old_coinstip_size = m_coinstip_cache_size_bytes;
    m_coinstip_cache_size_bytes = coinstip_size;
    m_coinsdb_cache_size_bytes = coinsdb_size;
    CoinsDB().ResizeCache(coinsdb_size);

    LogPrintf("[%s] resized coinsdb cache to %.1f MiB\n",
              this->ToString(), coinsdb_size * (1.0 / 1024 / 1024));
    LogPrintf("[%s] resized coinstip cache to %.1f MiB\n",
              this->ToString(), coinstip_size * (1.0 / 1024 / 1024));

    BlockValidationState state;
    bool ret;

    if (coinstip_size > old_coinstip_size) {
        // Likely no need to flush if cache sizes have grown.
    } else {
        // Otherwise, flush state to disk and deallocate the in-memory coins map.
    }
    return ret;
}
4473 
4474 static const uint64_t MEMPOOL_DUMP_VERSION = 1;
4475 
bool LoadMempool(CTxMemPool& pool, CChainState& active_chainstate, FopenFn mockable_fopen_function)
{
    const CChainParams& chainparams = Params();
    // Transactions older than -mempoolexpiry (hours, converted to seconds
    // here) are not re-accepted.
    int64_t nExpiryTimeout = gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60;
    FILE* filestr{mockable_fopen_function(gArgs.GetDataDirNet() / "mempool.dat", "rb")};
    CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);
    if (file.IsNull()) {
        LogPrintf("Failed to open mempool file from disk. Continuing anyway.\n");
        return false;
    }

    // Counters for the summary log line at the end.
    int64_t count = 0;
    int64_t expired = 0;
    int64_t failed = 0;
    int64_t already_there = 0;
    int64_t unbroadcast = 0;
    int64_t nNow = GetTime();

    try {
        uint64_t version;
        file >> version;
        if (version != MEMPOOL_DUMP_VERSION) {
            return false;
        }
        uint64_t num;
        file >> num;
        while (num--) {
            CTransactionRef tx;
            int64_t nTime;
            int64_t nFeeDelta;
            // Read order must match what DumpMempool writes:
            // transaction, acceptance time, fee delta.
            file >> tx;
            file >> nTime;
            file >> nFeeDelta;

            CAmount amountdelta = nFeeDelta;
            if (amountdelta) {
                // Re-apply any fee prioritisation recorded at dump time.
                pool.PrioritiseTransaction(tx->GetHash(), amountdelta);
            }
            if (nTime > nNow - nExpiryTimeout) {
                LOCK(cs_main);
                if (AcceptToMemoryPoolWithTime(chainparams, pool, active_chainstate, tx, nTime, false /* bypass_limits */,
                                               false /* test_accept */).m_result_type == MempoolAcceptResult::ResultType::VALID) {
                    ++count;
                } else {
                    // mempool may contain the transaction already, e.g. from
                    // wallet(s) having loaded it while we were processing
                    // mempool transactions; consider these as valid, instead of
                    // failed, but mark them as 'already there'
                    if (pool.exists(tx->GetHash())) {
                        ++already_there;
                    } else {
                        ++failed;
                    }
                }
            } else {
                ++expired;
            }
            if (ShutdownRequested())
                return false;
        }
        // Fee deltas for transactions that were not in the pool at dump time
        // are stored separately; re-apply them now.
        std::map<uint256, CAmount> mapDeltas;
        file >> mapDeltas;

        for (const auto& i : mapDeltas) {
            pool.PrioritiseTransaction(i.first, i.second);
        }

        std::set<uint256> unbroadcast_txids;
        file >> unbroadcast_txids;
        unbroadcast = unbroadcast_txids.size();
        for (const auto& txid : unbroadcast_txids) {
            // Ensure transactions were accepted to mempool then add to
            // unbroadcast set.
            if (pool.get(txid) != nullptr) pool.AddUnbroadcastTx(txid);
        }
    } catch (const std::exception& e) {
        LogPrintf("Failed to deserialize mempool data on disk: %s. Continuing anyway.\n", e.what());
        return false;
    }

    LogPrintf("Imported mempool transactions from disk: %i succeeded, %i failed, %i expired, %i already there, %i waiting for initial broadcast\n", count, failed, expired, already_there, unbroadcast);
    return true;
}
4559 
bool DumpMempool(const CTxMemPool& pool, FopenFn mockable_fopen_function, bool skip_file_commit)
{
    int64_t start = GetTimeMicros();

    std::map<uint256, CAmount> mapDeltas;
    std::vector<TxMempoolInfo> vinfo;
    std::set<uint256> unbroadcast_txids;

    // Serialize concurrent dump requests; only one dump runs at a time.
    static Mutex dump_mutex;
    LOCK(dump_mutex);

    {
        // Copy everything needed out of the pool under its lock, then release
        // it before doing the (slow) disk I/O below.
        LOCK(pool.cs);
        for (const auto &i : pool.mapDeltas) {
            mapDeltas[i.first] = i.second;
        }
        vinfo = pool.infoAll();
        unbroadcast_txids = pool.GetUnbroadcastTxs();
    }

    int64_t mid = GetTimeMicros();

    try {
        // Write to a temporary file and atomically rename at the end so a
        // crash mid-dump never leaves a truncated mempool.dat behind.
        FILE* filestr{mockable_fopen_function(gArgs.GetDataDirNet() / "mempool.dat.new", "wb")};
        if (!filestr) {
            return false;
        }

        CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);

        uint64_t version = MEMPOOL_DUMP_VERSION;
        file << version;

        // Format: version, entry count, then (tx, time, fee delta) per entry
        // — this must match the read order in LoadMempool.
        file << (uint64_t)vinfo.size();
        for (const auto& i : vinfo) {
            file << *(i.tx);
            file << int64_t{count_seconds(i.m_time)};
            file << int64_t{i.nFeeDelta};
            // Deltas for in-pool transactions were just written inline.
            mapDeltas.erase(i.tx->GetHash());
        }

        // Remaining deltas belong to transactions not currently in the pool.
        file << mapDeltas;

        LogPrintf("Writing %d unbroadcast transactions to disk.\n", unbroadcast_txids.size());
        file << unbroadcast_txids;

        if (!skip_file_commit && !FileCommit(file.Get()))
            throw std::runtime_error("FileCommit failed");
        file.fclose();
        if (!RenameOver(gArgs.GetDataDirNet() / "mempool.dat.new", gArgs.GetDataDirNet() / "mempool.dat")) {
            throw std::runtime_error("Rename failed");
        }
        int64_t last = GetTimeMicros();
        LogPrintf("Dumped mempool: %gs to copy, %gs to dump\n", (mid-start)*MICRO, (last-mid)*MICRO);
    } catch (const std::exception& e) {
        LogPrintf("Failed to dump mempool: %s. Continuing anyway.\n", e.what());
        return false;
    }
    return true;
}
4620 
4623 double GuessVerificationProgress(const ChainTxData& data, const CBlockIndex *pindex) {
4624  if (pindex == nullptr)
4625  return 0.0;
4626 
4627  int64_t nNow = time(nullptr);
4628 
4629  double fTxTotal;
4630 
4631  if (pindex->nChainTx <= data.nTxCount) {
4632  fTxTotal = data.nTxCount + (nNow - data.nTime) * data.dTxRate;
4633  } else {
4634  fTxTotal = pindex->nChainTx + (nNow - pindex->GetBlockTime()) * data.dTxRate;
4635  }
4636 
4637  return std::min<double>(pindex->nChainTx / fTxTotal, 1.0);
4638 }
4639 
4640 std::optional<uint256> ChainstateManager::SnapshotBlockhash() const
4641 {
4642  LOCK(::cs_main);
4643  if (m_active_chainstate && m_active_chainstate->m_from_snapshot_blockhash) {
4644  // If a snapshot chainstate exists, it will always be our active.
4645  return m_active_chainstate->m_from_snapshot_blockhash;
4646  }
4647  return std::nullopt;
4648 }
4649 
4650 std::vector<CChainState*> ChainstateManager::GetAll()
4651 {
4652  LOCK(::cs_main);
4653  std::vector<CChainState*> out;
4654 
4655  if (!IsSnapshotValidated() && m_ibd_chainstate) {
4656  out.push_back(m_ibd_chainstate.get());
4657  }
4658 
4659  if (m_snapshot_chainstate) {
4660  out.push_back(m_snapshot_chainstate.get());
4661  }
4662 
4663  return out;
4664 }
4665 
4666 CChainState& ChainstateManager::InitializeChainstate(
4667  CTxMemPool* mempool, const std::optional<uint256>& snapshot_blockhash)
4668 {
4669  bool is_snapshot = snapshot_blockhash.has_value();
4670  std::unique_ptr<CChainState>& to_modify =
4671  is_snapshot ? m_snapshot_chainstate : m_ibd_chainstate;
4672 
4673  if (to_modify) {
4674  throw std::logic_error("should not be overwriting a chainstate");
4675  }
4676  to_modify.reset(new CChainState(mempool, m_blockman, snapshot_blockhash));
4677 
4678  // Snapshot chainstates and initial IBD chaintates always become active.
4679  if (is_snapshot || (!is_snapshot && !m_active_chainstate)) {
4680  LogPrintf("Switching active chainstate to %s\n", to_modify->ToString());
4681  m_active_chainstate = to_modify.get();
4682  } else {
4683  throw std::logic_error("unexpected chainstate activation");
4684  }
4685 
4686  return *to_modify;
4687 }
4688 
    const int height, const CChainParams& chainparams)
{
    // Look up the assumeutxo entry registered for this height in the chain
    // parameters; return a pointer into that map, or nullptr when no snapshot
    // is expected at this height.
    const MapAssumeutxo& valid_assumeutxos_map = chainparams.Assumeutxo();
    const auto assumeutxo_found = valid_assumeutxos_map.find(height);

    if (assumeutxo_found != valid_assumeutxos_map.end()) {
        return &assumeutxo_found->second;
    }
    return nullptr;
}
4700 
    CAutoFile& coins_file,
    const SnapshotMetadata& metadata,
    bool in_memory)
{
    // Build, populate, and validate a snapshot-based chainstate from the
    // serialized UTXO set in `coins_file`, and make it the active chainstate
    // on success. Returns false (leaving the current active chainstate in
    // place) on any failure.
    uint256 base_blockhash = metadata.m_base_blockhash;

    // Only one snapshot-based chainstate may ever be activated.
    if (this->SnapshotBlockhash()) {
        LogPrintf("[snapshot] can't activate a snapshot-based chainstate more than once\n");
        return false;
    }

    int64_t current_coinsdb_cache_size{0};
    int64_t current_coinstip_cache_size{0};

    // Cache percentages to allocate to each chainstate.
    //
    // These particular percentages don't matter so much since they will only be
    // relevant during snapshot activation; caches are rebalanced at the conclusion of
    // this function. We want to give (essentially) all available cache capacity to the
    // snapshot to aid the bulk load later in this function.
    static constexpr double IBD_CACHE_PERC = 0.01;
    static constexpr double SNAPSHOT_CACHE_PERC = 0.99;

    {
        LOCK(::cs_main);
        // Resize the coins caches to ensure we're not exceeding memory limits.
        //
        // Allocate the majority of the cache to the incoming snapshot chainstate, since
        // (optimistically) getting to its tip will be the top priority. We'll need to call
        // `MaybeRebalanceCaches()` once we're done with this function to ensure
        // the right allocation (including the possibility that no snapshot was activated
        // and that we should restore the active chainstate caches to their original size).
        //
        current_coinsdb_cache_size = this->ActiveChainstate().m_coinsdb_cache_size_bytes;
        current_coinstip_cache_size = this->ActiveChainstate().m_coinstip_cache_size_bytes;

        // Temporarily resize the active coins cache to make room for the newly-created
        // snapshot chain.
        this->ActiveChainstate().ResizeCoinsCaches(
            static_cast<size_t>(current_coinstip_cache_size * IBD_CACHE_PERC),
            static_cast<size_t>(current_coinsdb_cache_size * IBD_CACHE_PERC));
    }

    // Construct the snapshot chainstate without a mempool (nullptr).
    auto snapshot_chainstate = WITH_LOCK(::cs_main, return std::make_unique<CChainState>(
        /* mempool */ nullptr, m_blockman, base_blockhash));

    {
        LOCK(::cs_main);
        // Give the snapshot chainstate (essentially) the whole cache budget
        // for the bulk load below.
        snapshot_chainstate->InitCoinsDB(
            static_cast<size_t>(current_coinsdb_cache_size * SNAPSHOT_CACHE_PERC),
            in_memory, false, "chainstate");
        snapshot_chainstate->InitCoinsCache(
            static_cast<size_t>(current_coinstip_cache_size * SNAPSHOT_CACHE_PERC));
    }

    const bool snapshot_ok = this->PopulateAndValidateSnapshot(
        *snapshot_chainstate, coins_file, metadata);

    if (!snapshot_ok) {
        // Load or validation failed: rebalance the caches back (the snapshot
        // chainstate created above is discarded when it goes out of scope).
        WITH_LOCK(::cs_main, this->MaybeRebalanceCaches());
        return false;
    }

    {
        LOCK(::cs_main);
        assert(!m_snapshot_chainstate);
        // Take ownership of the validated snapshot chainstate and activate it.
        m_snapshot_chainstate.swap(snapshot_chainstate);
        const bool chaintip_loaded = m_snapshot_chainstate->LoadChainTip();
        assert(chaintip_loaded);

        m_active_chainstate = m_snapshot_chainstate.get();

        LogPrintf("[snapshot] successfully activated snapshot %s\n", base_blockhash.ToString());
        LogPrintf("[snapshot] (%.2f MB)\n",
            m_snapshot_chainstate->CoinsTip().DynamicMemoryUsage() / (1000 * 1000));

        this->MaybeRebalanceCaches();
    }
    return true;
}
4782 
    CChainState& snapshot_chainstate,
    CAutoFile& coins_file,
    const SnapshotMetadata& metadata)
{
    // Deserialize the UTXO set from `coins_file` into the snapshot
    // chainstate's coins cache, verify its serialized hash against the
    // hard-coded assumeutxo value for the base height, and fake the block
    // index state needed for the snapshot chain to function. Returns false on
    // any mismatch, bad data, or shutdown request.
    //
    // It's okay to release cs_main before we're done using `coins_cache` because we know
    // that nothing else will be referencing the newly created snapshot_chainstate yet.
    CCoinsViewCache& coins_cache = *WITH_LOCK(::cs_main, return &snapshot_chainstate.CoinsTip());

    uint256 base_blockhash = metadata.m_base_blockhash;

    // The snapshot base block must already be present in our block index.
    CBlockIndex* snapshot_start_block = WITH_LOCK(::cs_main, return m_blockman.LookupBlockIndex(base_blockhash));

    if (!snapshot_start_block) {
        // Needed for GetUTXOStats and ExpectedAssumeutxo to determine the height and to avoid a crash when base_blockhash.IsNull()
        LogPrintf("[snapshot] Did not find snapshot start blockheader %s\n",
                  base_blockhash.ToString());
        return false;
    }

    int base_height = snapshot_start_block->nHeight;
    auto maybe_au_data = ExpectedAssumeutxo(base_height, ::Params());

    if (!maybe_au_data) {
        LogPrintf("[snapshot] assumeutxo height in snapshot metadata not recognized " /* Continued */
                  "(%d) - refusing to load snapshot\n", base_height);
        return false;
    }

    const AssumeutxoData& au_data = *maybe_au_data;

    COutPoint outpoint;
    Coin coin;
    const uint64_t coins_count = metadata.m_coins_count;
    uint64_t coins_left = metadata.m_coins_count;

    LogPrintf("[snapshot] loading coins from snapshot %s\n", base_blockhash.ToString());
    int64_t flush_now{0};
    int64_t coins_processed{0};

    // Deserialize exactly `coins_count` (outpoint, coin) pairs, flushing the
    // cache to disk periodically so we stay within memory limits.
    while (coins_left > 0) {
        try {
            coins_file >> outpoint;
            coins_file >> coin;
        } catch (const std::ios_base::failure&) {
            LogPrintf("[snapshot] bad snapshot format or truncated snapshot after deserializing %d coins\n",
                      coins_count - coins_left);
            return false;
        }
        // Reject coins claiming to be created above the snapshot base height,
        // and reject the maximum vout index value.
        if (coin.nHeight > base_height ||
            outpoint.n >= std::numeric_limits<decltype(outpoint.n)>::max() // Avoid integer wrap-around in coinstats.cpp:ApplyHash
        ) {
            LogPrintf("[snapshot] bad snapshot data after deserializing %d coins\n",
                      coins_count - coins_left);
            return false;
        }

        coins_cache.EmplaceCoinInternalDANGER(std::move(outpoint), std::move(coin));

        --coins_left;
        ++coins_processed;

        // Progress log every million coins.
        if (coins_processed % 1000000 == 0) {
            LogPrintf("[snapshot] %d coins loaded (%.2f%%, %.2f MB)\n",
                coins_processed,
                static_cast<float>(coins_processed) * 100 / static_cast<float>(coins_count),
                coins_cache.DynamicMemoryUsage() / (1000 * 1000));
        }

        // Batch write and flush (if we need to) every so often.
        //
        // If our average Coin size is roughly 41 bytes, checking every 120,000 coins
        // means <5MB of memory imprecision.
        if (coins_processed % 120000 == 0) {
            if (ShutdownRequested()) {
                return false;
            }

            const auto snapshot_cache_state = WITH_LOCK(::cs_main,
                return snapshot_chainstate.GetCoinsCacheSizeState());

            // NOTE(review): the right-hand operand of this comparison (the
            // CoinsCacheSizeState threshold) appears to be missing from this
            // listing — confirm against the upstream source.
            if (snapshot_cache_state >=
                LogPrintf("[snapshot] flushing coins cache (%.2f MB)... ", /* Continued */
                    coins_cache.DynamicMemoryUsage() / (1000 * 1000));
                flush_now = GetTimeMillis();

                // This is a hack - we don't know what the actual best block is, but that
                // doesn't matter for the purposes of flushing the cache here. We'll set this
                // to its correct value (`base_blockhash`) below after the coins are loaded.
                coins_cache.SetBestBlock(GetRandHash());

                coins_cache.Flush();
                LogPrintf("done (%.2fms)\n", GetTimeMillis() - flush_now);
            }
        }
    }

    // Important that we set this. This and the coins_cache accesses above are
    // sort of a layer violation, but either we reach into the innards of
    // CCoinsViewCache here or we have to invert some of the CChainState to
    // embed them in a snapshot-activation-specific CCoinsViewCache bulk load
    // method.
    coins_cache.SetBestBlock(base_blockhash);

    // The stream must contain no data beyond the advertised coin count.
    bool out_of_coins{false};
    try {
        coins_file >> outpoint;
    } catch (const std::ios_base::failure&) {
        // We expect an exception since we should be out of coins.
        out_of_coins = true;
    }
    if (!out_of_coins) {
        LogPrintf("[snapshot] bad snapshot - coins left over after deserializing %d coins\n",
            coins_count);
        return false;
    }

    LogPrintf("[snapshot] loaded %d (%.2f MB) coins from snapshot %s\n",
        coins_count,
        coins_cache.DynamicMemoryUsage() / (1000 * 1000),
        base_blockhash.ToString());

    LogPrintf("[snapshot] flushing snapshot chainstate to disk\n");
    // No need to acquire cs_main since this chainstate isn't being used yet.
    coins_cache.Flush(); // TODO: if #17487 is merged, add erase=false here for better performance.

    assert(coins_cache.GetBestBlock() == base_blockhash);

    // NOTE(review): the declaration of `stats` (used in the GetUTXOStats call
    // below) appears to be missing from this listing — confirm against the
    // upstream source.
    auto breakpoint_fnc = [] { /* TODO insert breakpoint here? */ };

    // As above, okay to immediately release cs_main here since no other context knows
    // about the snapshot_chainstate.
    CCoinsViewDB* snapshot_coinsdb = WITH_LOCK(::cs_main, return &snapshot_chainstate.CoinsDB());

    if (!GetUTXOStats(snapshot_coinsdb, WITH_LOCK(::cs_main, return std::ref(m_blockman)), stats, breakpoint_fnc)) {
        LogPrintf("[snapshot] failed to generate coins stats\n");
        return false;
    }

    // Assert that the deserialized chainstate contents match the expected assumeutxo value.
    if (AssumeutxoHash{stats.hashSerialized} != au_data.hash_serialized) {
        LogPrintf("[snapshot] bad snapshot content hash: expected %s, got %s\n",
            au_data.hash_serialized.ToString(), stats.hashSerialized.ToString());
        return false;
    }

    snapshot_chainstate.m_chain.SetTip(snapshot_start_block);

    // The remainder of this function requires modifying data protected by cs_main.
    LOCK(::cs_main);

    // Fake various pieces of CBlockIndex state:
    CBlockIndex* index = nullptr;
    for (int i = 0; i <= snapshot_chainstate.m_chain.Height(); ++i) {
        index = snapshot_chainstate.m_chain[i];

        // Fake nTx so that LoadBlockIndex() loads assumed-valid CBlockIndex
        // entries (among other things)
        if (!index->nTx) {
            index->nTx = 1;
        }
        // Fake nChainTx so that GuessVerificationProgress reports accurately
        index->nChainTx = index->pprev ? index->pprev->nChainTx + index->nTx : 1;

        // Fake BLOCK_OPT_WITNESS so that CChainState::NeedsRedownload()
        // won't ask to rewind the entire assumed-valid chain on startup.
        if (index->pprev && DeploymentActiveAt(*index, ::Params().GetConsensus(), Consensus::DEPLOYMENT_SEGWIT)) {
            index->nStatus |= BLOCK_OPT_WITNESS;
        }
    }

    assert(index);
    // The tip gets the exact cumulative tx count from the assumeutxo data.
    index->nChainTx = au_data.nChainTx;
    snapshot_chainstate.setBlockIndexCandidates.insert(snapshot_start_block);

    LogPrintf("[snapshot] validated snapshot (%.2f MB)\n",
        coins_cache.DynamicMemoryUsage() / (1000 * 1000));
    return true;
}
4964 
{
    LOCK(::cs_main);
    // An active chainstate must already have been set (via
    // InitializeChainstate() or ActivateSnapshot()) before this is called.
    assert(m_active_chainstate);
    return *m_active_chainstate;
}
4971 
{
    LOCK(::cs_main);
    // True only when a snapshot chainstate exists and is the active one.
    return m_snapshot_chainstate && m_active_chainstate == m_snapshot_chainstate.get();
}
4977 
{
    LOCK(::cs_main);
    // Prefer the snapshot chainstate, but only once it has been fully
    // validated; otherwise fall back to the (required) IBD chainstate.
    if (m_snapshot_chainstate && IsSnapshotValidated()) {
        return *m_snapshot_chainstate.get();
    }
    assert(m_ibd_chainstate);
    return *m_ibd_chainstate.get();
}
4987 
{
    LOCK(::cs_main);
    // The IBD chainstate only counts as a "background" chainstate while a
    // snapshot chainstate also exists.
    return (m_snapshot_chainstate && chainstate == m_ibd_chainstate.get());
}
4993 
4994 void ChainstateManager::Unload()
4995 {
4996  for (CChainState* chainstate : this->GetAll()) {
4997  chainstate->m_chain.SetTip(nullptr);
4998  chainstate->UnloadBlockIndex();
4999  }
5000 
5001  m_blockman.Unload();
5002 }
5003 
5004 void ChainstateManager::Reset()
5005 {
5006  LOCK(::cs_main);
5007  m_ibd_chainstate.reset();
5008  m_snapshot_chainstate.reset();
5009  m_active_chainstate = nullptr;
5010  m_snapshot_validated = false;
5011 }
5012 
void ChainstateManager::MaybeRebalanceCaches()
{
    // Re-split the total coins cache budget (m_total_coinstip_cache /
    // m_total_coinsdb_cache) across whichever chainstates currently exist.
    if (m_ibd_chainstate && !m_snapshot_chainstate) {
        LogPrintf("[snapshot] allocating all cache to the IBD chainstate\n");
        // Allocate everything to the IBD chainstate.
        m_ibd_chainstate->ResizeCoinsCaches(m_total_coinstip_cache, m_total_coinsdb_cache);
    }
    else if (m_snapshot_chainstate && !m_ibd_chainstate) {
        LogPrintf("[snapshot] allocating all cache to the snapshot chainstate\n");
        // Allocate everything to the snapshot chainstate.
        m_snapshot_chainstate->ResizeCoinsCaches(m_total_coinstip_cache, m_total_coinsdb_cache);
    }
    else if (m_ibd_chainstate && m_snapshot_chainstate) {
        // If both chainstates exist, determine who needs more cache based on IBD status.
        //
        // Note: shrink caches first so that we don't inadvertently overwhelm available memory.
        if (m_snapshot_chainstate->IsInitialBlockDownload()) {
            // NOTE(review): the argument lists of the four ResizeCoinsCaches
            // calls below (the fractional cache splits) appear to be missing
            // from this listing — confirm against the upstream source.
            m_ibd_chainstate->ResizeCoinsCaches(
            m_snapshot_chainstate->ResizeCoinsCaches(
        } else {
            m_snapshot_chainstate->ResizeCoinsCaches(
            m_ibd_chainstate->ResizeCoinsCaches(
        }
    }
}
ScriptErrorString
std::string ScriptErrorString(const ScriptError serror)
Definition: script_error.cpp:10
MAX_FUTURE_BLOCK_TIME
static constexpr int64_t MAX_FUTURE_BLOCK_TIME
Maximum amount of time that a block timestamp is allowed to exceed the current network-adjusted time ...
Definition: chain.h:22
WarningBitsConditionChecker::EndTime
int64_t EndTime(const Consensus::Params &params) const override
Definition: validation.cpp:1593
CVerifyDB::~CVerifyDB
~CVerifyDB()
Definition: validation.cpp:3838
CBlockIndex::GetBlockTime
int64_t GetBlockTime() const
Definition: chain.h:260
GetSerializeSize
size_t GetSerializeSize(const T &t, int nVersion=0)
Definition: serialize.h:1080
CTxIn
An input of a transaction.
Definition: transaction.h:65