Bitcoin Core 21.99.0
validation.cpp
1 // Copyright (c) 2009-2010 Satoshi Nakamoto
2 // Copyright (c) 2009-2020 The Bitcoin Core developers
3 // Distributed under the MIT software license, see the accompanying
4 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
5 
6 #include <validation.h>
7 
8 #include <arith_uint256.h>
9 #include <chain.h>
10 #include <chainparams.h>
11 #include <checkqueue.h>
12 #include <consensus/consensus.h>
13 #include <consensus/merkle.h>
14 #include <consensus/tx_check.h>
15 #include <consensus/tx_verify.h>
16 #include <consensus/validation.h>
17 #include <cuckoocache.h>
18 #include <flatfile.h>
19 #include <hash.h>
20 #include <index/blockfilterindex.h>
21 #include <index/txindex.h>
22 #include <logging.h>
23 #include <logging/timer.h>
24 #include <node/blockstorage.h>
25 #include <node/coinstats.h>
26 #include <node/ui_interface.h>
27 #include <policy/policy.h>
28 #include <policy/settings.h>
29 #include <pow.h>
30 #include <primitives/block.h>
31 #include <primitives/transaction.h>
32 #include <random.h>
33 #include <reverse_iterator.h>
34 #include <script/script.h>
35 #include <script/sigcache.h>
36 #include <shutdown.h>
37 #include <signet.h>
38 #include <timedata.h>
39 #include <tinyformat.h>
40 #include <txdb.h>
41 #include <txmempool.h>
42 #include <uint256.h>
43 #include <undo.h>
44 #include <util/check.h> // For NDEBUG compile time check
45 #include <util/moneystr.h>
46 #include <util/rbf.h>
47 #include <util/strencodings.h>
48 #include <util/system.h>
49 #include <util/translation.h>
50 #include <validationinterface.h>
51 #include <warnings.h>
52 
53 #include <optional>
54 #include <string>
55 
56 #include <boost/algorithm/string/replace.hpp>
57 
58 #define MICRO 0.000001
59 #define MILLI 0.001
60 
66 static const unsigned int EXTRA_DESCENDANT_TX_SIZE_LIMIT = 10000;
68 static const unsigned int MAX_DISCONNECTED_TX_POOL_SIZE = 20000;
70 static const unsigned int BLOCKFILE_CHUNK_SIZE = 0x1000000; // 16 MiB
72 static const unsigned int UNDOFILE_CHUNK_SIZE = 0x100000; // 1 MiB
74 static constexpr std::chrono::hours DATABASE_WRITE_INTERVAL{1};
76 static constexpr std::chrono::hours DATABASE_FLUSH_INTERVAL{24};
78 static constexpr std::chrono::hours MAX_FEE_ESTIMATION_TIP_AGE{3};
79 const std::vector<std::string> CHECKLEVEL_DOC {
80  "level 0 reads the blocks from disk",
81  "level 1 verifies block validity",
82  "level 2 verifies undo data",
83  "level 3 checks disconnection of tip blocks",
84  "level 4 tries to reconnect the blocks",
85  "each level includes the checks of the previous levels",
86 };
87 
88 bool CBlockIndexWorkComparator::operator()(const CBlockIndex *pa, const CBlockIndex *pb) const {
89  // First sort by most total work, ...
90  if (pa->nChainWork > pb->nChainWork) return false;
91  if (pa->nChainWork < pb->nChainWork) return true;
92 
93  // ... then by earliest time received, ...
94  if (pa->nSequenceId < pb->nSequenceId) return false;
95  if (pa->nSequenceId > pb->nSequenceId) return true;
96 
97  // Use pointer address as tie breaker (should only happen with blocks
98  // loaded from disk, as those all have id 0).
99  if (pa < pb) return false;
100  if (pa > pb) return true;
101 
102  // Identical blocks.
103  return false;
104 }
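The comparator above is the ordering used for chain-tip candidates: more accumulated work sorts later, ties break on the order in which blocks were received, and pointer address is the final tie-breaker. The following self-contained sketch (hypothetical MiniIndex/MiniWorkComparator names, plain integer work instead of arith_uint256) shows how such a strict weak ordering lets the best candidate be read from the back of a std::set; it is an editorial illustration, not part of validation.cpp.

#include <cassert>
#include <cstdint>
#include <set>

struct MiniIndex {
    uint64_t chain_work{0}; // stand-in for arith_uint256 nChainWork
    int32_t sequence_id{0}; // stand-in for nSequenceId (receive order)
};

struct MiniWorkComparator {
    bool operator()(const MiniIndex* pa, const MiniIndex* pb) const
    {
        if (pa->chain_work > pb->chain_work) return false; // more work sorts later
        if (pa->chain_work < pb->chain_work) return true;
        if (pa->sequence_id < pb->sequence_id) return false; // earlier receipt sorts later
        if (pa->sequence_id > pb->sequence_id) return true;
        if (pa < pb) return false; // pointer tie-break
        if (pa > pb) return true;
        return false; // identical
    }
};

int main()
{
    MiniIndex a{100, 1}, b{200, 2}, c{200, 1};
    std::set<const MiniIndex*, MiniWorkComparator> candidates{&a, &b, &c};
    // c ties b on work but was received earlier, so it sorts last and is the "best" candidate.
    assert(*candidates.rbegin() == &c);
    return 0;
}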
105 
107 ChainstateManager g_chainman;
108 CChainState& ChainstateActive()
109 {
110  LOCK(::cs_main);
111  assert(g_chainman.m_active_chainstate);
112  return *g_chainman.m_active_chainstate;
113 }
114 
115 CChain& ChainActive()
116 {
117  LOCK(::cs_main);
118  return ::ChainstateActive().m_chain;
119 }
120 
132 
135 std::condition_variable g_best_block_cv;
138 std::atomic_bool fImporting(false);
139 std::atomic_bool fReindex(false);
140 bool fHavePruned = false;
141 bool fPruneMode = false;
142 bool fRequireStandard = true;
143 bool fCheckBlockIndex = false;
145 uint64_t nPruneTarget = 0;
147 
150 
152 
153 // Internal stuff
154 namespace {
155  CBlockIndex* pindexBestInvalid = nullptr;
156 
157  RecursiveMutex cs_LastBlockFile;
158  std::vector<CBlockFileInfo> vinfoBlockFile;
159  int nLastBlockFile = 0;
164  bool fCheckForPruning = false;
165 
167  std::set<CBlockIndex*> setDirtyBlockIndex;
168 
170  std::set<int> setDirtyFileInfo;
171 } // anon namespace
172 
173 CBlockIndex* BlockManager::LookupBlockIndex(const uint256& hash)
174 {
175  AssertLockHeld(cs_main);
176  assert(std::addressof(g_chainman.BlockIndex()) == std::addressof(m_block_index));
177  BlockMap::const_iterator it = m_block_index.find(hash);
178  return it == m_block_index.end() ? nullptr : it->second;
179 }
180 
181 CBlockIndex* BlockManager::FindForkInGlobalIndex(const CChain& chain, const CBlockLocator& locator)
182 {
183  AssertLockHeld(cs_main);
184 
185  assert(std::addressof(g_chainman.m_blockman) == std::addressof(*this));
186  // Find the latest block common to locator and chain - we expect that
187  // locator.vHave is sorted descending by height.
188  for (const uint256& hash : locator.vHave) {
189  CBlockIndex* pindex = LookupBlockIndex(hash);
190  if (pindex) {
191  if (chain.Contains(pindex))
192  return pindex;
193  if (pindex->GetAncestor(chain.Height()) == chain.Tip()) {
194  return chain.Tip();
195  }
196  }
197  }
198  return chain.Genesis();
199 }
200 
201 std::unique_ptr<CBlockTreeDB> pblocktree;
202 
203 bool CheckInputScripts(const CTransaction& tx, TxValidationState& state,
204  const CCoinsViewCache& inputs, unsigned int flags, bool cacheSigStore,
205  bool cacheFullScriptStore, PrecomputedTransactionData& txdata,
206  std::vector<CScriptCheck>* pvChecks = nullptr)
207  EXCLUSIVE_LOCKS_REQUIRED(cs_main);
208 static FILE* OpenUndoFile(const FlatFilePos &pos, bool fReadOnly = false);
209 static FlatFileSeq BlockFileSeq();
210 static FlatFileSeq UndoFileSeq();
211 
212 bool CheckFinalTx(const CBlockIndex* active_chain_tip, const CTransaction &tx, int flags)
213 {
215  assert(active_chain_tip); // TODO: Make active_chain_tip a reference
216  assert(std::addressof(*::ChainActive().Tip()) == std::addressof(*active_chain_tip));
217 
218  // By convention a negative value for flags indicates that the
219  // current network-enforced consensus rules should be used. In
220  // a future soft-fork scenario that would mean checking which
221  // rules would be enforced for the next block and setting the
222  // appropriate flags. At the present time no soft-forks are
223  // scheduled, so no flags are set.
224  flags = std::max(flags, 0);
225 
226  // CheckFinalTx() uses active_chain_tip.Height()+1 to evaluate
227  // nLockTime because when IsFinalTx() is called within
228  // CBlock::AcceptBlock(), the height of the block *being*
229  // evaluated is what is used. Thus if we want to know if a
230  // transaction can be part of the *next* block, we need to call
231  // IsFinalTx() with one more than active_chain_tip.Height().
232  const int nBlockHeight = active_chain_tip->nHeight + 1;
233 
234  // BIP113 requires that time-locked transactions have nLockTime set to
235  // less than the median time of the previous block they're contained in.
236  // When the next block is created its previous block will be the current
237  // chain tip, so we use that to calculate the median time passed to
238  // IsFinalTx() if LOCKTIME_MEDIAN_TIME_PAST is set.
239  const int64_t nBlockTime = (flags & LOCKTIME_MEDIAN_TIME_PAST)
240  ? active_chain_tip->GetMedianTimePast()
241  : GetAdjustedTime();
242 
243  return IsFinalTx(tx, nBlockHeight, nBlockTime);
244 }
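As a rough illustration of the tip-height-plus-one and median-time-past convention explained in the comments above, here is a self-contained sketch of nLockTime finality. The names and constants (SketchTx, LOCKTIME_THRESHOLD_SKETCH) are stand-ins rather than the real primitives, and version/flag handling is deliberately omitted.

#include <cstdint>
#include <vector>

static constexpr uint32_t LOCKTIME_THRESHOLD_SKETCH = 500000000; // below: block height, above: unix time
static constexpr uint32_t SEQUENCE_FINAL_SKETCH = 0xffffffff;

struct SketchTx {
    uint32_t lock_time{0};
    std::vector<uint32_t> input_sequences;
};

// Final for the *next* block: height locks compare against tip_height + 1,
// time locks compare against the current tip's median-time-past (BIP113).
bool IsFinalForNextBlock(const SketchTx& tx, int tip_height, int64_t tip_median_time_past)
{
    if (tx.lock_time == 0) return true;
    const int64_t cutoff = (tx.lock_time < LOCKTIME_THRESHOLD_SKETCH)
                               ? int64_t{tip_height} + 1
                               : tip_median_time_past;
    if (int64_t{tx.lock_time} < cutoff) return true;
    // nLockTime not yet reached, but fully-final sequences disable it entirely.
    for (uint32_t seq : tx.input_sequences) {
        if (seq != SEQUENCE_FINAL_SKETCH) return false;
    }
    return true;
}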
245 
246 bool TestLockPointValidity(CChain& active_chain, const LockPoints* lp)
247 {
249  assert(lp);
250  // If there are relative lock times then the maxInputBlock will be set
251  // If there are no relative lock times, the LockPoints don't depend on the chain
252  if (lp->maxInputBlock) {
253  // Check whether ::ChainActive() is an extension of the block at which the LockPoints
254  // calculation was valid. If not, LockPoints are no longer valid.
255  assert(std::addressof(::ChainActive()) == std::addressof(active_chain));
256  if (!active_chain.Contains(lp->maxInputBlock)) {
257  return false;
258  }
259  }
260 
261  // LockPoints still valid
262  return true;
263 }
264 
265 bool CheckSequenceLocks(CChainState& active_chainstate,
266  const CTxMemPool& pool,
267  const CTransaction& tx,
268  int flags,
269  LockPoints* lp,
270  bool useExistingLockPoints)
271 {
273  AssertLockHeld(pool.cs);
274  assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate));
275 
276  CBlockIndex* tip = active_chainstate.m_chain.Tip();
277  assert(tip != nullptr);
278 
279  CBlockIndex index;
280  index.pprev = tip;
281  // CheckSequenceLocks() uses active_chainstate.m_chain.Height()+1 to evaluate
282  // height based locks because when SequenceLocks() is called within
283  // ConnectBlock(), the height of the block *being*
284  // evaluated is what is used.
285  // Thus if we want to know if a transaction can be part of the
286  // *next* block, we need to use one more than active_chainstate.m_chain.Height()
287  index.nHeight = tip->nHeight + 1;
288 
289  std::pair<int, int64_t> lockPair;
290  if (useExistingLockPoints) {
291  assert(lp);
292  lockPair.first = lp->height;
293  lockPair.second = lp->time;
294  }
295  else {
296  // CoinsTip() contains the UTXO set for active_chainstate.m_chain.Tip()
297  CCoinsViewMemPool viewMemPool(&active_chainstate.CoinsTip(), pool);
298  std::vector<int> prevheights;
299  prevheights.resize(tx.vin.size());
300  for (size_t txinIndex = 0; txinIndex < tx.vin.size(); txinIndex++) {
301  const CTxIn& txin = tx.vin[txinIndex];
302  Coin coin;
303  if (!viewMemPool.GetCoin(txin.prevout, coin)) {
304  return error("%s: Missing input", __func__);
305  }
306  if (coin.nHeight == MEMPOOL_HEIGHT) {
307  // Assume all mempool transactions confirm in the next block
308  prevheights[txinIndex] = tip->nHeight + 1;
309  } else {
310  prevheights[txinIndex] = coin.nHeight;
311  }
312  }
313  lockPair = CalculateSequenceLocks(tx, flags, prevheights, index);
314  if (lp) {
315  lp->height = lockPair.first;
316  lp->time = lockPair.second;
317  // Also store the hash of the block with the highest height of
318  // all the blocks which have sequence locked prevouts.
319  // This hash needs to still be on the chain
320  // for these LockPoint calculations to be valid
321  // Note: It is impossible to correctly calculate a maxInputBlock
322  // if any of the sequence locked inputs depend on unconfirmed txs,
323  // except in the special case where the relative lock time/height
324  // is 0, which is equivalent to no sequence lock. Since we assume
325  // input height of tip+1 for mempool txs and test the resulting
326  // lockPair from CalculateSequenceLocks against tip+1, we know
327  // EvaluateSequenceLocks will fail if there was a non-zero sequence
328  // lock on a mempool input, so we can use the return value of
329  // CheckSequenceLocks to indicate the LockPoints validity
330  int maxInputHeight = 0;
331  for (const int height : prevheights) {
332  // Can ignore mempool inputs since we'll fail if they had non-zero locks
333  if (height != tip->nHeight+1) {
334  maxInputHeight = std::max(maxInputHeight, height);
335  }
336  }
337  lp->maxInputBlock = tip->GetAncestor(maxInputHeight);
338  }
339  }
340  return EvaluateSequenceLocks(index, lockPair);
341 }
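For readers unfamiliar with BIP68, the sketch below models the (height, time) lock pair that CalculateSequenceLocks and EvaluateSequenceLocks exchange above. It is a simplified, self-contained approximation with hypothetical names and no transaction-version or flag gating, not the consensus implementation.

#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>

static constexpr uint32_t SEQ_DISABLE_FLAG = 1u << 31; // relative lock disabled for this input
static constexpr uint32_t SEQ_TYPE_FLAG    = 1u << 22; // set: time-based, clear: height-based
static constexpr uint32_t SEQ_VALUE_MASK   = 0x0000ffff;
static constexpr int      SEQ_GRANULARITY  = 9;        // time locks count in 512-second units

struct SketchInput {
    uint32_t sequence;
    int prev_height;        // height at which the spent output confirmed
    int64_t prev_block_mtp; // median-time-past of the block before that confirmation
};

// Returns {min_height, min_time}: the evaluated block must exceed both for all locks to pass.
std::pair<int, int64_t> CalcSequenceLocksSketch(const std::vector<SketchInput>& inputs)
{
    int min_height = -1;
    int64_t min_time = -1;
    for (const auto& in : inputs) {
        if (in.sequence & SEQ_DISABLE_FLAG) continue;
        const int64_t value = in.sequence & SEQ_VALUE_MASK;
        if (in.sequence & SEQ_TYPE_FLAG) {
            min_time = std::max<int64_t>(min_time, in.prev_block_mtp + (value << SEQ_GRANULARITY) - 1);
        } else {
            min_height = std::max<int>(min_height, in.prev_height + (int)value - 1);
        }
    }
    return {min_height, min_time};
}

bool EvaluateSequenceLocksSketch(int block_height, int64_t block_mtp, const std::pair<int, int64_t>& lock)
{
    return lock.first < block_height && lock.second < block_mtp;
}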
342 
343 // Returns the script flags which should be checked for a given block
344 static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consensus::Params& chainparams);
345 
346 static void LimitMempoolSize(CTxMemPool& pool, CCoinsViewCache& coins_cache, size_t limit, std::chrono::seconds age)
348 {
349  int expired = pool.Expire(GetTime<std::chrono::seconds>() - age);
350  if (expired != 0) {
351  LogPrint(BCLog::MEMPOOL, "Expired %i transactions from the memory pool\n", expired);
352  }
353 
354  std::vector<COutPoint> vNoSpendsRemaining;
355  pool.TrimToSize(limit, &vNoSpendsRemaining);
356  assert(std::addressof(::ChainstateActive().CoinsTip()) == std::addressof(coins_cache));
357  for (const COutPoint& removed : vNoSpendsRemaining)
358  coins_cache.Uncache(removed);
359 }
360 
361 static bool IsCurrentForFeeEstimation(CChainState& active_chainstate) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
362 {
363  AssertLockHeld(cs_main);
364  assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate));
365  if (active_chainstate.IsInitialBlockDownload())
366  return false;
367  if (active_chainstate.m_chain.Tip()->GetBlockTime() < count_seconds(GetTime<std::chrono::seconds>() - MAX_FEE_ESTIMATION_TIP_AGE))
368  return false;
369  if (active_chainstate.m_chain.Height() < pindexBestHeader->nHeight - 1)
370  return false;
371  return true;
372 }
373 
374 /* Make mempool consistent after a reorg, by re-adding or recursively erasing
375  * disconnected block transactions from the mempool, and also removing any
376  * other transactions from the mempool that are no longer valid given the new
377  * tip/height.
378  *
379  * Note: we assume that disconnectpool only contains transactions that are NOT
380  * confirmed in the current chain nor already in the mempool (otherwise,
381  * in-mempool descendants of such transactions would be removed).
382  *
383  * Passing fAddToMempool=false will skip trying to add the transactions back,
384  * and instead just erase from the mempool as needed.
385  */
386 
387 static void UpdateMempoolForReorg(CChainState& active_chainstate, CTxMemPool& mempool, DisconnectedBlockTransactions& disconnectpool, bool fAddToMempool) EXCLUSIVE_LOCKS_REQUIRED(cs_main, mempool.cs)
388 {
390  AssertLockHeld(mempool.cs);
391  assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate));
392  std::vector<uint256> vHashUpdate;
393  // disconnectpool's insertion_order index sorts the entries from
394  // oldest to newest, but the oldest entry will be the last tx from the
395  // latest mined block that was disconnected.
396  // Iterate disconnectpool in reverse, so that we add transactions
397  // back to the mempool starting with the earliest transaction that had
398  // been previously seen in a block.
399  auto it = disconnectpool.queuedTx.get<insertion_order>().rbegin();
400  while (it != disconnectpool.queuedTx.get<insertion_order>().rend()) {
401  // ignore validation errors in resurrected transactions
402  if (!fAddToMempool || (*it)->IsCoinBase() ||
403  AcceptToMemoryPool(active_chainstate, mempool, *it, true /* bypass_limits */).m_result_type != MempoolAcceptResult::ResultType::VALID) {
404  // If the transaction doesn't make it in to the mempool, remove any
405  // transactions that depend on it (which would now be orphans).
406  mempool.removeRecursive(**it, MemPoolRemovalReason::REORG);
407  } else if (mempool.exists((*it)->GetHash())) {
408  vHashUpdate.push_back((*it)->GetHash());
409  }
410  ++it;
411  }
412  disconnectpool.queuedTx.clear();
413  // AcceptToMemoryPool/addUnchecked all assume that new mempool entries have
414  // no in-mempool children, which is generally not true when adding
415  // previously-confirmed transactions back to the mempool.
416  // UpdateTransactionsFromBlock finds descendants of any transactions in
417  // the disconnectpool that were added back and cleans up the mempool state.
418  mempool.UpdateTransactionsFromBlock(vHashUpdate);
419 
420  // We also need to remove any now-immature transactions
421  mempool.removeForReorg(active_chainstate, STANDARD_LOCKTIME_VERIFY_FLAGS);
422  // Re-limit mempool size, in case we added any transactions
423  LimitMempoolSize(mempool, active_chainstate.CoinsTip(), gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, std::chrono::hours{gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY)});
424 }
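The ordering subtlety in the comment above (walking the disconnect pool in reverse restores transactions in their original confirmation order, parents before children) can be modelled in isolation. The helper below is a hypothetical stand-in that uses txid strings rather than real mempool entries.

#include <deque>
#include <string>
#include <vector>

// Input: queued txids in insertion order, where the oldest entry is the last
// transaction of the most recently disconnected block. Output: the order in
// which to attempt re-acceptance, i.e. earliest-confirmed first.
std::vector<std::string> ReplayOrderSketch(const std::deque<std::string>& queued_txids)
{
    return {queued_txids.rbegin(), queued_txids.rend()};
}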
425 
431 static bool CheckInputsFromMempoolAndCache(const CTransaction& tx, TxValidationState& state,
432  const CCoinsViewCache& view, const CTxMemPool& pool,
433  unsigned int flags, PrecomputedTransactionData& txdata, CCoinsViewCache& coins_tip)
435 {
437  AssertLockHeld(pool.cs);
438 
439  assert(!tx.IsCoinBase());
440  for (const CTxIn& txin : tx.vin) {
441  const Coin& coin = view.AccessCoin(txin.prevout);
442 
443  // This coin was checked in PreChecks and MemPoolAccept
444  // has been holding cs_main since then.
445  Assume(!coin.IsSpent());
446  if (coin.IsSpent()) return false;
447 
448  // If the Coin is available, there are 2 possibilities:
449  // it is available in our current ChainstateActive UTXO set,
450  // or it's a UTXO provided by a transaction in our mempool.
451  // Ensure the scriptPubKeys in Coins from CoinsView are correct.
452  const CTransactionRef& txFrom = pool.get(txin.prevout.hash);
453  if (txFrom) {
454  assert(txFrom->GetHash() == txin.prevout.hash);
455  assert(txFrom->vout.size() > txin.prevout.n);
456  assert(txFrom->vout[txin.prevout.n] == coin.out);
457  } else {
458  assert(std::addressof(::ChainstateActive().CoinsTip()) == std::addressof(coins_tip));
459  const Coin& coinFromUTXOSet = coins_tip.AccessCoin(txin.prevout);
460  assert(!coinFromUTXOSet.IsSpent());
461  assert(coinFromUTXOSet.out == coin.out);
462  }
463  }
464 
465  // Call CheckInputScripts() to cache signature and script validity against current tip consensus rules.
466  return CheckInputScripts(tx, state, view, flags, /* cacheSigStore = */ true, /* cacheFullScriptStore = */ true, txdata);
467 }
468 
469 namespace {
470 
471 class MemPoolAccept
472 {
473 public:
474  explicit MemPoolAccept(CTxMemPool& mempool, CChainState& active_chainstate) : m_pool(mempool), m_view(&m_dummy), m_viewmempool(&active_chainstate.CoinsTip(), m_pool), m_active_chainstate(active_chainstate),
475  m_limit_ancestors(gArgs.GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT)),
476  m_limit_ancestor_size(gArgs.GetArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT)*1000),
477  m_limit_descendants(gArgs.GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT)),
478  m_limit_descendant_size(gArgs.GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT)*1000) {
479  assert(std::addressof(::ChainstateActive()) == std::addressof(m_active_chainstate));
480  }
481 
482  // We put the arguments we're handed into a struct, so we can pass them
483  // around easier.
484  struct ATMPArgs {
485  const CChainParams& m_chainparams;
486  const int64_t m_accept_time;
487  const bool m_bypass_limits;
488  /*
489  * Return any outpoints which were not previously present in the coins
490  * cache, but were added as a result of validating the tx for mempool
491  * acceptance. This allows the caller to optionally remove the cache
492  * additions if the associated transaction ends up being rejected by
493  * the mempool.
494  */
495  std::vector<COutPoint>& m_coins_to_uncache;
496  const bool m_test_accept;
497  };
498 
499  // Single transaction acceptance
500  MempoolAcceptResult AcceptSingleTransaction(const CTransactionRef& ptx, ATMPArgs& args) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
501 
502 private:
503  // All the intermediate state that gets passed between the various levels
504  // of checking a given transaction.
505  struct Workspace {
506  explicit Workspace(const CTransactionRef& ptx) : m_ptx(ptx), m_hash(ptx->GetHash()) {}
507  std::set<uint256> m_conflicts;
508  CTxMemPool::setEntries m_all_conflicting;
509  CTxMemPool::setEntries m_ancestors;
510  std::unique_ptr<CTxMemPoolEntry> m_entry;
511  std::list<CTransactionRef> m_replaced_transactions;
512 
513  bool m_replacement_transaction;
514  CAmount m_base_fees;
515  CAmount m_modified_fees;
516  CAmount m_conflicting_fees;
517  size_t m_conflicting_size;
518 
519  const CTransactionRef& m_ptx;
520  const uint256& m_hash;
521  TxValidationState m_state;
522  };
523 
524  // Run the policy checks on a given transaction, excluding any script checks.
525  // Looks up inputs, calculates feerate, considers replacement, evaluates
526  // package limits, etc. As this function can be invoked for "free" by a peer,
527  // only tests that are fast should be done here (to avoid CPU DoS).
528  bool PreChecks(ATMPArgs& args, Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
529 
530  // Run the script checks using our policy flags. As this can be slow, we should
531  // only invoke this on transactions that have otherwise passed policy checks.
532  bool PolicyScriptChecks(const ATMPArgs& args, Workspace& ws, PrecomputedTransactionData& txdata) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
533 
534  // Re-run the script checks, using consensus flags, and try to cache the
535  // result in the scriptcache. This should be done after
536  // PolicyScriptChecks(). This requires that all inputs either be in our
537  // utxo set or in the mempool.
538  bool ConsensusScriptChecks(const ATMPArgs& args, Workspace& ws, PrecomputedTransactionData &txdata) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
539 
540  // Try to add the transaction to the mempool, removing any conflicts first.
541  // Returns true if the transaction is in the mempool after any size
542  // limiting is performed, false otherwise.
543  bool Finalize(const ATMPArgs& args, Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
544 
545  // Compare a package's feerate against minimum allowed.
546  bool CheckFeeRate(size_t package_size, CAmount package_fee, TxValidationState& state) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs)
547  {
548  CAmount mempoolRejectFee = m_pool.GetMinFee(gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFee(package_size);
549  if (mempoolRejectFee > 0 && package_fee < mempoolRejectFee) {
550  return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "mempool min fee not met", strprintf("%d < %d", package_fee, mempoolRejectFee));
551  }
552 
553  if (package_fee < ::minRelayTxFee.GetFee(package_size)) {
554  return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "min relay fee not met", strprintf("%d < %d", package_fee, ::minRelayTxFee.GetFee(package_size)));
555  }
556  return true;
557  }
558 
559 private:
560  CTxMemPool& m_pool;
561  CCoinsViewCache m_view;
562  CCoinsViewMemPool m_viewmempool;
563  CCoinsView m_dummy;
564 
565  CChainState& m_active_chainstate;
566 
567  // The package limits in effect at the time of invocation.
568  const size_t m_limit_ancestors;
569  const size_t m_limit_ancestor_size;
570  // These may be modified while evaluating a transaction (eg to account for
571  // in-mempool conflicts; see below).
572  size_t m_limit_descendants;
573  size_t m_limit_descendant_size;
574 };
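CheckFeeRate above compares a package's absolute fee against two per-kilo-vbyte rates: the dynamic mempool minimum and -minrelaytxfee. The sketch below restates the underlying rate-to-fee arithmetic with illustrative numbers; FeeForSizeSketch is a stand-in for (not a copy of) CFeeRate, though it mimics the behaviour of never rounding a non-zero rate down to a zero fee.

#include <cstddef>
#include <cstdint>
#include <cstdio>

int64_t FeeForSizeSketch(int64_t rate_sat_per_kvb, size_t vsize)
{
    int64_t fee = rate_sat_per_kvb * (int64_t)vsize / 1000;
    if (fee == 0 && rate_sat_per_kvb > 0) fee = 1; // never quote zero for a non-zero rate
    return fee;
}

int main()
{
    const int64_t min_relay_rate = 1000; // 1 sat/vB, expressed per kvB (the default -minrelaytxfee)
    const size_t tx_vsize = 250;
    std::printf("a %zu-vB tx needs at least %lld sat to clear the min relay fee\n",
                tx_vsize, (long long)FeeForSizeSketch(min_relay_rate, tx_vsize));
    return 0;
}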
575 
576 bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws)
577 {
578  const CTransactionRef& ptx = ws.m_ptx;
579  const CTransaction& tx = *ws.m_ptx;
580  const uint256& hash = ws.m_hash;
581 
582  // Copy/alias what we need out of args
583  const int64_t nAcceptTime = args.m_accept_time;
584  const bool bypass_limits = args.m_bypass_limits;
585  std::vector<COutPoint>& coins_to_uncache = args.m_coins_to_uncache;
586 
587  // Alias what we need out of ws
588  TxValidationState& state = ws.m_state;
589  std::set<uint256>& setConflicts = ws.m_conflicts;
590  CTxMemPool::setEntries& allConflicting = ws.m_all_conflicting;
591  CTxMemPool::setEntries& setAncestors = ws.m_ancestors;
592  std::unique_ptr<CTxMemPoolEntry>& entry = ws.m_entry;
593  bool& fReplacementTransaction = ws.m_replacement_transaction;
594  CAmount& nModifiedFees = ws.m_modified_fees;
595  CAmount& nConflictingFees = ws.m_conflicting_fees;
596  size_t& nConflictingSize = ws.m_conflicting_size;
597 
598  if (!CheckTransaction(tx, state)) {
599  return false; // state filled in by CheckTransaction
600  }
601 
602  // Coinbase is only valid in a block, not as a loose transaction
603  if (tx.IsCoinBase())
604  return state.Invalid(TxValidationResult::TX_CONSENSUS, "coinbase");
605 
606  // Rather not work on nonstandard transactions (unless -testnet/-regtest)
607  std::string reason;
608  if (fRequireStandard && !IsStandardTx(tx, reason))
609  return state.Invalid(TxValidationResult::TX_NOT_STANDARD, reason);
610 
611  // Do not work on transactions that are too small.
612  // A transaction with 1 segwit input and 1 P2WPKH output has non-witness size of 82 bytes.
613  // Transactions smaller than this are not relayed to mitigate CVE-2017-12842 by not relaying
614  // 64-byte transactions.
615  if (::GetSerializeSize(tx, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) < MIN_STANDARD_TX_NONWITNESS_SIZE)
616  return state.Invalid(TxValidationResult::TX_NOT_STANDARD, "tx-size-small");
617 
618  // Only accept nLockTime-using transactions that can be mined in the next
619  // block; we don't want our mempool filled up with transactions that can't
620  // be mined yet.
621  assert(std::addressof(::ChainActive()) == std::addressof(m_active_chainstate.m_chain));
622  if (!CheckFinalTx(m_active_chainstate.m_chain.Tip(), tx, STANDARD_LOCKTIME_VERIFY_FLAGS))
623  return state.Invalid(TxValidationResult::TX_PREMATURE_SPEND, "non-final");
624 
625  // is it already in the memory pool?
626  if (m_pool.exists(hash)) {
627  return state.Invalid(TxValidationResult::TX_CONFLICT, "txn-already-in-mempool");
628  }
629 
630  // Check for conflicts with in-memory transactions
631  for (const CTxIn &txin : tx.vin)
632  {
633  const CTransaction* ptxConflicting = m_pool.GetConflictTx(txin.prevout);
634  if (ptxConflicting) {
635  if (!setConflicts.count(ptxConflicting->GetHash()))
636  {
637  // Allow opt-out of transaction replacement by setting
638  // nSequence > MAX_BIP125_RBF_SEQUENCE (SEQUENCE_FINAL-2) on all inputs.
639  //
640  // SEQUENCE_FINAL-1 is picked to still allow use of nLockTime by
641  // non-replaceable transactions. Checking all inputs rather than just one
642  // is for the sake of multi-party protocols, where we don't
643  // want a single party to be able to disable replacement.
644  //
645  // The opt-out ignores descendants as anyone relying on
646  // first-seen mempool behavior should be checking all
647  // unconfirmed ancestors anyway; doing otherwise is hopelessly
648  // insecure.
649  bool fReplacementOptOut = true;
650  for (const CTxIn &_txin : ptxConflicting->vin)
651  {
652  if (_txin.nSequence <= MAX_BIP125_RBF_SEQUENCE)
653  {
654  fReplacementOptOut = false;
655  break;
656  }
657  }
658  if (fReplacementOptOut) {
659  return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "txn-mempool-conflict");
660  }
661 
662  setConflicts.insert(ptxConflicting->GetHash());
663  }
664  }
665  }
666 
667  LockPoints lp;
668  m_view.SetBackend(m_viewmempool);
669 
670  assert(std::addressof(::ChainstateActive().CoinsTip()) == std::addressof(m_active_chainstate.CoinsTip()));
671  const CCoinsViewCache& coins_cache = m_active_chainstate.CoinsTip();
672  // do all inputs exist?
673  for (const CTxIn& txin : tx.vin) {
674  if (!coins_cache.HaveCoinInCache(txin.prevout)) {
675  coins_to_uncache.push_back(txin.prevout);
676  }
677 
678  // Note: this call may add txin.prevout to the coins cache
679  // (coins_cache.cacheCoins) by way of FetchCoin(). It should be removed
680  // later (via coins_to_uncache) if this tx turns out to be invalid.
681  if (!m_view.HaveCoin(txin.prevout)) {
682  // Are inputs missing because we already have the tx?
683  for (size_t out = 0; out < tx.vout.size(); out++) {
684  // Optimistically just do efficient check of cache for outputs
685  if (coins_cache.HaveCoinInCache(COutPoint(hash, out))) {
686  return state.Invalid(TxValidationResult::TX_CONFLICT, "txn-already-known");
687  }
688  }
689  // Otherwise assume this might be an orphan tx for which we just haven't seen parents yet
690  return state.Invalid(TxValidationResult::TX_MISSING_INPUTS, "bad-txns-inputs-missingorspent");
691  }
692  }
693 
694  // This is const, but calls into the back end CoinsViews. The CCoinsViewDB at the bottom of the
695  // hierarchy brings the best block into scope. See CCoinsViewDB::GetBestBlock().
696  m_view.GetBestBlock();
697 
698  // we have all inputs cached now, so switch back to dummy (to protect
699  // against bugs where we pull more inputs from disk that miss being added
700  // to coins_to_uncache)
701  m_view.SetBackend(m_dummy);
702 
703  // Only accept BIP68 sequence locked transactions that can be mined in the next
704  // block; we don't want our mempool filled up with transactions that can't
705  // be mined yet.
706  // Must keep pool.cs for this unless we change CheckSequenceLocks to take a
707  // CoinsViewCache instead of create its own
708  assert(std::addressof(::ChainstateActive()) == std::addressof(m_active_chainstate));
709  if (!CheckSequenceLocks(m_active_chainstate, m_pool, tx, STANDARD_LOCKTIME_VERIFY_FLAGS, &lp))
710  return state.Invalid(TxValidationResult::TX_PREMATURE_SPEND, "non-BIP68-final");
711 
712  assert(std::addressof(g_chainman.m_blockman) == std::addressof(m_active_chainstate.m_blockman));
713  if (!Consensus::CheckTxInputs(tx, state, m_view, m_active_chainstate.m_blockman.GetSpendHeight(m_view), ws.m_base_fees)) {
714  return false; // state filled in by CheckTxInputs
715  }
716 
717  // Check for non-standard pay-to-script-hash in inputs
718  const auto& params = args.m_chainparams.GetConsensus();
719  assert(std::addressof(::ChainActive()) == std::addressof(m_active_chainstate.m_chain));
720  auto taproot_state = VersionBitsState(m_active_chainstate.m_chain.Tip(), params, Consensus::DEPLOYMENT_TAPROOT, versionbitscache);
721  if (fRequireStandard && !AreInputsStandard(tx, m_view, taproot_state == ThresholdState::ACTIVE)) {
722  return state.Invalid(TxValidationResult::TX_INPUTS_NOT_STANDARD, "bad-txns-nonstandard-inputs");
723  }
724 
725  // Check for non-standard witnesses.
726  if (tx.HasWitness() && fRequireStandard && !IsWitnessStandard(tx, m_view))
727  return state.Invalid(TxValidationResult::TX_WITNESS_MUTATED, "bad-witness-nonstandard");
728 
729  int64_t nSigOpsCost = GetTransactionSigOpCost(tx, m_view, STANDARD_SCRIPT_VERIFY_FLAGS);
730 
731  // nModifiedFees includes any fee deltas from PrioritiseTransaction
732  nModifiedFees = ws.m_base_fees;
733  m_pool.ApplyDelta(hash, nModifiedFees);
734 
735  // Keep track of transactions that spend a coinbase, which we re-scan
736  // during reorgs to ensure COINBASE_MATURITY is still met.
737  bool fSpendsCoinbase = false;
738  for (const CTxIn &txin : tx.vin) {
739  const Coin &coin = m_view.AccessCoin(txin.prevout);
740  if (coin.IsCoinBase()) {
741  fSpendsCoinbase = true;
742  break;
743  }
744  }
745 
746  assert(std::addressof(::ChainActive()) == std::addressof(m_active_chainstate.m_chain));
747  entry.reset(new CTxMemPoolEntry(ptx, ws.m_base_fees, nAcceptTime, m_active_chainstate.m_chain.Height(),
748  fSpendsCoinbase, nSigOpsCost, lp));
749  unsigned int nSize = entry->GetTxSize();
750 
751  if (nSigOpsCost > MAX_STANDARD_TX_SIGOPS_COST)
752  return state.Invalid(TxValidationResult::TX_NOT_STANDARD, "bad-txns-too-many-sigops",
753  strprintf("%d", nSigOpsCost));
754 
755  // No transactions are allowed below minRelayTxFee except from disconnected
756  // blocks
757  if (!bypass_limits && !CheckFeeRate(nSize, nModifiedFees, state)) return false;
758 
759  const CTxMemPool::setEntries setIterConflicting = m_pool.GetIterSet(setConflicts);
760  // Calculate in-mempool ancestors, up to a limit.
761  if (setConflicts.size() == 1) {
762  // In general, when we receive an RBF transaction with mempool conflicts, we want to know whether we
763  // would meet the chain limits after the conflicts have been removed. However, there isn't a practical
764  // way to do this short of calculating the ancestor and descendant sets with an overlay cache of
765  // changed mempool entries. Due to both implementation and runtime complexity concerns, this isn't
766  // very realistic, thus we only ensure a limited set of transactions are RBF'able despite mempool
767  // conflicts here. Importantly, we need to ensure that some transactions which were accepted using
768  // the below carve-out are able to be RBF'ed, without impacting the security the carve-out provides
769  // for off-chain contract systems (see link in the comment below).
770  //
771  // Specifically, the subset of RBF transactions which we allow despite chain limits are those which
772  // conflict directly with exactly one other transaction (but may evict children of said transaction),
773  // and which are not adding any new mempool dependencies. Note that the "no new mempool dependencies"
774  // check is accomplished later, so we don't bother doing anything about it here, but if BIP 125 is
775  // amended, we may need to move that check to here instead of removing it wholesale.
776  //
777  // Such transactions are clearly not merging any existing packages, so we are only concerned with
778  // ensuring that (a) no package is growing past the package size (not count) limits and (b) we are
779  // not allowing something to effectively use the (below) carve-out spot when it shouldn't be allowed
780  // to.
781  //
782  // To check these we first check if we meet the RBF criteria, above, and increment the descendant
783  // limits by the direct conflict and its descendants (as these are recalculated in
784  // CalculateMempoolAncestors by assuming the new transaction being added is a new descendant, with no
785  // removals, of each parent's existing dependent set). The ancestor count limits are unmodified (as
786  // the ancestor limits should be the same for both our new transaction and any conflicts).
787  // We don't bother incrementing m_limit_descendants by the full removal count as that limit never comes
788  // into force here (as we're only adding a single transaction).
789  assert(setIterConflicting.size() == 1);
790  CTxMemPool::txiter conflict = *setIterConflicting.begin();
791 
792  m_limit_descendants += 1;
793  m_limit_descendant_size += conflict->GetSizeWithDescendants();
794  }
795 
796  std::string errString;
797  if (!m_pool.CalculateMemPoolAncestors(*entry, setAncestors, m_limit_ancestors, m_limit_ancestor_size, m_limit_descendants, m_limit_descendant_size, errString)) {
798  setAncestors.clear();
799  // If CalculateMemPoolAncestors fails second time, we want the original error string.
800  std::string dummy_err_string;
801  // Contracting/payment channels CPFP carve-out:
802  // If the new transaction is relatively small (up to 40k weight)
803  // and has at most one ancestor (ie ancestor limit of 2, including
804  // the new transaction), allow it if its parent has exactly the
805  // descendant limit descendants.
806  //
807  // This allows protocols which rely on distrusting counterparties
808  // being able to broadcast descendants of an unconfirmed transaction
809  // to be secure by simply only having two immediately-spendable
810  // outputs - one for each counterparty. For more info on the uses for
811  // this, see https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2018-November/016518.html
812  if (nSize > EXTRA_DESCENDANT_TX_SIZE_LIMIT ||
813  !m_pool.CalculateMemPoolAncestors(*entry, setAncestors, 2, m_limit_ancestor_size, m_limit_descendants + 1, m_limit_descendant_size + EXTRA_DESCENDANT_TX_SIZE_LIMIT, dummy_err_string)) {
814  return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "too-long-mempool-chain", errString);
815  }
816  }
817 
818  // A transaction that spends outputs that would be replaced by it is invalid. Now
819  // that we have the set of all ancestors we can detect this
820  // pathological case by making sure setConflicts and setAncestors don't
821  // intersect.
822  for (CTxMemPool::txiter ancestorIt : setAncestors)
823  {
824  const uint256 &hashAncestor = ancestorIt->GetTx().GetHash();
825  if (setConflicts.count(hashAncestor))
826  {
827  return state.Invalid(TxValidationResult::TX_CONSENSUS, "bad-txns-spends-conflicting-tx",
828  strprintf("%s spends conflicting transaction %s",
829  hash.ToString(),
830  hashAncestor.ToString()));
831  }
832  }
833 
834  // Check if it's economically rational to mine this transaction rather
835  // than the ones it replaces.
836  nConflictingFees = 0;
837  nConflictingSize = 0;
838  uint64_t nConflictingCount = 0;
839 
840  // If we don't hold the lock allConflicting might be incomplete; the
841  // subsequent RemoveStaged() and addUnchecked() calls don't guarantee
842  // mempool consistency for us.
843  fReplacementTransaction = setConflicts.size();
844  if (fReplacementTransaction)
845  {
846  CFeeRate newFeeRate(nModifiedFees, nSize);
847  std::set<uint256> setConflictsParents;
848  const int maxDescendantsToVisit = 100;
849  for (const auto& mi : setIterConflicting) {
850  // Don't allow the replacement to reduce the feerate of the
851  // mempool.
852  //
853  // We usually don't want to accept replacements with lower
854  // feerates than what they replaced as that would lower the
855  // feerate of the next block. Requiring that the feerate always
856  // be increased is also an easy-to-reason about way to prevent
857  // DoS attacks via replacements.
858  //
859  // We only consider the feerates of transactions being directly
860  // replaced, not their indirect descendants. While that does
861  // mean high feerate children are ignored when deciding whether
862  // or not to replace, we do require the replacement to pay more
863  // overall fees too, mitigating most cases.
864  CFeeRate oldFeeRate(mi->GetModifiedFee(), mi->GetTxSize());
865  if (newFeeRate <= oldFeeRate)
866  {
867  return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "insufficient fee",
868  strprintf("rejecting replacement %s; new feerate %s <= old feerate %s",
869  hash.ToString(),
870  newFeeRate.ToString(),
871  oldFeeRate.ToString()));
872  }
873 
874  for (const CTxIn &txin : mi->GetTx().vin)
875  {
876  setConflictsParents.insert(txin.prevout.hash);
877  }
878 
879  nConflictingCount += mi->GetCountWithDescendants();
880  }
881  // This potentially overestimates the number of actual descendants
882  // but we just want to be conservative to avoid doing too much
883  // work.
884  if (nConflictingCount <= maxDescendantsToVisit) {
885  // If not too many to replace, then calculate the set of
886  // transactions that would have to be evicted
887  for (CTxMemPool::txiter it : setIterConflicting) {
888  m_pool.CalculateDescendants(it, allConflicting);
889  }
890  for (CTxMemPool::txiter it : allConflicting) {
891  nConflictingFees += it->GetModifiedFee();
892  nConflictingSize += it->GetTxSize();
893  }
894  } else {
895  return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "too many potential replacements",
896  strprintf("rejecting replacement %s; too many potential replacements (%d > %d)\n",
897  hash.ToString(),
898  nConflictingCount,
899  maxDescendantsToVisit));
900  }
901 
902  for (unsigned int j = 0; j < tx.vin.size(); j++)
903  {
904  // We don't want to accept replacements that require low
905  // feerate junk to be mined first. Ideally we'd keep track of
906  // the ancestor feerates and make the decision based on that,
907  // but for now requiring all new inputs to be confirmed works.
908  //
909  // Note that if you relax this to make RBF a little more useful,
910  // this may break the CalculateMempoolAncestors RBF relaxation,
911  // above. See the comment above the first CalculateMempoolAncestors
912  // call for more info.
913  if (!setConflictsParents.count(tx.vin[j].prevout.hash))
914  {
915  // Rather than check the UTXO set - potentially expensive -
916  // it's cheaper to just check if the new input refers to a
917  // tx that's in the mempool.
918  if (m_pool.exists(tx.vin[j].prevout.hash)) {
919  return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "replacement-adds-unconfirmed",
920  strprintf("replacement %s adds unconfirmed input, idx %d",
921  hash.ToString(), j));
922  }
923  }
924  }
925 
926  // The replacement must pay greater fees than the transactions it
927  // replaces - otherwise the bandwidth used by those conflicting
928  // transactions would not be paid for.
929  if (nModifiedFees < nConflictingFees)
930  {
931  return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "insufficient fee",
932  strprintf("rejecting replacement %s, less fees than conflicting txs; %s < %s",
933  hash.ToString(), FormatMoney(nModifiedFees), FormatMoney(nConflictingFees)));
934  }
935 
936  // Finally in addition to paying more fees than the conflicts the
937  // new transaction must pay for its own bandwidth.
938  CAmount nDeltaFees = nModifiedFees - nConflictingFees;
939  if (nDeltaFees < ::incrementalRelayFee.GetFee(nSize))
940  {
941  return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "insufficient fee",
942  strprintf("rejecting replacement %s, not enough additional fees to relay; %s < %s",
943  hash.ToString(),
944  FormatMoney(nDeltaFees),
945  FormatMoney(::incrementalRelayFee.GetFee(nSize))));
946  }
947  }
948  return true;
949 }
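Among the policy rules enforced above are three fee conditions from BIP125 replacement: the replacement's feerate must beat each transaction it directly conflicts with, its absolute fee must exceed the total fees of everything it evicts, and the fee delta must pay for relaying the replacement itself at the incremental relay rate. A condensed, self-contained model follows; names are hypothetical, plain integer math stands in for CFeeRate, and overflow handling is ignored.

#include <cstddef>
#include <cstdint>
#include <vector>

struct ConflictSketch { int64_t fee; size_t vsize; };

bool PassesRbfFeeRulesSketch(int64_t new_fee, size_t new_vsize,
                             const std::vector<ConflictSketch>& direct_conflicts,
                             int64_t total_conflicting_fees,
                             int64_t incremental_rate_sat_per_kvb)
{
    // (1) Feerate must strictly improve over each directly replaced transaction
    //     (cross-multiplication avoids integer division; the real code uses CFeeRate).
    for (const auto& c : direct_conflicts) {
        if (new_fee * (int64_t)c.vsize <= c.fee * (int64_t)new_vsize) return false;
    }
    // (2) Absolute fee must not drop below what is being evicted.
    if (new_fee < total_conflicting_fees) return false;
    // (3) The extra fee must cover relay bandwidth for the replacement's own size.
    const int64_t delta = new_fee - total_conflicting_fees;
    return delta >= incremental_rate_sat_per_kvb * (int64_t)new_vsize / 1000;
}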
950 
951 bool MemPoolAccept::PolicyScriptChecks(const ATMPArgs& args, Workspace& ws, PrecomputedTransactionData& txdata)
952 {
953  const CTransaction& tx = *ws.m_ptx;
954  TxValidationState& state = ws.m_state;
955 
956  constexpr unsigned int scriptVerifyFlags = STANDARD_SCRIPT_VERIFY_FLAGS;
957 
958  // Check input scripts and signatures.
959  // This is done last to help prevent CPU exhaustion denial-of-service attacks.
960  if (!CheckInputScripts(tx, state, m_view, scriptVerifyFlags, true, false, txdata)) {
961  // SCRIPT_VERIFY_CLEANSTACK requires SCRIPT_VERIFY_WITNESS, so we
962  // need to turn both off, and compare against just turning off CLEANSTACK
963  // to see if the failure is specifically due to witness validation.
964  TxValidationState state_dummy; // Want reported failures to be from first CheckInputScripts
965  if (!tx.HasWitness() && CheckInputScripts(tx, state_dummy, m_view, scriptVerifyFlags & ~(SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_CLEANSTACK), true, false, txdata) &&
966  !CheckInputScripts(tx, state_dummy, m_view, scriptVerifyFlags & ~SCRIPT_VERIFY_CLEANSTACK, true, false, txdata)) {
967  // Only the witness is missing, so the transaction itself may be fine.
968  state.Invalid(TxValidationResult::TX_WITNESS_STRIPPED,
969  state.GetRejectReason(), state.GetDebugMessage());
970  }
971  return false; // state filled in by CheckInputScripts
972  }
973 
974  return true;
975 }
976 
977 bool MemPoolAccept::ConsensusScriptChecks(const ATMPArgs& args, Workspace& ws, PrecomputedTransactionData& txdata)
978 {
979  const CTransaction& tx = *ws.m_ptx;
980  const uint256& hash = ws.m_hash;
981  TxValidationState& state = ws.m_state;
982  const CChainParams& chainparams = args.m_chainparams;
983 
984  // Check again against the current block tip's script verification
985  // flags to cache our script execution flags. This is, of course,
986  // useless if the next block has different script flags from the
987  // previous one, but because the cache tracks script flags for us it
988  // will auto-invalidate and we'll just have a few blocks of extra
989  // misses on soft-fork activation.
990  //
991  // This is also useful in case of bugs in the standard flags that cause
992  // transactions to pass as valid when they're actually invalid. For
993  // instance the STRICTENC flag was incorrectly allowing certain
994  // CHECKSIG NOT scripts to pass, even though they were invalid.
995  //
996  // There is a similar check in CreateNewBlock() to prevent creating
997  // invalid blocks (using TestBlockValidity), however allowing such
998  // transactions into the mempool can be exploited as a DoS attack.
999  assert(std::addressof(::ChainActive()) == std::addressof(m_active_chainstate.m_chain));
1000  unsigned int currentBlockScriptVerifyFlags = GetBlockScriptFlags(m_active_chainstate.m_chain.Tip(), chainparams.GetConsensus());
1001  assert(std::addressof(::ChainstateActive().CoinsTip()) == std::addressof(m_active_chainstate.CoinsTip()));
1002  if (!CheckInputsFromMempoolAndCache(tx, state, m_view, m_pool, currentBlockScriptVerifyFlags, txdata, m_active_chainstate.CoinsTip())) {
1003  return error("%s: BUG! PLEASE REPORT THIS! CheckInputScripts failed against latest-block but not STANDARD flags %s, %s",
1004  __func__, hash.ToString(), state.ToString());
1005  }
1006 
1007  return true;
1008 }
1009 
1010 bool MemPoolAccept::Finalize(const ATMPArgs& args, Workspace& ws)
1011 {
1012  const CTransaction& tx = *ws.m_ptx;
1013  const uint256& hash = ws.m_hash;
1014  TxValidationState& state = ws.m_state;
1015  const bool bypass_limits = args.m_bypass_limits;
1016 
1017  CTxMemPool::setEntries& allConflicting = ws.m_all_conflicting;
1018  CTxMemPool::setEntries& setAncestors = ws.m_ancestors;
1019  const CAmount& nModifiedFees = ws.m_modified_fees;
1020  const CAmount& nConflictingFees = ws.m_conflicting_fees;
1021  const size_t& nConflictingSize = ws.m_conflicting_size;
1022  const bool fReplacementTransaction = ws.m_replacement_transaction;
1023  std::unique_ptr<CTxMemPoolEntry>& entry = ws.m_entry;
1024 
1025  // Remove conflicting transactions from the mempool
1026  for (CTxMemPool::txiter it : allConflicting)
1027  {
1028  LogPrint(BCLog::MEMPOOL, "replacing tx %s with %s for %s additional fees, %d delta bytes\n",
1029  it->GetTx().GetHash().ToString(),
1030  hash.ToString(),
1031  FormatMoney(nModifiedFees - nConflictingFees),
1032  (int)entry->GetTxSize() - (int)nConflictingSize);
1033  ws.m_replaced_transactions.push_back(it->GetSharedTx());
1034  }
1035  m_pool.RemoveStaged(allConflicting, false, MemPoolRemovalReason::REPLACED);
1036 
1037  // This transaction should only count for fee estimation if:
1038  // - it isn't a BIP 125 replacement transaction (may not be widely supported)
1039  // - it's not being re-added during a reorg which bypasses typical mempool fee limits
1040  // - the node is not behind
1041  // - the transaction is not dependent on any other transactions in the mempool
1042  assert(std::addressof(::ChainstateActive()) == std::addressof(m_active_chainstate));
1043  bool validForFeeEstimation = !fReplacementTransaction && !bypass_limits && IsCurrentForFeeEstimation(m_active_chainstate) && m_pool.HasNoInputsOf(tx);
1044 
1045  // Store transaction in memory
1046  m_pool.addUnchecked(*entry, setAncestors, validForFeeEstimation);
1047 
1048  // trim mempool and check if tx was trimmed
1049  if (!bypass_limits) {
1050  assert(std::addressof(::ChainstateActive().CoinsTip()) == std::addressof(m_active_chainstate.CoinsTip()));
1051  LimitMempoolSize(m_pool, m_active_chainstate.CoinsTip(), gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, std::chrono::hours{gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY)});
1052  if (!m_pool.exists(hash))
1053  return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "mempool full");
1054  }
1055  return true;
1056 }
1057 
1058 MempoolAcceptResult MemPoolAccept::AcceptSingleTransaction(const CTransactionRef& ptx, ATMPArgs& args)
1059 {
1061  LOCK(m_pool.cs); // mempool "read lock" (held through GetMainSignals().TransactionAddedToMempool())
1062 
1063  Workspace ws(ptx);
1064 
1065  if (!PreChecks(args, ws)) return MempoolAcceptResult(ws.m_state);
1066 
1067  // Only compute the precomputed transaction data if we need to verify
1068  // scripts (ie, other policy checks pass). We perform the inexpensive
1069  // checks first and avoid hashing and signature verification unless those
1070  // checks pass, to mitigate CPU exhaustion denial-of-service attacks.
1071  PrecomputedTransactionData txdata;
1072 
1073  if (!PolicyScriptChecks(args, ws, txdata)) return MempoolAcceptResult(ws.m_state);
1074 
1075  if (!ConsensusScriptChecks(args, ws, txdata)) return MempoolAcceptResult(ws.m_state);
1076 
1077  // Tx was accepted, but not added
1078  if (args.m_test_accept) {
1079  return MempoolAcceptResult(std::move(ws.m_replaced_transactions), ws.m_base_fees);
1080  }
1081 
1082  if (!Finalize(args, ws)) return MempoolAcceptResult(ws.m_state);
1083 
1084  GetMainSignals().TransactionAddedToMempool(ptx, m_pool.GetAndIncrementSequence());
1085 
1086  return MempoolAcceptResult(std::move(ws.m_replaced_transactions), ws.m_base_fees);
1087 }
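AcceptSingleTransaction above is a short-circuiting pipeline: cheap policy checks first, script checks under policy flags next, consensus-flag script checks after that, and only then the state-mutating Finalize step. A schematic stand-in for that control flow (not the real interface):

#include <functional>
#include <initializer_list>

// Runs each stage in order and stops at the first failure, mirroring how an
// early rejection avoids the more expensive later stages.
bool RunAcceptancePipelineSketch(std::initializer_list<std::function<bool()>> stages)
{
    for (const auto& stage : stages) {
        if (!stage()) return false;
    }
    return true;
}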
1088 
1089 } // anon namespace
1090 
1092 static MempoolAcceptResult AcceptToMemoryPoolWithTime(const CChainParams& chainparams, CTxMemPool& pool,
1093  CChainState& active_chainstate,
1094  const CTransactionRef &tx, int64_t nAcceptTime,
1095  bool bypass_limits, bool test_accept)
1096  EXCLUSIVE_LOCKS_REQUIRED(cs_main)
1097 {
1098  std::vector<COutPoint> coins_to_uncache;
1099  MemPoolAccept::ATMPArgs args { chainparams, nAcceptTime, bypass_limits, coins_to_uncache, test_accept };
1100 
1101  assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate));
1102  const MempoolAcceptResult result = MemPoolAccept(pool, active_chainstate).AcceptSingleTransaction(tx, args);
1103  if (result.m_result_type != MempoolAcceptResult::ResultType::VALID) {
1104  // Remove coins that were not present in the coins cache before calling
1105  // AcceptSingleTransaction(); this is to prevent memory DoS in case we receive a large
1106  // number of invalid transactions that attempt to overrun the in-memory coins cache
1107  // (`CCoinsViewCache::cacheCoins`).
1108 
1109  for (const COutPoint& hashTx : coins_to_uncache)
1110  active_chainstate.CoinsTip().Uncache(hashTx);
1111  }
1112  // After we've (potentially) uncached entries, ensure our coins cache is still within its size limits
1113  BlockValidationState state_dummy;
1114  active_chainstate.FlushStateToDisk(chainparams, state_dummy, FlushStateMode::PERIODIC);
1115  return result;
1116 }
1117 
1118 MempoolAcceptResult AcceptToMemoryPool(CChainState& active_chainstate, CTxMemPool& pool, const CTransactionRef& tx,
1119  bool bypass_limits, bool test_accept)
1120 {
1121  assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate));
1122  return AcceptToMemoryPoolWithTime(Params(), pool, active_chainstate, tx, GetTime(), bypass_limits, test_accept);
1123 }
1124 
1125 CTransactionRef GetTransaction(const CBlockIndex* const block_index, const CTxMemPool* const mempool, const uint256& hash, const Consensus::Params& consensusParams, uint256& hashBlock)
1126 {
1127  LOCK(cs_main);
1128 
1129  if (block_index) {
1130  CBlock block;
1131  if (ReadBlockFromDisk(block, block_index, consensusParams)) {
1132  for (const auto& tx : block.vtx) {
1133  if (tx->GetHash() == hash) {
1134  hashBlock = block_index->GetBlockHash();
1135  return tx;
1136  }
1137  }
1138  }
1139  return nullptr;
1140  }
1141  if (mempool) {
1142  CTransactionRef ptx = mempool->get(hash);
1143  if (ptx) return ptx;
1144  }
1145  if (g_txindex) {
1146  CTransactionRef tx;
1147  if (g_txindex->FindTx(hash, hashBlock, tx)) return tx;
1148  }
1149  return nullptr;
1150 }
1151 
1152 CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams)
1153 {
1154  int halvings = nHeight / consensusParams.nSubsidyHalvingInterval;
1155  // Force block reward to zero when right shift is undefined.
1156  if (halvings >= 64)
1157  return 0;
1158 
1159  CAmount nSubsidy = 50 * COIN;
1160  // Subsidy is cut in half every 210,000 blocks which will occur approximately every 4 years.
1161  nSubsidy >>= halvings;
1162  return nSubsidy;
1163 }
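A worked example of the halving arithmetic in GetBlockSubsidy, assuming mainnet's 210,000-block nSubsidyHalvingInterval; the program below is illustrative only and defines its own COIN_SKETCH constant.

#include <cstdint>
#include <cstdio>

int main()
{
    const int64_t COIN_SKETCH = 100000000;   // satoshis per BTC
    const int halving_interval = 210000;     // mainnet consensus parameter
    for (int height : {0, 209999, 210000, 420000, 629999, 630000}) {
        const int halvings = height / halving_interval;
        const int64_t subsidy = (halvings >= 64) ? 0 : (50 * COIN_SKETCH) >> halvings;
        std::printf("height %7d -> subsidy %lld sat\n", height, (long long)subsidy);
    }
    return 0; // prints 50, 50, 25, 12.5, 12.5 and 6.25 BTC worth of satoshis respectively
}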
1164 
1165 CoinsViews::CoinsViews(
1166  std::string ldb_name,
1167  size_t cache_size_bytes,
1168  bool in_memory,
1169  bool should_wipe) : m_dbview(
1170  GetDataDir() / ldb_name, cache_size_bytes, in_memory, should_wipe),
1171  m_catcherview(&m_dbview) {}
1172 
1173 void CoinsViews::InitCache()
1174 {
1175  m_cacheview = std::make_unique<CCoinsViewCache>(&m_catcherview);
1176 }
1177 
1178 CChainState::CChainState(CTxMemPool& mempool, BlockManager& blockman, uint256 from_snapshot_blockhash)
1179  : m_mempool(mempool),
1180  m_blockman(blockman),
1181  m_from_snapshot_blockhash(from_snapshot_blockhash) {}
1182 
1183 void CChainState::InitCoinsDB(
1184  size_t cache_size_bytes,
1185  bool in_memory,
1186  bool should_wipe,
1187  std::string leveldb_name)
1188 {
1189  if (!m_from_snapshot_blockhash.IsNull()) {
1190  leveldb_name += "_" + m_from_snapshot_blockhash.ToString();
1191  }
1192 
1193  m_coins_views = std::make_unique<CoinsViews>(
1194  leveldb_name, cache_size_bytes, in_memory, should_wipe);
1195 }
1196 
1197 void CChainState::InitCoinsCache(size_t cache_size_bytes)
1198 {
1199  assert(m_coins_views != nullptr);
1200  m_coinstip_cache_size_bytes = cache_size_bytes;
1201  m_coins_views->InitCache();
1202 }
1203 
1204 // Note that though this is marked const, we may end up modifying `m_cached_finished_ibd`, which
1205 // is a performance-related implementation detail. This function must be marked
1206 // `const` so that `CValidationInterface` clients (which are given a `const CChainState*`)
1207 // can call it.
1208 //
1209 bool CChainState::IsInitialBlockDownload() const
1210 {
1211  // Optimization: pre-test latch before taking the lock.
1212  if (m_cached_finished_ibd.load(std::memory_order_relaxed))
1213  return false;
1214 
1215  LOCK(cs_main);
1216  if (m_cached_finished_ibd.load(std::memory_order_relaxed))
1217  return false;
1218  if (fImporting || fReindex)
1219  return true;
1220  if (m_chain.Tip() == nullptr)
1221  return true;
1222  if (m_chain.Tip()->nChainWork < nMinimumChainWork)
1223  return true;
1224  if (m_chain.Tip()->GetBlockTime() < (GetTime() - nMaxTipAge))
1225  return true;
1226  LogPrintf("Leaving InitialBlockDownload (latching to false)\n");
1227  m_cached_finished_ibd.store(true, std::memory_order_relaxed);
1228  return false;
1229 }
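IsInitialBlockDownload above relies on a one-way latch: once the node concludes initial block download is over it never re-enters that state, so the atomic can be probed without the lock on the fast path. A minimal, generic model of the pattern (hypothetical class, trivial stand-in for the real exit criteria):

#include <atomic>
#include <mutex>

class IbdLatchSketch {
    std::atomic<bool> m_done{false};
    std::mutex m_mutex;

    bool StillSyncingExpensiveCheck() { return false; } // stand-in for the real criteria

public:
    bool IsInitialBlockDownload()
    {
        // Fast path: the latch is already set, no lock needed.
        if (m_done.load(std::memory_order_relaxed)) return false;
        std::lock_guard<std::mutex> lock(m_mutex);
        if (m_done.load(std::memory_order_relaxed)) return false;
        if (StillSyncingExpensiveCheck()) return true;
        // Latch to "done"; every later call takes the fast path.
        m_done.store(true, std::memory_order_relaxed);
        return false;
    }
};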
1230 
1231 static void AlertNotify(const std::string& strMessage)
1232 {
1233  uiInterface.NotifyAlertChanged();
1234 #if HAVE_SYSTEM
1235  std::string strCmd = gArgs.GetArg("-alertnotify", "");
1236  if (strCmd.empty()) return;
1237 
1238  // Alert text should be plain ascii coming from a trusted source, but to
1239  // be safe we first strip anything not in safeChars, then add single quotes around
1240  // the whole string before passing it to the shell:
1241  std::string singleQuote("'");
1242  std::string safeStatus = SanitizeString(strMessage);
1243  safeStatus = singleQuote+safeStatus+singleQuote;
1244  boost::replace_all(strCmd, "%s", safeStatus);
1245 
1246  std::thread t(runCommand, strCmd);
1247  t.detach(); // thread runs free
1248 #endif
1249 }
1250 
1251 void CChainState::CheckForkWarningConditions()
1252 {
1253  AssertLockHeld(cs_main);
1254  assert(std::addressof(::ChainstateActive()) == std::addressof(*this));
1255 
1256  // Before we get past initial download, we cannot reliably alert about forks
1257  // (we assume we don't get stuck on a fork before finishing our initial sync)
1258  if (IsInitialBlockDownload()) {
1259  return;
1260  }
1261 
1262  if (pindexBestInvalid && pindexBestInvalid->nChainWork > m_chain.Tip()->nChainWork + (GetBlockProof(*m_chain.Tip()) * 6)) {
1263  LogPrintf("%s: Warning: Found invalid chain at least ~6 blocks longer than our best chain.\nChain state database corruption likely.\n", __func__);
1264  SetfLargeWorkInvalidChainFound(true);
1265  } else {
1266  SetfLargeWorkInvalidChainFound(false);
1267  }
1268 }
1269 
1270 // Called both upon regular invalid block discovery *and* InvalidateBlock
1271 void CChainState::InvalidChainFound(CBlockIndex* pindexNew)
1272 {
1273  assert(std::addressof(::ChainstateActive()) == std::addressof(*this));
1274  if (!pindexBestInvalid || pindexNew->nChainWork > pindexBestInvalid->nChainWork)
1275  pindexBestInvalid = pindexNew;
1276  if (pindexBestHeader != nullptr && pindexBestHeader->GetAncestor(pindexNew->nHeight) == pindexNew) {
1277  pindexBestHeader = m_chain.Tip();
1278  }
1279 
1280  LogPrintf("%s: invalid block=%s height=%d log2_work=%f date=%s\n", __func__,
1281  pindexNew->GetBlockHash().ToString(), pindexNew->nHeight,
1282  log(pindexNew->nChainWork.getdouble())/log(2.0), FormatISO8601DateTime(pindexNew->GetBlockTime()));
1283  CBlockIndex *tip = m_chain.Tip();
1284  assert (tip);
1285  LogPrintf("%s: current best=%s height=%d log2_work=%f date=%s\n", __func__,
1286  tip->GetBlockHash().ToString(), m_chain.Height(), log(tip->nChainWork.getdouble())/log(2.0),
1287  FormatISO8601DateTime(tip->GetBlockTime()));
1288  CheckForkWarningConditions();
1289 }
1290 
1291 // Same as InvalidChainFound, above, except not called directly from InvalidateBlock,
1292 // which does its own setBlockIndexCandidates management.
1293 void CChainState::InvalidBlockFound(CBlockIndex* pindex, const BlockValidationState& state) {
1294  if (state.GetResult() != BlockValidationResult::BLOCK_MUTATED) {
1295  pindex->nStatus |= BLOCK_FAILED_VALID;
1296  m_blockman.m_failed_blocks.insert(pindex);
1297  setDirtyBlockIndex.insert(pindex);
1298  setBlockIndexCandidates.erase(pindex);
1299  InvalidChainFound(pindex);
1300  }
1301 }
1302 
1303 void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, CTxUndo &txundo, int nHeight)
1304 {
1305  // mark inputs spent
1306  if (!tx.IsCoinBase()) {
1307  txundo.vprevout.reserve(tx.vin.size());
1308  for (const CTxIn &txin : tx.vin) {
1309  txundo.vprevout.emplace_back();
1310  bool is_spent = inputs.SpendCoin(txin.prevout, &txundo.vprevout.back());
1311  assert(is_spent);
1312  }
1313  }
1314  // add outputs
1315  AddCoins(inputs, tx, nHeight);
1316 }
1317 
1318 void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, int nHeight)
1319 {
1320  CTxUndo txundo;
1321  UpdateCoins(tx, inputs, txundo, nHeight);
1322 }
1323 
1324 bool CScriptCheck::operator()() {
1325  const CScript &scriptSig = ptxTo->vin[nIn].scriptSig;
1326  const CScriptWitness *witness = &ptxTo->vin[nIn].scriptWitness;
1327  return VerifyScript(scriptSig, m_tx_out.scriptPubKey, witness, nFlags, CachingTransactionSignatureChecker(ptxTo, nIn, m_tx_out.nValue, cacheStore, *txdata), &error);
1328 }
1329 
1330 int BlockManager::GetSpendHeight(const CCoinsViewCache& inputs)
1331 {
1332  AssertLockHeld(cs_main);
1333  assert(std::addressof(g_chainman.m_blockman) == std::addressof(*this));
1334  CBlockIndex* pindexPrev = LookupBlockIndex(inputs.GetBestBlock());
1335  return pindexPrev->nHeight + 1;
1336 }
1337 
1338 
1339 static CuckooCache::cache<uint256, SignatureCacheHasher> g_scriptExecutionCache;
1340 static CSHA256 g_scriptExecutionCacheHasher;
1341 
1342 void InitScriptExecutionCache() {
1343  // Setup the salted hasher
1344  uint256 nonce = GetRandHash();
1345  // We want the nonce to be 64 bytes long to force the hasher to process
1346  // this chunk, which makes later hash computations more efficient. We
1347  // just write our 32-byte entropy twice to fill the 64 bytes.
1348  g_scriptExecutionCacheHasher.Write(nonce.begin(), 32);
1349  g_scriptExecutionCacheHasher.Write(nonce.begin(), 32);
1350  // nMaxCacheSize is unsigned. If -maxsigcachesize is set to zero,
1351  // setup_bytes creates the minimum possible cache (2 elements).
1352  size_t nMaxCacheSize = std::min(std::max((int64_t)0, gArgs.GetArg("-maxsigcachesize", DEFAULT_MAX_SIG_CACHE_SIZE) / 2), MAX_MAX_SIG_CACHE_SIZE) * ((size_t) 1 << 20);
1353  size_t nElems = g_scriptExecutionCache.setup_bytes(nMaxCacheSize);
1354  LogPrintf("Using %zu MiB out of %zu/2 requested for script execution cache, able to store %zu elements\n",
1355  (nElems*sizeof(uint256)) >>20, (nMaxCacheSize*2)>>20, nElems);
1356 }
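The sizing expression above splits the -maxsigcachesize budget in half (the other half feeds the signature cache), clamps it to a maximum, and converts MiB to bytes. Restated as a standalone helper with assumed example values (a 32 MiB default budget, so 16 MiB lands in this cache):

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdio>

size_t ScriptCacheBytesSketch(int64_t maxsigcachesize_mib, int64_t max_mib_per_cache)
{
    // Half the configured budget goes to this cache; never below zero, never above the cap.
    const int64_t mib = std::min(std::max((int64_t)0, maxsigcachesize_mib / 2), max_mib_per_cache);
    return (size_t)mib << 20; // MiB -> bytes
}

int main()
{
    std::printf("%zu bytes\n", ScriptCacheBytesSketch(32, 16384)); // assumed default and cap values
    return 0;
}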
1357 
1377 bool CheckInputScripts(const CTransaction& tx, TxValidationState& state,
1378  const CCoinsViewCache& inputs, unsigned int flags, bool cacheSigStore,
1379  bool cacheFullScriptStore, PrecomputedTransactionData& txdata,
1380  std::vector<CScriptCheck>* pvChecks)
1381 {
1382  if (tx.IsCoinBase()) return true;
1383 
1384  if (pvChecks) {
1385  pvChecks->reserve(tx.vin.size());
1386  }
1387 
1388  // First check if script executions have been cached with the same
1389  // flags. Note that this assumes that the inputs provided are
1390  // correct (ie that the transaction hash which is in tx's prevouts
1391  // properly commits to the scriptPubKey in the inputs view of that
1392  // transaction).
1393  uint256 hashCacheEntry;
1395  hasher.Write(tx.GetWitnessHash().begin(), 32).Write((unsigned char*)&flags, sizeof(flags)).Finalize(hashCacheEntry.begin());
1396  AssertLockHeld(cs_main); //TODO: Remove this requirement by making CuckooCache not require external locks
1397  if (g_scriptExecutionCache.contains(hashCacheEntry, !cacheFullScriptStore)) {
1398  return true;
1399  }
1400 
1401  if (!txdata.m_spent_outputs_ready) {
1402  std::vector<CTxOut> spent_outputs;
1403  spent_outputs.reserve(tx.vin.size());
1404 
1405  for (const auto& txin : tx.vin) {
1406  const COutPoint& prevout = txin.prevout;
1407  const Coin& coin = inputs.AccessCoin(prevout);
1408  assert(!coin.IsSpent());
1409  spent_outputs.emplace_back(coin.out);
1410  }
1411  txdata.Init(tx, std::move(spent_outputs));
1412  }
1413  assert(txdata.m_spent_outputs.size() == tx.vin.size());
1414 
1415  for (unsigned int i = 0; i < tx.vin.size(); i++) {
1416 
1417  // We very carefully only pass in things to CScriptCheck which
1418  // are clearly committed to by the transaction's witness hash. This provides
1419  // a sanity check that our caching is not introducing consensus
1420  // failures through additional data in, eg, the coins being
1421  // spent being checked as a part of CScriptCheck.
1422 
1423  // Verify signature
1424  CScriptCheck check(txdata.m_spent_outputs[i], tx, i, flags, cacheSigStore, &txdata);
1425  if (pvChecks) {
1426  pvChecks->push_back(CScriptCheck());
1427  check.swap(pvChecks->back());
1428  } else if (!check()) {
1430  // Check whether the failure was caused by a
1431  // non-mandatory script verification check, such as
1432  // non-standard DER encodings or non-null dummy
1433  // arguments; if so, ensure we return NOT_STANDARD
1434  // instead of CONSENSUS to avoid downstream users
1435  // splitting the network between upgraded and
1436  // non-upgraded nodes by banning CONSENSUS-failing
1437  // data providers.
1438  CScriptCheck check2(txdata.m_spent_outputs[i], tx, i,
1439  flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, cacheSigStore, &txdata);
1440  if (check2())
1441  return state.Invalid(TxValidationResult::TX_NOT_STANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check.GetScriptError())));
1442  }
1443  // MANDATORY flag failures correspond to
1444  // TxValidationResult::TX_CONSENSUS. Because CONSENSUS
1445  // failures are the most serious case of validation
1446  // failures, we may need to consider using
1447  // RECENT_CONSENSUS_CHANGE for any script failure that
1448  // could be due to non-upgraded nodes which we may want to
1449  // support, to avoid splitting the network (but this
1450  // depends on the details of how net_processing handles
1451  // such errors).
1452  return state.Invalid(TxValidationResult::TX_CONSENSUS, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(check.GetScriptError())));
1453  }
1454  }
1455 
1456  if (cacheFullScriptStore && !pvChecks) {
1457  // We executed all of the provided scripts, and were told to
1458  // cache the result. Do so now.
1459  g_scriptExecutionCache.insert(hashCacheEntry);
1460  }
1461 
1462  return true;
1463 }
1464 
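// [Editor's sketch, not part of validation.cpp] The cache-key derivation used in
// CheckInputScripts() above, written out as a hypothetical standalone helper
// (requires <crypto/sha256.h> and <uint256.h>). The salted hasher is a copy of a
// process-lifetime CSHA256 that has already absorbed the 64-byte random nonce,
// so an attacker cannot precompute colliding cache entries.
static uint256 SketchScriptCacheKey(const CSHA256& salted_hasher, const uint256& wtxid, unsigned int flags)
{
    uint256 key;
    CSHA256 hasher = salted_hasher; // copy; keep the shared salted state untouched
    hasher.Write(wtxid.begin(), 32)
          .Write(reinterpret_cast<const unsigned char*>(&flags), sizeof(flags))
          .Finalize(key.begin());
    return key;
}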
1465 static bool UndoWriteToDisk(const CBlockUndo& blockundo, FlatFilePos& pos, const uint256& hashBlock, const CMessageHeader::MessageStartChars& messageStart)
1466 {
1467  // Open history file to append
1469  if (fileout.IsNull())
1470  return error("%s: OpenUndoFile failed", __func__);
1471 
1472  // Write index header
1473  unsigned int nSize = GetSerializeSize(blockundo, fileout.GetVersion());
1474  fileout << messageStart << nSize;
1475 
1476  // Write undo data
1477  long fileOutPos = ftell(fileout.Get());
1478  if (fileOutPos < 0)
1479  return error("%s: ftell failed", __func__);
1480  pos.nPos = (unsigned int)fileOutPos;
1481  fileout << blockundo;
1482 
1483  // calculate & write checksum
1485  hasher << hashBlock;
1486  hasher << blockundo;
1487  fileout << hasher.GetHash();
1488 
1489  return true;
1490 }
1491 
1492 bool UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex* pindex)
1493 {
1494  FlatFilePos pos = pindex->GetUndoPos();
1495  if (pos.IsNull()) {
1496  return error("%s: no undo data available", __func__);
1497  }
1498 
1499  // Open history file to read
1500  CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION);
1501  if (filein.IsNull())
1502  return error("%s: OpenUndoFile failed", __func__);
1503 
1504  // Read block
1505  uint256 hashChecksum;
1506  CHashVerifier<CAutoFile> verifier(&filein); // We need a CHashVerifier as reserializing may lose data
1507  try {
1508  verifier << pindex->pprev->GetBlockHash();
1509  verifier >> blockundo;
1510  filein >> hashChecksum;
1511  }
1512  catch (const std::exception& e) {
1513  return error("%s: Deserialize or I/O error - %s", __func__, e.what());
1514  }
1515 
1516  // Verify checksum
1517  if (hashChecksum != verifier.GetHash())
1518  return error("%s: Checksum mismatch", __func__);
1519 
1520  return true;
1521 }
1522 
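// [Editor's note] For reference, the undo ("rev*.dat") record layout implied by
// UndoWriteToDisk()/UndoReadFromDisk() above (byte sizes in brackets; a sketch,
// not a formal spec):
//
//   [4] network message start   [4] serialized size of the CBlockUndo
//   [n] serialized CBlockUndo
//   [32] checksum = double-SHA256( previous block hash || serialized CBlockUndo )
//
// The checksum is keyed on the *previous* block's hash, which is why both the
// writer (via WriteUndoDataForBlock() below) and the reader feed
// pindex->pprev->GetBlockHash() into the hash.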
1523 static bool AbortNode(BlockValidationState& state, const std::string& strMessage, const bilingual_str& userMessage = bilingual_str())
1524 {
1525  AbortNode(strMessage, userMessage);
1526  return state.Error(strMessage);
1527 }
1528 
1536 int ApplyTxInUndo(Coin&& undo, CCoinsViewCache& view, const COutPoint& out)
1537 {
1538  bool fClean = true;
1539 
1540  if (view.HaveCoin(out)) fClean = false; // overwriting transaction output
1541 
1542  if (undo.nHeight == 0) {
1543  // Missing undo metadata (height and coinbase). Older versions included this
1544  // information only in undo records for the last spend of a transaction's
1545  // outputs. This implies that it must be present for some other output of the same tx.
1546  const Coin& alternate = AccessByTxid(view, out.hash);
1547  if (!alternate.IsSpent()) {
1548  undo.nHeight = alternate.nHeight;
1549  undo.fCoinBase = alternate.fCoinBase;
1550  } else {
1551  return DISCONNECT_FAILED; // adding output for transaction without known metadata
1552  }
1553  }
1554  // If the coin already exists as an unspent coin in the cache, then the
1555  // possible_overwrite parameter to AddCoin must be set to true. We have
1556  // already checked whether an unspent coin exists above using HaveCoin, so
1557  // we don't need to guess. When fClean is false, an unspent coin already
1558  // existed and it is an overwrite.
1559  view.AddCoin(out, std::move(undo), !fClean);
1560 
1561  return fClean ? DISCONNECT_OK : DISCONNECT_UNCLEAN;
1562 }
1563 
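// [Editor's note] Return-value semantics of ApplyTxInUndo(), as consumed by
// DisconnectBlock() below:
//   DISCONNECT_OK      - the coin was restored cleanly;
//   DISCONNECT_UNCLEAN - the coin was restored, but an unspent version of the
//                        output already existed in the view (an overwrite);
//   DISCONNECT_FAILED  - the undo record lacked height/coinbase metadata and no
//                        other unspent output of the same txid was available to
//                        recover it from, so the coin could not be restored.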
1567 {
1568  bool fClean = true;
1569 
1570  CBlockUndo blockUndo;
1571  if (!UndoReadFromDisk(blockUndo, pindex)) {
1572  error("DisconnectBlock(): failure reading undo data");
1573  return DISCONNECT_FAILED;
1574  }
1575 
1576  if (blockUndo.vtxundo.size() + 1 != block.vtx.size()) {
1577  error("DisconnectBlock(): block and undo data inconsistent");
1578  return DISCONNECT_FAILED;
1579  }
1580 
1581  // undo transactions in reverse order
1582  for (int i = block.vtx.size() - 1; i >= 0; i--) {
1583  const CTransaction &tx = *(block.vtx[i]);
1584  uint256 hash = tx.GetHash();
1585  bool is_coinbase = tx.IsCoinBase();
1586 
1587  // Check that all outputs are available and match the outputs in the block itself
1588  // exactly.
1589  for (size_t o = 0; o < tx.vout.size(); o++) {
1590  if (!tx.vout[o].scriptPubKey.IsUnspendable()) {
1591  COutPoint out(hash, o);
1592  Coin coin;
1593  bool is_spent = view.SpendCoin(out, &coin);
1594  if (!is_spent || tx.vout[o] != coin.out || pindex->nHeight != coin.nHeight || is_coinbase != coin.fCoinBase) {
1595  fClean = false; // transaction output mismatch
1596  }
1597  }
1598  }
1599 
1600  // restore inputs
1601  if (i > 0) { // not coinbases
1602  CTxUndo &txundo = blockUndo.vtxundo[i-1];
1603  if (txundo.vprevout.size() != tx.vin.size()) {
1604  error("DisconnectBlock(): transaction and undo data inconsistent");
1605  return DISCONNECT_FAILED;
1606  }
1607  for (unsigned int j = tx.vin.size(); j-- > 0;) {
1608  const COutPoint &out = tx.vin[j].prevout;
1609  int res = ApplyTxInUndo(std::move(txundo.vprevout[j]), view, out);
1610  if (res == DISCONNECT_FAILED) return DISCONNECT_FAILED;
1611  fClean = fClean && res != DISCONNECT_UNCLEAN;
1612  }
1613  // At this point, all of txundo.vprevout should have been moved out.
1614  }
1615  }
1616 
1617  // move best block pointer to prevout block
1618  view.SetBestBlock(pindex->pprev->GetBlockHash());
1619 
1620  return fClean ? DISCONNECT_OK : DISCONNECT_UNCLEAN;
1621 }
1622 
1623 static void FlushUndoFile(int block_file, bool finalize = false)
1624 {
1625  FlatFilePos undo_pos_old(block_file, vinfoBlockFile[block_file].nUndoSize);
1626  if (!UndoFileSeq().Flush(undo_pos_old, finalize)) {
1627  AbortNode("Flushing undo file to disk failed. This is likely the result of an I/O error.");
1628  }
1629 }
1630 
1631 static void FlushBlockFile(bool fFinalize = false, bool finalize_undo = false)
1632 {
1633  LOCK(cs_LastBlockFile);
1634  FlatFilePos block_pos_old(nLastBlockFile, vinfoBlockFile[nLastBlockFile].nSize);
1635  if (!BlockFileSeq().Flush(block_pos_old, fFinalize)) {
1636  AbortNode("Flushing block file to disk failed. This is likely the result of an I/O error.");
1637  }
1638  // we do not always flush the undo file, as the chain tip may be lagging behind the incoming blocks,
1639  // e.g. during IBD or a sync after a node has been offline
1640  if (!fFinalize || finalize_undo) FlushUndoFile(nLastBlockFile, finalize_undo);
1641 }
1642 
1643 static bool FindUndoPos(BlockValidationState &state, int nFile, FlatFilePos &pos, unsigned int nAddSize);
1644 
1645 static bool WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex* pindex, const CChainParams& chainparams)
1646 {
1647  // Write undo information to disk
1648  if (pindex->GetUndoPos().IsNull()) {
1649  FlatFilePos _pos;
1650  if (!FindUndoPos(state, pindex->nFile, _pos, ::GetSerializeSize(blockundo, CLIENT_VERSION) + 40))
1651  return error("ConnectBlock(): FindUndoPos failed");
1652  if (!UndoWriteToDisk(blockundo, _pos, pindex->pprev->GetBlockHash(), chainparams.MessageStart()))
1653  return AbortNode(state, "Failed to write undo data");
1654  // rev files are written in block height order, whereas blk files are written as blocks come in (often out of order)
1655  // we want to flush the rev (undo) file once we've written the last block, which is indicated by the last height
1656  // in the block file info as below; note that this does not catch the case where the undo writes are keeping up
1657  // with the block writes (usually when a synced up node is getting newly mined blocks) -- this case is caught in
1658  // the FindBlockPos function
1659  if (_pos.nFile < nLastBlockFile && static_cast<uint32_t>(pindex->nHeight) == vinfoBlockFile[_pos.nFile].nHeightLast) {
1660  FlushUndoFile(_pos.nFile, true);
1661  }
1662 
1663  // update nUndoPos in block index
1664  pindex->nUndoPos = _pos.nPos;
1665  pindex->nStatus |= BLOCK_HAVE_UNDO;
1666  setDirtyBlockIndex.insert(pindex);
1667  }
1668 
1669  return true;
1670 }
1671 
1673 
1674 void StartScriptCheckWorkerThreads(int threads_num)
1675 {
1676  scriptcheckqueue.StartWorkerThreads(threads_num);
1677 }
1678 
1680 {
1681  scriptcheckqueue.StopWorkerThreads();
1682 }
1683 
1685 
1686 int32_t ComputeBlockVersion(const CBlockIndex* pindexPrev, const Consensus::Params& params)
1687 {
1688  LOCK(cs_main);
1689  int32_t nVersion = VERSIONBITS_TOP_BITS;
1690 
1691  for (int i = 0; i < (int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS; i++) {
1692  ThresholdState state = VersionBitsState(pindexPrev, params, static_cast<Consensus::DeploymentPos>(i), versionbitscache);
1693  if (state == ThresholdState::LOCKED_IN || state == ThresholdState::STARTED) {
1694  nVersion |= VersionBitsMask(params, static_cast<Consensus::DeploymentPos>(i));
1695  }
1696  }
1697 
1698  return nVersion;
1699 }
1700 
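// [Editor's note] Worked example for ComputeBlockVersion(): VERSIONBITS_TOP_BITS
// is 0x20000000. If a single deployment occupying bit 2 is STARTED or LOCKED_IN,
// VersionBitsMask() contributes (1 << 2), so the signalled block version is
//
//   0x20000000 | (1 << 2) == 0x20000004
//
// Additional active deployments simply OR in their own bits.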
1705 {
1706 private:
1707  int bit;
1708 
1709 public:
1710  explicit WarningBitsConditionChecker(int bitIn) : bit(bitIn) {}
1711 
1712  int64_t BeginTime(const Consensus::Params& params) const override { return 0; }
1713  int64_t EndTime(const Consensus::Params& params) const override { return std::numeric_limits<int64_t>::max(); }
1714  int Period(const Consensus::Params& params) const override { return params.nMinerConfirmationWindow; }
1715  int Threshold(const Consensus::Params& params) const override { return params.nRuleChangeActivationThreshold; }
1716 
1717  bool Condition(const CBlockIndex* pindex, const Consensus::Params& params) const override
1718  {
1719  return pindex->nHeight >= params.MinBIP9WarningHeight &&
1721  ((pindex->nVersion >> bit) & 1) != 0 &&
1722  ((ComputeBlockVersion(pindex->pprev, params) >> bit) & 1) == 0;
1723  }
1724 };
1725 
1727 
1728 // 0.13.0 was shipped with a segwit deployment defined for testnet, but not for
1729 // mainnet. We no longer need to support disabling the segwit deployment
1730 // except for testing purposes, due to limitations of the functional test
1731 // environment. See test/functional/p2p-segwit.py.
1732 static bool IsScriptWitnessEnabled(const Consensus::Params& params)
1733 {
1734  return params.SegwitHeight != std::numeric_limits<int>::max();
1735 }
1736 
1737 static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consensus::Params& consensusparams) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
1739 
1740  unsigned int flags = SCRIPT_VERIFY_NONE;
1741 
1742  // BIP16 didn't become active until Apr 1 2012 (on mainnet, and
1743  // retroactively applied to testnet).
1744  // However, only one historical block violated the P2SH rules (on both
1745  // mainnet and testnet), so for simplicity, always leave P2SH
1746  // on except for the one violating block.
1747  if (consensusparams.BIP16Exception.IsNull() || // no bip16 exception on this chain
1748  pindex->phashBlock == nullptr || // this is a new candidate block, eg from TestBlockValidity()
1749  *pindex->phashBlock != consensusparams.BIP16Exception) // this block isn't the historical exception
1750  {
1752  }
1753 
1754  // Enforce WITNESS rules whenever P2SH is in effect (and the segwit
1755  // deployment is defined).
1756  if (flags & SCRIPT_VERIFY_P2SH && IsScriptWitnessEnabled(consensusparams)) {
1758  }
1759 
1760  // Start enforcing the DERSIG (BIP66) rule
1761  if (pindex->nHeight >= consensusparams.BIP66Height) {
1763  }
1764 
1765  // Start enforcing CHECKLOCKTIMEVERIFY (BIP65) rule
1766  if (pindex->nHeight >= consensusparams.BIP65Height) {
1768  }
1769 
1770  // Start enforcing BIP112 (CHECKSEQUENCEVERIFY)
1771  if (pindex->nHeight >= consensusparams.CSVHeight) {
1773  }
1774 
1775  // Start enforcing Taproot using versionbits logic.
1778  }
1779 
1780  // Start enforcing BIP147 NULLDUMMY (activated simultaneously with segwit)
1781  if (IsWitnessEnabled(pindex->pprev, consensusparams)) {
1783  }
1784 
1785  return flags;
1786 }
1787 
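// [Editor's sketch, not part of validation.cpp] Consumers test individual rules
// in the returned bitmask with bitwise AND, exactly like the WITNESS-follows-P2SH
// check inside GetBlockScriptFlags() above. Hypothetical helper:
static bool SketchSegwitRulesApply(unsigned int block_script_flags)
{
    return (block_script_flags & SCRIPT_VERIFY_WITNESS) != 0;
}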
1788 
1789 
1790 static int64_t nTimeCheck = 0;
1791 static int64_t nTimeForks = 0;
1792 static int64_t nTimeVerify = 0;
1793 static int64_t nTimeConnect = 0;
1794 static int64_t nTimeIndex = 0;
1795 static int64_t nTimeCallbacks = 0;
1796 static int64_t nTimeTotal = 0;
1797 static int64_t nBlocksTotal = 0;
1798 
1803  CCoinsViewCache& view, const CChainParams& chainparams, bool fJustCheck)
1804 {
1806  assert(pindex);
1807  assert(*pindex->phashBlock == block.GetHash());
1808  int64_t nTimeStart = GetTimeMicros();
1809 
1810  // Check it again in case a previous version let a bad block in
1811  // NOTE: We don't currently (re-)invoke ContextualCheckBlock() or
1812  // ContextualCheckBlockHeader() here. This means that if we add a new
1813  // consensus rule that is enforced in one of those two functions, then we
1814  // may have let in a block that violates the rule prior to updating the
1815  // software, and we would NOT be enforcing the rule here. Fully solving
1816  // upgrade from one software version to the next after a consensus rule
1817  // change is potentially tricky and issue-specific (see RewindBlockIndex()
1818  // for one general approach that was used for BIP 141 deployment).
1819  // Also, currently the rule against blocks more than 2 hours in the future
1820  // is enforced in ContextualCheckBlockHeader(); we wouldn't want to
1821  // re-enforce that rule here (at least until we make it impossible for
1822  // GetAdjustedTime() to go backward).
1823  if (!CheckBlock(block, state, chainparams.GetConsensus(), !fJustCheck, !fJustCheck)) {
1825  // We don't write down blocks to disk if they may have been
1826  // corrupted, so this should be impossible unless we're having hardware
1827  // problems.
1828  return AbortNode(state, "Corrupt block found indicating potential hardware failure; shutting down");
1829  }
1830  return error("%s: Consensus::CheckBlock: %s", __func__, state.ToString());
1831  }
1832 
1833  // verify that the view's current state corresponds to the previous block
1834  uint256 hashPrevBlock = pindex->pprev == nullptr ? uint256() : pindex->pprev->GetBlockHash();
1835  assert(hashPrevBlock == view.GetBestBlock());
1836 
1837  nBlocksTotal++;
1838 
1839  // Special case for the genesis block, skipping connection of its transactions
1840  // (its coinbase is unspendable)
1841  if (block.GetHash() == chainparams.GetConsensus().hashGenesisBlock) {
1842  if (!fJustCheck)
1843  view.SetBestBlock(pindex->GetBlockHash());
1844  return true;
1845  }
1846 
1847  bool fScriptChecks = true;
1848  if (!hashAssumeValid.IsNull()) {
1849  // We've been configured with the hash of a block which has been externally verified to have a valid history.
1850  // A suitable default value is included with the software and updated from time to time. Because validity
1851  // relative to a piece of software is an objective fact, these defaults can be easily reviewed.
1852  // This setting doesn't force the selection of any particular chain but makes validating some blocks faster by
1853  // effectively caching the result of part of the verification.
1854  BlockMap::const_iterator it = m_blockman.m_block_index.find(hashAssumeValid);
1855  if (it != m_blockman.m_block_index.end()) {
1856  if (it->second->GetAncestor(pindex->nHeight) == pindex &&
1857  pindexBestHeader->GetAncestor(pindex->nHeight) == pindex &&
1859  // This block is a member of the assumed verified chain and an ancestor of the best header.
1860  // Script verification is skipped when connecting blocks under the
1861  // assumevalid block. Assuming the assumevalid block is valid this
1862  // is safe because block merkle hashes are still computed and checked.
1863  // Of course, if an assumed valid block is invalid due to false scriptSigs
1864  // this optimization would allow an invalid chain to be accepted.
1865  // The equivalent time check discourages hash power from extorting the network via DOS attack
1866  // into accepting an invalid block by telling users they must manually set assumevalid.
1867  // Requiring a software change or burying the invalid block, regardless of the setting, makes
1868  // it hard to hide the implication of the demand. This also avoids having release candidates
1869  // that are hardly doing any signature verification at all in testing without having to
1870  // artificially set the default assumed verified block further back.
1871  // The test against nMinimumChainWork prevents the skipping when denied access to any chain at
1872  // least as good as the expected chain.
1873  fScriptChecks = (GetBlockProofEquivalentTime(*pindexBestHeader, *pindex, *pindexBestHeader, chainparams.GetConsensus()) <= 60 * 60 * 24 * 7 * 2);
1874  }
1875  }
1876  }
1877 
1878  int64_t nTime1 = GetTimeMicros(); nTimeCheck += nTime1 - nTimeStart;
1879  LogPrint(BCLog::BENCH, " - Sanity checks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime1 - nTimeStart), nTimeCheck * MICRO, nTimeCheck * MILLI / nBlocksTotal);
1880 
1881  // Do not allow blocks that contain transactions which 'overwrite' older transactions,
1882  // unless those are already completely spent.
1883  // If such overwrites are allowed, coinbases and transactions depending upon those
1884  // can be duplicated to remove the ability to spend the first instance -- even after
1885  // being sent to another address.
1886  // See BIP30, CVE-2012-1909, and http://r6.ca/blog/20120206T005236Z.html for more information.
1887  // This logic is not necessary for memory pool transactions, as AcceptToMemoryPool
1888  // already refuses previously-known transaction ids entirely.
1889  // This rule was originally applied to all blocks with a timestamp after March 15, 2012, 0:00 UTC.
1890  // Now that the whole chain is irreversibly beyond that time it is applied to all blocks except the
1891  // two in the chain that violate it. This prevents exploiting the issue against nodes during their
1892  // initial block download.
1893  bool fEnforceBIP30 = !((pindex->nHeight==91842 && pindex->GetBlockHash() == uint256S("0x00000000000a4d0a398161ffc163c503763b1f4360639393e0e4c8e300e0caec")) ||
1894  (pindex->nHeight==91880 && pindex->GetBlockHash() == uint256S("0x00000000000743f190a18c5577a3c2d2a1f610ae9601ac046a38084ccb7cd721")));
1895 
1896  // Once BIP34 activated it was not possible to create new duplicate coinbases and thus other than starting
1897  // with the 2 existing duplicate coinbase pairs, not possible to create overwriting txs. But by the
1898  // time BIP34 activated, in each of the existing pairs the duplicate coinbase had overwritten the first
1899  // before the first had been spent. Since those coinbases are sufficiently buried it's no longer possible to create further
1900  // duplicate transactions descending from the known pairs either.
1901  // If we're on the known chain at height greater than where BIP34 activated, we can save the db accesses needed for the BIP30 check.
1902 
1903  // BIP34 requires that a block at height X (block X) has its coinbase
1904  // scriptSig start with a CScriptNum of X (indicated height X). The above
1905  // logic of no longer requiring BIP30 once BIP34 activates is flawed in the
1906  // case that there is a block X before the BIP34 height of 227,931 which has
1907  // an indicated height Y where Y is greater than X. The coinbase for block
1908  // X would also be a valid coinbase for block Y, which could be a BIP30
1909  // violation. An exhaustive search of all mainnet coinbases before the
1910  // BIP34 height which have an indicated height greater than the block height
1911  // reveals many occurrences. The 3 lowest indicated heights found are
1912  // 209,921, 490,897, and 1,983,702 and thus coinbases for blocks at these 3
1913  // heights would be the first opportunity for BIP30 to be violated.
1914 
1915  // The search reveals a great many blocks which have an indicated height
1916  // greater than 1,983,702, so we simply remove the optimization to skip
1917  // BIP30 checking for blocks at height 1,983,702 or higher. Before we reach
1918  // that block in another 25 years or so, we should take advantage of a
1919  // future consensus change to do a new and improved version of BIP34 that
1920  // will actually prevent ever creating any duplicate coinbases in the
1921  // future.
1922  static constexpr int BIP34_IMPLIES_BIP30_LIMIT = 1983702;
1923 
1924  // There is no potential to create a duplicate coinbase at block 209,921
1925  // because this is still before the BIP34 height and so explicit BIP30
1926  // checking is still active.
1927 
1928  // The final case is block 176,684 which has an indicated height of
1929  // 490,897. Unfortunately, this issue was not discovered until about 2 weeks
1930  // before block 490,897 so there was not much opportunity to address this
1931  // case other than to carefully analyze it and determine it would not be a
1932  // problem. Block 490,897 was, in fact, mined with a different coinbase than
1933  // block 176,684, but it is important to note that even if it hadn't been or
1934  // is remined on an alternate fork with a duplicate coinbase, we would still
1935  // not run into a BIP30 violation. This is because the coinbase for 176,684
1936  // is spent in block 185,956 in transaction
1937  // d4f7fbbf92f4a3014a230b2dc70b8058d02eb36ac06b4a0736d9d60eaa9e8781. This
1938  // spending transaction can't be duplicated because it also spends coinbase
1939  // 0328dd85c331237f18e781d692c92de57649529bd5edf1d01036daea32ffde29. This
1940  // coinbase has an indicated height of over 4.2 billion, and wouldn't be
1941  // duplicatable until that height, and it's currently impossible to create a
1942  // chain that long. Nevertheless we may wish to consider a future soft fork
1943  // which retroactively prevents block 490,897 from creating a duplicate
1944  // coinbase. The two historical BIP30 violations often provide a confusing
1945  // edge case when manipulating the UTXO and it would be simpler not to have
1946  // another edge case to deal with.
1947 
1948  // testnet3 has no blocks before the BIP34 height with indicated heights
1949  // post BIP34 before approximately height 486,000,000 and presumably will
1950  // be reset before it reaches block 1,983,702 and starts doing unnecessary
1951  // BIP30 checking again.
1952  assert(pindex->pprev);
1953  CBlockIndex *pindexBIP34height = pindex->pprev->GetAncestor(chainparams.GetConsensus().BIP34Height);
1954  //Only continue to enforce if we're below BIP34 activation height or the block hash at that height doesn't correspond.
1955  fEnforceBIP30 = fEnforceBIP30 && (!pindexBIP34height || !(pindexBIP34height->GetBlockHash() == chainparams.GetConsensus().BIP34Hash));
1956 
1957  // TODO: Remove BIP30 checking from block height 1,983,702 on, once we have a
1958  // consensus change that ensures coinbases at those heights can not
1959  // duplicate earlier coinbases.
1960  if (fEnforceBIP30 || pindex->nHeight >= BIP34_IMPLIES_BIP30_LIMIT) {
1961  for (const auto& tx : block.vtx) {
1962  for (size_t o = 0; o < tx->vout.size(); o++) {
1963  if (view.HaveCoin(COutPoint(tx->GetHash(), o))) {
1964  LogPrintf("ERROR: ConnectBlock(): tried to overwrite transaction\n");
1965  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-BIP30");
1966  }
1967  }
1968  }
1969  }
1970 
1971  // Start enforcing BIP68 (sequence locks)
1972  int nLockTimeFlags = 0;
1973  if (pindex->nHeight >= chainparams.GetConsensus().CSVHeight) {
1974  nLockTimeFlags |= LOCKTIME_VERIFY_SEQUENCE;
1975  }
1976 
1977  // Get the script flags for this block
1978  unsigned int flags = GetBlockScriptFlags(pindex, chainparams.GetConsensus());
1979 
1980  int64_t nTime2 = GetTimeMicros(); nTimeForks += nTime2 - nTime1;
1981  LogPrint(BCLog::BENCH, " - Fork checks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime2 - nTime1), nTimeForks * MICRO, nTimeForks * MILLI / nBlocksTotal);
1982 
1983  CBlockUndo blockundo;
1984 
1985  // Precomputed transaction data pointers must not be invalidated
1986  // until after `control` has run the script checks (potentially
1987  // in multiple threads). Preallocate the vector size so a new allocation
1988  // doesn't invalidate pointers into the vector, and keep txsdata in scope
1989  // for as long as `control`.
1990  CCheckQueueControl<CScriptCheck> control(fScriptChecks && g_parallel_script_checks ? &scriptcheckqueue : nullptr);
1991  std::vector<PrecomputedTransactionData> txsdata(block.vtx.size());
1992 
1993  std::vector<int> prevheights;
1994  CAmount nFees = 0;
1995  int nInputs = 0;
1996  int64_t nSigOpsCost = 0;
1997  blockundo.vtxundo.reserve(block.vtx.size() - 1);
1998  for (unsigned int i = 0; i < block.vtx.size(); i++)
1999  {
2000  const CTransaction &tx = *(block.vtx[i]);
2001 
2002  nInputs += tx.vin.size();
2003 
2004  if (!tx.IsCoinBase())
2005  {
2006  CAmount txfee = 0;
2007  TxValidationState tx_state;
2008  if (!Consensus::CheckTxInputs(tx, tx_state, view, pindex->nHeight, txfee)) {
2009  // Any transaction validation failure in ConnectBlock is a block consensus failure
2011  tx_state.GetRejectReason(), tx_state.GetDebugMessage());
2012  return error("%s: Consensus::CheckTxInputs: %s, %s", __func__, tx.GetHash().ToString(), state.ToString());
2013  }
2014  nFees += txfee;
2015  if (!MoneyRange(nFees)) {
2016  LogPrintf("ERROR: %s: accumulated fee in the block out of range.\n", __func__);
2017  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-accumulated-fee-outofrange");
2018  }
2019 
2020  // Check that transaction is BIP68 final
2021  // BIP68 lock checks (as opposed to nLockTime checks) must
2022  // be in ConnectBlock because they require the UTXO set
2023  prevheights.resize(tx.vin.size());
2024  for (size_t j = 0; j < tx.vin.size(); j++) {
2025  prevheights[j] = view.AccessCoin(tx.vin[j].prevout).nHeight;
2026  }
2027 
2028  if (!SequenceLocks(tx, nLockTimeFlags, prevheights, *pindex)) {
2029  LogPrintf("ERROR: %s: contains a non-BIP68-final transaction\n", __func__);
2030  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-nonfinal");
2031  }
2032  }
2033 
2034  // GetTransactionSigOpCost counts 3 types of sigops:
2035  // * legacy (always)
2036  // * p2sh (when P2SH enabled in flags and excludes coinbase)
2037  // * witness (when witness enabled in flags and excludes coinbase)
2038  nSigOpsCost += GetTransactionSigOpCost(tx, view, flags);
2039  if (nSigOpsCost > MAX_BLOCK_SIGOPS_COST) {
2040  LogPrintf("ERROR: ConnectBlock(): too many sigops\n");
2041  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-sigops");
2042  }
2043 
2044  if (!tx.IsCoinBase())
2045  {
2046  std::vector<CScriptCheck> vChecks;
2047  bool fCacheResults = fJustCheck; /* Don't cache results if we're actually connecting blocks (still consult the cache, though) */
2048  TxValidationState tx_state;
2049  if (fScriptChecks && !CheckInputScripts(tx, tx_state, view, flags, fCacheResults, fCacheResults, txsdata[i], g_parallel_script_checks ? &vChecks : nullptr)) {
2050  // Any transaction validation failure in ConnectBlock is a block consensus failure
2052  tx_state.GetRejectReason(), tx_state.GetDebugMessage());
2053  return error("ConnectBlock(): CheckInputScripts on %s failed with %s",
2054  tx.GetHash().ToString(), state.ToString());
2055  }
2056  control.Add(vChecks);
2057  }
2058 
2059  CTxUndo undoDummy;
2060  if (i > 0) {
2061  blockundo.vtxundo.push_back(CTxUndo());
2062  }
2063  UpdateCoins(tx, view, i == 0 ? undoDummy : blockundo.vtxundo.back(), pindex->nHeight);
2064  }
2065  int64_t nTime3 = GetTimeMicros(); nTimeConnect += nTime3 - nTime2;
2066  LogPrint(BCLog::BENCH, " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs (%.2fms/blk)]\n", (unsigned)block.vtx.size(), MILLI * (nTime3 - nTime2), MILLI * (nTime3 - nTime2) / block.vtx.size(), nInputs <= 1 ? 0 : MILLI * (nTime3 - nTime2) / (nInputs-1), nTimeConnect * MICRO, nTimeConnect * MILLI / nBlocksTotal);
2067 
2068  CAmount blockReward = nFees + GetBlockSubsidy(pindex->nHeight, chainparams.GetConsensus());
2069  if (block.vtx[0]->GetValueOut() > blockReward) {
2070  LogPrintf("ERROR: ConnectBlock(): coinbase pays too much (actual=%d vs limit=%d)\n", block.vtx[0]->GetValueOut(), blockReward);
2071  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-amount");
2072  }
2073 
2074  if (!control.Wait()) {
2075  LogPrintf("ERROR: %s: CheckQueue failed\n", __func__);
2076  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "block-validation-failed");
2077  }
2078  int64_t nTime4 = GetTimeMicros(); nTimeVerify += nTime4 - nTime2;
2079  LogPrint(BCLog::BENCH, " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs (%.2fms/blk)]\n", nInputs - 1, MILLI * (nTime4 - nTime2), nInputs <= 1 ? 0 : MILLI * (nTime4 - nTime2) / (nInputs-1), nTimeVerify * MICRO, nTimeVerify * MILLI / nBlocksTotal);
2080 
2081  if (fJustCheck)
2082  return true;
2083 
2084  if (!WriteUndoDataForBlock(blockundo, state, pindex, chainparams))
2085  return false;
2086 
2087  if (!pindex->IsValid(BLOCK_VALID_SCRIPTS)) {
2089  setDirtyBlockIndex.insert(pindex);
2090  }
2091 
2092  assert(pindex->phashBlock);
2093  // add this block to the view's block chain
2094  view.SetBestBlock(pindex->GetBlockHash());
2095 
2096  int64_t nTime5 = GetTimeMicros(); nTimeIndex += nTime5 - nTime4;
2097  LogPrint(BCLog::BENCH, " - Index writing: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime5 - nTime4), nTimeIndex * MICRO, nTimeIndex * MILLI / nBlocksTotal);
2098 
2099  int64_t nTime6 = GetTimeMicros(); nTimeCallbacks += nTime6 - nTime5;
2100  LogPrint(BCLog::BENCH, " - Callbacks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime6 - nTime5), nTimeCallbacks * MICRO, nTimeCallbacks * MILLI / nBlocksTotal);
2101 
2102  return true;
2103 }
2104 
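// [Editor's note] Worked example for the coinbase-value check in ConnectBlock()
// above: for a mainnet block below the first halving height, GetBlockSubsidy()
// returns 50 BTC (5,000,000,000 satoshi). If the block's transactions pay
// 0.1 BTC (10,000,000 satoshi) in fees, then
//
//   blockReward = 10,000,000 + 5,000,000,000 = 5,010,000,000 satoshi
//
// and block.vtx[0]->GetValueOut() may not exceed that amount. Claiming less is
// valid; the shortfall is simply never created.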
2105 CoinsCacheSizeState CChainState::GetCoinsCacheSizeState(const CTxMemPool* tx_pool)
2106 {
2107  return this->GetCoinsCacheSizeState(
2108  tx_pool,
2110  gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000);
2111 }
2112 
2113 CoinsCacheSizeState CChainState::GetCoinsCacheSizeState(
2114  const CTxMemPool* tx_pool,
2115  size_t max_coins_cache_size_bytes,
2116  size_t max_mempool_size_bytes)
2117 {
2118  const int64_t nMempoolUsage = tx_pool ? tx_pool->DynamicMemoryUsage() : 0;
2119  int64_t cacheSize = CoinsTip().DynamicMemoryUsage();
2120  int64_t nTotalSpace =
2121  max_coins_cache_size_bytes + std::max<int64_t>(max_mempool_size_bytes - nMempoolUsage, 0);
2122 
2124  static constexpr int64_t MAX_BLOCK_COINSDB_USAGE_BYTES = 10 * 1024 * 1024; // 10MB
2125  int64_t large_threshold =
2126  std::max((9 * nTotalSpace) / 10, nTotalSpace - MAX_BLOCK_COINSDB_USAGE_BYTES);
2127 
2128  if (cacheSize > nTotalSpace) {
2129  LogPrintf("Cache size (%s) exceeds total space (%s)\n", cacheSize, nTotalSpace);
2131  } else if (cacheSize > large_threshold) {
2133  }
2134  return CoinsCacheSizeState::OK;
2135 }
2136 
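// [Editor's note] Worked example for the thresholds above, with hypothetical
// budgets: a 400 MB coins-cache limit, a 300 MB mempool limit and 100 MB of
// current mempool usage give
//
//   nTotalSpace     = 400 MB + max(300 MB - 100 MB, 0)     = 600 MB
//   large_threshold = max(90% of 600 MB, 600 MB - 10 MiB)  ~ 590 MB
//
// so the state becomes LARGE only once the cache is within roughly one block's
// worth (10 MiB) of the combined budget, and CRITICAL once it exceeds the
// budget outright.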
2138  const CChainParams& chainparams,
2139  BlockValidationState &state,
2140  FlushStateMode mode,
2141  int nManualPruneHeight)
2142 {
2143  LOCK(cs_main);
2144  assert(this->CanFlushToDisk());
2145  static std::chrono::microseconds nLastWrite{0};
2146  static std::chrono::microseconds nLastFlush{0};
2147  std::set<int> setFilesToPrune;
2148  bool full_flush_completed = false;
2149 
2150  const size_t coins_count = CoinsTip().GetCacheSize();
2151  const size_t coins_mem_usage = CoinsTip().DynamicMemoryUsage();
2152 
2153  try {
2154  {
2155  bool fFlushForPrune = false;
2156  bool fDoFullFlush = false;
2157 
2158  CoinsCacheSizeState cache_state = GetCoinsCacheSizeState(&m_mempool);
2159  LOCK(cs_LastBlockFile);
2160  if (fPruneMode && (fCheckForPruning || nManualPruneHeight > 0) && !fReindex) {
2161  // make sure we don't prune above the block filter indexes' best blocks;
2162  // pruning is height-based
2163  int last_prune = m_chain.Height(); // last height we can prune
2165  last_prune = std::max(1, std::min(last_prune, index.GetSummary().best_block_height));
2166  });
2167 
2168  if (nManualPruneHeight > 0) {
2169  LOG_TIME_MILLIS_WITH_CATEGORY("find files to prune (manual)", BCLog::BENCH);
2170 
2171  m_blockman.FindFilesToPruneManual(setFilesToPrune, std::min(last_prune, nManualPruneHeight), m_chain.Height());
2172  } else {
2173  LOG_TIME_MILLIS_WITH_CATEGORY("find files to prune", BCLog::BENCH);
2174 
2175  m_blockman.FindFilesToPrune(setFilesToPrune, chainparams.PruneAfterHeight(), m_chain.Height(), last_prune, IsInitialBlockDownload());
2176  fCheckForPruning = false;
2177  }
2178  if (!setFilesToPrune.empty()) {
2179  fFlushForPrune = true;
2180  if (!fHavePruned) {
2181  pblocktree->WriteFlag("prunedblockfiles", true);
2182  fHavePruned = true;
2183  }
2184  }
2185  }
2186  const auto nNow = GetTime<std::chrono::microseconds>();
2187  // Avoid writing/flushing immediately after startup.
2188  if (nLastWrite.count() == 0) {
2189  nLastWrite = nNow;
2190  }
2191  if (nLastFlush.count() == 0) {
2192  nLastFlush = nNow;
2193  }
2194  // The cache is large and we're within 10% and 10 MiB of the limit, but we have time now (not in the middle of processing a block).
2195  bool fCacheLarge = mode == FlushStateMode::PERIODIC && cache_state >= CoinsCacheSizeState::LARGE;
2196  // The cache is over the limit, we have to write now.
2197  bool fCacheCritical = mode == FlushStateMode::IF_NEEDED && cache_state >= CoinsCacheSizeState::CRITICAL;
2198  // It's been a while since we wrote the block index to disk. Do this frequently, so we don't need to redownload after a crash.
2199  bool fPeriodicWrite = mode == FlushStateMode::PERIODIC && nNow > nLastWrite + DATABASE_WRITE_INTERVAL;
2200  // It's been very long since we flushed the cache. Do this infrequently, to optimize cache usage.
2201  bool fPeriodicFlush = mode == FlushStateMode::PERIODIC && nNow > nLastFlush + DATABASE_FLUSH_INTERVAL;
2202  // Combine all conditions that result in a full cache flush.
2203  fDoFullFlush = (mode == FlushStateMode::ALWAYS) || fCacheLarge || fCacheCritical || fPeriodicFlush || fFlushForPrune;
2204  // Write blocks and block index to disk.
2205  if (fDoFullFlush || fPeriodicWrite) {
2206  // Depend on nMinDiskSpace to ensure we can write block index
2208  return AbortNode(state, "Disk space is too low!", _("Disk space is too low!"));
2209  }
2210  {
2211  LOG_TIME_MILLIS_WITH_CATEGORY("write block and undo data to disk", BCLog::BENCH);
2212 
2213  // First make sure all block and undo data is flushed to disk.
2214  FlushBlockFile();
2215  }
2216 
2217  // Then update all block file information (which may refer to block and undo files).
2218  {
2219  LOG_TIME_MILLIS_WITH_CATEGORY("write block index to disk", BCLog::BENCH);
2220 
2221  std::vector<std::pair<int, const CBlockFileInfo*> > vFiles;
2222  vFiles.reserve(setDirtyFileInfo.size());
2223  for (std::set<int>::iterator it = setDirtyFileInfo.begin(); it != setDirtyFileInfo.end(); ) {
2224  vFiles.push_back(std::make_pair(*it, &vinfoBlockFile[*it]));
2225  setDirtyFileInfo.erase(it++);
2226  }
2227  std::vector<const CBlockIndex*> vBlocks;
2228  vBlocks.reserve(setDirtyBlockIndex.size());
2229  for (std::set<CBlockIndex*>::iterator it = setDirtyBlockIndex.begin(); it != setDirtyBlockIndex.end(); ) {
2230  vBlocks.push_back(*it);
2231  setDirtyBlockIndex.erase(it++);
2232  }
2233  if (!pblocktree->WriteBatchSync(vFiles, nLastBlockFile, vBlocks)) {
2234  return AbortNode(state, "Failed to write to block index database");
2235  }
2236  }
2237  // Finally remove any pruned files
2238  if (fFlushForPrune) {
2239  LOG_TIME_MILLIS_WITH_CATEGORY("unlink pruned files", BCLog::BENCH);
2240 
2241  UnlinkPrunedFiles(setFilesToPrune);
2242  }
2243  nLastWrite = nNow;
2244  }
2245  // Flush best chain related state. This can only be done if the blocks / block index write was also done.
2246  if (fDoFullFlush && !CoinsTip().GetBestBlock().IsNull()) {
2247  LOG_TIME_SECONDS(strprintf("write coins cache to disk (%d coins, %.2fkB)",
2248  coins_count, coins_mem_usage / 1000));
2249 
2250  // Typical Coin structures on disk are around 48 bytes in size.
2251  // Pushing a new one to the database can cause it to be written
2252  // twice (once in the log, and once in the tables). This is already
2253  // an overestimation, as most will delete an existing entry or
2254  // overwrite one. Still, use a conservative safety factor of 2.
2255  if (!CheckDiskSpace(GetDataDir(), 48 * 2 * 2 * CoinsTip().GetCacheSize())) {
2256  return AbortNode(state, "Disk space is too low!", _("Disk space is too low!"));
2257  }
2258  // Flush the chainstate (which may refer to block index entries).
2259  if (!CoinsTip().Flush())
2260  return AbortNode(state, "Failed to write to coin database");
2261  nLastFlush = nNow;
2262  full_flush_completed = true;
2263  }
2264  }
2265  if (full_flush_completed) {
2266  // Update best block in wallet (so we can detect restored wallets).
2268  }
2269  } catch (const std::runtime_error& e) {
2270  return AbortNode(state, std::string("System error while flushing: ") + e.what());
2271  }
2272  return true;
2273 }
2274 
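// [Editor's note] Summary of the flush policy implemented above:
//
//   trigger          required mode   condition
//   fCacheLarge      PERIODIC        cache state >= LARGE (close to the budget)
//   fCacheCritical   IF_NEEDED       cache state >= CRITICAL (over the budget)
//   fPeriodicWrite   PERIODIC        DATABASE_WRITE_INTERVAL since the last block-index write
//   fPeriodicFlush   PERIODIC        DATABASE_FLUSH_INTERVAL since the last coins flush
//   fFlushForPrune   any             block files were selected for pruning
//
// A full flush (block index + coins) happens for FlushStateMode::ALWAYS,
// fCacheLarge, fCacheCritical, fPeriodicFlush or fFlushForPrune; fPeriodicWrite
// on its own only writes the block/undo files and the block index.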
2276  BlockValidationState state;
2277  const CChainParams& chainparams = Params();
2278  if (!this->FlushStateToDisk(chainparams, state, FlushStateMode::ALWAYS)) {
2279  LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString());
2280  }
2281 }
2282 
2284  BlockValidationState state;
2285  fCheckForPruning = true;
2286  const CChainParams& chainparams = Params();
2287 
2288  if (!this->FlushStateToDisk(chainparams, state, FlushStateMode::NONE)) {
2289  LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString());
2290  }
2291 }
2292 
2293 static void DoWarning(const bilingual_str& warning)
2294 {
2295  static bool fWarned = false;
2296  SetMiscWarning(warning);
2297  if (!fWarned) {
2298  AlertNotify(warning.original);
2299  fWarned = true;
2300  }
2301 }
2302 
2304 static void AppendWarning(bilingual_str& res, const bilingual_str& warn)
2305 {
2306  if (!res.empty()) res += Untranslated(", ");
2307  res += warn;
2308 }
2309 
2311 static void UpdateTip(CTxMemPool& mempool, const CBlockIndex* pindexNew, const CChainParams& chainParams, CChainState& active_chainstate)
2313 {
2314  // New best block
2315  mempool.AddTransactionsUpdated(1);
2316 
2317  {
2319  g_best_block = pindexNew->GetBlockHash();
2320  g_best_block_cv.notify_all();
2321  }
2322 
2323  bilingual_str warning_messages;
2324  assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate));
2325  if (!active_chainstate.IsInitialBlockDownload()) {
2326  const CBlockIndex* pindex = pindexNew;
2327  for (int bit = 0; bit < VERSIONBITS_NUM_BITS; bit++) {
2328  WarningBitsConditionChecker checker(bit);
2329  ThresholdState state = checker.GetStateFor(pindex, chainParams.GetConsensus(), warningcache[bit]);
2330  if (state == ThresholdState::ACTIVE || state == ThresholdState::LOCKED_IN) {
2331  const bilingual_str warning = strprintf(_("Warning: unknown new rules activated (versionbit %i)"), bit);
2332  if (state == ThresholdState::ACTIVE) {
2333  DoWarning(warning);
2334  } else {
2335  AppendWarning(warning_messages, warning);
2336  }
2337  }
2338  }
2339  }
2340  assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate));
2341  LogPrintf("%s: new best=%s height=%d version=0x%08x log2_work=%f tx=%lu date='%s' progress=%f cache=%.1fMiB(%utxo)%s\n", __func__,
2342  pindexNew->GetBlockHash().ToString(), pindexNew->nHeight, pindexNew->nVersion,
2343  log(pindexNew->nChainWork.getdouble())/log(2.0), (unsigned long)pindexNew->nChainTx,
2344  FormatISO8601DateTime(pindexNew->GetBlockTime()),
2345  GuessVerificationProgress(chainParams.TxData(), pindexNew), active_chainstate.CoinsTip().DynamicMemoryUsage() * (1.0 / (1<<20)), active_chainstate.CoinsTip().GetCacheSize(),
2346  !warning_messages.empty() ? strprintf(" warning='%s'", warning_messages.original) : "");
2347 }
2348 
2360 {
2363 
2364  CBlockIndex *pindexDelete = m_chain.Tip();
2365  assert(pindexDelete);
2366  // Read block from disk.
2367  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
2368  CBlock& block = *pblock;
2369  if (!ReadBlockFromDisk(block, pindexDelete, chainparams.GetConsensus()))
2370  return error("DisconnectTip(): Failed to read block");
2371  // Apply the block atomically to the chain state.
2372  int64_t nStart = GetTimeMicros();
2373  {
2374  CCoinsViewCache view(&CoinsTip());
2375  assert(view.GetBestBlock() == pindexDelete->GetBlockHash());
2376  if (DisconnectBlock(block, pindexDelete, view) != DISCONNECT_OK)
2377  return error("DisconnectTip(): DisconnectBlock %s failed", pindexDelete->GetBlockHash().ToString());
2378  bool flushed = view.Flush();
2379  assert(flushed);
2380  }
2381  LogPrint(BCLog::BENCH, "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart) * MILLI);
2382  // Write the chain state to disk, if necessary.
2383  if (!FlushStateToDisk(chainparams, state, FlushStateMode::IF_NEEDED))
2384  return false;
2385 
2386  if (disconnectpool) {
2387  // Save transactions to re-add to mempool at end of reorg
2388  for (auto it = block.vtx.rbegin(); it != block.vtx.rend(); ++it) {
2389  disconnectpool->addTransaction(*it);
2390  }
2391  while (disconnectpool->DynamicMemoryUsage() > MAX_DISCONNECTED_TX_POOL_SIZE * 1000) {
2392  // Drop the earliest entry, and remove its children from the mempool.
2393  auto it = disconnectpool->queuedTx.get<insertion_order>().begin();
2395  disconnectpool->removeEntry(it);
2396  }
2397  }
2398 
2399  m_chain.SetTip(pindexDelete->pprev);
2400 
2401  UpdateTip(m_mempool, pindexDelete->pprev, chainparams, *this);
2402  // Let wallets know transactions went from 1-confirmed to
2403  // 0-confirmed or conflicted:
2404  GetMainSignals().BlockDisconnected(pblock, pindexDelete);
2405  return true;
2406 }
2407 
2408 static int64_t nTimeReadFromDisk = 0;
2409 static int64_t nTimeConnectTotal = 0;
2410 static int64_t nTimeFlush = 0;
2411 static int64_t nTimeChainState = 0;
2412 static int64_t nTimePostConnect = 0;
2413 
2415  CBlockIndex* pindex = nullptr;
2416  std::shared_ptr<const CBlock> pblock;
2418 };
2427 private:
2428  std::vector<PerBlockConnectTrace> blocksConnected;
2429 
2430 public:
2431  explicit ConnectTrace() : blocksConnected(1) {}
2432 
2433  void BlockConnected(CBlockIndex* pindex, std::shared_ptr<const CBlock> pblock) {
2434  assert(!blocksConnected.back().pindex);
2435  assert(pindex);
2436  assert(pblock);
2437  blocksConnected.back().pindex = pindex;
2438  blocksConnected.back().pblock = std::move(pblock);
2439  blocksConnected.emplace_back();
2440  }
2441 
2442  std::vector<PerBlockConnectTrace>& GetBlocksConnected() {
2443  // We always keep one extra block at the end of our list because
2444  // blocks are added after all the conflicted transactions have
2445  // been filled in. Thus, the last entry should always be an empty
2446  // one waiting for the transactions from the next block. We pop
2447  // the last entry here to make sure the list we return is sane.
2448  assert(!blocksConnected.back().pindex);
2449  blocksConnected.pop_back();
2450  return blocksConnected;
2451  }
2452 };
2453 
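// [Editor's sketch] How ConnectTrace is consumed (mirrors ActivateBestChain()
// further down): each successfully connected block is recorded, and
// GetBlocksConnected() drops the trailing sentinel entry before notifications
// are dispatched.
//
//   ConnectTrace connectTrace;
//   ConnectTip(state, chainparams, pindexConnect, pblock, connectTrace, disconnectpool);
//   for (const PerBlockConnectTrace& trace : connectTrace.GetBlocksConnected()) {
//       GetMainSignals().BlockConnected(trace.pblock, trace.pindex);
//   }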
2460 bool CChainState::ConnectTip(BlockValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexNew, const std::shared_ptr<const CBlock>& pblock, ConnectTrace& connectTrace, DisconnectedBlockTransactions &disconnectpool)
2461 {
2464 
2465  assert(pindexNew->pprev == m_chain.Tip());
2466  // Read block from disk.
2467  int64_t nTime1 = GetTimeMicros();
2468  std::shared_ptr<const CBlock> pthisBlock;
2469  if (!pblock) {
2470  std::shared_ptr<CBlock> pblockNew = std::make_shared<CBlock>();
2471  if (!ReadBlockFromDisk(*pblockNew, pindexNew, chainparams.GetConsensus()))
2472  return AbortNode(state, "Failed to read block");
2473  pthisBlock = pblockNew;
2474  } else {
2475  pthisBlock = pblock;
2476  }
2477  const CBlock& blockConnecting = *pthisBlock;
2478  // Apply the block atomically to the chain state.
2479  int64_t nTime2 = GetTimeMicros(); nTimeReadFromDisk += nTime2 - nTime1;
2480  int64_t nTime3;
2481  LogPrint(BCLog::BENCH, " - Load block from disk: %.2fms [%.2fs]\n", (nTime2 - nTime1) * MILLI, nTimeReadFromDisk * MICRO);
2482  {
2483  CCoinsViewCache view(&CoinsTip());
2484  bool rv = ConnectBlock(blockConnecting, state, pindexNew, view, chainparams);
2485  GetMainSignals().BlockChecked(blockConnecting, state);
2486  if (!rv) {
2487  if (state.IsInvalid())
2488  InvalidBlockFound(pindexNew, state);
2489  return error("%s: ConnectBlock %s failed, %s", __func__, pindexNew->GetBlockHash().ToString(), state.ToString());
2490  }
2491  nTime3 = GetTimeMicros(); nTimeConnectTotal += nTime3 - nTime2;
2492  assert(nBlocksTotal > 0);
2493  LogPrint(BCLog::BENCH, " - Connect total: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime3 - nTime2) * MILLI, nTimeConnectTotal * MICRO, nTimeConnectTotal * MILLI / nBlocksTotal);
2494  bool flushed = view.Flush();
2495  assert(flushed);
2496  }
2497  int64_t nTime4 = GetTimeMicros(); nTimeFlush += nTime4 - nTime3;
2498  LogPrint(BCLog::BENCH, " - Flush: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime4 - nTime3) * MILLI, nTimeFlush * MICRO, nTimeFlush * MILLI / nBlocksTotal);
2499  // Write the chain state to disk, if necessary.
2500  if (!FlushStateToDisk(chainparams, state, FlushStateMode::IF_NEEDED))
2501  return false;
2502  int64_t nTime5 = GetTimeMicros(); nTimeChainState += nTime5 - nTime4;
2503  LogPrint(BCLog::BENCH, " - Writing chainstate: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime5 - nTime4) * MILLI, nTimeChainState * MICRO, nTimeChainState * MILLI / nBlocksTotal);
2504  // Remove conflicting transactions from the mempool.
2505  m_mempool.removeForBlock(blockConnecting.vtx, pindexNew->nHeight);
2506  disconnectpool.removeForBlock(blockConnecting.vtx);
2507  // Update m_chain & related variables.
2508  m_chain.SetTip(pindexNew);
2509  UpdateTip(m_mempool, pindexNew, chainparams, *this);
2510 
2511  int64_t nTime6 = GetTimeMicros(); nTimePostConnect += nTime6 - nTime5; nTimeTotal += nTime6 - nTime1;
2512  LogPrint(BCLog::BENCH, " - Connect postprocess: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime6 - nTime5) * MILLI, nTimePostConnect * MICRO, nTimePostConnect * MILLI / nBlocksTotal);
2513  LogPrint(BCLog::BENCH, "- Connect block: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime6 - nTime1) * MILLI, nTimeTotal * MICRO, nTimeTotal * MILLI / nBlocksTotal);
2514 
2515  connectTrace.BlockConnected(pindexNew, std::move(pthisBlock));
2516  return true;
2517 }
2518 
2524  do {
2525  CBlockIndex *pindexNew = nullptr;
2526 
2527  // Find the best candidate header.
2528  {
2529  std::set<CBlockIndex*, CBlockIndexWorkComparator>::reverse_iterator it = setBlockIndexCandidates.rbegin();
2530  if (it == setBlockIndexCandidates.rend())
2531  return nullptr;
2532  pindexNew = *it;
2533  }
2534 
2535  // Check whether all blocks on the path between the currently active chain and the candidate are valid.
2536  // Walking back only until we reach the active chain is an optimization, as we know all blocks in it are valid already.
2537  CBlockIndex *pindexTest = pindexNew;
2538  bool fInvalidAncestor = false;
2539  while (pindexTest && !m_chain.Contains(pindexTest)) {
2540  assert(pindexTest->HaveTxsDownloaded() || pindexTest->nHeight == 0);
2541 
2542  // Pruned nodes may have entries in setBlockIndexCandidates for
2543  // which block files have been deleted. Remove those as candidates
2544  // for the most work chain if we come across them; we can't switch
2545  // to a chain unless we have all the non-active-chain parent blocks.
2546  bool fFailedChain = pindexTest->nStatus & BLOCK_FAILED_MASK;
2547  bool fMissingData = !(pindexTest->nStatus & BLOCK_HAVE_DATA);
2548  if (fFailedChain || fMissingData) {
2549  // Candidate chain is not usable (either invalid or missing data)
2550  if (fFailedChain && (pindexBestInvalid == nullptr || pindexNew->nChainWork > pindexBestInvalid->nChainWork))
2551  pindexBestInvalid = pindexNew;
2552  CBlockIndex *pindexFailed = pindexNew;
2553  // Remove the entire chain from the set.
2554  while (pindexTest != pindexFailed) {
2555  if (fFailedChain) {
2556  pindexFailed->nStatus |= BLOCK_FAILED_CHILD;
2557  } else if (fMissingData) {
2558  // If we're missing data, then add back to m_blocks_unlinked,
2559  // so that if the block arrives in the future we can try adding
2560  // to setBlockIndexCandidates again.
2562  std::make_pair(pindexFailed->pprev, pindexFailed));
2563  }
2564  setBlockIndexCandidates.erase(pindexFailed);
2565  pindexFailed = pindexFailed->pprev;
2566  }
2567  setBlockIndexCandidates.erase(pindexTest);
2568  fInvalidAncestor = true;
2569  break;
2570  }
2571  pindexTest = pindexTest->pprev;
2572  }
2573  if (!fInvalidAncestor)
2574  return pindexNew;
2575  } while(true);
2576 }
2577 
2580  // Note that we can't delete the current block itself, as we may need to return to it later in case a
2581  // reorganization to a better block fails.
2582  std::set<CBlockIndex*, CBlockIndexWorkComparator>::iterator it = setBlockIndexCandidates.begin();
2583  while (it != setBlockIndexCandidates.end() && setBlockIndexCandidates.value_comp()(*it, m_chain.Tip())) {
2584  setBlockIndexCandidates.erase(it++);
2585  }
2586  // Either the current tip or a successor of it we're working towards is left in setBlockIndexCandidates.
2587  assert(!setBlockIndexCandidates.empty());
2588 }
2589 
2596 bool CChainState::ActivateBestChainStep(BlockValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexMostWork, const std::shared_ptr<const CBlock>& pblock, bool& fInvalidFound, ConnectTrace& connectTrace)
2597 {
2600  assert(std::addressof(::ChainstateActive()) == std::addressof(*this));
2601 
2602  const CBlockIndex* pindexOldTip = m_chain.Tip();
2603  const CBlockIndex* pindexFork = m_chain.FindFork(pindexMostWork);
2604 
2605  // Disconnect active blocks which are no longer in the best chain.
2606  bool fBlocksDisconnected = false;
2607  DisconnectedBlockTransactions disconnectpool;
2608  while (m_chain.Tip() && m_chain.Tip() != pindexFork) {
2609  if (!DisconnectTip(state, chainparams, &disconnectpool)) {
2610  // This is likely a fatal error, but keep the mempool consistent,
2611  // just in case. Only remove from the mempool in this case.
2612  UpdateMempoolForReorg(*this, m_mempool, disconnectpool, false);
2613 
2614  // If we're unable to disconnect a block during normal operation,
2615  // then that is a failure of our local system -- we should abort
2616  // rather than stay on a less work chain.
2617  AbortNode(state, "Failed to disconnect block; see debug.log for details");
2618  return false;
2619  }
2620  fBlocksDisconnected = true;
2621  }
2622 
2623  // Build list of new blocks to connect (in descending height order).
2624  std::vector<CBlockIndex*> vpindexToConnect;
2625  bool fContinue = true;
2626  int nHeight = pindexFork ? pindexFork->nHeight : -1;
2627  while (fContinue && nHeight != pindexMostWork->nHeight) {
2628  // Don't iterate the entire list of potential improvements toward the best tip, as we likely only need
2629  // a few blocks along the way.
2630  int nTargetHeight = std::min(nHeight + 32, pindexMostWork->nHeight);
2631  vpindexToConnect.clear();
2632  vpindexToConnect.reserve(nTargetHeight - nHeight);
2633  CBlockIndex* pindexIter = pindexMostWork->GetAncestor(nTargetHeight);
2634  while (pindexIter && pindexIter->nHeight != nHeight) {
2635  vpindexToConnect.push_back(pindexIter);
2636  pindexIter = pindexIter->pprev;
2637  }
2638  nHeight = nTargetHeight;
2639 
2640  // Connect new blocks.
2641  for (CBlockIndex* pindexConnect : reverse_iterate(vpindexToConnect)) {
2642  if (!ConnectTip(state, chainparams, pindexConnect, pindexConnect == pindexMostWork ? pblock : std::shared_ptr<const CBlock>(), connectTrace, disconnectpool)) {
2643  if (state.IsInvalid()) {
2644  // The block violates a consensus rule.
2646  InvalidChainFound(vpindexToConnect.front());
2647  }
2648  state = BlockValidationState();
2649  fInvalidFound = true;
2650  fContinue = false;
2651  break;
2652  } else {
2653  // A system error occurred (disk space, database error, ...).
2654  // Make the mempool consistent with the current tip, just in case
2655  // any observers try to use it before shutdown.
2656  UpdateMempoolForReorg(*this, m_mempool, disconnectpool, false);
2657  return false;
2658  }
2659  } else {
2661  if (!pindexOldTip || m_chain.Tip()->nChainWork > pindexOldTip->nChainWork) {
2662  // We're in a better position than we were. Return temporarily to release the lock.
2663  fContinue = false;
2664  break;
2665  }
2666  }
2667  }
2668  }
2669 
2670  if (fBlocksDisconnected) {
2671  // If any blocks were disconnected, disconnectpool may be non-empty. Add
2672  // any disconnected transactions back to the mempool.
2673  UpdateMempoolForReorg(*this, m_mempool, disconnectpool, true);
2674  }
2675  m_mempool.check(*this);
2676 
2678 
2679  return true;
2680 }
2681 
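// [Editor's note] Worked example for the 32-block batching in
// ActivateBestChainStep() above: if the fork point is at height 1,000 and
// pindexMostWork is at height 1,100, successive passes use nTargetHeight =
// 1,032, 1,064, 1,096 and finally 1,100, i.e. at most 32 blocks are connected
// per pass. Once a pass has left the chain in a better position than the old
// tip, the function returns so that ActivateBestChain() can release cs_main
// before continuing, keeping other threads and the validation queue responsive.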
2683 {
2684  if (!init) return SynchronizationState::POST_INIT;
2687 }
2688 
2690  bool fNotify = false;
2691  bool fInitialBlockDownload = false;
2692  static CBlockIndex* pindexHeaderOld = nullptr;
2693  CBlockIndex* pindexHeader = nullptr;
2694  {
2695  LOCK(cs_main);
2696  pindexHeader = pindexBestHeader;
2697 
2698  if (pindexHeader != pindexHeaderOld) {
2699  fNotify = true;
2700  assert(std::addressof(::ChainstateActive()) == std::addressof(chainstate));
2701  fInitialBlockDownload = chainstate.IsInitialBlockDownload();
2702  pindexHeaderOld = pindexHeader;
2703  }
2704  }
2705  // Send block tip changed notifications without cs_main
2706  if (fNotify) {
2707  uiInterface.NotifyHeaderTip(GetSynchronizationState(fInitialBlockDownload), pindexHeader);
2708  }
2709  return fNotify;
2710 }
2711 
2712 static void LimitValidationInterfaceQueue() LOCKS_EXCLUDED(cs_main) {
2713  AssertLockNotHeld(cs_main);
2714 
2715  if (GetMainSignals().CallbacksPending() > 10) {
2716  SyncWithValidationInterfaceQueue();
2717  }
2718 }
2719 
2720 bool CChainState::ActivateBestChain(BlockValidationState &state, const CChainParams& chainparams, std::shared_ptr<const CBlock> pblock) {
2721  // Note that while we're often called here from ProcessNewBlock, this is
2722  // far from a guarantee. Things in the P2P/RPC will often end up calling
2723  // us in the middle of ProcessNewBlock - do not assume pblock is set
2724  // sanely for performance or correctness!
2725  AssertLockNotHeld(cs_main);
2726 
2727  // ABC maintains a fair degree of expensive-to-calculate internal state
2728  // because this function periodically releases cs_main so that it does not lock up other threads for too long
2729  // during large connects - and to allow for e.g. the callback queue to drain.
2730  // We use m_cs_chainstate to enforce mutual exclusion so that only one caller may execute this function at a time.
2731  LOCK(m_cs_chainstate);
2732 
2733  CBlockIndex *pindexMostWork = nullptr;
2734  CBlockIndex *pindexNewTip = nullptr;
2735  int nStopAtHeight = gArgs.GetArg("-stopatheight", DEFAULT_STOPATHEIGHT);
2736  do {
2737  // Block until the validation queue drains. This should largely
2738  // never happen in normal operation, however may happen during
2739  // reindex, causing memory blowup if we run too far ahead.
2740  // Note that if a validationinterface callback ends up calling
2741  // ActivateBestChain this may lead to a deadlock! We should
2742  // probably have a DEBUG_LOCKORDER test for this in the future.
2743  LimitValidationInterfaceQueue();
2744 
2745  {
2746  LOCK(cs_main);
2747  LOCK(m_mempool.cs); // Lock transaction pool for at least as long as it takes for connectTrace to be consumed
2748  CBlockIndex* starting_tip = m_chain.Tip();
2749  bool blocks_connected = false;
2750  do {
2751  // We absolutely may not unlock cs_main until we've made forward progress
2752  // (with the exception of shutdown due to hardware issues, low disk space, etc).
2753  ConnectTrace connectTrace; // Destructed before cs_main is unlocked
2754 
2755  if (pindexMostWork == nullptr) {
2756  pindexMostWork = FindMostWorkChain();
2757  }
2758 
2759  // Whether we have anything to do at all.
2760  if (pindexMostWork == nullptr || pindexMostWork == m_chain.Tip()) {
2761  break;
2762  }
2763 
2764  bool fInvalidFound = false;
2765  std::shared_ptr<const CBlock> nullBlockPtr;
2766  if (!ActivateBestChainStep(state, chainparams, pindexMostWork, pblock && pblock->GetHash() == pindexMostWork->GetBlockHash() ? pblock : nullBlockPtr, fInvalidFound, connectTrace)) {
2767  // A system error occurred
2768  return false;
2769  }
2770  blocks_connected = true;
2771 
2772  if (fInvalidFound) {
2773  // Wipe cache, we may need another branch now.
2774  pindexMostWork = nullptr;
2775  }
2776  pindexNewTip = m_chain.Tip();
2777 
2778  for (const PerBlockConnectTrace& trace : connectTrace.GetBlocksConnected()) {
2779  assert(trace.pblock && trace.pindex);
2780  GetMainSignals().BlockConnected(trace.pblock, trace.pindex);
2781  }
2782  } while (!m_chain.Tip() || (starting_tip && CBlockIndexWorkComparator()(m_chain.Tip(), starting_tip)));
2783  if (!blocks_connected) return true;
2784 
2785  const CBlockIndex* pindexFork = m_chain.FindFork(starting_tip);
2786  bool fInitialDownload = IsInitialBlockDownload();
2787 
2788  // Notify external listeners about the new tip.
2789  // Enqueue while holding cs_main to ensure that UpdatedBlockTip is called in the order in which blocks are connected
2790  if (pindexFork != pindexNewTip) {
2791  // Notify ValidationInterface subscribers
2792  GetMainSignals().UpdatedBlockTip(pindexNewTip, pindexFork, fInitialDownload);
2793 
2794  // Always notify the UI if a new block tip was connected
2795  uiInterface.NotifyBlockTip(GetSynchronizationState(fInitialDownload), pindexNewTip);
2796  }
2797  }
2798  // When we reach this point, we switched to a new tip (stored in pindexNewTip).
2799 
2800  if (nStopAtHeight && pindexNewTip && pindexNewTip->nHeight >= nStopAtHeight) StartShutdown();
2801 
2802  // We check shutdown only after giving ActivateBestChainStep a chance to run once so that we
2803  // never shutdown before connecting the genesis block during LoadChainTip(). Previously this
2804  // caused an assert() failure during shutdown in such cases as the UTXO DB flushing checks
2805  // that the best block hash is non-null.
2806  if (ShutdownRequested()) break;
2807  } while (pindexNewTip != pindexMostWork);
2808  CheckBlockIndex(chainparams.GetConsensus());
2809 
2810  // Write changes periodically to disk, after relay.
2811  if (!FlushStateToDisk(chainparams, state, FlushStateMode::PERIODIC)) {
2812  return false;
2813  }
2814 
2815  return true;
2816 }
2817 
2818 bool CChainState::PreciousBlock(BlockValidationState& state, const CChainParams& params, CBlockIndex* pindex)
2819 {
2820  {
2821  LOCK(cs_main);
2822  if (pindex->nChainWork < m_chain.Tip()->nChainWork) {
2823  // Nothing to do, this block is not at the tip.
2824  return true;
2825  }
2826  if (m_chain.Tip()->nChainWork > nLastPreciousChainwork) {
2827  // The chain has been extended since the last call, reset the counter.
2828  nBlockReverseSequenceId = -1;
2829  }
2830  nLastPreciousChainwork = m_chain.Tip()->nChainWork;
2831  setBlockIndexCandidates.erase(pindex);
2832  pindex->nSequenceId = nBlockReverseSequenceId;
2833  if (nBlockReverseSequenceId > std::numeric_limits<int32_t>::min()) {
2834  // We can't keep reducing the counter if somebody really wants to
2835  // call preciousblock 2**31-1 times on the same set of tips...
2836  nBlockReverseSequenceId--;
2837  }
2838  if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && pindex->HaveTxsDownloaded()) {
2839  setBlockIndexCandidates.insert(pindex);
2840  PruneBlockIndexCandidates();
2841  }
2842  }
2843 
2844  return ActivateBestChain(state, params, std::shared_ptr<const CBlock>());
2845 }
2846 
2847 bool CChainState::InvalidateBlock(BlockValidationState& state, const CChainParams& chainparams, CBlockIndex* pindex)
2848 {
2849  // Genesis block can't be invalidated
2850  assert(pindex);
2851  if (pindex->nHeight == 0) return false;
2852 
2853  CBlockIndex* to_mark_failed = pindex;
2854  bool pindex_was_in_chain = false;
2855  int disconnected = 0;
2856 
2857  // We do not allow ActivateBestChain() to run while InvalidateBlock() is
2858  // running, as that could cause the tip to change while we disconnect
2859  // blocks.
2860  LOCK(m_cs_chainstate);
2861 
2862  // We'll be acquiring and releasing cs_main below, to allow the validation
2863  // callbacks to run. However, we should keep the block index in a
2864  // consistent state as we disconnect blocks -- in particular we need to
2865  // add equal-work blocks to setBlockIndexCandidates as we disconnect.
2866  // To avoid walking the block index repeatedly in search of candidates,
2867  // build a map once so that we can look up candidate blocks by chain
2868  // work as we go.
2869  std::multimap<const arith_uint256, CBlockIndex *> candidate_blocks_by_work;
2870 
2871  {
2872  LOCK(cs_main);
2873  for (const auto& entry : m_blockman.m_block_index) {
2874  CBlockIndex *candidate = entry.second;
2875  // We don't need to put anything in our active chain into the
2876  // multimap, because those candidates will be found and considered
2877  // as we disconnect.
2878  // Instead, consider only non-active-chain blocks that have at
2879  // least as much work as where we expect the new tip to end up.
2880  if (!m_chain.Contains(candidate) &&
2881  !CBlockIndexWorkComparator()(candidate, pindex->pprev) &&
2882  candidate->IsValid(BLOCK_VALID_TRANSACTIONS) &&
2883  candidate->HaveTxsDownloaded()) {
2884  candidate_blocks_by_work.insert(std::make_pair(candidate->nChainWork, candidate));
2885  }
2886  }
2887  }
2888 
2889  // Disconnect (descendants of) pindex, and mark them invalid.
2890  while (true) {
2891  if (ShutdownRequested()) break;
2892 
2893  // Make sure the queue of validation callbacks doesn't grow unboundedly.
2894  LimitValidationInterfaceQueue();
2895 
2896  LOCK(cs_main);
2897  LOCK(m_mempool.cs); // Lock for as long as disconnectpool is in scope to make sure UpdateMempoolForReorg is called after DisconnectTip without unlocking in between
2898  if (!m_chain.Contains(pindex)) break;
2899  pindex_was_in_chain = true;
2900  CBlockIndex *invalid_walk_tip = m_chain.Tip();
2901 
2902  // ActivateBestChain considers blocks already in m_chain
2903  // unconditionally valid already, so force disconnect away from it.
2904  DisconnectedBlockTransactions disconnectpool;
2905  bool ret = DisconnectTip(state, chainparams, &disconnectpool);
2906  // DisconnectTip will add transactions to disconnectpool.
2907  // Adjust the mempool to be consistent with the new tip, adding
2908  // transactions back to the mempool if disconnecting was successful,
2909  // and we're not doing a very deep invalidation (in which case
2910  // keeping the mempool up to date is probably futile anyway).
2911  assert(std::addressof(::ChainstateActive()) == std::addressof(*this));
2912  UpdateMempoolForReorg(*this, m_mempool, disconnectpool, /* fAddToMempool = */ (++disconnected <= 10) && ret);
2913  if (!ret) return false;
2914  assert(invalid_walk_tip->pprev == m_chain.Tip());
2915 
2916  // We immediately mark the disconnected blocks as invalid.
2917  // This prevents a case where pruned nodes may fail to invalidateblock
2918  // and be left unable to start as they have no tip candidates (as there
2919  // are no blocks that meet the "have data and are not invalid per
2920  // nStatus" criteria for inclusion in setBlockIndexCandidates).
2921  invalid_walk_tip->nStatus |= BLOCK_FAILED_VALID;
2922  setDirtyBlockIndex.insert(invalid_walk_tip);
2923  setBlockIndexCandidates.erase(invalid_walk_tip);
2924  setBlockIndexCandidates.insert(invalid_walk_tip->pprev);
2925  if (invalid_walk_tip->pprev == to_mark_failed && (to_mark_failed->nStatus & BLOCK_FAILED_VALID)) {
2926  // We only want to mark the last disconnected block as BLOCK_FAILED_VALID; its children
2927  // need to be BLOCK_FAILED_CHILD instead.
2928  to_mark_failed->nStatus = (to_mark_failed->nStatus ^ BLOCK_FAILED_VALID) | BLOCK_FAILED_CHILD;
2929  setDirtyBlockIndex.insert(to_mark_failed);
2930  }
2931 
2932  // Add any equal or more work headers to setBlockIndexCandidates
2933  auto candidate_it = candidate_blocks_by_work.lower_bound(invalid_walk_tip->pprev->nChainWork);
2934  while (candidate_it != candidate_blocks_by_work.end()) {
2935  if (!CBlockIndexWorkComparator()(candidate_it->second, invalid_walk_tip->pprev)) {
2936  setBlockIndexCandidates.insert(candidate_it->second);
2937  candidate_it = candidate_blocks_by_work.erase(candidate_it);
2938  } else {
2939  ++candidate_it;
2940  }
2941  }
2942 
2943  // Track the last disconnected block, so we can correct its BLOCK_FAILED_CHILD status in future
2944  // iterations, or, if it's the last one, call InvalidChainFound on it.
2945  to_mark_failed = invalid_walk_tip;
2946  }
2947 
2948  CheckBlockIndex(chainparams.GetConsensus());
2949 
2950  {
2951  LOCK(cs_main);
2952  if (m_chain.Contains(to_mark_failed)) {
2953  // If the to-be-marked invalid block is in the active chain, something is interfering and we can't proceed.
2954  return false;
2955  }
2956 
2957  // Mark pindex (or the last disconnected block) as invalid, even when it never was in the main chain
2958  to_mark_failed->nStatus |= BLOCK_FAILED_VALID;
2959  setDirtyBlockIndex.insert(to_mark_failed);
2960  setBlockIndexCandidates.erase(to_mark_failed);
2961  m_blockman.m_failed_blocks.insert(to_mark_failed);
2962 
2963  // If any new blocks somehow arrived while we were disconnecting
2964  // (above), then the pre-calculation of what should go into
2965  // setBlockIndexCandidates may have missed entries. This would
2966  // technically be an inconsistency in the block index, but if we clean
2967  // it up here, this should be an essentially unobservable error.
2968  // Loop back over all block index entries and add any missing entries
2969  // to setBlockIndexCandidates.
2970  BlockMap::iterator it = m_blockman.m_block_index.begin();
2971  while (it != m_blockman.m_block_index.end()) {
2972  if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->HaveTxsDownloaded() && !setBlockIndexCandidates.value_comp()(it->second, m_chain.Tip())) {
2973  setBlockIndexCandidates.insert(it->second);
2974  }
2975  it++;
2976  }
2977 
2978  InvalidChainFound(to_mark_failed);
2979  }
2980 
2981  // Only notify about a new block tip if the active chain was modified.
2982  if (pindex_was_in_chain) {
2983  uiInterface.NotifyBlockTip(GetSynchronizationState(IsInitialBlockDownload()), to_mark_failed->pprev);
2984  }
2985  return true;
2986 }
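InvalidateBlock pre-builds candidate_blocks_by_work so that, after each disconnect, any fork tip with at least as much work as the new tip can be promoted to a candidate without rescanning the whole block index. A standalone sketch of that lookup pattern, using uint64_t as a stand-in for arith_uint256 chain work and strings for block pointers (illustration only):

#include <cstdint>
#include <cstdio>
#include <map>
#include <set>
#include <string>

int main()
{
    // Non-active-chain tips keyed by their (simplified) chain work.
    std::multimap<uint64_t, std::string> candidates_by_work{
        {90, "fork-A"}, {120, "fork-B"}, {150, "fork-C"}};
    std::set<std::string> candidate_tips;

    const uint64_t new_tip_work = 110; // work of the tip we just disconnected down to

    // Promote every stored fork with work >= the new tip, erasing as we go,
    // mirroring the lower_bound/erase walk in the disconnect loop above.
    auto it = candidates_by_work.lower_bound(new_tip_work);
    while (it != candidates_by_work.end()) {
        candidate_tips.insert(it->second);
        it = candidates_by_work.erase(it);
    }
    for (const auto& name : candidate_tips) std::printf("candidate: %s\n", name.c_str());
}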
2987 
2988 void CChainState::ResetBlockFailureFlags(CBlockIndex *pindex) {
2989  AssertLockHeld(cs_main);
2990 
2991  int nHeight = pindex->nHeight;
2992 
2993  // Remove the invalidity flag from this block and all its descendants.
2994  BlockMap::iterator it = m_blockman.m_block_index.begin();
2995  while (it != m_blockman.m_block_index.end()) {
2996  if (!it->second->IsValid() && it->second->GetAncestor(nHeight) == pindex) {
2997  it->second->nStatus &= ~BLOCK_FAILED_MASK;
2998  setDirtyBlockIndex.insert(it->second);
2999  if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->HaveTxsDownloaded() && setBlockIndexCandidates.value_comp()(m_chain.Tip(), it->second)) {
3000  setBlockIndexCandidates.insert(it->second);
3001  }
3002  if (it->second == pindexBestInvalid) {
3003  // Reset invalid block marker if it was pointing to one of those.
3004  pindexBestInvalid = nullptr;
3005  }
3006  m_blockman.m_failed_blocks.erase(it->second);
3007  }
3008  it++;
3009  }
3010 
3011  // Remove the invalidity flag from all ancestors too.
3012  while (pindex != nullptr) {
3013  if (pindex->nStatus & BLOCK_FAILED_MASK) {
3014  pindex->nStatus &= ~BLOCK_FAILED_MASK;
3015  setDirtyBlockIndex.insert(pindex);
3016  m_blockman.m_failed_blocks.erase(pindex);
3017  }
3018  pindex = pindex->pprev;
3019  }
3020 }
3021 
3022 CBlockIndex* BlockManager::AddToBlockIndex(const CBlockHeader& block)
3023 {
3024  AssertLockHeld(cs_main);
3025 
3026  // Check for duplicate
3027  uint256 hash = block.GetHash();
3028  BlockMap::iterator it = m_block_index.find(hash);
3029  if (it != m_block_index.end())
3030  return it->second;
3031 
3032  // Construct new block index object
3033  CBlockIndex* pindexNew = new CBlockIndex(block);
3034  // We assign the sequence id to blocks only when the full data is available,
3035  // to avoid miners withholding blocks but broadcasting headers, to get a
3036  // competitive advantage.
3037  pindexNew->nSequenceId = 0;
3038  BlockMap::iterator mi = m_block_index.insert(std::make_pair(hash, pindexNew)).first;
3039  pindexNew->phashBlock = &((*mi).first);
3040  BlockMap::iterator miPrev = m_block_index.find(block.hashPrevBlock);
3041  if (miPrev != m_block_index.end())
3042  {
3043  pindexNew->pprev = (*miPrev).second;
3044  pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
3045  pindexNew->BuildSkip();
3046  }
3047  pindexNew->nTimeMax = (pindexNew->pprev ? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime) : pindexNew->nTime);
3048  pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew);
3049  pindexNew->RaiseValidity(BLOCK_VALID_TREE);
3050  if (pindexBestHeader == nullptr || pindexBestHeader->nChainWork < pindexNew->nChainWork)
3051  pindexBestHeader = pindexNew;
3052 
3053  setDirtyBlockIndex.insert(pindexNew);
3054 
3055  return pindexNew;
3056 }
3057 
3058 /** Mark a block as having its data received and checked (up to BLOCK_VALID_TRANSACTIONS). */
3059 void CChainState::ReceivedBlockTransactions(const CBlock& block, CBlockIndex* pindexNew, const FlatFilePos& pos, const Consensus::Params& consensusParams)
3060 {
3061  pindexNew->nTx = block.vtx.size();
3062  pindexNew->nChainTx = 0;
3063  pindexNew->nFile = pos.nFile;
3064  pindexNew->nDataPos = pos.nPos;
3065  pindexNew->nUndoPos = 0;
3066  pindexNew->nStatus |= BLOCK_HAVE_DATA;
3067  if (IsWitnessEnabled(pindexNew->pprev, consensusParams)) {
3068  pindexNew->nStatus |= BLOCK_OPT_WITNESS;
3069  }
3070  pindexNew->RaiseValidity(BLOCK_VALID_TRANSACTIONS);
3071  setDirtyBlockIndex.insert(pindexNew);
3072 
3073  if (pindexNew->pprev == nullptr || pindexNew->pprev->HaveTxsDownloaded()) {
3074  // If pindexNew is the genesis block or all parents are BLOCK_VALID_TRANSACTIONS.
3075  std::deque<CBlockIndex*> queue;
3076  queue.push_back(pindexNew);
3077 
3078  // Recursively process any descendant blocks that now may be eligible to be connected.
3079  while (!queue.empty()) {
3080  CBlockIndex *pindex = queue.front();
3081  queue.pop_front();
3082  pindex->nChainTx = (pindex->pprev ? pindex->pprev->nChainTx : 0) + pindex->nTx;
3083  {
3084  LOCK(cs_nBlockSequenceId);
3085  pindex->nSequenceId = nBlockSequenceId++;
3086  }
3087  if (m_chain.Tip() == nullptr || !setBlockIndexCandidates.value_comp()(pindex, m_chain.Tip())) {
3088  setBlockIndexCandidates.insert(pindex);
3089  }
3090  std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = m_blockman.m_blocks_unlinked.equal_range(pindex);
3091  while (range.first != range.second) {
3092  std::multimap<CBlockIndex*, CBlockIndex*>::iterator it = range.first;
3093  queue.push_back(it->second);
3094  range.first++;
3095  m_blockman.m_blocks_unlinked.erase(it);
3096  }
3097  }
3098  } else {
3099  if (pindexNew->pprev && pindexNew->pprev->IsValid(BLOCK_VALID_TREE)) {
3100  m_blockman.m_blocks_unlinked.insert(std::make_pair(pindexNew->pprev, pindexNew));
3101  }
3102  }
3103 }
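When a block's data arrives, descendants that were parked in m_blocks_unlinked may become connectable, so the function walks them breadth-first with a deque. A standalone sketch of that traversal over a parent-to-child multimap, with integers standing in for CBlockIndex pointers (illustration only):

#include <cstdio>
#include <deque>
#include <map>

int main()
{
    // parent -> child edges for blocks whose parents were missing data until now
    std::multimap<int, int> blocks_unlinked{{1, 2}, {2, 3}, {2, 4}};

    std::deque<int> queue{1}; // block 1 just became fully available
    while (!queue.empty()) {
        const int block = queue.front();
        queue.pop_front();
        std::printf("block %d is now connectable\n", block);

        // Move every direct descendant out of the unlinked map and onto the queue.
        auto range = blocks_unlinked.equal_range(block);
        while (range.first != range.second) {
            queue.push_back(range.first->second);
            range.first = blocks_unlinked.erase(range.first);
        }
    }
}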
3104 
3105 // TODO move to blockstorage
3106 bool FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigned int nHeight, CChain& active_chain, uint64_t nTime, bool fKnown = false)
3107 {
3108  LOCK(cs_LastBlockFile);
3109 
3110  unsigned int nFile = fKnown ? pos.nFile : nLastBlockFile;
3111  if (vinfoBlockFile.size() <= nFile) {
3112  vinfoBlockFile.resize(nFile + 1);
3113  }
3114 
3115  bool finalize_undo = false;
3116  if (!fKnown) {
3117  while (vinfoBlockFile[nFile].nSize + nAddSize >= (gArgs.GetBoolArg("-fastprune", false) ? 0x10000 /* 64kb */ : MAX_BLOCKFILE_SIZE)) {
3118  // when the undo file is keeping up with the block file, we want to flush it explicitly
3119  // when it is lagging behind (more blocks arrive than are being connected), we let the
3120  // undo block write case handle it
3121  assert(std::addressof(::ChainActive()) == std::addressof(active_chain));
3122  finalize_undo = (vinfoBlockFile[nFile].nHeightLast == (unsigned int)active_chain.Tip()->nHeight);
3123  nFile++;
3124  if (vinfoBlockFile.size() <= nFile) {
3125  vinfoBlockFile.resize(nFile + 1);
3126  }
3127  }
3128  pos.nFile = nFile;
3129  pos.nPos = vinfoBlockFile[nFile].nSize;
3130  }
3131 
3132  if ((int)nFile != nLastBlockFile) {
3133  if (!fKnown) {
3134  LogPrint(BCLog::VALIDATION, "Leaving block file %i: %s\n", nLastBlockFile, vinfoBlockFile[nLastBlockFile].ToString());
3135  }
3136  FlushBlockFile(!fKnown, finalize_undo);
3137  nLastBlockFile = nFile;
3138  }
3139 
3140  vinfoBlockFile[nFile].AddBlock(nHeight, nTime);
3141  if (fKnown)
3142  vinfoBlockFile[nFile].nSize = std::max(pos.nPos + nAddSize, vinfoBlockFile[nFile].nSize);
3143  else
3144  vinfoBlockFile[nFile].nSize += nAddSize;
3145 
3146  if (!fKnown) {
3147  bool out_of_space;
3148  size_t bytes_allocated = BlockFileSeq().Allocate(pos, nAddSize, out_of_space);
3149  if (out_of_space) {
3150  return AbortNode("Disk space is too low!", _("Disk space is too low!"));
3151  }
3152  if (bytes_allocated != 0 && fPruneMode) {
3153  fCheckForPruning = true;
3154  }
3155  }
3156 
3157  setDirtyFileInfo.insert(nFile);
3158  return true;
3159 }
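FindBlockPos appends blocks to the current blk?????.dat file and rolls over to a fresh file once the write would reach the per-file size cap (MAX_BLOCKFILE_SIZE, or 64 KiB under -fastprune). A standalone sketch of that rollover arithmetic with made-up sizes (illustration only; the cap here is a placeholder):

#include <cstdio>
#include <vector>

int main()
{
    const unsigned max_file_size = 128 * 1024 * 1024;    // placeholder cap per blk file
    std::vector<unsigned> file_sizes{127 * 1024 * 1024}; // bytes already used per file
    unsigned file = 0;

    const unsigned add_size = 2 * 1024 * 1024; // serialized size of the incoming block

    // Keep advancing to a new file while the current one cannot hold the block.
    while (file_sizes[file] + add_size >= max_file_size) {
        ++file;
        if (file_sizes.size() <= file) file_sizes.resize(file + 1);
    }
    const unsigned pos = file_sizes[file]; // write offset inside the chosen file
    file_sizes[file] += add_size;
    std::printf("write block at blk%05u.dat offset %u\n", file, pos);
}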
3160 
3161 static bool FindUndoPos(BlockValidationState &state, int nFile, FlatFilePos &pos, unsigned int nAddSize)
3162 {
3163  pos.nFile = nFile;
3164 
3165  LOCK(cs_LastBlockFile);
3166 
3167  pos.nPos = vinfoBlockFile[nFile].nUndoSize;
3168  vinfoBlockFile[nFile].nUndoSize += nAddSize;
3169  setDirtyFileInfo.insert(nFile);
3170 
3171  bool out_of_space;
3172  size_t bytes_allocated = UndoFileSeq().Allocate(pos, nAddSize, out_of_space);
3173  if (out_of_space) {
3174  return AbortNode(state, "Disk space is too low!", _("Disk space is too low!"));
3175  }
3176  if (bytes_allocated != 0 && fPruneMode) {
3177  fCheckForPruning = true;
3178  }
3179 
3180  return true;
3181 }
3182 
3183 static bool CheckBlockHeader(const CBlockHeader& block, BlockValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW = true)
3184 {
3185  // Check proof of work matches claimed amount
3186  if (fCheckPOW && !CheckProofOfWork(block.GetHash(), block.nBits, consensusParams))
3187  return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, "high-hash", "proof of work failed");
3188 
3189  return true;
3190 }
3191 
3192 bool CheckBlock(const CBlock& block, BlockValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW, bool fCheckMerkleRoot)
3193 {
3194  // These are checks that are independent of context.
3195 
3196  if (block.fChecked)
3197  return true;
3198 
3199  // Check that the header is valid (particularly PoW). This is mostly
3200  // redundant with the call in AcceptBlockHeader.
3201  if (!CheckBlockHeader(block, state, consensusParams, fCheckPOW))
3202  return false;
3203 
3204  // Signet only: check block solution
3205  if (consensusParams.signet_blocks && fCheckPOW && !CheckSignetBlockSolution(block, consensusParams)) {
3206  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-signet-blksig", "signet block signature validation failure");
3207  }
3208 
3209  // Check the merkle root.
3210  if (fCheckMerkleRoot) {
3211  bool mutated;
3212  uint256 hashMerkleRoot2 = BlockMerkleRoot(block, &mutated);
3213  if (block.hashMerkleRoot != hashMerkleRoot2)
3214  return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-txnmrklroot", "hashMerkleRoot mismatch");
3215 
3216  // Check for merkle tree malleability (CVE-2012-2459): repeating sequences
3217  // of transactions in a block without affecting the merkle root of a block,
3218  // while still invalidating it.
3219  if (mutated)
3220  return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-txns-duplicate", "duplicate transaction");
3221  }
3222 
3223  // All potential-corruption validation must be done before we do any
3224  // transaction validation, as otherwise we may mark the header as invalid
3225  // because we receive the wrong transactions for it.
3226  // Note that witness malleability is checked in ContextualCheckBlock, so no
3227  // checks that use witness data may be performed here.
3228 
3229  // Size limits
3230  if (block.vtx.empty() || block.vtx.size() * WITNESS_SCALE_FACTOR > MAX_BLOCK_WEIGHT || ::GetSerializeSize(block, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) * WITNESS_SCALE_FACTOR > MAX_BLOCK_WEIGHT)
3231  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-length", "size limits failed");
3232 
3233  // First transaction must be coinbase, the rest must not be
3234  if (block.vtx.empty() || !block.vtx[0]->IsCoinBase())
3235  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-missing", "first tx is not coinbase");
3236  for (unsigned int i = 1; i < block.vtx.size(); i++)
3237  if (block.vtx[i]->IsCoinBase())
3238  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-multiple", "more than one coinbase");
3239 
3240  // Check transactions
3241  // Must check for duplicate inputs (see CVE-2018-17144)
3242  for (const auto& tx : block.vtx) {
3243  TxValidationState tx_state;
3244  if (!CheckTransaction(*tx, tx_state)) {
3245  // CheckBlock() does context-free validation checks. The only
3246  // possible failures are consensus failures.
3247  assert(tx_state.GetResult() == TxValidationResult::TX_CONSENSUS);
3248  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, tx_state.GetRejectReason(),
3249  strprintf("Transaction check failed (tx hash %s) %s", tx->GetHash().ToString(), tx_state.GetDebugMessage()));
3250  }
3251  }
3252  unsigned int nSigOps = 0;
3253  for (const auto& tx : block.vtx)
3254  {
3255  nSigOps += GetLegacySigOpCount(*tx);
3256  }
3257  if (nSigOps * WITNESS_SCALE_FACTOR > MAX_BLOCK_SIGOPS_COST)
3258  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-sigops", "out-of-bounds SigOpCount");
3259 
3260  if (fCheckPOW && fCheckMerkleRoot)
3261  block.fChecked = true;
3262 
3263  return true;
3264 }
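The sigop check above counts legacy signature operations and scales them by WITNESS_SCALE_FACTOR (4) against the block budget MAX_BLOCK_SIGOPS_COST (80,000), so a block can carry at most 20,000 legacy sigops. A standalone sketch of that budget arithmetic (constants restated here for illustration):

#include <cstdio>

int main()
{
    const unsigned max_block_sigops_cost = 80000; // consensus sigop cost budget per block
    const unsigned witness_scale_factor = 4;      // legacy sigops are the most expensive kind

    const unsigned legacy_sigops = 20001;         // hypothetical count from a sigop counter
    const bool too_many = legacy_sigops * witness_scale_factor > max_block_sigops_cost;
    std::printf("legacy sigop budget: %u, counted: %u, rejected: %s\n",
                max_block_sigops_cost / witness_scale_factor, legacy_sigops,
                too_many ? "yes (bad-blk-sigops)" : "no");
}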
3265 
3266 bool IsWitnessEnabled(const CBlockIndex* pindexPrev, const Consensus::Params& params)
3267 {
3268  int height = pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1;
3269  return (height >= params.SegwitHeight);
3270 }
3271 
3272 void UpdateUncommittedBlockStructures(CBlock& block, const CBlockIndex* pindexPrev, const Consensus::Params& consensusParams)
3273 {
3274  int commitpos = GetWitnessCommitmentIndex(block);
3275  static const std::vector<unsigned char> nonce(32, 0x00);
3276  if (commitpos != NO_WITNESS_COMMITMENT && IsWitnessEnabled(pindexPrev, consensusParams) && !block.vtx[0]->HasWitness()) {
3277  CMutableTransaction tx(*block.vtx[0]);
3278  tx.vin[0].scriptWitness.stack.resize(1);
3279  tx.vin[0].scriptWitness.stack[0] = nonce;
3280  block.vtx[0] = MakeTransactionRef(std::move(tx));
3281  }
3282 }
3283 
3284 std::vector<unsigned char> GenerateCoinbaseCommitment(CBlock& block, const CBlockIndex* pindexPrev, const Consensus::Params& consensusParams)
3285 {
3286  std::vector<unsigned char> commitment;
3287  int commitpos = GetWitnessCommitmentIndex(block);
3288  std::vector<unsigned char> ret(32, 0x00);
3289  if (consensusParams.SegwitHeight != std::numeric_limits<int>::max()) {
3290  if (commitpos == NO_WITNESS_COMMITMENT) {
3291  uint256 witnessroot = BlockWitnessMerkleRoot(block, nullptr);
3292  CHash256().Write(witnessroot).Write(ret).Finalize(witnessroot);
3293  CTxOut out;
3294  out.nValue = 0;
3295  out.scriptPubKey.resize(MINIMUM_WITNESS_COMMITMENT);
3296  out.scriptPubKey[0] = OP_RETURN;
3297  out.scriptPubKey[1] = 0x24;
3298  out.scriptPubKey[2] = 0xaa;
3299  out.scriptPubKey[3] = 0x21;
3300  out.scriptPubKey[4] = 0xa9;
3301  out.scriptPubKey[5] = 0xed;
3302  memcpy(&out.scriptPubKey[6], witnessroot.begin(), 32);
3303  commitment = std::vector<unsigned char>(out.scriptPubKey.begin(), out.scriptPubKey.end());
3304  CMutableTransaction tx(*block.vtx[0]);
3305  tx.vout.push_back(out);
3306  block.vtx[0] = MakeTransactionRef(std::move(tx));
3307  }
3308  }
3309  UpdateUncommittedBlockStructures(block, pindexPrev, consensusParams);
3310  return commitment;
3311 }
3312 
3313 CBlockIndex* BlockManager::GetLastCheckpoint(const CCheckpointData& data)
3314 {
3315  const MapCheckpoints& checkpoints = data.mapCheckpoints;
3316 
3317  for (const MapCheckpoints::value_type& i : reverse_iterate(checkpoints))
3318  {
3319  const uint256& hash = i.second;
3320  assert(std::addressof(g_chainman.m_blockman) == std::addressof(*this));
3321  CBlockIndex* pindex = LookupBlockIndex(hash);
3322  if (pindex) {
3323  return pindex;
3324  }
3325  }
3326  return nullptr;
3327 }
3328 
3329 /** Context-dependent validity checks.
3330  *  By "context", we mean only the previous block headers, but not the UTXO
3331  *  set; UTXO-related validity checks are done in ConnectBlock().
3332  *  NOTE: This function is not currently invoked by ConnectBlock(), so we
3333  *  should consider upgrade issues if we change which consensus rules are
3334  *  enforced in this function (eg by adding a new consensus rule). See comment
3335  *  in ConnectBlock().
3336  *  Note that -reindex-chainstate skips the validation that happens here!
3337  */
3338 static bool ContextualCheckBlockHeader(const CBlockHeader& block, BlockValidationState& state, BlockManager& blockman, const CChainParams& params, const CBlockIndex* pindexPrev, int64_t nAdjustedTime) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
3339 {
3340  assert(pindexPrev != nullptr);
3341  const int nHeight = pindexPrev->nHeight + 1;
3342 
3343  // Check proof of work
3344  const Consensus::Params& consensusParams = params.GetConsensus();
3345  if (block.nBits != GetNextWorkRequired(pindexPrev, &block, consensusParams))
3346  return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, "bad-diffbits", "incorrect proof of work");
3347 
3348  // Check against checkpoints
3349  if (fCheckpointsEnabled) {
3350  // Don't accept any forks from the main chain prior to last checkpoint.
3351  // GetLastCheckpoint finds the last checkpoint in MapCheckpoints that's in our
3352  // BlockIndex().
3353  assert(std::addressof(g_chainman.m_blockman) == std::addressof(blockman));
3354  CBlockIndex* pcheckpoint = blockman.GetLastCheckpoint(params.Checkpoints());
3355  if (pcheckpoint && nHeight < pcheckpoint->nHeight) {
3356  LogPrintf("ERROR: %s: forked chain older than last checkpoint (height %d)\n", __func__, nHeight);
3357  return state.Invalid(BlockValidationResult::BLOCK_CHECKPOINT, "bad-fork-prior-to-checkpoint");
3358  }
3359  }
3360 
3361  // Check timestamp against prev
3362  if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast())
3363  return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, "time-too-old", "block's timestamp is too early");
3364 
3365  // Check timestamp
3366  if (block.GetBlockTime() > nAdjustedTime + MAX_FUTURE_BLOCK_TIME)
3367  return state.Invalid(BlockValidationResult::BLOCK_TIME_FUTURE, "time-too-new", "block timestamp too far in the future");
3368 
3369  // Reject outdated version blocks when 95% (75% on testnet) of the network has upgraded:
3370  // check for version 2, 3 and 4 upgrades
3371  if((block.nVersion < 2 && nHeight >= consensusParams.BIP34Height) ||
3372  (block.nVersion < 3 && nHeight >= consensusParams.BIP66Height) ||
3373  (block.nVersion < 4 && nHeight >= consensusParams.BIP65Height))
3374  return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, strprintf("bad-version(0x%08x)", block.nVersion),
3375  strprintf("rejected nVersion=0x%08x block", block.nVersion));
3376 
3377  return true;
3378 }
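The timestamp rules above compare the new header against the median of the previous 11 block times (median-time-past) on the low side and adjusted time plus MAX_FUTURE_BLOCK_TIME on the high side. A standalone sketch of the median-time-past computation, with made-up block times (illustration only):

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

// Median of the timestamps of up to the last 11 blocks, as GetMedianTimePast does conceptually.
int64_t MedianTimePast(std::vector<int64_t> times)
{
    const size_t n = std::min<size_t>(times.size(), 11);
    std::vector<int64_t> window(times.end() - n, times.end());
    std::sort(window.begin(), window.end());
    return window[window.size() / 2];
}

int main()
{
    std::vector<int64_t> block_times;
    for (int i = 0; i < 11; ++i) block_times.push_back(1600000000 + i * 600); // ~10 min spacing
    const int64_t mtp = MedianTimePast(block_times);
    const int64_t new_block_time = 1600003000; // equal to the median, so it must be rejected
    std::printf("median-time-past=%lld, new block %s\n", (long long)mtp,
                new_block_time <= mtp ? "rejected: time-too-old" : "passes the lower bound");
}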
3379 
3380 /** NOTE: This function is not currently invoked by ConnectBlock(), so we
3381  *  should consider upgrade issues if we change which consensus rules are
3382  *  enforced in this function (eg by adding a new consensus rule). See comment
3383  *  in ConnectBlock().
3384  *  Note that -reindex-chainstate skips the validation that happens here!
3385  */
3386 static bool ContextualCheckBlock(const CBlock& block, BlockValidationState& state, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev)
3387 {
3388  const int nHeight = pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1;
3389 
3390  // Start enforcing BIP113 (Median Time Past).
3391  int nLockTimeFlags = 0;
3392  if (nHeight >= consensusParams.CSVHeight) {
3393  assert(pindexPrev != nullptr);
3394  nLockTimeFlags |= LOCKTIME_MEDIAN_TIME_PAST;
3395  }
3396 
3397  int64_t nLockTimeCutoff = (nLockTimeFlags & LOCKTIME_MEDIAN_TIME_PAST)
3398  ? pindexPrev->GetMedianTimePast()
3399  : block.GetBlockTime();
3400 
3401  // Check that all transactions are finalized
3402  for (const auto& tx : block.vtx) {
3403  if (!IsFinalTx(*tx, nHeight, nLockTimeCutoff)) {
3404  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-nonfinal", "non-final transaction");
3405  }
3406  }
3407 
3408  // Enforce rule that the coinbase starts with serialized block height
3409  if (nHeight >= consensusParams.BIP34Height)
3410  {
3411  CScript expect = CScript() << nHeight;
3412  if (block.vtx[0]->vin[0].scriptSig.size() < expect.size() ||
3413  !std::equal(expect.begin(), expect.end(), block.vtx[0]->vin[0].scriptSig.begin())) {
3414  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-height", "block height mismatch in coinbase");
3415  }
3416  }
3417 
3418  // Validation for witness commitments.
3419  // * We compute the witness hash (which is the hash including witnesses) of all the block's transactions, except the
3420  // coinbase (where 0x0000....0000 is used instead).
3421  // * The coinbase scriptWitness is a stack of a single 32-byte vector, containing a witness reserved value (unconstrained).
3422  // * We build a merkle tree with all those witness hashes as leaves (similar to the hashMerkleRoot in the block header).
3423  // * There must be at least one output whose scriptPubKey is a single 36-byte push, the first 4 bytes of which are
3424  // {0xaa, 0x21, 0xa9, 0xed}, and the following 32 bytes are SHA256^2(witness root, witness reserved value). In case there are
3425  // multiple, the last one is used.
3426  bool fHaveWitness = false;
3427  if (nHeight >= consensusParams.SegwitHeight) {
3428  int commitpos = GetWitnessCommitmentIndex(block);
3429  if (commitpos != NO_WITNESS_COMMITMENT) {
3430  bool malleated = false;
3431  uint256 hashWitness = BlockWitnessMerkleRoot(block, &malleated);
3432  // The malleation check is ignored; as the transaction tree itself
3433  // already does not permit it, it is impossible to trigger in the
3434  // witness tree.
3435  if (block.vtx[0]->vin[0].scriptWitness.stack.size() != 1 || block.vtx[0]->vin[0].scriptWitness.stack[0].size() != 32) {
3436  return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-witness-nonce-size", strprintf("%s : invalid witness reserved value size", __func__));
3437  }
3438  CHash256().Write(hashWitness).Write(block.vtx[0]->vin[0].scriptWitness.stack[0]).Finalize(hashWitness);
3439  if (memcmp(hashWitness.begin(), &block.vtx[0]->vout[commitpos].scriptPubKey[6], 32)) {
3440  return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-witness-merkle-match", strprintf("%s : witness merkle commitment mismatch", __func__));
3441  }
3442  fHaveWitness = true;
3443  }
3444  }
3445 
3446  // No witness data is allowed in blocks that don't commit to witness data, as this would otherwise leave room for spam
3447  if (!fHaveWitness) {
3448  for (const auto& tx : block.vtx) {
3449  if (tx->HasWitness()) {
3450  return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "unexpected-witness", strprintf("%s : unexpected witness data found", __func__));
3451  }
3452  }
3453  }
3454 
3455  // After the coinbase witness reserved value and commitment are verified,
3456  // we can check if the block weight passes (before we've checked the
3457  // coinbase witness, it would be possible for the weight to be too
3458  // large by filling up the coinbase witness, which doesn't change
3459  // the block hash, so we couldn't mark the block as permanently
3460  // failed).
3461  if (GetBlockWeight(block) > MAX_BLOCK_WEIGHT) {
3462  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-weight", strprintf("%s : weight limit failed", __func__));
3463  }
3464 
3465  return true;
3466 }
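The commitment output described above is a 38-byte scriptPubKey: OP_RETURN, a 0x24 push, the 4-byte tag {0xaa, 0x21, 0xa9, 0xed}, then the 32-byte double-SHA256 of the witness merkle root and the witness reserved value. A standalone sketch of assembling that layout from an already-computed 32-byte hash (placeholder bytes, no real hashing):

#include <cstdio>
#include <vector>

int main()
{
    std::vector<unsigned char> commitment_hash(32, 0xab); // placeholder for SHA256d(witness root || reserved value)

    std::vector<unsigned char> script_pubkey;
    script_pubkey.push_back(0x6a); // OP_RETURN
    script_pubkey.push_back(0x24); // push of 36 bytes (4-byte tag + 32-byte hash)
    const unsigned char tag[4] = {0xaa, 0x21, 0xa9, 0xed};
    script_pubkey.insert(script_pubkey.end(), tag, tag + 4);
    script_pubkey.insert(script_pubkey.end(), commitment_hash.begin(), commitment_hash.end());

    // 1 (OP_RETURN) + 1 (push opcode) + 36 (payload) = 38 bytes; the hash starts at offset 6,
    // which is why the validation code compares against &scriptPubKey[6].
    std::printf("scriptPubKey size: %zu bytes\n", script_pubkey.size());
}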
3467 
3468 bool BlockManager::AcceptBlockHeader(const CBlockHeader& block, BlockValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex)
3469 {
3470  AssertLockHeld(cs_main);
3471  // Check for duplicate
3472  uint256 hash = block.GetHash();
3473  BlockMap::iterator miSelf = m_block_index.find(hash);
3474  if (hash != chainparams.GetConsensus().hashGenesisBlock) {
3475  if (miSelf != m_block_index.end()) {
3476  // Block header is already known.
3477  CBlockIndex* pindex = miSelf->second;
3478  if (ppindex)
3479  *ppindex = pindex;
3480  if (pindex->nStatus & BLOCK_FAILED_MASK) {
3481  LogPrintf("ERROR: %s: block %s is marked invalid\n", __func__, hash.ToString());
3482  return state.Invalid(BlockValidationResult::BLOCK_CACHED_INVALID, "duplicate");
3483  }
3484  return true;
3485  }
3486 
3487  if (!CheckBlockHeader(block, state, chainparams.GetConsensus())) {
3488  LogPrint(BCLog::VALIDATION, "%s: Consensus::CheckBlockHeader: %s, %s\n", __func__, hash.ToString(), state.ToString());
3489  return false;
3490  }
3491 
3492  // Get prev block index
3493  CBlockIndex* pindexPrev = nullptr;
3494  BlockMap::iterator mi = m_block_index.find(block.hashPrevBlock);
3495  if (mi == m_block_index.end()) {
3496  LogPrintf("ERROR: %s: prev block not found\n", __func__);
3497  return state.Invalid(BlockValidationResult::BLOCK_MISSING_PREV, "prev-blk-not-found");
3498  }
3499  pindexPrev = (*mi).second;
3500  if (pindexPrev->nStatus & BLOCK_FAILED_MASK) {
3501  LogPrintf("ERROR: %s: prev block invalid\n", __func__);
3502  return state.Invalid(BlockValidationResult::BLOCK_INVALID_PREV, "bad-prevblk");
3503  }
3504  if (!ContextualCheckBlockHeader(block, state, *this, chainparams, pindexPrev, GetAdjustedTime()))
3505  return error("%s: Consensus::ContextualCheckBlockHeader: %s, %s", __func__, hash.ToString(), state.ToString());
3506 
3507  /* Determine if this block descends from any block which has been found
3508  * invalid (m_failed_blocks), then mark pindexPrev and any blocks between
3509  * them as failed. For example:
3510  *
3511  * D3
3512  * /
3513  * B2 - C2
3514  * / \
3515  * A D2 - E2 - F2
3516  * \
3517  * B1 - C1 - D1 - E1
3518  *
3519  * In the case that we attempted to reorg from E1 to F2, only to find
3520  * C2 to be invalid, we would mark D2, E2, and F2 as BLOCK_FAILED_CHILD
3521  * but NOT D3 (it was not in any of our candidate sets at the time).
3522  *
3523  * In any case D3 will also be marked as BLOCK_FAILED_CHILD at restart
3524  * in LoadBlockIndex.
3525  */
3526  if (!pindexPrev->IsValid(BLOCK_VALID_SCRIPTS)) {
3527  // The above does not mean "invalid": it checks if the previous block
3528  // hasn't been validated up to BLOCK_VALID_SCRIPTS. This is a performance
3529  // optimization, in the common case of adding a new block to the tip,
3530  // we don't need to iterate over the failed blocks list.
3531  for (const CBlockIndex* failedit : m_failed_blocks) {
3532  if (pindexPrev->GetAncestor(failedit->nHeight) == failedit) {
3533  assert(failedit->nStatus & BLOCK_FAILED_VALID);
3534  CBlockIndex* invalid_walk = pindexPrev;
3535  while (invalid_walk != failedit) {
3536  invalid_walk->nStatus |= BLOCK_FAILED_CHILD;
3537  setDirtyBlockIndex.insert(invalid_walk);
3538  invalid_walk = invalid_walk->pprev;
3539  }
3540  LogPrintf("ERROR: %s: prev block invalid\n", __func__);
3541  return state.Invalid(BlockValidationResult::BLOCK_INVALID_PREV, "bad-prevblk");
3542  }
3543  }
3544  }
3545  }
3546  CBlockIndex* pindex = AddToBlockIndex(block);
3547 
3548  if (ppindex)
3549  *ppindex = pindex;
3550 
3551  return true;
3552 }
3553 
3554 // Exposed wrapper for AcceptBlockHeader
3555 bool ChainstateManager::ProcessNewBlockHeaders(const std::vector<CBlockHeader>& headers, BlockValidationState& state, const CChainParams& chainparams, const CBlockIndex** ppindex)
3556 {
3557  assert(std::addressof(::ChainstateActive()) == std::addressof(ActiveChainstate()));
3558  AssertLockNotHeld(cs_main);
3559  {
3560  LOCK(cs_main);
3561  for (const CBlockHeader& header : headers) {
3562  CBlockIndex *pindex = nullptr; // Use a temp pindex instead of ppindex to avoid a const_cast
3563  bool accepted = m_blockman.AcceptBlockHeader(
3564  header, state, chainparams, &pindex);
3565  ActiveChainstate().CheckBlockIndex(chainparams.GetConsensus());
3566 
3567  if (!accepted) {
3568  return false;
3569  }
3570  if (ppindex) {
3571  *ppindex = pindex;
3572  }
3573  }
3574  }
3575  if (NotifyHeaderTip(ActiveChainstate())) {
3576  if (ActiveChainstate().IsInitialBlockDownload() && ppindex && *ppindex) {
3577  LogPrintf("Synchronizing blockheaders, height: %d (~%.2f%%)\n", (*ppindex)->nHeight, 100.0/((*ppindex)->nHeight+(GetAdjustedTime() - (*ppindex)->GetBlockTime()) / Params().GetConsensus().nPowTargetSpacing) * (*ppindex)->nHeight);
3578  }
3579  }
3580  return true;
3581 }
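The log line above estimates header-sync progress by projecting how many headers should exist by now from the proof-of-work target spacing. A standalone sketch of that estimate with made-up values (a 600-second target spacing is assumed here):

#include <cstdio>

int main()
{
    const double spacing = 600.0;        // target seconds between blocks
    const double height = 400000.0;      // height of the last accepted header
    const double seconds_behind = 3.0e7; // adjusted time minus the header's timestamp (~1 year)

    // Estimated total header count = current height + headers expected to have been mined since.
    const double estimated_total = height + seconds_behind / spacing;
    const double percent = 100.0 * height / estimated_total;
    std::printf("Synchronizing blockheaders, height: %.0f (~%.2f%%)\n", height, percent);
}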
3582 
3583 /** Store block on disk. If dbp is non-nullptr, the file is known to already reside on disk */
3584 bool CChainState::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, BlockValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex, bool fRequested, const FlatFilePos* dbp, bool* fNewBlock)
3585 {
3586  const CBlock& block = *pblock;
3587 
3588  if (fNewBlock) *fNewBlock = false;
3589  AssertLockHeld(cs_main);
3590 
3591  CBlockIndex *pindexDummy = nullptr;
3592  CBlockIndex *&pindex = ppindex ? *ppindex : pindexDummy;
3593 
3594  bool accepted_header = m_blockman.AcceptBlockHeader(block, state, chainparams, &pindex);
3595  CheckBlockIndex(chainparams.GetConsensus());
3596 
3597  if (!accepted_header)
3598  return false;
3599 
3600  // Try to process all requested blocks that we don't have, but only
3601  // process an unrequested block if it's new and has enough work to
3602  // advance our tip, and isn't too many blocks ahead.
3603  bool fAlreadyHave = pindex->nStatus & BLOCK_HAVE_DATA;
3604  bool fHasMoreOrSameWork = (m_chain.Tip() ? pindex->nChainWork >= m_chain.Tip()->nChainWork : true);
3605  // Blocks that are too out-of-order needlessly limit the effectiveness of
3606  // pruning, because pruning will not delete block files that contain any
3607  // blocks which are too close in height to the tip. Apply this test
3608  // regardless of whether pruning is enabled; it should generally be safe to
3609  // not process unrequested blocks.
3610  bool fTooFarAhead = (pindex->nHeight > int(m_chain.Height() + MIN_BLOCKS_TO_KEEP));
3611 
3612  // TODO: Decouple this function from the block download logic by removing fRequested
3613  // This requires some new chain data structure to efficiently look up if a
3614  // block is in a chain leading to a candidate for best tip, despite not
3615  // being such a candidate itself.
3616 
3617  // TODO: deal better with return value and error conditions for duplicate
3618  // and unrequested blocks.
3619  if (fAlreadyHave) return true;
3620  if (!fRequested) { // If we didn't ask for it:
3621  if (pindex->nTx != 0) return true; // This is a previously-processed block that was pruned
3622  if (!fHasMoreOrSameWork) return true; // Don't process less-work chains
3623  if (fTooFarAhead) return true; // Block height is too high
3624 
3625  // Protect against DoS attacks from low-work chains.
3626  // If our tip is behind, a peer could try to send us
3627  // low-work blocks on a fake chain that we would never
3628  // request; don't process these.
3629  if (pindex->nChainWork < nMinimumChainWork) return true;
3630  }
3631 
3632  if (!CheckBlock(block, state, chainparams.GetConsensus()) ||
3633  !ContextualCheckBlock(block, state, chainparams.GetConsensus(), pindex->pprev)) {
3634  if (state.IsInvalid() && state.GetResult() != BlockValidationResult::BLOCK_MUTATED) {
3635  pindex->nStatus |= BLOCK_FAILED_VALID;
3636  setDirtyBlockIndex.insert(pindex);
3637  }
3638  return error("%s: %s", __func__, state.ToString());
3639  }
3640 
3641  // Header is valid/has work, merkle tree and segwit merkle tree are good...RELAY NOW
3642  // (but if it does not build on our best tip, let the SendMessages loop relay it)
3643  if (!IsInitialBlockDownload() && m_chain.Tip() == pindex->pprev)
3644  GetMainSignals().NewPoWValidBlock(pindex, pblock);
3645 
3646  // Write block to history file
3647  if (fNewBlock) *fNewBlock = true;
3648  assert(std::addressof(::ChainActive()) == std::addressof(m_chain));
3649  try {
3650  FlatFilePos blockPos = SaveBlockToDisk(block, pindex->nHeight, m_chain, chainparams, dbp);
3651  if (blockPos.IsNull()) {
3652  state.Error(strprintf("%s: Failed to find position to write new block to disk", __func__));
3653  return false;
3654  }
3655  ReceivedBlockTransactions(block, pindex, blockPos, chainparams.GetConsensus());
3656  } catch (const std::runtime_error& e) {
3657  return AbortNode(state, std::string("System error: ") + e.what());
3658  }
3659 
3660  FlushStateToDisk(chainparams, state, FlushStateMode::NONE);
3661 
3662  CheckBlockIndex(chainparams.GetConsensus());
3663 
3664  return true;
3665 }
3666 
3667 bool ChainstateManager::ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<const CBlock> pblock, bool fForceProcessing, bool* fNewBlock)
3668 {
3669  AssertLockNotHeld(cs_main);
3670  assert(std::addressof(::ChainstateActive()) == std::addressof(ActiveChainstate()));
3671 
3672  {
3673  CBlockIndex *pindex = nullptr;
3674  if (fNewBlock) *fNewBlock = false;
3675  BlockValidationState state;
3676 
3677  // CheckBlock() does not support multi-threaded block validation because CBlock::fChecked can cause data race.
3678  // Therefore, the following critical section must include the CheckBlock() call as well.
3679  LOCK(cs_main);
3680 
3681  // Ensure that CheckBlock() passes before calling AcceptBlock, as
3682  // belt-and-suspenders.
3683  bool ret = CheckBlock(*pblock, state, chainparams.GetConsensus());
3684  if (ret) {
3685  // Store to disk
3686  ret = ActiveChainstate().AcceptBlock(pblock, state, chainparams, &pindex, fForceProcessing, nullptr, fNewBlock);
3687  }
3688  if (!ret) {
3689  GetMainSignals().BlockChecked(*pblock, state);
3690  return error("%s: AcceptBlock FAILED (%s)", __func__, state.ToString());
3691  }
3692  }
3693 
3694  NotifyHeaderTip(ActiveChainstate());
3695 
3696  BlockValidationState state; // Only used to report errors, not invalidity - ignore it
3697  if (!ActiveChainstate().ActivateBestChain(state, chainparams, pblock))
3698  return error("%s: ActivateBestChain failed (%s)", __func__, state.ToString());
3699 
3700  return true;
3701 }
3702 
3703 bool TestBlockValidity(BlockValidationState& state,
3704  const CChainParams& chainparams,
3705  CChainState& chainstate,
3706  const CBlock& block,
3707  CBlockIndex* pindexPrev,
3708  bool fCheckPOW,
3709  bool fCheckMerkleRoot)
3710 {
3711  AssertLockHeld(cs_main);
3712  assert(std::addressof(::ChainstateActive()) == std::addressof(chainstate));
3713  assert(pindexPrev && pindexPrev == chainstate.m_chain.Tip());
3714  CCoinsViewCache viewNew(&chainstate.CoinsTip());
3715  uint256 block_hash(block.GetHash());
3716  CBlockIndex indexDummy(block);
3717  indexDummy.pprev = pindexPrev;
3718  indexDummy.nHeight = pindexPrev->nHeight + 1;
3719  indexDummy.phashBlock = &block_hash;
3720 
3721  // NOTE: CheckBlockHeader is called by CheckBlock
3722  assert(std::addressof(g_chainman.m_blockman) == std::addressof(chainstate.m_blockman));
3723  if (!ContextualCheckBlockHeader(block, state, chainstate.m_blockman, chainparams, pindexPrev, GetAdjustedTime()))
3724  return error("%s: Consensus::ContextualCheckBlockHeader: %s", __func__, state.ToString());
3725  if (!CheckBlock(block, state, chainparams.GetConsensus(), fCheckPOW, fCheckMerkleRoot))
3726  return error("%s: Consensus::CheckBlock: %s", __func__, state.ToString());
3727  if (!ContextualCheckBlock(block, state, chainparams.GetConsensus(), pindexPrev))
3728  return error("%s: Consensus::ContextualCheckBlock: %s", __func__, state.ToString());
3729  if (!chainstate.ConnectBlock(block, state, &indexDummy, viewNew, chainparams, true))
3730  return false;
3731  assert(state.IsValid());
3732 
3733  return true;
3734 }
3735 
3736 /**
3737  * BLOCK PRUNING CODE
3738  */
3739 
3740 /* Calculate the amount of disk space the block & undo files currently use */
3741 uint64_t CalculateCurrentUsage()
3742 {
3743  LOCK(cs_LastBlockFile);
3744 
3745  uint64_t retval = 0;
3746  for (const CBlockFileInfo &file : vinfoBlockFile) {
3747  retval += file.nSize + file.nUndoSize;
3748  }
3749  return retval;
3750 }
3751 
3752 void BlockManager::PruneOneBlockFile(const int fileNumber)
3753 {
3754  AssertLockHeld(cs_main);
3755  LOCK(cs_LastBlockFile);
3756 
3757  for (const auto& entry : m_block_index) {
3758  CBlockIndex* pindex = entry.second;
3759  if (pindex->nFile == fileNumber) {
3760  pindex->nStatus &= ~BLOCK_HAVE_DATA;
3761  pindex->nStatus &= ~BLOCK_HAVE_UNDO;
3762  pindex->nFile = 0;
3763  pindex->nDataPos = 0;
3764  pindex->nUndoPos = 0;
3765  setDirtyBlockIndex.insert(pindex);
3766 
3767  // Prune from m_blocks_unlinked -- any block we prune would have
3768  // to be downloaded again in order to consider its chain, at which
3769  // point it would be considered as a candidate for
3770  // m_blocks_unlinked or setBlockIndexCandidates.
3771  auto range = m_blocks_unlinked.equal_range(pindex->pprev);
3772  while (range.first != range.second) {
3773  std::multimap<CBlockIndex *, CBlockIndex *>::iterator _it = range.first;
3774  range.first++;
3775  if (_it->second == pindex) {
3776  m_blocks_unlinked.erase(_it);
3777  }
3778  }
3779  }
3780  }
3781 
3782  vinfoBlockFile[fileNumber].SetNull();
3783  setDirtyFileInfo.insert(fileNumber);
3784 }
3785 
3786 
3787 void UnlinkPrunedFiles(const std::set<int>& setFilesToPrune)
3788 {
3789  for (std::set<int>::iterator it = setFilesToPrune.begin(); it != setFilesToPrune.end(); ++it) {
3790  FlatFilePos pos(*it, 0);
3791  fs::remove(BlockFileSeq().FileName(pos));
3792  fs::remove(UndoFileSeq().FileName(pos));
3793  LogPrintf("Prune: %s deleted blk/rev (%05u)\n", __func__, *it);
3794  }
3795 }
3796 
3797 void BlockManager::FindFilesToPruneManual(std::set<int>& setFilesToPrune, int nManualPruneHeight, int chain_tip_height)
3798 {
3799  assert(fPruneMode && nManualPruneHeight > 0);
3800 
3801  LOCK2(cs_main, cs_LastBlockFile);
3802  if (chain_tip_height < 0) {
3803  return;
3804  }
3805 
3806  // last block to prune is the lesser of (user-specified height, MIN_BLOCKS_TO_KEEP from the tip)
3807  unsigned int nLastBlockWeCanPrune = std::min((unsigned)nManualPruneHeight, chain_tip_height - MIN_BLOCKS_TO_KEEP);
3808  int count = 0;
3809  for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
3810  if (vinfoBlockFile[fileNumber].nSize == 0 || vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune) {
3811  continue;
3812  }
3813  PruneOneBlockFile(fileNumber);
3814  setFilesToPrune.insert(fileNumber);
3815  count++;
3816  }
3817  LogPrintf("Prune (Manual): prune_height=%d removed %d blk/rev pairs\n", nLastBlockWeCanPrune, count);
3818 }
3819 
3820 /* This function is called from the RPC code for pruneblockchain */
3821 void PruneBlockFilesManual(CChainState& active_chainstate, int nManualPruneHeight)
3822 {
3823  BlockValidationState state;
3824  const CChainParams& chainparams = Params();
3825  assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate));
3826  if (!active_chainstate.FlushStateToDisk(
3827  chainparams, state, FlushStateMode::NONE, nManualPruneHeight)) {
3828  LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString());
3829  }
3830 }
3831 
3832 void BlockManager::FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight, int chain_tip_height, int prune_height, bool is_ibd)
3833 {
3834  LOCK2(cs_main, cs_LastBlockFile);
3835  if (chain_tip_height < 0 || nPruneTarget == 0) {
3836  return;
3837  }
3838  if ((uint64_t)chain_tip_height <= nPruneAfterHeight) {
3839  return;
3840  }
3841 
3842  unsigned int nLastBlockWeCanPrune = std::min(prune_height, chain_tip_height - static_cast<int>(MIN_BLOCKS_TO_KEEP));
3843  uint64_t nCurrentUsage = CalculateCurrentUsage();
3844  // We don't check to prune until after we've allocated new space for files
3845  // So we should leave a buffer under our target to account for another allocation
3846  // before the next pruning.
3847  uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
3848  uint64_t nBytesToPrune;
3849  int count = 0;
3850 
3851  if (nCurrentUsage + nBuffer >= nPruneTarget) {
3852  // On a prune event, the chainstate DB is flushed.
3853  // To avoid excessive prune events negating the benefit of high dbcache
3854  // values, we should not prune too rapidly.
3855  // So when pruning in IBD, increase the buffer a bit to avoid a re-prune too soon.
3856  if (is_ibd) {
3857  // Since this is only relevant during IBD, we use a fixed 10%
3858  nBuffer += nPruneTarget / 10;
3859  }
3860 
3861  for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
3862  nBytesToPrune = vinfoBlockFile[fileNumber].nSize + vinfoBlockFile[fileNumber].nUndoSize;
3863 
3864  if (vinfoBlockFile[fileNumber].nSize == 0) {
3865  continue;
3866  }
3867 
3868  if (nCurrentUsage + nBuffer < nPruneTarget) { // are we below our target?
3869  break;
3870  }
3871 
3872  // don't prune files that could have a block within MIN_BLOCKS_TO_KEEP of the main chain's tip but keep scanning
3873  if (vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune) {
3874  continue;
3875  }
3876 
3877  PruneOneBlockFile(fileNumber);
3878  // Queue up the files for removal
3879  setFilesToPrune.insert(fileNumber);
3880  nCurrentUsage -= nBytesToPrune;
3881  count++;
3882  }
3883  }
3884 
3885  LogPrint(BCLog::PRUNE, "Prune: target=%dMiB actual=%dMiB diff=%dMiB max_prune_height=%d removed %d blk/rev pairs\n",
3886  nPruneTarget/1024/1024, nCurrentUsage/1024/1024,
3887  ((int64_t)nPruneTarget - (int64_t)nCurrentUsage)/1024/1024,
3888  nLastBlockWeCanPrune, count);
3889 }
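Pruning keeps a one-chunk buffer (BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE, roughly 17 MiB with the default chunk sizes) below the -prune target, plus an extra 10% of the target during IBD, before any files are deleted. A standalone sketch of that threshold arithmetic with made-up usage numbers (illustration only):

#include <cstdint>
#include <cstdio>

int main()
{
    const uint64_t MiB = 1024 * 1024;
    const uint64_t prune_target = 550 * MiB;  // e.g. -prune=550
    const uint64_t current_usage = 540 * MiB; // bytes used by blk*.dat + rev*.dat
    const bool is_ibd = true;

    uint64_t buffer = 16 * MiB + 1 * MiB;     // block chunk + undo chunk headroom
    if (is_ibd) buffer += prune_target / 10;  // avoid re-pruning too soon during IBD

    const bool must_prune = current_usage + buffer >= prune_target;
    std::printf("usage=%llu MiB buffer=%llu MiB target=%llu MiB -> prune now: %s\n",
                (unsigned long long)(current_usage / MiB), (unsigned long long)(buffer / MiB),
                (unsigned long long)(prune_target / MiB), must_prune ? "yes" : "no");
}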
3890 
3891 static FlatFileSeq BlockFileSeq()
3892 {
3893  return FlatFileSeq(gArgs.GetBlocksDirPath(), "blk", gArgs.GetBoolArg("-fastprune", false) ? 0x4000 /* 16kb */ : BLOCKFILE_CHUNK_SIZE);
3894 }
3895 
3896 static FlatFileSeq UndoFileSeq()
3897 {
3898  return FlatFileSeq(gArgs.GetBlocksDirPath(), "rev", UNDOFILE_CHUNK_SIZE);
3899 }
3900 
3901 FILE* OpenBlockFile(const FlatFilePos &pos, bool fReadOnly) {
3902  return BlockFileSeq().Open(pos, fReadOnly);
3903 }
3904 
3905 /** Open an undo file (rev?????.dat) */
3906 static FILE* OpenUndoFile(const FlatFilePos &pos, bool fReadOnly) {
3907  return UndoFileSeq().Open(pos, fReadOnly);
3908 }
3909 
3910 fs::path GetBlockPosFilename(const FlatFilePos &pos)
3911 {
3912  return BlockFileSeq().FileName(pos);
3913 }
3914 
3915 CBlockIndex* BlockManager::InsertBlockIndex(const uint256& hash)
3916 {
3917  AssertLockHeld(cs_main);
3918 
3919  if (hash.IsNull())
3920  return nullptr;
3921 
3922  // Return existing
3923  BlockMap::iterator mi = m_block_index.find(hash);
3924  if (mi != m_block_index.end())
3925  return (*mi).second;
3926 
3927  // Create new
3928  CBlockIndex* pindexNew = new CBlockIndex();
3929  mi = m_block_index.insert(std::make_pair(hash, pindexNew)).first;
3930  pindexNew->phashBlock = &((*mi).first);
3931 
3932  return pindexNew;
3933 }
3934 
3935 bool BlockManager::LoadBlockIndex(
3936  const Consensus::Params& consensus_params,
3937  CBlockTreeDB& blocktree,
3938  std::set<CBlockIndex*, CBlockIndexWorkComparator>& block_index_candidates)
3939 {
3940  if (!blocktree.LoadBlockIndexGuts(consensus_params, [this](const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return this->InsertBlockIndex(hash); }))
3941  return false;
3942 
3943  // Calculate nChainWork
3944  std::vector<std::pair<int, CBlockIndex*> > vSortedByHeight;
3945  vSortedByHeight.reserve(m_block_index.size());
3946  for (const std::pair<const uint256, CBlockIndex*>& item : m_block_index)
3947  {
3948  CBlockIndex* pindex = item.second;
3949  vSortedByHeight.push_back(std::make_pair(pindex->nHeight, pindex));
3950  }
3951  sort(vSortedByHeight.begin(), vSortedByHeight.end());
3952  for (const std::pair<int, CBlockIndex*>& item : vSortedByHeight)
3953  {
3954  if (ShutdownRequested()) return false;
3955  CBlockIndex* pindex = item.second;
3956  pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex);
3957  pindex->nTimeMax = (pindex->pprev ? std::max(pindex->pprev->nTimeMax, pindex->nTime) : pindex->nTime);
3958  // We can link the chain of blocks for which we've received transactions at some point.
3959  // Pruned nodes may have deleted the block.
3960  if (pindex->nTx > 0) {
3961  if (pindex->pprev) {
3962  if (pindex->pprev->HaveTxsDownloaded()) {
3963  pindex->nChainTx = pindex->pprev->nChainTx + pindex->nTx;
3964  } else {
3965  pindex->nChainTx = 0;
3966  m_blocks_unlinked.insert(std::make_pair(pindex->pprev, pindex));
3967  }
3968  } else {
3969  pindex->nChainTx = pindex->nTx;
3970  }
3971  }
3972  if (!(pindex->nStatus & BLOCK_FAILED_MASK) && pindex->pprev && (pindex->pprev->nStatus & BLOCK_FAILED_MASK)) {
3973  pindex->nStatus |= BLOCK_FAILED_CHILD;
3974  setDirtyBlockIndex.insert(pindex);
3975  }
3976  if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && (pindex->HaveTxsDownloaded() || pindex->pprev == nullptr)) {
3977  block_index_candidates.insert(pindex);
3978  }
3979  if (pindex->nStatus & BLOCK_FAILED_MASK && (!pindexBestInvalid || pindex->nChainWork > pindexBestInvalid->nChainWork))
3980  pindexBestInvalid = pindex;
3981  if (pindex->pprev)
3982  pindex->BuildSkip();
3983  if (pindex->IsValid(BLOCK_VALID_TREE) && (pindexBestHeader == nullptr || CBlockIndexWorkComparator()(pindexBestHeader, pindex)))
3984  pindexBestHeader = pindex;
3985  }
3986 
3987  return true;
3988 }
3989 
3990 void BlockManager::Unload() {
3991  m_failed_blocks.clear();
3992  m_blocks_unlinked.clear();
3993 
3994  for (const BlockMap::value_type& entry : m_block_index) {
3995  delete entry.second;
3996  }
3997 
3998  m_block_index.clear();
3999 }
4000 
4001 bool CChainState::LoadBlockIndexDB(const CChainParams& chainparams)
4002 {
4003  assert(std::addressof(::ChainstateActive()) == std::addressof(*this));
4004  if (!m_blockman.LoadBlockIndex(
4005  chainparams.GetConsensus(), *pblocktree,
4006  setBlockIndexCandidates)) {
4007  return false;
4008  }
4009 
4010  // Load block file info
4011  pblocktree->ReadLastBlockFile(nLastBlockFile);
4012  vinfoBlockFile.resize(nLastBlockFile + 1);
4013  LogPrintf("%s: last block file = %i\n", __func__, nLastBlockFile);
4014  for (int nFile = 0; nFile <= nLastBlockFile; nFile++) {
4015  pblocktree->ReadBlockFileInfo(nFile, vinfoBlockFile[nFile]);
4016  }
4017  LogPrintf("%s: last block file info: %s\n", __func__, vinfoBlockFile[nLastBlockFile].ToString());
4018  for (int nFile = nLastBlockFile + 1; true; nFile++) {
4019  CBlockFileInfo info;
4020  if (pblocktree->ReadBlockFileInfo(nFile, info)) {
4021  vinfoBlockFile.push_back(info);
4022  } else {
4023  break;
4024  }
4025  }
4026 
4027  // Check presence of blk files
4028  LogPrintf("Checking all blk files are present...\n");
4029  std::set<int> setBlkDataFiles;
4030  for (const std::pair<const uint256, CBlockIndex*>& item : m_blockman.m_block_index) {
4031  CBlockIndex* pindex = item.second;
4032  if (pindex->nStatus & BLOCK_HAVE_DATA) {
4033  setBlkDataFiles.insert(pindex->nFile);
4034  }
4035  }
4036  for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++)
4037  {
4038  FlatFilePos pos(*it, 0);
4039  if (CAutoFile(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION).IsNull()) {
4040  return false;
4041  }
4042  }
4043 
4044  // Check whether we have ever pruned block & undo files
4045  pblocktree->ReadFlag("prunedblockfiles", fHavePruned);
4046  if (fHavePruned)
4047  LogPrintf("LoadBlockIndexDB(): Block files have previously been pruned\n");
4048 
4049  // Check whether we need to continue reindexing
4050  bool fReindexing = false;
4051  pblocktree->ReadReindexing(fReindexing);
4052  if(fReindexing) fReindex = true;
4053 
4054  return true;
4055 }
4056 
4057 void CChainState::LoadMempool(const ArgsManager& args)
4058 {
4059  if (args.GetArg("-persistmempool", DEFAULT_PERSIST_MEMPOOL)) {
4060  assert(std::addressof(::ChainstateActive()) == std::addressof(*this));
4061  ::LoadMempool(m_mempool, *this);
4062  }
4063  m_mempool.SetIsLoaded(!ShutdownRequested());
4064 }
4065 
4066 bool CChainState::LoadChainTip(const CChainParams& chainparams)
4067 {
4068  AssertLockHeld(cs_main);
4069  const CCoinsViewCache& coins_cache = CoinsTip();
4070  assert(!coins_cache.GetBestBlock().IsNull()); // Never called when the coins view is empty
4071  const CBlockIndex* tip = m_chain.Tip();
4072 
4073  if (tip && tip->GetBlockHash() == coins_cache.GetBestBlock()) {
4074  return true;
4075  }
4076 
4077  // Load pointer to end of best chain
4078  CBlockIndex* pindex = m_blockman.LookupBlockIndex(coins_cache.GetBestBlock());
4079  if (!pindex) {
4080  return false;
4081  }
4082  m_chain.SetTip(pindex);
4083  PruneBlockIndexCandidates();
4084 
4085  tip = m_chain.Tip();
4086  LogPrintf("Loaded best chain: hashBestChain=%s height=%d date=%s progress=%f\n",
4087  tip->GetBlockHash().ToString(),
4088  m_chain.Height(),
4089  FormatISO8601DateTime(tip->GetBlockTime()),
4090  GuessVerificationProgress(chainparams.TxData(), tip));
4091  return true;
4092 }
4093 
4094 CVerifyDB::CVerifyDB()
4095 {
4096  uiInterface.ShowProgress(_("Verifying blocks...").translated, 0, false);
4097 }
4098 
4099 CVerifyDB::~CVerifyDB()
4100 {
4101  uiInterface.ShowProgress("", 100, false);
4102 }
4103 
4104 bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CChainState& active_chainstate, CCoinsView *coinsview, int nCheckLevel, int nCheckDepth)
4105 {
4107 
4108  assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate));
4109  if (active_chainstate.m_chain.Tip() == nullptr || active_chainstate.m_chain.Tip()->pprev == nullptr)
4110  return true;
4111 
4112  // Verify blocks in the best chain
4113  if (nCheckDepth <= 0 || nCheckDepth > active_chainstate.m_chain.Height())
4114  nCheckDepth = active_chainstate.m_chain.Height();
4115  nCheckLevel = std::max(0, std::min(4, nCheckLevel));
4116  LogPrintf("Verifying last %i blocks at level %i\n", nCheckDepth, nCheckLevel);
4117  CCoinsViewCache coins(coinsview);
4118  CBlockIndex* pindex;
4119  CBlockIndex* pindexFailure = nullptr;
4120  int nGoodTransactions = 0;
4121  BlockValidationState state;
4122  int reportDone = 0;
4123  LogPrintf("[0%%]..."); /* Continued */
4124  for (pindex = active_chainstate.m_chain.Tip(); pindex && pindex->pprev; pindex = pindex->pprev) {
4125  const int percentageDone = std::max(1, std::min(99, (int)(((double)(active_chainstate.m_chain.Height() - pindex->nHeight)) / (double)nCheckDepth * (nCheckLevel >= 4 ? 50 : 100))));
4126  if (reportDone < percentageDone/10) {
4127  // report every 10% step
4128  LogPrintf("[%d%%]...", percentageDone); /* Continued */
4129  reportDone = percentageDone/10;
4130  }
4131  uiInterface.ShowProgress(_("Verifying blocks...").translated, percentageDone, false);
4132  if (pindex->nHeight <= active_chainstate.m_chain.Height()-nCheckDepth)
4133  break;
4134  if (fPruneMode && !(pindex->nStatus & BLOCK_HAVE_DATA)) {
4135  // If pruning, only go back as far as we have data.
4136  LogPrintf("VerifyDB(): block verification stopping at height %d (pruning, no data)\n", pindex->nHeight);
4137  break;
4138  }
4139  CBlock block;
4140  // check level 0: read from disk
4141  if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus()))
4142  return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
4143  // check level 1: verify block validity
4144  if (nCheckLevel >= 1 && !CheckBlock(block, state, chainparams.GetConsensus()))
4145  return error("%s: *** found bad block at %d, hash=%s (%s)\n", __func__,
4146  pindex->nHeight, pindex->GetBlockHash().ToString(), state.ToString());
4147  // check level 2: verify undo validity
4148  if (nCheckLevel >= 2 && pindex) {
4149  CBlockUndo undo;
4150  if (!pindex->GetUndoPos().IsNull()) {
4151  if (!UndoReadFromDisk(undo, pindex)) {
4152  return error("VerifyDB(): *** found bad undo data at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
4153  }
4154  }
4155  }
4156  // check level 3: check for inconsistencies during memory-only disconnect of tip blocks
4157  if (nCheckLevel >= 3 && (coins.DynamicMemoryUsage() + active_chainstate.CoinsTip().DynamicMemoryUsage()) <= active_chainstate.m_coinstip_cache_size_bytes) {
4158  assert(coins.GetBestBlock() == pindex->GetBlockHash());
4159  DisconnectResult res = active_chainstate.DisconnectBlock(block, pindex, coins);
4160  if (res == DISCONNECT_FAILED) {
4161  return error("VerifyDB(): *** irrecoverable inconsistency in block data at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
4162  }
4163  if (res == DISCONNECT_UNCLEAN) {
4164  nGoodTransactions = 0;
4165  pindexFailure = pindex;
4166  } else {
4167  nGoodTransactions += block.vtx.size();
4168  }
4169  }
4170  if (ShutdownRequested()) return true;
4171  }
4172  if (pindexFailure)
4173  return error("VerifyDB(): *** coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", active_chainstate.m_chain.Height() - pindexFailure->nHeight + 1, nGoodTransactions);
4174 
4175  // store block count as we move pindex at check level >= 4
4176  int block_count = active_chainstate.m_chain.Height() - pindex->nHeight;
4177 
4178  // check level 4: try reconnecting blocks
4179  if (nCheckLevel >= 4) {
4180  while (pindex != active_chainstate.m_chain.Tip()) {
4181  const int percentageDone = std::max(1, std::min(99, 100 - (int)(((double)(active_chainstate.m_chain.Height() - pindex->nHeight)) / (double)nCheckDepth * 50)));
4182  if (reportDone < percentageDone/10) {
4183  // report every 10% step
4184  LogPrintf("[%d%%]...", percentageDone); /* Continued */
4185  reportDone = percentageDone/10;
4186  }
4187  uiInterface.ShowProgress(_("Verifying blocks...").translated, percentageDone, false);
4188  pindex = active_chainstate.m_chain.Next(pindex);
4189  CBlock block;
4190  if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus()))
4191  return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
4192  if (!active_chainstate.ConnectBlock(block, state, pindex, coins, chainparams))
4193  return error("VerifyDB(): *** found unconnectable block at %d, hash=%s (%s)", pindex->nHeight, pindex->GetBlockHash().ToString(), state.ToString());
4194  if (ShutdownRequested()) return true;
4195  }
4196  }
4197 
4198  LogPrintf("[DONE].\n");
4199  LogPrintf("No coin database inconsistencies in last %i blocks (%i transactions)\n", block_count, nGoodTransactions);
4200 
4201  return true;
4202 }
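
// A minimal sketch of driving the checks above with -checklevel/-checkblocks
// style settings (the wiring shown here is illustrative, not a copy of the
// node's start-up code):
//
//     const int check_level{(int)gArgs.GetArg("-checklevel", DEFAULT_CHECKLEVEL)};
//     const int check_blocks{(int)gArgs.GetArg("-checkblocks", DEFAULT_CHECKBLOCKS)};
//     if (!CVerifyDB().VerifyDB(chainparams, ::ChainstateActive(),
//                               &::ChainstateActive().CoinsDB(),
//                               check_level, check_blocks)) {
//         // treat the block database as corrupted (e.g. suggest -reindex)
//     }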
4203 
4204 /** Apply the effects of a block on the utxo cache, ignoring that it may already have been applied. */
4205 bool CChainState::RollforwardBlock(const CBlockIndex* pindex, CCoinsViewCache& inputs, const CChainParams& params)
4206 {
4207  // TODO: merge with ConnectBlock
4208  CBlock block;
4209  if (!ReadBlockFromDisk(block, pindex, params.GetConsensus())) {
4210  return error("ReplayBlock(): ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
4211  }
4212 
4213  for (const CTransactionRef& tx : block.vtx) {
4214  if (!tx->IsCoinBase()) {
4215  for (const CTxIn &txin : tx->vin) {
4216  inputs.SpendCoin(txin.prevout);
4217  }
4218  }
4219  // Pass check = true as every addition may be an overwrite.
4220  AddCoins(inputs, *tx, pindex->nHeight, true);
4221  }
4222  return true;
4223 }
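
// Note that AddCoins() is called above with check=true, so an output that
// already made it into the UTXO set before an interrupted flush is silently
// overwritten rather than treated as an inconsistency. A minimal sketch of
// that idempotent pattern for a single non-coinbase transaction (assuming a
// CCoinsViewCache `view`, a CTransaction `tx` and an int `height`; illustrative only):
//
//     for (const CTxIn& in : tx.vin) view.SpendCoin(in.prevout); // no-op if already gone
//     AddCoins(view, tx, height, /*check=*/true);                // overwrite allowed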
4224 
4225 bool CChainState::ReplayBlocks(const CChainParams& params)
4226 {
4227  LOCK(cs_main);
4228 
4229  CCoinsView& db = this->CoinsDB();
4230  CCoinsViewCache cache(&db);
4231 
4232  std::vector<uint256> hashHeads = db.GetHeadBlocks();
4233  if (hashHeads.empty()) return true; // We're already in a consistent state.
4234  if (hashHeads.size() != 2) return error("ReplayBlocks(): unknown inconsistent state");
4235 
4236  uiInterface.ShowProgress(_("Replaying blocks...").translated, 0, false);
4237  LogPrintf("Replaying blocks\n");
4238 
4239  const CBlockIndex* pindexOld = nullptr; // Old tip during the interrupted flush.
4240  const CBlockIndex* pindexNew; // New tip during the interrupted flush.
4241  const CBlockIndex* pindexFork = nullptr; // Latest block common to both the old and the new tip.
4242 
4243  if (m_blockman.m_block_index.count(hashHeads[0]) == 0) {
4244  return error("ReplayBlocks(): reorganization to unknown block requested");
4245  }
4246  pindexNew = m_blockman.m_block_index[hashHeads[0]];
4247 
4248  if (!hashHeads[1].IsNull()) { // The old tip is allowed to be 0, indicating it's the first flush.
4249  if (m_blockman.m_block_index.count(hashHeads[1]) == 0) {
4250  return error("ReplayBlocks(): reorganization from unknown block requested");
4251  }
4252  pindexOld = m_blockman.m_block_index[hashHeads[1]];
4253  pindexFork = LastCommonAncestor(pindexOld, pindexNew);
4254  assert(pindexFork != nullptr);
4255  }
4256 
4257  // Rollback along the old branch.
4258  while (pindexOld != pindexFork) {
4259  if (pindexOld->nHeight > 0) { // Never disconnect the genesis block.
4260  CBlock block;
4261  if (!ReadBlockFromDisk(block, pindexOld, params.GetConsensus())) {
4262  return error("RollbackBlock(): ReadBlockFromDisk() failed at %d, hash=%s", pindexOld->nHeight, pindexOld->GetBlockHash().ToString());
4263  }
4264  LogPrintf("Rolling back %s (%i)\n", pindexOld->GetBlockHash().ToString(), pindexOld->nHeight);
4265  DisconnectResult res = DisconnectBlock(block, pindexOld, cache);
4266  if (res == DISCONNECT_FAILED) {
4267  return error("RollbackBlock(): DisconnectBlock failed at %d, hash=%s", pindexOld->nHeight, pindexOld->GetBlockHash().ToString());
4268  }
4269  // If DISCONNECT_UNCLEAN is returned, it means a non-existing UTXO was deleted, or an existing UTXO was
4270  // overwritten. It corresponds to cases where the block-to-be-disconnected never had all its operations
4271  // applied to the UTXO set. However, as both writing a UTXO and deleting a UTXO are idempotent operations,
4272  // the result is still a version of the UTXO set with the effects of that block undone.
4273  }
4274  pindexOld = pindexOld->pprev;
4275  }
4276 
4277  // Roll forward from the forking point to the new tip.
4278  int nForkHeight = pindexFork ? pindexFork->nHeight : 0;
4279  for (int nHeight = nForkHeight + 1; nHeight <= pindexNew->nHeight; ++nHeight) {
4280  const CBlockIndex* pindex = pindexNew->GetAncestor(nHeight);
4281  LogPrintf("Rolling forward %s (%i)\n", pindex->GetBlockHash().ToString(), nHeight);
4282  uiInterface.ShowProgress(_("Replaying blocks...").translated, (int) ((nHeight - nForkHeight) * 100.0 / (pindexNew->nHeight - nForkHeight)) , false);
4283  if (!RollforwardBlock(pindex, cache, params)) return false;
4284  }
4285 
4286  cache.SetBestBlock(pindexNew->GetBlockHash());
4287  cache.Flush();
4288  uiInterface.ShowProgress("", 100, false);
4289  return true;
4290 }
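
// A minimal sketch of how a fork point such as pindexFork is found: bring both
// tips to the same height with GetAncestor(), then walk back in lockstep until
// they meet (LastCommonAncestor() in chain.cpp follows essentially this shape):
//
//     const CBlockIndex* a = pindexOld;
//     const CBlockIndex* b = pindexNew;
//     if (a->nHeight > b->nHeight) a = a->GetAncestor(b->nHeight);
//     else if (b->nHeight > a->nHeight) b = b->GetAncestor(a->nHeight);
//     while (a != b && a && b) { a = a->pprev; b = b->pprev; }
//     const CBlockIndex* fork = a; // equals b, or nullptr for disjoint trees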
4291 
4293 void CChainState::EraseBlockData(CBlockIndex* index)
4294 {
4296  assert(!m_chain.Contains(index)); // Make sure this block isn't active
4297 
4298  // Reduce validity
4299  index->nStatus = std::min<unsigned int>(index->nStatus & BLOCK_VALID_MASK, BLOCK_VALID_TREE) | (index->nStatus & ~BLOCK_VALID_MASK);
4300  // Remove have-data flags.
4301  index->nStatus &= ~(BLOCK_HAVE_DATA | BLOCK_HAVE_UNDO);
4302  // Remove storage location.
4303  index->nFile = 0;
4304  index->nDataPos = 0;
4305  index->nUndoPos = 0;
4306  // Remove various other things
4307  index->nTx = 0;
4308  index->nChainTx = 0;
4309  index->nSequenceId = 0;
4310  // Make sure it gets written.
4311  setDirtyBlockIndex.insert(index);
4312  // Update indexes
4313  setBlockIndexCandidates.erase(index);
4314  auto ret = m_blockman.m_blocks_unlinked.equal_range(index->pprev);
4315  while (ret.first != ret.second) {
4316  if (ret.first->second == index) {
4317  m_blockman.m_blocks_unlinked.erase(ret.first++);
4318  } else {
4319  ++ret.first;
4320  }
4321  }
4322  // Mark parent as eligible for main chain again
4323  if (index->pprev && index->pprev->IsValid(BLOCK_VALID_TRANSACTIONS) && index->pprev->HaveTxsDownloaded()) {
4324  setBlockIndexCandidates.insert(index->pprev);
4325  }
4326 }
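
// The status arithmetic at the top of EraseBlockData() caps the validity level
// at BLOCK_VALID_TREE while leaving the non-validity bits alone. A worked
// example with illustrative input flags:
//
//     unsigned int status = BLOCK_VALID_SCRIPTS | BLOCK_HAVE_DATA | BLOCK_HAVE_UNDO;
//     status = std::min<unsigned int>(status & BLOCK_VALID_MASK, BLOCK_VALID_TREE) |
//              (status & ~BLOCK_VALID_MASK);
//     // The validity bits now read BLOCK_VALID_TREE; HAVE_DATA/HAVE_UNDO survive
//     // this step and are cleared explicitly on the following line above.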
4327 
4328 bool CChainState::RewindBlockIndex(const CChainParams& params)
4329 {
4330  // Note that during -reindex-chainstate we are called with an empty m_chain!
4331 
4332  // First erase all post-segwit blocks without witness not in the main chain,
4333  // as this can be done without costly DisconnectTip calls. Active
4334  // blocks will be dealt with below (releasing cs_main in between).
4335  {
4336  LOCK(cs_main);
4337  for (const auto& entry : m_blockman.m_block_index) {
4338  if (IsWitnessEnabled(entry.second->pprev, params.GetConsensus()) && !(entry.second->nStatus & BLOCK_OPT_WITNESS) && !m_chain.Contains(entry.second)) {
4339  EraseBlockData(entry.second);
4340  }
4341  }
4342  }
4343 
4344  // Find what height we need to reorganize to.
4345  CBlockIndex *tip;
4346  int nHeight = 1;
4347  {
4348  LOCK(cs_main);
4349  while (nHeight <= m_chain.Height()) {
4350  // Although SCRIPT_VERIFY_WITNESS is now generally enforced on all
4351  // blocks in ConnectBlock, we don't need to go back and
4352  // re-download/re-verify blocks from before segwit actually activated.
4353  if (IsWitnessEnabled(m_chain[nHeight - 1], params.GetConsensus()) && !(m_chain[nHeight]->nStatus & BLOCK_OPT_WITNESS)) {
4354  break;
4355  }
4356  nHeight++;
4357  }
4358 
4359  tip = m_chain.Tip();
4360  }
4361  // nHeight is now the height of the first insufficiently-validated block, or the tip height + 1
4362 
4363  BlockValidationState state;
4364  // Loop until the tip is below nHeight, or we reach a pruned block.
4365  while (!ShutdownRequested()) {
4366  {
4367  LOCK(cs_main);
4368  LOCK(m_mempool.cs);
4369  // Make sure nothing changed out from under us (this won't happen because RewindBlockIndex runs before importing/network are active)
4370  assert(tip == m_chain.Tip());
4371  if (tip == nullptr || tip->nHeight < nHeight) break;
4372  if (fPruneMode && !(tip->nStatus & BLOCK_HAVE_DATA)) {
4373  // If pruning, don't try rewinding past the HAVE_DATA point;
4374  // since older blocks can't be served anyway, there's
4375  // no need to walk further, and trying to DisconnectTip()
4376  // will fail (and require a needless reindex/redownload
4377  // of the blockchain).
4378  break;
4379  }
4380 
4381  // Disconnect block
4382  if (!DisconnectTip(state, params, nullptr)) {
4383  return error("RewindBlockIndex: unable to disconnect block at height %i (%s)", tip->nHeight, state.ToString());
4384  }
4385 
4386  // Reduce validity flag and have-data flags.
4387  // We do this after actual disconnecting, otherwise we'll end up writing the lack of data
4388  // to disk before writing the chainstate, resulting in a failure to continue if interrupted.
4389  // Note: If we encounter an insufficiently validated block that
4390  // is on m_chain, it must be because we are a pruning node, and
4391  // this block or some successor doesn't HAVE_DATA, so we were unable to
4392  // rewind all the way. Blocks remaining on m_chain at this point
4393  // must not have their validity reduced.
4394  EraseBlockData(tip);
4395 
4396  tip = tip->pprev;
4397  }
4398  // Make sure the queue of validation callbacks doesn't grow unboundedly.
4399  LimitValidationInterfaceQueue();
4400 
4401  // Occasionally flush state to disk.
4402  if (!FlushStateToDisk(params, state, FlushStateMode::PERIODIC)) {
4403  LogPrintf("RewindBlockIndex: unable to flush state to disk (%s)\n", state.ToString());
4404  return false;
4405  }
4406  }
4407 
4408  {
4409  LOCK(cs_main);
4410  if (m_chain.Tip() != nullptr) {
4411  // We can't prune block index candidates based on our tip if we have
4412  // no tip due to m_chain being empty!
4413  PruneBlockIndexCandidates();
4414 
4415  CheckBlockIndex(params.GetConsensus());
4416 
4417  // FlushStateToDisk can possibly read ::ChainActive(). Be conservative
4418  // and skip it here, we're about to -reindex-chainstate anyway, so
4419  // it'll get called a bunch real soon.
4420  BlockValidationState state;
4421  if (!FlushStateToDisk(params, state, FlushStateMode::ALWAYS)) {
4422  LogPrintf("RewindBlockIndex: unable to flush state to disk (%s)\n", state.ToString());
4423  return false;
4424  }
4425  }
4426  }
4427 
4428  return true;
4429 }
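
// A minimal sketch of the predicate used above to decide whether an indexed
// block needs to be re-downloaded and re-validated after segwit activation
// (an illustrative helper, not part of this file; `params` is the CChainParams
// argument of RewindBlockIndex):
//
//     const auto NeedsRewind = [&](const CBlockIndex* pindex) {
//         return IsWitnessEnabled(pindex->pprev, params.GetConsensus()) &&
//                !(pindex->nStatus & BLOCK_OPT_WITNESS);
//     };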
4430 
4431 void CChainState::UnloadBlockIndex() {
4432  nBlockSequenceId = 1;
4433  setBlockIndexCandidates.clear();
4434 }
4435 
4436 // May NOT be used after any connections are up, as much
4437 // of the peer-processing logic assumes a consistent
4438 // block index state
4439 void UnloadBlockIndex(CTxMemPool* mempool, ChainstateManager& chainman)
4440 {
4441  LOCK(cs_main);
4442  chainman.Unload();
4443  pindexBestInvalid = nullptr;
4444  pindexBestHeader = nullptr;
4445  if (mempool) mempool->clear();
4446  vinfoBlockFile.clear();
4447  nLastBlockFile = 0;
4448  setDirtyBlockIndex.clear();
4449  setDirtyFileInfo.clear();
4450  versionbitscache.Clear();
4451  for (int b = 0; b < VERSIONBITS_NUM_BITS; b++) {
4452  warningcache[b].clear();
4453  }
4454  fHavePruned = false;
4455 }
4456 
4457 bool ChainstateManager::LoadBlockIndex(const CChainParams& chainparams)
4458 {
4460  // Load block index from databases
4461  bool needs_init = fReindex;
4462  if (!fReindex) {
4463  bool ret = ActiveChainstate().LoadBlockIndexDB(chainparams);
4464  if (!ret) return false;
4465  needs_init = m_blockman.m_block_index.empty();
4466  }
4467 
4468  if (needs_init) {
4469  // Everything here is for *new* reindex/DBs. Thus, though
4470  // LoadBlockIndexDB may have set fReindex if we shut down
4471  // mid-reindex previously, we don't check fReindex and
4472  // instead only check it prior to LoadBlockIndexDB to set
4473  // needs_init.
4474 
4475  LogPrintf("Initializing databases...\n");
4476  }
4477  return true;
4478 }
4479 
4480 bool CChainState::LoadGenesisBlock(const CChainParams& chainparams)
4481 {
4482  LOCK(cs_main);
4483 
4484  // Check whether we're already initialized by checking for genesis in
4485  // m_blockman.m_block_index. Note that we can't use m_chain here, since it is
4486  // set based on the coins db, not the block index db, which is the only
4487  // thing loaded at this point.
4488  if (m_blockman.m_block_index.count(chainparams.GenesisBlock().GetHash()))
4489  return true;
4490 
4491  assert(std::addressof(::ChainActive()) == std::addressof(m_chain));
4492  try {
4493  const CBlock& block = chainparams.GenesisBlock();
4494  FlatFilePos blockPos = SaveBlockToDisk(block, 0, m_chain, chainparams, nullptr);
4495  if (blockPos.IsNull())
4496  return error("%s: writing genesis block to disk failed", __func__);
4497  CBlockIndex *pindex = m_blockman.AddToBlockIndex(block);
4498  ReceivedBlockTransactions(block, pindex, blockPos, chainparams.GetConsensus());
4499  } catch (const std::runtime_error& e) {
4500  return error("%s: failed to write genesis block: %s", __func__, e.what());
4501  }
4502 
4503  return true;
4504 }
4505 
4506 void CChainState::LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, FlatFilePos* dbp)
4507 {
4508  // Map of disk positions for blocks with unknown parent (only used for reindex)
4509  static std::multimap<uint256, FlatFilePos> mapBlocksUnknownParent;
4510  int64_t nStart = GetTimeMillis();
4511 
4512  int nLoaded = 0;
4513  try {
4514  // This takes over fileIn and calls fclose() on it in the CBufferedFile destructor
4515  CBufferedFile blkdat(fileIn, 2*MAX_BLOCK_SERIALIZED_SIZE, MAX_BLOCK_SERIALIZED_SIZE+8, SER_DISK, CLIENT_VERSION);
4516  uint64_t nRewind = blkdat.GetPos();
4517  while (!blkdat.eof()) {
4518  if (ShutdownRequested()) return;
4519 
4520  blkdat.SetPos(nRewind);
4521  nRewind++; // start one byte further next time, in case of failure
4522  blkdat.SetLimit(); // remove former limit
4523  unsigned int nSize = 0;
4524  try {
4525  // locate a header
4526  unsigned char buf[CMessageHeader::MESSAGE_START_SIZE];
4527  blkdat.FindByte(chainparams.MessageStart()[0]);
4528  nRewind = blkdat.GetPos()+1;
4529  blkdat >> buf;
4530  if (memcmp(buf, chainparams.MessageStart(), CMessageHeader::MESSAGE_START_SIZE))
4531  continue;
4532  // read size
4533  blkdat >> nSize;
4534  if (nSize < 80 || nSize > MAX_BLOCK_SERIALIZED_SIZE)
4535  continue;
4536  } catch (const std::exception&) {
4537  // no valid block header found; don't complain
4538  break;
4539  }
4540  try {
4541  // read block
4542  uint64_t nBlockPos = blkdat.GetPos();
4543  if (dbp)
4544  dbp->nPos = nBlockPos;
4545  blkdat.SetLimit(nBlockPos + nSize);
4546  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
4547  CBlock& block = *pblock;
4548  blkdat >> block;
4549  nRewind = blkdat.GetPos();
4550 
4551  uint256 hash = block.GetHash();
4552  {
4553  LOCK(cs_main);
4554  // detect out of order blocks, and store them for later
4555  assert(std::addressof(g_chainman.m_blockman) == std::addressof(m_blockman));
4556  if (hash != chainparams.GetConsensus().hashGenesisBlock && !m_blockman.LookupBlockIndex(block.hashPrevBlock)) {
4557  LogPrint(BCLog::REINDEX, "%s: Out of order block %s, parent %s not known\n", __func__, hash.ToString(),
4558  block.hashPrevBlock.ToString());
4559  if (dbp)
4560  mapBlocksUnknownParent.insert(std::make_pair(block.hashPrevBlock, *dbp));
4561  continue;
4562  }
4563 
4564  // process in case the block isn't known yet
4565  assert(std::addressof(g_chainman.m_blockman) == std::addressof(m_blockman));
4566  CBlockIndex* pindex = m_blockman.LookupBlockIndex(hash);
4567  if (!pindex || (pindex->nStatus & BLOCK_HAVE_DATA) == 0) {
4568  BlockValidationState state;
4569  assert(std::addressof(::ChainstateActive()) == std::addressof(*this));
4570  if (AcceptBlock(pblock, state, chainparams, nullptr, true, dbp, nullptr)) {
4571  nLoaded++;
4572  }
4573  if (state.IsError()) {
4574  break;
4575  }
4576  } else if (hash != chainparams.GetConsensus().hashGenesisBlock && pindex->nHeight % 1000 == 0) {
4577  LogPrint(BCLog::REINDEX, "Block Import: already had block %s at height %d\n", hash.ToString(), pindex->nHeight);
4578  }
4579  }
4580 
4581  // Activate the genesis block so normal node progress can continue
4582  if (hash == chainparams.GetConsensus().hashGenesisBlock) {
4583  BlockValidationState state;
4584  assert(std::addressof(::ChainstateActive()) == std::addressof(*this));
4585  if (!ActivateBestChain(state, chainparams, nullptr)) {
4586  break;
4587  }
4588  }
4589 
4590  assert(std::addressof(::ChainstateActive()) == std::addressof(*this));
4591  NotifyHeaderTip(*this);
4592 
4593  // Recursively process earlier encountered successors of this block
4594  std::deque<uint256> queue;
4595  queue.push_back(hash);
4596  while (!queue.empty()) {
4597  uint256 head = queue.front();
4598  queue.pop_front();
4599  std::pair<std::multimap<uint256, FlatFilePos>::iterator, std::multimap<uint256, FlatFilePos>::iterator> range = mapBlocksUnknownParent.equal_range(head);
4600  while (range.first != range.second) {
4601  std::multimap<uint256, FlatFilePos>::iterator it = range.first;
4602  std::shared_ptr<CBlock> pblockrecursive = std::make_shared<CBlock>();
4603  if (ReadBlockFromDisk(*pblockrecursive, it->second, chainparams.GetConsensus()))
4604  {
4605  LogPrint(BCLog::REINDEX, "%s: Processing out of order child %s of %s\n", __func__, pblockrecursive->GetHash().ToString(),
4606  head.ToString());
4607  LOCK(cs_main);
4608  BlockValidationState dummy;
4609  assert(std::addressof(::ChainstateActive()) == std::addressof(*this));
4610  if (AcceptBlock(pblockrecursive, dummy, chainparams, nullptr, true, &it->second, nullptr))
4611  {
4612  nLoaded++;
4613  queue.push_back(pblockrecursive->GetHash());
4614  }
4615  }
4616  range.first++;
4617  mapBlocksUnknownParent.erase(it);
4618  assert(std::addressof(::ChainstateActive()) == std::addressof(*this));
4619  NotifyHeaderTip(*this);
4620  }
4621  }
4622  } catch (const std::exception& e) {
4623  LogPrintf("%s: Deserialize or I/O error - %s\n", __func__, e.what());
4624  }
4625  }
4626  } catch (const std::runtime_error& e) {
4627  AbortNode(std::string("System error: ") + e.what());
4628  }
4629  LogPrintf("Loaded %i blocks from external file in %dms\n", nLoaded, GetTimeMillis() - nStart);
4630 }
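
// The out-of-order handling above reduces to: park blocks whose parent is not
// yet known in a multimap keyed by the missing parent's hash, and whenever a
// block is accepted, walk a queue to connect any children that were waiting on
// it. A compact standalone sketch of that pattern (names are placeholders):
//
//     std::multimap<uint256, FlatFilePos> pending; // missing parent -> disk position
//     std::deque<uint256> work{accepted_hash};
//     while (!work.empty()) {
//         const uint256 parent = work.front(); work.pop_front();
//         auto range = pending.equal_range(parent);
//         for (auto it = range.first; it != range.second; it = pending.erase(it)) {
//             // read the child block stored at it->second, try to accept it, and
//             // push its hash so that grandchildren waiting on it get processed too
//         }
//     }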
4631 
4632 void CChainState::CheckBlockIndex(const Consensus::Params& consensusParams)
4633 {
4634  if (!fCheckBlockIndex) {
4635  return;
4636  }
4637 
4638  LOCK(cs_main);
4639 
4640  // During a reindex, we read the genesis block and call CheckBlockIndex before ActivateBestChain,
4641  // so we have the genesis block in m_blockman.m_block_index but no active chain. (A few of the
4642  // tests when iterating the block tree require that m_chain has been initialized.)
4643  if (m_chain.Height() < 0) {
4644  assert(m_blockman.m_block_index.size() <= 1);
4645  return;
4646  }
4647 
4648  // Build forward-pointing map of the entire block tree.
4649  std::multimap<CBlockIndex*,CBlockIndex*> forward;
4650  for (const std::pair<const uint256, CBlockIndex*>& entry : m_blockman.m_block_index) {
4651  forward.insert(std::make_pair(entry.second->pprev, entry.second));
4652  }
4653 
4654  assert(forward.size() == m_blockman.m_block_index.size());
4655 
4656  std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeGenesis = forward.equal_range(nullptr);
4657  CBlockIndex *pindex = rangeGenesis.first->second;
4658  rangeGenesis.first++;
4659  assert(rangeGenesis.first == rangeGenesis.second); // There is only one index entry with parent nullptr.
4660 
4661  // Iterate over the entire block tree, using depth-first search.
4662  // Along the way, remember whether there are blocks on the path from the genesis
4663  // block to the block being explored which are the first to have certain properties.
4664  size_t nNodes = 0;
4665  int nHeight = 0;
4666  CBlockIndex* pindexFirstInvalid = nullptr; // Oldest ancestor of pindex which is invalid.
4667  CBlockIndex* pindexFirstMissing = nullptr; // Oldest ancestor of pindex which does not have BLOCK_HAVE_DATA.
4668  CBlockIndex* pindexFirstNeverProcessed = nullptr; // Oldest ancestor of pindex for which nTx == 0.
4669  CBlockIndex* pindexFirstNotTreeValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_TREE (regardless of being valid or not).
4670  CBlockIndex* pindexFirstNotTransactionsValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS (regardless of being valid or not).
4671  CBlockIndex* pindexFirstNotChainValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN (regardless of being valid or not).
4672  CBlockIndex* pindexFirstNotScriptsValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS (regardless of being valid or not).
4673  while (pindex != nullptr) {
4674  nNodes++;
4675  if (pindexFirstInvalid == nullptr && pindex->nStatus & BLOCK_FAILED_VALID) pindexFirstInvalid = pindex;
4676  if (pindexFirstMissing == nullptr && !(pindex->nStatus & BLOCK_HAVE_DATA)) pindexFirstMissing = pindex;
4677  if (pindexFirstNeverProcessed == nullptr && pindex->nTx == 0) pindexFirstNeverProcessed = pindex;
4678  if (pindex->pprev != nullptr && pindexFirstNotTreeValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TREE) pindexFirstNotTreeValid = pindex;
4679  if (pindex->pprev != nullptr && pindexFirstNotTransactionsValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TRANSACTIONS) pindexFirstNotTransactionsValid = pindex;
4680  if (pindex->pprev != nullptr && pindexFirstNotChainValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_CHAIN) pindexFirstNotChainValid = pindex;
4681  if (pindex->pprev != nullptr && pindexFirstNotScriptsValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_SCRIPTS) pindexFirstNotScriptsValid = pindex;
4682 
4683  // Begin: actual consistency checks.
4684  if (pindex->pprev == nullptr) {
4685  // Genesis block checks.
4686  assert(pindex->GetBlockHash() == consensusParams.hashGenesisBlock); // Genesis block's hash must match.
4687  assert(pindex == m_chain.Genesis()); // The current active chain's genesis block must be this block.
4688  }
4689  if (!pindex->HaveTxsDownloaded()) assert(pindex->nSequenceId <= 0); // nSequenceId can't be set positive for blocks that aren't linked (negative is used for preciousblock)
4690  // VALID_TRANSACTIONS is equivalent to nTx > 0 for all nodes (whether or not pruning has occurred).
4691  // HAVE_DATA is only equivalent to nTx > 0 (or VALID_TRANSACTIONS) if no pruning has occurred.
4692  if (!fHavePruned) {
4693  // If we've never pruned, then HAVE_DATA should be equivalent to nTx > 0
4694  assert(!(pindex->nStatus & BLOCK_HAVE_DATA) == (pindex->nTx == 0));
4695  assert(pindexFirstMissing == pindexFirstNeverProcessed);
4696  } else {
4697  // If we have pruned, then we can only say that HAVE_DATA implies nTx > 0
4698  if (pindex->nStatus & BLOCK_HAVE_DATA) assert(pindex->nTx > 0);
4699  }
4700  if (pindex->nStatus & BLOCK_HAVE_UNDO) assert(pindex->nStatus & BLOCK_HAVE_DATA);
4701  assert(((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TRANSACTIONS) == (pindex->nTx > 0)); // This is pruning-independent.
4702  // All parents having had data (at some point) is equivalent to all parents being VALID_TRANSACTIONS, which is equivalent to HaveTxsDownloaded().
4703  assert((pindexFirstNeverProcessed == nullptr) == pindex->HaveTxsDownloaded());
4704  assert((pindexFirstNotTransactionsValid == nullptr) == pindex->HaveTxsDownloaded());
4705  assert(pindex->nHeight == nHeight); // nHeight must be consistent.
4706  assert(pindex->pprev == nullptr || pindex->nChainWork >= pindex->pprev->nChainWork); // For every block except the genesis block, the chainwork must be larger than the parent's.
4707  assert(nHeight < 2 || (pindex->pskip && (pindex->pskip->nHeight < nHeight))); // The pskip pointer must point back for all but the first 2 blocks.
4708  assert(pindexFirstNotTreeValid == nullptr); // All m_blockman.m_block_index entries must at least be TREE valid
4709  if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TREE) assert(pindexFirstNotTreeValid == nullptr); // TREE valid implies all parents are TREE valid
4710  if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_CHAIN) assert(pindexFirstNotChainValid == nullptr); // CHAIN valid implies all parents are CHAIN valid
4711  if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_SCRIPTS) assert(pindexFirstNotScriptsValid == nullptr); // SCRIPTS valid implies all parents are SCRIPTS valid
4712  if (pindexFirstInvalid == nullptr) {
4713  // Checks for not-invalid blocks.
4714  assert((pindex->nStatus & BLOCK_FAILED_MASK) == 0); // The failed mask cannot be set for blocks without invalid parents.
4715  }
4716  if (!CBlockIndexWorkComparator()(pindex, m_chain.Tip()) && pindexFirstNeverProcessed == nullptr) {
4717  if (pindexFirstInvalid == nullptr) {
4718  // If this block sorts at least as good as the current tip and
4719  // is valid and we have all data for its parents, it must be in
4720  // setBlockIndexCandidates. m_chain.Tip() must also be there
4721  // even if some data has been pruned.
4722  if (pindexFirstMissing == nullptr || pindex == m_chain.Tip()) {
4723  assert(setBlockIndexCandidates.count(pindex));
4724  }
4725  // If some parent is missing, then it could be that this block was in
4726  // setBlockIndexCandidates but had to be removed because of the missing data.
4727  // In this case it must be in m_blocks_unlinked -- see test below.
4728  }
4729  } else { // If this block sorts worse than the current tip or some ancestor's block has never been seen, it cannot be in setBlockIndexCandidates.
4730  assert(setBlockIndexCandidates.count(pindex) == 0);
4731  }
4732  // Check whether this block is in m_blocks_unlinked.
4733  std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeUnlinked = m_blockman.m_blocks_unlinked.equal_range(pindex->pprev);
4734  bool foundInUnlinked = false;
4735  while (rangeUnlinked.first != rangeUnlinked.second) {
4736  assert(rangeUnlinked.first->first == pindex->pprev);
4737  if (rangeUnlinked.first->second == pindex) {
4738  foundInUnlinked = true;
4739  break;
4740  }
4741  rangeUnlinked.first++;
4742  }
4743  if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed != nullptr && pindexFirstInvalid == nullptr) {
4744  // If this block has block data available, some parent was never received, and has no invalid parents, it must be in m_blocks_unlinked.
4745  assert(foundInUnlinked);
4746  }
4747  if (!(pindex->nStatus & BLOCK_HAVE_DATA)) assert(!foundInUnlinked); // Can't be in m_blocks_unlinked if we don't HAVE_DATA
4748  if (pindexFirstMissing == nullptr) assert(!foundInUnlinked); // We aren't missing data for any parent -- cannot be in m_blocks_unlinked.
4749  if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed == nullptr && pindexFirstMissing != nullptr) {
4750  // We HAVE_DATA for this block, have received data for all parents at some point, but we're currently missing data for some parent.
4751  assert(fHavePruned); // We must have pruned.
4752  // This block may have entered m_blocks_unlinked if:
4753  // - it has a descendant that at some point had more work than the
4754  // tip, and
4755  // - we tried switching to that descendant but were missing
4756  // data for some intermediate block between m_chain and the
4757  // tip.
4758  // So if this block is itself better than m_chain.Tip() and it wasn't in
4759  // setBlockIndexCandidates, then it must be in m_blocks_unlinked.
4760  if (!CBlockIndexWorkComparator()(pindex, m_chain.Tip()) && setBlockIndexCandidates.count(pindex) == 0) {
4761  if (pindexFirstInvalid == nullptr) {
4762  assert(foundInUnlinked);
4763  }
4764  }
4765  }
4766  // assert(pindex->GetBlockHash() == pindex->GetBlockHeader().GetHash()); // Perhaps too slow
4767  // End: actual consistency checks.
4768 
4769  // Try descending into the first subnode.
4770  std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> range = forward.equal_range(pindex);
4771  if (range.first != range.second) {
4772  // A subnode was found.
4773  pindex = range.first->second;
4774  nHeight++;
4775  continue;
4776  }
4777  // This is a leaf node.
4778  // Move upwards until we reach a node of which we have not yet visited the last child.
4779  while (pindex) {
4780  // We are going to either move to a parent or a sibling of pindex.
4781  // If pindex was the first with a certain property, unset the corresponding variable.
4782  if (pindex == pindexFirstInvalid) pindexFirstInvalid = nullptr;
4783  if (pindex == pindexFirstMissing) pindexFirstMissing = nullptr;
4784  if (pindex == pindexFirstNeverProcessed) pindexFirstNeverProcessed = nullptr;
4785  if (pindex == pindexFirstNotTreeValid) pindexFirstNotTreeValid = nullptr;
4786  if (pindex == pindexFirstNotTransactionsValid) pindexFirstNotTransactionsValid = nullptr;
4787  if (pindex == pindexFirstNotChainValid) pindexFirstNotChainValid = nullptr;
4788  if (pindex == pindexFirstNotScriptsValid) pindexFirstNotScriptsValid = nullptr;
4789  // Find our parent.
4790  CBlockIndex* pindexPar = pindex->pprev;
4791  // Find which child we just visited.
4792  std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangePar = forward.equal_range(pindexPar);
4793  while (rangePar.first->second != pindex) {
4794  assert(rangePar.first != rangePar.second); // Our parent must have at least the node we're coming from as child.
4795  rangePar.first++;
4796  }
4797  // Proceed to the next one.
4798  rangePar.first++;
4799  if (rangePar.first != rangePar.second) {
4800  // Move to the sibling.
4801  pindex = rangePar.first->second;
4802  break;
4803  } else {
4804  // Move up further.
4805  pindex = pindexPar;
4806  nHeight--;
4807  continue;
4808  }
4809  }
4810  }
4811 
4812  // Check that we actually traversed the entire map.
4813  assert(nNodes == forward.size());
4814 }
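
// The walk above is a depth-first traversal of a parent -> children multimap.
// A compact recursive sketch of the same traversal (the iterative form above is
// used so that the "first ancestor with property X" markers can be maintained
// on the way down and cleared on the way back up):
//
//     size_t CountSubtree(const std::multimap<CBlockIndex*, CBlockIndex*>& forward,
//                         CBlockIndex* node)
//     {
//         size_t count = 1; // this node
//         auto range = forward.equal_range(node);
//         for (auto it = range.first; it != range.second; ++it) {
//             count += CountSubtree(forward, it->second);
//         }
//         return count;
//     }
//     // CountSubtree(forward, genesis) == forward.size() when every entry descends
//     // from the genesis block, mirroring the final nNodes == forward.size() check.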
4815 
4816 std::string CChainState::ToString()
4817 {
4818  CBlockIndex* tip = m_chain.Tip();
4819  return strprintf("Chainstate [%s] @ height %d (%s)",
4820  m_from_snapshot_blockhash.IsNull() ? "ibd" : "snapshot",
4821  tip ? tip->nHeight : -1, tip ? tip->GetBlockHash().ToString() : "null");
4822 }
4823 
4824 bool CChainState::ResizeCoinsCaches(size_t coinstip_size, size_t coinsdb_size)
4825 {
4826  if (coinstip_size == m_coinstip_cache_size_bytes &&
4827  coinsdb_size == m_coinsdb_cache_size_bytes) {
4828  // Cache sizes are unchanged, no need to continue.
4829  return true;
4830  }
4831  size_t old_coinstip_size = m_coinstip_cache_size_bytes;
4832  m_coinstip_cache_size_bytes = coinstip_size;
4833  m_coinsdb_cache_size_bytes = coinsdb_size;
4834  CoinsDB().ResizeCache(coinsdb_size);
4835 
4836  LogPrintf("[%s] resized coinsdb cache to %.1f MiB\n",
4837  this->ToString(), coinsdb_size * (1.0 / 1024 / 1024));
4838  LogPrintf("[%s] resized coinstip cache to %.1f MiB\n",
4839  this->ToString(), coinstip_size * (1.0 / 1024 / 1024));
4840 
4841  BlockValidationState state;
4842  const CChainParams& chainparams = Params();
4843 
4844  bool ret;
4845 
4846  if (coinstip_size > old_coinstip_size) {
4847  // Likely no need to flush if cache sizes have grown.
4848  ret = FlushStateToDisk(chainparams, state, FlushStateMode::IF_NEEDED);
4849  } else {
4850  // Otherwise, flush state to disk and deallocate the in-memory coins map.
4851  ret = FlushStateToDisk(chainparams, state, FlushStateMode::ALWAYS);
4852  CoinsTip().ReallocateCache();
4853  }
4854  return ret;
4855 }
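
// A minimal sketch of resizing the caches at runtime from a byte budget (the
// 50/50 split below is illustrative only; the node's real cache-sizing policy
// lives elsewhere and weights the two caches differently):
//
//     const size_t total_bytes = size_t{450} << 20; // e.g. a 450 MiB budget
//     ::ChainstateActive().ResizeCoinsCaches(/*coinstip_size=*/total_bytes / 2,
//                                            /*coinsdb_size=*/total_bytes / 2);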
4856 
4857 std::string CBlockFileInfo::ToString() const
4858 {
4859  return strprintf("CBlockFileInfo(blocks=%u, size=%u, heights=%u...%u, time=%s...%s)", nBlocks, nSize, nHeightFirst, nHeightLast, FormatISO8601Date(nTimeFirst), FormatISO8601Date(nTimeLast));
4860 }
4861 
4862 CBlockFileInfo* GetBlockFileInfo(size_t n)
4863 {
4864  LOCK(cs_LastBlockFile);
4865 
4866  return &vinfoBlockFile.at(n);
4867 }
4868 
4869 static const uint64_t MEMPOOL_DUMP_VERSION = 1;
4870 
4871 bool LoadMempool(CTxMemPool& pool, CChainState& active_chainstate, FopenFn mockable_fopen_function)
4872 {
4873  const CChainParams& chainparams = Params();
4874  int64_t nExpiryTimeout = gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60;
4875  FILE* filestr{mockable_fopen_function(GetDataDir() / "mempool.dat", "rb")};
4876  CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);
4877  if (file.IsNull()) {
4878  LogPrintf("Failed to open mempool file from disk. Continuing anyway.\n");
4879  return false;
4880  }
4881 
4882  int64_t count = 0;
4883  int64_t expired = 0;
4884  int64_t failed = 0;
4885  int64_t already_there = 0;
4886  int64_t unbroadcast = 0;
4887  int64_t nNow = GetTime();
4888 
4889  try {
4890  uint64_t version;
4891  file >> version;
4892  if (version != MEMPOOL_DUMP_VERSION) {
4893  return false;
4894  }
4895  uint64_t num;
4896  file >> num;
4897  while (num--) {
4898  CTransactionRef tx;
4899  int64_t nTime;
4900  int64_t nFeeDelta;
4901  file >> tx;
4902  file >> nTime;
4903  file >> nFeeDelta;
4904 
4905  CAmount amountdelta = nFeeDelta;
4906  if (amountdelta) {
4907  pool.PrioritiseTransaction(tx->GetHash(), amountdelta);
4908  }
4909  if (nTime > nNow - nExpiryTimeout) {
4910  LOCK(cs_main);
4911  assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate));
4912  if (AcceptToMemoryPoolWithTime(chainparams, pool, active_chainstate, tx, nTime, false /* bypass_limits */,
4913  false /* test_accept */).m_result_type == MempoolAcceptResult::ResultType::VALID) {
4914  ++count;
4915  } else {
4916  // mempool may contain the transaction already, e.g. from
4917  // wallet(s) having loaded it while we were processing
4918  // mempool transactions; consider these as valid, instead of
4919  // failed, but mark them as 'already there'
4920  if (pool.exists(tx->GetHash())) {
4921  ++already_there;
4922  } else {
4923  ++failed;
4924  }
4925  }
4926  } else {
4927  ++expired;
4928  }
4929  if (ShutdownRequested<