// Bitcoin Core 0.20.99 — "P2P Digital Currency"
// Doxygen source-listing page for validation.cpp (extracted listing follows).
1 // Copyright (c) 2009-2010 Satoshi Nakamoto
2 // Copyright (c) 2009-2020 The Bitcoin Core developers
3 // Distributed under the MIT software license, see the accompanying
4 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
5 
6 #include <validation.h>
7 
8 #include <arith_uint256.h>
9 #include <chain.h>
10 #include <chainparams.h>
11 #include <checkqueue.h>
12 #include <consensus/consensus.h>
13 #include <consensus/merkle.h>
14 #include <consensus/tx_check.h>
15 #include <consensus/tx_verify.h>
16 #include <consensus/validation.h>
17 #include <cuckoocache.h>
18 #include <flatfile.h>
19 #include <hash.h>
20 #include <index/txindex.h>
21 #include <logging.h>
22 #include <logging/timer.h>
23 #include <node/ui_interface.h>
24 #include <optional.h>
25 #include <policy/fees.h>
26 #include <policy/policy.h>
27 #include <policy/settings.h>
28 #include <pow.h>
29 #include <primitives/block.h>
30 #include <primitives/transaction.h>
31 #include <random.h>
32 #include <reverse_iterator.h>
33 #include <script/script.h>
34 #include <script/sigcache.h>
35 #include <shutdown.h>
36 #include <timedata.h>
37 #include <tinyformat.h>
38 #include <txdb.h>
39 #include <txmempool.h>
40 #include <uint256.h>
41 #include <undo.h>
42 #include <util/moneystr.h>
43 #include <util/rbf.h>
44 #include <util/strencodings.h>
45 #include <util/system.h>
46 #include <util/translation.h>
47 #include <validationinterface.h>
48 #include <warnings.h>
49 
50 #include <string>
51 
52 #include <boost/algorithm/string/replace.hpp>
53 
#if defined(NDEBUG)
# error "Bitcoin cannot be compiled without assertions."
#endif

// Scale factors used when reporting durations in seconds.
#define MICRO 0.000001
#define MILLI 0.001

// Extra descendant-size allowance granted to a single transaction under the
// CPFP carve-out (used by the mempool-ancestor recalculation below).
static const unsigned int EXTRA_DESCENDANT_TX_SIZE_LIMIT = 10000;
// Cap on the disconnected-transaction pool kept during reorgs.
// NOTE(review): units (entry count vs. kilobytes) are not visible in this
// extract — confirm against the definition of DisconnectedBlockTransactions.
static const unsigned int MAX_DISCONNECTED_TX_POOL_SIZE = 20000;
// Pre-allocation chunk sizes for block and undo files on disk.
static const unsigned int BLOCKFILE_CHUNK_SIZE = 0x1000000; // 16 MiB
static const unsigned int UNDOFILE_CHUNK_SIZE = 0x100000; // 1 MiB
// Periodic database maintenance intervals (names suggest write vs. full
// flush cadence — confirm at the use sites, which are outside this extract).
static constexpr std::chrono::hours DATABASE_WRITE_INTERVAL{1};
static constexpr std::chrono::hours DATABASE_FLUSH_INTERVAL{24};
// Maximum age of the chain tip for fee estimation to consider the node
// "current" (see IsCurrentForFeeEstimation below).
static constexpr std::chrono::hours MAX_FEE_ESTIMATION_TIP_AGE{3};
// Human-readable documentation for the -checklevel option, one entry per level.
const std::vector<std::string> CHECKLEVEL_DOC {
    "level 0 reads the blocks from disk",
    "level 1 verifies block validity",
    "level 2 verifies undo data",
    "level 3 checks disconnection of tip blocks",
    "level 4 tries to reconnect the blocks",
    "each level includes the checks of the previous levels",
};
87 
// NOTE(review): the enclosing function signature was stripped by the
// extractor. The body reads as a strict-weak-ordering comparator over two
// CBlockIndex pointers (presumably CBlockIndexWorkComparator::operator());
// returning true means `pb` sorts before `pa`. Confirm against the original.
 // First sort by most total work, ...
 if (pa->nChainWork > pb->nChainWork) return false;
 if (pa->nChainWork < pb->nChainWork) return true;

 // ... then by earliest time received, ...
 if (pa->nSequenceId < pb->nSequenceId) return false;
 if (pa->nSequenceId > pb->nSequenceId) return true;

 // Use pointer address as tie breaker (should only happen with blocks
 // loaded from disk, as those all have id 0).
 if (pa < pb) return false;
 if (pa > pb) return true;

 // Identical blocks.
 return false;
}
105 
107 
// NOTE(review): the signature line was stripped by the extractor; the body
// matches an accessor returning the process-wide active chainstate
// (ChainstateActive()). Requires cs_main; asserts the chainstate exists.
{
    LOCK(::cs_main);
    // The active chainstate must have been created before first use.
    assert(g_chainman.m_active_chainstate);
    return *g_chainman.m_active_chainstate;
}
114 
// NOTE(review): extraction artifact — both this function's signature and its
// return statement (doxygen line 118) are missing from the listing; only the
// lock acquisition survives. Do not treat this as the real implementation.
{
    LOCK(::cs_main);
}
120 
132 
// Global validation state. NOTE(review): several adjacent declarations
// (e.g. the ones at doxygen lines 132-134, 136-137, 144, 147-155) were
// dropped by the extractor, so this list is incomplete.
std::condition_variable g_best_block_cv;
std::atomic_bool fImporting(false);
std::atomic_bool fReindex(false);
bool fHavePruned = false;       // presumably: block files have been pruned at some point — confirm
bool fPruneMode = false;        // presumably: node runs with -prune enabled — confirm
bool fRequireStandard = true;   // enforce standardness policy (read throughout PreChecks below)
bool fCheckBlockIndex = false;  // enable extra block-index consistency checking
size_t nCoinCacheUsage = 5000 * 300;
uint64_t nPruneTarget = 0;

CTxMemPool mempool(&feeEstimator);

// Internal stuff
namespace {
    // Best-work block index entry among blocks marked invalid, if any.
    CBlockIndex* pindexBestInvalid = nullptr;

    // Guards the block-file bookkeeping below.
    RecursiveMutex cs_LastBlockFile;
    std::vector<CBlockFileInfo> vinfoBlockFile;
    int nLastBlockFile = 0;
    // Set when pruning work should run at the next opportunity.
    bool fCheckForPruning = false;

    // Block-index entries whose persisted state is stale ("dirty").
    std::set<CBlockIndex*> setDirtyBlockIndex;

    // Block-file numbers whose info records need to be flushed.
    std::set<int> setDirtyFileInfo;
} // anon namespace
176 
// NOTE(review): signature stripped by the extractor; this is the body of a
// hash -> CBlockIndex* lookup (LookupBlockIndex) over the global block map.
{
    AssertLockHeld(cs_main);
    // Return the mapped block-index entry, or nullptr for an unknown hash.
    BlockMap::const_iterator it = g_chainman.BlockIndex().find(hash);
    return it == g_chainman.BlockIndex().end() ? nullptr : it->second;
}
183 
// NOTE(review): signature stripped by the extractor; from the body this is
// FindForkInGlobalIndex(chain, locator): returns the most recent block in
// `chain` that the locator and chain have in common, falling back to genesis.
{
    AssertLockHeld(cs_main);

    // Find the latest block common to locator and chain - we expect that
    // locator.vHave is sorted descending by height.
    for (const uint256& hash : locator.vHave) {
        CBlockIndex* pindex = LookupBlockIndex(hash);
        if (pindex) {
            if (chain.Contains(pindex))
                return pindex;
            // The locator block is on a fork, but our tip descends from the
            // same ancestor at the tip's height: the tip itself is the fork point.
            if (pindex->GetAncestor(chain.Height()) == chain.Tip()) {
                return chain.Tip();
            }
        }
    }
    // Nothing in common with the locator: the fork point is genesis.
    return chain.Genesis();
}
202 
// Global handle to the block-tree database (LevelDB-backed block index).
std::unique_ptr<CBlockTreeDB> pblocktree;

// Forward declarations for helpers defined later in this file.
// See definition for documentation
static void FindFilesToPruneManual(ChainstateManager& chainman, std::set<int>& setFilesToPrune, int nManualPruneHeight);
static void FindFilesToPrune(ChainstateManager& chainman, std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight);
bool CheckInputScripts(const CTransaction& tx, TxValidationState &state, const CCoinsViewCache &inputs, unsigned int flags, bool cacheSigStore, bool cacheFullScriptStore, PrecomputedTransactionData& txdata, std::vector<CScriptCheck> *pvChecks = nullptr);
static FILE* OpenUndoFile(const FlatFilePos &pos, bool fReadOnly = false);
static FlatFileSeq BlockFileSeq();
static FlatFileSeq UndoFileSeq();
212 
213 bool CheckFinalTx(const CTransaction &tx, int flags)
214 {
215  AssertLockHeld(cs_main);
216 
217  // By convention a negative value for flags indicates that the
218  // current network-enforced consensus rules should be used. In
219  // a future soft-fork scenario that would mean checking which
220  // rules would be enforced for the next block and setting the
221  // appropriate flags. At the present time no soft-forks are
222  // scheduled, so no flags are set.
223  flags = std::max(flags, 0);
224 
225  // CheckFinalTx() uses ::ChainActive().Height()+1 to evaluate
226  // nLockTime because when IsFinalTx() is called within
227  // CBlock::AcceptBlock(), the height of the block *being*
228  // evaluated is what is used. Thus if we want to know if a
229  // transaction can be part of the *next* block, we need to call
230  // IsFinalTx() with one more than ::ChainActive().Height().
231  const int nBlockHeight = ::ChainActive().Height() + 1;
232 
233  // BIP113 requires that time-locked transactions have nLockTime set to
234  // less than the median time of the previous block they're contained in.
235  // When the next block is created its previous block will be the current
236  // chain tip, so we use that to calculate the median time passed to
237  // IsFinalTx() if LOCKTIME_MEDIAN_TIME_PAST is set.
238  const int64_t nBlockTime = (flags & LOCKTIME_MEDIAN_TIME_PAST)
240  : GetAdjustedTime();
241 
242  return IsFinalTx(tx, nBlockHeight, nBlockTime);
243 }
244 
// NOTE(review): signature stripped by the extractor; body matches
// TestLockPointValidity(const LockPoints* lp) — returns whether previously
// computed LockPoints remain valid on the current active chain.
{
    AssertLockHeld(cs_main);
    assert(lp);
    // If there are relative lock times then the maxInputBlock will be set
    // If there are no relative lock times, the LockPoints don't depend on the chain
    if (lp->maxInputBlock) {
        // Check whether ::ChainActive() is an extension of the block at which the LockPoints
        // calculation was valid. If not LockPoints are no longer valid
        if (!::ChainActive().Contains(lp->maxInputBlock)) {
            return false;
        }
    }

    // LockPoints still valid
    return true;
}
262 
263 bool CheckSequenceLocks(const CTxMemPool& pool, const CTransaction& tx, int flags, LockPoints* lp, bool useExistingLockPoints)
264 {
265  AssertLockHeld(cs_main);
266  AssertLockHeld(pool.cs);
267 
268  CBlockIndex* tip = ::ChainActive().Tip();
269  assert(tip != nullptr);
270 
271  CBlockIndex index;
272  index.pprev = tip;
273  // CheckSequenceLocks() uses ::ChainActive().Height()+1 to evaluate
274  // height based locks because when SequenceLocks() is called within
275  // ConnectBlock(), the height of the block *being*
276  // evaluated is what is used.
277  // Thus if we want to know if a transaction can be part of the
278  // *next* block, we need to use one more than ::ChainActive().Height()
279  index.nHeight = tip->nHeight + 1;
280 
281  std::pair<int, int64_t> lockPair;
282  if (useExistingLockPoints) {
283  assert(lp);
284  lockPair.first = lp->height;
285  lockPair.second = lp->time;
286  }
287  else {
288  // CoinsTip() contains the UTXO set for ::ChainActive().Tip()
289  CCoinsViewMemPool viewMemPool(&::ChainstateActive().CoinsTip(), pool);
290  std::vector<int> prevheights;
291  prevheights.resize(tx.vin.size());
292  for (size_t txinIndex = 0; txinIndex < tx.vin.size(); txinIndex++) {
293  const CTxIn& txin = tx.vin[txinIndex];
294  Coin coin;
295  if (!viewMemPool.GetCoin(txin.prevout, coin)) {
296  return error("%s: Missing input", __func__);
297  }
298  if (coin.nHeight == MEMPOOL_HEIGHT) {
299  // Assume all mempool transaction confirm in the next block
300  prevheights[txinIndex] = tip->nHeight + 1;
301  } else {
302  prevheights[txinIndex] = coin.nHeight;
303  }
304  }
305  lockPair = CalculateSequenceLocks(tx, flags, prevheights, index);
306  if (lp) {
307  lp->height = lockPair.first;
308  lp->time = lockPair.second;
309  // Also store the hash of the block with the highest height of
310  // all the blocks which have sequence locked prevouts.
311  // This hash needs to still be on the chain
312  // for these LockPoint calculations to be valid
313  // Note: It is impossible to correctly calculate a maxInputBlock
314  // if any of the sequence locked inputs depend on unconfirmed txs,
315  // except in the special case where the relative lock time/height
316  // is 0, which is equivalent to no sequence lock. Since we assume
317  // input height of tip+1 for mempool txs and test the resulting
318  // lockPair from CalculateSequenceLocks against tip+1. We know
319  // EvaluateSequenceLocks will fail if there was a non-zero sequence
320  // lock on a mempool input, so we can use the return value of
321  // CheckSequenceLocks to indicate the LockPoints validity
322  int maxInputHeight = 0;
323  for (const int height : prevheights) {
324  // Can ignore mempool inputs since we'll fail if they had non-zero locks
325  if (height != tip->nHeight+1) {
326  maxInputHeight = std::max(maxInputHeight, height);
327  }
328  }
329  lp->maxInputBlock = tip->GetAncestor(maxInputHeight);
330  }
331  }
332  return EvaluateSequenceLocks(index, lockPair);
333 }
334 
335 // Returns the script flags which should be checked for a given block
336 static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consensus::Params& chainparams);
337 
338 static void LimitMempoolSize(CTxMemPool& pool, size_t limit, std::chrono::seconds age)
339  EXCLUSIVE_LOCKS_REQUIRED(pool.cs, ::cs_main)
340 {
341  int expired = pool.Expire(GetTime<std::chrono::seconds>() - age);
342  if (expired != 0) {
343  LogPrint(BCLog::MEMPOOL, "Expired %i transactions from the memory pool\n", expired);
344  }
345 
346  std::vector<COutPoint> vNoSpendsRemaining;
347  pool.TrimToSize(limit, &vNoSpendsRemaining);
348  for (const COutPoint& removed : vNoSpendsRemaining)
349  ::ChainstateActive().CoinsTip().Uncache(removed);
350 }
351 
// NOTE(review): signature stripped by the extractor; given the use of
// MAX_FEE_ESTIMATION_TIP_AGE this is the fee-estimation currency gate
// (IsCurrentForFeeEstimation): only treat observed fees as meaningful when
// the node appears caught up with the network.
{
    AssertLockHeld(cs_main);
    // Never feed the estimator during initial block download.
    if (::ChainstateActive().IsInitialBlockDownload())
        return false;
    // The tip must be recent enough (within MAX_FEE_ESTIMATION_TIP_AGE)...
    if (::ChainActive().Tip()->GetBlockTime() < count_seconds(GetTime<std::chrono::seconds>() - MAX_FEE_ESTIMATION_TIP_AGE))
        return false;
    // ...and the active chain must be within one block of the best known header.
    if (::ChainActive().Height() < pindexBestHeader->nHeight - 1)
        return false;
    return true;
}
363 
364 /* Make mempool consistent after a reorg, by re-adding or recursively erasing
365  * disconnected block transactions from the mempool, and also removing any
366  * other transactions from the mempool that are no longer valid given the new
367  * tip/height.
368  *
369  * Note: we assume that disconnectpool only contains transactions that are NOT
370  * confirmed in the current chain nor already in the mempool (otherwise,
371  * in-mempool descendants of such transactions would be removed).
372  *
373  * Passing fAddToMempool=false will skip trying to add the transactions back,
374  * and instead just erase from the mempool as needed.
375  */
376 
377 static void UpdateMempoolForReorg(DisconnectedBlockTransactions& disconnectpool, bool fAddToMempool) EXCLUSIVE_LOCKS_REQUIRED(cs_main, ::mempool.cs)
378 {
379  AssertLockHeld(cs_main);
380  std::vector<uint256> vHashUpdate;
381  // disconnectpool's insertion_order index sorts the entries from
382  // oldest to newest, but the oldest entry will be the last tx from the
383  // latest mined block that was disconnected.
384  // Iterate disconnectpool in reverse, so that we add transactions
385  // back to the mempool starting with the earliest transaction that had
386  // been previously seen in a block.
387  auto it = disconnectpool.queuedTx.get<insertion_order>().rbegin();
388  while (it != disconnectpool.queuedTx.get<insertion_order>().rend()) {
389  // ignore validation errors in resurrected transactions
390  TxValidationState stateDummy;
391  if (!fAddToMempool || (*it)->IsCoinBase() ||
392  !AcceptToMemoryPool(mempool, stateDummy, *it,
393  nullptr /* plTxnReplaced */, true /* bypass_limits */, 0 /* nAbsurdFee */)) {
394  // If the transaction doesn't make it in to the mempool, remove any
395  // transactions that depend on it (which would now be orphans).
397  } else if (mempool.exists((*it)->GetHash())) {
398  vHashUpdate.push_back((*it)->GetHash());
399  }
400  ++it;
401  }
402  disconnectpool.queuedTx.clear();
403  // AcceptToMemoryPool/addUnchecked all assume that new mempool entries have
404  // no in-mempool children, which is generally not true when adding
405  // previously-confirmed transactions back to the mempool.
406  // UpdateTransactionsFromBlock finds descendants of any transactions in
407  // the disconnectpool that were added back and cleans up the mempool state.
409 
410  // We also need to remove any now-immature transactions
412  // Re-limit mempool size, in case we added any transactions
413  LimitMempoolSize(mempool, gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, std::chrono::hours{gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY)});
414 }
415 
416 // Used to avoid mempool polluting consensus critical paths if CCoinsViewMempool
417 // were somehow broken and returning the wrong scriptPubKeys
418 static bool CheckInputsFromMempoolAndCache(const CTransaction& tx, TxValidationState& state, const CCoinsViewCache& view, const CTxMemPool& pool,
419  unsigned int flags, PrecomputedTransactionData& txdata) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
420  AssertLockHeld(cs_main);
421 
422  // pool.cs should be locked already, but go ahead and re-take the lock here
423  // to enforce that mempool doesn't change between when we check the view
424  // and when we actually call through to CheckInputScripts
425  LOCK(pool.cs);
426 
427  assert(!tx.IsCoinBase());
428  for (const CTxIn& txin : tx.vin) {
429  const Coin& coin = view.AccessCoin(txin.prevout);
430 
431  // AcceptToMemoryPoolWorker has already checked that the coins are
432  // available, so this shouldn't fail. If the inputs are not available
433  // here then return false.
434  if (coin.IsSpent()) return false;
435 
436  // Check equivalence for available inputs.
437  const CTransactionRef& txFrom = pool.get(txin.prevout.hash);
438  if (txFrom) {
439  assert(txFrom->GetHash() == txin.prevout.hash);
440  assert(txFrom->vout.size() > txin.prevout.n);
441  assert(txFrom->vout[txin.prevout.n] == coin.out);
442  } else {
443  const Coin& coinFromDisk = ::ChainstateActive().CoinsTip().AccessCoin(txin.prevout);
444  assert(!coinFromDisk.IsSpent());
445  assert(coinFromDisk.out == coin.out);
446  }
447  }
448 
449  // Call CheckInputScripts() to cache signature and script validity against current tip consensus rules.
450  return CheckInputScripts(tx, state, view, flags, /* cacheSigStore = */ true, /* cacheFullSciptStore = */ true, txdata);
451 }
452 
453 namespace {
454 
/**
 * Encapsulates mempool acceptance of a single transaction. The staged checks
 * (PreChecks -> PolicyScriptChecks -> ConsensusScriptChecks -> Finalize) are
 * driven by AcceptSingleTransaction. Package (-limit*) limits are snapshotted
 * from gArgs at construction time.
 */
class MemPoolAccept
{
public:
    MemPoolAccept(CTxMemPool& mempool) : m_pool(mempool), m_view(&m_dummy), m_viewmempool(&::ChainstateActive().CoinsTip(), m_pool),
        m_limit_ancestors(gArgs.GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT)),
        m_limit_ancestor_size(gArgs.GetArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT)*1000),
        m_limit_descendants(gArgs.GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT)),
        m_limit_descendant_size(gArgs.GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT)*1000) {}

    // We put the arguments we're handed into a struct, so we can pass them
    // around easier.
    struct ATMPArgs {
        const CChainParams& m_chainparams;
        // Filled with the rejection reason when acceptance fails.
        TxValidationState &m_state;
        // Time the transaction was received (stored in the mempool entry).
        const int64_t m_accept_time;
        // Out: transactions evicted by replacement; presumably may be null — confirm at call sites.
        std::list<CTransactionRef>* m_replaced_transactions;
        // When true, skip the fee-rate checks (see PreChecks' CheckFeeRate call).
        const bool m_bypass_limits;
        // Reject when nFees exceeds this; 0 disables the check (see PreChecks).
        const CAmount& m_absurd_fee;
        /*
         * Return any outpoints which were not previously present in the coins
         * cache, but were added as a result of validating the tx for mempool
         * acceptance. This allows the caller to optionally remove the cache
         * additions if the associated transaction ends up being rejected by
         * the mempool.
         */
        std::vector<COutPoint>& m_coins_to_uncache;
        // Presumably: run validation without actually adding to the pool — confirm.
        const bool m_test_accept;
    };

    // Single transaction acceptance
    bool AcceptSingleTransaction(const CTransactionRef& ptx, ATMPArgs& args) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

private:
    // All the intermediate state that gets passed between the various levels
    // of checking a given transaction.
    struct Workspace {
        Workspace(const CTransactionRef& ptx) : m_ptx(ptx), m_hash(ptx->GetHash()) {}
        // Txids of in-mempool transactions this tx conflicts with (RBF candidates).
        std::set<uint256> m_conflicts;
        // Mempool entries slated for removal on replacement.
        CTxMemPool::setEntries m_all_conflicting;
        // In-mempool ancestors of this transaction.
        CTxMemPool::setEntries m_ancestors;
        // The entry to be inserted into the pool on success.
        std::unique_ptr<CTxMemPoolEntry> m_entry;

        // True when this tx has mempool conflicts (set in PreChecks).
        bool m_replacement_transaction;
        // Fees including any PrioritiseTransaction deltas (see PreChecks).
        CAmount m_modified_fees;
        // Aggregate fee/size of the transactions being replaced.
        CAmount m_conflicting_fees;
        size_t m_conflicting_size;

        const CTransactionRef& m_ptx;
        const uint256& m_hash;
    };

    // Run the policy checks on a given transaction, excluding any script checks.
    // Looks up inputs, calculates feerate, considers replacement, evaluates
    // package limits, etc. As this function can be invoked for "free" by a peer,
    // only tests that are fast should be done here (to avoid CPU DoS).
    bool PreChecks(ATMPArgs& args, Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);

    // Run the script checks using our policy flags. As this can be slow, we should
    // only invoke this on transactions that have otherwise passed policy checks.
    bool PolicyScriptChecks(ATMPArgs& args, Workspace& ws, PrecomputedTransactionData& txdata) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    // Re-run the script checks, using consensus flags, and try to cache the
    // result in the scriptcache. This should be done after
    // PolicyScriptChecks(). This requires that all inputs either be in our
    // utxo set or in the mempool.
    bool ConsensusScriptChecks(ATMPArgs& args, Workspace& ws, PrecomputedTransactionData &txdata) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    // Try to add the transaction to the mempool, removing any conflicts first.
    // Returns true if the transaction is in the mempool after any size
    // limiting is performed, false otherwise.
    bool Finalize(ATMPArgs& args, Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);

    // Compare a package's feerate against minimum allowed.
    bool CheckFeeRate(size_t package_size, CAmount package_fee, TxValidationState& state)
    {
        // Dynamic minimum: rises as the pool approaches -maxmempool bytes.
        CAmount mempoolRejectFee = m_pool.GetMinFee(gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFee(package_size);
        if (mempoolRejectFee > 0 && package_fee < mempoolRejectFee) {
            return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "mempool min fee not met", strprintf("%d < %d", package_fee, mempoolRejectFee));
        }

        // Static floor: the configured minimum relay fee.
        if (package_fee < ::minRelayTxFee.GetFee(package_size)) {
            return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "min relay fee not met", strprintf("%d < %d", package_fee, ::minRelayTxFee.GetFee(package_size)));
        }
        return true;
    }

private:
    CTxMemPool& m_pool;
    // Layered coin view: backed by m_viewmempool while fetching inputs, then
    // switched to m_dummy once inputs are cached (see PreChecks).
    CCoinsViewCache m_view;
    CCoinsViewMemPool m_viewmempool;
    CCoinsView m_dummy;

    // The package limits in effect at the time of invocation.
    const size_t m_limit_ancestors;
    const size_t m_limit_ancestor_size;
    // These may be modified while evaluating a transaction (eg to account for
    // in-mempool conflicts; see below).
    size_t m_limit_descendants;
    size_t m_limit_descendant_size;
};
555 
556 bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws)
557 {
558  const CTransactionRef& ptx = ws.m_ptx;
559  const CTransaction& tx = *ws.m_ptx;
560  const uint256& hash = ws.m_hash;
561 
562  // Copy/alias what we need out of args
563  TxValidationState &state = args.m_state;
564  const int64_t nAcceptTime = args.m_accept_time;
565  const bool bypass_limits = args.m_bypass_limits;
566  const CAmount& nAbsurdFee = args.m_absurd_fee;
567  std::vector<COutPoint>& coins_to_uncache = args.m_coins_to_uncache;
568 
569  // Alias what we need out of ws
570  std::set<uint256>& setConflicts = ws.m_conflicts;
571  CTxMemPool::setEntries& allConflicting = ws.m_all_conflicting;
572  CTxMemPool::setEntries& setAncestors = ws.m_ancestors;
573  std::unique_ptr<CTxMemPoolEntry>& entry = ws.m_entry;
574  bool& fReplacementTransaction = ws.m_replacement_transaction;
575  CAmount& nModifiedFees = ws.m_modified_fees;
576  CAmount& nConflictingFees = ws.m_conflicting_fees;
577  size_t& nConflictingSize = ws.m_conflicting_size;
578 
579  if (!CheckTransaction(tx, state))
580  return false; // state filled in by CheckTransaction
581 
582  // Coinbase is only valid in a block, not as a loose transaction
583  if (tx.IsCoinBase())
584  return state.Invalid(TxValidationResult::TX_CONSENSUS, "coinbase");
585 
586  // Rather not work on nonstandard transactions (unless -testnet/-regtest)
587  std::string reason;
588  if (fRequireStandard && !IsStandardTx(tx, reason))
589  return state.Invalid(TxValidationResult::TX_NOT_STANDARD, reason);
590 
591  // Do not work on transactions that are too small.
592  // A transaction with 1 segwit input and 1 P2WPHK output has non-witness size of 82 bytes.
593  // Transactions smaller than this are not relayed to mitigate CVE-2017-12842 by not relaying
594  // 64-byte transactions.
596  return state.Invalid(TxValidationResult::TX_NOT_STANDARD, "tx-size-small");
597 
598  // Only accept nLockTime-using transactions that can be mined in the next
599  // block; we don't want our mempool filled up with transactions that can't
600  // be mined yet.
602  return state.Invalid(TxValidationResult::TX_PREMATURE_SPEND, "non-final");
603 
604  // is it already in the memory pool?
605  if (m_pool.exists(hash)) {
606  return state.Invalid(TxValidationResult::TX_CONFLICT, "txn-already-in-mempool");
607  }
608 
609  // Check for conflicts with in-memory transactions
610  for (const CTxIn &txin : tx.vin)
611  {
612  const CTransaction* ptxConflicting = m_pool.GetConflictTx(txin.prevout);
613  if (ptxConflicting) {
614  if (!setConflicts.count(ptxConflicting->GetHash()))
615  {
616  // Allow opt-out of transaction replacement by setting
617  // nSequence > MAX_BIP125_RBF_SEQUENCE (SEQUENCE_FINAL-2) on all inputs.
618  //
619  // SEQUENCE_FINAL-1 is picked to still allow use of nLockTime by
620  // non-replaceable transactions. All inputs rather than just one
621  // is for the sake of multi-party protocols, where we don't
622  // want a single party to be able to disable replacement.
623  //
624  // The opt-out ignores descendants as anyone relying on
625  // first-seen mempool behavior should be checking all
626  // unconfirmed ancestors anyway; doing otherwise is hopelessly
627  // insecure.
628  bool fReplacementOptOut = true;
629  for (const CTxIn &_txin : ptxConflicting->vin)
630  {
631  if (_txin.nSequence <= MAX_BIP125_RBF_SEQUENCE)
632  {
633  fReplacementOptOut = false;
634  break;
635  }
636  }
637  if (fReplacementOptOut) {
638  return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "txn-mempool-conflict");
639  }
640 
641  setConflicts.insert(ptxConflicting->GetHash());
642  }
643  }
644  }
645 
646  LockPoints lp;
647  m_view.SetBackend(m_viewmempool);
648 
649  CCoinsViewCache& coins_cache = ::ChainstateActive().CoinsTip();
650  // do all inputs exist?
651  for (const CTxIn& txin : tx.vin) {
652  if (!coins_cache.HaveCoinInCache(txin.prevout)) {
653  coins_to_uncache.push_back(txin.prevout);
654  }
655 
656  // Note: this call may add txin.prevout to the coins cache
657  // (coins_cache.cacheCoins) by way of FetchCoin(). It should be removed
658  // later (via coins_to_uncache) if this tx turns out to be invalid.
659  if (!m_view.HaveCoin(txin.prevout)) {
660  // Are inputs missing because we already have the tx?
661  for (size_t out = 0; out < tx.vout.size(); out++) {
662  // Optimistically just do efficient check of cache for outputs
663  if (coins_cache.HaveCoinInCache(COutPoint(hash, out))) {
664  return state.Invalid(TxValidationResult::TX_CONFLICT, "txn-already-known");
665  }
666  }
667  // Otherwise assume this might be an orphan tx for which we just haven't seen parents yet
668  return state.Invalid(TxValidationResult::TX_MISSING_INPUTS, "bad-txns-inputs-missingorspent");
669  }
670  }
671 
672  // Bring the best block into scope
673  m_view.GetBestBlock();
674 
675  // we have all inputs cached now, so switch back to dummy (to protect
676  // against bugs where we pull more inputs from disk that miss being added
677  // to coins_to_uncache)
678  m_view.SetBackend(m_dummy);
679 
680  // Only accept BIP68 sequence locked transactions that can be mined in the next
681  // block; we don't want our mempool filled up with transactions that can't
682  // be mined yet.
683  // Must keep pool.cs for this unless we change CheckSequenceLocks to take a
684  // CoinsViewCache instead of create its own
685  if (!CheckSequenceLocks(m_pool, tx, STANDARD_LOCKTIME_VERIFY_FLAGS, &lp))
686  return state.Invalid(TxValidationResult::TX_PREMATURE_SPEND, "non-BIP68-final");
687 
688  CAmount nFees = 0;
689  if (!Consensus::CheckTxInputs(tx, state, m_view, GetSpendHeight(m_view), nFees)) {
690  return error("%s: Consensus::CheckTxInputs: %s, %s", __func__, tx.GetHash().ToString(), state.ToString());
691  }
692 
693  // Check for non-standard pay-to-script-hash in inputs
694  if (fRequireStandard && !AreInputsStandard(tx, m_view))
695  return state.Invalid(TxValidationResult::TX_NOT_STANDARD, "bad-txns-nonstandard-inputs");
696 
697  // Check for non-standard witness in P2WSH
698  if (tx.HasWitness() && fRequireStandard && !IsWitnessStandard(tx, m_view))
699  return state.Invalid(TxValidationResult::TX_WITNESS_MUTATED, "bad-witness-nonstandard");
700 
701  int64_t nSigOpsCost = GetTransactionSigOpCost(tx, m_view, STANDARD_SCRIPT_VERIFY_FLAGS);
702 
703  // nModifiedFees includes any fee deltas from PrioritiseTransaction
704  nModifiedFees = nFees;
705  m_pool.ApplyDelta(hash, nModifiedFees);
706 
707  // Keep track of transactions that spend a coinbase, which we re-scan
708  // during reorgs to ensure COINBASE_MATURITY is still met.
709  bool fSpendsCoinbase = false;
710  for (const CTxIn &txin : tx.vin) {
711  const Coin &coin = m_view.AccessCoin(txin.prevout);
712  if (coin.IsCoinBase()) {
713  fSpendsCoinbase = true;
714  break;
715  }
716  }
717 
718  entry.reset(new CTxMemPoolEntry(ptx, nFees, nAcceptTime, ::ChainActive().Height(),
719  fSpendsCoinbase, nSigOpsCost, lp));
720  unsigned int nSize = entry->GetTxSize();
721 
722  if (nSigOpsCost > MAX_STANDARD_TX_SIGOPS_COST)
723  return state.Invalid(TxValidationResult::TX_NOT_STANDARD, "bad-txns-too-many-sigops",
724  strprintf("%d", nSigOpsCost));
725 
726  // No transactions are allowed below minRelayTxFee except from disconnected
727  // blocks
728  if (!bypass_limits && !CheckFeeRate(nSize, nModifiedFees, state)) return false;
729 
730  if (nAbsurdFee && nFees > nAbsurdFee)
732  "absurdly-high-fee", strprintf("%d > %d", nFees, nAbsurdFee));
733 
734  const CTxMemPool::setEntries setIterConflicting = m_pool.GetIterSet(setConflicts);
735  // Calculate in-mempool ancestors, up to a limit.
736  if (setConflicts.size() == 1) {
737  // In general, when we receive an RBF transaction with mempool conflicts, we want to know whether we
738  // would meet the chain limits after the conflicts have been removed. However, there isn't a practical
739  // way to do this short of calculating the ancestor and descendant sets with an overlay cache of
740  // changed mempool entries. Due to both implementation and runtime complexity concerns, this isn't
741  // very realistic, thus we only ensure a limited set of transactions are RBF'able despite mempool
742  // conflicts here. Importantly, we need to ensure that some transactions which were accepted using
743  // the below carve-out are able to be RBF'ed, without impacting the security the carve-out provides
744  // for off-chain contract systems (see link in the comment below).
745  //
746  // Specifically, the subset of RBF transactions which we allow despite chain limits are those which
747  // conflict directly with exactly one other transaction (but may evict children of said transaction),
748  // and which are not adding any new mempool dependencies. Note that the "no new mempool dependencies"
749  // check is accomplished later, so we don't bother doing anything about it here, but if BIP 125 is
750  // amended, we may need to move that check to here instead of removing it wholesale.
751  //
752  // Such transactions are clearly not merging any existing packages, so we are only concerned with
753  // ensuring that (a) no package is growing past the package size (not count) limits and (b) we are
754  // not allowing something to effectively use the (below) carve-out spot when it shouldn't be allowed
755  // to.
756  //
757  // To check these we first check if we meet the RBF criteria, above, and increment the descendant
758  // limits by the direct conflict and its descendants (as these are recalculated in
759  // CalculateMempoolAncestors by assuming the new transaction being added is a new descendant, with no
760  // removals, of each parent's existing dependent set). The ancestor count limits are unmodified (as
761  // the ancestor limits should be the same for both our new transaction and any conflicts).
762  // We don't bother incrementing m_limit_descendants by the full removal count as that limit never comes
763  // into force here (as we're only adding a single transaction).
764  assert(setIterConflicting.size() == 1);
765  CTxMemPool::txiter conflict = *setIterConflicting.begin();
766 
767  m_limit_descendants += 1;
768  m_limit_descendant_size += conflict->GetSizeWithDescendants();
769  }
770 
771  std::string errString;
772  if (!m_pool.CalculateMemPoolAncestors(*entry, setAncestors, m_limit_ancestors, m_limit_ancestor_size, m_limit_descendants, m_limit_descendant_size, errString)) {
773  setAncestors.clear();
774  // If CalculateMemPoolAncestors fails second time, we want the original error string.
775  std::string dummy_err_string;
776  // Contracting/payment channels CPFP carve-out:
777  // If the new transaction is relatively small (up to 40k weight)
778  // and has at most one ancestor (ie ancestor limit of 2, including
779  // the new transaction), allow it if its parent has exactly the
780  // descendant limit descendants.
781  //
782  // This allows protocols which rely on distrusting counterparties
783  // being able to broadcast descendants of an unconfirmed transaction
784  // to be secure by simply only having two immediately-spendable
785  // outputs - one for each counterparty. For more info on the uses for
786  // this, see https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2018-November/016518.html
787  if (nSize > EXTRA_DESCENDANT_TX_SIZE_LIMIT ||
788  !m_pool.CalculateMemPoolAncestors(*entry, setAncestors, 2, m_limit_ancestor_size, m_limit_descendants + 1, m_limit_descendant_size + EXTRA_DESCENDANT_TX_SIZE_LIMIT, dummy_err_string)) {
789  return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "too-long-mempool-chain", errString);
790  }
791  }
792 
793  // A transaction that spends outputs that would be replaced by it is invalid. Now
794  // that we have the set of all ancestors we can detect this
795  // pathological case by making sure setConflicts and setAncestors don't
796  // intersect.
797  for (CTxMemPool::txiter ancestorIt : setAncestors)
798  {
799  const uint256 &hashAncestor = ancestorIt->GetTx().GetHash();
800  if (setConflicts.count(hashAncestor))
801  {
802  return state.Invalid(TxValidationResult::TX_CONSENSUS, "bad-txns-spends-conflicting-tx",
803  strprintf("%s spends conflicting transaction %s",
804  hash.ToString(),
805  hashAncestor.ToString()));
806  }
807  }
808 
809  // Check if it's economically rational to mine this transaction rather
810  // than the ones it replaces.
811  nConflictingFees = 0;
812  nConflictingSize = 0;
813  uint64_t nConflictingCount = 0;
814 
815  // If we don't hold the lock allConflicting might be incomplete; the
816  // subsequent RemoveStaged() and addUnchecked() calls don't guarantee
817  // mempool consistency for us.
818  fReplacementTransaction = setConflicts.size();
819  if (fReplacementTransaction)
820  {
821  CFeeRate newFeeRate(nModifiedFees, nSize);
822  std::set<uint256> setConflictsParents;
823  const int maxDescendantsToVisit = 100;
824  for (const auto& mi : setIterConflicting) {
825  // Don't allow the replacement to reduce the feerate of the
826  // mempool.
827  //
828  // We usually don't want to accept replacements with lower
829  // feerates than what they replaced as that would lower the
830  // feerate of the next block. Requiring that the feerate always
831  // be increased is also an easy-to-reason about way to prevent
832  // DoS attacks via replacements.
833  //
834  // We only consider the feerates of transactions being directly
835  // replaced, not their indirect descendants. While that does
836  // mean high feerate children are ignored when deciding whether
837  // or not to replace, we do require the replacement to pay more
838  // overall fees too, mitigating most cases.
839  CFeeRate oldFeeRate(mi->GetModifiedFee(), mi->GetTxSize());
840  if (newFeeRate <= oldFeeRate)
841  {
842  return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "insufficient fee",
843  strprintf("rejecting replacement %s; new feerate %s <= old feerate %s",
844  hash.ToString(),
845  newFeeRate.ToString(),
846  oldFeeRate.ToString()));
847  }
848 
849  for (const CTxIn &txin : mi->GetTx().vin)
850  {
851  setConflictsParents.insert(txin.prevout.hash);
852  }
853 
854  nConflictingCount += mi->GetCountWithDescendants();
855  }
856  // This potentially overestimates the number of actual descendants
857  // but we just want to be conservative to avoid doing too much
858  // work.
859  if (nConflictingCount <= maxDescendantsToVisit) {
860  // If not too many to replace, then calculate the set of
861  // transactions that would have to be evicted
862  for (CTxMemPool::txiter it : setIterConflicting) {
863  m_pool.CalculateDescendants(it, allConflicting);
864  }
865  for (CTxMemPool::txiter it : allConflicting) {
866  nConflictingFees += it->GetModifiedFee();
867  nConflictingSize += it->GetTxSize();
868  }
869  } else {
870  return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "too many potential replacements",
871  strprintf("rejecting replacement %s; too many potential replacements (%d > %d)\n",
872  hash.ToString(),
873  nConflictingCount,
874  maxDescendantsToVisit));
875  }
876 
877  for (unsigned int j = 0; j < tx.vin.size(); j++)
878  {
879  // We don't want to accept replacements that require low
880  // feerate junk to be mined first. Ideally we'd keep track of
881  // the ancestor feerates and make the decision based on that,
882  // but for now requiring all new inputs to be confirmed works.
883  //
884  // Note that if you relax this to make RBF a little more useful,
885  // this may break the CalculateMempoolAncestors RBF relaxation,
886  // above. See the comment above the first CalculateMempoolAncestors
887  // call for more info.
888  if (!setConflictsParents.count(tx.vin[j].prevout.hash))
889  {
890  // Rather than check the UTXO set - potentially expensive -
891  // it's cheaper to just check if the new input refers to a
892  // tx that's in the mempool.
893  if (m_pool.exists(tx.vin[j].prevout.hash)) {
894  return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "replacement-adds-unconfirmed",
895  strprintf("replacement %s adds unconfirmed input, idx %d",
896  hash.ToString(), j));
897  }
898  }
899  }
900 
901  // The replacement must pay greater fees than the transactions it
902  // replaces - if we did the bandwidth used by those conflicting
903  // transactions would not be paid for.
904  if (nModifiedFees < nConflictingFees)
905  {
906  return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "insufficient fee",
907  strprintf("rejecting replacement %s, less fees than conflicting txs; %s < %s",
908  hash.ToString(), FormatMoney(nModifiedFees), FormatMoney(nConflictingFees)));
909  }
910 
911  // Finally in addition to paying more fees than the conflicts the
912  // new transaction must pay for its own bandwidth.
913  CAmount nDeltaFees = nModifiedFees - nConflictingFees;
914  if (nDeltaFees < ::incrementalRelayFee.GetFee(nSize))
915  {
916  return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "insufficient fee",
917  strprintf("rejecting replacement %s, not enough additional fees to relay; %s < %s",
918  hash.ToString(),
919  FormatMoney(nDeltaFees),
921  }
922  }
923  return true;
924 }
925 
926 bool MemPoolAccept::PolicyScriptChecks(ATMPArgs& args, Workspace& ws, PrecomputedTransactionData& txdata)
927 {
928  const CTransaction& tx = *ws.m_ptx;
929 
930  TxValidationState &state = args.m_state;
931 
932  constexpr unsigned int scriptVerifyFlags = STANDARD_SCRIPT_VERIFY_FLAGS;
933 
934  // Check input scripts and signatures.
935  // This is done last to help prevent CPU exhaustion denial-of-service attacks.
936  if (!CheckInputScripts(tx, state, m_view, scriptVerifyFlags, true, false, txdata)) {
937  // SCRIPT_VERIFY_CLEANSTACK requires SCRIPT_VERIFY_WITNESS, so we
938  // need to turn both off, and compare against just turning off CLEANSTACK
939  // to see if the failure is specifically due to witness validation.
940  TxValidationState state_dummy; // Want reported failures to be from first CheckInputScripts
941  if (!tx.HasWitness() && CheckInputScripts(tx, state_dummy, m_view, scriptVerifyFlags & ~(SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_CLEANSTACK), true, false, txdata) &&
942  !CheckInputScripts(tx, state_dummy, m_view, scriptVerifyFlags & ~SCRIPT_VERIFY_CLEANSTACK, true, false, txdata)) {
943  // Only the witness is missing, so the transaction itself may be fine.
945  state.GetRejectReason(), state.GetDebugMessage());
946  }
947  return false; // state filled in by CheckInputScripts
948  }
949 
950  return true;
951 }
952 
953 bool MemPoolAccept::ConsensusScriptChecks(ATMPArgs& args, Workspace& ws, PrecomputedTransactionData& txdata)
954 {
955  const CTransaction& tx = *ws.m_ptx;
956  const uint256& hash = ws.m_hash;
957 
958  TxValidationState &state = args.m_state;
959  const CChainParams& chainparams = args.m_chainparams;
960 
961  // Check again against the current block tip's script verification
962  // flags to cache our script execution flags. This is, of course,
963  // useless if the next block has different script flags from the
964  // previous one, but because the cache tracks script flags for us it
965  // will auto-invalidate and we'll just have a few blocks of extra
966  // misses on soft-fork activation.
967  //
968  // This is also useful in case of bugs in the standard flags that cause
969  // transactions to pass as valid when they're actually invalid. For
970  // instance the STRICTENC flag was incorrectly allowing certain
971  // CHECKSIG NOT scripts to pass, even though they were invalid.
972  //
973  // There is a similar check in CreateNewBlock() to prevent creating
974  // invalid blocks (using TestBlockValidity), however allowing such
975  // transactions into the mempool can be exploited as a DoS attack.
976  unsigned int currentBlockScriptVerifyFlags = GetBlockScriptFlags(::ChainActive().Tip(), chainparams.GetConsensus());
977  if (!CheckInputsFromMempoolAndCache(tx, state, m_view, m_pool, currentBlockScriptVerifyFlags, txdata)) {
978  return error("%s: BUG! PLEASE REPORT THIS! CheckInputScripts failed against latest-block but not STANDARD flags %s, %s",
979  __func__, hash.ToString(), state.ToString());
980  }
981 
982  return true;
983 }
984 
985 bool MemPoolAccept::Finalize(ATMPArgs& args, Workspace& ws)
986 {
987  const CTransaction& tx = *ws.m_ptx;
988  const uint256& hash = ws.m_hash;
989  TxValidationState &state = args.m_state;
990  const bool bypass_limits = args.m_bypass_limits;
991 
992  CTxMemPool::setEntries& allConflicting = ws.m_all_conflicting;
993  CTxMemPool::setEntries& setAncestors = ws.m_ancestors;
994  const CAmount& nModifiedFees = ws.m_modified_fees;
995  const CAmount& nConflictingFees = ws.m_conflicting_fees;
996  const size_t& nConflictingSize = ws.m_conflicting_size;
997  const bool fReplacementTransaction = ws.m_replacement_transaction;
998  std::unique_ptr<CTxMemPoolEntry>& entry = ws.m_entry;
999 
1000  // Remove conflicting transactions from the mempool
1001  for (CTxMemPool::txiter it : allConflicting)
1002  {
1003  LogPrint(BCLog::MEMPOOL, "replacing tx %s with %s for %s additional fees, %d delta bytes\n",
1004  it->GetTx().GetHash().ToString(),
1005  hash.ToString(),
1006  FormatMoney(nModifiedFees - nConflictingFees),
1007  (int)entry->GetTxSize() - (int)nConflictingSize);
1008  if (args.m_replaced_transactions)
1009  args.m_replaced_transactions->push_back(it->GetSharedTx());
1010  }
1011  m_pool.RemoveStaged(allConflicting, false, MemPoolRemovalReason::REPLACED);
1012 
1013  // This transaction should only count for fee estimation if:
1014  // - it isn't a BIP 125 replacement transaction (may not be widely supported)
1015  // - it's not being re-added during a reorg which bypasses typical mempool fee limits
1016  // - the node is not behind
1017  // - the transaction is not dependent on any other transactions in the mempool
1018  bool validForFeeEstimation = !fReplacementTransaction && !bypass_limits && IsCurrentForFeeEstimation() && m_pool.HasNoInputsOf(tx);
1019 
1020  // Store transaction in memory
1021  m_pool.addUnchecked(*entry, setAncestors, validForFeeEstimation);
1022 
1023  // trim mempool and check if tx was trimmed
1024  if (!bypass_limits) {
1025  LimitMempoolSize(m_pool, gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, std::chrono::hours{gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY)});
1026  if (!m_pool.exists(hash))
1027  return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "mempool full");
1028  }
1029  return true;
1030 }
1031 
1032 bool MemPoolAccept::AcceptSingleTransaction(const CTransactionRef& ptx, ATMPArgs& args)
1033 {
1034  AssertLockHeld(cs_main);
1035  LOCK(m_pool.cs); // mempool "read lock" (held through GetMainSignals().TransactionAddedToMempool())
1036 
1037  Workspace workspace(ptx);
1038 
1039  if (!PreChecks(args, workspace)) return false;
1040 
1041  // Only compute the precomputed transaction data if we need to verify
1042  // scripts (ie, other policy checks pass). We perform the inexpensive
1043  // checks first and avoid hashing and signature verification unless those
1044  // checks pass, to mitigate CPU exhaustion denial-of-service attacks.
1046 
1047  if (!PolicyScriptChecks(args, workspace, txdata)) return false;
1048 
1049  if (!ConsensusScriptChecks(args, workspace, txdata)) return false;
1050 
1051  // Tx was accepted, but not added
1052  if (args.m_test_accept) return true;
1053 
1054  if (!Finalize(args, workspace)) return false;
1055 
1057 
1058  return true;
1059 }
1060 
1061 } // anon namespace
1062 
1064 static bool AcceptToMemoryPoolWithTime(const CChainParams& chainparams, CTxMemPool& pool, TxValidationState &state, const CTransactionRef &tx,
1065  int64_t nAcceptTime, std::list<CTransactionRef>* plTxnReplaced,
1066  bool bypass_limits, const CAmount nAbsurdFee, bool test_accept) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
1067 {
1068  std::vector<COutPoint> coins_to_uncache;
1069  MemPoolAccept::ATMPArgs args { chainparams, state, nAcceptTime, plTxnReplaced, bypass_limits, nAbsurdFee, coins_to_uncache, test_accept };
1070  bool res = MemPoolAccept(pool).AcceptSingleTransaction(tx, args);
1071  if (!res) {
1072  // Remove coins that were not present in the coins cache before calling ATMPW;
1073  // this is to prevent memory DoS in case we receive a large number of
1074  // invalid transactions that attempt to overrun the in-memory coins cache
1075  // (`CCoinsViewCache::cacheCoins`).
1076 
1077  for (const COutPoint& hashTx : coins_to_uncache)
1078  ::ChainstateActive().CoinsTip().Uncache(hashTx);
1079  }
1080  // After we've (potentially) uncached entries, ensure our coins cache is still within its size limits
1081  BlockValidationState state_dummy;
1082  ::ChainstateActive().FlushStateToDisk(chainparams, state_dummy, FlushStateMode::PERIODIC);
1083  return res;
1084 }
1085 
1087  std::list<CTransactionRef>* plTxnReplaced,
1088  bool bypass_limits, const CAmount nAbsurdFee, bool test_accept)
1089 {
1090  const CChainParams& chainparams = Params();
1091  return AcceptToMemoryPoolWithTime(chainparams, pool, state, tx, GetTime(), plTxnReplaced, bypass_limits, nAbsurdFee, test_accept);
1092 }
1093 
1098 bool GetTransaction(const uint256& hash, CTransactionRef& txOut, const Consensus::Params& consensusParams, uint256& hashBlock, const CBlockIndex* const block_index)
1099 {
1100  LOCK(cs_main);
1101 
1102  if (!block_index) {
1103  CTransactionRef ptx = mempool.get(hash);
1104  if (ptx) {
1105  txOut = ptx;
1106  return true;
1107  }
1108 
1109  if (g_txindex) {
1110  return g_txindex->FindTx(hash, hashBlock, txOut);
1111  }
1112  } else {
1113  CBlock block;
1114  if (ReadBlockFromDisk(block, block_index, consensusParams)) {
1115  for (const auto& tx : block.vtx) {
1116  if (tx->GetHash() == hash) {
1117  txOut = tx;
1118  hashBlock = block_index->GetBlockHash();
1119  return true;
1120  }
1121  }
1122  }
1123  }
1124 
1125  return false;
1126 }
1127 
1128 
1129 
1130 
1131 
1132 
1134 //
1135 // CBlock and CBlockIndex
1136 //
1137 
1138 static bool WriteBlockToDisk(const CBlock& block, FlatFilePos& pos, const CMessageHeader::MessageStartChars& messageStart)
1139 {
1140  // Open history file to append
1142  if (fileout.IsNull())
1143  return error("WriteBlockToDisk: OpenBlockFile failed");
1144 
1145  // Write index header
1146  unsigned int nSize = GetSerializeSize(block, fileout.GetVersion());
1147  fileout << messageStart << nSize;
1148 
1149  // Write block
1150  long fileOutPos = ftell(fileout.Get());
1151  if (fileOutPos < 0)
1152  return error("WriteBlockToDisk: ftell failed");
1153  pos.nPos = (unsigned int)fileOutPos;
1154  fileout << block;
1155 
1156  return true;
1157 }
1158 
1159 bool ReadBlockFromDisk(CBlock& block, const FlatFilePos& pos, const Consensus::Params& consensusParams)
1160 {
1161  block.SetNull();
1162 
1163  // Open history file to read
1164  CAutoFile filein(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION);
1165  if (filein.IsNull())
1166  return error("ReadBlockFromDisk: OpenBlockFile failed for %s", pos.ToString());
1167 
1168  // Read block
1169  try {
1170  filein >> block;
1171  }
1172  catch (const std::exception& e) {
1173  return error("%s: Deserialize or I/O error - %s at %s", __func__, e.what(), pos.ToString());
1174  }
1175 
1176  // Check the header
1177  if (!CheckProofOfWork(block.GetHash(), block.nBits, consensusParams))
1178  return error("ReadBlockFromDisk: Errors in block header at %s", pos.ToString());
1179 
1180  return true;
1181 }
1182 
1183 bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex, const Consensus::Params& consensusParams)
1184 {
1185  FlatFilePos blockPos;
1186  {
1187  LOCK(cs_main);
1188  blockPos = pindex->GetBlockPos();
1189  }
1190 
1191  if (!ReadBlockFromDisk(block, blockPos, consensusParams))
1192  return false;
1193  if (block.GetHash() != pindex->GetBlockHash())
1194  return error("ReadBlockFromDisk(CBlock&, CBlockIndex*): GetHash() doesn't match index for %s at %s",
1195  pindex->ToString(), pindex->GetBlockPos().ToString());
1196  return true;
1197 }
1198 
1199 bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const FlatFilePos& pos, const CMessageHeader::MessageStartChars& message_start)
1200 {
1201  FlatFilePos hpos = pos;
1202  hpos.nPos -= 8; // Seek back 8 bytes for meta header
1203  CAutoFile filein(OpenBlockFile(hpos, true), SER_DISK, CLIENT_VERSION);
1204  if (filein.IsNull()) {
1205  return error("%s: OpenBlockFile failed for %s", __func__, pos.ToString());
1206  }
1207 
1208  try {
1210  unsigned int blk_size;
1211 
1212  filein >> blk_start >> blk_size;
1213 
1214  if (memcmp(blk_start, message_start, CMessageHeader::MESSAGE_START_SIZE)) {
1215  return error("%s: Block magic mismatch for %s: %s versus expected %s", __func__, pos.ToString(),
1216  HexStr(blk_start, blk_start + CMessageHeader::MESSAGE_START_SIZE),
1217  HexStr(message_start, message_start + CMessageHeader::MESSAGE_START_SIZE));
1218  }
1219 
1220  if (blk_size > MAX_SIZE) {
1221  return error("%s: Block data is larger than maximum deserialization size for %s: %s versus %s", __func__, pos.ToString(),
1222  blk_size, MAX_SIZE);
1223  }
1224 
1225  block.resize(blk_size); // Zeroing of memory is intentional here
1226  filein.read((char*)block.data(), blk_size);
1227  } catch(const std::exception& e) {
1228  return error("%s: Read from block file failed: %s for %s", __func__, e.what(), pos.ToString());
1229  }
1230 
1231  return true;
1232 }
1233 
1234 bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const CBlockIndex* pindex, const CMessageHeader::MessageStartChars& message_start)
1235 {
1236  FlatFilePos block_pos;
1237  {
1238  LOCK(cs_main);
1239  block_pos = pindex->GetBlockPos();
1240  }
1241 
1242  return ReadRawBlockFromDisk(block, block_pos, message_start);
1243 }
1244 
1245 CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams)
1246 {
1247  int halvings = nHeight / consensusParams.nSubsidyHalvingInterval;
1248  // Force block reward to zero when right shift is undefined.
1249  if (halvings >= 64)
1250  return 0;
1251 
1252  CAmount nSubsidy = 50 * COIN;
1253  // Subsidy is cut in half every 210,000 blocks which will occur approximately every 4 years.
1254  nSubsidy >>= halvings;
1255  return nSubsidy;
1256 }
1257 
1259  std::string ldb_name,
1260  size_t cache_size_bytes,
1261  bool in_memory,
1262  bool should_wipe) : m_dbview(
1263  GetDataDir() / ldb_name, cache_size_bytes, in_memory, should_wipe),
1264  m_catcherview(&m_dbview) {}
1265 
1266 void CoinsViews::InitCache()
1267 {
1268  m_cacheview = MakeUnique<CCoinsViewCache>(&m_catcherview);
1269 }
1270 
1271 CChainState::CChainState(BlockManager& blockman, uint256 from_snapshot_blockhash)
1272  : m_blockman(blockman),
1273  m_from_snapshot_blockhash(from_snapshot_blockhash) {}
1274 
1276  size_t cache_size_bytes,
1277  bool in_memory,
1278  bool should_wipe,
1279  std::string leveldb_name)
1280 {
1282  leveldb_name += "_" + m_from_snapshot_blockhash.ToString();
1283  }
1284 
1285  m_coins_views = MakeUnique<CoinsViews>(
1286  leveldb_name, cache_size_bytes, in_memory, should_wipe);
1287 }
1288 
1289 void CChainState::InitCoinsCache()
1290 {
1291  assert(m_coins_views != nullptr);
1292  m_coins_views->InitCache();
1293 }
1294 
1295 // Note that though this is marked const, we may end up modifying `m_cached_finished_ibd`, which
1296 // is a performance-related implementation detail. This function must be marked
1297 // `const` so that `CValidationInterface` clients (which are given a `const CChainState*`)
1298 // can call it.
1299 //
1301 {
1302  // Optimization: pre-test latch before taking the lock.
1303  if (m_cached_finished_ibd.load(std::memory_order_relaxed))
1304  return false;
1305 
1306  LOCK(cs_main);
1307  if (m_cached_finished_ibd.load(std::memory_order_relaxed))
1308  return false;
1309  if (fImporting || fReindex)
1310  return true;
1311  if (m_chain.Tip() == nullptr)
1312  return true;
1314  return true;
1315  if (m_chain.Tip()->GetBlockTime() < (GetTime() - nMaxTipAge))
1316  return true;
1317  LogPrintf("Leaving InitialBlockDownload (latching to false)\n");
1318  m_cached_finished_ibd.store(true, std::memory_order_relaxed);
1319  return false;
1320 }
1321 
1322 static CBlockIndex *pindexBestForkTip = nullptr, *pindexBestForkBase = nullptr;
1323 
1325 {
1326  LOCK(::cs_main);
1327  return g_chainman.m_blockman.m_block_index;
1328 }
1329 
1330 static void AlertNotify(const std::string& strMessage)
1331 {
1332  uiInterface.NotifyAlertChanged();
1333 #if HAVE_SYSTEM
1334  std::string strCmd = gArgs.GetArg("-alertnotify", "");
1335  if (strCmd.empty()) return;
1336 
1337  // Alert text should be plain ascii coming from a trusted source, but to
1338  // be safe we first strip anything not in safeChars, then add single quotes around
1339  // the whole string before passing it to the shell:
1340  std::string singleQuote("'");
1341  std::string safeStatus = SanitizeString(strMessage);
1342  safeStatus = singleQuote+safeStatus+singleQuote;
1343  boost::replace_all(strCmd, "%s", safeStatus);
1344 
1345  std::thread t(runCommand, strCmd);
1346  t.detach(); // thread runs free
1347 #endif
1348 }
1349 
1351 {
1352  AssertLockHeld(cs_main);
1353  // Before we get past initial download, we cannot reliably alert about forks
1354  // (we assume we don't get stuck on a fork before finishing our initial sync)
1356  return;
1357 
1358  // If our best fork is no longer within 72 blocks (+/- 12 hours if no one mines it)
1359  // of our head, drop it
1360  if (pindexBestForkTip && ::ChainActive().Height() - pindexBestForkTip->nHeight >= 72)
1361  pindexBestForkTip = nullptr;
1362 
1363  if (pindexBestForkTip || (pindexBestInvalid && pindexBestInvalid->nChainWork > ::ChainActive().Tip()->nChainWork + (GetBlockProof(*::ChainActive().Tip()) * 6)))
1364  {
1366  {
1367  std::string warning = std::string("'Warning: Large-work fork detected, forking after block ") +
1368  pindexBestForkBase->phashBlock->ToString() + std::string("'");
1369  AlertNotify(warning);
1370  }
1371  if (pindexBestForkTip && pindexBestForkBase)
1372  {
1373  LogPrintf("%s: Warning: Large valid fork found\n forking the chain at height %d (%s)\n lasting to height %d (%s).\nChain state database corruption likely.\n", __func__,
1375  pindexBestForkTip->nHeight, pindexBestForkTip->phashBlock->ToString());
1376  SetfLargeWorkForkFound(true);
1377  }
1378  else
1379  {
1380  LogPrintf("%s: Warning: Found invalid chain at least ~6 blocks longer than our best chain.\nChain state database corruption likely.\n", __func__);
1382  }
1383  }
1384  else
1385  {
1386  SetfLargeWorkForkFound(false);
1388  }
1389 }
1390 
1392 {
1393  AssertLockHeld(cs_main);
1394  // If we are on a fork that is sufficiently large, set a warning flag
1395  CBlockIndex* pfork = pindexNewForkTip;
1396  CBlockIndex* plonger = ::ChainActive().Tip();
1397  while (pfork && pfork != plonger)
1398  {
1399  while (plonger && plonger->nHeight > pfork->nHeight)
1400  plonger = plonger->pprev;
1401  if (pfork == plonger)
1402  break;
1403  pfork = pfork->pprev;
1404  }
1405 
1406  // We define a condition where we should warn the user about as a fork of at least 7 blocks
1407  // with a tip within 72 blocks (+/- 12 hours if no one mines it) of ours
1408  // We use 7 blocks rather arbitrarily as it represents just under 10% of sustained network
1409  // hash rate operating on the fork.
1410  // or a chain that is entirely longer than ours and invalid (note that this should be detected by both)
1411  // We define it this way because it allows us to only store the highest fork tip (+ base) which meets
1412  // the 7-block condition and from this always have the most-likely-to-cause-warning fork
1413  if (pfork && (!pindexBestForkTip || pindexNewForkTip->nHeight > pindexBestForkTip->nHeight) &&
1414  pindexNewForkTip->nChainWork - pfork->nChainWork > (GetBlockProof(*pfork) * 7) &&
1415  ::ChainActive().Height() - pindexNewForkTip->nHeight < 72)
1416  {
1417  pindexBestForkTip = pindexNewForkTip;
1418  pindexBestForkBase = pfork;
1419  }
1420 
1422 }
1423 
1424 // Called both upon regular invalid block discovery *and* InvalidateBlock
1426 {
1427  if (!pindexBestInvalid || pindexNew->nChainWork > pindexBestInvalid->nChainWork)
1428  pindexBestInvalid = pindexNew;
1429  if (pindexBestHeader != nullptr && pindexBestHeader->GetAncestor(pindexNew->nHeight) == pindexNew) {
1430  pindexBestHeader = ::ChainActive().Tip();
1431  }
1432 
1433  LogPrintf("%s: invalid block=%s height=%d log2_work=%.8g date=%s\n", __func__,
1434  pindexNew->GetBlockHash().ToString(), pindexNew->nHeight,
1435  log(pindexNew->nChainWork.getdouble())/log(2.0), FormatISO8601DateTime(pindexNew->GetBlockTime()));
1436  CBlockIndex *tip = ::ChainActive().Tip();
1437  assert (tip);
1438  LogPrintf("%s: current best=%s height=%d log2_work=%.8g date=%s\n", __func__,
1439  tip->GetBlockHash().ToString(), ::ChainActive().Height(), log(tip->nChainWork.getdouble())/log(2.0),
1442 }
1443 
1444 // Same as InvalidChainFound, above, except not called directly from InvalidateBlock,
1445 // which does its own setBlockIndexCandidates manageent.
1448  pindex->nStatus |= BLOCK_FAILED_VALID;
1449  m_blockman.m_failed_blocks.insert(pindex);
1450  setDirtyBlockIndex.insert(pindex);
1451  setBlockIndexCandidates.erase(pindex);
1452  InvalidChainFound(pindex);
1453  }
1454 }
1455 
1456 void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, CTxUndo &txundo, int nHeight)
1457 {
1458  // mark inputs spent
1459  if (!tx.IsCoinBase()) {
1460  txundo.vprevout.reserve(tx.vin.size());
1461  for (const CTxIn &txin : tx.vin) {
1462  txundo.vprevout.emplace_back();
1463  bool is_spent = inputs.SpendCoin(txin.prevout, &txundo.vprevout.back());
1464  assert(is_spent);
1465  }
1466  }
1467  // add outputs
1468  AddCoins(inputs, tx, nHeight);
1469 }
1470 
1471 void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, int nHeight)
1472 {
1473  CTxUndo txundo;
1474  UpdateCoins(tx, inputs, txundo, nHeight);
1475 }
1476 
1478  const CScript &scriptSig = ptxTo->vin[nIn].scriptSig;
1479  const CScriptWitness *witness = &ptxTo->vin[nIn].scriptWitness;
1480  return VerifyScript(scriptSig, m_tx_out.scriptPubKey, witness, nFlags, CachingTransactionSignatureChecker(ptxTo, nIn, m_tx_out.nValue, cacheStore, *txdata), &error);
1481 }
1482 
1484 {
1485  LOCK(cs_main);
1486  CBlockIndex* pindexPrev = LookupBlockIndex(inputs.GetBestBlock());
1487  return pindexPrev->nHeight + 1;
1488 }
1489 
1490 
1493 
1495  // Setup the salted hasher
1496  uint256 nonce = GetRandHash();
1497  // We want the nonce to be 64 bytes long to force the hasher to process
1498  // this chunk, which makes later hash computations more efficient. We
1499  // just write our 32-byte entropy twice to fill the 64 bytes.
1500  g_scriptExecutionCacheHasher.Write(nonce.begin(), 32);
1501  g_scriptExecutionCacheHasher.Write(nonce.begin(), 32);
1502  // nMaxCacheSize is unsigned. If -maxsigcachesize is set to zero,
1503  // setup_bytes creates the minimum possible cache (2 elements).
1504  size_t nMaxCacheSize = std::min(std::max((int64_t)0, gArgs.GetArg("-maxsigcachesize", DEFAULT_MAX_SIG_CACHE_SIZE) / 2), MAX_MAX_SIG_CACHE_SIZE) * ((size_t) 1 << 20);
1505  size_t nElems = g_scriptExecutionCache.setup_bytes(nMaxCacheSize);
1506  LogPrintf("Using %zu MiB out of %zu/2 requested for script execution cache, able to store %zu elements\n",
1507  (nElems*sizeof(uint256)) >>20, (nMaxCacheSize*2)>>20, nElems);
1508 }
1509 
1529 bool CheckInputScripts(const CTransaction& tx, TxValidationState &state, const CCoinsViewCache &inputs, unsigned int flags, bool cacheSigStore, bool cacheFullScriptStore, PrecomputedTransactionData& txdata, std::vector<CScriptCheck> *pvChecks) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
1530 {
1531  if (tx.IsCoinBase()) return true;
1532 
1533  if (pvChecks) {
1534  pvChecks->reserve(tx.vin.size());
1535  }
1536 
1537  // First check if script executions have been cached with the same
1538  // flags. Note that this assumes that the inputs provided are
1539  // correct (ie that the transaction hash which is in tx's prevouts
1540  // properly commits to the scriptPubKey in the inputs view of that
1541  // transaction).
1542  uint256 hashCacheEntry;
1544  hasher.Write(tx.GetWitnessHash().begin(), 32).Write((unsigned char*)&flags, sizeof(flags)).Finalize(hashCacheEntry.begin());
1545  AssertLockHeld(cs_main); //TODO: Remove this requirement by making CuckooCache not require external locks
1546  if (g_scriptExecutionCache.contains(hashCacheEntry, !cacheFullScriptStore)) {
1547  return true;
1548  }
1549 
1550  if (!txdata.m_ready) {
1551  txdata.Init(tx);
1552  }
1553 
1554  for (unsigned int i = 0; i < tx.vin.size(); i++) {
1555  const COutPoint &prevout = tx.vin[i].prevout;
1556  const Coin& coin = inputs.AccessCoin(prevout);
1557  assert(!coin.IsSpent());
1558 
1559  // We very carefully only pass in things to CScriptCheck which
1560  // are clearly committed to by tx' witness hash. This provides
1561  // a sanity check that our caching is not introducing consensus
1562  // failures through additional data in, eg, the coins being
1563  // spent being checked as a part of CScriptCheck.
1564 
1565  // Verify signature
1566  CScriptCheck check(coin.out, tx, i, flags, cacheSigStore, &txdata);
1567  if (pvChecks) {
1568  pvChecks->push_back(CScriptCheck());
1569  check.swap(pvChecks->back());
1570  } else if (!check()) {
1572  // Check whether the failure was caused by a
1573  // non-mandatory script verification check, such as
1574  // non-standard DER encodings or non-null dummy
1575  // arguments; if so, ensure we return NOT_STANDARD
1576  // instead of CONSENSUS to avoid downstream users
1577  // splitting the network between upgraded and
1578  // non-upgraded nodes by banning CONSENSUS-failing
1579  // data providers.
1580  CScriptCheck check2(coin.out, tx, i,
1581  flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, cacheSigStore, &txdata);
1582  if (check2())
1583  return state.Invalid(TxValidationResult::TX_NOT_STANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check.GetScriptError())));
1584  }
1585  // MANDATORY flag failures correspond to
1586  // TxValidationResult::TX_CONSENSUS. Because CONSENSUS
1587  // failures are the most serious case of validation
1588  // failures, we may need to consider using
1589  // RECENT_CONSENSUS_CHANGE for any script failure that
1590  // could be due to non-upgraded nodes which we may want to
1591  // support, to avoid splitting the network (but this
1592  // depends on the details of how net_processing handles
1593  // such errors).
1594  return state.Invalid(TxValidationResult::TX_CONSENSUS, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(check.GetScriptError())));
1595  }
1596  }
1597 
1598  if (cacheFullScriptStore && !pvChecks) {
1599  // We executed all of the provided scripts, and were told to
1600  // cache the result. Do so now.
1601  g_scriptExecutionCache.insert(hashCacheEntry);
1602  }
1603 
1604  return true;
1605 }
1606 
1607 static bool UndoWriteToDisk(const CBlockUndo& blockundo, FlatFilePos& pos, const uint256& hashBlock, const CMessageHeader::MessageStartChars& messageStart)
1608 {
1609  // Open history file to append
1611  if (fileout.IsNull())
1612  return error("%s: OpenUndoFile failed", __func__);
1613 
1614  // Write index header
1615  unsigned int nSize = GetSerializeSize(blockundo, fileout.GetVersion());
1616  fileout << messageStart << nSize;
1617 
1618  // Write undo data
1619  long fileOutPos = ftell(fileout.Get());
1620  if (fileOutPos < 0)
1621  return error("%s: ftell failed", __func__);
1622  pos.nPos = (unsigned int)fileOutPos;
1623  fileout << blockundo;
1624 
1625  // calculate & write checksum
1627  hasher << hashBlock;
1628  hasher << blockundo;
1629  fileout << hasher.GetHash();
1630 
1631  return true;
1632 }
1633 
1634 bool UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex* pindex)
1635 {
1636  FlatFilePos pos = pindex->GetUndoPos();
1637  if (pos.IsNull()) {
1638  return error("%s: no undo data available", __func__);
1639  }
1640 
1641  // Open history file to read
1642  CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION);
1643  if (filein.IsNull())
1644  return error("%s: OpenUndoFile failed", __func__);
1645 
1646  // Read block
1647  uint256 hashChecksum;
1648  CHashVerifier<CAutoFile> verifier(&filein); // We need a CHashVerifier as reserializing may lose data
1649  try {
1650  verifier << pindex->pprev->GetBlockHash();
1651  verifier >> blockundo;
1652  filein >> hashChecksum;
1653  }
1654  catch (const std::exception& e) {
1655  return error("%s: Deserialize or I/O error - %s", __func__, e.what());
1656  }
1657 
1658  // Verify checksum
1659  if (hashChecksum != verifier.GetHash())
1660  return error("%s: Checksum mismatch", __func__);
1661 
1662  return true;
1663 }
1664 
1666 static bool AbortNode(const std::string& strMessage, bilingual_str user_message = bilingual_str())
1667 {
1668  SetMiscWarning(Untranslated(strMessage));
1669  LogPrintf("*** %s\n", strMessage);
1670  if (user_message.empty()) {
1671  user_message = _("A fatal internal error occurred, see debug.log for details");
1672  }
1673  AbortError(user_message);
1674  StartShutdown();
1675  return false;
1676 }
1677 
1678 static bool AbortNode(BlockValidationState& state, const std::string& strMessage, const bilingual_str& userMessage = bilingual_str())
1679 {
1680  AbortNode(strMessage, userMessage);
1681  return state.Error(strMessage);
1682 }
1683 
1691 int ApplyTxInUndo(Coin&& undo, CCoinsViewCache& view, const COutPoint& out)
1692 {
1693  bool fClean = true;
1694 
1695  if (view.HaveCoin(out)) fClean = false; // overwriting transaction output
1696 
1697  if (undo.nHeight == 0) {
1698  // Missing undo metadata (height and coinbase). Older versions included this
1699  // information only in undo records for the last spend of a transactions'
1700  // outputs. This implies that it must be present for some other output of the same tx.
1701  const Coin& alternate = AccessByTxid(view, out.hash);
1702  if (!alternate.IsSpent()) {
1703  undo.nHeight = alternate.nHeight;
1704  undo.fCoinBase = alternate.fCoinBase;
1705  } else {
1706  return DISCONNECT_FAILED; // adding output for transaction without known metadata
1707  }
1708  }
1709  // If the coin already exists as an unspent coin in the cache, then the
1710  // possible_overwrite parameter to AddCoin must be set to true. We have
1711  // already checked whether an unspent coin exists above using HaveCoin, so
1712  // we don't need to guess. When fClean is false, an unspent coin already
1713  // existed and it is an overwrite.
1714  view.AddCoin(out, std::move(undo), !fClean);
1715 
1716  return fClean ? DISCONNECT_OK : DISCONNECT_UNCLEAN;
1717 }
1718 
1722 {
1723  bool fClean = true;
1724 
1725  CBlockUndo blockUndo;
1726  if (!UndoReadFromDisk(blockUndo, pindex)) {
1727  error("DisconnectBlock(): failure reading undo data");
1728  return DISCONNECT_FAILED;
1729  }
1730 
1731  if (blockUndo.vtxundo.size() + 1 != block.vtx.size()) {
1732  error("DisconnectBlock(): block and undo data inconsistent");
1733  return DISCONNECT_FAILED;
1734  }
1735 
1736  // undo transactions in reverse order
1737  for (int i = block.vtx.size() - 1; i >= 0; i--) {
1738  const CTransaction &tx = *(block.vtx[i]);
1739  uint256 hash = tx.GetHash();
1740  bool is_coinbase = tx.IsCoinBase();
1741 
1742  // Check that all outputs are available and match the outputs in the block itself
1743  // exactly.
1744  for (size_t o = 0; o < tx.vout.size(); o++) {
1745  if (!tx.vout[o].scriptPubKey.IsUnspendable()) {
1746  COutPoint out(hash, o);
1747  Coin coin;
1748  bool is_spent = view.SpendCoin(out, &coin);
1749  if (!is_spent || tx.vout[o] != coin.out || pindex->nHeight != coin.nHeight || is_coinbase != coin.fCoinBase) {
1750  fClean = false; // transaction output mismatch
1751  }
1752  }
1753  }
1754 
1755  // restore inputs
1756  if (i > 0) { // not coinbases
1757  CTxUndo &txundo = blockUndo.vtxundo[i-1];
1758  if (txundo.vprevout.size() != tx.vin.size()) {
1759  error("DisconnectBlock(): transaction and undo data inconsistent");
1760  return DISCONNECT_FAILED;
1761  }
1762  for (unsigned int j = tx.vin.size(); j-- > 0;) {
1763  const COutPoint &out = tx.vin[j].prevout;
1764  int res = ApplyTxInUndo(std::move(txundo.vprevout[j]), view, out);
1765  if (res == DISCONNECT_FAILED) return DISCONNECT_FAILED;
1766  fClean = fClean && res != DISCONNECT_UNCLEAN;
1767  }
1768  // At this point, all of txundo.vprevout should have been moved out.
1769  }
1770  }
1771 
1772  // move best block pointer to prevout block
1773  view.SetBestBlock(pindex->pprev->GetBlockHash());
1774 
1775  return fClean ? DISCONNECT_OK : DISCONNECT_UNCLEAN;
1776 }
1777 
1778 static void FlushUndoFile(int block_file, bool finalize = false)
1779 {
1780  FlatFilePos undo_pos_old(block_file, vinfoBlockFile[block_file].nUndoSize);
1781  if (!UndoFileSeq().Flush(undo_pos_old, finalize)) {
1782  AbortNode("Flushing undo file to disk failed. This is likely the result of an I/O error.");
1783  }
1784 }
1785 
1786 static void FlushBlockFile(bool fFinalize = false, bool finalize_undo = false)
1787 {
1788  LOCK(cs_LastBlockFile);
1789  FlatFilePos block_pos_old(nLastBlockFile, vinfoBlockFile[nLastBlockFile].nSize);
1790  if (!BlockFileSeq().Flush(block_pos_old, fFinalize)) {
1791  AbortNode("Flushing block file to disk failed. This is likely the result of an I/O error.");
1792  }
1793  // we do not always flush the undo file, as the chain tip may be lagging behind the incoming blocks,
1794  // e.g. during IBD or a sync after a node going offline
1795  if (!fFinalize || finalize_undo) FlushUndoFile(nLastBlockFile, finalize_undo);
1796 }
1797 
1798 static bool FindUndoPos(BlockValidationState &state, int nFile, FlatFilePos &pos, unsigned int nAddSize);
1799 
1800 static bool WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex* pindex, const CChainParams& chainparams)
1801 {
1802  // Write undo information to disk
1803  if (pindex->GetUndoPos().IsNull()) {
1804  FlatFilePos _pos;
1805  if (!FindUndoPos(state, pindex->nFile, _pos, ::GetSerializeSize(blockundo, CLIENT_VERSION) + 40))
1806  return error("ConnectBlock(): FindUndoPos failed");
1807  if (!UndoWriteToDisk(blockundo, _pos, pindex->pprev->GetBlockHash(), chainparams.MessageStart()))
1808  return AbortNode(state, "Failed to write undo data");
1809  // rev files are written in block height order, whereas blk files are written as blocks come in (often out of order)
1810  // we want to flush the rev (undo) file once we've written the last block, which is indicated by the last height
1811  // in the block file info as below; note that this does not catch the case where the undo writes are keeping up
1812  // with the block writes (usually when a synced up node is getting newly mined blocks) -- this case is caught in
1813  // the FindBlockPos function
1814  if (_pos.nFile < nLastBlockFile && static_cast<uint32_t>(pindex->nHeight) == vinfoBlockFile[_pos.nFile].nHeightLast) {
1815  FlushUndoFile(_pos.nFile, true);
1816  }
1817 
1818  // update nUndoPos in block index
1819  pindex->nUndoPos = _pos.nPos;
1820  pindex->nStatus |= BLOCK_HAVE_UNDO;
1821  setDirtyBlockIndex.insert(pindex);
1822  }
1823 
1824  return true;
1825 }
1826 
1828 
1829 void ThreadScriptCheck(int worker_num) {
1830  util::ThreadRename(strprintf("scriptch.%i", worker_num));
1831  scriptcheckqueue.Thread();
1832 }
1833 
1835 
1836 int32_t ComputeBlockVersion(const CBlockIndex* pindexPrev, const Consensus::Params& params)
1837 {
1838  LOCK(cs_main);
1839  int32_t nVersion = VERSIONBITS_TOP_BITS;
1840 
1841  for (int i = 0; i < (int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS; i++) {
1842  ThresholdState state = VersionBitsState(pindexPrev, params, static_cast<Consensus::DeploymentPos>(i), versionbitscache);
1843  if (state == ThresholdState::LOCKED_IN || state == ThresholdState::STARTED) {
1844  nVersion |= VersionBitsMask(params, static_cast<Consensus::DeploymentPos>(i));
1845  }
1846  }
1847 
1848  return nVersion;
1849 }
1850 
1855 {
1856 private:
1857  int bit;
1858 
1859 public:
1860  explicit WarningBitsConditionChecker(int bitIn) : bit(bitIn) {}
1861 
1862  int64_t BeginTime(const Consensus::Params& params) const override { return 0; }
1863  int64_t EndTime(const Consensus::Params& params) const override { return std::numeric_limits<int64_t>::max(); }
1864  int Period(const Consensus::Params& params) const override { return params.nMinerConfirmationWindow; }
1865  int Threshold(const Consensus::Params& params) const override { return params.nRuleChangeActivationThreshold; }
1866 
1867  bool Condition(const CBlockIndex* pindex, const Consensus::Params& params) const override
1868  {
1869  return pindex->nHeight >= params.MinBIP9WarningHeight &&
1871  ((pindex->nVersion >> bit) & 1) != 0 &&
1872  ((ComputeBlockVersion(pindex->pprev, params) >> bit) & 1) == 0;
1873  }
1874 };
1875 
// Per-bit caches of versionbits threshold state for the warning checker above.
static ThresholdConditionCache warningcache[VERSIONBITS_NUM_BITS] GUARDED_BY(cs_main);
1877 
1878 // 0.13.0 was shipped with a segwit deployment defined for testnet, but not for
1879 // mainnet. We no longer need to support disabling the segwit deployment
1880 // except for testing purposes, due to limitations of the functional test
1881 // environment. See test/functional/p2p-segwit.py.
1882 static bool IsScriptWitnessEnabled(const Consensus::Params& params)
1883 {
1884  return params.SegwitHeight != std::numeric_limits<int>::max();
1885 }
1886 
1887 static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consensus::Params& consensusparams) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
1888  AssertLockHeld(cs_main);
1889 
1890  unsigned int flags = SCRIPT_VERIFY_NONE;
1891 
1892  // BIP16 didn't become active until Apr 1 2012 (on mainnet, and
1893  // retroactively applied to testnet)
1894  // However, only one historical block violated the P2SH rules (on both
1895  // mainnet and testnet), so for simplicity, always leave P2SH
1896  // on except for the one violating block.
1897  if (consensusparams.BIP16Exception.IsNull() || // no bip16 exception on this chain
1898  pindex->phashBlock == nullptr || // this is a new candidate block, eg from TestBlockValidity()
1899  *pindex->phashBlock != consensusparams.BIP16Exception) // this block isn't the historical exception
1900  {
1901  flags |= SCRIPT_VERIFY_P2SH;
1902  }
1903 
1904  // Enforce WITNESS rules whenever P2SH is in effect (and the segwit
1905  // deployment is defined).
1906  if (flags & SCRIPT_VERIFY_P2SH && IsScriptWitnessEnabled(consensusparams)) {
1907  flags |= SCRIPT_VERIFY_WITNESS;
1908  }
1909 
1910  // Start enforcing the DERSIG (BIP66) rule
1911  if (pindex->nHeight >= consensusparams.BIP66Height) {
1912  flags |= SCRIPT_VERIFY_DERSIG;
1913  }
1914 
1915  // Start enforcing CHECKLOCKTIMEVERIFY (BIP65) rule
1916  if (pindex->nHeight >= consensusparams.BIP65Height) {
1918  }
1919 
1920  // Start enforcing BIP112 (CHECKSEQUENCEVERIFY)
1921  if (pindex->nHeight >= consensusparams.CSVHeight) {
1923  }
1924 
1925  // Start enforcing BIP147 NULLDUMMY (activated simultaneously with segwit)
1926  if (IsWitnessEnabled(pindex->pprev, consensusparams)) {
1927  flags |= SCRIPT_VERIFY_NULLDUMMY;
1928  }
1929 
1930  return flags;
1931 }
1932 
1933 
1934 
// Cumulative benchmark counters, in microseconds, for the phases of block
// connection; reported below via LogPrint(BCLog::BENCH, ...). nBlocksTotal
// counts connected blocks so per-block averages can be computed.
static int64_t nTimeCheck = 0;     // CheckBlock / sanity checks
static int64_t nTimeForks = 0;     // softfork flag computation
static int64_t nTimeVerify = 0;    // script/signature verification
static int64_t nTimeConnect = 0;   // transaction connection / UTXO updates
static int64_t nTimeIndex = 0;     // index writing
static int64_t nTimeCallbacks = 0; // validation-interface callbacks
static int64_t nTimeTotal = 0;     // presumably a grand total — not used in this chunk
static int64_t nBlocksTotal = 0;
1943 
1948  CCoinsViewCache& view, const CChainParams& chainparams, bool fJustCheck)
1949 {
1950  AssertLockHeld(cs_main);
1951  assert(pindex);
1952  assert(*pindex->phashBlock == block.GetHash());
1953  int64_t nTimeStart = GetTimeMicros();
1954 
1955  // Check it again in case a previous version let a bad block in
1956  // NOTE: We don't currently (re-)invoke ContextualCheckBlock() or
1957  // ContextualCheckBlockHeader() here. This means that if we add a new
1958  // consensus rule that is enforced in one of those two functions, then we
1959  // may have let in a block that violates the rule prior to updating the
1960  // software, and we would NOT be enforcing the rule here. Fully solving
1961  // upgrade from one software version to the next after a consensus rule
1962  // change is potentially tricky and issue-specific (see RewindBlockIndex()
1963  // for one general approach that was used for BIP 141 deployment).
1964  // Also, currently the rule against blocks more than 2 hours in the future
1965  // is enforced in ContextualCheckBlockHeader(); we wouldn't want to
1966  // re-enforce that rule here (at least until we make it impossible for
1967  // GetAdjustedTime() to go backward).
1968  if (!CheckBlock(block, state, chainparams.GetConsensus(), !fJustCheck, !fJustCheck)) {
1970  // We don't write down blocks to disk if they may have been
1971  // corrupted, so this should be impossible unless we're having hardware
1972  // problems.
1973  return AbortNode(state, "Corrupt block found indicating potential hardware failure; shutting down");
1974  }
1975  return error("%s: Consensus::CheckBlock: %s", __func__, state.ToString());
1976  }
1977 
1978  // verify that the view's current state corresponds to the previous block
1979  uint256 hashPrevBlock = pindex->pprev == nullptr ? uint256() : pindex->pprev->GetBlockHash();
1980  assert(hashPrevBlock == view.GetBestBlock());
1981 
1982  nBlocksTotal++;
1983 
1984  // Special case for the genesis block, skipping connection of its transactions
1985  // (its coinbase is unspendable)
1986  if (block.GetHash() == chainparams.GetConsensus().hashGenesisBlock) {
1987  if (!fJustCheck)
1988  view.SetBestBlock(pindex->GetBlockHash());
1989  return true;
1990  }
1991 
1992  bool fScriptChecks = true;
1993  if (!hashAssumeValid.IsNull()) {
1994  // We've been configured with the hash of a block which has been externally verified to have a valid history.
1995  // A suitable default value is included with the software and updated from time to time. Because validity
1996  // relative to a piece of software is an objective fact these defaults can be easily reviewed.
1997  // This setting doesn't force the selection of any particular chain but makes validating some faster by
1998  // effectively caching the result of part of the verification.
1999  BlockMap::const_iterator it = m_blockman.m_block_index.find(hashAssumeValid);
2000  if (it != m_blockman.m_block_index.end()) {
2001  if (it->second->GetAncestor(pindex->nHeight) == pindex &&
2002  pindexBestHeader->GetAncestor(pindex->nHeight) == pindex &&
2003  pindexBestHeader->nChainWork >= nMinimumChainWork) {
2004  // This block is a member of the assumed verified chain and an ancestor of the best header.
2005  // Script verification is skipped when connecting blocks under the
2006  // assumevalid block. Assuming the assumevalid block is valid this
2007  // is safe because block merkle hashes are still computed and checked,
2008  // Of course, if an assumed valid block is invalid due to false scriptSigs
2009  // this optimization would allow an invalid chain to be accepted.
2010  // The equivalent time check discourages hash power from extorting the network via DOS attack
2011  // into accepting an invalid block through telling users they must manually set assumevalid.
2012  // Requiring a software change or burying the invalid block, regardless of the setting, makes
2013  // it hard to hide the implication of the demand. This also avoids having release candidates
2014  // that are hardly doing any signature verification at all in testing without having to
2015  // artificially set the default assumed verified block further back.
2016  // The test against nMinimumChainWork prevents the skipping when denied access to any chain at
2017  // least as good as the expected chain.
2018  fScriptChecks = (GetBlockProofEquivalentTime(*pindexBestHeader, *pindex, *pindexBestHeader, chainparams.GetConsensus()) <= 60 * 60 * 24 * 7 * 2);
2019  }
2020  }
2021  }
2022 
2023  int64_t nTime1 = GetTimeMicros(); nTimeCheck += nTime1 - nTimeStart;
2024  LogPrint(BCLog::BENCH, " - Sanity checks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime1 - nTimeStart), nTimeCheck * MICRO, nTimeCheck * MILLI / nBlocksTotal);
2025 
2026  // Do not allow blocks that contain transactions which 'overwrite' older transactions,
2027  // unless those are already completely spent.
2028  // If such overwrites are allowed, coinbases and transactions depending upon those
2029  // can be duplicated to remove the ability to spend the first instance -- even after
2030  // being sent to another address.
2031  // See BIP30, CVE-2012-1909, and http://r6.ca/blog/20120206T005236Z.html for more information.
2032  // This logic is not necessary for memory pool transactions, as AcceptToMemoryPool
2033  // already refuses previously-known transaction ids entirely.
2034  // This rule was originally applied to all blocks with a timestamp after March 15, 2012, 0:00 UTC.
2035  // Now that the whole chain is irreversibly beyond that time it is applied to all blocks except the
2036  // two in the chain that violate it. This prevents exploiting the issue against nodes during their
2037  // initial block download.
2038  bool fEnforceBIP30 = !((pindex->nHeight==91842 && pindex->GetBlockHash() == uint256S("0x00000000000a4d0a398161ffc163c503763b1f4360639393e0e4c8e300e0caec")) ||
2039  (pindex->nHeight==91880 && pindex->GetBlockHash() == uint256S("0x00000000000743f190a18c5577a3c2d2a1f610ae9601ac046a38084ccb7cd721")));
2040 
2041  // Once BIP34 activated it was not possible to create new duplicate coinbases and thus other than starting
2042  // with the 2 existing duplicate coinbase pairs, not possible to create overwriting txs. But by the
2043  // time BIP34 activated, in each of the existing pairs the duplicate coinbase had overwritten the first
2044  // before the first had been spent. Since those coinbases are sufficiently buried it's no longer possible to create further
2045  // duplicate transactions descending from the known pairs either.
2046  // If we're on the known chain at height greater than where BIP34 activated, we can save the db accesses needed for the BIP30 check.
2047 
2048  // BIP34 requires that a block at height X (block X) has its coinbase
2049  // scriptSig start with a CScriptNum of X (indicated height X). The above
2050  // logic of no longer requiring BIP30 once BIP34 activates is flawed in the
2051  // case that there is a block X before the BIP34 height of 227,931 which has
2052  // an indicated height Y where Y is greater than X. The coinbase for block
2053  // X would also be a valid coinbase for block Y, which could be a BIP30
2054  // violation. An exhaustive search of all mainnet coinbases before the
2055  // BIP34 height which have an indicated height greater than the block height
2056  // reveals many occurrences. The 3 lowest indicated heights found are
2057  // 209,921, 490,897, and 1,983,702 and thus coinbases for blocks at these 3
2058  // heights would be the first opportunity for BIP30 to be violated.
2059 
2060  // The search reveals a great many blocks which have an indicated height
2061  // greater than 1,983,702, so we simply remove the optimization to skip
2062  // BIP30 checking for blocks at height 1,983,702 or higher. Before we reach
2063  // that block in another 25 years or so, we should take advantage of a
2064  // future consensus change to do a new and improved version of BIP34 that
2065  // will actually prevent ever creating any duplicate coinbases in the
2066  // future.
2067  static constexpr int BIP34_IMPLIES_BIP30_LIMIT = 1983702;
2068 
2069  // There is no potential to create a duplicate coinbase at block 209,921
2070  // because this is still before the BIP34 height and so explicit BIP30
2071  // checking is still active.
2072 
2073  // The final case is block 176,684 which has an indicated height of
2074  // 490,897. Unfortunately, this issue was not discovered until about 2 weeks
2075  // before block 490,897 so there was not much opportunity to address this
2076  // case other than to carefully analyze it and determine it would not be a
2077  // problem. Block 490,897 was, in fact, mined with a different coinbase than
2078  // block 176,684, but it is important to note that even if it hadn't been or
2079  // is remined on an alternate fork with a duplicate coinbase, we would still
2080  // not run into a BIP30 violation. This is because the coinbase for 176,684
2081  // is spent in block 185,956 in transaction
2082  // d4f7fbbf92f4a3014a230b2dc70b8058d02eb36ac06b4a0736d9d60eaa9e8781. This
2083  // spending transaction can't be duplicated because it also spends coinbase
2084  // 0328dd85c331237f18e781d692c92de57649529bd5edf1d01036daea32ffde29. This
2085  // coinbase has an indicated height of over 4.2 billion, and wouldn't be
2086  // duplicatable until that height, and it's currently impossible to create a
2087  // chain that long. Nevertheless we may wish to consider a future soft fork
2088  // which retroactively prevents block 490,897 from creating a duplicate
2089  // coinbase. The two historical BIP30 violations often provide a confusing
2090  // edge case when manipulating the UTXO and it would be simpler not to have
2091  // another edge case to deal with.
2092 
2093  // testnet3 has no blocks before the BIP34 height with indicated heights
2094  // post BIP34 before approximately height 486,000,000 and presumably will
2095  // be reset before it reaches block 1,983,702 and starts doing unnecessary
2096  // BIP30 checking again.
2097  assert(pindex->pprev);
2098  CBlockIndex *pindexBIP34height = pindex->pprev->GetAncestor(chainparams.GetConsensus().BIP34Height);
2099  //Only continue to enforce if we're below BIP34 activation height or the block hash at that height doesn't correspond.
2100  fEnforceBIP30 = fEnforceBIP30 && (!pindexBIP34height || !(pindexBIP34height->GetBlockHash() == chainparams.GetConsensus().BIP34Hash));
2101 
2102  // TODO: Remove BIP30 checking from block height 1,983,702 on, once we have a
2103  // consensus change that ensures coinbases at those heights can not
2104  // duplicate earlier coinbases.
2105  if (fEnforceBIP30 || pindex->nHeight >= BIP34_IMPLIES_BIP30_LIMIT) {
2106  for (const auto& tx : block.vtx) {
2107  for (size_t o = 0; o < tx->vout.size(); o++) {
2108  if (view.HaveCoin(COutPoint(tx->GetHash(), o))) {
2109  LogPrintf("ERROR: ConnectBlock(): tried to overwrite transaction\n");
2110  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-BIP30");
2111  }
2112  }
2113  }
2114  }
2115 
2116  // Start enforcing BIP68 (sequence locks)
2117  int nLockTimeFlags = 0;
2118  if (pindex->nHeight >= chainparams.GetConsensus().CSVHeight) {
2119  nLockTimeFlags |= LOCKTIME_VERIFY_SEQUENCE;
2120  }
2121 
2122  // Get the script flags for this block
2123  unsigned int flags = GetBlockScriptFlags(pindex, chainparams.GetConsensus());
2124 
2125  int64_t nTime2 = GetTimeMicros(); nTimeForks += nTime2 - nTime1;
2126  LogPrint(BCLog::BENCH, " - Fork checks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime2 - nTime1), nTimeForks * MICRO, nTimeForks * MILLI / nBlocksTotal);
2127 
2128  CBlockUndo blockundo;
2129 
2130  // Precomputed transaction data pointers must not be invalidated
2131  // until after `control` has run the script checks (potentially
2132  // in multiple threads). Preallocate the vector size so a new allocation
2133  // doesn't invalidate pointers into the vector, and keep txsdata in scope
2134  // for as long as `control`.
2135  CCheckQueueControl<CScriptCheck> control(fScriptChecks && g_parallel_script_checks ? &scriptcheckqueue : nullptr);
2136  std::vector<PrecomputedTransactionData> txsdata(block.vtx.size());
2137 
2138  std::vector<int> prevheights;
2139  CAmount nFees = 0;
2140  int nInputs = 0;
2141  int64_t nSigOpsCost = 0;
2142  blockundo.vtxundo.reserve(block.vtx.size() - 1);
2143  for (unsigned int i = 0; i < block.vtx.size(); i++)
2144  {
2145  const CTransaction &tx = *(block.vtx[i]);
2146 
2147  nInputs += tx.vin.size();
2148 
2149  if (!tx.IsCoinBase())
2150  {
2151  CAmount txfee = 0;
2152  TxValidationState tx_state;
2153  if (!Consensus::CheckTxInputs(tx, tx_state, view, pindex->nHeight, txfee)) {
2154  // Any transaction validation failure in ConnectBlock is a block consensus failure
2156  tx_state.GetRejectReason(), tx_state.GetDebugMessage());
2157  return error("%s: Consensus::CheckTxInputs: %s, %s", __func__, tx.GetHash().ToString(), state.ToString());
2158  }
2159  nFees += txfee;
2160  if (!MoneyRange(nFees)) {
2161  LogPrintf("ERROR: %s: accumulated fee in the block out of range.\n", __func__);
2162  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-accumulated-fee-outofrange");
2163  }
2164 
2165  // Check that transaction is BIP68 final
2166  // BIP68 lock checks (as opposed to nLockTime checks) must
2167  // be in ConnectBlock because they require the UTXO set
2168  prevheights.resize(tx.vin.size());
2169  for (size_t j = 0; j < tx.vin.size(); j++) {
2170  prevheights[j] = view.AccessCoin(tx.vin[j].prevout).nHeight;
2171  }
2172 
2173  if (!SequenceLocks(tx, nLockTimeFlags, prevheights, *pindex)) {
2174  LogPrintf("ERROR: %s: contains a non-BIP68-final transaction\n", __func__);
2175  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-nonfinal");
2176  }
2177  }
2178 
2179  // GetTransactionSigOpCost counts 3 types of sigops:
2180  // * legacy (always)
2181  // * p2sh (when P2SH enabled in flags and excludes coinbase)
2182  // * witness (when witness enabled in flags and excludes coinbase)
2183  nSigOpsCost += GetTransactionSigOpCost(tx, view, flags);
2184  if (nSigOpsCost > MAX_BLOCK_SIGOPS_COST) {
2185  LogPrintf("ERROR: ConnectBlock(): too many sigops\n");
2186  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-sigops");
2187  }
2188 
2189  if (!tx.IsCoinBase())
2190  {
2191  std::vector<CScriptCheck> vChecks;
2192  bool fCacheResults = fJustCheck; /* Don't cache results if we're actually connecting blocks (still consult the cache, though) */
2193  TxValidationState tx_state;
2194  if (fScriptChecks && !CheckInputScripts(tx, tx_state, view, flags, fCacheResults, fCacheResults, txsdata[i], g_parallel_script_checks ? &vChecks : nullptr)) {
2195  // Any transaction validation failure in ConnectBlock is a block consensus failure
2197  tx_state.GetRejectReason(), tx_state.GetDebugMessage());
2198  return error("ConnectBlock(): CheckInputScripts on %s failed with %s",
2199  tx.GetHash().ToString(), state.ToString());
2200  }
2201  control.Add(vChecks);
2202  }
2203 
2204  CTxUndo undoDummy;
2205  if (i > 0) {
2206  blockundo.vtxundo.push_back(CTxUndo());
2207  }
2208  UpdateCoins(tx, view, i == 0 ? undoDummy : blockundo.vtxundo.back(), pindex->nHeight);
2209  }
2210  int64_t nTime3 = GetTimeMicros(); nTimeConnect += nTime3 - nTime2;
2211  LogPrint(BCLog::BENCH, " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs (%.2fms/blk)]\n", (unsigned)block.vtx.size(), MILLI * (nTime3 - nTime2), MILLI * (nTime3 - nTime2) / block.vtx.size(), nInputs <= 1 ? 0 : MILLI * (nTime3 - nTime2) / (nInputs-1), nTimeConnect * MICRO, nTimeConnect * MILLI / nBlocksTotal);
2212 
2213  CAmount blockReward = nFees + GetBlockSubsidy(pindex->nHeight, chainparams.GetConsensus());
2214  if (block.vtx[0]->GetValueOut() > blockReward) {
2215  LogPrintf("ERROR: ConnectBlock(): coinbase pays too much (actual=%d vs limit=%d)\n", block.vtx[0]->GetValueOut(), blockReward);
2216  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-amount");
2217  }
2218 
2219  if (!control.Wait()) {
2220  LogPrintf("ERROR: %s: CheckQueue failed\n", __func__);
2221  return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "block-validation-failed");
2222  }
2223  int64_t nTime4 = GetTimeMicros(); nTimeVerify += nTime4 - nTime2;
2224  LogPrint(BCLog::BENCH, " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs (%.2fms/blk)]\n", nInputs - 1, MILLI * (nTime4 - nTime2), nInputs <= 1 ? 0 : MILLI * (nTime4 - nTime2) / (nInputs-1), nTimeVerify * MICRO, nTimeVerify * MILLI / nBlocksTotal);
2225 
2226  if (fJustCheck)
2227  return true;
2228 
2229  if (!WriteUndoDataForBlock(blockundo, state, pindex, chainparams))
2230  return false;
2231 
2232  if (!pindex->IsValid(BLOCK_VALID_SCRIPTS)) {
2234  setDirtyBlockIndex.insert(pindex);
2235  }
2236 
2237  assert(pindex->phashBlock);
2238  // add this block to the view's block chain
2239  view.SetBestBlock(pindex->GetBlockHash());
2240 
2241  int64_t nTime5 = GetTimeMicros(); nTimeIndex += nTime5 - nTime4;
2242  LogPrint(BCLog::BENCH, " - Index writing: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime5 - nTime4), nTimeIndex * MICRO, nTimeIndex * MILLI / nBlocksTotal);
2243 
2244  int64_t nTime6 = GetTimeMicros(); nTimeCallbacks += nTime6 - nTime5;
2245  LogPrint(BCLog::BENCH, " - Callbacks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime6 - nTime5), nTimeCallbacks * MICRO, nTimeCallbacks * MILLI / nBlocksTotal);
2246 
2247  return true;
2248 }
2249 
2250 CoinsCacheSizeState CChainState::GetCoinsCacheSizeState(const CTxMemPool& tx_pool)
2251 {
2252  return this->GetCoinsCacheSizeState(
2253  tx_pool,
2255  gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000);
2256 }
2257 
2258 CoinsCacheSizeState CChainState::GetCoinsCacheSizeState(
2259  const CTxMemPool& tx_pool,
2260  size_t max_coins_cache_size_bytes,
2261  size_t max_mempool_size_bytes)
2262 {
2263  int64_t nMempoolUsage = tx_pool.DynamicMemoryUsage();
2264  int64_t cacheSize = CoinsTip().DynamicMemoryUsage();
2265  int64_t nTotalSpace =
2266  max_coins_cache_size_bytes + std::max<int64_t>(max_mempool_size_bytes - nMempoolUsage, 0);
2267 
2269  static constexpr int64_t MAX_BLOCK_COINSDB_USAGE_BYTES = 10 * 1024 * 1024; // 10MB
2270  int64_t large_threshold =
2271  std::max((9 * nTotalSpace) / 10, nTotalSpace - MAX_BLOCK_COINSDB_USAGE_BYTES);
2272 
2273  if (cacheSize > nTotalSpace) {
2274  LogPrintf("Cache size (%s) exceeds total space (%s)\n", cacheSize, nTotalSpace);
2276  } else if (cacheSize > large_threshold) {
2278  }
2279  return CoinsCacheSizeState::OK;
2280 }
2281 
    const CChainParams& chainparams,
    BlockValidationState &state,
    FlushStateMode mode,
    int nManualPruneHeight)
{
    LOCK(cs_main);
    assert(this->CanFlushToDisk());
    // Timestamps of the last block-index write and last full coins flush,
    // used to rate-limit the periodic modes. Static: persists across calls.
    static std::chrono::microseconds nLastWrite{0};
    static std::chrono::microseconds nLastFlush{0};
    std::set<int> setFilesToPrune;
    bool full_flush_completed = false;

    // Snapshot cache stats up front for the LOG_TIME_SECONDS message below.
    const size_t coins_count = CoinsTip().GetCacheSize();
    const size_t coins_mem_usage = CoinsTip().DynamicMemoryUsage();

    try {
    {
        bool fFlushForPrune = false;
        bool fDoFullFlush = false;
        CoinsCacheSizeState cache_state = GetCoinsCacheSizeState(::mempool);
        LOCK(cs_LastBlockFile);
        if (fPruneMode && (fCheckForPruning || nManualPruneHeight > 0) && !fReindex) {
            if (nManualPruneHeight > 0) {
                LOG_TIME_MILLIS_WITH_CATEGORY("find files to prune (manual)", BCLog::BENCH);

                FindFilesToPruneManual(g_chainman, setFilesToPrune, nManualPruneHeight);
            } else {
                LOG_TIME_MILLIS_WITH_CATEGORY("find files to prune", BCLog::BENCH);

                FindFilesToPrune(g_chainman, setFilesToPrune, chainparams.PruneAfterHeight());
                fCheckForPruning = false;
            }
            if (!setFilesToPrune.empty()) {
                fFlushForPrune = true;
                if (!fHavePruned) {
                    // Persist the fact that we pruned, so a restarted node
                    // knows block files may be incomplete.
                    pblocktree->WriteFlag("prunedblockfiles", true);
                    fHavePruned = true;
                }
            }
        }
        const auto nNow = GetTime<std::chrono::microseconds>();
        // Avoid writing/flushing immediately after startup.
        if (nLastWrite.count() == 0) {
            nLastWrite = nNow;
        }
        if (nLastFlush.count() == 0) {
            nLastFlush = nNow;
        }
        // The cache is large and we're within 10% and 10 MiB of the limit, but we have time now (not in the middle of a block processing).
        bool fCacheLarge = mode == FlushStateMode::PERIODIC && cache_state >= CoinsCacheSizeState::LARGE;
        // The cache is over the limit, we have to write now.
        bool fCacheCritical = mode == FlushStateMode::IF_NEEDED && cache_state >= CoinsCacheSizeState::CRITICAL;
        // It's been a while since we wrote the block index to disk. Do this frequently, so we don't need to redownload after a crash.
        bool fPeriodicWrite = mode == FlushStateMode::PERIODIC && nNow > nLastWrite + DATABASE_WRITE_INTERVAL;
        // It's been very long since we flushed the cache. Do this infrequently, to optimize cache usage.
        bool fPeriodicFlush = mode == FlushStateMode::PERIODIC && nNow > nLastFlush + DATABASE_FLUSH_INTERVAL;
        // Combine all conditions that result in a full cache flush.
        fDoFullFlush = (mode == FlushStateMode::ALWAYS) || fCacheLarge || fCacheCritical || fPeriodicFlush || fFlushForPrune;
        // Write blocks and block index to disk.
        if (fDoFullFlush || fPeriodicWrite) {
            // Depend on nMinDiskSpace to ensure we can write block index
            if (!CheckDiskSpace(GetBlocksDir())) {
                return AbortNode(state, "Disk space is too low!", _("Disk space is too low!"));
            }
            {
                LOG_TIME_MILLIS_WITH_CATEGORY("write block and undo data to disk", BCLog::BENCH);

                // First make sure all block and undo data is flushed to disk.
                FlushBlockFile();
            }

            // Then update all block file information (which may refer to block and undo files).
            {
                LOG_TIME_MILLIS_WITH_CATEGORY("write block index to disk", BCLog::BENCH);

                std::vector<std::pair<int, const CBlockFileInfo*> > vFiles;
                vFiles.reserve(setDirtyFileInfo.size());
                // Drain the dirty-file set while collecting entries; the
                // post-increment erase keeps the iterator valid.
                for (std::set<int>::iterator it = setDirtyFileInfo.begin(); it != setDirtyFileInfo.end(); ) {
                    vFiles.push_back(std::make_pair(*it, &vinfoBlockFile[*it]));
                    setDirtyFileInfo.erase(it++);
                }
                std::vector<const CBlockIndex*> vBlocks;
                vBlocks.reserve(setDirtyBlockIndex.size());
                for (std::set<CBlockIndex*>::iterator it = setDirtyBlockIndex.begin(); it != setDirtyBlockIndex.end(); ) {
                    vBlocks.push_back(*it);
                    setDirtyBlockIndex.erase(it++);
                }
                if (!pblocktree->WriteBatchSync(vFiles, nLastBlockFile, vBlocks)) {
                    return AbortNode(state, "Failed to write to block index database");
                }
            }
            // Finally remove any pruned files
            if (fFlushForPrune) {
                LOG_TIME_MILLIS_WITH_CATEGORY("unlink pruned files", BCLog::BENCH);

                UnlinkPrunedFiles(setFilesToPrune);
            }
            nLastWrite = nNow;
        }
        // Flush best chain related state. This can only be done if the blocks / block index write was also done.
        if (fDoFullFlush && !CoinsTip().GetBestBlock().IsNull()) {
            LOG_TIME_SECONDS(strprintf("write coins cache to disk (%d coins, %.2fkB)",
                coins_count, coins_mem_usage / 1000));

            // Typical Coin structures on disk are around 48 bytes in size.
            // Pushing a new one to the database can cause it to be written
            // twice (once in the log, and once in the tables). This is already
            // an overestimation, as most will delete an existing entry or
            // overwrite one. Still, use a conservative safety factor of 2.
            if (!CheckDiskSpace(GetDataDir(), 48 * 2 * 2 * CoinsTip().GetCacheSize())) {
                return AbortNode(state, "Disk space is too low!", _("Disk space is too low!"));
            }
            // Flush the chainstate (which may refer to block index entries).
            if (!CoinsTip().Flush())
                return AbortNode(state, "Failed to write to coin database");
            nLastFlush = nNow;
            full_flush_completed = true;
        }
    }
    if (full_flush_completed) {
        // Update best block in wallet (so we can detect restored wallets).
    }
    } catch (const std::runtime_error& e) {
        return AbortNode(state, std::string("System error while flushing: ") + e.what());
    }
    return true;
}
2411 
    BlockValidationState state;
    const CChainParams& chainparams = Params();
    // Unconditional full flush; failure is logged but not propagated to the caller.
    if (!this->FlushStateToDisk(chainparams, state, FlushStateMode::ALWAYS)) {
        LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString());
    }
}
2419 
    BlockValidationState state;
    // Request that the next flush also searches for block files to prune.
    fCheckForPruning = true;
    const CChainParams& chainparams = Params();

    if (!this->FlushStateToDisk(chainparams, state, FlushStateMode::NONE)) {
        LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString());
    }
}
2429 
2430 static void DoWarning(const bilingual_str& warning)
2431 {
2432  static bool fWarned = false;
2433  SetMiscWarning(warning);
2434  if (!fWarned) {
2435  AlertNotify(warning.original);
2436  fWarned = true;
2437  }
2438 }
2439 
2441 static void AppendWarning(bilingual_str& res, const bilingual_str& warn)
2442 {
2443  if (!res.empty()) res += Untranslated(", ");
2444  res += warn;
2445 }
2446 
/** Update state that depends on the new active chain tip: publish the best
 *  block hash, check version-bit warning conditions, and emit the UpdateTip
 *  log line. Caller must hold cs_main. */
void static UpdateTip(const CBlockIndex* pindexNew, const CChainParams& chainParams)
    EXCLUSIVE_LOCKS_REQUIRED(::cs_main)
{
    // New best block

    {
        // Publish the new tip hash and wake any threads waiting on it
        // (e.g. longpoll-style waiters on g_best_block_cv).
        LOCK(g_best_block_mutex);
        g_best_block = pindexNew->GetBlockHash();
        g_best_block_cv.notify_all();
    }

    bilingual_str warning_messages;
    {
        int nUpgraded = 0;
        const CBlockIndex* pindex = pindexNew;
        // Warn the user about any unknown version bit that is LOCKED_IN or
        // ACTIVE: a soft fork we do not know the rules for may be deploying.
        for (int bit = 0; bit < VERSIONBITS_NUM_BITS; bit++) {
            WarningBitsConditionChecker checker(bit);
            ThresholdState state = checker.GetStateFor(pindex, chainParams.GetConsensus(), warningcache[bit]);
            if (state == ThresholdState::ACTIVE || state == ThresholdState::LOCKED_IN) {
                const bilingual_str warning = strprintf(_("Warning: unknown new rules activated (versionbit %i)"), bit);
                if (state == ThresholdState::ACTIVE) {
                    DoWarning(warning);
                } else {
                    AppendWarning(warning_messages, warning);
                }
            }
        }
        // Check the version of the last 100 blocks to see if we need to upgrade:
        for (int i = 0; i < 100 && pindex != nullptr; i++)
        {
            int32_t nExpectedVersion = ComputeBlockVersion(pindex->pprev, chainParams.GetConsensus());
            if (pindex->nVersion > VERSIONBITS_LAST_OLD_BLOCK_VERSION && (pindex->nVersion & ~nExpectedVersion) != 0)
                ++nUpgraded;
            pindex = pindex->pprev;
        }
        if (nUpgraded > 0)
            AppendWarning(warning_messages, strprintf(_("%d of last 100 blocks have unexpected version"), nUpgraded));
    }
    LogPrintf("%s: new best=%s height=%d version=0x%08x log2_work=%.8g tx=%lu date='%s' progress=%f cache=%.1fMiB(%utxo)%s\n", __func__,
        pindexNew->GetBlockHash().ToString(), pindexNew->nHeight, pindexNew->nVersion,
        log(pindexNew->nChainWork.getdouble())/log(2.0), (unsigned long)pindexNew->nChainTx,
        FormatISO8601DateTime(pindexNew->GetBlockTime()),
        GuessVerificationProgress(chainParams.TxData(), pindexNew), ::ChainstateActive().CoinsTip().DynamicMemoryUsage() * (1.0 / (1<<20)), ::ChainstateActive().CoinsTip().GetCacheSize(),
        !warning_messages.empty() ? strprintf(" warning='%s'", warning_messages.original) : "");

}
2496 
{
    CBlockIndex *pindexDelete = m_chain.Tip();
    assert(pindexDelete);
    // Read block from disk.
    std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
    CBlock& block = *pblock;
    if (!ReadBlockFromDisk(block, pindexDelete, chainparams.GetConsensus()))
        return error("DisconnectTip(): Failed to read block");
    // Apply the block atomically to the chain state.
    int64_t nStart = GetTimeMicros();
    {
        // Undo the block against a cache layered on the tip view, then flush
        // the whole change set at once so the coins view never sees a
        // half-disconnected block.
        CCoinsViewCache view(&CoinsTip());
        assert(view.GetBestBlock() == pindexDelete->GetBlockHash());
        if (DisconnectBlock(block, pindexDelete, view) != DISCONNECT_OK)
            return error("DisconnectTip(): DisconnectBlock %s failed", pindexDelete->GetBlockHash().ToString());
        bool flushed = view.Flush();
        assert(flushed);
    }
    LogPrint(BCLog::BENCH, "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart) * MILLI);
    // Write the chain state to disk, if necessary.
    if (!FlushStateToDisk(chainparams, state, FlushStateMode::IF_NEEDED))
        return false;

    if (disconnectpool) {
        // Save transactions to re-add to mempool at end of reorg
        for (auto it = block.vtx.rbegin(); it != block.vtx.rend(); ++it) {
            disconnectpool->addTransaction(*it);
        }
        // Bound disconnectpool memory (MAX_DISCONNECTED_TX_POOL_SIZE is in KB).
        while (disconnectpool->DynamicMemoryUsage() > MAX_DISCONNECTED_TX_POOL_SIZE * 1000) {
            // Drop the earliest entry, and remove its children from the mempool.
            auto it = disconnectpool->queuedTx.get<insertion_order>().begin();
            disconnectpool->removeEntry(it);
        }
    }

    m_chain.SetTip(pindexDelete->pprev);

    UpdateTip(pindexDelete->pprev, chainparams);
    // Let wallets know transactions went from 1-confirmed to
    // 0-confirmed or conflicted:
    GetMainSignals().BlockDisconnected(pblock, pindexDelete);
    return true;
}
2552 
// Cumulative benchmark timers (microseconds) for the phases of ConnectTip();
// reported via LogPrint(BCLog::BENCH, ...) after each block connection.
static int64_t nTimeReadFromDisk = 0;
static int64_t nTimeConnectTotal = 0;
static int64_t nTimeFlush = 0;
static int64_t nTimeChainState = 0;
static int64_t nTimePostConnect = 0;
2558 
    CBlockIndex* pindex = nullptr;         // Index entry of the connected block (null until recorded)
    std::shared_ptr<const CBlock> pblock;  // Full data of the connected block
};
private:
    // One entry per connected block, plus a trailing sentinel entry that is
    // always empty (see GetBlocksConnected()).
    std::vector<PerBlockConnectTrace> blocksConnected;

public:
    explicit ConnectTrace() : blocksConnected(1) {}

    // Record a newly connected block. pindex and pblock must be non-null, and
    // each call fills the current sentinel entry then appends a fresh one.
    void BlockConnected(CBlockIndex* pindex, std::shared_ptr<const CBlock> pblock) {
        assert(!blocksConnected.back().pindex);
        assert(pindex);
        assert(pblock);
        blocksConnected.back().pindex = pindex;
        blocksConnected.back().pblock = std::move(pblock);
        blocksConnected.emplace_back();
    }

    std::vector<PerBlockConnectTrace>& GetBlocksConnected() {
        // We always keep one extra block at the end of our list because
        // blocks are added after all the conflicted transactions have
        // been filled in. Thus, the last entry should always be an empty
        // one waiting for the transactions from the next block. We pop
        // the last entry here to make sure the list we return is sane.
        assert(!blocksConnected.back().pindex);
        blocksConnected.pop_back();
        return blocksConnected;
    }
};
2598 
/**
 * Connect pindexNew as the new chain tip. If pblock is null the block is read
 * from disk. On failure the block is marked invalid (when state says so) and
 * false is returned; on success the block is recorded in connectTrace.
 */
bool CChainState::ConnectTip(BlockValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexNew, const std::shared_ptr<const CBlock>& pblock, ConnectTrace& connectTrace, DisconnectedBlockTransactions &disconnectpool)
{
    assert(pindexNew->pprev == m_chain.Tip());
    // Read block from disk.
    int64_t nTime1 = GetTimeMicros();
    std::shared_ptr<const CBlock> pthisBlock;
    if (!pblock) {
        std::shared_ptr<CBlock> pblockNew = std::make_shared<CBlock>();
        if (!ReadBlockFromDisk(*pblockNew, pindexNew, chainparams.GetConsensus()))
            return AbortNode(state, "Failed to read block");
        pthisBlock = pblockNew;
    } else {
        pthisBlock = pblock;
    }
    const CBlock& blockConnecting = *pthisBlock;
    // Apply the block atomically to the chain state.
    int64_t nTime2 = GetTimeMicros(); nTimeReadFromDisk += nTime2 - nTime1;
    int64_t nTime3;
    LogPrint(BCLog::BENCH, " - Load block from disk: %.2fms [%.2fs]\n", (nTime2 - nTime1) * MILLI, nTimeReadFromDisk * MICRO);
    {
        CCoinsViewCache view(&CoinsTip());
        bool rv = ConnectBlock(blockConnecting, state, pindexNew, view, chainparams);
        // Notify listeners of the validation outcome before acting on failure.
        GetMainSignals().BlockChecked(blockConnecting, state);
        if (!rv) {
            if (state.IsInvalid())
                InvalidBlockFound(pindexNew, state);
            return error("%s: ConnectBlock %s failed, %s", __func__, pindexNew->GetBlockHash().ToString(), state.ToString());
        }
        nTime3 = GetTimeMicros(); nTimeConnectTotal += nTime3 - nTime2;
        assert(nBlocksTotal > 0);
        LogPrint(BCLog::BENCH, " - Connect total: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime3 - nTime2) * MILLI, nTimeConnectTotal * MICRO, nTimeConnectTotal * MILLI / nBlocksTotal);
        bool flushed = view.Flush();
        assert(flushed);
    }
    int64_t nTime4 = GetTimeMicros(); nTimeFlush += nTime4 - nTime3;
    LogPrint(BCLog::BENCH, " - Flush: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime4 - nTime3) * MILLI, nTimeFlush * MICRO, nTimeFlush * MILLI / nBlocksTotal);
    // Write the chain state to disk, if necessary.
    if (!FlushStateToDisk(chainparams, state, FlushStateMode::IF_NEEDED))
        return false;
    int64_t nTime5 = GetTimeMicros(); nTimeChainState += nTime5 - nTime4;
    LogPrint(BCLog::BENCH, " - Writing chainstate: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime5 - nTime4) * MILLI, nTimeChainState * MICRO, nTimeChainState * MILLI / nBlocksTotal);
    // Remove conflicting transactions from the mempool.
    mempool.removeForBlock(blockConnecting.vtx, pindexNew->nHeight);
    disconnectpool.removeForBlock(blockConnecting.vtx);
    // Update m_chain & related variables.
    m_chain.SetTip(pindexNew);
    UpdateTip(pindexNew, chainparams);

    int64_t nTime6 = GetTimeMicros(); nTimePostConnect += nTime6 - nTime5; nTimeTotal += nTime6 - nTime1;
    LogPrint(BCLog::BENCH, " - Connect postprocess: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime6 - nTime5) * MILLI, nTimePostConnect * MICRO, nTimePostConnect * MILLI / nBlocksTotal);
    LogPrint(BCLog::BENCH, "- Connect block: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime6 - nTime1) * MILLI, nTimeTotal * MICRO, nTimeTotal * MILLI / nBlocksTotal);

    connectTrace.BlockConnected(pindexNew, std::move(pthisBlock));
    return true;
}
2660 
    // Repeatedly take the highest-work candidate until one with a fully
    // valid, fully available ancestry is found (or none remain).
    do {
        CBlockIndex *pindexNew = nullptr;

        // Find the best candidate header.
        {
            std::set<CBlockIndex*, CBlockIndexWorkComparator>::reverse_iterator it = setBlockIndexCandidates.rbegin();
            if (it == setBlockIndexCandidates.rend())
                return nullptr;
            pindexNew = *it;
        }

        // Check whether all blocks on the path between the currently active chain and the candidate are valid.
        // Just going until the active chain is an optimization, as we know all blocks in it are valid already.
        CBlockIndex *pindexTest = pindexNew;
        bool fInvalidAncestor = false;
        while (pindexTest && !m_chain.Contains(pindexTest)) {
            assert(pindexTest->HaveTxsDownloaded() || pindexTest->nHeight == 0);

            // Pruned nodes may have entries in setBlockIndexCandidates for
            // which block files have been deleted. Remove those as candidates
            // for the most work chain if we come across them; we can't switch
            // to a chain unless we have all the non-active-chain parent blocks.
            bool fFailedChain = pindexTest->nStatus & BLOCK_FAILED_MASK;
            bool fMissingData = !(pindexTest->nStatus & BLOCK_HAVE_DATA);
            if (fFailedChain || fMissingData) {
                // Candidate chain is not usable (either invalid or missing data)
                if (fFailedChain && (pindexBestInvalid == nullptr || pindexNew->nChainWork > pindexBestInvalid->nChainWork))
                    pindexBestInvalid = pindexNew;
                CBlockIndex *pindexFailed = pindexNew;
                // Remove the entire chain from the set.
                while (pindexTest != pindexFailed) {
                    if (fFailedChain) {
                        pindexFailed->nStatus |= BLOCK_FAILED_CHILD;
                    } else if (fMissingData) {
                        // If we're missing data, then add back to m_blocks_unlinked,
                        // so that if the block arrives in the future we can try adding
                        // to setBlockIndexCandidates again.
                            std::make_pair(pindexFailed->pprev, pindexFailed));
                    }
                    setBlockIndexCandidates.erase(pindexFailed);
                    pindexFailed = pindexFailed->pprev;
                }
                setBlockIndexCandidates.erase(pindexTest);
                fInvalidAncestor = true;
                break;
            }
            pindexTest = pindexTest->pprev;
        }
        if (!fInvalidAncestor)
            return pindexNew;
    } while(true);
}
2719 
    // Note that we can't delete the current block itself, as we may need to return to it later in case a
    // reorganization to a better block fails.
    std::set<CBlockIndex*, CBlockIndexWorkComparator>::iterator it = setBlockIndexCandidates.begin();
    // Drop every candidate with strictly less work than the current tip; the
    // post-increment erase keeps the iterator valid across removals.
    while (it != setBlockIndexCandidates.end() && setBlockIndexCandidates.value_comp()(*it, m_chain.Tip())) {
        setBlockIndexCandidates.erase(it++);
    }
    // Either the current tip or a successor of it we're working towards is left in setBlockIndexCandidates.
    assert(!setBlockIndexCandidates.empty());
}
2731 
/**
 * Make one step of progress toward making pindexMostWork the active tip:
 * disconnect blocks not on the best chain, then connect up to 32 new blocks.
 * pblock is either null or the block corresponding to pindexMostWork.
 * Sets fInvalidFound if a connected block turned out to be invalid.
 */
bool CChainState::ActivateBestChainStep(BlockValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexMostWork, const std::shared_ptr<const CBlock>& pblock, bool& fInvalidFound, ConnectTrace& connectTrace)
{
    AssertLockHeld(cs_main);

    const CBlockIndex *pindexOldTip = m_chain.Tip();
    const CBlockIndex *pindexFork = m_chain.FindFork(pindexMostWork);

    // Disconnect active blocks which are no longer in the best chain.
    bool fBlocksDisconnected = false;
    DisconnectedBlockTransactions disconnectpool;
    while (m_chain.Tip() && m_chain.Tip() != pindexFork) {
        if (!DisconnectTip(state, chainparams, &disconnectpool)) {
            // This is likely a fatal error, but keep the mempool consistent,
            // just in case. Only remove from the mempool in this case.
            UpdateMempoolForReorg(disconnectpool, false);

            // If we're unable to disconnect a block during normal operation,
            // then that is a failure of our local system -- we should abort
            // rather than stay on a less work chain.
            AbortNode(state, "Failed to disconnect block; see debug.log for details");
            return false;
        }
        fBlocksDisconnected = true;
    }

    // Build list of new blocks to connect.
    std::vector<CBlockIndex*> vpindexToConnect;
    bool fContinue = true;
    int nHeight = pindexFork ? pindexFork->nHeight : -1;
    while (fContinue && nHeight != pindexMostWork->nHeight) {
        // Don't iterate the entire list of potential improvements toward the best tip, as we likely only need
        // a few blocks along the way.
        int nTargetHeight = std::min(nHeight + 32, pindexMostWork->nHeight);
        vpindexToConnect.clear();
        vpindexToConnect.reserve(nTargetHeight - nHeight);
        CBlockIndex *pindexIter = pindexMostWork->GetAncestor(nTargetHeight);
        // Walk back from the target ancestor, collecting blocks newest-first;
        // they are connected in reverse (oldest-first) below.
        while (pindexIter && pindexIter->nHeight != nHeight) {
            vpindexToConnect.push_back(pindexIter);
            pindexIter = pindexIter->pprev;
        }
        nHeight = nTargetHeight;

        // Connect new blocks.
        for (CBlockIndex *pindexConnect : reverse_iterate(vpindexToConnect)) {
            if (!ConnectTip(state, chainparams, pindexConnect, pindexConnect == pindexMostWork ? pblock : std::shared_ptr<const CBlock>(), connectTrace, disconnectpool)) {
                if (state.IsInvalid()) {
                    // The block violates a consensus rule.
                        InvalidChainFound(vpindexToConnect.front());
                    }
                    state = BlockValidationState();
                    fInvalidFound = true;
                    fContinue = false;
                    break;
                } else {
                    // A system error occurred (disk space, database error, ...).
                    // Make the mempool consistent with the current tip, just in case
                    // any observers try to use it before shutdown.
                    UpdateMempoolForReorg(disconnectpool, false);
                    return false;
                }
            } else {
                if (!pindexOldTip || m_chain.Tip()->nChainWork > pindexOldTip->nChainWork) {
                    // We're in a better position than we were. Return temporarily to release the lock.
                    fContinue = false;
                    break;
                }
            }
        }
    }

    if (fBlocksDisconnected) {
        // If any blocks were disconnected, disconnectpool may be non empty. Add
        // any disconnected transactions back to the mempool.
        UpdateMempoolForReorg(disconnectpool, true);
    }
    mempool.check(&CoinsTip());

    // Callbacks/notifications for a new best chain.
    if (fInvalidFound)
        CheckForkWarningConditionsOnNewFork(vpindexToConnect.back());
    else

    return true;
}
2825 
{
    // Steady state once initialization (IBD/reindex) has completed.
    if (!init) return SynchronizationState::POST_INIT;
}
2832 
2833 static bool NotifyHeaderTip() LOCKS_EXCLUDED(cs_main) {
2834  bool fNotify = false;
2835  bool fInitialBlockDownload = false;
2836  static CBlockIndex* pindexHeaderOld = nullptr;
2837  CBlockIndex* pindexHeader = nullptr;
2838  {
2839  LOCK(cs_main);
2840  pindexHeader = pindexBestHeader;
2841 
2842  if (pindexHeader != pindexHeaderOld) {
2843  fNotify = true;
2844  fInitialBlockDownload = ::ChainstateActive().IsInitialBlockDownload();
2845  pindexHeaderOld = pindexHeader;
2846  }
2847  }
2848  // Send block tip changed notifications without cs_main
2849  if (fNotify) {
2850  uiInterface.NotifyHeaderTip(GetSynchronizationState(fInitialBlockDownload), pindexHeader);
2851  }
2852  return fNotify;
2853 }
2854 
    AssertLockNotHeld(cs_main);

    // Throttle when the validation callback queue backs up
    // (drain step elided in this view — presumably SyncWithValidationInterfaceQueue; TODO confirm).
    if (GetMainSignals().CallbacksPending() > 10) {
    }
}
2862 
bool CChainState::ActivateBestChain(BlockValidationState &state, const CChainParams& chainparams, std::shared_ptr<const CBlock> pblock) {
    // Note that while we're often called here from ProcessNewBlock, this is
    // far from a guarantee. Things in the P2P/RPC will often end up calling
    // us in the middle of ProcessNewBlock - do not assume pblock is set
    // sanely for performance or correctness!
    AssertLockNotHeld(cs_main);

    // ABC maintains a fair degree of expensive-to-calculate internal state
    // because this function periodically releases cs_main so that it does not lock up other threads for too long
    // during large connects - and to allow for e.g. the callback queue to drain
    // we use m_cs_chainstate to enforce mutual exclusion so that only one caller may execute this function at a time

    CBlockIndex *pindexMostWork = nullptr;
    CBlockIndex *pindexNewTip = nullptr;
    int nStopAtHeight = gArgs.GetArg("-stopatheight", DEFAULT_STOPATHEIGHT);
    do {
        // Block until the validation queue drains. This should largely
        // never happen in normal operation, however may happen during
        // reindex, causing memory blowup if we run too far ahead.
        // Note that if a validationinterface callback ends up calling
        // ActivateBestChain this may lead to a deadlock! We should
        // probably have a DEBUG_LOCKORDER test for this in the future.

        {
            LOCK2(cs_main, ::mempool.cs); // Lock transaction pool for at least as long as it takes for connectTrace to be consumed
            CBlockIndex* starting_tip = m_chain.Tip();
            bool blocks_connected = false;
            do {
                // We absolutely may not unlock cs_main until we've made forward progress
                // (with the exception of shutdown due to hardware issues, low disk space, etc).
                ConnectTrace connectTrace; // Destructed before cs_main is unlocked

                if (pindexMostWork == nullptr) {
                    pindexMostWork = FindMostWorkChain();
                }

                // Whether we have anything to do at all.
                if (pindexMostWork == nullptr || pindexMostWork == m_chain.Tip()) {
                    break;
                }

                bool fInvalidFound = false;
                std::shared_ptr<const CBlock> nullBlockPtr;
                // Only pass pblock down when it actually is the most-work block.
                if (!ActivateBestChainStep(state, chainparams, pindexMostWork, pblock && pblock->GetHash() == pindexMostWork->GetBlockHash() ? pblock : nullBlockPtr, fInvalidFound, connectTrace)) {
                    // A system error occurred
                    return false;
                }
                blocks_connected = true;

                if (fInvalidFound) {
                    // Wipe cache, we may need another branch now.
                    pindexMostWork = nullptr;
                }
                pindexNewTip = m_chain.Tip();

                for (const PerBlockConnectTrace& trace : connectTrace.GetBlocksConnected()) {
                    assert(trace.pblock && trace.pindex);
                    GetMainSignals().BlockConnected(trace.pblock, trace.pindex);
                }
            } while (!m_chain.Tip() || (starting_tip && CBlockIndexWorkComparator()(m_chain.Tip(), starting_tip)));
            if (!blocks_connected) return true;

            const CBlockIndex* pindexFork = m_chain.FindFork(starting_tip);
            bool fInitialDownload = IsInitialBlockDownload();

            // Notify external listeners about the new tip.
            // Enqueue while holding cs_main to ensure that UpdatedBlockTip is called in the order in which blocks are connected
            if (pindexFork != pindexNewTip) {
                // Notify ValidationInterface subscribers
                GetMainSignals().UpdatedBlockTip(pindexNewTip, pindexFork, fInitialDownload);

                // Always notify the UI if a new block tip was connected
                uiInterface.NotifyBlockTip(GetSynchronizationState(fInitialDownload), pindexNewTip);
            }
        }
        // When we reach this point, we switched to a new tip (stored in pindexNewTip).

        if (nStopAtHeight && pindexNewTip && pindexNewTip->nHeight >= nStopAtHeight) StartShutdown();

        // We check shutdown only after giving ActivateBestChainStep a chance to run once so that we
        // never shutdown before connecting the genesis block during LoadChainTip(). Previously this
        // caused an assert() failure during shutdown in such cases as the UTXO DB flushing checks
        // that the best block hash is non-null.
        if (ShutdownRequested()) break;
    } while (pindexNewTip != pindexMostWork);
    CheckBlockIndex(chainparams.GetConsensus());

    // Write changes periodically to disk, after relay.
    if (!FlushStateToDisk(chainparams, state, FlushStateMode::PERIODIC)) {
        return false;
    }

    return true;
}
2959 
2960 bool ActivateBestChain(BlockValidationState &state, const CChainParams& chainparams, std::shared_ptr<const CBlock> pblock) {
2961  return ::ChainstateActive().ActivateBestChain(state, chainparams, std::move(pblock));
2962 }
2963 
{
    {
        LOCK(cs_main);
        if (pindex->nChainWork < m_chain.Tip()->nChainWork) {
            // Nothing to do, this block is not at the tip.
            return true;
        }
        // The chain has been extended since the last call, reset the counter.
        }
        // Re-insert the block below so its sequence-id based preference takes effect.
        setBlockIndexCandidates.erase(pindex);
        if (nBlockReverseSequenceId > std::numeric_limits<int32_t>::min()) {
            // We can't keep reducing the counter if somebody really wants to
            // call preciousblock 2**31-1 times on the same set of tips...
        }
        if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && pindex->HaveTxsDownloaded()) {
            setBlockIndexCandidates.insert(pindex);
        }
    }

    return ActivateBestChain(state, params, std::shared_ptr<const CBlock>());
}
2992 bool PreciousBlock(BlockValidationState& state, const CChainParams& params, CBlockIndex *pindex) {
2993  return ::ChainstateActive().PreciousBlock(state, params, pindex);
2994 }
2995 
2997 {
2998  CBlockIndex* to_mark_failed = pindex;
2999  bool pindex_was_in_chain = false;
3000  int disconnected = 0;
3001 
3002  // We do not allow ActivateBestChain() to run while InvalidateBlock() is
3003  // running, as that could cause the tip to change while we disconnect
3004  // blocks.
3006 
3007  // We'll be acquiring and releasing cs_main below, to allow the validation
3008  // callbacks to run. However, we should keep the block index in a
3009  // consistent state as we disconnect blocks -- in particular we need to
3010  // add equal-work blocks to setBlockIndexCandidates as we disconnect.
3011  // To avoid walking the block index repeatedly in search of candidates,
3012  // build a map once so that we can look up candidate blocks by chain
3013  // work as we go.
3014  std::multimap<const arith_uint256, CBlockIndex *> candidate_blocks_by_work;
3015 
3016  {
3017  LOCK(cs_main);
3018  for (const auto& entry : m_blockman.m_block_index) {
3019  CBlockIndex *candidate = entry.second;
3020  // We don't need to put anything in our active chain into the
3021  // multimap, because those candidates will be found and considered
3022  // as we disconnect.
3023  // Instead, consider only non-active-chain blocks that have at
3024  // least as much work as where we expect the new tip to end up.
3025  if (!m_chain.Contains(candidate) &&
3026  !CBlockIndexWorkComparator()(candidate, pindex->pprev) &&
3027  candidate->IsValid(BLOCK_VALID_TRANSACTIONS) &&
3028  candidate->HaveTxsDownloaded()) {
3029  candidate_blocks_by_work.insert(std::make_pair(candidate->nChainWork, candidate));
3030  }
3031  }
3032  }
3033 
3034  // Disconnect (descendants of) pindex, and mark them invalid.
3035  while (true) {
3036  if (ShutdownRequested()) break;
3037 
3038  // Make sure the queue of validation callbacks doesn't grow unboundedly.
3040 
3041  LOCK(cs_main);
3042  LOCK(::mempool.cs); // Lock for as long as disconnectpool is in scope to make sure UpdateMempoolForReorg is called after DisconnectTip without unlocking in between
3043  if (!m_chain.Contains(pindex)) break;
3044  pindex_was_in_chain = true;
3045  CBlockIndex *invalid_walk_tip = m_chain.Tip();
3046 
3047  // ActivateBestChain considers blocks already in m_chain
3048  // unconditionally valid already, so force disconnect away from it.
3049  DisconnectedBlockTransactions disconnectpool;
3050  bool ret = DisconnectTip(state, chainparams, &disconnectpool);
3051  // DisconnectTip will add transactions to disconnectpool.
3052  // Adjust the mempool to be consistent with the new tip, adding
3053  // transactions back to the mempool if disconnecting was successful,
3054  // and we're not doing a very deep invalidation (in which case
3055  // keeping the mempool up to date is probably futile anyway).
3056  UpdateMempoolForReorg(disconnectpool, /* fAddToMempool = */ (++disconnected <= 10) && ret);
3057  if (!ret) return false;
3058  assert(invalid_walk_tip->pprev == m_chain.Tip());
3059 
3060  // We immediately mark the disconnected blocks as invalid.
3061  // This prevents a case where pruned nodes may fail to invalidateblock
3062  // and be left unable to start as they have no tip candidates (as there
3063  // are no blocks that meet the "have data and are not invalid per
3064  // nStatus" criteria for inclusion in setBlockIndexCandidates).
3065  invalid_walk_tip->nStatus |= BLOCK_FAILED_VALID;
3066  setDirtyBlockIndex.insert(invalid_walk_tip);
3067  setBlockIndexCandidates.erase(invalid_walk_tip);
3068  setBlockIndexCandidates.insert(invalid_walk_tip->pprev);
3069  if (invalid_walk_tip->pprev == to_mark_failed && (to_mark_failed->nStatus & BLOCK_FAILED_VALID)) {
3070  // We only want to mark the last disconnected block as BLOCK_FAILED_VALID; its children
3071  // need to be BLOCK_FAILED_CHILD instead.
3072  to_mark_failed->nStatus = (to_mark_failed->nStatus ^ BLOCK_FAILED_VALID) | BLOCK_FAILED_CHILD;
3073  setDirtyBlockIndex.insert(to_mark_failed);
3074  }
3075 
3076  // Add any equal or more work headers to setBlockIndexCandidates
3077  auto candidate_it = candidate_blocks_by_work.lower_bound(invalid_walk_tip->pprev->nChainWork);
3078  while (candidate_it != candidate_blocks_by_work.end()) {
3079  if (!CBlockIndexWorkComparator()(candidate_it->second, invalid_walk_tip->pprev)) {
3080  setBlockIndexCandidates.insert(candidate_it->second);
3081  candidate_it = candidate_blocks_by_work.erase(candidate_it);
3082  } else {
3083  ++candidate_it;
3084  }
3085  }
3086 
3087  // Track the last disconnected block, so we can correct its BLOCK_FAILED_CHILD status in future
3088  // iterations, or, if it's the last one, call InvalidChainFound on it.
3089  to_mark_failed = invalid_walk_tip;
3090  }
3091 
3092  CheckBlockIndex(chainparams.GetConsensus());
3093 
3094  {
3095  LOCK(cs_main);
3096  if (m_chain.Contains(to_mark_failed)) {
3097  // If the to-be-marked invalid block is in the active chain, something is interfering and we can't proceed.
3098  return false;
3099  }
3100 
3101  // Mark pindex (or the last disconnected block) as invalid, even when it never was in the main chain
3102  to_mark_failed->nStatus |= BLOCK_FAILED_VALID;
3103  setDirtyBlockIndex.insert(to_mark_failed);
3104  setBlockIndexCandidates.erase(to_mark_failed);
3105  m_blockman.m_failed_blocks.insert(to_mark_failed);
3106 
3107  // If any new blocks somehow arrived while we were disconnecting
3108  // (above), then the pre-calculation of what should go into
3109  // setBlockIndexCandidates may have missed entries. This would
3110  // technically be an inconsistency in the block index, but if we clean
3111  // it up here, this should be an essentially unobservable error.
3112  // Loop back over all block index entries and add any missing entries
3113  // to setBlockIndexCandidates.
3114  BlockMap::iterator it = m_blockman.m_block_index.begin();
3115  while (it != m_blockman.m_block_index.end()) {
3116  if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->HaveTxsDownloaded() && !setBlockIndexCandidates.value_comp()(it->second, m_chain.Tip())) {
3117  setBlockIndexCandidates.insert(it->second);
3118  }
3119  it++;
3120  }
3121 
3122  InvalidChainFound(to_mark_failed);
3123  }
3124 
3125  // Only notify about a new block tip if the active chain was modified.
3126  if (pindex_was_in_chain) {
3127  uiInterface.NotifyBlockTip(GetSynchronizationState(IsInitialBlockDownload()), to_mark_failed->pprev);
3128  }
3129  return true;
3130 }
3131 
3132 bool InvalidateBlock(BlockValidationState& state, const CChainParams& chainparams, CBlockIndex *pindex) {
3133  return ::ChainstateActive().InvalidateBlock(state, chainparams, pindex);
3134 }
3135 
    AssertLockHeld(cs_main);

    int nHeight = pindex->nHeight;

    // Remove the invalidity flag from this block and all its descendants.
    // Walk the entire block index: any entry whose ancestor at nHeight is
    // pindex (i.e. any descendant, or pindex itself) gets its failure flags
    // cleared and is queued for persistence via setDirtyBlockIndex.
    BlockMap::iterator it = m_blockman.m_block_index.begin();
    while (it != m_blockman.m_block_index.end()) {
        if (!it->second->IsValid() && it->second->GetAncestor(nHeight) == pindex) {
            it->second->nStatus &= ~BLOCK_FAILED_MASK;
            setDirtyBlockIndex.insert(it->second);
            // Re-add as a candidate tip if it has all its transactions and
            // compares above the current tip per the candidate comparator.
            if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->HaveTxsDownloaded() && setBlockIndexCandidates.value_comp()(m_chain.Tip(), it->second)) {
                setBlockIndexCandidates.insert(it->second);
            }
            if (it->second == pindexBestInvalid) {
                // Reset invalid block marker if it was pointing to one of those.
                pindexBestInvalid = nullptr;
            }
            m_blockman.m_failed_blocks.erase(it->second);
        }
        it++;
    }

    // Remove the invalidity flag from all ancestors too.
    while (pindex != nullptr) {
        if (pindex->nStatus & BLOCK_FAILED_MASK) {
            pindex->nStatus &= ~BLOCK_FAILED_MASK;
            setDirtyBlockIndex.insert(pindex);
            m_blockman.m_failed_blocks.erase(pindex);
        }
        pindex = pindex->pprev;
    }
}
3169 
3172 }
3173 
{
    AssertLockHeld(cs_main);

    // Check for duplicate: if this header is already indexed, return the
    // existing entry unchanged.
    uint256 hash = block.GetHash();
    BlockMap::iterator it = m_block_index.find(hash);
    if (it != m_block_index.end())
        return it->second;

    // Construct new block index object
    CBlockIndex* pindexNew = new CBlockIndex(block);
    // We assign the sequence id to blocks only when the full data is available,
    // to avoid miners withholding blocks but broadcasting headers, to get a
    // competitive advantage.
    pindexNew->nSequenceId = 0;
    BlockMap::iterator mi = m_block_index.insert(std::make_pair(hash, pindexNew)).first;
    // Point phashBlock at the map key so the hash is not stored twice.
    pindexNew->phashBlock = &((*mi).first);
    BlockMap::iterator miPrev = m_block_index.find(block.hashPrevBlock);
    if (miPrev != m_block_index.end())
    {
        pindexNew->pprev = (*miPrev).second;
        pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
        pindexNew->BuildSkip();
    }
    // nTimeMax: maximum timestamp seen along this chain so far.
    // nChainWork: cumulative proof of work up to and including this header.
    pindexNew->nTimeMax = (pindexNew->pprev ? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime) : pindexNew->nTime);
    pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew);
    pindexNew->RaiseValidity(BLOCK_VALID_TREE);
    // Track the most-work header seen so far.
    if (pindexBestHeader == nullptr || pindexBestHeader->nChainWork < pindexNew->nChainWork)
        pindexBestHeader = pindexNew;

    setDirtyBlockIndex.insert(pindexNew);

    return pindexNew;
}
3209 
void CChainState::ReceivedBlockTransactions(const CBlock& block, CBlockIndex* pindexNew, const FlatFilePos& pos, const Consensus::Params& consensusParams)
{
    // Record the block's transaction count and on-disk location.
    pindexNew->nTx = block.vtx.size();
    pindexNew->nChainTx = 0;
    pindexNew->nFile = pos.nFile;
    pindexNew->nDataPos = pos.nPos;
    pindexNew->nUndoPos = 0;
    pindexNew->nStatus |= BLOCK_HAVE_DATA;
    if (IsWitnessEnabled(pindexNew->pprev, consensusParams)) {
        pindexNew->nStatus |= BLOCK_OPT_WITNESS;
    }
    setDirtyBlockIndex.insert(pindexNew);

    if (pindexNew->pprev == nullptr || pindexNew->pprev->HaveTxsDownloaded()) {
        // If pindexNew is the genesis block or all parents are BLOCK_VALID_TRANSACTIONS.
        std::deque<CBlockIndex*> queue;
        queue.push_back(pindexNew);

        // Recursively process any descendant blocks that now may be eligible to be connected.
        while (!queue.empty()) {
            CBlockIndex *pindex = queue.front();
            queue.pop_front();
            // Cumulative tx count up to and including this block, now that
            // all ancestors have their data.
            pindex->nChainTx = (pindex->pprev ? pindex->pprev->nChainTx : 0) + pindex->nTx;
            {
                // NOTE(review): upstream guards this assignment with
                // LOCK(cs_nBlockSequenceId); that line appears to be missing
                // from this copy - verify against the original source.
                pindex->nSequenceId = nBlockSequenceId++;
            }
            // Becomes a candidate tip when it compares at or above the
            // current tip per the candidate set's comparator.
            if (m_chain.Tip() == nullptr || !setBlockIndexCandidates.value_comp()(pindex, m_chain.Tip())) {
                setBlockIndexCandidates.insert(pindex);
            }
            // Any blocks parked in m_blocks_unlinked waiting on this one can
            // now be processed as well.
            std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = m_blockman.m_blocks_unlinked.equal_range(pindex);
            while (range.first != range.second) {
                std::multimap<CBlockIndex*, CBlockIndex*>::iterator it = range.first;
                queue.push_back(it->second);
                range.first++;
                m_blockman.m_blocks_unlinked.erase(it);
            }
        }
    } else {
        // Parent's transactions are not all available yet: park this block
        // until the parent's data arrives.
        if (pindexNew->pprev && pindexNew->pprev->IsValid(BLOCK_VALID_TREE)) {
            m_blockman.m_blocks_unlinked.insert(std::make_pair(pindexNew->pprev, pindexNew));
        }
    }
}
3256 
3257 static bool FindBlockPos(FlatFilePos &pos, unsigned int nAddSize, unsigned int nHeight, uint64_t nTime, bool fKnown = false)
3258 {
3259  LOCK(cs_LastBlockFile);
3260 
3261  unsigned int nFile = fKnown ? pos.nFile : nLastBlockFile;
3262  if (vinfoBlockFile.size() <= nFile) {
3263  vinfoBlockFile.resize(nFile + 1);
3264  }
3265 
3266  bool finalize_undo = false;
3267  if (!fKnown) {
3268  while (vinfoBlockFile[nFile].nSize + nAddSize >= MAX_BLOCKFILE_SIZE) {
3269  // when the undo file is keeping up with the block file, we want to flush it explicitly
3270  // when it is lagging behind (more blocks arrive than are being connected), we let the
3271  // undo block write case handle it
3272  finalize_undo = (vinfoBlockFile[nFile].nHeightLast == (unsigned int)ChainActive().Tip()->nHeight);
3273  nFile++;
3274  if (vinfoBlockFile.size() <= nFile) {
3275  vinfoBlockFile.resize(nFile + 1);
3276  }
3277  }
3278  pos.nFile = nFile;
3279  pos.nPos = vinfoBlockFile[nFile].nSize;
3280  }
3281 
3282  if ((int)nFile != nLastBlockFile) {
3283  if (!fKnown) {
3284  LogPrintf("Leaving block file %i: %s\n", nLastBlockFile, vinfoBlockFile[nLastBlockFile].ToString());
3285  }
3286  FlushBlockFile(!fKnown, finalize_undo);
3287  nLastBlockFile = nFile;
3288  }
3289 
3290  vinfoBlockFile[nFile].AddBlock(nHeight, nTime);
3291  if (fKnown)
3292  vinfoBlockFile[nFile].nSize = std::max(pos.nPos + nAddSize, vinfoBlockFile[nFile].nSize);
3293  else
3294  vinfoBlockFile[nFile].nSize += nAddSize;
3295 
3296  if (!fKnown) {
3297  bool out_of_space;
3298  size_t bytes_allocated = BlockFileSeq().Allocate(pos, nAddSize, out_of_space);
3299  if (out_of_space) {
3300  return AbortNode("Disk space is too low!", _("Disk space is too low!"));
3301  }
3302  if (bytes_allocated != 0 && fPruneMode) {
3303  fCheckForPruning = true;
3304  }
3305  }
3306 
3307  setDirtyFileInfo.insert(nFile);
3308  return true;
3309 }
3310 
3311 static bool FindUndoPos(BlockValidationState &state, int nFile, FlatFilePos &pos, unsigned int nAddSize)
3312 {
3313  pos.nFile = nFile;
3314 
3315  LOCK(cs_LastBlockFile);
3316 
3317  pos.nPos = vinfoBlockFile[nFile].nUndoSize;
3318  vinfoBlockFile[nFile].nUndoSize += nAddSize;
3319  setDirtyFileInfo.insert(nFile);
3320 
3321  bool out_of_space;
3322  size_t bytes_allocated = UndoFileSeq().Allocate(pos, nAddSize, out_of_space);
3323  if (out_of_space) {
3324  return AbortNode(state, "Disk space is too low!", _("Disk space is too low!"));
3325  }
3326  if (bytes_allocated != 0 && fPruneMode) {
3327  fCheckForPruning = true;
3328  }
3329 
3330  return true;
3331 }
3332 
3333 static bool CheckBlockHeader(const CBlockHeader& block, BlockValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW = true)
3334 {
3335  // Check proof of work matches claimed amount
3336  if (fCheckPOW && !CheckProofOfWork(block.GetHash(), block.nBits, consensusParams))
3337  return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, "high-hash", "proof of work failed");
3338 
3339  return true;
3340 }
3341 
bool CheckBlock(const CBlock& block, BlockValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW, bool fCheckMerkleRoot)
{
    // These are checks that are independent of context.

    // fChecked caches a previous successful full check of this block object.
    if (block.fChecked)
        return true;

    // Check that the header is valid (particularly PoW). This is mostly
    // redundant with the call in AcceptBlockHeader.
    if (!CheckBlockHeader(block, state, consensusParams, fCheckPOW))
        return false;

    // Check the merkle root.
    if (fCheckMerkleRoot) {
        bool mutated;
        uint256 hashMerkleRoot2 = BlockMerkleRoot(block, &mutated);
        if (block.hashMerkleRoot != hashMerkleRoot2)
            return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-txnmrklroot", "hashMerkleRoot mismatch");

        // Check for merkle tree malleability (CVE-2012-2459): repeating sequences
        // of transactions in a block without affecting the merkle root of a block,
        // while still invalidating it.
        if (mutated)
            return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-txns-duplicate", "duplicate transaction");
    }

    // All potential-corruption validation must be done before we do any
    // transaction validation, as otherwise we may mark the header as invalid
    // because we receive the wrong transactions for it.
    // Note that witness malleability is checked in ContextualCheckBlock, so no
    // checks that use witness data may be performed here.

    // Size limits
    // NOTE(review): the guarding `if` for this size-limit rejection appears to
    // be missing from this copy of the file - verify against the original.
        return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-length", "size limits failed");

    // First transaction must be coinbase, the rest must not be
    if (block.vtx.empty() || !block.vtx[0]->IsCoinBase())
        return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-missing", "first tx is not coinbase");
    for (unsigned int i = 1; i < block.vtx.size(); i++)
        if (block.vtx[i]->IsCoinBase())
            return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-multiple", "more than one coinbase");

    // Check transactions
    // Must check for duplicate inputs (see CVE-2018-17144)
    for (const auto& tx : block.vtx) {
        TxValidationState tx_state;
        if (!CheckTransaction(*tx, tx_state)) {
            // CheckBlock() does context-free validation checks. The only
            // possible failures are consensus failures.
            assert(tx_state.GetResult() == TxValidationResult::TX_CONSENSUS);
            // NOTE(review): the opening line of this state.Invalid(...) call
            // appears to be missing from this copy - verify against the original.
                          strprintf("Transaction check failed (tx hash %s) %s", tx->GetHash().ToString(), tx_state.GetDebugMessage()));
        }
    }
    // Sum the legacy (pre-segwit) signature operation counts over all txs.
    unsigned int nSigOps = 0;
    for (const auto& tx : block.vtx)
    {
        nSigOps += GetLegacySigOpCount(*tx);
    }
    // NOTE(review): the guarding `if` comparing nSigOps against the sigop
    // limit appears to be missing from this copy - verify against the original.
        return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-sigops", "out-of-bounds SigOpCount");

    // Only cache the result when the full set of checks was performed.
    if (fCheckPOW && fCheckMerkleRoot)
        block.fChecked = true;

    return true;
}
3410 
3411 bool IsWitnessEnabled(const CBlockIndex* pindexPrev, const Consensus::Params& params)
3412 {
3413  int height = pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1;
3414  return (height >= params.SegwitHeight);
3415 }
3416 
{
    // Scan the coinbase outputs for the witness commitment marker bytes
    // (OP_RETURN 0x24 0xaa 0x21 0xa9 0xed); the LAST matching output wins
    // since the loop keeps overwriting commitpos. Returns -1 when no
    // commitment output is present.
    int commitpos = -1;
    if (!block.vtx.empty()) {
        for (size_t o = 0; o < block.vtx[0]->vout.size(); o++) {
            const CTxOut& vout = block.vtx[0]->vout[o];
            // NOTE(review): the opening of this condition (the minimum
            // scriptPubKey size check) appears to be missing from this copy -
            // verify against the original.
                vout.scriptPubKey[0] == OP_RETURN &&
                vout.scriptPubKey[1] == 0x24 &&
                vout.scriptPubKey[2] == 0xaa &&
                vout.scriptPubKey[3] == 0x21 &&
                vout.scriptPubKey[4] == 0xa9 &&
                vout.scriptPubKey[5] == 0xed) {
                commitpos = o;
            }
        }
    }
    return commitpos;
}
3436 
3437 void UpdateUncommittedBlockStructures(CBlock& block, const CBlockIndex* pindexPrev, const Consensus::Params& consensusParams)
3438 {
3439  int commitpos = GetWitnessCommitmentIndex(block);
3440  static const std::vector<unsigned char> nonce(32, 0x00);
3441  if (commitpos != -1 && IsWitnessEnabled(pindexPrev, consensusParams) && !block.vtx[0]->HasWitness()) {
3442  CMutableTransaction tx(*block.vtx[0]);
3443  tx.vin[0].scriptWitness.stack.resize(1);
3444  tx.vin[0].scriptWitness.stack[0] = nonce;
3445  block.vtx[0] = MakeTransactionRef(std::move(tx));
3446  }
3447 }
3448 
std::vector<unsigned char> GenerateCoinbaseCommitment(CBlock& block, const CBlockIndex* pindexPrev, const Consensus::Params& consensusParams)
{
    std::vector<unsigned char> commitment;
    int commitpos = GetWitnessCommitmentIndex(block);
    // 32 zero bytes: the witness reserved value hashed together with the
    // witness merkle root below.
    std::vector<unsigned char> ret(32, 0x00);
    if (consensusParams.SegwitHeight != std::numeric_limits<int>::max()) {
        if (commitpos == -1) {
            // Commitment = double-SHA256(witness merkle root || reserved value).
            uint256 witnessroot = BlockWitnessMerkleRoot(block, nullptr);
            CHash256().Write(witnessroot.begin(), 32).Write(ret.data(), 32).Finalize(witnessroot.begin());
            CTxOut out;
            out.nValue = 0;
            // NOTE(review): the line sizing out.scriptPubKey (resize to the
            // full commitment script length before indexed writes) appears to
            // be missing from this copy - verify against the original.
            out.scriptPubKey[0] = OP_RETURN;
            out.scriptPubKey[1] = 0x24;
            out.scriptPubKey[2] = 0xaa;
            out.scriptPubKey[3] = 0x21;
            out.scriptPubKey[4] = 0xa9;
            out.scriptPubKey[5] = 0xed;
            memcpy(&out.scriptPubKey[6], witnessroot.begin(), 32);
            commitment = std::vector<unsigned char>(out.scriptPubKey.begin(), out.scriptPubKey.end());
            // Append the commitment output to the coinbase transaction.
            CMutableTransaction tx(*block.vtx[0]);
            tx.vout.push_back(out);
            block.vtx[0] = MakeTransactionRef(std::move(tx));
        }
    }
    // Make sure the coinbase also carries the witness reserved value.
    UpdateUncommittedBlockStructures(block, pindexPrev, consensusParams);
    return commitment;
}
3477 
{
    // Walk the checkpoint map from highest to lowest height and return the
    // first checkpointed block that is present in our block index.
    const MapCheckpoints& checkpoints = data.mapCheckpoints;

    for (const MapCheckpoints::value_type& i : reverse_iterate(checkpoints))
    {
        const uint256& hash = i.second;
        CBlockIndex* pindex = LookupBlockIndex(hash);
        if (pindex) {
            return pindex;
        }
    }
    // None of the checkpointed blocks is known to us.
    return nullptr;
}
3493 
3503 static bool ContextualCheckBlockHeader(const CBlockHeader& block, BlockValidationState& state, const CChainParams& params, const CBlockIndex* pindexPrev, int64_t nAdjustedTime) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
3504 {
3505  assert(pindexPrev != nullptr);
3506  const int nHeight = pindexPrev->nHeight + 1;
3507 
3508  // Check proof of work
3509  const Consensus::Params& consensusParams = params.GetConsensus();
3510  if (block.nBits != GetNextWorkRequired(pindexPrev, &block, consensusParams))
3511  return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, "bad-diffbits", "incorrect proof of work");
3512 
3513  // Check against checkpoints
3514  if (fCheckpointsEnabled) {
3515  // Don't accept any forks from the main chain prior to last checkpoint.
3516  // GetLastCheckpoint finds the last checkpoint in MapCheckpoints that's in our
3517  // BlockIndex().
3518  CBlockIndex* pcheckpoint = GetLastCheckpoint(params.Checkpoints());
3519  if (pcheckpoint && nHeight < pcheckpoint->nHeight) {
3520  LogPrintf("ERROR: %s: forked chain older than last checkpoint (height %d)\n", __func__, nHeight);
3521  return state.Invalid(BlockValidationResult::BLOCK_CHECKPOINT, "bad-fork-prior-to-checkpoint");
3522  }
3523  }
3524 
3525  // Check timestamp against prev
3526  if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast())
3527  return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, "time-too-old", "block's timestamp is too early");
3528 
3529  // Check timestamp
3530  if (block.GetBlockTime() > nAdjustedTime + MAX_FUTURE_BLOCK_TIME)
3531  return state.Invalid(BlockValidationResult::BLOCK_TIME_FUTURE, "time-too-new", "block timestamp too far in the future");
3532 
3533  // Reject outdated version blocks when 95% (75% on testnet) of the network has upgraded:
3534  // check for version 2, 3 and 4 upgrades
3535  if((block.nVersion < 2 && nHeight >= consensusParams.BIP34Height) ||
3536  (block.nVersion < 3 && nHeight >= consensusParams.BIP66Height) ||
3537  (block.nVersion < 4 && nHeight >= consensusParams.BIP65Height))
3538  return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, strprintf("bad-version(0x%08x)", block.nVersion),
3539  strprintf("rejected nVersion=0x%08x block", block.nVersion));
3540 
3541  return true;
3542 }
3543 
/** Block checks that depend on the previous block: BIP113 median-time-past
 *  locktime finality, BIP34 coinbase height, and the segwit witness
 *  commitment / block weight rules. */
static bool ContextualCheckBlock(const CBlock& block, BlockValidationState& state, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev)
{
    const int nHeight = pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1;

    // Start enforcing BIP113 (Median Time Past).
    int nLockTimeFlags = 0;
    if (nHeight >= consensusParams.CSVHeight) {
        assert(pindexPrev != nullptr);
        nLockTimeFlags |= LOCKTIME_MEDIAN_TIME_PAST;
    }

    // Under BIP113, locktimes are evaluated against the previous block's
    // median time past rather than this block's own timestamp.
    int64_t nLockTimeCutoff = (nLockTimeFlags & LOCKTIME_MEDIAN_TIME_PAST)
                              ? pindexPrev->GetMedianTimePast()
                              : block.GetBlockTime();

    // Check that all transactions are finalized
    for (const auto& tx : block.vtx) {
        if (!IsFinalTx(*tx, nHeight, nLockTimeCutoff)) {
            return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-nonfinal", "non-final transaction");
        }
    }

    // Enforce rule that the coinbase starts with serialized block height
    if (nHeight >= consensusParams.BIP34Height)
    {
        CScript expect = CScript() << nHeight;
        if (block.vtx[0]->vin[0].scriptSig.size() < expect.size() ||
            !std::equal(expect.begin(), expect.end(), block.vtx[0]->vin[0].scriptSig.begin())) {
            return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-height", "block height mismatch in coinbase");
        }
    }

    // Validation for witness commitments.
    // * We compute the witness hash (which is the hash including witnesses) of all the block's transactions, except the
    //   coinbase (where 0x0000....0000 is used instead).
    // * The coinbase scriptWitness is a stack of a single 32-byte vector, containing a witness reserved value (unconstrained).
    // * We build a merkle tree with all those witness hashes as leaves (similar to the hashMerkleRoot in the block header).
    // * There must be at least one output whose scriptPubKey is a single 36-byte push, the first 4 bytes of which are
    //   {0xaa, 0x21, 0xa9, 0xed}, and the following 32 bytes are SHA256^2(witness root, witness reserved value). In case there are
    //   multiple, the last one is used.
    bool fHaveWitness = false;
    if (nHeight >= consensusParams.SegwitHeight) {
        int commitpos = GetWitnessCommitmentIndex(block);
        if (commitpos != -1) {
            bool malleated = false;
            uint256 hashWitness = BlockWitnessMerkleRoot(block, &malleated);
            // The malleation check is ignored; as the transaction tree itself
            // already does not permit it, it is impossible to trigger in the
            // witness tree.
            if (block.vtx[0]->vin[0].scriptWitness.stack.size() != 1 || block.vtx[0]->vin[0].scriptWitness.stack[0].size() != 32) {
                return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-witness-nonce-size", strprintf("%s : invalid witness reserved value size", __func__));
            }
            // Recompute SHA256^2(witness root || reserved value) and compare
            // against the 32 bytes committed to in the coinbase output.
            CHash256().Write(hashWitness.begin(), 32).Write(&block.vtx[0]->vin[0].scriptWitness.stack[0][0], 32).Finalize(hashWitness.begin());
            if (memcmp(hashWitness.begin(), &block.vtx[0]->vout[commitpos].scriptPubKey[6], 32)) {
                return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-witness-merkle-match", strprintf("%s : witness merkle commitment mismatch", __func__));
            }
            fHaveWitness = true;
        }
    }

    // No witness data is allowed in blocks that don't commit to witness data, as this would otherwise leave room for spam
    if (!fHaveWitness) {
      for (const auto& tx : block.vtx) {
            if (tx->HasWitness()) {
                return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "unexpected-witness", strprintf("%s : unexpected witness data found", __func__));
            }
        }
    }

    // After the coinbase witness reserved value and commitment are verified,
    // we can check if the block weight passes (before we've checked the
    // coinbase witness, it would be possible for the weight to be too
    // large by filling up the coinbase witness, which doesn't change
    // the block hash, so we couldn't mark the block as permanently
    // failed).
    if (GetBlockWeight(block) > MAX_BLOCK_WEIGHT) {
        return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-weight", strprintf("%s : weight limit failed", __func__));
    }

    return true;
}
3631 
/**
 * Validate a block header and add it to the block index.
 * On success, *ppindex (when provided) points at the index entry for this
 * header. Also returns true when the header was already known and is not
 * marked invalid.
 */
bool BlockManager::AcceptBlockHeader(const CBlockHeader& block, BlockValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex)
{
    AssertLockHeld(cs_main);
    // Check for duplicate
    uint256 hash = block.GetHash();
    BlockMap::iterator miSelf = m_block_index.find(hash);
    CBlockIndex *pindex = nullptr;
    if (hash != chainparams.GetConsensus().hashGenesisBlock) {
        if (miSelf != m_block_index.end()) {
            // Block header is already known.
            pindex = miSelf->second;
            if (ppindex)
                *ppindex = pindex;
            if (pindex->nStatus & BLOCK_FAILED_MASK) {
                LogPrintf("ERROR: %s: block %s is marked invalid\n", __func__, hash.ToString());
                return state.Invalid(BlockValidationResult::BLOCK_CACHED_INVALID, "duplicate");
            }
            return true;
        }

        // Context-free checks (proof of work).
        if (!CheckBlockHeader(block, state, chainparams.GetConsensus()))
            return error("%s: Consensus::CheckBlockHeader: %s, %s", __func__, hash.ToString(), state.ToString());

        // Get prev block index
        CBlockIndex* pindexPrev = nullptr;
        BlockMap::iterator mi = m_block_index.find(block.hashPrevBlock);
        if (mi == m_block_index.end()) {
            LogPrintf("ERROR: %s: prev block not found\n", __func__);
            return state.Invalid(BlockValidationResult::BLOCK_MISSING_PREV, "prev-blk-not-found");
        }
        pindexPrev = (*mi).second;
        if (pindexPrev->nStatus & BLOCK_FAILED_MASK) {
            LogPrintf("ERROR: %s: prev block invalid\n", __func__);
            return state.Invalid(BlockValidationResult::BLOCK_INVALID_PREV, "bad-prevblk");
        }
        if (!ContextualCheckBlockHeader(block, state, chainparams, pindexPrev, GetAdjustedTime()))
            return error("%s: Consensus::ContextualCheckBlockHeader: %s, %s", __func__, hash.ToString(), state.ToString());

        /* Determine if this block descends from any block which has been found
         * invalid (m_failed_blocks), then mark pindexPrev and any blocks between
         * them as failed. For example:
         *
         *                D3
         *              /
         *      B2 - C2
         *    /         \
         *  A             D2 - E2 - F2
         *    \
         *      B1 - C1 - D1 - E1
         *
         * In the case that we attempted to reorg from E1 to F2, only to find
         * C2 to be invalid, we would mark D2, E2, and F2 as BLOCK_FAILED_CHILD
         * but NOT D3 (it was not in any of our candidate sets at the time).
         *
         * In any case D3 will also be marked as BLOCK_FAILED_CHILD at restart
         * in LoadBlockIndex.
         */
        if (!pindexPrev->IsValid(BLOCK_VALID_SCRIPTS)) {
            // The above does not mean "invalid": it checks if the previous block
            // hasn't been validated up to BLOCK_VALID_SCRIPTS. This is a performance
            // optimization, in the common case of adding a new block to the tip,
            // we don't need to iterate over the failed blocks list.
            for (const CBlockIndex* failedit : m_failed_blocks) {
                if (pindexPrev->GetAncestor(failedit->nHeight) == failedit) {
                    assert(failedit->nStatus & BLOCK_FAILED_VALID);
                    // Mark every block between pindexPrev and the failed
                    // ancestor as a failed child, and persist the change.
                    CBlockIndex* invalid_walk = pindexPrev;
                    while (invalid_walk != failedit) {
                        invalid_walk->nStatus |= BLOCK_FAILED_CHILD;
                        setDirtyBlockIndex.insert(invalid_walk);
                        invalid_walk = invalid_walk->pprev;
                    }
                    LogPrintf("ERROR: %s: prev block invalid\n", __func__);
                    return state.Invalid(BlockValidationResult::BLOCK_INVALID_PREV, "bad-prevblk");
                }
            }
        }
    }
    if (pindex == nullptr)
        pindex = AddToBlockIndex(block);

    if (ppindex)
        *ppindex = pindex;

    return true;
}
3717 
// Exposed wrapper for AcceptBlockHeader
bool ChainstateManager::ProcessNewBlockHeaders(const std::vector<CBlockHeader>& headers, BlockValidationState& state, const CChainParams& chainparams, const CBlockIndex** ppindex)
{
    AssertLockNotHeld(cs_main);
    {
        LOCK(cs_main);
        for (const CBlockHeader& header : headers) {
            CBlockIndex *pindex = nullptr; // Use a temp pindex instead of ppindex to avoid a const_cast
            bool accepted = m_blockman.AcceptBlockHeader(
                header, state, chainparams, &pindex);
            // NOTE(review): upstream invokes the block-index consistency check
            // here; that line appears to be missing from this copy - verify
            // against the original source.

            if (!accepted) {
                // Stop at the first failing header; `state` carries the reason.
                return false;
            }
            if (ppindex) {
                // Report the most recently accepted header to the caller.
                *ppindex = pindex;
            }
        }
    }
    // Outside cs_main: fire header-tip notifications, and log headers-sync
    // progress during initial block download.
    if (NotifyHeaderTip()) {
        if (::ChainstateActive().IsInitialBlockDownload() && ppindex && *ppindex) {
            LogPrintf("Synchronizing blockheaders, height: %d (~%.2f%%)\n", (*ppindex)->nHeight, 100.0/((*ppindex)->nHeight+(GetAdjustedTime() - (*ppindex)->GetBlockTime()) / Params().GetConsensus().nPowTargetSpacing) * (*ppindex)->nHeight);
        }
    }
    return true;
}
3745 
3747 static FlatFilePos SaveBlockToDisk(const CBlock& block, int nHeight, const CChainParams& chainparams, const FlatFilePos* dbp) {
3748  unsigned int nBlockSize = ::GetSerializeSize(block, CLIENT_VERSION);
3749  FlatFilePos blockPos;
3750  if (dbp != nullptr)
3751  blockPos = *dbp;
3752  if (!FindBlockPos(blockPos, nBlockSize+8, nHeight, block.GetBlockTime(), dbp != nullptr)) {
3753  error("%s: FindBlockPos failed", __func__);
3754  return FlatFilePos();
3755  }
3756  if (dbp == nullptr) {
3757  if (!WriteBlockToDisk(block, blockPos, chainparams.MessageStart())) {
3758  AbortNode("Failed to write block");
3759  return FlatFilePos();
3760  }
3761  }
3762  return blockPos;
3763 }
3764 
/**
 * Store a full block on disk and update the block index.
 * If dbp is non-null, the block is known to already reside on disk at that
 * position (e.g. during reindex). fRequested indicates we explicitly asked a
 * peer for this block; unrequested blocks are only processed conditionally.
 */
bool CChainState::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, BlockValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex, bool fRequested, const FlatFilePos* dbp, bool* fNewBlock)
{
    const CBlock& block = *pblock;

    if (fNewBlock) *fNewBlock = false;
    AssertLockHeld(cs_main);

    CBlockIndex *pindexDummy = nullptr;
    CBlockIndex *&pindex = ppindex ? *ppindex : pindexDummy;

    bool accepted_header = m_blockman.AcceptBlockHeader(block, state, chainparams, &pindex);
    CheckBlockIndex(chainparams.GetConsensus());

    if (!accepted_header)
        return false;

    // Try to process all requested blocks that we don't have, but only
    // process an unrequested block if it's new and has enough work to
    // advance our tip, and isn't too many blocks ahead.
    bool fAlreadyHave = pindex->nStatus & BLOCK_HAVE_DATA;
    bool fHasMoreOrSameWork = (m_chain.Tip() ? pindex->nChainWork >= m_chain.Tip()->nChainWork : true);
    // Blocks that are too out-of-order needlessly limit the effectiveness of
    // pruning, because pruning will not delete block files that contain any
    // blocks which are too close in height to the tip. Apply this test
    // regardless of whether pruning is enabled; it should generally be safe to
    // not process unrequested blocks.
    bool fTooFarAhead = (pindex->nHeight > int(m_chain.Height() + MIN_BLOCKS_TO_KEEP));

    // TODO: Decouple this function from the block download logic by removing fRequested
    // This requires some new chain data structure to efficiently look up if a
    // block is in a chain leading to a candidate for best tip, despite not
    // being such a candidate itself.

    // TODO: deal better with return value and error conditions for duplicate
    // and unrequested blocks.
    if (fAlreadyHave) return true;
    if (!fRequested) { // If we didn't ask for it:
        if (pindex->nTx != 0) return true; // This is a previously-processed block that was pruned
        if (!fHasMoreOrSameWork) return true; // Don't process less-work chains
        if (fTooFarAhead) return true; // Block height is too high

        // Protect against DoS attacks from low-work chains.
        // If our tip is behind, a peer could try to send us
        // low-work blocks on a fake chain that we would never
        // request; don't process these.
        if (pindex->nChainWork < nMinimumChainWork) return true;
    }

    if (!CheckBlock(block, state, chainparams.GetConsensus()) ||
        !ContextualCheckBlock(block, state, chainparams.GetConsensus(), pindex->pprev)) {
        // A BLOCK_MUTATED result may just mean we received a bad copy of the
        // block; only other failures mark the index entry permanently failed.
        if (state.IsInvalid() && state.GetResult() != BlockValidationResult::BLOCK_MUTATED) {
            pindex->nStatus |= BLOCK_FAILED_VALID;
            setDirtyBlockIndex.insert(pindex);
        }
        return error("%s: %s", __func__, state.ToString());
    }

    // Header is valid/has work, merkle tree and segwit merkle tree are good...RELAY NOW
    // (but if it does not build on our best tip, let the SendMessages loop relay it)
    if (!IsInitialBlockDownload() && m_chain.Tip() == pindex->pprev)
        GetMainSignals().NewPoWValidBlock(pindex, pblock);

    // Write block to history file
    if (fNewBlock) *fNewBlock = true;
    try {
        FlatFilePos blockPos = SaveBlockToDisk(block, pindex->nHeight, chainparams, dbp);
        if (blockPos.IsNull()) {
            state.Error(strprintf("%s: Failed to find position to write new block to disk", __func__));
            return false;
        }
        ReceivedBlockTransactions(block, pindex, blockPos, chainparams.GetConsensus());
    } catch (const std::runtime_error& e) {
        // Disk-level failures (e.g. write errors) abort the node.
        return AbortNode(state, std::string("System error: ") + e.what());
    }

    FlushStateToDisk(chainparams, state, FlushStateMode::NONE);

    CheckBlockIndex(chainparams.GetConsensus());

    return true;
}
3847 
/**
 * Process a block received from the network or submitted locally.
 *
 * Runs the context-free CheckBlock() checks, stores the block to disk via
 * AcceptBlock(), then attempts to make it the new tip via ActivateBestChain().
 *
 * @param[in]  chainparams      Chain consensus parameters.
 * @param[in]  pblock           The block to process (shared ownership).
 * @param[in]  fForceProcessing Process even if the block was not requested
 *                              (forwarded to AcceptBlock()).
 * @param[out] fNewBlock        Optional; set by AcceptBlock() when the block
 *                              was newly stored. Reset to false on entry.
 * @return false if validation or activation failed (reason reported via
 *         error() and, for validation failures, the BlockChecked signal).
 */
bool ChainstateManager::ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<const CBlock> pblock, bool fForceProcessing, bool* fNewBlock)
{
    AssertLockNotHeld(cs_main);

    {
        CBlockIndex *pindex = nullptr;
        if (fNewBlock) *fNewBlock = false;
        BlockValidationState state;

        // CheckBlock() does not support multi-threaded block validation because CBlock::fChecked can cause data race.
        // Therefore, the following critical section must include the CheckBlock() call as well.
        LOCK(cs_main);

        // Ensure that CheckBlock() passes before calling AcceptBlock, as
        // belt-and-suspenders.
        bool ret = CheckBlock(*pblock, state, chainparams.GetConsensus());
        if (ret) {
            // Store to disk
            ret = ::ChainstateActive().AcceptBlock(pblock, state, chainparams, &pindex, fForceProcessing, nullptr, fNewBlock);
        }
        if (!ret) {
            // Tell registered listeners that this block was checked and failed
            // before surfacing the failure reason.
            GetMainSignals().BlockChecked(*pblock, state);
            return error("%s: AcceptBlock FAILED (%s)", __func__, state.ToString());
        }
    }
    // cs_main is intentionally released before notification and activation.

    NotifyHeaderTip();

    BlockValidationState state; // Only used to report errors, not invalidity - ignore it
    if (!::ChainstateActive().ActivateBestChain(state, chainparams, pblock))
        return error("%s: ActivateBestChain failed (%s)", __func__, state.ToString());

    return true;
}
3882 
/**
 * Check whether `block` would be valid if connected on top of pindexPrev,
 * without mutating the real chainstate: everything runs against a throwaway
 * CCoinsViewCache and a dummy CBlockIndex.
 *
 * @param[out] state            Reason for failure, if any.
 * @param[in]  chainparams      Chain consensus parameters.
 * @param[in]  block            Candidate block; must build on the active tip.
 * @param[in]  pindexPrev       Must equal the current active tip (asserted).
 * @param[in]  fCheckPOW        Forwarded to CheckBlock().
 * @param[in]  fCheckMerkleRoot Forwarded to CheckBlock().
 */
bool TestBlockValidity(BlockValidationState& state, const CChainParams& chainparams, const CBlock& block, CBlockIndex* pindexPrev, bool fCheckPOW, bool fCheckMerkleRoot)
{
    AssertLockHeld(cs_main);
    assert(pindexPrev && pindexPrev == ::ChainActive().Tip());
    // Layer a temporary coins cache over the tip's UTXO view; it is never
    // flushed, so the connect below leaves the real coins state untouched.
    CCoinsViewCache viewNew(&::ChainstateActive().CoinsTip());
    uint256 block_hash(block.GetHash());
    // Dummy index entry placing the candidate block at tip height + 1.
    CBlockIndex indexDummy(block);
    indexDummy.pprev = pindexPrev;
    indexDummy.nHeight = pindexPrev->nHeight + 1;
    indexDummy.phashBlock = &block_hash;

    // NOTE: CheckBlockHeader is called by CheckBlock
    if (!ContextualCheckBlockHeader(block, state, chainparams, pindexPrev, GetAdjustedTime()))
        return error("%s: Consensus::ContextualCheckBlockHeader: %s", __func__, state.ToString());
    if (!CheckBlock(block, state, chainparams.GetConsensus(), fCheckPOW, fCheckMerkleRoot))
        return error("%s: Consensus::CheckBlock: %s", __func__, state.ToString());
    if (!ContextualCheckBlock(block, state, chainparams.GetConsensus(), pindexPrev))
        return error("%s: Consensus::ContextualCheckBlock: %s", __func__, state.ToString());
    // Final `true` argument: presumably ConnectBlock's fJustCheck flag, so no
    // state is written -- confirm against ConnectBlock's declaration.
    if (!::ChainstateActive().ConnectBlock(block, state, &indexDummy, viewNew, chainparams, true))
        return false;
    assert(state.IsValid());

    return true;
}
3907 
3912 /* Calculate the amount of disk space the block & undo files currently use */
3914 {
3915  LOCK(cs_LastBlockFile);
3916 
3917  uint64_t retval = 0;
3918  for (const CBlockFileInfo &file : vinfoBlockFile) {
3919  retval += file.nSize + file.nUndoSize;
3920  }
3921  return retval;
3922 }
3923 
/**
 * Mark the contents of one block file as pruned: clear the have-data /
 * have-undo flags and on-disk positions of every block index entry stored in
 * that file, drop those entries from m_blocks_unlinked, and reset the file's
 * bookkeeping record so it can be unlinked and later reused.
 */
void ChainstateManager::PruneOneBlockFile(const int fileNumber)
{
    AssertLockHeld(cs_main);
    LOCK(cs_LastBlockFile);

    for (const auto& entry : m_blockman.m_block_index) {
        CBlockIndex* pindex = entry.second;
        if (pindex->nFile == fileNumber) {
            pindex->nStatus &= ~BLOCK_HAVE_DATA;
            pindex->nStatus &= ~BLOCK_HAVE_UNDO;
            pindex->nFile = 0;
            pindex->nDataPos = 0;
            pindex->nUndoPos = 0;
            // Ensure the cleared entry is written back to the block tree DB.
            setDirtyBlockIndex.insert(pindex);

            // Prune from m_blocks_unlinked -- any block we prune would have
            // to be downloaded again in order to consider its chain, at which
            // point it would be considered as a candidate for
            // m_blocks_unlinked or setBlockIndexCandidates.
            auto range = m_blockman.m_blocks_unlinked.equal_range(pindex->pprev);
            while (range.first != range.second) {
                std::multimap<CBlockIndex *, CBlockIndex *>::iterator _it = range.first;
                // Advance before the possible erase: erasing _it invalidates
                // only _it, not the already-advanced range.first.
                range.first++;
                if (_it->second == pindex) {
                    m_blockman.m_blocks_unlinked.erase(_it);
                }
            }
        }
    }

    vinfoBlockFile[fileNumber].SetNull();
    setDirtyFileInfo.insert(fileNumber);
}
3957 
3958 
3959 void UnlinkPrunedFiles(const std::set<int>& setFilesToPrune)
3960 {
3961  for (std::set<int>::iterator it = setFilesToPrune.begin(); it != setFilesToPrune.end(); ++it) {
3962  FlatFilePos pos(*it, 0);
3963  fs::remove(BlockFileSeq().FileName(pos));
3964  fs::remove(UndoFileSeq().FileName(pos));
3965  LogPrintf("Prune: %s deleted blk/rev (%05u)\n", __func__, *it);
3966  }
3967 }
3968 
/* Calculate the block/rev files to delete based on height specified by user with RPC command pruneblockchain */
static void FindFilesToPruneManual(ChainstateManager& chainman, std::set<int>& setFilesToPrune, int nManualPruneHeight)
{
    assert(fPruneMode && nManualPruneHeight > 0);

    LOCK2(cs_main, cs_LastBlockFile);
    if (::ChainActive().Tip() == nullptr)
        return;

    // last block to prune is the lesser of (user-specified height, MIN_BLOCKS_TO_KEEP from the tip)
    // NOTE(review): nHeight (int) minus MIN_BLOCKS_TO_KEEP (unsigned) is
    // evaluated in unsigned arithmetic; with a tip below MIN_BLOCKS_TO_KEEP it
    // wraps to a huge value and the min() degenerates to nManualPruneHeight --
    // confirm callers only invoke this on a sufficiently tall chain.
    unsigned int nLastBlockWeCanPrune = std::min((unsigned)nManualPruneHeight, ::ChainActive().Tip()->nHeight - MIN_BLOCKS_TO_KEEP);
    int count=0;
    // The highest-numbered file (nLastBlockFile) is still being written to and
    // is never pruned here.
    for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
        if (vinfoBlockFile[fileNumber].nSize == 0 || vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune)
            continue;
        chainman.PruneOneBlockFile(fileNumber);
        setFilesToPrune.insert(fileNumber);
        count++;
    }
    LogPrintf("Prune (Manual): prune_height=%d removed %d blk/rev pairs\n", nLastBlockWeCanPrune, count);
}
3990 
3991 /* This function is called from the RPC code for pruneblockchain */
3992 void PruneBlockFilesManual(int nManualPruneHeight)
3993 {
3994  BlockValidationState state;
3995  const CChainParams& chainparams = Params();
3997  chainparams, state, FlushStateMode::NONE, nManualPruneHeight)) {
3998  LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString());
3999  }
4000 }
4001 
4017 static void FindFilesToPrune(ChainstateManager& chainman, std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight)
4018 {
4019  LOCK2(cs_main, cs_LastBlockFile);
4020  if (::ChainActive().Tip() == nullptr || nPruneTarget == 0) {
4021  return;
4022  }
4023  if ((uint64_t)::ChainActive().Tip()->nHeight <= nPruneAfterHeight) {
4024  return;
4025  }
4026 
4027  unsigned int nLastBlockWeCanPrune = ::ChainActive().Tip()->nHeight - MIN_BLOCKS_TO_KEEP;
4028  uint64_t nCurrentUsage = CalculateCurrentUsage();
4029  // We don't check to prune until after we've allocated new space for files
4030  // So we should leave a buffer under our target to account for another allocation
4031  // before the next pruning.
4032  uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
4033  uint64_t nBytesToPrune;
4034  int count=0;
4035 
4036  if (nCurrentUsage + nBuffer >= nPruneTarget) {
4037  // On a prune event, the chainstate DB is flushed.
4038  // To avoid excessive prune events negating the benefit of high dbcache
4039  // values, we should not prune too rapidly.
4040  // So when pruning in IBD, increase the buffer a bit to avoid a re-prune too soon.
4042  // Since this is only relevant during IBD, we use a fixed 10%
4043  nBuffer += nPruneTarget / 10;
4044  }
4045 
4046  for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
4047  nBytesToPrune = vinfoBlockFile[fileNumber].nSize + vinfoBlockFile[fileNumber].nUndoSize;
4048 
4049  if (vinfoBlockFile[fileNumber].nSize == 0)
4050  continue;
4051 
4052  if (nCurrentUsage + nBuffer < nPruneTarget) // are we below our target?
4053  break;
4054 
4055  // don't prune files that could have a block within MIN_BLOCKS_TO_KEEP of the main chain's tip but keep scanning
4056  if (vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune)
4057  continue;
4058 
4059  chainman.PruneOneBlockFile(fileNumber);
4060  // Queue up the files for removal
4061  setFilesToPrune.insert(fileNumber);
4062  nCurrentUsage -= nBytesToPrune;
4063  count++;
4064  }
4065  }
4066 
4067  LogPrint(BCLog::PRUNE, "Prune: target=%dMiB actual=%dMiB diff=%dMiB max_prune_height=%d removed %d blk/rev pairs\n",
4068  nPruneTarget/1024/1024, nCurrentUsage/1024/1024,
4069  ((int64_t)nPruneTarget - (int64_t)nCurrentUsage)/1024/1024,
4070  nLastBlockWeCanPrune, count);
4071 }
4072 
4074 {
4075  return FlatFileSeq(GetBlocksDir(), "blk", BLOCKFILE_CHUNK_SIZE);
4076 }
4077 
4079 {
4080  return FlatFileSeq(GetBlocksDir(), "rev", UNDOFILE_CHUNK_SIZE);
4081 }
4082 
4083 FILE* OpenBlockFile(const FlatFilePos &pos, bool fReadOnly) {
4084  return BlockFileSeq().Open(pos, fReadOnly);
4085 }
4086 
4088 static FILE* OpenUndoFile(const FlatFilePos &pos, bool fReadOnly) {
4089  return UndoFileSeq().Open(pos, fReadOnly);
4090 }
4091 
4092 fs::path GetBlockPosFilename(const FlatFilePos &pos)
4093 {
4094  return BlockFileSeq().FileName(pos);
4095 }
4096 
4098 {
4099  AssertLockHeld(cs_main);
4100 
4101  if (hash.IsNull())
4102  return nullptr;
4103 
4104  // Return existing
4105  BlockMap::iterator mi = m_block_index.find(hash);
4106  if (mi != m_block_index.end())
4107  return (*mi).second;
4108 
4109  // Create new
4110  CBlockIndex* pindexNew = new CBlockIndex();
4111  mi = m_block_index.insert(std::make_pair(hash, pindexNew)).first;
4112  pindexNew->phashBlock = &((*mi).first);
4113 
4114  return pindexNew;
4115 }
4116 
4118  const Consensus::Params& consensus_params,
4119  CBlockTreeDB& blocktree,
4120  std::set<CBlockIndex*, CBlockIndexWorkComparator>& block_index_candidates)
4121 {
4122  if (!blocktree.LoadBlockIndexGuts(consensus_params, [this](const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return this->InsertBlockIndex(hash); }))
4123  return false;
4124 
4125  // Calculate nChainWork
4126  std::vector<std::pair<int, CBlockIndex*> > vSortedByHeight;
4127  vSortedByHeight.reserve(m_block_index.size());
4128  for (const std::pair<const uint256, CBlockIndex*>& item : m_block_index)
4129  {
4130  CBlockIndex* pindex = item.second;
4131  vSortedByHeight.push_back(std::make_pair(pindex->nHeight, pindex));
4132  }
4133  sort(vSortedByHeight.begin(), vSortedByHeight.end());
4134  for (const std::pair<int, CBlockIndex*>& item : vSortedByHeight)
4135  {
4136  if (ShutdownRequested()) return false;
4137  CBlockIndex* pindex = item.second;
4138  pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex);
4139  pindex->nTimeMax = (pindex->pprev ? std::max(pindex->pprev->nTimeMax, pindex->nTime) : pindex->nTime);
4140  // We can link the chain of blocks for which we've received transactions at some point.
4141  // Pruned nodes may have deleted the block.
4142  if (pindex->nTx > 0) {
4143  if (pindex->pprev) {
4144  if (pindex->pprev->HaveTxsDownloaded()) {
4145  pindex->nChainTx = pindex->pprev->nChainTx + pindex->nTx;
4146  } else {
4147  pindex->nChainTx = 0;
4148  m_blocks_unlinked.insert(std::make_pair(pindex->pprev, pindex));
4149  }
4150  } else {
4151  pindex->nChainTx = pindex->nTx;
4152  }
4153  }
4154  if (!(pindex->nStatus & BLOCK_FAILED_MASK) && pindex->pprev && (pindex->pprev->nStatus & BLOCK_FAILED_MASK)) {
4155  pindex->nStatus |= BLOCK_FAILED_CHILD;
4156  setDirtyBlockIndex.insert(pindex);
4157  }
4158  if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && (pindex->HaveTxsDownloaded() || pindex->pprev == nullptr)) {
4159  block_index_candidates.insert(pindex);
4160  }
4161  if (pindex->nStatus & BLOCK_FAILED_MASK && (!pindexBestInvalid || pindex->nChainWork > pindexBestInvalid->nChainWork))
4162  pindexBestInvalid = pindex;
4163  if (pindex->pprev)
4164  pindex->BuildSkip();
4165  if (pindex->IsValid(BLOCK_VALID_TREE) && (pindexBestHeader == nullptr || CBlockIndexWorkComparator()(pindexBestHeader, pindex)))
4166  pindexBestHeader = pindex;
4167  }
4168 
4169  return true;
4170 }
4171 
4173  m_failed_blocks.clear();
4174  m_blocks_unlinked.clear();
4175 
4176  for (const BlockMap::value_type& entry : m_block_index) {
4177  delete entry.second;
4178  }
4179 
4180  m_block_index.clear();
4181 }
4182 
4183 bool static LoadBlockIndexDB(ChainstateManager& chainman, const CChainParams& chainparams) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
4184 {
4185  if (!chainman.m_blockman.LoadBlockIndex(
4186  chainparams.GetConsensus(), *pblocktree,
4188  return false;
4189  }
4190 
4191  // Load block file info
4192  pblocktree->ReadLastBlockFile(nLastBlockFile);
4193  vinfoBlockFile.resize(nLastBlockFile + 1);
4194  LogPrintf("%s: last block file = %i\n", __func__, nLastBlockFile);
4195  for (int nFile = 0; nFile <= nLastBlockFile; nFile++) {
4196  pblocktree->ReadBlockFileInfo(nFile, vinfoBlockFile[nFile]);
4197  }
4198  LogPrintf("%s: last block file info: %s\n", __func__, vinfoBlockFile[nLastBlockFile].ToString());
4199  for (int nFile = nLastBlockFile + 1; true; nFile++) {
4200  CBlockFileInfo info;
4201  if (pblocktree->ReadBlockFileInfo(nFile, info)) {
4202  vinfoBlockFile.push_back(info);
4203  } else {
4204  break;
4205  }
4206  }
4207 
4208  // Check presence of blk files
4209  LogPrintf("Checking all blk files are present...\n");
4210  std::set<int> setBlkDataFiles;
4211  for (const std::pair<const uint256, CBlockIndex*>& item : chainman.BlockIndex()) {
4212  CBlockIndex* pindex = item.second;
4213  if (pindex->nStatus & BLOCK_HAVE_DATA) {
4214  setBlkDataFiles.insert(pindex->nFile);
4215  }
4216  }
4217  for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++)
4218  {
4219  FlatFilePos pos(*it, 0);
4220  if (CAutoFile(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION).IsNull()) {
4221  return false;
4222  }
4223  }
4224 
4225  // Check whether we have ever pruned block & undo files
4226  pblocktree->ReadFlag("prunedblockfiles", fHavePruned);
4227  if (fHavePruned)
4228  LogPrintf("LoadBlockIndexDB(): Block files have previously been pruned\n");
4229 
4230  // Check whether we need to continue reindexing
4231  bool fReindexing = false;
4232  pblocktree->ReadReindexing(fReindexing);
4233  if(fReindexing) fReindex = true;
4234 
4235  return true;
4236 }
4237 
4238 bool CChainState::LoadChainTip(const CChainParams& chainparams)
4239 {
4240  AssertLockHeld(cs_main);
4241  const CCoinsViewCache& coins_cache = CoinsTip();
4242  assert(!coins_cache.GetBestBlock().IsNull()); // Never called when the coins view is empty
4243  const CBlockIndex* tip = m_chain.Tip();
4244 
4245  if (tip && tip->GetBlockHash() == coins_cache.GetBestBlock()) {
4246  return true;
4247  }
4248 
4249  // Load pointer to end of best chain
4250  CBlockIndex* pindex = LookupBlockIndex(coins_cache.GetBestBlock());
4251  if (!pindex) {
4252  return false;
4253  }
4254  m_chain.SetTip(pindex);
4256 
4257  tip = m_chain.Tip();
4258  LogPrintf("Loaded best chain: hashBestChain=%s height=%d date=%s progress=%f\n",
4259  tip->GetBlockHash().ToString(),
4260  m_chain.Height(),
4261  FormatISO8601DateTime(tip->GetBlockTime()),
4262  GuessVerificationProgress(chainparams.TxData(), tip));
4263  return true;
4264 }
4265 
4267 {
4268  uiInterface.ShowProgress(_("Verifying blocks...").translated, 0, false);
4269 }
4270 
4272 {
4273  uiInterface.ShowProgress("", 100, false);
4274 }
4275 
4276 bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview, int nCheckLevel, int nCheckDepth)
4277 {
4278  LOCK(cs_main);
4279  if (::ChainActive().Tip() == nullptr || ::ChainActive().Tip()->pprev == nullptr)
4280  return true;
4281 
4282  // Verify blocks in the best chain
4284  nCheckDepth = ::ChainActive().Height();
4285  nCheckLevel = std::max(0, std::min(4, nCheckLevel));
4286  LogPrintf("Verifying last %i blocks at level %i\n", nCheckDepth, nCheckLevel);
4287  CCoinsViewCache coins(coinsview);
4288  CBlockIndex* pindex;
4289  CBlockIndex* pindexFailure = nullptr;
4290  int nGoodTransactions = 0;
4291  BlockValidationState state;
4292  int reportDone = 0;
4293  LogPrintf("[0%%]..."); /* Continued */
4294  for (pindex = ::ChainActive().Tip(); pindex && pindex->pprev; pindex = pindex->pprev) {
4295  const int percentageDone = std::max(1, std::min(99, (int)(((double)(::ChainActive().Height() - pindex->nHeight)) / (double)nCheckDepth * (nCheckLevel >= 4 ? 50 : 100))));
4296  if (reportDone < percentageDone/10) {
4297  // report every 10% step
4298  LogPrintf("[%d%%]...", percentageDone); /* Continued */
4299  reportDone = percentageDone/10;
4300  }
4301  uiInterface.ShowProgress(_("Verifying blocks...").translated, percentageDone, false);
4302  if (pindex->nHeight <= ::ChainActive().Height()-nCheckDepth)
4303  break;
4304  if (fPruneMode && !(pindex->nStatus & BLOCK_HAVE_DATA)) {
4305  // If pruning, only go back as far as we have data.
4306  LogPrintf("VerifyDB(): block verification stopping at height %d (pruning, no data)\n", pindex->nHeight);
4307  break;
4308  }
4309  CBlock block;
4310  // check level 0: read from disk
4311  if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus()))
4312  return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
4313  // check level 1: verify block validity
4314  if (nCheckLevel >= 1 && !CheckBlock(block, state, chainparams.GetConsensus()))
4315  return error("%s: *** found bad block at %d, hash=%s (%s)\n", __func__,
4316  pindex->nHeight, pindex->GetBlockHash().ToString(), state.ToString());
4317  // check level 2: verify undo validity
4318  if (nCheckLevel >= 2 && pindex) {
4319  CBlockUndo undo;
4320  if (!pindex->GetUndoPos().IsNull()) {
4321  if (!UndoReadFromDisk(undo, pindex)) {
4322  return error("VerifyDB(): *** found bad undo data at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
4323  }
4324  }
4325  }
4326  // check level 3: check for inconsistencies during memory-only disconnect of tip blocks
4327  if (nCheckLevel >= 3 && (coins.DynamicMemoryUsage() + ::ChainstateActive().CoinsTip().DynamicMemoryUsage()) <= nCoinCacheUsage) {
4328  assert(coins.GetBestBlock() == pindex->GetBlockHash());
4329  DisconnectResult res = ::ChainstateActive().DisconnectBlock(block, pindex, coins);
4330  if (res == DISCONNECT_FAILED) {
4331  return error("VerifyDB(): *** irrecoverable inconsistency in block data at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
4332  }
4333  if (res == DISCONNECT_UNCLEAN) {
4334  nGoodTransactions = 0;
4335  pindexFailure = pindex;
4336  } else {
4337  nGoodTransactions += block.vtx.size();
4338  }
4339  }
4340  if (ShutdownRequested()) return true;
4341  }
4342  if (pindexFailure)
4343  return error("VerifyDB(): *** coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", ::ChainActive().Height() - pindexFailure->nHeight + 1, nGoodTransactions);
4344 
4345  // store block count as we move pindex at check level >= 4
4346  int block_count = ::ChainActive().Height() - pindex->nHeight;
4347 
4348  // check level 4: try reconnecting blocks
4349  if (nCheckLevel >= 4) {
4350  while (pindex != ::ChainActive().Tip()) {
4351  const int percentageDone = std::max(1, std::min(99, 100 - (int)(((double)(::ChainActive().Height() - pindex->nHeight)) / (double)nCheckDepth * 50)));
4352  if (reportDone < percentageDone/10) {
4353  // report every 10% step
4354  LogPrintf("[%d%%]...", percentageDone); /* Continued */
4355  reportDone = percentageDone/10;
4356  }
4357  uiInterface.ShowProgress(_("Verifying blocks...").translated, percentageDone, false);
4358  pindex = ::ChainActive().Next(pindex);
4359  CBlock block;
4360  if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus()))
4361  return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
4362  if (!::ChainstateActive().ConnectBlock(block, state, pindex, coins, chainparams))
4363  return error("VerifyDB(): *** found unconnectable block at %d, hash=%s (%s)", pindex->nHeight, pindex->GetBlockHash().ToString(), state.ToString());
4364  if (ShutdownRequested()) return true;
4365  }
4366  }
4367 
4368  LogPrintf("[DONE].\n");
4369  LogPrintf("No coin database inconsistencies in last %i blocks (%i transactions)\n", block_count, nGoodTransactions);
4370 
4371  return true;
4372 }
4373 
4375 bool CChainState::RollforwardBlock(const CBlockIndex* pindex, CCoinsViewCache& inputs, const CChainParams& params)
4376 {
4377  // TODO: merge with ConnectBlock
4378  CBlock block;
4379  if (!ReadBlockFromDisk(block, pindex, params.GetConsensus())) {
4380  return error("ReplayBlock(): ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
4381  }
4382 
4383  for (const CTransactionRef& tx : block.vtx) {
4384  if (!tx->IsCoinBase()) {
4385  for (const CTxIn &txin : tx->vin) {
4386  inputs.SpendCoin(txin.prevout);
4387  }
4388  }
4389  // Pass check = true as every addition may be an overwrite.
4390  AddCoins(inputs, *tx, pindex->nHeight, true);
4391  }
4392  return true;
4393 }
4394 
4396 {
4397  LOCK(cs_main);
4398 
4399  CCoinsView& db = this->CoinsDB();
4400  CCoinsViewCache cache(&db);
4401 
4402  std::vector<uint256> hashHeads = db.GetHeadBlocks();
4403  if (hashHeads.empty()) return true; // We're already in a consistent state.
4404  if (hashHeads.size() != 2) return error("ReplayBlocks(): unknown inconsistent state");
4405 
4406  uiInterface.ShowProgress(_("Replaying blocks...").translated, 0, false);
4407  LogPrintf("Replaying blocks\n");
4408 
4409  const CBlockIndex* pindexOld = nullptr; // Old tip during the interrupted flush.
4410  const CBlockIndex* pindexNew; // New tip during the interrupted flush.
4411  const CBlockIndex* pindexFork = nullptr; // Latest block common to both the old and the new tip.
4412 
4413  if (m_blockman.m_block_index.count(hashHeads[0]) == 0) {
4414  return error("ReplayBlocks(): reorganization to unknown block requested");
4415  }
4416  pindexNew = m_blockman.m_block_index[hashHeads[0]];
4417 
4418  if (!hashHeads[1].IsNull()) { // The old tip is allowed to be 0, indicating it's the first flush.
4419  if (m_blockman.m_block_index.count(hashHeads[1]) == 0) {
4420  return error("ReplayBlocks(): reorganization from unknown block requested");
4421  }
4422  pindexOld = m_blockman.m_block_index[hashHeads[1]];
4423  pindexFork = LastCommonAncestor(pindexOld, pindexNew);
4424  assert(pindexFork != nullptr);
4425  }
4426 
4427  // Rollback along the old branch.
4428  while (pindexOld != pindexFork) {
4429  if (pindexOld->nHeight > 0) { // Never disconnect the genesis block.
4430  CBlock block;
4431  if (!ReadBlockFromDisk(block, pindexOld, params.GetConsensus())) {
4432  return error("RollbackBlock(): ReadBlockFromDisk() failed at %d, hash=%s", pindexOld->nHeight, pindexOld->GetBlockHash().ToString());
4433  }
4434  LogPrintf("Rolling back %s (%i)\n", pindexOld->GetBlockHash().ToString(), pindexOld->nHeight);
4435  DisconnectResult res = DisconnectBlock(block, pindexOld, cache);
4436  if (res == DISCONNECT_FAILED) {
4437  return error("RollbackBlock(): DisconnectBlock failed at %d, hash=%s", pindexOld->nHeight, pindexOld->GetBlockHash().ToString());
4438  }
4439  // If DISCONNECT_UNCLEAN is returned, it means a non-existing UTXO was deleted, or an existing UTXO was
4440  // overwritten. It corresponds to cases where the block-to-be-disconnect never had all its operations
4441  // applied to the UTXO set. However, as both writing a UTXO and deleting a UTXO are idempotent operations,
4442  // the result is still a version of the UTXO set with the effects of that block undone.
4443  }
4444  pindexOld = pindexOld->pprev;
4445  }
4446 
4447  // Roll forward from the forking point to the new tip.
4448  int nForkHeight = pindexFork ? pindexFork->nHeight : 0;
4449  for (int nHeight = nForkHeight + 1; nHeight <= pindexNew->nHeight; ++nHeight) {
4450  const CBlockIndex* pindex = pindexNew->GetAncestor(nHeight);
4451  LogPrintf("Rolling forward %s (%i)\n", pindex->GetBlockHash().ToString(), nHeight);
4452  uiInterface.ShowProgress(_("Replaying blocks...").translated, (int) ((nHeight - nForkHeight) * 100.0 / (pindexNew->nHeight - nForkHeight)) , false);
4453  if (!RollforwardBlock(pindex, cache, params)) return false;
4454  }
4455 
4456  cache.SetBestBlock(pindexNew->GetBlockHash());
4457  cache.Flush();
4458  uiInterface.ShowProgress("", 100, false);
4459  return true;
4460 }
4461 
4464 {
4465  AssertLockHeld(cs_main);
4466  assert(!m_chain.Contains(index)); // Make sure this block isn't active
4467 
4468  // Reduce validity
4469  index->nStatus = std::min<unsigned int>(index->nStatus & BLOCK_VALID_MASK, BLOCK_VALID_TREE) | (index->nStatus & ~BLOCK_VALID_MASK);
4470  // Remove have-data flags.
4471  index->nStatus &= ~(BLOCK_HAVE_DATA | BLOCK_HAVE_UNDO);
4472  // Remove storage location.
4473  index->nFile = 0;
4474  index->nDataPos = 0;
4475  index->nUndoPos = 0;
4476  // Remove various other things
4477  index->nTx = 0;
4478  index->nChainTx = 0;
4479  index->nSequenceId = 0;
4480  // Make sure it gets written.
4481  setDirtyBlockIndex.insert(index);
4482  // Update indexes
4483  setBlockIndexCandidates.erase(index);
4484  auto ret = m_blockman.m_blocks_unlinked.equal_range(index->pprev);
4485  while (ret.first != ret.second) {
4486  if (ret.first->second == index) {
4487  m_blockman.m_blocks_unlinked.erase(ret.first++);
4488  } else {
4489  ++ret.first;
4490  }
4491  }
4492  // Mark parent as eligible for main chain again
4493  if (index->pprev && index->pprev->IsValid(BLOCK_VALID_TRANSACTIONS) && index->pprev->HaveTxsDownloaded()) {
4494  setBlockIndexCandidates.insert(index->pprev);
4495  }
4496 }
4497 
4499 {
4500  // Note that during -reindex-chainstate we are called with an empty m_chain!
4501 
4502  // First erase all post-segwit blocks without witness not in the main chain,
4503  // as this can we done without costly DisconnectTip calls. Active
4504  // blocks will be dealt with below (releasing cs_main in between).
4505  {
4506  LOCK(cs_main);
4507  for (const auto& entry : m_blockman.m_block_index) {
4508  if (IsWitnessEnabled(entry.second->pprev, params.GetConsensus()) && !(entry.second->nStatus & BLOCK_OPT_WITNESS) && !m_chain.Contains(entry.second)) {
4509  EraseBlockData(entry.second);
4510  }
4511  }
4512  }
4513 
4514  // Find what height we need to reorganize to.
4515  CBlockIndex *tip;
4516  int nHeight = 1;
4517  {
4518  LOCK(cs_main);
4519  while (nHeight <= m_chain.Height()) {
4520  // Although SCRIPT_VERIFY_WITNESS is now generally enforced on all
4521  // blocks in ConnectBlock, we don't need to go back and
4522  // re-download/re-verify blocks from before segwit actually activated.
4523  if (IsWitnessEnabled(m_chain[nHeight - 1], params.GetConsensus()) && !(m_chain[nHeight]->nStatus & BLOCK_OPT_WITNESS)) {
4524  break;
4525  }
4526  nHeight++;
4527  }
4528 
4529  tip = m_chain.Tip();
4530  }
4531  // nHeight is now the height of the first insufficiently-validated block, or tipheight + 1
4532 
4533  BlockValidationState state;
4534  // Loop until the tip is below nHeight, or we reach a pruned block.
4535  while (!ShutdownRequested()) {
4536  {
4537  LOCK2(cs_main, ::mempool.cs);
4538  // Make sure nothing changed from under us (this won't happen because RewindBlockIndex runs before importing/network are active)
4539  assert(tip == m_chain.Tip());
4540  if (tip == nullptr || tip->nHeight < nHeight) break;
4541  if (fPruneMode && !(tip->nStatus & BLOCK_HAVE_DATA)) {
4542  // If pruning, don't try rewinding past the HAVE_DATA point;
4543  // since older blocks can't be served anyway, there's
4544  // no need to walk further, and trying to DisconnectTip()
4545  // will fail (and require a needless reindex/redownload
4546  // of the blockchain).
4547  break;
4548  }
4549 
4550  // Disconnect block
4551  if (!DisconnectTip(state, params, nullptr)) {
4552  return error("RewindBlockIndex: unable to disconnect block at height %i (%s)", tip->nHeight, state.ToString());
4553  }
4554 
4555  // Reduce validity flag and have-data flags.
4556  // We do this after actual disconnecting, otherwise we'll end up writing the lack of data
4557  // to disk before writing the chainstate, resulting in a failure to continue if interrupted.
4558  // Note: If we encounter an insufficiently validated block that
4559  // is on m_chain, it must be because we are a pruning node, and
4560  // this block or some successor doesn't HAVE_DATA, so we were unable to
4561  // rewind all the way. Blocks remaining on m_chain at this point
4562  // must not have their validity reduced.
4563  EraseBlockData(tip);
4564 
4565  tip = tip->pprev;
4566  }
4567  // Make sure the queue of validation callbacks doesn't grow unboundedly.
4569 
4570  // Occasionally flush state to disk.
4571  if (!FlushStateToDisk(params, state, FlushStateMode::PERIODIC)) {
4572  LogPrintf("RewindBlockIndex: unable to flush state to disk (%s)\n", state.ToString());
4573  return false;
4574  }
4575  }
4576 
4577  {
4578  LOCK(cs_main);
4579  if (m_chain.Tip() != nullptr) {
4580  // We can't prune block index candidates based on our tip if we have
4581  // no tip due to m_chain being empty!
4583 
4584  CheckBlockIndex(params.GetConsensus());
4585 
4586  // FlushStateToDisk can possibly read ::ChainActive(). Be conservative
4587  // and skip it here, we're about to -reindex-chainstate anyway, so
4588  // it'll get called a bunch real soon.
4589  BlockValidationState state;
4590  if (!FlushStateToDisk(params, state, FlushStateMode::ALWAYS)) {
4591  LogPrintf("RewindBlockIndex: unable to flush state to disk (%s)\n", state.ToString());
4592  return false;
4593  }
4594  }
4595  }
4596 
4597  return true;
4598 }
4599 
4601  nBlockSequenceId = 1;
4602  setBlockIndexCandidates.clear();
4603 }
4604 
4605 // May NOT be used after any connections are up as much
4606 // of the peer-processing logic assumes a consistent
4607 // block index state
4609 {
4610  LOCK(cs_main);
4611  g_chainman.Unload();
4612  pindexBestInvalid = nullptr;
4613  pindexBestHeader = nullptr;
4614  mempool.clear();
4615  vinfoBlockFile.clear();
4616  nLastBlockFile = 0;
4617  setDirtyBlockIndex.clear();
4618  setDirtyFileInfo.clear();
4620  for (int b = 0; b < VERSIONBITS_NUM_BITS; b++) {
4621  warningcache[b].clear();
4622  }
4623  fHavePruned = false;
4624 }
4625 
4627 {
4628  AssertLockHeld(cs_main);
4629  // Load block index from databases
4630  bool needs_init = fReindex;
4631  if (!fReindex) {
4632  bool ret = LoadBlockIndexDB(*this, chainparams);
4633  if (!ret) return false;
4634  needs_init = m_blockman.m_block_index.empty();
4635  }
4636 
4637  if (needs_init) {
4638  // Everything here is for *new* reindex/DBs. Thus, though
4639  // LoadBlockIndexDB may have set fReindex if we shut down
4640  // mid-reindex previously, we don't check fReindex and
4641  // instead only check it prior to LoadBlockIndexDB to set
4642  // needs_init.
4643 
4644  LogPrintf("Initializing databases...\n");
4645  }
4646  return true;
4647 }
4648 
// Ensure the genesis block is written to disk and present in the block index.
// Idempotent: returns true immediately when genesis is already indexed.
// Returns false (via error()) if writing or indexing the block fails.
{
    LOCK(cs_main);

    // Check whether we're already initialized by checking for genesis in
    // m_blockman.m_block_index. Note that we can't use m_chain here, since it is
    // set based on the coins db, not the block index db, which is the only
    // thing loaded at this point.
    if (m_blockman.m_block_index.count(chainparams.GenesisBlock().GetHash()))
        return true;

    try {
        const CBlock& block = chainparams.GenesisBlock();
        // Persist genesis at height 0; a null position signals a write failure.
        FlatFilePos blockPos = SaveBlockToDisk(block, 0, chainparams, nullptr);
        if (blockPos.IsNull())
            return error("%s: writing genesis block to disk failed", __func__);
        CBlockIndex *pindex = m_blockman.AddToBlockIndex(block);
        // Record that the block's transactions are available on disk.
        ReceivedBlockTransactions(block, pindex, blockPos, chainparams.GetConsensus());
    } catch (const std::runtime_error& e) {
        return error("%s: failed to write genesis block: %s", __func__, e.what());
    }

    return true;
}
4673 
// Free-function convenience wrapper around the chainstate member function.
bool LoadGenesisBlock(const CChainParams& chainparams)
{
    // NOTE(review): the body is elided in this rendering; presumably it
    // forwards to ::ChainstateActive().LoadGenesisBlock(chainparams) like the
    // neighboring wrappers — confirm against the full source.
}
4678 
/**
 * Import blocks from an external blk*.dat-style file (used by -reindex and
 * -loadblock). Scans for the network magic, deserializes each block and hands
 * it to AcceptBlock; blocks whose parent is not yet known are remembered by
 * disk position and retried once the parent has been processed.
 *
 * @param chainparams  Chain parameters (network magic, consensus rules).
 * @param fileIn       Open block file; ownership is taken over and the file
 *                     is closed when the buffered reader is destroyed.
 * @param dbp          If non-null, carries/receives the on-disk position of
 *                     each block (set during reindex so blocks are not
 *                     re-written to disk).
 */
void LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, FlatFilePos* dbp)
{
    // Map of disk positions for blocks with unknown parent (only used for reindex)
    static std::multimap<uint256, FlatFilePos> mapBlocksUnknownParent;
    int64_t nStart = GetTimeMillis();

    int nLoaded = 0;
    try {
        // This takes over fileIn and calls fclose() on it in the CBufferedFile destructor
        // NOTE(review): the declaration of the buffered reader 'blkdat' is
        // elided in this rendering of the source.
        uint64_t nRewind = blkdat.GetPos();
        while (!blkdat.eof()) {
            if (ShutdownRequested()) return;

            blkdat.SetPos(nRewind);
            nRewind++; // start one byte further next time, in case of failure
            blkdat.SetLimit(); // remove former limit
            unsigned int nSize = 0;
            try {
                // locate a header
                unsigned char buf[CMessageHeader::MESSAGE_START_SIZE];
                blkdat.FindByte(chainparams.MessageStart()[0]);
                nRewind = blkdat.GetPos()+1;
                blkdat >> buf;
                if (memcmp(buf, chainparams.MessageStart(), CMessageHeader::MESSAGE_START_SIZE))
                    continue;
                // read size
                // 80 bytes is the size of a serialized block header, so anything
                // smaller cannot be a valid block.
                blkdat >> nSize;
                if (nSize < 80 || nSize > MAX_BLOCK_SERIALIZED_SIZE)
                    continue;
            } catch (const std::exception&) {
                // no valid block header found; don't complain
                break;
            }
            try {
                // read block
                uint64_t nBlockPos = blkdat.GetPos();
                if (dbp)
                    dbp->nPos = nBlockPos;
                blkdat.SetLimit(nBlockPos + nSize);
                blkdat.SetPos(nBlockPos);
                std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
                CBlock& block = *pblock;
                blkdat >> block;
                nRewind = blkdat.GetPos();

                uint256 hash = block.GetHash();
                {
                    LOCK(cs_main);
                    // detect out of order blocks, and store them for later
                    if (hash != chainparams.GetConsensus().hashPrevBlock && !LookupBlockIndex(block.hashPrevBlock)) {
                        LogPrint(BCLog::REINDEX, "%s: Out of order block %s, parent %s not known\n", __func__, hash.ToString(),
                                block.hashPrevBlock.ToString());
                        if (dbp)
                            mapBlocksUnknownParent.insert(std::make_pair(block.hashPrevBlock, *dbp));
                        continue;
                    }

                    // process in case the block isn't known yet
                    CBlockIndex* pindex = LookupBlockIndex(hash);
                    if (!pindex || (pindex->nStatus & BLOCK_HAVE_DATA) == 0) {
                        BlockValidationState state;
                        if (::ChainstateActive().AcceptBlock(pblock, state, chainparams, nullptr, true, dbp, nullptr)) {
                            nLoaded++;
                        }
                        if (state.IsError()) {
                            break;
                        }
                    } else if (hash != chainparams.GetConsensus().hashGenesisBlock && pindex->nHeight % 1000 == 0) {
                        LogPrint(BCLog::REINDEX, "Block Import: already had block %s at height %d\n", hash.ToString(), pindex->nHeight);
                    }
                }

                // Activate the genesis block so normal node progress can continue
                if (hash == chainparams.GetConsensus().hashGenesisBlock) {
                    BlockValidationState state;
                    if (!ActivateBestChain(state, chainparams, nullptr)) {
                        break;
                    }
                }

                NotifyHeaderTip();

                // Recursively process earlier encountered successors of this block
                std::deque<uint256> queue;
                queue.push_back(hash);
                while (!queue.empty()) {
                    uint256 head = queue.front();
                    queue.pop_front();
                    std::pair<std::multimap<uint256, FlatFilePos>::iterator, std::multimap<uint256, FlatFilePos>::iterator> range = mapBlocksUnknownParent.equal_range(head);
                    while (range.first != range.second) {
                        std::multimap<uint256, FlatFilePos>::iterator it = range.first;
                        std::shared_ptr<CBlock> pblockrecursive = std::make_shared<CBlock>();
                        if (ReadBlockFromDisk(*pblockrecursive, it->second, chainparams.GetConsensus()))
                        {
                            LogPrint(BCLog::REINDEX, "%s: Processing out of order child %s of %s\n", __func__, pblockrecursive->GetHash().ToString(),
                                    head.ToString());
                            LOCK(cs_main);
                            BlockValidationState dummy;
                            if (::ChainstateActive().AcceptBlock(pblockrecursive, dummy, chainparams, nullptr, true, &it->second, nullptr))
                            {
                                nLoaded++;
                                queue.push_back(pblockrecursive->GetHash());
                            }
                        }
                        range.first++;
                        mapBlocksUnknownParent.erase(it);
                        NotifyHeaderTip();
                    }
                }
            } catch (const std::exception& e) {
                // A malformed block is skipped; scanning resumes at nRewind.
                LogPrintf("%s: Deserialize or I/O error - %s\n", __func__, e.what());
            }
        }
    } catch (const std::runtime_error& e) {
        AbortNode(std::string("System error: ") + e.what());
    }
    LogPrintf("Loaded %i blocks from external file in %dms\n", nLoaded, GetTimeMillis() - nStart);
}
4798 
/**
 * Validate internal consistency of the entire block index (only when
 * -checkblockindex is enabled). Walks the whole block tree depth-first while
 * tracking, for the current path from genesis, the oldest ancestor lacking
 * each data/validity property, and asserts the documented invariants relating
 * the index, the active chain, setBlockIndexCandidates and m_blocks_unlinked.
 */
{
    if (!fCheckBlockIndex) {
        return;
    }

    LOCK(cs_main);

    // During a reindex, we read the genesis block and call CheckBlockIndex before ActivateBestChain,
    // so we have the genesis block in m_blockman.m_block_index but no active chain. (A few of the
    // tests when iterating the block tree require that m_chain has been initialized.)
    if (m_chain.Height() < 0) {
        assert(m_blockman.m_block_index.size() <= 1);
        return;
    }

    // Build forward-pointing map of the entire block tree.
    std::multimap<CBlockIndex*,CBlockIndex*> forward;
    for (const std::pair<const uint256, CBlockIndex*>& entry : m_blockman.m_block_index) {
        forward.insert(std::make_pair(entry.second->pprev, entry.second));
    }

    assert(forward.size() == m_blockman.m_block_index.size());

    // The genesis entry is the unique one whose parent pointer is nullptr.
    std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeGenesis = forward.equal_range(nullptr);
    CBlockIndex *pindex = rangeGenesis.first->second;
    rangeGenesis.first++;
    assert(rangeGenesis.first == rangeGenesis.second); // There is only one index entry with parent nullptr.

    // Iterate over the entire block tree, using depth-first search.
    // Along the way, remember whether there are blocks on the path from genesis
    // block being explored which are the first to have certain properties.
    size_t nNodes = 0;
    int nHeight = 0;
    CBlockIndex* pindexFirstInvalid = nullptr; // Oldest ancestor of pindex which is invalid.
    CBlockIndex* pindexFirstMissing = nullptr; // Oldest ancestor of pindex which does not have BLOCK_HAVE_DATA.
    CBlockIndex* pindexFirstNeverProcessed = nullptr; // Oldest ancestor of pindex for which nTx == 0.
    CBlockIndex* pindexFirstNotTreeValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_TREE (regardless of being valid or not).
    CBlockIndex* pindexFirstNotTransactionsValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS (regardless of being valid or not).
    CBlockIndex* pindexFirstNotChainValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN (regardless of being valid or not).
    CBlockIndex* pindexFirstNotScriptsValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS (regardless of being valid or not).
    while (pindex != nullptr) {
        nNodes++;
        if (pindexFirstInvalid == nullptr && pindex->nStatus & BLOCK_FAILED_VALID) pindexFirstInvalid = pindex;
        if (pindexFirstMissing == nullptr && !(pindex->nStatus & BLOCK_HAVE_DATA)) pindexFirstMissing = pindex;
        if (pindexFirstNeverProcessed == nullptr && pindex->nTx == 0) pindexFirstNeverProcessed = pindex;
        if (pindex->pprev != nullptr && pindexFirstNotTreeValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TREE) pindexFirstNotTreeValid = pindex;
        if (pindex->pprev != nullptr && pindexFirstNotTransactionsValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TRANSACTIONS) pindexFirstNotTransactionsValid = pindex;
        if (pindex->pprev != nullptr && pindexFirstNotChainValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_CHAIN) pindexFirstNotChainValid = pindex;
        if (pindex->pprev != nullptr && pindexFirstNotScriptsValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_SCRIPTS) pindexFirstNotScriptsValid = pindex;

        // Begin: actual consistency checks.
        if (pindex->pprev == nullptr) {
            // Genesis block checks.
            assert(pindex->GetBlockHash() == consensusParams.hashGenesisBlock); // Genesis block's hash must match.
            assert(pindex == m_chain.Genesis()); // The current active chain's genesis block must be this block.
        }
        if (!pindex->HaveTxsDownloaded()) assert(pindex->nSequenceId <= 0); // nSequenceId can't be set positive for blocks that aren't linked (negative is used for preciousblock)
        // VALID_TRANSACTIONS is equivalent to nTx > 0 for all nodes (whether or not pruning has occurred).
        // HAVE_DATA is only equivalent to nTx > 0 (or VALID_TRANSACTIONS) if no pruning has occurred.
        if (!fHavePruned) {
            // If we've never pruned, then HAVE_DATA should be equivalent to nTx > 0
            assert(!(pindex->nStatus & BLOCK_HAVE_DATA) == (pindex->nTx == 0));
            assert(pindexFirstMissing == pindexFirstNeverProcessed);
        } else {
            // If we have pruned, then we can only say that HAVE_DATA implies nTx > 0
            if (pindex->nStatus & BLOCK_HAVE_DATA) assert(pindex->nTx > 0);
        }
        if (pindex->nStatus & BLOCK_HAVE_UNDO) assert(pindex->nStatus & BLOCK_HAVE_DATA);
        assert(((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TRANSACTIONS) == (pindex->nTx > 0)); // This is pruning-independent.
        // All parents having had data (at some point) is equivalent to all parents being VALID_TRANSACTIONS, which is equivalent to HaveTxsDownloaded().
        assert((pindexFirstNeverProcessed == nullptr) == pindex->HaveTxsDownloaded());
        assert((pindexFirstNotTransactionsValid == nullptr) == pindex->HaveTxsDownloaded());
        assert(pindex->nHeight == nHeight); // nHeight must be consistent.
        assert(pindex->pprev == nullptr || pindex->nChainWork >= pindex->pprev->nChainWork); // For every block except the genesis block, the chainwork must be larger than the parent's.
        assert(nHeight < 2 || (pindex->pskip && (pindex->pskip->nHeight < nHeight))); // The pskip pointer must point back for all but the first 2 blocks.
        assert(pindexFirstNotTreeValid == nullptr); // All m_blockman.m_block_index entries must at least be TREE valid
        if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TREE) assert(pindexFirstNotTreeValid == nullptr); // TREE valid implies all parents are TREE valid
        if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_CHAIN) assert(pindexFirstNotChainValid == nullptr); // CHAIN valid implies all parents are CHAIN valid
        if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_SCRIPTS) assert(pindexFirstNotScriptsValid == nullptr); // SCRIPTS valid implies all parents are SCRIPTS valid
        if (pindexFirstInvalid == nullptr) {
            // Checks for not-invalid blocks.
            assert((pindex->nStatus & BLOCK_FAILED_MASK) == 0); // The failed mask cannot be set for blocks without invalid parents.
        }
        if (!CBlockIndexWorkComparator()(pindex, m_chain.Tip()) && pindexFirstNeverProcessed == nullptr) {
            if (pindexFirstInvalid == nullptr) {
                // If this block sorts at least as good as the current tip and
                // is valid and we have all data for its parents, it must be in
                // setBlockIndexCandidates. m_chain.Tip() must also be there
                // even if some data has been pruned.
                if (pindexFirstMissing == nullptr || pindex == m_chain.Tip()) {
                    assert(setBlockIndexCandidates.count(pindex));
                }
                // If some parent is missing, then it could be that this block was in
                // setBlockIndexCandidates but had to be removed because of the missing data.
                // In this case it must be in m_blocks_unlinked -- see test below.
            }
        } else { // If this block sorts worse than the current tip or some ancestor's block has never been seen, it cannot be in setBlockIndexCandidates.
            assert(setBlockIndexCandidates.count(pindex) == 0);
        }
        // Check whether this block is in m_blocks_unlinked.
        std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeUnlinked = m_blockman.m_blocks_unlinked.equal_range(pindex->pprev);
        bool foundInUnlinked = false;
        while (rangeUnlinked.first != rangeUnlinked.second) {
            assert(rangeUnlinked.first->first == pindex->pprev);
            if (rangeUnlinked.first->second == pindex) {
                foundInUnlinked = true;
                break;
            }
            rangeUnlinked.first++;
        }
        if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed != nullptr && pindexFirstInvalid == nullptr) {
            // If this block has block data available, some parent was never received, and has no invalid parents, it must be in m_blocks_unlinked.
            assert(foundInUnlinked);
        }
        if (!(pindex->nStatus & BLOCK_HAVE_DATA)) assert(!foundInUnlinked); // Can't be in m_blocks_unlinked if we don't HAVE_DATA
        if (pindexFirstMissing == nullptr) assert(!foundInUnlinked); // We aren't missing data for any parent -- cannot be in m_blocks_unlinked.
        if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed == nullptr && pindexFirstMissing != nullptr) {
            // We HAVE_DATA for this block, have received data for all parents at some point, but we're currently missing data for some parent.
            assert(fHavePruned); // We must have pruned.
            // This block may have entered m_blocks_unlinked if:
            //  - it has a descendant that at some point had more work than the
            //    tip, and
            //  - we tried switching to that descendant but were missing
            //    data for some intermediate block between m_chain and the
            //    tip.
            // So if this block is itself better than m_chain.Tip() and it wasn't in
            // setBlockIndexCandidates, then it must be in m_blocks_unlinked.
            if (!CBlockIndexWorkComparator()(pindex, m_chain.Tip()) && setBlockIndexCandidates.count(pindex) == 0) {
                if (pindexFirstInvalid == nullptr) {
                    assert(foundInUnlinked);
                }
            }
        }
        // assert(pindex->GetBlockHash() == pindex->GetBlockHeader().GetHash()); // Perhaps too slow
        // End: actual consistency checks.

        // Try descending into the first subnode.
        std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> range = forward.equal_range(pindex);
        if (range.first != range.second) {
            // A subnode was found.
            pindex = range.first->second;
            nHeight++;
            continue;
        }
        // This is a leaf node.
        // Move upwards until we reach a node of which we have not yet visited the last child.
        while (pindex) {
            // We are going to either move to a parent or a sibling of pindex.
            // If pindex was the first with a certain property, unset the corresponding variable.
            if (pindex == pindexFirstInvalid) pindexFirstInvalid = nullptr;
            if (pindex == pindexFirstMissing) pindexFirstMissing = nullptr;
            if (pindex == pindexFirstNeverProcessed) pindexFirstNeverProcessed = nullptr;
            if (pindex == pindexFirstNotTreeValid) pindexFirstNotTreeValid = nullptr;
            if (pindex == pindexFirstNotTransactionsValid) pindexFirstNotTransactionsValid = nullptr;
            if (pindex == pindexFirstNotChainValid) pindexFirstNotChainValid = nullptr;
            if (pindex == pindexFirstNotScriptsValid) pindexFirstNotScriptsValid = nullptr;
            // Find our parent.
            CBlockIndex* pindexPar = pindex->pprev;
            // Find which child we just visited.
            std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangePar = forward.equal_range(pindexPar);
            while (rangePar.first->second != pindex) {
                assert(rangePar.first != rangePar.second); // Our parent must have at least the node we're coming from as child.
                rangePar.first++;
            }
            // Proceed to the next one.
            rangePar.first++;
            if (rangePar.first != rangePar.second) {
                // Move to the sibling.
                pindex = rangePar.first->second;
                break;
            } else {
                // Move up further.
                pindex = pindexPar;
                nHeight--;
                continue;
            }
        }
    }

    // Check that we actually traversed the entire map.
    assert(nNodes == forward.size());
}
4982 
4983 std::string CChainState::ToString()
4984 {
4985  CBlockIndex* tip = m_chain.Tip();
4986  return strprintf("Chainstate [%s] @ height %d (%s)",
4987  m_from_snapshot_blockhash.IsNull() ? "ibd" : "snapshot",
4988  tip ? tip->nHeight : -1, tip ? tip->GetBlockHash().ToString() : "null");
4989 }
4990 
4991 std::string CBlockFileInfo::ToString() const
4992 {
4993  return strprintf("CBlockFileInfo(blocks=%u, size=%u, heights=%u...%u, time=%s...%s)", nBlocks, nSize, nHeightFirst, nHeightLast, FormatISO8601Date(nTimeFirst), FormatISO8601Date(nTimeLast));
4994 }
4995 
// Return a pointer to the in-memory metadata for block file number n.
// Uses vector::at(), which throws std::out_of_range for an unknown file number.
{
    LOCK(cs_LastBlockFile);

    return &vinfoBlockFile.at(n);
}
5002 
// Query the versionbits deployment state for `pos`, evaluated at the current
// active chain tip (results are memoized in versionbitscache).
{
    LOCK(cs_main);
    return VersionBitsState(::ChainActive().Tip(), params, pos, versionbitscache);
}
5008 
// Collect versionbits signalling statistics for deployment `pos` over the
// current period, evaluated at the active chain tip.
{
    LOCK(cs_main);
    return VersionBitsStatistics(::ChainActive().Tip(), params, pos);
}
5014 
// Return the height at which the current versionbits state for deployment
// `pos` first applied, evaluated at the active chain tip.
{
    LOCK(cs_main);
    return VersionBitsStateSinceHeight(::ChainActive().Tip(), params, pos, versionbitscache);
}
5020 
5021 static const uint64_t MEMPOOL_DUMP_VERSION = 1;
5022 
/**
 * Load persisted mempool contents from <datadir>/mempool.dat and resubmit
 * them to the pool. Transactions older than -mempoolexpiry are skipped; fee
 * deltas and the unbroadcast txid set are restored. Returns false if the file
 * is missing/unparsable, has a wrong version, or shutdown is requested.
 */
{
    const CChainParams& chainparams = Params();
    // -mempoolexpiry is expressed in hours; convert to seconds.
    int64_t nExpiryTimeout = gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60;
    FILE* filestr = fsbridge::fopen(GetDataDir() / "mempool.dat", "rb");
    CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);
    if (file.IsNull()) {
        LogPrintf("Failed to open mempool file from disk. Continuing anyway.\n");
        return false;
    }

    // Counters for the summary log line emitted at the end.
    int64_t count = 0;
    int64_t expired = 0;
    int64_t failed = 0;
    int64_t already_there = 0;
    int64_t unbroadcast = 0;
    int64_t nNow = GetTime();

    try {
        uint64_t version;
        file >> version;
        if (version != MEMPOOL_DUMP_VERSION) {
            return false;
        }
        uint64_t num;
        file >> num;
        while (num--) {
            CTransactionRef tx;
            int64_t nTime;
            int64_t nFeeDelta;
            file >> tx;
            file >> nTime;
            file >> nFeeDelta;

            CAmount amountdelta = nFeeDelta;
            if (amountdelta) {
                // Restore any fee prioritisation that was applied to this tx.
                pool.PrioritiseTransaction(tx->GetHash(), amountdelta);
            }
            TxValidationState state;
            if (nTime + nExpiryTimeout > nNow) {
                LOCK(cs_main);
                // Re-validate against the current chain; nTime preserves the
                // original entry time for expiry/eviction accounting.
                AcceptToMemoryPoolWithTime(chainparams, pool, state, tx, nTime,
                                           nullptr /* plTxnReplaced */, false /* bypass_limits */, 0 /* nAbsurdFee */,
                                           false /* test_accept */);
                if (state.IsValid()) {
                    ++count;
                } else {
                    // mempool may contain the transaction already, e.g. from
                    // wallet(s) having loaded it while we were processing
                    // mempool transactions; consider these as valid, instead of
                    // failed, but mark them as 'already there'
                    if (pool.exists(tx->GetHash())) {
                        ++already_there;
                    } else {
                        ++failed;
                    }
                }
            } else {
                ++expired;
            }
            if (ShutdownRequested())
                return false;
        }
        // Fee deltas for transactions that are not currently in the mempool.
        std::map<uint256, CAmount> mapDeltas;
        file >> mapDeltas;

        for (const auto& i : mapDeltas) {
            pool.PrioritiseTransaction(i.first, i.second);
        }

        // TODO: remove this try except in v0.22
        try {
            std::set<uint256> unbroadcast_txids;
            file >> unbroadcast_txids;
            unbroadcast = unbroadcast_txids.size();

            for (const auto& txid : unbroadcast_txids) {
                pool.AddUnbroadcastTx(txid);
            }
        } catch (const std::exception&) {
            // mempool.dat files created prior to v0.21 will not have an
            // unbroadcast set. No need to log a failure if parsing fails here.
        }

    } catch (const std::exception& e) {
        LogPrintf("Failed to deserialize mempool data on disk: %s. Continuing anyway.\n", e.what());
        return false;
    }

    LogPrintf("Imported mempool transactions from disk: %i succeeded, %i failed, %i expired, %i already there, %i waiting for initial broadcast\n", count, failed, expired, already_there, unbroadcast);
    return true;
}
5115 
5116 bool DumpMempool(const CTxMemPool& pool)
5117 {
5118  int64_t start = GetTimeMicros();
5119 
5120  std::map<uint256, CAmount> mapDeltas;
5121  std::vector<TxMempoolInfo> vinfo;
5122  std::set<uint256> unbroadcast_txids;
5123 
5124  static Mutex dump_mutex;
5125  LOCK(dump_mutex);
5126 
5127  {
5128  LOCK(pool.cs);
5129  for (const auto &i : pool.mapDeltas) {
5130  mapDeltas[i.first] = i.second;
5131  }
5132  vinfo = pool.infoAll();
5133  unbroadcast_txids = pool.GetUnbroadcastTxs();
5134  }
5135 
5136  int64_t mid = GetTimeMicros();
5137 
5138  try {
5139  FILE* filestr = fsbridge::fopen(GetDataDir() / "mempool.dat.new", "wb");
5140  if (!filestr) {
5141  return false;
5142  }
5143 
5144  CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);
5145 
5146  uint64_t version = MEMPOOL_DUMP_VERSION;
5147  file << version;
5148 
5149  file << (uint64_t)vinfo.size();
5150  for (const auto& i : vinfo) {
5151  file << *(i.tx);
5152  file << int64_t{count_seconds(i.m_time)};
5153  file << int64_t{i.nFeeDelta};
5154  mapDeltas.erase(i.tx->GetHash());
5155  }
5156 
5157  file << mapDeltas;
5158 
5159  LogPrintf("Writing %d unbroadcast transactions to disk.\n", unbroadcast_txids.size());
5160  file << unbroadcast_txids;
5161 
5162  if (!FileCommit(file.Get()))
5163  throw std::runtime_error("FileCommit failed");
5164  file.fclose();
5165  RenameOver(GetDataDir() / "mempool.dat.new", GetDataDir() / "mempool.dat");
5166  int64_t last = GetTimeMicros();
5167  LogPrintf("Dumped mempool: %gs to copy, %gs to dump\n", (mid-start)*MICRO, (last-mid)*MICRO);
5168  } catch (const std::exception& e) {
5169  LogPrintf("Failed to dump mempool: %s. Continuing anyway.\n", e.what());
5170  return false;
5171  }
5172  return true;
5173 }
5174 
5177 double GuessVerificationProgress(const ChainTxData& data, const CBlockIndex *pindex) {
5178  if (pindex == nullptr)
5179  return 0.0;
5180 
5181  int64_t nNow = time(nullptr);
5182 
5183  double fTxTotal;
5184 
5185  if (pindex->nChainTx <= data.nTxCount) {
5186  fTxTotal = data.nTxCount + (nNow - data.nTime) * data.dTxRate;
5187  } else {
5188  fTxTotal = pindex->