blockstorage.cpp
1// Copyright (c) 2011-2022 The Bitcoin Core developers
2// Distributed under the MIT software license, see the accompanying
3// file COPYING or http://www.opensource.org/licenses/mit-license.php.
4
5#include <node/blockstorage.h>
6
7#include <arith_uint256.h>
8#include <chain.h>
9#include <consensus/params.h>
10#include <consensus/validation.h>
11#include <dbwrapper.h>
12#include <flatfile.h>
13#include <hash.h>
14#include <kernel/blockmanager_opts.h>
15#include <kernel/chainparams.h>
16#include <kernel/messagestartchars.h>
17#include <kernel/notifications_interface.h>
18#include <logging.h>
19#include <pow.h>
20#include <primitives/block.h>
21#include <primitives/transaction.h>
22#include <random.h>
23#include <serialize.h>
24#include <signet.h>
25#include <span.h>
26#include <streams.h>
27#include <sync.h>
28#include <tinyformat.h>
29#include <uint256.h>
30#include <undo.h>
31#include <util/batchpriority.h>
32#include <util/check.h>
33#include <util/fs.h>
34#include <util/signalinterrupt.h>
35#include <util/strencodings.h>
36#include <util/translation.h>
37#include <validation.h>
38
39#include <cstddef>
40#include <map>
41#include <unordered_map>
42
43namespace kernel {
44static constexpr uint8_t DB_BLOCK_FILES{'f'};
45static constexpr uint8_t DB_BLOCK_INDEX{'b'};
46static constexpr uint8_t DB_FLAG{'F'};
47static constexpr uint8_t DB_REINDEX_FLAG{'R'};
48static constexpr uint8_t DB_LAST_BLOCK{'l'};
49// Keys used in previous version that might still be found in the DB:
50// BlockTreeDB::DB_TXINDEX_BLOCK{'T'};
51// BlockTreeDB::DB_TXINDEX{'t'}
52// BlockTreeDB::ReadFlag("txindex")
53
54bool BlockTreeDB::ReadBlockFileInfo(int nFile, CBlockFileInfo& info)
55{
56 return Read(std::make_pair(DB_BLOCK_FILES, nFile), info);
57}
58
59bool BlockTreeDB::WriteReindexing(bool fReindexing)
60{
61 if (fReindexing) {
62 return Write(DB_REINDEX_FLAG, uint8_t{'1'});
63 } else {
64 return Erase(DB_REINDEX_FLAG);
65 }
66}
67
68void BlockTreeDB::ReadReindexing(bool& fReindexing)
69{
70 fReindexing = Exists(DB_REINDEX_FLAG);
71}
72
73bool BlockTreeDB::ReadLastBlockFile(int& nFile)
74{
75 return Read(DB_LAST_BLOCK, nFile);
76}
77
78bool BlockTreeDB::WriteBatchSync(const std::vector<std::pair<int, const CBlockFileInfo*>>& fileInfo, int nLastFile, const std::vector<const CBlockIndex*>& blockinfo)
79{
80 CDBBatch batch(*this);
81 for (const auto& [file, info] : fileInfo) {
82 batch.Write(std::make_pair(DB_BLOCK_FILES, file), *info);
83 }
84 batch.Write(DB_LAST_BLOCK, nLastFile);
85 for (const CBlockIndex* bi : blockinfo) {
86 batch.Write(std::make_pair(DB_BLOCK_INDEX, bi->GetBlockHash()), CDiskBlockIndex{bi});
87 }
88 return WriteBatch(batch, true);
89}
90
91bool BlockTreeDB::WriteFlag(const std::string& name, bool fValue)
92{
93 return Write(std::make_pair(DB_FLAG, name), fValue ? uint8_t{'1'} : uint8_t{'0'});
94}
95
96bool BlockTreeDB::ReadFlag(const std::string& name, bool& fValue)
97{
98 uint8_t ch;
99 if (!Read(std::make_pair(DB_FLAG, name), ch)) {
100 return false;
101 }
102 fValue = ch == uint8_t{'1'};
103 return true;
104}
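
Illustrative usage sketch (not part of this file; `block_tree_db` stands for any open BlockTreeDB instance): a flag written with WriteFlag() is stored under the ('F', name) key as the byte '1' or '0' and round-trips through ReadFlag():

block_tree_db.WriteFlag("prunedblockfiles", true); // stored as ('F', "prunedblockfiles") -> '1'
bool have_pruned{false};
if (block_tree_db.ReadFlag("prunedblockfiles", have_pruned)) {
    assert(have_pruned); // the flag was present and set
}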
105
106bool BlockTreeDB::LoadBlockIndexGuts(const Consensus::Params& consensusParams, std::function<CBlockIndex*(const uint256&)> insertBlockIndex, const util::SignalInterrupt& interrupt)
107{
108 AssertLockHeld(::cs_main);
109 std::unique_ptr<CDBIterator> pcursor(NewIterator());
110 pcursor->Seek(std::make_pair(DB_BLOCK_INDEX, uint256()));
111
112 // Load m_block_index
113 while (pcursor->Valid()) {
114 if (interrupt) return false;
115 std::pair<uint8_t, uint256> key;
116 if (pcursor->GetKey(key) && key.first == DB_BLOCK_INDEX) {
117 CDiskBlockIndex diskindex;
118 if (pcursor->GetValue(diskindex)) {
119 // Construct block index object
120 CBlockIndex* pindexNew = insertBlockIndex(diskindex.ConstructBlockHash());
121 pindexNew->pprev = insertBlockIndex(diskindex.hashPrev);
122 pindexNew->nHeight = diskindex.nHeight;
123 pindexNew->nFile = diskindex.nFile;
124 pindexNew->nDataPos = diskindex.nDataPos;
125 pindexNew->nUndoPos = diskindex.nUndoPos;
126 pindexNew->nVersion = diskindex.nVersion;
127 pindexNew->hashMerkleRoot = diskindex.hashMerkleRoot;
128 pindexNew->nTime = diskindex.nTime;
129 pindexNew->nBits = diskindex.nBits;
130 pindexNew->nNonce = diskindex.nNonce;
131 pindexNew->nStatus = diskindex.nStatus;
132 pindexNew->nTx = diskindex.nTx;
133
134 if (!CheckProofOfWork(pindexNew->GetBlockHash(), pindexNew->nBits, consensusParams)) {
135 LogError("%s: CheckProofOfWork failed: %s\n", __func__, pindexNew->ToString());
136 return false;
137 }
138
139 pcursor->Next();
140 } else {
141 LogError("%s: failed to read value\n", __func__);
142 return false;
143 }
144 } else {
145 break;
146 }
147 }
148
149 return true;
150}
151} // namespace kernel
152
153namespace node {
154
155bool CBlockIndexWorkComparator::operator()(const CBlockIndex* pa, const CBlockIndex* pb) const
156{
157 // First sort by most total work, ...
158 if (pa->nChainWork > pb->nChainWork) return false;
159 if (pa->nChainWork < pb->nChainWork) return true;
160
161 // ... then by earliest time received, ...
162 if (pa->nSequenceId < pb->nSequenceId) return false;
163 if (pa->nSequenceId > pb->nSequenceId) return true;
164
165 // Use pointer address as tie breaker (should only happen with blocks
166 // loaded from disk, as those all have id 0).
167 if (pa < pb) return false;
168 if (pa > pb) return true;
169
170 // Identical blocks.
171 return false;
172}
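
A hedged usage sketch (it mirrors how validation keeps candidate tips; not code from this file): because the comparator sorts less-preferred entries first, the most-work, earliest-received index is the last element of an ordered set:

std::set<CBlockIndex*, node::CBlockIndexWorkComparator> candidates;
// ... block indices are inserted elsewhere ...
CBlockIndex* best{candidates.empty() ? nullptr : *candidates.rbegin()};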
173
174bool CBlockIndexHeightOnlyComparator::operator()(const CBlockIndex* pa, const CBlockIndex* pb) const
175{
176 return pa->nHeight < pb->nHeight;
177}
178
179std::vector<CBlockIndex*> BlockManager::GetAllBlockIndices()
180{
181 AssertLockHeld(cs_main);
182 std::vector<CBlockIndex*> rv;
183 rv.reserve(m_block_index.size());
184 for (auto& [_, block_index] : m_block_index) {
185 rv.push_back(&block_index);
186 }
187 return rv;
188}
189
190CBlockIndex* BlockManager::LookupBlockIndex(const uint256& hash)
191{
192 AssertLockHeld(cs_main);
193 BlockMap::iterator it = m_block_index.find(hash);
194 return it == m_block_index.end() ? nullptr : &it->second;
195}
196
197const CBlockIndex* BlockManager::LookupBlockIndex(const uint256& hash) const
198{
199 AssertLockHeld(cs_main);
200 BlockMap::const_iterator it = m_block_index.find(hash);
201 return it == m_block_index.end() ? nullptr : &it->second;
202}
203
204CBlockIndex* BlockManager::AddToBlockIndex(const CBlockHeader& block, CBlockIndex*& best_header)
205{
206 AssertLockHeld(cs_main);
207
208 auto [mi, inserted] = m_block_index.try_emplace(block.GetHash(), block);
209 if (!inserted) {
210 return &mi->second;
211 }
212 CBlockIndex* pindexNew = &(*mi).second;
213
214 // We assign the sequence id to blocks only when the full data is available,
215 // to avoid miners withholding blocks but broadcasting headers, to get a
216 // competitive advantage.
217 pindexNew->nSequenceId = 0;
218
219 pindexNew->phashBlock = &((*mi).first);
220 BlockMap::iterator miPrev = m_block_index.find(block.hashPrevBlock);
221 if (miPrev != m_block_index.end()) {
222 pindexNew->pprev = &(*miPrev).second;
223 pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
224 pindexNew->BuildSkip();
225 }
226 pindexNew->nTimeMax = (pindexNew->pprev ? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime) : pindexNew->nTime);
227 pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew);
228 pindexNew->RaiseValidity(BLOCK_VALID_TREE);
229 if (best_header == nullptr || best_header->nChainWork < pindexNew->nChainWork) {
230 best_header = pindexNew;
231 }
232
233 m_dirty_blockindex.insert(pindexNew);
234
235 return pindexNew;
236}
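
Hedged note on the accumulation above: GetBlockProof() evaluates to roughly 2^256 / (target + 1), i.e. the expected number of hashes needed to find a block at that difficulty, so nChainWork(B) = nChainWork(parent) + floor(2^256 / (target_B + 1)) is the expected total hash count for the chain ending in this header, and best_header tracks the most-work leaf of the header tree.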
237
238void BlockManager::PruneOneBlockFile(const int fileNumber)
239{
240 AssertLockHeld(cs_main);
241 LOCK(cs_LastBlockFile);
242
243 for (auto& entry : m_block_index) {
244 CBlockIndex* pindex = &entry.second;
245 if (pindex->nFile == fileNumber) {
246 pindex->nStatus &= ~BLOCK_HAVE_DATA;
247 pindex->nStatus &= ~BLOCK_HAVE_UNDO;
248 pindex->nFile = 0;
249 pindex->nDataPos = 0;
250 pindex->nUndoPos = 0;
251 m_dirty_blockindex.insert(pindex);
252
253 // Prune from m_blocks_unlinked -- any block we prune would have
254 // to be downloaded again in order to consider its chain, at which
255 // point it would be considered as a candidate for
256 // m_blocks_unlinked or setBlockIndexCandidates.
257 auto range = m_blocks_unlinked.equal_range(pindex->pprev);
258 while (range.first != range.second) {
259 std::multimap<CBlockIndex*, CBlockIndex*>::iterator _it = range.first;
260 range.first++;
261 if (_it->second == pindex) {
262 m_blocks_unlinked.erase(_it);
263 }
264 }
265 }
266 }
267
268 m_blockfile_info.at(fileNumber) = CBlockFileInfo{};
269 m_dirty_fileinfo.insert(fileNumber);
270}
271
272void BlockManager::FindFilesToPruneManual(
273 std::set<int>& setFilesToPrune,
274 int nManualPruneHeight,
275 const Chainstate& chain,
276 ChainstateManager& chainman)
277{
278 assert(IsPruneMode() && nManualPruneHeight > 0);
279
280 LOCK2(cs_main, cs_LastBlockFile);
281 if (chain.m_chain.Height() < 0) {
282 return;
283 }
284
285 const auto [min_block_to_prune, last_block_can_prune] = chainman.GetPruneRange(chain, nManualPruneHeight);
286
287 int count = 0;
288 for (int fileNumber = 0; fileNumber < this->MaxBlockfileNum(); fileNumber++) {
289 const auto& fileinfo = m_blockfile_info[fileNumber];
290 if (fileinfo.nSize == 0 || fileinfo.nHeightLast > (unsigned)last_block_can_prune || fileinfo.nHeightFirst < (unsigned)min_block_to_prune) {
291 continue;
292 }
293
294 PruneOneBlockFile(fileNumber);
295 setFilesToPrune.insert(fileNumber);
296 count++;
297 }
298 LogPrintf("[%s] Prune (Manual): prune_height=%d removed %d blk/rev pairs\n",
299 chain.GetRole(), last_block_can_prune, count);
300}
301
302void BlockManager::FindFilesToPrune(
303 std::set<int>& setFilesToPrune,
304 int last_prune,
305 const Chainstate& chain,
306 ChainstateManager& chainman)
307{
308 LOCK2(cs_main, cs_LastBlockFile);
309 // Distribute our -prune budget over all chainstates.
310 const auto target = std::max(
311 MIN_DISK_SPACE_FOR_BLOCK_FILES, GetPruneTarget() / chainman.GetAll().size());
312 const uint64_t target_sync_height = chainman.m_best_header->nHeight;
313
314 if (chain.m_chain.Height() < 0 || target == 0) {
315 return;
316 }
317 if (static_cast<uint64_t>(chain.m_chain.Height()) <= chainman.GetParams().PruneAfterHeight()) {
318 return;
319 }
320
321 const auto [min_block_to_prune, last_block_can_prune] = chainman.GetPruneRange(chain, last_prune);
322
323 uint64_t nCurrentUsage = CalculateCurrentUsage();
324 // We don't check to prune until after we've allocated new space for files
325 // So we should leave a buffer under our target to account for another allocation
326 // before the next pruning.
327 uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
328 uint64_t nBytesToPrune;
329 int count = 0;
330
331 if (nCurrentUsage + nBuffer >= target) {
332 // On a prune event, the chainstate DB is flushed.
333 // To avoid excessive prune events negating the benefit of high dbcache
334 // values, we should not prune too rapidly.
335 // So when pruning in IBD, increase the buffer to avoid a re-prune too soon.
336 const auto chain_tip_height = chain.m_chain.Height();
337 if (chainman.IsInitialBlockDownload() && target_sync_height > (uint64_t)chain_tip_height) {
338 // Since this is only relevant during IBD, we assume blocks are at least 1 MB on average
339 static constexpr uint64_t average_block_size = 1000000; /* 1 MB */
340 const uint64_t remaining_blocks = target_sync_height - chain_tip_height;
341 nBuffer += average_block_size * remaining_blocks;
342 }
343
344 for (int fileNumber = 0; fileNumber < this->MaxBlockfileNum(); fileNumber++) {
345 const auto& fileinfo = m_blockfile_info[fileNumber];
346 nBytesToPrune = fileinfo.nSize + fileinfo.nUndoSize;
347
348 if (fileinfo.nSize == 0) {
349 continue;
350 }
351
352 if (nCurrentUsage + nBuffer < target) { // are we below our target?
353 break;
354 }
355
356 // don't prune files that could have a block that's not within the allowable
357 // prune range for the chain being pruned.
358 if (fileinfo.nHeightLast > (unsigned)last_block_can_prune || fileinfo.nHeightFirst < (unsigned)min_block_to_prune) {
359 continue;
360 }
361
362 PruneOneBlockFile(fileNumber);
363 // Queue up the files for removal
364 setFilesToPrune.insert(fileNumber);
365 nCurrentUsage -= nBytesToPrune;
366 count++;
367 }
368 }
369
370 LogDebug(BCLog::PRUNE, "[%s] target=%dMiB actual=%dMiB diff=%dMiB min_height=%d max_prune_height=%d removed %d blk/rev pairs\n",
371 chain.GetRole(), target / 1024 / 1024, nCurrentUsage / 1024 / 1024,
372 (int64_t(target) - int64_t(nCurrentUsage)) / 1024 / 1024,
373 min_block_to_prune, last_block_can_prune, count);
374}
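
A hedged numeric sketch of the buffer logic above (illustrative figures, not from this file): with a 2 GiB prune target and a single chainstate, the base buffer is BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE = 16 MiB + 1 MiB, roughly 17.8 MB. If IBD still has 500 blocks left to sync, the buffer grows by 500 * 1 MB = 500 MB, so block files keep being pruned until estimated usage sits roughly 518 MB under the target, which spaces out prune-triggered chainstate flushes.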
375
376void BlockManager::UpdatePruneLock(const std::string& name, const PruneLockInfo& lock_info) {
377 AssertLockHeld(::cs_main);
378 m_prune_locks[name] = lock_info;
379}
380
381CBlockIndex* BlockManager::InsertBlockIndex(const uint256& hash)
382{
383 AssertLockHeld(cs_main);
384
385 if (hash.IsNull()) {
386 return nullptr;
387 }
388
389 const auto [mi, inserted]{m_block_index.try_emplace(hash)};
390 CBlockIndex* pindex = &(*mi).second;
391 if (inserted) {
392 pindex->phashBlock = &((*mi).first);
393 }
394 return pindex;
395}
396
397bool BlockManager::LoadBlockIndex(const std::optional<uint256>& snapshot_blockhash)
398{
399 if (!m_block_tree_db->LoadBlockIndexGuts(
400 GetConsensus(), [this](const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return this->InsertBlockIndex(hash); }, m_interrupt)) {
401 return false;
402 }
403
404 if (snapshot_blockhash) {
405 const std::optional<AssumeutxoData> maybe_au_data = GetParams().AssumeutxoForBlockhash(*snapshot_blockhash);
406 if (!maybe_au_data) {
407 m_opts.notifications.fatalError(strprintf(_("Assumeutxo data not found for the given blockhash '%s'."), snapshot_blockhash->ToString()));
408 return false;
409 }
410 const AssumeutxoData& au_data = *Assert(maybe_au_data);
411 m_snapshot_height = au_data.height;
412 CBlockIndex* base{LookupBlockIndex(*snapshot_blockhash)};
413
414 // Since m_chain_tx_count (responsible for estimated progress) isn't persisted
415 // to disk, we must bootstrap the value for assumedvalid chainstates
416 // from the hardcoded assumeutxo chainparams.
417 base->m_chain_tx_count = au_data.m_chain_tx_count;
418 LogPrintf("[snapshot] set m_chain_tx_count=%d for %s\n", au_data.m_chain_tx_count, snapshot_blockhash->ToString());
419 } else {
420 // If this isn't called with a snapshot blockhash, make sure the cached snapshot height
421 // is null. This is relevant during snapshot completion, when the blockman may be loaded
422 // with a height that then needs to be cleared after the snapshot is fully validated.
423 m_snapshot_height.reset();
424 }
425
426 Assert(m_snapshot_height.has_value() == snapshot_blockhash.has_value());
427
428 // Calculate nChainWork
429 std::vector<CBlockIndex*> vSortedByHeight{GetAllBlockIndices()};
430 std::sort(vSortedByHeight.begin(), vSortedByHeight.end(),
431 CBlockIndexHeightOnlyComparator());
432
433 CBlockIndex* previous_index{nullptr};
434 for (CBlockIndex* pindex : vSortedByHeight) {
435 if (m_interrupt) return false;
436 if (previous_index && pindex->nHeight > previous_index->nHeight + 1) {
437 LogError("%s: block index is non-contiguous, index of height %d missing\n", __func__, previous_index->nHeight + 1);
438 return false;
439 }
440 previous_index = pindex;
441 pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex);
442 pindex->nTimeMax = (pindex->pprev ? std::max(pindex->pprev->nTimeMax, pindex->nTime) : pindex->nTime);
443
444 // We can link the chain of blocks for which we've received transactions at some point, or
445 // blocks that are assumed-valid on the basis of snapshot load (see
446 // PopulateAndValidateSnapshot()).
447 // Pruned nodes may have deleted the block.
448 if (pindex->nTx > 0) {
449 if (pindex->pprev) {
450 if (m_snapshot_height && pindex->nHeight == *m_snapshot_height &&
451 pindex->GetBlockHash() == *snapshot_blockhash) {
452 // Should have been set above; don't disturb it with code below.
453 Assert(pindex->m_chain_tx_count > 0);
454 } else if (pindex->pprev->m_chain_tx_count > 0) {
455 pindex->m_chain_tx_count = pindex->pprev->m_chain_tx_count + pindex->nTx;
456 } else {
457 pindex->m_chain_tx_count = 0;
458 m_blocks_unlinked.insert(std::make_pair(pindex->pprev, pindex));
459 }
460 } else {
461 pindex->m_chain_tx_count = pindex->nTx;
462 }
463 }
464 if (!(pindex->nStatus & BLOCK_FAILED_MASK) && pindex->pprev && (pindex->pprev->nStatus & BLOCK_FAILED_MASK)) {
465 pindex->nStatus |= BLOCK_FAILED_CHILD;
466 m_dirty_blockindex.insert(pindex);
467 }
468 if (pindex->pprev) {
469 pindex->BuildSkip();
470 }
471 }
472
473 return true;
474}
475
476bool BlockManager::WriteBlockIndexDB()
477{
478 AssertLockHeld(::cs_main);
479 std::vector<std::pair<int, const CBlockFileInfo*>> vFiles;
480 vFiles.reserve(m_dirty_fileinfo.size());
481 for (std::set<int>::iterator it = m_dirty_fileinfo.begin(); it != m_dirty_fileinfo.end();) {
482 vFiles.emplace_back(*it, &m_blockfile_info[*it]);
483 m_dirty_fileinfo.erase(it++);
484 }
485 std::vector<const CBlockIndex*> vBlocks;
486 vBlocks.reserve(m_dirty_blockindex.size());
487 for (std::set<CBlockIndex*>::iterator it = m_dirty_blockindex.begin(); it != m_dirty_blockindex.end();) {
488 vBlocks.push_back(*it);
489 m_dirty_blockindex.erase(it++);
490 }
491 int max_blockfile = WITH_LOCK(cs_LastBlockFile, return this->MaxBlockfileNum());
492 if (!m_block_tree_db->WriteBatchSync(vFiles, max_blockfile, vBlocks)) {
493 return false;
494 }
495 return true;
496}
497
498bool BlockManager::LoadBlockIndexDB(const std::optional<uint256>& snapshot_blockhash)
499{
500 if (!LoadBlockIndex(snapshot_blockhash)) {
501 return false;
502 }
503 int max_blockfile_num{0};
504
505 // Load block file info
506 m_block_tree_db->ReadLastBlockFile(max_blockfile_num);
507 m_blockfile_info.resize(max_blockfile_num + 1);
508 LogPrintf("%s: last block file = %i\n", __func__, max_blockfile_num);
509 for (int nFile = 0; nFile <= max_blockfile_num; nFile++) {
510 m_block_tree_db->ReadBlockFileInfo(nFile, m_blockfile_info[nFile]);
511 }
512 LogPrintf("%s: last block file info: %s\n", __func__, m_blockfile_info[max_blockfile_num].ToString());
513 for (int nFile = max_blockfile_num + 1; true; nFile++) {
514 CBlockFileInfo info;
515 if (m_block_tree_db->ReadBlockFileInfo(nFile, info)) {
516 m_blockfile_info.push_back(info);
517 } else {
518 break;
519 }
520 }
521
522 // Check presence of blk files
523 LogPrintf("Checking all blk files are present...\n");
524 std::set<int> setBlkDataFiles;
525 for (const auto& [_, block_index] : m_block_index) {
526 if (block_index.nStatus & BLOCK_HAVE_DATA) {
527 setBlkDataFiles.insert(block_index.nFile);
528 }
529 }
530 for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++) {
531 FlatFilePos pos(*it, 0);
532 if (OpenBlockFile(pos, true).IsNull()) {
533 return false;
534 }
535 }
536
537 {
538 // Initialize the blockfile cursors.
539 LOCK(cs_LastBlockFile);
540 for (size_t i = 0; i < m_blockfile_info.size(); ++i) {
541 const auto last_height_in_file = m_blockfile_info[i].nHeightLast;
542 m_blockfile_cursors[BlockfileTypeForHeight(last_height_in_file)] = {static_cast<int>(i), 0};
543 }
544 }
545
546 // Check whether we have ever pruned block & undo files
547 m_block_tree_db->ReadFlag("prunedblockfiles", m_have_pruned);
548 if (m_have_pruned) {
549 LogPrintf("LoadBlockIndexDB(): Block files have previously been pruned\n");
550 }
551
552 // Check whether we need to continue reindexing
553 bool fReindexing = false;
554 m_block_tree_db->ReadReindexing(fReindexing);
555 if (fReindexing) m_blockfiles_indexed = false;
556
557 return true;
558}
559
560void BlockManager::ScanAndUnlinkAlreadyPrunedFiles()
561{
562 AssertLockHeld(::cs_main);
563 int max_blockfile = WITH_LOCK(cs_LastBlockFile, return this->MaxBlockfileNum());
564 if (!m_have_pruned) {
565 return;
566 }
567
568 std::set<int> block_files_to_prune;
569 for (int file_number = 0; file_number < max_blockfile; file_number++) {
570 if (m_blockfile_info[file_number].nSize == 0) {
571 block_files_to_prune.insert(file_number);
572 }
573 }
574
575 UnlinkPrunedFiles(block_files_to_prune);
576}
577
578bool BlockManager::IsBlockPruned(const CBlockIndex& block) const
579{
580 AssertLockHeld(::cs_main);
581 return m_have_pruned && !(block.nStatus & BLOCK_HAVE_DATA) && (block.nTx > 0);
582}
583
584const CBlockIndex* BlockManager::GetFirstBlock(const CBlockIndex& upper_block, uint32_t status_mask, const CBlockIndex* lower_block) const
585{
586 AssertLockHeld(::cs_main);
587 const CBlockIndex* last_block = &upper_block;
588 assert((last_block->nStatus & status_mask) == status_mask); // 'upper_block' must satisfy the status mask
589 while (last_block->pprev && ((last_block->pprev->nStatus & status_mask) == status_mask)) {
590 if (lower_block) {
591 // Return if we reached the lower_block
592 if (last_block == lower_block) return lower_block;
594 // If the range was surpassed, it means that 'lower_block' is not part of the 'upper_block' chain,
595 // which is currently not allowed.
595 assert(last_block->nHeight >= lower_block->nHeight);
596 }
597 last_block = last_block->pprev;
598 }
599 assert(last_block != nullptr);
600 return last_block;
601}
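
Illustrative sketch (assumptions: `blockman` is this BlockManager, `tip` is the active chain tip, and cs_main is held): walking back with BLOCK_HAVE_DATA yields the lowest block whose full data is still on disk, e.g. when reporting a prune height:

const CBlockIndex* first_with_data{blockman.GetFirstBlock(*tip, BLOCK_HAVE_DATA)};
const int lowest_height_with_data{first_with_data->nHeight}; // 0 if nothing below the tip was pruned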
602
603bool BlockManager::CheckBlockDataAvailability(const CBlockIndex& upper_block, const CBlockIndex& lower_block)
604{
605 if (!(upper_block.nStatus & BLOCK_HAVE_DATA)) return false;
606 return GetFirstBlock(upper_block, BLOCK_HAVE_DATA, &lower_block) == &lower_block;
607}
608
609// If we're using -prune with -reindex, then delete block files that will be ignored by the
610// reindex. Since reindexing works by starting at block file 0 and looping until a blockfile
611// is missing, do the same here to delete any later block files after a gap. Also delete all
612// rev files since they'll be rewritten by the reindex anyway. This ensures that m_blockfile_info
613// is in sync with what's actually on disk by the time we start downloading, so that pruning
614// works correctly.
615void BlockManager::CleanupBlockRevFiles() const
616{
617 std::map<std::string, fs::path> mapBlockFiles;
618
619 // Glob all blk?????.dat and rev?????.dat files from the blocks directory.
620 // Remove the rev files immediately and insert the blk file paths into an
621 // ordered map keyed by block file index.
622 LogPrintf("Removing unusable blk?????.dat and rev?????.dat files for -reindex with -prune\n");
623 for (fs::directory_iterator it(m_opts.blocks_dir); it != fs::directory_iterator(); it++) {
624 const std::string path = fs::PathToString(it->path().filename());
625 if (fs::is_regular_file(*it) &&
626 path.length() == 12 &&
627 path.substr(8,4) == ".dat")
628 {
629 if (path.substr(0, 3) == "blk") {
630 mapBlockFiles[path.substr(3, 5)] = it->path();
631 } else if (path.substr(0, 3) == "rev") {
632 remove(it->path());
633 }
634 }
635 }
636
637 // Remove all block files that aren't part of a contiguous set starting at
638 // zero by walking the ordered map (keys are block file indices) by
639 // keeping a separate counter. Once we hit a gap (or if 0 doesn't exist)
640 // start removing block files.
641 int nContigCounter = 0;
642 for (const std::pair<const std::string, fs::path>& item : mapBlockFiles) {
643 if (LocaleIndependentAtoi<int>(item.first) == nContigCounter) {
644 nContigCounter++;
645 continue;
646 }
647 remove(item.second);
648 }
649}
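
A hedged example of the contiguity rule above (hypothetical directory contents): if the blocks directory holds blk00000.dat, blk00001.dat and blk00003.dat, the counter matches 00000 and 00001, stops matching at the gap where blk00002.dat is missing, and blk00003.dat is therefore removed; every rev?????.dat file was already deleted in the first loop.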
650
651CBlockFileInfo* BlockManager::GetBlockFileInfo(size_t n)
652{
653 LOCK(cs_LastBlockFile);
654
655 return &m_blockfile_info.at(n);
656}
657
658bool BlockManager::ReadBlockUndo(CBlockUndo& blockundo, const CBlockIndex& index) const
659{
660 const FlatFilePos pos{WITH_LOCK(::cs_main, return index.GetUndoPos())};
661
662 // Open history file to read
663 AutoFile filein{OpenUndoFile(pos, true)};
664 if (filein.IsNull()) {
665 LogError("OpenUndoFile failed for %s", pos.ToString());
666 return false;
667 }
668
669 // Read block
670 uint256 hashChecksum;
671 HashVerifier verifier{filein}; // Use HashVerifier as reserializing may lose data, c.f. commit d342424301013ec47dc146a4beb49d5c9319d80a
672 try {
673 verifier << index.pprev->GetBlockHash();
674 verifier >> blockundo;
675 filein >> hashChecksum;
676 } catch (const std::exception& e) {
677 LogError("%s: Deserialize or I/O error - %s at %s\n", __func__, e.what(), pos.ToString());
678 return false;
679 }
680
681 // Verify checksum
682 if (hashChecksum != verifier.GetHash()) {
683 LogError("%s: Checksum mismatch at %s\n", __func__, pos.ToString());
684 return false;
685 }
686
687 return true;
688}
689
690bool BlockManager::FlushUndoFile(int block_file, bool finalize)
691{
692 FlatFilePos undo_pos_old(block_file, m_blockfile_info[block_file].nUndoSize);
693 if (!m_undo_file_seq.Flush(undo_pos_old, finalize)) {
694 m_opts.notifications.flushError(_("Flushing undo file to disk failed. This is likely the result of an I/O error."));
695 return false;
696 }
697 return true;
698}
699
700bool BlockManager::FlushBlockFile(int blockfile_num, bool fFinalize, bool finalize_undo)
701{
702 bool success = true;
703 LOCK(cs_LastBlockFile);
704
705 if (m_blockfile_info.size() < 1) {
706 // Return if we haven't loaded any blockfiles yet. This happens during
707 // chainstate init, when we call ChainstateManager::MaybeRebalanceCaches() (which
708 // then calls FlushStateToDisk()), resulting in a call to this function before we
709 // have populated `m_blockfile_info` via LoadBlockIndexDB().
710 return true;
711 }
712 assert(static_cast<int>(m_blockfile_info.size()) > blockfile_num);
713
714 FlatFilePos block_pos_old(blockfile_num, m_blockfile_info[blockfile_num].nSize);
715 if (!m_block_file_seq.Flush(block_pos_old, fFinalize)) {
716 m_opts.notifications.flushError(_("Flushing block file to disk failed. This is likely the result of an I/O error."));
717 success = false;
718 }
719 // we do not always flush the undo file, as the chain tip may be lagging behind the incoming blocks,
720 // e.g. during IBD or a sync after a node going offline
721 if (!fFinalize || finalize_undo) {
722 if (!FlushUndoFile(blockfile_num, finalize_undo)) {
723 success = false;
724 }
725 }
726 return success;
727}
728
729BlockfileType BlockManager::BlockfileTypeForHeight(int height)
730{
731 if (!m_snapshot_height) {
732 return BlockfileType::NORMAL;
733 }
734 return (height >= *m_snapshot_height) ? BlockfileType::ASSUMED : BlockfileType::NORMAL;
735}
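
A brief hedged example (hypothetical snapshot height): with m_snapshot_height = 840000, BlockfileTypeForHeight(839999) returns BlockfileType::NORMAL while BlockfileTypeForHeight(840000) returns BlockfileType::ASSUMED, so blocks downloaded on top of an assumeutxo snapshot land in a separate set of block files from those back-filled by the background chainstate.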
736
737bool BlockManager::FlushChainstateBlockFile(int tip_height)
738{
739 LOCK(cs_LastBlockFile);
740 auto& cursor = m_blockfile_cursors[BlockfileTypeForHeight(tip_height)];
741 // If the cursor does not exist, it means an assumeutxo snapshot is loaded,
742 // but no blocks past the snapshot height have been written yet, so there
743 // is no data associated with the chainstate, and it is safe not to flush.
744 if (cursor) {
745 return FlushBlockFile(cursor->file_num, /*fFinalize=*/false, /*finalize_undo=*/false);
746 }
747 // No need to log warnings in this case.
748 return true;
749}
750
751uint64_t BlockManager::CalculateCurrentUsage()
752{
753 LOCK(cs_LastBlockFile);
754
755 uint64_t retval = 0;
756 for (const CBlockFileInfo& file : m_blockfile_info) {
757 retval += file.nSize + file.nUndoSize;
758 }
759 return retval;
760}
761
762void BlockManager::UnlinkPrunedFiles(const std::set<int>& setFilesToPrune) const
763{
764 std::error_code ec;
765 for (std::set<int>::iterator it = setFilesToPrune.begin(); it != setFilesToPrune.end(); ++it) {
766 FlatFilePos pos(*it, 0);
767 const bool removed_blockfile{fs::remove(m_block_file_seq.FileName(pos), ec)};
768 const bool removed_undofile{fs::remove(m_undo_file_seq.FileName(pos), ec)};
769 if (removed_blockfile || removed_undofile) {
770 LogDebug(BCLog::BLOCKSTORAGE, "Prune: %s deleted blk/rev (%05u)\n", __func__, *it);
771 }
772 }
773}
774
775AutoFile BlockManager::OpenBlockFile(const FlatFilePos& pos, bool fReadOnly) const
776{
777 return AutoFile{m_block_file_seq.Open(pos, fReadOnly), m_xor_key};
778}
779
780/** Open an undo file (rev?????.dat) */
781AutoFile BlockManager::OpenUndoFile(const FlatFilePos& pos, bool fReadOnly) const
782{
783 return AutoFile{m_undo_file_seq.Open(pos, fReadOnly), m_xor_key};
784}
785
786fs::path BlockManager::GetBlockPosFilename(const FlatFilePos& pos) const
787{
788 return m_block_file_seq.FileName(pos);
789}
790
791FlatFilePos BlockManager::FindNextBlockPos(unsigned int nAddSize, unsigned int nHeight, uint64_t nTime)
792{
793 LOCK(cs_LastBlockFile);
794
795 const BlockfileType chain_type = BlockfileTypeForHeight(nHeight);
796
797 if (!m_blockfile_cursors[chain_type]) {
798 // If a snapshot is loaded during runtime, we may not have initialized this cursor yet.
799 assert(chain_type == BlockfileType::ASSUMED);
800 const auto new_cursor = BlockfileCursor{this->MaxBlockfileNum() + 1};
801 m_blockfile_cursors[chain_type] = new_cursor;
802 LogDebug(BCLog::BLOCKSTORAGE, "[%s] initializing blockfile cursor to %s\n", chain_type, new_cursor);
803 }
804 const int last_blockfile = m_blockfile_cursors[chain_type]->file_num;
805
806 int nFile = last_blockfile;
807 if (static_cast<int>(m_blockfile_info.size()) <= nFile) {
808 m_blockfile_info.resize(nFile + 1);
809 }
810
811 bool finalize_undo = false;
812 unsigned int max_blockfile_size{MAX_BLOCKFILE_SIZE};
813 // Use smaller blockfiles in test-only -fastprune mode - but avoid
814 // the possibility of having a block not fit into the block file.
815 if (m_opts.fast_prune) {
816 max_blockfile_size = 0x10000; // 64kiB
817 if (nAddSize >= max_blockfile_size) {
818 // dynamically adjust the blockfile size to be larger than the added size
819 max_blockfile_size = nAddSize + 1;
820 }
821 }
822 assert(nAddSize < max_blockfile_size);
823
824 while (m_blockfile_info[nFile].nSize + nAddSize >= max_blockfile_size) {
825 // when the undo file is keeping up with the block file, we want to flush it explicitly
826 // when it is lagging behind (more blocks arrive than are being connected), we let the
827 // undo block write case handle it
828 finalize_undo = (static_cast<int>(m_blockfile_info[nFile].nHeightLast) ==
829 Assert(m_blockfile_cursors[chain_type])->undo_height);
830
831 // Try the next unclaimed blockfile number
832 nFile = this->MaxBlockfileNum() + 1;
833 // Set to increment MaxBlockfileNum() for next iteration
834 m_blockfile_cursors[chain_type] = BlockfileCursor{nFile};
835
836 if (static_cast<int>(m_blockfile_info.size()) <= nFile) {
837 m_blockfile_info.resize(nFile + 1);
838 }
839 }
840 FlatFilePos pos;
841 pos.nFile = nFile;
842 pos.nPos = m_blockfile_info[nFile].nSize;
843
844 if (nFile != last_blockfile) {
845 LogDebug(BCLog::BLOCKSTORAGE, "Leaving block file %i: %s (onto %i) (height %i)\n",
846 last_blockfile, m_blockfile_info[last_blockfile].ToString(), nFile, nHeight);
847
848 // Do not propagate the return code. The flush concerns a previous block
849 // and undo file that has already been written to. If a flush fails
850 // here, and we crash, there is no expected additional block data
851 // inconsistency arising from the flush failure here. However, the undo
852 // data may be inconsistent after a crash if the flush is called during
853 // a reindex. A flush error might also leave some of the data files
854 // untrimmed.
855 if (!FlushBlockFile(last_blockfile, /*fFinalize=*/true, finalize_undo)) {
857 "Failed to flush previous block file %05i (finalize=1, finalize_undo=%i) before opening new block file %05i\n",
858 last_blockfile, finalize_undo, nFile);
859 }
860 // No undo data yet in the new file, so reset our undo-height tracking.
861 m_blockfile_cursors[chain_type] = BlockfileCursor{nFile};
862 }
863
864 m_blockfile_info[nFile].AddBlock(nHeight, nTime);
865 m_blockfile_info[nFile].nSize += nAddSize;
866
867 bool out_of_space;
868 size_t bytes_allocated = m_block_file_seq.Allocate(pos, nAddSize, out_of_space);
869 if (out_of_space) {
870 m_opts.notifications.fatalError(_("Disk space is too low!"));
871 return {};
872 }
873 if (bytes_allocated != 0 && IsPruneMode()) {
874 m_check_for_pruning = true;
875 }
876
877 m_dirty_fileinfo.insert(nFile);
878 return pos;
879}
880
881void BlockManager::UpdateBlockInfo(const CBlock& block, unsigned int nHeight, const FlatFilePos& pos)
882{
883 LOCK(cs_LastBlockFile);
884
885 // Update the cursor so it points to the last file.
886 const BlockfileType chain_type{BlockfileTypeForHeight(nHeight)};
887 auto& cursor{m_blockfile_cursors[chain_type]};
888 if (!cursor || cursor->file_num < pos.nFile) {
889 m_blockfile_cursors[chain_type] = BlockfileCursor{pos.nFile};
890 }
891
892 // Update the file information with the current block.
893 const unsigned int added_size = ::GetSerializeSize(TX_WITH_WITNESS(block));
894 const int nFile = pos.nFile;
895 if (static_cast<int>(m_blockfile_info.size()) <= nFile) {
896 m_blockfile_info.resize(nFile + 1);
897 }
898 m_blockfile_info[nFile].AddBlock(nHeight, block.GetBlockTime());
899 m_blockfile_info[nFile].nSize = std::max(pos.nPos + added_size, m_blockfile_info[nFile].nSize);
900 m_dirty_fileinfo.insert(nFile);
901}
902
903bool BlockManager::FindUndoPos(BlockValidationState& state, int nFile, FlatFilePos& pos, unsigned int nAddSize)
904{
905 pos.nFile = nFile;
906
907 LOCK(cs_LastBlockFile);
908
909 pos.nPos = m_blockfile_info[nFile].nUndoSize;
910 m_blockfile_info[nFile].nUndoSize += nAddSize;
911 m_dirty_fileinfo.insert(nFile);
912
913 bool out_of_space;
914 size_t bytes_allocated = m_undo_file_seq.Allocate(pos, nAddSize, out_of_space);
915 if (out_of_space) {
916 return FatalError(m_opts.notifications, state, _("Disk space is too low!"));
917 }
918 if (bytes_allocated != 0 && IsPruneMode()) {
919 m_check_for_pruning = true;
920 }
921
922 return true;
923}
924
925bool BlockManager::WriteBlockUndo(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex& block)
926{
927 AssertLockHeld(::cs_main);
928 const BlockfileType type = BlockfileTypeForHeight(block.nHeight);
929 auto& cursor = *Assert(WITH_LOCK(cs_LastBlockFile, return m_blockfile_cursors[type]));
930
931 // Write undo information to disk
932 if (block.GetUndoPos().IsNull()) {
933 FlatFilePos pos;
934 const unsigned int blockundo_size{static_cast<unsigned int>(GetSerializeSize(blockundo))};
935 if (!FindUndoPos(state, block.nFile, pos, blockundo_size + UNDO_DATA_DISK_OVERHEAD)) {
936 LogError("FindUndoPos failed");
937 return false;
938 }
939 // Open history file to append
940 AutoFile fileout{OpenUndoFile(pos)};
941 if (fileout.IsNull()) {
942 LogError("OpenUndoFile failed");
943 return FatalError(m_opts.notifications, state, _("Failed to write undo data."));
944 }
945
946 // Write index header
947 fileout << GetParams().MessageStart() << blockundo_size;
948 // Write undo data
949 pos.nPos += BLOCK_SERIALIZATION_HEADER_SIZE;
950 fileout << blockundo;
951
952 // Calculate & write checksum
953 HashWriter hasher{};
954 hasher << block.pprev->GetBlockHash();
955 hasher << blockundo;
956 fileout << hasher.GetHash();
957
958 // rev files are written in block height order, whereas blk files are written as blocks come in (often out of order)
959 // we want to flush the rev (undo) file once we've written the last block, which is indicated by the last height
960 // in the block file info as below; note that this does not catch the case where the undo writes are keeping up
961 // with the block writes (usually when a synced up node is getting newly mined blocks) -- this case is caught in
962 // the FindNextBlockPos function
963 if (pos.nFile < cursor.file_num && static_cast<uint32_t>(block.nHeight) == m_blockfile_info[pos.nFile].nHeightLast) {
964 // Do not propagate the return code, a failed flush here should not
965 // be an indication for a failed write. If it were propagated here,
966 // the caller would assume the undo data not to be written, when in
967 // fact it is. Note though, that a failed flush might leave the data
968 // file untrimmed.
969 if (!FlushUndoFile(pos.nFile, true)) {
970 LogPrintLevel(BCLog::BLOCKSTORAGE, BCLog::Level::Warning, "Failed to flush undo file %05i\n", pos.nFile);
971 }
972 } else if (pos.nFile == cursor.file_num && block.nHeight > cursor.undo_height) {
973 cursor.undo_height = block.nHeight;
974 }
975 // update nUndoPos in block index
976 block.nUndoPos = pos.nPos;
977 block.nStatus |= BLOCK_HAVE_UNDO;
978 m_dirty_blockindex.insert(&block);
979 }
980
981 return true;
982}
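
A hedged sketch of the rev*.dat record written above (the layout is implied by this function together with ReadBlockUndo(); the 8-byte header plus 32-byte checksum match UNDO_DATA_DISK_OVERHEAD):

// [ 4 bytes] network magic (GetParams().MessageStart())
// [ 4 bytes] blockundo_size, the serialized size of the undo data
// [ N bytes] serialized CBlockUndo
// [32 bytes] double-SHA256 of (pprev block hash || serialized CBlockUndo)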
983
984bool BlockManager::ReadBlock(CBlock& block, const FlatFilePos& pos) const
985{
986 block.SetNull();
987
988 // Open history file to read
989 AutoFile filein{OpenBlockFile(pos, true)};
990 if (filein.IsNull()) {
991 LogError("%s: OpenBlockFile failed for %s\n", __func__, pos.ToString());
992 return false;
993 }
994
995 // Read block
996 try {
997 filein >> TX_WITH_WITNESS(block);
998 } catch (const std::exception& e) {
999 LogError("%s: Deserialize or I/O error - %s at %s\n", __func__, e.what(), pos.ToString());
1000 return false;
1001 }
1002
1003 // Check the header
1004 if (!CheckProofOfWork(block.GetHash(), block.nBits, GetConsensus())) {
1005 LogError("%s: Errors in block header at %s\n", __func__, pos.ToString());
1006 return false;
1007 }
1008
1009 // Signet only: check block solution
1010 if (GetConsensus().signet_blocks && !CheckSignetBlockSolution(block, GetConsensus())) {
1011 LogError("%s: Errors in block solution at %s\n", __func__, pos.ToString());
1012 return false;
1013 }
1014
1015 return true;
1016}
1017
1018bool BlockManager::ReadBlock(CBlock& block, const CBlockIndex& index) const
1019{
1020 const FlatFilePos block_pos{WITH_LOCK(cs_main, return index.GetBlockPos())};
1021
1022 if (!ReadBlock(block, block_pos)) {
1023 return false;
1024 }
1025 if (block.GetHash() != index.GetBlockHash()) {
1026 LogError("%s: GetHash() doesn't match index for %s at %s\n", __func__, index.ToString(), block_pos.ToString());
1027 return false;
1028 }
1029 return true;
1030}
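
Illustrative usage sketch (assumptions: `blockman` is this BlockManager and `hash` is a block hash that may or may not be known): look the index entry up under cs_main, then read and verify the block from disk:

CBlock block;
const CBlockIndex* pindex{WITH_LOCK(cs_main, return blockman.LookupBlockIndex(hash))};
if (pindex && blockman.ReadBlock(block, *pindex)) {
    // `block` now holds the deserialized block whose hash matches `hash`.
}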
1031
1032bool BlockManager::ReadRawBlock(std::vector<uint8_t>& block, const FlatFilePos& pos) const
1033{
1034 FlatFilePos hpos = pos;
1035 // If nPos is less than 8 the pos is null and we don't have the block data
1036 // Return early to prevent undefined behavior of unsigned int underflow
1037 if (hpos.nPos < 8) {
1038 LogError("%s: OpenBlockFile failed for %s\n", __func__, pos.ToString());
1039 return false;
1040 }
1041 hpos.nPos -= 8; // Seek back 8 bytes for meta header
1042 AutoFile filein{OpenBlockFile(hpos, true)};
1043 if (filein.IsNull()) {
1044 LogError("%s: OpenBlockFile failed for %s\n", __func__, pos.ToString());
1045 return false;
1046 }
1047
1048 try {
1049 MessageStartChars blk_start;
1050 unsigned int blk_size;
1051
1052 filein >> blk_start >> blk_size;
1053
1054 if (blk_start != GetParams().MessageStart()) {
1055 LogError("%s: Block magic mismatch for %s: %s versus expected %s\n", __func__, pos.ToString(),
1056 HexStr(blk_start),
1057 HexStr(GetParams().MessageStart()));
1058 return false;
1059 }
1060
1061 if (blk_size > MAX_SIZE) {
1062 LogError("%s: Block data is larger than maximum deserialization size for %s: %s versus %s\n", __func__, pos.ToString(),
1063 blk_size, MAX_SIZE);
1064 return false;
1065 }
1066
1067 block.resize(blk_size); // Zeroing of memory is intentional here
1068 filein.read(MakeWritableByteSpan(block));
1069 } catch (const std::exception& e) {
1070 LogError("%s: Read from block file failed: %s for %s\n", __func__, e.what(), pos.ToString());
1071 return false;
1072 }
1073
1074 return true;
1075}
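
A hedged sketch of the blk*.dat framing that the 8-byte back-seek above relies on (written by WriteBlock() below; the FlatFilePos stored in the block index points at the serialized block itself):

// [4 bytes] network magic      <- hpos.nPos, i.e. pos.nPos - 8
// [4 bytes] blk_size, the serialized block size
// [N bytes] serialized CBlock  <- pos.nPos as recorded in the block index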
1076
1077FlatFilePos BlockManager::WriteBlock(const CBlock& block, int nHeight)
1078{
1079 const unsigned int block_size{static_cast<unsigned int>(GetSerializeSize(TX_WITH_WITNESS(block)))};
1080 FlatFilePos pos{FindNextBlockPos(block_size + BLOCK_SERIALIZATION_HEADER_SIZE, nHeight, block.GetBlockTime())};
1081 if (pos.IsNull()) {
1082 LogError("FindNextBlockPos failed");
1083 return FlatFilePos();
1084 }
1085 AutoFile fileout{OpenBlockFile(pos)};
1086 if (fileout.IsNull()) {
1087 LogError("OpenBlockFile failed");
1088 m_opts.notifications.fatalError(_("Failed to write block."));
1089 return FlatFilePos();
1090 }
1091
1092 // Write index header
1093 fileout << GetParams().MessageStart() << block_size;
1094 // Write block
1095 pos.nPos += BLOCK_SERIALIZATION_HEADER_SIZE;
1096 fileout << TX_WITH_WITNESS(block);
1097 return pos;
1098}
1099
1100static auto InitBlocksdirXorKey(const BlockManager::Options& opts)
1101{
1102 // Bytes are serialized without length indicator, so this is also the exact
1103 // size of the XOR-key file.
1104 std::array<std::byte, 8> xor_key{};
1105
1106 // Consider this to be the first run if the blocksdir contains only hidden
1107 // files (those which start with a .). Checking for a fully-empty dir would
1108 // be too aggressive as a .lock file may have already been written.
1109 bool first_run = true;
1110 for (const auto& entry : fs::directory_iterator(opts.blocks_dir)) {
1111 const std::string path = fs::PathToString(entry.path().filename());
1112 if (!entry.is_regular_file() || !path.starts_with('.')) {
1113 first_run = false;
1114 break;
1115 }
1116 }
1117
1118 if (opts.use_xor && first_run) {
1119 // Only use random fresh key when the boolean option is set and on the
1120 // very first start of the program.
1121 FastRandomContext{}.fillrand(xor_key);
1122 }
1123
1124 const fs::path xor_key_path{opts.blocks_dir / "xor.dat"};
1125 if (fs::exists(xor_key_path)) {
1126 // A pre-existing xor key file has priority.
1127 AutoFile xor_key_file{fsbridge::fopen(xor_key_path, "rb")};
1128 xor_key_file >> xor_key;
1129 } else {
1130 // Create initial or missing xor key file
1131 AutoFile xor_key_file{fsbridge::fopen(xor_key_path,
1132#ifdef __MINGW64__
1133 "wb" // Temporary workaround for https://github.com/bitcoin/bitcoin/issues/30210
1134#else
1135 "wbx"
1136#endif
1137 )};
1138 xor_key_file << xor_key;
1139 }
1140 // If the user disabled the key, it must be zero.
1141 if (!opts.use_xor && xor_key != decltype(xor_key){}) {
1142 throw std::runtime_error{
1143 strprintf("The blocksdir XOR-key can not be disabled when a random key was already stored! "
1144 "Stored key: '%s', stored path: '%s'.",
1145 HexStr(xor_key), fs::PathToString(xor_key_path)),
1146 };
1147 }
1148 LogInfo("Using obfuscation key for blocksdir *.dat files (%s): '%s'\n", fs::PathToString(opts.blocks_dir), HexStr(xor_key));
1149 return std::vector<std::byte>{xor_key.begin(), xor_key.end()};
1150}
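
A hedged note on how the returned key is used (the XOR itself happens inside AutoFile, not here): every byte written to or read from the blocksdir *.dat files is combined with the 8-byte key repeating over the stream, i.e. obfuscated[i] = plain[i] ^ xor_key[i % 8], so the all-zero key written when use_xor is disabled makes the transformation a no-op.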
1151
1152BlockManager::BlockManager(const util::SignalInterrupt& interrupt, Options opts)
1153 : m_prune_mode{opts.prune_target > 0},
1154 m_xor_key{InitBlocksdirXorKey(opts)},
1155 m_opts{std::move(opts)},
1156 m_block_file_seq{FlatFileSeq{m_opts.blocks_dir, "blk", m_opts.fast_prune ? 0x4000 /* 16kB */ : BLOCKFILE_CHUNK_SIZE}},
1157 m_undo_file_seq{FlatFileSeq{m_opts.blocks_dir, "rev", UNDOFILE_CHUNK_SIZE}},
1158 m_interrupt{interrupt}
1159{
1160 m_block_tree_db = std::make_unique<BlockTreeDB>(m_opts.block_tree_db_params);
1161
1162 if (m_opts.block_tree_db_params.wipe_data) {
1163 m_block_tree_db->WriteReindexing(true);
1164 m_blockfiles_indexed = false;
1165 // If we're reindexing in prune mode, wipe away unusable block files and all undo data files
1166 if (m_prune_mode) {
1167 CleanupBlockRevFiles();
1168 }
1169 }
1170}
1171
1172class ImportingNow
1173{
1174 std::atomic<bool>& m_importing;
1175
1176public:
1177 ImportingNow(std::atomic<bool>& importing) : m_importing{importing}
1178 {
1179 assert(m_importing == false);
1180 m_importing = true;
1181 }
1182 ~ImportingNow()
1183 {
1184 assert(m_importing == true);
1185 m_importing = false;
1186 }
1187};
1188
1189void ImportBlocks(ChainstateManager& chainman, std::span<const fs::path> import_paths)
1190{
1191 ImportingNow imp{chainman.m_blockman.m_importing};
1192
1193 // -reindex
1194 if (!chainman.m_blockman.m_blockfiles_indexed) {
1195 int nFile = 0;
1196 // Map of disk positions for blocks with unknown parent (only used for reindex);
1197 // parent hash -> child disk position, multiple children can have the same parent.
1198 std::multimap<uint256, FlatFilePos> blocks_with_unknown_parent;
1199 while (true) {
1200 FlatFilePos pos(nFile, 0);
1201 if (!fs::exists(chainman.m_blockman.GetBlockPosFilename(pos))) {
1202 break; // No block files left to reindex
1203 }
1204 AutoFile file{chainman.m_blockman.OpenBlockFile(pos, true)};
1205 if (file.IsNull()) {
1206 break; // This error is logged in OpenBlockFile
1207 }
1208 LogPrintf("Reindexing block file blk%05u.dat...\n", (unsigned int)nFile);
1209 chainman.LoadExternalBlockFile(file, &pos, &blocks_with_unknown_parent);
1210 if (chainman.m_interrupt) {
1211 LogPrintf("Interrupt requested. Exit %s\n", __func__);
1212 return;
1213 }
1214 nFile++;
1215 }
1216 WITH_LOCK(::cs_main, chainman.m_blockman.m_block_tree_db->WriteReindexing(false));
1217 chainman.m_blockman.m_blockfiles_indexed = true;
1218 LogPrintf("Reindexing finished\n");
1219 // To avoid ending up in a situation without genesis block, re-try initializing (no-op if reindexing worked):
1220 chainman.ActiveChainstate().LoadGenesisBlock();
1221 }
1222
1223 // -loadblock=
1224 for (const fs::path& path : import_paths) {
1225 AutoFile file{fsbridge::fopen(path, "rb")};
1226 if (!file.IsNull()) {
1227 LogPrintf("Importing blocks file %s...\n", fs::PathToString(path));
1228 chainman.LoadExternalBlockFile(file);
1229 if (chainman.m_interrupt) {
1230 LogPrintf("Interrupt requested. Exit %s\n", __func__);
1231 return;
1232 }
1233 } else {
1234 LogPrintf("Warning: Could not open blocks file %s\n", fs::PathToString(path));
1235 }
1236 }
1237
1238 // scan for better chains in the block chain database, that are not yet connected in the active best chain
1239
1240 // We can't hold cs_main during ActivateBestChain even though we're accessing
1241 // the chainman unique_ptrs since ABC requires us not to be holding cs_main, so retrieve
1242 // the relevant pointers before the ABC call.
1243 for (Chainstate* chainstate : WITH_LOCK(::cs_main, return chainman.GetAll())) {
1244 BlockValidationState state;
1245 if (!chainstate->ActivateBestChain(state, nullptr)) {
1246 chainman.GetNotifications().fatalError(strprintf(_("Failed to connect best block (%s)."), state.ToString()));
1247 return;
1248 }
1249 }
1250 // End scope of ImportingNow
1251}
1252
1253std::ostream& operator<<(std::ostream& os, const BlockfileType& type) {
1254 switch(type) {
1255 case BlockfileType::NORMAL: os << "normal"; break;
1256 case BlockfileType::ASSUMED: os << "assumed"; break;
1257 default: os.setstate(std::ios_base::failbit);
1258 }
1259 return os;
1260}
1261
1262std::ostream& operator<<(std::ostream& os, const BlockfileCursor& cursor) {
1263 os << strprintf("BlockfileCursor(file_num=%d, undo_height=%d)", cursor.file_num, cursor.undo_height);
1264 return os;
1265}
1266} // namespace node