Bitcoin Core 30.99.0
blockstorage.cpp
1// Copyright (c) 2011-2022 The Bitcoin Core developers
2// Distributed under the MIT software license, see the accompanying
3// file COPYING or http://www.opensource.org/licenses/mit-license.php.
4
5#include <node/blockstorage.h>
6
7#include <arith_uint256.h>
8#include <chain.h>
9#include <consensus/params.h>
10#include <consensus/validation.h>
11#include <dbwrapper.h>
12#include <flatfile.h>
13#include <hash.h>
14#include <kernel/blockmanager_opts.h>
15#include <kernel/chainparams.h>
16#include <kernel/messagestartchars.h>
17#include <kernel/notifications_interface.h>
18#include <logging.h>
19#include <pow.h>
20#include <primitives/block.h>
21#include <primitives/transaction.h>
22#include <random.h>
23#include <serialize.h>
24#include <signet.h>
25#include <span.h>
26#include <streams.h>
27#include <sync.h>
28#include <tinyformat.h>
29#include <uint256.h>
30#include <undo.h>
31#include <util/batchpriority.h>
32#include <util/check.h>
33#include <util/fs.h>
34#include <util/obfuscation.h>
35#include <util/signalinterrupt.h>
36#include <util/strencodings.h>
37#include <util/syserror.h>
38#include <util/translation.h>
39#include <validation.h>
40
41#include <cstddef>
42#include <map>
43#include <optional>
44#include <unordered_map>
45
46namespace kernel {
47static constexpr uint8_t DB_BLOCK_FILES{'f'};
48static constexpr uint8_t DB_BLOCK_INDEX{'b'};
49static constexpr uint8_t DB_FLAG{'F'};
50static constexpr uint8_t DB_REINDEX_FLAG{'R'};
51static constexpr uint8_t DB_LAST_BLOCK{'l'};
52// Keys used in previous version that might still be found in the DB:
53// BlockTreeDB::DB_TXINDEX_BLOCK{'T'};
54// BlockTreeDB::DB_TXINDEX{'t'}
55// BlockTreeDB::ReadFlag("txindex")
56
57bool BlockTreeDB::ReadBlockFileInfo(int nFile, CBlockFileInfo& info)
58{
59 return Read(std::make_pair(DB_BLOCK_FILES, nFile), info);
60}
61
62void BlockTreeDB::WriteReindexing(bool fReindexing)
63{
64 if (fReindexing) {
65 Write(DB_REINDEX_FLAG, uint8_t{'1'});
66 } else {
67 Erase(DB_REINDEX_FLAG);
68 }
69}
70
71void BlockTreeDB::ReadReindexing(bool& fReindexing)
72{
73 fReindexing = Exists(DB_REINDEX_FLAG);
74}
75
76bool BlockTreeDB::ReadLastBlockFile(int& nFile)
77{
78 return Read(DB_LAST_BLOCK, nFile);
79}
80
81void BlockTreeDB::WriteBatchSync(const std::vector<std::pair<int, const CBlockFileInfo*>>& fileInfo, int nLastFile, const std::vector<const CBlockIndex*>& blockinfo)
82{
83 CDBBatch batch(*this);
84 for (const auto& [file, info] : fileInfo) {
85 batch.Write(std::make_pair(DB_BLOCK_FILES, file), *info);
86 }
87 batch.Write(DB_LAST_BLOCK, nLastFile);
88 for (const CBlockIndex* bi : blockinfo) {
89 batch.Write(std::make_pair(DB_BLOCK_INDEX, bi->GetBlockHash()), CDiskBlockIndex{bi});
90 }
91 WriteBatch(batch, true);
92}
93
94void BlockTreeDB::WriteFlag(const std::string& name, bool fValue)
95{
96 Write(std::make_pair(DB_FLAG, name), fValue ? uint8_t{'1'} : uint8_t{'0'});
97}
98
99bool BlockTreeDB::ReadFlag(const std::string& name, bool& fValue)
100{
101 uint8_t ch;
102 if (!Read(std::make_pair(DB_FLAG, name), ch)) {
103 return false;
104 }
105 fValue = ch == uint8_t{'1'};
106 return true;
107}
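// Editorial note, not part of the upstream file: a sketch of the key/value
// layout that the BlockTreeDB helpers above imply, using the one-byte prefixes
// defined at the top of this namespace:
//
//   ('f', file number)  -> CBlockFileInfo   per block file statistics
//   'l'                 -> int              number of the last block file
//   'R'                 -> '1'              present only while a reindex is in progress
//   ('F', flag name)    -> '1' or '0'       named boolean flags (WriteFlag/ReadFlag)
//   ('b', block hash)   -> CDiskBlockIndex  one record per known block header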
108
109bool BlockTreeDB::LoadBlockIndexGuts(const Consensus::Params& consensusParams, std::function<CBlockIndex*(const uint256&)> insertBlockIndex, const util::SignalInterrupt& interrupt)
110{
111 AssertLockHeld(::cs_main);
112 std::unique_ptr<CDBIterator> pcursor(NewIterator());
113 pcursor->Seek(std::make_pair(DB_BLOCK_INDEX, uint256()));
114
115 // Load m_block_index
116 while (pcursor->Valid()) {
117 if (interrupt) return false;
118 std::pair<uint8_t, uint256> key;
119 if (pcursor->GetKey(key) && key.first == DB_BLOCK_INDEX) {
120 CDiskBlockIndex diskindex;
121 if (pcursor->GetValue(diskindex)) {
122 // Construct block index object
123 CBlockIndex* pindexNew = insertBlockIndex(diskindex.ConstructBlockHash());
124 pindexNew->pprev = insertBlockIndex(diskindex.hashPrev);
125 pindexNew->nHeight = diskindex.nHeight;
126 pindexNew->nFile = diskindex.nFile;
127 pindexNew->nDataPos = diskindex.nDataPos;
128 pindexNew->nUndoPos = diskindex.nUndoPos;
129 pindexNew->nVersion = diskindex.nVersion;
130 pindexNew->hashMerkleRoot = diskindex.hashMerkleRoot;
131 pindexNew->nTime = diskindex.nTime;
132 pindexNew->nBits = diskindex.nBits;
133 pindexNew->nNonce = diskindex.nNonce;
134 pindexNew->nStatus = diskindex.nStatus;
135 pindexNew->nTx = diskindex.nTx;
136
137 if (!CheckProofOfWork(pindexNew->GetBlockHash(), pindexNew->nBits, consensusParams)) {
138 LogError("%s: CheckProofOfWork failed: %s\n", __func__, pindexNew->ToString());
139 return false;
140 }
141
142 pcursor->Next();
143 } else {
144 LogError("%s: failed to read value\n", __func__);
145 return false;
146 }
147 } else {
148 break;
149 }
150 }
151
152 return true;
153}
154} // namespace kernel
155
156namespace node {
157
158bool CBlockIndexWorkComparator::operator()(const CBlockIndex* pa, const CBlockIndex* pb) const
159{
160 // First sort by most total work, ...
161 if (pa->nChainWork > pb->nChainWork) return false;
162 if (pa->nChainWork < pb->nChainWork) return true;
163
164 // ... then by earliest activatable time, ...
165 if (pa->nSequenceId < pb->nSequenceId) return false;
166 if (pa->nSequenceId > pb->nSequenceId) return true;
167
168 // Use pointer address as tie breaker (should only happen with blocks
169 // loaded from disk, as those share the same id: 0 for blocks on the
170 // best chain, 1 for all others).
171 if (pa < pb) return false;
172 if (pa > pb) return true;
173
174 // Identical blocks.
175 return false;
176}
177
178bool CBlockIndexHeightOnlyComparator::operator()(const CBlockIndex* pa, const CBlockIndex* pb) const
179{
180 return pa->nHeight < pb->nHeight;
181}
182
183std::vector<CBlockIndex*> BlockManager::GetAllBlockIndices()
184{
185 AssertLockHeld(cs_main);
186 std::vector<CBlockIndex*> rv;
187 rv.reserve(m_block_index.size());
188 for (auto& [_, block_index] : m_block_index) {
189 rv.push_back(&block_index);
190 }
191 return rv;
192}
193
194CBlockIndex* BlockManager::LookupBlockIndex(const uint256& hash)
195{
196 AssertLockHeld(cs_main);
197 BlockMap::iterator it = m_block_index.find(hash);
198 return it == m_block_index.end() ? nullptr : &it->second;
199}
200
201const CBlockIndex* BlockManager::LookupBlockIndex(const uint256& hash) const
202{
203 AssertLockHeld(cs_main);
204 BlockMap::const_iterator it = m_block_index.find(hash);
205 return it == m_block_index.end() ? nullptr : &it->second;
206}
207
208CBlockIndex* BlockManager::AddToBlockIndex(const CBlockHeader& block, CBlockIndex*& best_header)
209{
210 AssertLockHeld(cs_main);
211
212 auto [mi, inserted] = m_block_index.try_emplace(block.GetHash(), block);
213 if (!inserted) {
214 return &mi->second;
215 }
216 CBlockIndex* pindexNew = &(*mi).second;
217
218 // We assign the sequence id to blocks only when the full data is available,
219 // to avoid miners withholding blocks but broadcasting headers, to get a
220 // competitive advantage.
222
223 pindexNew->phashBlock = &((*mi).first);
224 BlockMap::iterator miPrev = m_block_index.find(block.hashPrevBlock);
225 if (miPrev != m_block_index.end()) {
226 pindexNew->pprev = &(*miPrev).second;
227 pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
228 pindexNew->BuildSkip();
229 }
230 pindexNew->nTimeMax = (pindexNew->pprev ? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime) : pindexNew->nTime);
231 pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew);
232 pindexNew->RaiseValidity(BLOCK_VALID_TREE);
233 if (best_header == nullptr || best_header->nChainWork < pindexNew->nChainWork) {
234 best_header = pindexNew;
235 }
236
237 m_dirty_blockindex.insert(pindexNew);
238
239 return pindexNew;
240}
241
242void BlockManager::PruneOneBlockFile(const int fileNumber)
243{
244 AssertLockHeld(cs_main);
245 LOCK(cs_LastBlockFile);
246
247 for (auto& entry : m_block_index) {
248 CBlockIndex* pindex = &entry.second;
249 if (pindex->nFile == fileNumber) {
250 pindex->nStatus &= ~BLOCK_HAVE_DATA;
251 pindex->nStatus &= ~BLOCK_HAVE_UNDO;
252 pindex->nFile = 0;
253 pindex->nDataPos = 0;
254 pindex->nUndoPos = 0;
255 m_dirty_blockindex.insert(pindex);
256
257 // Prune from m_blocks_unlinked -- any block we prune would have
258 // to be downloaded again in order to consider its chain, at which
259 // point it would be considered as a candidate for
260 // m_blocks_unlinked or setBlockIndexCandidates.
261 auto range = m_blocks_unlinked.equal_range(pindex->pprev);
262 while (range.first != range.second) {
263 std::multimap<CBlockIndex*, CBlockIndex*>::iterator _it = range.first;
264 range.first++;
265 if (_it->second == pindex) {
266 m_blocks_unlinked.erase(_it);
267 }
268 }
269 }
270 }
271
272 m_blockfile_info.at(fileNumber) = CBlockFileInfo{};
273 m_dirty_fileinfo.insert(fileNumber);
274}
275
276void BlockManager::FindFilesToPruneManual(
277 std::set<int>& setFilesToPrune,
278 int nManualPruneHeight,
279 const Chainstate& chain,
280 ChainstateManager& chainman)
281{
282 assert(IsPruneMode() && nManualPruneHeight > 0);
283
284 LOCK2(cs_main, cs_LastBlockFile);
285 if (chain.m_chain.Height() < 0) {
286 return;
287 }
288
289 const auto [min_block_to_prune, last_block_can_prune] = chainman.GetPruneRange(chain, nManualPruneHeight);
290
291 int count = 0;
292 for (int fileNumber = 0; fileNumber < this->MaxBlockfileNum(); fileNumber++) {
293 const auto& fileinfo = m_blockfile_info[fileNumber];
294 if (fileinfo.nSize == 0 || fileinfo.nHeightLast > (unsigned)last_block_can_prune || fileinfo.nHeightFirst < (unsigned)min_block_to_prune) {
295 continue;
296 }
297
298 PruneOneBlockFile(fileNumber);
299 setFilesToPrune.insert(fileNumber);
300 count++;
301 }
302 LogInfo("[%s] Prune (Manual): prune_height=%d removed %d blk/rev pairs",
303 chain.GetRole(), last_block_can_prune, count);
304}
305
306void BlockManager::FindFilesToPrune(
307 std::set<int>& setFilesToPrune,
308 int last_prune,
309 const Chainstate& chain,
310 ChainstateManager& chainman)
311{
312 LOCK2(cs_main, cs_LastBlockFile);
313 // Distribute our -prune budget over all chainstates.
314 const auto target = std::max(
315 MIN_DISK_SPACE_FOR_BLOCK_FILES, GetPruneTarget() / chainman.GetAll().size());
316 const uint64_t target_sync_height = chainman.m_best_header->nHeight;
317
318 if (chain.m_chain.Height() < 0 || target == 0) {
319 return;
320 }
321 if (static_cast<uint64_t>(chain.m_chain.Height()) <= chainman.GetParams().PruneAfterHeight()) {
322 return;
323 }
324
325 const auto [min_block_to_prune, last_block_can_prune] = chainman.GetPruneRange(chain, last_prune);
326
327 uint64_t nCurrentUsage = CalculateCurrentUsage();
328 // We don't check to prune until after we've allocated new space for files
329 // So we should leave a buffer under our target to account for another allocation
330 // before the next pruning.
331 uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
332 uint64_t nBytesToPrune;
333 int count = 0;
334
335 if (nCurrentUsage + nBuffer >= target) {
336 // On a prune event, the chainstate DB is flushed.
337 // To avoid excessive prune events negating the benefit of high dbcache
338 // values, we should not prune too rapidly.
339 // So when pruning in IBD, increase the buffer to avoid a re-prune too soon.
340 const auto chain_tip_height = chain.m_chain.Height();
341 if (chainman.IsInitialBlockDownload() && target_sync_height > (uint64_t)chain_tip_height) {
342 // Since this is only relevant during IBD, we assume blocks are at least 1 MB on average
343 static constexpr uint64_t average_block_size = 1000000; /* 1 MB */
344 const uint64_t remaining_blocks = target_sync_height - chain_tip_height;
345 nBuffer += average_block_size * remaining_blocks;
346 }
347
348 for (int fileNumber = 0; fileNumber < this->MaxBlockfileNum(); fileNumber++) {
349 const auto& fileinfo = m_blockfile_info[fileNumber];
350 nBytesToPrune = fileinfo.nSize + fileinfo.nUndoSize;
351
352 if (fileinfo.nSize == 0) {
353 continue;
354 }
355
356 if (nCurrentUsage + nBuffer < target) { // are we below our target?
357 break;
358 }
359
360 // don't prune files that could have a block that's not within the allowable
361 // prune range for the chain being pruned.
362 if (fileinfo.nHeightLast > (unsigned)last_block_can_prune || fileinfo.nHeightFirst < (unsigned)min_block_to_prune) {
363 continue;
364 }
365
366 PruneOneBlockFile(fileNumber);
367 // Queue up the files for removal
368 setFilesToPrune.insert(fileNumber);
369 nCurrentUsage -= nBytesToPrune;
370 count++;
371 }
372 }
373
374 LogDebug(BCLog::PRUNE, "[%s] target=%dMiB actual=%dMiB diff=%dMiB min_height=%d max_prune_height=%d removed %d blk/rev pairs\n",
375 chain.GetRole(), target / 1024 / 1024, nCurrentUsage / 1024 / 1024,
376 (int64_t(target) - int64_t(nCurrentUsage)) / 1024 / 1024,
377 min_block_to_prune, last_block_can_prune, count);
378}
379
380void BlockManager::UpdatePruneLock(const std::string& name, const PruneLockInfo& lock_info) {
381 AssertLockHeld(::cs_main);
382 m_prune_locks[name] = lock_info;
383}
384
385CBlockIndex* BlockManager::InsertBlockIndex(const uint256& hash)
386{
387 AssertLockHeld(cs_main);
388
389 if (hash.IsNull()) {
390 return nullptr;
391 }
392
393 const auto [mi, inserted]{m_block_index.try_emplace(hash)};
394 CBlockIndex* pindex = &(*mi).second;
395 if (inserted) {
396 pindex->phashBlock = &((*mi).first);
397 }
398 return pindex;
399}
400
401bool BlockManager::LoadBlockIndex(const std::optional<uint256>& snapshot_blockhash)
402{
403 if (!m_block_tree_db->LoadBlockIndexGuts(
404 GetConsensus(), [this](const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return this->InsertBlockIndex(hash); }, m_interrupt)) {
405 return false;
406 }
407
408 if (snapshot_blockhash) {
409 const std::optional<AssumeutxoData> maybe_au_data = GetParams().AssumeutxoForBlockhash(*snapshot_blockhash);
410 if (!maybe_au_data) {
411 m_opts.notifications.fatalError(strprintf(_("Assumeutxo data not found for the given blockhash '%s'."), snapshot_blockhash->ToString()));
412 return false;
413 }
414 const AssumeutxoData& au_data = *Assert(maybe_au_data);
415 m_snapshot_height = au_data.height;
416 CBlockIndex* base{LookupBlockIndex(*snapshot_blockhash)};
417
418 // Since m_chain_tx_count (responsible for estimated progress) isn't persisted
419 // to disk, we must bootstrap the value for assumedvalid chainstates
420 // from the hardcoded assumeutxo chainparams.
421 base->m_chain_tx_count = au_data.m_chain_tx_count;
422 LogInfo("[snapshot] set m_chain_tx_count=%d for %s", au_data.m_chain_tx_count, snapshot_blockhash->ToString());
423 } else {
424 // If this isn't called with a snapshot blockhash, make sure the cached snapshot height
425 // is null. This is relevant during snapshot completion, when the blockman may be loaded
426 // with a height that then needs to be cleared after the snapshot is fully validated.
427 m_snapshot_height.reset();
428 }
429
430 Assert(m_snapshot_height.has_value() == snapshot_blockhash.has_value());
431
432 // Calculate nChainWork
433 std::vector<CBlockIndex*> vSortedByHeight{GetAllBlockIndices()};
434 std::sort(vSortedByHeight.begin(), vSortedByHeight.end(),
435 CBlockIndexHeightOnlyComparator());
436
437 CBlockIndex* previous_index{nullptr};
438 for (CBlockIndex* pindex : vSortedByHeight) {
439 if (m_interrupt) return false;
440 if (previous_index && pindex->nHeight > previous_index->nHeight + 1) {
441 LogError("%s: block index is non-contiguous, index of height %d missing\n", __func__, previous_index->nHeight + 1);
442 return false;
443 }
444 previous_index = pindex;
445 pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex);
446 pindex->nTimeMax = (pindex->pprev ? std::max(pindex->pprev->nTimeMax, pindex->nTime) : pindex->nTime);
447
448 // We can link the chain of blocks for which we've received transactions at some point, or
449 // blocks that are assumed-valid on the basis of snapshot load (see
450 // PopulateAndValidateSnapshot()).
451 // Pruned nodes may have deleted the block.
452 if (pindex->nTx > 0) {
453 if (pindex->pprev) {
454 if (m_snapshot_height && pindex->nHeight == *m_snapshot_height &&
455 pindex->GetBlockHash() == *snapshot_blockhash) {
456 // Should have been set above; don't disturb it with code below.
457 Assert(pindex->m_chain_tx_count > 0);
458 } else if (pindex->pprev->m_chain_tx_count > 0) {
459 pindex->m_chain_tx_count = pindex->pprev->m_chain_tx_count + pindex->nTx;
460 } else {
461 pindex->m_chain_tx_count = 0;
462 m_blocks_unlinked.insert(std::make_pair(pindex->pprev, pindex));
463 }
464 } else {
465 pindex->m_chain_tx_count = pindex->nTx;
466 }
467 }
468 if (!(pindex->nStatus & BLOCK_FAILED_MASK) && pindex->pprev && (pindex->pprev->nStatus & BLOCK_FAILED_MASK)) {
469 pindex->nStatus |= BLOCK_FAILED_CHILD;
470 m_dirty_blockindex.insert(pindex);
471 }
472 if (pindex->pprev) {
473 pindex->BuildSkip();
474 }
475 }
476
477 return true;
478}
479
480void BlockManager::WriteBlockIndexDB()
481{
482 AssertLockHeld(::cs_main);
483 std::vector<std::pair<int, const CBlockFileInfo*>> vFiles;
483 std::vector<std::pair<int, const CBlockFileInfo*>> vFiles;
484 vFiles.reserve(m_dirty_fileinfo.size());
485 for (std::set<int>::iterator it = m_dirty_fileinfo.begin(); it != m_dirty_fileinfo.end();) {
486 vFiles.emplace_back(*it, &m_blockfile_info[*it]);
487 m_dirty_fileinfo.erase(it++);
488 }
489 std::vector<const CBlockIndex*> vBlocks;
490 vBlocks.reserve(m_dirty_blockindex.size());
491 for (std::set<CBlockIndex*>::iterator it = m_dirty_blockindex.begin(); it != m_dirty_blockindex.end();) {
492 vBlocks.push_back(*it);
493 m_dirty_blockindex.erase(it++);
494 }
495 int max_blockfile = WITH_LOCK(cs_LastBlockFile, return this->MaxBlockfileNum());
496 m_block_tree_db->WriteBatchSync(vFiles, max_blockfile, vBlocks);
497}
498
499bool BlockManager::LoadBlockIndexDB(const std::optional<uint256>& snapshot_blockhash)
500{
501 if (!LoadBlockIndex(snapshot_blockhash)) {
502 return false;
503 }
504 int max_blockfile_num{0};
505
506 // Load block file info
507 m_block_tree_db->ReadLastBlockFile(max_blockfile_num);
508 m_blockfile_info.resize(max_blockfile_num + 1);
509 LogInfo("Loading block index db: last block file = %i", max_blockfile_num);
510 for (int nFile = 0; nFile <= max_blockfile_num; nFile++) {
511 m_block_tree_db->ReadBlockFileInfo(nFile, m_blockfile_info[nFile]);
512 }
513 LogInfo("Loading block index db: last block file info: %s", m_blockfile_info[max_blockfile_num].ToString());
514 for (int nFile = max_blockfile_num + 1; true; nFile++) {
515 CBlockFileInfo info;
516 if (m_block_tree_db->ReadBlockFileInfo(nFile, info)) {
517 m_blockfile_info.push_back(info);
518 } else {
519 break;
520 }
521 }
522
523 // Check presence of blk files
524 LogInfo("Checking all blk files are present...");
525 std::set<int> setBlkDataFiles;
526 for (const auto& [_, block_index] : m_block_index) {
527 if (block_index.nStatus & BLOCK_HAVE_DATA) {
528 setBlkDataFiles.insert(block_index.nFile);
529 }
530 }
531 for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++) {
532 FlatFilePos pos(*it, 0);
533 if (OpenBlockFile(pos, /*fReadOnly=*/true).IsNull()) {
534 return false;
535 }
536 }
537
538 {
539 // Initialize the blockfile cursors.
540 LOCK(cs_LastBlockFile);
541 for (size_t i = 0; i < m_blockfile_info.size(); ++i) {
542 const auto last_height_in_file = m_blockfile_info[i].nHeightLast;
543 m_blockfile_cursors[BlockfileTypeForHeight(last_height_in_file)] = {static_cast<int>(i), 0};
544 }
545 }
546
547 // Check whether we have ever pruned block & undo files
548 m_block_tree_db->ReadFlag("prunedblockfiles", m_have_pruned);
549 if (m_have_pruned) {
550 LogInfo("Loading block index db: Block files have previously been pruned");
551 }
552
553 // Check whether we need to continue reindexing
554 bool fReindexing = false;
555 m_block_tree_db->ReadReindexing(fReindexing);
556 if (fReindexing) m_blockfiles_indexed = false;
557
558 return true;
559}
560
561void BlockManager::ScanAndUnlinkAlreadyPrunedFiles()
562{
563 AssertLockHeld(::cs_main);
564 int max_blockfile = WITH_LOCK(cs_LastBlockFile, return this->MaxBlockfileNum());
565 if (!m_have_pruned) {
566 return;
567 }
568
569 std::set<int> block_files_to_prune;
570 for (int file_number = 0; file_number < max_blockfile; file_number++) {
571 if (m_blockfile_info[file_number].nSize == 0) {
572 block_files_to_prune.insert(file_number);
573 }
574 }
575
576 UnlinkPrunedFiles(block_files_to_prune);
577}
578
579bool BlockManager::IsBlockPruned(const CBlockIndex& block) const
580{
581 AssertLockHeld(::cs_main);
582 return m_have_pruned && !(block.nStatus & BLOCK_HAVE_DATA) && (block.nTx > 0);
583}
584
585const CBlockIndex* BlockManager::GetFirstBlock(const CBlockIndex& upper_block, uint32_t status_mask, const CBlockIndex* lower_block) const
586{
587 AssertLockHeld(::cs_main);
588 const CBlockIndex* last_block = &upper_block;
589 assert((last_block->nStatus & status_mask) == status_mask); // 'upper_block' must satisfy the status mask
590 while (last_block->pprev && ((last_block->pprev->nStatus & status_mask) == status_mask)) {
591 if (lower_block) {
592 // Return if we reached the lower_block
593 if (last_block == lower_block) return lower_block;
594 // if range was surpassed, means that 'lower_block' is not part of the 'upper_block' chain
595 // and so far this is not allowed.
596 assert(last_block->nHeight >= lower_block->nHeight);
597 }
598 last_block = last_block->pprev;
599 }
600 assert(last_block != nullptr);
601 return last_block;
602}
603
604bool BlockManager::CheckBlockDataAvailability(const CBlockIndex& upper_block, const CBlockIndex& lower_block)
605{
606 if (!(upper_block.nStatus & BLOCK_HAVE_DATA)) return false;
607 return GetFirstBlock(upper_block, BLOCK_HAVE_DATA, &lower_block) == &lower_block;
608}
609
610// If we're using -prune with -reindex, then delete block files that will be ignored by the
611// reindex. Since reindexing works by starting at block file 0 and looping until a blockfile
612// is missing, do the same here to delete any later block files after a gap. Also delete all
613// rev files since they'll be rewritten by the reindex anyway. This ensures that m_blockfile_info
614// is in sync with what's actually on disk by the time we start downloading, so that pruning
615// works correctly.
616void BlockManager::CleanupBlockRevFiles() const
617{
618 std::map<std::string, fs::path> mapBlockFiles;
619
620 // Glob all blk?????.dat and rev?????.dat files from the blocks directory.
621 // Remove the rev files immediately and insert the blk file paths into an
622 // ordered map keyed by block file index.
623 LogInfo("Removing unusable blk?????.dat and rev?????.dat files for -reindex with -prune");
624 for (fs::directory_iterator it(m_opts.blocks_dir); it != fs::directory_iterator(); it++) {
625 const std::string path = fs::PathToString(it->path().filename());
626 if (fs::is_regular_file(*it) &&
627 path.length() == 12 &&
628 path.ends_with(".dat"))
629 {
630 if (path.starts_with("blk")) {
631 mapBlockFiles[path.substr(3, 5)] = it->path();
632 } else if (path.starts_with("rev")) {
633 remove(it->path());
634 }
635 }
636 }
637
638 // Remove all block files that aren't part of a contiguous set starting at
639 // zero by walking the ordered map (keys are block file indices) by
640 // keeping a separate counter. Once we hit a gap (or if 0 doesn't exist)
641 // start removing block files.
642 int nContigCounter = 0;
643 for (const std::pair<const std::string, fs::path>& item : mapBlockFiles) {
644 if (LocaleIndependentAtoi<int>(item.first) == nContigCounter) {
645 nContigCounter++;
646 continue;
647 }
648 remove(item.second);
649 }
650}
651
652CBlockFileInfo* BlockManager::GetBlockFileInfo(size_t n)
653{
654 LOCK(cs_LastBlockFile);
655
656 return &m_blockfile_info.at(n);
657}
658
659bool BlockManager::ReadBlockUndo(CBlockUndo& blockundo, const CBlockIndex& index) const
660{
661 const FlatFilePos pos{WITH_LOCK(::cs_main, return index.GetUndoPos())};
662
663 // Open history file to read
664 AutoFile file{OpenUndoFile(pos, true)};
665 if (file.IsNull()) {
666 LogError("OpenUndoFile failed for %s while reading block undo", pos.ToString());
667 return false;
668 }
669 BufferedReader filein{std::move(file)};
670
671 try {
672 // Read block
673 HashVerifier verifier{filein}; // Use HashVerifier, as reserializing may lose data, c.f. commit d3424243
674
675 verifier << index.pprev->GetBlockHash();
676 verifier >> blockundo;
677
678 uint256 hashChecksum;
679 filein >> hashChecksum;
680
681 // Verify checksum
682 if (hashChecksum != verifier.GetHash()) {
683 LogError("Checksum mismatch at %s while reading block undo", pos.ToString());
684 return false;
685 }
686 } catch (const std::exception& e) {
687 LogError("Deserialize or I/O error - %s at %s while reading block undo", e.what(), pos.ToString());
688 return false;
689 }
690
691 return true;
692}
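// Editorial note, not part of the upstream file: as the read path above and
// WriteBlockUndo further down imply, each record in a rev?????.dat file is laid
// out roughly as
//
//   [network magic][undo size][serialized CBlockUndo][checksum]
//
// with nUndoPos pointing at the serialized undo data itself. The checksum
// commits to the previous block's hash followed by the undo data (HashVerifier
// is seeded with pprev->GetBlockHash() before deserializing), so an undo
// payload cannot silently be attached to the wrong block.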
693
694bool BlockManager::FlushUndoFile(int block_file, bool finalize)
695{
696 FlatFilePos undo_pos_old(block_file, m_blockfile_info[block_file].nUndoSize);
697 if (!m_undo_file_seq.Flush(undo_pos_old, finalize)) {
698 m_opts.notifications.flushError(_("Flushing undo file to disk failed. This is likely the result of an I/O error."));
699 return false;
700 }
701 return true;
702}
703
704bool BlockManager::FlushBlockFile(int blockfile_num, bool fFinalize, bool finalize_undo)
705{
706 bool success = true;
707 LOCK(cs_LastBlockFile);
708
709 if (m_blockfile_info.size() < 1) {
710 // Return if we haven't loaded any blockfiles yet. This happens during
711 // chainstate init, when we call ChainstateManager::MaybeRebalanceCaches() (which
712 // then calls FlushStateToDisk()), resulting in a call to this function before we
713 // have populated `m_blockfile_info` via LoadBlockIndexDB().
714 return true;
715 }
716 assert(static_cast<int>(m_blockfile_info.size()) > blockfile_num);
717
718 FlatFilePos block_pos_old(blockfile_num, m_blockfile_info[blockfile_num].nSize);
719 if (!m_block_file_seq.Flush(block_pos_old, fFinalize)) {
720 m_opts.notifications.flushError(_("Flushing block file to disk failed. This is likely the result of an I/O error."));
721 success = false;
722 }
723 // we do not always flush the undo file, as the chain tip may be lagging behind the incoming blocks,
724 // e.g. during IBD or a sync after a node going offline
725 if (!fFinalize || finalize_undo) {
726 if (!FlushUndoFile(blockfile_num, finalize_undo)) {
727 success = false;
728 }
729 }
730 return success;
731}
732
733BlockfileType BlockManager::BlockfileTypeForHeight(int height)
734{
735 if (!m_snapshot_height) {
736 return BlockfileType::NORMAL;
737 }
738 return (height >= *m_snapshot_height) ? BlockfileType::ASSUMED : BlockfileType::NORMAL;
739}
740
741bool BlockManager::FlushChainstateBlockFile(int tip_height)
742{
743 LOCK(cs_LastBlockFile);
744 auto& cursor = m_blockfile_cursors[BlockfileTypeForHeight(tip_height)];
745 // If the cursor does not exist, it means an assumeutxo snapshot is loaded,
746 // but no blocks past the snapshot height have been written yet, so there
747 // is no data associated with the chainstate, and it is safe not to flush.
748 if (cursor) {
749 return FlushBlockFile(cursor->file_num, /*fFinalize=*/false, /*finalize_undo=*/false);
750 }
751 // No need to log warnings in this case.
752 return true;
753}
754
755uint64_t BlockManager::CalculateCurrentUsage()
756{
757 LOCK(cs_LastBlockFile);
758
759 uint64_t retval = 0;
760 for (const CBlockFileInfo& file : m_blockfile_info) {
761 retval += file.nSize + file.nUndoSize;
762 }
763 return retval;
764}
765
766void BlockManager::UnlinkPrunedFiles(const std::set<int>& setFilesToPrune) const
767{
768 std::error_code ec;
769 for (std::set<int>::iterator it = setFilesToPrune.begin(); it != setFilesToPrune.end(); ++it) {
770 FlatFilePos pos(*it, 0);
771 const bool removed_blockfile{fs::remove(m_block_file_seq.FileName(pos), ec)};
772 const bool removed_undofile{fs::remove(m_undo_file_seq.FileName(pos), ec)};
773 if (removed_blockfile || removed_undofile) {
774 LogDebug(BCLog::BLOCKSTORAGE, "Prune: %s deleted blk/rev (%05u)\n", __func__, *it);
775 }
776 }
777}
778
779AutoFile BlockManager::OpenBlockFile(const FlatFilePos& pos, bool fReadOnly) const
780{
781 return AutoFile{m_block_file_seq.Open(pos, fReadOnly), m_obfuscation};
782}
783
784/** Open an undo file (rev?????.dat) */
785AutoFile BlockManager::OpenUndoFile(const FlatFilePos& pos, bool fReadOnly) const
786{
787 return AutoFile{m_undo_file_seq.Open(pos, fReadOnly), m_obfuscation};
788}
789
790fs::path BlockManager::GetBlockPosFilename(const FlatFilePos& pos) const
791{
792 return m_block_file_seq.FileName(pos);
793}
794
795FlatFilePos BlockManager::FindNextBlockPos(unsigned int nAddSize, unsigned int nHeight, uint64_t nTime)
796{
797 LOCK(cs_LastBlockFile);
798
799 const BlockfileType chain_type = BlockfileTypeForHeight(nHeight);
800
801 if (!m_blockfile_cursors[chain_type]) {
802 // If a snapshot is loaded during runtime, we may not have initialized this cursor yet.
803 assert(chain_type == BlockfileType::ASSUMED);
804 const auto new_cursor = BlockfileCursor{this->MaxBlockfileNum() + 1};
805 m_blockfile_cursors[chain_type] = new_cursor;
806 LogDebug(BCLog::BLOCKSTORAGE, "[%s] initializing blockfile cursor to %s\n", chain_type, new_cursor);
807 }
808 const int last_blockfile = m_blockfile_cursors[chain_type]->file_num;
809
810 int nFile = last_blockfile;
811 if (static_cast<int>(m_blockfile_info.size()) <= nFile) {
812 m_blockfile_info.resize(nFile + 1);
813 }
814
815 bool finalize_undo = false;
816 unsigned int max_blockfile_size{MAX_BLOCKFILE_SIZE};
817 // Use smaller blockfiles in test-only -fastprune mode - but avoid
818 // the possibility of having a block not fit into the block file.
819 if (m_opts.fast_prune) {
820 max_blockfile_size = 0x10000; // 64kiB
821 if (nAddSize >= max_blockfile_size) {
822 // dynamically adjust the blockfile size to be larger than the added size
823 max_blockfile_size = nAddSize + 1;
824 }
825 }
826 assert(nAddSize < max_blockfile_size);
827
828 while (m_blockfile_info[nFile].nSize + nAddSize >= max_blockfile_size) {
829 // when the undo file is keeping up with the block file, we want to flush it explicitly
830 // when it is lagging behind (more blocks arrive than are being connected), we let the
831 // undo block write case handle it
832 finalize_undo = (static_cast<int>(m_blockfile_info[nFile].nHeightLast) ==
833 Assert(m_blockfile_cursors[chain_type])->undo_height);
834
835 // Try the next unclaimed blockfile number
836 nFile = this->MaxBlockfileNum() + 1;
837 // Set to increment MaxBlockfileNum() for next iteration
838 m_blockfile_cursors[chain_type] = BlockfileCursor{nFile};
839
840 if (static_cast<int>(m_blockfile_info.size()) <= nFile) {
841 m_blockfile_info.resize(nFile + 1);
842 }
843 }
844 FlatFilePos pos;
845 pos.nFile = nFile;
846 pos.nPos = m_blockfile_info[nFile].nSize;
847
848 if (nFile != last_blockfile) {
849 LogDebug(BCLog::BLOCKSTORAGE, "Leaving block file %i: %s (onto %i) (height %i)\n",
850 last_blockfile, m_blockfile_info[last_blockfile].ToString(), nFile, nHeight);
851
852 // Do not propagate the return code. The flush concerns a previous block
853 // and undo file that has already been written to. If a flush fails
854 // here, and we crash, there is no expected additional block data
855 // inconsistency arising from the flush failure here. However, the undo
856 // data may be inconsistent after a crash if the flush is called during
857 // a reindex. A flush error might also leave some of the data files
858 // untrimmed.
859 if (!FlushBlockFile(last_blockfile, /*fFinalize=*/true, finalize_undo)) {
860 LogPrintLevel(BCLog::BLOCKSTORAGE, BCLog::Level::Warning,
861 "Failed to flush previous block file %05i (finalize=1, finalize_undo=%i) before opening new block file %05i\n",
862 last_blockfile, finalize_undo, nFile);
863 }
864 // No undo data yet in the new file, so reset our undo-height tracking.
865 m_blockfile_cursors[chain_type] = BlockfileCursor{nFile};
866 }
867
868 m_blockfile_info[nFile].AddBlock(nHeight, nTime);
869 m_blockfile_info[nFile].nSize += nAddSize;
870
871 bool out_of_space;
872 size_t bytes_allocated = m_block_file_seq.Allocate(pos, nAddSize, out_of_space);
873 if (out_of_space) {
874 m_opts.notifications.fatalError(_("Disk space is too low!"));
875 return {};
876 }
877 if (bytes_allocated != 0 && IsPruneMode()) {
878 m_check_for_pruning = true;
879 }
880
881 m_dirty_fileinfo.insert(nFile);
882 return pos;
883}
884
885void BlockManager::UpdateBlockInfo(const CBlock& block, unsigned int nHeight, const FlatFilePos& pos)
886{
887 LOCK(cs_LastBlockFile);
888
889 // Update the cursor so it points to the last file.
890 const BlockfileType chain_type{BlockfileTypeForHeight(nHeight)};
891 auto& cursor{m_blockfile_cursors[chain_type]};
892 if (!cursor || cursor->file_num < pos.nFile) {
893 m_blockfile_cursors[chain_type] = BlockfileCursor{pos.nFile};
894 }
895
896 // Update the file information with the current block.
897 const unsigned int added_size = ::GetSerializeSize(TX_WITH_WITNESS(block));
898 const int nFile = pos.nFile;
899 if (static_cast<int>(m_blockfile_info.size()) <= nFile) {
900 m_blockfile_info.resize(nFile + 1);
901 }
902 m_blockfile_info[nFile].AddBlock(nHeight, block.GetBlockTime());
903 m_blockfile_info[nFile].nSize = std::max(pos.nPos + added_size, m_blockfile_info[nFile].nSize);
904 m_dirty_fileinfo.insert(nFile);
905}
906
907bool BlockManager::FindUndoPos(BlockValidationState& state, int nFile, FlatFilePos& pos, unsigned int nAddSize)
908{
909 pos.nFile = nFile;
910
911 LOCK(cs_LastBlockFile);
912
913 pos.nPos = m_blockfile_info[nFile].nUndoSize;
914 m_blockfile_info[nFile].nUndoSize += nAddSize;
915 m_dirty_fileinfo.insert(nFile);
916
917 bool out_of_space;
918 size_t bytes_allocated = m_undo_file_seq.Allocate(pos, nAddSize, out_of_space);
919 if (out_of_space) {
920 return FatalError(m_opts.notifications, state, _("Disk space is too low!"));
921 }
922 if (bytes_allocated != 0 && IsPruneMode()) {
923 m_check_for_pruning = true;
924 }
925
926 return true;
927}
928
929bool BlockManager::WriteBlockUndo(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex& block)
930{
931 AssertLockHeld(::cs_main);
932 const BlockfileType type = BlockfileTypeForHeight(block.nHeight);
933 auto& cursor = *Assert(WITH_LOCK(cs_LastBlockFile, return m_blockfile_cursors[type]));
934
935 // Write undo information to disk
936 if (block.GetUndoPos().IsNull()) {
937 FlatFilePos pos;
938 const auto blockundo_size{static_cast<uint32_t>(GetSerializeSize(blockundo))};
939 if (!FindUndoPos(state, block.nFile, pos, blockundo_size + UNDO_DATA_DISK_OVERHEAD)) {
940 LogError("FindUndoPos failed for %s while writing block undo", pos.ToString());
941 return false;
942 }
943
944 // Open history file to append
945 AutoFile file{OpenUndoFile(pos)};
946 if (file.IsNull()) {
947 LogError("OpenUndoFile failed for %s while writing block undo", pos.ToString());
948 return FatalError(m_opts.notifications, state, _("Failed to write undo data."));
949 }
950 {
951 BufferedWriter fileout{file};
952
953 // Write index header
954 fileout << GetParams().MessageStart() << blockundo_size;
955 pos.nPos += STORAGE_HEADER_BYTES;
956 {
957 // Calculate checksum
958 HashWriter hasher{};
959 hasher << block.pprev->GetBlockHash() << blockundo;
960 // Write undo data & checksum
961 fileout << blockundo << hasher.GetHash();
962 }
963 // BufferedWriter will flush pending data to file when fileout goes out of scope.
964 }
965
966 // Make sure that the file is closed before we call `FlushUndoFile`.
967 if (file.fclose() != 0) {
968 LogError("Failed to close block undo file %s: %s", pos.ToString(), SysErrorString(errno));
969 return FatalError(m_opts.notifications, state, _("Failed to close block undo file."));
970 }
971
972 // rev files are written in block height order, whereas blk files are written as blocks come in (often out of order)
973 // we want to flush the rev (undo) file once we've written the last block, which is indicated by the last height
974 // in the block file info as below; note that this does not catch the case where the undo writes are keeping up
975 // with the block writes (usually when a synced up node is getting newly mined blocks) -- this case is caught in
976 // the FindNextBlockPos function
977 if (pos.nFile < cursor.file_num && static_cast<uint32_t>(block.nHeight) == m_blockfile_info[pos.nFile].nHeightLast) {
978 // Do not propagate the return code, a failed flush here should not
979 // be an indication for a failed write. If it were propagated here,
980 // the caller would assume the undo data not to be written, when in
981 // fact it is. Note though, that a failed flush might leave the data
982 // file untrimmed.
983 if (!FlushUndoFile(pos.nFile, true)) {
984 LogPrintLevel(BCLog::BLOCKSTORAGE, BCLog::Level::Warning, "Failed to flush undo file %05i\n", pos.nFile);
985 }
986 } else if (pos.nFile == cursor.file_num && block.nHeight > cursor.undo_height) {
987 cursor.undo_height = block.nHeight;
988 }
989 // update nUndoPos in block index
990 block.nUndoPos = pos.nPos;
991 block.nStatus |= BLOCK_HAVE_UNDO;
992 m_dirty_blockindex.insert(&block);
993 }
994
995 return true;
996}
997
998bool BlockManager::ReadBlock(CBlock& block, const FlatFilePos& pos, const std::optional<uint256>& expected_hash) const
999{
1000 block.SetNull();
1001
1002 // Open history file to read
1003 std::vector<std::byte> block_data;
1004 if (!ReadRawBlock(block_data, pos)) {
1005 return false;
1006 }
1007
1008 try {
1009 // Read block
1010 SpanReader{block_data} >> TX_WITH_WITNESS(block);
1011 } catch (const std::exception& e) {
1012 LogError("Deserialize or I/O error - %s at %s while reading block", e.what(), pos.ToString());
1013 return false;
1014 }
1015
1016 const auto block_hash{block.GetHash()};
1017
1018 // Check the header
1019 if (!CheckProofOfWork(block_hash, block.nBits, GetConsensus())) {
1020 LogError("Errors in block header at %s while reading block", pos.ToString());
1021 return false;
1022 }
1023
1024 // Signet only: check block solution
1025 if (GetConsensus().signet_blocks && !CheckSignetBlockSolution(block, GetConsensus())) {
1026 LogError("Errors in block solution at %s while reading block", pos.ToString());
1027 return false;
1028 }
1029
1030 if (expected_hash && block_hash != *expected_hash) {
1031 LogError("GetHash() doesn't match index at %s while reading block (%s != %s)",
1032 pos.ToString(), block_hash.ToString(), expected_hash->ToString());
1033 return false;
1034 }
1035
1036 return true;
1037}
1038
1039bool BlockManager::ReadBlock(CBlock& block, const CBlockIndex& index) const
1040{
1041 const FlatFilePos block_pos{WITH_LOCK(cs_main, return index.GetBlockPos())};
1042 return ReadBlock(block, block_pos, index.GetBlockHash());
1043}
1044
1045bool BlockManager::ReadRawBlock(std::vector<std::byte>& block, const FlatFilePos& pos) const
1046{
1047 if (pos.nPos < STORAGE_HEADER_BYTES) {
1048 // If nPos is less than STORAGE_HEADER_BYTES, we can't read the header that precedes the block data
1049 // This would cause an unsigned integer underflow when trying to position the file cursor
1050 // This can happen after pruning or default constructed positions
1051 LogError("Failed for %s while reading raw block storage header", pos.ToString());
1052 return false;
1053 }
1054 AutoFile filein{OpenBlockFile({pos.nFile, pos.nPos - STORAGE_HEADER_BYTES}, /*fReadOnly=*/true)};
1055 if (filein.IsNull()) {
1056 LogError("OpenBlockFile failed for %s while reading raw block", pos.ToString());
1057 return false;
1058 }
1059
1060 try {
1061 MessageStartChars blk_start;
1062 unsigned int blk_size;
1063
1064 filein >> blk_start >> blk_size;
1065
1066 if (blk_start != GetParams().MessageStart()) {
1067 LogError("Block magic mismatch for %s: %s versus expected %s while reading raw block",
1068 pos.ToString(), HexStr(blk_start), HexStr(GetParams().MessageStart()));
1069 return false;
1070 }
1071
1072 if (blk_size > MAX_SIZE) {
1073 LogError("Block data is larger than maximum deserialization size for %s: %s versus %s while reading raw block",
1074 pos.ToString(), blk_size, MAX_SIZE);
1075 return false;
1076 }
1077
1078 block.resize(blk_size); // Zeroing of memory is intentional here
1079 filein.read(block);
1080 } catch (const std::exception& e) {
1081 LogError("Read from block file failed: %s for %s while reading raw block", e.what(), pos.ToString());
1082 return false;
1083 }
1084
1085 return true;
1086}
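// Editorial note, not part of the upstream file: ReadRawBlock above shows the
// on-disk layout of blk?????.dat records,
//
//   [network magic][block size][serialized block]
//
// and that the FlatFilePos stored in the block index points at the serialized
// block itself. That is why the reader seeks back by STORAGE_HEADER_BYTES (the
// magic plus the size field) to validate the record header before reading the
// payload, and why positions smaller than the header size are rejected early.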
1087
1088FlatFilePos BlockManager::WriteBlock(const CBlock& block, int nHeight)
1089{
1090 const unsigned int block_size{static_cast<unsigned int>(GetSerializeSize(TX_WITH_WITNESS(block)))};
1091 FlatFilePos pos{FindNextBlockPos(block_size + STORAGE_HEADER_BYTES, nHeight, block.GetBlockTime())};
1092 if (pos.IsNull()) {
1093 LogError("FindNextBlockPos failed for %s while writing block", pos.ToString());
1094 return FlatFilePos();
1095 }
1096 AutoFile file{OpenBlockFile(pos, /*fReadOnly=*/false)};
1097 if (file.IsNull()) {
1098 LogError("OpenBlockFile failed for %s while writing block", pos.ToString());
1099 m_opts.notifications.fatalError(_("Failed to write block."));
1100 return FlatFilePos();
1101 }
1102 {
1103 BufferedWriter fileout{file};
1104
1105 // Write index header
1106 fileout << GetParams().MessageStart() << block_size;
1107 pos.nPos += STORAGE_HEADER_BYTES;
1108 // Write block
1109 fileout << TX_WITH_WITNESS(block);
1110 }
1111
1112 if (file.fclose() != 0) {
1113 LogError("Failed to close block file %s: %s", pos.ToString(), SysErrorString(errno));
1114 m_opts.notifications.fatalError(_("Failed to close file when writing block."));
1115 return FlatFilePos();
1116 }
1117
1118 return pos;
1119}
1120
1121static Obfuscation InitBlocksdirXorKey(const kernel::BlockManagerOpts& opts)
1122{
1123 // Bytes are serialized without length indicator, so this is also the exact
1124 // size of the XOR-key file.
1125 std::array<std::byte, Obfuscation::KEY_SIZE> obfuscation{};
1126
1127 // Consider this to be the first run if the blocksdir contains only hidden
1128 // files (those which start with a .). Checking for a fully-empty dir would
1129 // be too aggressive as a .lock file may have already been written.
1130 bool first_run = true;
1131 for (const auto& entry : fs::directory_iterator(opts.blocks_dir)) {
1132 const std::string path = fs::PathToString(entry.path().filename());
1133 if (!entry.is_regular_file() || !path.starts_with('.')) {
1134 first_run = false;
1135 break;
1136 }
1137 }
1138
1139 if (opts.use_xor && first_run) {
1140 // Only use random fresh key when the boolean option is set and on the
1141 // very first start of the program.
1142 FastRandomContext{}.fillrand(obfuscation);
1143 }
1144
1145 const fs::path xor_key_path{opts.blocks_dir / "xor.dat"};
1146 if (fs::exists(xor_key_path)) {
1147 // A pre-existing xor key file has priority.
1148 AutoFile xor_key_file{fsbridge::fopen(xor_key_path, "rb")};
1149 xor_key_file >> obfuscation;
1150 } else {
1151 // Create initial or missing xor key file
1152 AutoFile xor_key_file{fsbridge::fopen(xor_key_path,
1153#ifdef __MINGW64__
1154 "wb" // Temporary workaround for https://github.com/bitcoin/bitcoin/issues/30210
1155#else
1156 "wbx"
1157#endif
1158 )};
1159 xor_key_file << obfuscation;
1160 if (xor_key_file.fclose() != 0) {
1161 throw std::runtime_error{strprintf("Error closing XOR key file %s: %s",
1162 fs::PathToString(xor_key_path),
1163 SysErrorString(errno))};
1164 }
1165 }
1166 // If the user disabled the key, it must be zero.
1167 if (!opts.use_xor && obfuscation != decltype(obfuscation){}) {
1168 throw std::runtime_error{
1169 strprintf("The blocksdir XOR-key can not be disabled when a random key was already stored! "
1170 "Stored key: '%s', stored path: '%s'.",
1171 HexStr(obfuscation), fs::PathToString(xor_key_path)),
1172 };
1173 }
1174 LogInfo("Using obfuscation key for blocksdir *.dat files (%s): '%s'\n", fs::PathToString(opts.blocks_dir), HexStr(obfuscation));
1175 return Obfuscation{obfuscation};
1176}
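// Editorial note, not part of the upstream file: a short summary of the key
// handling above. On a first run with use_xor enabled, Obfuscation::KEY_SIZE
// random bytes are generated and written to <blocksdir>/xor.dat ("wbx" refuses
// to overwrite an existing file; plain "wb" is used on MinGW as a workaround).
// On later runs an existing xor.dat always takes priority, so previously
// written *.dat files keep being de-obfuscated with the same key, and turning
// use_xor off after a non-zero key was stored is rejected with a runtime error.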
1177
1178BlockManager::BlockManager(const util::SignalInterrupt& interrupt, Options opts)
1179 : m_prune_mode{opts.prune_target > 0},
1180 m_obfuscation{InitBlocksdirXorKey(opts)},
1181 m_opts{std::move(opts)},
1182 m_block_file_seq{FlatFileSeq{m_opts.blocks_dir, "blk", m_opts.fast_prune ? 0x4000 /* 16kB */ : BLOCKFILE_CHUNK_SIZE}},
1183 m_undo_file_seq{FlatFileSeq{m_opts.blocks_dir, "rev", UNDOFILE_CHUNK_SIZE}},
1184 m_interrupt{interrupt}
1185{
1186 m_block_tree_db = std::make_unique<BlockTreeDB>(m_opts.block_tree_db_params);
1187
1188 if (m_opts.block_tree_db_params.wipe_data) {
1189 m_block_tree_db->WriteReindexing(true);
1190 m_blockfiles_indexed = false;
1191 // If we're reindexing in prune mode, wipe away unusable block files and all undo data files
1192 if (m_prune_mode) {
1193 CleanupBlockRevFiles();
1194 }
1195 }
1196}
1197
1198class ImportingNow
1199{
1200 std::atomic<bool>& m_importing;
1201
1202public:
1203 ImportingNow(std::atomic<bool>& importing) : m_importing{importing}
1204 {
1205 assert(m_importing == false);
1206 m_importing = true;
1207 }
1208 ~ImportingNow()
1209 {
1210 assert(m_importing == true);
1211 m_importing = false;
1212 }
1213};
1214
1215void ImportBlocks(ChainstateManager& chainman, std::span<const fs::path> import_paths)
1216{
1217 ImportingNow imp{chainman.m_blockman.m_importing};
1218
1219 // -reindex
1220 if (!chainman.m_blockman.m_blockfiles_indexed) {
1221 int nFile = 0;
1222 // Map of disk positions for blocks with unknown parent (only used for reindex);
1223 // parent hash -> child disk position, multiple children can have the same parent.
1224 std::multimap<uint256, FlatFilePos> blocks_with_unknown_parent;
1225 while (true) {
1226 FlatFilePos pos(nFile, 0);
1227 if (!fs::exists(chainman.m_blockman.GetBlockPosFilename(pos))) {
1228 break; // No block files left to reindex
1229 }
1230 AutoFile file{chainman.m_blockman.OpenBlockFile(pos, /*fReadOnly=*/true)};
1231 if (file.IsNull()) {
1232 break; // This error is logged in OpenBlockFile
1233 }
1234 LogInfo("Reindexing block file blk%05u.dat...", (unsigned int)nFile);
1235 chainman.LoadExternalBlockFile(file, &pos, &blocks_with_unknown_parent);
1236 if (chainman.m_interrupt) {
1237 LogInfo("Interrupt requested. Exit reindexing.");
1238 return;
1239 }
1240 nFile++;
1241 }
1242 WITH_LOCK(::cs_main, chainman.m_blockman.m_block_tree_db->WriteReindexing(false));
1243 chainman.m_blockman.m_blockfiles_indexed = true;
1244 LogInfo("Reindexing finished");
1245 // To avoid ending up in a situation without genesis block, re-try initializing (no-op if reindexing worked):
1246 chainman.ActiveChainstate().LoadGenesisBlock();
1247 }
1248
1249 // -loadblock=
1250 for (const fs::path& path : import_paths) {
1251 AutoFile file{fsbridge::fopen(path, "rb")};
1252 if (!file.IsNull()) {
1253 LogInfo("Importing blocks file %s...", fs::PathToString(path));
1254 chainman.LoadExternalBlockFile(file);
1255 if (chainman.m_interrupt) {
1256 LogInfo("Interrupt requested. Exit block importing.");
1257 return;
1258 }
1259 } else {
1260 LogPrintf("Warning: Could not open blocks file %s\n", fs::PathToString(path));
1261 }
1262 }
1263
1264 // scan for better chains in the block chain database, that are not yet connected in the active best chain
1265
1266 // We can't hold cs_main during ActivateBestChain even though we're accessing
1267 // the chainman unique_ptrs since ABC requires us not to be holding cs_main, so retrieve
1268 // the relevant pointers before the ABC call.
1269 for (Chainstate* chainstate : WITH_LOCK(::cs_main, return chainman.GetAll())) {
1270 BlockValidationState state;
1271 if (!chainstate->ActivateBestChain(state, nullptr)) {
1272 chainman.GetNotifications().fatalError(strprintf(_("Failed to connect best block (%s)."), state.ToString()));
1273 return;
1274 }
1275 }
1276 // End scope of ImportingNow
1277}
1278
1279std::ostream& operator<<(std::ostream& os, const BlockfileType& type) {
1280 switch(type) {
1281 case BlockfileType::NORMAL: os << "normal"; break;
1282 case BlockfileType::ASSUMED: os << "assumed"; break;
1283 default: os.setstate(std::ios_base::failbit);
1284 }
1285 return os;
1286}
1287
1288std::ostream& operator<<(std::ostream& os, const BlockfileCursor& cursor) {
1289 os << strprintf("BlockfileCursor(file_num=%d, undo_height=%d)", cursor.file_num, cursor.undo_height);
1290 return os;
1291}
1292} // namespace node
void CheckBlockDataAvailability(BlockManager &blockman, const CBlockIndex &blockindex, bool check_for_undo)
Definition: blockchain.cpp:649
arith_uint256 GetBlockProof(const CBlockIndex &block)
Definition: chain.cpp:127
@ BLOCK_VALID_TREE
All parent headers found, difficulty matches, timestamp >= median previous.
Definition: chain.h:100
@ BLOCK_HAVE_UNDO
undo data available in rev*.dat
Definition: chain.h:125
@ BLOCK_HAVE_DATA
full block available in blk*.dat
Definition: chain.h:124
@ BLOCK_FAILED_CHILD
descends from failed block
Definition: chain.h:129
@ BLOCK_FAILED_MASK
Definition: chain.h:130
static constexpr int32_t SEQ_ID_INIT_FROM_DISK
Definition: chain.h:40
#define Assert(val)
Identity function.
Definition: check.h:113
Non-refcounted RAII wrapper for FILE*.
Definition: streams.h:371
Wrapper that buffers reads from an underlying stream.
Definition: streams.h:625
Wrapper that buffers writes to an underlying stream.
Definition: streams.h:667
Nodes collect new transactions into a block, hash them into a hash tree, and scan through nonce value...
Definition: block.h:22
uint32_t nBits
Definition: block.h:29
int64_t GetBlockTime() const
Definition: block.h:61
uint256 hashPrevBlock
Definition: block.h:26
uint256 GetHash() const
Definition: block.cpp:11
Definition: block.h:69
void SetNull()
Definition: block.h:95
The block chain is a tree shaped structure starting with the genesis block at the root,...
Definition: chain.h:144
uint256 hashMerkleRoot
Definition: chain.h:191
std::string ToString() const
Definition: chain.cpp:16
CBlockIndex * pprev
pointer to the index of the predecessor of this block
Definition: chain.h:150
uint64_t m_chain_tx_count
(memory only) Number of transactions in the chain up to and including this block.
Definition: chain.h:179
void BuildSkip()
Build the skiplist pointer for this entry.
Definition: chain.cpp:121
arith_uint256 nChainWork
(memory only) Total amount of work (expected number of hashes) in the chain up to and including this ...
Definition: chain.h:168
uint32_t nTime
Definition: chain.h:192
unsigned int nTimeMax
(memory only) Maximum nTime in the chain up to and including this block.
Definition: chain.h:202
int32_t nSequenceId
(memory only) Sequential id assigned to distinguish order in which blocks are received.
Definition: chain.h:199
uint32_t nNonce
Definition: chain.h:194
uint256 GetBlockHash() const
Definition: chain.h:248
FlatFilePos GetUndoPos() const EXCLUSIVE_LOCKS_REQUIRED(
Definition: chain.h:224
uint32_t nBits
Definition: chain.h:193
bool RaiseValidity(enum BlockStatus nUpTo) EXCLUSIVE_LOCKS_REQUIRED(
Raise the validity level of this block index entry.
Definition: chain.h:312
unsigned int nTx
Number of transactions in this block.
Definition: chain.h:173
int32_t nVersion
block header
Definition: chain.h:190
int nHeight
height of the entry in the chain. The genesis block has height 0
Definition: chain.h:156
FlatFilePos GetBlockPos() const EXCLUSIVE_LOCKS_REQUIRED(
Definition: chain.h:213
const uint256 * phashBlock
pointer to the hash of the block, if any. Memory is owned by this CBlockIndex
Definition: chain.h:147
Undo information for a CBlock.
Definition: undo.h:63
int Height() const
Return the maximal height in the chain.
Definition: chain.h:467
const MessageStartChars & MessageStart() const
Definition: chainparams.h:91
std::optional< AssumeutxoData > AssumeutxoForBlockhash(const uint256 &blockhash) const
Definition: chainparams.h:124
uint64_t PruneAfterHeight() const
Definition: chainparams.h:102
Batch of changes queued to be written to a CDBWrapper.
Definition: dbwrapper.h:72
void Write(const K &key, const V &value)
Definition: dbwrapper.h:96
bool Read(const K &key, V &value) const
Definition: dbwrapper.h:213
CDBIterator * NewIterator()
Definition: dbwrapper.cpp:359
bool Exists(const K &key) const
Definition: dbwrapper.h:249
void Erase(const K &key, bool fSync=false)
Definition: dbwrapper.h:258
void WriteBatch(CDBBatch &batch, bool fSync=false)
Definition: dbwrapper.cpp:277
void Write(const K &key, const V &value, bool fSync=false)
Definition: dbwrapper.h:233
Used to marshal pointers into hashes for db storage.
Definition: chain.h:360
uint256 hashPrev
Definition: chain.h:370
uint256 ConstructBlockHash() const
Definition: chain.h:404
Chainstate stores and provides an API to update our local knowledge of the current best chain.
Definition: validation.h:532
CChain m_chain
The current chain of blockheaders we consult and build on.
Definition: validation.h:614
Provides an interface for creating and interacting with one or two chainstates: an IBD chainstate gen...
Definition: validation.h:899
SnapshotCompletionResult MaybeCompleteSnapshotValidation() EXCLUSIVE_LOCKS_REQUIRED(const CBlockIndex *GetSnapshotBaseBlock() const EXCLUSIVE_LOCKS_REQUIRED(Chainstate ActiveChainstate)() const
Once the background validation chainstate has reached the height which is the base of the UTXO snapsh...
Definition: validation.h:1125
kernel::Notifications & GetNotifications() const
Definition: validation.h:1012
bool IsInitialBlockDownload() const
Check whether we are doing an initial block download (synchronizing from disk or network)
const util::SignalInterrupt & m_interrupt
Definition: validation.h:1034
void LoadExternalBlockFile(AutoFile &file_in, FlatFilePos *dbp=nullptr, std::multimap< uint256, FlatFilePos > *blocks_with_unknown_parent=nullptr)
Import blocks from an external file.
const CChainParams & GetParams() const
Definition: validation.h:1007
Chainstate &InitializeChainstate(CTxMemPool *mempool) EXCLUSIVE_LOCKS_REQUIRED(std::vector< Chainstate * GetAll)()
Instantiate a new chainstate.
Definition: validation.h:1095
node::BlockManager m_blockman
A single BlockManager instance is shared across each constructed chainstate to avoid duplicating bloc...
Definition: validation.h:1038
Fast randomness source.
Definition: random.h:386
void fillrand(std::span< std::byte > output) noexcept
Fill a byte span with random bytes.
Definition: random.cpp:626
FlatFileSeq represents a sequence of numbered files storing raw data.
Definition: flatfile.h:46
FILE * Open(const FlatFilePos &pos, bool read_only=false) const
Open a handle to the file at the given position.
Definition: flatfile.cpp:33
fs::path FileName(const FlatFilePos &pos) const
Get the name of the file at the given position.
Definition: flatfile.cpp:28
bool Flush(const FlatFilePos &pos, bool finalize=false) const
Commit a file to disk, and optionally truncate off extra pre-allocated bytes if final.
Definition: flatfile.cpp:86
size_t Allocate(const FlatFilePos &pos, size_t add_size, bool &out_of_space) const
Allocate additional space in a file after the given starting position.
Definition: flatfile.cpp:57
Reads data from an underlying stream, while hashing the read data.
Definition: hash.h:151
A writer stream (for serialization) that computes a 256-bit hash.
Definition: hash.h:101
Minimal stream for reading from an existing byte array by std::span.
Definition: streams.h:83
std::string ToString() const
Definition: validation.h:111
constexpr bool IsNull() const
Definition: uint256.h:48
void WriteBatchSync(const std::vector< std::pair< int, const CBlockFileInfo * > > &fileInfo, int nLastFile, const std::vector< const CBlockIndex * > &blockinfo)
bool ReadLastBlockFile(int &nFile)
void WriteReindexing(bool fReindexing)
bool ReadFlag(const std::string &name, bool &fValue)
bool ReadBlockFileInfo(int nFile, CBlockFileInfo &info)
void ReadReindexing(bool &fReindexing)
void WriteFlag(const std::string &name, bool fValue)
virtual void fatalError(const bilingual_str &message)
The fatal error notification is sent to notify the user when an error occurs in kernel code that can'...
virtual void flushError(const bilingual_str &message)
The flush error notification is sent to notify the user that an error occurred while flushing block d...
const kernel::BlockManagerOpts m_opts
Definition: blockstorage.h:256
std::set< int > m_dirty_fileinfo
Dirty block file entries.
Definition: blockstorage.h:244
const FlatFileSeq m_undo_file_seq
Definition: blockstorage.h:259
RecursiveMutex cs_LastBlockFile
Definition: blockstorage.h:204
const CChainParams & GetParams() const
Definition: blockstorage.h:144
bool FlushChainstateBlockFile(int tip_height)
void FindFilesToPrune(std::set< int > &setFilesToPrune, int last_prune, const Chainstate &chain, ChainstateManager &chainman)
Prune block and undo files (blk???.dat and rev???.dat) so that the disk space used is less than a use...
void UpdateBlockInfo(const CBlock &block, unsigned int nHeight, const FlatFilePos &pos)
Update blockfile info while processing a block during reindex.
const Obfuscation m_obfuscation
Definition: blockstorage.h:238
CBlockIndex * LookupBlockIndex(const uint256 &hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
bool ReadBlockUndo(CBlockUndo &blockundo, const CBlockIndex &index) const
void PruneOneBlockFile(const int fileNumber) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
Mark one block file as pruned (modify associated database entries)
BlockfileType BlockfileTypeForHeight(int height)
std::atomic_bool m_blockfiles_indexed
Whether all blockfiles have been added to the block tree database.
Definition: blockstorage.h:275
std::vector< CBlockIndex * > GetAllBlockIndices() EXCLUSIVE_LOCKS_REQUIRED(std::multimap< CBlockIndex *, CBlockIndex * > m_blocks_unlinked
All pairs A->B, where A (or one of its ancestors) misses transactions, but B has transactions.
Definition: blockstorage.h:293
const Consensus::Params & GetConsensus() const
Definition: blockstorage.h:145
BlockManager(const util::SignalInterrupt &interrupt, Options opts)
std::set< CBlockIndex * > m_dirty_blockindex
Dirty block index entries.
Definition: blockstorage.h:241
fs::path GetBlockPosFilename(const FlatFilePos &pos) const
Translation to a filesystem path.
bool FlushBlockFile(int blockfile_num, bool fFinalize, bool finalize_undo)
Return false if block file or undo file flushing fails.
uint64_t GetPruneTarget() const
Attempt to stay below this number of bytes of block files.
Definition: blockstorage.h:352
int MaxBlockfileNum() const EXCLUSIVE_LOCKS_REQUIRED(cs_LastBlockFile)
Definition: blockstorage.h:222
void UnlinkPrunedFiles(const std::set< int > &setFilesToPrune) const
Actually unlink the specified files.
void WriteBlockIndexDB() EXCLUSIVE_LOCKS_REQUIRED(bool LoadBlockIndexDB(const std::optional< uint256 > &snapshot_blockhash) EXCLUSIVE_LOCKS_REQUIRED(void ScanAndUnlinkAlreadyPrunedFiles() EXCLUSIVE_LOCKS_REQUIRED(CBlockIndex * AddToBlockIndex(const CBlockHeader &block, CBlockIndex *&best_header) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
Remove any pruned block & undo files that are still on disk.
Definition: blockstorage.h:314
FlatFilePos FindNextBlockPos(unsigned int nAddSize, unsigned int nHeight, uint64_t nTime)
Helper function performing various preparations before a block can be saved to disk: Returns the corr...
const bool m_prune_mode
Definition: blockstorage.h:236
bool CheckBlockDataAvailability(const CBlockIndex &upper_block LIFETIMEBOUND, const CBlockIndex &lower_block LIFETIMEBOUND) EXCLUSIVE_LOCKS_REQUIRED(const CBlockIndex *GetFirstBlock(const CBlockIndex &upper_block LIFETIMEBOUND, uint32_t status_mask, const CBlockIndex *lower_block=nullptr) const EXCLUSIVE_LOCKS_REQUIRED(boo m_have_pruned)
Check if all blocks in the [upper_block, lower_block] range have data available.
Definition: blockstorage.h:394
bool FlushUndoFile(int block_file, bool finalize=false)
Return false if undo file flushing fails.
uint64_t CalculateCurrentUsage()
Calculate the amount of disk space the block & undo files currently use.
bool ReadRawBlock(std::vector< std::byte > &block, const FlatFilePos &pos) const
const util::SignalInterrupt & m_interrupt
Definition: blockstorage.h:266
const FlatFileSeq m_block_file_seq
Definition: blockstorage.h:258
CBlockIndex * InsertBlockIndex(const uint256 &hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
Create a new block index entry for a given block hash.
bool ReadBlock(CBlock &block, const FlatFilePos &pos, const std::optional< uint256 > &expected_hash) const
Functions for disk access for blocks.
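A brief sketch of ReadBlock() given a known FlatFilePos. Passing std::nullopt for the expected hash simply skips the comparison against a particular block; the helper name DumpBlockAt is made up for illustration:

#include <flatfile.h>
#include <logging.h>
#include <node/blockstorage.h>
#include <primitives/block.h>

#include <optional>

// Sketch: read a block back from disk and report which block it was.
bool DumpBlockAt(const node::BlockManager& blockman, const FlatFilePos& pos)
{
    CBlock block;
    // std::nullopt: don't compare the result against a particular expected hash.
    if (!blockman.ReadBlock(block, pos, /*expected_hash=*/std::nullopt)) {
        LogError("Failed to read block at %s\n", pos.ToString());
        return false;
    }
    LogInfo("Read block %s from %s\n", block.GetHash().ToString(), pos.ToString());
    return true;
}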
bool m_check_for_pruning
Global flag to indicate we should check to see if there are block/undo files that should be deleted.
Definition: blockstorage.h:234
bool FindUndoPos(BlockValidationState &state, int nFile, FlatFilePos &pos, unsigned int nAddSize)
bool IsPruneMode() const
Whether running in -prune mode.
Definition: blockstorage.h:349
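IsPruneMode(), GetPruneTarget() and CalculateCurrentUsage() together describe the pruning budget. A small hedged sketch that only reports how much of the target is in use; the function name is hypothetical:

#include <logging.h>
#include <node/blockstorage.h>

#include <cstdint>

void ReportPruneBudget(node::BlockManager& blockman)
{
    if (!blockman.IsPruneMode()) {
        LogInfo("Pruning disabled; no block-file size target\n");
        return;
    }
    const uint64_t used{blockman.CalculateCurrentUsage()}; // bytes in blk/rev files
    const uint64_t target{blockman.GetPruneTarget()};      // -prune target in bytes
    LogInfo("Block files use %u of %u target bytes\n", used, target);
}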
void CleanupBlockRevFiles() const
void FindFilesToPruneManual(std::set< int > &setFilesToPrune, int nManualPruneHeight, const Chainstate &chain, ChainstateManager &chainman)
std::atomic< bool > m_importing
Definition: blockstorage.h:267
bool WriteBlockUndo(const CBlockUndo &blockundo, BlockValidationState &state, CBlockIndex &block) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
FlatFilePos WriteBlock(const CBlock &block, int nHeight)
Store block on disk and update block file statistics.
Definition: blockstorage.h:338
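As the declaration suggests, WriteBlock() returns the FlatFilePos chosen for the block, with a null position signalling failure. A hedged sketch of that contract (StoreBlockSketch is an illustrative name, not part of this file):

#include <flatfile.h>
#include <logging.h>
#include <node/blockstorage.h>
#include <primitives/block.h>

// Sketch: persist a block and log where it ended up.
bool StoreBlockSketch(node::BlockManager& blockman, const CBlock& block, int height)
{
    const FlatFilePos pos{blockman.WriteBlock(block, height)};
    if (pos.IsNull()) {
        LogError("Failed to write block %s\n", block.GetHash().ToString());
        return false;
    }
    LogDebug(BCLog::BLOCKSTORAGE, "Stored block %s at %s\n",
             block.GetHash().ToString(), pos.ToString());
    return true;
}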
bool IsBlockPruned(const CBlockIndex &block) const EXCLUSIVE_LOCKS_REQUIRED(cs_main)
Check whether the block associated with this index entry is pruned or not.
void UpdatePruneLock(const std::string &name, const PruneLockInfo &lock_info) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
AutoFile OpenBlockFile(const FlatFilePos &pos, bool fReadOnly) const
Definition: blockstorage.h:403
std::vector< CBlockFileInfo > m_blockfile_info
Definition: blockstorage.h:205
CBlockFileInfo * GetBlockFileInfo(size_t n)
Get block file info entry for one block file.
bool LoadBlockIndex(const std::optional< uint256 > &snapshot_blockhash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
Load the blocktree off disk and into memory.
AutoFile OpenUndoFile(const FlatFilePos &pos, bool fReadOnly=false) const
Open an undo file (rev?????.dat)
std::optional< int > m_snapshot_height
The height of the base block of an assumeutxo snapshot, if one is in use.
Definition: blockstorage.h:291
ImportingNow(std::atomic< bool > &importing)
std::atomic< bool > & m_importing
256-bit opaque blob.
Definition: uint256.h:196
Helper class that manages an interrupt flag, and allows a thread or signal to interrupt another thread.
RecursiveMutex cs_main
Mutex to guard access to validation specific variables, such as reading or changing the chainstate.
Definition: cs_main.cpp:8
static bool exists(const path &p)
Definition: fs.h:95
static std::string PathToString(const path &path)
Convert path object to a byte string.
Definition: fs.h:157
std::string HexStr(const std::span< const uint8_t > s)
Convert a span of bytes to a lower-case hexadecimal string.
Definition: hex_base.cpp:31
#define LogPrintLevel(category, level,...)
Definition: logging.h:384
#define LogInfo(...)
Definition: logging.h:368
#define LogError(...)
Definition: logging.h:370
#define LogDebug(category,...)
Definition: logging.h:393
#define LogPrintf(...)
Definition: logging.h:373
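For reference, the logging macros above are used like ordinary printf-style calls; LogDebug and LogPrintLevel additionally take a BCLog category (and, for LogPrintLevel, a level). A short illustrative snippet using the BLOCKSTORAGE and PRUNE categories listed below:

#include <logging.h>

void LoggingSketch(int file_number)
{
    LogDebug(BCLog::BLOCKSTORAGE, "Flushing block file %d\n", file_number);
    LogPrintLevel(BCLog::PRUNE, BCLog::Level::Info, "Considering file %d for pruning\n", file_number);
    LogInfo("Block storage maintenance done\n");
    LogError("Example error path: could not open block file %d\n", file_number);
}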
unsigned int nHeight
std::array< uint8_t, 4 > MessageStartChars
@ BLOCKSTORAGE
Definition: logging.h:93
@ PRUNE
Definition: logging.h:80
FILE * fopen(const fs::path &p, const char *mode)
Definition: fs.cpp:25
static constexpr uint8_t DB_REINDEX_FLAG
static constexpr uint8_t DB_FLAG
static constexpr uint8_t DB_BLOCK_INDEX
static constexpr uint8_t DB_LAST_BLOCK
static constexpr uint8_t DB_BLOCK_FILES
Definition: messages.h:21
static const unsigned int UNDOFILE_CHUNK_SIZE
The pre-allocation chunk size for rev?????.dat files (since 0.8)
Definition: blockstorage.h:73
BlockfileType
Definition: blockstorage.h:102
@ ASSUMED
Definition: blockstorage.h:105
static auto InitBlocksdirXorKey(const BlockManager::Options &opts)
static const unsigned int BLOCKFILE_CHUNK_SIZE
The pre-allocation chunk size for blk?????.dat files (since 0.8)
Definition: blockstorage.h:71
static constexpr uint32_t STORAGE_HEADER_BYTES
Size of header written by WriteBlock before a serialized CBlock (8 bytes)
Definition: blockstorage.h:78
std::ostream & operator<<(std::ostream &os, const BlockfileType &type)
static constexpr uint32_t UNDO_DATA_DISK_OVERHEAD
Total overhead when writing undo data: header (8 bytes) plus checksum (32 bytes)
Definition: blockstorage.h:81
static const unsigned int MAX_BLOCKFILE_SIZE
The maximum size of a blk?????.dat file (since 0.8)
Definition: blockstorage.h:75
void ImportBlocks(ChainstateManager &chainman, std::span< const fs::path > import_paths)
std::string ToString(const T &t)
Locale-independent version of std::to_string.
Definition: string.h:245
bool CheckProofOfWork(uint256 hash, unsigned int nBits, const Consensus::Params &params)
Check whether a block hash satisfies the proof-of-work requirement specified by nBits.
Definition: pow.cpp:140
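A minimal sketch of CheckProofOfWork() applied to a block header, the same check performed when the block index is loaded; the consensus params would normally come from GetConsensus(), and the helper name is illustrative:

#include <consensus/params.h>
#include <pow.h>
#include <primitives/block.h>

bool HeaderPowValid(const CBlockHeader& header, const Consensus::Params& params)
{
    // nBits encodes the compact target the header hash must satisfy.
    return CheckProofOfWork(header.GetHash(), header.nBits, params);
}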
static constexpr TransactionSerParams TX_WITH_WITNESS
Definition: transaction.h:195
const char * name
Definition: rest.cpp:48
size_t GetSerializeSize(const T &t)
Definition: serialize.h:1094
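GetSerializeSize() is what the write path uses to size a block record. Combined with STORAGE_HEADER_BYTES listed above, a back-of-the-envelope estimate of the bytes one block occupies in a blk file could look like the sketch below; it assumes the TX_WITH_WITNESS params wrapper (listed above) can be applied to a CBlock:

#include <node/blockstorage.h>
#include <primitives/block.h>
#include <serialize.h>

#include <cstdint>

// Rough estimate only: header bytes written before the payload, plus the
// serialized (witness-including) block itself.
uint64_t ApproxBlockRecordBytes(const CBlock& block)
{
    return node::STORAGE_HEADER_BYTES + GetSerializeSize(TX_WITH_WITNESS(block));
}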
static constexpr uint64_t MAX_SIZE
The maximum size of a serialized object in bytes or number of elements (for eg vectors) when the size is encoded as CompactSize.
Definition: serialize.h:32
bool CheckSignetBlockSolution(const CBlock &block, const Consensus::Params &consensusParams)
Extract signature and check whether a block has a valid solution.
Definition: signet.cpp:125
Holds configuration for use during UTXO snapshot load and validation.
Definition: chainparams.h:35
uint64_t m_chain_tx_count
Used to populate the m_chain_tx_count value, which is used during BlockManager::LoadBlockIndex().
Definition: chainparams.h:45
Parameters that influence chain consensus.
Definition: params.h:84
bool wipe_data
If true, remove all existing data.
Definition: dbwrapper.h:41
int nFile
Definition: flatfile.h:16
std::string ToString() const
Definition: flatfile.cpp:23
unsigned int nPos
Definition: flatfile.h:17
bool IsNull() const
Definition: flatfile.h:36
An options struct for BlockManager, more ergonomically referred to as BlockManager::Options due to the using-declaration in BlockManager.
Notifications & notifications
bool operator()(const CBlockIndex *pa, const CBlockIndex *pb) const
bool operator()(const CBlockIndex *pa, const CBlockIndex *pb) const
#define LOCK2(cs1, cs2)
Definition: sync.h:260
#define LOCK(cs)
Definition: sync.h:259
#define WITH_LOCK(cs, code)
Run code while locking a mutex.
Definition: sync.h:290
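The three sync.h macros above in one self-contained illustration; the mutex and counter here are placeholders, not part of blockstorage:

#include <sync.h>

Mutex g_example_mutex;
int g_example_counter GUARDED_BY(g_example_mutex){0};

int ReadCounter()
{
    // WITH_LOCK runs a single expression/statement under the lock.
    return WITH_LOCK(g_example_mutex, return g_example_counter);
}

void BumpCounter()
{
    // LOCK holds the mutex for the rest of the scope.
    LOCK(g_example_mutex);
    ++g_example_counter;
}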
std::string SysErrorString(int err)
Return system error string from errno value.
Definition: syserror.cpp:17
static int count
#define EXCLUSIVE_LOCKS_REQUIRED(...)
Definition: threadsafety.h:51
#define strprintf
Format arguments and return the string or write to given std::ostream (see tinyformat::format doc for details)
Definition: tinyformat.h:1172
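strprintf in brief: type-safe tinyformat formatting into a std::string. The file-name example below is purely illustrative (the real block file naming lives in FlatFileSeq):

#include <tinyformat.h>

#include <string>

std::string ExampleBlockFileName(int n)
{
    // For n == 12 this yields "blk00012.dat".
    return strprintf("blk%05i.dat", n);
}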
consteval auto _(util::TranslatedLiteral str)
Definition: translation.h:79
bool FatalError(Notifications &notifications, BlockValidationState &state, const bilingual_str &message)
AssertLockHeld(pool.cs)
assert(!tx.IsCoinBase())
static const uint64_t MIN_DISK_SPACE_FOR_BLOCK_FILES
Definition: validation.h:83