Bitcoin Core 30.99.0
blockstorage.cpp
1// Copyright (c) 2011-present The Bitcoin Core developers
2// Distributed under the MIT software license, see the accompanying
3// file COPYING or http://www.opensource.org/licenses/mit-license.php.
4
5#include <node/blockstorage.h>
6
7#include <arith_uint256.h>
8#include <chain.h>
9#include <consensus/params.h>
10#include <crypto/hex_base.h>
11#include <dbwrapper.h>
12#include <flatfile.h>
13#include <hash.h>
15#include <kernel/chainparams.h>
18#include <kernel/types.h>
19#include <pow.h>
20#include <primitives/block.h>
22#include <random.h>
23#include <serialize.h>
24#include <signet.h>
25#include <streams.h>
26#include <sync.h>
27#include <tinyformat.h>
28#include <uint256.h>
29#include <undo.h>
30#include <util/check.h>
31#include <util/expected.h>
32#include <util/fs.h>
33#include <util/log.h>
34#include <util/obfuscation.h>
35#include <util/overflow.h>
36#include <util/result.h>
38#include <util/strencodings.h>
39#include <util/syserror.h>
40#include <util/time.h>
41#include <util/translation.h>
42#include <validation.h>
43
44#include <cerrno>
45#include <compare>
46#include <cstddef>
47#include <cstdio>
48#include <exception>
49#include <map>
50#include <optional>
51#include <ostream>
52#include <span>
53#include <stdexcept>
54#include <system_error>
55#include <unordered_map>
56
57namespace kernel {
58static constexpr uint8_t DB_BLOCK_FILES{'f'};
59static constexpr uint8_t DB_BLOCK_INDEX{'b'};
60static constexpr uint8_t DB_FLAG{'F'};
61static constexpr uint8_t DB_REINDEX_FLAG{'R'};
62static constexpr uint8_t DB_LAST_BLOCK{'l'};
63// Keys used in previous version that might still be found in the DB:
64// BlockTreeDB::DB_TXINDEX_BLOCK{'T'};
65// BlockTreeDB::DB_TXINDEX{'t'}
66// BlockTreeDB::ReadFlag("txindex")
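// Key layout note: every record key is a (prefix byte, payload) pair passed through the
// default serializer, e.g. a block index entry is stored under
// std::make_pair(DB_BLOCK_INDEX, block_hash). Minimal read sketch (hypothetical
// block_tree_db/hash variables, mirroring the accessors below):
//     CDiskBlockIndex disk_index;
//     const bool found{block_tree_db.Read(std::make_pair(DB_BLOCK_INDEX, hash), disk_index)};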
67
68bool BlockTreeDB::ReadBlockFileInfo(int nFile, CBlockFileInfo& info)
69{
70 return Read(std::make_pair(DB_BLOCK_FILES, nFile), info);
71}
72
73void BlockTreeDB::WriteReindexing(bool fReindexing)
74{
75 if (fReindexing) {
76 Write(DB_REINDEX_FLAG, uint8_t{'1'});
77 } else {
78 Erase(DB_REINDEX_FLAG);
79 }
80}
81
82void BlockTreeDB::ReadReindexing(bool& fReindexing)
83{
84 fReindexing = Exists(DB_REINDEX_FLAG);
85}
86
87bool BlockTreeDB::ReadLastBlockFile(int& nFile)
88{
89 return Read(DB_LAST_BLOCK, nFile);
90}
91
92void BlockTreeDB::WriteBatchSync(const std::vector<std::pair<int, const CBlockFileInfo*>>& fileInfo, int nLastFile, const std::vector<const CBlockIndex*>& blockinfo)
93{
94 CDBBatch batch(*this);
95 for (const auto& [file, info] : fileInfo) {
96 batch.Write(std::make_pair(DB_BLOCK_FILES, file), *info);
97 }
98 batch.Write(DB_LAST_BLOCK, nLastFile);
99 for (const CBlockIndex* bi : blockinfo) {
100 batch.Write(std::make_pair(DB_BLOCK_INDEX, bi->GetBlockHash()), CDiskBlockIndex{bi});
101 }
102 WriteBatch(batch, true);
103}
104
105void BlockTreeDB::WriteFlag(const std::string& name, bool fValue)
106{
107 Write(std::make_pair(DB_FLAG, name), fValue ? uint8_t{'1'} : uint8_t{'0'});
108}
109
110bool BlockTreeDB::ReadFlag(const std::string& name, bool& fValue)
111{
112 uint8_t ch;
113 if (!Read(std::make_pair(DB_FLAG, name), ch)) {
114 return false;
115 }
116 fValue = ch == uint8_t{'1'};
117 return true;
118}
119
120bool BlockTreeDB::LoadBlockIndexGuts(const Consensus::Params& consensusParams, std::function<CBlockIndex*(const uint256&)> insertBlockIndex, const util::SignalInterrupt& interrupt)
121{
123 std::unique_ptr<CDBIterator> pcursor(NewIterator());
124 pcursor->Seek(std::make_pair(DB_BLOCK_INDEX, uint256()));
125
126 // Load m_block_index
127 while (pcursor->Valid()) {
128 if (interrupt) return false;
129 std::pair<uint8_t, uint256> key;
130 if (pcursor->GetKey(key) && key.first == DB_BLOCK_INDEX) {
131 CDiskBlockIndex diskindex;
132 if (pcursor->GetValue(diskindex)) {
133 // Construct block index object
134 CBlockIndex* pindexNew = insertBlockIndex(diskindex.ConstructBlockHash());
135 pindexNew->pprev = insertBlockIndex(diskindex.hashPrev);
136 pindexNew->nHeight = diskindex.nHeight;
137 pindexNew->nFile = diskindex.nFile;
138 pindexNew->nDataPos = diskindex.nDataPos;
139 pindexNew->nUndoPos = diskindex.nUndoPos;
140 pindexNew->nVersion = diskindex.nVersion;
141 pindexNew->hashMerkleRoot = diskindex.hashMerkleRoot;
142 pindexNew->nTime = diskindex.nTime;
143 pindexNew->nBits = diskindex.nBits;
144 pindexNew->nNonce = diskindex.nNonce;
145 pindexNew->nStatus = diskindex.nStatus;
146 pindexNew->nTx = diskindex.nTx;
147
148 if (!CheckProofOfWork(pindexNew->GetBlockHash(), pindexNew->nBits, consensusParams)) {
149 LogError("%s: CheckProofOfWork failed: %s\n", __func__, pindexNew->ToString());
150 return false;
151 }
152
153 pcursor->Next();
154 } else {
155 LogError("%s: failed to read value\n", __func__);
156 return false;
157 }
158 } else {
159 break;
160 }
161 }
162
163 return true;
164}
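// Note: insertBlockIndex is expected to return a usable CBlockIndex* for any hash,
// creating an empty placeholder entry when none exists yet. BlockManager::LoadBlockIndex()
// below passes a lambda wrapping InsertBlockIndex() for this, which is what allows pprev
// to be linked even before the parent's own record has been visited.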
165
166std::string CBlockFileInfo::ToString() const
167{
168 return strprintf("CBlockFileInfo(blocks=%u, size=%u, heights=%u...%u, time=%s...%s)", nBlocks, nSize, nHeightFirst, nHeightLast, FormatISO8601Date(nTimeFirst), FormatISO8601Date(nTimeLast));
169}
170} // namespace kernel
171
172namespace node {
173
174bool CBlockIndexWorkComparator::operator()(const CBlockIndex* pa, const CBlockIndex* pb) const
175{
176 // First sort by most total work, ...
177 if (pa->nChainWork > pb->nChainWork) return false;
178 if (pa->nChainWork < pb->nChainWork) return true;
179
180 // ... then by earliest activatable time, ...
181 if (pa->nSequenceId < pb->nSequenceId) return false;
182 if (pa->nSequenceId > pb->nSequenceId) return true;
183
184 // Use pointer address as tie breaker (should only happen with blocks
185 // loaded from disk, as those share the same id: 0 for blocks on the
186 // best chain, 1 for all others).
187 if (pa < pb) return false;
188 if (pa > pb) return true;
189
190 // Identical blocks.
191 return false;
192}
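// Ordering note: this comparator sorts ascending by chain work, with ties broken so that
// the earlier-received block (lower nSequenceId) compares greater. A std::set ordered by
// it therefore keeps the preferred candidate at its end, which is how tip candidates are
// picked elsewhere (in validation).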
193
194bool CBlockIndexHeightOnlyComparator::operator()(const CBlockIndex* pa, const CBlockIndex* pb) const
195{
196 return pa->nHeight < pb->nHeight;
197}
198
199std::vector<CBlockIndex*> BlockManager::GetAllBlockIndices()
200{
202 std::vector<CBlockIndex*> rv;
203 rv.reserve(m_block_index.size());
204 for (auto& [_, block_index] : m_block_index) {
205 rv.push_back(&block_index);
206 }
207 return rv;
208}
209
210CBlockIndex* BlockManager::LookupBlockIndex(const uint256& hash)
211{
212 AssertLockHeld(cs_main);
213 BlockMap::iterator it = m_block_index.find(hash);
214 return it == m_block_index.end() ? nullptr : &it->second;
215}
216
217const CBlockIndex* BlockManager::LookupBlockIndex(const uint256& hash) const
218{
219 AssertLockHeld(cs_main);
220 BlockMap::const_iterator it = m_block_index.find(hash);
221 return it == m_block_index.end() ? nullptr : &it->second;
222}
223
224CBlockIndex* BlockManager::AddToBlockIndex(const CBlockHeader& block, CBlockIndex*& best_header)
225{
226 AssertLockHeld(cs_main);
227
228 auto [mi, inserted] = m_block_index.try_emplace(block.GetHash(), block);
229 if (!inserted) {
230 return &mi->second;
231 }
232 CBlockIndex* pindexNew = &(*mi).second;
233
234 // We assign the sequence id to blocks only when the full data is available,
235 // to avoid miners withholding blocks but broadcasting headers, to get a
236 // competitive advantage.
238
239 pindexNew->phashBlock = &((*mi).first);
240 BlockMap::iterator miPrev = m_block_index.find(block.hashPrevBlock);
241 if (miPrev != m_block_index.end()) {
242 pindexNew->pprev = &(*miPrev).second;
243 pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
244 pindexNew->BuildSkip();
245 }
246 pindexNew->nTimeMax = (pindexNew->pprev ? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime) : pindexNew->nTime);
247 pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew);
248 pindexNew->RaiseValidity(BLOCK_VALID_TREE);
249 if (best_header == nullptr || best_header->nChainWork < pindexNew->nChainWork) {
250 best_header = pindexNew;
251 }
252
253 m_dirty_blockindex.insert(pindexNew);
254
255 return pindexNew;
256}
257
258void BlockManager::PruneOneBlockFile(const int fileNumber)
259{
262
263 for (auto& entry : m_block_index) {
264 CBlockIndex* pindex = &entry.second;
265 if (pindex->nFile == fileNumber) {
266 pindex->nStatus &= ~BLOCK_HAVE_DATA;
267 pindex->nStatus &= ~BLOCK_HAVE_UNDO;
268 pindex->nFile = 0;
269 pindex->nDataPos = 0;
270 pindex->nUndoPos = 0;
271 m_dirty_blockindex.insert(pindex);
272
273 // Prune from m_blocks_unlinked -- any block we prune would have
274 // to be downloaded again in order to consider its chain, at which
275 // point it would be considered as a candidate for
276 // m_blocks_unlinked or setBlockIndexCandidates.
277 auto range = m_blocks_unlinked.equal_range(pindex->pprev);
278 while (range.first != range.second) {
279 std::multimap<CBlockIndex*, CBlockIndex*>::iterator _it = range.first;
280 range.first++;
281 if (_it->second == pindex) {
282 m_blocks_unlinked.erase(_it);
283 }
284 }
285 }
286 }
287
288 m_blockfile_info.at(fileNumber) = CBlockFileInfo{};
289 m_dirty_fileinfo.insert(fileNumber);
290}
291
292void BlockManager::FindFilesToPruneManual(
293 std::set<int>& setFilesToPrune,
294 int nManualPruneHeight,
295 const Chainstate& chain)
296{
297 assert(IsPruneMode() && nManualPruneHeight > 0);
298
300 if (chain.m_chain.Height() < 0) {
301 return;
302 }
303
304 const auto [min_block_to_prune, last_block_can_prune] = chain.GetPruneRange(nManualPruneHeight);
305
306 int count = 0;
307 for (int fileNumber = 0; fileNumber < this->MaxBlockfileNum(); fileNumber++) {
308 const auto& fileinfo = m_blockfile_info[fileNumber];
309 if (fileinfo.nSize == 0 || fileinfo.nHeightLast > (unsigned)last_block_can_prune || fileinfo.nHeightFirst < (unsigned)min_block_to_prune) {
310 continue;
311 }
312
313 PruneOneBlockFile(fileNumber);
314 setFilesToPrune.insert(fileNumber);
315 count++;
316 }
317 LogInfo("[%s] Prune (Manual): prune_height=%d removed %d blk/rev pairs",
318 chain.GetRole(), last_block_can_prune, count);
319}
320
321void BlockManager::FindFilesToPrune(
322 std::set<int>& setFilesToPrune,
323 int last_prune,
324 const Chainstate& chain,
325 ChainstateManager& chainman)
326{
328 // Compute `target` value with maximum size (in bytes) of blocks below the
329 // `last_prune` height which should be preserved and not pruned. The
330 // `target` value will be derived from the -prune preference provided by the
331 // user. If there is a historical chainstate being used to populate indexes
332 // and validate the snapshot, the target is divided by two so half of the
333 // block storage will be reserved for the historical chainstate, and the
334 // other half will be reserved for the most-work chainstate.
335 const int num_chainstates{chainman.HistoricalChainstate() ? 2 : 1};
336 const auto target = std::max(
337 MIN_DISK_SPACE_FOR_BLOCK_FILES, GetPruneTarget() / num_chainstates);
338 const uint64_t target_sync_height = chainman.m_best_header->nHeight;
339
340 if (chain.m_chain.Height() < 0 || target == 0) {
341 return;
342 }
343 if (static_cast<uint64_t>(chain.m_chain.Height()) <= chainman.GetParams().PruneAfterHeight()) {
344 return;
345 }
346
347 const auto [min_block_to_prune, last_block_can_prune] = chain.GetPruneRange(last_prune);
348
349 uint64_t nCurrentUsage = CalculateCurrentUsage();
350 // We don't check to prune until after we've allocated new space for files
351 // So we should leave a buffer under our target to account for another allocation
352 // before the next pruning.
353 uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
354 uint64_t nBytesToPrune;
355 int count = 0;
356
357 if (nCurrentUsage + nBuffer >= target) {
358 // On a prune event, the chainstate DB is flushed.
359 // To avoid excessive prune events negating the benefit of high dbcache
360 // values, we should not prune too rapidly.
361 // So when pruning in IBD, increase the buffer to avoid a re-prune too soon.
362 const auto chain_tip_height = chain.m_chain.Height();
363 if (chainman.IsInitialBlockDownload() && target_sync_height > (uint64_t)chain_tip_height) {
364 // Since this is only relevant during IBD, we assume blocks are at least 1 MB on average
365 static constexpr uint64_t average_block_size = 1000000; /* 1 MB */
366 const uint64_t remaining_blocks = target_sync_height - chain_tip_height;
367 nBuffer += average_block_size * remaining_blocks;
368 }
369
370 for (int fileNumber = 0; fileNumber < this->MaxBlockfileNum(); fileNumber++) {
371 const auto& fileinfo = m_blockfile_info[fileNumber];
372 nBytesToPrune = fileinfo.nSize + fileinfo.nUndoSize;
373
374 if (fileinfo.nSize == 0) {
375 continue;
376 }
377
378 if (nCurrentUsage + nBuffer < target) { // are we below our target?
379 break;
380 }
381
382 // don't prune files that could have a block that's not within the allowable
383 // prune range for the chain being pruned.
384 if (fileinfo.nHeightLast > (unsigned)last_block_can_prune || fileinfo.nHeightFirst < (unsigned)min_block_to_prune) {
385 continue;
386 }
387
388 PruneOneBlockFile(fileNumber);
389 // Queue up the files for removal
390 setFilesToPrune.insert(fileNumber);
391 nCurrentUsage -= nBytesToPrune;
392 count++;
393 }
394 }
395
396 LogDebug(BCLog::PRUNE, "[%s] target=%dMiB actual=%dMiB diff=%dMiB min_height=%d max_prune_height=%d removed %d blk/rev pairs\n",
397 chain.GetRole(), target / 1024 / 1024, nCurrentUsage / 1024 / 1024,
398 (int64_t(target) - int64_t(nCurrentUsage)) / 1024 / 1024,
399 min_block_to_prune, last_block_can_prune, count);
400}
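// Worked example (illustrative numbers, not from this file): with -prune=2200 and a single
// chainstate the target is 2200 MiB; with a historical (background) chainstate it halves to
// 1100 MiB per chainstate. Pruning starts once current usage plus the allocation buffer
// (BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE, roughly 17 MiB outside of IBD) reaches the
// target, and whole blk/rev file pairs are removed lowest-numbered first, skipping files
// whose blocks fall outside the prunable height range.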
401
402void BlockManager::UpdatePruneLock(const std::string& name, const PruneLockInfo& lock_info) {
404 m_prune_locks[name] = lock_info;
405}
406
407CBlockIndex* BlockManager::InsertBlockIndex(const uint256& hash)
408{
409 AssertLockHeld(cs_main);
410
411 if (hash.IsNull()) {
412 return nullptr;
413 }
414
415 const auto [mi, inserted]{m_block_index.try_emplace(hash)};
416 CBlockIndex* pindex = &(*mi).second;
417 if (inserted) {
418 pindex->phashBlock = &((*mi).first);
419 }
420 return pindex;
421}
422
423bool BlockManager::LoadBlockIndex(const std::optional<uint256>& snapshot_blockhash)
424{
425 if (!m_block_tree_db->LoadBlockIndexGuts(
426 GetConsensus(), [this](const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return this->InsertBlockIndex(hash); }, m_interrupt)) {
427 return false;
428 }
429
430 if (snapshot_blockhash) {
431 const std::optional<AssumeutxoData> maybe_au_data = GetParams().AssumeutxoForBlockhash(*snapshot_blockhash);
432 if (!maybe_au_data) {
433 m_opts.notifications.fatalError(strprintf(_("Assumeutxo data not found for the given blockhash '%s'."), snapshot_blockhash->ToString()));
434 return false;
435 }
436 const AssumeutxoData& au_data = *Assert(maybe_au_data);
437 m_snapshot_height = au_data.height;
438 CBlockIndex* base{LookupBlockIndex(*snapshot_blockhash)};
439
440 // Since m_chain_tx_count (responsible for estimated progress) isn't persisted
441 // to disk, we must bootstrap the value for assumedvalid chainstates
442 // from the hardcoded assumeutxo chainparams.
443 base->m_chain_tx_count = au_data.m_chain_tx_count;
444 LogInfo("[snapshot] set m_chain_tx_count=%d for %s", au_data.m_chain_tx_count, snapshot_blockhash->ToString());
445 } else {
446 // If this isn't called with a snapshot blockhash, make sure the cached snapshot height
447 // is null. This is relevant during snapshot completion, when the blockman may be loaded
448 // with a height that then needs to be cleared after the snapshot is fully validated.
449 m_snapshot_height.reset();
450 }
451
452 Assert(m_snapshot_height.has_value() == snapshot_blockhash.has_value());
453
454 // Calculate nChainWork
455 std::vector<CBlockIndex*> vSortedByHeight{GetAllBlockIndices()};
456 std::sort(vSortedByHeight.begin(), vSortedByHeight.end(),
457 CBlockIndexHeightOnlyComparator());
458
459 CBlockIndex* previous_index{nullptr};
460 for (CBlockIndex* pindex : vSortedByHeight) {
461 if (m_interrupt) return false;
462 if (previous_index && pindex->nHeight > previous_index->nHeight + 1) {
463 LogError("%s: block index is non-contiguous, index of height %d missing\n", __func__, previous_index->nHeight + 1);
464 return false;
465 }
466 previous_index = pindex;
467 pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex);
468 pindex->nTimeMax = (pindex->pprev ? std::max(pindex->pprev->nTimeMax, pindex->nTime) : pindex->nTime);
469
470 // We can link the chain of blocks for which we've received transactions at some point, or
471 // blocks that are assumed-valid on the basis of snapshot load (see
472 // PopulateAndValidateSnapshot()).
473 // Pruned nodes may have deleted the block.
474 if (pindex->nTx > 0) {
475 if (pindex->pprev) {
476 if (m_snapshot_height && pindex->nHeight == *m_snapshot_height &&
477 pindex->GetBlockHash() == *snapshot_blockhash) {
478 // Should have been set above; don't disturb it with code below.
479 Assert(pindex->m_chain_tx_count > 0);
480 } else if (pindex->pprev->m_chain_tx_count > 0) {
481 pindex->m_chain_tx_count = pindex->pprev->m_chain_tx_count + pindex->nTx;
482 } else {
483 pindex->m_chain_tx_count = 0;
484 m_blocks_unlinked.insert(std::make_pair(pindex->pprev, pindex));
485 }
486 } else {
487 pindex->m_chain_tx_count = pindex->nTx;
488 }
489 }
490
491 if (pindex->nStatus & BLOCK_FAILED_CHILD) {
492 // BLOCK_FAILED_CHILD is deprecated, but may still exist on disk. Replace it with BLOCK_FAILED_VALID.
493 pindex->nStatus = (pindex->nStatus & ~BLOCK_FAILED_CHILD) | BLOCK_FAILED_VALID;
494 m_dirty_blockindex.insert(pindex);
495 }
496 if (!(pindex->nStatus & BLOCK_FAILED_VALID) && pindex->pprev && (pindex->pprev->nStatus & BLOCK_FAILED_VALID)) {
497 // All descendants of invalid blocks are invalid too.
498 pindex->nStatus |= BLOCK_FAILED_VALID;
499 m_dirty_blockindex.insert(pindex);
500 }
501
502 if (pindex->pprev) {
503 pindex->BuildSkip();
504 }
505 }
506
507 return true;
508}
509
510void BlockManager::WriteBlockIndexDB()
511{
513 std::vector<std::pair<int, const CBlockFileInfo*>> vFiles;
514 vFiles.reserve(m_dirty_fileinfo.size());
515 for (std::set<int>::iterator it = m_dirty_fileinfo.begin(); it != m_dirty_fileinfo.end();) {
516 vFiles.emplace_back(*it, &m_blockfile_info[*it]);
517 m_dirty_fileinfo.erase(it++);
518 }
519 std::vector<const CBlockIndex*> vBlocks;
520 vBlocks.reserve(m_dirty_blockindex.size());
521 for (std::set<CBlockIndex*>::iterator it = m_dirty_blockindex.begin(); it != m_dirty_blockindex.end();) {
522 vBlocks.push_back(*it);
523 m_dirty_blockindex.erase(it++);
524 }
525 int max_blockfile = WITH_LOCK(cs_LastBlockFile, return this->MaxBlockfileNum());
526 m_block_tree_db->WriteBatchSync(vFiles, max_blockfile, vBlocks);
527}
528
529bool BlockManager::LoadBlockIndexDB(const std::optional<uint256>& snapshot_blockhash)
530{
531 if (!LoadBlockIndex(snapshot_blockhash)) {
532 return false;
533 }
534 int max_blockfile_num{0};
535
536 // Load block file info
537 m_block_tree_db->ReadLastBlockFile(max_blockfile_num);
538 m_blockfile_info.resize(max_blockfile_num + 1);
539 LogInfo("Loading block index db: last block file = %i", max_blockfile_num);
540 for (int nFile = 0; nFile <= max_blockfile_num; nFile++) {
541 m_block_tree_db->ReadBlockFileInfo(nFile, m_blockfile_info[nFile]);
542 }
543 LogInfo("Loading block index db: last block file info: %s", m_blockfile_info[max_blockfile_num].ToString());
544 for (int nFile = max_blockfile_num + 1; true; nFile++) {
545 CBlockFileInfo info;
546 if (m_block_tree_db->ReadBlockFileInfo(nFile, info)) {
547 m_blockfile_info.push_back(info);
548 } else {
549 break;
550 }
551 }
552
553 // Check presence of blk files
554 LogInfo("Checking all blk files are present...");
555 std::set<int> setBlkDataFiles;
556 for (const auto& [_, block_index] : m_block_index) {
557 if (block_index.nStatus & BLOCK_HAVE_DATA) {
558 setBlkDataFiles.insert(block_index.nFile);
559 }
560 }
561 for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++) {
562 FlatFilePos pos(*it, 0);
563 if (OpenBlockFile(pos, /*fReadOnly=*/true).IsNull()) {
564 return false;
565 }
566 }
567
568 {
569 // Initialize the blockfile cursors.
571 for (size_t i = 0; i < m_blockfile_info.size(); ++i) {
572 const auto last_height_in_file = m_blockfile_info[i].nHeightLast;
573 m_blockfile_cursors[BlockfileTypeForHeight(last_height_in_file)] = {static_cast<int>(i), 0};
574 }
575 }
576
577 // Check whether we have ever pruned block & undo files
578 m_block_tree_db->ReadFlag("prunedblockfiles", m_have_pruned);
579 if (m_have_pruned) {
580 LogInfo("Loading block index db: Block files have previously been pruned");
581 }
582
583 // Check whether we need to continue reindexing
584 bool fReindexing = false;
585 m_block_tree_db->ReadReindexing(fReindexing);
586 if (fReindexing) m_blockfiles_indexed = false;
587
588 return true;
589}
590
591void BlockManager::ScanAndUnlinkAlreadyPrunedFiles()
592{
594 int max_blockfile = WITH_LOCK(cs_LastBlockFile, return this->MaxBlockfileNum());
595 if (!m_have_pruned) {
596 return;
597 }
598
599 std::set<int> block_files_to_prune;
600 for (int file_number = 0; file_number < max_blockfile; file_number++) {
601 if (m_blockfile_info[file_number].nSize == 0) {
602 block_files_to_prune.insert(file_number);
603 }
604 }
605
606 UnlinkPrunedFiles(block_files_to_prune);
607}
608
609bool BlockManager::IsBlockPruned(const CBlockIndex& block) const
610{
612 return m_have_pruned && !(block.nStatus & BLOCK_HAVE_DATA) && (block.nTx > 0);
613}
614
615const CBlockIndex& BlockManager::GetFirstBlock(const CBlockIndex& upper_block, uint32_t status_mask, const CBlockIndex* lower_block) const
616{
618 const CBlockIndex* last_block = &upper_block;
619 assert((last_block->nStatus & status_mask) == status_mask); // 'upper_block' must satisfy the status mask
620 while (last_block->pprev && ((last_block->pprev->nStatus & status_mask) == status_mask)) {
621 if (lower_block) {
622 // Return if we reached the lower_block
623 if (last_block == lower_block) return *lower_block;
624 // if range was surpassed, means that 'lower_block' is not part of the 'upper_block' chain
625 // and so far this is not allowed.
626 assert(last_block->nHeight >= lower_block->nHeight);
627 }
628 last_block = last_block->pprev;
629 }
630 assert(last_block != nullptr);
631 return *last_block;
632}
633
634bool BlockManager::CheckBlockDataAvailability(const CBlockIndex& upper_block, const CBlockIndex& lower_block)
635{
636 if (!(upper_block.nStatus & BLOCK_HAVE_DATA)) return false;
637 return &GetFirstBlock(upper_block, BLOCK_HAVE_DATA, &lower_block) == &lower_block;
638}
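// Usage sketch for the two helpers above (hypothetical upper/lower index entries on the
// same chain, cs_main assumed to be held):
//     if (blockman.CheckBlockDataAvailability(*upper, *lower)) {
//         // every block in [lower, upper] still has its block data on disk
//     }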
639
640// If we're using -prune with -reindex, then delete block files that will be ignored by the
641// reindex. Since reindexing works by starting at block file 0 and looping until a blockfile
642// is missing, do the same here to delete any later block files after a gap. Also delete all
643// rev files since they'll be rewritten by the reindex anyway. This ensures that m_blockfile_info
644// is in sync with what's actually on disk by the time we start downloading, so that pruning
645// works correctly.
646void BlockManager::CleanupBlockRevFiles() const
647{
648 std::map<std::string, fs::path> mapBlockFiles;
649
650 // Glob all blk?????.dat and rev?????.dat files from the blocks directory.
651 // Remove the rev files immediately and insert the blk file paths into an
652 // ordered map keyed by block file index.
653 LogInfo("Removing unusable blk?????.dat and rev?????.dat files for -reindex with -prune");
654 for (fs::directory_iterator it(m_opts.blocks_dir); it != fs::directory_iterator(); it++) {
655 const std::string path = fs::PathToString(it->path().filename());
656 if (fs::is_regular_file(*it) &&
657 path.length() == 12 &&
658 path.ends_with(".dat"))
659 {
660 if (path.starts_with("blk")) {
661 mapBlockFiles[path.substr(3, 5)] = it->path();
662 } else if (path.starts_with("rev")) {
663 remove(it->path());
664 }
665 }
666 }
667
668 // Remove all block files that aren't part of a contiguous set starting at
669 // zero by walking the ordered map (keys are block file indices) by
670 // keeping a separate counter. Once we hit a gap (or if 0 doesn't exist)
671 // start removing block files.
672 int nContigCounter = 0;
673 for (const std::pair<const std::string, fs::path>& item : mapBlockFiles) {
674 if (LocaleIndependentAtoi<int>(item.first) == nContigCounter) {
675 nContigCounter++;
676 continue;
677 }
678 remove(item.second);
679 }
680}
681
682CBlockFileInfo* BlockManager::GetBlockFileInfo(size_t n)
683{
684 LOCK(cs_LastBlockFile);
685
686 return &m_blockfile_info.at(n);
687}
688
689bool BlockManager::ReadBlockUndo(CBlockUndo& blockundo, const CBlockIndex& index) const
690{
691 const FlatFilePos pos{WITH_LOCK(::cs_main, return index.GetUndoPos())};
692
693 // Open history file to read
694 AutoFile file{OpenUndoFile(pos, true)};
695 if (file.IsNull()) {
696 LogError("OpenUndoFile failed for %s while reading block undo", pos.ToString());
697 return false;
698 }
699 BufferedReader filein{std::move(file)};
700
701 try {
702 // Read block
703 HashVerifier verifier{filein}; // Use HashVerifier, as reserializing may lose data, c.f. commit d3424243
704
705 verifier << index.pprev->GetBlockHash();
706 verifier >> blockundo;
707
708 uint256 hashChecksum;
709 filein >> hashChecksum;
710
711 // Verify checksum
712 if (hashChecksum != verifier.GetHash()) {
713 LogError("Checksum mismatch at %s while reading block undo", pos.ToString());
714 return false;
715 }
716 } catch (const std::exception& e) {
717 LogError("Deserialize or I/O error - %s at %s while reading block undo", e.what(), pos.ToString());
718 return false;
719 }
720
721 return true;
722}
723
724bool BlockManager::FlushUndoFile(int block_file, bool finalize)
725{
726 FlatFilePos undo_pos_old(block_file, m_blockfile_info[block_file].nUndoSize);
727 if (!m_undo_file_seq.Flush(undo_pos_old, finalize)) {
728 m_opts.notifications.flushError(_("Flushing undo file to disk failed. This is likely the result of an I/O error."));
729 return false;
730 }
731 return true;
732}
733
734bool BlockManager::FlushBlockFile(int blockfile_num, bool fFinalize, bool finalize_undo)
735{
736 bool success = true;
738
739 if (m_blockfile_info.size() < 1) {
740 // Return if we haven't loaded any blockfiles yet. This happens during
741 // chainstate init, when we call ChainstateManager::MaybeRebalanceCaches() (which
742 // then calls FlushStateToDisk()), resulting in a call to this function before we
743 // have populated `m_blockfile_info` via LoadBlockIndexDB().
744 return true;
745 }
746 assert(static_cast<int>(m_blockfile_info.size()) > blockfile_num);
747
748 FlatFilePos block_pos_old(blockfile_num, m_blockfile_info[blockfile_num].nSize);
749 if (!m_block_file_seq.Flush(block_pos_old, fFinalize)) {
750 m_opts.notifications.flushError(_("Flushing block file to disk failed. This is likely the result of an I/O error."));
751 success = false;
752 }
753 // we do not always flush the undo file, as the chain tip may be lagging behind the incoming blocks,
754 // e.g. during IBD or a sync after a node going offline
755 if (!fFinalize || finalize_undo) {
756 if (!FlushUndoFile(blockfile_num, finalize_undo)) {
757 success = false;
758 }
759 }
760 return success;
761}
762
763BlockfileType BlockManager::BlockfileTypeForHeight(int height)
764{
765 if (!m_snapshot_height) {
766 return BlockfileType::NORMAL;
767 }
768 return (height >= *m_snapshot_height) ? BlockfileType::ASSUMED : BlockfileType::NORMAL;
769}
770
771bool BlockManager::FlushChainstateBlockFile(int tip_height)
772{
773 LOCK(cs_LastBlockFile);
774 auto& cursor = m_blockfile_cursors[BlockfileTypeForHeight(tip_height)];
775 // If the cursor does not exist, it means an assumeutxo snapshot is loaded,
776 // but no blocks past the snapshot height have been written yet, so there
777 // is no data associated with the chainstate, and it is safe not to flush.
778 if (cursor) {
779 return FlushBlockFile(cursor->file_num, /*fFinalize=*/false, /*finalize_undo=*/false);
780 }
781 // No need to log warnings in this case.
782 return true;
783}
784
785uint64_t BlockManager::CalculateCurrentUsage()
786{
787 LOCK(cs_LastBlockFile);
788
789 uint64_t retval = 0;
790 for (const CBlockFileInfo& file : m_blockfile_info) {
791 retval += file.nSize + file.nUndoSize;
792 }
793 return retval;
794}
795
796void BlockManager::UnlinkPrunedFiles(const std::set<int>& setFilesToPrune) const
797{
798 std::error_code ec;
799 for (std::set<int>::iterator it = setFilesToPrune.begin(); it != setFilesToPrune.end(); ++it) {
800 FlatFilePos pos(*it, 0);
801 const bool removed_blockfile{fs::remove(m_block_file_seq.FileName(pos), ec)};
802 const bool removed_undofile{fs::remove(m_undo_file_seq.FileName(pos), ec)};
803 if (removed_blockfile || removed_undofile) {
804 LogDebug(BCLog::BLOCKSTORAGE, "Prune: %s deleted blk/rev (%05u)\n", __func__, *it);
805 }
806 }
807}
808
809AutoFile BlockManager::OpenBlockFile(const FlatFilePos& pos, bool fReadOnly) const
810{
811 return AutoFile{m_block_file_seq.Open(pos, fReadOnly), m_obfuscation};
812}
813
815AutoFile BlockManager::OpenUndoFile(const FlatFilePos& pos, bool fReadOnly) const
816{
817 return AutoFile{m_undo_file_seq.Open(pos, fReadOnly), m_obfuscation};
818}
819
820fs::path BlockManager::GetBlockPosFilename(const FlatFilePos& pos) const
821{
822 return m_block_file_seq.FileName(pos);
823}
824
825FlatFilePos BlockManager::FindNextBlockPos(unsigned int nAddSize, unsigned int nHeight, uint64_t nTime)
826{
828
829 const BlockfileType chain_type = BlockfileTypeForHeight(nHeight);
830
831 if (!m_blockfile_cursors[chain_type]) {
832 // If a snapshot is loaded during runtime, we may not have initialized this cursor yet.
833 assert(chain_type == BlockfileType::ASSUMED);
834 const auto new_cursor = BlockfileCursor{this->MaxBlockfileNum() + 1};
835 m_blockfile_cursors[chain_type] = new_cursor;
836 LogDebug(BCLog::BLOCKSTORAGE, "[%s] initializing blockfile cursor to %s\n", chain_type, new_cursor);
837 }
838 const int last_blockfile = m_blockfile_cursors[chain_type]->file_num;
839
840 int nFile = last_blockfile;
841 if (static_cast<int>(m_blockfile_info.size()) <= nFile) {
842 m_blockfile_info.resize(nFile + 1);
843 }
844
845 bool finalize_undo = false;
846 unsigned int max_blockfile_size{MAX_BLOCKFILE_SIZE};
847 // Use smaller blockfiles in test-only -fastprune mode - but avoid
848 // the possibility of having a block not fit into the block file.
849 if (m_opts.fast_prune) {
850 max_blockfile_size = 0x10000; // 64kiB
851 if (nAddSize >= max_blockfile_size) {
852 // dynamically adjust the blockfile size to be larger than the added size
853 max_blockfile_size = nAddSize + 1;
854 }
855 }
856 assert(nAddSize < max_blockfile_size);
857
858 while (m_blockfile_info[nFile].nSize + nAddSize >= max_blockfile_size) {
859 // when the undo file is keeping up with the block file, we want to flush it explicitly
860 // when it is lagging behind (more blocks arrive than are being connected), we let the
861 // undo block write case handle it
862 finalize_undo = (static_cast<int>(m_blockfile_info[nFile].nHeightLast) ==
863 Assert(m_blockfile_cursors[chain_type])->undo_height);
864
865 // Try the next unclaimed blockfile number
866 nFile = this->MaxBlockfileNum() + 1;
867 // Set to increment MaxBlockfileNum() for next iteration
868 m_blockfile_cursors[chain_type] = BlockfileCursor{nFile};
869
870 if (static_cast<int>(m_blockfile_info.size()) <= nFile) {
871 m_blockfile_info.resize(nFile + 1);
872 }
873 }
874 FlatFilePos pos;
875 pos.nFile = nFile;
876 pos.nPos = m_blockfile_info[nFile].nSize;
877
878 if (nFile != last_blockfile) {
879 LogDebug(BCLog::BLOCKSTORAGE, "Leaving block file %i: %s (onto %i) (height %i)\n",
880 last_blockfile, m_blockfile_info[last_blockfile].ToString(), nFile, nHeight);
881
882 // Do not propagate the return code. The flush concerns a previous block
883 // and undo file that has already been written to. If a flush fails
884 // here, and we crash, there is no expected additional block data
885 // inconsistency arising from the flush failure here. However, the undo
886 // data may be inconsistent after a crash if the flush is called during
887 // a reindex. A flush error might also leave some of the data files
888 // untrimmed.
889 if (!FlushBlockFile(last_blockfile, /*fFinalize=*/true, finalize_undo)) {
891 "Failed to flush previous block file %05i (finalize=1, finalize_undo=%i) before opening new block file %05i\n",
892 last_blockfile, finalize_undo, nFile);
893 }
894 // No undo data yet in the new file, so reset our undo-height tracking.
895 m_blockfile_cursors[chain_type] = BlockfileCursor{nFile};
896 }
897
898 m_blockfile_info[nFile].AddBlock(nHeight, nTime);
899 m_blockfile_info[nFile].nSize += nAddSize;
900
901 bool out_of_space;
902 size_t bytes_allocated = m_block_file_seq.Allocate(pos, nAddSize, out_of_space);
903 if (out_of_space) {
904 m_opts.notifications.fatalError(_("Disk space is too low!"));
905 return {};
906 }
907 if (bytes_allocated != 0 && IsPruneMode()) {
908 m_check_for_pruning = true;
909 }
910
911 m_dirty_fileinfo.insert(nFile);
912 return pos;
913}
914
915void BlockManager::UpdateBlockInfo(const CBlock& block, unsigned int nHeight, const FlatFilePos& pos)
916{
918
919 // Update the cursor so it points to the last file.
920 const BlockfileType chain_type{BlockfileTypeForHeight(nHeight)};
921 auto& cursor{m_blockfile_cursors[chain_type]};
922 if (!cursor || cursor->file_num < pos.nFile) {
923 m_blockfile_cursors[chain_type] = BlockfileCursor{pos.nFile};
924 }
925
926 // Update the file information with the current block.
927 const unsigned int added_size = ::GetSerializeSize(TX_WITH_WITNESS(block));
928 const int nFile = pos.nFile;
929 if (static_cast<int>(m_blockfile_info.size()) <= nFile) {
930 m_blockfile_info.resize(nFile + 1);
931 }
932 m_blockfile_info[nFile].AddBlock(nHeight, block.GetBlockTime());
933 m_blockfile_info[nFile].nSize = std::max(pos.nPos + added_size, m_blockfile_info[nFile].nSize);
934 m_dirty_fileinfo.insert(nFile);
935}
936
937bool BlockManager::FindUndoPos(BlockValidationState& state, int nFile, FlatFilePos& pos, unsigned int nAddSize)
938{
939 pos.nFile = nFile;
940
942
943 pos.nPos = m_blockfile_info[nFile].nUndoSize;
944 m_blockfile_info[nFile].nUndoSize += nAddSize;
945 m_dirty_fileinfo.insert(nFile);
946
947 bool out_of_space;
948 size_t bytes_allocated = m_undo_file_seq.Allocate(pos, nAddSize, out_of_space);
949 if (out_of_space) {
950 return FatalError(m_opts.notifications, state, _("Disk space is too low!"));
951 }
952 if (bytes_allocated != 0 && IsPruneMode()) {
953 m_check_for_pruning = true;
954 }
955
956 return true;
957}
958
959bool BlockManager::WriteBlockUndo(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex& block)
960{
962 const BlockfileType type = BlockfileTypeForHeight(block.nHeight);
963 auto& cursor = *Assert(WITH_LOCK(cs_LastBlockFile, return m_blockfile_cursors[type]));
964
965 // Write undo information to disk
966 if (block.GetUndoPos().IsNull()) {
967 FlatFilePos pos;
968 const auto blockundo_size{static_cast<uint32_t>(GetSerializeSize(blockundo))};
969 if (!FindUndoPos(state, block.nFile, pos, blockundo_size + UNDO_DATA_DISK_OVERHEAD)) {
970 LogError("FindUndoPos failed for %s while writing block undo", pos.ToString());
971 return false;
972 }
973
974 // Open history file to append
975 AutoFile file{OpenUndoFile(pos)};
976 if (file.IsNull()) {
977 LogError("OpenUndoFile failed for %s while writing block undo", pos.ToString());
978 return FatalError(m_opts.notifications, state, _("Failed to write undo data."));
979 }
980 {
981 BufferedWriter fileout{file};
982
983 // Write index header
984 fileout << GetParams().MessageStart() << blockundo_size;
985 pos.nPos += STORAGE_HEADER_BYTES;
986 {
987 // Calculate checksum
988 HashWriter hasher{};
989 hasher << block.pprev->GetBlockHash() << blockundo;
990 // Write undo data & checksum
991 fileout << blockundo << hasher.GetHash();
992 }
993 // BufferedWriter will flush pending data to file when fileout goes out of scope.
994 }
995
996 // Make sure that the file is closed before we call `FlushUndoFile`.
997 if (file.fclose() != 0) {
998 LogError("Failed to close block undo file %s: %s", pos.ToString(), SysErrorString(errno));
999 return FatalError(m_opts.notifications, state, _("Failed to close block undo file."));
1000 }
1001
1002 // rev files are written in block height order, whereas blk files are written as blocks come in (often out of order)
1003 // we want to flush the rev (undo) file once we've written the last block, which is indicated by the last height
1004 // in the block file info as below; note that this does not catch the case where the undo writes are keeping up
1005 // with the block writes (usually when a synced up node is getting newly mined blocks) -- this case is caught in
1006 // the FindNextBlockPos function
1007 if (pos.nFile < cursor.file_num && static_cast<uint32_t>(block.nHeight) == m_blockfile_info[pos.nFile].nHeightLast) {
1008 // Do not propagate the return code, a failed flush here should not
1009 // be an indication for a failed write. If it were propagated here,
1010 // the caller would assume the undo data not to be written, when in
1011 // fact it is. Note though, that a failed flush might leave the data
1012 // file untrimmed.
1013 if (!FlushUndoFile(pos.nFile, true)) {
1014 LogWarning("Failed to flush undo file %05i\n", pos.nFile);
1015 }
1016 } else if (pos.nFile == cursor.file_num && block.nHeight > cursor.undo_height) {
1017 cursor.undo_height = block.nHeight;
1018 }
1019 // update nUndoPos in block index
1020 block.nUndoPos = pos.nPos;
1021 block.nStatus |= BLOCK_HAVE_UNDO;
1022 m_dirty_blockindex.insert(&block);
1023 }
1024
1025 return true;
1026}
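// On-disk layout of the undo record written above: 4-byte network magic, 4-byte payload
// length, the serialized CBlockUndo, then a 32-byte hash committing to the previous block
// hash and the undo payload; ReadBlockUndo() recomputes and verifies that hash as the
// checksum.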
1027
1028bool BlockManager::ReadBlock(CBlock& block, const FlatFilePos& pos, const std::optional<uint256>& expected_hash) const
1029{
1030 block.SetNull();
1031
1032 // Open history file to read
1033 const auto block_data{ReadRawBlock(pos)};
1034 if (!block_data) {
1035 return false;
1036 }
1037
1038 try {
1039 // Read block
1040 SpanReader{*block_data} >> TX_WITH_WITNESS(block);
1041 } catch (const std::exception& e) {
1042 LogError("Deserialize or I/O error - %s at %s while reading block", e.what(), pos.ToString());
1043 return false;
1044 }
1045
1046 const auto block_hash{block.GetHash()};
1047
1048 // Check the header
1049 if (!CheckProofOfWork(block_hash, block.nBits, GetConsensus())) {
1050 LogError("Errors in block header at %s while reading block", pos.ToString());
1051 return false;
1052 }
1053
1054 // Signet only: check block solution
1055 if (GetConsensus().signet_blocks && !CheckSignetBlockSolution(block, GetConsensus())) {
1056 LogError("Errors in block solution at %s while reading block", pos.ToString());
1057 return false;
1058 }
1059
1060 if (expected_hash && block_hash != *expected_hash) {
1061 LogError("GetHash() doesn't match index at %s while reading block (%s != %s)",
1062 pos.ToString(), block_hash.ToString(), expected_hash->ToString());
1063 return false;
1064 }
1065
1066 return true;
1067}
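// Typical read path (sketch, hypothetical blockman/pindex variables): callers holding an
// index entry use the CBlockIndex overload below, which also checks the hash recorded in
// the index:
//     CBlock block;
//     if (!blockman.ReadBlock(block, *pindex)) { /* pruned, missing or corrupt */ }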
1068
1069bool BlockManager::ReadBlock(CBlock& block, const CBlockIndex& index) const
1070{
1071 const FlatFilePos block_pos{WITH_LOCK(cs_main, return index.GetBlockPos())};
1072 return ReadBlock(block, block_pos, index.GetBlockHash());
1073}
1074
1075BlockManager::ReadRawBlockResult BlockManager::ReadRawBlock(const FlatFilePos& pos, std::optional<std::pair<size_t, size_t>> block_part) const
1076{
1077 if (pos.nPos < STORAGE_HEADER_BYTES) {
1078 // If nPos is less than STORAGE_HEADER_BYTES, we can't read the header that precedes the block data
1079 // This would cause an unsigned integer underflow when trying to position the file cursor
1080 // This can happen after pruning or default constructed positions
1081 LogError("Failed for %s while reading raw block storage header", pos.ToString());
1083 }
1084 AutoFile filein{OpenBlockFile({pos.nFile, pos.nPos - STORAGE_HEADER_BYTES}, /*fReadOnly=*/true)};
1085 if (filein.IsNull()) {
1086 LogError("OpenBlockFile failed for %s while reading raw block", pos.ToString());
1088 }
1089
1090 try {
1091 MessageStartChars blk_start;
1092 unsigned int blk_size;
1093
1094 filein >> blk_start >> blk_size;
1095
1096 if (blk_start != GetParams().MessageStart()) {
1097 LogError("Block magic mismatch for %s: %s versus expected %s while reading raw block",
1098 pos.ToString(), HexStr(blk_start), HexStr(GetParams().MessageStart()));
1100 }
1101
1102 if (blk_size > MAX_SIZE) {
1103 LogError("Block data is larger than maximum deserialization size for %s: %s versus %s while reading raw block",
1104 pos.ToString(), blk_size, MAX_SIZE);
1106 }
1107
1108 if (block_part) {
1109 const auto [offset, size]{*block_part};
1110 if (size == 0 || SaturatingAdd(offset, size) > blk_size) {
1111 return util::Unexpected{ReadRawError::BadPartRange}; // Avoid logging - offset/size come from untrusted REST input
1112 }
1113 filein.seek(offset, SEEK_CUR);
1114 blk_size = size;
1115 }
1116
1117 std::vector<std::byte> data(blk_size); // Zeroing of memory is intentional here
1118 filein.read(data);
1119 return data;
1120 } catch (const std::exception& e) {
1121 LogError("Read from block file failed: %s for %s while reading raw block", e.what(), pos.ToString());
1123 }
1124}
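// Note: the FlatFilePos recorded in the block index points at the block payload itself,
// which is why ReadRawBlock() rewinds by STORAGE_HEADER_BYTES (the 4-byte message start
// plus 4-byte length written in front of each block) before reading.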
1125
1126FlatFilePos BlockManager::WriteBlock(const CBlock& block, int nHeight)
1127{
1128 const unsigned int block_size{static_cast<unsigned int>(GetSerializeSize(TX_WITH_WITNESS(block)))};
1129 FlatFilePos pos{FindNextBlockPos(block_size + STORAGE_HEADER_BYTES, nHeight, block.GetBlockTime())};
1130 if (pos.IsNull()) {
1131 LogError("FindNextBlockPos failed for %s while writing block", pos.ToString());
1132 return FlatFilePos();
1133 }
1134 AutoFile file{OpenBlockFile(pos, /*fReadOnly=*/false)};
1135 if (file.IsNull()) {
1136 LogError("OpenBlockFile failed for %s while writing block", pos.ToString());
1137 m_opts.notifications.fatalError(_("Failed to write block."));
1138 return FlatFilePos();
1139 }
1140 {
1141 BufferedWriter fileout{file};
1142
1143 // Write index header
1144 fileout << GetParams().MessageStart() << block_size;
1145 pos.nPos += STORAGE_HEADER_BYTES;
1146 // Write block
1147 fileout << TX_WITH_WITNESS(block);
1148 }
1149
1150 if (file.fclose() != 0) {
1151 LogError("Failed to close block file %s: %s", pos.ToString(), SysErrorString(errno));
1152 m_opts.notifications.fatalError(_("Failed to close file when writing block."));
1153 return FlatFilePos();
1154 }
1155
1156 return pos;
1157}
1158
1159static Obfuscation InitBlocksdirXorKey(const BlockManager::Options& opts)
1160{
1161 // Bytes are serialized without length indicator, so this is also the exact
1162 // size of the XOR-key file.
1163 std::array<std::byte, Obfuscation::KEY_SIZE> obfuscation{};
1164
1165 // Consider this to be the first run if the blocksdir contains only hidden
1166 // files (those which start with a .). Checking for a fully-empty dir would
1167 // be too aggressive as a .lock file may have already been written.
1168 bool first_run = true;
1169 for (const auto& entry : fs::directory_iterator(opts.blocks_dir)) {
1170 const std::string path = fs::PathToString(entry.path().filename());
1171 if (!entry.is_regular_file() || !path.starts_with('.')) {
1172 first_run = false;
1173 break;
1174 }
1175 }
1176
1177 if (opts.use_xor && first_run) {
1178 // Only use random fresh key when the boolean option is set and on the
1179 // very first start of the program.
1180 FastRandomContext{}.fillrand(obfuscation);
1181 }
1182
1183 const fs::path xor_key_path{opts.blocks_dir / "xor.dat"};
1184 if (fs::exists(xor_key_path)) {
1185 // A pre-existing xor key file has priority.
1186 AutoFile xor_key_file{fsbridge::fopen(xor_key_path, "rb")};
1187 xor_key_file >> obfuscation;
1188 } else {
1189 // Create initial or missing xor key file
1190 AutoFile xor_key_file{fsbridge::fopen(xor_key_path,
1191#ifdef __MINGW64__
1192 "wb" // Temporary workaround for https://github.com/bitcoin/bitcoin/issues/30210
1193#else
1194 "wbx"
1195#endif
1196 )};
1197 xor_key_file << obfuscation;
1198 if (xor_key_file.fclose() != 0) {
1199 throw std::runtime_error{strprintf("Error closing XOR key file %s: %s",
1200 fs::PathToString(xor_key_path),
1201 SysErrorString(errno))};
1202 }
1203 }
1204 // If the user disabled the key, it must be zero.
1205 if (!opts.use_xor && obfuscation != decltype(obfuscation){}) {
1206 throw std::runtime_error{
1207 strprintf("The blocksdir XOR-key can not be disabled when a random key was already stored! "
1208 "Stored key: '%s', stored path: '%s'.",
1209 HexStr(obfuscation), fs::PathToString(xor_key_path)),
1210 };
1211 }
1212 LogInfo("Using obfuscation key for blocksdir *.dat files (%s): '%s'\n", fs::PathToString(opts.blocks_dir), HexStr(obfuscation));
1213 return Obfuscation{obfuscation};
1214}
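// The key produced here is applied by AutoFile in OpenBlockFile()/OpenUndoFile(), so every
// blk*.dat and rev*.dat byte is XORed with it on read and write. An all-zero key (the
// result when opts.use_xor is disabled on a first run) leaves the files effectively
// unobfuscated while keeping the code path identical.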
1215
1216BlockManager::BlockManager(const util::SignalInterrupt& interrupt, Options opts)
1217 : m_prune_mode{opts.prune_target > 0},
1218 m_obfuscation{InitBlocksdirXorKey(opts)},
1219 m_opts{std::move(opts)},
1220 m_block_file_seq{FlatFileSeq{m_opts.blocks_dir, "blk", m_opts.fast_prune ? 0x4000 /* 16kB */ : BLOCKFILE_CHUNK_SIZE}},
1221 m_undo_file_seq{FlatFileSeq{m_opts.blocks_dir, "rev", UNDOFILE_CHUNK_SIZE}},
1222 m_interrupt{interrupt}
1223{
1224 m_block_tree_db = std::make_unique<BlockTreeDB>(m_opts.block_tree_db_params);
1225
1226 if (m_opts.block_tree_db_params.wipe_data) {
1227 m_block_tree_db->WriteReindexing(true);
1228 m_blockfiles_indexed = false;
1229 // If we're reindexing in prune mode, wipe away unusable block files and all undo data files
1230 if (m_prune_mode) {
1231 CleanupBlockRevFiles();
1232 }
1233 }
1234}
1235
1236class ImportingNow
1237{
1238 std::atomic<bool>& m_importing;
1239
1240public:
1241 ImportingNow(std::atomic<bool>& importing) : m_importing{importing}
1242 {
1243 assert(m_importing == false);
1244 m_importing = true;
1245 }
1246 ~ImportingNow()
1247 {
1248 assert(m_importing == true);
1249 m_importing = false;
1250 }
1251};
1252
1253void ImportBlocks(ChainstateManager& chainman, std::span<const fs::path> import_paths)
1254{
1255 ImportingNow imp{chainman.m_blockman.m_importing};
1256
1257 // -reindex
1258 if (!chainman.m_blockman.m_blockfiles_indexed) {
1259 int total_files{0};
1260 while (fs::exists(chainman.m_blockman.GetBlockPosFilename(FlatFilePos(total_files, 0)))) {
1261 total_files++;
1262 }
1263
1264 // Map of disk positions for blocks with unknown parent (only used for reindex);
1265 // parent hash -> child disk position, multiple children can have the same parent.
1266 std::multimap<uint256, FlatFilePos> blocks_with_unknown_parent;
1267
1268 for (int nFile{0}; nFile < total_files; ++nFile) {
1269 FlatFilePos pos(nFile, 0);
1270 AutoFile file{chainman.m_blockman.OpenBlockFile(pos, /*fReadOnly=*/true)};
1271 if (file.IsNull()) {
1272 break; // This error is logged in OpenBlockFile
1273 }
1274 LogInfo("Reindexing block file blk%05u.dat (%d%% complete)...", (unsigned int)nFile, nFile * 100 / total_files);
1275 chainman.LoadExternalBlockFile(file, &pos, &blocks_with_unknown_parent);
1276 if (chainman.m_interrupt) {
1277 LogInfo("Interrupt requested. Exit reindexing.");
1278 return;
1279 }
1280 }
1281 WITH_LOCK(::cs_main, chainman.m_blockman.m_block_tree_db->WriteReindexing(false));
1282 chainman.m_blockman.m_blockfiles_indexed = true;
1283 LogInfo("Reindexing finished");
1284 // To avoid ending up in a situation without genesis block, re-try initializing (no-op if reindexing worked):
1285 chainman.ActiveChainstate().LoadGenesisBlock();
1286 }
1287
1288 // -loadblock=
1289 for (const fs::path& path : import_paths) {
1290 AutoFile file{fsbridge::fopen(path, "rb")};
1291 if (!file.IsNull()) {
1292 LogInfo("Importing blocks file %s...", fs::PathToString(path));
1293 chainman.LoadExternalBlockFile(file);
1294 if (chainman.m_interrupt) {
1295 LogInfo("Interrupt requested. Exit block importing.");
1296 return;
1297 }
1298 } else {
1299 LogWarning("Could not open blocks file %s", fs::PathToString(path));
1300 }
1301 }
1302
1303 // scan for better chains in the block chain database, that are not yet connected in the active best chain
1304 if (auto result = chainman.ActivateBestChains(); !result) {
1305 chainman.GetNotifications().fatalError(util::ErrorString(result));
1306 }
1307 // End scope of ImportingNow
1308}
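// Call-site note: ImportBlocks() is expected to run once during startup after the block
// index has been loaded, covering both the -reindex rebuild and any -loadblock=<file>
// imports before the best chain is activated; the ImportingNow RAII guard keeps
// m_importing set for the whole run.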
1309
1310std::ostream& operator<<(std::ostream& os, const BlockfileType& type) {
1311 switch(type) {
1312 case BlockfileType::NORMAL: os << "normal"; break;
1313 case BlockfileType::ASSUMED: os << "assumed"; break;
1314 default: os.setstate(std::ios_base::failbit);
1315 }
1316 return os;
1317}
1318
1319std::ostream& operator<<(std::ostream& os, const BlockfileCursor& cursor) {
1320 os << strprintf("BlockfileCursor(file_num=%d, undo_height=%d)", cursor.file_num, cursor.undo_height);
1321 return os;
1322}
1323} // namespace node
void CheckBlockDataAvailability(BlockManager &blockman, const CBlockIndex &blockindex, bool check_for_undo)
Definition: blockchain.cpp:671
@ BLOCK_VALID_TREE
All parent headers found, difficulty matches, timestamp >= median previous.
Definition: chain.h:51
@ BLOCK_HAVE_UNDO
undo data available in rev*.dat
Definition: chain.h:76
@ BLOCK_HAVE_DATA
full block available in blk*.dat
Definition: chain.h:75
@ BLOCK_FAILED_CHILD
Unused flag that was previously set when descending from failed block.
Definition: chain.h:80
@ BLOCK_FAILED_VALID
stage after last reached validness failed
Definition: chain.h:79
arith_uint256 GetBlockProof(const CBlockIndex &block)
Compute how much work a block index entry corresponds to.
Definition: chain.h:305
static constexpr int32_t SEQ_ID_INIT_FROM_DISK
Definition: chain.h:40
#define Assert(val)
Identity function.
Definition: check.h:113
Non-refcounted RAII wrapper for FILE*.
Definition: streams.h:373
Wrapper that buffers reads from an underlying stream.
Definition: streams.h:630
Wrapper that buffers writes to an underlying stream.
Definition: streams.h:672
Nodes collect new transactions into a block, hash them into a hash tree, and scan through nonce value...
Definition: block.h:27
uint32_t nBits
Definition: block.h:34
int64_t GetBlockTime() const
Definition: block.h:66
uint256 hashPrevBlock
Definition: block.h:31
uint256 GetHash() const
Definition: block.cpp:15
Definition: block.h:74
void SetNull()
Definition: block.h:100
The block chain is a tree shaped structure starting with the genesis block at the root,...
Definition: chain.h:94
uint256 hashMerkleRoot
Definition: chain.h:141
std::string ToString() const
Definition: chain.cpp:10
CBlockIndex * pprev
pointer to the index of the predecessor of this block
Definition: chain.h:100
uint64_t m_chain_tx_count
(memory only) Number of transactions in the chain up to and including this block.
Definition: chain.h:129
void BuildSkip()
Build the skiplist pointer for this entry.
Definition: chain.cpp:115
arith_uint256 nChainWork
(memory only) Total amount of work (expected number of hashes) in the chain up to and including this ...
Definition: chain.h:118
uint32_t nTime
Definition: chain.h:142
unsigned int nTimeMax
(memory only) Maximum nTime in the chain up to and including this block.
Definition: chain.h:152
int32_t nSequenceId
(memory only) Sequential id assigned to distinguish order in which blocks are received.
Definition: chain.h:149
uint32_t nNonce
Definition: chain.h:144
uint256 GetBlockHash() const
Definition: chain.h:198
FlatFilePos GetUndoPos() const EXCLUSIVE_LOCKS_REQUIRED(
Definition: chain.h:174
uint32_t nBits
Definition: chain.h:143
bool RaiseValidity(enum BlockStatus nUpTo) EXCLUSIVE_LOCKS_REQUIRED(
Raise the validity level of this block index entry.
Definition: chain.h:262
unsigned int nTx
Number of transactions in this block.
Definition: chain.h:123
int32_t nVersion
block header
Definition: chain.h:140
int nHeight
height of the entry in the chain. The genesis block has height 0
Definition: chain.h:106
FlatFilePos GetBlockPos() const EXCLUSIVE_LOCKS_REQUIRED(
Definition: chain.h:163
const uint256 * phashBlock
pointer to the hash of the block, if any. Memory is owned by this CBlockIndex
Definition: chain.h:97
Undo information for a CBlock.
Definition: undo.h:63
int Height() const
Return the maximal height in the chain.
Definition: chain.h:425
const MessageStartChars & MessageStart() const
Definition: chainparams.h:90
std::optional< AssumeutxoData > AssumeutxoForBlockhash(const uint256 &blockhash) const
Definition: chainparams.h:123
uint64_t PruneAfterHeight() const
Definition: chainparams.h:101
Batch of changes queued to be written to a CDBWrapper.
Definition: dbwrapper.h:72
void Write(const K &key, const V &value)
Definition: dbwrapper.h:96
bool Read(const K &key, V &value) const
Definition: dbwrapper.h:207
CDBIterator * NewIterator()
Definition: dbwrapper.cpp:360
bool Exists(const K &key) const
Definition: dbwrapper.h:235
void Erase(const K &key, bool fSync=false)
Definition: dbwrapper.h:244
void WriteBatch(CDBBatch &batch, bool fSync=false)
Definition: dbwrapper.cpp:278
void Write(const K &key, const V &value, bool fSync=false)
Definition: dbwrapper.h:227
Used to marshal pointers into hashes for db storage.
Definition: chain.h:318
uint256 hashPrev
Definition: chain.h:328
uint256 ConstructBlockHash() const
Definition: chain.h:362
Chainstate stores and provides an API to update our local knowledge of the current best chain.
Definition: validation.h:550
CChain m_chain
The current chain of blockheaders we consult and build on.
Definition: validation.h:624
bool LoadGenesisBlock()
Ensures we have a genesis block in the block tree, possibly writing one to disk.
Interface for managing multiple Chainstate objects, where each chainstate is associated with chainsta...
Definition: validation.h:935
Chainstate * HistoricalChainstate() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex())
Return historical chainstate targeting a specific block, if any.
Definition: validation.h:1123
bool IsInitialBlockDownload() const noexcept
Check whether we are doing an initial block download (synchronizing from disk or network)
kernel::Notifications & GetNotifications() const
Definition: validation.h:1007
Chainstate & ActiveChainstate() const
Alternatives to CurrentChainstate() used by older code to query latest chainstate information without...
const util::SignalInterrupt & m_interrupt
Definition: validation.h:1029
void LoadExternalBlockFile(AutoFile &file_in, FlatFilePos *dbp=nullptr, std::multimap< uint256, FlatFilePos > *blocks_with_unknown_parent=nullptr)
Import blocks from an external file.
const CChainParams & GetParams() const
Definition: validation.h:1002
node::BlockManager m_blockman
A single BlockManager instance is shared across each constructed chainstate to avoid duplicating bloc...
Definition: validation.h:1033
Fast randomness source.
Definition: random.h:386
void fillrand(std::span< std::byte > output) noexcept
Fill a byte span with random bytes.
Definition: random.cpp:626
FlatFileSeq represents a sequence of numbered files storing raw data.
Definition: flatfile.h:42
FILE * Open(const FlatFilePos &pos, bool read_only=false) const
Open a handle to the file at the given position.
Definition: flatfile.cpp:34
fs::path FileName(const FlatFilePos &pos) const
Get the name of the file at the given position.
Definition: flatfile.cpp:29
bool Flush(const FlatFilePos &pos, bool finalize=false) const
Commit a file to disk, and optionally truncate off extra pre-allocated bytes if final.
Definition: flatfile.cpp:87
size_t Allocate(const FlatFilePos &pos, size_t add_size, bool &out_of_space) const
Allocate additional space in a file after the given starting position.
Definition: flatfile.cpp:58
Reads data from an underlying stream, while hashing the read data.
Definition: hash.h:151
A writer stream (for serialization) that computes a 256-bit hash.
Definition: hash.h:101
Minimal stream for reading from an existing byte array by std::span.
Definition: streams.h:83
constexpr bool IsNull() const
Definition: uint256.h:48
void WriteBatchSync(const std::vector< std::pair< int, const CBlockFileInfo * > > &fileInfo, int nLastFile, const std::vector< const CBlockIndex * > &blockinfo)
bool ReadLastBlockFile(int &nFile)
void WriteReindexing(bool fReindexing)
bool ReadFlag(const std::string &name, bool &fValue)
bool ReadBlockFileInfo(int nFile, CBlockFileInfo &info)
void ReadReindexing(bool &fReindexing)
void WriteFlag(const std::string &name, bool fValue)
uint32_t nSize
number of used bytes of block file
Definition: blockstorage.h:60
std::string ToString() const
uint64_t nTimeFirst
earliest time of block in file
Definition: blockstorage.h:64
uint64_t nTimeLast
latest time of block in file
Definition: blockstorage.h:65
uint32_t nHeightFirst
lowest height of block in file
Definition: blockstorage.h:62
uint32_t nBlocks
number of blocks stored in file
Definition: blockstorage.h:59
uint32_t nHeightLast
highest height of block in file
Definition: blockstorage.h:63
virtual void fatalError(const bilingual_str &message)
The fatal error notification is sent to notify the user when an error occurs in kernel code that can'...
virtual void flushError(const bilingual_str &message)
The flush error notification is sent to notify the user that an error occurred while flushing block d...
const kernel::BlockManagerOpts m_opts
Definition: blockstorage.h:301
std::set< int > m_dirty_fileinfo
Dirty block file entries.
Definition: blockstorage.h:313
const FlatFileSeq m_undo_file_seq
Definition: blockstorage.h:304
RecursiveMutex cs_LastBlockFile
Definition: blockstorage.h:256
const CChainParams & GetParams() const
Definition: blockstorage.h:197
void PruneOneBlockFile(int fileNumber) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
Mark one block file as pruned (modify associated database entries)
bool FlushChainstateBlockFile(int tip_height)
void FindFilesToPrune(std::set< int > &setFilesToPrune, int last_prune, const Chainstate &chain, ChainstateManager &chainman)
Prune block and undo files (blk???.dat and rev???.dat) so that the disk space used is less than a use...
void UpdateBlockInfo(const CBlock &block, unsigned int nHeight, const FlatFilePos &pos)
Update blockfile info while processing a block during reindex.
const Obfuscation m_obfuscation
Definition: blockstorage.h:289
CBlockIndex * LookupBlockIndex(const uint256 &hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
bool ReadBlockUndo(CBlockUndo &blockundo, const CBlockIndex &index) const
BlockfileType BlockfileTypeForHeight(int height)
std::atomic_bool m_blockfiles_indexed
Whether all blockfiles have been added to the block tree database.
Definition: blockstorage.h:330
std::vector< CBlockIndex * > GetAllBlockIndices() EXCLUSIVE_LOCKS_REQUIRED(std::multimap< CBlockIndex *, CBlockIndex * > m_blocks_unlinked
All pairs A->B, where A (or one of its ancestors) misses transactions, but B has transactions.
Definition: blockstorage.h:348
const Consensus::Params & GetConsensus() const
Definition: blockstorage.h:198
BlockManager(const util::SignalInterrupt &interrupt, Options opts)
std::set< CBlockIndex * > m_dirty_blockindex
Dirty block index entries.
Definition: blockstorage.h:310
fs::path GetBlockPosFilename(const FlatFilePos &pos) const
Translation to a filesystem path.
bool FlushBlockFile(int blockfile_num, bool fFinalize, bool finalize_undo)
Return false if block file or undo file flushing fails.
uint64_t GetPruneTarget() const
Attempt to stay below this number of bytes of block files.
Definition: blockstorage.h:407
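A hedged fragment showing how the prune-related accessors above might be queried together; it assumes node code where a BlockManager named blockman is already in scope:
// Fragment only: blockman is assumed to exist in the caller.
if (blockman.IsPruneMode()) {
    const uint64_t target{blockman.GetPruneTarget()};
    const uint64_t used{blockman.CalculateCurrentUsage()};
    LogInfo("Prune target: %d MiB, current block/undo usage: %d MiB\n",
            target / (1024 * 1024), used / (1024 * 1024));
}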
int MaxBlockfileNum() const EXCLUSIVE_LOCKS_REQUIRED(cs_LastBlockFile)
Definition: blockstorage.h:273
void UnlinkPrunedFiles(const std::set< int > &setFilesToPrune) const
Actually unlink the specified files.
void WriteBlockIndexDB() EXCLUSIVE_LOCKS_REQUIRED(cs_main)
bool LoadBlockIndexDB(const std::optional< uint256 > &snapshot_blockhash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
void ScanAndUnlinkAlreadyPrunedFiles() EXCLUSIVE_LOCKS_REQUIRED(cs_main)
Remove any pruned block & undo files that are still on disk.
CBlockIndex * AddToBlockIndex(const CBlockHeader &block, CBlockIndex *&best_header) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
Definition: blockstorage.h:369
FlatFilePos FindNextBlockPos(unsigned int nAddSize, unsigned int nHeight, uint64_t nTime)
Helper function performing various preparations before a block can be saved to disk: Returns the corr...
const bool m_prune_mode
Definition: blockstorage.h:287
bool FlushUndoFile(int block_file, bool finalize=false)
Return false if undo file flushing fails.
uint64_t CalculateCurrentUsage()
Calculate the amount of disk space the block & undo files currently use.
const util::SignalInterrupt & m_interrupt
Definition: blockstorage.h:321
ReadRawBlockResult ReadRawBlock(const FlatFilePos &pos, std::optional< std::pair< size_t, size_t > > block_part=std::nullopt) const
const FlatFileSeq m_block_file_seq
Definition: blockstorage.h:303
CBlockIndex * InsertBlockIndex(const uint256 &hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
Create a new block index entry for a given block hash.
bool ReadBlock(CBlock &block, const FlatFilePos &pos, const std::optional< uint256 > &expected_hash) const
Functions for disk access for blocks.
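A hedged usage fragment for ReadBlock, assuming node code that already has a BlockManager (blockman) and a CBlockIndex (index) in scope and holds the locks these accessors require:
// Fragment only: blockman and index are assumed to exist in the caller.
CBlock block;
const FlatFilePos pos{index.GetBlockPos()};
if (pos.IsNull() || !blockman.ReadBlock(block, pos, index.GetBlockHash())) {
    LogError("Failed to read block %s from disk\n", index.GetBlockHash().ToString());
} else {
    // block now holds the fully deserialized block, checked against the
    // expected hash passed as the third argument.
}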
bool m_check_for_pruning
Global flag to indicate we should check to see if there are block/undo files that should be deleted.
Definition: blockstorage.h:285
bool FindUndoPos(BlockValidationState &state, int nFile, FlatFilePos &pos, unsigned int nAddSize)
bool IsPruneMode() const
Whether running in -prune mode.
Definition: blockstorage.h:404
bool CheckBlockDataAvailability(const CBlockIndex &upper_block, const CBlockIndex &lower_block) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
Check if all blocks in the [upper_block, lower_block] range have data available.
const CBlockIndex & GetFirstBlock(const CBlockIndex &upper_block LIFETIMEBOUND, uint32_t status_mask, const CBlockIndex *lower_block LIFETIMEBOUND=nullptr) const EXCLUSIVE_LOCKS_REQUIRED(cs_main)
bool m_have_pruned
Definition: blockstorage.h:449
void CleanupBlockRevFiles() const
std::atomic< bool > m_importing
Definition: blockstorage.h:322
bool WriteBlockUndo(const CBlockUndo &blockundo, BlockValidationState &state, CBlockIndex &block) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
FlatFilePos WriteBlock(const CBlock &block, int nHeight)
Store block on disk and update block file statistics.
Definition: blockstorage.h:393
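A hedged fragment for WriteBlock, assuming node code where a BlockManager (blockman), a CBlock (block) and the block's height (nHeight) already exist:
// Fragment only: error handling is reduced to a log line.
const FlatFilePos pos{blockman.WriteBlock(block, nHeight)};
if (pos.IsNull()) {
    LogError("Failed to write block to disk\n");
} else {
    // pos identifies the blk?????.dat file and offset where the block was
    // written, preceded by the 8-byte storage header.
}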
bool IsBlockPruned(const CBlockIndex &block) const EXCLUSIVE_LOCKS_REQUIRED(cs_main)
Check whether the block associated with this index entry is pruned or not.
void UpdatePruneLock(const std::string &name, const PruneLockInfo &lock_info) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
AutoFile OpenBlockFile(const FlatFilePos &pos, bool fReadOnly) const
Definition: blockstorage.h:458
std::vector< CBlockFileInfo > m_blockfile_info
Definition: blockstorage.h:307
CBlockFileInfo * GetBlockFileInfo(size_t n)
Get block file info entry for one block file.
void FindFilesToPruneManual(std::set< int > &setFilesToPrune, int nManualPruneHeight, const Chainstate &chain)
bool LoadBlockIndex(const std::optional< uint256 > &snapshot_blockhash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
Load the blocktree off disk and into memory.
AutoFile OpenUndoFile(const FlatFilePos &pos, bool fReadOnly=false) const
Open an undo file (rev?????.dat)
std::optional< int > m_snapshot_height
The height of the base block of an assumeutxo snapshot, if one is in use.
Definition: blockstorage.h:346
ImportingNow(std::atomic< bool > &importing)
std::atomic< bool > & m_importing
256-bit opaque blob.
Definition: uint256.h:195
The util::Expected class provides a standard way for low-level functions to return either error value...
Definition: expected.h:45
Helper class that manages an interrupt flag, and allows a thread or signal to interrupt another threa...
The util::Unexpected class represents an unexpected value stored in util::Expected.
Definition: expected.h:22
RecursiveMutex cs_main
Mutex to guard access to validation specific variables, such as reading or changing the chainstate.
Definition: cs_main.cpp:8
static bool exists(const path &p)
Definition: fs.h:95
static std::string PathToString(const path &path)
Convert path object to a byte string.
Definition: fs.h:157
std::string HexStr(const std::span< const uint8_t > s)
Convert a span of bytes to a lower-case hexadecimal string.
Definition: hex_base.cpp:30
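A small usage example for HexStr, assuming it is built inside the Bitcoin Core source tree:
#include <crypto/hex_base.h>

#include <cstdint>
#include <iostream>
#include <vector>

int main()
{
    const std::vector<uint8_t> bytes{0xde, 0xad, 0xbe, 0xef};
    // Each byte becomes two lower-case hex characters; prints "deadbeef".
    std::cout << HexStr(bytes) << '\n';
}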
is a home for simple enum and struct type definitions that can be used internally by functions in the...
#define LogWarning(...)
Definition: log.h:96
#define LogInfo(...)
Definition: log.h:95
#define LogError(...)
Definition: log.h:97
#define LogDebug(category,...)
Definition: log.h:115
unsigned int nHeight
std::array< uint8_t, 4 > MessageStartChars
@ BLOCKSTORAGE
Definition: categories.h:43
@ PRUNE
Definition: categories.h:30
FILE * fopen(const fs::path &p, const char *mode)
Definition: fs.cpp:25
static constexpr uint8_t DB_REINDEX_FLAG
static constexpr uint8_t DB_FLAG
static constexpr uint8_t DB_BLOCK_INDEX
static constexpr uint8_t DB_LAST_BLOCK
static constexpr uint8_t DB_BLOCK_FILES
Definition: messages.h:21
static const unsigned int UNDOFILE_CHUNK_SIZE
The pre-allocation chunk size for rev?????.dat files (since 0.8)
Definition: blockstorage.h:121
BlockfileType
Definition: blockstorage.h:151
@ NORMAL
Definition: blockstorage.h:153
@ ASSUMED
Definition: blockstorage.h:154
static auto InitBlocksdirXorKey(const BlockManager::Options &opts)
static const unsigned int BLOCKFILE_CHUNK_SIZE
The pre-allocation chunk size for blk?????.dat files (since 0.8)
Definition: blockstorage.h:119
static constexpr uint32_t STORAGE_HEADER_BYTES
Size of header written by WriteBlock before a serialized CBlock (8 bytes)
Definition: blockstorage.h:126
std::ostream & operator<<(std::ostream &os, const BlockfileType &type)
static constexpr uint32_t UNDO_DATA_DISK_OVERHEAD
Total overhead when writing undo data: header (8 bytes) plus checksum (32 bytes)
Definition: blockstorage.h:129
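The two on-disk overhead constants above add up as follows; this stand-alone sketch uses illustrative names, and only the byte counts come from the descriptions:
#include <cstdint>

// 4-byte network magic (message start) plus 4-byte serialized size field.
static constexpr uint32_t kHeaderBytes{4 + 4};               // corresponds to STORAGE_HEADER_BYTES (8)
// Undo data additionally carries a 32-byte checksum on disk.
static constexpr uint32_t kUndoOverhead{kHeaderBytes + 32};  // corresponds to UNDO_DATA_DISK_OVERHEAD (40)

static_assert(kHeaderBytes == 8);
static_assert(kUndoOverhead == 40);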
static const unsigned int MAX_BLOCKFILE_SIZE
The maximum size of a blk?????.dat file (since 0.8)
Definition: blockstorage.h:123
void ImportBlocks(ChainstateManager &chainman, std::span< const fs::path > import_paths)
bilingual_str ErrorString(const Result< T > &result)
Definition: result.h:93
std::string ToString(const T &t)
Locale-independent version of std::to_string.
Definition: string.h:246
T SaturatingAdd(const T i, const T j) noexcept
Definition: overflow.h:35
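An example of SaturatingAdd clamping instead of wrapping, assuming it is built inside the Bitcoin Core tree:
#include <util/overflow.h>

#include <cstdint>
#include <iostream>

int main()
{
    const uint32_t a{4'000'000'000U};
    const uint32_t b{1'000'000'000U};
    // Plain a + b would wrap modulo 2^32; SaturatingAdd clamps at the
    // type's maximum instead and prints 4294967295.
    std::cout << SaturatingAdd(a, b) << '\n';
}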
bool CheckProofOfWork(uint256 hash, unsigned int nBits, const Consensus::Params &params)
Check whether a block hash satisfies the proof-of-work requirement specified by nBits.
Definition: pow.cpp:140
static constexpr TransactionSerParams TX_WITH_WITNESS
Definition: transaction.h:180
const char * name
Definition: rest.cpp:48
static constexpr uint64_t MAX_SIZE
The maximum size of a serialized object in bytes or number of elements (for eg vectors) when the size...
Definition: serialize.h:32
uint64_t GetSerializeSize(const T &t)
Definition: serialize.h:1095
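A small example for GetSerializeSize, assuming it is built inside the Bitcoin Core tree:
#include <serialize.h>
#include <uint256.h>

#include <iostream>

int main()
{
    // A uint256 serializes to exactly 32 bytes.
    std::cout << GetSerializeSize(uint256{}) << '\n';
}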
bool CheckSignetBlockSolution(const CBlock &block, const Consensus::Params &consensusParams)
Extract signature and check whether a block has a valid solution.
Definition: signet.cpp:126
Holds configuration for use during UTXO snapshot load and validation.
Definition: chainparams.h:34
uint64_t m_chain_tx_count
Used to populate the m_chain_tx_count value, which is used during BlockManager::LoadBlockIndex().
Definition: chainparams.h:44
Parameters that influence chain consensus.
Definition: params.h:84
bool wipe_data
If true, remove all existing data.
Definition: dbwrapper.h:41
uint32_t nPos
Definition: flatfile.h:17
std::string ToString() const
Definition: flatfile.cpp:24
bool IsNull() const
Definition: flatfile.h:32
int32_t nFile
Definition: flatfile.h:16
An options struct for BlockManager, more ergonomically referred to as BlockManager::Options due to th...
Notifications & notifications
bool operator()(const CBlockIndex *pa, const CBlockIndex *pb) const
bool operator()(const CBlockIndex *pa, const CBlockIndex *pb) const
#define LOCK2(cs1, cs2)
Definition: sync.h:259
#define LOCK(cs)
Definition: sync.h:258
#define WITH_LOCK(cs, code)
Run code while locking a mutex.
Definition: sync.h:289
std::string SysErrorString(int err)
Return system error string from errno value.
Definition: syserror.cpp:17
static int count
#define EXCLUSIVE_LOCKS_REQUIRED(...)
Definition: threadsafety.h:51
#define strprintf
Format arguments and return the string or write to given std::ostream (see tinyformat::format doc for...
Definition: tinyformat.h:1172
consteval auto _(util::TranslatedLiteral str)
Definition: translation.h:79
std::string FormatISO8601Date(int64_t nTime)
Definition: time.cpp:92
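A small example for FormatISO8601Date, assuming it is built inside the Bitcoin Core tree:
#include <util/time.h>

#include <iostream>

int main()
{
    // Unix timestamp 0 formats as the date 1970-01-01.
    std::cout << FormatISO8601Date(0) << '\n';
}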
bool FatalError(Notifications &notifications, BlockValidationState &state, const bilingual_str &message)
AssertLockHeld(pool.cs)
assert(!tx.IsCoinBase())
static const uint64_t MIN_DISK_SPACE_FOR_BLOCK_FILES
Definition: validation.h:86