#include <initializer_list>

    std::unique_ptr<PartiallyDownloadedBlock> partialBlock;

    std::atomic<ServiceFlags> m_their_services{NODE_NONE};
    const bool m_is_inbound;
    Mutex m_misbehavior_mutex;
    bool m_should_discourage GUARDED_BY(m_misbehavior_mutex){false};
    Mutex m_block_inv_mutex;
    std::vector<uint256> m_blocks_for_inv_relay GUARDED_BY(m_block_inv_mutex);
    std::vector<uint256> m_blocks_for_headers_relay GUARDED_BY(m_block_inv_mutex);
    std::atomic<int> m_starting_height{-1};
    std::atomic<uint64_t> m_ping_nonce_sent{0};
    std::atomic<std::chrono::microseconds> m_ping_start{0us};
    std::atomic<bool> m_ping_queued{false};
    std::atomic<bool> m_wtxid_relay{false};

    bool m_relay_txs GUARDED_BY(m_bloom_filter_mutex){false};
    std::unique_ptr<CBloomFilter> m_bloom_filter PT_GUARDED_BY(m_bloom_filter_mutex) GUARDED_BY(m_bloom_filter_mutex){nullptr};
    std::set<Wtxid> m_tx_inventory_to_send GUARDED_BY(m_tx_inventory_mutex);
    bool m_send_mempool GUARDED_BY(m_tx_inventory_mutex){false};
    std::chrono::microseconds m_next_inv_send_time GUARDED_BY(m_tx_inventory_mutex){0};
    uint64_t m_last_inv_sequence GUARDED_BY(m_tx_inventory_mutex){1};
    std::atomic<CAmount> m_fee_filter_received{0};

    LOCK(m_tx_relay_mutex);
    m_tx_relay = std::make_unique<Peer::TxRelay>();
    return m_tx_relay.get();

    return WITH_LOCK(m_tx_relay_mutex, return m_tx_relay.get());
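    // SetTxRelay() constructs the Peer::TxRelay state exactly once, under
    // m_tx_relay_mutex; GetTxRelay() returns the same pointer, or nullptr for
    // peers that never negotiated transaction relay.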
    std::atomic_bool m_addr_relay_enabled{false};
    mutable Mutex m_addr_send_times_mutex;
    std::chrono::microseconds m_next_addr_send GUARDED_BY(m_addr_send_times_mutex){0};
    std::chrono::microseconds m_next_local_addr_send GUARDED_BY(m_addr_send_times_mutex){0};
    std::atomic_bool m_wants_addrv2{false};
    std::atomic<uint64_t> m_addr_rate_limited{0};
    std::atomic<uint64_t> m_addr_processed{0};

    Mutex m_getdata_requests_mutex;
    std::deque<CInv> m_getdata_requests GUARDED_BY(m_getdata_requests_mutex);

    Mutex m_headers_sync_mutex;
    std::unique_ptr<HeadersSyncState> m_headers_sync PT_GUARDED_BY(m_headers_sync_mutex) GUARDED_BY(m_headers_sync_mutex) {};
    std::atomic<bool> m_sent_sendheaders{false};
    std::atomic<std::chrono::seconds> m_time_offset{0s};

        , m_our_services{our_services}
        , m_is_inbound{is_inbound}

    mutable Mutex m_tx_relay_mutex;
    std::unique_ptr<TxRelay> m_tx_relay GUARDED_BY(m_tx_relay_mutex);

using PeerRef = std::shared_ptr<Peer>;

    uint256 hashLastUnknownBlock{};
    bool fSyncStarted{false};
    std::chrono::microseconds m_stalling_since{0us};
    std::list<QueuedBlock> vBlocksInFlight;
    std::chrono::microseconds m_downloading_since{0us};
    bool fPreferredDownload{false};
    bool m_requested_hb_cmpctblocks{false};
    bool m_provides_cmpctblocks{false};

    struct ChainSyncTimeoutState {
        std::chrono::seconds m_timeout{0s};
        bool m_sent_getheaders{false};
        bool m_protect{false};

    ChainSyncTimeoutState m_chain_sync;
    int64_t m_last_block_announcement{0};

        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_most_recent_block_mutex, !m_headers_presync_mutex, g_msgproc_mutex, !m_tx_download_mutex);
    void SetBestBlock(int height, std::chrono::seconds time) override
        m_best_height = height;
        m_best_block_time = time;

                        const std::chrono::microseconds time_received, const std::atomic<bool>& interruptMsgProc) override
        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_most_recent_block_mutex, !m_headers_presync_mutex, g_msgproc_mutex, !m_tx_download_mutex);

    void Misbehaving(Peer& peer, const std::string& message);
                                 bool via_compact_block, const std::string& message = "")
    bool MaybeDiscourageAndDisconnect(CNode& pnode, Peer& peer);
                      bool first_time_failure)
    bool ProcessOrphanTx(Peer& peer)
    void ProcessHeadersMessage(CNode& pfrom, Peer& peer,
                               bool via_compact_block)
    bool CheckHeadersAreContinuous(const std::vector<CBlockHeader>& headers) const;
    bool IsContinuationOfLowWorkHeadersSync(Peer& peer, CNode& pfrom,
    bool TryLowWorkHeadersSync(Peer& peer, CNode& pfrom,
    void HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, const CBlockIndex& last_header);
    void UpdatePeerStateForReceivedHeaders(CNode& pfrom, Peer& peer, const CBlockIndex& last_header, bool received_new_header, bool may_have_more_headers)

    template <typename... Args>
    void MakeAndPushMessage(CNode& node, std::string msg_type, Args&&... args) const

    void PushNodeVersion(CNode& pnode, const Peer& peer);
    void MaybeSendPing(CNode& node_to, Peer& peer, std::chrono::microseconds now);

    std::unique_ptr<TxReconciliationTracker> m_txreconciliation;
    std::atomic<int> m_best_height{-1};
    std::atomic<std::chrono::seconds> m_best_block_time{0s};
    const Options m_opts;

    bool RejectIncomingTxs(const CNode& peer) const;

    mutable Mutex m_peer_mutex;
    std::map<NodeId, PeerRef> m_peer_map GUARDED_BY(m_peer_mutex);

    uint32_t GetFetchFlags(const Peer& peer) const;

    std::map<uint64_t, std::chrono::microseconds> m_next_inv_to_inbounds_per_network_key GUARDED_BY(g_msgproc_mutex);
    std::atomic<int> m_wtxid_relay_peers{0};

    std::chrono::microseconds NextInvToInbounds(std::chrono::microseconds now,
                                                std::chrono::seconds average_interval,

    Mutex m_most_recent_block_mutex;
    std::shared_ptr<const CBlock> m_most_recent_block GUARDED_BY(m_most_recent_block_mutex);
    std::shared_ptr<const CBlockHeaderAndShortTxIDs> m_most_recent_compact_block GUARDED_BY(m_most_recent_block_mutex);
    std::unique_ptr<const std::map<GenTxid, CTransactionRef>> m_most_recent_block_txs GUARDED_BY(m_most_recent_block_mutex);

    Mutex m_headers_presync_mutex;
    using HeadersPresyncStats = std::pair<arith_uint256, std::optional<std::pair<int64_t, uint32_t>>>;
    std::map<NodeId, HeadersPresyncStats> m_headers_presync_stats GUARDED_BY(m_headers_presync_mutex) {};
    std::atomic_bool m_headers_presync_should_signal{false};

    std::atomic<std::chrono::seconds> m_last_tip_update{0s};

    void ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic<bool>& interruptMsgProc)
    void ProcessBlock(CNode& node, const std::shared_ptr<const CBlock>& block, bool force_processing, bool min_pow_checked);

    std::vector<std::pair<Wtxid, CTransactionRef>> vExtraTxnForCompact GUARDED_BY(g_msgproc_mutex);
    size_t vExtraTxnForCompactIt GUARDED_BY(g_msgproc_mutex) = 0;

    int64_t ApproximateBestBlockDepth() const;

    void ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& inv)
    bool PrepareBlockFilterRequest(CNode& node, Peer& peer,
                                   const uint256& stop_hash, uint32_t max_height_diff,
    void LogBlockHeader(const CBlockIndex& index, const CNode& peer, bool via_compact_block);
const CNodeState* PeerManagerImpl::State(NodeId pnode) const
    std::map<NodeId, CNodeState>::const_iterator it = m_node_states.find(pnode);
    if (it == m_node_states.end())

    return const_cast<CNodeState*>(std::as_const(*this).State(pnode));

static bool IsAddrCompatible(const Peer& peer, const CAddress& addr)

void PeerManagerImpl::AddAddressKnown(Peer& peer, const CAddress& addr)
    assert(peer.m_addr_known);
    peer.m_addr_known->insert(addr.GetKey());

void PeerManagerImpl::PushAddress(Peer& peer, const CAddress& addr)
    assert(peer.m_addr_known);
    if (addr.IsValid() && !peer.m_addr_known->contains(addr.GetKey()) && IsAddrCompatible(peer, addr)) {
        peer.m_addrs_to_send[m_rng.randrange(peer.m_addrs_to_send.size())] = addr;
        peer.m_addrs_to_send.push_back(addr);

static void AddKnownTx(Peer& peer, const uint256& hash)
    auto tx_relay = peer.GetTxRelay();
    if (!tx_relay) return;
    LOCK(tx_relay->m_tx_inventory_mutex);
    tx_relay->m_tx_inventory_known_filter.insert(hash);

static bool CanServeBlocks(const Peer& peer)
static bool IsLimitedPeer(const Peer& peer)
static bool CanServeWitnesses(const Peer& peer)

std::chrono::microseconds PeerManagerImpl::NextInvToInbounds(std::chrono::microseconds now,
                                                             std::chrono::seconds average_interval,
                                                             uint64_t network_key)
    auto [it, inserted] = m_next_inv_to_inbounds_per_network_key.try_emplace(network_key, 0us);
    auto& timer{it->second};
    timer = now + m_rng.rand_exp_duration(average_interval);
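// NextInvToInbounds: all inbound peers that map to the same network key share one
// next-send timer, advanced by an exponentially distributed delay around
// average_interval, so INV batches to inbound connections fire together rather
// than exposing per-peer timing.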
bool PeerManagerImpl::IsBlockRequested(const uint256& hash)
    return mapBlocksInFlight.count(hash);

bool PeerManagerImpl::IsBlockRequestedFromOutbound(const uint256& hash)
    for (auto range = mapBlocksInFlight.equal_range(hash); range.first != range.second; range.first++) {
        auto [nodeid, block_it] = range.first->second;
        PeerRef peer{GetPeerRef(nodeid)};
        if (peer && !peer->m_is_inbound) return true;

void PeerManagerImpl::RemoveBlockRequest(const uint256& hash, std::optional<NodeId> from_peer)
    auto range = mapBlocksInFlight.equal_range(hash);
    if (range.first == range.second) {
    while (range.first != range.second) {
        const auto& [node_id, list_it]{range.first->second};
        if (from_peer && *from_peer != node_id) {
        if (state.vBlocksInFlight.begin() == list_it) {
            state.m_downloading_since = std::max(state.m_downloading_since, GetTime<std::chrono::microseconds>());
        state.vBlocksInFlight.erase(list_it);
        if (state.vBlocksInFlight.empty()) {
            m_peers_downloading_from--;
        state.m_stalling_since = 0us;
        range.first = mapBlocksInFlight.erase(range.first);

bool PeerManagerImpl::BlockRequested(NodeId nodeid, const CBlockIndex& block, std::list<QueuedBlock>::iterator** pit)
    CNodeState *state = State(nodeid);
    assert(state != nullptr);
    for (auto range = mapBlocksInFlight.equal_range(hash); range.first != range.second; range.first++) {
        if (range.first->second.first == nodeid) {
            *pit = &range.first->second.second;
    RemoveBlockRequest(hash, nodeid);
    std::list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(),
            {&block, std::unique_ptr<PartiallyDownloadedBlock>(pit ? new PartiallyDownloadedBlock(&m_mempool) : nullptr)});
    if (state->vBlocksInFlight.size() == 1) {
        state->m_downloading_since = GetTime<std::chrono::microseconds>();
    m_peers_downloading_from++;
    auto itInFlight = mapBlocksInFlight.insert(std::make_pair(hash, std::make_pair(nodeid, it)));
    *pit = &itInFlight->second.second;
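// mapBlocksInFlight is a multimap keyed by block hash; each entry pairs the
// requesting node with an iterator into that node's vBlocksInFlight list, so a
// request can be dropped from both structures in one pass, and a node's download
// timer starts when its first block goes in flight.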
void PeerManagerImpl::MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid)
    if (m_opts.ignore_incoming_txs) return;
    CNodeState* nodestate = State(nodeid);
    PeerRef peer{GetPeerRef(nodeid)};
    if (!nodestate || !nodestate->m_provides_cmpctblocks) {
    int num_outbound_hb_peers = 0;
    for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin(); it != lNodesAnnouncingHeaderAndIDs.end(); it++) {
        if (*it == nodeid) {
            lNodesAnnouncingHeaderAndIDs.erase(it);
            lNodesAnnouncingHeaderAndIDs.push_back(nodeid);
        PeerRef peer_ref{GetPeerRef(*it)};
        if (peer_ref && !peer_ref->m_is_inbound) ++num_outbound_hb_peers;
    if (peer && peer->m_is_inbound) {
        if (lNodesAnnouncingHeaderAndIDs.size() >= 3 && num_outbound_hb_peers == 1) {
            PeerRef remove_peer{GetPeerRef(lNodesAnnouncingHeaderAndIDs.front())};
            if (remove_peer && !remove_peer->m_is_inbound) {
                std::swap(lNodesAnnouncingHeaderAndIDs.front(), *std::next(lNodesAnnouncingHeaderAndIDs.begin()));
    if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
        m_connman.ForNode(lNodesAnnouncingHeaderAndIDs.front(), [this](CNode* pnodeStop){
            MakeAndPushMessage(*pnodeStop, NetMsgType::SENDCMPCT, false, CMPCTBLOCKS_VERSION);
            pnodeStop->m_bip152_highbandwidth_to = false;
        lNodesAnnouncingHeaderAndIDs.pop_front();
    lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());

bool PeerManagerImpl::TipMayBeStale()
    if (m_last_tip_update.load() == 0s) {
        m_last_tip_update = GetTime<std::chrono::seconds>();
    return m_last_tip_update.load() < GetTime<std::chrono::seconds>() - std::chrono::seconds{consensusParams.nPowTargetSpacing * 3} && mapBlocksInFlight.empty();
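// The tip is considered potentially stale when no block has been connected for
// roughly three times the target block spacing and nothing is currently being
// downloaded, which is what the return expression above encodes.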
int64_t PeerManagerImpl::ApproximateBestBlockDepth() const

bool PeerManagerImpl::CanDirectFetch()

    if (state->pindexBestKnownBlock && pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight))
    if (state->pindexBestHeaderSent && pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight))

void PeerManagerImpl::ProcessBlockAvailability(NodeId nodeid) {
    CNodeState *state = State(nodeid);
    assert(state != nullptr);
    if (!state->hashLastUnknownBlock.IsNull()) {
        if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
            state->pindexBestKnownBlock = pindex;
        state->hashLastUnknownBlock.SetNull();

void PeerManagerImpl::UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) {
    CNodeState *state = State(nodeid);
    assert(state != nullptr);
    ProcessBlockAvailability(nodeid);
    if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
        state->pindexBestKnownBlock = pindex;
    state->hashLastUnknownBlock = hash;

void PeerManagerImpl::FindNextBlocksToDownload(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller)
    vBlocks.reserve(vBlocks.size() + count);
    CNodeState *state = State(peer.m_id);
    assert(state != nullptr);
    ProcessBlockAvailability(peer.m_id);
    if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->nChainWork < m_chainman.ActiveChain().Tip()->nChainWork || state->pindexBestKnownBlock->nChainWork < m_chainman.MinimumChainWork()) {
    const CBlockIndex* snap_base{m_chainman.GetSnapshotBaseBlock()};
    if (snap_base && state->pindexBestKnownBlock->GetAncestor(snap_base->nHeight) != snap_base) {
        LogDebug(BCLog::NET, "Not downloading blocks from peer=%d, which doesn't have the snapshot block in its best chain.\n", peer.m_id);
    if (state->pindexLastCommonBlock == nullptr ||
        (snap_base && state->pindexLastCommonBlock->nHeight < snap_base->nHeight)) {
        state->pindexLastCommonBlock = m_chainman.ActiveChain()[std::min(state->pindexBestKnownBlock->nHeight, m_chainman.ActiveChain().Height())];
    state->pindexLastCommonBlock = LastCommonAncestor(state->pindexLastCommonBlock, state->pindexBestKnownBlock);
    if (state->pindexLastCommonBlock == state->pindexBestKnownBlock)
    const CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
    FindNextBlocks(vBlocks, peer, state, pindexWalk, count, nWindowEnd, &m_chainman.ActiveChain(), &nodeStaller);
void PeerManagerImpl::TryDownloadingHistoricalBlocks(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, const CBlockIndex *from_tip, const CBlockIndex* target_block)
    if (vBlocks.size() >= count) {
    vBlocks.reserve(count);
    if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->GetAncestor(target_block->nHeight) != target_block) {

void PeerManagerImpl::FindNextBlocks(std::vector<const CBlockIndex*>& vBlocks, const Peer& peer, CNodeState *state, const CBlockIndex *pindexWalk, unsigned int count, int nWindowEnd, const CChain* activeChain, NodeId* nodeStaller)
    std::vector<const CBlockIndex*> vToFetch;
    int nMaxHeight = std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
    bool is_limited_peer = IsLimitedPeer(peer);
    while (pindexWalk->nHeight < nMaxHeight) {
        int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight, std::max<int>(count - vBlocks.size(), 128));
        vToFetch.resize(nToFetch);
        pindexWalk = state->pindexBestKnownBlock->GetAncestor(pindexWalk->nHeight + nToFetch);
        vToFetch[nToFetch - 1] = pindexWalk;
        for (unsigned int i = nToFetch - 1; i > 0; i--) {
            vToFetch[i - 1] = vToFetch[i]->pprev;
        state->pindexLastCommonBlock = pindex;
        if (waitingfor == -1) {
            waitingfor = mapBlocksInFlight.lower_bound(pindex->GetBlockHash())->second.first;
        if (pindex->nHeight > nWindowEnd) {
            if (vBlocks.size() == 0 && waitingfor != peer.m_id) {
                if (nodeStaller) *nodeStaller = waitingfor;
        vBlocks.push_back(pindex);
        if (vBlocks.size() == count) {
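// FindNextBlocks walks forward from the last block both sides agree on, collecting
// ancestors of the peer's best known block in batches of up to 128 candidates per
// iteration. Blocks past the download window are not queued; if the window is full
// and nothing could be scheduled, the peer already downloading the blocking block
// is reported back through nodeStaller.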
void PeerManagerImpl::PushNodeVersion(CNode& pnode, const Peer& peer)
    uint64_t my_services{peer.m_our_services};
    const int64_t nTime{count_seconds(GetTime<std::chrono::seconds>())};
    const int nNodeStartingHeight{m_best_height};
    const bool tx_relay{!RejectIncomingTxs(pnode)};
    LogDebug(BCLog::NET, "send version message: version %d, blocks=%d, them=%s, txrelay=%d, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addr_you.ToStringAddrPort(), tx_relay, nodeid);

void PeerManagerImpl::UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds)
    if (state) state->m_last_block_announcement = time_in_seconds;

    m_node_states.try_emplace(m_node_states.end(), nodeid);
    WITH_LOCK(m_tx_download_mutex, m_txdownloadman.CheckIsEmpty(nodeid));
    PeerRef peer = std::make_shared<Peer>(nodeid, our_services, node.IsInboundConn());
    m_peer_map.emplace_hint(m_peer_map.end(), nodeid, peer);

void PeerManagerImpl::ReattemptInitialBroadcast(CScheduler& scheduler)
    for (const auto& txid : unbroadcast_txids) {
        if (tx != nullptr) {
            RelayTransaction(txid, tx->GetWitnessHash());
    scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); }, delta);
void PeerManagerImpl::FinalizeNode(const CNode& node)
    PeerRef peer = RemovePeer(nodeid);
    m_wtxid_relay_peers -= peer->m_wtxid_relay;
    assert(m_wtxid_relay_peers >= 0);
    CNodeState *state = State(nodeid);
    assert(state != nullptr);
    if (state->fSyncStarted)
    for (const QueuedBlock& entry : state->vBlocksInFlight) {
        auto range = mapBlocksInFlight.equal_range(entry.pindex->GetBlockHash());
        while (range.first != range.second) {
            auto [node_id, list_it] = range.first->second;
            if (node_id != nodeid) {
            range.first = mapBlocksInFlight.erase(range.first);
    LOCK(m_tx_download_mutex);
    m_txdownloadman.DisconnectedPeer(nodeid);
    if (m_txreconciliation) m_txreconciliation->ForgetPeer(nodeid);
    m_num_preferred_download_peers -= state->fPreferredDownload;
    m_peers_downloading_from -= (!state->vBlocksInFlight.empty());
    assert(m_peers_downloading_from >= 0);
    m_outbound_peers_with_protect_from_disconnect -= state->m_chain_sync.m_protect;
    assert(m_outbound_peers_with_protect_from_disconnect >= 0);
    m_node_states.erase(nodeid);
    if (m_node_states.empty()) {
        assert(mapBlocksInFlight.empty());
        assert(m_num_preferred_download_peers == 0);
        assert(m_peers_downloading_from == 0);
        assert(m_outbound_peers_with_protect_from_disconnect == 0);
        assert(m_wtxid_relay_peers == 0);
        WITH_LOCK(m_tx_download_mutex, m_txdownloadman.CheckIsEmpty());
    if (node.fSuccessfullyConnected &&
        !node.IsBlockOnlyConn() && !node.IsInboundConn()) {
    LOCK(m_headers_presync_mutex);
    m_headers_presync_stats.erase(nodeid);

bool PeerManagerImpl::HasAllDesirableServiceFlags(ServiceFlags services) const
    return !(GetDesirableServiceFlags(services) & (~services));
PeerRef PeerManagerImpl::GetPeerRef(NodeId id) const
    auto it = m_peer_map.find(id);
    return it != m_peer_map.end() ? it->second : nullptr;

PeerRef PeerManagerImpl::RemovePeer(NodeId id)
    auto it = m_peer_map.find(id);
    if (it != m_peer_map.end()) {
        ret = std::move(it->second);
        m_peer_map.erase(it);

    const CNodeState* state = State(nodeid);
    if (state == nullptr)
    stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1;
    stats.nCommonHeight = state->pindexLastCommonBlock ? state->pindexLastCommonBlock->nHeight : -1;
    for (const QueuedBlock& queue : state->vBlocksInFlight) {

    PeerRef peer = GetPeerRef(nodeid);
    if (peer == nullptr) return false;
    auto ping_wait{0us};
    if ((0 != peer->m_ping_nonce_sent) && (0 != peer->m_ping_start.load().count())) {
        ping_wait = GetTime<std::chrono::microseconds>() - peer->m_ping_start.load();
    if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
        LOCK(tx_relay->m_tx_inventory_mutex);
        stats.m_inv_to_send = tx_relay->m_tx_inventory_to_send.size();
    LOCK(peer->m_headers_sync_mutex);
    if (peer->m_headers_sync) {

std::vector<node::TxOrphanage::OrphanInfo> PeerManagerImpl::GetOrphanTransactions()
    LOCK(m_tx_download_mutex);
    return m_txdownloadman.GetOrphanTransactions();

    .ignores_incoming_txs = m_opts.ignore_incoming_txs,
void PeerManagerImpl::AddToCompactExtraTransactions(const CTransactionRef& tx)
    if (m_opts.max_extra_txs <= 0)
    if (!vExtraTxnForCompact.size())
        vExtraTxnForCompact.resize(m_opts.max_extra_txs);
    vExtraTxnForCompact[vExtraTxnForCompactIt] = std::make_pair(tx->GetWitnessHash(), tx);
    vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % m_opts.max_extra_txs;
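// vExtraTxnForCompact acts as a fixed-size ring buffer of max_extra_txs
// (wtxid, tx) pairs: the index wraps around, so the oldest entry is overwritten
// and recently seen extra transactions remain available for compact block
// reconstruction.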
void PeerManagerImpl::Misbehaving(Peer& peer, const std::string& message)
    LOCK(peer.m_misbehavior_mutex);
    const std::string message_prefixed = message.empty() ? "" : (": " + message);
    peer.m_should_discourage = true;

                                              bool via_compact_block, const std::string& message)
    PeerRef peer{GetPeerRef(nodeid)};
    if (!via_compact_block) {
        if (peer) Misbehaving(*peer, message);
    if (peer && !via_compact_block && !peer->m_is_inbound) {
        if (peer) Misbehaving(*peer, message);
    if (peer) Misbehaving(*peer, message);
    if (peer) Misbehaving(*peer, message);
    if (message != "") {
bool PeerManagerImpl::BlockRequestAllowed(const CBlockIndex* pindex)

std::optional<std::string> PeerManagerImpl::FetchBlock(NodeId peer_id, const CBlockIndex& block_index)
    PeerRef peer = GetPeerRef(peer_id);
    if (peer == nullptr) return "Peer does not exist";
    if (!CanServeWitnesses(*peer)) return "Pre-SegWit peer";
    RemoveBlockRequest(block_index.GetBlockHash(), std::nullopt);
    if (!BlockRequested(peer_id, block_index)) return "Already requested from this peer";
    if (!success) return "Peer not fully connected";
    return std::nullopt;

    return std::make_unique<PeerManagerImpl>(connman, addrman, banman, chainman, pool, warnings, opts);

    : m_rng{opts.deterministic_rng},
      m_chainparams(chainman.GetParams()),
      m_chainman(chainman),
      m_txdownloadman(node::TxDownloadOptions{pool, m_rng, opts.deterministic_rng}),
      m_warnings{warnings},

    if (opts.reconcile_txs) {

void PeerManagerImpl::StartScheduledTasks(CScheduler& scheduler)
    scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); }, delta);
void PeerManagerImpl::ActiveTipChange(const CBlockIndex& new_tip, bool is_ibd)
    LOCK(m_tx_download_mutex);
    m_txdownloadman.ActiveTipChange();

void PeerManagerImpl::BlockConnected(
    const std::shared_ptr<const CBlock>& pblock,
    m_last_tip_update = GetTime<std::chrono::seconds>();
    auto stalling_timeout = m_block_stalling_timeout.load();
    if (m_block_stalling_timeout.compare_exchange_strong(stalling_timeout, new_timeout)) {
    LOCK(m_tx_download_mutex);
    m_txdownloadman.BlockConnected(pblock);

void PeerManagerImpl::BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex)
    LOCK(m_tx_download_mutex);
    m_txdownloadman.BlockDisconnected();

void PeerManagerImpl::NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& pblock)
    auto pcmpctblock = std::make_shared<const CBlockHeaderAndShortTxIDs>(*pblock, FastRandomContext().rand64());
    if (pindex->nHeight <= m_highest_fast_announce)
    m_highest_fast_announce = pindex->nHeight;
    uint256 hashBlock(pblock->GetHash());
    const std::shared_future<CSerializedNetMsg> lazy_ser{
    auto most_recent_block_txs = std::make_unique<std::map<GenTxid, CTransactionRef>>();
    for (const auto& tx : pblock->vtx) {
        most_recent_block_txs->emplace(tx->GetHash(), tx);
        most_recent_block_txs->emplace(tx->GetWitnessHash(), tx);
    LOCK(m_most_recent_block_mutex);
    m_most_recent_block_hash = hashBlock;
    m_most_recent_block = pblock;
    m_most_recent_compact_block = pcmpctblock;
    m_most_recent_block_txs = std::move(most_recent_block_txs);
    ProcessBlockAvailability(pnode->GetId());
    if (state.m_requested_hb_cmpctblocks && !PeerHasHeader(&state, pindex) && PeerHasHeader(&state, pindex->pprev)) {
        LogDebug(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", "PeerManager::NewPoWValidBlock",
            hashBlock.ToString(), pnode->GetId());
        PushMessage(*pnode, ser_cmpctblock.Copy());
        state.pindexBestHeaderSent = pindex;
void PeerManagerImpl::UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload)
    SetBestBlock(pindexNew->nHeight, std::chrono::seconds{pindexNew->GetBlockTime()});
    if (fInitialDownload) return;
    std::vector<uint256> vHashes;
    while (pindexToAnnounce != pindexFork) {
        pindexToAnnounce = pindexToAnnounce->pprev;
    for (auto& it : m_peer_map) {
        Peer& peer = *it.second;
        LOCK(peer.m_block_inv_mutex);
        for (const uint256& hash : vHashes | std::views::reverse) {
            peer.m_blocks_for_headers_relay.push_back(hash);

void PeerManagerImpl::BlockChecked(const std::shared_ptr<const CBlock>& block, const BlockValidationState& state)
    const uint256 hash(block->GetHash());
    std::map<uint256, std::pair<NodeId, bool>>::iterator it = mapBlockSource.find(hash);
        it != mapBlockSource.end() &&
        State(it->second.first)) {
        MaybePunishNodeForBlock(it->second.first, state, !it->second.second);
        mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) {
        if (it != mapBlockSource.end()) {
            MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first);
    if (it != mapBlockSource.end())
        mapBlockSource.erase(it);

bool PeerManagerImpl::AlreadyHaveBlock(const uint256& block_hash)

void PeerManagerImpl::SendPings()
    for (auto& it : m_peer_map) it.second->m_ping_queued = true;
void PeerManagerImpl::RelayTransaction(const Txid& txid, const Wtxid& wtxid)
    for (auto& it : m_peer_map) {
        Peer& peer = *it.second;
        auto tx_relay = peer.GetTxRelay();
        if (!tx_relay) continue;
        LOCK(tx_relay->m_tx_inventory_mutex);
        if (tx_relay->m_next_inv_send_time == 0s) continue;
        if (!tx_relay->m_tx_inventory_known_filter.contains(hash)) {
            tx_relay->m_tx_inventory_to_send.insert(wtxid);

void PeerManagerImpl::RelayAddress(NodeId originator,
    const auto current_time{GetTime<std::chrono::seconds>()};
    unsigned int nRelayNodes = (fReachable || (hasher.Finalize() & 1)) ? 2 : 1;
    std::array<std::pair<uint64_t, Peer*>, 2> best{{{0, nullptr}, {0, nullptr}}};
    assert(nRelayNodes <= best.size());
    for (auto& [id, peer] : m_peer_map) {
        if (peer->m_addr_relay_enabled && id != originator && IsAddrCompatible(*peer, addr)) {
            for (unsigned int i = 0; i < nRelayNodes; i++) {
                if (hashKey > best[i].first) {
                    std::copy(best.begin() + i, best.begin() + nRelayNodes - 1, best.begin() + i + 1);
                    best[i] = std::make_pair(hashKey, peer.get());
    for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) {
        PushAddress(*best[i].second, addr);
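// RelayAddress forwards each address to at most nRelayNodes peers (two when the
// network is reachable or the hash's low bit says so, otherwise one), keeping the
// addr-relay-enabled peers with the largest per-peer hashKey as relay targets.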
void PeerManagerImpl::ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& inv)
    std::shared_ptr<const CBlock> a_recent_block;
    std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block;
    LOCK(m_most_recent_block_mutex);
    a_recent_block = m_most_recent_block;
    a_recent_compact_block = m_most_recent_compact_block;
    bool need_activate_chain = false;
    need_activate_chain = true;
    if (need_activate_chain) {
        if (!m_chainman.ActiveChainstate().ActivateBestChain(state, a_recent_block)) {
    bool can_direct_fetch{false};
    if (!BlockRequestAllowed(pindex)) {
        LogDebug(BCLog::NET, "%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom.GetId());
    can_direct_fetch = CanDirectFetch();
    std::shared_ptr<const CBlock> pblock;
    if (a_recent_block && a_recent_block->GetHash() == inv.hash) {
        pblock = a_recent_block;
    std::vector<std::byte> block_data;
    std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>();
    pblock = pblockRead;
    bool sendMerkleBlock = false;
    if (auto tx_relay = peer.GetTxRelay(); tx_relay != nullptr) {
        LOCK(tx_relay->m_bloom_filter_mutex);
        if (tx_relay->m_bloom_filter) {
            sendMerkleBlock = true;
            merkleBlock = CMerkleBlock(*pblock, *tx_relay->m_bloom_filter);
    if (sendMerkleBlock) {
        for (const auto& [tx_idx, _] : merkleBlock.vMatchedTxn)
    if (a_recent_compact_block && a_recent_compact_block->header.GetHash() == inv.hash) {
    LOCK(peer.m_block_inv_mutex);
    if (inv.hash == peer.m_continuation_block) {
        std::vector<CInv> vInv;
        vInv.emplace_back(MSG_BLOCK, tip->GetBlockHash());
        peer.m_continuation_block.SetNull();

    auto txinfo{std::visit(
        [&](const auto& id) {
            return m_mempool.info_for_relay(id, WITH_LOCK(tx_relay.m_tx_inventory_mutex, return tx_relay.m_last_inv_sequence));
    return std::move(txinfo.tx);
    LOCK(m_most_recent_block_mutex);
    if (m_most_recent_block_txs != nullptr) {
        auto it = m_most_recent_block_txs->find(gtxid);
        if (it != m_most_recent_block_txs->end()) return it->second;
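// FindTxForGetData answers a transaction GETDATA from the mempool via
// info_for_relay(), gated on the peer's m_last_inv_sequence so only transactions
// already announced to that peer are returned, and otherwise falls back to the
// transactions cached from the most recently connected block.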
void PeerManagerImpl::ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic<bool>& interruptMsgProc)
    auto tx_relay = peer.GetTxRelay();
    std::deque<CInv>::iterator it = peer.m_getdata_requests.begin();
    std::vector<CInv> vNotFound;
    while (it != peer.m_getdata_requests.end() && it->IsGenTxMsg()) {
        if (interruptMsgProc) return;
        const CInv &inv = *it++;
        if (tx_relay == nullptr) {
        if (auto tx{FindTxForGetData(*tx_relay, ToGenTxid(inv))}) {
            MakeAndPushMessage(pfrom, NetMsgType::TX, maybe_with_witness(*tx));
        vNotFound.push_back(inv);
    if (it != peer.m_getdata_requests.end() && !pfrom.fPauseSend) {
        const CInv &inv = *it++;
        ProcessGetBlockData(pfrom, peer, inv);
    peer.m_getdata_requests.erase(peer.m_getdata_requests.begin(), it);
    if (!vNotFound.empty()) {

uint32_t PeerManagerImpl::GetFetchFlags(const Peer& peer) const
    uint32_t nFetchFlags = 0;
    if (CanServeWitnesses(peer)) {

    unsigned int tx_requested_size = 0;
    for (size_t i = 0; i < req.indexes.size(); i++) {
        Misbehaving(peer, "getblocktxn with out-of-bounds tx indices");
        tx_requested_size += resp.txn[i]->GetTotalSize();
bool PeerManagerImpl::CheckHeadersPoW(const std::vector<CBlockHeader>& headers, const Consensus::Params& consensusParams, Peer& peer)
    Misbehaving(peer, "header with invalid proof of work");
    if (!CheckHeadersAreContinuous(headers)) {
        Misbehaving(peer, "non-continuous headers sequence");

void PeerManagerImpl::HandleUnconnectingHeaders(CNode& pfrom, Peer& peer,
                                                const std::vector<CBlockHeader>& headers)
    if (MaybeSendGetHeaders(pfrom, GetLocator(best_header), peer)) {
        LogDebug(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d)\n",
            headers[0].hashPrevBlock.ToString(),
            best_header->nHeight,

bool PeerManagerImpl::CheckHeadersAreContinuous(const std::vector<CBlockHeader>& headers) const
    if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) {
    hashLastBlock = header.GetHash();
bool PeerManagerImpl::IsContinuationOfLowWorkHeadersSync(Peer& peer, CNode& pfrom, std::vector<CBlockHeader>& headers)
    if (peer.m_headers_sync) {
        auto result = peer.m_headers_sync->ProcessNextHeaders(headers, headers.size() == m_opts.max_headers_result);
        if (result.success) peer.m_last_getheaders_timestamp = {};
        if (result.request_more) {
            auto locator = peer.m_headers_sync->NextHeadersRequestLocator();
            Assume(!locator.vHave.empty());
            if (!locator.vHave.empty()) {
                bool sent_getheaders = MaybeSendGetHeaders(pfrom, locator, peer);
                    locator.vHave.front().ToString(), pfrom.GetId());
            peer.m_headers_sync.reset(nullptr);
            LOCK(m_headers_presync_mutex);
            m_headers_presync_stats.erase(pfrom.GetId());
        HeadersPresyncStats stats;
        stats.first = peer.m_headers_sync->GetPresyncWork();
        stats.second = {peer.m_headers_sync->GetPresyncHeight(),
            peer.m_headers_sync->GetPresyncTime()};
        LOCK(m_headers_presync_mutex);
        m_headers_presync_stats[pfrom.GetId()] = stats;
        auto best_it = m_headers_presync_stats.find(m_headers_presync_bestpeer);
        bool best_updated = false;
        if (best_it == m_headers_presync_stats.end()) {
            const HeadersPresyncStats* stat_best{nullptr};
            for (const auto& [peer, stat] : m_headers_presync_stats) {
                if (!stat_best || stat > *stat_best) {
            m_headers_presync_bestpeer = peer_best;
            best_updated = (peer_best == pfrom.GetId());
        } else if (best_it->first == pfrom.GetId() || stats > best_it->second) {
            m_headers_presync_bestpeer = pfrom.GetId();
            best_updated = true;
        if (best_updated && stats.second.has_value()) {
            m_headers_presync_should_signal = true;
        if (result.success) {
            headers.swap(result.pow_validated_headers);
        return result.success;
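// During a low-work headers sync the per-peer presync progress (accumulated work,
// plus height/time once available) is tracked under m_headers_presync_mutex; when
// the peer doing the best presync advances, m_headers_presync_should_signal is set
// so the new progress can be acted on after this call returns.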
bool PeerManagerImpl::TryLowWorkHeadersSync(Peer& peer, CNode& pfrom, const CBlockIndex* chain_start_header, std::vector<CBlockHeader>& headers)
    arith_uint256 minimum_chain_work = GetAntiDoSWorkThreshold();
    if (total_work < minimum_chain_work) {
        if (headers.size() == m_opts.max_headers_result) {
            LOCK(peer.m_headers_sync_mutex);
                m_chainparams.HeadersSync(), chain_start_header, minimum_chain_work));
            (void)IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers);

bool PeerManagerImpl::IsAncestorOfBestHeaderOrTip(const CBlockIndex* header)
    if (header == nullptr) {
    } else if (m_chainman.m_best_header != nullptr && header == m_chainman.m_best_header->GetAncestor(header->nHeight)) {

bool PeerManagerImpl::MaybeSendGetHeaders(CNode& pfrom, const CBlockLocator& locator, Peer& peer)
    peer.m_last_getheaders_timestamp = current_time;

void PeerManagerImpl::HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, const CBlockIndex& last_header)
    CNodeState *nodestate = State(pfrom.GetId());
    std::vector<const CBlockIndex*> vToFetch;
    vToFetch.push_back(pindexWalk);
    pindexWalk = pindexWalk->pprev;
    std::vector<CInv> vGetData;
    for (const CBlockIndex* pindex : vToFetch | std::views::reverse) {
        uint32_t nFetchFlags = GetFetchFlags(peer);
        BlockRequested(pfrom.GetId(), *pindex);
    if (vGetData.size() > 1) {
    if (vGetData.size() > 0) {
        if (!m_opts.ignore_incoming_txs &&
            nodestate->m_provides_cmpctblocks &&
            vGetData.size() == 1 &&
            mapBlocksInFlight.size() == 1 &&
void PeerManagerImpl::UpdatePeerStateForReceivedHeaders(CNode& pfrom, Peer& peer,
                                                        const CBlockIndex& last_header, bool received_new_header, bool may_have_more_headers)
    CNodeState *nodestate = State(pfrom.GetId());
    nodestate->m_last_block_announcement = GetTime();
    if (nodestate->pindexBestKnownBlock && nodestate->pindexBestKnownBlock->nChainWork < m_chainman.MinimumChainWork()) {
    if (m_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= m_chainman.ActiveChain().Tip()->nChainWork && !nodestate->m_chain_sync.m_protect) {
        nodestate->m_chain_sync.m_protect = true;
        ++m_outbound_peers_with_protect_from_disconnect;

void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, Peer& peer,
                                            std::vector<CBlockHeader>&& headers,
                                            bool via_compact_block)
    size_t nCount = headers.size();
    LOCK(peer.m_headers_sync_mutex);
    if (peer.m_headers_sync) {
        peer.m_headers_sync.reset(nullptr);
        LOCK(m_headers_presync_mutex);
        m_headers_presync_stats.erase(pfrom.GetId());
    peer.m_last_getheaders_timestamp = {};
    if (!CheckHeadersPoW(headers, m_chainparams.GetConsensus(), peer)) {
    bool already_validated_work = false;
    bool have_headers_sync = false;
    LOCK(peer.m_headers_sync_mutex);
    already_validated_work = IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers);
    if (headers.empty()) {
    have_headers_sync = !!peer.m_headers_sync;
    bool headers_connect_blockindex{chain_start_header != nullptr};
    if (!headers_connect_blockindex) {
        HandleUnconnectingHeaders(pfrom, peer, headers);
    peer.m_last_getheaders_timestamp = {};
    if (IsAncestorOfBestHeaderOrTip(last_received_header)) {
        already_validated_work = true;
    already_validated_work = true;
    if (!already_validated_work && TryLowWorkHeadersSync(peer, pfrom,
            chain_start_header, headers)) {
    bool received_new_header{last_received_header == nullptr};
        state, &pindexLast)};
        MaybePunishNodeForBlock(pfrom.GetId(), state, via_compact_block, "invalid header received");
    if (processed && received_new_header) {
        LogBlockHeader(*pindexLast, pfrom, false);
    if (nCount == m_opts.max_headers_result && !have_headers_sync) {
        if (MaybeSendGetHeaders(pfrom, GetLocator(pindexLast), peer)) {
            pindexLast->nHeight, pfrom.GetId(), peer.m_starting_height);
    UpdatePeerStateForReceivedHeaders(pfrom, peer, *pindexLast, received_new_header, nCount == m_opts.max_headers_result);
    HeadersDirectFetchBlocks(pfrom, peer, *pindexLast);
        bool first_time_failure)
    PeerRef peer{GetPeerRef(nodeid)};
        ptx->GetHash().ToString(),
        ptx->GetWitnessHash().ToString(),
    const auto& [add_extra_compact_tx, unique_parents, package_to_validate] = m_txdownloadman.MempoolRejectedTx(ptx, state, nodeid, first_time_failure);
        AddToCompactExtraTransactions(ptx);
    for (const Txid& parent_txid : unique_parents) {
        if (peer) AddKnownTx(*peer, parent_txid.ToUint256());
    return package_to_validate;

void PeerManagerImpl::ProcessValidTx(NodeId nodeid, const CTransactionRef& tx, const std::list<CTransactionRef>& replaced_transactions)
    m_txdownloadman.MempoolAcceptedTx(tx);
        tx->GetHash().ToString(),
        tx->GetWitnessHash().ToString(),
    RelayTransaction(tx->GetHash(), tx->GetWitnessHash());
        AddToCompactExtraTransactions(removedTx);

    const auto& package = package_to_validate.m_txns;
    const auto& senders = package_to_validate.m_senders;
        m_txdownloadman.MempoolRejectedPackage(package);
    if (!Assume(package.size() == 2)) return;
    auto package_iter = package.rbegin();
    auto senders_iter = senders.rbegin();
    while (package_iter != package.rend()) {
        const auto& tx = *package_iter;
        const NodeId nodeid = *senders_iter;
        const auto it_result{package_result.m_tx_results.find(tx->GetWitnessHash())};
        const auto& tx_result = it_result->second;
        switch (tx_result.m_result_type) {
            ProcessValidTx(nodeid, tx, tx_result.m_replaced_transactions);
            ProcessInvalidTx(nodeid, tx, tx_result.m_state, false);

bool PeerManagerImpl::ProcessOrphanTx(Peer& peer)
    while (CTransactionRef porphanTx = m_txdownloadman.GetTxToReconsider(peer.m_id)) {
        const Txid& orphanHash = porphanTx->GetHash();
        const Wtxid& orphan_wtxid = porphanTx->GetWitnessHash();
        ProcessInvalidTx(peer.m_id, porphanTx, state, false);
bool PeerManagerImpl::PrepareBlockFilterRequest(CNode& node, Peer& peer,
                                                const uint256& stop_hash, uint32_t max_height_diff,
    const bool supported_filter_type =
    if (!supported_filter_type) {
            static_cast<uint8_t>(filter_type), node.DisconnectMsg(fLogIPs));
        node.fDisconnect = true;
    if (!stop_index || !BlockRequestAllowed(stop_index)) {
        node.fDisconnect = true;
    uint32_t stop_height = stop_index->nHeight;
    if (start_height > stop_height) {
            "start height %d and stop height %d, %s\n",
            start_height, stop_height, node.DisconnectMsg(fLogIPs));
        node.fDisconnect = true;
    if (stop_height - start_height >= max_height_diff) {
            stop_height - start_height + 1, max_height_diff, node.DisconnectMsg(fLogIPs));
        node.fDisconnect = true;
    if (!filter_index) {

    uint8_t filter_type_ser;
    uint32_t start_height;
    vRecv >> filter_type_ser >> start_height >> stop_hash;
    if (!PrepareBlockFilterRequest(node, peer, filter_type, start_height, stop_hash,
    std::vector<BlockFilter> filters;
        LogDebug(BCLog::NET, "Failed to find block filter in index: filter_type=%s, start_height=%d, stop_hash=%s\n",
    for (const auto& filter : filters) {

    uint8_t filter_type_ser;
    uint32_t start_height;
    vRecv >> filter_type_ser >> start_height >> stop_hash;
    if (!PrepareBlockFilterRequest(node, peer, filter_type, start_height, stop_hash,
    if (start_height > 0) {
            stop_index->GetAncestor(static_cast<int>(start_height - 1));
            LogDebug(BCLog::NET, "Failed to find block filter header in index: filter_type=%s, block_hash=%s\n",
    std::vector<uint256> filter_hashes;
        LogDebug(BCLog::NET, "Failed to find block filter hashes in index: filter_type=%s, start_height=%d, stop_hash=%s\n",

    uint8_t filter_type_ser;
    vRecv >> filter_type_ser >> stop_hash;
    if (!PrepareBlockFilterRequest(node, peer, filter_type, 0, stop_hash,
                                   std::numeric_limits<uint32_t>::max(),
                                   stop_index, filter_index)) {
    for (int i = headers.size() - 1; i >= 0; i--) {
        LogDebug(BCLog::NET, "Failed to find block filter header in index: filter_type=%s, block_hash=%s\n",

    bool new_block{false};
    m_chainman.ProcessNewBlock(block, force_processing, min_pow_checked, &new_block);
        node.m_last_block_time = GetTime<std::chrono::seconds>();
        RemoveBlockRequest(block->GetHash(), std::nullopt);
        mapBlockSource.erase(block->GetHash());
void PeerManagerImpl::ProcessCompactBlockTxns(CNode& pfrom, Peer& peer, const BlockTransactions& block_transactions)
    std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
    bool fBlockRead{false};
    auto range_flight = mapBlocksInFlight.equal_range(block_transactions.blockhash);
    size_t already_in_flight = std::distance(range_flight.first, range_flight.second);
    bool requested_block_from_this_peer{false};
    bool first_in_flight = already_in_flight == 0 || (range_flight.first->second.first == pfrom.GetId());
    while (range_flight.first != range_flight.second) {
        auto [node_id, block_it] = range_flight.first->second;
        if (node_id == pfrom.GetId() && block_it->partialBlock) {
            requested_block_from_this_peer = true;
        range_flight.first++;
    if (!requested_block_from_this_peer) {
    Misbehaving(peer, "previous compact block reconstruction attempt failed");
    Misbehaving(peer, "invalid compact block/non-matching block transactions");
    if (first_in_flight) {
        std::vector<CInv> invs;
    LogDebug(BCLog::NET, "Peer %d sent us a compact block but it failed to reconstruct, waiting on first download to complete\n", pfrom.GetId());
    mapBlockSource.emplace(block_transactions.blockhash, std::make_pair(pfrom.GetId(), false));
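// On a failed reconstruction only the first peer that had this block in flight is
// asked for the full block again; later announcers simply wait for that download.
// The mapBlockSource entry records the sender with its second flag set to false,
// which BlockChecked later treats as a compact-block source when deciding whether
// to punish.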
void PeerManagerImpl::LogBlockHeader(const CBlockIndex& index, const CNode& peer, bool via_compact_block) {
        "Saw new %sheader hash=%s height=%d peer=%d%s",
        via_compact_block ? "cmpctblock " : "",
void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, DataStream& vRecv,
                                     const std::chrono::microseconds time_received,
                                     const std::atomic<bool>& interruptMsgProc)
    PeerRef peer = GetPeerRef(pfrom.GetId());
    if (peer == nullptr) return;

    uint64_t nNonce = 1;
    std::string cleanSubVer;
    int starting_height = -1;
    vRecv >> nVersion >> Using<CustomUintFormatter<8>>(nServices) >> nTime;
        LogDebug(BCLog::NET, "peer does not offer the expected services (%08x offered, %08x expected), %s\n",
            GetDesirableServiceFlags(nServices),
    if (!vRecv.empty()) {
    if (!vRecv.empty()) {
        std::string strSubVer;
    if (!vRecv.empty()) {
        vRecv >> starting_height;
    PushNodeVersion(pfrom, *peer);
    if (greatest_common_version >= 70016) {
    peer->m_their_services = nServices;
    pfrom.cleanSubVer = cleanSubVer;
    peer->m_starting_height = starting_height;
        (fRelay || (peer->m_our_services & NODE_BLOOM))) {
        auto* const tx_relay = peer->SetTxRelay();
        LOCK(tx_relay->m_bloom_filter_mutex);
        tx_relay->m_relay_txs = fRelay;
    const auto* tx_relay = peer->GetTxRelay();
    if (tx_relay && WITH_LOCK(tx_relay->m_bloom_filter_mutex, return tx_relay->m_relay_txs) &&
        const uint64_t recon_salt = m_txreconciliation->PreRegisterPeer(pfrom.GetId());
    m_num_preferred_download_peers += state->fPreferredDownload;
    bool send_getaddr{false};
    send_getaddr = SetupAddressRelay(pfrom, *peer);
        peer->m_getaddr_sent = true;
    LogDebug(BCLog::NET, "receive version message: %s: version %d, blocks=%d, us=%s, txrelay=%d, peer=%d%s%s\n",
    peer->m_time_offset = NodeSeconds{std::chrono::seconds{nTime}} - Now<NodeSeconds>();
        m_outbound_time_offsets.Add(peer->m_time_offset);
        m_outbound_time_offsets.WarnIfOutOfSync();
    if (greatest_common_version <= 70012) {
        constexpr auto finalAlert{"60010000000000000000000000ffffff7f00000000ffffff7ffeffff7f01ffffff7f00000000ffffff7f00ffffff7f002f555247454e543a20416c657274206b657920636f6d70726f6d697365642c2075706772616465207265717569726564004630440220653febd6410f470f6bae11cad19c48413becb1ac2c17f908fd0fd53bdc3abd5202206d0e9c96fe88d4a0f01ed9dedae2b6f9e00da94cad0fecaae66ecf689bf71b50"_hex};
        MakeAndPushMessage(pfrom, "alert", finalAlert);
    LogPrintf("New %s %s peer connected: version: %d, blocks=%d, peer=%d%s%s\n",
        pfrom.nVersion.load(), peer->m_starting_height,
        (mapped_as ? strprintf(", mapped_as=%d", mapped_as) : ""));
    if (m_txreconciliation) {
        if (!peer->m_wtxid_relay || !m_txreconciliation->IsPeerRegistered(pfrom.GetId())) {
            m_txreconciliation->ForgetPeer(pfrom.GetId());
    if (auto tx_relay = peer->GetTxRelay()) {
            tx_relay->m_tx_inventory_mutex,
            return tx_relay->m_tx_inventory_to_send.empty() &&
                   tx_relay->m_next_inv_send_time == 0s));
    const CNodeState* state = State(pfrom.GetId());
        .m_preferred = state->fPreferredDownload,
        .m_relay_permissions = pfrom.HasPermission(NetPermissionFlags::Relay),
        .m_wtxid_relay = peer->m_wtxid_relay,

    peer->m_prefers_headers = true;

    bool sendcmpct_hb{false};
    uint64_t sendcmpct_version{0};
    vRecv >> sendcmpct_hb >> sendcmpct_version;
    CNodeState* nodestate = State(pfrom.GetId());
    nodestate->m_provides_cmpctblocks = true;
    nodestate->m_requested_hb_cmpctblocks = sendcmpct_hb;

    if (!peer->m_wtxid_relay) {
        peer->m_wtxid_relay = true;
        m_wtxid_relay_peers++;

    peer->m_wants_addrv2 = true;

    if (!m_txreconciliation) {
        LogDebug(BCLog::NET, "sendtxrcncl from peer=%d ignored, as our node does not have txreconciliation enabled\n", pfrom.GetId());
    if (RejectIncomingTxs(pfrom)) {
    const auto* tx_relay = peer->GetTxRelay();
    if (!tx_relay || !WITH_LOCK(tx_relay->m_bloom_filter_mutex, return tx_relay->m_relay_txs)) {
    uint32_t peer_txreconcl_version;
    uint64_t remote_salt;
    vRecv >> peer_txreconcl_version >> remote_salt;
        peer_txreconcl_version, remote_salt);

    const auto ser_params{
    std::vector<CAddress> vAddr;
    vRecv >> ser_params(vAddr);
    if (!SetupAddressRelay(pfrom, *peer)) {
    Misbehaving(*peer, strprintf("%s message size = %u", msg_type, vAddr.size()));
    std::vector<CAddress> vAddrOk;
    const auto current_a_time{Now<NodeSeconds>()};
    const auto current_time{GetTime<std::chrono::microseconds>()};
    const auto time_diff = std::max(current_time - peer->m_addr_token_timestamp, 0us);
    peer->m_addr_token_timestamp = current_time;
    uint64_t num_proc = 0;
    uint64_t num_rate_limit = 0;
    std::shuffle(vAddr.begin(), vAddr.end(), m_rng);
        if (interruptMsgProc)
        if (peer->m_addr_token_bucket < 1.0) {
        peer->m_addr_token_bucket -= 1.0;
            addr.nTime = current_a_time - 5 * 24h;
        AddAddressKnown(*peer, addr);
        if (addr.nTime > current_a_time - 10min && !peer->m_getaddr_sent && vAddr.size() <= 10 && addr.IsRoutable()) {
            RelayAddress(pfrom.GetId(), addr, reachable);
        vAddrOk.push_back(addr);
    peer->m_addr_processed += num_proc;
    peer->m_addr_rate_limited += num_rate_limit;
    LogDebug(BCLog::NET, "Received addr: %u addresses (%u processed, %u rate-limited) from peer=%d\n",
        vAddr.size(), num_proc, num_rate_limit, pfrom.GetId());
    m_addrman.Add(vAddrOk, pfrom.addr, 2h);
    if (vAddr.size() < 1000) peer->m_getaddr_sent = false;
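// ADDR processing is rate limited with a token bucket: each accepted address
// consumes one token, the bucket refills according to the time elapsed since the
// previous message (time_diff above), and addresses arriving while the bucket is
// empty are counted in m_addr_rate_limited instead of being processed.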
    std::vector<CInv> vInv;
    Misbehaving(*peer, strprintf("inv message size = %u", vInv.size()));
    const bool reject_tx_invs{RejectIncomingTxs(pfrom)};
    const auto current_time{GetTime<std::chrono::microseconds>()};
    for (CInv& inv : vInv) {
        if (interruptMsgProc) return;
        if (peer->m_wtxid_relay) {
        const bool fAlreadyHave = AlreadyHaveBlock(inv.hash);
        UpdateBlockAvailability(pfrom.GetId(), inv.hash);
            best_block = &inv.hash;
        if (reject_tx_invs) {
        AddKnownTx(*peer, inv.hash);
        const bool fAlreadyHave{m_txdownloadman.AddTxAnnouncement(pfrom.GetId(), gtxid, current_time)};
    if (best_block != nullptr) {
        if (state.fSyncStarted || (!peer->m_inv_triggered_getheaders_before_sync && *best_block != m_last_block_inv_triggering_headers_sync)) {
            if (MaybeSendGetHeaders(pfrom, GetLocator(m_chainman.m_best_header), *peer)) {
                m_chainman.m_best_header->nHeight, best_block->ToString(),
            if (!state.fSyncStarted) {
                peer->m_inv_triggered_getheaders_before_sync = true;
            m_last_block_inv_triggering_headers_sync = *best_block;

    std::vector<CInv> vInv;
    Misbehaving(*peer, strprintf("getdata message size = %u", vInv.size()));
    if (vInv.size() > 0) {
    LOCK(peer->m_getdata_requests_mutex);
    peer->m_getdata_requests.insert(peer->m_getdata_requests.end(), vInv.begin(), vInv.end());
    ProcessGetData(pfrom, *peer, interruptMsgProc);

    vRecv >> locator >> hashStop;
    std::shared_ptr<const CBlock> a_recent_block;
    LOCK(m_most_recent_block_mutex);
    a_recent_block = m_most_recent_block;
    if (!m_chainman.ActiveChainstate().ActivateBestChain(state, a_recent_block)) {
    for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex))
        if (--nLimit <= 0) {
            WITH_LOCK(peer->m_block_inv_mutex, {peer->m_continuation_block = pindex->GetBlockHash();});

    for (size_t i = 1; i < req.indexes.size(); ++i) {
    std::shared_ptr<const CBlock> recent_block;
    LOCK(m_most_recent_block_mutex);
    if (m_most_recent_block_hash == req.blockhash)
        recent_block = m_most_recent_block;
    SendBlockTransactions(pfrom, *peer, *recent_block, req);
    if (!block_pos.IsNull()) {
        SendBlockTransactions(pfrom, *peer, block, req);
    WITH_LOCK(peer->m_getdata_requests_mutex, peer->m_getdata_requests.push_back(inv));
    vRecv >> locator >> hashStop;
    if (m_chainman.ActiveTip() == nullptr ||
        LogDebug(BCLog::NET, "Ignoring getheaders from peer=%d because active chain has too little work; sending empty response\n", pfrom.GetId());
    CNodeState *nodestate = State(pfrom.GetId());
    if (!BlockRequestAllowed(pindex)) {
        LogDebug(BCLog::NET, "%s: ignoring request from peer=%i for old block header that isn't in the main chain\n", __func__, pfrom.GetId());
    std::vector<CBlock> vHeaders;
    int nLimit = m_opts.max_headers_result;
    for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex))
        if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop)
    nodestate->pindexBestHeaderSent = pindex ? pindex : m_chainman.ActiveChain().Tip();

    if (RejectIncomingTxs(pfrom)) {
    const Txid& txid = ptx->GetHash();
    const Wtxid& wtxid = ptx->GetWitnessHash();
    AddKnownTx(*peer, hash);
    const auto& [should_validate, package_to_validate] = m_txdownloadman.ReceivedTx(pfrom.GetId(), ptx);
    if (!should_validate) {
        if (!m_mempool.exists(txid)) {
            LogPrintf("Not relaying non-mempool transaction %s (wtxid=%s) from forcerelay peer=%d\n",
            LogPrintf("Force relaying tx %s (wtxid=%s) from peer=%d\n",
            RelayTransaction(txid, wtxid);
        if (package_to_validate) {
                package_result.m_state.IsValid() ? "package accepted" : "package rejected");
            ProcessPackageResult(package_to_validate.value(), package_result);
        Assume(!package_to_validate.has_value());
    if (auto package_to_validate{ProcessInvalidTx(pfrom.GetId(), ptx, state, true)}) {
            package_result.m_state.IsValid() ? "package accepted" : "package rejected");
        ProcessPackageResult(package_to_validate.value(), package_result);
    vRecv >> cmpctblock;
    bool received_new_header = false;
        MaybeSendGetHeaders(pfrom, GetLocator(m_chainman.m_best_header), *peer);
        received_new_header = true;
        MaybePunishNodeForBlock(pfrom.GetId(), state, true, "invalid header via cmpctblock");
    if (received_new_header) {
        LogBlockHeader(*pindex, pfrom, true);
    bool fProcessBLOCKTXN = false;
    bool fRevertToHeaderProcessing = false;
    std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
    bool fBlockReconstructed = false;
    CNodeState *nodestate = State(pfrom.GetId());
    nodestate->m_last_block_announcement = GetTime();
    auto range_flight = mapBlocksInFlight.equal_range(pindex->GetBlockHash());
    size_t already_in_flight = std::distance(range_flight.first, range_flight.second);
    bool requested_block_from_this_peer{false};
    bool first_in_flight = already_in_flight == 0 || (range_flight.first->second.first == pfrom.GetId());
    while (range_flight.first != range_flight.second) {
        if (range_flight.first->second.first == pfrom.GetId()) {
            requested_block_from_this_peer = true;
        range_flight.first++;
    if (requested_block_from_this_peer) {
        std::vector<CInv> vInv(1);
        vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(*peer), blockhash);
    if (!already_in_flight && !CanDirectFetch()) {
        requested_block_from_this_peer) {
        std::list<QueuedBlock>::iterator* queuedBlockIt = nullptr;
        if (!BlockRequested(pfrom.GetId(), *pindex, &queuedBlockIt)) {
            if (!(*queuedBlockIt)->partialBlock)
        Misbehaving(*peer, "invalid compact block");
        if (first_in_flight) {
            std::vector<CInv> vInv(1);
            vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(*peer), blockhash);
        for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) {
            fProcessBLOCKTXN = true;
        } else if (first_in_flight) {
        IsBlockRequestedFromOutbound(blockhash) ||
        ReadStatus status = tempBlock.InitData(cmpctblock, vExtraTxnForCompact);
        std::vector<CTransactionRef> dummy;
        status = tempBlock.FillBlock(*pblock, dummy,
        fBlockReconstructed = true;
        if (requested_block_from_this_peer) {
            std::vector<CInv> vInv(1);
            vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(*peer), blockhash);
        fRevertToHeaderProcessing = true;
    if (fProcessBLOCKTXN) {
        return ProcessCompactBlockTxns(pfrom, *peer, txn);
    if (fRevertToHeaderProcessing) {
        return ProcessHeadersMessage(pfrom, *peer, {cmpctblock.header}, true);
    if (fBlockReconstructed) {
        mapBlockSource.emplace(pblock->GetHash(), std::make_pair(pfrom.GetId(), false));
        RemoveBlockRequest(pblock->GetHash(), std::nullopt);

    return ProcessCompactBlockTxns(pfrom, *peer, resp);
4599 std::vector<CBlockHeader> headers;
4603 if (nCount > m_opts.max_headers_result) {
4604 Misbehaving(*peer,
strprintf(
"headers message size = %u", nCount));
4607 headers.resize(nCount);
4608 for (
unsigned int n = 0; n < nCount; n++) {
4609 vRecv >> headers[n];
4613 ProcessHeadersMessage(pfrom, *peer, std::move(headers),
false);
4617 if (m_headers_presync_should_signal.exchange(
false)) {
4618 HeadersPresyncStats stats;
4620 LOCK(m_headers_presync_mutex);
4621 auto it = m_headers_presync_stats.find(m_headers_presync_bestpeer);
4622 if (it != m_headers_presync_stats.end()) stats = it->second;
4640 std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
4651 Misbehaving(*peer, "mutated block");
4656 bool forceProcessing = false;
4657 const uint256 hash(pblock->GetHash());
4658 bool min_pow_checked = false;
4663 forceProcessing = IsBlockRequested(hash);
4664 RemoveBlockRequest(hash, pfrom.GetId());
4668 mapBlockSource.emplace(hash, std::make_pair(pfrom.GetId(), true));
4672 min_pow_checked = true;
4675 ProcessBlock(pfrom, pblock, forceProcessing, min_pow_checked);
4692 Assume(SetupAddressRelay(pfrom, *peer));
4696 if (peer->m_getaddr_recvd) {
4700 peer->m_getaddr_recvd = true;
4702 peer->m_addrs_to_send.clear();
4703 std::vector<CAddress> vAddr;
4709 for (const CAddress &addr : vAddr) {
4710 PushAddress(*peer, addr);
4738 if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
4739 LOCK(tx_relay->m_tx_inventory_mutex);
4740 tx_relay->m_send_mempool = true;
4766 const auto ping_end = time_received;
4769 bool bPingFinished = false;
4770 std::string sProblem;
4772 if (nAvail >= sizeof(nonce)) {
4776 if (peer->m_ping_nonce_sent != 0) {
4777 if (nonce == peer->m_ping_nonce_sent) {
4779 bPingFinished = true;
4780 const auto ping_time = ping_end - peer->m_ping_start.load();
4781 if (ping_time.count() >= 0) {
4786 sProblem = "Timing mishap";
4790 sProblem = "Nonce mismatch";
4793 bPingFinished = true;
4794 sProblem = "Nonce zero";
4798 sProblem = "Unsolicited pong without ping";
4802 bPingFinished = true;
4803 sProblem = "Short payload";
4806 if (!(sProblem.empty())) {
4810 peer->m_ping_nonce_sent,
4814 if (bPingFinished) {
4815 peer->m_ping_nonce_sent = 0;
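The pong handler above is a small state machine over the outstanding ping nonce: accept only a matching nonce, derive the round-trip time, and clear the nonce so later pongs count as unsolicited. A simplified standalone sketch under those assumptions (illustrative names, not the real Peer class):

#include <chrono>
#include <cstdint>
#include <iostream>
#include <optional>
#include <string>

struct PingState {
    uint64_t nonce_sent{0};                   // 0 means no ping outstanding
    std::chrono::microseconds ping_start{0};
};

// Returns the measured round-trip time, or nullopt and a problem description.
std::optional<std::chrono::microseconds> HandlePong(PingState& state, uint64_t nonce,
                                                    std::chrono::microseconds now,
                                                    std::string& problem)
{
    if (state.nonce_sent == 0) {
        problem = "Unsolicited pong without ping";
        return std::nullopt;
    }
    if (nonce != state.nonce_sent) {
        problem = nonce == 0 ? "Nonce zero" : "Nonce mismatch";
        if (nonce == 0) state.nonce_sent = 0; // give up on this ping round
        return std::nullopt;
    }
    const auto rtt = now - state.ping_start;
    state.nonce_sent = 0; // ping round trip finished
    if (rtt.count() < 0) {
        problem = "Timing mishap";
        return std::nullopt;
    }
    return rtt;
}

int main()
{
    using namespace std::chrono_literals;
    PingState state{.nonce_sent = 0xdeadbeef, .ping_start = 1000us};
    std::string problem;
    if (auto rtt = HandlePong(state, 0xdeadbeef, 1750us, problem)) {
        std::cout << "ping time: " << rtt->count() << "us\n";
    } else {
        std::cout << "problem: " << problem << '\n';
    }
}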
4832 Misbehaving(*peer, "too-large bloom filter");
4833 } else if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
4835 LOCK(tx_relay->m_bloom_filter_mutex);
4836 tx_relay->m_bloom_filter.reset(new CBloomFilter(filter));
4837 tx_relay->m_relay_txs = true;
4851 std::vector<unsigned char> vData;
4859 } else if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
4860 LOCK(tx_relay->m_bloom_filter_mutex);
4861 if (tx_relay->m_bloom_filter) {
4862 tx_relay->m_bloom_filter->insert(vData);
4868 Misbehaving(*peer, "bad filteradd message");
4879 auto tx_relay = peer->GetTxRelay();
4880 if (!tx_relay) return;
4883 LOCK(tx_relay->m_bloom_filter_mutex);
4884 tx_relay->m_bloom_filter = nullptr;
4885 tx_relay->m_relay_txs = true;
4894 vRecv >> newFeeFilter;
4896 if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
4897 tx_relay->m_fee_filter_received = newFeeFilter;
4905 ProcessGetCFilters(pfrom, *peer, vRecv);
4910 ProcessGetCFHeaders(pfrom, *peer, vRecv);
4915 ProcessGetCFCheckPt(pfrom, *peer, vRecv);
4920 std::vector<CInv> vInv;
4922 std::vector<GenTxid> tx_invs;
4924 for (CInv &inv : vInv) {
4930 LOCK(m_tx_download_mutex);
4931 m_txdownloadman.ReceivedNotFound(pfrom.GetId(), tx_invs);
4940 bool PeerManagerImpl::MaybeDiscourageAndDisconnect(CNode& pnode, Peer& peer)
4943 LOCK(peer.m_misbehavior_mutex);
4946 if (!peer.m_should_discourage) return false;
4948 peer.m_should_discourage = false;
4953 LogPrintf("Warning: not punishing noban peer %d!\n", peer.m_id);
4959 LogPrintf("Warning: not punishing manually connected peer %d!\n", peer.m_id);
4979 bool PeerManagerImpl::ProcessMessages(CNode* pfrom, std::atomic<bool>& interruptMsgProc)
4984 PeerRef peer = GetPeerRef(pfrom->GetId());
4985 if (peer == nullptr) return false;
4989 if (!pfrom->IsInboundConn() && !peer->m_outbound_version_message_sent) return false;
4992 LOCK(peer->m_getdata_requests_mutex);
4993 if (!peer->m_getdata_requests.empty()) {
4994 ProcessGetData(*pfrom, *peer, interruptMsgProc);
4998 const bool processed_orphan = ProcessOrphanTx(*peer);
5003 if (processed_orphan) return true;
5008 LOCK(peer->m_getdata_requests_mutex);
5009 if (!peer->m_getdata_requests.empty()) return true;
5022 bool fMoreWork = poll_result->second;
5033 if (m_opts.capture_messages) {
5038 ProcessMessage(*pfrom, msg.m_type, msg.m_recv, msg.m_time, interruptMsgProc);
5039 if (interruptMsgProc) return false;
5041 LOCK(peer->m_getdata_requests_mutex);
5042 if (!peer->m_getdata_requests.empty()) fMoreWork = true;
5049 LOCK(m_tx_download_mutex);
5050 if (m_txdownloadman.HaveMoreWork(peer->m_id)) fMoreWork = true;
5051 } catch (const std::exception& e) {
5060 void PeerManagerImpl::ConsiderEviction(CNode& pto, Peer& peer, std::chrono::seconds time_in_seconds)
5073 if (state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= m_chainman.ActiveChain().Tip()->nChainWork) {
5075 if (state.m_chain_sync.m_timeout != 0s) {
5076 state.m_chain_sync.m_timeout = 0s;
5077 state.m_chain_sync.m_work_header = nullptr;
5078 state.m_chain_sync.m_sent_getheaders = false;
5080 } else if (state.m_chain_sync.m_timeout == 0s || (state.m_chain_sync.m_work_header != nullptr && state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= state.m_chain_sync.m_work_header->nChainWork)) {
5088 state.m_chain_sync.m_work_header = m_chainman.ActiveChain().Tip();
5089 state.m_chain_sync.m_sent_getheaders = false;
5090 } else if (state.m_chain_sync.m_timeout > 0s && time_in_seconds > state.m_chain_sync.m_timeout) {
5094 if (state.m_chain_sync.m_sent_getheaders) {
5096 LogInfo("Outbound peer has old chain, best known block = %s, %s\n", state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>", pto.DisconnectMsg(fLogIPs));
5099 assert(state.m_chain_sync.m_work_header);
5104 MaybeSendGetHeaders(pto,
5105 GetLocator(state.m_chain_sync.m_work_header->pprev),
5107 LogDebug(BCLog::NET, "sending getheaders to outbound peer=%d to verify chain work (current best known block:%s, benchmark blockhash: %s)\n", pto.GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>", state.m_chain_sync.m_work_header->GetBlockHash().ToString());
5108 state.m_chain_sync.m_sent_getheaders = true;
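ConsiderEviction above implements a per-peer, three-branch timeout state machine: clear the timeout once the peer's best known work reaches our tip, otherwise arm (or re-arm) a timeout against the current tip, and on expiry give the peer one getheaders round before flagging it. A compact sketch of that flow, with plain integers standing in for chain work and illustrative constants and names:

#include <chrono>
#include <cstdint>
#include <iostream>

using namespace std::chrono_literals;

struct ChainSyncState {
    std::chrono::seconds timeout{0s};
    int64_t benchmark_work{0}; // stands in for m_work_header->nChainWork
    bool sent_getheaders{false};
};

constexpr auto CHAIN_SYNC_TIMEOUT{20min};

// Returns true once the peer should be considered for eviction.
bool ConsiderEviction(ChainSyncState& s, int64_t peer_best_work, int64_t our_tip_work,
                      std::chrono::seconds now)
{
    if (peer_best_work >= our_tip_work) {
        s = ChainSyncState{};                 // peer caught up: clear any pending timeout
    } else if (s.timeout == 0s || peer_best_work >= s.benchmark_work) {
        s.timeout = now + CHAIN_SYNC_TIMEOUT; // arm a timeout against our current tip's work
        s.benchmark_work = our_tip_work;
        s.sent_getheaders = false;
    } else if (now > s.timeout) {
        if (s.sent_getheaders) return true;   // already gave it a last chance
        s.sent_getheaders = true;             // send getheaders and extend briefly
        s.timeout = now + 2min;
    }
    return false;
}

int main()
{
    ChainSyncState s;
    std::chrono::seconds now{0s};
    ConsiderEviction(s, 10, 100, now);  // arms the timeout
    now += 21min;
    ConsiderEviction(s, 10, 100, now);  // expired: sends getheaders, short extension
    now += 3min;
    std::cout << "evict: " << ConsiderEviction(s, 10, 100, now) << '\n'; // 1
}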
5120 void PeerManagerImpl::EvictExtraOutboundPeers(std::chrono::seconds now)
5129 std::pair<NodeId, std::chrono::seconds> youngest_peer{-1, 0}, next_youngest_peer{-1, 0};
5133 if (pnode->GetId() > youngest_peer.first) {
5134 next_youngest_peer = youngest_peer;
5135 youngest_peer.first = pnode->GetId();
5136 youngest_peer.second = pnode->m_last_block_time;
5139 NodeId to_disconnect = youngest_peer.first;
5140 if (youngest_peer.second > next_youngest_peer.second) {
5143 to_disconnect = next_youngest_peer.first;
5152 CNodeState *node_state = State(pnode->GetId());
5153 if (node_state == nullptr ||
5156 LogDebug(BCLog::NET, "disconnecting extra block-relay-only peer=%d (last block received at time %d)\n",
5160 LogDebug(BCLog::NET, "keeping block-relay-only peer=%d chosen for eviction (connect time: %d, blocks_in_flight: %d)\n",
5176 int64_t oldest_block_announcement = std::numeric_limits<int64_t>::max();
5179 AssertLockHeld(::cs_main);
5183 if (!pnode->IsFullOutboundConn() || pnode->fDisconnect) return;
5184 CNodeState *state = State(pnode->GetId());
5185 if (state == nullptr) return;
5187 if (state->m_chain_sync.m_protect) return;
5190 if (!m_connman.MultipleManualOrFullOutboundConns(pnode->addr.GetNetwork())) return;
5191 if (state->m_last_block_announcement < oldest_block_announcement || (state->m_last_block_announcement == oldest_block_announcement && pnode->GetId() > worst_peer)) {
5192 worst_peer = pnode->GetId();
5193 oldest_block_announcement = state->m_last_block_announcement;
5196 if (worst_peer != -1) {
5207 LogDebug(BCLog::NET, "disconnecting extra outbound peer=%d (last block announcement received at time %d)\n", pnode->GetId(), oldest_block_announcement);
5211 LogDebug(BCLog::NET, "keeping outbound peer=%d chosen for eviction (connect time: %d, blocks_in_flight: %d)\n",
5228 void PeerManagerImpl::CheckForStaleTipAndEvictPeers()
5232 auto now{GetTime<std::chrono::seconds>()};
5234 EvictExtraOutboundPeers(now);
5236 if (now > m_stale_tip_check_time) {
5240 LogPrintf("Potential stale tip detected, will try using extra outbound peer (last tip update: %d seconds ago)\n",
5249 if (!m_initial_sync_finished && CanDirectFetch()) {
5251 m_initial_sync_finished = true;
5255 void PeerManagerImpl::MaybeSendPing(CNode& node_to, Peer& peer, std::chrono::microseconds now)
5258 peer.m_ping_nonce_sent &&
5268 bool pingSend = false;
5270 if (peer.m_ping_queued) {
5275 if (peer.m_ping_nonce_sent == 0 && now > peer.m_ping_start.load() + PING_INTERVAL) {
5284 } while (nonce == 0);
5285 peer.m_ping_queued = false;
5286 peer.m_ping_start = now;
5288 peer.m_ping_nonce_sent = nonce;
5292 peer.m_ping_nonce_sent = 0;
5298 void PeerManagerImpl::MaybeSendAddr(CNode& node, Peer& peer, std::chrono::microseconds current_time)
5301 if (!peer.m_addr_relay_enabled) return;
5303 LOCK(peer.m_addr_send_times_mutex);
5306 peer.m_next_local_addr_send < current_time) {
5313 if (peer.m_next_local_addr_send != 0us) {
5314 peer.m_addr_known->reset();
5317 CAddress local_addr{*local_service, peer.m_our_services, Now<NodeSeconds>()};
5318 PushAddress(peer, local_addr);
5324 if (current_time <= peer.m_next_addr_send) return;
5337 bool ret = peer.m_addr_known->contains(addr.GetKey());
5338 if (!ret) peer.m_addr_known->insert(addr.GetKey());
5341 peer.m_addrs_to_send.erase(std::remove_if(peer.m_addrs_to_send.begin(), peer.m_addrs_to_send.end(), addr_already_known),
5342 peer.m_addrs_to_send.end());
5345 if (peer.m_addrs_to_send.empty()) return;
5347 if (peer.m_wants_addrv2) {
5352 peer.m_addrs_to_send.clear();
5355 if (peer.m_addrs_to_send.capacity() > 40) {
5356 peer.m_addrs_to_send.shrink_to_fit();
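The m_addrs_to_send filtering above uses the erase/remove_if idiom with a predicate that simultaneously tests and updates the per-peer known-address filter. A standalone sketch of the same pattern, with a plain set standing in for the rolling bloom filter and strings standing in for serialized addresses:

#include <algorithm>
#include <iostream>
#include <string>
#include <unordered_set>
#include <vector>

int main()
{
    std::unordered_set<std::string> addr_known{"1.2.3.4:8333"};
    std::vector<std::string> addrs_to_send{"1.2.3.4:8333", "5.6.7.8:8333", "9.9.9.9:8333"};

    auto addr_already_known = [&addr_known](const std::string& addr) {
        const bool known = addr_known.contains(addr);
        if (!known) addr_known.insert(addr); // remember what we are about to relay
        return known;
    };
    addrs_to_send.erase(std::remove_if(addrs_to_send.begin(), addrs_to_send.end(), addr_already_known),
                        addrs_to_send.end());

    for (const auto& a : addrs_to_send) std::cout << a << '\n'; // 5.6.7.8 and 9.9.9.9 remain
}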
5360 void PeerManagerImpl::MaybeSendSendHeaders(CNode& node, Peer& peer)
5368 CNodeState &state = *State(node.GetId());
5369 if (state.pindexBestKnownBlock != nullptr &&
5376 peer.m_sent_sendheaders = true;
5381 void PeerManagerImpl::MaybeSendFeefilter(CNode& pto, Peer& peer, std::chrono::microseconds current_time)
5383 if (m_opts.ignore_incoming_txs) return;
5399 if (peer.m_fee_filter_sent == MAX_FILTER) {
5402 peer.m_next_send_feefilter = 0us;
5405 if (current_time > peer.m_next_send_feefilter) {
5406 CAmount filterToSend = m_fee_filter_rounder.round(currentFilter);
5409 if (filterToSend != peer.m_fee_filter_sent) {
5411 peer.m_fee_filter_sent = filterToSend;
5418 (currentFilter < 3 * peer.m_fee_filter_sent / 4 || currentFilter > 4 * peer.m_fee_filter_sent / 3)) {
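The condition above re-broadcasts a feefilter early only when the current filter leaves the band between 3/4 and 4/3 of the value last announced to the peer. A worked arithmetic sketch of that band check, with plain integers standing in for CAmount:

#include <cstdint>
#include <iostream>

using Amount = int64_t; // satoshis per kvB (stand-in for CAmount)

bool OutsideBand(Amount current_filter, Amount fee_filter_sent)
{
    return current_filter < 3 * fee_filter_sent / 4 ||
           current_filter > 4 * fee_filter_sent / 3;
}

int main()
{
    const Amount sent{1000};
    std::cout << OutsideBand(900, sent) << '\n';  // 0: inside [750, 1333]
    std::cout << OutsideBand(700, sent) << '\n';  // 1: dropped below 3/4 of last value
    std::cout << OutsideBand(1400, sent) << '\n'; // 1: rose above 4/3 of last value
}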
5424 class CompareInvMempoolOrder
5428 explicit CompareInvMempoolOrder(CTxMemPool* mempool) : m_mempool{mempool} {}
5430 bool operator()(std::set<Wtxid>::iterator a, std::set<Wtxid>::iterator b)
5439 bool PeerManagerImpl::RejectIncomingTxs(const CNode& peer) const
5449 bool PeerManagerImpl::SetupAddressRelay(const CNode& node, Peer& peer)
5454 if (node.IsBlockOnlyConn()) return false;
5456 if (!peer.m_addr_relay_enabled.exchange(true)) {
5460 peer.m_addr_known = std::make_unique<CRollingBloomFilter>(5000, 0.001);
5466 bool PeerManagerImpl::SendMessages(CNode* pto)
5471 PeerRef peer = GetPeerRef(pto->GetId());
5472 if (!peer) return false;
5477 if (MaybeDiscourageAndDisconnect(*pto, *peer)) return true;
5480 if (!pto->IsInboundConn() && !peer->m_outbound_version_message_sent) {
5481 PushNodeVersion(*pto, *peer);
5482 peer->m_outbound_version_message_sent = true;
5489 const auto current_time{GetTime<std::chrono::microseconds>()};
5497 MaybeSendPing(*pto, *peer, current_time);
5502 MaybeSendAddr(*pto, *peer, current_time);
5504 MaybeSendSendHeaders(*pto, *peer);
5512 if (m_chainman.m_best_header == nullptr) {
5519 bool sync_blocks_and_headers_from_peer = false;
5520 if (state.fPreferredDownload) {
5521 sync_blocks_and_headers_from_peer = true;
5532 if (m_num_preferred_download_peers == 0 || mapBlocksInFlight.empty()) {
5533 sync_blocks_and_headers_from_peer = true;
5539 if ((nSyncStarted == 0 && sync_blocks_and_headers_from_peer) || m_chainman.m_best_header->Time() > NodeClock::now() - 24h) {
5540 const CBlockIndex* pindexStart = m_chainman.m_best_header;
5548 if (pindexStart->pprev)
5549 pindexStart = pindexStart->pprev;
5550 if (MaybeSendGetHeaders(*pto, GetLocator(pindexStart), *peer)) {
5553 state.fSyncStarted = true;
5577 LOCK(peer->m_block_inv_mutex);
5578 std::vector<CBlock> vHeaders;
5579 bool fRevertToInv = ((!peer->m_prefers_headers &&
5580 (!state.m_requested_hb_cmpctblocks || peer->m_blocks_for_headers_relay.size() > 1)) ||
5583 ProcessBlockAvailability(pto->GetId());
5585 if (!fRevertToInv) {
5586 bool fFoundStartingHeader = false;
5590 for (const uint256& hash : peer->m_blocks_for_headers_relay) {
5595 fRevertToInv = true;
5598 if (pBestIndex != nullptr && pindex->pprev != pBestIndex) {
5610 fRevertToInv = true;
5613 pBestIndex = pindex;
5614 if (fFoundStartingHeader) {
5617 } else if (PeerHasHeader(&state, pindex)) {
5619 } else if (pindex->pprev == nullptr || PeerHasHeader(&state, pindex->pprev)) {
5622 fFoundStartingHeader = true;
5627 fRevertToInv = true;
5632 if (!fRevertToInv && !vHeaders.empty()) {
5633 if (vHeaders.size() == 1 && state.m_requested_hb_cmpctblocks) {
5637 vHeaders.front().GetHash().ToString(), pto->GetId());
5639 std::optional<CSerializedNetMsg> cached_cmpctblock_msg;
5641 LOCK(m_most_recent_block_mutex);
5642 if (m_most_recent_block_hash == pBestIndex->GetBlockHash()) {
5646 if (cached_cmpctblock_msg.has_value()) {
5647 PushMessage(*pto, std::move(cached_cmpctblock_msg.value()));
5655 state.pindexBestHeaderSent = pBestIndex;
5656 } else if (peer->m_prefers_headers) {
5657 if (vHeaders.size() > 1) {
5660 vHeaders.front().GetHash().ToString(),
5661 vHeaders.back().GetHash().ToString(), pto->GetId());
5664 vHeaders.front().GetHash().ToString(), pto->GetId());
5667 state.pindexBestHeaderSent = pBestIndex;
5669 fRevertToInv = true;
5675 if (!peer->m_blocks_for_headers_relay.empty()) {
5676 const uint256& hashToAnnounce = peer->m_blocks_for_headers_relay.back();
5689 if (!PeerHasHeader(&state, pindex)) {
5690 peer->m_blocks_for_inv_relay.push_back(hashToAnnounce);
5696 peer->m_blocks_for_headers_relay.clear();
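The header-announcement walk above looks for the first block that connects to something the peer already has, then requires the remaining blocks to form one contiguous chain; any break falls back to inv announcement of the tip. A simplified standalone sketch of that decision (tiny stand-in types, not the real CBlockIndex, and a hypothetical peer_has_header predicate):

#include <functional>
#include <iostream>
#include <vector>

struct BlockIndex {
    int height;
    const BlockIndex* pprev;
};

std::vector<const BlockIndex*> SelectHeadersToAnnounce(
    const std::vector<const BlockIndex*>& to_announce,
    const std::function<bool(const BlockIndex*)>& peer_has_header,
    bool& revert_to_inv)
{
    std::vector<const BlockIndex*> headers;
    bool found_start{false};
    const BlockIndex* best{nullptr};
    for (const BlockIndex* pindex : to_announce) {
        if (best != nullptr && pindex->pprev != best) {
            revert_to_inv = true; // blocks to announce are not one contiguous chain
            return {};
        }
        best = pindex;
        if (found_start) {
            headers.push_back(pindex);
        } else if (peer_has_header(pindex)) {
            // peer already has this one; keep looking for the first new header
        } else if (pindex->pprev == nullptr || peer_has_header(pindex->pprev)) {
            found_start = true; // first header that connects to the peer's view
            headers.push_back(pindex);
        } else {
            revert_to_inv = true; // nothing connects; announce the tip via inv instead
            return {};
        }
    }
    return headers;
}

int main()
{
    BlockIndex a{100, nullptr}, b{101, &a}, c{102, &b};
    bool revert{false};
    auto headers = SelectHeadersToAnnounce({&b, &c},
        [&](const BlockIndex* p) { return p == &a; }, revert);
    std::cout << "revert=" << revert << " headers=" << headers.size() << '\n'; // revert=0 headers=2
}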
5702 std::vector<CInv> vInv;
5704 LOCK(peer->m_block_inv_mutex);
5708 for (const uint256& hash : peer->m_blocks_for_inv_relay) {
5715 peer->m_blocks_for_inv_relay.clear();
5718 if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
5719 LOCK(tx_relay->m_tx_inventory_mutex);
5722 if (tx_relay->m_next_inv_send_time < current_time) {
5723 fSendTrickle = true;
5733 LOCK(tx_relay->m_bloom_filter_mutex);
5734 if (!tx_relay->m_relay_txs) tx_relay->m_tx_inventory_to_send.clear();
5738 if (fSendTrickle && tx_relay->m_send_mempool) {
5739 auto vtxinfo = m_mempool.infoAll();
5740 tx_relay->m_send_mempool = false;
5741 const CFeeRate filterrate{tx_relay->m_fee_filter_received.load()};
5743 LOCK(tx_relay->m_bloom_filter_mutex);
5745 for (const auto& txinfo : vtxinfo) {
5746 const Txid& txid{txinfo.tx->GetHash()};
5747 const Wtxid& wtxid{txinfo.tx->GetWitnessHash()};
5748 const auto inv = peer->m_wtxid_relay ?
5751 tx_relay->m_tx_inventory_to_send.erase(wtxid);
5754 if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
5757 if (tx_relay->m_bloom_filter) {
5758 if (!tx_relay->m_bloom_filter->IsRelevantAndUpdate(*txinfo.tx)) continue;
5760 tx_relay->m_tx_inventory_known_filter.insert(inv.hash);
5761 vInv.push_back(inv);
5772 std::vector<std::set<Wtxid>::iterator> vInvTx;
5773 vInvTx.reserve(tx_relay->m_tx_inventory_to_send.size());
5774 for (std::set<Wtxid>::iterator it = tx_relay->m_tx_inventory_to_send.begin(); it != tx_relay->m_tx_inventory_to_send.end(); it++) {
5775 vInvTx.push_back(it);
5777 const CFeeRate filterrate{tx_relay->m_fee_filter_received.load()};
5780 CompareInvMempoolOrder compareInvMempoolOrder(&m_mempool);
5781 std::make_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
5784 unsigned int nRelayedTransactions = 0;
5785 LOCK(tx_relay->m_bloom_filter_mutex);
5788 while (!vInvTx.empty() && nRelayedTransactions < broadcast_max) {
5790 std::pop_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
5791 std::set<Wtxid>::iterator it = vInvTx.back();
5795 tx_relay->m_tx_inventory_to_send.erase(it);
5797 auto txinfo = m_mempool.info(wtxid);
5804 const auto inv = peer->m_wtxid_relay ?
5806 CInv{MSG_TX, txinfo.tx->GetHash().ToUint256()};
5808 if (tx_relay->m_tx_inventory_known_filter.contains(inv.hash)) {
5812 if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
5815 if (tx_relay->m_bloom_filter && !tx_relay->m_bloom_filter->IsRelevantAndUpdate(*txinfo.tx)) continue;
5817 vInv.push_back(inv);
5818 nRelayedTransactions++;
5823 tx_relay->m_tx_inventory_known_filter.insert(inv.hash);
5828 tx_relay->m_last_inv_sequence = m_mempool.GetSequence();
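The transaction trickle above heapifies iterators into m_tx_inventory_to_send so it can pop the highest-priority entries up to the broadcast budget while erasing them from the set. A standalone sketch of that pattern, with integers standing in for wtxids and a plain value ordering in place of the mempool comparator:

#include <algorithm>
#include <iostream>
#include <set>
#include <vector>

int main()
{
    std::set<int> to_send{5, 1, 9, 3, 7};
    const unsigned broadcast_max{3};

    std::vector<std::set<int>::iterator> queue;
    queue.reserve(to_send.size());
    for (auto it = to_send.begin(); it != to_send.end(); ++it) queue.push_back(it);

    // Highest value first (the real code orders by mempool feerate/topology instead).
    auto compare = [](std::set<int>::iterator a, std::set<int>::iterator b) { return *a < *b; };
    std::make_heap(queue.begin(), queue.end(), compare);

    unsigned relayed{0};
    while (!queue.empty() && relayed < broadcast_max) {
        std::pop_heap(queue.begin(), queue.end(), compare);
        auto it = queue.back();
        queue.pop_back();
        std::cout << "announce " << *it << '\n'; // 9, 7, 5
        to_send.erase(it);                       // erasing only invalidates this iterator
        ++relayed;
    }
    std::cout << "left in set: " << to_send.size() << '\n'; // 2
}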
5835 auto stalling_timeout = m_block_stalling_timeout.load();
5836 if (state.m_stalling_since.count() && state.m_stalling_since < current_time - stalling_timeout) {
5845 if (stalling_timeout != new_timeout && m_block_stalling_timeout.compare_exchange_strong(stalling_timeout, new_timeout)) {
5855 if (state.vBlocksInFlight.size() > 0) {
5856 QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
5857 int nOtherPeersWithValidatedDownloads = m_peers_downloading_from - 1;
5865 if (state.fSyncStarted && peer->m_headers_sync_timeout < std::chrono::microseconds::max()) {
5867 if (m_chainman.m_best_header->Time() <= NodeClock::now() - 24h) {
5868 if (current_time > peer->m_headers_sync_timeout && nSyncStarted == 1 && (m_num_preferred_download_peers - state.fPreferredDownload >= 1)) {
5885 state.fSyncStarted = false;
5887 peer->m_headers_sync_timeout = 0us;
5893 peer->m_headers_sync_timeout = std::chrono::microseconds::max();
5899 ConsiderEviction(*pto, *peer, GetTime<std::chrono::seconds>());
5904 std::vector<CInv> vGetData;
5906 std::vector<const CBlockIndex*> vToDownload;
5908 auto get_inflight_budget = [&state]() {
5914 FindNextBlocksToDownload(*peer, get_inflight_budget(), vToDownload, staller);
5919 TryDownloadingHistoricalBlocks(
5921 get_inflight_budget(),
5922 vToDownload, from_tip,
5923 Assert(m_chainman.GetSnapshotBaseBlock()));
5926 uint32_t nFetchFlags = GetFetchFlags(*peer);
5928 BlockRequested(pto->GetId(), *pindex);
5932 if (state.vBlocksInFlight.empty() && staller != -1) {
5933 if (State(staller)->m_stalling_since == 0us) {
5934 State(staller)->m_stalling_since = current_time;
5944 LOCK(m_tx_download_mutex);
5945 for (const GenTxid& gtxid : m_txdownloadman.GetRequestsToSend(pto->GetId(), current_time)) {
5954 if (!vGetData.empty())
5957 MaybeSendFeefilter(*pto, *peer, current_time);
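The staller bookkeeping near the end of SendMessages records when a peer first became the sole blocker of another peer's download window, and the earlier m_stalling_since check disconnects it once that timestamp is older than the (adaptive) stalling timeout. A minimal sketch of that timing logic with simplified types:

#include <chrono>
#include <iostream>
#include <map>

using namespace std::chrono_literals;
using NodeId = int;

int main()
{
    std::map<NodeId, std::chrono::microseconds> stalling_since; // 0us means "not stalling"
    const auto stalling_timeout{2s};

    const NodeId staller{3};
    auto current_time{10min + 0us};

    // Another peer's window is empty and blocked entirely by `staller`: start the clock.
    if (stalling_since[staller] == 0us) stalling_since[staller] = current_time;

    current_time += 3s; // later, when the staller itself is processed
    const bool disconnect = stalling_since[staller].count() != 0 &&
                            stalling_since[staller] < current_time - stalling_timeout;
    std::cout << "disconnect staller: " << disconnect << '\n'; // 1
}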
static constexpr CAmount MAX_MONEY
No amount larger than this (in satoshi) is valid.
bool MoneyRange(const CAmount &nValue)
int64_t CAmount
Amount in satoshis (Can be negative)
enum ReadStatus_t ReadStatus
const std::string & BlockFilterTypeName(BlockFilterType filter_type)
Get the human-readable name for a filter type.
BlockFilterIndex * GetBlockFilterIndex(BlockFilterType filter_type)
Get a block filter index by type.
static constexpr int CFCHECKPT_INTERVAL
Interval between compact filter checkpoints.
arith_uint256 GetBlockProof(const CBlockIndex &block)
CBlockLocator GetLocator(const CBlockIndex *index)
Get a locator for a block index entry.
int64_t GetBlockProofEquivalentTime(const CBlockIndex &to, const CBlockIndex &from, const CBlockIndex &tip, const Consensus::Params &params)
Return the time it would take to redo the work difference between from and to, assuming the current h...
const CBlockIndex * LastCommonAncestor(const CBlockIndex *pa, const CBlockIndex *pb)
Find the last common ancestor two blocks have.
@ BLOCK_VALID_CHAIN
Outputs do not overspend inputs, no double spends, coinbase output ok, no immature coinbase spends,...
@ BLOCK_VALID_TRANSACTIONS
Only first tx is coinbase, 2 <= coinbase input script length <= 100, transactions valid,...
@ BLOCK_VALID_SCRIPTS
Scripts & signatures ok.
@ BLOCK_VALID_TREE
All parent headers found, difficulty matches, timestamp >= median previous.
@ BLOCK_HAVE_DATA
full block available in blk*.dat
const CChainParams & Params()
Return the currently selected parameters.
#define Assert(val)
Identity function.
#define Assume(val)
Assume is the identity function.
Stochastic address manager.
void Connected(const CService &addr, NodeSeconds time=Now< NodeSeconds >())
We have successfully connected to this peer.
bool Good(const CService &addr, NodeSeconds time=Now< NodeSeconds >())
Mark an address record as accessible and attempt to move it to addrman's tried table.
bool Add(const std::vector< CAddress > &vAddr, const CNetAddr &source, std::chrono::seconds time_penalty=0s)
Attempt to add one or more addresses to addrman's new table.
void SetServices(const CService &addr, ServiceFlags nServices)
Update an entry's service bits.
bool IsBanned(const CNetAddr &net_addr) EXCLUSIVE_LOCKS_REQUIRED(!m_banned_mutex)
Return whether net_addr is banned.
bool IsDiscouraged(const CNetAddr &net_addr) EXCLUSIVE_LOCKS_REQUIRED(!m_banned_mutex)
Return whether net_addr is discouraged.
void Discourage(const CNetAddr &net_addr) EXCLUSIVE_LOCKS_REQUIRED(!m_banned_mutex)
BlockFilterIndex is used to store and retrieve block filters, hashes, and headers for a range of bloc...
bool LookupFilterRange(int start_height, const CBlockIndex *stop_index, std::vector< BlockFilter > &filters_out) const
Get a range of filters between two heights on a chain.
bool LookupFilterHashRange(int start_height, const CBlockIndex *stop_index, std::vector< uint256 > &hashes_out) const
Get a range of filter hashes between two heights on a chain.
bool LookupFilterHeader(const CBlockIndex *block_index, uint256 &header_out) EXCLUSIVE_LOCKS_REQUIRED(!m_cs_headers_cache)
Get a single filter header by block.
std::vector< CTransactionRef > txn
std::vector< uint16_t > indexes
A CService with information about it as peer.
ServiceFlags nServices
Serialized as uint64_t in V1, and as CompactSize in V2.
static constexpr SerParams V1_NETWORK
NodeSeconds nTime
Always included in serialization. The behavior is unspecified if the value is not representable as ui...
static constexpr SerParams V2_NETWORK
size_t BlockTxCount() const
std::vector< CTransactionRef > vtx
The block chain is a tree shaped structure starting with the genesis block at the root,...
bool IsValid(enum BlockStatus nUpTo) const EXCLUSIVE_LOCKS_REQUIRED(
Check whether this block index entry is valid up to the passed validity level.
CBlockIndex * pprev
pointer to the index of the predecessor of this block
CBlockHeader GetBlockHeader() const
arith_uint256 nChainWork
(memory only) Total amount of work (expected number of hashes) in the chain up to and including this ...
bool HaveNumChainTxs() const
Check whether this block and all previous blocks back to the genesis block or an assumeutxo snapshot ...
uint256 GetBlockHash() const
int64_t GetBlockTime() const
unsigned int nTx
Number of transactions in this block.
CBlockIndex * GetAncestor(int height)
Efficiently find an ancestor of this block.
int nHeight
height of the entry in the chain. The genesis block has height 0
FlatFilePos GetBlockPos() const EXCLUSIVE_LOCKS_REQUIRED(
BloomFilter is a probabilistic filter which SPV clients provide so that we can filter the transaction...
bool IsWithinSizeConstraints() const
True if the size is <= MAX_BLOOM_FILTER_SIZE and the number of hash functions is <= MAX_HASH_FUNCS (c...
An in-memory indexed chain of blocks.
CBlockIndex * Tip() const
Returns the index entry for the tip of this chain, or nullptr if none.
CBlockIndex * Next(const CBlockIndex *pindex) const
Find the successor of a block in this chain, or nullptr if the given index is not found or is the tip...
int Height() const
Return the maximal height in the chain.
bool Contains(const CBlockIndex *pindex) const
Efficiently check whether a block is present in this chain.
CChainParams defines various tweakable parameters of a given instance of the Bitcoin system.
const HeadersSyncParams & HeadersSync() const
const Consensus::Params & GetConsensus() const
void ForEachNode(const NodeFn &func)
bool ForNode(NodeId id, std::function< bool(CNode *pnode)> func)
bool GetNetworkActive() const
bool GetTryNewOutboundPeer() const
std::vector< CAddress > GetAddressesUnsafe(size_t max_addresses, size_t max_pct, std::optional< Network > network, const bool filtered=true) const
Return randomly selected addresses.
std::vector< CAddress > GetAddresses(CNode &requestor, size_t max_addresses, size_t max_pct)
Return addresses from the per-requestor cache.
void SetTryNewOutboundPeer(bool flag)
int GetExtraBlockRelayCount() const
void WakeMessageHandler() EXCLUSIVE_LOCKS_REQUIRED(!mutexMsgProc)
bool OutboundTargetReached(bool historicalBlockServingLimit) const EXCLUSIVE_LOCKS_REQUIRED(!m_total_bytes_sent_mutex)
check if the outbound target is reached if param historicalBlockServingLimit is set true,...
void StartExtraBlockRelayPeers()
bool DisconnectNode(std::string_view node)
CSipHasher GetDeterministicRandomizer(uint64_t id) const
Get a unique deterministic randomizer.
uint32_t GetMappedAS(const CNetAddr &addr) const
int GetExtraFullOutboundCount() const
bool CheckIncomingNonce(uint64_t nonce)
bool ShouldRunInactivityChecks(const CNode &node, std::chrono::seconds now) const
Return true if we should disconnect the peer for failing an inactivity check.
bool GetUseAddrmanOutgoing() const
RecursiveMutex & GetNodesMutex() const LOCK_RETURNED(m_nodes_mutex)
Fee rate in satoshis per virtualbyte: CAmount / vB the feerate is represented internally as FeeFrac.
CAmount GetFeePerK() const
Return the fee in satoshis for a vsize of 1000 vbytes.
bool IsMsgCmpctBlk() const
std::string ToString() const
bool IsMsgFilteredBlk() const
bool IsMsgWitnessBlk() const
Used to relay blocks as header + vector<merkle branch> to filtered nodes.
std::vector< std::pair< unsigned int, Txid > > vMatchedTxn
Public only for unit testing and relay testing (not relayed).
bool IsRelayable() const
Whether this address should be relayed to other peers even if we can't reach it ourselves.
static constexpr SerParams V1
bool IsAddrV1Compatible() const
Check if the current object can be serialized in pre-ADDRv2/BIP155 format.
Transport protocol agnostic message container.
Information about a peer.
bool IsFeelerConn() const
const std::chrono::seconds m_connected
Unix epoch time at peer connection.
bool ExpectServicesFromConn() const
std::atomic< int > nVersion
std::atomic_bool m_has_all_wanted_services
Whether this peer provides all services that we want.
bool IsInboundConn() const
bool HasPermission(NetPermissionFlags permission) const
bool IsOutboundOrBlockRelayConn() const
bool IsManualConn() const
const std::string m_addr_name
std::string ConnectionTypeAsString() const
void SetCommonVersion(int greatest_common_version)
std::atomic< bool > m_bip152_highbandwidth_to
std::atomic_bool m_relays_txs
Whether we should relay transactions to this peer.
std::atomic< bool > m_bip152_highbandwidth_from
void PongReceived(std::chrono::microseconds ping_time)
A ping-pong round trip has completed successfully.
std::atomic_bool fSuccessfullyConnected
fSuccessfullyConnected is set to true on receiving VERACK from the peer.
bool IsAddrFetchConn() const
uint64_t GetLocalNonce() const
void SetAddrLocal(const CService &addrLocalIn) EXCLUSIVE_LOCKS_REQUIRED(!m_addr_local_mutex)
May not be called more than once.
bool IsBlockOnlyConn() const
int GetCommonVersion() const
bool IsFullOutboundConn() const
const uint64_t m_network_key
Network key used to prevent fingerprinting our node across networks.
std::atomic_bool fPauseSend
std::optional< std::pair< CNetMessage, bool > > PollMessage() EXCLUSIVE_LOCKS_REQUIRED(!m_msg_process_queue_mutex)
Poll the next message from the processing queue of this connection.
std::atomic_bool m_bloom_filter_loaded
Whether this peer has loaded a bloom filter.
std::string LogIP(bool log_ip) const
Helper function to optionally log the IP address.
const std::unique_ptr< Transport > m_transport
Transport serializer/deserializer.
const bool m_inbound_onion
Whether this peer is an inbound onion, i.e. connected via our Tor onion service.
std::atomic< std::chrono::seconds > m_last_block_time
UNIX epoch time of the last block received from this peer that we had not yet seen (e....
std::string DisconnectMsg(bool log_ip) const
Helper function to log disconnects.
std::atomic_bool fDisconnect
std::atomic< std::chrono::seconds > m_last_tx_time
UNIX epoch time of the last transaction received from this peer that we had not yet seen (e....
RollingBloomFilter is a probabilistic "keep track of most recently inserted" set.
Simple class for background tasks that should be run periodically or once "after a while".
void scheduleEvery(Function f, std::chrono::milliseconds delta) EXCLUSIVE_LOCKS_REQUIRED(!newTaskMutex)
Repeat f until the scheduler is stopped.
void scheduleFromNow(Function f, std::chrono::milliseconds delta) EXCLUSIVE_LOCKS_REQUIRED(!newTaskMutex)
Call f once after the delta has passed.
A combination of a network address (CNetAddr) and a (TCP) port.
std::string ToStringAddrPort() const
std::vector< unsigned char > GetKey() const
uint64_t Finalize() const
Compute the 64-bit SipHash-2-4 of the data written so far.
CSipHasher & Write(uint64_t data)
Hash a 64-bit integer worth of data It is treated as if this was the little-endian interpretation of ...
CTxMemPool stores valid-according-to-the-current-best-chain transactions that may be included in the ...
TxMempoolInfo info_for_relay(const T &id, uint64_t last_sequence) const
Returns info for a transaction if its entry_sequence < last_sequence.
RecursiveMutex cs
This mutex needs to be locked when accessing mapTx or other members that are guarded by it.
CFeeRate GetMinFee(size_t sizelimit) const
CTransactionRef get(const Txid &hash) const
size_t DynamicMemoryUsage() const
std::vector< TxMempoolInfo > infoAll() const
TxMempoolInfo info(const T &id) const
void RemoveUnbroadcastTx(const Txid &txid, const bool unchecked=false)
Removes a transaction from the unbroadcast set.
bool CompareDepthAndScore(const Wtxid &hasha, const Wtxid &hashb) const
bool exists(const Txid &txid) const
uint64_t GetSequence() const EXCLUSIVE_LOCKS_REQUIRED(cs)
std::set< Txid > GetUnbroadcastTxs() const
Returns transactions in unbroadcast set.
unsigned long size() const
virtual void NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr< const CBlock > &block)
Notifies listeners that a block which builds directly on our current tip has been received and connec...
virtual void BlockConnected(ChainstateRole role, const std::shared_ptr< const CBlock > &block, const CBlockIndex *pindex)
Notifies listeners of a block being connected.
virtual void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload)
Notifies listeners when the block chain tip advances.
virtual void BlockChecked(const std::shared_ptr< const CBlock > &, const BlockValidationState &)
Notifies listeners of a block validation result.
virtual void ActiveTipChange(const CBlockIndex &new_tip, bool is_ibd)
Notifies listeners any time the block chain tip changes, synchronously.
virtual void BlockDisconnected(const std::shared_ptr< const CBlock > &block, const CBlockIndex *pindex)
Notifies listeners of a block being disconnected Provides the block that was disconnected.
Provides an interface for creating and interacting with one or two chainstates: an IBD chainstate gen...
SnapshotCompletionResult MaybeCompleteSnapshotValidation()
Once the background validation chainstate has reached the height which is the base of the UTXO snapsh...
const CBlockIndex * GetBackgroundSyncTip() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex())
The tip of the background sync chain.
MempoolAcceptResult ProcessTransaction(const CTransactionRef &tx, bool test_accept=false) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
Try to add a transaction to the memory pool.
bool IsInitialBlockDownload() const
Check whether we are doing an initial block download (synchronizing from disk or network)
RecursiveMutex & GetMutex() const LOCK_RETURNED(
Alias for cs_main.
CBlockIndex * ActiveTip() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex())
bool ProcessNewBlock(const std::shared_ptr< const CBlock > &block, bool force_processing, bool min_pow_checked, bool *new_block) LOCKS_EXCLUDED(cs_main)
Process an incoming block.
bool BackgroundSyncInProgress() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex())
The state of a background sync (for net processing)
bool ProcessNewBlockHeaders(std::span< const CBlockHeader > headers, bool min_pow_checked, BlockValidationState &state, const CBlockIndex **ppindex=nullptr) LOCKS_EXCLUDED(cs_main)
Process incoming block headers.
const arith_uint256 & MinimumChainWork() const
CChain & ActiveChain() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex())
void ReportHeadersPresync(const arith_uint256 &work, int64_t height, int64_t timestamp)
This is used by net_processing to report pre-synchronization progress of headers, as headers are not ...
node::BlockManager m_blockman
A single BlockManager instance is shared across each constructed chainstate to avoid duplicating bloc...
Double ended buffer combining vector and stream-like interfaces.
void ignore(size_t num_ignore)
uint64_t rand64() noexcept
Generate a random 64-bit integer.
const uint256 & ToUint256() const LIFETIMEBOUND
static Mutex g_msgproc_mutex
Mutex for anything that is only accessed via the msg processing thread.
virtual bool SendMessages(CNode *pnode) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex)=0
Send queued protocol messages to a given node.
virtual void FinalizeNode(const CNode &node)=0
Handle removal of a peer (clear state)
virtual bool HasAllDesirableServiceFlags(ServiceFlags services) const =0
Callback to determine whether the given set of service flags are sufficient for a peer to be "relevan...
virtual bool ProcessMessages(CNode *pnode, std::atomic< bool > &interrupt) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex)=0
Process protocol messages received from a given node.
virtual void InitializeNode(const CNode &node, ServiceFlags our_services)=0
Initialize a peer (setup state)
static bool HasFlag(NetPermissionFlags flags, NetPermissionFlags f)
ReadStatus FillBlock(CBlock &block, const std::vector< CTransactionRef > &vtx_missing, bool segwit_active)
bool IsTxAvailable(size_t index) const
ReadStatus InitData(const CBlockHeaderAndShortTxIDs &cmpctblock, const std::vector< std::pair< Wtxid, CTransactionRef > > &extra_txn)
virtual std::optional< std::string > FetchBlock(NodeId peer_id, const CBlockIndex &block_index)=0
Attempt to manually fetch block from a given peer.
virtual void ProcessMessage(CNode &pfrom, const std::string &msg_type, DataStream &vRecv, const std::chrono::microseconds time_received, const std::atomic< bool > &interruptMsgProc) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex)=0
Process a single message from a peer.
virtual ServiceFlags GetDesirableServiceFlags(ServiceFlags services) const =0
Gets the set of service flags which are "desirable" for a given peer.
virtual void StartScheduledTasks(CScheduler &scheduler)=0
Begin running background tasks, should only be called once.
virtual std::vector< node::TxOrphanage::OrphanInfo > GetOrphanTransactions()=0
static std::unique_ptr< PeerManager > make(CConnman &connman, AddrMan &addrman, BanMan *banman, ChainstateManager &chainman, CTxMemPool &pool, node::Warnings &warnings, Options opts)
virtual void UnitTestMisbehaving(NodeId peer_id)=0
virtual bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) const =0
Get statistics from node state.
virtual void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds)=0
This function is used for testing the stale tip eviction logic, see denialofservice_tests....
virtual void CheckForStaleTipAndEvictPeers()=0
Evict extra outbound peers.
I randrange(I range) noexcept
Generate a random integer in the range [0..range), with range > 0.
bool Contains(Network net) const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
std::string ToString() const
256-bit unsigned big integer.
constexpr bool IsNull() const
std::string ToString() const
CBlockIndex * LookupBlockIndex(const uint256 &hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
bool LoadingBlocks() const
bool ReadRawBlock(std::vector< std::byte > &block, const FlatFilePos &pos) const
bool ReadBlock(CBlock &block, const FlatFilePos &pos, const std::optional< uint256 > &expected_hash) const
Functions for disk access for blocks.
bool IsPruneMode() const
Whether running in -prune mode.
Class responsible for deciding what transactions to request and, once downloaded, whether and how to ...
Manages warning messages within a node.
std::string ToString() const
const uint256 & ToUint256() const LIFETIMEBOUND
std::string TransportTypeAsString(TransportProtocolType transport_type)
Convert TransportProtocolType enum to a string value.
@ BLOCK_HEADER_LOW_WORK
the block header may be on a too-little-work chain
@ BLOCK_INVALID_HEADER
invalid proof of work or time too old
@ BLOCK_CACHED_INVALID
this block was cached as being invalid and we didn't store the reason why
@ BLOCK_CONSENSUS
invalid by consensus rules (excluding any below reasons)
@ BLOCK_MISSING_PREV
We don't have the previous block the checked one is built on.
@ BLOCK_INVALID_PREV
A block this one builds on is invalid.
@ BLOCK_MUTATED
the block's data didn't match the data committed to by the PoW
@ BLOCK_TIME_FUTURE
block timestamp was > 2 hours in the future (or our clock is bad)
@ BLOCK_RESULT_UNSET
initial value. Block has not yet been rejected
@ TX_MISSING_INPUTS
transaction was missing some of its inputs
@ TX_UNKNOWN
transaction was not validated because package failed
@ TX_NO_MEMPOOL
this node does not have a mempool so can't validate the transaction
@ TX_RESULT_UNSET
initial value. Tx has not yet been rejected
static size_t RecursiveDynamicUsage(const CScript &script)
RecursiveMutex cs_main
Mutex to guard access to validation specific variables, such as reading or changing the chainstate.
bool DeploymentActiveAfter(const CBlockIndex *pindexPrev, const Consensus::Params &params, Consensus::BuriedDeployment dep, VersionBitsCache &versionbitscache)
Determine if a deployment is active for the next block.
bool DeploymentActiveAt(const CBlockIndex &index, const Consensus::Params &params, Consensus::BuriedDeployment dep, VersionBitsCache &versionbitscache)
Determine if a deployment is active for this block.
ChainstateRole
This enum describes the various roles a specific Chainstate instance can take.
static bool LogAcceptCategory(BCLog::LogFlags category, BCLog::Level level)
Return true if log accepts specified category, at the specified level.
#define LogDebug(category,...)
Transaction validation functions.
CSerializedNetMsg Make(std::string msg_type, Args &&... args)
constexpr const char * FILTERCLEAR
The filterclear message tells the receiving peer to remove a previously-set bloom filter.
constexpr const char * FEEFILTER
The feefilter message tells the receiving peer not to inv us any txs which do not meet the specified ...
constexpr const char * SENDHEADERS
Indicates that a node prefers to receive new block announcements via a "headers" message rather than ...
constexpr const char * GETBLOCKS
The getblocks message requests an inv message that provides block header hashes starting from a parti...
constexpr const char * HEADERS
The headers message sends one or more block headers to a node which previously requested certain head...
constexpr const char * ADDR
The addr (IP address) message relays connection information for peers on the network.
constexpr const char * GETBLOCKTXN
Contains a BlockTransactionsRequest Peer should respond with "blocktxn" message.
constexpr const char * CMPCTBLOCK
Contains a CBlockHeaderAndShortTxIDs object - providing a header and list of "short txids".
constexpr const char * CFCHECKPT
cfcheckpt is a response to a getcfcheckpt request containing a vector of evenly spaced filter headers...
constexpr const char * SENDADDRV2
The sendaddrv2 message signals support for receiving ADDRV2 messages (BIP155).
constexpr const char * GETADDR
The getaddr message requests an addr message from the receiving node, preferably one with lots of IP ...
constexpr const char * GETCFILTERS
getcfilters requests compact filters for a range of blocks.
constexpr const char * PONG
The pong message replies to a ping message, proving to the pinging node that the ponging node is stil...
constexpr const char * BLOCKTXN
Contains a BlockTransactions.
constexpr const char * CFHEADERS
cfheaders is a response to a getcfheaders request containing a filter header and a vector of filter h...
constexpr const char * PING
The ping message is sent periodically to help confirm that the receiving peer is still connected.
constexpr const char * FILTERLOAD
The filterload message tells the receiving peer to filter all relayed transactions and requested merk...
constexpr const char * SENDTXRCNCL
Contains a 4-byte version number and an 8-byte salt.
constexpr const char * ADDRV2
The addrv2 message relays connection information for peers on the network just like the addr message,...
constexpr const char * VERACK
The verack message acknowledges a previously-received version message, informing the connecting node ...
constexpr const char * GETHEADERS
The getheaders message requests a headers message that provides block headers starting from a particu...
constexpr const char * FILTERADD
The filteradd message tells the receiving peer to add a single element to a previously-set bloom filt...
constexpr const char * CFILTER
cfilter is a response to a getcfilters request containing a single compact filter.
constexpr const char * GETDATA
The getdata message requests one or more data objects from another node.
constexpr const char * SENDCMPCT
Contains a 1-byte bool and 8-byte LE version number.
constexpr const char * GETCFCHECKPT
getcfcheckpt requests evenly spaced compact filter headers, enabling parallelized download and valida...
constexpr const char * INV
The inv message (inventory message) transmits one or more inventories of objects known to the transmi...
constexpr const char * TX
The tx message transmits a single transaction.
constexpr const char * MEMPOOL
The mempool message requests the TXIDs of transactions that the receiving node has verified as valid ...
constexpr const char * NOTFOUND
The notfound message is a reply to a getdata message which requested an object the receiving node doe...
constexpr const char * MERKLEBLOCK
The merkleblock message is a reply to a getdata message which requested a block using the inventory t...
constexpr const char * WTXIDRELAY
Indicates that a node prefers to relay transactions via wtxid, rather than txid.
constexpr const char * BLOCK
The block message transmits a single serialized block.
constexpr const char * GETCFHEADERS
getcfheaders requests a compact filter header and the filter hashes for a range of blocks,...
constexpr const char * VERSION
The version message provides information about the transmitting node to the receiving node at the beg...
static constexpr int32_t MAX_PEER_TX_ANNOUNCEMENTS
Maximum number of transactions to consider for requesting, per peer.
""_hex is a compile-time user-defined literal returning a std::array<std::byte>, equivalent to ParseH...
std::string ToString(const T &t)
Locale-independent version of std::to_string.
std::string strSubVersion
Subversion as sent to the P2P network in version messages.
std::optional< CService > GetLocalAddrForPeer(CNode &node)
Returns a local address that we should advertise to this peer.
std::function< void(const CAddress &addr, const std::string &msg_type, std::span< const unsigned char > data, bool is_incoming)> CaptureMessage
Defaults to CaptureMessageToFile(), but can be overridden by unit tests.
bool SeenLocal(const CService &addr)
vote for a local address
static const unsigned int MAX_SUBVERSION_LENGTH
Maximum length of the user agent string in version message.
static constexpr std::chrono::minutes TIMEOUT_INTERVAL
Time after which to disconnect, after waiting for a ping response (or inactivity).
static constexpr auto HEADERS_RESPONSE_TIME
How long to wait for a peer to respond to a getheaders request.
static constexpr size_t MAX_ADDR_TO_SEND
The maximum number of address records permitted in an ADDR message.
static constexpr size_t MAX_ADDR_PROCESSING_TOKEN_BUCKET
The soft limit of the address processing token bucket (the regular MAX_ADDR_RATE_PER_SECOND based inc...
TRACEPOINT_SEMAPHORE(net, inbound_message)
static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER
Number of blocks that can be requested at any given time from a single peer.
static constexpr auto BLOCK_STALLING_TIMEOUT_DEFAULT
Default time during which a peer must stall block download progress before being disconnected.
static constexpr auto AVG_FEEFILTER_BROADCAST_INTERVAL
Average delay between feefilter broadcasts in seconds.
static constexpr auto EXTRA_PEER_CHECK_INTERVAL
How frequently to check for extra outbound peers and disconnect.
static const unsigned int BLOCK_DOWNLOAD_WINDOW
Size of the "block download window": how far ahead of our current height do we fetch?...
static constexpr int STALE_RELAY_AGE_LIMIT
Age after which a stale block will no longer be served if requested as protection against fingerprint...
static constexpr int HISTORICAL_BLOCK_AGE
Age after which a block is considered historical for purposes of rate limiting block relay.
static constexpr auto ROTATE_ADDR_RELAY_DEST_INTERVAL
Delay between rotating the peers we relay a particular address to.
static constexpr auto MINIMUM_CONNECT_TIME
Minimum time an outbound-peer-eviction candidate must be connected for, in order to evict.
static constexpr auto CHAIN_SYNC_TIMEOUT
Timeout for (unprotected) outbound peers to sync to our chainwork.
static constexpr auto OUTBOUND_INVENTORY_BROADCAST_INTERVAL
Average delay between trickled inventory transmissions for outbound peers.
static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS
Minimum blocks required to signal NODE_NETWORK_LIMITED.
static constexpr auto AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL
Average delay between local address broadcasts.
static const int MAX_BLOCKTXN_DEPTH
Maximum depth of blocks we're willing to respond to GETBLOCKTXN requests for.
static constexpr uint64_t CMPCTBLOCKS_VERSION
The compactblocks version we support.
static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT
Protect at least this many outbound peers from disconnection due to slow/ behind headers chain.
static constexpr auto INBOUND_INVENTORY_BROADCAST_INTERVAL
Average delay between trickled inventory transmissions for inbound peers.
static constexpr auto MAX_FEEFILTER_CHANGE_DELAY
Maximum feefilter broadcast delay after significant change.
static constexpr uint32_t MAX_GETCFILTERS_SIZE
Maximum number of compact filters that may be requested with one getcfilters.
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_BASE
Headers download timeout.
static const unsigned int MAX_GETDATA_SZ
Limit to avoid sending big packets.
static constexpr double BLOCK_DOWNLOAD_TIMEOUT_BASE
Block download timeout base, expressed in multiples of the block interval (i.e.
static constexpr auto STALE_CHECK_INTERVAL
How frequently to check for stale tips.
static constexpr auto AVG_ADDRESS_BROADCAST_INTERVAL
Average delay between peer address broadcasts.
static const unsigned int MAX_LOCATOR_SZ
The maximum number of entries in a locator.
static constexpr unsigned int INVENTORY_BROADCAST_TARGET
Target number of tx inventory items to send per transmission.
static constexpr double BLOCK_DOWNLOAD_TIMEOUT_PER_PEER
Additional block download timeout per parallel downloading peer (i.e.
static constexpr double MAX_ADDR_RATE_PER_SECOND
The maximum rate of address records we're willing to process on average.
static constexpr auto PING_INTERVAL
Time between pings automatically sent out for latency probing and keepalive.
static const int MAX_CMPCTBLOCK_DEPTH
Maximum depth of blocks we're willing to serve as compact blocks to peers when requested.
static const unsigned int MAX_BLOCKS_TO_ANNOUNCE
Maximum number of headers to announce when relaying blocks with headers message.
static const unsigned int NODE_NETWORK_LIMITED_ALLOW_CONN_BLOCKS
Window, in blocks, for connecting to NODE_NETWORK_LIMITED peers.
static constexpr uint32_t MAX_GETCFHEADERS_SIZE
Maximum number of cf hashes that may be requested with one getcfheaders.
static constexpr auto BLOCK_STALLING_TIMEOUT_MAX
Maximum timeout for stalling block download.
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER
static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY
SHA256("main address relay")[0:8].
static constexpr unsigned int INVENTORY_BROADCAST_MAX
Maximum number of inventory items to send per transmission.
static constexpr size_t MAX_PCT_ADDR_TO_SEND
the maximum percentage of addresses from our addrman to return in response to a getaddr message.
static const unsigned int MAX_INV_SZ
The maximum number of entries in an 'inv' protocol message.
static constexpr unsigned int INVENTORY_BROADCAST_PER_SECOND
Maximum rate of inventory items to send per second.
static const unsigned int MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK
Maximum number of outstanding CMPCTBLOCK requests for the same block.
ReachableNets g_reachable_nets
bool IsProxy(const CNetAddr &addr)
static constexpr unsigned int DEFAULT_MIN_RELAY_TX_FEE
Default for -minrelaytxfee, minimum relay fee for transactions.
static constexpr TransactionSerParams TX_NO_WITNESS
static constexpr TransactionSerParams TX_WITH_WITNESS
std::shared_ptr< const CTransaction > CTransactionRef
GenTxid ToGenTxid(const CInv &inv)
Convert a TX/WITNESS_TX/WTX CInv to a GenTxid.
const uint32_t MSG_WITNESS_FLAG
getdata message type flags
@ MSG_WTX
Defined in BIP 339.
@ MSG_CMPCT_BLOCK
Defined in BIP152.
@ MSG_WITNESS_BLOCK
Defined in BIP144.
ServiceFlags
nServices flags
static bool MayHaveUsefulAddressDB(ServiceFlags services)
Checks if a peer with the given service flags may be capable of having a robust address-storage DB.
static const int WTXID_RELAY_VERSION
"wtxidrelay" command for wtxid-based relay starts with this version
static const int SHORT_IDS_BLOCKS_VERSION
short-id-based block download starts with this version
static const int SENDHEADERS_VERSION
"sendheaders" command and announcing blocks with headers starts with this version
static const int PROTOCOL_VERSION
network protocol versioning
static const int FEEFILTER_VERSION
"feefilter" tells peers to filter invs to you by fee starts with this version
static const int MIN_PEER_PROTO_VERSION
disconnect from peers older than this proto version
static const int INVALID_CB_NO_BAN_VERSION
not banning for invalid compact blocks starts with this version
static const int BIP0031_VERSION
BIP 0031, pong message, is enabled for all versions AFTER this one.
static const unsigned int MAX_SCRIPT_ELEMENT_SIZE
#define LIMITED_STRING(obj, n)
uint64_t ReadCompactSize(Stream &is, bool range_check=true)
Decode a CompactSize-encoded variable-length integer.
constexpr auto MakeUCharSpan(const V &v) -> decltype(UCharSpanCast(std::span{v}))
Like the std::span constructor, but for (const) unsigned char member types only.
Describes a place in the block chain to another node such that if the other node doesn't have the sam...
std::vector< uint256 > vHave
std::chrono::microseconds m_ping_wait
std::vector< int > vHeightInFlight
CAmount m_fee_filter_received
std::chrono::seconds time_offset
bool m_addr_relay_enabled
uint64_t m_addr_rate_limited
uint64_t m_addr_processed
ServiceFlags their_services
Parameters that influence chain consensus.
int64_t nPowTargetSpacing
std::chrono::seconds PowTargetSpacing() const
Validation result for a transaction evaluated by MemPoolAccept (single or package).
const ResultType m_result_type
Result type.
const TxValidationState m_state
Contains information about why the transaction failed.
@ DIFFERENT_WITNESS
Valid, transaction was already in the mempool.
@ INVALID
Fully validated, valid.
const std::list< CTransactionRef > m_replaced_transactions
Mempool transactions replaced by the tx.
static time_point now() noexcept
Return current system time or mocked time, if set.
std::chrono::time_point< NodeClock > time_point
Validation result for package mempool acceptance.
PackageValidationState m_state
std::map< Wtxid, MempoolAcceptResult > m_tx_results
Map from wtxid to finished MempoolAcceptResults.
std::chrono::seconds median_outbound_time_offset
CFeeRate min_relay_feerate
A fee rate smaller than this is considered zero fee (for relaying, mining and transaction creation)
std::vector< NodeId > m_senders
std::string ToString() const
#define AssertLockNotHeld(cs)
#define WITH_LOCK(cs, code)
Run code while locking a mutex.
COutPoint ProcessBlock(const NodeContext &node, const std::shared_ptr< CBlock > &block)
Returns the generated coin (or Null if the block was invalid).
#define EXCLUSIVE_LOCKS_REQUIRED(...)
#define LOCKS_EXCLUDED(...)
#define ACQUIRED_BEFORE(...)
#define TRACEPOINT(context,...)
consteval auto _(util::TranslatedLiteral str)
ReconciliationRegisterResult
static constexpr uint32_t TXRECONCILIATION_VERSION
Supported transaction reconciliation protocol version.
std::string SanitizeString(std::string_view str, int rule)
Remove unsafe chars.
int64_t GetTime()
DEPRECATED Use either ClockType::now() or Now<TimePointType>() if a cast is needed.
constexpr int64_t count_microseconds(std::chrono::microseconds t)
constexpr int64_t count_seconds(std::chrono::seconds t)
std::chrono::time_point< NodeClock, std::chrono::seconds > NodeSeconds
PackageMempoolAcceptResult ProcessNewPackage(Chainstate &active_chainstate, CTxMemPool &pool, const Package &package, bool test_accept, const std::optional< CFeeRate > &client_maxfeerate)
Validate (and maybe submit) a package to the mempool.
bool IsBlockMutated(const CBlock &block, bool check_witness_root)
Check if a block has been mutated (with respect to its merkle root and witness commitments).
bool HasValidProofOfWork(const std::vector< CBlockHeader > &headers, const Consensus::Params &consensusParams)
Check with the proof of work on each blockheader matches the value in nBits.
arith_uint256 CalculateClaimedHeadersWork(std::span< const CBlockHeader > headers)
Return the sum of the claimed work on a given set of headers.
static const unsigned int MIN_BLOCKS_TO_KEEP
Block files containing a block-height within MIN_BLOCKS_TO_KEEP of ActiveChain().Tip() will not be pr...