Bitcoin Core  21.99.0
P2P Digital Currency
net_processing.cpp
Go to the documentation of this file.
1 // Copyright (c) 2009-2010 Satoshi Nakamoto
2 // Copyright (c) 2009-2020 The Bitcoin Core developers
3 // Distributed under the MIT software license, see the accompanying
4 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
5 
6 #include <net_processing.h>
7 
8 #include <addrman.h>
9 #include <banman.h>
10 #include <blockencodings.h>
11 #include <blockfilter.h>
12 #include <chainparams.h>
13 #include <consensus/validation.h>
14 #include <hash.h>
15 #include <index/blockfilterindex.h>
16 #include <merkleblock.h>
17 #include <netbase.h>
18 #include <netmessagemaker.h>
19 #include <policy/fees.h>
20 #include <policy/policy.h>
21 #include <primitives/block.h>
22 #include <primitives/transaction.h>
23 #include <random.h>
24 #include <reverse_iterator.h>
25 #include <scheduler.h>
26 #include <streams.h>
27 #include <tinyformat.h>
28 #include <txmempool.h>
29 #include <util/check.h> // For NDEBUG compile time check
30 #include <util/strencodings.h>
31 #include <util/system.h>
32 #include <validation.h>
33 
34 #include <memory>
35 #include <typeinfo>
36 
/** Expiration time for orphan transactions in seconds */
static constexpr int64_t ORPHAN_TX_EXPIRE_TIME = 20 * 60;
/** Minimum time between orphan transactions expire time checks in seconds */
static constexpr int64_t ORPHAN_TX_EXPIRE_INTERVAL = 5 * 60;
/** How long to cache transactions in mapRelay for normal relay */
static constexpr std::chrono::seconds RELAY_TX_CACHE_TIME = std::chrono::minutes{15};
/** How long a transaction has to be in the mempool before it can unconditionally be relayed (even when not in mapRelay). */
static constexpr std::chrono::seconds UNCONDITIONAL_RELAY_DELAY = std::chrono::minutes{2};
/** Headers download timeout, expressed in microseconds.
 *  Timeout = base + per_header * (expected number of headers) */
static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_BASE = 15 * 60 * 1000000; // 15 minutes
static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1000; // 1ms/header
/** Protect at least this many outbound peers from disconnection due to slow/
 *  behind headers chain (see the m_chain_sync / m_protect logic below). */
static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT = 4;
/** Timeout for (unprotected) outbound peers to sync to our chainwork, in seconds */
static constexpr int64_t CHAIN_SYNC_TIMEOUT = 20 * 60; // 20 minutes
/** How frequently to check for stale tips, in seconds */
static constexpr int64_t STALE_CHECK_INTERVAL = 10 * 60; // 10 minutes
/** How frequently to check for extra outbound peers and disconnect, in seconds */
static constexpr int64_t EXTRA_PEER_CHECK_INTERVAL = 45;
/** Minimum time an outbound-peer-eviction candidate must be connected for, in order to evict, in seconds */
static constexpr int64_t MINIMUM_CONNECT_TIME = 30;
/** SHA256("main address relay")[0:8] — salt for deterministic address-relay randomization */
static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL;
/// Age after which a stale block will no longer be served if requested as
/// protection against fingerprinting. Set to one month, denominated in seconds.
static constexpr int STALE_RELAY_AGE_LIMIT = 30 * 24 * 60 * 60;
/// Age after which a block is considered historical for purposes of rate
/// limiting block relay. Set to one week, denominated in seconds.
static constexpr int HISTORICAL_BLOCK_AGE = 7 * 24 * 60 * 60;
/** Time between pings automatically sent out for latency probing and keepalive */
static constexpr std::chrono::minutes PING_INTERVAL{2};
/** The maximum number of entries in a locator */
static const unsigned int MAX_LOCATOR_SZ = 101;
/** The maximum number of entries in an 'inv' protocol message */
static const unsigned int MAX_INV_SZ = 50000;
/** Maximum number of in-flight transaction requests from a peer. It is not a
 *  hard limit, but the threshold at which point the OVERLOADED_PEER_TX_DELAY kicks in. */
static constexpr int32_t MAX_PEER_TX_REQUEST_IN_FLIGHT = 100;
/** Maximum number of transaction announcements that are considered for request
 *  from a single peer at any point in time. */
static constexpr int32_t MAX_PEER_TX_ANNOUNCEMENTS = 5000;
/** How long to delay requesting transactions via txids, if we have wtxid-relaying peers */
static constexpr auto TXID_RELAY_DELAY = std::chrono::seconds{2};
/** How long to delay requesting transactions from non-preferred peers */
static constexpr auto NONPREF_PEER_TX_DELAY = std::chrono::seconds{2};
/** How long to delay requesting transactions from overloaded peers (see MAX_PEER_TX_REQUEST_IN_FLIGHT). */
static constexpr auto OVERLOADED_PEER_TX_DELAY = std::chrono::seconds{2};
/** How long to wait before downloading a transaction from an additional peer */
static constexpr std::chrono::microseconds GETDATA_TX_INTERVAL{std::chrono::seconds{60}};
/** Limit to avoid sending big packets. Not used in processing incoming GETDATA for compatibility */
static const unsigned int MAX_GETDATA_SZ = 1000;
/** Number of blocks that can be requested at any given time from a single peer. */
static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16;
/** Timeout in seconds during which a peer must stall block download progress before being disconnected. */
static const unsigned int BLOCK_STALLING_TIMEOUT = 2;
/** Number of headers sent in one getheaders result. We rely on the assumption that if a peer sends
 *  less than this number, we reached its tip. Changing this value is a protocol upgrade. */
static const unsigned int MAX_HEADERS_RESULTS = 2000;
/** Maximum depth of blocks we're willing to serve as compact blocks to peers
 *  when requested. For older blocks, a regular BLOCK response will be sent. */
static const int MAX_CMPCTBLOCK_DEPTH = 5;
/** Maximum depth of blocks we're willing to respond to GETBLOCKTXN requests for. */
static const int MAX_BLOCKTXN_DEPTH = 10;
/** Size of the "block download window": how far ahead of our current height do we fetch?
 *  Larger windows tolerate larger download speed differences between peers, but increase
 *  the potential degree of disordering of blocks on disk. */
static const unsigned int BLOCK_DOWNLOAD_WINDOW = 1024;
/** Block download timeout base, expressed in millionths of the block interval (i.e. 10 min) */
static const int64_t BLOCK_DOWNLOAD_TIMEOUT_BASE = 1000000;
/** Additional block download timeout per parallel downloading peer (i.e. 5 min) */
static const int64_t BLOCK_DOWNLOAD_TIMEOUT_PER_PEER = 500000;
/** Maximum number of headers to announce when relaying blocks with headers message. */
static const unsigned int MAX_BLOCKS_TO_ANNOUNCE = 8;
/** Maximum number of unconnecting headers announcements before applying a misbehavior penalty */
static const int MAX_UNCONNECTING_HEADERS = 10;
/** Minimum blocks required to signal NODE_NETWORK_LIMITED */
static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS = 288;
/** Average delay between local address broadcasts */
static constexpr std::chrono::hours AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL{24};
/** Average delay between peer address broadcasts */
static constexpr std::chrono::seconds AVG_ADDRESS_BROADCAST_INTERVAL{30};
/** Average delay between trickled inventory transmissions, in seconds.
 *  Blocks and peers with noban permission bypass this. */
static const unsigned int INVENTORY_BROADCAST_INTERVAL = 5;
/** Maximum rate of inventory items to send per second.
 *  Limits the impact of low-fee transaction floods. */
static constexpr unsigned int INVENTORY_BROADCAST_PER_SECOND = 7;
/** The number of most recently announced transactions a peer can request. */
static constexpr unsigned int INVENTORY_MAX_RECENT_RELAY = 3500;
/** Verify that INVENTORY_MAX_RECENT_RELAY is enough to cache everything typically
 *  relayed before unconditional relay from the mempool kicks in. */
static_assert(INVENTORY_MAX_RECENT_RELAY >= INVENTORY_BROADCAST_PER_SECOND * UNCONDITIONAL_RELAY_DELAY / std::chrono::seconds{1}, "INVENTORY_RELAY_MAX too low");
/** Average delay between feefilter broadcasts in seconds. */
static constexpr unsigned int AVG_FEEFILTER_BROADCAST_INTERVAL = 10 * 60;
/** Maximum feefilter broadcast delay after significant change. */
static constexpr unsigned int MAX_FEEFILTER_CHANGE_DELAY = 5 * 60;
/** Maximum number of compact filters that may be requested with one getcfilters. See BIP 157. */
static constexpr uint32_t MAX_GETCFILTERS_SIZE = 1000;
/** Maximum number of cf hashes that may be requested with one getcfheaders. See BIP 157. */
static constexpr uint32_t MAX_GETCFHEADERS_SIZE = 2000;
/** The maximum percentage of addresses from our addrman to return in response to a getaddr message. */
static constexpr size_t MAX_PCT_ADDR_TO_SEND = 23;
149 
150 struct COrphanTx {
151  // When modifying, adapt the copy of this definition in tests/DoS_tests.
154  int64_t nTimeExpire;
155  size_t list_pos;
156 };
157 
/** Map from txid to orphan transaction record. Protected by g_cs_orphans. */
std::map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(g_cs_orphans);
/** Index from wtxid into mapOrphanTransactions, to allow looking up orphans by
 *  their witness id as well as by txid. Protected by g_cs_orphans. */
std::map<uint256, std::map<uint256, COrphanTx>::iterator> g_orphans_by_wtxid GUARDED_BY(g_cs_orphans);

/** Remove all orphans received from the given peer (defined below). */
void EraseOrphansFor(NodeId peer);
168 
// Internal stuff
namespace {
    /** Number of peers with fSyncStarted (i.e. we sent them a getheaders to start sync). */
    int nSyncStarted GUARDED_BY(cs_main) = 0;

    /**
     * Sources of received blocks, saved to be able to punish them when processing
     * happens afterwards.
     * Set mapBlockSource[hash].second to false if the node should not be
     * punished if the block is invalid.
     */
    std::map<uint256, std::pair<NodeId, bool>> mapBlockSource GUARDED_BY(cs_main);

    /** Rolling filter of transaction hashes we have recently rejected, to avoid
     *  re-requesting them. Reset whenever the chain tip changes (see
     *  hashRecentRejectsChainTip), since a reorg can make rejections stale. */
    std::unique_ptr<CRollingBloomFilter> recentRejects GUARDED_BY(cs_main);
    uint256 hashRecentRejectsChainTip GUARDED_BY(cs_main);

    /*
     * Filter for transactions that have been recently confirmed.
     * We use this to avoid requesting transactions that have already been
     * confirmed.
     */
    Mutex g_cs_recent_confirmed_transactions;
    std::unique_ptr<CRollingBloomFilter> g_recent_confirmed_transactions GUARDED_BY(g_cs_recent_confirmed_transactions);

    /** Blocks that are in flight, and that are in the queue to be downloaded. */
    struct QueuedBlock {
        uint256 hash;
        const CBlockIndex* pindex;      //!< Optional: block index entry, if the headers were received already
        bool fValidatedHeaders;         //!< Whether this block has validated headers at the time of request.
        std::unique_ptr<PartiallyDownloadedBlock> partialBlock; //!< Optional, used for CMPCTBLOCK downloads
    };
    std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> > mapBlocksInFlight GUARDED_BY(cs_main);

    /** Stack of nodes which we have set to announce using compact blocks */
    std::list<NodeId> lNodesAnnouncingHeaderAndIDs GUARDED_BY(cs_main);

    /** Number of preferable block download peers. */
    int nPreferredDownload GUARDED_BY(cs_main) = 0;

    /** Number of peers from which we're downloading blocks. */
    int nPeersWithValidatedDownloads GUARDED_BY(cs_main) = 0;

    /** Number of peers with wtxid relay. */
    int g_wtxid_relay_peers GUARDED_BY(cs_main) = 0;

    /** Number of outbound peers with m_chain_sync.m_protect. */
    int g_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0;

    /** When our tip was last updated. */
    std::atomic<int64_t> g_last_tip_update(0);

    /** Relay map (txid or wtxid -> CTransactionRef); entries expire via vRelayExpiration. */
    typedef std::map<uint256, CTransactionRef> MapRelay;
    MapRelay mapRelay GUARDED_BY(cs_main);
    /** Expiration-time ordered list of (expire time, relay map entry) pairs. */
    std::deque<std::pair<int64_t, MapRelay::iterator>> vRelayExpiration GUARDED_BY(cs_main);

    /** Orders map iterators by the address of the pointed-to element, giving a
     *  cheap strict weak ordering for iterator sets. */
    struct IteratorComparator
    {
        template<typename I>
        bool operator()(const I& a, const I& b) const
        {
            return &(*a) < &(*b);
        }
    };

    /** Index from COutPoint into the mapOrphanTransactions entries that spend it. */
    std::map<COutPoint, std::set<std::map<uint256, COrphanTx>::iterator, IteratorComparator>> mapOrphanTransactionsByPrev GUARDED_BY(g_cs_orphans);
    /** Orphan transactions in vector for quick random eviction */
    std::vector<std::map<uint256, COrphanTx>::iterator> g_orphan_list GUARDED_BY(g_cs_orphans);

    /** Ring buffer of recently-seen extra transactions kept around to help
     *  reconstruct compact blocks. */
    static std::vector<std::pair<uint256, CTransactionRef>> vExtraTxnForCompact GUARDED_BY(g_cs_orphans);
    /** Next write position in vExtraTxnForCompact. */
    static size_t vExtraTxnForCompactIt GUARDED_BY(g_cs_orphans) = 0;
} // namespace
282 
283 namespace {
/**
 * Maintain validation-specific state about nodes, protected by cs_main, instead
 * of by CNode's own locks. This simplifies asynchronous operation, where
 * processing of incoming data is done after the ProcessMessage call returns,
 * and we're no longer holding the node's locks.
 */
struct CNodeState {
    //! The peer's address
    const CService address;
    //! The best known block we know this peer has announced.
    const CBlockIndex *pindexBestKnownBlock;
    //! The hash of the last unknown block this peer has announced.
    uint256 hashLastUnknownBlock;
    //! The last full block we both have.
    const CBlockIndex *pindexLastCommonBlock;
    //! The best header we have sent our peer.
    const CBlockIndex *pindexBestHeaderSent;
    //! Length of current-streak of unconnecting headers announcements
    int nUnconnectingHeaders;
    //! Whether we've started headers synchronization with this peer.
    bool fSyncStarted;
    //! When to potentially disconnect peer for stalling headers download
    int64_t nHeadersSyncTimeout;
    //! Since when we're stalling block download progress (in microseconds), or 0.
    int64_t nStallingSince;
    std::list<QueuedBlock> vBlocksInFlight;
    //! When the first entry in vBlocksInFlight started downloading. Don't care when vBlocksInFlight is empty.
    int64_t nDownloadingSince;
    int nBlocksInFlight;
    int nBlocksInFlightValidHeaders;
    //! Whether we consider this a preferred download peer.
    bool fPreferredDownload;
    //! Whether this peer wants invs or headers (when possible) for block announcements.
    bool fPreferHeaders;
    //! Whether this peer wants invs or cmpctblocks (when possible) for block announcements.
    bool fPreferHeaderAndIDs;
    //! Whether this peer will send us cmpctblocks if we request them (it
    //! announced support via sendcmpct).
    bool fProvidesHeaderAndIDs;
    //! Whether this peer can give us witnesses
    bool fHaveWitness;
    //! Whether this peer wants witnesses in cmpctblocks/blocktxns
    bool fWantsCmpctWitness;
    //! Whether this peer sends us cmpctblocks/blocktxns in the witness-ness we
    //! want them in (i.e. matching whether we announced NODE_WITNESS).
    bool fSupportsDesiredCmpctVersion;

    /** State used to enforce CHAIN_SYNC_TIMEOUT and protect outbound peers
     *  from eviction while they are catching up. */
    struct ChainSyncTimeoutState {
        //! A timeout used for checking whether our peer has sufficiently synced
        int64_t m_timeout;
        //! A header with the work we require on our peer's chain
        const CBlockIndex * m_work_header;
        //! After timeout is reached, set to true after sending getheaders
        bool m_sent_getheaders;
        //! Whether this peer is protected from disconnection due to a bad/slow chain
        bool m_protect;
    };

    ChainSyncTimeoutState m_chain_sync;

    //! Time of last new block announcement
    int64_t m_last_block_announcement;

    //! Whether this peer is an inbound connection
    bool m_is_inbound;

    //! Whether this peer is a manual connection
    bool m_is_manual_connection;

    //! A rolling bloom filter of all announced tx CInvs to this peer.
    CRollingBloomFilter m_recently_announced_invs = CRollingBloomFilter{INVENTORY_MAX_RECENT_RELAY, 0.000001};

    //! Whether this peer relays txs via wtxid
    bool m_wtxid_relay{false};

    CNodeState(CAddress addrIn, bool is_inbound, bool is_manual)
        : address(addrIn), m_is_inbound(is_inbound), m_is_manual_connection(is_manual)
    {
        pindexBestKnownBlock = nullptr;
        hashLastUnknownBlock.SetNull();
        pindexLastCommonBlock = nullptr;
        pindexBestHeaderSent = nullptr;
        nUnconnectingHeaders = 0;
        fSyncStarted = false;
        nHeadersSyncTimeout = 0;
        nStallingSince = 0;
        nDownloadingSince = 0;
        nBlocksInFlight = 0;
        nBlocksInFlightValidHeaders = 0;
        fPreferredDownload = false;
        fPreferHeaders = false;
        fPreferHeaderAndIDs = false;
        fProvidesHeaderAndIDs = false;
        fHaveWitness = false;
        fWantsCmpctWitness = false;
        fSupportsDesiredCmpctVersion = false;
        m_chain_sync = { 0, nullptr, false, false };
        m_last_block_announcement = 0;
        m_recently_announced_invs.reset();
    }
};
414 
416 static std::map<NodeId, CNodeState> mapNodeState GUARDED_BY(cs_main);
417 
418 static CNodeState *State(NodeId pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
419  std::map<NodeId, CNodeState>::iterator it = mapNodeState.find(pnode);
420  if (it == mapNodeState.end())
421  return nullptr;
422  return &it->second;
423 }
424 
/**
 * Data structure for an individual peer's message-processing state that is not
 * validation-critical (and therefore not protected by cs_main). Lifetime is
 * managed by shared pointers (see PeerRef / g_peer_map).
 */
struct Peer {
    /** Same id as the CNode object for this peer */
    const NodeId m_id{0};

    /** Protects misbehavior data members */
    Mutex m_misbehavior_mutex;
    /** Accumulated misbehavior score for this peer */
    int m_misbehavior_score GUARDED_BY(m_misbehavior_mutex){0};
    /** Whether this peer should be disconnected and marked as discouraged. */
    bool m_should_discourage GUARDED_BY(m_misbehavior_mutex){false};

    /** Set of txids to reconsider once their parent transactions have been accepted */
    std::set<uint256> m_orphan_work_set GUARDED_BY(g_cs_orphans);

    /** Work queue of getdata items requested by this peer.
     *  NOTE(review): m_getdata_requests_mutex is referenced here but its
     *  declaration is not visible in this excerpt — presumably declared just
     *  above; confirm against the full file. */
    std::deque<CInv> m_getdata_requests GUARDED_BY(m_getdata_requests_mutex);

    Peer(NodeId id) : m_id(id) {}
};
456 
/** Shared ownership handle to a Peer object. */
using PeerRef = std::shared_ptr<Peer>;

/**
 * Map of all Peer objects, keyed by peer id. Protected by g_peer_mutex; once a
 * shared pointer has been copied out, the lock may be released and the Peer
 * used without it.
 */
Mutex g_peer_mutex;
static std::map<NodeId, PeerRef> g_peer_map GUARDED_BY(g_peer_mutex);
467 
470 static PeerRef GetPeerRef(NodeId id)
471 {
472  LOCK(g_peer_mutex);
473  auto it = g_peer_map.find(id);
474  return it != g_peer_map.end() ? it->second : nullptr;
475 }
476 
477 static void UpdatePreferredDownload(const CNode& node, CNodeState* state) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
478 {
479  nPreferredDownload -= state->fPreferredDownload;
480 
481  // Whether this node should be marked as a preferred download node.
482  state->fPreferredDownload = (!node.IsInboundConn() || node.HasPermission(PF_NOBAN)) && !node.IsAddrFetchConn() && !node.fClient;
483 
484  nPreferredDownload += state->fPreferredDownload;
485 }
486 
/** Send a VERSION message to a peer, serialized at INIT_PROTO_VERSION (the
 *  version handshake always starts at the minimum protocol version). */
static void PushNodeVersion(CNode& pnode, CConnman& connman, int64_t nTime)
{
    // Note that pnode->GetLocalServices() is a reflection of the local
    // services we were offering when the CNode object was created for this
    // peer.
    ServiceFlags nLocalNodeServices = pnode.GetLocalServices();
    uint64_t nonce = pnode.GetLocalNonce();
    int nNodeStartingHeight = pnode.GetMyStartingHeight();
    NodeId nodeid = pnode.GetId();
    CAddress addr = pnode.addr;

    // Only echo back an address that is routable, not a proxy, and
    // representable in the pre-addrv2 format; otherwise send an empty one.
    CAddress addrYou = addr.IsRoutable() && !IsProxy(addr) && addr.IsAddrV1Compatible() ?
                           addr :
                           CAddress(CService(), addr.nServices);
    CAddress addrMe = CAddress(CService(), nLocalNodeServices);

    connman.PushMessage(&pnode, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERSION, PROTOCOL_VERSION, (uint64_t)nLocalNodeServices, nTime, addrYou, addrMe,
            nonce, strSubVersion, nNodeStartingHeight, ::g_relay_txes && pnode.m_tx_relay != nullptr));

    // Peer addresses are only logged when -logips is enabled.
    if (fLogIPs) {
        LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, us=%s, them=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), addrYou.ToString(), nodeid);
    } else {
        LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, us=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), nodeid);
    }
}
512 
// Returns a bool indicating whether we requested this block.
// Also used if a block was /not/ received and timed out or started with another peer
static bool MarkBlockAsReceived(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
    std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
    if (itInFlight != mapBlocksInFlight.end()) {
        CNodeState *state = State(itInFlight->second.first);
        assert(state != nullptr);
        // Decrement first, then test: if this was the peer's last
        // validated-header block in flight, it no longer counts towards
        // nPeersWithValidatedDownloads.
        state->nBlocksInFlightValidHeaders -= itInFlight->second.second->fValidatedHeaders;
        if (state->nBlocksInFlightValidHeaders == 0 && itInFlight->second.second->fValidatedHeaders) {
            // Last validated block on the queue was received.
            nPeersWithValidatedDownloads--;
        }
        if (state->vBlocksInFlight.begin() == itInFlight->second.second) {
            // First block on the queue was received, update the start download time for the next one
            state->nDownloadingSince = std::max(state->nDownloadingSince, count_microseconds(GetTime<std::chrono::microseconds>()));
        }
        state->vBlocksInFlight.erase(itInFlight->second.second);
        state->nBlocksInFlight--;
        // Receiving any requested block clears the stalling timer for this peer.
        state->nStallingSince = 0;
        mapBlocksInFlight.erase(itInFlight);
        return true;
    }
    return false;
}
537 
// returns false, still setting pit, if the block was already in flight from the same peer
// pit will only be valid as long as the same cs_main lock is being held
static bool MarkBlockAsInFlight(CTxMemPool& mempool, NodeId nodeid, const uint256& hash, const CBlockIndex* pindex = nullptr, std::list<QueuedBlock>::iterator** pit = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
    CNodeState *state = State(nodeid);
    assert(state != nullptr);

    // Short-circuit most stuff in case it is from the same node
    std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
    if (itInFlight != mapBlocksInFlight.end() && itInFlight->second.first == nodeid) {
        if (pit) {
            *pit = &itInFlight->second.second;
        }
        return false;
    }

    // Make sure it's not listed somewhere already.
    MarkBlockAsReceived(hash);

    // A PartiallyDownloadedBlock is only allocated when the caller wants an
    // iterator back (pit != nullptr), i.e. for compact-block downloads.
    std::list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(),
            {hash, pindex, pindex != nullptr, std::unique_ptr<PartiallyDownloadedBlock>(pit ? new PartiallyDownloadedBlock(&mempool) : nullptr)});
    state->nBlocksInFlight++;
    state->nBlocksInFlightValidHeaders += it->fValidatedHeaders;
    if (state->nBlocksInFlight == 1) {
        // We're starting a block download (batch) from this peer.
        state->nDownloadingSince = GetTime<std::chrono::microseconds>().count();
    }
    if (state->nBlocksInFlightValidHeaders == 1 && pindex != nullptr) {
        // First validated-header block in flight from this peer.
        nPeersWithValidatedDownloads++;
    }
    itInFlight = mapBlocksInFlight.insert(std::make_pair(hash, std::make_pair(nodeid, it))).first;
    if (pit)
        *pit = &itInFlight->second.second;
    return true;
}
572 
574 static void ProcessBlockAvailability(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
575  CNodeState *state = State(nodeid);
576  assert(state != nullptr);
577 
578  if (!state->hashLastUnknownBlock.IsNull()) {
579  const CBlockIndex* pindex = LookupBlockIndex(state->hashLastUnknownBlock);
580  if (pindex && pindex->nChainWork > 0) {
581  if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
582  state->pindexBestKnownBlock = pindex;
583  }
584  state->hashLastUnknownBlock.SetNull();
585  }
586  }
587 }
588 
590 static void UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
591  CNodeState *state = State(nodeid);
592  assert(state != nullptr);
593 
594  ProcessBlockAvailability(nodeid);
595 
596  const CBlockIndex* pindex = LookupBlockIndex(hash);
597  if (pindex && pindex->nChainWork > 0) {
598  // An actually better block was announced.
599  if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
600  state->pindexBestKnownBlock = pindex;
601  }
602  } else {
603  // An unknown block was announced; just assume that the latest one is the best one.
604  state->hashLastUnknownBlock = hash;
605  }
606 }
607 
/**
 * When a peer sends us a valid block, tell it to announce future blocks to us
 * via CMPCTBLOCK (BIP 152) by adding its node id to lNodesAnnouncingHeaderAndIDs,
 * and keep that list at no more than 3 peers by sending a sendcmpct(false) to
 * the oldest entry when it would grow beyond that.
 */
static void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid, CConnman& connman) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    AssertLockHeld(cs_main);
    CNodeState* nodestate = State(nodeid);
    if (!nodestate || !nodestate->fSupportsDesiredCmpctVersion) {
        // Never ask from peers who can't provide witnesses.
        return;
    }
    if (nodestate->fProvidesHeaderAndIDs) {
        // If the peer is already in the list, just move it to the back
        // (most-recently-used position) and return.
        for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin(); it != lNodesAnnouncingHeaderAndIDs.end(); it++) {
            if (*it == nodeid) {
                lNodesAnnouncingHeaderAndIDs.erase(it);
                lNodesAnnouncingHeaderAndIDs.push_back(nodeid);
                return;
            }
        }
        connman.ForNode(nodeid, [&connman](CNode* pfrom) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
            AssertLockHeld(::cs_main);
            // Compact-block version 2 (witness) if we offer NODE_WITNESS, else version 1.
            uint64_t nCMPCTBLOCKVersion = (pfrom->GetLocalServices() & NODE_WITNESS) ? 2 : 1;
            if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
                // As per BIP152, we only get 3 of our peers to announce
                // blocks using compact encodings.
                connman.ForNode(lNodesAnnouncingHeaderAndIDs.front(), [&connman, nCMPCTBLOCKVersion](CNode* pnodeStop){
                    connman.PushMessage(pnodeStop, CNetMsgMaker(pnodeStop->GetCommonVersion()).Make(NetMsgType::SENDCMPCT, /*fAnnounceUsingCMPCTBLOCK=*/false, nCMPCTBLOCKVersion));
                    return true;
                });
                lNodesAnnouncingHeaderAndIDs.pop_front();
            }
            connman.PushMessage(pfrom, CNetMsgMaker(pfrom->GetCommonVersion()).Make(NetMsgType::SENDCMPCT, /*fAnnounceUsingCMPCTBLOCK=*/true, nCMPCTBLOCKVersion));
            lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
            return true;
        });
    }
}
648 
649 static bool TipMayBeStale(const Consensus::Params &consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
650 {
651  AssertLockHeld(cs_main);
652  if (g_last_tip_update == 0) {
653  g_last_tip_update = GetTime();
654  }
655  return g_last_tip_update < GetTime() - consensusParams.nPowTargetSpacing * 3 && mapBlocksInFlight.empty();
656 }
657 
658 static bool CanDirectFetch(const Consensus::Params &consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
659 {
660  return ::ChainActive().Tip()->GetBlockTime() > GetAdjustedTime() - consensusParams.nPowTargetSpacing * 20;
661 }
662 
663 static bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
664 {
665  if (state->pindexBestKnownBlock && pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight))
666  return true;
667  if (state->pindexBestHeaderSent && pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight))
668  return true;
669  return false;
670 }
671 
/** Update pindexLastCommonBlock and add not-in-flight missing successors to
 *  vBlocks, until it has at most count entries. If the download window is
 *  exhausted because another peer has the next needed block in flight, that
 *  peer's id is reported via nodeStaller. */
static void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller, const Consensus::Params& consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    if (count == 0)
        return;

    vBlocks.reserve(vBlocks.size() + count);
    CNodeState *state = State(nodeid);
    assert(state != nullptr);

    // Make sure pindexBestKnownBlock is up to date, we'll need it.
    ProcessBlockAvailability(nodeid);

    if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->nChainWork < ::ChainActive().Tip()->nChainWork || state->pindexBestKnownBlock->nChainWork < nMinimumChainWork) {
        // This peer has nothing interesting.
        return;
    }

    if (state->pindexLastCommonBlock == nullptr) {
        // Bootstrap quickly by guessing a parent of our best tip is the forking point.
        // Guessing wrong in either direction is not a problem.
        state->pindexLastCommonBlock = ::ChainActive()[std::min(state->pindexBestKnownBlock->nHeight, ::ChainActive().Height())];
    }

    // If the peer reorganized, our previous pindexLastCommonBlock may not be an ancestor
    // of its current tip anymore. Go back enough to fix that.
    state->pindexLastCommonBlock = LastCommonAncestor(state->pindexLastCommonBlock, state->pindexBestKnownBlock);
    if (state->pindexLastCommonBlock == state->pindexBestKnownBlock)
        return;

    std::vector<const CBlockIndex*> vToFetch;
    const CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
    // Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last
    // linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to
    // download that next block if the window were 1 larger.
    int nWindowEnd = state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW;
    int nMaxHeight = std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
    NodeId waitingfor = -1;
    while (pindexWalk->nHeight < nMaxHeight) {
        // Read up to 128 (or more, if more blocks than that are needed) successors of pindexWalk (towards
        // pindexBestKnownBlock) into vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as expensive
        // as iterating over ~100 CBlockIndex* entries anyway.
        int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight, std::max<int>(count - vBlocks.size(), 128));
        vToFetch.resize(nToFetch);
        // Walk forward by jumping to the ancestor at the target height, then
        // fill vToFetch backwards via pprev links.
        pindexWalk = state->pindexBestKnownBlock->GetAncestor(pindexWalk->nHeight + nToFetch);
        vToFetch[nToFetch - 1] = pindexWalk;
        for (unsigned int i = nToFetch - 1; i > 0; i--) {
            vToFetch[i - 1] = vToFetch[i]->pprev;
        }

        // Iterate over those blocks in vToFetch (in forward direction), adding the ones that
        // are not yet downloaded and not in flight to vBlocks. In the meantime, update
        // pindexLastCommonBlock as long as all ancestors are already downloaded, or if it's
        // already part of our chain (and therefore don't need it even if pruned).
        for (const CBlockIndex* pindex : vToFetch) {
            if (!pindex->IsValid(BLOCK_VALID_TREE)) {
                // We consider the chain that this peer is on invalid.
                return;
            }
            if (!State(nodeid)->fHaveWitness && IsWitnessEnabled(pindex->pprev, consensusParams)) {
                // We wouldn't download this block or its descendants from this peer.
                return;
            }
            if (pindex->nStatus & BLOCK_HAVE_DATA || ::ChainActive().Contains(pindex)) {
                if (pindex->HaveTxsDownloaded())
                    state->pindexLastCommonBlock = pindex;
            } else if (mapBlocksInFlight.count(pindex->GetBlockHash()) == 0) {
                // The block is not already downloaded, and not yet in flight.
                if (pindex->nHeight > nWindowEnd) {
                    // We reached the end of the window.
                    if (vBlocks.size() == 0 && waitingfor != nodeid) {
                        // We aren't able to fetch anything, but we would be if the download window was one larger.
                        nodeStaller = waitingfor;
                    }
                    return;
                }
                vBlocks.push_back(pindex);
                if (vBlocks.size() == count) {
                    return;
                }
            } else if (waitingfor == -1) {
                // This is the first already-in-flight block.
                waitingfor = mapBlocksInFlight[pindex->GetBlockHash()].first;
            }
        }
    }
}
760 
761 } // namespace
762 
/** Register with m_txrequest that a peer announced a transaction (by txid or
 *  wtxid), computing the request-scheduling preference and delay for it. */
void PeerManager::AddTxAnnouncement(const CNode& node, const GenTxid& gtxid, std::chrono::microseconds current_time)
{
    AssertLockHeld(::cs_main); // For m_txrequest
    NodeId nodeid = node.GetId();
    if (!node.HasPermission(PF_RELAY) && m_txrequest.Count(nodeid) >= MAX_PEER_TX_ANNOUNCEMENTS) {
        // Too many queued announcements from this peer
        return;
    }
    const CNodeState* state = State(nodeid);

    // Decide the TxRequestTracker parameters for this announcement:
    // - "preferred": if fPreferredDownload is set (= outbound, or PF_NOBAN permission)
    // - "reqtime": current time plus delays for:
    //   - NONPREF_PEER_TX_DELAY for announcements from non-preferred connections
    //   - TXID_RELAY_DELAY for txid announcements while wtxid peers are available
    //   - OVERLOADED_PEER_TX_DELAY for announcements from peers which have at least
    //     MAX_PEER_TX_REQUEST_IN_FLIGHT requests in flight (and don't have PF_RELAY).
    auto delay = std::chrono::microseconds{0};
    const bool preferred = state->fPreferredDownload;
    if (!preferred) delay += NONPREF_PEER_TX_DELAY;
    if (!gtxid.IsWtxid() && g_wtxid_relay_peers > 0) delay += TXID_RELAY_DELAY;
    const bool overloaded = !node.HasPermission(PF_RELAY) &&
        m_txrequest.CountInFlight(nodeid) >= MAX_PEER_TX_REQUEST_IN_FLIGHT;
    if (overloaded) delay += OVERLOADED_PEER_TX_DELAY;
    m_txrequest.ReceivedInv(nodeid, gtxid, preferred, current_time + delay);
}
789 
790 // This function is used for testing the stale tip eviction logic, see
791 // denialofservice_tests.cpp
792 void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds)
793 {
794  LOCK(cs_main);
795  CNodeState *state = State(node);
796  if (state) state->m_last_block_announcement = time_in_seconds;
797 }
798 
800  CAddress addr = pnode->addr;
801  std::string addrName = pnode->GetAddrName();
802  NodeId nodeid = pnode->GetId();
803  {
804  LOCK(cs_main);
805  mapNodeState.emplace_hint(mapNodeState.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(addr, pnode->IsInboundConn(), pnode->IsManualConn()));
806  assert(m_txrequest.Count(nodeid) == 0);
807  }
808  {
809  PeerRef peer = std::make_shared<Peer>(nodeid);
810  LOCK(g_peer_mutex);
811  g_peer_map.emplace_hint(g_peer_map.end(), nodeid, std::move(peer));
812  }
813  if (!pnode->IsInboundConn()) {
814  PushNodeVersion(*pnode, m_connman, GetTime());
815  }
816 }
817 
819 {
820  std::set<uint256> unbroadcast_txids = m_mempool.GetUnbroadcastTxs();
821 
822  for (const auto& txid : unbroadcast_txids) {
823  CTransactionRef tx = m_mempool.get(txid);
824 
825  if (tx != nullptr) {
826  LOCK(cs_main);
827  RelayTransaction(txid, tx->GetWitnessHash(), m_connman);
828  } else {
829  m_mempool.RemoveUnbroadcastTx(txid, true);
830  }
831  }
832 
833  // Schedule next run for 10-15 minutes in the future.
834  // We add randomness on every cycle to avoid the possibility of P2P fingerprinting.
835  const std::chrono::milliseconds delta = std::chrono::minutes{10} + GetRandMillis(std::chrono::minutes{5});
836  scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); }, delta);
837 }
838 
/** Tear down all per-peer state when a connection is closed. Sets
 *  fUpdateConnectionTime when the peer's addrman entry deserves a
 *  last-connected-time update (well-behaved outbound full-relay peers). */
void PeerManager::FinalizeNode(const CNode& node, bool& fUpdateConnectionTime) {
    NodeId nodeid = node.GetId();
    fUpdateConnectionTime = false;
    LOCK(cs_main);
    int misbehavior{0};
    {
        // Remove the Peer object first, capturing its misbehavior score for
        // the addrman decision below.
        PeerRef peer = GetPeerRef(nodeid);
        assert(peer != nullptr);
        misbehavior = WITH_LOCK(peer->m_misbehavior_mutex, return peer->m_misbehavior_score);
        LOCK(g_peer_mutex);
        g_peer_map.erase(nodeid);
    }
    CNodeState *state = State(nodeid);
    assert(state != nullptr);

    if (state->fSyncStarted)
        nSyncStarted--;

    if (node.fSuccessfullyConnected && misbehavior == 0 &&
        !node.IsBlockOnlyConn() && !node.IsInboundConn()) {
        // Only change visible addrman state for outbound, full-relay peers
        fUpdateConnectionTime = true;
    }

    // Forget any blocks this peer was downloading for us.
    for (const QueuedBlock& entry : state->vBlocksInFlight) {
        mapBlocksInFlight.erase(entry.hash);
    }
    EraseOrphansFor(nodeid);
    m_txrequest.DisconnectedPeer(nodeid);
    // Roll this peer's contributions out of the global counters.
    nPreferredDownload -= state->fPreferredDownload;
    nPeersWithValidatedDownloads -= (state->nBlocksInFlightValidHeaders != 0);
    assert(nPeersWithValidatedDownloads >= 0);
    g_outbound_peers_with_protect_from_disconnect -= state->m_chain_sync.m_protect;
    assert(g_outbound_peers_with_protect_from_disconnect >= 0);
    g_wtxid_relay_peers -= state->m_wtxid_relay;
    assert(g_wtxid_relay_peers >= 0);

    mapNodeState.erase(nodeid);

    if (mapNodeState.empty()) {
        // Do a consistency check after the last peer is removed.
        assert(mapBlocksInFlight.empty());
        assert(nPreferredDownload == 0);
        assert(nPeersWithValidatedDownloads == 0);
        assert(g_outbound_peers_with_protect_from_disconnect == 0);
        assert(g_wtxid_relay_peers == 0);
        assert(m_txrequest.Size() == 0);
    }
    LogPrint(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid);
}
889 
891  {
892  LOCK(cs_main);
893  CNodeState* state = State(nodeid);
894  if (state == nullptr)
895  return false;
896  stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1;
897  stats.nCommonHeight = state->pindexLastCommonBlock ? state->pindexLastCommonBlock->nHeight : -1;
898  for (const QueuedBlock& queue : state->vBlocksInFlight) {
899  if (queue.pindex)
900  stats.vHeightInFlight.push_back(queue.pindex->nHeight);
901  }
902  }
903 
904  PeerRef peer = GetPeerRef(nodeid);
905  if (peer == nullptr) return false;
906  stats.m_misbehavior_score = WITH_LOCK(peer->m_misbehavior_mutex, return peer->m_misbehavior_score);
907 
908  return true;
909 }
910 
912 //
913 // mapOrphanTransactions
914 //
915 
917 {
918  size_t max_extra_txn = gArgs.GetArg("-blockreconstructionextratxn", DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN);
919  if (max_extra_txn <= 0)
920  return;
921  if (!vExtraTxnForCompact.size())
922  vExtraTxnForCompact.resize(max_extra_txn);
923  vExtraTxnForCompact[vExtraTxnForCompactIt] = std::make_pair(tx->GetWitnessHash(), tx);
924  vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % max_extra_txn;
925 }
926 
928 {
929  const uint256& hash = tx->GetHash();
930  if (mapOrphanTransactions.count(hash))
931  return false;
932 
933  // Ignore big transactions, to avoid a
934  // send-big-orphans memory exhaustion attack. If a peer has a legitimate
935  // large transaction with a missing parent then we assume
936  // it will rebroadcast it later, after the parent transaction(s)
937  // have been mined or received.
938  // 100 orphans, each of which is at most 100,000 bytes big is
939  // at most 10 megabytes of orphans and somewhat more byprev index (in the worst case):
940  unsigned int sz = GetTransactionWeight(*tx);
941  if (sz > MAX_STANDARD_TX_WEIGHT)
942  {
943  LogPrint(BCLog::MEMPOOL, "ignoring large orphan tx (size: %u, hash: %s)\n", sz, hash.ToString());
944  return false;
945  }
946 
947  auto ret = mapOrphanTransactions.emplace(hash, COrphanTx{tx, peer, GetTime() + ORPHAN_TX_EXPIRE_TIME, g_orphan_list.size()});
948  assert(ret.second);
949  g_orphan_list.push_back(ret.first);
950  // Allow for lookups in the orphan pool by wtxid, as well as txid
951  g_orphans_by_wtxid.emplace(tx->GetWitnessHash(), ret.first);
952  for (const CTxIn& txin : tx->vin) {
953  mapOrphanTransactionsByPrev[txin.prevout].insert(ret.first);
954  }
955 
957 
958  LogPrint(BCLog::MEMPOOL, "stored orphan tx %s (mapsz %u outsz %u)\n", hash.ToString(),
959  mapOrphanTransactions.size(), mapOrphanTransactionsByPrev.size());
960  return true;
961 }
962 
/** Erase one orphan by txid from all orphan-pool indexes.
 *  Returns the number of entries erased (0 or 1). */
int static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
{
    std::map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.find(hash);
    if (it == mapOrphanTransactions.end())
        return 0;
    // Drop this orphan from the by-prevout index, pruning empty buckets.
    for (const CTxIn& txin : it->second.tx->vin)
    {
        auto itPrev = mapOrphanTransactionsByPrev.find(txin.prevout);
        if (itPrev == mapOrphanTransactionsByPrev.end())
            continue;
        itPrev->second.erase(it);
        if (itPrev->second.empty())
            mapOrphanTransactionsByPrev.erase(itPrev);
    }

    // Remove from g_orphan_list in O(1) by swapping with the last element.
    size_t old_pos = it->second.list_pos;
    assert(g_orphan_list[old_pos] == it);
    if (old_pos + 1 != g_orphan_list.size()) {
        // Unless we're deleting the last entry in g_orphan_list, move the last
        // entry to the position we're deleting.
        auto it_last = g_orphan_list.back();
        g_orphan_list[old_pos] = it_last;
        it_last->second.list_pos = old_pos;
    }
    g_orphan_list.pop_back();
    g_orphans_by_wtxid.erase(it->second.tx->GetWitnessHash());

    mapOrphanTransactions.erase(it);
    return 1;
}
993 
// Erase all orphan transactions received from the given peer.
// NOTE(review): the signature line of this function is missing from this view
// of the file (expected something like `void EraseOrphansFor(NodeId peer)`) --
// confirm against the original source / header.
{
    LOCK(g_cs_orphans);
    int nErased = 0;
    std::map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
    while (iter != mapOrphanTransactions.end())
    {
        std::map<uint256, COrphanTx>::iterator maybeErase = iter++; // increment to avoid iterator becoming invalid
        if (maybeErase->second.fromPeer == peer)
        {
            nErased += EraseOrphanTx(maybeErase->second.tx->GetHash());
        }
    }
    if (nErased > 0) LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx from peer=%d\n", nErased, peer);
}
1009 
1010 
1011 unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans)
1012 {
1013  LOCK(g_cs_orphans);
1014 
1015  unsigned int nEvicted = 0;
1016  static int64_t nNextSweep;
1017  int64_t nNow = GetTime();
1018  if (nNextSweep <= nNow) {
1019  // Sweep out expired orphan pool entries:
1020  int nErased = 0;
1021  int64_t nMinExpTime = nNow + ORPHAN_TX_EXPIRE_TIME - ORPHAN_TX_EXPIRE_INTERVAL;
1022  std::map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
1023  while (iter != mapOrphanTransactions.end())
1024  {
1025  std::map<uint256, COrphanTx>::iterator maybeErase = iter++;
1026  if (maybeErase->second.nTimeExpire <= nNow) {
1027  nErased += EraseOrphanTx(maybeErase->second.tx->GetHash());
1028  } else {
1029  nMinExpTime = std::min(maybeErase->second.nTimeExpire, nMinExpTime);
1030  }
1031  }
1032  // Sweep again 5 minutes after the next entry that expires in order to batch the linear scan.
1033  nNextSweep = nMinExpTime + ORPHAN_TX_EXPIRE_INTERVAL;
1034  if (nErased > 0) LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx due to expiration\n", nErased);
1035  }
1036  FastRandomContext rng;
1037  while (mapOrphanTransactions.size() > nMaxOrphans)
1038  {
1039  // Evict a random orphan:
1040  size_t randompos = rng.randrange(g_orphan_list.size());
1041  EraseOrphanTx(g_orphan_list[randompos]->first);
1042  ++nEvicted;
1043  }
1044  return nEvicted;
1045 }
1046 
1047 void PeerManager::Misbehaving(const NodeId pnode, const int howmuch, const std::string& message)
1048 {
1049  assert(howmuch > 0);
1050 
1051  PeerRef peer = GetPeerRef(pnode);
1052  if (peer == nullptr) return;
1053 
1054  LOCK(peer->m_misbehavior_mutex);
1055  peer->m_misbehavior_score += howmuch;
1056  const std::string message_prefixed = message.empty() ? "" : (": " + message);
1057  if (peer->m_misbehavior_score >= DISCOURAGEMENT_THRESHOLD && peer->m_misbehavior_score - howmuch < DISCOURAGEMENT_THRESHOLD) {
1058  LogPrint(BCLog::NET, "Misbehaving: peer=%d (%d -> %d) DISCOURAGE THRESHOLD EXCEEDED%s\n", pnode, peer->m_misbehavior_score - howmuch, peer->m_misbehavior_score, message_prefixed);
1059  peer->m_should_discourage = true;
1060  } else {
1061  LogPrint(BCLog::NET, "Misbehaving: peer=%d (%d -> %d)%s\n", pnode, peer->m_misbehavior_score - howmuch, peer->m_misbehavior_score, message_prefixed);
1062  }
1063 }
1064 
/**
 * Potentially mark a peer for discouragement based on the result of block
 * validation. Returns true if the peer was punished.
 * NOTE(review): the first line of this signature (the function name plus the
 * NodeId and BlockValidationState parameters) and the
 * `case BlockValidationResult::...:` labels inside the switch are missing from
 * this view of the file -- the bare statements and `break`s below belong to
 * specific cases in the original source; confirm the control flow there.
 */
                                         bool via_compact_block, const std::string& message)
{
    switch (state.GetResult()) {
        break;
    // The node is providing invalid data:
        if (!via_compact_block) {
            Misbehaving(nodeid, 100, message);
            return true;
        }
        break;
    {
        LOCK(cs_main);
        CNodeState *node_state = State(nodeid);
        if (node_state == nullptr) {
            break;
        }

        // Discourage outbound (but not inbound) peers if on an invalid chain.
        // Exempt HB compact block peers and manual connections.
        if (!via_compact_block && !node_state->m_is_inbound && !node_state->m_is_manual_connection) {
            Misbehaving(nodeid, 100, message);
            return true;
        }
        break;
    }
        Misbehaving(nodeid, 100, message);
        return true;
    // Conflicting (but not necessarily invalid) data or different policy:
        // TODO: Handle this much more gracefully (10 DoS points is super arbitrary)
        Misbehaving(nodeid, 10, message);
        return true;
        break;
    }
    // Not punished: optionally log the reason for debugging.
    if (message != "") {
        LogPrint(BCLog::NET, "peer=%d: %s\n", nodeid, message);
    }
    return false;
}
1114 
/**
 * Potentially mark a peer for discouragement based on the result of
 * transaction validation. Returns true if the peer was punished.
 * NOTE(review): the `case TxValidationResult::...:` labels inside the switch
 * are missing from this view of the file -- the bare statements below belong
 * to specific cases in the original source; confirm the control flow there.
 */
bool PeerManager::MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state, const std::string& message)
{
    switch (state.GetResult()) {
        break;
    // The node is providing invalid data:
        Misbehaving(nodeid, 100, message);
        return true;
    // Conflicting (but not necessarily invalid) data or different policy:
        break;
    }
    // Not punished: optionally log the reason for debugging.
    if (message != "") {
        LogPrint(BCLog::NET, "peer=%d: %s\n", nodeid, message);
    }
    return false;
}
1141 
1142 
1144 //
1145 // blockchain -> download logic notification
1146 //
1147 
1148 // To prevent fingerprinting attacks, only send blocks/headers outside of the
1149 // active chain if they are no more than a month older (both in time, and in
1150 // best equivalent proof of work) than the best header chain we know about and
1151 // we fully-validated them at some point.
static bool BlockRequestAllowed(const CBlockIndex* pindex, const Consensus::Params& consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    AssertLockHeld(cs_main);
    // Blocks on the active chain may always be served.
    if (::ChainActive().Contains(pindex)) return true;
    // Off-chain blocks must have been fully validated and, per the
    // fingerprinting rule documented above this function, must not be too old.
    // NOTE(review): the continuation of this return expression (the age /
    // chainwork comparisons against pindexBestHeader) is missing from this
    // view of the file -- confirm against the original source.
    return pindex->IsValid(BLOCK_VALID_SCRIPTS) && (pindexBestHeader != nullptr) &&
}
1160 
1161 PeerManager::PeerManager(const CChainParams& chainparams, CConnman& connman, BanMan* banman,
1162  CScheduler& scheduler, ChainstateManager& chainman, CTxMemPool& pool)
1163  : m_chainparams(chainparams),
1164  m_connman(connman),
1165  m_banman(banman),
1166  m_chainman(chainman),
1167  m_mempool(pool),
1168  m_stale_tip_check_time(0)
1169 {
1170  // Initialize global variables that cannot be constructed at startup.
1171  recentRejects.reset(new CRollingBloomFilter(120000, 0.000001));
1172 
1173  // Blocks don't typically have more than 4000 transactions, so this should
1174  // be at least six blocks (~1 hr) worth of transactions that we can store,
1175  // inserting both a txid and wtxid for every observed transaction.
1176  // If the number of transactions appearing in a block goes up, or if we are
1177  // seeing getdata requests more than an hour after initial announcement, we
1178  // can increase this number.
1179  // The false positive rate of 1/1M should come out to less than 1
1180  // transaction per day that would be inadvertently ignored (which is the
1181  // same probability that we have in the reject filter).
1182  g_recent_confirmed_transactions.reset(new CRollingBloomFilter(48000, 0.000001));
1183 
1184  // Stale tip checking and peer eviction are on two different timers, but we
1185  // don't want them to get out of sync due to drift in the scheduler, so we
1186  // combine them in one function and schedule at the quicker (peer-eviction)
1187  // timer.
1188  static_assert(EXTRA_PEER_CHECK_INTERVAL < STALE_CHECK_INTERVAL, "peer eviction timer should be less than stale tip check timer");
1189  scheduler.scheduleEvery([this] { this->CheckForStaleTipAndEvictPeers(); }, std::chrono::seconds{EXTRA_PEER_CHECK_INTERVAL});
1190 
1191  // schedule next run for 10-15 minutes in the future
1192  const std::chrono::milliseconds delta = std::chrono::minutes{10} + GetRandMillis(std::chrono::minutes{5});
1193  scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); }, delta);
1194 }
1195 
1201 void PeerManager::BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindex)
1202 {
1203  {
1204  LOCK(g_cs_orphans);
1205 
1206  std::vector<uint256> vOrphanErase;
1207 
1208  for (const CTransactionRef& ptx : pblock->vtx) {
1209  const CTransaction& tx = *ptx;
1210 
1211  // Which orphan pool entries must we evict?
1212  for (const auto& txin : tx.vin) {
1213  auto itByPrev = mapOrphanTransactionsByPrev.find(txin.prevout);
1214  if (itByPrev == mapOrphanTransactionsByPrev.end()) continue;
1215  for (auto mi = itByPrev->second.begin(); mi != itByPrev->second.end(); ++mi) {
1216  const CTransaction& orphanTx = *(*mi)->second.tx;
1217  const uint256& orphanHash = orphanTx.GetHash();
1218  vOrphanErase.push_back(orphanHash);
1219  }
1220  }
1221  }
1222 
1223  // Erase orphan transactions included or precluded by this block
1224  if (vOrphanErase.size()) {
1225  int nErased = 0;
1226  for (const uint256& orphanHash : vOrphanErase) {
1227  nErased += EraseOrphanTx(orphanHash);
1228  }
1229  LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx included or conflicted by block\n", nErased);
1230  }
1231 
1232  g_last_tip_update = GetTime();
1233  }
1234  {
1235  LOCK(g_cs_recent_confirmed_transactions);
1236  for (const auto& ptx : pblock->vtx) {
1237  g_recent_confirmed_transactions->insert(ptx->GetHash());
1238  if (ptx->GetHash() != ptx->GetWitnessHash()) {
1239  g_recent_confirmed_transactions->insert(ptx->GetWitnessHash());
1240  }
1241  }
1242  }
1243  {
1244  LOCK(cs_main);
1245  for (const auto& ptx : pblock->vtx) {
1246  m_txrequest.ForgetTxHash(ptx->GetHash());
1247  m_txrequest.ForgetTxHash(ptx->GetWitnessHash());
1248  }
1249  }
1250 }
1251 
1252 void PeerManager::BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex)
1253 {
1254  // To avoid relay problems with transactions that were previously
1255  // confirmed, clear our filter of recently confirmed transactions whenever
1256  // there's a reorg.
1257  // This means that in a 1-block reorg (where 1 block is disconnected and
1258  // then another block reconnected), our filter will drop to having only one
1259  // block's worth of transactions in it, but that should be fine, since
1260  // presumably the most common case of relaying a confirmed transaction
1261  // should be just after a new block containing it is found.
1262  LOCK(g_cs_recent_confirmed_transactions);
1263  g_recent_confirmed_transactions->reset();
1264 }
1265 
// All of the following cache a recent block, and are protected by cs_most_recent_block
// NOTE(review): the declaration of the cs_most_recent_block mutex itself is
// not visible in this view of the file.
static std::shared_ptr<const CBlock> most_recent_block GUARDED_BY(cs_most_recent_block);
// Compact-block form of the cached block, pre-built for fast announcement.
static std::shared_ptr<const CBlockHeaderAndShortTxIDs> most_recent_compact_block GUARDED_BY(cs_most_recent_block);
// Hash of most_recent_block, cached alongside it.
static uint256 most_recent_block_hash GUARDED_BY(cs_most_recent_block);
// Whether most_recent_compact_block was built with witness data.
static bool fWitnessesPresentInMostRecentCompactBlock GUARDED_BY(cs_most_recent_block);
1272 
/**
 * Fast-announce a block that has valid proof of work (before full validation)
 * via compact blocks to peers that prefer header-and-ids announcements, and
 * cache it for serving subsequent getdata requests.
 * NOTE(review): the declaration of `msgMaker` (captured by the lambda below)
 * is missing from this view of the file -- confirm against the original.
 */
void PeerManager::NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& pblock) {
    std::shared_ptr<const CBlockHeaderAndShortTxIDs> pcmpctblock = std::make_shared<const CBlockHeaderAndShortTxIDs> (*pblock, true);

    LOCK(cs_main);

    // Only fast-announce a given height once.
    static int nHighestFastAnnounce = 0;
    if (pindex->nHeight <= nHighestFastAnnounce)
        return;
    nHighestFastAnnounce = pindex->nHeight;

    bool fWitnessEnabled = IsWitnessEnabled(pindex->pprev, m_chainparams.GetConsensus());
    uint256 hashBlock(pblock->GetHash());

    {
        // Cache the block (and its compact form) so getdata requests for it
        // can be served without hitting disk.
        LOCK(cs_most_recent_block);
        most_recent_block_hash = hashBlock;
        most_recent_block = pblock;
        most_recent_compact_block = pcmpctblock;
        fWitnessesPresentInMostRecentCompactBlock = fWitnessEnabled;
    }

    m_connman.ForEachNode([this, &pcmpctblock, pindex, &msgMaker, fWitnessEnabled, &hashBlock](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
        AssertLockHeld(::cs_main);

        // TODO: Avoid the repeated-serialization here
        if (pnode->GetCommonVersion() < INVALID_CB_NO_BAN_VERSION || pnode->fDisconnect)
            return;
        ProcessBlockAvailability(pnode->GetId());
        CNodeState &state = *State(pnode->GetId());
        // If the peer has, or we announced to them the previous block already,
        // but we don't think they have this one, go ahead and announce it
        if (state.fPreferHeaderAndIDs && (!fWitnessEnabled || state.fWantsCmpctWitness) &&
                !PeerHasHeader(&state, pindex) && PeerHasHeader(&state, pindex->pprev)) {

            LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", "PeerManager::NewPoWValidBlock",
                    hashBlock.ToString(), pnode->GetId());
            m_connman.PushMessage(pnode, msgMaker.Make(NetMsgType::CMPCTBLOCK, *pcmpctblock));
            state.pindexBestHeaderSent = pindex;
        }
    });
}
1319 
/**
 * React to the active chain tip advancing: record the new height and, outside
 * initial block download, queue block announcements to peers.
 */
void PeerManager::UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) {
    const int nNewHeight = pindexNew->nHeight;
    m_connman.SetBestHeight(nNewHeight);

    SetServiceFlagsIBDCache(!fInitialDownload);
    if (!fInitialDownload) {
        // Find the hashes of all blocks that weren't previously in the best chain.
        std::vector<uint256> vHashes;
        const CBlockIndex *pindexToAnnounce = pindexNew;
        while (pindexToAnnounce != pindexFork) {
            vHashes.push_back(pindexToAnnounce->GetBlockHash());
            pindexToAnnounce = pindexToAnnounce->pprev;
            if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) {
                // Limit announcements in case of a huge reorganization.
                // Rely on the peer's synchronization mechanism in that case.
                break;
            }
        }
        // Relay inventory, but don't relay old inventory during initial block download.
        m_connman.ForEachNode([nNewHeight, &vHashes](CNode* pnode) {
            LOCK(pnode->cs_inventory);
            // Skip peers whose reported starting height implies these blocks
            // are far below their tip.
            if (nNewHeight > (pnode->nStartingHeight != -1 ? pnode->nStartingHeight - 2000 : 0)) {
                for (const uint256& hash : reverse_iterate(vHashes)) {
                    pnode->vBlockHashesToAnnounce.push_back(hash);
                }
            }
        });
        // NOTE(review): one statement following the ForEachNode call is
        // missing from this view of the file -- confirm against the original.
    }
}
1354 
/**
 * Handle the validation result for a block we received from a peer: possibly
 * punish the sender of an invalid block, and consider the sender of a valid
 * tip candidate for high-bandwidth compact block announcements.
 */
void PeerManager::BlockChecked(const CBlock& block, const BlockValidationState& state) {
    LOCK(cs_main);

    const uint256 hash(block.GetHash());
    std::map<uint256, std::pair<NodeId, bool>>::iterator it = mapBlockSource.find(hash);

    // If the block failed validation, we know where it came from and we're still connected
    // to that peer, maybe punish.
    if (state.IsInvalid() &&
        it != mapBlockSource.end() &&
        State(it->second.first)) {
            MaybePunishNodeForBlock(/*nodeid=*/ it->second.first, state, /*via_compact_block=*/ !it->second.second);
    }
    // Check that:
    // 1. The block is valid
    // 2. We're not in initial block download
    // 3. This is currently the best block we're aware of. We haven't updated
    //    the tip yet so we have no way to check this directly here. Instead we
    //    just check that there are currently no other blocks in flight.
    // NOTE(review): one line of this condition (presumably the
    // initial-block-download check described in point 2) is missing from this
    // view of the file -- confirm against the original source.
    else if (state.IsValid() &&
             mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) {
        if (it != mapBlockSource.end()) {
            MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first, m_connman);
        }
    }
    // Either way, we are done tracking where this block came from.
    if (it != mapBlockSource.end())
        mapBlockSource.erase(it);
}
1388 
1390 //
1391 // Messages
1392 //
1393 
1394 
1395 bool static AlreadyHaveTx(const GenTxid& gtxid, const CTxMemPool& mempool) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
1396 {
1397  assert(recentRejects);
1398  if (::ChainActive().Tip()->GetBlockHash() != hashRecentRejectsChainTip) {
1399  // If the chain tip has changed previously rejected transactions
1400  // might be now valid, e.g. due to a nLockTime'd tx becoming valid,
1401  // or a double-spend. Reset the rejects filter and give those
1402  // txs a second chance.
1403  hashRecentRejectsChainTip = ::ChainActive().Tip()->GetBlockHash();
1404  recentRejects->reset();
1405  }
1406 
1407  const uint256& hash = gtxid.GetHash();
1408 
1409  {
1410  LOCK(g_cs_orphans);
1411  if (!gtxid.IsWtxid() && mapOrphanTransactions.count(hash)) {
1412  return true;
1413  } else if (gtxid.IsWtxid() && g_orphans_by_wtxid.count(hash)) {
1414  return true;
1415  }
1416  }
1417 
1418  {
1419  LOCK(g_cs_recent_confirmed_transactions);
1420  if (g_recent_confirmed_transactions->contains(hash)) return true;
1421  }
1422 
1423  return recentRejects->contains(hash) || mempool.exists(gtxid);
1424 }
1425 
1426 bool static AlreadyHaveBlock(const uint256& block_hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
1427 {
1428  return LookupBlockIndex(block_hash) != nullptr;
1429 }
1430 
1431 void RelayTransaction(const uint256& txid, const uint256& wtxid, const CConnman& connman)
1432 {
1433  connman.ForEachNode([&txid, &wtxid](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
1434  AssertLockHeld(::cs_main);
1435 
1436  CNodeState* state = State(pnode->GetId());
1437  if (state == nullptr) return;
1438  if (state->m_wtxid_relay) {
1439  pnode->PushTxInventory(wtxid);
1440  } else {
1441  pnode->PushTxInventory(txid);
1442  }
1443  });
1444 }
1445 
/**
 * Relay an address to a small, deterministically chosen set of peers.
 * Reachable addresses go to 2 peers; unreachable ones to 1 or 2 at random.
 */
static void RelayAddress(const CAddress& addr, bool fReachable, const CConnman& connman)
{
    // Unreachable addresses are only relayed if the address type is relayable.
    if (!fReachable && !addr.IsRelayable()) return;

    // Relay to a limited number of other nodes
    // Use deterministic randomness to send to the same nodes for 24 hours
    // at a time so the m_addr_knowns of the chosen nodes prevent repeats
    uint64_t hashAddr = addr.GetHash();
    const CSipHasher hasher = connman.GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY).Write(hashAddr << 32).Write((GetTime() + hashAddr) / (24 * 60 * 60));
    FastRandomContext insecure_rand;

    // Relay reachable addresses to 2 peers. Unreachable addresses are relayed randomly to 1 or 2 peers.
    unsigned int nRelayNodes = (fReachable || (hasher.Finalize() & 1)) ? 2 : 1;

    std::array<std::pair<uint64_t, CNode*>,2> best{{{0, nullptr}, {0, nullptr}}};
    assert(nRelayNodes <= best.size());

    // Rank every relay-capable peer by a keyed hash of its node id and keep
    // the top nRelayNodes in `best` (sorted descending by hash key).
    auto sortfunc = [&best, &hasher, nRelayNodes](CNode* pnode) {
        if (pnode->RelayAddrsWithConn()) {
            uint64_t hashKey = CSipHasher(hasher).Write(pnode->GetId()).Finalize();
            for (unsigned int i = 0; i < nRelayNodes; i++) {
                if (hashKey > best[i].first) {
                    // Shift lower-ranked entries down one slot before inserting.
                    // NOTE(review): std::copy with the destination inside the
                    // source range is formally invalid; with nRelayNodes <= 2
                    // at most one element is shifted so the ranges do not
                    // overlap here -- revisit before enlarging `best`.
                    std::copy(best.begin() + i, best.begin() + nRelayNodes - 1, best.begin() + i + 1);
                    best[i] = std::make_pair(hashKey, pnode);
                    break;
                }
            }
        }
    };

    // Push the address to each selected peer (entries with key 0 are unfilled).
    auto pushfunc = [&addr, &best, nRelayNodes, &insecure_rand] {
        for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) {
            best[i].second->PushAddress(addr, insecure_rand);
        }
    };

    connman.ForEachNodeThen(std::move(sortfunc), std::move(pushfunc));
}
1484 
/**
 * Serve a single block-type getdata request (BLOCK, WITNESS_BLOCK,
 * FILTERED_BLOCK or CMPCT_BLOCK), enforcing the historical-serving limits and
 * the NODE_NETWORK_LIMITED pruning threshold.
 */
void static ProcessGetBlockData(CNode& pfrom, const CChainParams& chainparams, const CInv& inv, CConnman& connman)
{
    bool send = false;
    std::shared_ptr<const CBlock> a_recent_block;
    std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block;
    bool fWitnessesPresentInARecentCompactBlock;
    const Consensus::Params& consensusParams = chainparams.GetConsensus();
    {
        // Snapshot the most recently announced block caches.
        LOCK(cs_most_recent_block);
        a_recent_block = most_recent_block;
        a_recent_compact_block = most_recent_compact_block;
        fWitnessesPresentInARecentCompactBlock = fWitnessesPresentInMostRecentCompactBlock;
    }

    bool need_activate_chain = false;
    {
        LOCK(cs_main);
        const CBlockIndex* pindex = LookupBlockIndex(inv.hash);
        if (pindex) {
            if (pindex->HaveTxsDownloaded() && !pindex->IsValid(BLOCK_VALID_SCRIPTS) &&
                    pindex->IsValid(BLOCK_VALID_TREE)) {
                // If we have the block and all of its parents, but have not yet validated it,
                // we might be in the middle of connecting it (ie in the unlock of cs_main
                // before ActivateBestChain but after AcceptBlock).
                // In this case, we need to run ActivateBestChain prior to checking the relay
                // conditions below.
                need_activate_chain = true;
            }
        }
    } // release cs_main before calling ActivateBestChain
    if (need_activate_chain) {
        BlockValidationState state;
        if (!ActivateBestChain(state, chainparams, a_recent_block)) {
            LogPrint(BCLog::NET, "failed to activate chain (%s)\n", state.ToString());
        }
    }

    LOCK(cs_main);
    const CBlockIndex* pindex = LookupBlockIndex(inv.hash);
    if (pindex) {
        send = BlockRequestAllowed(pindex, consensusParams);
        if (!send) {
            LogPrint(BCLog::NET, "%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom.GetId());
        }
    }
    const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
    // disconnect node in case we have reached the outbound limit for serving historical blocks
    if (send &&
        connman.OutboundTargetReached(true) &&
        (((pindexBestHeader != nullptr) && (pindexBestHeader->GetBlockTime() - pindex->GetBlockTime() > HISTORICAL_BLOCK_AGE)) || inv.IsMsgFilteredBlk()) &&
        !pfrom.HasPermission(PF_DOWNLOAD) // nodes with the download permission may exceed target
    ) {
        LogPrint(BCLog::NET, "historical block serving limit reached, disconnect peer=%d\n", pfrom.GetId());

        //disconnect node
        pfrom.fDisconnect = true;
        send = false;
    }
    // Avoid leaking prune-height by never sending blocks below the NODE_NETWORK_LIMITED threshold
    if (send && !pfrom.HasPermission(PF_NOBAN) && (
            (((pfrom.GetLocalServices() & NODE_NETWORK_LIMITED) == NODE_NETWORK_LIMITED) && ((pfrom.GetLocalServices() & NODE_NETWORK) != NODE_NETWORK) && (::ChainActive().Tip()->nHeight - pindex->nHeight > (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2 /* add two blocks buffer extension for possible races */) )
       )) {
        LogPrint(BCLog::NET, "Ignore block request below NODE_NETWORK_LIMITED threshold from peer=%d\n", pfrom.GetId());

        //disconnect node and prevent it from stalling (would otherwise wait for the missing block)
        pfrom.fDisconnect = true;
        send = false;
    }
    // Pruned nodes may have deleted the block, so check whether
    // it's available before trying to send.
    if (send && (pindex->nStatus & BLOCK_HAVE_DATA))
    {
        std::shared_ptr<const CBlock> pblock;
        if (a_recent_block && a_recent_block->GetHash() == pindex->GetBlockHash()) {
            pblock = a_recent_block;
        } else if (inv.IsMsgWitnessBlk()) {
            // Fast-path: in this case it is possible to serve the block directly from disk,
            // as the network format matches the format on disk
            std::vector<uint8_t> block_data;
            if (!ReadRawBlockFromDisk(block_data, pindex, chainparams.MessageStart())) {
                assert(!"cannot load block from disk");
            }
            connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::BLOCK, MakeSpan(block_data)));
            // Don't set pblock as we've sent the block
        } else {
            // Send block from disk
            std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>();
            if (!ReadBlockFromDisk(*pblockRead, pindex, consensusParams))
                assert(!"cannot load block from disk");
            pblock = pblockRead;
        }
        if (pblock) {
            if (inv.IsMsgBlk()) {
                // NOTE(review): the statement for the plain (non-witness)
                // BLOCK case is missing from this view of the file -- confirm
                // against the original source.
            } else if (inv.IsMsgWitnessBlk()) {
                connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::BLOCK, *pblock));
            } else if (inv.IsMsgFilteredBlk()) {
                bool sendMerkleBlock = false;
                CMerkleBlock merkleBlock;
                if (pfrom.m_tx_relay != nullptr) {
                    LOCK(pfrom.m_tx_relay->cs_filter);
                    if (pfrom.m_tx_relay->pfilter) {
                        sendMerkleBlock = true;
                        merkleBlock = CMerkleBlock(*pblock, *pfrom.m_tx_relay->pfilter);
                    }
                }
                if (sendMerkleBlock) {
                    connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::MERKLEBLOCK, merkleBlock));
                    // CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
                    // This avoids hurting performance by pointlessly requiring a round-trip
                    // Note that there is currently no way for a node to request any single transactions we didn't send here -
                    // they must either disconnect and retry or request the full block.
                    // Thus, the protocol spec specified allows for us to provide duplicate txn here,
                    // however we MUST always provide at least what the remote peer needs
                    typedef std::pair<unsigned int, uint256> PairType;
                    for (PairType& pair : merkleBlock.vMatchedTxn)
                        connman.PushMessage(&pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::TX, *pblock->vtx[pair.first]));
                }
                // else
                // no response
            } else if (inv.IsMsgCmpctBlk()) {
                // If a peer is asking for old blocks, we're almost guaranteed
                // they won't have a useful mempool to match against a compact block,
                // and we don't feel like constructing the object for them, so
                // instead we respond with the full, non-compact block.
                bool fPeerWantsWitness = State(pfrom.GetId())->fWantsCmpctWitness;
                int nSendFlags = fPeerWantsWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
                if (CanDirectFetch(consensusParams) && pindex->nHeight >= ::ChainActive().Height() - MAX_CMPCTBLOCK_DEPTH) {
                    if ((fPeerWantsWitness || !fWitnessesPresentInARecentCompactBlock) && a_recent_compact_block && a_recent_compact_block->header.GetHash() == pindex->GetBlockHash()) {
                        connman.PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, *a_recent_compact_block));
                    } else {
                        CBlockHeaderAndShortTxIDs cmpctblock(*pblock, fPeerWantsWitness);
                        connman.PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
                    }
                } else {
                    connman.PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCK, *pblock));
                }
            }
        }

        // Trigger the peer node to send a getblocks request for the next batch of inventory
        if (inv.hash == pfrom.hashContinue)
        {
            // Send immediately. This must send even if redundant,
            // and we want it right after the last block so they don't
            // wait for other stuff first.
            std::vector<CInv> vInv;
            vInv.push_back(CInv(MSG_BLOCK, ::ChainActive().Tip()->GetBlockHash()));
            connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::INV, vInv));
            pfrom.hashContinue.SetNull();
        }
    }
}
1638 
1640 static CTransactionRef FindTxForGetData(const CTxMemPool& mempool, const CNode& peer, const GenTxid& gtxid, const std::chrono::seconds mempool_req, const std::chrono::seconds now) LOCKS_EXCLUDED(cs_main)
1641 {
1642  auto txinfo = mempool.info(gtxid);
1643  if (txinfo.tx) {
1644  // If a TX could have been INVed in reply to a MEMPOOL request,
1645  // or is older than UNCONDITIONAL_RELAY_DELAY, permit the request
1646  // unconditionally.
1647  if ((mempool_req.count() && txinfo.m_time <= mempool_req) || txinfo.m_time <= now - UNCONDITIONAL_RELAY_DELAY) {
1648  return std::move(txinfo.tx);
1649  }
1650  }
1651 
1652  {
1653  LOCK(cs_main);
1654  // Otherwise, the transaction must have been announced recently.
1655  if (State(peer.GetId())->m_recently_announced_invs.contains(gtxid.GetHash())) {
1656  // If it was, it can be relayed from either the mempool...
1657  if (txinfo.tx) return std::move(txinfo.tx);
1658  // ... or the relay pool.
1659  auto mi = mapRelay.find(gtxid.GetHash());
1660  if (mi != mapRelay.end()) return mi->second;
1661  }
1662  }
1663 
1664  return {};
1665 }
1666 
/**
 * Serve queued getdata requests from peer.m_getdata_requests: transaction
 * items are batch-processed from the front of the queue, then at most one
 * block item is handled per call. Items we cannot serve are answered with a
 * single NOTFOUND message.
 * NOTE(review): the opening brace of this function body is missing from this
 * view of the file -- confirm against the original source.
 */
void static ProcessGetData(CNode& pfrom, Peer& peer, const CChainParams& chainparams, CConnman& connman, CTxMemPool& mempool, const std::atomic<bool>& interruptMsgProc) EXCLUSIVE_LOCKS_REQUIRED(!cs_main, peer.m_getdata_requests_mutex)
    AssertLockNotHeld(cs_main);

    std::deque<CInv>::iterator it = peer.m_getdata_requests.begin();
    std::vector<CInv> vNotFound;
    const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());

    const std::chrono::seconds now = GetTime<std::chrono::seconds>();
    // Get last mempool request time
    const std::chrono::seconds mempool_req = pfrom.m_tx_relay != nullptr ? pfrom.m_tx_relay->m_last_mempool_req.load()
                                                                         : std::chrono::seconds::min();

    // Process as many TX items from the front of the getdata queue as
    // possible, since they're common and it's efficient to batch process
    // them.
    while (it != peer.m_getdata_requests.end() && it->IsGenTxMsg()) {
        if (interruptMsgProc) return;
        // The send buffer provides backpressure. If there's no space in
        // the buffer, pause processing until the next call.
        if (pfrom.fPauseSend) break;

        const CInv &inv = *it++;

        if (pfrom.m_tx_relay == nullptr) {
            // Ignore GETDATA requests for transactions from blocks-only peers.
            continue;
        }

        CTransactionRef tx = FindTxForGetData(mempool, pfrom, ToGenTxid(inv), mempool_req, now);
        if (tx) {
            // WTX and WITNESS_TX imply we serialize with witness
            int nSendFlags = (inv.IsMsgTx() ? SERIALIZE_TRANSACTION_NO_WITNESS : 0);
            connman.PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *tx));
            mempool.RemoveUnbroadcastTx(tx->GetHash());
            // As we're going to send tx, make sure its unconfirmed parents are made requestable.
            std::vector<uint256> parent_ids_to_add;
            {
                LOCK(mempool.cs);
                auto txiter = mempool.GetIter(tx->GetHash());
                if (txiter) {
                    const CTxMemPoolEntry::Parents& parents = (*txiter)->GetMemPoolParentsConst();
                    parent_ids_to_add.reserve(parents.size());
                    for (const CTxMemPoolEntry& parent : parents) {
                        // Only parents still younger than the unconditional
                        // relay window need to be made requestable explicitly.
                        if (parent.GetTime() > now - UNCONDITIONAL_RELAY_DELAY) {
                            parent_ids_to_add.push_back(parent.GetTx().GetHash());
                        }
                    }
                }
            }
            for (const uint256& parent_txid : parent_ids_to_add) {
                // Relaying a transaction with a recent but unconfirmed parent.
                if (WITH_LOCK(pfrom.m_tx_relay->cs_tx_inventory, return !pfrom.m_tx_relay->filterInventoryKnown.contains(parent_txid))) {
                    LOCK(cs_main);
                    State(pfrom.GetId())->m_recently_announced_invs.insert(parent_txid);
                }
            }
        } else {
            vNotFound.push_back(inv);
        }
    }

    // Only process one BLOCK item per call, since they're uncommon and can be
    // expensive to process.
    if (it != peer.m_getdata_requests.end() && !pfrom.fPauseSend) {
        const CInv &inv = *it++;
        if (inv.IsGenBlkMsg()) {
            ProcessGetBlockData(pfrom, chainparams, inv, connman);
        }
        // else: If the first item on the queue is an unknown type, we erase it
        // and continue processing the queue on the next call.
    }

    peer.m_getdata_requests.erase(peer.m_getdata_requests.begin(), it);

    if (!vNotFound.empty()) {
        // Let the peer know that we didn't find what it asked for, so it doesn't
        // have to wait around forever.
        // SPV clients care about this message: it's needed when they are
        // recursively walking the dependencies of relevant unconfirmed
        // transactions. SPV clients want to do that because they want to know
        // about (and store and rebroadcast and risk analyze) the dependencies
        // of transactions relevant to them, without having to download the
        // entire memory pool.
        // Also, other nodes can use these messages to automatically request a
        // transaction from some other peer that annnounced it, and stop
        // waiting for us to respond.
        // In normal operation, we often send NOTFOUND messages for parents of
        // transactions that we relay; if a peer is missing a parent, they may
        // assume we have them and request the parents from us.
        connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::NOTFOUND, vNotFound));
    }
}
1760 
1761 static uint32_t GetFetchFlags(const CNode& pfrom) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
1762  uint32_t nFetchFlags = 0;
1763  if ((pfrom.GetLocalServices() & NODE_WITNESS) && State(pfrom.GetId())->fHaveWitness) {
1764  nFetchFlags |= MSG_WITNESS_FLAG;
1765  }
1766  return nFetchFlags;
1767 }
1768 
// Reply to a getblocktxn request with the transactions of `block` selected by
// the indexes in `req`, punishing peers that request out-of-range indexes.
// NOTE(review): the signature line of this method (expected to be
// PeerManager::SendBlockTransactions taking the requesting CNode, the CBlock
// and the BlockTransactionsRequest `req`) is missing from this view of the
// file -- confirm against the original source / header.
    BlockTransactions resp(req);
    for (size_t i = 0; i < req.indexes.size(); i++) {
        // An index past the end of the block's transaction list is invalid.
        if (req.indexes[i] >= block.vtx.size()) {
            Misbehaving(pfrom.GetId(), 100, "getblocktxn with out-of-bounds tx indices");
            return;
        }
        resp.txn[i] = block.vtx[req.indexes[i]];
    }
    LOCK(cs_main);
    const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
    int nSendFlags = State(pfrom.GetId())->fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
    m_connman.PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCKTXN, resp));
}
1783 
// Handle a batch of headers received from a peer (a HEADERS message, or
// headers reconstructed from a compact block when via_compact_block is true).
// Checks that the headers connect to each other and to our block index,
// submits them to the chainstate manager, updates our view of the peer's best
// block, optionally direct-fetches the announced blocks, and applies
// outbound-peer disconnect/protection policy.
void PeerManager::ProcessHeadersMessage(CNode& pfrom, const std::vector<CBlockHeader>& headers, bool via_compact_block)
{
    const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
    size_t nCount = headers.size();

    if (nCount == 0) {
        // Nothing interesting. Stop asking this peers for more headers.
        return;
    }

    bool received_new_header = false;
    const CBlockIndex *pindexLast = nullptr;
    {
        LOCK(cs_main);
        CNodeState *nodestate = State(pfrom.GetId());

        // If this looks like it could be a block announcement (nCount <
        // MAX_BLOCKS_TO_ANNOUNCE), use special logic for handling headers that
        // don't connect:
        // - Send a getheaders message in response to try to connect the chain.
        // - The peer can send up to MAX_UNCONNECTING_HEADERS in a row that
        //   don't connect before giving DoS points
        // - Once a headers message is received that is valid and does connect,
        //   nUnconnectingHeaders gets reset back to 0.
        if (!LookupBlockIndex(headers[0].hashPrevBlock) && nCount < MAX_BLOCKS_TO_ANNOUNCE) {
            nodestate->nUnconnectingHeaders++;
            // NOTE(review): one statement is elided from this dump here
            // (Doxygen line 1810) -- presumably the getheaders PushMessage
            // that the log line below describes; confirm against upstream.
            LogPrint(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n",
                    headers[0].GetHash().ToString(),
                    headers[0].hashPrevBlock.ToString(),
                    // NOTE(review): one argument is elided here (Doxygen line
                    // 1814); the "(%d)" in the format string expects a height
                    // argument -- confirm against upstream.
                    pfrom.GetId(), nodestate->nUnconnectingHeaders);
            // Set hashLastUnknownBlock for this peer, so that if we
            // eventually get the headers - even from a different peer -
            // we can use this peer to download.
            UpdateBlockAvailability(pfrom.GetId(), headers.back().GetHash());

            if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS == 0) {
                Misbehaving(pfrom.GetId(), 20, strprintf("%d non-connecting headers", nodestate->nUnconnectingHeaders));
            }
            return;
        }

        // Each header in the message must build directly on the previous one.
        uint256 hashLastBlock;
        for (const CBlockHeader& header : headers) {
            if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) {
                Misbehaving(pfrom.GetId(), 20, "non-continuous headers sequence");
                return;
            }
            hashLastBlock = header.GetHash();
        }

        // If we don't have the last header, then they'll have given us
        // something new (if these headers are valid).
        if (!LookupBlockIndex(hashLastBlock)) {
            received_new_header = true;
        }
    }

    BlockValidationState state;
    if (!m_chainman.ProcessNewBlockHeaders(headers, state, m_chainparams, &pindexLast)) {
        if (state.IsInvalid()) {
            MaybePunishNodeForBlock(pfrom.GetId(), state, via_compact_block, "invalid header received");
            return;
        }
    }

    {
        LOCK(cs_main);
        CNodeState *nodestate = State(pfrom.GetId());
        if (nodestate->nUnconnectingHeaders > 0) {
            LogPrint(BCLog::NET, "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n", pfrom.GetId(), nodestate->nUnconnectingHeaders);
        }
        nodestate->nUnconnectingHeaders = 0;

        assert(pindexLast);
        UpdateBlockAvailability(pfrom.GetId(), pindexLast->GetBlockHash());

        // From here, pindexBestKnownBlock should be guaranteed to be non-null,
        // because it is set in UpdateBlockAvailability. Some nullptr checks
        // are still present, however, as belt-and-suspenders.

        if (received_new_header && pindexLast->nChainWork > ::ChainActive().Tip()->nChainWork) {
            nodestate->m_last_block_announcement = GetTime();
        }

        if (nCount == MAX_HEADERS_RESULTS) {
            // Headers message had its maximum size; the peer may have more headers.
            // TODO: optimize: if pindexLast is an ancestor of ::ChainActive().Tip or pindexBestHeader, continue
            // from there instead.
            LogPrint(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom.GetId(), pfrom.nStartingHeight);
            m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexLast), uint256()));
        }

        bool fCanDirectFetch = CanDirectFetch(m_chainparams.GetConsensus());
        // If this set of headers is valid and ends in a block with at least as
        // much work as our tip, download as much as possible.
        if (fCanDirectFetch && pindexLast->IsValid(BLOCK_VALID_TREE) && ::ChainActive().Tip()->nChainWork <= pindexLast->nChainWork) {
            std::vector<const CBlockIndex*> vToFetch;
            const CBlockIndex *pindexWalk = pindexLast;
            // Calculate all the blocks we'd need to switch to pindexLast, up to a limit.
            while (pindexWalk && !::ChainActive().Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
                if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
                        !mapBlocksInFlight.count(pindexWalk->GetBlockHash()) &&
                        (!IsWitnessEnabled(pindexWalk->pprev, m_chainparams.GetConsensus()) || State(pfrom.GetId())->fHaveWitness)) {
                    // We don't have this block, and it's not yet in flight.
                    vToFetch.push_back(pindexWalk);
                }
                pindexWalk = pindexWalk->pprev;
            }
            // If pindexWalk still isn't on our main chain, we're looking at a
            // very large reorg at a time we think we're close to caught up to
            // the main chain -- this shouldn't really happen. Bail out on the
            // direct fetch and rely on parallel download instead.
            if (!::ChainActive().Contains(pindexWalk)) {
                LogPrint(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
                        pindexLast->GetBlockHash().ToString(),
                        pindexLast->nHeight);
            } else {
                std::vector<CInv> vGetData;
                // Download as much as possible, from earliest to latest.
                for (const CBlockIndex *pindex : reverse_iterate(vToFetch)) {
                    if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
                        // Can't download any more from this peer
                        break;
                    }
                    uint32_t nFetchFlags = GetFetchFlags(pfrom);
                    vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
                    MarkBlockAsInFlight(m_mempool, pfrom.GetId(), pindex->GetBlockHash(), pindex);
                    LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
                            pindex->GetBlockHash().ToString(), pfrom.GetId());
                }
                if (vGetData.size() > 1) {
                    LogPrint(BCLog::NET, "Downloading blocks toward %s (%d) via headers direct fetch\n",
                            pindexLast->GetBlockHash().ToString(), pindexLast->nHeight);
                }
                if (vGetData.size() > 0) {
                    if (nodestate->fSupportsDesiredCmpctVersion && vGetData.size() == 1 && mapBlocksInFlight.size() == 1 && pindexLast->pprev->IsValid(BLOCK_VALID_CHAIN)) {
                        // In any case, we want to download using a compact block, not a regular one
                        vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
                    }
                    // NOTE(review): a statement is elided from this dump here
                    // (Doxygen line 1925) -- presumably the GETDATA
                    // PushMessage sending vGetData; confirm against upstream.
                }
            }
        }
        // If we're in IBD, we want outbound peers that will serve us a useful
        // chain. Disconnect peers that are on chains with insufficient work.
        // NOTE(review): a line is elided from this dump here (Doxygen line
        // 1931) -- presumably the opening `if (...) {` whose matching closing
        // brace appears after the insufficient-work block below; confirm
        // against upstream.
        // When nCount < MAX_HEADERS_RESULTS, we know we have no more
        // headers to fetch from this peer.
        if (nodestate->pindexBestKnownBlock && nodestate->pindexBestKnownBlock->nChainWork < nMinimumChainWork) {
            // This peer has too little work on their headers chain to help
            // us sync -- disconnect if it is an outbound disconnection
            // candidate.
            // Note: We compare their tip to nMinimumChainWork (rather than
            // ::ChainActive().Tip()) because we won't start block download
            // until we have a headers chain that has at least
            // nMinimumChainWork, even if a peer has a chain past our tip,
            // as an anti-DoS measure.
            if (pfrom.IsOutboundOrBlockRelayConn()) {
                LogPrintf("Disconnecting outbound peer %d -- headers chain has insufficient work\n", pfrom.GetId());
                pfrom.fDisconnect = true;
            }
        }
        } // closes the conditional whose opening line (Doxygen 1931) is elided above

        // If this is an outbound full-relay peer, check to see if we should protect
        // it from the bad/lagging chain logic.
        // Note that outbound block-relay peers are excluded from this protection, and
        // thus always subject to eviction under the bad/lagging chain logic.
        // See ChainSyncTimeoutState.
        if (!pfrom.fDisconnect && pfrom.IsFullOutboundConn() && nodestate->pindexBestKnownBlock != nullptr) {
            if (g_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= ::ChainActive().Tip()->nChainWork && !nodestate->m_chain_sync.m_protect) {
                LogPrint(BCLog::NET, "Protecting outbound peer=%d from eviction\n", pfrom.GetId());
                nodestate->m_chain_sync.m_protect = true;
                ++g_outbound_peers_with_protect_from_disconnect;
            }
        }
    }

    return;
}
1966 
// Reconsider orphan transactions whose parents may now be available.
// Pops entries from orphan_work_set one at a time; processes at most one
// orphan per call (note the `break` after each accept/reject), leaving the
// remainder in the set for subsequent calls. Requires cs_main and
// g_cs_orphans to be held by the caller.
void PeerManager::ProcessOrphanTx(std::set<uint256>& orphan_work_set)
{
    AssertLockHeld(cs_main);
    AssertLockHeld(g_cs_orphans);

    while (!orphan_work_set.empty()) {
        const uint256 orphanHash = *orphan_work_set.begin();
        orphan_work_set.erase(orphan_work_set.begin());

        // The orphan may have been erased (e.g. expired) since it was queued.
        auto orphan_it = mapOrphanTransactions.find(orphanHash);
        if (orphan_it == mapOrphanTransactions.end()) continue;

        const CTransactionRef porphanTx = orphan_it->second.tx;
        TxValidationState state;
        std::list<CTransactionRef> removed_txn;

        if (AcceptToMemoryPool(m_mempool, state, porphanTx, &removed_txn, false /* bypass_limits */)) {
            LogPrint(BCLog::MEMPOOL, " accepted orphan tx %s\n", orphanHash.ToString());
            RelayTransaction(orphanHash, porphanTx->GetWitnessHash(), m_connman);
            // Any orphans that spend an output of this now-accepted tx become
            // candidates for reconsideration on later iterations/calls.
            for (unsigned int i = 0; i < porphanTx->vout.size(); i++) {
                auto it_by_prev = mapOrphanTransactionsByPrev.find(COutPoint(orphanHash, i));
                if (it_by_prev != mapOrphanTransactionsByPrev.end()) {
                    for (const auto& elem : it_by_prev->second) {
                        orphan_work_set.insert(elem->first);
                    }
                }
            }
            EraseOrphanTx(orphanHash);
            for (const CTransactionRef& removedTx : removed_txn) {
                AddToCompactExtraTransactions(removedTx);
            }
            break;
        } else if (state.GetResult() != TxValidationResult::TX_MISSING_INPUTS) {
            if (state.IsInvalid()) {
                LogPrint(BCLog::MEMPOOL, " invalid orphan tx %s from peer=%d. %s\n",
                    orphanHash.ToString(),
                    orphan_it->second.fromPeer,
                    state.ToString());
                // Maybe punish peer that gave us an invalid orphan tx
                MaybePunishNodeForTx(orphan_it->second.fromPeer, state);
            }
            // Has inputs but not accepted to mempool
            // Probably non-standard or insufficient fee
            LogPrint(BCLog::MEMPOOL, " removed orphan tx %s\n", orphanHash.ToString());
            // NOTE(review): a line is elided from this dump here (Doxygen
            // line 2019) -- presumably an `if` guarding the reject-filter
            // insertion below (its matching closing brace appears before
            // EraseOrphanTx); confirm against upstream.
            // We can add the wtxid of this transaction to our reject filter.
            // Do not add txids of witness transactions or witness-stripped
            // transactions to the filter, as they can have been malleated;
            // adding such txids to the reject filter would potentially
            // interfere with relay of valid transactions from peers that
            // do not support wtxid-based relay. See
            // https://github.com/bitcoin/bitcoin/issues/8279 for details.
            // We can remove this restriction (and always add wtxids to
            // the filter even for witness stripped transactions) once
            // wtxid-based relay is broadly deployed.
            // See also comments in https://github.com/bitcoin/bitcoin/pull/18044#discussion_r443419034
            // for concerns around weakening security of unupgraded nodes
            // if we start doing this too early.
            assert(recentRejects);
            recentRejects->insert(porphanTx->GetWitnessHash());
            // If the transaction failed for TX_INPUTS_NOT_STANDARD,
            // then we know that the witness was irrelevant to the policy
            // failure, since this check depends only on the txid
            // (the scriptPubKey being spent is covered by the txid).
            // Add the txid to the reject filter to prevent repeated
            // processing of this transaction in the event that child
            // transactions are later received (resulting in
            // parent-fetching by txid via the orphan-handling logic).
            if (state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && porphanTx->GetWitnessHash() != porphanTx->GetHash()) {
                // We only add the txid if it differs from the wtxid, to
                // avoid wasting entries in the rolling bloom filter.
                recentRejects->insert(porphanTx->GetHash());
            }
            } // closes the conditional whose opening line (Doxygen 2019) is elided above
            EraseOrphanTx(orphanHash);
            break;
        }
    }
    // Sanity-check mempool consistency against the active UTXO set.
    m_mempool.check(&::ChainstateActive().CoinsTip());
}
2055 
// Validate a getcfilters/getcfheaders/getcfcheckpt request (BIP 157) and
// resolve its stop block and filter index.
// On success, sets stop_index and filter_index and returns true. On failure,
// returns false and may mark the peer for disconnect (unsupported filter
// type, unknown/disallowed stop block, inverted or oversized height range).
static bool PrepareBlockFilterRequest(CNode& peer, const CChainParams& chain_params,
                                      BlockFilterType filter_type, uint32_t start_height,
                                      const uint256& stop_hash, uint32_t max_height_diff,
                                      const CBlockIndex*& stop_index,
                                      BlockFilterIndex*& filter_index)
{
    const bool supported_filter_type =
        (filter_type == BlockFilterType::BASIC &&
    // NOTE(review): the second operand of this && (and the expression's
    // closing paren/semicolon) is elided from this dump (Doxygen line 2079)
    // -- presumably a check that serving block filters to peers is enabled;
    // confirm against upstream.
    if (!supported_filter_type) {
        LogPrint(BCLog::NET, "peer %d requested unsupported block filter type: %d\n",
                 peer.GetId(), static_cast<uint8_t>(filter_type));
        peer.fDisconnect = true;
        return false;
    }

    {
        LOCK(cs_main);
        stop_index = LookupBlockIndex(stop_hash);

        // Check that the stop block exists and the peer would be allowed to fetch it.
        if (!stop_index || !BlockRequestAllowed(stop_index, chain_params.GetConsensus())) {
            LogPrint(BCLog::NET, "peer %d requested invalid block hash: %s\n",
                     peer.GetId(), stop_hash.ToString());
            peer.fDisconnect = true;
            return false;
        }
    }

    uint32_t stop_height = stop_index->nHeight;
    if (start_height > stop_height) {
        LogPrint(BCLog::NET, "peer %d sent invalid getcfilters/getcfheaders with " /* Continued */
                 "start height %d and stop height %d\n",
                 peer.GetId(), start_height, stop_height);
        peer.fDisconnect = true;
        return false;
    }
    if (stop_height - start_height >= max_height_diff) {
        LogPrint(BCLog::NET, "peer %d requested too many cfilters/cfheaders: %d / %d\n",
                 peer.GetId(), stop_height - start_height + 1, max_height_diff);
        peer.fDisconnect = true;
        return false;
    }

    filter_index = GetBlockFilterIndex(filter_type);
    if (!filter_index) {
        // Not a peer fault: the index for this (supported) type is simply not
        // running, so no disconnect here.
        LogPrint(BCLog::NET, "Filter index for supported type %s not found\n", BlockFilterTypeName(filter_type));
        return false;
    }

    return true;
}
2123 
// Handle a getcfilters request (BIP 157): look up the requested range of
// compact block filters and send one CFILTER message per filter.
static void ProcessGetCFilters(CNode& peer, CDataStream& vRecv, const CChainParams& chain_params,
                               CConnman& connman)
{
    uint8_t filter_type_ser;
    uint32_t start_height;
    uint256 stop_hash;

    vRecv >> filter_type_ser >> start_height >> stop_hash;

    const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser);

    const CBlockIndex* stop_index;
    BlockFilterIndex* filter_index;
    // Validates the request and resolves stop_index/filter_index; may set
    // peer.fDisconnect on protocol violations.
    if (!PrepareBlockFilterRequest(peer, chain_params, filter_type, start_height, stop_hash,
                                   MAX_GETCFILTERS_SIZE, stop_index, filter_index)) {
        return;
    }

    std::vector<BlockFilter> filters;
    if (!filter_index->LookupFilterRange(start_height, stop_index, filters)) {
        // Index may still be syncing; fail silently rather than punish the peer.
        LogPrint(BCLog::NET, "Failed to find block filter in index: filter_type=%s, start_height=%d, stop_hash=%s\n",
                 BlockFilterTypeName(filter_type), start_height, stop_hash.ToString());
        return;
    }

    for (const auto& filter : filters) {
        // NOTE(review): the declaration of `msg` is elided from this dump
        // (Doxygen line 2160) -- presumably
        // `CSerializedNetMsg msg = CNetMsgMaker(peer.GetCommonVersion())`;
        // confirm against upstream.
            .Make(NetMsgType::CFILTER, filter);
        connman.PushMessage(&peer, std::move(msg));
    }
}
2165 
// Handle a getcfheaders request (BIP 157): look up the previous filter header
// and the filter hashes for the requested range, and reply with a single
// CFHEADERS message.
static void ProcessGetCFHeaders(CNode& peer, CDataStream& vRecv, const CChainParams& chain_params,
                                CConnman& connman)
{
    uint8_t filter_type_ser;
    uint32_t start_height;
    uint256 stop_hash;

    vRecv >> filter_type_ser >> start_height >> stop_hash;

    const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser);

    const CBlockIndex* stop_index;
    BlockFilterIndex* filter_index;
    // Validates the request and resolves stop_index/filter_index; may set
    // peer.fDisconnect on protocol violations.
    if (!PrepareBlockFilterRequest(peer, chain_params, filter_type, start_height, stop_hash,
                                   MAX_GETCFHEADERS_SIZE, stop_index, filter_index)) {
        return;
    }

    // prev_header stays all-zero (its default) when start_height == 0, per
    // the BIP 157 convention for the genesis range.
    uint256 prev_header;
    if (start_height > 0) {
        const CBlockIndex* const prev_block =
            stop_index->GetAncestor(static_cast<int>(start_height - 1));
        if (!filter_index->LookupFilterHeader(prev_block, prev_header)) {
            LogPrint(BCLog::NET, "Failed to find block filter header in index: filter_type=%s, block_hash=%s\n",
                     BlockFilterTypeName(filter_type), prev_block->GetBlockHash().ToString());
            return;
        }
    }

    std::vector<uint256> filter_hashes;
    if (!filter_index->LookupFilterHashRange(start_height, stop_index, filter_hashes)) {
        LogPrint(BCLog::NET, "Failed to find block filter hashes in index: filter_type=%s, start_height=%d, stop_hash=%s\n",
                 BlockFilterTypeName(filter_type), start_height, stop_hash.ToString());
        return;
    }

    // NOTE(review): the declaration of `msg` is elided from this dump
    // (Doxygen line 2212) -- presumably
    // `CSerializedNetMsg msg = CNetMsgMaker(peer.GetCommonVersion())`;
    // confirm against upstream.
        .Make(NetMsgType::CFHEADERS,
              filter_type_ser,
              stop_index->GetBlockHash(),
              prev_header,
              filter_hashes);
    connman.PushMessage(&peer, std::move(msg));
}
2220 
// Handle a getcfcheckpt request (BIP 157): collect the filter header at every
// CFCHECKPT_INTERVAL-th block up to the stop block and reply with one
// CFCHECKPT message.
static void ProcessGetCFCheckPt(CNode& peer, CDataStream& vRecv, const CChainParams& chain_params,
                                CConnman& connman)
{
    uint8_t filter_type_ser;
    uint256 stop_hash;

    vRecv >> filter_type_ser >> stop_hash;

    const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser);

    const CBlockIndex* stop_index;
    BlockFilterIndex* filter_index;
    // start_height 0 and an unbounded max_height_diff: checkpoints always
    // cover the whole chain up to stop_hash.
    if (!PrepareBlockFilterRequest(peer, chain_params, filter_type, /*start_height=*/0, stop_hash,
                                   /*max_height_diff=*/std::numeric_limits<uint32_t>::max(),
                                   stop_index, filter_index)) {
        return;
    }

    // One checkpoint per CFCHECKPT_INTERVAL blocks below the stop height.
    std::vector<uint256> headers(stop_index->nHeight / CFCHECKPT_INTERVAL);

    // Populate headers.
    // Walk backwards from the stop block; each GetAncestor call starts from
    // the previously found (lower) ancestor, keeping the walk cheap.
    const CBlockIndex* block_index = stop_index;
    for (int i = headers.size() - 1; i >= 0; i--) {
        int height = (i + 1) * CFCHECKPT_INTERVAL;
        block_index = block_index->GetAncestor(height);

        if (!filter_index->LookupFilterHeader(block_index, headers[i])) {
            LogPrint(BCLog::NET, "Failed to find block filter header in index: filter_type=%s, block_hash=%s\n",
                     BlockFilterTypeName(filter_type), block_index->GetBlockHash().ToString());
            return;
        }
    }

    // NOTE(review): the declaration of `msg` is elided from this dump
    // (Doxygen line 2264) -- presumably
    // `CSerializedNetMsg msg = CNetMsgMaker(peer.GetCommonVersion())`;
    // confirm against upstream.
        .Make(NetMsgType::CFCHECKPT,
              filter_type_ser,
              stop_index->GetBlockHash(),
              headers);
    connman.PushMessage(&peer, std::move(msg));
}
2271 
2272 void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDataStream& vRecv,
2273  const std::chrono::microseconds time_received,
2274  const std::atomic<bool>& interruptMsgProc)
2275 {
2276  LogPrint(BCLog::NET, "received: %s (%u bytes) peer=%d\n", SanitizeString(msg_type), vRecv.size(), pfrom.GetId());
2277  if (gArgs.IsArgSet("-dropmessagestest") && GetRand(gArgs.GetArg("-dropmessagestest", 0)) == 0)
2278  {
2279  LogPrintf("dropmessagestest DROPPING RECV MESSAGE\n");
2280  return;
2281  }
2282 
2283  PeerRef peer = GetPeerRef(pfrom.GetId());
2284  if (peer == nullptr) return;
2285 
2286  if (msg_type == NetMsgType::VERSION) {
2287  // Each connection can only send one version message
2288  if (pfrom.nVersion != 0)
2289  {
2290  Misbehaving(pfrom.GetId(), 1, "redundant version message");
2291  return;
2292  }
2293 
2294  int64_t nTime;
2295  CAddress addrMe;
2296  CAddress addrFrom;
2297  uint64_t nNonce = 1;
2298  uint64_t nServiceInt;
2299  ServiceFlags nServices;
2300  int nVersion;
2301  std::string cleanSubVer;
2302  int nStartingHeight = -1;
2303  bool fRelay = true;
2304 
2305  vRecv >> nVersion >> nServiceInt >> nTime >> addrMe;
2306  nServices = ServiceFlags(nServiceInt);
2307  if (!pfrom.IsInboundConn())
2308  {
2309  m_connman.SetServices(pfrom.addr, nServices);
2310  }
2311  if (pfrom.ExpectServicesFromConn() && !HasAllDesirableServiceFlags(nServices))
2312  {
2313  LogPrint(BCLog::NET, "peer=%d does not offer the expected services (%08x offered, %08x expected); disconnecting\n", pfrom.GetId(), nServices, GetDesirableServiceFlags(nServices));
2314  pfrom.fDisconnect = true;
2315  return;
2316  }
2317 
2318  if (nVersion < MIN_PEER_PROTO_VERSION) {
2319  // disconnect from peers older than this proto version
2320  LogPrint(BCLog::NET, "peer=%d using obsolete version %i; disconnecting\n", pfrom.GetId(), nVersion);
2321  pfrom.fDisconnect = true;
2322  return;
2323  }
2324 
2325  if (!vRecv.empty())
2326  vRecv >> addrFrom >> nNonce;
2327  if (!vRecv.empty()) {
2328  std::string strSubVer;
2329  vRecv >> LIMITED_STRING(strSubVer, MAX_SUBVERSION_LENGTH);
2330  cleanSubVer = SanitizeString(strSubVer);
2331  }
2332  if (!vRecv.empty()) {
2333  vRecv >> nStartingHeight;
2334  }
2335  if (!vRecv.empty())
2336  vRecv >> fRelay;
2337  // Disconnect if we connected to ourself
2338  if (pfrom.IsInboundConn() && !m_connman.CheckIncomingNonce(nNonce))
2339  {
2340  LogPrintf("connected to self at %s, disconnecting\n", pfrom.addr.ToString());
2341  pfrom.fDisconnect = true;
2342  return;
2343  }
2344 
2345  if (pfrom.IsInboundConn() && addrMe.IsRoutable())
2346  {
2347  SeenLocal(addrMe);
2348  }
2349 
2350  // Be shy and don't send version until we hear
2351  if (pfrom.IsInboundConn())
2352  PushNodeVersion(pfrom, m_connman, GetAdjustedTime());
2353 
2354  // Change version
2355  const int greatest_common_version = std::min(nVersion, PROTOCOL_VERSION);
2356  pfrom.SetCommonVersion(greatest_common_version);
2357  pfrom.nVersion = nVersion;
2358 
2359  const CNetMsgMaker msg_maker(greatest_common_version);
2360 
2361  if (greatest_common_version >= WTXID_RELAY_VERSION) {
2362  m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::WTXIDRELAY));
2363  }
2364 
2365  m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::VERACK));
2366 
2367  // Signal ADDRv2 support (BIP155).
2368  m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::SENDADDRV2));
2369 
2370  pfrom.nServices = nServices;
2371  pfrom.SetAddrLocal(addrMe);
2372  {
2373  LOCK(pfrom.cs_SubVer);
2374  pfrom.cleanSubVer = cleanSubVer;
2375  }
2376  pfrom.nStartingHeight = nStartingHeight;
2377 
2378  // set nodes not relaying blocks and tx and not serving (parts) of the historical blockchain as "clients"
2379  pfrom.fClient = (!(nServices & NODE_NETWORK) && !(nServices & NODE_NETWORK_LIMITED));
2380 
2381  // set nodes not capable of serving the complete blockchain history as "limited nodes"
2382  pfrom.m_limited_node = (!(nServices & NODE_NETWORK) && (nServices & NODE_NETWORK_LIMITED));
2383 
2384  if (pfrom.m_tx_relay != nullptr) {
2385  LOCK(pfrom.m_tx_relay->cs_filter);
2386  pfrom.m_tx_relay->fRelayTxes = fRelay; // set to true after we get the first filter* message
2387  }
2388 
2389  if((nServices & NODE_WITNESS))
2390  {
2391  LOCK(cs_main);
2392  State(pfrom.GetId())->fHaveWitness = true;
2393  }
2394 
2395  // Potentially mark this peer as a preferred download peer.
2396  {
2397  LOCK(cs_main);
2398  UpdatePreferredDownload(pfrom, State(pfrom.GetId()));
2399  }
2400 
2401  if (!pfrom.IsInboundConn() && !pfrom.IsBlockOnlyConn()) {
2402  // For outbound peers, we try to relay our address (so that other
2403  // nodes can try to find us more quickly, as we have no guarantee
2404  // that an outbound peer is even aware of how to reach us) and do a
2405  // one-time address fetch (to help populate/update our addrman). If
2406  // we're starting up for the first time, our addrman may be pretty
2407  // empty and no one will know who we are, so these mechanisms are
2408  // important to help us connect to the network.
2409  //
2410  // We skip this for BLOCK_RELAY peers to avoid potentially leaking
2411  // information about our BLOCK_RELAY connections via address relay.
2413  {
2414  CAddress addr = GetLocalAddress(&pfrom.addr, pfrom.GetLocalServices());
2415  FastRandomContext insecure_rand;
2416  if (addr.IsRoutable())
2417  {
2418  LogPrint(BCLog::NET, "ProcessMessages: advertising address %s\n", addr.ToString());
2419  pfrom.PushAddress(addr, insecure_rand);
2420  } else if (IsPeerAddrLocalGood(&pfrom)) {
2421  addr.SetIP(addrMe);
2422  LogPrint(BCLog::NET, "ProcessMessages: advertising address %s\n", addr.ToString());
2423  pfrom.PushAddress(addr, insecure_rand);
2424  }
2425  }
2426 
2427  // Get recent addresses
2428  m_connman.PushMessage(&pfrom, CNetMsgMaker(greatest_common_version).Make(NetMsgType::GETADDR));
2429  pfrom.fGetAddr = true;
2430  }
2431 
2432  if (!pfrom.IsInboundConn()) {
2433  // For non-inbound connections, we update the addrman to record
2434  // connection success so that addrman will have an up-to-date
2435  // notion of which peers are online and available.
2436  //
2437  // While we strive to not leak information about block-relay-only
2438  // connections via the addrman, not moving an address to the tried
2439  // table is also potentially detrimental because new-table entries
2440  // are subject to eviction in the event of addrman collisions. We
2441  // mitigate the information-leak by never calling
2442  // CAddrMan::Connected() on block-relay-only peers; see
2443  // FinalizeNode().
2444  //
2445  // This moves an address from New to Tried table in Addrman,
2446  // resolves tried-table collisions, etc.
2448  }
2449 
2450  std::string remoteAddr;
2451  if (fLogIPs)
2452  remoteAddr = ", peeraddr=" + pfrom.addr.ToString();
2453 
2454  LogPrint(BCLog::NET, "receive version message: %s: version %d, blocks=%d, us=%s, peer=%d%s\n",
2455  cleanSubVer, pfrom.nVersion,
2456  pfrom.nStartingHeight, addrMe.ToString(), pfrom.GetId(),
2457  remoteAddr);
2458 
2459  int64_t nTimeOffset = nTime - GetTime();
2460  pfrom.nTimeOffset = nTimeOffset;
2461  AddTimeData(pfrom.addr, nTimeOffset);
2462 
2463  // If the peer is old enough to have the old alert system, send it the final alert.
2464  if (greatest_common_version <= 70012) {
2465  CDataStream finalAlert(ParseHex("60010000000000000000000000ffffff7f00000000ffffff7ffeffff7f01ffffff7f00000000ffffff7f00ffffff7f002f555247454e543a20416c657274206b657920636f6d70726f6d697365642c2075706772616465207265717569726564004630440220653febd6410f470f6bae11cad19c48413becb1ac2c17f908fd0fd53bdc3abd5202206d0e9c96fe88d4a0f01ed9dedae2b6f9e00da94cad0fecaae66ecf689bf71b50"), SER_NETWORK, PROTOCOL_VERSION);
2466  m_connman.PushMessage(&pfrom, CNetMsgMaker(greatest_common_version).Make("alert", finalAlert));
2467  }
2468 
2469  // Feeler connections exist only to verify if address is online.
2470  if (pfrom.IsFeelerConn()) {
2471  pfrom.fDisconnect = true;
2472  }
2473  return;
2474  }
2475 
2476  if (pfrom.nVersion == 0) {
2477  // Must have a version message before anything else
2478  Misbehaving(pfrom.GetId(), 1, "non-version message before version handshake");
2479  return;
2480  }
2481 
2482  // At this point, the outgoing message serialization version can't change.
2483  const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
2484 
2485  if (msg_type == NetMsgType::VERACK) {
2486  if (pfrom.fSuccessfullyConnected) return;
2487 
2488  if (!pfrom.IsInboundConn()) {
2489  LogPrintf("New outbound peer connected: version: %d, blocks=%d, peer=%d%s (%s)\n",
2490  pfrom.nVersion.load(), pfrom.nStartingHeight,
2491  pfrom.GetId(), (fLogIPs ? strprintf(", peeraddr=%s", pfrom.addr.ToString()) : ""),
2492  pfrom.m_tx_relay == nullptr ? "block-relay" : "full-relay");
2493  }
2494 
2495  if (pfrom.GetCommonVersion() >= SENDHEADERS_VERSION) {
2496  // Tell our peer we prefer to receive headers rather than inv's
2497  // We send this to non-NODE NETWORK peers as well, because even
2498  // non-NODE NETWORK peers can announce blocks (such as pruning
2499  // nodes)
2501  }
2502  if (pfrom.GetCommonVersion() >= SHORT_IDS_BLOCKS_VERSION) {
2503  // Tell our peer we are willing to provide version 1 or 2 cmpctblocks
2504  // However, we do not request new block announcements using
2505  // cmpctblock messages.
2506  // We send this to non-NODE NETWORK peers as well, because
2507  // they may wish to request compact blocks from us
2508  bool fAnnounceUsingCMPCTBLOCK = false;
2509  uint64_t nCMPCTBLOCKVersion = 2;
2510  if (pfrom.GetLocalServices() & NODE_WITNESS)
2511  m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
2512  nCMPCTBLOCKVersion = 1;
2513  m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
2514  }
2515  pfrom.fSuccessfullyConnected = true;
2516  return;
2517  }
2518 
2519  // Feature negotiation of wtxidrelay should happen between VERSION and
2520  // VERACK, to avoid relay problems from switching after a connection is up
2521  if (msg_type == NetMsgType::WTXIDRELAY) {
2522  if (pfrom.fSuccessfullyConnected) {
2523  // Disconnect peers that send wtxidrelay message after VERACK; this
2524  // must be negotiated between VERSION and VERACK.
2525  pfrom.fDisconnect = true;
2526  return;
2527  }
2528  if (pfrom.GetCommonVersion() >= WTXID_RELAY_VERSION) {
2529  LOCK(cs_main);
2530  if (!State(pfrom.GetId())->m_wtxid_relay) {
2531  State(pfrom.GetId())->m_wtxid_relay = true;
2532  g_wtxid_relay_peers++;
2533  }
2534  }
2535  return;
2536  }
2537 
2538  if (!pfrom.fSuccessfullyConnected) {
2539  LogPrint(BCLog::NET, "Unsupported message \"%s\" prior to verack from peer=%d\n", SanitizeString(msg_type), pfrom.GetId());
2540  return;
2541  }
2542 
2543  if (msg_type == NetMsgType::ADDR || msg_type == NetMsgType::ADDRV2) {
2544  int stream_version = vRecv.GetVersion();
2545  if (msg_type == NetMsgType::ADDRV2) {
2546  // Add ADDRV2_FORMAT to the version so that the CNetAddr and CAddress
2547  // unserialize methods know that an address in v2 format is coming.
2548  stream_version |= ADDRV2_FORMAT;
2549  }
2550 
2551  OverrideStream<CDataStream> s(&vRecv, vRecv.GetType(), stream_version);
2552  std::vector<CAddress> vAddr;
2553 
2554  s >> vAddr;
2555 
2556  if (!pfrom.RelayAddrsWithConn()) {
2557  return;
2558  }
2559  if (vAddr.size() > MAX_ADDR_TO_SEND)
2560  {
2561  Misbehaving(pfrom.GetId(), 20, strprintf("%s message size = %u", msg_type, vAddr.size()));
2562  return;
2563  }
2564 
2565  // Store the new addresses
2566  std::vector<CAddress> vAddrOk;
2567  int64_t nNow = GetAdjustedTime();
2568  int64_t nSince = nNow - 10 * 60;
2569  for (CAddress& addr : vAddr)
2570  {
2571  if (interruptMsgProc)
2572  return;
2573 
2574  // We only bother storing full nodes, though this may include
2575  // things which we would not make an outbound connection to, in
2576  // part because we may make feeler connections to them.
2578  continue;
2579 
2580  if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60)
2581  addr.nTime = nNow - 5 * 24 * 60 * 60;
2582  pfrom.AddAddressKnown(addr);
2583  if (m_banman && (m_banman->IsDiscouraged(addr) || m_banman->IsBanned(addr))) {
2584  // Do not process banned/discouraged addresses beyond remembering we received them
2585  continue;
2586  }
2587  bool fReachable = IsReachable(addr);
2588  if (addr.nTime > nSince && !pfrom.fGetAddr && vAddr.size() <= 10 && addr.IsRoutable())
2589  {
2590  // Relay to a limited number of other nodes
2591  RelayAddress(addr, fReachable, m_connman);
2592  }
2593  // Do not store addresses outside our network
2594  if (fReachable)
2595  vAddrOk.push_back(addr);
2596  }
2597  m_connman.AddNewAddresses(vAddrOk, pfrom.addr, 2 * 60 * 60);
2598  if (vAddr.size() < 1000)
2599  pfrom.fGetAddr = false;
2600  if (pfrom.IsAddrFetchConn())
2601  pfrom.fDisconnect = true;
2602  return;
2603  }
2604 
2605  if (msg_type == NetMsgType::SENDADDRV2) {
2606  pfrom.m_wants_addrv2 = true;
2607  return;
2608  }
2609 
2610  if (msg_type == NetMsgType::SENDHEADERS) {
2611  LOCK(cs_main);
2612  State(pfrom.GetId())->fPreferHeaders = true;
2613  return;
2614  }
2615 
2616  if (msg_type == NetMsgType::SENDCMPCT) {
2617  bool fAnnounceUsingCMPCTBLOCK = false;
2618  uint64_t nCMPCTBLOCKVersion = 0;
2619  vRecv >> fAnnounceUsingCMPCTBLOCK >> nCMPCTBLOCKVersion;
2620  if (nCMPCTBLOCKVersion == 1 || ((pfrom.GetLocalServices() & NODE_WITNESS) && nCMPCTBLOCKVersion == 2)) {
2621  LOCK(cs_main);
2622  // fProvidesHeaderAndIDs is used to "lock in" version of compact blocks we send (fWantsCmpctWitness)
2623  if (!State(pfrom.GetId())->fProvidesHeaderAndIDs) {
2624  State(pfrom.GetId())->fProvidesHeaderAndIDs = true;
2625  State(pfrom.GetId())->fWantsCmpctWitness = nCMPCTBLOCKVersion == 2;
2626  }
2627  if (State(pfrom.GetId())->fWantsCmpctWitness == (nCMPCTBLOCKVersion == 2)) // ignore later version announces
2628  State(pfrom.GetId())->fPreferHeaderAndIDs = fAnnounceUsingCMPCTBLOCK;
2629  if (!State(pfrom.GetId())->fSupportsDesiredCmpctVersion) {
2630  if (pfrom.GetLocalServices() & NODE_WITNESS)
2631  State(pfrom.GetId())->fSupportsDesiredCmpctVersion = (nCMPCTBLOCKVersion == 2);
2632  else
2633  State(pfrom.GetId())->fSupportsDesiredCmpctVersion = (nCMPCTBLOCKVersion == 1);
2634  }
2635  }
2636  return;
2637  }
2638 
2639  if (msg_type == NetMsgType::INV) {
2640  std::vector<CInv> vInv;
2641  vRecv >> vInv;
2642  if (vInv.size() > MAX_INV_SZ)
2643  {
2644  Misbehaving(pfrom.GetId(), 20, strprintf("inv message size = %u", vInv.size()));
2645  return;
2646  }
2647 
2648  // We won't accept tx inv's if we're in blocks-only mode, or this is a
2649  // block-relay-only peer
2650  bool fBlocksOnly = !g_relay_txes || (pfrom.m_tx_relay == nullptr);
2651 
2652  // Allow peers with relay permission to send data other than blocks in blocks only mode
2653  if (pfrom.HasPermission(PF_RELAY)) {
2654  fBlocksOnly = false;
2655  }
2656 
2657  LOCK(cs_main);
2658 
2659  const auto current_time = GetTime<std::chrono::microseconds>();
2660  uint256* best_block{nullptr};
2661 
2662  for (CInv& inv : vInv) {
2663  if (interruptMsgProc) return;
2664 
2665  // Ignore INVs that don't match wtxidrelay setting.
2666  // Note that orphan parent fetching always uses MSG_TX GETDATAs regardless of the wtxidrelay setting.
2667  // This is fine as no INV messages are involved in that process.
2668  if (State(pfrom.GetId())->m_wtxid_relay) {
2669  if (inv.IsMsgTx()) continue;
2670  } else {
2671  if (inv.IsMsgWtx()) continue;
2672  }
2673 
2674  if (inv.IsMsgBlk()) {
2675  const bool fAlreadyHave = AlreadyHaveBlock(inv.hash);
2676  LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId());
2677 
2678  UpdateBlockAvailability(pfrom.GetId(), inv.hash);
2679  if (!fAlreadyHave && !fImporting && !fReindex && !mapBlocksInFlight.count(inv.hash)) {
2680  // Headers-first is the primary method of announcement on
2681  // the network. If a node fell back to sending blocks by inv,
2682  // it's probably for a re-org. The final block hash
2683  // provided should be the highest, so send a getheaders and
2684  // then fetch the blocks we need to catch up.
2685  best_block = &inv.hash;
2686  }
2687  } else if (inv.IsGenTxMsg()) {
2688  const GenTxid gtxid = ToGenTxid(inv);
2689  const bool fAlreadyHave = AlreadyHaveTx(gtxid, m_mempool);
2690  LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId());
2691 
2692  pfrom.AddKnownTx(inv.hash);
2693  if (fBlocksOnly) {
2694  LogPrint(BCLog::NET, "transaction (%s) inv sent in violation of protocol, disconnecting peer=%d\n", inv.hash.ToString(), pfrom.GetId());
2695  pfrom.fDisconnect = true;
2696  return;
2697  } else if (!fAlreadyHave && !m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
2698  AddTxAnnouncement(pfrom, gtxid, current_time);
2699  }
2700  } else {
2701  LogPrint(BCLog::NET, "Unknown inv type \"%s\" received from peer=%d\n", inv.ToString(), pfrom.GetId());
2702  }
2703  }
2704 
2705  if (best_block != nullptr) {
2706  m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexBestHeader), *best_block));
2707  LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, best_block->ToString(), pfrom.GetId());
2708  }
2709 
2710  return;
2711  }
2712 
2713  if (msg_type == NetMsgType::GETDATA) {
2714  std::vector<CInv> vInv;
2715  vRecv >> vInv;
2716  if (vInv.size() > MAX_INV_SZ)
2717  {
2718  Misbehaving(pfrom.GetId(), 20, strprintf("getdata message size = %u", vInv.size()));
2719  return;
2720  }
2721 
2722  LogPrint(BCLog::NET, "received getdata (%u invsz) peer=%d\n", vInv.size(), pfrom.GetId());
2723 
2724  if (vInv.size() > 0) {
2725  LogPrint(BCLog::NET, "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom.GetId());
2726  }
2727 
2728  {
2729  LOCK(peer->m_getdata_requests_mutex);
2730  peer->m_getdata_requests.insert(peer->m_getdata_requests.end(), vInv.begin(), vInv.end());
2731  ProcessGetData(pfrom, *peer, m_chainparams, m_connman, m_mempool, interruptMsgProc);
2732  }
2733 
2734  return;
2735  }
2736 
2737  if (msg_type == NetMsgType::GETBLOCKS) {
2738  CBlockLocator locator;
2739  uint256 hashStop;
2740  vRecv >> locator >> hashStop;
2741 
2742  if (locator.vHave.size() > MAX_LOCATOR_SZ) {
2743  LogPrint(BCLog::NET, "getblocks locator size %lld > %d, disconnect peer=%d\n", locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.GetId());
2744  pfrom.fDisconnect = true;
2745  return;
2746  }
2747 
2748  // We might have announced the currently-being-connected tip using a
2749  // compact block, which resulted in the peer sending a getblocks
2750  // request, which we would otherwise respond to without the new block.
2751  // To avoid this situation we simply verify that we are on our best
2752  // known chain now. This is super overkill, but we handle it better
2753  // for getheaders requests, and there are no known nodes which support
2754  // compact blocks but still use getblocks to request blocks.
2755  {
2756  std::shared_ptr<const CBlock> a_recent_block;
2757  {
2758  LOCK(cs_most_recent_block);
2759  a_recent_block = most_recent_block;
2760  }
2761  BlockValidationState state;
2762  if (!ActivateBestChain(state, m_chainparams, a_recent_block)) {
2763  LogPrint(BCLog::NET, "failed to activate chain (%s)\n", state.ToString());
2764  }
2765  }
2766 
2767  LOCK(cs_main);
2768 
2769  // Find the last block the caller has in the main chain
2770  const CBlockIndex* pindex = FindForkInGlobalIndex(::ChainActive(), locator);
2771 
2772  // Send the rest of the chain
2773  if (pindex)
2774  pindex = ::ChainActive().Next(pindex);
2775  int nLimit = 500;
2776  LogPrint(BCLog::NET, "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom.GetId());
2777  for (; pindex; pindex = ::ChainActive().Next(pindex))
2778  {
2779  if (pindex->GetBlockHash() == hashStop)
2780  {
2781  LogPrint(BCLog::NET, " getblocks stopping at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
2782  break;
2783  }
2784  // If pruning, don't inv blocks unless we have on disk and are likely to still have
2785  // for some reasonable time window (1 hour) that block relay might require.
2786  const int nPrunedBlocksLikelyToHave = MIN_BLOCKS_TO_KEEP - 3600 / m_chainparams.GetConsensus().nPowTargetSpacing;
2787  if (fPruneMode && (!(pindex->nStatus & BLOCK_HAVE_DATA) || pindex->nHeight <= ::ChainActive().Tip()->nHeight - nPrunedBlocksLikelyToHave))
2788  {
2789  LogPrint(BCLog::NET, " getblocks stopping, pruned or too old block at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
2790  break;
2791  }
2792  WITH_LOCK(pfrom.cs_inventory, pfrom.vInventoryBlockToSend.push_back(pindex->GetBlockHash()));
2793  if (--nLimit <= 0)
2794  {
2795  // When this block is requested, we'll send an inv that'll
2796  // trigger the peer to getblocks the next batch of inventory.
2797  LogPrint(BCLog::NET, " getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
2798  pfrom.hashContinue = pindex->GetBlockHash();
2799  break;
2800  }
2801  }
2802  return;
2803  }
2804 
2805  if (msg_type == NetMsgType::GETBLOCKTXN) {
2807  vRecv >> req;
2808 
2809  std::shared_ptr<const CBlock> recent_block;
2810  {
2811  LOCK(cs_most_recent_block);
2812  if (most_recent_block_hash == req.blockhash)
2813  recent_block = most_recent_block;
2814  // Unlock cs_most_recent_block to avoid cs_main lock inversion
2815  }
2816  if (recent_block) {
2817  SendBlockTransactions(pfrom, *recent_block, req);
2818  return;
2819  }
2820 
2821  {
2822  LOCK(cs_main);
2823 
2824  const CBlockIndex* pindex = LookupBlockIndex(req.blockhash);
2825  if (!pindex || !(pindex->nStatus & BLOCK_HAVE_DATA)) {
2826  LogPrint(BCLog::NET, "Peer %d sent us a getblocktxn for a block we don't have\n", pfrom.GetId());
2827  return;
2828  }
2829 
2830  if (pindex->nHeight >= ::ChainActive().Height() - MAX_BLOCKTXN_DEPTH) {
2831  CBlock block;
2832  bool ret = ReadBlockFromDisk(block, pindex, m_chainparams.GetConsensus());
2833  assert(ret);
2834 
2835  SendBlockTransactions(pfrom, block, req);
2836  return;
2837  }
2838  }
2839 
2840  // If an older block is requested (should never happen in practice,
2841  // but can happen in tests) send a block response instead of a
2842  // blocktxn response. Sending a full block response instead of a
2843  // small blocktxn response is preferable in the case where a peer
2844  // might maliciously send lots of getblocktxn requests to trigger
2845  // expensive disk reads, because it will require the peer to
2846  // actually receive all the data read from disk over the network.
2847  LogPrint(BCLog::NET, "Peer %d sent us a getblocktxn for a block > %i deep\n", pfrom.GetId(), MAX_BLOCKTXN_DEPTH);
2848  CInv inv;
2849  WITH_LOCK(cs_main, inv.type = State(pfrom.GetId())->fWantsCmpctWitness ? MSG_WITNESS_BLOCK : MSG_BLOCK);
2850  inv.hash = req.blockhash;
2851  WITH_LOCK(peer->m_getdata_requests_mutex, peer->m_getdata_requests.push_back(inv));
2852  // The message processing loop will go around again (without pausing) and we'll respond then
2853  return;
2854  }
2855 
2856  if (msg_type == NetMsgType::GETHEADERS) {
2857  CBlockLocator locator;
2858  uint256 hashStop;
2859  vRecv >> locator >> hashStop;
2860 
2861  if (locator.vHave.size() > MAX_LOCATOR_SZ) {
2862  LogPrint(BCLog::NET, "getheaders locator size %lld > %d, disconnect peer=%d\n", locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.GetId());
2863  pfrom.fDisconnect = true;
2864  return;
2865  }
2866 
2867  LOCK(cs_main);
2868  if (::ChainstateActive().IsInitialBlockDownload() && !pfrom.HasPermission(PF_DOWNLOAD)) {
2869  LogPrint(BCLog::NET, "Ignoring getheaders from peer=%d because node is in initial block download\n", pfrom.GetId());
2870  return;
2871  }
2872 
2873  CNodeState *nodestate = State(pfrom.GetId());
2874  const CBlockIndex* pindex = nullptr;
2875  if (locator.IsNull())
2876  {
2877  // If locator is null, return the hashStop block
2878  pindex = LookupBlockIndex(hashStop);
2879  if (!pindex) {
2880  return;
2881  }
2882 
2883  if (!BlockRequestAllowed(pindex, m_chainparams.GetConsensus())) {
2884  LogPrint(BCLog::NET, "%s: ignoring request from peer=%i for old block header that isn't in the main chain\n", __func__, pfrom.GetId());
2885  return;
2886  }
2887  }
2888  else
2889  {
2890  // Find the last block the caller has in the main chain
2891  pindex = FindForkInGlobalIndex(::ChainActive(), locator);
2892  if (pindex)
2893  pindex = ::ChainActive().Next(pindex);
2894  }
2895 
2896  // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end
2897  std::vector<CBlock> vHeaders;
2898  int nLimit = MAX_HEADERS_RESULTS;
2899  LogPrint(BCLog::NET, "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), pfrom.GetId());
2900  for (; pindex; pindex = ::ChainActive().Next(pindex))
2901  {
2902  vHeaders.push_back(pindex->GetBlockHeader());
2903  if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop)
2904  break;
2905  }
2906  // pindex can be nullptr either if we sent ::ChainActive().Tip() OR
2907  // if our peer has ::ChainActive().Tip() (and thus we are sending an empty
2908  // headers message). In both cases it's safe to update
2909  // pindexBestHeaderSent to be our tip.
2910  //
2911  // It is important that we simply reset the BestHeaderSent value here,
2912  // and not max(BestHeaderSent, newHeaderSent). We might have announced
2913  // the currently-being-connected tip using a compact block, which
2914  // resulted in the peer sending a headers request, which we respond to
2915  // without the new block. By resetting the BestHeaderSent, we ensure we
2916  // will re-announce the new block via headers (or compact blocks again)
2917  // in the SendMessages logic.
2918  nodestate->pindexBestHeaderSent = pindex ? pindex : ::ChainActive().Tip();
2920  return;
2921  }
2922 
2923  if (msg_type == NetMsgType::TX) {
2924  // Stop processing the transaction early if
2925  // 1) We are in blocks only mode and peer has no relay permission
2926  // 2) This peer is a block-relay-only peer
2927  if ((!g_relay_txes && !pfrom.HasPermission(PF_RELAY)) || (pfrom.m_tx_relay == nullptr))
2928  {
2929  LogPrint(BCLog::NET, "transaction sent in violation of protocol peer=%d\n", pfrom.GetId());
2930  pfrom.fDisconnect = true;
2931  return;
2932  }
2933 
2934  CTransactionRef ptx;
2935  vRecv >> ptx;
2936  const CTransaction& tx = *ptx;
2937 
2938  const uint256& txid = ptx->GetHash();
2939  const uint256& wtxid = ptx->GetWitnessHash();
2940 
2941  LOCK2(cs_main, g_cs_orphans);
2942 
2943  CNodeState* nodestate = State(pfrom.GetId());
2944 
2945  const uint256& hash = nodestate->m_wtxid_relay ? wtxid : txid;
2946  pfrom.AddKnownTx(hash);
2947  if (nodestate->m_wtxid_relay && txid != wtxid) {
2948  // Insert txid into filterInventoryKnown, even for
2949  // wtxidrelay peers. This prevents re-adding of
2950  // unconfirmed parents to the recently_announced
2951  // filter, when a child tx is requested. See
2952  // ProcessGetData().
2953  pfrom.AddKnownTx(txid);
2954  }
2955 
2956  m_txrequest.ReceivedResponse(pfrom.GetId(), txid);
2957  if (tx.HasWitness()) m_txrequest.ReceivedResponse(pfrom.GetId(), wtxid);
2958 
2959  // We do the AlreadyHaveTx() check using wtxid, rather than txid - in the
2960  // absence of witness malleation, this is strictly better, because the
2961  // recent rejects filter may contain the wtxid but rarely contains
2962  // the txid of a segwit transaction that has been rejected.
2963  // In the presence of witness malleation, it's possible that by only
2964  // doing the check with wtxid, we could overlook a transaction which
2965  // was confirmed with a different witness, or exists in our mempool
2966  // with a different witness, but this has limited downside:
2967  // mempool validation does its own lookup of whether we have the txid
2968  // already; and an adversary can already relay us old transactions
2969  // (older than our recency filter) if trying to DoS us, without any need
2970  // for witness malleation.
2971  if (AlreadyHaveTx(GenTxid(/* is_wtxid=*/true, wtxid), m_mempool)) {
2972  if (pfrom.HasPermission(PF_FORCERELAY)) {
2973  // Always relay transactions received from peers with forcerelay
2974  // permission, even if they were already in the mempool, allowing
2975  // the node to function as a gateway for nodes hidden behind it.
2976  if (!m_mempool.exists(tx.GetHash())) {
2977  LogPrintf("Not relaying non-mempool transaction %s from forcerelay peer=%d\n", tx.GetHash().ToString(), pfrom.GetId());
2978  } else {
2979  LogPrintf("Force relaying tx %s from peer=%d\n", tx.GetHash().ToString(), pfrom.GetId());
2981  }
2982  }
2983  return;
2984  }
2985 
2986  TxValidationState state;
2987  std::list<CTransactionRef> lRemovedTxn;
2988 
2989  if (AcceptToMemoryPool(m_mempool, state, ptx, &lRemovedTxn, false /* bypass_limits */)) {
2990  m_mempool.check(&::ChainstateActive().CoinsTip());
2991  // As this version of the transaction was acceptable, we can forget about any
2992  // requests for it.
2993  m_txrequest.ForgetTxHash(tx.GetHash());
2994  m_txrequest.ForgetTxHash(tx.GetWitnessHash());
2996  for (unsigned int i = 0; i < tx.vout.size(); i++) {
2997  auto it_by_prev = mapOrphanTransactionsByPrev.find(COutPoint(txid, i));
2998  if (it_by_prev != mapOrphanTransactionsByPrev.end()) {
2999  for (const auto& elem : it_by_prev->second) {
3000  peer->m_orphan_work_set.insert(elem->first);
3001  }
3002  }
3003  }
3004 
3005  pfrom.nLastTXTime = GetTime();
3006 
3007  LogPrint(BCLog::MEMPOOL, "AcceptToMemoryPool: peer=%d: accepted %s (poolsz %u txn, %u kB)\n",
3008  pfrom.GetId(),
3009  tx.GetHash().ToString(),
3011 
3012  for (const CTransactionRef& removedTx : lRemovedTxn) {
3013  AddToCompactExtraTransactions(removedTx);
3014  }
3015 
3016  // Recursively process any orphan transactions that depended on this one
3017  ProcessOrphanTx(peer->m_orphan_work_set);
3018  }
3020  {
3021  bool fRejectedParents = false; // It may be the case that the orphans parents have all been rejected
3022 
3023  // Deduplicate parent txids, so that we don't have to loop over
3024  // the same parent txid more than once down below.
3025  std::vector<uint256> unique_parents;
3026  unique_parents.reserve(tx.vin.size());
3027  for (const CTxIn& txin : tx.vin) {
3028  // We start with all parents, and then remove duplicates below.
3029  unique_parents.push_back(txin.prevout.hash);
3030  }
3031  std::sort(unique_parents.begin(), unique_parents.end());
3032  unique_parents.erase(std::unique(unique_parents.begin(), unique_parents.end()), unique_parents.end());
3033  for (const uint256& parent_txid : unique_parents) {
3034  if (recentRejects->contains(parent_txid)) {
3035  fRejectedParents = true;
3036  break;
3037  }
3038  }
3039  if (!fRejectedParents) {
3040  const auto current_time = GetTime<std::chrono::microseconds>();
3041 
3042  for (const uint256& parent_txid : unique_parents) {
3043  // Here, we only have the txid (and not wtxid) of the
3044  // inputs, so we only request in txid mode, even for
3045  // wtxidrelay peers.
3046  // Eventually we should replace this with an improved
3047  // protocol for getting all unconfirmed parents.
3048  const GenTxid gtxid{/* is_wtxid=*/false, parent_txid};
3049  pfrom.AddKnownTx(parent_txid);
3050  if (!AlreadyHaveTx(gtxid, m_mempool)) AddTxAnnouncement(pfrom, gtxid, current_time);
3051  }
3052  AddOrphanTx(ptx, pfrom.GetId());
3053 
3054  // Once added to the orphan pool, a tx is considered AlreadyHave, and we shouldn't request it anymore.
3055  m_txrequest.ForgetTxHash(tx.GetHash());
3056  m_txrequest.ForgetTxHash(tx.GetWitnessHash());
3057 
3058  // DoS prevention: do not allow mapOrphanTransactions to grow unbounded (see CVE-2012-3789)
3059  unsigned int nMaxOrphanTx = (unsigned int)std::max((int64_t)0, gArgs.GetArg("-maxorphantx", DEFAULT_MAX_ORPHAN_TRANSACTIONS));
3060  unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTx);
3061  if (nEvicted > 0) {
3062  LogPrint(BCLog::MEMPOOL, "mapOrphan overflow, removed %u tx\n", nEvicted);
3063  }
3064  } else {
3065  LogPrint(BCLog::MEMPOOL, "not keeping orphan with rejected parents %s\n",tx.GetHash().ToString());
3066  // We will continue to reject this tx since it has rejected
3067  // parents so avoid re-requesting it from other peers.
3068  // Here we add both the txid and the wtxid, as we know that
3069  // regardless of what witness is provided, we will not accept
3070  // this, so we don't need to allow for redownload of this txid
3071  // from any of our non-wtxidrelay peers.
3072  recentRejects->insert(tx.GetHash());
3073  recentRejects->insert(tx.GetWitnessHash());
3074  m_txrequest.ForgetTxHash(tx.GetHash());
3075  m_txrequest.ForgetTxHash(tx.GetWitnessHash());
3076  }
3077  } else {
3079  // We can add the wtxid of this transaction to our reject filter.
3080  // Do not add txids of witness transactions or witness-stripped
3081  // transactions to the filter, as they can have been malleated;
3082  // adding such txids to the reject filter would potentially
3083  // interfere with relay of valid transactions from peers that
3084  // do not support wtxid-based relay. See
3085  // https://github.com/bitcoin/bitcoin/issues/8279 for details.
3086  // We can remove this restriction (and always add wtxids to
3087  // the filter even for witness stripped transactions) once
3088  // wtxid-based relay is broadly deployed.
3089  // See also comments in https://github.com/bitcoin/bitcoin/pull/18044#discussion_r443419034
3090  // for concerns around weakening security of unupgraded nodes
3091  // if we start doing this too early.
3092  assert(recentRejects);
3093  recentRejects->insert(tx.GetWitnessHash());
3094  m_txrequest.ForgetTxHash(tx.GetWitnessHash());
3095  // If the transaction failed for TX_INPUTS_NOT_STANDARD,
3096  // then we know that the witness was irrelevant to the policy
3097  // failure, since this check depends only on the txid
3098  // (the scriptPubKey being spent is covered by the txid).
3099  // Add the txid to the reject filter to prevent repeated
3100  // processing of this transaction in the event that child
3101  // transactions are later received (resulting in
3102  // parent-fetching by txid via the orphan-handling logic).
3104  recentRejects->insert(tx.GetHash());
3105  m_txrequest.ForgetTxHash(tx.GetHash());
3106  }
3107  if (RecursiveDynamicUsage(*ptx) < 100000) {
3109  }
3110  }
3111  }
3112 
3113  // If a tx has been detected by recentRejects, we will have reached
3114  // this point and the tx will have been ignored. Because we haven't run
3115  // the tx through AcceptToMemoryPool, we won't have computed a DoS
3116  // score for it or determined exactly why we consider it invalid.
3117  //
3118  // This means we won't penalize any peer subsequently relaying a DoSy
3119  // tx (even if we penalized the first peer who gave it to us) because
3120  // we have to account for recentRejects showing false positives. In
3121  // other words, we shouldn't penalize a peer if we aren't *sure* they
3122  // submitted a DoSy tx.
3123  //
3124  // Note that recentRejects doesn't just record DoSy or invalid
3125  // transactions, but any tx not accepted by the mempool, which may be
3126  // due to node policy (vs. consensus). So we can't blanket penalize a
3127  // peer simply for relaying a tx that our recentRejects has caught,
3128  // regardless of false positives.
3129 
3130  if (state.IsInvalid()) {
3131  LogPrint(BCLog::MEMPOOLREJ, "%s from peer=%d was not accepted: %s\n", tx.GetHash().ToString(),
3132  pfrom.GetId(),
3133  state.ToString());
3134  MaybePunishNodeForTx(pfrom.GetId(), state);
3135  }
3136  return;
3137  }
3138 
3139  if (msg_type == NetMsgType::CMPCTBLOCK)
3140  {
3141  // Ignore cmpctblock received while importing
3142  if (fImporting || fReindex) {
3143  LogPrint(BCLog::NET, "Unexpected cmpctblock message received from peer %d\n", pfrom.GetId());
3144  return;
3145  }
3146 
3147  CBlockHeaderAndShortTxIDs cmpctblock;
3148  vRecv >> cmpctblock;
3149 
3150  bool received_new_header = false;
3151 
3152  {
3153  LOCK(cs_main);
3154 
3155  if (!LookupBlockIndex(cmpctblock.header.hashPrevBlock)) {
3156  // Doesn't connect (or is genesis), instead of DoSing in AcceptBlockHeader, request deeper headers
3157  if (!::ChainstateActive().IsInitialBlockDownload())
3159  return;
3160  }
3161 
3162  if (!LookupBlockIndex(cmpctblock.header.GetHash())) {
3163  received_new_header = true;
3164  }
3165  }
3166 
3167  const CBlockIndex *pindex = nullptr;
3168  BlockValidationState state;
3169  if (!m_chainman.ProcessNewBlockHeaders({cmpctblock.header}, state, m_chainparams, &pindex)) {
3170  if (state.IsInvalid()) {
3171  MaybePunishNodeForBlock(pfrom.GetId(), state, /*via_compact_block*/ true, "invalid header via cmpctblock");
3172  return;
3173  }
3174  }
3175 
3176  // When we succeed in decoding a block's txids from a cmpctblock
3177  // message we typically jump to the BLOCKTXN handling code, with a
3178  // dummy (empty) BLOCKTXN message, to re-use the logic there in
3179  // completing processing of the putative block (without cs_main).
3180  bool fProcessBLOCKTXN = false;
3182 
3183  // If we end up treating this as a plain headers message, call that as well
3184  // without cs_main.
3185  bool fRevertToHeaderProcessing = false;
3186 
3187  // Keep a CBlock for "optimistic" compactblock reconstructions (see
3188  // below)
3189  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
3190  bool fBlockReconstructed = false;
3191 
3192  {
3193  LOCK2(cs_main, g_cs_orphans);
3194  // If AcceptBlockHeader returned true, it set pindex
3195  assert(pindex);
3196  UpdateBlockAvailability(pfrom.GetId(), pindex->GetBlockHash());
3197 
3198  CNodeState *nodestate = State(pfrom.GetId());
3199 
3200  // If this was a new header with more work than our tip, update the
3201  // peer's last block announcement time
3202  if (received_new_header && pindex->nChainWork > ::ChainActive().Tip()->nChainWork) {
3203  nodestate->m_last_block_announcement = GetTime();
3204  }
3205 
3206  std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator blockInFlightIt = mapBlocksInFlight.find(pindex->GetBlockHash());
3207  bool fAlreadyInFlight = blockInFlightIt != mapBlocksInFlight.end();
3208 
3209  if (pindex->nStatus & BLOCK_HAVE_DATA) // Nothing to do here
3210  return;
3211 
3212  if (pindex->nChainWork <= ::ChainActive().Tip()->nChainWork || // We know something better
3213  pindex->nTx != 0) { // We had this block at some point, but pruned it
3214  if (fAlreadyInFlight) {
3215  // We requested this block for some reason, but our mempool will probably be useless
3216  // so we just grab the block via normal getdata
3217  std::vector<CInv> vInv(1);
3218  vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom), cmpctblock.header.GetHash());
3220  }
3221  return;
3222  }
3223 
3224  // If we're not close to tip yet, give up and let parallel block fetch work its magic
3225  if (!fAlreadyInFlight && !CanDirectFetch(m_chainparams.GetConsensus()))
3226  return;
3227 
3228  if (IsWitnessEnabled(pindex->pprev, m_chainparams.GetConsensus()) && !nodestate->fSupportsDesiredCmpctVersion) {
3229  // Don't bother trying to process compact blocks from v1 peers
3230  // after segwit activates.
3231  return;
3232  }
3233 
3234  // We want to be a bit conservative just to be extra careful about DoS
3235  // possibilities in compact block processing...
3236  if (pindex->nHeight <= ::ChainActive().Height() + 2) {
3237  if ((!fAlreadyInFlight && nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) ||
3238  (fAlreadyInFlight && blockInFlightIt->second.first == pfrom.GetId())) {
3239  std::list<QueuedBlock>::iterator* queuedBlockIt = nullptr;
3240  if (!MarkBlockAsInFlight(m_mempool, pfrom.GetId(), pindex->GetBlockHash(), pindex, &queuedBlockIt)) {
3241  if (!(*queuedBlockIt)->partialBlock)
3242  (*queuedBlockIt)->partialBlock.reset(new PartiallyDownloadedBlock(&m_mempool));
3243  else {
3244  // The block was already in flight using compact blocks from the same peer
3245  LogPrint(BCLog::NET, "Peer sent us compact block we were already syncing!\n");
3246  return;
3247  }
3248  }
3249 
3250  PartiallyDownloadedBlock& partialBlock = *(*queuedBlockIt)->partialBlock;
3251  ReadStatus status = partialBlock.InitData(cmpctblock, vExtraTxnForCompact);
3252  if (status == READ_STATUS_INVALID) {
3253  MarkBlockAsReceived(pindex->GetBlockHash()); // Reset in-flight state in case Misbehaving does not result in a disconnect
3254  Misbehaving(pfrom.GetId(), 100, "invalid compact block");
3255  return;
3256  } else if (status == READ_STATUS_FAILED) {
3257  // Duplicate txindexes, the block is now in-flight, so just request it
3258  std::vector<CInv> vInv(1);
3259  vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom), cmpctblock.header.GetHash());
3261  return;
3262  }
3263 
3265  for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) {
3266  if (!partialBlock.IsTxAvailable(i))
3267  req.indexes.push_back(i);
3268  }
3269  if (req.indexes.empty()) {
3270  // Dirty hack to jump to BLOCKTXN code (TODO: move message handling into their own functions)
3271  BlockTransactions txn;
3272  txn.blockhash = cmpctblock.header.GetHash();
3273  blockTxnMsg << txn;
3274  fProcessBLOCKTXN = true;
3275  } else {
3276  req.blockhash = pindex->GetBlockHash();
3278  }
3279  } else {
3280  // This block is either already in flight from a different
3281  // peer, or this peer has too many blocks outstanding to
3282  // download from.
3283  // Optimistically try to reconstruct anyway since we might be
3284  // able to without any round trips.
3285  PartiallyDownloadedBlock tempBlock(&m_mempool);
3286  ReadStatus status = tempBlock.InitData(cmpctblock, vExtraTxnForCompact);
3287  if (status != READ_STATUS_OK) {
3288  // TODO: don't ignore failures
3289  return;
3290  }
3291  std::vector<CTransactionRef> dummy;
3292  status = tempBlock.FillBlock(*pblock, dummy);
3293  if (status == READ_STATUS_OK) {
3294  fBlockReconstructed = true;
3295  }
3296  }
3297  } else {
3298  if (fAlreadyInFlight) {
3299  // We requested this block, but its far into the future, so our
3300  // mempool will probably be useless - request the block normally
3301  std::vector<CInv> vInv(1);
3302  vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom), cmpctblock.header.GetHash());
3304  return;
3305  } else {
3306  // If this was an announce-cmpctblock, we want the same treatment as a header message
3307  fRevertToHeaderProcessing = true;
3308  }
3309  }
3310  } // cs_main
3311 
3312  if (fProcessBLOCKTXN) {
3313  return ProcessMessage(pfrom, NetMsgType::BLOCKTXN, blockTxnMsg, time_received, interruptMsgProc);
3314  }
3315 
3316  if (fRevertToHeaderProcessing) {
3317  // Headers received from HB compact block peers are permitted to be
3318  // relayed before full validation (see BIP 152), so we don't want to disconnect
3319  // the peer if the header turns out to be for an invalid block.
3320  // Note that if a peer tries to build on an invalid chain, that
3321  // will be detected and the peer will be disconnected/discouraged.
3322  return ProcessHeadersMessage(pfrom, {cmpctblock.header}, /*via_compact_block=*/true);
3323  }
3324 
3325  if (fBlockReconstructed) {
3326  // If we got here, we were able to optimistically reconstruct a
3327  // block that is in flight from some other peer.
3328  {
3329  LOCK(cs_main);
3330  mapBlockSource.emplace(pblock->GetHash(), std::make_pair(pfrom.GetId(), false));
3331  }
3332  bool fNewBlock = false;
3333  // Setting fForceProcessing to true means that we bypass some of
3334  // our anti-DoS protections in AcceptBlock, which filters
3335  // unrequested blocks that might be trying to waste our resources
3336  // (eg disk space). Because we only try to reconstruct blocks when
3337  // we're close to caught up (via the CanDirectFetch() requirement
3338  // above, combined with the behavior of not requesting blocks until
3339  // we have a chain with at least nMinimumChainWork), and we ignore
3340  // compact blocks with less work than our tip, it is safe to treat
3341  // reconstructed compact blocks as having been requested.
3342  m_chainman.ProcessNewBlock(m_chainparams, pblock, /*fForceProcessing=*/true, &fNewBlock);
3343  if (fNewBlock) {
3344  pfrom.nLastBlockTime = GetTime();
3345  } else {
3346  LOCK(cs_main);
3347  mapBlockSource.erase(pblock->GetHash());
3348  }
3349  LOCK(cs_main); // hold cs_main for CBlockIndex::IsValid()
3350  if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS)) {
3351  // Clear download state for this block, which is in
3352  // process from some other peer. We do this after calling
3353  // ProcessNewBlock so that a malleated cmpctblock announcement
3354  // can't be used to interfere with block relay.
3355  MarkBlockAsReceived(pblock->GetHash());
3356  }
3357  }
3358  return;
3359  }
3360 
3361  if (msg_type == NetMsgType::BLOCKTXN)
3362  {
3363  // Ignore blocktxn received while importing
3364  if (fImporting || fReindex) {
3365  LogPrint(BCLog::NET, "Unexpected blocktxn message received from peer %d\n", pfrom.GetId());
3366  return;
3367  }
3368 
3369  BlockTransactions resp;
3370  vRecv >> resp;
3371 
3372  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
3373  bool fBlockRead = false;
3374  {
3375  LOCK(cs_main);
3376 
3377  std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator it = mapBlocksInFlight.find(resp.blockhash);
3378  if (it == mapBlocksInFlight.end() || !it->second.second->partialBlock ||
3379  it->second.first != pfrom.GetId()) {
3380  LogPrint(BCLog::NET, "Peer %d sent us block transactions for block we weren't expecting\n", pfrom.GetId());
3381  return;
3382  }
3383 
3384  PartiallyDownloadedBlock& partialBlock = *it->second.second->partialBlock;
3385  ReadStatus status = partialBlock.FillBlock(*pblock, resp.txn);
3386  if (status == READ_STATUS_INVALID) {
3387  MarkBlockAsReceived(resp.blockhash); // Reset in-flight state in case Misbehaving does not result in a disconnect
3388  Misbehaving(pfrom.GetId(), 100, "invalid compact block/non-matching block transactions");
3389  return;
3390  } else if (status == READ_STATUS_FAILED) {
3391  // Might have collided, fall back to getdata now :(
3392  std::vector<CInv> invs;
3393  invs.push_back(CInv(MSG_BLOCK | GetFetchFlags(pfrom), resp.blockhash));
3395  } else {
3396  // Block is either okay, or possibly we received
3397  // READ_STATUS_CHECKBLOCK_FAILED.
3398  // Note that CheckBlock can only fail for one of a few reasons:
3399  // 1. bad-proof-of-work (impossible here, because we've already
3400  // accepted the header)
3401  // 2. merkleroot doesn't match the transactions given (already
3402  // caught in FillBlock with READ_STATUS_FAILED, so
3403  // impossible here)
3404  // 3. the block is otherwise invalid (eg invalid coinbase,
3405  // block is too big, too many legacy sigops, etc).
3406  // So if CheckBlock failed, #3 is the only possibility.
3407  // Under BIP 152, we don't discourage the peer unless proof of work is
3408  // invalid (we don't require all the stateless checks to have
3409  // been run). This is handled below, so just treat this as
3410  // though the block was successfully read, and rely on the
3411  // handling in ProcessNewBlock to ensure the block index is
3412  // updated, etc.
3413  MarkBlockAsReceived(resp.blockhash); // it is now an empty pointer
3414  fBlockRead = true;
3415  // mapBlockSource is used for potentially punishing peers and
3416  // updating which peers send us compact blocks, so the race
3417  // between here and cs_main in ProcessNewBlock is fine.
3418  // BIP 152 permits peers to relay compact blocks after validating
3419  // the header only; we should not punish peers if the block turns
3420  // out to be invalid.
3421  mapBlockSource.emplace(resp.blockhash, std::make_pair(pfrom.GetId(), false));
3422  }
3423  } // Don't hold cs_main when we call into ProcessNewBlock
3424  if (fBlockRead) {
3425  bool fNewBlock = false;
3426  // Since we requested this block (it was in mapBlocksInFlight), force it to be processed,
3427  // even if it would not be a candidate for new tip (missing previous block, chain not long enough, etc)
3428  // This bypasses some anti-DoS logic in AcceptBlock (eg to prevent
3429  // disk-space attacks), but this should be safe due to the
3430  // protections in the compact block handler -- see related comment
3431  // in compact block optimistic reconstruction handling.
3432  m_chainman.ProcessNewBlock(m_chainparams, pblock, /*fForceProcessing=*/true, &fNewBlock);
3433  if (fNewBlock) {
3434  pfrom.nLastBlockTime = GetTime();
3435  } else {
3436  LOCK(cs_main);
3437  mapBlockSource.erase(pblock->GetHash());
3438  }
3439  }
3440  return;
3441  }
3442 
3443  if (msg_type == NetMsgType::HEADERS)
3444  {
3445  // Ignore headers received while importing
3446  if (fImporting || fReindex) {
3447  LogPrint(BCLog::NET, "Unexpected headers message received from peer %d\n", pfrom.GetId());
3448  return;
3449  }
3450 
3451  std::vector<CBlockHeader> headers;
3452 
3453  // Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks.
3454  unsigned int nCount = ReadCompactSize(vRecv);
3455  if (nCount > MAX_HEADERS_RESULTS) {
3456  Misbehaving(pfrom.GetId(), 20, strprintf("headers message size = %u", nCount));
3457  return;
3458  }
3459  headers.resize(nCount);
3460  for (unsigned int n = 0; n < nCount; n++) {
3461  vRecv >> headers[n];
3462  ReadCompactSize(vRecv); // ignore tx count; assume it is 0.
3463  }
3464 
3465  return ProcessHeadersMessage(pfrom, headers, /*via_compact_block=*/false);
3466  }
3467 
3468  if (msg_type == NetMsgType::BLOCK)
3469  {
3470  // Ignore block received while importing
3471  if (fImporting || fReindex) {
3472  LogPrint(BCLog::NET, "Unexpected block message received from peer %d\n", pfrom.GetId());
3473  return;
3474  }
3475 
3476  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
3477  vRecv >> *pblock;
3478 
3479  LogPrint(BCLog::NET, "received block %s peer=%d\n", pblock->GetHash().ToString(), pfrom.GetId());
3480 
3481  bool forceProcessing = false;
3482  const uint256 hash(pblock->GetHash());
3483  {
3484  LOCK(cs_main);
3485  // Also always process if we requested the block explicitly, as we may
3486  // need it even though it is not a candidate for a new best tip.
3487  forceProcessing |= MarkBlockAsReceived(hash);
3488  // mapBlockSource is only used for punishing peers and setting
3489  // which peers send us compact blocks, so the race between here and
3490  // cs_main in ProcessNewBlock is fine.
3491  mapBlockSource.emplace(hash, std::make_pair(pfrom.GetId(), true));
3492  }
3493  bool fNewBlock = false;
3494  m_chainman.ProcessNewBlock(m_chainparams, pblock, forceProcessing, &fNewBlock);
3495  if (fNewBlock) {
3496  pfrom.nLastBlockTime = GetTime();
3497  } else {
3498  LOCK(cs_main);
3499  mapBlockSource.erase(pblock->GetHash());
3500  }
3501  return;
3502  }
3503 
3504  if (msg_type == NetMsgType::GETADDR) {
3505  // This asymmetric behavior for inbound and outbound connections was introduced
3506  // to prevent a fingerprinting attack: an attacker can send specific fake addresses
3507  // to users' AddrMan and later request them by sending getaddr messages.
3508  // Making nodes which are behind NAT and can only make outgoing connections ignore
3509  // the getaddr message mitigates the attack.
3510  if (!pfrom.IsInboundConn()) {
3511  LogPrint(BCLog::NET, "Ignoring \"getaddr\" from %s connection. peer=%d\n", pfrom.ConnectionTypeAsString(), pfrom.GetId());
3512  return;
3513  }
3514 
3515  // Only send one GetAddr response per connection to reduce resource waste
3516  // and discourage addr stamping of INV announcements.
3517  if (pfrom.fSentAddr) {
3518  LogPrint(BCLog::NET, "Ignoring repeated \"getaddr\". peer=%d\n", pfrom.GetId());
3519  return;
3520  }
3521  pfrom.fSentAddr = true;
3522 
3523  pfrom.vAddrToSend.clear();
3524  std::vector<CAddress> vAddr;
3525  if (pfrom.HasPermission(PF_ADDR)) {
3526  vAddr = m_connman.GetAddresses(MAX_ADDR_TO_SEND, MAX_PCT_ADDR_TO_SEND);
3527  } else {
3528  vAddr = m_connman.GetAddresses(pfrom, MAX_ADDR_TO_SEND, MAX_PCT_ADDR_TO_SEND);
3529  }
3530  FastRandomContext insecure_rand;
3531  for (const CAddress &addr : vAddr) {
3532  pfrom.PushAddress(addr, insecure_rand);
3533  }
3534  return;
3535  }
3536 
3537  if (msg_type == NetMsgType::MEMPOOL) {
3538  if (!(pfrom.GetLocalServices() & NODE_BLOOM) && !pfrom.HasPermission(PF_MEMPOOL))
3539  {
3540  if (!pfrom.HasPermission(PF_NOBAN))
3541  {
3542  LogPrint(BCLog::NET, "mempool request with bloom filters disabled, disconnect peer=%d\n", pfrom.GetId());
3543  pfrom.fDisconnect = true;
3544  }
3545  return;
3546  }
3547 
3549  {
3550  if (!pfrom.HasPermission(PF_NOBAN))
3551  {
3552  LogPrint(BCLog::NET, "mempool request with bandwidth limit reached, disconnect peer=%d\n", pfrom.GetId());
3553  pfrom.fDisconnect = true;
3554  }
3555  return;
3556  }
3557 
3558  if (pfrom.m_tx_relay != nullptr) {
3559  LOCK(pfrom.m_tx_relay->cs_tx_inventory);
3560  pfrom.m_tx_relay->fSendMempool = true;
3561  }
3562  return;
3563  }
3564 
3565  if (msg_type == NetMsgType::PING) {
3566  if (pfrom.GetCommonVersion() > BIP0031_VERSION) {
3567  uint64_t nonce = 0;
3568  vRecv >> nonce;
3569  // Echo the message back with the nonce. This allows for two useful features:
3570  //
3571  // 1) A remote node can quickly check if the connection is operational
3572  // 2) Remote nodes can measure the latency of the network thread. If this node
3573  // is overloaded it won't respond to pings quickly and the remote node can
3574  // avoid sending us more work, like chain download requests.
3575  //
3576  // The nonce stops the remote getting confused between different pings: without
3577  // it, if the remote node sends a ping once per second and this node takes 5
3578  // seconds to respond to each, the 5th ping the remote sends would appear to
3579  // return very quickly.
3581  }
3582  return;
3583  }
3584 
3585  if (msg_type == NetMsgType::PONG) {
3586  const auto ping_end = time_received;
3587  uint64_t nonce = 0;
3588  size_t nAvail = vRecv.in_avail();
3589  bool bPingFinished = false;
3590  std::string sProblem;
3591 
3592  if (nAvail >= sizeof(nonce)) {
3593  vRecv >> nonce;
3594 
3595  // Only process pong message if there is an outstanding ping (old ping without nonce should never pong)
3596  if (pfrom.nPingNonceSent != 0) {
3597  if (nonce == pfrom.nPingNonceSent) {
3598  // Matching pong received, this ping is no longer outstanding
3599  bPingFinished = true;
3600  const auto ping_time = ping_end - pfrom.m_ping_start.load();
3601  if (ping_time.count() >= 0) {
3602  // Successful ping time measurement, replace previous
3603  pfrom.nPingUsecTime = count_microseconds(ping_time);
3604  pfrom.nMinPingUsecTime = std::min(pfrom.nMinPingUsecTime.load(), count_microseconds(ping_time));
3605  } else {
3606  // This should never happen
3607  sProblem = "Timing mishap";
3608  }
3609  } else {
3610  // Nonce mismatches are normal when pings are overlapping
3611  sProblem = "Nonce mismatch";
3612  if (nonce == 0) {
3613  // This is most likely a bug in another implementation somewhere; cancel this ping
3614  bPingFinished = true;
3615  sProblem = "Nonce zero";
3616  }
3617  }
3618  } else {
3619  sProblem = "Unsolicited pong without ping";
3620  }
3621  } else {
3622  // This is most likely a bug in another implementation somewhere; cancel this ping
3623  bPingFinished = true;
3624  sProblem = "Short payload";
3625  }
3626 
3627  if (!(sProblem.empty())) {
3628  LogPrint(BCLog::NET, "pong peer=%d: %s, %x expected, %x received, %u bytes\n",
3629  pfrom.GetId(),
3630  sProblem,
3631  pfrom.nPingNonceSent,
3632  nonce,
3633  nAvail);
3634  }
3635  if (bPingFinished) {
3636  pfrom.nPingNonceSent = 0;
3637  }
3638  return;
3639  }
3640 
3641  if (msg_type == NetMsgType::FILTERLOAD) {
3642  if (!(pfrom.GetLocalServices() & NODE_BLOOM)) {
3643  pfrom.fDisconnect = true;
3644  return;
3645  }
3646  CBloomFilter filter;
3647  vRecv >> filter;
3648 
3649  if (!filter.IsWithinSizeConstraints())
3650  {
3651  // There is no excuse for sending a too-large filter
3652  Misbehaving(pfrom.GetId(), 100, "too-large bloom filter");
3653  }
3654  else if (pfrom.m_tx_relay != nullptr)
3655  {
3656  LOCK(pfrom.m_tx_relay->cs_filter);
3657  pfrom.m_tx_relay->pfilter.reset(new CBloomFilter(filter));
3658  pfrom.m_tx_relay->fRelayTxes = true;
3659  }
3660  return;
3661  }
3662 
3663  if (msg_type == NetMsgType::FILTERADD) {
3664  if (!(pfrom.GetLocalServices() & NODE_BLOOM)) {
3665  pfrom.fDisconnect = true;
3666  return;
3667  }
3668  std::vector<unsigned char> vData;
3669  vRecv >> vData;
3670 
3671  // Nodes must NEVER send a data item > 520 bytes (the max size for a script data object,
3672  // and thus, the maximum size any matched object can have) in a filteradd message
3673  bool bad = false;
3674  if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) {
3675  bad = true;
3676  } else if (pfrom.m_tx_relay != nullptr) {
3677  LOCK(pfrom.m_tx_relay->cs_filter);
3678  if (pfrom.m_tx_relay->pfilter) {
3679  pfrom.m_tx_relay->pfilter->insert(vData);
3680  } else {
3681  bad = true;
3682  }
3683  }
3684  if (bad) {
3685  Misbehaving(pfrom.GetId(), 100, "bad filteradd message");
3686  }
3687  return;
3688  }
3689 
3690  if (msg_type == NetMsgType::FILTERCLEAR) {
3691  if (!(pfrom.GetLocalServices() & NODE_BLOOM)) {
3692  pfrom.fDisconnect = true;
3693  return;
3694  }
3695  if (pfrom.m_tx_relay == nullptr) {
3696  return;
3697  }
3698  LOCK(pfrom.m_tx_relay->cs_filter);
3699  pfrom.m_tx_relay->pfilter = nullptr;
3700  pfrom.m_tx_relay->fRelayTxes = true;
3701  return;
3702  }
3703 
3704  if (msg_type == NetMsgType::FEEFILTER) {
3705  CAmount newFeeFilter = 0;
3706  vRecv >> newFeeFilter;
3707  if (MoneyRange(newFeeFilter)) {
3708  if (pfrom.m_tx_relay != nullptr) {
3709  LOCK(pfrom.m_tx_relay->cs_feeFilter);
3710  pfrom.m_tx_relay->minFeeFilter = newFeeFilter;
3711  }
3712  LogPrint(BCLog::NET, "received: feefilter of %s from peer=%d\n", CFeeRate(newFeeFilter).ToString(), pfrom.GetId());
3713  }
3714  return;
3715  }
3716 
3717  if (msg_type == NetMsgType::GETCFILTERS) {
3718  ProcessGetCFilters(pfrom, vRecv, m_chainparams, m_connman);
3719  return;
3720  }
3721 
3722  if (msg_type == NetMsgType::GETCFHEADERS) {
3724  return;
3725  }
3726 
3727  if (msg_type == NetMsgType::GETCFCHECKPT) {
3729  return;
3730  }
3731 
3732  if (msg_type == NetMsgType::NOTFOUND) {
3733  std::vector<CInv> vInv;
3734  vRecv >> vInv;
3736  LOCK(::cs_main);
3737  for (CInv &inv : vInv) {
3738  if (inv.IsGenTxMsg()) {
3739  // If we receive a NOTFOUND message for a tx we requested, mark the announcement for it as
3740  // completed in TxRequestTracker.
3741  m_txrequest.ReceivedResponse(pfrom.GetId(), inv.hash);
3742  }
3743  }
3744  }
3745  return;
3746  }
3747 
3748  // Ignore unknown commands for extensibility
3749  LogPrint(BCLog::NET, "Unknown command \"%s\" from peer=%d\n", SanitizeString(msg_type), pfrom.GetId());
3750  return;
3751 }
3752 
{
    const NodeId peer_id{pnode.GetId()};
    PeerRef peer = GetPeerRef(peer_id);
    // The peer may already have been removed from the peer map; nothing to do then.
    if (peer == nullptr) return false;

    {
        LOCK(peer->m_misbehavior_mutex);

        // There's nothing to do if the m_should_discourage flag isn't set
        if (!peer->m_should_discourage) return false;

        // Clear the flag under the lock so a later call doesn't punish the
        // same peer twice for the same misbehavior.
        peer->m_should_discourage = false;
    } // peer.m_misbehavior_mutex

    if (pnode.HasPermission(PF_NOBAN)) {
        // We never disconnect or discourage peers for bad behavior if they have the NOBAN permission flag
        LogPrintf("Warning: not punishing noban peer %d!\n", peer_id);
        return false;
    }

    if (pnode.IsManualConn()) {
        // We never disconnect or discourage manual peers for bad behavior
        LogPrintf("Warning: not punishing manually connected peer %d!\n", peer_id);
        return false;
    }

    if (pnode.addr.IsLocal()) {
        // We disconnect local peers for bad behavior but don't discourage (since that would discourage
        // all peers on the same local address)
        LogPrintf("Warning: disconnecting but not discouraging local peer %d!\n", peer_id);
        pnode.fDisconnect = true;
        return true;
    }

    // Normal case: Disconnect the peer and discourage all nodes sharing the address
    LogPrintf("Disconnecting and discouraging peer %d!\n", peer_id);
    if (m_banman) m_banman->Discourage(pnode.addr);
    m_connman.DisconnectNode(pnode.addr);
    return true;
}
3794 
// Process at most one queued network message from this peer. Returns true if
// there is (likely) more work to do for this peer, false otherwise.
bool PeerManager::ProcessMessages(CNode* pfrom, std::atomic<bool>& interruptMsgProc)
{
    bool fMoreWork = false;

    PeerRef peer = GetPeerRef(pfrom->GetId());
    if (peer == nullptr) return false;

    {
        // First, service any getdata requests left over from a previous call.
        LOCK(peer->m_getdata_requests_mutex);
        if (!peer->m_getdata_requests.empty()) {
            ProcessGetData(*pfrom, *peer, m_chainparams, m_connman, m_mempool, interruptMsgProc);
        }
    }

    {
        // Retry orphan transactions whose missing parents may have arrived.
        LOCK2(cs_main, g_cs_orphans);
        if (!peer->m_orphan_work_set.empty()) {
            ProcessOrphanTx(peer->m_orphan_work_set);
        }
    }

    if (pfrom->fDisconnect)
        return false;

    // this maintains the order of responses
    // and prevents m_getdata_requests to grow unbounded
    {
        LOCK(peer->m_getdata_requests_mutex);
        if (!peer->m_getdata_requests.empty()) return true;
    }

    {
        LOCK(g_cs_orphans);
        if (!peer->m_orphan_work_set.empty()) return true;
    }

    // Don't bother if send buffer is too full to respond anyway
    if (pfrom->fPauseSend)
        return false;

    std::list<CNetMessage> msgs;
    {
        LOCK(pfrom->cs_vProcessMsg);
        if (pfrom->vProcessMsg.empty())
            return false;
        // Just take one message
        msgs.splice(msgs.begin(), pfrom->vProcessMsg, pfrom->vProcessMsg.begin());
        pfrom->nProcessQueueSize -= msgs.front().m_raw_message_size;
        fMoreWork = !pfrom->vProcessMsg.empty();
    }
    CNetMessage& msg(msgs.front());

    msg.SetVersion(pfrom->GetCommonVersion());
    const std::string& msg_type = msg.m_command;

    // Message size
    unsigned int nMessageSize = msg.m_message_size;

    try {
        ProcessMessage(*pfrom, msg_type, msg.m_recv, msg.m_time, interruptMsgProc);
        if (interruptMsgProc) return false;
        {
            // If the handler queued getdata work, make sure we get called again.
            LOCK(peer->m_getdata_requests_mutex);
            if (!peer->m_getdata_requests.empty()) fMoreWork = true;
        }
    } catch (const std::exception& e) {
        // A malformed message must not crash the node; log and move on.
        LogPrint(BCLog::NET, "%s(%s, %u bytes): Exception '%s' (%s) caught\n", __func__, SanitizeString(msg_type), nMessageSize, e.what(), typeid(e).name());
    } catch (...) {
        LogPrint(BCLog::NET, "%s(%s, %u bytes): Unknown exception caught\n", __func__, SanitizeString(msg_type), nMessageSize);
    }

    return fMoreWork;
}
3869 
3870 void PeerManager::ConsiderEviction(CNode& pto, int64_t time_in_seconds)
3871 {
3872  AssertLockHeld(cs_main);
3873 
3874  CNodeState &state = *State(pto.GetId());
3875  const CNetMsgMaker msgMaker(pto.GetCommonVersion());
3876 
3877  if (!state.m_chain_sync.m_protect && pto.IsOutboundOrBlockRelayConn() && state.fSyncStarted) {
3878  // This is an outbound peer subject to disconnection if they don't
3879  // announce a block with as much work as the current tip within
3880  // CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds (note: if
3881  // their chain has more work than ours, we should sync to it,
3882  // unless it's invalid, in which case we should find that out and
3883  // disconnect from them elsewhere).
3884  if (state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= ::ChainActive().Tip()->nChainWork) {
3885  if (state.m_chain_sync.m_timeout != 0) {
3886  state.m_chain_sync.m_timeout = 0;
3887  state.m_chain_sync.m_work_header = nullptr;
3888  state.m_chain_sync.m_sent_getheaders = false;
3889  }
3890  } else if (state.m_chain_sync.m_timeout == 0 || (state.m_chain_sync.m_work_header != nullptr && state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= state.m_chain_sync.m_work_header->nChainWork)) {
3891  // Our best block known by this peer is behind our tip, and we're either noticing
3892  // that for the first time, OR this peer was able to catch up to some earlier point
3893  // where we checked against our tip.
3894  // Either way, set a new timeout based on current tip.
3895  state.m_chain_sync.m_timeout = time_in_seconds + CHAIN_SYNC_TIMEOUT;
3896  state.m_chain_sync.m_work_header = ::ChainActive().Tip();
3897  state.m_chain_sync.m_sent_getheaders = false;
3898  } else if (state.m_chain_sync.m_timeout > 0 && time_in_seconds > state.m_chain_sync.m_timeout) {
3899  // No evidence yet that our peer has synced to a chain with work equal to that
3900  // of our tip, when we first detected it was behind. Send a single getheaders
3901  // message to give the peer a chance to update us.
3902  if (state.m_chain_sync.m_sent_getheaders) {
3903  // They've run out of time to catch up!
3904  LogPrintf("Disconnecting outbound peer %d for old chain, best known block = %s\n", pto.GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>");
3905  pto.fDisconnect = true;
3906  } else {
3907  assert(state.m_chain_sync.m_work_header);
3908  LogPrint(BCLog::NET, "sending getheaders to outbound peer=%d to verify chain work (current best known block:%s, benchmark blockhash: %s)\n", pto.GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>", state.m_chain_sync.m_work_header->GetBlockHash().ToString());
3909  m_connman.PushMessage(&pto, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(state.m_chain_sync.m_work_header->pprev), uint256()));
3910  state.m_chain_sync.m_sent_getheaders = true;
3911  constexpr int64_t HEADERS_RESPONSE_TIME = 120; // 2 minutes
3912  // Bump the timeout to allow a response, which could clear the timeout
3913  // (if the response shows the peer has synced), reset the timeout (if
3914  // the peer syncs to the required work but not to our tip), or result
3915  // in disconnect (if we advance to the timeout and pindexBestKnownBlock
3916  // has not sufficiently progressed)
3917  state.m_chain_sync.m_timeout = time_in_seconds + HEADERS_RESPONSE_TIME;
3918  }
3919  }
3920  }
3921 }
3922 
// If we have more outbound peers than our target, pick the one that least
// recently announced a new block to us and disconnect it (subject to some
// safety conditions below).
void PeerManager::EvictExtraOutboundPeers(int64_t time_in_seconds)
{
    // Check whether we have too many outbound peers
    int extra_peers = m_connman.GetExtraOutboundCount();
    if (extra_peers > 0) {
        // If we have more outbound peers than we target, disconnect one.
        // Pick the outbound peer that least recently announced
        // us a new block, with ties broken by choosing the more recent
        // connection (higher node id)
        NodeId worst_peer = -1;
        int64_t oldest_block_announcement = std::numeric_limits<int64_t>::max();

        m_connman.ForEachNode([&](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
            AssertLockHeld(::cs_main);

            // Ignore non-outbound peers, or nodes marked for disconnect already
            if (!pnode->IsOutboundOrBlockRelayConn() || pnode->fDisconnect) return;
            CNodeState *state = State(pnode->GetId());
            if (state == nullptr) return; // shouldn't be possible, but just in case
            // Don't evict our protected peers
            if (state->m_chain_sync.m_protect) return;
            // Don't evict our block-relay-only peers.
            if (pnode->m_tx_relay == nullptr) return;
            // Track the candidate with the oldest block announcement; on a
            // tie, prefer the higher node id (the more recent connection).
            if (state->m_last_block_announcement < oldest_block_announcement || (state->m_last_block_announcement == oldest_block_announcement && pnode->GetId() > worst_peer)) {
                worst_peer = pnode->GetId();
                oldest_block_announcement = state->m_last_block_announcement;
            }
        });
        if (worst_peer != -1) {
            bool disconnected = m_connman.ForNode(worst_peer, [&](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
                AssertLockHeld(::cs_main);

                // Only disconnect a peer that has been connected to us for
                // some reasonable fraction of our check-frequency, to give
                // it time for new information to have arrived.
                // Also don't disconnect any peer we're trying to download a
                // block from.
                CNodeState &state = *State(pnode->GetId());
                if (time_in_seconds - pnode->nTimeConnected > MINIMUM_CONNECT_TIME && state.nBlocksInFlight == 0) {
                    LogPrint(BCLog::NET, "disconnecting extra outbound peer=%d (last block announcement received at time %d)\n", pnode->GetId(), oldest_block_announcement);
                    pnode->fDisconnect = true;
                    return true;
                } else {
                    LogPrint(BCLog::NET, "keeping outbound peer=%d chosen for eviction (connect time: %d, blocks_in_flight: %d)\n", pnode->GetId(), pnode->nTimeConnected, state.nBlocksInFlight);
                    return false;
                }
            });
            if (disconnected) {
                // If we disconnected an extra peer, that means we successfully
                // connected to at least one peer after the last time we
                // detected a stale tip. Don't try any more extra peers until
                // we next detect a stale tip, to limit the load we put on the
                // network from these extra connections.
            }
        }
    }
}
3981 
{
    LOCK(cs_main);

    int64_t time_in_seconds = GetTime();

    // First, drop an extra outbound peer if we're over target.
    EvictExtraOutboundPeers(time_in_seconds);

    // Rate-limit the stale-tip check to once per STALE_CHECK_INTERVAL.
    if (time_in_seconds > m_stale_tip_check_time) {
        // Check whether our tip is stale, and if so, allow using an extra
        // outbound peer
        if (!fImporting && !fReindex && m_connman.GetNetworkActive() && m_connman.GetUseAddrmanOutgoing() && TipMayBeStale(m_chainparams.GetConsensus())) {
            LogPrintf("Potential stale tip detected, will try using extra outbound peer (last tip update: %d seconds ago)\n", time_in_seconds - g_last_tip_update);
        } else if (m_connman.GetTryNewOutboundPeer()) {
        }
        // Schedule the next stale-tip check.
        m_stale_tip_check_time = time_in_seconds + STALE_CHECK_INTERVAL;
    }
}
4002 
4003 namespace {
4004 class CompareInvMempoolOrder
4005 {
4006  CTxMemPool *mp;
4007  bool m_wtxid_relay;
4008 public:
4009  explicit CompareInvMempoolOrder(CTxMemPool *_mempool, bool use_wtxid)
4010  {
4011  mp = _mempool;
4012  m_wtxid_relay = use_wtxid;
4013  }
4014 
4015  bool operator()(std::set<uint256>::iterator a, std::set<uint256>::iterator b)
4016  {
4017  /* As std::make_heap produces a max-heap, we want the entries with the
4018  * fewest ancestors/highest fee to sort later. */
4019  return mp->CompareDepthAndScore(*b, *a, m_wtxid_relay);
4020  }
4021 };
4022 }
4023 
4025 {
4026  const Consensus::Params& consensusParams = m_chainparams.GetConsensus();
4027 
4028  // We must call MaybeDiscourageAndDisconnect first, to ensure that we'll
4029  // disconnect misbehaving peers even before the version handshake is complete.
4030  if (MaybeDiscourageAndDisconnect(*pto)) return true;
4031 
4032  // Don't send anything until the version handshake is complete
4033  if (!pto->fSuccessfullyConnected || pto->fDisconnect)
4034  return true;
4035 
4036  // If we get here, the outgoing message serialization version is set and can't change.
4037  const CNetMsgMaker msgMaker(pto->GetCommonVersion());
4038 
4039  //
4040  // Message: ping
4041  //
4042  bool pingSend = false;
4043  if (pto->fPingQueued) {
4044  // RPC ping request by user
4045  pingSend = true;
4046  }
4047  if (pto->nPingNonceSent == 0 && pto->m_ping_start.load() + PING_INTERVAL < GetTime<std::chrono::microseconds>()) {
4048  // Ping automatically sent as a latency probe & keepalive.
4049  pingSend = true;
4050  }
4051  if (pingSend) {
4052  uint64_t nonce = 0;
4053  while (nonce == 0) {
4054  GetRandBytes((unsigned char*)&nonce, sizeof(nonce));
4055  }
4056  pto->fPingQueued = false;
4057  pto->m_ping_start = GetTime<std::chrono::microseconds>();
4058  if (pto->GetCommonVersion() > BIP0031_VERSION) {
4059  pto->nPingNonceSent = nonce;
4061  } else {
4062  // Peer is too old to support ping command with nonce, pong will never arrive.
4063  pto->nPingNonceSent = 0;
4065  }
4066  }
4067 
4068  {
4069  LOCK(cs_main);
4070 
4071  CNodeState &state = *State(pto->GetId());
4072 
4073  // Address refresh broadcast
4074  auto current_time = GetTime<std::chrono::microseconds>();
4075 
4076  if (pto->RelayAddrsWithConn() && !::ChainstateActive().IsInitialBlockDownload() && pto->m_next_local_addr_send < current_time) {
4077  AdvertiseLocal(pto);
4078  pto->m_next_local_addr_send = PoissonNextSend(current_time, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
4079  }
4080 
4081  //
4082  // Message: addr
4083  //
4084  if (pto->RelayAddrsWithConn() && pto->m_next_addr_send < current_time) {
4085  pto->m_next_addr_send = PoissonNextSend(current_time, AVG_ADDRESS_BROADCAST_INTERVAL);
4086  std::vector<CAddress> vAddr;
4087  vAddr.reserve(pto->vAddrToSend.size());
4088  assert(pto->m_addr_known);
4089 
4090  const char* msg_type;
4091  int make_flags;
4092  if (pto->m_wants_addrv2) {
4093  msg_type = NetMsgType::ADDRV2;
4094  make_flags = ADDRV2_FORMAT;
4095  } else {
4096  msg_type = NetMsgType::ADDR;
4097  make_flags = 0;
4098  }
4099 
4100  for (const CAddress& addr : pto->vAddrToSend)
4101  {
4102  if (!pto->m_addr_known->contains(addr.GetKey()))
4103  {
4104  pto->m_addr_known->insert(addr.GetKey());
4105  vAddr.push_back(addr);
4106  // receiver rejects addr messages larger than MAX_ADDR_TO_SEND
4107  if (vAddr.size() >= MAX_ADDR_TO_SEND)
4108  {
4109  m_connman.PushMessage(pto, msgMaker.Make(make_flags, msg_type, vAddr));
4110  vAddr.clear();
4111  }
4112  }
4113  }
4114  pto->vAddrToSend.clear();
4115  if (!vAddr.empty())
4116  m_connman.PushMessage(pto, msgMaker.Make(make_flags, msg_type, vAddr));
4117  // we only send the big addr message once
4118  if (pto->vAddrToSend.capacity() > 40)
4119  pto->vAddrToSend.shrink_to_fit();
4120  }
4121 
4122  // Start block sync
4123  if (pindexBestHeader == nullptr)
4125  bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->IsAddrFetchConn()); // Download if this is a nice peer, or we have no nice peers and this one might do.
4126  if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) {
4127  // Only actively request headers from a single peer, unless we're close to today.
4128  if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) {
4129  state.fSyncStarted = true;
4131  nSyncStarted++;
4132  const CBlockIndex *pindexStart = pindexBestHeader;
4133  /* If possible, start at the block preceding the currently
4134  best known header. This ensures that we always get a
4135  non-empty list of headers back as long as the peer
4136  is up-to-date. With a non-empty response, we can initialise
4137  the peer's known best block. This wouldn't be possible
4138  if we requested starting at pindexBestHeader and
4139  got back an empty response. */
4140  if (pindexStart->pprev)
4141  pindexStart = pindexStart->pprev;
4142  LogPrint(BCLog::NET, "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->GetId(), pto->nStartingHeight);
4143  m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexStart), uint256()));
4144  }
4145  }
4146 
4147  //
4148  // Try sending block announcements via headers
4149  //
4150  {
4151  // If we have less than MAX_BLOCKS_TO_ANNOUNCE in our
4152  // list of block hashes we're relaying, and our peer wants
4153  // headers announcements, then find the first header
4154  // not yet known to our peer but would connect, and send.
4155  // If no header would connect, or if we have too many
4156  // blocks, or if the peer doesn't want headers, just
4157  // add all to the inv queue.
4158  LOCK(pto->cs_inventory);
4159  std::vector<CBlock> vHeaders;
4160  bool fRevertToInv = ((!state.fPreferHeaders &&
4161  (!state.fPreferHeaderAndIDs || pto->vBlockHashesToAnnounce.size() > 1)) ||
4162  pto->vBlockHashesToAnnounce.size() > MAX_BLOCKS_TO_ANNOUNCE);
4163  const CBlockIndex *pBestIndex = nullptr; // last header queued for delivery
4164  ProcessBlockAvailability(pto->GetId()); // ensure pindexBestKnownBlock is up-to-date
4165 
4166  if (!fRevertToInv) {
4167  bool fFoundStartingHeader = false;
4168  // Try to find first header that our peer doesn't have, and
4169  // then send all headers past that one. If we come across any
4170  // headers that aren't on ::ChainActive(), give up.
4171  for (const uint256 &hash : pto->vBlockHashesToAnnounce) {
4172  const CBlockIndex* pindex = LookupBlockIndex(hash);
4173  assert(pindex);
4174  if (::ChainActive()[pindex->nHeight] != pindex) {
4175  // Bail out if we reorged away from this block
4176  fRevertToInv = true;
4177  break;
4178  }
4179  if (pBestIndex != nullptr && pindex->pprev != pBestIndex) {
4180  // This means that the list of blocks to announce don't
4181  // connect to each other.
4182  // This shouldn't really be possible to hit during
4183  // regular operation (because reorgs should take us to
4184  // a chain that has some block not on the prior chain,
4185  // which should be caught by the prior check), but one
4186  // way this could happen is by using invalidateblock /
4187  // reconsiderblock repeatedly on the tip, causing it to
4188  // be added multiple times to vBlockHashesToAnnounce.
4189  // Robustly deal with this rare situation by reverting
4190  // to an inv.
4191  fRevertToInv = true;
4192  break;
4193  }
4194  pBestIndex = pindex;
4195  if (fFoundStartingHeader) {
4196  // add this to the headers message
4197  vHeaders.push_back(pindex->GetBlockHeader());
4198  } else if (PeerHasHeader(&state, pindex)) {
4199  continue; // keep looking for the first new block
4200  } else if (pindex->pprev == nullptr || PeerHasHeader(&state, pindex->pprev)) {
4201  // Peer doesn't have this header but they do have the prior one.
4202  // Start sending headers.
4203  fFoundStartingHeader = true;
4204  vHeaders.push_back(pindex->GetBlockHeader());
4205  } else {
4206  // Peer doesn't have this header or the prior one -- nothing will
4207  // connect, so bail out.
4208  fRevertToInv = true;
4209  break;
4210  }
4211  }
4212  }
4213  if (!fRevertToInv && !vHeaders.empty()) {
4214  if (vHeaders.size() == 1 && state.fPreferHeaderAndIDs) {
4215  // We only send up to 1 block as header-and-ids, as otherwise
4216  // probably means we're doing an initial-ish-sync or they're slow
4217  LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", __func__,
4218  vHeaders.front().GetHash().ToString(), pto->GetId());
4219 
4220  int nSendFlags = state.fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
4221 
4222  bool fGotBlockFromCache = false;
4223  {
4224  LOCK(cs_most_recent_block);
4225  if (most_recent_block_hash == pBestIndex->GetBlockHash()) {
4226  if (state.fWantsCmpctWitness || !fWitnessesPresentInMostRecentCompactBlock)
4227  m_connman.PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, *most_recent_compact_block));
4228  else {
4229  CBlockHeaderAndShortTxIDs cmpctblock(*most_recent_block, state.fWantsCmpctWitness);
4230  m_connman.PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
4231  }
4232  fGotBlockFromCache = true;
4233  }
4234  }
4235  if (!fGotBlockFromCache) {
4236  CBlock block;
4237  bool ret = ReadBlockFromDisk(block, pBestIndex, consensusParams);
4238  assert(ret);
4239  CBlockHeaderAndShortTxIDs cmpctblock(block, state.fWantsCmpctWitness);
4240  m_connman.PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
4241  }
4242  state.pindexBestHeaderSent = pBestIndex;
4243  } else if (state.fPreferHeaders) {
4244  if (vHeaders.size() > 1) {
4245  LogPrint(BCLog::NET, "%s: %u headers, range (%s, %s), to peer=%d\n", __func__,
4246  vHeaders.size(),
4247  vHeaders.front().GetHash().ToString(),
4248  vHeaders.back().GetHash().ToString(), pto->GetId());
4249  } else {
4250  LogPrint(BCLog::NET, "%s: sending header %s to peer=%d\n", __func__,
4251  vHeaders.front().GetHash().ToString(), pto->GetId());
4252  }
4254  state.pindexBestHeaderSent = pBestIndex;
4255  } else
4256  fRevertToInv = true;
4257  }
4258  if (fRevertToInv) {
4259  // If falling back to using an inv, just try to inv the tip.
4260  // The last entry in vBlockHashesToAnnounce was our tip at some point
4261  // in the past.
4262  if (!pto->vBlockHashesToAnnounce.empty()) {
4263  const uint256 &hashToAnnounce = pto->vBlockHashesToAnnounce.back();
4264  const CBlockIndex* pindex = LookupBlockIndex(hashToAnnounce);
4265  assert(pindex);
4266 
4267  // Warn if we're announcing a block that is not on the main chain.
4268  // This should be very rare and could be optimized out.
4269  // Just log for now.
4270  if (::ChainActive()[pindex->nHeight] != pindex) {
4271  LogPrint(BCLog::NET, "Announcing block %s not on main chain (tip=%s)\n",
4272  hashToAnnounce.ToString(), ::ChainActive().Tip()->GetBlockHash().ToString());
4273  }
4274 
4275  // If the peer's chain has this block, don't inv it back.
4276  if (!PeerHasHeader(&state, pindex)) {
4277  pto->vInventoryBlockToSend.push_back(hashToAnnounce);
4278  LogPrint(BCLog::NET, "%s: sending inv peer=%d hash=%s\n", __func__,
4279  pto->GetId(), hashToAnnounce.ToString());
4280  }
4281  }
4282  }
4283  pto->vBlockHashesToAnnounce.clear();
4284  }
4285 
4286  //
4287  // Message: inventory
4288  //
4289  std::vector<CInv> vInv;
4290  {
4291  LOCK(pto->cs_inventory);
4292  vInv.reserve(std::max<size_t>(pto->vInventoryBlockToSend.size(), INVENTORY_BROADCAST_MAX));
4293 
4294  // Add blocks
4295  for (const uint256& hash : pto->vInventoryBlockToSend) {
4296  vInv.push_back(CInv(MSG_BLOCK, hash));
4297  if (vInv.size() == MAX_INV_SZ) {
4299  vInv.clear();
4300  }
4301  }
4302  pto->vInventoryBlockToSend.clear();
4303 
4304  if (pto->m_tx_relay != nullptr) {
4305  LOCK(pto->m_tx_relay->cs_tx_inventory);
4306  // Check whether periodic sends should happen
4307  bool fSendTrickle = pto->HasPermission(PF_NOBAN);
4308  if (pto->m_tx_relay->nNextInvSend < current_time) {
4309  fSendTrickle = true;
4310  if (pto->IsInboundConn()) {
4311  pto->m_tx_relay->nNextInvSend = std::chrono::microseconds{m_connman.PoissonNextSendInbound(count_microseconds(current_time), INVENTORY_BROADCAST_INTERVAL)};
4312  } else {
4313  // Use half the delay for outbound peers, as there is less privacy concern for them.
4314  pto->m_tx_relay->nNextInvSend = PoissonNextSend(current_time, std::chrono::seconds{INVENTORY_BROADCAST_INTERVAL >> 1});
4315  }
4316  }
4317 
4318  // Time to send but the peer has requested we not relay transactions.
4319  if (fSendTrickle) {
4320  LOCK(pto->m_tx_relay->cs_filter);
4321  if (!pto->m_tx_relay->fRelayTxes) pto->m_tx_relay->setInventoryTxToSend.clear();
4322  }
4323 
4324  // Respond to BIP35 mempool requests
4325  if (fSendTrickle && pto->m_tx_relay->fSendMempool) {
4326  auto vtxinfo = m_mempool.infoAll();
4327  pto->m_tx_relay->fSendMempool = false;
4328  CFeeRate filterrate;
4329  {
4330  LOCK(pto->m_tx_relay->cs_feeFilter);
4331  filterrate = CFeeRate(pto->m_tx_relay->minFeeFilter);
4332  }
4333 
4334  LOCK(pto->m_tx_relay->cs_filter);
4335 
4336  for (const auto& txinfo : vtxinfo) {
4337  const uint256& hash = state.m_wtxid_relay ? txinfo.tx->GetWitnessHash() : txinfo.tx->GetHash();
4338  CInv inv(state.m_wtxid_relay ? MSG_WTX : MSG_TX, hash);
4339  pto->m_tx_relay->setInventoryTxToSend.erase(hash);
4340  // Don't send transactions that peers will not put into their mempool
4341  if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
4342  continue;
4343  }
4344  if (pto->m_tx_relay->pfilter) {
4345  if (!pto->m_tx_relay->pfilter->IsRelevantAndUpdate(*txinfo.tx)) continue;
4346  }
4347  pto->m_tx_relay->filterInventoryKnown.insert(hash);
4348  // Responses to MEMPOOL requests bypass the m_recently_announced_invs filter.
4349  vInv.push_back(inv);
4350  if (vInv.size() == MAX_INV_SZ) {
4352  vInv.clear();
4353  }
4354  }
4355  pto->m_tx_relay->m_last_mempool_req = GetTime<std::chrono::seconds>();
4356  }
4357 
4358  // Determine transactions to relay
4359  if (fSendTrickle) {
4360  // Produce a vector with all candidates for sending
4361  std::vector<std::set<uint256>::iterator> vInvTx;
4362  vInvTx.reserve(pto->m_tx_relay->setInventoryTxToSend.size());
4363  for (std::set<uint256>::iterator it = pto->m_tx_relay->setInventoryTxToSend.begin(); it != pto->m_tx_relay->setInventoryTxToSend.end(); it++) {
4364  vInvTx.push_back(it);
4365  }
4366  CFeeRate filterrate;
4367  {
4368  LOCK(pto->m_tx_relay->cs_feeFilter);
4369  filterrate = CFeeRate(pto->m_tx_relay->minFeeFilter);
4370  }
4371  // Topologically and fee-rate sort the inventory we send for privacy and priority reasons.
4372  // A heap is used so that not all items need sorting if only a few are being sent.
4373  CompareInvMempoolOrder compareInvMempoolOrder(&m_mempool, state.m_wtxid_relay);
4374  std::make_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
4375  // No reason to drain out at many times the network's capacity,
4376  // especially since we have many peers and some will draw much shorter delays.
4377  unsigned int nRelayedTransactions = 0;
4378  LOCK(pto->m_tx_relay->cs_filter);
4379  while (!vInvTx.empty() && nRelayedTransactions < INVENTORY_BROADCAST_MAX) {
4380  // Fetch the top element from the heap
4381  std::pop_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
4382  std::set<uint256>::iterator it = vInvTx.back();
4383  vInvTx.pop_back();
4384  uint256 hash = *it;
4385  CInv inv(state.m_wtxid_relay ? MSG_WTX : MSG_TX, hash);
4386  // Remove it from the to-be-sent set
4387  pto->m_tx_relay->setInventoryTxToSend.erase(it);
4388  // Check if not in the filter already
4389  if (pto->m_tx_relay->filterInventoryKnown.contains(hash)) {
4390  continue;
4391  }
4392  // Not in the mempool anymore? don't bother sending it.
4393  auto txinfo = m_mempool.info(ToGenTxid(inv));
4394  if (!txinfo.tx) {
4395  continue;
4396  }
4397  auto txid = txinfo.tx->GetHash();
4398  auto wtxid = txinfo.tx->GetWitnessHash();
4399  // Peer told you to not send transactions at that feerate? Don't bother sending it.
4400  if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
4401  continue;
4402  }
4403  if (pto->m_tx_relay->pfilter && !pto->m_tx_relay->pfilter->IsRelevantAndUpdate(*txinfo.tx)) continue;
4404  // Send
4405  State(pto->GetId())->m_recently_announced_invs.insert(hash);
4406  vInv.push_back(inv);
4407  nRelayedTransactions++;
4408  {
4409  // Expire old relay messages
4410  while (!vRelayExpiration.empty() && vRelayExpiration.front().first < count_microseconds(current_time))
4411  {
4412  mapRelay.erase(vRelayExpiration.front().second);
4413  vRelayExpiration.pop_front();
4414  }
4415 
4416  auto ret = mapRelay.emplace(txid, std::move(txinfo.tx));
4417  if (ret.second) {
4418  vRelayExpiration.emplace_back(count_microseconds(current_time + std::chrono::microseconds{RELAY_TX_CACHE_TIME}), ret.first);
4419  }
4420  // Add wtxid-based lookup into mapRelay as well, so that peers can request by wtxid
4421  auto ret2 = mapRelay.emplace(wtxid, ret.first->second);
4422  if (ret2.second) {
4423  vRelayExpiration.emplace_back(count_microseconds(current_time + std::chrono::microseconds{RELAY_TX_CACHE_TIME}), ret2.first);
4424  }
4425  }
4426  if (vInv.size() == MAX_INV_SZ) {
4428  vInv.clear();
4429  }
4430  pto->m_tx_relay->filterInventoryKnown.insert(hash);
4431  if (hash != txid) {
4432  // Insert txid into filterInventoryKnown, even for
4433  // wtxidrelay peers. This prevents re-adding of
4434  // unconfirmed parents to the recently_announced
4435  // filter, when a child tx is requested. See
4436  // ProcessGetData().
4437  pto->m_tx_relay->filterInventoryKnown.insert(txid);
4438  }
4439  }
4440  }
4441  }
4442  }
4443  if (!vInv.empty())
4445 
4446  // Detect whether we're stalling
4447  current_time = GetTime<std::chrono::microseconds>();
4448  if (state.nStallingSince && state.nStallingSince < count_microseconds(current_time) - 1000000 * BLOCK_STALLING_TIMEOUT) {
4449  // Stalling only triggers when the block download window cannot move. During normal steady state,
4450  // the download window should be much larger than the to-be-downloaded set of blocks, so disconnection
4451  // should only happen during initial block download.
4452  LogPrintf("Peer=%d is stalling block download, disconnecting\n", pto->GetId());
4453  pto->fDisconnect = true;
4454  return true;
4455  }
4456  // In case there is a block that has been in flight from this peer for 2 + 0.5 * N times the block interval
4457  // (with N the number of peers from which we're downloading validated blocks), disconnect due to timeout.
4458  // We compensate for other peers to prevent killing off peers due to our own downstream link
4459  // being saturated. We only count validated in-flight blocks so peers can't advertise non-existing block hashes
4460  // to unreasonably increase our timeout.
4461  if (state.vBlocksInFlight.size() > 0) {
4462  QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
4463  int nOtherPeersWithValidatedDownloads = nPeersWithValidatedDownloads - (state.nBlocksInFlightValidHeaders > 0);
4464  if (count_microseconds(current_time) > state.nDownloadingSince + consensusParams.nPowTargetSpacing * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) {
4465  LogPrintf("Timeout downloading block %s from peer=%d, disconnecting\n", queuedBlock.hash.ToString(), pto->GetId());
4466  pto->fDisconnect = true;
4467  return true;
4468  }
4469  }
4470  // Check for headers sync timeouts
4471  if (state.fSyncStarted && state.nHeadersSyncTimeout < std::numeric_limits<int64_t>::max()) {
4472  // Detect whether this is a stalling initial-headers-sync peer
4473  if (pindexBestHeader->GetBlockTime() <= GetAdjustedTime() - 24 * 60 * 60) {
4474  if (count_microseconds(current_time) > state.nHeadersSyncTimeout && nSyncStarted == 1 && (nPreferredDownload - state.fPreferredDownload >= 1)) {
4475  // Disconnect a peer (without the noban permission) if it is our only sync peer,
4476  // and we have others we could be using instead.
4477  // Note: If all our peers are inbound, then we won't
4478  // disconnect our sync peer for stalling; we have bigger
4479  // problems if we can't get any outbound peers.
4480  if (!pto->HasPermission(PF_NOBAN)) {
4481  LogPrintf("Timeout downloading headers from peer=%d, disconnecting\n", pto->GetId());
4482  pto->fDisconnect = true;
4483  return true;
4484  } else {
4485  LogPrintf("Timeout downloading headers from noban peer=%d, not disconnecting\n", pto->GetId());
4486  // Reset the headers sync state so that we have a
4487  // chance to try downloading from a different peer.
4488  // Note: this will also result in at least one more
4489  // getheaders message to be sent to
4490  // this peer (eventually).
4491  state.fSyncStarted = false;
4492  nSyncStarted--;
4493  state.nHeadersSyncTimeout = 0;
4494  }
4495  }
4496  } else {
4497  // After we've caught up once, reset the timeout so we can't trigger
4498  // disconnect later.
4499  state.nHeadersSyncTimeout = std::numeric_limits<int64_t>::max();
4500  }
4501  }
4502 
4503  // Check that outbound peers have reasonable chains
4504  // GetTime() is used by this anti-DoS logic so we can test this using mocktime
4505  ConsiderEviction(*pto, GetTime());
4506 
4507  //
4508  // Message: getdata (blocks)
4509  //
4510  std::vector<CInv> vGetData;
4511  if (!pto->fClient && ((fFetch && !pto->m_limited_node) || !::ChainstateActive().IsInitialBlockDownload()) && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
4512  std::vector<const CBlockIndex*> vToDownload;
4513  NodeId staller = -1;
4514  FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller, consensusParams);
4515  for (const CBlockIndex *pindex : vToDownload) {
4516  uint32_t nFetchFlags = GetFetchFlags(*pto);
4517  vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
4518  MarkBlockAsInFlight(m_mempool, pto->GetId(), pindex->GetBlockHash(), pindex);
4519  LogPrint(BCLog::NET, "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(),
4520  pindex->nHeight, pto->GetId());
4521  }
4522  if (state.nBlocksInFlight == 0 && staller != -1) {
4523  if (State(staller)->nStallingSince == 0) {
4524  State(staller)->nStallingSince = count_microseconds(current_time);
4525  LogPrint(BCLog::NET, "Stall started peer=%d\n", staller);
4526  }
4527  }
4528  }
4529 
4530  //
4531  // Message: getdata (non-blocks)
4532  //
4533  std::vector<std::pair<NodeId, GenTxid>> expired;
4534  auto requestable = m_txrequest.GetRequestable(pto->GetId(), current_time, &expired);
4535  for (const auto& entry : expired) {
4536  LogPrint(BCLog::NET, "timeout of inflight %s %s from peer=%d\n", entry.second.IsWtxid() ? "wtx" : "tx",
4537  entry.second.GetHash().ToString(), entry.first);
4538  }
4539  for (const GenTxid& gtxid : requestable) {
4540  if (!AlreadyHaveTx(gtxid, m_mempool)) {
4541  LogPrint(BCLog::NET, "Requesting %s %s peer=%d\n", gtxid.IsWtxid() ? "wtx" : "tx",
4542  gtxid.GetHash().ToString(), pto->GetId());
4543  vGetData.emplace_back(gtxid.IsWtxid() ? MSG_WTX : (MSG_TX | GetFetchFlags(*pto)), gtxid.GetHash());
4544  if (vGetData.size() >= MAX_GETDATA_SZ) {
4546  vGetData.clear();
4547  }
4548  m_txrequest.RequestedTx(pto->GetId(), gtxid.GetHash(), current_time + GETDATA_TX_INTERVAL);
4549  } else {
4550  // We have already seen this transaction, no need to download. This is just a belt-and-suspenders, as
4551  // this should already be called whenever a transaction becomes AlreadyHaveTx().
4552  m_txrequest.ForgetTxHash(gtxid.GetHash());
4553  }
4554  }
4555 
4556 
4557  if (!vGetData.empty())
4559 
4560  //
4561  // Message: feefilter
4562  //
4563  if (pto->m_tx_relay != nullptr && pto->GetCommonVersion() >= FEEFILTER_VERSION && gArgs.GetBoolArg("-feefilter", DEFAULT_FEEFILTER) &&
4564  !pto->HasPermission(PF_FORCERELAY) // peers with the forcerelay permission should not filter txs to us
4565  ) {
4566  CAmount currentFilter = m_mempool.GetMinFee(gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFeePerK();
4567  static FeeFilterRounder g_filter_rounder{CFeeRate{DEFAULT_MIN_RELAY_TX_FEE}};
4569  // Received tx-inv messages are discarded when the active
4570  // chainstate is in IBD, so tell the peer to not send them.
4571  currentFilter = MAX_MONEY;
4572  } else {
4573  static const CAmount MAX_FILTER{g_filter_rounder.round(MAX_MONEY)};
4574  if (pto->m_tx_relay->lastSentFeeFilter == MAX_FILTER) {
4575  // Send the current filter if we sent MAX_FILTER previously
4576  // and made it out of IBD.
4577  pto->m_tx_relay->nextSendTimeFeeFilter = count_microseconds(current_time) - 1;
4578  }
4579  }
4580  if (count_microseconds(current_time) > pto->m_tx_relay->nextSendTimeFeeFilter) {
4581  CAmount filterToSend = g_filter_rounder.round(currentFilter);
4582  // We always have a fee filter of at least minRelayTxFee
4583  filterToSend = std::max(filterToSend, ::minRelayTxFee.GetFeePerK());
4584  if (filterToSend != pto->m_tx_relay->lastSentFeeFilter) {
4586  pto->m_tx_relay->lastSentFeeFilter = filterToSend;
4587  }
4588  pto->m_tx_relay->nextSendTimeFeeFilter = PoissonNextSend(count_microseconds(current_time), AVG_FEEFILTER_BROADCAST_INTERVAL);
4589  }
4590  // If the fee filter has changed substantially and it's still more than MAX_FEEFILTER_CHANGE_DELAY
4591  // until scheduled broadcast, then move the broadcast to within MAX_FEEFILTER_CHANGE_DELAY.
4592  else if (count_microseconds(current_time) + MAX_FEEFILTER_CHANGE_DELAY * 1000000 < pto->m_tx_relay->nextSendTimeFeeFilter &&
4593  (currentFilter < 3 * pto->m_tx_relay->lastSentFeeFilter / 4 || currentFilter > 4 * pto->m_tx_relay->lastSentFeeFilter / 3)) {
4594  pto->m_tx_relay->nextSendTimeFeeFilter = count_microseconds(current_time) + GetRandInt(MAX_FEEFILTER_CHANGE_DELAY) * 1000000;
4595  }
4596  }
4597  } // release cs_main
4598  return true;
4599 }
4600 
4602 {
4603 public:
4606  // orphan transactions
4607  mapOrphanTransactions.clear();
4608  mapOrphanTransactionsByPrev.clear();
4609  g_orphans_by_wtxid.clear();
4610  }
4611 };
std::shared_ptr< const CTransaction > CTransactionRef
Definition: transaction.h:395
static constexpr unsigned int INVENTORY_BROADCAST_PER_SECOND
Maximum rate of inventory items to send per second.
const char * GETCFILTERS
getcfilters requests compact filters for a range of blocks.
Definition: protocol.cpp:41
arith_uint256 nChainWork
(memory only) Total amount of work (expected number of hashes) in the chain up to and including this ...
Definition: chain.h:162
static int64_t GetTransactionWeight(const CTransaction &tx)
Definition: validation.h:146
static constexpr int64_t MINIMUM_CONNECT_TIME
Minimum time an outbound-peer-eviction candidate must be connected for, in order to evict...
static constexpr std::chrono::microseconds GETDATA_TX_INTERVAL
How long to wait (in microseconds) before downloading a transaction from an additional peer...
std::string SanitizeString(const std::string &str, int rule)
Remove unsafe chars.
enum ReadStatus_t ReadStatus
const char * PING
The ping message is sent periodically to help confirm that the receiving peer is still connected...
Definition: protocol.cpp:29
static constexpr auto TXID_RELAY_DELAY
How long to delay requesting transactions via txids, if we have wtxid-relaying peers.
bool MaybeDiscourageAndDisconnect(CNode &pnode)
Maybe disconnect a peer and discourage future connections from its address.
std::atomic< uint64_t > nPingNonceSent
Definition: net.h:1054
BlockFilterIndex is used to store and retrieve block filters, hashes, and headers for a range of bloc...
bool IsArgSet(const std::string &strArg) const
Return true if the given argument has been manually set.
Definition: system.cpp:371
const char * FILTERLOAD
The filterload message tells the receiving peer to filter all relayed transactions and requested merk...
Definition: protocol.cpp:32
const char * MERKLEBLOCK
The merkleblock message is a reply to a getdata message which requested a block using the inventory t...
Definition: protocol.cpp:21
std::atomic_bool fPauseSend
Definition: net.h:911
uint64_t GetRand(uint64_t nMax) noexcept
Generate a uniform random integer in the range [0..nMax).
Definition: random.cpp:592
static const int SERIALIZE_TRANSACTION_NO_WITNESS
A flag that is ORed into the protocol version to designate that a transaction should be (un)serialize...
Definition: transaction.h:23
static void ProcessGetCFCheckPt(CNode &peer, CDataStream &vRecv, const CChainParams &chain_params, CConnman &connman)
Handle a getcfcheckpt request.
invalid by consensus rules
Optional< txiter > GetIter(const uint256 &txid) const EXCLUSIVE_LOCKS_REQUIRED(cs)
Returns an iterator to the given hash, if found.
Definition: txmempool.cpp:886
const char * BLOCKTXN
Contains a BlockTransactions.
Definition: protocol.cpp:40
bool fPruneMode
True if we're running in -prune mode.
Definition: validation.cpp:139
std::deque< CInv >::iterator it
const std::chrono::seconds now
bool IsMsgTx() const
Definition: protocol.h:441
static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER
Number of blocks that can be requested at any given time from a single peer.
static constexpr unsigned int INVENTORY_BROADCAST_MAX
Maximum number of inventory items to send per transmission.
int64_t m_stale_tip_check_time
Next time to check for stale tip.
static const int MAX_BLOCKTXN_DEPTH
Maximum depth of blocks we're willing to respond to GETBLOCKTXN requests for.
void Discourage(const CNetAddr &net_addr)
Definition: banman.cpp:112
const char * SENDADDRV2
The sendaddrv2 message signals support for receiving ADDRV2 messages (BIP155).
Definition: protocol.cpp:18
Definition: banman.h:57
ReadStatus FillBlock(CBlock &block, const std::vector< CTransactionRef > &vtx_missing)
ServiceFlags
nServices flags
Definition: protocol.h:269
bool IsLocal() const
Definition: netaddress.cpp:406
void SetNull()
Definition: uint256.h:39
#define LogPrint(category,...)
Definition: logging.h:182
std::vector< CInv > vNotFound
int64_t GetBlockTime() const
Definition: chain.h:247
Describes a place in the block chain to another node such that if the other node doesn't have the sam...
Definition: block.h:114
void InitializeNode(CNode *pnode) override
Initialize a peer by adding it to mapNodeState and pushing a message requesting its version...
int GetVersion() const
Definition: streams.h:396
CBlockIndex * pprev
pointer to the index of the predecessor of this block
Definition: chain.h:144
bool ProcessNewBlock(const CChainParams &chainparams, const std::shared_ptr< const CBlock > pblock, bool fForceProcessing, bool *fNewBlock) LOCKS_EXCLUDED(cs_main)
Process an incoming block.
std::vector< TxMempoolInfo > infoAll() const
Definition: txmempool.cpp:800
CSipHasher & Write(uint64_t data)
Hash a 64-bit integer worth of data It is treated as if this was the little-endian interpretation of ...
Definition: siphash.cpp:28
uint32_t nStatus
Verification status of this block. See enum BlockStatus.
Definition: chain.h:174
void scheduleEvery(Function f, std::chrono::milliseconds delta)
Repeat f until the scheduler is stopped.
Definition: scheduler.cpp:108
void SetIP(const CNetAddr &ip)
Definition: netaddress.cpp:122
void WakeMessageHandler()
Definition: net.cpp:1550
void SetServices(const CService &addr, ServiceFlags nServices)
Definition: net.cpp:2639
static void ProcessGetCFilters(CNode &peer, CDataStream &vRecv, const CChainParams &chain_params, CConnman &connman)
Handle a cfilters request.
RecursiveMutex g_cs_orphans
Guards orphan transactions and extra txs for compact blocks.
bool exists(const GenTxid &gtxid) const
Definition: txmempool.h:736
Definition: block.h:62
We don't have the previous block the checked one is built on.
CChain & ChainActive()
Please prefer the identical ChainstateManager::ActiveChain.
Definition: validation.cpp:113
void PushTxInventory(const uint256 &hash)
Definition: net.h:1198
const char * GETADDR
The getaddr message requests an addr message from the receiving node, preferably one with lots of IP ...
Definition: protocol.cpp:27
uint64_t ReadCompactSize(Stream &is, bool range_check=true)
Decode a CompactSize-encoded variable-length integer.
Definition: serialize.h:318
int64_t nTimeExpire
static const unsigned int DEFAULT_MAX_MEMPOOL_SIZE
Default for -maxmempool, maximum megabytes of mempool memory usage.
Definition: policy.h:32
int GetType() const
Definition: streams.h:394
static const CAmount MAX_MONEY
No amount larger than this (in satoshi) is valid.
Definition: amount.h:25
Provides an interface for creating and interacting with one or two chainstates: an IBD chainstate gen...
Definition: validation.h:781
std::vector< uint16_t > indexes
#define strprintf
Format arguments and return the string or write to given std::ostream (see tinyformat::format doc for...
Definition: tinyformat.h:1164
bool IsOutboundOrBlockRelayConn() const
Definition: net.h:913
bool IsMsgFilteredBlk() const
Definition: protocol.h:444
int64_t count_microseconds(std::chrono::microseconds t)
Definition: time.h:26
void ProcessOrphanTx(std::set< uint256 > &orphan_work_set) EXCLUSIVE_LOCKS_REQUIRED(cs_main
Reconsider orphan transactions after a parent has been accepted to the mempool.
void insert(const std::vector< unsigned char > &vKey)
Definition: bloom.cpp:213
bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats)
Get statistics from node state.
size_t DynamicMemoryUsage() const
Definition: txmempool.cpp:929
reverse_range< T > reverse_iterate(T &x)
bool ReadRawBlockFromDisk(std::vector< uint8_t > &block, const FlatFilePos &pos, const CMessageHeader::MessageStartChars &message_start)
inv message data
Definition: protocol.h:427
invalid proof of work or time too old
bool ReadBlockFromDisk(CBlock &block, const FlatFilePos &pos, const Consensus::Params &consensusParams)
Functions for disk access for blocks.
const char * SENDCMPCT
Contains a 1-byte bool and 8-byte LE version number.
Definition: protocol.cpp:37
BlockFilterIndex * GetBlockFilterIndex(BlockFilterType filter_type)
Get a block filter index by type.
static constexpr int STALE_RELAY_AGE_LIMIT
Age after which a stale block will no longer be served if requested as protection against fingerprint...
static const unsigned int MIN_BLOCKS_TO_KEEP
Block files containing a block-height within MIN_BLOCKS_TO_KEEP of ChainActive().Tip() will not be pr...
Definition: validation.h:84
constexpr auto GetRandMillis
Definition: random.h:84
const CBlockIndex * LastCommonAncestor(const CBlockIndex *pa, const CBlockIndex *pb)
Find the last common ancestor two blocks have.
Definition: chain.cpp:156
static void ProcessGetData(CNode &pfrom, Peer &peer, const CChainParams &chainparams, CConnman &connman, CTxMemPool &mempool, const std::atomic< bool > &interruptMsgProc) EXCLUSIVE_LOCKS_REQUIRED(!cs_main
transaction was missing some of its inputs
bool IsMsgCmpctBlk() const
Definition: protocol.h:445
bool IsFeelerConn() const
Definition: net.h:940
unsigned int nHeight
TxMempoolInfo info(const uint256 &hash) const
Definition: txmempool.cpp:832
All parent headers found, difficulty matches, timestamp >= median previous, checkpoint.
Definition: chain.h:101
bool MoneyRange(const CAmount &nValue)
Definition: amount.h:26
CBlockHeader GetBlockHeader() const
Definition: chain.h:220
std::vector< unsigned char > ParseHex(const char *psz)
int Height() const
Return the maximal height in the chain.
Definition: chain.h:415
std::atomic_bool m_wants_addrv2
Whether the peer has signaled support for receiving ADDRv2 (BIP155) messages, implying a preference t...
Definition: net.h:900
BloomFilter is a probabilistic filter which SPV clients provide so that we can filter the transaction...
Definition: bloom.h:44
static bool BlockRequestAllowed(const CBlockIndex *pindex, const Consensus::Params &consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
Mutex cs_inventory
Definition: net.h:1004
bool GetTryNewOutboundPeer()
Definition: net.cpp:1812
const uint256 & GetHash() const
Definition: transaction.h:407
CTransactionRef tx
unsigned long size() const
Definition: txmempool.h:724
const char * CFHEADERS
cfheaders is a response to a getcfheaders request containing a filter header and a vector of filter h...
Definition: protocol.cpp:44
Only first tx is coinbase, 2 <= coinbase input script length <= 100, transactions valid...
Definition: chain.h:108
void SetCommonVersion(int greatest_common_version)
Definition: net.h:1137
static const int BIP0031_VERSION
BIP 0031, pong message, is enabled for all versions AFTER this one.
Definition: version.h:21
void PushMessage(CNode *pnode, CSerializedNetMsg &&msg)
Definition: net.cpp:2973
CSerializedNetMsg Make(int nFlags, std::string msg_type, Args &&... args) const
static bool AlreadyHaveBlock(const uint256 &block_hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
RecursiveMutex cs_vProcessMsg
Definition: net.h:865
Defined in BIP152.
Definition: protocol.h:418
arith_uint256 nMinimumChainWork
Minimum work we will assume exists on some valid chain.
Definition: validation.cpp:147
void SetVersion(int nVersionIn)
Definition: net.h:744
static void LogPrintf(const char *fmt, const Args &... args)
Definition: logging.h:166
bool CompareDepthAndScore(const uint256 &hasha, const uint256 &hashb, bool wtxid=false)
Definition: txmempool.cpp:738
void SetServiceFlagsIBDCache(bool state)
Set the current IBD status in order to figure out the desirable service flags.
Definition: protocol.cpp:145
bool MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState &state, bool via_compact_block, const std::string &message="")
Potentially mark a node discouraged based on the contents of a BlockValidationState object...
RollingBloomFilter is a probabilistic "keep track of most recently inserted" set. ...
Definition: bloom.h:110
void ProcessHeadersMessage(CNode &pfrom, const std::vector< CBlockHeader > &headers, bool via_compact_block)
Process a single headers message from a peer.
CAddress GetLocalAddress(const CNetAddr *paddrPeer, ServiceFlags nLocalServices)
Definition: net.cpp:181
static const int SENDHEADERS_VERSION
"sendheaders" command and announcing blocks with headers starts with this version ...
Definition: version.h:27
static constexpr size_t MAX_PCT_ADDR_TO_SEND
the maximum percentage of addresses from our addrman to return in response to a getaddr message...
CChainParams defines various tweakable parameters of a given instance of the Bitcoin system...
Definition: chainparams.h:52
violated mempool's fee/size/descendant/RBF/etc limits
static constexpr auto NONPREF_PEER_TX_DELAY
How long to delay requesting transactions from non-preferred peers.
PeerManager(const CChainParams &chainparams, CConnman &connman, BanMan *banman, CScheduler &scheduler, ChainstateManager &chainman, CTxMemPool &pool)
bool IsNull() const
Definition: block.h:135
Double ended buffer combining vector and stream-like interfaces.
Definition: streams.h:202
inputs (covered by txid) failed policy rules
bool empty() const
Definition: streams.h:294
bool GetBoolArg(const std::string &strArg, bool fDefault) const
Return boolean argument or default value.
Definition: system.cpp:479
void SetTryNewOutboundPeer(bool flag)
Definition: net.cpp:1817
const uint32_t MSG_WITNESS_FLAG
getdata message type flags
Definition: protocol.h:404
uint64_t GetLocalNonce() const
Definition: net.h:1112
bool SeenLocal(const CService &addr)
vote for a local address
Definition: net.cpp:294
std::vector< CAddress > vAddrToSend
Definition: net.h:994
void GetRandBytes(unsigned char *buf, int num) noexcept
Overall design of the RNG and entropy sources.
Definition: random.cpp:585
transaction spends a coinbase too early, or violates locktime/sequence locks
std::atomic< int > nStartingHeight
Definition: net.h:991
const char * CFILTER
cfilter is a response to a getcfilters request containing a single compact filter.
Definition: protocol.cpp:42
static const unsigned int MAX_SCRIPT_ELEMENT_SIZE
Definition: script.h:23
void PushAddress(const CAddress &_addr, FastRandomContext &insecure_rand)
Definition: net.h:1169
const char * PONG
The pong message replies to a ping message, proving to the pinging node that the ponging node is stil...
Definition: protocol.cpp:30
unsigned char * begin()
Definition: uint256.h:58
State
The various states a (txhash,peer) pair can be in.
Definition: txrequest.cpp:39
static constexpr std::chrono::seconds RELAY_TX_CACHE_TIME
How long to cache transactions in mapRelay for normal relay.
initial value. Tx has not yet been rejected
const char * WTXIDRELAY
Indicates that a node prefers to relay transactions via wtxid, rather than txid.
Definition: protocol.cpp:47
static constexpr uint32_t MAX_GETCFILTERS_SIZE
Maximum number of compact filters that may be requested with one getcfilters.
bool IsNull() const
Definition: uint256.h:31
const char * HEADERS
The headers message sends one or more block headers to a node which previously requested certain head...
Definition: protocol.cpp:25
const char * GETCFCHECKPT
getcfcheckpt requests evenly spaced compact filter headers, enabling parallelized download and valida...
Definition: protocol.cpp:45
bool IsRelayable() const
Whether this address should be relayed to other peers even if we can't reach it ourselves.
Definition: netaddress.h:217
std::atomic< ServiceFlags > nServices
Definition: net.h:855
static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT
Protect at least this many outbound peers from disconnection due to slow/ behind headers chain...
const std::vector< CTxIn > vin
Definition: transaction.h:276
void SetAddrLocal(const CService &addrLocalIn)
May not be called more than once.
Definition: net.cpp:544
const char * INV
The inv message (inventory message) transmits one or more inventories of objects known to the transmi...
Definition: protocol.cpp:19
CTransactionRef tx
The transaction itself.
Definition: txmempool.h:374
bool AddOrphanTx(const CTransactionRef &tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
bool ForNode(NodeId id, std::function< bool(CNode *pnode)> func)
Definition: net.cpp:3006
CTxMemPoolEntry stores data about the corresponding transaction, as well as data about all in-mempool...
Definition: txmempool.h:78
bool contains(const std::vector< unsigned char > &vKey) const
Definition: bloom.cpp:250
void check(const CCoinsViewCache *pcoins) const
If sanity-checking is turned on, check makes sure the pool is consistent (does not contain two transa...
Definition: txmempool.cpp:620
bool DisconnectNode(const std::string &node)
Definition: net.cpp:2758
int64_t CAmount
Amount in satoshis (Can be negative)
Definition: amount.h:12
static constexpr int ADDRV2_FORMAT
A flag that is ORed into the protocol version to designate that addresses should be serialized in (un...
Definition: netaddress.h:32
uint256 GetBlockHash() const
Definition: chain.h:233
bool IsValid(enum BlockStatus nUpTo=BLOCK_VALID_TRANSACTIONS) const
Check whether this block index entry is valid up to the passed validity level.
Definition: chain.h:282
bool MaybePunishNodeForTx(NodeId nodeid, const TxValidationState &state, const std::string &message="")
Potentially disconnect and discourage a node based on the contents of a TxValidationState object...
bool AcceptToMemoryPool(CTxMemPool &pool, TxValidationState &state, const CTransactionRef &tx, std::list< CTransactionRef > *plTxnReplaced, bool bypass_limits, bool test_accept, CAmount *fee_out)
(try to) add transaction to memory pool plTxnReplaced will be appended to with all transactions repla...
bool IsDiscouraged(const CNetAddr &net_addr)
Return whether net_addr is discouraged.
Definition: banman.cpp:71
bool fSentAddr
Definition: net.h:905
void CheckForStaleTipAndEvictPeers()
Evict extra outbound peers.
bool IsValid() const
Definition: validation.h:119
std::atomic< int64_t > nPingUsecTime
Definition: net.h:1058
BlockFilterType
Definition: blockfilter.h:88
std::string ToString(const T &t)
Locale-independent version of std::to_string.
Definition: string.h:71
std::atomic< int64_t > nMinPingUsecTime
Definition: net.h:1060
int GetMyStartingHeight() const
Definition: net.h:1116
GenTxid ToGenTxid(const CInv &inv)
Convert a TX/WITNESS_TX/WTX CInv to a GenTxid.
Definition: protocol.cpp:235
#define LOCK2(cs1, cs2)
Definition: sync.h:231
initial value. Block has not yet been rejected
bool IsGenBlkMsg() const
Definition: protocol.h:453
static constexpr int64_t EXTRA_PEER_CHECK_INTERVAL
How frequently to check for extra outbound peers and disconnect, in seconds.
ServiceFlags GetLocalServices() const
Definition: net.h:1211
Outputs do not overspend inputs, no double spends, coinbase output ok, no immature coinbase spends...
Definition: chain.h:112
void BlockConnected(const std::shared_ptr< const CBlock > &pblock, const CBlockIndex *pindexConnected) override
Overridden from CValidationInterface.
std::set< CTxMemPoolEntryRef, CompareIteratorByHash > Parents
Definition: txmempool.h:83
bool fClient
Definition: net.h:894
void FinalizeNode(const CNode &node, bool &fUpdateConnectionTime) override
Handle removal of a peer by updating various state and removing it from mapNodeState.
Used to relay blocks as header + vector<merkle branch> to filtered nodes.
Definition: merkleblock.h:124
const char * GETHEADERS
The getheaders message requests a headers message that provides block headers starting from a particu...
Definition: protocol.cpp:23
size_type size() const
Definition: streams.h:293
Invalid by a change to consensus rules more recent than SegWit.
std::unique_ptr< CRollingBloomFilter > m_addr_known
Definition: net.h:995
static constexpr int64_t ORPHAN_TX_EXPIRE_TIME
Expiration time for orphan transactions in seconds.
void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) override
Overridden from CValidationInterface.
size_t nProcessQueueSize
Definition: net.h:867
Scripts & signatures ok. Implies all parents are also at least SCRIPTS.
Definition: chain.h:115
Transaction might have a witness prior to SegWit activation, or witness may have been malleated (whic...
CFeeRate minRelayTxFee
A fee rate smaller than this is considered zero fee (for relaying, mining and transaction creation) ...
Definition: validation.cpp:149
CBlockIndex * pindexBestHeader
Best header we've seen so far (used for getheaders queries' starting points).
Definition: validation.cpp:131
std::vector< CTransactionRef > txn
static RecursiveMutex cs_most_recent_block
void BlockDisconnected(const std::shared_ptr< const CBlock > &block, const CBlockIndex *pindex) override
Notifies listeners of a block being disconnected.
this block was cached as being invalid and we didn't store the reason why
An input of a transaction.
Definition: transaction.h:65
static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS
Minimum blocks required to signal NODE_NETWORK_LIMITED.
static bool HasAllDesirableServiceFlags(ServiceFlags services)
A shortcut for (services & GetDesirableServiceFlags(services)) == GetDesirableServiceFlags(services)...
Definition: protocol.h:345
const uint256 & GetWitnessHash() const
Definition: transaction.h:312
#define LOCK(cs)
Definition: sync.h:230
const char * name
Definition: rest.cpp:41
static constexpr std::chrono::minutes PING_INTERVAL
Time between pings automatically sent out for latency probing and keepalive.
const char * ADDRV2
The addrv2 message relays connection information for peers on the network just like the addr message...
Definition: protocol.cpp:17
const uint256 & GetHash() const
Definition: transaction.h:311
std::string ToString() const
Definition: validation.h:125
the block failed to meet one of our checkpoints
bool IsPeerAddrLocalGood(CNode *pnode)
Definition: net.cpp:201
static const int INIT_PROTO_VERSION
initial proto version, to be increased after version/verack negotiation
Definition: version.h:15
AssertLockHeld(mempool.cs)
bool Contains(const CBlockIndex *pindex) const
Efficiently check whether a block is present in this chain.
Definition: chain.h:402
A combination of a network address (CNetAddr) and a (TCP) port.
Definition: netaddress.h:501
Fast randomness source.
Definition: random.h:119
Transport protocol agnostic message container.
Definition: net.h:734
bool ProcessMessages(CNode *pfrom, std::atomic< bool > &interrupt) override
Process protocol messages received from a given node.
bool g_relay_txes
Definition: net.cpp:114
static constexpr unsigned int INVENTORY_MAX_RECENT_RELAY
The number of most recently announced transactions a peer can request.
static constexpr unsigned int MAX_FEEFILTER_CHANGE_DELAY
Maximum feefilter broadcast delay after significant change.
int64_t PoissonNextSendInbound(int64_t now, int average_interval_seconds)
Attempts to obfuscate tx time through exponentially distributed emitting.
Definition: net.cpp:3019
bool OutboundTargetReached(bool historicalBlockServingLimit)
check if the outbound target is reached if param historicalBlockServingLimit is set true...
Definition: net.cpp:2865
int64_t nPowTargetSpacing
Definition: params.h:79
static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_BASE
Headers download timeout expressed in microseconds Timeout = base + per_header * (expected number of ...
CBlockIndex * Next(const CBlockIndex *pindex) const
Find the successor of a block in this chain, or nullptr if the given index is not found or is the tip...
Definition: chain.h:407
const char * SENDHEADERS
Indicates that a node prefers to receive new block announcements via a "headers" message rather than ...
Definition: protocol.cpp:35
static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER
const char * MEMPOOL
The mempool message requests the TXIDs of transactions that the receiving node has verified as valid ...
Definition: protocol.cpp:28
static const unsigned int DEFAULT_MIN_RELAY_TX_FEE
Default for -minrelaytxfee, minimum relay fee for transactions.
Definition: validation.h:56
bool ActivateBestChain(BlockValidationState &state, const CChainParams &chainparams, std::shared_ptr< const CBlock > pblock)
Find the best known block, and make it the tip of the block chain.
static const unsigned int MAX_HEADERS_RESULTS
Number of headers sent in one getheaders result.
bool RelayAddrsWithConn() const
Definition: net.h:953
void ConsiderEviction(CNode &pto, int64_t time_in_seconds) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
Consider evicting an outbound peer based on the amount of time they&#39;ve been behind our tip...
static const int SHORT_IDS_BLOCKS_VERSION
short-id-based block download starts with this version
Definition: version.h:33
void ForEachNodeThen(Callable &&pre, CallableAfter &&post)
Definition: net.h:298
RecursiveMutex cs_main
Mutex to guard access to validation specific variables, such as reading or changing the chainstate...
Definition: validation.cpp:129
bool IsProxy(const CNetAddr &addr)
Definition: netbase.cpp:773
static constexpr int64_t CHAIN_SYNC_TIMEOUT
Timeout for (unprotected) outbound peers to sync to our chainwork, in seconds.
const std::vector< CTxOut > vout
Definition: transaction.h:277
bool IsManualConn() const
Definition: net.h:932
A CService with information about it as peer.
Definition: protocol.h:360
std::vector< unsigned char > GetKey() const
Definition: netaddress.cpp:959
static int EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
uint256 hash
Definition: protocol.h:459
static const int64_t BLOCK_DOWNLOAD_TIMEOUT_PER_PEER
Additional block download timeout per parallel downloading peer (i.e.
Result GetResult() const
Definition: validation.h:122
static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY
SHA256("main address relay")[0:8].
static constexpr uint32_t MAX_GETCFHEADERS_SIZE
Maximum number of cf hashes that may be requested with one getcfheaders.
bool LookupFilterHashRange(int start_height, const CBlockIndex *stop_index, std::vector< uint256 > &hashes_out) const
Get a range of filter hashes between two heights on a chain.
static const bool DEFAULT_FEEFILTER
Default for using fee filter.
Definition: validation.h:80
const char * ADDR
The addr (IP address) message relays connection information for peers on the network.
Definition: protocol.cpp:16
bool ExpectServicesFromConn() const
Definition: net.h:958
const CMessageHeader::MessageStartChars & MessageStart() const
Definition: chainparams.h:66
int64_t NodeId
Definition: net.h:92
std::map< uint256, COrphanTx > mapOrphanTransactions GUARDED_BY(g_cs_orphans)
Map from txid to orphan transaction record.
Definition: net.h:187
Defined in BIP144.
Definition: protocol.h:419
bool GetNetworkActive() const
Definition: net.h:268
CConnman & m_connman
static const int DISCOURAGEMENT_THRESHOLD
Threshold for marking a node to be discouraged, e.g.
uint64_t Finalize() const
Compute the 64-bit SipHash-2-4 of the data written so far.
Definition: siphash.cpp:76
static const unsigned int INVENTORY_BROADCAST_INTERVAL
Average delay between trickled inventory transmissions in seconds.
const char * FILTERCLEAR
The filterclear message tells the receiving peer to remove a previously-set bloom filter...
Definition: protocol.cpp:34
bool fGetAddr
Definition: net.h:996
std::atomic_bool fImporting
std::string ToString() const
Definition: uint256.cpp:64
std::vector< uint256 > vHave
Definition: block.h:116
NodeId GetId() const
Definition: net.h:1108
const char * NOTFOUND
The notfound message is a reply to a getdata message which requested an object the receiving node doe...
Definition: protocol.cpp:31
CSipHasher GetDeterministicRandomizer(uint64_t id) const
Get a unique deterministic randomizer.
Definition: net.cpp:3035
Parameters that influence chain consensus.
Definition: params.h:46
static const int MAX_CMPCTBLOCK_DEPTH
Maximum depth of blocks we're willing to serve as compact blocks to peers when requested.
An outpoint - a combination of a transaction hash and an index n into its vout.
Definition: transaction.h:26
bool AddNewAddresses(const std::vector< CAddress > &vAddr, const CAddress &addrFrom, int64_t nTimePenalty=0)
Definition: net.cpp:2649
const char * BLOCK
The block message transmits a single serialized block.
Definition: protocol.cpp:26
std::atomic_bool fDisconnect
Definition: net.h:904
std::string strSubVersion
Subversion as sent to the P2P network in version messages.
Definition: net.cpp:118
const char * FEEFILTER
The feefilter message tells the receiving peer not to inv us any txs which do not meet the specified ...
Definition: protocol.cpp:36
const char * GETCFHEADERS
getcfheaders requests a compact filter header and the filter hashes for a range of blocks...
Definition: protocol.cpp:43
CFeeRate GetMinFee(size_t sizelimit) const
The minimum fee to get into the mempool, which may itself not be enough for larger-sized transactions...
Definition: txmempool.cpp:1000
static RPCHelpMan send()
Definition: rpcwallet.cpp:4004
void EvictExtraOutboundPeers(int64_t time_in_seconds) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
If we have extra outbound peers, try to disconnect the one with the oldest block announcement.
void SendBlockTransactions(CNode &pfrom, const CBlock &block, const BlockTransactionsRequest &req)
bool IsMsgWitnessBlk() const
Definition: protocol.h:446
#define WITH_LOCK(cs, code)
Run code while locking a mutex.
Definition: sync.h:257
bool IsRoutable() const
Definition: netaddress.cpp:466
uint64_t GetHash() const
Definition: netaddress.cpp:779
void AddTxAnnouncement(const CNode &node, const GenTxid &gtxid, std::chrono::microseconds current_time) EXCLUSIVE_LOCKS_REQUIRED(const CChainParams & m_chainparams
Register with TxRequestTracker that an INV has been received from a peer.
bool IsWtxid() const
Definition: transaction.h:406
static bool MayHaveUsefulAddressDB(ServiceFlags services)
Checks if a peer with the given service flags may be capable of having a robust address-storage DB...
Definition: protocol.h:354
static CTransactionRef FindTxForGetData(const CTxMemPool &mempool, const CNode &peer, const GenTxid &gtxid, const std::chrono::seconds mempool_req, const std::chrono::seconds now) LOCKS_EXCLUDED(cs_main)
Determine whether or not a peer can request a transaction, and return it (or nullptr if not found or ...
unsigned int GetReceiveFloodSize() const
Definition: net.cpp:2921
static const int MAX_UNCONNECTING_HEADERS
Maximum number of unconnecting headers announcements before DoS score.
void Misbehaving(const NodeId pnode, const int howmuch, const std::string &message)
Increment peer&#39;s misbehavior score.
RecursiveMutex cs_SubVer
Definition: net.h:882
bool CheckIncomingNonce(uint64_t nonce)
Definition: net.cpp:362
unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans)
const CNetMsgMaker msgMaker(pfrom.GetCommonVersion())
const CAddress addr
Definition: net.h:878
const char * GETBLOCKS
The getblocks message requests an inv message that provides block header hashes starting from a parti...
Definition: protocol.cpp:22
bool SendMessages(CNode *pto) override EXCLUSIVE_LOCKS_REQUIRED(pto -> cs_sendProcessing)
Send queued protocol messages to be sent to a give node.
static constexpr int32_t MAX_PEER_TX_ANNOUNCEMENTS
Maximum number of transactions to consider for requesting, per peer.
static constexpr int CFCHECKPT_INTERVAL
Interval between compact filter checkpoints.
bool IsBlockOnlyConn() const
Definition: net.h:936
const int64_t nTimeConnected
Definition: net.h:875
Transaction is missing a witness.
bool ProcessNewBlockHeaders(const std::vector< CBlockHeader > &block, BlockValidationState &state, const CChainParams &chainparams, const CBlockIndex **ppindex=nullptr) LOCKS_EXCLUDED(cs_main)
Process incoming block headers.
ChainstateManager & m_chainman
CTxMemPool & m_mempool
std::atomic_bool fReindex
const char * VERACK
The verack message acknowledges a previously-received version message, informing the connecting node ...
Definition: protocol.cpp:15
bool IsMsgBlk() const
Definition: protocol.h:442
uint256 GetHash() const
Definition: block.cpp:11
CBlockIndex * LookupBlockIndex(const uint256 &hash)
Definition: validation.cpp:173
void RelayTransaction(const uint256 &txid, const uint256 &wtxid, const CConnman &connman)
Relay transaction to every node.
std::atomic< bool > fPingQueued
Definition: net.h:1062
256-bit opaque blob.
Definition: uint256.h:124
invalid by consensus rules (excluding any below reasons)
bool HasWitness() const
Definition: transaction.h:341
bool IsReachable(enum Network net)
Definition: net.cpp:282
CChainState & ChainstateActive()
Please prefer the identical ChainstateManager::ActiveChainstate.
Definition: validation.cpp:106
static const unsigned int MAX_GETDATA_SZ
Limit to avoid sending big packets.
ServiceFlags nServices
Definition: protocol.h:400
#define EXCLUSIVE_LOCKS_REQUIRED(...)
Definition: threadsafety.h:49
std::vector< CTransactionRef > vtx
Definition: block.h:66
const char * CMPCTBLOCK
Contains a CBlockHeaderAndShortTxIDs object - providing a header and list of "short txids"...
Definition: protocol.cpp:38
the block's data didn't match the data committed to by the PoW
CTxMemPool stores valid-according-to-the-current-best-chain transactions that may be included in the ...
Definition: txmempool.h:488
void AddKnownTx(const uint256 &hash)
Definition: net.h:1190
std::atomic< int64_t > nLastTXTime
UNIX epoch time of the last transaction received from this peer that we had not yet seen (e...
Definition: net.h:1050
#define LOCKS_EXCLUDED(...)
Definition: threadsafety.h:48
static const int FEEFILTER_VERSION
"feefilter" tells peers to filter invs to you by fee starts with this version
Definition: version.h:30
const char * VERSION
The version message provides information about the transmitting node to the receiving node at the beg...
Definition: protocol.cpp:14
std::vector< std::pair< unsigned int, uint256 > > vMatchedTxn
Public only for unit testing and relay testing (not relayed).
Definition: merkleblock.h:137
The block chain is a tree shaped structure starting with the genesis block at the root...
Definition: chain.h:137
uint256 hashContinue
Definition: net.h:990
void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds)
static const unsigned int BLOCK_DOWNLOAD_WINDOW
Size of the "block download window": how far ahead of our current height do we fetch? Larger windows tolerate larger download speed differences between peer, but increase the potential degree of disordering of blocks on disk (which make reindexing and pruning harder).
static bool AlreadyHaveTx(const GenTxid &gtxid, const CTxMemPool &mempool) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
static const int MIN_PEER_PROTO_VERSION
disconnect from peers older than this proto version
Definition: version.h:18
static const unsigned int MAX_INV_SZ
The maximum number of entries in an 'inv' protocol message.
static const int PROTOCOL_VERSION
network protocol versioning
Definition: version.h:12
static const unsigned int MAX_STANDARD_TX_WEIGHT
The maximum weight for transactions we're willing to relay/mine.
Definition: policy.h:24
bool IsTxAvailable(size_t index) const
static const unsigned int DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN
Default number of orphan+recently-replaced txn to keep around for block reconstruction.
A block this one builds on is invalid.
static constexpr size_t MAX_ADDR_TO_SEND
The maximum number of addresses from our addrman to return in response to a getaddr message...
Definition: net.h:56
std::string GetArg(const std::string &strArg, const std::string &strDefault) const
Return string argument or default value.
Definition: system.cpp:467
CBlockIndex * FindForkInGlobalIndex(const CChain &chain, const CBlockLocator &locator)
Find the last common block between the parameter chain and a locator.
Definition: validation.cpp:180
bool fLogIPs
Definition: logging.cpp:35
int64_t GetAdjustedTime()
Definition: timedata.cpp:34