net_processing.cpp
1 // Copyright (c) 2009-2010 Satoshi Nakamoto
2 // Copyright (c) 2009-2020 The Bitcoin Core developers
3 // Distributed under the MIT software license, see the accompanying
4 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
5 
6 #include <net_processing.h>
7 
8 #include <addrman.h>
9 #include <banman.h>
10 #include <blockencodings.h>
11 #include <blockfilter.h>
12 #include <chainparams.h>
13 #include <consensus/validation.h>
14 #include <hash.h>
15 #include <index/blockfilterindex.h>
16 #include <merkleblock.h>
17 #include <netbase.h>
18 #include <netmessagemaker.h>
19 #include <policy/fees.h>
20 #include <policy/policy.h>
21 #include <primitives/block.h>
22 #include <primitives/transaction.h>
23 #include <random.h>
24 #include <reverse_iterator.h>
25 #include <scheduler.h>
26 #include <tinyformat.h>
27 #include <txmempool.h>
28 #include <util/check.h> // For NDEBUG compile time check
29 #include <util/strencodings.h>
30 #include <util/system.h>
31 #include <validation.h>
32 
33 #include <memory>
34 #include <typeinfo>
35 
37 static constexpr int64_t ORPHAN_TX_EXPIRE_TIME = 20 * 60;
39 static constexpr int64_t ORPHAN_TX_EXPIRE_INTERVAL = 5 * 60;
41 static constexpr std::chrono::seconds RELAY_TX_CACHE_TIME{15 * 60};
44 static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_BASE = 15 * 60 * 1000000; // 15 minutes
45 static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1000; // 1ms/header
49 static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT = 4;
51 static constexpr int64_t CHAIN_SYNC_TIMEOUT = 20 * 60; // 20 minutes
53 static constexpr int64_t STALE_CHECK_INTERVAL = 10 * 60; // 10 minutes
55 static constexpr int64_t EXTRA_PEER_CHECK_INTERVAL = 45;
57 static constexpr int64_t MINIMUM_CONNECT_TIME = 30;
59 static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL;
62 static constexpr int STALE_RELAY_AGE_LIMIT = 30 * 24 * 60 * 60;
65 static constexpr int HISTORICAL_BLOCK_AGE = 7 * 24 * 60 * 60;
67 static constexpr std::chrono::minutes PING_INTERVAL{2};
69 static const unsigned int MAX_LOCATOR_SZ = 101;
71 static const unsigned int MAX_INV_SZ = 50000;
73 static constexpr int32_t MAX_PEER_TX_IN_FLIGHT = 100;
75 static constexpr int32_t MAX_PEER_TX_ANNOUNCEMENTS = 2 * MAX_INV_SZ;
77 static constexpr std::chrono::microseconds INBOUND_PEER_TX_DELAY{std::chrono::seconds{2}};
79 static constexpr std::chrono::microseconds GETDATA_TX_INTERVAL{std::chrono::seconds{60}};
81 static constexpr std::chrono::microseconds MAX_GETDATA_RANDOM_DELAY{std::chrono::seconds{2}};
83 static constexpr std::chrono::microseconds TX_EXPIRY_INTERVAL{GETDATA_TX_INTERVAL * 10};
85 "To preserve security, MAX_GETDATA_RANDOM_DELAY should not exceed INBOUND_PEER_DELAY");
87 static const unsigned int MAX_GETDATA_SZ = 1000;
89 static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16;
91 static const unsigned int BLOCK_STALLING_TIMEOUT = 2;
94 static const unsigned int MAX_HEADERS_RESULTS = 2000;
97 static const int MAX_CMPCTBLOCK_DEPTH = 5;
99 static const int MAX_BLOCKTXN_DEPTH = 10;
104 static const unsigned int BLOCK_DOWNLOAD_WINDOW = 1024;
106 static const int64_t BLOCK_DOWNLOAD_TIMEOUT_BASE = 1000000;
108 static const int64_t BLOCK_DOWNLOAD_TIMEOUT_PER_PEER = 500000;
110 static const unsigned int MAX_BLOCKS_TO_ANNOUNCE = 8;
112 static const int MAX_UNCONNECTING_HEADERS = 10;
114 static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS = 288;
116 static constexpr std::chrono::hours AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL{24};
118 static constexpr std::chrono::seconds AVG_ADDRESS_BROADCAST_INTERVAL{30};
121 static const unsigned int INVENTORY_BROADCAST_INTERVAL = 5;
124 static constexpr unsigned int INVENTORY_BROADCAST_MAX = 7 * INVENTORY_BROADCAST_INTERVAL;
126 static constexpr unsigned int AVG_FEEFILTER_BROADCAST_INTERVAL = 10 * 60;
128 static constexpr unsigned int MAX_FEEFILTER_CHANGE_DELAY = 5 * 60;
130 static constexpr uint32_t MAX_GETCFILTERS_SIZE = 1000;
132 static constexpr uint32_t MAX_GETCFHEADERS_SIZE = 2000;
133 
134 struct COrphanTx {
135  // When modifying, adapt the copy of this definition in tests/DoS_tests.
136  CTransactionRef tx;
137  NodeId fromPeer;
138  int64_t nTimeExpire;
139  size_t list_pos;
140 };
141 RecursiveMutex g_cs_orphans;
142 std::map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(g_cs_orphans);
143 
144 void EraseOrphansFor(NodeId peer);
145 
147 void Misbehaving(NodeId nodeid, int howmuch, const std::string& message="") EXCLUSIVE_LOCKS_REQUIRED(cs_main);
148 
149 // Internal stuff
150 namespace {
152  int nSyncStarted GUARDED_BY(cs_main) = 0;
153 
160  std::map<uint256, std::pair<NodeId, bool>> mapBlockSource GUARDED_BY(cs_main);
161 
181  std::unique_ptr<CRollingBloomFilter> recentRejects GUARDED_BY(cs_main);
182  uint256 hashRecentRejectsChainTip GUARDED_BY(cs_main);
183 
184  /*
185  * Filter for transactions that have been recently confirmed.
186  * We use this to avoid requesting transactions that have already been
187  * confirmed.
188  */
189  Mutex g_cs_recent_confirmed_transactions;
190  std::unique_ptr<CRollingBloomFilter> g_recent_confirmed_transactions GUARDED_BY(g_cs_recent_confirmed_transactions);
191 
193  struct QueuedBlock {
194  uint256 hash;
195  const CBlockIndex* pindex;
196  bool fValidatedHeaders;
197  std::unique_ptr<PartiallyDownloadedBlock> partialBlock;
198  };
199  std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> > mapBlocksInFlight GUARDED_BY(cs_main);
200 
202  std::list<NodeId> lNodesAnnouncingHeaderAndIDs GUARDED_BY(cs_main);
203 
205  int nPreferredDownload GUARDED_BY(cs_main) = 0;
206 
208  int nPeersWithValidatedDownloads GUARDED_BY(cs_main) = 0;
209 
211  int g_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0;
212 
214  std::atomic<int64_t> g_last_tip_update(0);
215 
217  typedef std::map<uint256, CTransactionRef> MapRelay;
218  MapRelay mapRelay GUARDED_BY(cs_main);
220  std::deque<std::pair<int64_t, MapRelay::iterator>> vRelayExpiration GUARDED_BY(cs_main);
221 
222  struct IteratorComparator
223  {
224  template<typename I>
225  bool operator()(const I& a, const I& b) const
226  {
227  return &(*a) < &(*b);
228  }
229  };
230  std::map<COutPoint, std::set<std::map<uint256, COrphanTx>::iterator, IteratorComparator>> mapOrphanTransactionsByPrev GUARDED_BY(g_cs_orphans);
231 
232  std::vector<std::map<uint256, COrphanTx>::iterator> g_orphan_list GUARDED_BY(g_cs_orphans);
233 
234  static size_t vExtraTxnForCompactIt GUARDED_BY(g_cs_orphans) = 0;
235  static std::vector<std::pair<uint256, CTransactionRef>> vExtraTxnForCompact GUARDED_BY(g_cs_orphans);
236 } // namespace
237 
238 namespace {
245 struct CNodeState {
247  const CService address;
249  bool fCurrentlyConnected;
251  int nMisbehavior;
253  bool m_should_discourage;
255  const std::string name;
257  const CBlockIndex *pindexBestKnownBlock;
259  uint256 hashLastUnknownBlock;
261  const CBlockIndex *pindexLastCommonBlock;
263  const CBlockIndex *pindexBestHeaderSent;
265  int nUnconnectingHeaders;
267  bool fSyncStarted;
269  int64_t nHeadersSyncTimeout;
271  int64_t nStallingSince;
272  std::list<QueuedBlock> vBlocksInFlight;
274  int64_t nDownloadingSince;
275  int nBlocksInFlight;
276  int nBlocksInFlightValidHeaders;
278  bool fPreferredDownload;
280  bool fPreferHeaders;
282  bool fPreferHeaderAndIDs;
288  bool fProvidesHeaderAndIDs;
290  bool fHaveWitness;
292  bool fWantsCmpctWitness;
297  bool fSupportsDesiredCmpctVersion;
298 
313  struct ChainSyncTimeoutState {
315  int64_t m_timeout;
317  const CBlockIndex * m_work_header;
319  bool m_sent_getheaders;
321  bool m_protect;
322  };
323 
324  ChainSyncTimeoutState m_chain_sync;
325 
327  int64_t m_last_block_announcement;
328 
329  /*
330  * State associated with transaction download.
331  *
332  * Tx download algorithm:
333  *
334  * When inv comes in, queue up (process_time, txid) inside the peer's
335  * CNodeState (m_tx_process_time) as long as m_tx_announced for the peer
336  * isn't too big (MAX_PEER_TX_ANNOUNCEMENTS).
337  *
338  * The process_time for a transaction is set to nNow for outbound peers,
339  * nNow + 2 seconds for inbound peers. This is the time at which we'll
340  * consider trying to request the transaction from the peer in
341  * SendMessages(). The delay for inbound peers is to allow outbound peers
342  * a chance to announce before we request from inbound peers, to prevent
343  * an adversary from using inbound connections to blind us to a
344  * transaction (InvBlock).
345  *
346  * When we call SendMessages() for a given peer,
347  * we will loop over the transactions in m_tx_process_time, looking
348  * at the transactions whose process_time <= nNow. We'll request each
349  * such transaction that we don't have already and that hasn't been
350  * requested from another peer recently, up until we hit the
351  * MAX_PEER_TX_IN_FLIGHT limit for the peer. Then we'll update
352  * g_already_asked_for for each requested txid, storing the time of the
353  * GETDATA request. We use g_already_asked_for to coordinate transaction
354  * requests amongst our peers.
355  *
356  * For transactions that we still need but we have already recently
357  * requested from some other peer, we'll reinsert (process_time, txid)
358  * back into the peer's m_tx_process_time at the point in the future at
359  * which the most recent GETDATA request would time out (ie
360  * GETDATA_TX_INTERVAL + the request time stored in g_already_asked_for).
361  * We add an additional delay for inbound peers, again to prefer
362  * attempting download from outbound peers first.
363  * We also add an extra small random delay up to 2 seconds
364  * to avoid biasing some peers over others. (e.g., due to fixed ordering
365  * of peer processing in ThreadMessageHandler).
366  *
367  * When we receive a transaction from a peer, we remove the txid from the
368  * peer's m_tx_in_flight set and from their recently announced set
369  * (m_tx_announced). We also clear g_already_asked_for for that entry, so
370  * that if somehow the transaction is not accepted but also not added to
371  * the reject filter, then we will eventually redownload from other
372  * peers.
373  */
374  struct TxDownloadState {
375  /* Track when to attempt download of announced transactions (process
376  * time in micros -> txid)
377  */
378  std::multimap<std::chrono::microseconds, uint256> m_tx_process_time;
379 
381  std::set<uint256> m_tx_announced;
382 
384  std::map<uint256, std::chrono::microseconds> m_tx_in_flight;
385 
387  std::chrono::microseconds m_check_expiry_timer{0};
388  };
389 
390  TxDownloadState m_tx_download;
391 
393  bool m_is_inbound;
394 
396  bool m_is_manual_connection;
397 
398  CNodeState(CAddress addrIn, std::string addrNameIn, bool is_inbound, bool is_manual) :
399  address(addrIn), name(std::move(addrNameIn)), m_is_inbound(is_inbound),
400  m_is_manual_connection (is_manual)
401  {
402  fCurrentlyConnected = false;
403  nMisbehavior = 0;
404  m_should_discourage = false;
405  pindexBestKnownBlock = nullptr;
406  hashLastUnknownBlock.SetNull();
407  pindexLastCommonBlock = nullptr;
408  pindexBestHeaderSent = nullptr;
409  nUnconnectingHeaders = 0;
410  fSyncStarted = false;
411  nHeadersSyncTimeout = 0;
412  nStallingSince = 0;
413  nDownloadingSince = 0;
414  nBlocksInFlight = 0;
415  nBlocksInFlightValidHeaders = 0;
416  fPreferredDownload = false;
417  fPreferHeaders = false;
418  fPreferHeaderAndIDs = false;
419  fProvidesHeaderAndIDs = false;
420  fHaveWitness = false;
421  fWantsCmpctWitness = false;
422  fSupportsDesiredCmpctVersion = false;
423  m_chain_sync = { 0, nullptr, false, false };
424  m_last_block_announcement = 0;
425  }
426 };
427 
428 // Keeps track of the time (in microseconds) when transactions were requested last time
429 limitedmap<uint256, std::chrono::microseconds> g_already_asked_for GUARDED_BY(cs_main)(MAX_INV_SZ);
430 
432 static std::map<NodeId, CNodeState> mapNodeState GUARDED_BY(cs_main);
433 
434 static CNodeState *State(NodeId pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
435  std::map<NodeId, CNodeState>::iterator it = mapNodeState.find(pnode);
436  if (it == mapNodeState.end())
437  return nullptr;
438  return &it->second;
439 }
440 
441 static void UpdatePreferredDownload(const CNode& node, CNodeState* state) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
442 {
443  nPreferredDownload -= state->fPreferredDownload;
444 
445  // Whether this node should be marked as a preferred download node.
446  state->fPreferredDownload = (!node.fInbound || node.HasPermission(PF_NOBAN)) && !node.fOneShot && !node.fClient;
447 
448  nPreferredDownload += state->fPreferredDownload;
449 }
450 
451 static void PushNodeVersion(CNode& pnode, CConnman* connman, int64_t nTime)
452 {
453  // Note that pnode->GetLocalServices() is a reflection of the local
454  // services we were offering when the CNode object was created for this
455  // peer.
456  ServiceFlags nLocalNodeServices = pnode.GetLocalServices();
457  uint64_t nonce = pnode.GetLocalNonce();
458  int nNodeStartingHeight = pnode.GetMyStartingHeight();
459  NodeId nodeid = pnode.GetId();
460  CAddress addr = pnode.addr;
461 
462  CAddress addrYou = (addr.IsRoutable() && !IsProxy(addr) ? addr : CAddress(CService(), addr.nServices));
463  CAddress addrMe = CAddress(CService(), nLocalNodeServices);
464 
465  connman->PushMessage(&pnode, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERSION, PROTOCOL_VERSION, (uint64_t)nLocalNodeServices, nTime, addrYou, addrMe,
466  nonce, strSubVersion, nNodeStartingHeight, ::g_relay_txes && pnode.m_tx_relay != nullptr));
467 
468  if (fLogIPs) {
469  LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, us=%s, them=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), addrYou.ToString(), nodeid);
470  } else {
471  LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, us=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), nodeid);
472  }
473 }
474 
475 // Returns a bool indicating whether we requested this block.
476 // Also used if a block was /not/ received and timed out or started with another peer
477 static bool MarkBlockAsReceived(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
478  std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
479  if (itInFlight != mapBlocksInFlight.end()) {
480  CNodeState *state = State(itInFlight->second.first);
481  assert(state != nullptr);
482  state->nBlocksInFlightValidHeaders -= itInFlight->second.second->fValidatedHeaders;
483  if (state->nBlocksInFlightValidHeaders == 0 && itInFlight->second.second->fValidatedHeaders) {
484  // Last validated block on the queue was received.
485  nPeersWithValidatedDownloads--;
486  }
487  if (state->vBlocksInFlight.begin() == itInFlight->second.second) {
488  // First block on the queue was received, update the start download time for the next one
489  state->nDownloadingSince = std::max(state->nDownloadingSince, GetTimeMicros());
490  }
491  state->vBlocksInFlight.erase(itInFlight->second.second);
492  state->nBlocksInFlight--;
493  state->nStallingSince = 0;
494  mapBlocksInFlight.erase(itInFlight);
495  return true;
496  }
497  return false;
498 }
499 
500 // returns false, still setting pit, if the block was already in flight from the same peer
501 // pit will only be valid as long as the same cs_main lock is being held
502 static bool MarkBlockAsInFlight(CTxMemPool& mempool, NodeId nodeid, const uint256& hash, const CBlockIndex* pindex = nullptr, std::list<QueuedBlock>::iterator** pit = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
503  CNodeState *state = State(nodeid);
504  assert(state != nullptr);
505 
506  // Short-circuit most stuff in case it is from the same node
507  std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
508  if (itInFlight != mapBlocksInFlight.end() && itInFlight->second.first == nodeid) {
509  if (pit) {
510  *pit = &itInFlight->second.second;
511  }
512  return false;
513  }
514 
515  // Make sure it's not listed somewhere already.
516  MarkBlockAsReceived(hash);
517 
518  std::list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(),
519  {hash, pindex, pindex != nullptr, std::unique_ptr<PartiallyDownloadedBlock>(pit ? new PartiallyDownloadedBlock(&mempool) : nullptr)});
520  state->nBlocksInFlight++;
521  state->nBlocksInFlightValidHeaders += it->fValidatedHeaders;
522  if (state->nBlocksInFlight == 1) {
523  // We're starting a block download (batch) from this peer.
524  state->nDownloadingSince = GetTimeMicros();
525  }
526  if (state->nBlocksInFlightValidHeaders == 1 && pindex != nullptr) {
527  nPeersWithValidatedDownloads++;
528  }
529  itInFlight = mapBlocksInFlight.insert(std::make_pair(hash, std::make_pair(nodeid, it))).first;
530  if (pit)
531  *pit = &itInFlight->second.second;
532  return true;
533 }
534 
536 static void ProcessBlockAvailability(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
537  CNodeState *state = State(nodeid);
538  assert(state != nullptr);
539 
540  if (!state->hashLastUnknownBlock.IsNull()) {
541  const CBlockIndex* pindex = LookupBlockIndex(state->hashLastUnknownBlock);
542  if (pindex && pindex->nChainWork > 0) {
543  if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
544  state->pindexBestKnownBlock = pindex;
545  }
546  state->hashLastUnknownBlock.SetNull();
547  }
548  }
549 }
550 
552 static void UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
553  CNodeState *state = State(nodeid);
554  assert(state != nullptr);
555 
556  ProcessBlockAvailability(nodeid);
557 
558  const CBlockIndex* pindex = LookupBlockIndex(hash);
559  if (pindex && pindex->nChainWork > 0) {
560  // An actually better block was announced.
561  if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
562  state->pindexBestKnownBlock = pindex;
563  }
564  } else {
565  // An unknown block was announced; just assume that the latest one is the best one.
566  state->hashLastUnknownBlock = hash;
567  }
568 }
569 
576 static void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid, CConnman* connman) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
577 {
578  AssertLockHeld(cs_main);
579  CNodeState* nodestate = State(nodeid);
580  if (!nodestate || !nodestate->fSupportsDesiredCmpctVersion) {
581  // Never ask from peers who can't provide witnesses.
582  return;
583  }
584  if (nodestate->fProvidesHeaderAndIDs) {
585  for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin(); it != lNodesAnnouncingHeaderAndIDs.end(); it++) {
586  if (*it == nodeid) {
587  lNodesAnnouncingHeaderAndIDs.erase(it);
588  lNodesAnnouncingHeaderAndIDs.push_back(nodeid);
589  return;
590  }
591  }
592  connman->ForNode(nodeid, [connman](CNode* pfrom){
593  AssertLockHeld(cs_main);
594  uint64_t nCMPCTBLOCKVersion = (pfrom->GetLocalServices() & NODE_WITNESS) ? 2 : 1;
595  if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
596  // As per BIP152, we only get 3 of our peers to announce
597  // blocks using compact encodings.
598  connman->ForNode(lNodesAnnouncingHeaderAndIDs.front(), [connman, nCMPCTBLOCKVersion](CNode* pnodeStop){
599  AssertLockHeld(cs_main);
600  connman->PushMessage(pnodeStop, CNetMsgMaker(pnodeStop->GetSendVersion()).Make(NetMsgType::SENDCMPCT, /*fAnnounceUsingCMPCTBLOCK=*/false, nCMPCTBLOCKVersion));
601  return true;
602  });
603  lNodesAnnouncingHeaderAndIDs.pop_front();
604  }
605  connman->PushMessage(pfrom, CNetMsgMaker(pfrom->GetSendVersion()).Make(NetMsgType::SENDCMPCT, /*fAnnounceUsingCMPCTBLOCK=*/true, nCMPCTBLOCKVersion));
606  lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
607  return true;
608  });
609  }
610 }
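// Illustrative note on the rotation above (an example, not part of the original
// comments): if peers A, B and C are currently our high-bandwidth compact block
// announcers and peer D qualifies, the oldest entry (A) is sent SENDCMPCT with
// fAnnounceUsingCMPCTBLOCK=false and D is sent SENDCMPCT with
// fAnnounceUsingCMPCTBLOCK=true, keeping the announcing set at 3 peers as per BIP152.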
611 
612 static bool TipMayBeStale(const Consensus::Params &consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
613 {
614  AssertLockHeld(cs_main);
615  if (g_last_tip_update == 0) {
616  g_last_tip_update = GetTime();
617  }
618  return g_last_tip_update < GetTime() - consensusParams.nPowTargetSpacing * 3 && mapBlocksInFlight.empty();
619 }
620 
621 static bool CanDirectFetch(const Consensus::Params &consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
622 {
623  return ::ChainActive().Tip()->GetBlockTime() > GetAdjustedTime() - consensusParams.nPowTargetSpacing * 20;
624 }
625 
626 static bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
627 {
628  if (state->pindexBestKnownBlock && pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight))
629  return true;
630  if (state->pindexBestHeaderSent && pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight))
631  return true;
632  return false;
633 }
634 
637 static void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller, const Consensus::Params& consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
638 {
639  if (count == 0)
640  return;
641 
642  vBlocks.reserve(vBlocks.size() + count);
643  CNodeState *state = State(nodeid);
644  assert(state != nullptr);
645 
646  // Make sure pindexBestKnownBlock is up to date, we'll need it.
647  ProcessBlockAvailability(nodeid);
648 
649  if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->nChainWork < ::ChainActive().Tip()->nChainWork || state->pindexBestKnownBlock->nChainWork < nMinimumChainWork) {
650  // This peer has nothing interesting.
651  return;
652  }
653 
654  if (state->pindexLastCommonBlock == nullptr) {
655  // Bootstrap quickly by guessing a parent of our best tip is the forking point.
656  // Guessing wrong in either direction is not a problem.
657  state->pindexLastCommonBlock = ::ChainActive()[std::min(state->pindexBestKnownBlock->nHeight, ::ChainActive().Height())];
658  }
659 
660  // If the peer reorganized, our previous pindexLastCommonBlock may not be an ancestor
661  // of its current tip anymore. Go back enough to fix that.
662  state->pindexLastCommonBlock = LastCommonAncestor(state->pindexLastCommonBlock, state->pindexBestKnownBlock);
663  if (state->pindexLastCommonBlock == state->pindexBestKnownBlock)
664  return;
665 
666  std::vector<const CBlockIndex*> vToFetch;
667  const CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
668  // Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last
669  // linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to
670  // download that next block if the window were 1 larger.
671  int nWindowEnd = state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW;
672  int nMaxHeight = std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
673  NodeId waitingfor = -1;
674  while (pindexWalk->nHeight < nMaxHeight) {
675  // Read up to 128 (or more, if more blocks than that are needed) successors of pindexWalk (towards
676  // pindexBestKnownBlock) into vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as expensive
677  // as iterating over ~100 CBlockIndex* entries anyway.
678  int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight, std::max<int>(count - vBlocks.size(), 128));
679  vToFetch.resize(nToFetch);
680  pindexWalk = state->pindexBestKnownBlock->GetAncestor(pindexWalk->nHeight + nToFetch);
681  vToFetch[nToFetch - 1] = pindexWalk;
682  for (unsigned int i = nToFetch - 1; i > 0; i--) {
683  vToFetch[i - 1] = vToFetch[i]->pprev;
684  }
685 
686  // Iterate over those blocks in vToFetch (in forward direction), adding the ones that
687  // are not yet downloaded and not in flight to vBlocks. In the meantime, update
688  // pindexLastCommonBlock as long as all ancestors are already downloaded, or if it's
689  // already part of our chain (and therefore don't need it even if pruned).
690  for (const CBlockIndex* pindex : vToFetch) {
691  if (!pindex->IsValid(BLOCK_VALID_TREE)) {
692  // We consider the chain that this peer is on invalid.
693  return;
694  }
695  if (!State(nodeid)->fHaveWitness && IsWitnessEnabled(pindex->pprev, consensusParams)) {
696  // We wouldn't download this block or its descendants from this peer.
697  return;
698  }
699  if (pindex->nStatus & BLOCK_HAVE_DATA || ::ChainActive().Contains(pindex)) {
700  if (pindex->HaveTxsDownloaded())
701  state->pindexLastCommonBlock = pindex;
702  } else if (mapBlocksInFlight.count(pindex->GetBlockHash()) == 0) {
703  // The block is not already downloaded, and not yet in flight.
704  if (pindex->nHeight > nWindowEnd) {
705  // We reached the end of the window.
706  if (vBlocks.size() == 0 && waitingfor != nodeid) {
707  // We aren't able to fetch anything, but we would be if the download window was one larger.
708  nodeStaller = waitingfor;
709  }
710  return;
711  }
712  vBlocks.push_back(pindex);
713  if (vBlocks.size() == count) {
714  return;
715  }
716  } else if (waitingfor == -1) {
717  // This is the first already-in-flight block.
718  waitingfor = mapBlocksInFlight[pindex->GetBlockHash()].first;
719  }
720  }
721  }
722 }
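// Worked example for the window logic above (hypothetical heights): with
// BLOCK_DOWNLOAD_WINDOW = 1024, if our last common block with a peer is at
// height 100000 we will fetch from that peer only up to height 101024; height
// 101025 is examined solely to detect that a stalling peer is blocking the
// window, in which case that peer is reported via nodeStaller.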
723 
724 void EraseTxRequest(const uint256& txid) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
725 {
726  g_already_asked_for.erase(txid);
727 }
728 
729 std::chrono::microseconds GetTxRequestTime(const uint256& txid) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
730 {
731  auto it = g_already_asked_for.find(txid);
732  if (it != g_already_asked_for.end()) {
733  return it->second;
734  }
735  return {};
736 }
737 
738 void UpdateTxRequestTime(const uint256& txid, std::chrono::microseconds request_time) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
739 {
740  auto it = g_already_asked_for.find(txid);
741  if (it == g_already_asked_for.end()) {
742  g_already_asked_for.insert(std::make_pair(txid, request_time));
743  } else {
744  g_already_asked_for.update(it, request_time);
745  }
746 }
747 
748 std::chrono::microseconds CalculateTxGetDataTime(const uint256& txid, std::chrono::microseconds current_time, bool use_inbound_delay) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
749 {
750  std::chrono::microseconds process_time;
751  const auto last_request_time = GetTxRequestTime(txid);
752  // First time requesting this tx
753  if (last_request_time.count() == 0) {
754  process_time = current_time;
755  } else {
756  // Randomize the delay to avoid biasing some peers over others (such as due to
757  // fixed ordering of peer processing in ThreadMessageHandler)
758  process_time = last_request_time + GETDATA_TX_INTERVAL + GetRandMicros(MAX_GETDATA_RANDOM_DELAY);
759  }
760 
761  // We delay processing announcements from inbound peers
762  if (use_inbound_delay) process_time += INBOUND_PEER_TX_DELAY;
763 
764  return process_time;
765 }
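// For example, using the constants defined above: a transaction announced by an
// outbound peer that we have never requested is scheduled immediately; the same
// announcement from an inbound peer is scheduled INBOUND_PEER_TX_DELAY (2s)
// later. If the transaction was already requested from another peer, the retry
// is scheduled GETDATA_TX_INTERVAL (60s) plus a random delay of up to
// MAX_GETDATA_RANDOM_DELAY (2s) after that earlier request, again plus 2s if
// the announcing peer is inbound.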
766 
767 void RequestTx(CNodeState* state, const uint256& txid, std::chrono::microseconds current_time) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
768 {
769  CNodeState::TxDownloadState& peer_download_state = state->m_tx_download;
770  if (peer_download_state.m_tx_announced.size() >= MAX_PEER_TX_ANNOUNCEMENTS ||
771  peer_download_state.m_tx_process_time.size() >= MAX_PEER_TX_ANNOUNCEMENTS ||
772  peer_download_state.m_tx_announced.count(txid)) {
773  // Too many queued announcements from this peer, or we already have
774  // this announcement
775  return;
776  }
777  peer_download_state.m_tx_announced.insert(txid);
778 
779  // Calculate the time to try requesting this transaction. Use
780  // fPreferredDownload as a proxy for outbound peers.
781  const auto process_time = CalculateTxGetDataTime(txid, current_time, !state->fPreferredDownload);
782 
783  peer_download_state.m_tx_process_time.emplace(process_time, txid);
784 }
785 
786 } // namespace
787 
788 // This function is used for testing the stale tip eviction logic, see
789 // denialofservice_tests.cpp
790 void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds)
791 {
792  LOCK(cs_main);
793  CNodeState *state = State(node);
794  if (state) state->m_last_block_announcement = time_in_seconds;
795 }
796 
797 // Returns true for outbound peers, excluding manual connections, feelers, and
798 // one-shots.
799 static bool IsOutboundDisconnectionCandidate(const CNode& node)
800 {
801  return !(node.fInbound || node.m_manual_connection || node.fFeeler || node.fOneShot);
802 }
803 
803 
804 void PeerLogicValidation::InitializeNode(CNode *pnode) {
805  CAddress addr = pnode->addr;
806  std::string addrName = pnode->GetAddrName();
807  NodeId nodeid = pnode->GetId();
808  {
809  LOCK(cs_main);
810  mapNodeState.emplace_hint(mapNodeState.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(addr, std::move(addrName), pnode->fInbound, pnode->m_manual_connection));
811  }
812  if(!pnode->fInbound)
813  PushNodeVersion(*pnode, connman, GetTime());
814 }
815 
816 void PeerLogicValidation::ReattemptInitialBroadcast(CScheduler& scheduler) const
817 {
818  std::set<uint256> unbroadcast_txids = m_mempool.GetUnbroadcastTxs();
819 
820  for (const uint256& txid : unbroadcast_txids) {
821  // Sanity check: all unbroadcast txns should exist in the mempool
822  if (m_mempool.exists(txid)) {
823  RelayTransaction(txid, *connman);
824  } else {
825  m_mempool.RemoveUnbroadcastTx(txid, true);
826  }
827  }
828 
829  // Schedule next run for 10-15 minutes in the future.
830  // We add randomness on every cycle to avoid the possibility of P2P fingerprinting.
831  const std::chrono::milliseconds delta = std::chrono::minutes{10} + GetRandMillis(std::chrono::minutes{5});
832  scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); }, delta);
833 }
834 
835 void PeerLogicValidation::FinalizeNode(NodeId nodeid, bool& fUpdateConnectionTime) {
836  fUpdateConnectionTime = false;
837  LOCK(cs_main);
838  CNodeState *state = State(nodeid);
839  assert(state != nullptr);
840 
841  if (state->fSyncStarted)
842  nSyncStarted--;
843 
844  if (state->nMisbehavior == 0 && state->fCurrentlyConnected) {
845  fUpdateConnectionTime = true;
846  }
847 
848  for (const QueuedBlock& entry : state->vBlocksInFlight) {
849  mapBlocksInFlight.erase(entry.hash);
850  }
851  EraseOrphansFor(nodeid);
852  nPreferredDownload -= state->fPreferredDownload;
853  nPeersWithValidatedDownloads -= (state->nBlocksInFlightValidHeaders != 0);
854  assert(nPeersWithValidatedDownloads >= 0);
855  g_outbound_peers_with_protect_from_disconnect -= state->m_chain_sync.m_protect;
856  assert(g_outbound_peers_with_protect_from_disconnect >= 0);
857 
858  mapNodeState.erase(nodeid);
859 
860  if (mapNodeState.empty()) {
861  // Do a consistency check after the last peer is removed.
862  assert(mapBlocksInFlight.empty());
863  assert(nPreferredDownload == 0);
864  assert(nPeersWithValidatedDownloads == 0);
865  assert(g_outbound_peers_with_protect_from_disconnect == 0);
866  }
867  LogPrint(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid);
868 }
869 
870 bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) {
871  LOCK(cs_main);
872  CNodeState *state = State(nodeid);
873  if (state == nullptr)
874  return false;
875  stats.nMisbehavior = state->nMisbehavior;
876  stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1;
877  stats.nCommonHeight = state->pindexLastCommonBlock ? state->pindexLastCommonBlock->nHeight : -1;
878  for (const QueuedBlock& queue : state->vBlocksInFlight) {
879  if (queue.pindex)
880  stats.vHeightInFlight.push_back(queue.pindex->nHeight);
881  }
882  return true;
883 }
884 
886 //
887 // mapOrphanTransactions
888 //
889 
890 static void AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
891 {
892  size_t max_extra_txn = gArgs.GetArg("-blockreconstructionextratxn", DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN);
893  if (max_extra_txn <= 0)
894  return;
895  if (!vExtraTxnForCompact.size())
896  vExtraTxnForCompact.resize(max_extra_txn);
897  vExtraTxnForCompact[vExtraTxnForCompactIt] = std::make_pair(tx->GetWitnessHash(), tx);
898  vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % max_extra_txn;
899 }
900 
901 bool AddOrphanTx(const CTransactionRef& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
902 {
903  const uint256& hash = tx->GetHash();
904  if (mapOrphanTransactions.count(hash))
905  return false;
906 
907  // Ignore big transactions, to avoid a
908  // send-big-orphans memory exhaustion attack. If a peer has a legitimate
909  // large transaction with a missing parent then we assume
910  // it will rebroadcast it later, after the parent transaction(s)
911  // have been mined or received.
912  // 100 orphans, each of which is at most 100,000 bytes big, is
913  // at most 10 megabytes of orphans, plus somewhat more for the by-prev index (in the worst case):
914  unsigned int sz = GetTransactionWeight(*tx);
915  if (sz > MAX_STANDARD_TX_WEIGHT)
916  {
917  LogPrint(BCLog::MEMPOOL, "ignoring large orphan tx (size: %u, hash: %s)\n", sz, hash.ToString());
918  return false;
919  }
920 
921  auto ret = mapOrphanTransactions.emplace(hash, COrphanTx{tx, peer, GetTime() + ORPHAN_TX_EXPIRE_TIME, g_orphan_list.size()});
922  assert(ret.second);
923  g_orphan_list.push_back(ret.first);
924  for (const CTxIn& txin : tx->vin) {
925  mapOrphanTransactionsByPrev[txin.prevout].insert(ret.first);
926  }
927 
928  AddToCompactExtraTransactions(tx);
929 
930  LogPrint(BCLog::MEMPOOL, "stored orphan tx %s (mapsz %u outsz %u)\n", hash.ToString(),
931  mapOrphanTransactions.size(), mapOrphanTransactionsByPrev.size());
932  return true;
933 }
934 
935 int static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
936 {
937  std::map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.find(hash);
938  if (it == mapOrphanTransactions.end())
939  return 0;
940  for (const CTxIn& txin : it->second.tx->vin)
941  {
942  auto itPrev = mapOrphanTransactionsByPrev.find(txin.prevout);
943  if (itPrev == mapOrphanTransactionsByPrev.end())
944  continue;
945  itPrev->second.erase(it);
946  if (itPrev->second.empty())
947  mapOrphanTransactionsByPrev.erase(itPrev);
948  }
949 
950  size_t old_pos = it->second.list_pos;
951  assert(g_orphan_list[old_pos] == it);
952  if (old_pos + 1 != g_orphan_list.size()) {
953  // Unless we're deleting the last entry in g_orphan_list, move the last
954  // entry to the position we're deleting.
955  auto it_last = g_orphan_list.back();
956  g_orphan_list[old_pos] = it_last;
957  it_last->second.list_pos = old_pos;
958  }
959  g_orphan_list.pop_back();
960 
961  mapOrphanTransactions.erase(it);
962  return 1;
963 }
964 
965 void EraseOrphansFor(NodeId peer)
966 {
967  LOCK(g_cs_orphans);
968  int nErased = 0;
969  std::map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
970  while (iter != mapOrphanTransactions.end())
971  {
972  std::map<uint256, COrphanTx>::iterator maybeErase = iter++; // increment to avoid iterator becoming invalid
973  if (maybeErase->second.fromPeer == peer)
974  {
975  nErased += EraseOrphanTx(maybeErase->second.tx->GetHash());
976  }
977  }
978  if (nErased > 0) LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx from peer=%d\n", nErased, peer);
979 }
980 
981 
982 unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans)
983 {
984  LOCK(g_cs_orphans);
985 
986  unsigned int nEvicted = 0;
987  static int64_t nNextSweep;
988  int64_t nNow = GetTime();
989  if (nNextSweep <= nNow) {
990  // Sweep out expired orphan pool entries:
991  int nErased = 0;
992  int64_t nMinExpTime = nNow + ORPHAN_TX_EXPIRE_TIME - ORPHAN_TX_EXPIRE_INTERVAL;
993  std::map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
994  while (iter != mapOrphanTransactions.end())
995  {
996  std::map<uint256, COrphanTx>::iterator maybeErase = iter++;
997  if (maybeErase->second.nTimeExpire <= nNow) {
998  nErased += EraseOrphanTx(maybeErase->second.tx->GetHash());
999  } else {
1000  nMinExpTime = std::min(maybeErase->second.nTimeExpire, nMinExpTime);
1001  }
1002  }
1003  // Sweep again 5 minutes after the next entry that expires in order to batch the linear scan.
1004  nNextSweep = nMinExpTime + ORPHAN_TX_EXPIRE_INTERVAL;
1005  if (nErased > 0) LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx due to expiration\n", nErased);
1006  }
1007  FastRandomContext rng;
1008  while (mapOrphanTransactions.size() > nMaxOrphans)
1009  {
1010  // Evict a random orphan:
1011  size_t randompos = rng.randrange(g_orphan_list.size());
1012  EraseOrphanTx(g_orphan_list[randompos]->first);
1013  ++nEvicted;
1014  }
1015  return nEvicted;
1016 }
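// For example: an orphan accepted now expires ORPHAN_TX_EXPIRE_TIME (20 minutes)
// later, and the expiry sweep above runs at most once per
// ORPHAN_TX_EXPIRE_INTERVAL (5 minutes), batching the linear scan instead of
// checking expirations on every call. Random eviction only kicks in once the
// pool exceeds nMaxOrphans.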
1017 
1021 void Misbehaving(NodeId pnode, int howmuch, const std::string& message) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
1022 {
1023  if (howmuch == 0)
1024  return;
1025 
1026  CNodeState *state = State(pnode);
1027  if (state == nullptr)
1028  return;
1029 
1030  state->nMisbehavior += howmuch;
1031  int banscore = gArgs.GetArg("-banscore", DEFAULT_BANSCORE_THRESHOLD);
1032  std::string message_prefixed = message.empty() ? "" : (": " + message);
1033  if (state->nMisbehavior >= banscore && state->nMisbehavior - howmuch < banscore)
1034  {
1035  LogPrint(BCLog::NET, "%s: %s peer=%d (%d -> %d) DISCOURAGE THRESHOLD EXCEEDED%s\n", __func__, state->name, pnode, state->nMisbehavior-howmuch, state->nMisbehavior, message_prefixed);
1036  state->m_should_discourage = true;
1037  } else
1038  LogPrint(BCLog::NET, "%s: %s peer=%d (%d -> %d)%s\n", __func__, state->name, pnode, state->nMisbehavior-howmuch, state->nMisbehavior, message_prefixed);
1039 }
1040 
1051 static bool MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState& state, bool via_compact_block, const std::string& message = "") {
1052  switch (state.GetResult()) {
1053  case BlockValidationResult::BLOCK_RESULT_UNSET:
1054  break;
1055  // The node is providing invalid data:
1056  case BlockValidationResult::BLOCK_CONSENSUS:
1057  case BlockValidationResult::BLOCK_MUTATED:
1058  if (!via_compact_block) {
1059  LOCK(cs_main);
1060  Misbehaving(nodeid, 100, message);
1061  return true;
1062  }
1063  break;
1064  case BlockValidationResult::BLOCK_CACHED_INVALID:
1065  {
1066  LOCK(cs_main);
1067  CNodeState *node_state = State(nodeid);
1068  if (node_state == nullptr) {
1069  break;
1070  }
1071 
1072  // Discourage outbound (but not inbound) peers if on an invalid chain.
1073  // Exempt HB compact block peers and manual connections.
1074  if (!via_compact_block && !node_state->m_is_inbound && !node_state->m_is_manual_connection) {
1075  Misbehaving(nodeid, 100, message);
1076  return true;
1077  }
1078  break;
1079  }
1080  case BlockValidationResult::BLOCK_INVALID_HEADER:
1081  case BlockValidationResult::BLOCK_CHECKPOINT:
1082  case BlockValidationResult::BLOCK_INVALID_PREV:
1083  {
1084  LOCK(cs_main);
1085  Misbehaving(nodeid, 100, message);
1086  }
1087  return true;
1088  // Conflicting (but not necessarily invalid) data or different policy:
1089  case BlockValidationResult::BLOCK_MISSING_PREV:
1090  {
1091  // TODO: Handle this much more gracefully (10 DoS points is super arbitrary)
1092  LOCK(cs_main);
1093  Misbehaving(nodeid, 10, message);
1094  }
1095  return true;
1096  case BlockValidationResult::BLOCK_RECENT_CONSENSUS_CHANGE:
1097  case BlockValidationResult::BLOCK_TIME_FUTURE:
1098  break;
1099  }
1100  if (message != "") {
1101  LogPrint(BCLog::NET, "peer=%d: %s\n", nodeid, message);
1102  }
1103  return false;
1104 }
1105 
1111 static bool MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state, const std::string& message = "")
1112 {
1113  switch (state.GetResult()) {
1114  case TxValidationResult::TX_RESULT_UNSET:
1115  break;
1116  // The node is providing invalid data:
1117  case TxValidationResult::TX_CONSENSUS:
1118  {
1119  LOCK(cs_main);
1120  Misbehaving(nodeid, 100, message);
1121  return true;
1122  }
1123  // Conflicting (but not necessarily invalid) data or different policy:
1124  case TxValidationResult::TX_RECENT_CONSENSUS_CHANGE:
1125  case TxValidationResult::TX_NOT_STANDARD:
1126  case TxValidationResult::TX_MISSING_INPUTS:
1127  case TxValidationResult::TX_PREMATURE_SPEND:
1128  case TxValidationResult::TX_WITNESS_MUTATED:
1129  case TxValidationResult::TX_CONFLICT:
1130  case TxValidationResult::TX_MEMPOOL_POLICY:
1131  break;
1132  }
1133  if (message != "") {
1134  LogPrint(BCLog::NET, "peer=%d: %s\n", nodeid, message);
1135  }
1136  return false;
1137 }
1138 
1139 
1141 //
1142 // blockchain -> download logic notification
1143 //
1144 
1145 // To prevent fingerprinting attacks, only send blocks/headers outside of the
1146 // active chain if they are no more than a month older (both in time, and in
1147 // best equivalent proof of work) than the best header chain we know about and
1148 // we fully-validated them at some point.
1149 static bool BlockRequestAllowed(const CBlockIndex* pindex, const Consensus::Params& consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
1150 {
1151  AssertLockHeld(cs_main);
1152  if (::ChainActive().Contains(pindex)) return true;
1153  return pindex->IsValid(BLOCK_VALID_SCRIPTS) && (pindexBestHeader != nullptr) &&
1154  (pindexBestHeader->GetBlockTime() - pindex->GetBlockTime() < STALE_RELAY_AGE_LIMIT) &&
1155  (GetBlockProofEquivalentTime(*pindexBestHeader, *pindex, *pindexBestHeader, consensusParams) < STALE_RELAY_AGE_LIMIT);
1156 }
1157 
1158 PeerLogicValidation::PeerLogicValidation(CConnman* connmanIn, BanMan* banman, CScheduler& scheduler, ChainstateManager& chainman, CTxMemPool& pool)
1159  : connman(connmanIn),
1160  m_banman(banman),
1161  m_chainman(chainman),
1162  m_mempool(pool),
1163  m_stale_tip_check_time(0)
1164 {
1165  // Initialize global variables that cannot be constructed at startup.
1166  recentRejects.reset(new CRollingBloomFilter(120000, 0.000001));
1167 
1168  // Blocks don't typically have more than 4000 transactions, so this should
1169  // be at least six blocks (~1 hr) worth of transactions that we can store.
1170  // If the number of transactions appearing in a block goes up, or if we are
1171  // seeing getdata requests more than an hour after initial announcement, we
1172  // can increase this number.
1173  // The false positive rate of 1/1M should come out to less than 1
1174  // transaction per day that would be inadvertently ignored (which is the
1175  // same probability that we have in the reject filter).
1176  g_recent_confirmed_transactions.reset(new CRollingBloomFilter(24000, 0.000001));
1177 
1178  const Consensus::Params& consensusParams = Params().GetConsensus();
1179  // Stale tip checking and peer eviction are on two different timers, but we
1180  // don't want them to get out of sync due to drift in the scheduler, so we
1181  // combine them in one function and schedule at the quicker (peer-eviction)
1182  // timer.
1183  static_assert(EXTRA_PEER_CHECK_INTERVAL < STALE_CHECK_INTERVAL, "peer eviction timer should be less than stale tip check timer");
1184  scheduler.scheduleEvery([this, consensusParams] { this->CheckForStaleTipAndEvictPeers(consensusParams); }, std::chrono::seconds{EXTRA_PEER_CHECK_INTERVAL});
1185 
1186  // schedule next run for 10-15 minutes in the future
1187  const std::chrono::milliseconds delta = std::chrono::minutes{10} + GetRandMillis(std::chrono::minutes{5});
1188  scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); }, delta);
1189 }
1190 
1195 void PeerLogicValidation::BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindex)
1196 {
1197  {
1198  LOCK(g_cs_orphans);
1199 
1200  std::vector<uint256> vOrphanErase;
1201 
1202  for (const CTransactionRef& ptx : pblock->vtx) {
1203  const CTransaction& tx = *ptx;
1204 
1205  // Which orphan pool entries must we evict?
1206  for (const auto& txin : tx.vin) {
1207  auto itByPrev = mapOrphanTransactionsByPrev.find(txin.prevout);
1208  if (itByPrev == mapOrphanTransactionsByPrev.end()) continue;
1209  for (auto mi = itByPrev->second.begin(); mi != itByPrev->second.end(); ++mi) {
1210  const CTransaction& orphanTx = *(*mi)->second.tx;
1211  const uint256& orphanHash = orphanTx.GetHash();
1212  vOrphanErase.push_back(orphanHash);
1213  }
1214  }
1215  }
1216 
1217  // Erase orphan transactions included or precluded by this block
1218  if (vOrphanErase.size()) {
1219  int nErased = 0;
1220  for (const uint256& orphanHash : vOrphanErase) {
1221  nErased += EraseOrphanTx(orphanHash);
1222  }
1223  LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx included or conflicted by block\n", nErased);
1224  }
1225 
1226  g_last_tip_update = GetTime();
1227  }
1228  {
1229  LOCK(g_cs_recent_confirmed_transactions);
1230  for (const auto& ptx : pblock->vtx) {
1231  g_recent_confirmed_transactions->insert(ptx->GetHash());
1232  }
1233  }
1234 }
1235 
1236 void PeerLogicValidation::BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex)
1237 {
1238  // To avoid relay problems with transactions that were previously
1239  // confirmed, clear our filter of recently confirmed transactions whenever
1240  // there's a reorg.
1241  // This means that in a 1-block reorg (where 1 block is disconnected and
1242  // then another block reconnected), our filter will drop to having only one
1243  // block's worth of transactions in it, but that should be fine, since
1244  // presumably the most common case of relaying a confirmed transaction
1245  // should be just after a new block containing it is found.
1246  LOCK(g_cs_recent_confirmed_transactions);
1247  g_recent_confirmed_transactions->reset();
1248 }
1249 
1250 // All of the following cache a recent block, and are protected by cs_most_recent_block
1251 static RecursiveMutex cs_most_recent_block;
1252 static std::shared_ptr<const CBlock> most_recent_block GUARDED_BY(cs_most_recent_block);
1253 static std::shared_ptr<const CBlockHeaderAndShortTxIDs> most_recent_compact_block GUARDED_BY(cs_most_recent_block);
1254 static uint256 most_recent_block_hash GUARDED_BY(cs_most_recent_block);
1255 static bool fWitnessesPresentInMostRecentCompactBlock GUARDED_BY(cs_most_recent_block);
1256 
1261 void PeerLogicValidation::NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& pblock) {
1262  std::shared_ptr<const CBlockHeaderAndShortTxIDs> pcmpctblock = std::make_shared<const CBlockHeaderAndShortTxIDs> (*pblock, true);
1263  const CNetMsgMaker msgMaker(PROTOCOL_VERSION);
1264 
1265  LOCK(cs_main);
1266 
1267  static int nHighestFastAnnounce = 0;
1268  if (pindex->nHeight <= nHighestFastAnnounce)
1269  return;
1270  nHighestFastAnnounce = pindex->nHeight;
1271 
1272  bool fWitnessEnabled = IsWitnessEnabled(pindex->pprev, Params().GetConsensus());
1273  uint256 hashBlock(pblock->GetHash());
1274 
1275  {
1276  LOCK(cs_most_recent_block);
1277  most_recent_block_hash = hashBlock;
1278  most_recent_block = pblock;
1279  most_recent_compact_block = pcmpctblock;
1280  fWitnessesPresentInMostRecentCompactBlock = fWitnessEnabled;
1281  }
1282 
1283  connman->ForEachNode([this, &pcmpctblock, pindex, &msgMaker, fWitnessEnabled, &hashBlock](CNode* pnode) {
1284  AssertLockHeld(cs_main);
1285 
1286  // TODO: Avoid the repeated-serialization here
1287  if (pnode->nVersion < INVALID_CB_NO_BAN_VERSION || pnode->fDisconnect)
1288  return;
1289  ProcessBlockAvailability(pnode->GetId());
1290  CNodeState &state = *State(pnode->GetId());
1291  // If the peer has, or we announced to them the previous block already,
1292  // but we don't think they have this one, go ahead and announce it
1293  if (state.fPreferHeaderAndIDs && (!fWitnessEnabled || state.fWantsCmpctWitness) &&
1294  !PeerHasHeader(&state, pindex) && PeerHasHeader(&state, pindex->pprev)) {
1295 
1296  LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", "PeerLogicValidation::NewPoWValidBlock",
1297  hashBlock.ToString(), pnode->GetId());
1298  connman->PushMessage(pnode, msgMaker.Make(NetMsgType::CMPCTBLOCK, *pcmpctblock));
1299  state.pindexBestHeaderSent = pindex;
1300  }
1301  });
1302 }
1303 
1308 void PeerLogicValidation::UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) {
1309  const int nNewHeight = pindexNew->nHeight;
1310  connman->SetBestHeight(nNewHeight);
1311 
1312  SetServiceFlagsIBDCache(!fInitialDownload);
1313  if (!fInitialDownload) {
1314  // Find the hashes of all blocks that weren't previously in the best chain.
1315  std::vector<uint256> vHashes;
1316  const CBlockIndex *pindexToAnnounce = pindexNew;
1317  while (pindexToAnnounce != pindexFork) {
1318  vHashes.push_back(pindexToAnnounce->GetBlockHash());
1319  pindexToAnnounce = pindexToAnnounce->pprev;
1320  if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) {
1321  // Limit announcements in case of a huge reorganization.
1322  // Rely on the peer's synchronization mechanism in that case.
1323  break;
1324  }
1325  }
1326  // Relay inventory, but don't relay old inventory during initial block download.
1327  connman->ForEachNode([nNewHeight, &vHashes](CNode* pnode) {
1328  LOCK(pnode->cs_inventory);
1329  if (nNewHeight > (pnode->nStartingHeight != -1 ? pnode->nStartingHeight - 2000 : 0)) {
1330  for (const uint256& hash : reverse_iterate(vHashes)) {
1331  pnode->vBlockHashesToAnnounce.push_back(hash);
1332  }
1333  }
1334  });
1335  connman->WakeMessageHandler();
1336  }
1337 }
1338 
1343 void PeerLogicValidation::BlockChecked(const CBlock& block, const BlockValidationState& state) {
1344  LOCK(cs_main);
1345 
1346  const uint256 hash(block.GetHash());
1347  std::map<uint256, std::pair<NodeId, bool>>::iterator it = mapBlockSource.find(hash);
1348 
1349  // If the block failed validation, we know where it came from and we're still connected
1350  // to that peer, maybe punish.
1351  if (state.IsInvalid() &&
1352  it != mapBlockSource.end() &&
1353  State(it->second.first)) {
1354  MaybePunishNodeForBlock(/*nodeid=*/ it->second.first, state, /*via_compact_block=*/ !it->second.second);
1355  }
1356  // Check that:
1357  // 1. The block is valid
1358  // 2. We're not in initial block download
1359  // 3. This is currently the best block we're aware of. We haven't updated
1360  // the tip yet so we have no way to check this directly here. Instead we
1361  // just check that there are currently no other blocks in flight.
1362  else if (state.IsValid() &&
1363  !::ChainstateActive().IsInitialBlockDownload() &&
1364  mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) {
1365  if (it != mapBlockSource.end()) {
1366  MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first, connman);
1367  }
1368  }
1369  if (it != mapBlockSource.end())
1370  mapBlockSource.erase(it);
1371 }
1372 
1374 //
1375 // Messages
1376 //
1377 
1378 
1379 bool static AlreadyHave(const CInv& inv, const CTxMemPool& mempool) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
1380 {
1381  switch (inv.type)
1382  {
1383  case MSG_TX:
1384  case MSG_WITNESS_TX:
1385  {
1386  assert(recentRejects);
1387  if (::ChainActive().Tip()->GetBlockHash() != hashRecentRejectsChainTip)
1388  {
1389  // If the chain tip has changed previously rejected transactions
1390  // might be now valid, e.g. due to a nLockTime'd tx becoming valid,
1391  // or a double-spend. Reset the rejects filter and give those
1392  // txs a second chance.
1393  hashRecentRejectsChainTip = ::ChainActive().Tip()->GetBlockHash();
1394  recentRejects->reset();
1395  }
1396 
1397  {
1398  LOCK(g_cs_orphans);
1399  if (mapOrphanTransactions.count(inv.hash)) return true;
1400  }
1401 
1402  {
1403  LOCK(g_cs_recent_confirmed_transactions);
1404  if (g_recent_confirmed_transactions->contains(inv.hash)) return true;
1405  }
1406 
1407  return recentRejects->contains(inv.hash) ||
1408  mempool.exists(inv.hash);
1409  }
1410  case MSG_BLOCK:
1411  case MSG_WITNESS_BLOCK:
1412  return LookupBlockIndex(inv.hash) != nullptr;
1413  }
1414  // Don't know what it is, just say we already got one
1415  return true;
1416 }
1417 
1418 void RelayTransaction(const uint256& txid, const CConnman& connman)
1419 {
1420  connman.ForEachNode([&txid](CNode* pnode)
1421  {
1422  pnode->PushTxInventory(txid);
1423  });
1424 }
1425 
1426 static void RelayAddress(const CAddress& addr, bool fReachable, const CConnman& connman)
1427 {
1428  unsigned int nRelayNodes = fReachable ? 2 : 1; // limited relaying of addresses outside our network(s)
1429 
1430  // Relay to a limited number of other nodes
1431  // Use deterministic randomness to send to the same nodes for 24 hours
1432  // at a time so the m_addr_knowns of the chosen nodes prevent repeats
1433  uint64_t hashAddr = addr.GetHash();
1434  const CSipHasher hasher = connman.GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY).Write(hashAddr << 32).Write((GetTime() + hashAddr) / (24 * 60 * 60));
1435  FastRandomContext insecure_rand;
1436 
1437  std::array<std::pair<uint64_t, CNode*>,2> best{{{0, nullptr}, {0, nullptr}}};
1438  assert(nRelayNodes <= best.size());
1439 
1440  auto sortfunc = [&best, &hasher, nRelayNodes](CNode* pnode) {
1441  if (pnode->IsAddrRelayPeer()) {
1442  uint64_t hashKey = CSipHasher(hasher).Write(pnode->GetId()).Finalize();
1443  for (unsigned int i = 0; i < nRelayNodes; i++) {
1444  if (hashKey > best[i].first) {
1445  std::copy(best.begin() + i, best.begin() + nRelayNodes - 1, best.begin() + i + 1);
1446  best[i] = std::make_pair(hashKey, pnode);
1447  break;
1448  }
1449  }
1450  }
1451  };
1452 
1453  auto pushfunc = [&addr, &best, nRelayNodes, &insecure_rand] {
1454  for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) {
1455  best[i].second->PushAddress(addr, insecure_rand);
1456  }
1457  };
1458 
1459  connman.ForEachNodeThen(std::move(sortfunc), std::move(pushfunc));
1460 }
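// For example: the SipHash key above mixes the address hash with the current
// 24-hour period, so a given address maps to the same one or two relay peers
// for the rest of that day. Repeated announcements of that address therefore
// reach the same nodes, whose m_addr_knowns filters suppress duplicate relays.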
1461 
1462 void static ProcessGetBlockData(CNode& pfrom, const CChainParams& chainparams, const CInv& inv, CConnman* connman)
1463 {
1464  bool send = false;
1465  std::shared_ptr<const CBlock> a_recent_block;
1466  std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block;
1467  bool fWitnessesPresentInARecentCompactBlock;
1468  const Consensus::Params& consensusParams = chainparams.GetConsensus();
1469  {
1470  LOCK(cs_most_recent_block);
1471  a_recent_block = most_recent_block;
1472  a_recent_compact_block = most_recent_compact_block;
1473  fWitnessesPresentInARecentCompactBlock = fWitnessesPresentInMostRecentCompactBlock;
1474  }
1475 
1476  bool need_activate_chain = false;
1477  {
1478  LOCK(cs_main);
1479  const CBlockIndex* pindex = LookupBlockIndex(inv.hash);
1480  if (pindex) {
1481  if (pindex->HaveTxsDownloaded() && !pindex->IsValid(BLOCK_VALID_SCRIPTS) &&
1482  pindex->IsValid(BLOCK_VALID_TREE)) {
1483  // If we have the block and all of its parents, but have not yet validated it,
1484  // we might be in the middle of connecting it (ie in the unlock of cs_main
1485  // before ActivateBestChain but after AcceptBlock).
1486  // In this case, we need to run ActivateBestChain prior to checking the relay
1487  // conditions below.
1488  need_activate_chain = true;
1489  }
1490  }
1491  } // release cs_main before calling ActivateBestChain
1492  if (need_activate_chain) {
1493  BlockValidationState state;
1494  if (!ActivateBestChain(state, Params(), a_recent_block)) {
1495  LogPrint(BCLog::NET, "failed to activate chain (%s)\n", state.ToString());
1496  }
1497  }
1498 
1499  LOCK(cs_main);
1500  const CBlockIndex* pindex = LookupBlockIndex(inv.hash);
1501  if (pindex) {
1502  send = BlockRequestAllowed(pindex, consensusParams);
1503  if (!send) {
1504  LogPrint(BCLog::NET, "%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom.GetId());
1505  }
1506  }
1507  const CNetMsgMaker msgMaker(pfrom.GetSendVersion());
1508  // disconnect node in case we have reached the outbound limit for serving historical blocks
1509  if (send &&
1510  connman->OutboundTargetReached(true) &&
1511  (((pindexBestHeader != nullptr) && (pindexBestHeader->GetBlockTime() - pindex->GetBlockTime() > HISTORICAL_BLOCK_AGE)) || inv.type == MSG_FILTERED_BLOCK) &&
1512  !pfrom.HasPermission(PF_DOWNLOAD) // nodes with the download permission may exceed target
1513  ) {
1514  LogPrint(BCLog::NET, "historical block serving limit reached, disconnect peer=%d\n", pfrom.GetId());
1515 
1516  //disconnect node
1517  pfrom.fDisconnect = true;
1518  send = false;
1519  }
1520  // Avoid leaking prune-height by never sending blocks below the NODE_NETWORK_LIMITED threshold
1521  if (send && !pfrom.HasPermission(PF_NOBAN) && (
1522  (((pfrom.GetLocalServices() & NODE_NETWORK_LIMITED) == NODE_NETWORK_LIMITED) && ((pfrom.GetLocalServices() & NODE_NETWORK) != NODE_NETWORK) && (::ChainActive().Tip()->nHeight - pindex->nHeight > (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2 /* add two blocks buffer extension for possible races */) )
1523  )) {
1524  LogPrint(BCLog::NET, "Ignore block request below NODE_NETWORK_LIMITED threshold from peer=%d\n", pfrom.GetId());
1525 
1526  //disconnect node and prevent it from stalling (would otherwise wait for the missing block)
1527  pfrom.fDisconnect = true;
1528  send = false;
1529  }
1530  // Pruned nodes may have deleted the block, so check whether
1531  // it's available before trying to send.
1532  if (send && (pindex->nStatus & BLOCK_HAVE_DATA))
1533  {
1534  std::shared_ptr<const CBlock> pblock;
1535  if (a_recent_block && a_recent_block->GetHash() == pindex->GetBlockHash()) {
1536  pblock = a_recent_block;
1537  } else if (inv.type == MSG_WITNESS_BLOCK) {
1538  // Fast-path: in this case it is possible to serve the block directly from disk,
1539  // as the network format matches the format on disk
1540  std::vector<uint8_t> block_data;
1541  if (!ReadRawBlockFromDisk(block_data, pindex, chainparams.MessageStart())) {
1542  assert(!"cannot load block from disk");
1543  }
1544  connman->PushMessage(&pfrom, msgMaker.Make(NetMsgType::BLOCK, MakeSpan(block_data)));
1545  // Don't set pblock as we've sent the block
1546  } else {
1547  // Send block from disk
1548  std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>();
1549  if (!ReadBlockFromDisk(*pblockRead, pindex, consensusParams))
1550  assert(!"cannot load block from disk");
1551  pblock = pblockRead;
1552  }
1553  if (pblock) {
1554  if (inv.type == MSG_BLOCK)
1555  connman->PushMessage(&pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, *pblock));
1556  else if (inv.type == MSG_WITNESS_BLOCK)
1557  connman->PushMessage(&pfrom, msgMaker.Make(NetMsgType::BLOCK, *pblock));
1558  else if (inv.type == MSG_FILTERED_BLOCK)
1559  {
1560  bool sendMerkleBlock = false;
1561  CMerkleBlock merkleBlock;
1562  if (pfrom.m_tx_relay != nullptr) {
1563  LOCK(pfrom.m_tx_relay->cs_filter);
1564  if (pfrom.m_tx_relay->pfilter) {
1565  sendMerkleBlock = true;
1566  merkleBlock = CMerkleBlock(*pblock, *pfrom.m_tx_relay->pfilter);
1567  }
1568  }
1569  if (sendMerkleBlock) {
1570  connman->PushMessage(&pfrom, msgMaker.Make(NetMsgType::MERKLEBLOCK, merkleBlock));
1571  // CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
1572  // This avoids hurting performance by pointlessly requiring a round-trip
1573  // Note that there is currently no way for a node to request any single transactions we didn't send here -
1574  // they must either disconnect and retry or request the full block.
1575  // Thus, the protocol spec allows us to provide duplicate txn here,
1576  // however we MUST always provide at least what the remote peer needs
1577  typedef std::pair<unsigned int, uint256> PairType;
1578  for (PairType& pair : merkleBlock.vMatchedTxn)
1579  connman->PushMessage(&pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::TX, *pblock->vtx[pair.first]));
1580  }
1581  // else
1582  // no response
1583  }
1584  else if (inv.type == MSG_CMPCT_BLOCK)
1585  {
1586  // If a peer is asking for old blocks, we're almost guaranteed
1587  // they won't have a useful mempool to match against a compact block,
1588  // and we don't feel like constructing the object for them, so
1589  // instead we respond with the full, non-compact block.
1590  bool fPeerWantsWitness = State(pfrom.GetId())->fWantsCmpctWitness;
1591  int nSendFlags = fPeerWantsWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
1592  if (CanDirectFetch(consensusParams) && pindex->nHeight >= ::ChainActive().Height() - MAX_CMPCTBLOCK_DEPTH) {
1593  if ((fPeerWantsWitness || !fWitnessesPresentInARecentCompactBlock) && a_recent_compact_block && a_recent_compact_block->header.GetHash() == pindex->GetBlockHash()) {
1594  connman->PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, *a_recent_compact_block));
1595  } else {
1596  CBlockHeaderAndShortTxIDs cmpctblock(*pblock, fPeerWantsWitness);
1597  connman->PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
1598  }
1599  } else {
1600  connman->PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCK, *pblock));
1601  }
1602  }
1603  }
1604 
1605  // Trigger the peer node to send a getblocks request for the next batch of inventory
1606  if (inv.hash == pfrom.hashContinue)
1607  {
1608  // Send immediately. This must send even if redundant,
1609  // and we want it right after the last block so they don't
1610  // wait for other stuff first.
1611  std::vector<CInv> vInv;
1612  vInv.push_back(CInv(MSG_BLOCK, ::ChainActive().Tip()->GetBlockHash()));
1613  connman->PushMessage(&pfrom, msgMaker.Make(NetMsgType::INV, vInv));
1614  pfrom.hashContinue.SetNull();
1615  }
1616  }
1617 }
1618 
1620 CTransactionRef static FindTxForGetData(CNode& peer, const uint256& txid, const std::chrono::seconds mempool_req, const std::chrono::seconds longlived_mempool_time) LOCKS_EXCLUDED(cs_main)
1621 {
1622  // Check if the requested transaction is so recent that we're just
1623  // about to announce it to the peer; if so, they certainly shouldn't
1624  // know we already have it.
1625  {
1626  LOCK(peer.m_tx_relay->cs_tx_inventory);
1627  if (peer.m_tx_relay->setInventoryTxToSend.count(txid)) return {};
1628  }
1629 
1630  {
1631  LOCK(cs_main);
1632  // Look up transaction in relay pool
1633  auto mi = mapRelay.find(txid);
1634  if (mi != mapRelay.end()) return mi->second;
1635  }
1636 
1637  auto txinfo = mempool.info(txid);
1638  if (txinfo.tx) {
1639  // To protect privacy, do not answer getdata using the mempool when
1640  // that TX couldn't have been INVed in reply to a MEMPOOL request,
1641  // or when it's too recent to have expired from mapRelay.
1642  if ((mempool_req.count() && txinfo.m_time <= mempool_req) || txinfo.m_time <= longlived_mempool_time) {
1643  return txinfo.tx;
1644  }
1645  }
1646 
1647  return {};
1648 }
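FindTxForGetData gates mempool answers on two times: the peer's last MEMPOOL request and the mapRelay expiry horizon. A minimal standalone sketch of just that time check (the function name and values are illustrative, not the real API):

#include <chrono>
#include <cassert>

// Sketch: reply from the mempool only if the tx was accepted before the
// peer's last MEMPOOL request, or long enough ago to have left mapRelay.
bool MayAnswerFromMempool(std::chrono::seconds tx_time,
                          std::chrono::seconds last_mempool_req,  // zero if never requested
                          std::chrono::seconds now,
                          std::chrono::seconds relay_cache_time)  // e.g. 15 minutes
{
    const std::chrono::seconds longlived_cutoff = now - relay_cache_time;
    return (last_mempool_req.count() && tx_time <= last_mempool_req) || tx_time <= longlived_cutoff;
}

int main()
{
    using namespace std::chrono;
    assert(!MayAnswerFromMempool(seconds{1000}, seconds{0}, seconds{1200}, minutes{15})); // too recent
    assert(MayAnswerFromMempool(seconds{1000}, seconds{0}, seconds{2000}, minutes{15}));  // old enough
    return 0;
}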
1649 
1650 void static ProcessGetData(CNode& pfrom, const CChainParams& chainparams, CConnman* connman, CTxMemPool& mempool, const std::atomic<bool>& interruptMsgProc) LOCKS_EXCLUDED(cs_main)
1651 {
1652  AssertLockNotHeld(cs_main);
1653 
1654  std::deque<CInv>::iterator it = pfrom.vRecvGetData.begin();
1655  std::vector<CInv> vNotFound;
1656  const CNetMsgMaker msgMaker(pfrom.GetSendVersion());
1657 
1658  // mempool entries added before this time have likely expired from mapRelay
1659  const std::chrono::seconds longlived_mempool_time = GetTime<std::chrono::seconds>() - RELAY_TX_CACHE_TIME;
1660  // Get last mempool request time
1661  const std::chrono::seconds mempool_req = pfrom.m_tx_relay != nullptr ? pfrom.m_tx_relay->m_last_mempool_req.load()
1662  : std::chrono::seconds::min();
1663 
1664  // Process as many TX items from the front of the getdata queue as
1665  // possible, since they're common and it's efficient to batch process
1666  // them.
1667  while (it != pfrom.vRecvGetData.end() && (it->type == MSG_TX || it->type == MSG_WITNESS_TX)) {
1668  if (interruptMsgProc) return;
1669  // The send buffer provides backpressure. If there's no space in
1670  // the buffer, pause processing until the next call.
1671  if (pfrom.fPauseSend) break;
1672 
1673  const CInv &inv = *it++;
1674 
1675  if (pfrom.m_tx_relay == nullptr) {
1676  // Ignore GETDATA requests for transactions from blocks-only peers.
1677  continue;
1678  }
1679 
1680  CTransactionRef tx = FindTxForGetData(pfrom, inv.hash, mempool_req, longlived_mempool_time);
1681  if (tx) {
1682  int nSendFlags = (inv.type == MSG_TX ? SERIALIZE_TRANSACTION_NO_WITNESS : 0);
1683  connman->PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *tx));
1684  mempool.RemoveUnbroadcastTx(inv.hash);
1685  } else {
1686  vNotFound.push_back(inv);
1687  }
1688  }
1689 
1690  // Only process one BLOCK item per call, since they're uncommon and can be
1691  // expensive to process.
1692  if (it != pfrom.vRecvGetData.end() && !pfrom.fPauseSend) {
1693  const CInv &inv = *it++;
1694  if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK || inv.type == MSG_WITNESS_BLOCK) {
1695  ProcessGetBlockData(pfrom, chainparams, inv, connman);
1696  }
1697  // else: If the first item on the queue is an unknown type, we erase it
1698  // and continue processing the queue on the next call.
1699  }
1700 
1701  pfrom.vRecvGetData.erase(pfrom.vRecvGetData.begin(), it);
1702 
1703  if (!vNotFound.empty()) {
1704  // Let the peer know that we didn't find what it asked for, so it doesn't
1705  // have to wait around forever.
1706  // SPV clients care about this message: it's needed when they are
1707  // recursively walking the dependencies of relevant unconfirmed
1708  // transactions. SPV clients want to do that because they want to know
1709  // about (and store and rebroadcast and risk analyze) the dependencies
1710  // of transactions relevant to them, without having to download the
1711  // entire memory pool.
1712  // Also, other nodes can use these messages to automatically request a
1713  // transaction from some other peer that announced it, and stop
1714  // waiting for us to respond.
1715  // In normal operation, we often send NOTFOUND messages for parents of
1716  // transactions that we relay; if a peer is missing a parent, they may
1717  // assume we have them and request the parents from us.
1718  connman->PushMessage(&pfrom, msgMaker.Make(NetMsgType::NOTFOUND, vNotFound));
1719  }
1720 }
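The queue policy described in the comments above (batch transaction items from the front, at most one block item per call) can be sketched independently of CInv. A standalone toy version, with a placeholder InvKind enum:

#include <deque>
#include <cassert>

// Sketch of the getdata batching policy: drain consecutive transaction items
// from the front of the queue, then serve at most one block item per call.
enum class InvKind { TX, BLOCK };

size_t ServeOneBatch(std::deque<InvKind>& queue)
{
    size_t served = 0;
    while (!queue.empty() && queue.front() == InvKind::TX) {
        queue.pop_front();          // answer or NOTFOUND the tx request
        ++served;
    }
    if (!queue.empty() && queue.front() == InvKind::BLOCK) {
        queue.pop_front();          // at most one (expensive) block per call
        ++served;
    }
    return served;
}

int main()
{
    std::deque<InvKind> q{InvKind::TX, InvKind::TX, InvKind::BLOCK, InvKind::BLOCK};
    assert(ServeOneBatch(q) == 3);  // two txs plus one block
    assert(q.size() == 1);          // the second block waits for the next call
    return 0;
}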
1721 
1722 static uint32_t GetFetchFlags(const CNode& pfrom) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
1723  uint32_t nFetchFlags = 0;
1724  if ((pfrom.GetLocalServices() & NODE_WITNESS) && State(pfrom.GetId())->fHaveWitness) {
1725  nFetchFlags |= MSG_WITNESS_FLAG;
1726  }
1727  return nFetchFlags;
1728 }
1729 
1730 inline void static SendBlockTransactions(const CBlock& block, const BlockTransactionsRequest& req, CNode& pfrom, CConnman* connman) {
1731  BlockTransactions resp(req);
1732  for (size_t i = 0; i < req.indexes.size(); i++) {
1733  if (req.indexes[i] >= block.vtx.size()) {
1734  LOCK(cs_main);
1735  Misbehaving(pfrom.GetId(), 100, strprintf("Peer %d sent us a getblocktxn with out-of-bounds tx indices", pfrom.GetId()));
1736  return;
1737  }
1738  resp.txn[i] = block.vtx[req.indexes[i]];
1739  }
1740  LOCK(cs_main);
1741  const CNetMsgMaker msgMaker(pfrom.GetSendVersion());
1742  int nSendFlags = State(pfrom.GetId())->fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
1743  connman->PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCKTXN, resp));
1744 }
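The out-of-bounds guard in SendBlockTransactions reduces to a range check over the requested indices. A minimal standalone sketch; the helper name is hypothetical:

#include <cstddef>
#include <vector>
#include <cassert>

// Sketch: every requested index must fall inside the block's transaction
// vector, otherwise the whole getblocktxn request is rejected.
bool IndexesInRange(const std::vector<size_t>& indexes, size_t block_tx_count)
{
    for (size_t idx : indexes) {
        if (idx >= block_tx_count) return false;
    }
    return true;
}

int main()
{
    assert(IndexesInRange({0, 3, 7}, 8));
    assert(!IndexesInRange({0, 8}, 8)); // index 8 is out of bounds for 8 txs
    return 0;
}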
1745 
1746 static void ProcessHeadersMessage(CNode& pfrom, CConnman* connman, ChainstateManager& chainman, CTxMemPool& mempool, const std::vector<CBlockHeader>& headers, const CChainParams& chainparams, bool via_compact_block)
1747 {
1748  const CNetMsgMaker msgMaker(pfrom.GetSendVersion());
1749  size_t nCount = headers.size();
1750 
1751  if (nCount == 0) {
1752  // Nothing interesting. Stop asking this peer for more headers.
1753  return;
1754  }
1755 
1756  bool received_new_header = false;
1757  const CBlockIndex *pindexLast = nullptr;
1758  {
1759  LOCK(cs_main);
1760  CNodeState *nodestate = State(pfrom.GetId());
1761 
1762  // If this looks like it could be a block announcement (nCount <
1763  // MAX_BLOCKS_TO_ANNOUNCE), use special logic for handling headers that
1764  // don't connect:
1765  // - Send a getheaders message in response to try to connect the chain.
1766  // - The peer can send up to MAX_UNCONNECTING_HEADERS in a row that
1767  // don't connect before giving DoS points
1768  // - Once a headers message is received that is valid and does connect,
1769  // nUnconnectingHeaders gets reset back to 0.
1770  if (!LookupBlockIndex(headers[0].hashPrevBlock) && nCount < MAX_BLOCKS_TO_ANNOUNCE) {
1771  nodestate->nUnconnectingHeaders++;
1772  connman->PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexBestHeader), uint256()));
1773  LogPrint(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n",
1774  headers[0].GetHash().ToString(),
1775  headers[0].hashPrevBlock.ToString(),
1776  pindexBestHeader->nHeight,
1777  pfrom.GetId(), nodestate->nUnconnectingHeaders);
1778  // Set hashLastUnknownBlock for this peer, so that if we
1779  // eventually get the headers - even from a different peer -
1780  // we can use this peer to download.
1781  UpdateBlockAvailability(pfrom.GetId(), headers.back().GetHash());
1782 
1783  if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS == 0) {
1784  Misbehaving(pfrom.GetId(), 20);
1785  }
1786  return;
1787  }
1788 
1789  uint256 hashLastBlock;
1790  for (const CBlockHeader& header : headers) {
1791  if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) {
1792  Misbehaving(pfrom.GetId(), 20, "non-continuous headers sequence");
1793  return;
1794  }
1795  hashLastBlock = header.GetHash();
1796  }
1797 
1798  // If we don't have the last header, then they'll have given us
1799  // something new (if these headers are valid).
1800  if (!LookupBlockIndex(hashLastBlock)) {
1801  received_new_header = true;
1802  }
1803  }
1804 
1805  BlockValidationState state;
1806  if (!chainman.ProcessNewBlockHeaders(headers, state, chainparams, &pindexLast)) {
1807  if (state.IsInvalid()) {
1808  MaybePunishNodeForBlock(pfrom.GetId(), state, via_compact_block, "invalid header received");
1809  return;
1810  }
1811  }
1812 
1813  {
1814  LOCK(cs_main);
1815  CNodeState *nodestate = State(pfrom.GetId());
1816  if (nodestate->nUnconnectingHeaders > 0) {
1817  LogPrint(BCLog::NET, "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n", pfrom.GetId(), nodestate->nUnconnectingHeaders);
1818  }
1819  nodestate->nUnconnectingHeaders = 0;
1820 
1821  assert(pindexLast);
1822  UpdateBlockAvailability(pfrom.GetId(), pindexLast->GetBlockHash());
1823 
1824  // From here, pindexBestKnownBlock should be guaranteed to be non-null,
1825  // because it is set in UpdateBlockAvailability. Some nullptr checks
1826  // are still present, however, as belt-and-suspenders.
1827 
1828  if (received_new_header && pindexLast->nChainWork > ::ChainActive().Tip()->nChainWork) {
1829  nodestate->m_last_block_announcement = GetTime();
1830  }
1831 
1832  if (nCount == MAX_HEADERS_RESULTS) {
1833  // Headers message had its maximum size; the peer may have more headers.
1834  // TODO: optimize: if pindexLast is an ancestor of ::ChainActive().Tip or pindexBestHeader, continue
1835  // from there instead.
1836  LogPrint(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom.GetId(), pfrom.nStartingHeight);
1837  connman->PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexLast), uint256()));
1838  }
1839 
1840  bool fCanDirectFetch = CanDirectFetch(chainparams.GetConsensus());
1841  // If this set of headers is valid and ends in a block with at least as
1842  // much work as our tip, download as much as possible.
1843  if (fCanDirectFetch && pindexLast->IsValid(BLOCK_VALID_TREE) && ::ChainActive().Tip()->nChainWork <= pindexLast->nChainWork) {
1844  std::vector<const CBlockIndex*> vToFetch;
1845  const CBlockIndex *pindexWalk = pindexLast;
1846  // Calculate all the blocks we'd need to switch to pindexLast, up to a limit.
1847  while (pindexWalk && !::ChainActive().Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
1848  if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
1849  !mapBlocksInFlight.count(pindexWalk->GetBlockHash()) &&
1850  (!IsWitnessEnabled(pindexWalk->pprev, chainparams.GetConsensus()) || State(pfrom.GetId())->fHaveWitness)) {
1851  // We don't have this block, and it's not yet in flight.
1852  vToFetch.push_back(pindexWalk);
1853  }
1854  pindexWalk = pindexWalk->pprev;
1855  }
1856  // If pindexWalk still isn't on our main chain, we're looking at a
1857  // very large reorg at a time we think we're close to caught up to
1858  // the main chain -- this shouldn't really happen. Bail out on the
1859  // direct fetch and rely on parallel download instead.
1860  if (!::ChainActive().Contains(pindexWalk)) {
1861  LogPrint(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
1862  pindexLast->GetBlockHash().ToString(),
1863  pindexLast->nHeight);
1864  } else {
1865  std::vector<CInv> vGetData;
1866  // Download as much as possible, from earliest to latest.
1867  for (const CBlockIndex *pindex : reverse_iterate(vToFetch)) {
1868  if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
1869  // Can't download any more from this peer
1870  break;
1871  }
1872  uint32_t nFetchFlags = GetFetchFlags(pfrom);
1873  vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
1874  MarkBlockAsInFlight(mempool, pfrom.GetId(), pindex->GetBlockHash(), pindex);
1875  LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
1876  pindex->GetBlockHash().ToString(), pfrom.GetId());
1877  }
1878  if (vGetData.size() > 1) {
1879  LogPrint(BCLog::NET, "Downloading blocks toward %s (%d) via headers direct fetch\n",
1880  pindexLast->GetBlockHash().ToString(), pindexLast->nHeight);
1881  }
1882  if (vGetData.size() > 0) {
1883  if (nodestate->fSupportsDesiredCmpctVersion && vGetData.size() == 1 && mapBlocksInFlight.size() == 1 && pindexLast->pprev->IsValid(BLOCK_VALID_CHAIN)) {
1884  // In any case, we want to download using a compact block, not a regular one
1885  vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
1886  }
1887  connman->PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData));
1888  }
1889  }
1890  }
1891  // If we're in IBD, we want outbound peers that will serve us a useful
1892  // chain. Disconnect peers that are on chains with insufficient work.
1893  if (::ChainstateActive().IsInitialBlockDownload() && nCount != MAX_HEADERS_RESULTS) {
1894  // When nCount < MAX_HEADERS_RESULTS, we know we have no more
1895  // headers to fetch from this peer.
1896  if (nodestate->pindexBestKnownBlock && nodestate->pindexBestKnownBlock->nChainWork < nMinimumChainWork) {
1897  // This peer has too little work on their headers chain to help
1898  // us sync -- disconnect if it is an outbound disconnection
1899  // candidate.
1900  // Note: We compare their tip to nMinimumChainWork (rather than
1901  // ::ChainActive().Tip()) because we won't start block download
1902  // until we have a headers chain that has at least
1903  // nMinimumChainWork, even if a peer has a chain past our tip,
1904  // as an anti-DoS measure.
1905  if (IsOutboundDisconnectionCandidate(pfrom)) {
1906  LogPrintf("Disconnecting outbound peer %d -- headers chain has insufficient work\n", pfrom.GetId());
1907  pfrom.fDisconnect = true;
1908  }
1909  }
1910  }
1911 
1912  if (!pfrom.fDisconnect && IsOutboundDisconnectionCandidate(pfrom) && nodestate->pindexBestKnownBlock != nullptr && pfrom.m_tx_relay != nullptr) {
1913  // If this is an outbound full-relay peer, check to see if we should protect
1914  // it from the bad/lagging chain logic.
1915  // Note that block-relay-only peers are already implicitly protected, so we
1916  // only consider setting m_protect for the full-relay peers.
1917  if (g_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= ::ChainActive().Tip()->nChainWork && !nodestate->m_chain_sync.m_protect) {
1918  LogPrint(BCLog::NET, "Protecting outbound peer=%d from eviction\n", pfrom.GetId());
1919  nodestate->m_chain_sync.m_protect = true;
1920  ++g_outbound_peers_with_protect_from_disconnect;
1921  }
1922  }
1923  }
1924 
1925  return;
1926 }
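The direct-fetch logic above walks back from the announced header, collects missing blocks up to the per-peer in-flight cap, and then requests them earliest-first. A standalone sketch over plain heights (not block index entries), with illustrative limits:

#include <vector>
#include <algorithm>
#include <cassert>

// Sketch: walk back from the last announced header towards our tip,
// remember what we still need, and request it earliest-first, capped
// at a per-peer in-flight limit.
std::vector<int> BlocksToFetch(int last_header_height, int our_tip_height, size_t max_in_flight /* e.g. 16 */)
{
    std::vector<int> to_fetch;
    for (int h = last_header_height; h > our_tip_height && to_fetch.size() < max_in_flight; --h) {
        to_fetch.push_back(h);                      // collected newest-first while walking back
    }
    std::reverse(to_fetch.begin(), to_fetch.end()); // request earliest-first
    return to_fetch;
}

int main()
{
    const std::vector<int> r = BlocksToFetch(105, 100, 16);
    assert(r == std::vector<int>({101, 102, 103, 104, 105}));
    assert(BlocksToFetch(200, 100, 16).size() == 16); // capped at the in-flight limit
    return 0;
}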
1927 
1928 void static ProcessOrphanTx(CConnman* connman, CTxMemPool& mempool, std::set<uint256>& orphan_work_set, std::list<CTransactionRef>& removed_txn) EXCLUSIVE_LOCKS_REQUIRED(cs_main, g_cs_orphans)
1929 {
1930  AssertLockHeld(cs_main);
1931  AssertLockHeld(g_cs_orphans);
1932  std::set<NodeId> setMisbehaving;
1933  bool done = false;
1934  while (!done && !orphan_work_set.empty()) {
1935  const uint256 orphanHash = *orphan_work_set.begin();
1936  orphan_work_set.erase(orphan_work_set.begin());
1937 
1938  auto orphan_it = mapOrphanTransactions.find(orphanHash);
1939  if (orphan_it == mapOrphanTransactions.end()) continue;
1940 
1941  const CTransactionRef porphanTx = orphan_it->second.tx;
1942  const CTransaction& orphanTx = *porphanTx;
1943  NodeId fromPeer = orphan_it->second.fromPeer;
1944  // Use a new TxValidationState because orphans come from different peers (and we call
1945  // MaybePunishNodeForTx based on the source peer from the orphan map, not based on the peer
1946  // that relayed the previous transaction).
1947  TxValidationState orphan_state;
1948 
1949  if (setMisbehaving.count(fromPeer)) continue;
1950  if (AcceptToMemoryPool(mempool, orphan_state, porphanTx, &removed_txn, false /* bypass_limits */, 0 /* nAbsurdFee */)) {
1951  LogPrint(BCLog::MEMPOOL, " accepted orphan tx %s\n", orphanHash.ToString());
1952  RelayTransaction(orphanHash, *connman);
1953  for (unsigned int i = 0; i < orphanTx.vout.size(); i++) {
1954  auto it_by_prev = mapOrphanTransactionsByPrev.find(COutPoint(orphanHash, i));
1955  if (it_by_prev != mapOrphanTransactionsByPrev.end()) {
1956  for (const auto& elem : it_by_prev->second) {
1957  orphan_work_set.insert(elem->first);
1958  }
1959  }
1960  }
1961  EraseOrphanTx(orphanHash);
1962  done = true;
1963  } else if (orphan_state.GetResult() != TxValidationResult::TX_MISSING_INPUTS) {
1964  if (orphan_state.IsInvalid()) {
1965  // Punish peer that gave us an invalid orphan tx
1966  if (MaybePunishNodeForTx(fromPeer, orphan_state)) {
1967  setMisbehaving.insert(fromPeer);
1968  }
1969  LogPrint(BCLog::MEMPOOL, " invalid orphan tx %s\n", orphanHash.ToString());
1970  }
1971  // Has inputs but not accepted to mempool
1972  // Probably non-standard or insufficient fee
1973  LogPrint(BCLog::MEMPOOL, " removed orphan tx %s\n", orphanHash.ToString());
1974  if (!orphanTx.HasWitness() && orphan_state.GetResult() != TxValidationResult::TX_WITNESS_MUTATED) {
1975  // Do not use rejection cache for witness transactions or
1976  // witness-stripped transactions, as they can have been malleated.
1977  // See https://github.com/bitcoin/bitcoin/issues/8279 for details.
1978  assert(recentRejects);
1979  recentRejects->insert(orphanHash);
1980  }
1981  EraseOrphanTx(orphanHash);
1982  done = true;
1983  }
1984  mempool.check(&::ChainstateActive().CoinsTip());
1985  }
1986 }
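ProcessOrphanTx drains a work set that is refilled whenever an accepted transaction turns out to be the parent of stored orphans. A minimal standalone sketch of that refill step, using strings for txids and a simplified parent index instead of the real outpoint map:

#include <map>
#include <set>
#include <string>
#include <cassert>

// Sketch: when a transaction is accepted, queue every orphan that spends
// one of its outputs for another acceptance attempt.
void QueueChildren(const std::string& accepted_txid,
                   const std::multimap<std::string, std::string>& orphans_by_parent,
                   std::set<std::string>& work_set)
{
    auto range = orphans_by_parent.equal_range(accepted_txid);
    for (auto it = range.first; it != range.second; ++it) {
        work_set.insert(it->second); // retried on the next loop iteration
    }
}

int main()
{
    std::multimap<std::string, std::string> orphans_by_parent{{"parent", "childA"}, {"parent", "childB"}};
    std::set<std::string> work_set;
    QueueChildren("parent", orphans_by_parent, work_set);
    assert(work_set.count("childA") && work_set.count("childB"));
    return 0;
}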
1987 
2003 static bool PrepareBlockFilterRequest(CNode& pfrom, const CChainParams& chain_params,
2004  BlockFilterType filter_type, uint32_t start_height,
2005  const uint256& stop_hash, uint32_t max_height_diff,
2006  const CBlockIndex*& stop_index,
2007  BlockFilterIndex*& filter_index)
2008 {
2009  const bool supported_filter_type =
2010  (filter_type == BlockFilterType::BASIC &&
2011  gArgs.GetBoolArg("-peerblockfilters", DEFAULT_PEERBLOCKFILTERS));
2012  if (!supported_filter_type) {
2013  LogPrint(BCLog::NET, "peer %d requested unsupported block filter type: %d\n",
2014  pfrom.GetId(), static_cast<uint8_t>(filter_type));
2015  pfrom.fDisconnect = true;
2016  return false;
2017  }
2018 
2019  {
2020  LOCK(cs_main);
2021  stop_index = LookupBlockIndex(stop_hash);
2022 
2023  // Check that the stop block exists and the peer would be allowed to fetch it.
2024  if (!stop_index || !BlockRequestAllowed(stop_index, chain_params.GetConsensus())) {
2025  LogPrint(BCLog::NET, "peer %d requested invalid block hash: %s\n",
2026  pfrom.GetId(), stop_hash.ToString());
2027  pfrom.fDisconnect = true;
2028  return false;
2029  }
2030  }
2031 
2032  uint32_t stop_height = stop_index->nHeight;
2033  if (start_height > stop_height) {
2034  LogPrint(BCLog::NET, "peer %d sent invalid getcfilters/getcfheaders with " /* Continued */
2035  "start height %d and stop height %d\n",
2036  pfrom.GetId(), start_height, stop_height);
2037  pfrom.fDisconnect = true;
2038  return false;
2039  }
2040  if (stop_height - start_height >= max_height_diff) {
2041  LogPrint(BCLog::NET, "peer %d requested too many cfilters/cfheaders: %d / %d\n",
2042  pfrom.GetId(), stop_height - start_height + 1, max_height_diff);
2043  pfrom.fDisconnect = true;
2044  return false;
2045  }
2046 
2047  filter_index = GetBlockFilterIndex(filter_type);
2048  if (!filter_index) {
2049  LogPrint(BCLog::NET, "Filter index for supported type %s not found\n", BlockFilterTypeName(filter_type));
2050  return false;
2051  }
2052 
2053  return true;
2054 }
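The height-range validation above boils down to two comparisons: the start may not exceed the stop, and at most max_height_diff filters or headers may be requested per message. A standalone sketch; the function name is hypothetical:

#include <cstdint>
#include <cassert>

// Sketch of the two range checks in PrepareBlockFilterRequest.
bool FilterRangeAcceptable(uint32_t start_height, uint32_t stop_height, uint32_t max_height_diff)
{
    if (start_height > stop_height) return false;
    if (stop_height - start_height >= max_height_diff) return false;
    return true;
}

int main()
{
    assert(FilterRangeAcceptable(100, 199, 100));   // 100 filters requested, at the limit
    assert(!FilterRangeAcceptable(100, 200, 100));  // 101 filters requested, too many
    assert(!FilterRangeAcceptable(200, 100, 100));  // inverted range
    return 0;
}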
2055 
2066 static void ProcessGetCFilters(CNode& pfrom, CDataStream& vRecv, const CChainParams& chain_params,
2067  CConnman& connman)
2068 {
2069  uint8_t filter_type_ser;
2070  uint32_t start_height;
2071  uint256 stop_hash;
2072 
2073  vRecv >> filter_type_ser >> start_height >> stop_hash;
2074 
2075  const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser);
2076 
2077  const CBlockIndex* stop_index;
2078  BlockFilterIndex* filter_index;
2079  if (!PrepareBlockFilterRequest(pfrom, chain_params, filter_type, start_height, stop_hash,
2080  MAX_GETCFILTERS_SIZE, stop_index, filter_index)) {
2081  return;
2082  }
2083 
2084  std::vector<BlockFilter> filters;
2085 
2086  if (!filter_index->LookupFilterRange(start_height, stop_index, filters)) {
2087  LogPrint(BCLog::NET, "Failed to find block filter in index: filter_type=%s, start_height=%d, stop_hash=%s\n",
2088  BlockFilterTypeName(filter_type), start_height, stop_hash.ToString());
2089  return;
2090  }
2091 
2092  for (const auto& filter : filters) {
2093  CSerializedNetMsg msg = CNetMsgMaker(pfrom.GetSendVersion())
2094  .Make(NetMsgType::CFILTER, filter);
2095  connman.PushMessage(&pfrom, std::move(msg));
2096  }
2097 }
2098 
2109 static void ProcessGetCFHeaders(CNode& pfrom, CDataStream& vRecv, const CChainParams& chain_params,
2110  CConnman& connman)
2111 {
2112  uint8_t filter_type_ser;
2113  uint32_t start_height;
2114  uint256 stop_hash;
2115 
2116  vRecv >> filter_type_ser >> start_height >> stop_hash;
2117 
2118  const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser);
2119 
2120  const CBlockIndex* stop_index;
2121  BlockFilterIndex* filter_index;
2122  if (!PrepareBlockFilterRequest(pfrom, chain_params, filter_type, start_height, stop_hash,
2123  MAX_GETCFHEADERS_SIZE, stop_index, filter_index)) {
2124  return;
2125  }
2126 
2127  uint256 prev_header;
2128  if (start_height > 0) {
2129  const CBlockIndex* const prev_block =
2130  stop_index->GetAncestor(static_cast<int>(start_height - 1));
2131  if (!filter_index->LookupFilterHeader(prev_block, prev_header)) {
2132  LogPrint(BCLog::NET, "Failed to find block filter header in index: filter_type=%s, block_hash=%s\n",
2133  BlockFilterTypeName(filter_type), prev_block->GetBlockHash().ToString());
2134  return;
2135  }
2136  }
2137 
2138  std::vector<uint256> filter_hashes;
2139  if (!filter_index->LookupFilterHashRange(start_height, stop_index, filter_hashes)) {
2140  LogPrint(BCLog::NET, "Failed to find block filter hashes in index: filter_type=%s, start_height=%d, stop_hash=%s\n",
2141  BlockFilterTypeName(filter_type), start_height, stop_hash.ToString());
2142  return;
2143  }
2144 
2145  CSerializedNetMsg msg = CNetMsgMaker(pfrom.GetSendVersion())
2146  .Make(NetMsgType::CFHEADERS,
2147  filter_type_ser,
2148  stop_index->GetBlockHash(),
2149  prev_header,
2150  filter_hashes);
2151  connman.PushMessage(&pfrom, std::move(msg));
2152 }
2153 
2164 static void ProcessGetCFCheckPt(CNode& pfrom, CDataStream& vRecv, const CChainParams& chain_params,
2165  CConnman& connman)
2166 {
2167  uint8_t filter_type_ser;
2168  uint256 stop_hash;
2169 
2170  vRecv >> filter_type_ser >> stop_hash;
2171 
2172  const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser);
2173 
2174  const CBlockIndex* stop_index;
2175  BlockFilterIndex* filter_index;
2176  if (!PrepareBlockFilterRequest(pfrom, chain_params, filter_type, /*start_height=*/0, stop_hash,
2177  /*max_height_diff=*/std::numeric_limits<uint32_t>::max(),
2178  stop_index, filter_index)) {
2179  return;
2180  }
2181 
2182  std::vector<uint256> headers(stop_index->nHeight / CFCHECKPT_INTERVAL);
2183 
2184  // Populate headers.
2185  const CBlockIndex* block_index = stop_index;
2186  for (int i = headers.size() - 1; i >= 0; i--) {
2187  int height = (i + 1) * CFCHECKPT_INTERVAL;
2188  block_index = block_index->GetAncestor(height);
2189 
2190  if (!filter_index->LookupFilterHeader(block_index, headers[i])) {
2191  LogPrint(BCLog::NET, "Failed to find block filter header in index: filter_type=%s, block_hash=%s\n",
2192  BlockFilterTypeName(filter_type), block_index->GetBlockHash().ToString());
2193  return;
2194  }
2195  }
2196 
2197  CSerializedNetMsg msg = CNetMsgMaker(pfrom.GetSendVersion())
2198  .Make(NetMsgType::CFCHECKPT,
2199  filter_type_ser,
2200  stop_index->GetBlockHash(),
2201  headers);
2202  connman.PushMessage(&pfrom, std::move(msg));
2203 }
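The checkpoint schedule above returns one filter header per CFCHECKPT_INTERVAL blocks up to the stop height. A standalone sketch of the height computation, assuming the BIP157 interval of 1000 as an illustrative value:

#include <vector>
#include <cassert>

// Sketch: one checkpoint per interval, at heights interval, 2*interval, ...
std::vector<int> CheckpointHeights(int stop_height, int interval = 1000)
{
    std::vector<int> heights(stop_height / interval);
    for (size_t i = 0; i < heights.size(); ++i) {
        heights[i] = static_cast<int>(i + 1) * interval;
    }
    return heights;
}

int main()
{
    assert(CheckpointHeights(2500) == std::vector<int>({1000, 2000}));
    assert(CheckpointHeights(999).empty()); // no checkpoint before the first interval
    return 0;
}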
2204 
2205 void ProcessMessage(
2206  CNode& pfrom,
2207  const std::string& msg_type,
2208  CDataStream& vRecv,
2209  const std::chrono::microseconds time_received,
2210  const CChainParams& chainparams,
2211  ChainstateManager& chainman,
2212  CTxMemPool& mempool,
2213  CConnman* connman,
2214  BanMan* banman,
2215  const std::atomic<bool>& interruptMsgProc)
2216 {
2217  LogPrint(BCLog::NET, "received: %s (%u bytes) peer=%d\n", SanitizeString(msg_type), vRecv.size(), pfrom.GetId());
2218  if (gArgs.IsArgSet("-dropmessagestest") && GetRand(gArgs.GetArg("-dropmessagestest", 0)) == 0)
2219  {
2220  LogPrintf("dropmessagestest DROPPING RECV MESSAGE\n");
2221  return;
2222  }
2223 
2224 
2225  if (msg_type == NetMsgType::VERSION) {
2226  // Each connection can only send one version message
2227  if (pfrom.nVersion != 0)
2228  {
2229  LOCK(cs_main);
2230  Misbehaving(pfrom.GetId(), 1);
2231  return;
2232  }
2233 
2234  int64_t nTime;
2235  CAddress addrMe;
2236  CAddress addrFrom;
2237  uint64_t nNonce = 1;
2238  uint64_t nServiceInt;
2239  ServiceFlags nServices;
2240  int nVersion;
2241  int nSendVersion;
2242  std::string cleanSubVer;
2243  int nStartingHeight = -1;
2244  bool fRelay = true;
2245 
2246  vRecv >> nVersion >> nServiceInt >> nTime >> addrMe;
2247  nSendVersion = std::min(nVersion, PROTOCOL_VERSION);
2248  nServices = ServiceFlags(nServiceInt);
2249  if (!pfrom.fInbound)
2250  {
2251  connman->SetServices(pfrom.addr, nServices);
2252  }
2253  if (!pfrom.fInbound && !pfrom.fFeeler && !pfrom.m_manual_connection && !HasAllDesirableServiceFlags(nServices))
2254  {
2255  LogPrint(BCLog::NET, "peer=%d does not offer the expected services (%08x offered, %08x expected); disconnecting\n", pfrom.GetId(), nServices, GetDesirableServiceFlags(nServices));
2256  pfrom.fDisconnect = true;
2257  return;
2258  }
2259 
2260  if (nVersion < MIN_PEER_PROTO_VERSION) {
2261  // disconnect from peers older than this proto version
2262  LogPrint(BCLog::NET, "peer=%d using obsolete version %i; disconnecting\n", pfrom.GetId(), nVersion);
2263  pfrom.fDisconnect = true;
2264  return;
2265  }
2266 
2267  if (!vRecv.empty())
2268  vRecv >> addrFrom >> nNonce;
2269  if (!vRecv.empty()) {
2270  std::string strSubVer;
2271  vRecv >> LIMITED_STRING(strSubVer, MAX_SUBVERSION_LENGTH);
2272  cleanSubVer = SanitizeString(strSubVer);
2273  }
2274  if (!vRecv.empty()) {
2275  vRecv >> nStartingHeight;
2276  }
2277  if (!vRecv.empty())
2278  vRecv >> fRelay;
2279  // Disconnect if we connected to ourself
2280  if (pfrom.fInbound && !connman->CheckIncomingNonce(nNonce))
2281  {
2282  LogPrintf("connected to self at %s, disconnecting\n", pfrom.addr.ToString());
2283  pfrom.fDisconnect = true;
2284  return;
2285  }
2286 
2287  if (pfrom.fInbound && addrMe.IsRoutable())
2288  {
2289  SeenLocal(addrMe);
2290  }
2291 
2292  // Be shy and don't send version until we hear
2293  if (pfrom.fInbound)
2294  PushNodeVersion(pfrom, connman, GetAdjustedTime());
2295 
2296  connman->PushMessage(&pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERACK));
2297 
2298  pfrom.nServices = nServices;
2299  pfrom.SetAddrLocal(addrMe);
2300  {
2301  LOCK(pfrom.cs_SubVer);
2302  pfrom.cleanSubVer = cleanSubVer;
2303  }
2304  pfrom.nStartingHeight = nStartingHeight;
2305 
2306  // set nodes not relaying blocks and tx and not serving (parts of) the historical blockchain as "clients"
2307  pfrom.fClient = (!(nServices & NODE_NETWORK) && !(nServices & NODE_NETWORK_LIMITED));
2308 
2309  // set nodes not capable of serving the complete blockchain history as "limited nodes"
2310  pfrom.m_limited_node = (!(nServices & NODE_NETWORK) && (nServices & NODE_NETWORK_LIMITED));
2311 
2312  if (pfrom.m_tx_relay != nullptr) {
2313  LOCK(pfrom.m_tx_relay->cs_filter);
2314  pfrom.m_tx_relay->fRelayTxes = fRelay; // set to true after we get the first filter* message
2315  }
2316 
2317  // Change version
2318  pfrom.SetSendVersion(nSendVersion);
2319  pfrom.nVersion = nVersion;
2320 
2321  if((nServices & NODE_WITNESS))
2322  {
2323  LOCK(cs_main);
2324  State(pfrom.GetId())->fHaveWitness = true;
2325  }
2326 
2327  // Potentially mark this peer as a preferred download peer.
2328  {
2329  LOCK(cs_main);
2330  UpdatePreferredDownload(pfrom, State(pfrom.GetId()));
2331  }
2332 
2333  if (!pfrom.fInbound && pfrom.IsAddrRelayPeer())
2334  {
2335  // Advertise our address
2336  if (fListen && !::ChainstateActive().IsInitialBlockDownload())
2337  {
2338  CAddress addr = GetLocalAddress(&pfrom.addr, pfrom.GetLocalServices());
2339  FastRandomContext insecure_rand;
2340  if (addr.IsRoutable())
2341  {
2342  LogPrint(BCLog::NET, "ProcessMessages: advertising address %s\n", addr.ToString());
2343  pfrom.PushAddress(addr, insecure_rand);
2344  } else if (IsPeerAddrLocalGood(&pfrom)) {
2345  addr.SetIP(addrMe);
2346  LogPrint(BCLog::NET, "ProcessMessages: advertising address %s\n", addr.ToString());
2347  pfrom.PushAddress(addr, insecure_rand);
2348  }
2349  }
2350 
2351  // Get recent addresses
2352  connman->PushMessage(&pfrom, CNetMsgMaker(nSendVersion).Make(NetMsgType::GETADDR));
2353  pfrom.fGetAddr = true;
2354  connman->MarkAddressGood(pfrom.addr);
2355  }
2356 
2357  std::string remoteAddr;
2358  if (fLogIPs)
2359  remoteAddr = ", peeraddr=" + pfrom.addr.ToString();
2360 
2361  LogPrint(BCLog::NET, "receive version message: %s: version %d, blocks=%d, us=%s, peer=%d%s\n",
2362  cleanSubVer, pfrom.nVersion,
2363  pfrom.nStartingHeight, addrMe.ToString(), pfrom.GetId(),
2364  remoteAddr);
2365 
2366  int64_t nTimeOffset = nTime - GetTime();
2367  pfrom.nTimeOffset = nTimeOffset;
2368  AddTimeData(pfrom.addr, nTimeOffset);
2369 
2370  // If the peer is old enough to have the old alert system, send it the final alert.
2371  if (pfrom.nVersion <= 70012) {
2372  CDataStream finalAlert(ParseHex("60010000000000000000000000ffffff7f00000000ffffff7ffeffff7f01ffffff7f00000000ffffff7f00ffffff7f002f555247454e543a20416c657274206b657920636f6d70726f6d697365642c2075706772616465207265717569726564004630440220653febd6410f470f6bae11cad19c48413becb1ac2c17f908fd0fd53bdc3abd5202206d0e9c96fe88d4a0f01ed9dedae2b6f9e00da94cad0fecaae66ecf689bf71b50"), SER_NETWORK, PROTOCOL_VERSION);
2373  connman->PushMessage(&pfrom, CNetMsgMaker(nSendVersion).Make("alert", finalAlert));
2374  }
2375 
2376  // Feeler connections exist only to verify if address is online.
2377  if (pfrom.fFeeler) {
2378  assert(pfrom.fInbound == false);
2379  pfrom.fDisconnect = true;
2380  }
2381  return;
2382  }
2383 
2384  if (pfrom.nVersion == 0) {
2385  // Must have a version message before anything else
2386  LOCK(cs_main);
2387  Misbehaving(pfrom.GetId(), 1);
2388  return;
2389  }
2390 
2391  // At this point, the outgoing message serialization version can't change.
2392  const CNetMsgMaker msgMaker(pfrom.GetSendVersion());
2393 
2394  if (msg_type == NetMsgType::VERACK)
2395  {
2396  pfrom.SetRecvVersion(std::min(pfrom.nVersion.load(), PROTOCOL_VERSION));
2397 
2398  if (!pfrom.fInbound) {
2399  // Mark this node as currently connected, so we update its timestamp later.
2400  LOCK(cs_main);
2401  State(pfrom.GetId())->fCurrentlyConnected = true;
2402  LogPrintf("New outbound peer connected: version: %d, blocks=%d, peer=%d%s (%s)\n",
2403  pfrom.nVersion.load(), pfrom.nStartingHeight,
2404  pfrom.GetId(), (fLogIPs ? strprintf(", peeraddr=%s", pfrom.addr.ToString()) : ""),
2405  pfrom.m_tx_relay == nullptr ? "block-relay" : "full-relay");
2406  }
2407 
2408  if (pfrom.nVersion >= SENDHEADERS_VERSION) {
2409  // Tell our peer we prefer to receive headers rather than inv's
2410  // We send this to non-NODE NETWORK peers as well, because even
2411  // non-NODE NETWORK peers can announce blocks (such as pruning
2412  // nodes)
2413  connman->PushMessage(&pfrom, msgMaker.Make(NetMsgType::SENDHEADERS));
2414  }
2415  if (pfrom.nVersion >= SHORT_IDS_BLOCKS_VERSION) {
2416  // Tell our peer we are willing to provide version 1 or 2 cmpctblocks
2417  // However, we do not request new block announcements using
2418  // cmpctblock messages.
2419  // We send this to non-NODE NETWORK peers as well, because
2420  // they may wish to request compact blocks from us
2421  bool fAnnounceUsingCMPCTBLOCK = false;
2422  uint64_t nCMPCTBLOCKVersion = 2;
2423  if (pfrom.GetLocalServices() & NODE_WITNESS)
2424  connman->PushMessage(&pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
2425  nCMPCTBLOCKVersion = 1;
2426  connman->PushMessage(&pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
2427  }
2428  pfrom.fSuccessfullyConnected = true;
2429  return;
2430  }
2431 
2432  if (!pfrom.fSuccessfullyConnected) {
2433  // Must have a verack message before anything else
2434  LOCK(cs_main);
2435  Misbehaving(pfrom.GetId(), 1);
2436  return;
2437  }
2438 
2439  if (msg_type == NetMsgType::ADDR) {
2440  std::vector<CAddress> vAddr;
2441  vRecv >> vAddr;
2442 
2443  if (!pfrom.IsAddrRelayPeer()) {
2444  return;
2445  }
2446  if (vAddr.size() > 1000)
2447  {
2448  LOCK(cs_main);
2449  Misbehaving(pfrom.GetId(), 20, strprintf("addr message size = %u", vAddr.size()));
2450  return;
2451  }
2452 
2453  // Store the new addresses
2454  std::vector<CAddress> vAddrOk;
2455  int64_t nNow = GetAdjustedTime();
2456  int64_t nSince = nNow - 10 * 60;
2457  for (CAddress& addr : vAddr)
2458  {
2459  if (interruptMsgProc)
2460  return;
2461 
2462  // We only bother storing full nodes, though this may include
2463  // things which we would not make an outbound connection to, in
2464  // part because we may make feeler connections to them.
2465  if (!MayHaveUsefulAddressDB(addr.nServices) && !HasAllDesirableServiceFlags(addr.nServices))
2466  continue;
2467 
2468  if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60)
2469  addr.nTime = nNow - 5 * 24 * 60 * 60;
2470  pfrom.AddAddressKnown(addr);
2471  if (banman->IsDiscouraged(addr)) continue; // Do not process banned/discouraged addresses beyond remembering we received them
2472  if (banman->IsBanned(addr)) continue;
2473  bool fReachable = IsReachable(addr);
2474  if (addr.nTime > nSince && !pfrom.fGetAddr && vAddr.size() <= 10 && addr.IsRoutable())
2475  {
2476  // Relay to a limited number of other nodes
2477  RelayAddress(addr, fReachable, *connman);
2478  }
2479  // Do not store addresses outside our network
2480  if (fReachable)
2481  vAddrOk.push_back(addr);
2482  }
2483  connman->AddNewAddresses(vAddrOk, pfrom.addr, 2 * 60 * 60);
2484  if (vAddr.size() < 1000)
2485  pfrom.fGetAddr = false;
2486  if (pfrom.fOneShot)
2487  pfrom.fDisconnect = true;
2488  return;
2489  }
2490 
2491  if (msg_type == NetMsgType::SENDHEADERS) {
2492  LOCK(cs_main);
2493  State(pfrom.GetId())->fPreferHeaders = true;
2494  return;
2495  }
2496 
2497  if (msg_type == NetMsgType::SENDCMPCT) {
2498  bool fAnnounceUsingCMPCTBLOCK = false;
2499  uint64_t nCMPCTBLOCKVersion = 0;
2500  vRecv >> fAnnounceUsingCMPCTBLOCK >> nCMPCTBLOCKVersion;
2501  if (nCMPCTBLOCKVersion == 1 || ((pfrom.GetLocalServices() & NODE_WITNESS) && nCMPCTBLOCKVersion == 2)) {
2502  LOCK(cs_main);
2503  // fProvidesHeaderAndIDs is used to "lock in" version of compact blocks we send (fWantsCmpctWitness)
2504  if (!State(pfrom.GetId())->fProvidesHeaderAndIDs) {
2505  State(pfrom.GetId())->fProvidesHeaderAndIDs = true;
2506  State(pfrom.GetId())->fWantsCmpctWitness = nCMPCTBLOCKVersion == 2;
2507  }
2508  if (State(pfrom.GetId())->fWantsCmpctWitness == (nCMPCTBLOCKVersion == 2)) // ignore later version announces
2509  State(pfrom.GetId())->fPreferHeaderAndIDs = fAnnounceUsingCMPCTBLOCK;
2510  if (!State(pfrom.GetId())->fSupportsDesiredCmpctVersion) {
2511  if (pfrom.GetLocalServices() & NODE_WITNESS)
2512  State(pfrom.GetId())->fSupportsDesiredCmpctVersion = (nCMPCTBLOCKVersion == 2);
2513  else
2514  State(pfrom.GetId())->fSupportsDesiredCmpctVersion = (nCMPCTBLOCKVersion == 1);
2515  }
2516  }
2517  return;
2518  }
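The sendcmpct handling above "locks in" the compact-block version on the first valid announcement; later announcements of a different version can no longer change it. A standalone sketch of that lock-in with a simplified state struct (names hypothetical, the fSupportsDesiredCmpctVersion bookkeeping is omitted):

#include <cassert>
#include <cstdint>

struct CmpctState {
    bool provides_header_and_ids = false;
    bool wants_cmpct_witness = false;   // locked in by the first announcement
    bool prefer_header_and_ids = false;
};

// Sketch: accept version 1 always, version 2 only if we are a witness node;
// the first valid version decides which compact blocks we will send.
void HandleSendCmpct(CmpctState& st, bool announce, uint64_t version, bool we_are_witness_node)
{
    if (version != 1 && !(we_are_witness_node && version == 2)) return; // unsupported version
    if (!st.provides_header_and_ids) {
        st.provides_header_and_ids = true;
        st.wants_cmpct_witness = (version == 2);
    }
    if (st.wants_cmpct_witness == (version == 2)) st.prefer_header_and_ids = announce;
}

int main()
{
    CmpctState st;
    HandleSendCmpct(st, /*announce=*/true, /*version=*/2, /*we_are_witness_node=*/true);
    assert(st.wants_cmpct_witness && st.prefer_header_and_ids);
    HandleSendCmpct(st, /*announce=*/false, /*version=*/1, true); // later v1 announce is ignored
    assert(st.prefer_header_and_ids);
    return 0;
}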
2519 
2520  if (msg_type == NetMsgType::INV) {
2521  std::vector<CInv> vInv;
2522  vRecv >> vInv;
2523  if (vInv.size() > MAX_INV_SZ)
2524  {
2525  LOCK(cs_main);
2526  Misbehaving(pfrom.GetId(), 20, strprintf("inv message size = %u", vInv.size()));
2527  return;
2528  }
2529 
2530  // We won't accept tx inv's if we're in blocks-only mode, or this is a
2531  // block-relay-only peer
2532  bool fBlocksOnly = !g_relay_txes || (pfrom.m_tx_relay == nullptr);
2533 
2534  // Allow peers with relay permission to send data other than blocks in blocks only mode
2535  if (pfrom.HasPermission(PF_RELAY)) {
2536  fBlocksOnly = false;
2537  }
2538 
2539  LOCK(cs_main);
2540 
2541  uint32_t nFetchFlags = GetFetchFlags(pfrom);
2542  const auto current_time = GetTime<std::chrono::microseconds>();
2543  uint256* best_block{nullptr};
2544 
2545  for (CInv &inv : vInv)
2546  {
2547  if (interruptMsgProc)
2548  return;
2549 
2550  bool fAlreadyHave = AlreadyHave(inv, mempool);
2551  LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId());
2552 
2553  if (inv.type == MSG_TX) {
2554  inv.type |= nFetchFlags;
2555  }
2556 
2557  if (inv.type == MSG_BLOCK) {
2558  UpdateBlockAvailability(pfrom.GetId(), inv.hash);
2559  if (!fAlreadyHave && !fImporting && !fReindex && !mapBlocksInFlight.count(inv.hash)) {
2560  // Headers-first is the primary method of announcement on
2561  // the network. If a node fell back to sending blocks by inv,
2562  // it's probably for a re-org. The final block hash
2563  // provided should be the highest, so send a getheaders and
2564  // then fetch the blocks we need to catch up.
2565  best_block = &inv.hash;
2566  }
2567  } else {
2568  pfrom.AddInventoryKnown(inv);
2569  if (fBlocksOnly) {
2570  LogPrint(BCLog::NET, "transaction (%s) inv sent in violation of protocol, disconnecting peer=%d\n", inv.hash.ToString(), pfrom.GetId());
2571  pfrom.fDisconnect = true;
2572  return;
2573  } else if (!fAlreadyHave && !chainman.ActiveChainstate().IsInitialBlockDownload()) {
2574  RequestTx(State(pfrom.GetId()), inv.hash, current_time);
2575  }
2576  }
2577  }
2578 
2579  if (best_block != nullptr) {
2580  connman->PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexBestHeader), *best_block));
2581  LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, best_block->ToString(), pfrom.GetId());
2582  }
2583 
2584  return;
2585  }
2586 
2587  if (msg_type == NetMsgType::GETDATA) {
2588  std::vector<CInv> vInv;
2589  vRecv >> vInv;
2590  if (vInv.size() > MAX_INV_SZ)
2591  {
2592  LOCK(cs_main);
2593  Misbehaving(pfrom.GetId(), 20, strprintf("getdata message size = %u", vInv.size()));
2594  return;
2595  }
2596 
2597  LogPrint(BCLog::NET, "received getdata (%u invsz) peer=%d\n", vInv.size(), pfrom.GetId());
2598 
2599  if (vInv.size() > 0) {
2600  LogPrint(BCLog::NET, "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom.GetId());
2601  }
2602 
2603  pfrom.vRecvGetData.insert(pfrom.vRecvGetData.end(), vInv.begin(), vInv.end());
2604  ProcessGetData(pfrom, chainparams, connman, mempool, interruptMsgProc);
2605  return;
2606  }
2607 
2608  if (msg_type == NetMsgType::GETBLOCKS) {
2609  CBlockLocator locator;
2610  uint256 hashStop;
2611  vRecv >> locator >> hashStop;
2612 
2613  if (locator.vHave.size() > MAX_LOCATOR_SZ) {
2614  LogPrint(BCLog::NET, "getblocks locator size %lld > %d, disconnect peer=%d\n", locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.GetId());
2615  pfrom.fDisconnect = true;
2616  return;
2617  }
2618 
2619  // We might have announced the currently-being-connected tip using a
2620  // compact block, which resulted in the peer sending a getblocks
2621  // request, which we would otherwise respond to without the new block.
2622  // To avoid this situation we simply verify that we are on our best
2623  // known chain now. This is super overkill, but we handle it better
2624  // for getheaders requests, and there are no known nodes which support
2625  // compact blocks but still use getblocks to request blocks.
2626  {
2627  std::shared_ptr<const CBlock> a_recent_block;
2628  {
2629  LOCK(cs_most_recent_block);
2630  a_recent_block = most_recent_block;
2631  }
2632  BlockValidationState state;
2633  if (!ActivateBestChain(state, Params(), a_recent_block)) {
2634  LogPrint(BCLog::NET, "failed to activate chain (%s)\n", state.ToString());
2635  }
2636  }
2637 
2638  LOCK(cs_main);
2639 
2640  // Find the last block the caller has in the main chain
2641  const CBlockIndex* pindex = FindForkInGlobalIndex(::ChainActive(), locator);
2642 
2643  // Send the rest of the chain
2644  if (pindex)
2645  pindex = ::ChainActive().Next(pindex);
2646  int nLimit = 500;
2647  LogPrint(BCLog::NET, "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom.GetId());
2648  for (; pindex; pindex = ::ChainActive().Next(pindex))
2649  {
2650  if (pindex->GetBlockHash() == hashStop)
2651  {
2652  LogPrint(BCLog::NET, " getblocks stopping at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
2653  break;
2654  }
2655  // If pruning, don't inv blocks unless we have on disk and are likely to still have
2656  // for some reasonable time window (1 hour) that block relay might require.
2657  const int nPrunedBlocksLikelyToHave = MIN_BLOCKS_TO_KEEP - 3600 / chainparams.GetConsensus().nPowTargetSpacing;
2658  if (fPruneMode && (!(pindex->nStatus & BLOCK_HAVE_DATA) || pindex->nHeight <= ::ChainActive().Tip()->nHeight - nPrunedBlocksLikelyToHave))
2659  {
2660  LogPrint(BCLog::NET, " getblocks stopping, pruned or too old block at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
2661  break;
2662  }
2663  WITH_LOCK(pfrom.cs_inventory, pfrom.vInventoryBlockToSend.push_back(pindex->GetBlockHash()));
2664  if (--nLimit <= 0)
2665  {
2666  // When this block is requested, we'll send an inv that'll
2667  // trigger the peer to getblocks the next batch of inventory.
2668  LogPrint(BCLog::NET, " getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
2669  pfrom.hashContinue = pindex->GetBlockHash();
2670  break;
2671  }
2672  }
2673  return;
2674  }
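The pruning guard above only announces blocks that are expected to remain on disk for roughly another hour of block relay. A standalone sketch of the computation, using the usual mainnet values (288 kept blocks, 600-second target spacing) purely as illustrative inputs:

#include <cassert>

// Sketch: how many of the most recent blocks a pruned node can still count
// on keeping an hour from now, and the resulting announcement cut-off.
int PrunedBlocksLikelyToHave(int min_blocks_to_keep, int pow_target_spacing_secs)
{
    return min_blocks_to_keep - 3600 / pow_target_spacing_secs;
}

bool MayAnnounceWhenPruning(int tip_height, int block_height, int likely_to_have)
{
    return block_height > tip_height - likely_to_have;
}

int main()
{
    const int likely = PrunedBlocksLikelyToHave(288, 600);
    assert(likely == 282);
    assert(MayAnnounceWhenPruning(1000, 800, likely));   // recent enough to announce
    assert(!MayAnnounceWhenPruning(1000, 700, likely));  // may already be pruned, stop here
    return 0;
}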
2675 
2676  if (msg_type == NetMsgType::GETBLOCKTXN) {
2677  BlockTransactionsRequest req;
2678  vRecv >> req;
2679 
2680  std::shared_ptr<const CBlock> recent_block;
2681  {
2682  LOCK(cs_most_recent_block);
2683  if (most_recent_block_hash == req.blockhash)
2684  recent_block = most_recent_block;
2685  // Unlock cs_most_recent_block to avoid cs_main lock inversion
2686  }
2687  if (recent_block) {
2688  SendBlockTransactions(*recent_block, req, pfrom, connman);
2689  return;
2690  }
2691 
2692  LOCK(cs_main);
2693 
2694  const CBlockIndex* pindex = LookupBlockIndex(req.blockhash);
2695  if (!pindex || !(pindex->nStatus & BLOCK_HAVE_DATA)) {
2696  LogPrint(BCLog::NET, "Peer %d sent us a getblocktxn for a block we don't have\n", pfrom.GetId());
2697  return;
2698  }
2699 
2700  if (pindex->nHeight < ::ChainActive().Height() - MAX_BLOCKTXN_DEPTH) {
2701  // If an older block is requested (should never happen in practice,
2702  // but can happen in tests) send a block response instead of a
2703  // blocktxn response. Sending a full block response instead of a
2704  // small blocktxn response is preferable in the case where a peer
2705  // might maliciously send lots of getblocktxn requests to trigger
2706  // expensive disk reads, because it will require the peer to
2707  // actually receive all the data read from disk over the network.
2708  LogPrint(BCLog::NET, "Peer %d sent us a getblocktxn for a block > %i deep\n", pfrom.GetId(), MAX_BLOCKTXN_DEPTH);
2709  CInv inv;
2710  inv.type = State(pfrom.GetId())->fWantsCmpctWitness ? MSG_WITNESS_BLOCK : MSG_BLOCK;
2711  inv.hash = req.blockhash;
2712  pfrom.vRecvGetData.push_back(inv);
2713  // The message processing loop will go around again (without pausing) and we'll respond then (without cs_main)
2714  return;
2715  }
2716 
2717  CBlock block;
2718  bool ret = ReadBlockFromDisk(block, pindex, chainparams.GetConsensus());
2719  assert(ret);
2720 
2721  SendBlockTransactions(block, req, pfrom, connman);
2722  return;
2723  }
2724 
2725  if (msg_type == NetMsgType::GETHEADERS) {
2726  CBlockLocator locator;
2727  uint256 hashStop;
2728  vRecv >> locator >> hashStop;
2729 
2730  if (locator.vHave.size() > MAX_LOCATOR_SZ) {
2731  LogPrint(BCLog::NET, "getheaders locator size %lld > %d, disconnect peer=%d\n", locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.GetId());
2732  pfrom.fDisconnect = true;
2733  return;
2734  }
2735 
2736  LOCK(cs_main);
2737  if (::ChainstateActive().IsInitialBlockDownload() && !pfrom.HasPermission(PF_DOWNLOAD)) {
2738  LogPrint(BCLog::NET, "Ignoring getheaders from peer=%d because node is in initial block download\n", pfrom.GetId());
2739  return;
2740  }
2741 
2742  CNodeState *nodestate = State(pfrom.GetId());
2743  const CBlockIndex* pindex = nullptr;
2744  if (locator.IsNull())
2745  {
2746  // If locator is null, return the hashStop block
2747  pindex = LookupBlockIndex(hashStop);
2748  if (!pindex) {
2749  return;
2750  }
2751 
2752  if (!BlockRequestAllowed(pindex, chainparams.GetConsensus())) {
2753  LogPrint(BCLog::NET, "%s: ignoring request from peer=%i for old block header that isn't in the main chain\n", __func__, pfrom.GetId());
2754  return;
2755  }
2756  }
2757  else
2758  {
2759  // Find the last block the caller has in the main chain
2760  pindex = FindForkInGlobalIndex(::ChainActive(), locator);
2761  if (pindex)
2762  pindex = ::ChainActive().Next(pindex);
2763  }
2764 
2765  // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end
2766  std::vector<CBlock> vHeaders;
2767  int nLimit = MAX_HEADERS_RESULTS;
2768  LogPrint(BCLog::NET, "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), pfrom.GetId());
2769  for (; pindex; pindex = ::ChainActive().Next(pindex))
2770  {
2771  vHeaders.push_back(pindex->GetBlockHeader());
2772  if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop)
2773  break;
2774  }
2775  // pindex can be nullptr either if we sent ::ChainActive().Tip() OR
2776  // if our peer has ::ChainActive().Tip() (and thus we are sending an empty
2777  // headers message). In both cases it's safe to update
2778  // pindexBestHeaderSent to be our tip.
2779  //
2780  // It is important that we simply reset the BestHeaderSent value here,
2781  // and not max(BestHeaderSent, newHeaderSent). We might have announced
2782  // the currently-being-connected tip using a compact block, which
2783  // resulted in the peer sending a headers request, which we respond to
2784  // without the new block. By resetting the BestHeaderSent, we ensure we
2785  // will re-announce the new block via headers (or compact blocks again)
2786  // in the SendMessages logic.
2787  nodestate->pindexBestHeaderSent = pindex ? pindex : ::ChainActive().Tip();
2788  connman->PushMessage(&pfrom, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
2789  return;
2790  }
2791 
2792  if (msg_type == NetMsgType::TX) {
2793  // Stop processing the transaction early if
2794  // 1) We are in blocks only mode and peer has no relay permission
2795  // 2) This peer is a block-relay-only peer
2796  if ((!g_relay_txes && !pfrom.HasPermission(PF_RELAY)) || (pfrom.m_tx_relay == nullptr))
2797  {
2798  LogPrint(BCLog::NET, "transaction sent in violation of protocol peer=%d\n", pfrom.GetId());
2799  pfrom.fDisconnect = true;
2800  return;
2801  }
2802 
2803  CTransactionRef ptx;
2804  vRecv >> ptx;
2805  const CTransaction& tx = *ptx;
2806 
2807  CInv inv(MSG_TX, tx.GetHash());
2808  pfrom.AddInventoryKnown(inv);
2809 
2810  LOCK2(cs_main, g_cs_orphans);
2811 
2812  TxValidationState state;
2813 
2814  CNodeState* nodestate = State(pfrom.GetId());
2815  nodestate->m_tx_download.m_tx_announced.erase(inv.hash);
2816  nodestate->m_tx_download.m_tx_in_flight.erase(inv.hash);
2817  EraseTxRequest(inv.hash);
2818 
2819  std::list<CTransactionRef> lRemovedTxn;
2820 
2821  if (!AlreadyHave(inv, mempool) &&
2822  AcceptToMemoryPool(mempool, state, ptx, &lRemovedTxn, false /* bypass_limits */, 0 /* nAbsurdFee */)) {
2823  mempool.check(&::ChainstateActive().CoinsTip());
2824  RelayTransaction(tx.GetHash(), *connman);
2825  for (unsigned int i = 0; i < tx.vout.size(); i++) {
2826  auto it_by_prev = mapOrphanTransactionsByPrev.find(COutPoint(inv.hash, i));
2827  if (it_by_prev != mapOrphanTransactionsByPrev.end()) {
2828  for (const auto& elem : it_by_prev->second) {
2829  pfrom.orphan_work_set.insert(elem->first);
2830  }
2831  }
2832  }
2833 
2834  pfrom.nLastTXTime = GetTime();
2835 
2836  LogPrint(BCLog::MEMPOOL, "AcceptToMemoryPool: peer=%d: accepted %s (poolsz %u txn, %u kB)\n",
2837  pfrom.GetId(),
2838  tx.GetHash().ToString(),
2839  mempool.size(), mempool.DynamicMemoryUsage() / 1000);
2840 
2841  // Recursively process any orphan transactions that depended on this one
2842  ProcessOrphanTx(connman, mempool, pfrom.orphan_work_set, lRemovedTxn);
2843  }
2844  else if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS)
2845  {
2846  bool fRejectedParents = false; // It may be the case that the orphan's parents have all been rejected
2847  for (const CTxIn& txin : tx.vin) {
2848  if (recentRejects->contains(txin.prevout.hash)) {
2849  fRejectedParents = true;
2850  break;
2851  }
2852  }
2853  if (!fRejectedParents) {
2854  uint32_t nFetchFlags = GetFetchFlags(pfrom);
2855  const auto current_time = GetTime<std::chrono::microseconds>();
2856 
2857  for (const CTxIn& txin : tx.vin) {
2858  CInv _inv(MSG_TX | nFetchFlags, txin.prevout.hash);
2859  pfrom.AddInventoryKnown(_inv);
2860  if (!AlreadyHave(_inv, mempool)) RequestTx(State(pfrom.GetId()), _inv.hash, current_time);
2861  }
2862  AddOrphanTx(ptx, pfrom.GetId());
2863 
2864  // DoS prevention: do not allow mapOrphanTransactions to grow unbounded (see CVE-2012-3789)
2865  unsigned int nMaxOrphanTx = (unsigned int)std::max((int64_t)0, gArgs.GetArg("-maxorphantx", DEFAULT_MAX_ORPHAN_TRANSACTIONS));
2866  unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTx);
2867  if (nEvicted > 0) {
2868  LogPrint(BCLog::MEMPOOL, "mapOrphan overflow, removed %u tx\n", nEvicted);
2869  }
2870  } else {
2871  LogPrint(BCLog::MEMPOOL, "not keeping orphan with rejected parents %s\n",tx.GetHash().ToString());
2872  // We will continue to reject this tx since it has rejected
2873  // parents so avoid re-requesting it from other peers.
2874  recentRejects->insert(tx.GetHash());
2875  }
2876  } else {
2877  if (!tx.HasWitness() && state.GetResult() != TxValidationResult::TX_WITNESS_MUTATED) {
2878  // Do not use rejection cache for witness transactions or
2879  // witness-stripped transactions, as they can have been malleated.
2880  // See https://github.com/bitcoin/bitcoin/issues/8279 for details.
2881  assert(recentRejects);
2882  recentRejects->insert(tx.GetHash());
2883  if (RecursiveDynamicUsage(*ptx) < 100000) {
2884  AddToCompactExtraTransactions(ptx);
2885  }
2886  } else if (tx.HasWitness() && RecursiveDynamicUsage(*ptx) < 100000) {
2887  AddToCompactExtraTransactions(ptx);
2888  }
2889 
2890  if (pfrom.HasPermission(PF_FORCERELAY)) {
2891  // Always relay transactions received from peers with forcerelay permission, even
2892  // if they were already in the mempool,
2893  // allowing the node to function as a gateway for
2894  // nodes hidden behind it.
2895  if (!mempool.exists(tx.GetHash())) {
2896  LogPrintf("Not relaying non-mempool transaction %s from forcerelay peer=%d\n", tx.GetHash().ToString(), pfrom.GetId());
2897  } else {
2898  LogPrintf("Force relaying tx %s from peer=%d\n", tx.GetHash().ToString(), pfrom.GetId());
2899  RelayTransaction(tx.GetHash(), *connman);
2900  }
2901  }
2902  }
2903 
2904  for (const CTransactionRef& removedTx : lRemovedTxn)
2905  AddToCompactExtraTransactions(removedTx);
2906 
2907  // If a tx has been detected by recentRejects, we will have reached
2908  // this point and the tx will have been ignored. Because we haven't run
2909  // the tx through AcceptToMemoryPool, we won't have computed a DoS
2910  // score for it or determined exactly why we consider it invalid.
2911  //
2912  // This means we won't penalize any peer subsequently relaying a DoSy
2913  // tx (even if we penalized the first peer who gave it to us) because
2914  // we have to account for recentRejects showing false positives. In
2915  // other words, we shouldn't penalize a peer if we aren't *sure* they
2916  // submitted a DoSy tx.
2917  //
2918  // Note that recentRejects doesn't just record DoSy or invalid
2919  // transactions, but any tx not accepted by the mempool, which may be
2920  // due to node policy (vs. consensus). So we can't blanket penalize a
2921  // peer simply for relaying a tx that our recentRejects has caught,
2922  // regardless of false positives.
2923 
2924  if (state.IsInvalid())
2925  {
2926  LogPrint(BCLog::MEMPOOLREJ, "%s from peer=%d was not accepted: %s\n", tx.GetHash().ToString(),
2927  pfrom.GetId(),
2928  state.ToString());
2929  MaybePunishNodeForTx(pfrom.GetId(), state);
2930  }
2931  return;
2932  }
2933 
2934  if (msg_type == NetMsgType::CMPCTBLOCK)
2935  {
2936  // Ignore cmpctblock received while importing
2937  if (fImporting || fReindex) {
2938  LogPrint(BCLog::NET, "Unexpected cmpctblock message received from peer %d\n", pfrom.GetId());
2939  return;
2940  }
2941 
2942  CBlockHeaderAndShortTxIDs cmpctblock;
2943  vRecv >> cmpctblock;
2944 
2945  bool received_new_header = false;
2946 
2947  {
2948  LOCK(cs_main);
2949 
2950  if (!LookupBlockIndex(cmpctblock.header.hashPrevBlock)) {
2951  // Doesn't connect (or is genesis), instead of DoSing in AcceptBlockHeader, request deeper headers
2952  if (!::ChainstateActive().IsInitialBlockDownload())
2953  connman->PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexBestHeader), uint256()));
2954  return;
2955  }
2956 
2957  if (!LookupBlockIndex(cmpctblock.header.GetHash())) {
2958  received_new_header = true;
2959  }
2960  }
2961 
2962  const CBlockIndex *pindex = nullptr;
2963  BlockValidationState state;
2964  if (!chainman.ProcessNewBlockHeaders({cmpctblock.header}, state, chainparams, &pindex)) {
2965  if (state.IsInvalid()) {
2966  MaybePunishNodeForBlock(pfrom.GetId(), state, /*via_compact_block*/ true, "invalid header via cmpctblock");
2967  return;
2968  }
2969  }
2970 
2971  // When we succeed in decoding a block's txids from a cmpctblock
2972  // message we typically jump to the BLOCKTXN handling code, with a
2973  // dummy (empty) BLOCKTXN message, to re-use the logic there in
2974  // completing processing of the putative block (without cs_main).
2975  bool fProcessBLOCKTXN = false;
2976  CDataStream blockTxnMsg(SER_NETWORK, PROTOCOL_VERSION);
2977 
2978  // If we end up treating this as a plain headers message, call that as well
2979  // without cs_main.
2980  bool fRevertToHeaderProcessing = false;
2981 
2982  // Keep a CBlock for "optimistic" compactblock reconstructions (see
2983  // below)
2984  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
2985  bool fBlockReconstructed = false;
2986 
2987  {
2988  LOCK2(cs_main, g_cs_orphans);
2989  // If AcceptBlockHeader returned true, it set pindex
2990  assert(pindex);
2991  UpdateBlockAvailability(pfrom.GetId(), pindex->GetBlockHash());
2992 
2993  CNodeState *nodestate = State(pfrom.GetId());
2994 
2995  // If this was a new header with more work than our tip, update the
2996  // peer's last block announcement time
2997  if (received_new_header && pindex->nChainWork > ::ChainActive().Tip()->nChainWork) {
2998  nodestate->m_last_block_announcement = GetTime();
2999  }
3000 
3001  std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator blockInFlightIt = mapBlocksInFlight.find(pindex->GetBlockHash());
3002  bool fAlreadyInFlight = blockInFlightIt != mapBlocksInFlight.end();
3003 
3004  if (pindex->nStatus & BLOCK_HAVE_DATA) // Nothing to do here
3005  return;
3006 
3007  if (pindex->nChainWork <= ::ChainActive().Tip()->nChainWork || // We know something better
3008  pindex->nTx != 0) { // We had this block at some point, but pruned it
3009  if (fAlreadyInFlight) {
3010  // We requested this block for some reason, but our mempool will probably be useless
3011  // so we just grab the block via normal getdata
3012  std::vector<CInv> vInv(1);
3013  vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom), cmpctblock.header.GetHash());
3014  connman->PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
3015  }
3016  return;
3017  }
3018 
3019  // If we're not close to tip yet, give up and let parallel block fetch work its magic
3020  if (!fAlreadyInFlight && !CanDirectFetch(chainparams.GetConsensus()))
3021  return;
3022 
3023  if (IsWitnessEnabled(pindex->pprev, chainparams.GetConsensus()) && !nodestate->fSupportsDesiredCmpctVersion) {
3024  // Don't bother trying to process compact blocks from v1 peers
3025  // after segwit activates.
3026  return;
3027  }
3028 
3029  // We want to be a bit conservative just to be extra careful about DoS
3030  // possibilities in compact block processing...
3031  if (pindex->nHeight <= ::ChainActive().Height() + 2) {
3032  if ((!fAlreadyInFlight && nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) ||
3033  (fAlreadyInFlight && blockInFlightIt->second.first == pfrom.GetId())) {
3034  std::list<QueuedBlock>::iterator* queuedBlockIt = nullptr;
3035  if (!MarkBlockAsInFlight(mempool, pfrom.GetId(), pindex->GetBlockHash(), pindex, &queuedBlockIt)) {
3036  if (!(*queuedBlockIt)->partialBlock)
3037  (*queuedBlockIt)->partialBlock.reset(new PartiallyDownloadedBlock(&mempool));
3038  else {
3039  // The block was already in flight using compact blocks from the same peer
3040  LogPrint(BCLog::NET, "Peer sent us compact block we were already syncing!\n");
3041  return;
3042  }
3043  }
3044 
3045  PartiallyDownloadedBlock& partialBlock = *(*queuedBlockIt)->partialBlock;
3046  ReadStatus status = partialBlock.InitData(cmpctblock, vExtraTxnForCompact);
3047  if (status == READ_STATUS_INVALID) {
3048  MarkBlockAsReceived(pindex->GetBlockHash()); // Reset in-flight state in case Misbehaving does not result in a disconnect
3049  Misbehaving(pfrom.GetId(), 100, strprintf("Peer %d sent us invalid compact block\n", pfrom.GetId()));
3050  return;
3051  } else if (status == READ_STATUS_FAILED) {
3052  // Duplicate txindexes, the block is now in-flight, so just request it
3053  std::vector<CInv> vInv(1);
3054  vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom), cmpctblock.header.GetHash());
3055  connman->PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
3056  return;
3057  }
3058 
3059  BlockTransactionsRequest req;
3060  for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) {
3061  if (!partialBlock.IsTxAvailable(i))
3062  req.indexes.push_back(i);
3063  }
3064  if (req.indexes.empty()) {
3065  // Dirty hack to jump to BLOCKTXN code (TODO: move message handlers into their own functions)
3066  BlockTransactions txn;
3067  txn.blockhash = cmpctblock.header.GetHash();
3068  blockTxnMsg << txn;
3069  fProcessBLOCKTXN = true;
3070  } else {
3071  req.blockhash = pindex->GetBlockHash();
3072  connman->PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETBLOCKTXN, req));
3073  }
3074  } else {
3075  // This block is either already in flight from a different
3076  // peer, or this peer has too many blocks outstanding to
3077  // download from.
3078  // Optimistically try to reconstruct anyway since we might be
3079  // able to without any round trips.
3080  PartiallyDownloadedBlock tempBlock(&mempool);
3081  ReadStatus status = tempBlock.InitData(cmpctblock, vExtraTxnForCompact);
3082  if (status != READ_STATUS_OK) {
3083  // TODO: don't ignore failures
3084  return;
3085  }
3086  std::vector<CTransactionRef> dummy;
3087  status = tempBlock.FillBlock(*pblock, dummy);
3088  if (status == READ_STATUS_OK) {
3089  fBlockReconstructed = true;
3090  }
3091  }
3092  } else {
3093  if (fAlreadyInFlight) {
3094  // We requested this block, but it's far ahead of our tip, so our
3095  // mempool will probably be useless - request the block normally
3096  std::vector<CInv> vInv(1);
3097  vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom), cmpctblock.header.GetHash());
3098  connman->PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
3099  return;
3100  } else {
3101  // If this was an announce-cmpctblock, we want the same treatment as a header message
3102  fRevertToHeaderProcessing = true;
3103  }
3104  }
3105  } // cs_main
3106 
3107  if (fProcessBLOCKTXN)
3108  return ProcessMessage(pfrom, NetMsgType::BLOCKTXN, blockTxnMsg, time_received, chainparams, chainman, mempool, connman, banman, interruptMsgProc);
3109 
3110  if (fRevertToHeaderProcessing) {
3111  // Headers received from HB compact block peers are permitted to be
3112  // relayed before full validation (see BIP 152), so we don't want to disconnect
3113  // the peer if the header turns out to be for an invalid block.
3114  // Note that if a peer tries to build on an invalid chain, that
3115  // will be detected and the peer will be disconnected/discouraged.
3116  return ProcessHeadersMessage(pfrom, connman, chainman, mempool, {cmpctblock.header}, chainparams, /*via_compact_block=*/true);
3117  }
3118 
3119  if (fBlockReconstructed) {
3120  // If we got here, we were able to optimistically reconstruct a
3121  // block that is in flight from some other peer.
3122  {
3123  LOCK(cs_main);
3124  mapBlockSource.emplace(pblock->GetHash(), std::make_pair(pfrom.GetId(), false));
3125  }
3126  bool fNewBlock = false;
3127  // Setting fForceProcessing to true means that we bypass some of
3128  // our anti-DoS protections in AcceptBlock, which filters
3129  // unrequested blocks that might be trying to waste our resources
3130  // (eg disk space). Because we only try to reconstruct blocks when
3131  // we're close to caught up (via the CanDirectFetch() requirement
3132  // above, combined with the behavior of not requesting blocks until
3133  // we have a chain with at least nMinimumChainWork), and we ignore
3134  // compact blocks with less work than our tip, it is safe to treat
3135  // reconstructed compact blocks as having been requested.
3136  chainman.ProcessNewBlock(chainparams, pblock, /*fForceProcessing=*/true, &fNewBlock);
3137  if (fNewBlock) {
3138  pfrom.nLastBlockTime = GetTime();
3139  } else {
3140  LOCK(cs_main);
3141  mapBlockSource.erase(pblock->GetHash());
3142  }
3143  LOCK(cs_main); // hold cs_main for CBlockIndex::IsValid()
3144  if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS)) {
3145  // Clear download state for this block, which is in
3146  // process from some other peer. We do this after calling
3147  // ProcessNewBlock so that a malleated cmpctblock announcement
3148  // can't be used to interfere with block relay.
3149  MarkBlockAsReceived(pblock->GetHash());
3150  }
3151  }
3152  return;
3153  }
3154 
3155  if (msg_type == NetMsgType::BLOCKTXN)
3156  {
3157  // Ignore blocktxn received while importing
3158  if (fImporting || fReindex) {
3159  LogPrint(BCLog::NET, "Unexpected blocktxn message received from peer %d\n", pfrom.GetId());
3160  return;
3161  }
3162 
3163  BlockTransactions resp;
3164  vRecv >> resp;
3165 
3166  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
3167  bool fBlockRead = false;
3168  {
3169  LOCK(cs_main);
3170 
3171  std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator it = mapBlocksInFlight.find(resp.blockhash);
3172  if (it == mapBlocksInFlight.end() || !it->second.second->partialBlock ||
3173  it->second.first != pfrom.GetId()) {
3174  LogPrint(BCLog::NET, "Peer %d sent us block transactions for block we weren't expecting\n", pfrom.GetId());
3175  return;
3176  }
3177 
3178  PartiallyDownloadedBlock& partialBlock = *it->second.second->partialBlock;
3179  ReadStatus status = partialBlock.FillBlock(*pblock, resp.txn);
3180  if (status == READ_STATUS_INVALID) {
3181  MarkBlockAsReceived(resp.blockhash); // Reset in-flight state in case Misbehaving does not result in a disconnect
3182  Misbehaving(pfrom.GetId(), 100, strprintf("Peer %d sent us invalid compact block/non-matching block transactions\n", pfrom.GetId()));
3183  return;
3184  } else if (status == READ_STATUS_FAILED) {
3185  // Might have collided, fall back to getdata now :(
3186  std::vector<CInv> invs;
3187  invs.push_back(CInv(MSG_BLOCK | GetFetchFlags(pfrom), resp.blockhash));
3188  connman->PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, invs));
3189  } else {
3190  // Block is either okay, or possibly we received
3191  // READ_STATUS_CHECKBLOCK_FAILED.
3192  // Note that CheckBlock can only fail for one of a few reasons:
3193  // 1. bad-proof-of-work (impossible here, because we've already
3194  // accepted the header)
3195  // 2. merkleroot doesn't match the transactions given (already
3196  // caught in FillBlock with READ_STATUS_FAILED, so
3197  // impossible here)
3198  // 3. the block is otherwise invalid (eg invalid coinbase,
3199  // block is too big, too many legacy sigops, etc).
3200  // So if CheckBlock failed, #3 is the only possibility.
3201  // Under BIP 152, we don't discourage the peer unless proof of work is
3202  // invalid (we don't require all the stateless checks to have
3203  // been run). This is handled below, so just treat this as
3204  // though the block was successfully read, and rely on the
3205  // handling in ProcessNewBlock to ensure the block index is
3206  // updated, etc.
3207  MarkBlockAsReceived(resp.blockhash); // it is now an empty pointer
3208  fBlockRead = true;
3209  // mapBlockSource is used for potentially punishing peers and
3210  // updating which peers send us compact blocks, so the race
3211  // between here and cs_main in ProcessNewBlock is fine.
3212  // BIP 152 permits peers to relay compact blocks after validating
3213  // the header only; we should not punish peers if the block turns
3214  // out to be invalid.
3215  mapBlockSource.emplace(resp.blockhash, std::make_pair(pfrom.GetId(), false));
3216  }
3217  } // Don't hold cs_main when we call into ProcessNewBlock
3218  if (fBlockRead) {
3219  bool fNewBlock = false;
3220  // Since we requested this block (it was in mapBlocksInFlight), force it to be processed,
3221  // even if it would not be a candidate for new tip (missing previous block, chain not long enough, etc)
3222  // This bypasses some anti-DoS logic in AcceptBlock (eg to prevent
3223  // disk-space attacks), but this should be safe due to the
3224  // protections in the compact block handler -- see related comment
3225  // in compact block optimistic reconstruction handling.
3226  chainman.ProcessNewBlock(chainparams, pblock, /*fForceProcessing=*/true, &fNewBlock);
3227  if (fNewBlock) {
3228  pfrom.nLastBlockTime = GetTime();
3229  } else {
3230  LOCK(cs_main);
3231  mapBlockSource.erase(pblock->GetHash());
3232  }
3233  }
3234  return;
3235  }
3236 
3237  if (msg_type == NetMsgType::HEADERS)
3238  {
3239  // Ignore headers received while importing
3240  if (fImporting || fReindex) {
3241  LogPrint(BCLog::NET, "Unexpected headers message received from peer %d\n", pfrom.GetId());
3242  return;
3243  }
3244 
3245  std::vector<CBlockHeader> headers;
3246 
3247  // Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks.
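 // Each entry in a "headers" message is serialized like a block with an empty
 // transaction list, so a compact-size transaction count (expected to be 0)
 // follows every header and is consumed by the ReadCompactSize call in the
 // loop below.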
3248  unsigned int nCount = ReadCompactSize(vRecv);
3249  if (nCount > MAX_HEADERS_RESULTS) {
3250  LOCK(cs_main);
3251  Misbehaving(pfrom.GetId(), 20, strprintf("headers message size = %u", nCount));
3252  return;
3253  }
3254  headers.resize(nCount);
3255  for (unsigned int n = 0; n < nCount; n++) {
3256  vRecv >> headers[n];
3257  ReadCompactSize(vRecv); // ignore tx count; assume it is 0.
3258  }
3259 
3260  return ProcessHeadersMessage(pfrom, connman, chainman, mempool, headers, chainparams, /*via_compact_block=*/false);
3261  }
3262 
3263  if (msg_type == NetMsgType::BLOCK)
3264  {
3265  // Ignore block received while importing
3266  if (fImporting || fReindex) {
3267  LogPrint(BCLog::NET, "Unexpected block message received from peer %d\n", pfrom.GetId());
3268  return;
3269  }
3270 
3271  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
3272  vRecv >> *pblock;
3273 
3274  LogPrint(BCLog::NET, "received block %s peer=%d\n", pblock->GetHash().ToString(), pfrom.GetId());
3275 
3276  bool forceProcessing = false;
3277  const uint256 hash(pblock->GetHash());
3278  {
3279  LOCK(cs_main);
3280  // Also always process if we requested the block explicitly, as we may
3281  // need it even though it is not a candidate for a new best tip.
3282  forceProcessing |= MarkBlockAsReceived(hash);
3283  // mapBlockSource is only used for punishing peers and setting
3284  // which peers send us compact blocks, so the race between here and
3285  // cs_main in ProcessNewBlock is fine.
3286  mapBlockSource.emplace(hash, std::make_pair(pfrom.GetId(), true));
3287  }
3288  bool fNewBlock = false;
3289  chainman.ProcessNewBlock(chainparams, pblock, forceProcessing, &fNewBlock);
3290  if (fNewBlock) {
3291  pfrom.nLastBlockTime = GetTime();
3292  } else {
3293  LOCK(cs_main);
3294  mapBlockSource.erase(pblock->GetHash());
3295  }
3296  return;
3297  }
3298 
3299  if (msg_type == NetMsgType::GETADDR) {
3300  // This asymmetric behavior for inbound and outbound connections was introduced
3301  // to prevent a fingerprinting attack: an attacker can send specific fake addresses
3302  // to users' AddrMan and later request them by sending getaddr messages.
3303  // Making nodes which are behind NAT and can only make outgoing connections ignore
3304  // the getaddr message mitigates the attack.
3305  if (!pfrom.fInbound) {
3306  LogPrint(BCLog::NET, "Ignoring \"getaddr\" from outbound connection. peer=%d\n", pfrom.GetId());
3307  return;
3308  }
3309  if (!pfrom.IsAddrRelayPeer()) {
3310  LogPrint(BCLog::NET, "Ignoring \"getaddr\" from block-relay-only connection. peer=%d\n", pfrom.GetId());
3311  return;
3312  }
3313 
3314  // Only send one GetAddr response per connection to reduce resource waste
3315  // and discourage addr stamping of INV announcements.
3316  if (pfrom.fSentAddr) {
3317  LogPrint(BCLog::NET, "Ignoring repeated \"getaddr\". peer=%d\n", pfrom.GetId());
3318  return;
3319  }
3320  pfrom.fSentAddr = true;
3321 
3322  pfrom.vAddrToSend.clear();
3323  std::vector<CAddress> vAddr = connman->GetAddresses();
3324  FastRandomContext insecure_rand;
3325  for (const CAddress &addr : vAddr) {
3326  if (!banman->IsDiscouraged(addr) && !banman->IsBanned(addr)) {
3327  pfrom.PushAddress(addr, insecure_rand);
3328  }
3329  }
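 // Note: PushAddress uses the FastRandomContext above to overwrite a random
 // already-queued entry once the peer's addr send buffer is full, rather than
 // letting vAddrToSend grow without bound.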
3330  return;
3331  }
3332 
3333  if (msg_type == NetMsgType::MEMPOOL) {
3334  if (!(pfrom.GetLocalServices() & NODE_BLOOM) && !pfrom.HasPermission(PF_MEMPOOL))
3335  {
3336  if (!pfrom.HasPermission(PF_NOBAN))
3337  {
3338  LogPrint(BCLog::NET, "mempool request with bloom filters disabled, disconnect peer=%d\n", pfrom.GetId());
3339  pfrom.fDisconnect = true;
3340  }
3341  return;
3342  }
3343 
3344  if (connman->OutboundTargetReached(false) && !pfrom.HasPermission(PF_MEMPOOL))
3345  {
3346  if (!pfrom.HasPermission(PF_NOBAN))
3347  {
3348  LogPrint(BCLog::NET, "mempool request with bandwidth limit reached, disconnect peer=%d\n", pfrom.GetId());
3349  pfrom.fDisconnect = true;
3350  }
3351  return;
3352  }
3353 
3354  if (pfrom.m_tx_relay != nullptr) {
3355  LOCK(pfrom.m_tx_relay->cs_tx_inventory);
3356  pfrom.m_tx_relay->fSendMempool = true;
3357  }
3358  return;
3359  }
3360 
3361  if (msg_type == NetMsgType::PING) {
3362  if (pfrom.nVersion > BIP0031_VERSION)
3363  {
3364  uint64_t nonce = 0;
3365  vRecv >> nonce;
3366  // Echo the message back with the nonce. This allows for two useful features:
3367  //
3368  // 1) A remote node can quickly check if the connection is operational
3369  // 2) Remote nodes can measure the latency of the network thread. If this node
3370  // is overloaded it won't respond to pings quickly and the remote node can
3371  // avoid sending us more work, like chain download requests.
3372  //
3373  // The nonce stops the remote node from getting confused between different pings: without
3374  // it, if the remote node sends a ping once per second and this node takes 5
3375  // seconds to respond to each, the 5th ping the remote sends would appear to
3376  // return very quickly.
3377  connman->PushMessage(&pfrom, msgMaker.Make(NetMsgType::PONG, nonce));
3378  }
3379  return;
3380  }
3381 
3382  if (msg_type == NetMsgType::PONG) {
3383  const auto ping_end = time_received;
3384  uint64_t nonce = 0;
3385  size_t nAvail = vRecv.in_avail();
3386  bool bPingFinished = false;
3387  std::string sProblem;
3388 
3389  if (nAvail >= sizeof(nonce)) {
3390  vRecv >> nonce;
3391 
3392  // Only process pong message if there is an outstanding ping (old ping without nonce should never pong)
3393  if (pfrom.nPingNonceSent != 0) {
3394  if (nonce == pfrom.nPingNonceSent) {
3395  // Matching pong received, this ping is no longer outstanding
3396  bPingFinished = true;
3397  const auto ping_time = ping_end - pfrom.m_ping_start.load();
3398  if (ping_time.count() > 0) {
3399  // Successful ping time measurement, replace previous
3400  pfrom.nPingUsecTime = count_microseconds(ping_time);
3401  pfrom.nMinPingUsecTime = std::min(pfrom.nMinPingUsecTime.load(), count_microseconds(ping_time));
3402  } else {
3403  // This should never happen
3404  sProblem = "Timing mishap";
3405  }
3406  } else {
3407  // Nonce mismatches are normal when pings are overlapping
3408  sProblem = "Nonce mismatch";
3409  if (nonce == 0) {
3410  // This is most likely a bug in another implementation somewhere; cancel this ping
3411  bPingFinished = true;
3412  sProblem = "Nonce zero";
3413  }
3414  }
3415  } else {
3416  sProblem = "Unsolicited pong without ping";
3417  }
3418  } else {
3419  // This is most likely a bug in another implementation somewhere; cancel this ping
3420  bPingFinished = true;
3421  sProblem = "Short payload";
3422  }
3423 
3424  if (!(sProblem.empty())) {
3425  LogPrint(BCLog::NET, "pong peer=%d: %s, %x expected, %x received, %u bytes\n",
3426  pfrom.GetId(),
3427  sProblem,
3428  pfrom.nPingNonceSent,
3429  nonce,
3430  nAvail);
3431  }
3432  if (bPingFinished) {
3433  pfrom.nPingNonceSent = 0;
3434  }
3435  return;
3436  }
3437 
3438  if (msg_type == NetMsgType::FILTERLOAD) {
3439  if (!(pfrom.GetLocalServices() & NODE_BLOOM)) {
3440  pfrom.fDisconnect = true;
3441  return;
3442  }
3443  CBloomFilter filter;
3444  vRecv >> filter;
3445 
3446  if (!filter.IsWithinSizeConstraints())
3447  {
3448  // There is no excuse for sending a too-large filter
3449  LOCK(cs_main);
3450  Misbehaving(pfrom.GetId(), 100);
3451  }
3452  else if (pfrom.m_tx_relay != nullptr)
3453  {
3454  LOCK(pfrom.m_tx_relay->cs_filter);
3455  pfrom.m_tx_relay->pfilter.reset(new CBloomFilter(filter));
3456  pfrom.m_tx_relay->fRelayTxes = true;
3457  }
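 // Note: per BIP 37, loading a filter also (re-)enables transaction
 // announcements to this peer (fRelayTxes above); relay is then limited to
 // transactions matching the filter (see the pfilter checks in SendMessages).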
3458  return;
3459  }
3460 
3461  if (msg_type == NetMsgType::FILTERADD) {
3462  if (!(pfrom.GetLocalServices() & NODE_BLOOM)) {
3463  pfrom.fDisconnect = true;
3464  return;
3465  }
3466  std::vector<unsigned char> vData;
3467  vRecv >> vData;
3468 
3469  // Nodes must NEVER send a data item > 520 bytes (the max size for a script data object,
3470  // and thus, the maximum size any matched object can have) in a filteradd message
3471  bool bad = false;
3472  if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) {
3473  bad = true;
3474  } else if (pfrom.m_tx_relay != nullptr) {
3475  LOCK(pfrom.m_tx_relay->cs_filter);
3476  if (pfrom.m_tx_relay->pfilter) {
3477  pfrom.m_tx_relay->pfilter->insert(vData);
3478  } else {
3479  bad = true;
3480  }
3481  }
3482  if (bad) {
3483  LOCK(cs_main);
3484  Misbehaving(pfrom.GetId(), 100);
3485  }
3486  return;
3487  }
3488 
3489  if (msg_type == NetMsgType::FILTERCLEAR) {
3490  if (!(pfrom.GetLocalServices() & NODE_BLOOM)) {
3491  pfrom.fDisconnect = true;
3492  return;
3493  }
3494  if (pfrom.m_tx_relay == nullptr) {
3495  return;
3496  }
3497  LOCK(pfrom.m_tx_relay->cs_filter);
3498  pfrom.m_tx_relay->pfilter = nullptr;
3499  pfrom.m_tx_relay->fRelayTxes = true;
3500  return;
3501  }
3502 
3503  if (msg_type == NetMsgType::FEEFILTER) {
3504  CAmount newFeeFilter = 0;
3505  vRecv >> newFeeFilter;
3506  if (MoneyRange(newFeeFilter)) {
3507  if (pfrom.m_tx_relay != nullptr) {
3508  LOCK(pfrom.m_tx_relay->cs_feeFilter);
3509  pfrom.m_tx_relay->minFeeFilter = newFeeFilter;
3510  }
3511  LogPrint(BCLog::NET, "received: feefilter of %s from peer=%d\n", CFeeRate(newFeeFilter).ToString(), pfrom.GetId());
3512  }
3513  return;
3514  }
3515 
3516  if (msg_type == NetMsgType::GETCFILTERS) {
3517  ProcessGetCFilters(pfrom, vRecv, chainparams, *connman);
3518  return;
3519  }
3520 
3521  if (msg_type == NetMsgType::GETCFHEADERS) {
3522  ProcessGetCFHeaders(pfrom, vRecv, chainparams, *connman);
3523  return;
3524  }
3525 
3526  if (msg_type == NetMsgType::GETCFCHECKPT) {
3527  ProcessGetCFCheckPt(pfrom, vRecv, chainparams, *connman);
3528  return;
3529  }
3530 
3531  if (msg_type == NetMsgType::NOTFOUND) {
3532  // Remove the NOTFOUND transactions from the peer
3533  LOCK(cs_main);
3534  CNodeState *state = State(pfrom.GetId());
3535  std::vector<CInv> vInv;
3536  vRecv >> vInv;
3537  if (vInv.size() <= MAX_PEER_TX_IN_FLIGHT + MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
3538  for (CInv &inv : vInv) {
3539  if (inv.type == MSG_TX || inv.type == MSG_WITNESS_TX) {
3540  // If we receive a NOTFOUND message for a txid we requested, erase
3541  // it from our data structures for this peer.
3542  auto in_flight_it = state->m_tx_download.m_tx_in_flight.find(inv.hash);
3543  if (in_flight_it == state->m_tx_download.m_tx_in_flight.end()) {
3544  // Skip any further work if this is a spurious NOTFOUND
3545  // message.
3546  continue;
3547  }
3548  state->m_tx_download.m_tx_in_flight.erase(in_flight_it);
3549  state->m_tx_download.m_tx_announced.erase(inv.hash);
3550  }
3551  }
3552  }
3553  return;
3554  }
3555 
3556  // Ignore unknown commands for extensibility
3557  LogPrint(BCLog::NET, "Unknown command \"%s\" from peer=%d\n", SanitizeString(msg_type), pfrom.GetId());
3558  return;
3559 }
3560 
3561 bool PeerLogicValidation::MaybeDiscourageAndDisconnect(CNode& pnode)
3562 {
3563  AssertLockHeld(cs_main);
3564  CNodeState &state = *State(pnode.GetId());
3565 
3566  if (state.m_should_discourage) {
3567  state.m_should_discourage = false;
3568  if (pnode.HasPermission(PF_NOBAN)) {
3569  LogPrintf("Warning: not punishing whitelisted peer %s!\n", pnode.addr.ToString());
3570  } else if (pnode.m_manual_connection) {
3571  LogPrintf("Warning: not punishing manually-connected peer %s!\n", pnode.addr.ToString());
3572  } else if (pnode.addr.IsLocal()) {
3573  // Disconnect but don't discourage this local node
3574  LogPrintf("Warning: disconnecting but not discouraging local peer %s!\n", pnode.addr.ToString());
3575  pnode.fDisconnect = true;
3576  } else {
3577  // Disconnect and discourage all nodes sharing the address
3578  LogPrintf("Disconnecting and discouraging peer %s!\n", pnode.addr.ToString());
3579  if (m_banman) {
3580  m_banman->Discourage(pnode.addr);
3581  }
3582  connman->DisconnectNode(pnode.addr);
3583  }
3584  return true;
3585  }
3586  return false;
3587 }
3588 
3589 bool PeerLogicValidation::ProcessMessages(CNode* pfrom, std::atomic<bool>& interruptMsgProc)
3590 {
3591  const CChainParams& chainparams = Params();
3592  //
3593  // Message format
3594  // (4) message start
3595  // (12) command
3596  // (4) size
3597  // (4) checksum
3598  // (x) data
3599  //
3600  bool fMoreWork = false;
3601 
3602  if (!pfrom->vRecvGetData.empty())
3603  ProcessGetData(*pfrom, chainparams, connman, m_mempool, interruptMsgProc);
3604 
3605  if (!pfrom->orphan_work_set.empty()) {
3606  std::list<CTransactionRef> removed_txn;
3607  LOCK2(cs_main, g_cs_orphans);
3608  ProcessOrphanTx(connman, m_mempool, pfrom->orphan_work_set, removed_txn);
3609  for (const CTransactionRef& removedTx : removed_txn) {
3610  AddToCompactExtraTransactions(removedTx);
3611  }
3612  }
3613 
3614  if (pfrom->fDisconnect)
3615  return false;
3616 
3617  // This maintains the order of responses
3618  // and prevents vRecvGetData from growing unbounded
3619  if (!pfrom->vRecvGetData.empty()) return true;
3620  if (!pfrom->orphan_work_set.empty()) return true;
3621 
3622  // Don't bother if send buffer is too full to respond anyway
3623  if (pfrom->fPauseSend)
3624  return false;
3625 
3626  std::list<CNetMessage> msgs;
3627  {
3628  LOCK(pfrom->cs_vProcessMsg);
3629  if (pfrom->vProcessMsg.empty())
3630  return false;
3631  // Just take one message
3632  msgs.splice(msgs.begin(), pfrom->vProcessMsg, pfrom->vProcessMsg.begin());
3633  pfrom->nProcessQueueSize -= msgs.front().m_raw_message_size;
3634  pfrom->fPauseRecv = pfrom->nProcessQueueSize > connman->GetReceiveFloodSize();
3635  fMoreWork = !pfrom->vProcessMsg.empty();
3636  }
3637  CNetMessage& msg(msgs.front());
3638 
3639  msg.SetVersion(pfrom->GetRecvVersion());
3640  // Check network magic
3641  if (!msg.m_valid_netmagic) {
3642  LogPrint(BCLog::NET, "PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d\n", SanitizeString(msg.m_command), pfrom->GetId());
3643  pfrom->fDisconnect = true;
3644  return false;
3645  }
3646 
3647  // Check header
3648  if (!msg.m_valid_header)
3649  {
3650  LogPrint(BCLog::NET, "PROCESSMESSAGE: ERRORS IN HEADER %s peer=%d\n", SanitizeString(msg.m_command), pfrom->GetId());
3651  return fMoreWork;
3652  }
3653  const std::string& msg_type = msg.m_command;
3654 
3655  // Message size
3656  unsigned int nMessageSize = msg.m_message_size;
3657 
3658  // Checksum
3659  CDataStream& vRecv = msg.m_recv;
3660  if (!msg.m_valid_checksum)
3661  {
3662  LogPrint(BCLog::NET, "%s(%s, %u bytes): CHECKSUM ERROR peer=%d\n", __func__,
3663  SanitizeString(msg_type), nMessageSize, pfrom->GetId());
3664  return fMoreWork;
3665  }
3666 
3667  try {
3668  ProcessMessage(*pfrom, msg_type, vRecv, msg.m_time, chainparams, m_chainman, m_mempool, connman, m_banman, interruptMsgProc);
3669  if (interruptMsgProc)
3670  return false;
3671  if (!pfrom->vRecvGetData.empty())
3672  fMoreWork = true;
3673  } catch (const std::exception& e) {
3674  LogPrint(BCLog::NET, "%s(%s, %u bytes): Exception '%s' (%s) caught\n", __func__, SanitizeString(msg_type), nMessageSize, e.what(), typeid(e).name());
3675  } catch (...) {
3676  LogPrint(BCLog::NET, "%s(%s, %u bytes): Unknown exception caught\n", __func__, SanitizeString(msg_type), nMessageSize);
3677  }
3678 
3679  LOCK(cs_main);
3680  MaybeDiscourageAndDisconnect(*pfrom);
3681 
3682  return fMoreWork;
3683 }
3684 
3685 void PeerLogicValidation::ConsiderEviction(CNode& pto, int64_t time_in_seconds)
3686 {
3687  AssertLockHeld(cs_main);
3688 
3689  CNodeState &state = *State(pto.GetId());
3690  const CNetMsgMaker msgMaker(pto.GetSendVersion());
3691 
3692  if (!state.m_chain_sync.m_protect && IsOutboundDisconnectionCandidate(pto) && state.fSyncStarted) {
3693  // This is an outbound peer subject to disconnection if they don't
3694  // announce a block with as much work as the current tip within
3695  // CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds (note: if
3696  // their chain has more work than ours, we should sync to it,
3697  // unless it's invalid, in which case we should find that out and
3698  // disconnect from them elsewhere).
3699  if (state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= ::ChainActive().Tip()->nChainWork) {
3700  if (state.m_chain_sync.m_timeout != 0) {
3701  state.m_chain_sync.m_timeout = 0;
3702  state.m_chain_sync.m_work_header = nullptr;
3703  state.m_chain_sync.m_sent_getheaders = false;
3704  }
3705  } else if (state.m_chain_sync.m_timeout == 0 || (state.m_chain_sync.m_work_header != nullptr && state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= state.m_chain_sync.m_work_header->nChainWork)) {
3706  // Our best block known by this peer is behind our tip, and we're either noticing
3707  // that for the first time, OR this peer was able to catch up to some earlier point
3708  // where we checked against our tip.
3709  // Either way, set a new timeout based on current tip.
3710  state.m_chain_sync.m_timeout = time_in_seconds + CHAIN_SYNC_TIMEOUT;
3711  state.m_chain_sync.m_work_header = ::ChainActive().Tip();
3712  state.m_chain_sync.m_sent_getheaders = false;
3713  } else if (state.m_chain_sync.m_timeout > 0 && time_in_seconds > state.m_chain_sync.m_timeout) {
3714  // No evidence yet that our peer has synced to a chain with work equal to that
3715  // of our tip, when we first detected it was behind. Send a single getheaders
3716  // message to give the peer a chance to update us.
3717  if (state.m_chain_sync.m_sent_getheaders) {
3718  // They've run out of time to catch up!
3719  LogPrintf("Disconnecting outbound peer %d for old chain, best known block = %s\n", pto.GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>");
3720  pto.fDisconnect = true;
3721  } else {
3722  assert(state.m_chain_sync.m_work_header);
3723  LogPrint(BCLog::NET, "sending getheaders to outbound peer=%d to verify chain work (current best known block:%s, benchmark blockhash: %s)\n", pto.GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>", state.m_chain_sync.m_work_header->GetBlockHash().ToString());
3724  connman->PushMessage(&pto, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(state.m_chain_sync.m_work_header->pprev), uint256()));
3725  state.m_chain_sync.m_sent_getheaders = true;
3726  constexpr int64_t HEADERS_RESPONSE_TIME = 120; // 2 minutes
3727  // Bump the timeout to allow a response, which could clear the timeout
3728  // (if the response shows the peer has synced), reset the timeout (if
3729  // the peer syncs to the required work but not to our tip), or result
3730  // in disconnect (if we advance to the timeout and pindexBestKnownBlock
3731  // has not sufficiently progressed)
3732  state.m_chain_sync.m_timeout = time_in_seconds + HEADERS_RESPONSE_TIME;
3733  }
3734  }
3735  }
3736 }
3737 
3738 void PeerLogicValidation::EvictExtraOutboundPeers(int64_t time_in_seconds)
3739 {
3740  // Check whether we have too many outbound peers
3741  int extra_peers = connman->GetExtraOutboundCount();
3742  if (extra_peers > 0) {
3743  // If we have more outbound peers than we target, disconnect one.
3744  // Pick the outbound peer that least recently announced
3745  // us a new block, with ties broken by choosing the more recent
3746  // connection (higher node id)
3747  NodeId worst_peer = -1;
3748  int64_t oldest_block_announcement = std::numeric_limits<int64_t>::max();
3749 
3750  connman->ForEachNode([&](CNode* pnode) {
3751  AssertLockHeld(cs_main);
3752 
3753  // Ignore non-outbound peers, or nodes marked for disconnect already
3754  if (!IsOutboundDisconnectionCandidate(*pnode) || pnode->fDisconnect) return;
3755  CNodeState *state = State(pnode->GetId());
3756  if (state == nullptr) return; // shouldn't be possible, but just in case
3757  // Don't evict our protected peers
3758  if (state->m_chain_sync.m_protect) return;
3759  // Don't evict our block-relay-only peers.
3760  if (pnode->m_tx_relay == nullptr) return;
3761  if (state->m_last_block_announcement < oldest_block_announcement || (state->m_last_block_announcement == oldest_block_announcement && pnode->GetId() > worst_peer)) {
3762  worst_peer = pnode->GetId();
3763  oldest_block_announcement = state->m_last_block_announcement;
3764  }
3765  });
3766  if (worst_peer != -1) {
3767  bool disconnected = connman->ForNode(worst_peer, [&](CNode *pnode) {
3768  AssertLockHeld(cs_main);
3769 
3770  // Only disconnect a peer that has been connected to us for
3771  // some reasonable fraction of our check-frequency, to give
3772  // it time for new information to have arrived.
3773  // Also don't disconnect any peer we're trying to download a
3774  // block from.
3775  CNodeState &state = *State(pnode->GetId());
3776  if (time_in_seconds - pnode->nTimeConnected > MINIMUM_CONNECT_TIME && state.nBlocksInFlight == 0) {
3777  LogPrint(BCLog::NET, "disconnecting extra outbound peer=%d (last block announcement received at time %d)\n", pnode->GetId(), oldest_block_announcement);
3778  pnode->fDisconnect = true;
3779  return true;
3780  } else {
3781  LogPrint(BCLog::NET, "keeping outbound peer=%d chosen for eviction (connect time: %d, blocks_in_flight: %d)\n", pnode->GetId(), pnode->nTimeConnected, state.nBlocksInFlight);
3782  return false;
3783  }
3784  });
3785  if (disconnected) {
3786  // If we disconnected an extra peer, that means we successfully
3787  // connected to at least one peer after the last time we
3788  // detected a stale tip. Don't try any more extra peers until
3789  // we next detect a stale tip, to limit the load we put on the
3790  // network from these extra connections.
3791  connman->SetTryNewOutboundPeer(false);
3792  }
3793  }
3794  }
3795 }
3796 
3797 void PeerLogicValidation::CheckForStaleTipAndEvictPeers(const Consensus::Params &consensusParams)
3798 {
3799  LOCK(cs_main);
3800 
3801  if (connman == nullptr) return;
3802 
3803  int64_t time_in_seconds = GetTime();
3804 
3805  EvictExtraOutboundPeers(time_in_seconds);
3806 
3807  if (time_in_seconds > m_stale_tip_check_time) {
3808  // Check whether our tip is stale, and if so, allow using an extra
3809  // outbound peer
3810  if (!fImporting && !fReindex && connman->GetNetworkActive() && connman->GetUseAddrmanOutgoing() && TipMayBeStale(consensusParams)) {
3811  LogPrintf("Potential stale tip detected, will try using extra outbound peer (last tip update: %d seconds ago)\n", time_in_seconds - g_last_tip_update);
3812  connman->SetTryNewOutboundPeer(true);
3813  } else if (connman->GetTryNewOutboundPeer()) {
3814  connman->SetTryNewOutboundPeer(false);
3815  }
3816  m_stale_tip_check_time = time_in_seconds + STALE_CHECK_INTERVAL;
3817  }
3818 }
3819 
3820 namespace {
3821 class CompareInvMempoolOrder
3822 {
3823  CTxMemPool *mp;
3824 public:
3825  explicit CompareInvMempoolOrder(CTxMemPool *_mempool)
3826  {
3827  mp = _mempool;
3828  }
3829 
3830  bool operator()(std::set<uint256>::iterator a, std::set<uint256>::iterator b)
3831  {
3832  /* As std::make_heap produces a max-heap, we want the entries with the
3833  * fewest ancestors/highest fee to sort later. */
3834  return mp->CompareDepthAndScore(*b, *a);
3835  }
3836 };
3837 }
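// CompareInvMempoolOrder is used with std::make_heap/std::pop_heap in
// SendMessages below: because make_heap builds a max-heap, the inverted
// comparison means pop_heap hands back the transaction with the fewest
// ancestors / highest feerate first.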
3838 
3839 bool PeerLogicValidation::SendMessages(CNode* pto)
3840 {
3841  const Consensus::Params& consensusParams = Params().GetConsensus();
3842  {
3843  // Don't send anything until the version handshake is complete
3844  if (!pto->fSuccessfullyConnected || pto->fDisconnect)
3845  return true;
3846 
3847  // If we get here, the outgoing message serialization version is set and can't change.
3848  const CNetMsgMaker msgMaker(pto->GetSendVersion());
3849 
3850  //
3851  // Message: ping
3852  //
3853  bool pingSend = false;
3854  if (pto->fPingQueued) {
3855  // RPC ping request by user
3856  pingSend = true;
3857  }
3858  if (pto->nPingNonceSent == 0 && pto->m_ping_start.load() + PING_INTERVAL < GetTime<std::chrono::microseconds>()) {
3859  // Ping automatically sent as a latency probe & keepalive.
3860  pingSend = true;
3861  }
3862  if (pingSend) {
3863  uint64_t nonce = 0;
3864  while (nonce == 0) {
3865  GetRandBytes((unsigned char*)&nonce, sizeof(nonce));
3866  }
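 // A nonce of 0 is reserved to mean "no ping in flight" (see the PONG
 // handler), so keep drawing random bytes until a non-zero value comes up.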
3867  pto->fPingQueued = false;
3868  pto->m_ping_start = GetTime<std::chrono::microseconds>();
3869  if (pto->nVersion > BIP0031_VERSION) {
3870  pto->nPingNonceSent = nonce;
3871  connman->PushMessage(pto, msgMaker.Make(NetMsgType::PING, nonce));
3872  } else {
3873  // Peer is too old to support ping command with nonce, pong will never arrive.
3874  pto->nPingNonceSent = 0;
3875  connman->PushMessage(pto, msgMaker.Make(NetMsgType::PING));
3876  }
3877  }
3878 
3879  TRY_LOCK(cs_main, lockMain);
3880  if (!lockMain)
3881  return true;
3882 
3883  if (MaybeDiscourageAndDisconnect(*pto)) return true;
3884 
3885  CNodeState &state = *State(pto->GetId());
3886 
3887  // Address refresh broadcast
3888  int64_t nNow = GetTimeMicros();
3889  auto current_time = GetTime<std::chrono::microseconds>();
3890 
3891  if (pto->IsAddrRelayPeer() && !::ChainstateActive().IsInitialBlockDownload() && pto->m_next_local_addr_send < current_time) {
3892  AdvertiseLocal(pto);
3893  pto->m_next_local_addr_send = PoissonNextSend(current_time, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
3894  }
3895 
3896  //
3897  // Message: addr
3898  //
3899  if (pto->IsAddrRelayPeer() && pto->m_next_addr_send < current_time) {
3900  pto->m_next_addr_send = PoissonNextSend(current_time, AVG_ADDRESS_BROADCAST_INTERVAL);
3901  std::vector<CAddress> vAddr;
3902  vAddr.reserve(pto->vAddrToSend.size());
3903  assert(pto->m_addr_known);
3904  for (const CAddress& addr : pto->vAddrToSend)
3905  {
3906  if (!pto->m_addr_known->contains(addr.GetKey()))
3907  {
3908  pto->m_addr_known->insert(addr.GetKey());
3909  vAddr.push_back(addr);
3910  // receiver rejects addr messages larger than 1000 entries
3911  if (vAddr.size() >= 1000)
3912  {
3913  connman->PushMessage(pto, msgMaker.Make(NetMsgType::ADDR, vAddr));
3914  vAddr.clear();
3915  }
3916  }
3917  }
3918  pto->vAddrToSend.clear();
3919  if (!vAddr.empty())
3920  connman->PushMessage(pto, msgMaker.Make(NetMsgType::ADDR, vAddr));
3921  // we only send the big addr message once
3922  if (pto->vAddrToSend.capacity() > 40)
3923  pto->vAddrToSend.shrink_to_fit();
3924  }
3925 
3926  // Start block sync
3927  if (pindexBestHeader == nullptr)
3928  pindexBestHeader = ::ChainActive().Tip();
3929  bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->fOneShot); // Download if this is a nice peer, or we have no nice peers and this one might do.
3930  if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) {
3931  // Only actively request headers from a single peer, unless we're close to today.
3932  if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) {
3933  state.fSyncStarted = true;
3934  state.nHeadersSyncTimeout = GetTimeMicros() + HEADERS_DOWNLOAD_TIMEOUT_BASE + HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER * (GetAdjustedTime() - pindexBestHeader->GetBlockTime())/(consensusParams.nPowTargetSpacing);
3935  nSyncStarted++;
3936  const CBlockIndex *pindexStart = pindexBestHeader;
3937  /* If possible, start at the block preceding the currently
3938  best known header. This ensures that we always get a
3939  non-empty list of headers back as long as the peer
3940  is up-to-date. With a non-empty response, we can initialise
3941  the peer's known best block. This wouldn't be possible
3942  if we requested starting at pindexBestHeader and
3943  got back an empty response. */
3944  if (pindexStart->pprev)
3945  pindexStart = pindexStart->pprev;
3946  LogPrint(BCLog::NET, "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->GetId(), pto->nStartingHeight);
3947  connman->PushMessage(pto, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexStart), uint256()));
3948  }
3949  }
3950 
3951  //
3952  // Try sending block announcements via headers
3953  //
3954  {
3955  // If we have less than MAX_BLOCKS_TO_ANNOUNCE in our
3956  // list of block hashes we're relaying, and our peer wants
3957  // headers announcements, then find the first header
3958  // not yet known to our peer but would connect, and send.
3959  // If no header would connect, or if we have too many
3960  // blocks, or if the peer doesn't want headers, just
3961  // add all to the inv queue.
3962  LOCK(pto->cs_inventory);
3963  std::vector<CBlock> vHeaders;
3964  bool fRevertToInv = ((!state.fPreferHeaders &&
3965  (!state.fPreferHeaderAndIDs || pto->vBlockHashesToAnnounce.size() > 1)) ||
3966  pto->vBlockHashesToAnnounce.size() > MAX_BLOCKS_TO_ANNOUNCE);
3967  const CBlockIndex *pBestIndex = nullptr; // last header queued for delivery
3968  ProcessBlockAvailability(pto->GetId()); // ensure pindexBestKnownBlock is up-to-date
3969 
3970  if (!fRevertToInv) {
3971  bool fFoundStartingHeader = false;
3972  // Try to find first header that our peer doesn't have, and
3973  // then send all headers past that one. If we come across any
3974  // headers that aren't on ::ChainActive(), give up.
3975  for (const uint256 &hash : pto->vBlockHashesToAnnounce) {
3976  const CBlockIndex* pindex = LookupBlockIndex(hash);
3977  assert(pindex);
3978  if (::ChainActive()[pindex->nHeight] != pindex) {
3979  // Bail out if we reorged away from this block
3980  fRevertToInv = true;
3981  break;
3982  }
3983  if (pBestIndex != nullptr && pindex->pprev != pBestIndex) {
3984  // This means that the blocks in our announcement list don't
3985  // connect to each other.
3986  // This shouldn't really be possible to hit during
3987  // regular operation (because reorgs should take us to
3988  // a chain that has some block not on the prior chain,
3989  // which should be caught by the prior check), but one
3990  // way this could happen is by using invalidateblock /
3991  // reconsiderblock repeatedly on the tip, causing it to
3992  // be added multiple times to vBlockHashesToAnnounce.
3993  // Robustly deal with this rare situation by reverting
3994  // to an inv.
3995  fRevertToInv = true;
3996  break;
3997  }
3998  pBestIndex = pindex;
3999  if (fFoundStartingHeader) {
4000  // add this to the headers message
4001  vHeaders.push_back(pindex->GetBlockHeader());
4002  } else if (PeerHasHeader(&state, pindex)) {
4003  continue; // keep looking for the first new block
4004  } else if (pindex->pprev == nullptr || PeerHasHeader(&state, pindex->pprev)) {
4005  // Peer doesn't have this header but they do have the prior one.
4006  // Start sending headers.
4007  fFoundStartingHeader = true;
4008  vHeaders.push_back(pindex->GetBlockHeader());
4009  } else {
4010  // Peer doesn't have this header or the prior one -- nothing will
4011  // connect, so bail out.
4012  fRevertToInv = true;
4013  break;
4014  }
4015  }
4016  }
4017  if (!fRevertToInv && !vHeaders.empty()) {
4018  if (vHeaders.size() == 1 && state.fPreferHeaderAndIDs) {
4019  // We only send up to 1 block as header-and-ids, as otherwise it
4020  // probably means we're doing an initial-ish sync or they're slow
4021  LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", __func__,
4022  vHeaders.front().GetHash().ToString(), pto->GetId());
4023 
4024  int nSendFlags = state.fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
4025 
4026  bool fGotBlockFromCache = false;
4027  {
4028  LOCK(cs_most_recent_block);
4029  if (most_recent_block_hash == pBestIndex->GetBlockHash()) {
4030  if (state.fWantsCmpctWitness || !fWitnessesPresentInMostRecentCompactBlock)
4031  connman->PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, *most_recent_compact_block));
4032  else {
4033  CBlockHeaderAndShortTxIDs cmpctblock(*most_recent_block, state.fWantsCmpctWitness);
4034  connman->PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
4035  }
4036  fGotBlockFromCache = true;
4037  }
4038  }
4039  if (!fGotBlockFromCache) {
4040  CBlock block;
4041  bool ret = ReadBlockFromDisk(block, pBestIndex, consensusParams);
4042  assert(ret);
4043  CBlockHeaderAndShortTxIDs cmpctblock(block, state.fWantsCmpctWitness);
4044  connman->PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
4045  }
4046  state.pindexBestHeaderSent = pBestIndex;
4047  } else if (state.fPreferHeaders) {
4048  if (vHeaders.size() > 1) {
4049  LogPrint(BCLog::NET, "%s: %u headers, range (%s, %s), to peer=%d\n", __func__,
4050  vHeaders.size(),
4051  vHeaders.front().GetHash().ToString(),
4052  vHeaders.back().GetHash().ToString(), pto->GetId());
4053  } else {
4054  LogPrint(BCLog::NET, "%s: sending header %s to peer=%d\n", __func__,
4055  vHeaders.front().GetHash().ToString(), pto->GetId());
4056  }
4057  connman->PushMessage(pto, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
4058  state.pindexBestHeaderSent = pBestIndex;
4059  } else
4060  fRevertToInv = true;
4061  }
4062  if (fRevertToInv) {
4063  // If falling back to using an inv, just try to inv the tip.
4064  // The last entry in vBlockHashesToAnnounce was our tip at some point
4065  // in the past.
4066  if (!pto->vBlockHashesToAnnounce.empty()) {
4067  const uint256 &hashToAnnounce = pto->vBlockHashesToAnnounce.back();
4068  const CBlockIndex* pindex = LookupBlockIndex(hashToAnnounce);
4069  assert(pindex);
4070 
4071  // Warn if we're announcing a block that is not on the main chain.
4072  // This should be very rare and could be optimized out.
4073  // Just log for now.
4074  if (::ChainActive()[pindex->nHeight] != pindex) {
4075  LogPrint(BCLog::NET, "Announcing block %s not on main chain (tip=%s)\n",
4076  hashToAnnounce.ToString(), ::ChainActive().Tip()->GetBlockHash().ToString());
4077  }
4078 
4079  // If the peer's chain has this block, don't inv it back.
4080  if (!PeerHasHeader(&state, pindex)) {
4081  pto->vInventoryBlockToSend.push_back(hashToAnnounce);
4082  LogPrint(BCLog::NET, "%s: sending inv peer=%d hash=%s\n", __func__,
4083  pto->GetId(), hashToAnnounce.ToString());
4084  }
4085  }
4086  }
4087  pto->vBlockHashesToAnnounce.clear();
4088  }
4089 
4090  //
4091  // Message: inventory
4092  //
4093  std::vector<CInv> vInv;
4094  {
4095  LOCK(pto->cs_inventory);
4096  vInv.reserve(std::max<size_t>(pto->vInventoryBlockToSend.size(), INVENTORY_BROADCAST_MAX));
4097 
4098  // Add blocks
4099  for (const uint256& hash : pto->vInventoryBlockToSend) {
4100  vInv.push_back(CInv(MSG_BLOCK, hash));
4101  if (vInv.size() == MAX_INV_SZ) {
4102  connman->PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
4103  vInv.clear();
4104  }
4105  }
4106  pto->vInventoryBlockToSend.clear();
4107 
4108  if (pto->m_tx_relay != nullptr) {
4109  LOCK(pto->m_tx_relay->cs_tx_inventory);
4110  // Check whether periodic sends should happen
4111  bool fSendTrickle = pto->HasPermission(PF_NOBAN);
4112  if (pto->m_tx_relay->nNextInvSend < current_time) {
4113  fSendTrickle = true;
4114  if (pto->fInbound) {
4115  pto->m_tx_relay->nNextInvSend = std::chrono::microseconds{connman->PoissonNextSendInbound(nNow, INVENTORY_BROADCAST_INTERVAL)};
4116  } else {
4117  // Use half the delay for outbound peers, as there is less privacy concern for them.
4118  pto->m_tx_relay->nNextInvSend = PoissonNextSend(current_time, std::chrono::seconds{INVENTORY_BROADCAST_INTERVAL >> 1});
4119  }
4120  }
4121 
4122  // Time to send but the peer has requested we not relay transactions.
4123  if (fSendTrickle) {
4124  LOCK(pto->m_tx_relay->cs_filter);
4125  if (!pto->m_tx_relay->fRelayTxes) pto->m_tx_relay->setInventoryTxToSend.clear();
4126  }
4127 
4128  // Respond to BIP35 mempool requests
4129  if (fSendTrickle && pto->m_tx_relay->fSendMempool) {
4130  auto vtxinfo = m_mempool.infoAll();
4131  pto->m_tx_relay->fSendMempool = false;
4132  CFeeRate filterrate;
4133  {
4134  LOCK(pto->m_tx_relay->cs_feeFilter);
4135  filterrate = CFeeRate(pto->m_tx_relay->minFeeFilter);
4136  }
4137 
4138  LOCK(pto->m_tx_relay->cs_filter);
4139 
4140  for (const auto& txinfo : vtxinfo) {
4141  const uint256& hash = txinfo.tx->GetHash();
4142  CInv inv(MSG_TX, hash);
4143  pto->m_tx_relay->setInventoryTxToSend.erase(hash);
4144  // Don't send transactions that peers will not put into their mempool
4145  if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
4146  continue;
4147  }
4148  if (pto->m_tx_relay->pfilter) {
4149  if (!pto->m_tx_relay->pfilter->IsRelevantAndUpdate(*txinfo.tx)) continue;
4150  }
4151  pto->m_tx_relay->filterInventoryKnown.insert(hash);
4152  vInv.push_back(inv);
4153  if (vInv.size() == MAX_INV_SZ) {
4154  connman->PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
4155  vInv.clear();
4156  }
4157  }
4158  pto->m_tx_relay->m_last_mempool_req = GetTime<std::chrono::seconds>();
4159  }
4160 
4161  // Determine transactions to relay
4162  if (fSendTrickle) {
4163  // Produce a vector with all candidates for sending
4164  std::vector<std::set<uint256>::iterator> vInvTx;
4165  vInvTx.reserve(pto->m_tx_relay->setInventoryTxToSend.size());
4166  for (std::set<uint256>::iterator it = pto->m_tx_relay->setInventoryTxToSend.begin(); it != pto->m_tx_relay->setInventoryTxToSend.end(); it++) {
4167  vInvTx.push_back(it);
4168  }
4169  CFeeRate filterrate;
4170  {
4171  LOCK(pto->m_tx_relay->cs_feeFilter);
4172  filterrate = CFeeRate(pto->m_tx_relay->minFeeFilter);
4173  }
4174  // Topologically and fee-rate sort the inventory we send for privacy and priority reasons.
4175  // A heap is used so that not all items need sorting if only a few are being sent.
4176  CompareInvMempoolOrder compareInvMempoolOrder(&m_mempool);
4177  std::make_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
4178  // No reason to drain out at many times the network's capacity,
4179  // especially since we have many peers and some will draw much shorter delays.
4180  unsigned int nRelayedTransactions = 0;
4181  LOCK(pto->m_tx_relay->cs_filter);
4182  while (!vInvTx.empty() && nRelayedTransactions < INVENTORY_BROADCAST_MAX) {
4183  // Fetch the top element from the heap
4184  std::pop_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
4185  std::set<uint256>::iterator it = vInvTx.back();
4186  vInvTx.pop_back();
4187  uint256 hash = *it;
4188  // Remove it from the to-be-sent set
4189  pto->m_tx_relay->setInventoryTxToSend.erase(it);
4190  // Check if not in the filter already
4191  if (pto->m_tx_relay->filterInventoryKnown.contains(hash)) {
4192  continue;
4193  }
4194  // Not in the mempool anymore? don't bother sending it.
4195  auto txinfo = m_mempool.info(hash);
4196  if (!txinfo.tx) {
4197  continue;
4198  }
4199  // Peer told you to not send transactions at that feerate? Don't bother sending it.
4200  if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
4201  continue;
4202  }
4203  if (pto->m_tx_relay->pfilter && !pto->m_tx_relay->pfilter->IsRelevantAndUpdate(*txinfo.tx)) continue;
4204  // Send
4205  vInv.push_back(CInv(MSG_TX, hash));
4206  nRelayedTransactions++;
4207  {
4208  // Expire old relay messages
4209  while (!vRelayExpiration.empty() && vRelayExpiration.front().first < nNow)
4210  {
4211  mapRelay.erase(vRelayExpiration.front().second);
4212  vRelayExpiration.pop_front();
4213  }
4214 
4215  auto ret = mapRelay.insert(std::make_pair(hash, std::move(txinfo.tx)));
4216  if (ret.second) {
4217  vRelayExpiration.push_back(std::make_pair(nNow + std::chrono::microseconds{RELAY_TX_CACHE_TIME}.count(), ret.first));
4218  }
4219  }
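 // Transactions inserted into mapRelay above remain answerable to getdata
 // requests for RELAY_TX_CACHE_TIME even if they later leave the mempool;
 // the vRelayExpiration deque evicts them once that window has passed.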
4220  if (vInv.size() == MAX_INV_SZ) {
4221  connman->PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
4222  vInv.clear();
4223  }
4224  pto->m_tx_relay->filterInventoryKnown.insert(hash);
4225  }
4226  }
4227  }
4228  }
4229  if (!vInv.empty())
4230  connman->PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
4231 
4232  // Detect whether we're stalling
4233  current_time = GetTime<std::chrono::microseconds>();
4234  // nNow is the current system time (GetTimeMicros is not mockable) and
4235  // should be replaced by the mockable current_time eventually
4236  nNow = GetTimeMicros();
4237  if (state.nStallingSince && state.nStallingSince < nNow - 1000000 * BLOCK_STALLING_TIMEOUT) {
4238  // Stalling only triggers when the block download window cannot move. During normal steady state,
4239  // the download window should be much larger than the to-be-downloaded set of blocks, so disconnection
4240  // should only happen during initial block download.
4241  LogPrintf("Peer=%d is stalling block download, disconnecting\n", pto->GetId());
4242  pto->fDisconnect = true;
4243  return true;
4244  }
4245  // In case there is a block that has been in flight from this peer for 1 + 0.5 * N times the block interval
4246  // (with N the number of peers from which we're downloading validated blocks), disconnect due to timeout.
4247  // We compensate for other peers to prevent killing off peers due to our own downstream link
4248  // being saturated. We only count validated in-flight blocks so peers can't advertise non-existing block hashes
4249  // to unreasonably increase our timeout.
4250  if (state.vBlocksInFlight.size() > 0) {
4251  QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
4252  int nOtherPeersWithValidatedDownloads = nPeersWithValidatedDownloads - (state.nBlocksInFlightValidHeaders > 0);
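 // With BLOCK_DOWNLOAD_TIMEOUT_BASE = 1000000 and BLOCK_DOWNLOAD_TIMEOUT_PER_PEER = 500000
 // (both expressed in millionths of the block interval), this timeout works out to
 // roughly nPowTargetSpacing * (1 + 0.5 * nOtherPeersWithValidatedDownloads),
 // i.e. about 10 minutes plus 5 minutes per other downloading peer on mainnet.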
4253  if (nNow > state.nDownloadingSince + consensusParams.nPowTargetSpacing * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) {
4254  LogPrintf("Timeout downloading block %s from peer=%d, disconnecting\n", queuedBlock.hash.ToString(), pto->GetId());
4255  pto->fDisconnect = true;
4256  return true;
4257  }
4258  }
4259  // Check for headers sync timeouts
4260  if (state.fSyncStarted && state.nHeadersSyncTimeout < std::numeric_limits<int64_t>::max()) {
4261  // Detect whether this is a stalling initial-headers-sync peer
4262  if (pindexBestHeader->GetBlockTime() <= GetAdjustedTime() - 24 * 60 * 60) {
4263  if (nNow > state.nHeadersSyncTimeout && nSyncStarted == 1 && (nPreferredDownload - state.fPreferredDownload >= 1)) {
4264  // Disconnect a peer (without the noban permission) if it is our only sync peer,
4265  // and we have others we could be using instead.
4266  // Note: If all our peers are inbound, then we won't
4267  // disconnect our sync peer for stalling; we have bigger
4268  // problems if we can't get any outbound peers.
4269  if (!pto->HasPermission(PF_NOBAN)) {
4270  LogPrintf("Timeout downloading headers from peer=%d, disconnecting\n", pto->GetId());
4271  pto->fDisconnect = true;
4272  return true;
4273  } else {
4274  LogPrintf("Timeout downloading headers from noban peer=%d, not disconnecting\n", pto->GetId());
4275  // Reset the headers sync state so that we have a
4276  // chance to try downloading from a different peer.
4277  // Note: this will also result in at least one more
4278  // getheaders message to be sent to
4279  // this peer (eventually).
4280  state.fSyncStarted = false;
4281  nSyncStarted--;
4282  state.nHeadersSyncTimeout = 0;
4283  }
4284  }
4285  } else {
4286  // After we've caught up once, reset the timeout so we can't trigger
4287  // disconnect later.
4288  state.nHeadersSyncTimeout = std::numeric_limits<int64_t>::max();
4289  }
4290  }
4291 
4292  // Check that outbound peers have reasonable chains
4293  // GetTime() is used by this anti-DoS logic so we can test this using mocktime
4294  ConsiderEviction(*pto, GetTime());
4295 
4296  //
4297  // Message: getdata (blocks)
4298  //
4299  std::vector<CInv> vGetData;
4300  if (!pto->fClient && ((fFetch && !pto->m_limited_node) || !::ChainstateActive().IsInitialBlockDownload()) && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
4301  std::vector<const CBlockIndex*> vToDownload;
4302  NodeId staller = -1;
4303  FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller, consensusParams);
4304  for (const CBlockIndex *pindex : vToDownload) {
4305  uint32_t nFetchFlags = GetFetchFlags(*pto);
4306  vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
4307  MarkBlockAsInFlight(m_mempool, pto->GetId(), pindex->GetBlockHash(), pindex);
4308  LogPrint(BCLog::NET, "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(),
4309  pindex->nHeight, pto->GetId());
4310  }
4311  if (state.nBlocksInFlight == 0 && staller != -1) {
4312  if (State(staller)->nStallingSince == 0) {
4313  State(staller)->nStallingSince = nNow;
4314  LogPrint(BCLog::NET, "Stall started peer=%d\n", staller);
4315  }
4316  }
4317  }
4318 
4319  //
4320  // Message: getdata (non-blocks)
4321  //
4322 
4323  // For robustness, expire old requests after a long timeout, so that
4324  // we can resume downloading transactions from a peer even if they
4325  // were unresponsive in the past.
4326  // Eventually we should consider disconnecting peers, but this is
4327  // conservative.
4328  if (state.m_tx_download.m_check_expiry_timer <= current_time) {
4329  for (auto it=state.m_tx_download.m_tx_in_flight.begin(); it != state.m_tx_download.m_tx_in_flight.end();) {
4330  if (it->second <= current_time - TX_EXPIRY_INTERVAL) {
4331  LogPrint(BCLog::NET, "timeout of inflight tx %s from peer=%d\n", it->first.ToString(), pto->GetId());
4332  state.m_tx_download.m_tx_announced.erase(it->first);
4333  state.m_tx_download.m_tx_in_flight.erase(it++);
4334  } else {
4335  ++it;
4336  }
4337  }
4338  // On average, we do this check every TX_EXPIRY_INTERVAL. Randomize
4339  // so that we're not doing this for all peers at the same time.
4340  state.m_tx_download.m_check_expiry_timer = current_time + TX_EXPIRY_INTERVAL / 2 + GetRandMicros(TX_EXPIRY_INTERVAL);
4341  }
4342 
4343  auto& tx_process_time = state.m_tx_download.m_tx_process_time;
4344  while (!tx_process_time.empty() && tx_process_time.begin()->first <= current_time && state.m_tx_download.m_tx_in_flight.size() < MAX_PEER_TX_IN_FLIGHT) {
4345  const uint256 txid = tx_process_time.begin()->second;
4346  // Erase this entry from tx_process_time (it may be added back for
4347  // processing at a later time, see below)
4348  tx_process_time.erase(tx_process_time.begin());
4349  CInv inv(MSG_TX | GetFetchFlags(*pto), txid);
4350  if (!AlreadyHave(inv, m_mempool)) {
4351  // If this transaction was last requested more than 1 minute ago,
4352  // then request.
4353  const auto last_request_time = GetTxRequestTime(inv.hash);
4354  if (last_request_time <= current_time - GETDATA_TX_INTERVAL) {
4355  LogPrint(BCLog::NET, "Requesting %s peer=%d\n", inv.ToString(), pto->GetId());
4356  vGetData.push_back(inv);
4357  if (vGetData.size() >= MAX_GETDATA_SZ) {
4358  connman->PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
4359  vGetData.clear();
4360  }
4361  UpdateTxRequestTime(inv.hash, current_time);
4362  state.m_tx_download.m_tx_in_flight.emplace(inv.hash, current_time);
4363  } else {
4364  // This transaction is in flight from someone else; queue
4365  // up processing to happen after the download times out
4366  // (with a slight delay for inbound peers, to prefer
4367  // requests to outbound peers).
4368  const auto next_process_time = CalculateTxGetDataTime(txid, current_time, !state.fPreferredDownload);
4369  tx_process_time.emplace(next_process_time, txid);
4370  }
4371  } else {
4372  // We have already seen this transaction, no need to download.
4373  state.m_tx_download.m_tx_announced.erase(inv.hash);
4374  state.m_tx_download.m_tx_in_flight.erase(inv.hash);
4375  }
4376  }
4377 
4378 
4379  if (!vGetData.empty())
4380  connman->PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
4381 
4382  //
4383  // Message: feefilter
4384  //
4385  if (pto->m_tx_relay != nullptr && pto->nVersion >= FEEFILTER_VERSION && gArgs.GetBoolArg("-feefilter", DEFAULT_FEEFILTER) &&
4386  !pto->HasPermission(PF_FORCERELAY) // peers with the forcerelay permission should not filter txs to us
4387  ) {
4388  CAmount currentFilter = m_mempool.GetMinFee(gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFeePerK();
4389  int64_t timeNow = GetTimeMicros();
4390  static FeeFilterRounder g_filter_rounder{CFeeRate{DEFAULT_MIN_RELAY_TX_FEE}};
4391  if (m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
4392  // Received tx-inv messages are discarded when the active
4393  // chainstate is in IBD, so tell the peer to not send them.
4394  currentFilter = MAX_MONEY;
4395  } else {
4396  static const CAmount MAX_FILTER{g_filter_rounder.round(MAX_MONEY)};
4397  if (pto->m_tx_relay->lastSentFeeFilter == MAX_FILTER) {
4398  // Send the current filter if we sent MAX_FILTER previously
4399  // and made it out of IBD.
4400  pto->m_tx_relay->nextSendTimeFeeFilter = timeNow - 1;
4401  }
4402  }
4403  if (timeNow > pto->m_tx_relay->nextSendTimeFeeFilter) {
4404  CAmount filterToSend = g_filter_rounder.round(currentFilter);
4405  // We always have a fee filter of at least minRelayTxFee
4406  filterToSend = std::max(filterToSend, ::minRelayTxFee.GetFeePerK());
4407  if (filterToSend != pto->m_tx_relay->lastSentFeeFilter) {
4408  connman->PushMessage(pto, msgMaker.Make(NetMsgType::FEEFILTER, filterToSend));
4409  pto->m_tx_relay->lastSentFeeFilter = filterToSend;
4410  }
4411  pto->m_tx_relay->nextSendTimeFeeFilter = PoissonNextSend(timeNow, AVG_FEEFILTER_BROADCAST_INTERVAL);
4412  }
4413  // If the fee filter has changed substantially and it's still more than MAX_FEEFILTER_CHANGE_DELAY
4414  // until scheduled broadcast, then move the broadcast to within MAX_FEEFILTER_CHANGE_DELAY.
4415  else if (timeNow + MAX_FEEFILTER_CHANGE_DELAY * 1000000 < pto->m_tx_relay->nextSendTimeFeeFilter &&
4416  (currentFilter < 3 * pto->m_tx_relay->lastSentFeeFilter / 4 || currentFilter > 4 * pto->m_tx_relay->lastSentFeeFilter / 3)) {
4417  pto->m_tx_relay->nextSendTimeFeeFilter = timeNow + GetRandInt(MAX_FEEFILTER_CHANGE_DELAY) * 1000000;
4418  }
4419  }
4420  }
4421  return true;
4422 }
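The feefilter branch above amounts to a small scheduling policy: advertise the rounded mempool minimum fee (never below minRelayTxFee), only when it differs from the last value sent, on a Poisson-distributed schedule, and pull the next broadcast forward when the mempool fee moves by more than a third in either direction. The snippet below is a minimal, self-contained sketch of that policy, not the file's own code: PeerFilterState, RoundFee, NextPoissonSend and MaybeSendFeeFilter are invented stand-ins for the peer's m_tx_relay fields, FeeFilterRounder::round and PoissonNextSend, and the IBD special case (advertising MAX_MONEY) is left out.

#include <algorithm>
#include <cstdint>
#include <random>

using Amount = int64_t; // satoshis, like CAmount

struct PeerFilterState {
    Amount last_sent_filter{0};
    int64_t next_send_time_us{0};
};

// Simplified stand-in for FeeFilterRounder::round(): quantize the fee so the
// exact mempool fee level is not leaked (the real rounder uses randomized fee buckets).
static Amount RoundFee(Amount fee) { return (fee / 1000) * 1000; }

// Simplified stand-in for PoissonNextSend(): exponentially distributed delay
// with the given average, returned as an absolute time in microseconds.
static int64_t NextPoissonSend(int64_t now_us, int avg_interval_s, std::mt19937_64& rng)
{
    std::exponential_distribution<double> dist(1.0 / avg_interval_s);
    return now_us + static_cast<int64_t>(dist(rng) * 1000000.0);
}

// Returns true when a feefilter message should be pushed to this peer now.
static bool MaybeSendFeeFilter(PeerFilterState& peer, Amount mempool_min_fee_per_k,
                               Amount min_relay_fee_per_k, int64_t now_us,
                               int max_change_delay_s, int avg_interval_s,
                               std::mt19937_64& rng)
{
    bool send = false;
    if (now_us > peer.next_send_time_us) {
        // Never advertise a filter below the relay floor, and only send when
        // the rounded value actually changed.
        const Amount to_send = std::max(RoundFee(mempool_min_fee_per_k), min_relay_fee_per_k);
        if (to_send != peer.last_sent_filter) {
            peer.last_sent_filter = to_send;
            send = true;
        }
        peer.next_send_time_us = NextPoissonSend(now_us, avg_interval_s, rng);
    } else if (now_us + int64_t{max_change_delay_s} * 1000000 < peer.next_send_time_us &&
               (mempool_min_fee_per_k < 3 * peer.last_sent_filter / 4 ||
                mempool_min_fee_per_k > 4 * peer.last_sent_filter / 3)) {
        // The fee level moved by more than a third: reschedule the broadcast
        // to happen within max_change_delay_s, but do not send immediately.
        std::uniform_int_distribution<int> jitter(0, max_change_delay_s - 1);
        peer.next_send_time_us = now_us + int64_t{jitter(rng)} * 1000000;
    }
    return send;
}

The two thresholds, 3/4 and 4/3 of the last sent value, mirror the comparison in the listing, so small fee wobbles never reschedule the broadcast.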
4423 
4424 class CNetProcessingCleanup
4425 {
4426 public:
4427  CNetProcessingCleanup() {}
4428  ~CNetProcessingCleanup() {
4429  // orphan transactions
4430  mapOrphanTransactions.clear();
4431  mapOrphanTransactionsByPrev.clear();
4432  }
4433 };
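For the getdata (non-blocks) section of SendMessages, the scheduling can be read as a per-peer request queue: announced txids wait in a time-ordered multimap, at most MAX_PEER_TX_IN_FLIGHT requests are outstanding per peer, and a transaction already requested from another peer within GETDATA_TX_INTERVAL is pushed back until that request would have expired, with an extra INBOUND_PEER_TX_DELAY for inbound peers plus up to MAX_GETDATA_RANDOM_DELAY of jitter so no single peer is consistently preferred. A compact model of that behaviour, using plain std:: containers and invented names (PeerTxDownload, SelectTxToRequest, NextConsiderTime) rather than the file's own helpers, might look like this:

#include <chrono>
#include <cstdint>
#include <map>
#include <random>
#include <set>
#include <vector>

using namespace std::chrono;
using TxId = uint64_t; // stand-in for uint256

constexpr size_t MAX_IN_FLIGHT = 100;               // like MAX_PEER_TX_IN_FLIGHT
constexpr microseconds REQUEST_INTERVAL = 60s;      // like GETDATA_TX_INTERVAL
constexpr microseconds INBOUND_DELAY = 2s;          // like INBOUND_PEER_TX_DELAY
constexpr microseconds MAX_JITTER = 2s;             // like MAX_GETDATA_RANDOM_DELAY

struct PeerTxDownload {
    std::multimap<microseconds, TxId> process_time; // when to (re)consider each txid
    std::map<TxId, microseconds> in_flight;         // our outstanding requests to this peer
};

// Global "last time anyone requested this txid", like GetTxRequestTime().
static std::map<TxId, microseconds> g_last_request;

static microseconds NextConsiderTime(microseconds now, bool inbound, std::mt19937_64& rng)
{
    std::uniform_int_distribution<int64_t> jitter(0, MAX_JITTER.count() - 1);
    return now + REQUEST_INTERVAL + (inbound ? INBOUND_DELAY : 0us) + microseconds{jitter(rng)};
}

// Drain the peer's queue and return the txids to put in a GETDATA message now.
static std::vector<TxId> SelectTxToRequest(PeerTxDownload& peer, microseconds now,
                                           bool inbound, const std::set<TxId>& already_have,
                                           std::mt19937_64& rng)
{
    std::vector<TxId> to_request;
    while (!peer.process_time.empty() && peer.process_time.begin()->first <= now &&
           peer.in_flight.size() < MAX_IN_FLIGHT) {
        const TxId txid = peer.process_time.begin()->second;
        peer.process_time.erase(peer.process_time.begin());
        if (already_have.count(txid)) continue;  // already in mempool or recently confirmed
        const auto last = g_last_request.count(txid) ? g_last_request[txid] : microseconds{0};
        if (last <= now - REQUEST_INTERVAL) {
            // Nobody has been asked recently: request from this peer.
            g_last_request[txid] = now;
            peer.in_flight.emplace(txid, now);
            to_request.push_back(txid);
        } else {
            // In flight from someone else: retry after that request could have expired.
            peer.process_time.emplace(NextConsiderTime(now, inbound, rng), txid);
        }
    }
    return to_request;
}

Everything a real node would additionally do with the selected txids, such as batching them into GETDATA messages of at most MAX_GETDATA_SZ entries, is deliberately omitted from this sketch.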
std::shared_ptr< const CTransaction > CTransactionRef
Definition: transaction.h:387
const char * GETCFILTERS
getcfilters requests compact filters for a range of blocks.
Definition: protocol.cpp:43
arith_uint256 nChainWork
(memory only) Total amount of work (expected number of hashes) in the chain up to and including this ...
Definition: chain.h:162
static int64_t GetTransactionWeight(const CTransaction &tx)
Definition: validation.h:133
static void ProcessGetData(CNode &pfrom, const CChainParams &chainparams, CConnman *connman, CTxMemPool &mempool, const std::atomic< bool > &interruptMsgProc) LOCKS_EXCLUDED(cs_main)
static constexpr int64_t MINIMUM_CONNECT_TIME
Minimum time an outbound-peer-eviction candidate must be connected for, in order to evict...
static constexpr std::chrono::microseconds GETDATA_TX_INTERVAL
How long to wait (in microseconds) before downloading a transaction from an additional peer...
std::string SanitizeString(const std::string &str, int rule)
Remove unsafe chars.
enum ReadStatus_t ReadStatus
const char * PING
The ping message is sent periodically to help confirm that the receiving peer is still connected...
Definition: protocol.cpp:31
CTxMemPool mempool
void Misbehaving(NodeId nodeid, int howmuch, const std::string &message="") EXCLUSIVE_LOCKS_REQUIRED(cs_main)
Increase a node's misbehavior score.
std::atomic< uint64_t > nPingNonceSent
Definition: net.h:846
BlockFilterIndex is used to store and retrieve block filters, hashes, and headers for a range of bloc...
bool IsArgSet(const std::string &strArg) const
Return true if the given argument has been manually set.
Definition: system.cpp:370
if(expired !=0)
Definition: validation.cpp:339
const char * FILTERLOAD
The filterload message tells the receiving peer to filter all relayed transactions and requested merk...
Definition: protocol.cpp:34
const char * MERKLEBLOCK
The merkleblock message is a reply to a getdata message which requested a block using the inventory t...
Definition: protocol.cpp:23
std::atomic_bool fPauseSend
Definition: net.h:782
uint64_t GetRand(uint64_t nMax) noexcept
Generate a uniform random integer in the range [0..range).
Definition: random.cpp:586
static const int SERIALIZE_TRANSACTION_NO_WITNESS
Definition: transaction.h:15
invalid by consensus rules
int GetSendVersion() const
Definition: net.cpp:634
const char * BLOCKTXN
Contains a BlockTransactions.
Definition: protocol.cpp:42
bool fPruneMode
True if we're running in -prune mode.
Definition: validation.cpp:138
static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER
Number of blocks that can be requested at any given time from a single peer.
static constexpr unsigned int INVENTORY_BROADCAST_MAX
Maximum number of inventory items to send per transmission.
static const int MAX_BLOCKTXN_DEPTH
Maximum depth of blocks we're willing to respond to GETBLOCKTXN requests for.
void Discourage(const CNetAddr &net_addr)
Definition: banman.cpp:112
Definition: banman.h:57
ReadStatus FillBlock(CBlock &block, const std::vector< CTransactionRef > &vtx_missing)
ServiceFlags
nServices flags
Definition: protocol.h:270
ChainstateManager & m_chainman
bool IsLocal() const
Definition: netaddress.cpp:230
void SetNull()
Definition: uint256.h:38
#define LogPrint(category,...)
Definition: logging.h:182
int64_t GetBlockTime() const
Definition: chain.h:247
CConnman *const connman
Describes a place in the block chain to another node such that if the other node doesn't have the sam...
Definition: block.h:114
CBlockIndex * pprev
pointer to the index of the predecessor of this block
Definition: chain.h:144
bool ProcessNewBlock(const CChainParams &chainparams, const std::shared_ptr< const CBlock > pblock, bool fForceProcessing, bool *fNewBlock) LOCKS_EXCLUDED(cs_main)
Process an incoming block.
std::vector< TxMempoolInfo > infoAll() const
Definition: txmempool.cpp:791
CSipHasher & Write(uint64_t data)
Hash a 64-bit integer worth of data. It is treated as if this was the little-endian interpretation of ...
Definition: siphash.cpp:28
#define TRY_LOCK(cs, name)
Definition: sync.h:230
STL-like map container that only keeps the N elements with the highest value.
Definition: limitedmap.h:13
uint32_t nStatus
Verification status of this block. See enum BlockStatus.
Definition: chain.h:174
void scheduleEvery(Function f, std::chrono::milliseconds delta)
Repeat f until the scheduler is stopped.
Definition: scheduler.cpp:108
void SetIP(const CNetAddr &ip)
Definition: netaddress.cpp:29
void WakeMessageHandler()
Definition: net.cpp:1474
void SetServices(const CService &addr, ServiceFlags nServices)
Definition: net.cpp:2514
std::string ToString() const
Definition: protocol.cpp:188
Definition: block.h:62
Defined in BIP144.
Definition: protocol.h:408
uint64_t ReadCompactSize(Stream &is)
Definition: serialize.h:308
We don't have the previous block the checked one is built on.
CChain & ChainActive()
Please prefer the identical ChainstateManager::ActiveChain.
Definition: validation.cpp:112
void PushTxInventory(const uint256 &hash)
Definition: net.h:975
const char * GETADDR
The getaddr message requests an addr message from the receiving node, preferably one with lots of IP ...
Definition: protocol.cpp:29
void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) override
Overridden from CValidationInterface.
int64_t nTimeExpire
static const unsigned int DEFAULT_MAX_MEMPOOL_SIZE
Default for -maxmempool, maximum megabytes of mempool memory usage.
Definition: policy.h:32
static const CAmount MAX_MONEY
No amount larger than this (in satoshi) is valid.
Definition: amount.h:25
Provides an interface for creating and interacting with one or two chainstates: an IBD chainstate gen...
Definition: validation.h:729
Defined in BIP152.
Definition: protocol.h:406
std::vector< uint16_t > indexes
int GetRecvVersion() const
Definition: net.h:921
#define strprintf
Format arguments and return the string or write to given std::ostream (see tinyformat::format doc for...
Definition: tinyformat.h:1164
void CheckForStaleTipAndEvictPeers(const Consensus::Params &consensusParams)
Evict extra outbound peers.
bool SendMessages(CNode *pto) override EXCLUSIVE_LOCKS_REQUIRED(pto -> cs_sendProcessing)
Send queued protocol messages to be sent to a given node.
int64_t count_microseconds(std::chrono::microseconds t)
Definition: time.h:26
bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats)
Get statistics from node state.
size_t DynamicMemoryUsage() const
Definition: txmempool.cpp:918
reverse_range< T > reverse_iterate(T &x)
bool ReadRawBlockFromDisk(std::vector< uint8_t > &block, const FlatFilePos &pos, const CMessageHeader::MessageStartChars &message_start)
inv message data
Definition: protocol.h:413
invalid proof of work or time too old
bool ReadBlockFromDisk(CBlock &block, const FlatFilePos &pos, const Consensus::Params &consensusParams)
Functions for disk access for blocks.
const char * SENDCMPCT
Contains a 1-byte bool and 8-byte LE version number.
Definition: protocol.cpp:39
BlockFilterIndex * GetBlockFilterIndex(BlockFilterType filter_type)
Get a block filter index by type.
static constexpr int STALE_RELAY_AGE_LIMIT
Age after which a stale block will no longer be served if requested as protection against fingerprint...
static const unsigned int MIN_BLOCKS_TO_KEEP
Block files containing a block-height within MIN_BLOCKS_TO_KEEP of ChainActive().Tip() will not be pr...
Definition: validation.h:85
constexpr auto GetRandMillis
Definition: random.h:84
const CBlockIndex * LastCommonAncestor(const CBlockIndex *pa, const CBlockIndex *pb)
Find the last common ancestor two blocks have.
Definition: chain.cpp:156
transaction was missing some of its inputs
CTxMemPool & m_mempool
unsigned int nHeight
TxMempoolInfo info(const uint256 &hash) const
Definition: txmempool.cpp:814
All parent headers found, difficulty matches, timestamp >= median previous, checkpoint.
Definition: chain.h:101
bool MoneyRange(const CAmount &nValue)
Definition: amount.h:26
CBlockHeader GetBlockHeader() const
Definition: chain.h:220
std::vector< unsigned char > ParseHex(const char *psz)
int Height() const
Return the maximal height in the chain.
Definition: chain.h:421
static CTransactionRef FindTxForGetData(CNode &peer, const uint256 &txid, const std::chrono::seconds mempool_req, const std::chrono::seconds longlived_mempool_time) LOCKS_EXCLUDED(cs_main)
Determine whether or not a peer can request a transaction, and return it (or nullptr if not found or ...
Defined in BIP144.
Definition: protocol.h:407
BloomFilter is a probabilistic filter which SPV clients provide so that we can filter the transaction...
Definition: bloom.h:44
static bool BlockRequestAllowed(const CBlockIndex *pindex, const Consensus::Params &consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
Mutex cs_inventory
Definition: net.h:805
bool GetTryNewOutboundPeer()
Definition: net.cpp:1747
CTransactionRef tx
unsigned long size() const
Definition: txmempool.h:680
const char * CFHEADERS
cfheaders is a response to a getcfheaders request containing a filter header and a vector of filter h...
Definition: protocol.cpp:46
static void ProcessHeadersMessage(CNode &pfrom, CConnman *connman, ChainstateManager &chainman, CTxMemPool &mempool, const std::vector< CBlockHeader > &headers, const CChainParams &chainparams, bool via_compact_block)
Only first tx is coinbase, 2 <= coinbase input script length <= 100, transactions valid...
Definition: chain.h:108
static const int BIP0031_VERSION
BIP 0031, pong message, is enabled for all versions AFTER this one.
Definition: version.h:21
void PushMessage(CNode *pnode, CSerializedNetMsg &&msg)
Definition: net.cpp:2795
RecursiveMutex cs_vProcessMsg
Definition: net.h:735
arith_uint256 nMinimumChainWork
Minimum work we will assume exists on some valid chain.
Definition: validation.cpp:147
static void ProcessGetCFHeaders(CNode &pfrom, CDataStream &vRecv, const CChainParams &chain_params, CConnman &connman)
Handle a cfheaders request.
void SetVersion(int nVersionIn)
Definition: net.h:625
static void LogPrintf(const char *fmt, const Args &... args)
Definition: logging.h:166
static bool PrepareBlockFilterRequest(CNode &pfrom, const CChainParams &chain_params, BlockFilterType filter_type, uint32_t start_height, const uint256 &stop_hash, uint32_t max_height_diff, const CBlockIndex *&stop_index, BlockFilterIndex *&filter_index)
Validation logic for compact filters request handling.
void SetServiceFlagsIBDCache(bool state)
Set the current IBD status in order to figure out the desirable service flags.
Definition: protocol.cpp:154
RollingBloomFilter is a probabilistic "keep track of most recently inserted" set. ...
Definition: bloom.h:99
static bool AlreadyHave(const CInv &inv, const CTxMemPool &mempool) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
CAddress GetLocalAddress(const CNetAddr *paddrPeer, ServiceFlags nLocalServices)
Definition: net.cpp:169
static const int SENDHEADERS_VERSION
"sendheaders" command and announcing blocks with headers starts with this version ...
Definition: version.h:27
CChainParams defines various tweakable parameters of a given instance of the Bitcoin system...
Definition: chainparams.h:47
violated mempool's fee/size/descendant/RBF/etc limits
static bool IsOutboundDisconnectionCandidate(const CNode &node)
bool IsNull() const
Definition: block.h:135
Double ended buffer combining vector and stream-like interfaces.
Definition: streams.h:201
bool empty() const
Definition: streams.h:293
bool GetBoolArg(const std::string &strArg, bool fDefault) const
Return boolean argument or default value.
Definition: system.cpp:392
void SetTryNewOutboundPeer(bool flag)
Definition: net.cpp:1752
const uint32_t MSG_WITNESS_FLAG
getdata message type flags
Definition: protocol.h:393
uint64_t GetLocalNonce() const
Definition: net.h:901
bool SeenLocal(const CService &addr)
vote for a local address
Definition: net.cpp:282
std::vector< CAddress > vAddrToSend
Definition: net.h:793
void GetRandBytes(unsigned char *buf, int num) noexcept
Overall design of the RNG and entropy sources.
Definition: random.cpp:579
transaction spends a coinbase too early, or violates locktime/sequence locks
std::atomic< int > nStartingHeight
Definition: net.h:790
const char * CFILTER
cfilter is a response to a getcfilters request containing a single compact filter.
Definition: protocol.cpp:44
static const unsigned int MAX_SCRIPT_ELEMENT_SIZE
Definition: script.h:23
void PushAddress(const CAddress &_addr, FastRandomContext &insecure_rand)
Definition: net.h:951
void SetRecvVersion(int nVersionIn)
Definition: net.h:917
const char * PONG
The pong message replies to a ping message, proving to the pinging node that the ponging node is stil...
Definition: protocol.cpp:32
unsigned char * begin()
Definition: uint256.h:54
static constexpr std::chrono::seconds RELAY_TX_CACHE_TIME
How long to cache transactions in mapRelay for normal relay.
initial value. Tx has not yet been rejected
static constexpr uint32_t MAX_GETCFILTERS_SIZE
Maximum number of compact filters that may be requested with one getcfilters.
bool IsNull() const
Definition: uint256.h:30
bool ProcessMessages(CNode *pfrom, std::atomic< bool > &interrupt) override
Process protocol messages received from a given node.
const char * HEADERS
The headers message sends one or more block headers to a node which previously requested certain head...
Definition: protocol.cpp:27
const char * GETCFCHECKPT
getcfcheckpt requests evenly spaced compact filter headers, enabling parallelized download and valida...
Definition: protocol.cpp:47
std::set< uint256 > orphan_work_set
Definition: net.h:856
std::atomic< ServiceFlags > nServices
Definition: net.h:725
static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT
Protect at least this many outbound peers from disconnection due to slow/ behind headers chain...
const std::vector< CTxIn > vin
Definition: transaction.h:268
constexpr auto GetRandMicros
Definition: random.h:83
void SetAddrLocal(const CService &addrLocalIn)
May not be called more than once.
Definition: net.cpp:505
std::deque< CInv > vRecvGetData
Definition: net.h:741
const char * INV
The inv message (inventory message) transmits one or more inventories of objects known to the transmi...
Definition: protocol.cpp:21
CTransactionRef tx
The transaction itself.
Definition: txmempool.h:330
bool AddOrphanTx(const CTransactionRef &tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
bool ForNode(NodeId id, std::function< bool(CNode *pnode)> func)
Definition: net.cpp:2828
const std::unique_ptr< CRollingBloomFilter > m_addr_known
Definition: net.h:794
static constexpr std::chrono::microseconds MAX_GETDATA_RANDOM_DELAY
Maximum delay (in microseconds) for transaction requests to avoid biasing some peers over others...
void check(const CCoinsViewCache *pcoins) const
If sanity-checking is turned on, check makes sure the pool is consistent (does not contain two transa...
Definition: txmempool.cpp:613
static const bool DEFAULT_PEERBLOCKFILTERS
static constexpr int32_t MAX_PEER_TX_IN_FLIGHT
Maximum number of in-flight transactions from a peer.
bool DisconnectNode(const std::string &node)
Definition: net.cpp:2584
int64_t CAmount
Amount in satoshis (Can be negative)
Definition: amount.h:12
uint256 GetBlockHash() const
Definition: chain.h:233
bool IsAddrRelayPeer() const
Definition: net.h:799
bool IsValid(enum BlockStatus nUpTo=BLOCK_VALID_TRANSACTIONS) const
Check whether this block index entry is valid up to the passed validity level.
Definition: chain.h:282
bool done
bool IsDiscouraged(const CNetAddr &net_addr)
Return whether net_addr is discouraged.
Definition: banman.cpp:71
bool fSentAddr
Definition: net.h:776
bool IsValid() const
Definition: validation.h:106
std::atomic< int64_t > nPingUsecTime
Definition: net.h:850
BlockFilterType
Definition: blockfilter.h:88
std::string ToString(const T &t)
Locale-independent version of std::to_string.
Definition: string.h:69
static bool MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState &state, bool via_compact_block, const std::string &message="")
Potentially mark a node discouraged based on the contents of a BlockValidationState object...
std::atomic< int64_t > nMinPingUsecTime
Definition: net.h:852
int GetMyStartingHeight() const
Definition: net.h:905
#define LOCK2(cs1, cs2)
Definition: sync.h:227
initial value. Block has not yet been rejected
static constexpr int64_t EXTRA_PEER_CHECK_INTERVAL
How frequently to check for extra outbound peers and disconnect, in seconds.
ServiceFlags GetLocalServices() const
Definition: net.h:988
Outputs do not overspend inputs, no double spends, coinbase output ok, no immature coinbase spends...
Definition: chain.h:112
bool fClient
Definition: net.h:769
Used to relay blocks as header + vector<merkle branch> to filtered nodes.
Definition: merkleblock.h:124
const char * GETHEADERS
The getheaders message requests a headers message that provides block headers starting from a particu...
Definition: protocol.cpp:25
BanMan *const m_banman
size_type size() const
Definition: streams.h:292
Invalid by a change to consensus rules more recent than SegWit.
static constexpr int64_t ORPHAN_TX_EXPIRE_TIME
Expiration time for orphan transactions in seconds.
size_t nProcessQueueSize
Definition: net.h:737
Scripts & signatures ok. Implies all parents are also at least SCRIPTS.
Definition: chain.h:115
Transaction might be missing a witness, have a witness prior to SegWit activation, or witness may have been malleated (which includes non-standard witnesses).
void ReattemptInitialBroadcast(CScheduler &scheduler) const
Retrieve unbroadcast transactions from the mempool and reattempt sending to peers.
CFeeRate minRelayTxFee
A fee rate smaller than this is considered zero fee (for relaying, mining and transaction creation) ...
Definition: validation.cpp:149
CBlockIndex * pindexBestHeader
Best header we've seen so far (used for getheaders queries' starting points).
Definition: validation.cpp:130
std::vector< CTransactionRef > txn
static RecursiveMutex cs_most_recent_block
this block was cached as being invalid and we didn't store the reason why
bool exists(const uint256 &hash) const
Definition: txmempool.h:692
bool fOneShot
Definition: net.h:767
An input of a transaction.
Definition: transaction.h:57
static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS
Minimum blocks required to signal NODE_NETWORK_LIMITED.
static bool HasAllDesirableServiceFlags(ServiceFlags services)
A shortcut for (services & GetDesirableServiceFlags(services)) == GetDesirableServiceFlags(services)...
Definition: protocol.h:343
bool AcceptToMemoryPool(CTxMemPool &pool, TxValidationState &state, const CTransactionRef &tx, std::list< CTransactionRef > *plTxnReplaced, bool bypass_limits, const CAmount nAbsurdFee, bool test_accept)
(try to) add transaction to memory pool. plTxnReplaced will be appended to with all transactions repla...
#define LOCK(cs)
Definition: sync.h:226
const char * name
Definition: rest.cpp:41
static constexpr std::chrono::minutes PING_INTERVAL
Time between pings automatically sent out for latency probing and keepalive.
const uint256 & GetHash() const
Definition: transaction.h:303
std::string ToString() const
Definition: validation.h:112
the block failed to meet one of our checkpoints
bool IsPeerAddrLocalGood(CNode *pnode)
Definition: net.cpp:189
int type
Definition: protocol.h:426
static void ProcessGetBlockData(CNode &pfrom, const CChainParams &chainparams, const CInv &inv, CConnman *connman)
static const int INIT_PROTO_VERSION
initial proto version, to be increased after version/verack negotiation
Definition: version.h:15
void ProcessMessage(CNode &pfrom, const std::string &msg_type, CDataStream &vRecv, const std::chrono::microseconds time_received, const CChainParams &chainparams, ChainstateManager &chainman, CTxMemPool &mempool, CConnman *connman, BanMan *banman, const std::atomic< bool > &interruptMsgProc)
bool Contains(const CBlockIndex *pindex) const
Efficiently check whether a block is present in this chain.
Definition: chain.h:408
A combination of a network address (CNetAddr) and a (TCP) port.
Definition: netaddress.h:139
Fast randomness source.
Definition: random.h:119
Transport protocol agnostic message container.
Definition: net.h:612
bool g_relay_txes
Definition: net.cpp:102
static void SendBlockTransactions(const CBlock &block, const BlockTransactionsRequest &req, CNode &pfrom, CConnman *connman)
static constexpr unsigned int MAX_FEEFILTER_CHANGE_DELAY
Maximum feefilter broadcast delay after significant change.
int64_t PoissonNextSendInbound(int64_t now, int average_interval_seconds)
Attempts to obfuscate tx time through exponentially distributed emitting.
Definition: net.cpp:2841
bool OutboundTargetReached(bool historicalBlockServingLimit)
check if the outbound target is reached if param historicalBlockServingLimit is set true...
Definition: net.cpp:2691
int64_t nPowTargetSpacing
Definition: params.h:78
static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_BASE
Headers download timeout expressed in microseconds. Timeout = base + per_header * (expected number of ...
std::vector< CAddress > GetAddresses()
Definition: net.cpp:2529
CBlockIndex * Next(const CBlockIndex *pindex) const
Find the successor of a block in this chain, or nullptr if the given index is not found or is the tip...
Definition: chain.h:413
void ConsiderEviction(CNode &pto, int64_t time_in_seconds) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
Consider evicting an outbound peer based on the amount of time they've been behind our tip...
const char * SENDHEADERS
Indicates that a node prefers to receive new block announcements via a "headers" message rather than ...
Definition: protocol.cpp:37
static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER
const char * MEMPOOL
The mempool message requests the TXIDs of transactions that the receiving node has verified as valid ...
Definition: protocol.cpp:30
static const unsigned int DEFAULT_MIN_RELAY_TX_FEE
Default for -minrelaytxfee, minimum relay fee for transactions.
Definition: validation.h:56
bool ActivateBestChain(BlockValidationState &state, const CChainParams &chainparams, std::shared_ptr< const CBlock > pblock)
Find the best known block, and make it the tip of the block chain.
static const unsigned int MAX_HEADERS_RESULTS
Number of headers sent in one getheaders result.
static const int SHORT_IDS_BLOCKS_VERSION
short-id-based block download starts with this version
Definition: version.h:33
void ForEachNodeThen(Callable &&pre, CallableAfter &&post)
Definition: net.h:228
RecursiveMutex cs_main
Mutex to guard access to validation specific variables, such as reading or changing the chainstate...
Definition: validation.cpp:128
static void ProcessGetCFCheckPt(CNode &pfrom, CDataStream &vRecv, const CChainParams &chain_params, CConnman &connman)
Handle a getcfcheckpt request.
bool IsProxy(const CNetAddr &addr)
Definition: netbase.cpp:766
bool m_manual_connection
Definition: net.h:768
static constexpr int64_t CHAIN_SYNC_TIMEOUT
Timeout for (unprotected) outbound peers to sync to our chainwork, in seconds.
const std::vector< CTxOut > vout
Definition: transaction.h:269
A CService with information about it as peer.
Definition: protocol.h:358
std::map< uint256, COrphanTx > mapOrphanTransactions GUARDED_BY(g_cs_orphans)
std::vector< unsigned char > GetKey() const
Definition: netaddress.cpp:727
static int EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
uint256 hash
Definition: protocol.h:427
static const int64_t BLOCK_DOWNLOAD_TIMEOUT_PER_PEER
Additional block download timeout per parallel downloading peer (i.e.
Result GetResult() const
Definition: validation.h:109
static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY
SHA256("main address relay")[0:8].
static constexpr uint32_t MAX_GETCFHEADERS_SIZE
Maximum number of cf hashes that may be requested with one getcfheaders.
bool LookupFilterHashRange(int start_height, const CBlockIndex *stop_index, std::vector< uint256 > &hashes_out) const
Get a range of filter hashes between two heights on a chain.
static const bool DEFAULT_FEEFILTER
Default for using fee filter.
Definition: validation.h:81
const char * ADDR
The addr (IP address) message relays connection information for peers on the network.
Definition: protocol.cpp:20
const CMessageHeader::MessageStartChars & MessageStart() const
Definition: chainparams.h:61
int64_t NodeId
Definition: net.h:90
Definition: net.h:118
void AddNewAddresses(const std::vector< CAddress > &vAddr, const CAddress &addrFrom, int64_t nTimePenalty=0)
Definition: net.cpp:2524
bool GetNetworkActive() const
Definition: net.h:197
static const unsigned int INVENTORY_BROADCAST_INTERVAL
Average delay between trickled inventory transmissions in seconds.
const char * FILTERCLEAR
The filterclear message tells the receiving peer to remove a previously-set bloom filter...
Definition: protocol.cpp:36
bool fGetAddr
Definition: net.h:795
std::atomic_bool fImporting
std::string ToString() const
Definition: uint256.cpp:60
std::vector< uint256 > vHave
Definition: block.h:116
NodeId GetId() const
Definition: net.h:897
const char * NOTFOUND
The notfound message is a reply to a getdata message which requested an object the receiving node doe...
Definition: protocol.cpp:33
CSipHasher GetDeterministicRandomizer(uint64_t id) const
Get a unique deterministic randomizer.
Definition: net.cpp:2857
Parameters that influence chain consensus.
Definition: params.h:45
static const int MAX_CMPCTBLOCK_DEPTH
Maximum depth of blocks we're willing to serve as compact blocks to peers when requested.
An outpoint - a combination of a transaction hash and an index n into its vout.
Definition: transaction.h:18
const char * BLOCK
The block message transmits a single serialized block.
Definition: protocol.cpp:28
std::atomic_bool fDisconnect
Definition: net.h:775
std::string strSubVersion
Subversion as sent to the P2P network in version messages.
Definition: net.cpp:106
const char * FEEFILTER
The feefilter message tells the receiving peer not to inv us any txs which do not meet the specified ...
Definition: protocol.cpp:38
const char * GETCFHEADERS
getcfheaders requests a compact filter header and the filter hashes for a range of blocks...
Definition: protocol.cpp:45
CFeeRate GetMinFee(size_t sizelimit) const
The minimum fee to get into the mempool, which may itself not be enough for larger-sized transactions...
Definition: txmempool.cpp:1003
RecursiveMutex g_cs_orphans
void ForEachNode(Callable &&func)
Definition: net.h:208
#define WITH_LOCK(cs, code)
Run code while locking a mutex.
Definition: sync.h:253
bool IsRoutable() const
Definition: netaddress.cpp:302
uint64_t GetHash() const
Definition: netaddress.cpp:547
static bool MayHaveUsefulAddressDB(ServiceFlags services)
Checks if a peer with the given service flags may be capable of having a robust address-storage DB...
Definition: protocol.h:352
unsigned int GetReceiveFloodSize() const
Definition: net.cpp:2747
static const int MAX_UNCONNECTING_HEADERS
Maximum number of unconnecting headers announcements before DoS score.
RecursiveMutex cs_SubVer
Definition: net.h:754
bool CheckIncomingNonce(uint64_t nonce)
Definition: net.cpp:345
void EvictExtraOutboundPeers(int64_t time_in_seconds) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
If we have extra outbound peers, try to disconnect the one with the oldest block announcement.
void RelayTransaction(const uint256 &txid, const CConnman &connman)
Relay transaction to every node.
unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans)
const CAddress addr
Definition: net.h:750
static constexpr std::chrono::microseconds TX_EXPIRY_INTERVAL
How long to wait (in microseconds) before expiring an in-flight getdata request to a peer...
const char * GETBLOCKS
The getblocks message requests an inv message that provides block header hashes starting from a parti...
Definition: protocol.cpp:24
static constexpr int32_t MAX_PEER_TX_ANNOUNCEMENTS
Maximum number of announced transactions from a peer.
static constexpr int CFCHECKPT_INTERVAL
Interval between compact filter checkpoints.
const int64_t nTimeConnected
Definition: net.h:747
int64_t GetTimeMicros()
Returns the system time (not mockable)
Definition: time.cpp:65
bool ProcessNewBlockHeaders(const std::vector< CBlockHeader > &block, BlockValidationState &state, const CChainParams &chainparams, const CBlockIndex **ppindex=nullptr) LOCKS_EXCLUDED(cs_main)
Process incoming block headers.
int64_t m_stale_tip_check_time
Next time to check for stale tip.
std::atomic_bool fReindex
const char * VERACK
The verack message acknowledges a previously-received version message, informing the connecting node ...
Definition: protocol.cpp:19
uint256 GetHash() const
Definition: block.cpp:11
CBlockIndex * LookupBlockIndex(const uint256 &hash)
Definition: validation.cpp:174
std::atomic< bool > fPingQueued
Definition: net.h:854
256-bit opaque blob.
Definition: uint256.h:120
invalid by consensus rules (excluding any below reasons)
void AddInventoryKnown(const CInv &inv)
Definition: net.h:967
bool HasWitness() const
Definition: transaction.h:333
bool IsReachable(enum Network net)
Definition: net.cpp:270
CChainState & ChainstateActive()
Please prefer the identical ChainstateManager::ActiveChainstate.
Definition: validation.cpp:105
static const unsigned int MAX_GETDATA_SZ
Limit to avoid sending big packets.
ServiceFlags nServices
Definition: protocol.h:387
void NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr< const CBlock > &pblock) override
Overridden from CValidationInterface.
#define EXCLUSIVE_LOCKS_REQUIRED(...)
Definition: threadsafety.h:53
std::vector< CTransactionRef > vtx
Definition: block.h:66
std::set< NodeId > setMisbehaving
const char * CMPCTBLOCK
Contains a CBlockHeaderAndShortTxIDs object - providing a header and list of "short txids"...
Definition: protocol.cpp:40
void BlockDisconnected(const std::shared_ptr< const CBlock > &block, const CBlockIndex *pindex) override
Notifies listeners of a block being disconnected.
the block's data didn't match the data committed to by the PoW
bool fFeeler
Definition: net.h:766
CTxMemPool stores valid-according-to-the-current-best-chain transactions that may be included in the ...
Definition: txmempool.h:443
std::atomic< int64_t > nLastTXTime
Definition: net.h:842
const bool fInbound
Definition: net.h:771
bool CompareDepthAndScore(const uint256 &hasha, const uint256 &hashb)
Definition: txmempool.cpp:729
#define LOCKS_EXCLUDED(...)
Definition: threadsafety.h:52
static constexpr std::chrono::microseconds INBOUND_PEER_TX_DELAY
How many microseconds to delay requesting transactions from inbound peers.
void BlockConnected(const std::shared_ptr< const CBlock > &pblock, const CBlockIndex *pindexConnected) override
Overridden from CValidationInterface.
static const int FEEFILTER_VERSION
"feefilter" tells peers to filter invs to you by fee starts with this version
Definition: version.h:30
const char * VERSION
The version message provides information about the transmitting node to the receiving node at the beg...
Definition: protocol.cpp:18
std::vector< std::pair< unsigned int, uint256 > > vMatchedTxn
Public only for unit testing and relay testing (not relayed).
Definition: merkleblock.h:137
The block chain is a tree shaped structure starting with the genesis block at the root...
Definition: chain.h:137
const CChainParams & Params()
Return the currently selected parameters.
uint256 hashContinue
Definition: net.h:789
void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds)
static const unsigned int BLOCK_DOWNLOAD_WINDOW
Size of the "block download window": how far ahead of our current height do we fetch? Larger windows tolerate larger download speed differences between peers, but increase the potential degree of disordering of blocks on disk (which makes reindexing and pruning harder).
static const int MIN_PEER_PROTO_VERSION
disconnect from peers older than this proto version
Definition: version.h:18
static const unsigned int MAX_INV_SZ
The maximum number of entries in an &#39;inv&#39; protocol message.
static const int PROTOCOL_VERSION
network protocol versioning
Definition: version.h:12
static const unsigned int MAX_STANDARD_TX_WEIGHT
The maximum weight for transactions we're willing to relay/mine.
Definition: policy.h:24
bool IsTxAvailable(size_t index) const
static const unsigned int DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN
Default number of orphan+recently-replaced txn to keep around for block reconstruction.
A block this one builds on is invalid.
std::string GetArg(const std::string &strArg, const std::string &strDefault) const
Return string argument or default value.
Definition: system.cpp:380
CBlockIndex * FindForkInGlobalIndex(const CChain &chain, const CBlockLocator &locator)
Find the last common block between the parameter chain and a locator.
Definition: validation.cpp:181
bool fLogIPs
Definition: logging.cpp:35
int64_t GetAdjustedTime()
Definition: timedata.cpp:34
ServiceFlags GetDesirableServiceFlags(ServiceFlags services)
Gets the set of service flags which are "desirable" for a given peer.
Definition: protocol.cpp:147
static const unsigned int BLOCK_STALLING_TIMEOUT
Timeout in seconds during which a peer must stall block download progress before being disconnected...
void SetSendVersion(int nVersionIn)
Definition: net.cpp:620
bool LookupFilterHeader(const CBlockIndex *block_index, uint256 &header_out)
Get a single filter header by block.
CBlockIndex * Tip() const
Returns the index entry for the tip of this chain, or nullptr if none.
Definition: chain.h:390
void SetBestHeight(int height)
Definition: net.cpp:2737
bool IsInitialBlockDownload() const
Check whether we are doing an initial block download (synchronizing from disk or network) ...
#define LIMITED_STRING(obj, n)
Definition: serialize.h:470
void EraseOrphansFor(NodeId peer)
static void ProcessOrphanTx(CConnman *connman, CTxMemPool &mempool, std::set< uint256 > &orphan_work_set, std::list< CTransactionRef > &removed_txn) EXCLUSIVE_LOCKS_REQUIRED(cs_main
static uint32_t GetFetchFlags(const CNode &pfrom) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
std::atomic< int64_t > nTimeOffset
Definition: net.h:748
int64_t PoissonNextSend(int64_t now, int average_interval_seconds)
Return a timestamp in the future (in microseconds) for exponentially distributed events.
Definition: net.cpp:2852
ArgsManager gArgs
Definition: system.cpp:77
const char * GETDATA
The getdata message requests one or more data objects from another node.
Definition: protocol.cpp:22
bool fListen
Definition: net.cpp:101
Fee rate in satoshis per kilobyte: CAmount / kB.
Definition: feerate.h:29
std::atomic_bool fSuccessfullyConnected
Definition: net.h:772
SipHash-2-4.
Definition: siphash.h:13
#define AssertLockNotHeld(cs)
Definition: sync.h:76
bool IsInvalid() const
Definition: validation.h:107
static int count
Definition: tests.c:35
static const unsigned int MAX_SUBVERSION_LENGTH
Maximum length of the user agent string in version message.
Definition: net.h:58
std::atomic< int > nVersion
Definition: net.h:753
Invalid by a change to consensus rules more recent than SegWit.
bool IsWitnessEnabled(const CBlockIndex *pindexPrev, const Consensus::Params &params)
Check whether witness commitments are required for a block, and whether to enforce NULLDUMMY (BIP 147...
static void RelayAddress(const CAddress &addr, bool fReachable, const CConnman &connman)
static size_t RecursiveDynamicUsage(const CScript &script)
Definition: core_memusage.h:12
PeerLogicValidation(CConnman *connman, BanMan *banman, CScheduler &scheduler, ChainstateManager &chainman, CTxMemPool &pool)
const char * CFCHECKPT
cfcheckpt is a response to a getcfcheckpt request containing a vector of evenly spaced filter headers...
Definition: protocol.cpp:48
ReadStatus InitData(const CBlockHeaderAndShortTxIDs &cmpctblock, const std::vector< std::pair< uint256, CTransactionRef >> &extra_txn)
void FinalizeNode(NodeId nodeid, bool &fUpdateConnectionTime) override
Handle removal of a peer by updating various state and removing it from mapNodeState.
static CNetProcessingCleanup instance_of_cnetprocessingcleanup
bool m_limited_node
Definition: net.h:770
std::string ToString() const
Definition: netaddress.cpp:751
block timestamp was > 2 hours in the future (or our clock is bad)
static const unsigned int MAX_BLOCKS_TO_ANNOUNCE
Maximum number of headers to announce when relaying blocks with headers message.
int GetExtraOutboundCount()
Definition: net.cpp:1764
static const unsigned int DEFAULT_BANSCORE_THRESHOLD
Definition: validation.h:77
void RemoveUnbroadcastTx(const uint256 &txid, const bool unchecked=false)
Removes a transaction from the unbroadcast set.
Definition: txmempool.cpp:924
CChainState & ActiveChainstate() const
The most-work chain.
static constexpr std::chrono::hours AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL
Average delay between local address broadcasts.
bool IsBanned(const CNetAddr &net_addr)
Return whether net_addr is banned.
Definition: banman.cpp:77
const char * TX
The tx message transmits a single transaction.
Definition: protocol.cpp:26
static const unsigned int MAX_LOCATOR_SZ
The maximum number of entries in a locator.
The basic transaction that is broadcasted on the network and contained in blocks. ...
Definition: transaction.h:251
void MarkAddressGood(const CAddress &addr)
Definition: net.cpp:2519
int nHeight
height of the entry in the chain. The genesis block has height 0
Definition: chain.h:150
Information about a peer.
Definition: net.h:715
const Consensus::Params & GetConsensus() const
Definition: chainparams.h:60
std::vector< int > vHeightInFlight
Simple class for background tasks that should be run periodically or once "after a while"...
Definition: scheduler.h:32
CBlockIndex * GetAncestor(int height)
Efficiently find an ancestor of this block.
Definition: chain.cpp:111
full block available in blk*.dat
Definition: chain.h:121
std::string GetAddrName() const
Definition: net.cpp:488
void AddTimeData(const CNetAddr &ip, int64_t nOffsetSample)
Definition: timedata.cpp:46
static void AddToCompactExtraTransactions(const CTransactionRef &tx) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
AssertLockHeld(g_cs_orphans)
void AddAddressKnown(const CAddress &_addr)
Definition: net.h:945
void InitializeNode(CNode *pnode) override
Initialize a peer by adding it to mapNodeState and pushing a message requesting its version...
int64_t GetTime()
Return system time (or mocked time, if set)
Definition: time.cpp:23
auto it
Definition: validation.cpp:384
static const unsigned int DEFAULT_MAX_ORPHAN_TRANSACTIONS
Default for -maxorphantx, maximum number of orphan transactions kept in memory.
static void ProcessGetCFilters(CNode &pfrom, CDataStream &vRecv, const CChainParams &chain_params, CConnman &connman)
Handle a cfilters request.
static const int INVALID_CB_NO_BAN_VERSION
not banning for invalid compact blocks starts with this version
Definition: version.h:36
COutPoint prevout
Definition: transaction.h:60
std::atomic_bool fPauseRecv
Definition: net.h:781
int GetRandInt(int nMax) noexcept
Definition: random.cpp:591
bool HasPermission(NetPermissionFlags permission) const
Definition: net.h:761
static constexpr int HISTORICAL_BLOCK_AGE
Age after which a block is considered historical for purposes of rate limiting block relay...
bool LookupFilterRange(int start_height, const CBlockIndex *stop_index, std::vector< BlockFilter > &filters_out) const
Get a range of filters between two heights on a chain.
std::atomic< int64_t > nLastBlockTime
Definition: net.h:841
Tx already in mempool or conflicts with a tx in the chain (if it conflicts with another tx in mempool...
uint32_t nTime
Definition: protocol.h:389
static constexpr int64_t STALE_CHECK_INTERVAL
How frequently to check for stale tips, in seconds.
didn't meet our local policy rules
bool HaveTxsDownloaded() const
Check whether this block's and all previous blocks' transactions have been downloaded (and stored to ...
Definition: chain.h:245
static constexpr int64_t ORPHAN_TX_EXPIRE_INTERVAL
Minimum time between orphan transactions expire time checks in seconds.
void BlockChecked(const CBlock &block, const BlockValidationState &state) override
Overridden from CValidationInterface.
CAmount GetFeePerK() const
Return the fee in satoshis for a size of 1000 bytes.
Definition: feerate.h:51
uint64_t randrange(uint64_t range) noexcept
Generate a random integer in the range [0..range).
Definition: random.h:190
void AdvertiseLocal(CNode *pnode)
Definition: net.cpp:197
bool GetUseAddrmanOutgoing() const
Definition: net.h:198
unsigned int nTx
Number of transactions in this block.
Definition: chain.h:166
void scheduleFromNow(Function f, std::chrono::milliseconds delta)
Call f once after the delta has passed.
Definition: scheduler.h:44
Nodes collect new transactions into a block, hash them into a hash tree, and scan through nonce value...
Definition: block.h:20
int in_avail() const
Definition: streams.h:390
Defined in BIP37.
Definition: protocol.h:405
static const int64_t BLOCK_DOWNLOAD_TIMEOUT_BASE
Block download timeout base, expressed in millionths of the block interval (i.e.
const char * FILTERADD
The filteradd message tells the receiving peer to add a single element to a previously-set bloom filt...
Definition: protocol.cpp:35
int64_t GetBlockProofEquivalentTime(const CBlockIndex &to, const CBlockIndex &from, const CBlockIndex &tip, const Consensus::Params &params)
Return the time it would take to redo the work difference between from and to, assuming the current h...
Definition: chain.cpp:137
std::atomic< std::chrono::microseconds > m_ping_start
When the last ping was sent, or 0 if no ping was ever sent.
Definition: net.h:848
const std::string & BlockFilterTypeName(BlockFilterType filter_type)
Get the human-readable name for a filter type.
const char * GETBLOCKTXN
Contains a BlockTransactionsRequest. Peer should respond with "blocktxn" message.
Definition: protocol.cpp:41
std::unique_ptr< TxRelay > m_tx_relay
Definition: net.h:835
CAmount GetFee(size_t nBytes) const
Return the fee in satoshis for the given size in bytes.
Definition: feerate.cpp:21
static constexpr std::chrono::seconds AVG_ADDRESS_BROADCAST_INTERVAL
Average delay between peer address broadcasts.
uint256 hash
Definition: transaction.h:21
static bool MaybePunishNodeForTx(NodeId nodeid, const TxValidationState &state, const std::string &message="")
Potentially disconnect and discourage a node based on the contents of a TxValidationState object...
Span< A > constexpr MakeSpan(A(&a)[N])
MakeSpan for arrays:
Definition: span.h:192
static constexpr unsigned int AVG_FEEFILTER_BROADCAST_INTERVAL
Average delay between feefilter broadcasts in seconds.
bool MaybeDiscourageAndDisconnect(CNode &pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main)