net_processing.cpp
1 // Copyright (c) 2009-2010 Satoshi Nakamoto
2 // Copyright (c) 2009-2020 The Bitcoin Core developers
3 // Distributed under the MIT software license, see the accompanying
4 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
5 
6 #include <net_processing.h>
7 
8 #include <addrman.h>
9 #include <banman.h>
10 #include <blockencodings.h>
11 #include <chainparams.h>
12 #include <consensus/validation.h>
13 #include <hash.h>
14 #include <validation.h>
15 #include <merkleblock.h>
16 #include <netmessagemaker.h>
17 #include <netbase.h>
18 #include <policy/fees.h>
19 #include <policy/policy.h>
20 #include <primitives/block.h>
21 #include <primitives/transaction.h>
22 #include <random.h>
23 #include <reverse_iterator.h>
24 #include <scheduler.h>
25 #include <tinyformat.h>
26 #include <txmempool.h>
27 #include <util/system.h>
28 #include <util/strencodings.h>
29 
30 #include <memory>
31 #include <typeinfo>
32 
33 #if defined(NDEBUG)
34 # error "Bitcoin cannot be compiled without assertions."
35 #endif
36 
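37 /** Expiration time for orphan transactions in seconds */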
38 static constexpr int64_t ORPHAN_TX_EXPIRE_TIME = 20 * 60;
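39 /** Minimum time between orphan transactions expire time checks in seconds */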
40 static constexpr int64_t ORPHAN_TX_EXPIRE_INTERVAL = 5 * 60;
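41 /** How long to cache transactions in mapRelay for normal relay */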
42 static constexpr std::chrono::seconds RELAY_TX_CACHE_TIME{15 * 60};
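43 /** Headers download timeout expressed in microseconds.
44  * Timeout = base + per_header * (expected number of headers) */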
45 static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_BASE = 15 * 60 * 1000000; // 15 minutes
46 static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1000; // 1ms/header
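48 /** Protect at least this many outbound peers from disconnection due to
49  * slow/behind headers chain */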
50 static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT = 4;
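51 /** Timeout for (unprotected) outbound peers to sync to our chainwork, in seconds */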
52 static constexpr int64_t CHAIN_SYNC_TIMEOUT = 20 * 60; // 20 minutes
54 static constexpr int64_t STALE_CHECK_INTERVAL = 10 * 60; // 10 minutes
56 static constexpr int64_t EXTRA_PEER_CHECK_INTERVAL = 45;
58 static constexpr int64_t MINIMUM_CONNECT_TIME = 30;
60 static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL;
63 static constexpr int STALE_RELAY_AGE_LIMIT = 30 * 24 * 60 * 60;
66 static constexpr int HISTORICAL_BLOCK_AGE = 7 * 24 * 60 * 60;
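67 /** Maximum number of in-flight transactions from a peer */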
68 static constexpr int32_t MAX_PEER_TX_IN_FLIGHT = 100;
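69 /** Maximum number of announced transactions from a peer */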
70 static constexpr int32_t MAX_PEER_TX_ANNOUNCEMENTS = 2 * MAX_INV_SZ;
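71 /** How many microseconds to delay requesting transactions from inbound peers */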
72 static constexpr std::chrono::microseconds INBOUND_PEER_TX_DELAY{std::chrono::seconds{2}};
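73 /** How long to wait (in microseconds) before downloading a transaction from an additional peer */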
74 static constexpr std::chrono::microseconds GETDATA_TX_INTERVAL{std::chrono::seconds{60}};
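75 /** Maximum delay (in microseconds) for transaction requests to avoid biasing some peers over others */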
76 static constexpr std::chrono::microseconds MAX_GETDATA_RANDOM_DELAY{std::chrono::seconds{2}};
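77 /** How long to wait (in microseconds) before expiring an in-flight getdata request to a peer */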
78 static constexpr std::chrono::microseconds TX_EXPIRY_INTERVAL{GETDATA_TX_INTERVAL * 10};
80 "To preserve security, MAX_GETDATA_RANDOM_DELAY should not exceed INBOUND_PEER_DELAY");
82 static const unsigned int MAX_GETDATA_SZ = 1000;
83 
84 
85 struct COrphanTx {
86  // When modifying, adapt the copy of this definition in tests/DoS_tests.
87  CTransactionRef tx;
88  NodeId fromPeer;
89  int64_t nTimeExpire;
90  size_t list_pos;
91 };
93 std::map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(g_cs_orphans);
94 
95 void EraseOrphansFor(NodeId peer);
96 
98 void Misbehaving(NodeId nodeid, int howmuch, const std::string& message="") EXCLUSIVE_LOCKS_REQUIRED(cs_main);
99 
101 static constexpr unsigned int AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL = 24 * 60 * 60;
103 static const unsigned int AVG_ADDRESS_BROADCAST_INTERVAL = 30;
106 static const unsigned int INVENTORY_BROADCAST_INTERVAL = 5;
109 static constexpr unsigned int INVENTORY_BROADCAST_MAX = 7 * INVENTORY_BROADCAST_INTERVAL;
111 static constexpr unsigned int AVG_FEEFILTER_BROADCAST_INTERVAL = 10 * 60;
113 static constexpr unsigned int MAX_FEEFILTER_CHANGE_DELAY = 5 * 60;
114 
115 // Internal stuff
116 namespace {
118  int nSyncStarted GUARDED_BY(cs_main) = 0;
119 
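123  /** Sources of received blocks, saved to be able to punish them when processing
124  * happens afterwards. Protected by cs_main.
125  * Set mapBlockSource[hash].second to false if the node should not be punished if the block is invalid. */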
126  std::map<uint256, std::pair<NodeId, bool>> mapBlockSource GUARDED_BY(cs_main);
127 
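143  /** Filter for transactions that were recently rejected by AcceptToMemoryPool.
144  * These are not rerequested until the chain tip changes, at which point
145  * the entire filter is reset. Protected by cs_main.
146  */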
147  std::unique_ptr<CRollingBloomFilter> recentRejects GUARDED_BY(cs_main);
148  uint256 hashRecentRejectsChainTip GUARDED_BY(cs_main);
149 
150  /*
151  * Filter for transactions that have been recently confirmed.
152  * We use this to avoid requesting transactions that have already been
153  * confirmed.
154  */
155  RecursiveMutex g_cs_recent_confirmed_transactions;
156  std::unique_ptr<CRollingBloomFilter> g_recent_confirmed_transactions GUARDED_BY(g_cs_recent_confirmed_transactions);
157 
159  struct QueuedBlock {
160  uint256 hash;
161  const CBlockIndex* pindex;
162  bool fValidatedHeaders;
163  std::unique_ptr<PartiallyDownloadedBlock> partialBlock;
164  };
165  std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> > mapBlocksInFlight GUARDED_BY(cs_main);
166 
168  std::list<NodeId> lNodesAnnouncingHeaderAndIDs GUARDED_BY(cs_main);
169 
171  int nPreferredDownload GUARDED_BY(cs_main) = 0;
172 
174  int nPeersWithValidatedDownloads GUARDED_BY(cs_main) = 0;
175 
177  int g_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0;
178 
180  std::atomic<int64_t> g_last_tip_update(0);
181 
183  typedef std::map<uint256, CTransactionRef> MapRelay;
184  MapRelay mapRelay GUARDED_BY(cs_main);
186  std::deque<std::pair<int64_t, MapRelay::iterator>> vRelayExpiration GUARDED_BY(cs_main);
187 
188  struct IteratorComparator
189  {
190  template<typename I>
191  bool operator()(const I& a, const I& b) const
192  {
193  return &(*a) < &(*b);
194  }
195  };
196  std::map<COutPoint, std::set<std::map<uint256, COrphanTx>::iterator, IteratorComparator>> mapOrphanTransactionsByPrev GUARDED_BY(g_cs_orphans);
197 
198  std::vector<std::map<uint256, COrphanTx>::iterator> g_orphan_list GUARDED_BY(g_cs_orphans);
199 
200  static size_t vExtraTxnForCompactIt GUARDED_BY(g_cs_orphans) = 0;
201  static std::vector<std::pair<uint256, CTransactionRef>> vExtraTxnForCompact GUARDED_BY(g_cs_orphans);
202 } // namespace
203 
204 namespace {
211 struct CNodeState {
213  const CService address;
215  bool fCurrentlyConnected;
217  int nMisbehavior;
219  bool fShouldBan;
221  const std::string name;
223  const CBlockIndex *pindexBestKnownBlock;
225  uint256 hashLastUnknownBlock;
227  const CBlockIndex *pindexLastCommonBlock;
229  const CBlockIndex *pindexBestHeaderSent;
231  int nUnconnectingHeaders;
233  bool fSyncStarted;
235  int64_t nHeadersSyncTimeout;
237  int64_t nStallingSince;
238  std::list<QueuedBlock> vBlocksInFlight;
240  int64_t nDownloadingSince;
241  int nBlocksInFlight;
242  int nBlocksInFlightValidHeaders;
244  bool fPreferredDownload;
246  bool fPreferHeaders;
248  bool fPreferHeaderAndIDs;
254  bool fProvidesHeaderAndIDs;
256  bool fHaveWitness;
258  bool fWantsCmpctWitness;
263  bool fSupportsDesiredCmpctVersion;
264 
279  struct ChainSyncTimeoutState {
281  int64_t m_timeout;
283  const CBlockIndex * m_work_header;
285  bool m_sent_getheaders;
287  bool m_protect;
288  };
289 
290  ChainSyncTimeoutState m_chain_sync;
291 
293  int64_t m_last_block_announcement;
294 
295  /*
296  * State associated with transaction download.
297  *
298  * Tx download algorithm:
299  *
300  * When inv comes in, queue up (process_time, txid) inside the peer's
301  * CNodeState (m_tx_process_time) as long as m_tx_announced for the peer
302  * isn't too big (MAX_PEER_TX_ANNOUNCEMENTS).
303  *
304  * The process_time for a transaction is set to nNow for outbound peers,
305  * nNow + 2 seconds for inbound peers. This is the time at which we'll
306  * consider trying to request the transaction from the peer in
307  * SendMessages(). The delay for inbound peers is to allow outbound peers
308  * a chance to announce before we request from inbound peers, to prevent
309  * an adversary from using inbound connections to blind us to a
310  * transaction (InvBlock).
311  *
312  * When we call SendMessages() for a given peer,
313  * we will loop over the transactions in m_tx_process_time, looking
314  * at the transactions whose process_time <= nNow. We'll request each
315  * such transaction that we don't have already and that hasn't been
316  * requested from another peer recently, up until we hit the
317  * MAX_PEER_TX_IN_FLIGHT limit for the peer. Then we'll update
318  * g_already_asked_for for each requested txid, storing the time of the
319  * GETDATA request. We use g_already_asked_for to coordinate transaction
320  * requests amongst our peers.
321  *
322  * For transactions that we still need but we have already recently
323  * requested from some other peer, we'll reinsert (process_time, txid)
324  * back into the peer's m_tx_process_time at the point in the future at
325  * which the most recent GETDATA request would time out (ie
326  * GETDATA_TX_INTERVAL + the request time stored in g_already_asked_for).
327  * We add an additional delay for inbound peers, again to prefer
328  * attempting download from outbound peers first.
329  * We also add an extra small random delay up to 2 seconds
330  * to avoid biasing some peers over others. (e.g., due to fixed ordering
331  * of peer processing in ThreadMessageHandler).
332  *
333  * When we receive a transaction from a peer, we remove the txid from the
334  * peer's m_tx_in_flight set and from their recently announced set
335  * (m_tx_announced). We also clear g_already_asked_for for that entry, so
336  * that if somehow the transaction is not accepted but also not added to
337  * the reject filter, then we will eventually redownload from other
338  * peers.
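 *
 * As a concrete example (using the constants defined above): a transaction
 * announced by an outbound peer that we have never requested before gets
 * process_time = now; the same announcement from an inbound peer gets
 * process_time = now + 2 seconds (INBOUND_PEER_TX_DELAY); and if the txid
 * was already requested from another peer at time R, the retry is scheduled
 * no earlier than R + GETDATA_TX_INTERVAL (60 seconds) plus a random delay
 * of up to MAX_GETDATA_RANDOM_DELAY (2 seconds).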
339  */
340  struct TxDownloadState {
341  /* Track when to attempt download of announced transactions (process
342  * time in micros -> txid)
343  */
344  std::multimap<std::chrono::microseconds, uint256> m_tx_process_time;
345 
347  std::set<uint256> m_tx_announced;
348 
350  std::map<uint256, std::chrono::microseconds> m_tx_in_flight;
351 
353  std::chrono::microseconds m_check_expiry_timer{0};
354  };
355 
356  TxDownloadState m_tx_download;
357 
359  bool m_is_inbound;
360 
362  bool m_is_manual_connection;
363 
364  CNodeState(CAddress addrIn, std::string addrNameIn, bool is_inbound, bool is_manual) :
365  address(addrIn), name(std::move(addrNameIn)), m_is_inbound(is_inbound),
366  m_is_manual_connection (is_manual)
367  {
368  fCurrentlyConnected = false;
369  nMisbehavior = 0;
370  fShouldBan = false;
371  pindexBestKnownBlock = nullptr;
372  hashLastUnknownBlock.SetNull();
373  pindexLastCommonBlock = nullptr;
374  pindexBestHeaderSent = nullptr;
375  nUnconnectingHeaders = 0;
376  fSyncStarted = false;
377  nHeadersSyncTimeout = 0;
378  nStallingSince = 0;
379  nDownloadingSince = 0;
380  nBlocksInFlight = 0;
381  nBlocksInFlightValidHeaders = 0;
382  fPreferredDownload = false;
383  fPreferHeaders = false;
384  fPreferHeaderAndIDs = false;
385  fProvidesHeaderAndIDs = false;
386  fHaveWitness = false;
387  fWantsCmpctWitness = false;
388  fSupportsDesiredCmpctVersion = false;
389  m_chain_sync = { 0, nullptr, false, false };
390  m_last_block_announcement = 0;
391  }
392 };
393 
394 // Keeps track of the time (in microseconds) when transactions were requested last time
395 limitedmap<uint256, std::chrono::microseconds> g_already_asked_for GUARDED_BY(cs_main)(MAX_INV_SZ);
396 
398 static std::map<NodeId, CNodeState> mapNodeState GUARDED_BY(cs_main);
399 
400 static CNodeState *State(NodeId pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
401  std::map<NodeId, CNodeState>::iterator it = mapNodeState.find(pnode);
402  if (it == mapNodeState.end())
403  return nullptr;
404  return &it->second;
405 }
406 
407 static void UpdatePreferredDownload(CNode* node, CNodeState* state) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
408 {
409  nPreferredDownload -= state->fPreferredDownload;
410 
411  // Whether this node should be marked as a preferred download node.
412  state->fPreferredDownload = (!node->fInbound || node->HasPermission(PF_NOBAN)) && !node->fOneShot && !node->fClient;
413 
414  nPreferredDownload += state->fPreferredDownload;
415 }
416 
417 static void PushNodeVersion(CNode *pnode, CConnman* connman, int64_t nTime)
418 {
419  // Note that pnode->GetLocalServices() is a reflection of the local
420  // services we were offering when the CNode object was created for this
421  // peer.
422  ServiceFlags nLocalNodeServices = pnode->GetLocalServices();
423  uint64_t nonce = pnode->GetLocalNonce();
424  int nNodeStartingHeight = pnode->GetMyStartingHeight();
425  NodeId nodeid = pnode->GetId();
426  CAddress addr = pnode->addr;
427 
428  CAddress addrYou = (addr.IsRoutable() && !IsProxy(addr) ? addr : CAddress(CService(), addr.nServices));
429  CAddress addrMe = CAddress(CService(), nLocalNodeServices);
430 
431  connman->PushMessage(pnode, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERSION, PROTOCOL_VERSION, (uint64_t)nLocalNodeServices, nTime, addrYou, addrMe,
432  nonce, strSubVersion, nNodeStartingHeight, ::g_relay_txes && pnode->m_tx_relay != nullptr));
433 
434  if (fLogIPs) {
435  LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, us=%s, them=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), addrYou.ToString(), nodeid);
436  } else {
437  LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, us=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), nodeid);
438  }
439 }
440 
441 // Returns a bool indicating whether we requested this block.
442 // Also used if a block was /not/ received and timed out or started with another peer
443 static bool MarkBlockAsReceived(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
444  std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
445  if (itInFlight != mapBlocksInFlight.end()) {
446  CNodeState *state = State(itInFlight->second.first);
447  assert(state != nullptr);
448  state->nBlocksInFlightValidHeaders -= itInFlight->second.second->fValidatedHeaders;
449  if (state->nBlocksInFlightValidHeaders == 0 && itInFlight->second.second->fValidatedHeaders) {
450  // Last validated block on the queue was received.
451  nPeersWithValidatedDownloads--;
452  }
453  if (state->vBlocksInFlight.begin() == itInFlight->second.second) {
454  // First block on the queue was received, update the start download time for the next one
455  state->nDownloadingSince = std::max(state->nDownloadingSince, GetTimeMicros());
456  }
457  state->vBlocksInFlight.erase(itInFlight->second.second);
458  state->nBlocksInFlight--;
459  state->nStallingSince = 0;
460  mapBlocksInFlight.erase(itInFlight);
461  return true;
462  }
463  return false;
464 }
465 
466 // returns false, still setting pit, if the block was already in flight from the same peer
467 // pit will only be valid as long as the same cs_main lock is being held
468 static bool MarkBlockAsInFlight(CTxMemPool& mempool, NodeId nodeid, const uint256& hash, const CBlockIndex* pindex = nullptr, std::list<QueuedBlock>::iterator** pit = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
469  CNodeState *state = State(nodeid);
470  assert(state != nullptr);
471 
472  // Short-circuit most stuff in case it is from the same node
473  std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
474  if (itInFlight != mapBlocksInFlight.end() && itInFlight->second.first == nodeid) {
475  if (pit) {
476  *pit = &itInFlight->second.second;
477  }
478  return false;
479  }
480 
481  // Make sure it's not listed somewhere already.
482  MarkBlockAsReceived(hash);
483 
484  std::list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(),
485  {hash, pindex, pindex != nullptr, std::unique_ptr<PartiallyDownloadedBlock>(pit ? new PartiallyDownloadedBlock(&mempool) : nullptr)});
486  state->nBlocksInFlight++;
487  state->nBlocksInFlightValidHeaders += it->fValidatedHeaders;
488  if (state->nBlocksInFlight == 1) {
489  // We're starting a block download (batch) from this peer.
490  state->nDownloadingSince = GetTimeMicros();
491  }
492  if (state->nBlocksInFlightValidHeaders == 1 && pindex != nullptr) {
493  nPeersWithValidatedDownloads++;
494  }
495  itInFlight = mapBlocksInFlight.insert(std::make_pair(hash, std::make_pair(nodeid, it))).first;
496  if (pit)
497  *pit = &itInFlight->second.second;
498  return true;
499 }
500 
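501 /** Check whether the last unknown block a peer advertised is not yet known. */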
502 static void ProcessBlockAvailability(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
503  CNodeState *state = State(nodeid);
504  assert(state != nullptr);
505 
506  if (!state->hashLastUnknownBlock.IsNull()) {
507  const CBlockIndex* pindex = LookupBlockIndex(state->hashLastUnknownBlock);
508  if (pindex && pindex->nChainWork > 0) {
509  if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
510  state->pindexBestKnownBlock = pindex;
511  }
512  state->hashLastUnknownBlock.SetNull();
513  }
514  }
515 }
516 
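517 /** Update tracking information about which blocks a peer is assumed to have. */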
518 static void UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
519  CNodeState *state = State(nodeid);
520  assert(state != nullptr);
521 
522  ProcessBlockAvailability(nodeid);
523 
524  const CBlockIndex* pindex = LookupBlockIndex(hash);
525  if (pindex && pindex->nChainWork > 0) {
526  // An actually better block was announced.
527  if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
528  state->pindexBestKnownBlock = pindex;
529  }
530  } else {
531  // An unknown block was announced; just assume that the latest one is the best one.
532  state->hashLastUnknownBlock = hash;
533  }
534 }
535 
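538 /** When a peer sends us a new valid block, instruct it to announce future blocks
539  * to us via compact blocks, by adding its nodeid to the end of
540  * lNodesAnnouncingHeaderAndIDs; keep the list to at most 3 peers by telling
541  * the oldest entry to stop announcing with compact blocks. */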
542 static void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid, CConnman* connman) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
543 {
545  CNodeState* nodestate = State(nodeid);
546  if (!nodestate || !nodestate->fSupportsDesiredCmpctVersion) {
547  // Never ask from peers who can't provide witnesses.
548  return;
549  }
550  if (nodestate->fProvidesHeaderAndIDs) {
551  for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin(); it != lNodesAnnouncingHeaderAndIDs.end(); it++) {
552  if (*it == nodeid) {
553  lNodesAnnouncingHeaderAndIDs.erase(it);
554  lNodesAnnouncingHeaderAndIDs.push_back(nodeid);
555  return;
556  }
557  }
558  connman->ForNode(nodeid, [connman](CNode* pfrom){
560  uint64_t nCMPCTBLOCKVersion = (pfrom->GetLocalServices() & NODE_WITNESS) ? 2 : 1;
561  if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
562  // As per BIP152, we only get 3 of our peers to announce
563  // blocks using compact encodings.
564  connman->ForNode(lNodesAnnouncingHeaderAndIDs.front(), [connman, nCMPCTBLOCKVersion](CNode* pnodeStop){
566  connman->PushMessage(pnodeStop, CNetMsgMaker(pnodeStop->GetSendVersion()).Make(NetMsgType::SENDCMPCT, /*fAnnounceUsingCMPCTBLOCK=*/false, nCMPCTBLOCKVersion));
567  return true;
568  });
569  lNodesAnnouncingHeaderAndIDs.pop_front();
570  }
571  connman->PushMessage(pfrom, CNetMsgMaker(pfrom->GetSendVersion()).Make(NetMsgType::SENDCMPCT, /*fAnnounceUsingCMPCTBLOCK=*/true, nCMPCTBLOCKVersion));
572  lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
573  return true;
574  });
575  }
576 }
577 
578 static bool TipMayBeStale(const Consensus::Params &consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
579 {
581  if (g_last_tip_update == 0) {
582  g_last_tip_update = GetTime();
583  }
584  return g_last_tip_update < GetTime() - consensusParams.nPowTargetSpacing * 3 && mapBlocksInFlight.empty();
585 }
586 
587 static bool CanDirectFetch(const Consensus::Params &consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
588 {
589  return ::ChainActive().Tip()->GetBlockTime() > GetAdjustedTime() - consensusParams.nPowTargetSpacing * 20;
590 }
591 
592 static bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
593 {
594  if (state->pindexBestKnownBlock && pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight))
595  return true;
596  if (state->pindexBestHeaderSent && pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight))
597  return true;
598  return false;
599 }
600 
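601 /** Update pindexLastCommonBlock and add not-in-flight missing successors to vBlocks,
602  * until it has at most count entries. */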
603 static void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller, const Consensus::Params& consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
604 {
605  if (count == 0)
606  return;
607 
608  vBlocks.reserve(vBlocks.size() + count);
609  CNodeState *state = State(nodeid);
610  assert(state != nullptr);
611 
612  // Make sure pindexBestKnownBlock is up to date, we'll need it.
613  ProcessBlockAvailability(nodeid);
614 
615  if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->nChainWork < ::ChainActive().Tip()->nChainWork || state->pindexBestKnownBlock->nChainWork < nMinimumChainWork) {
616  // This peer has nothing interesting.
617  return;
618  }
619 
620  if (state->pindexLastCommonBlock == nullptr) {
621  // Bootstrap quickly by guessing a parent of our best tip is the forking point.
622  // Guessing wrong in either direction is not a problem.
623  state->pindexLastCommonBlock = ::ChainActive()[std::min(state->pindexBestKnownBlock->nHeight, ::ChainActive().Height())];
624  }
625 
626  // If the peer reorganized, our previous pindexLastCommonBlock may not be an ancestor
627  // of its current tip anymore. Go back enough to fix that.
628  state->pindexLastCommonBlock = LastCommonAncestor(state->pindexLastCommonBlock, state->pindexBestKnownBlock);
629  if (state->pindexLastCommonBlock == state->pindexBestKnownBlock)
630  return;
631 
632  std::vector<const CBlockIndex*> vToFetch;
633  const CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
634  // Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last
635  // linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to
636  // download that next block if the window were 1 larger.
637  int nWindowEnd = state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW;
638  int nMaxHeight = std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
639  NodeId waitingfor = -1;
640  while (pindexWalk->nHeight < nMaxHeight) {
641  // Read up to 128 (or more, if more blocks than that are needed) successors of pindexWalk (towards
642  // pindexBestKnownBlock) into vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as expensive
643  // as iterating over ~100 CBlockIndex* entries anyway.
644  int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight, std::max<int>(count - vBlocks.size(), 128));
645  vToFetch.resize(nToFetch);
646  pindexWalk = state->pindexBestKnownBlock->GetAncestor(pindexWalk->nHeight + nToFetch);
647  vToFetch[nToFetch - 1] = pindexWalk;
648  for (unsigned int i = nToFetch - 1; i > 0; i--) {
649  vToFetch[i - 1] = vToFetch[i]->pprev;
650  }
651 
652  // Iterate over those blocks in vToFetch (in forward direction), adding the ones that
653  // are not yet downloaded and not in flight to vBlocks. In the meantime, update
654  // pindexLastCommonBlock as long as all ancestors are already downloaded, or if it's
655  // already part of our chain (and therefore don't need it even if pruned).
656  for (const CBlockIndex* pindex : vToFetch) {
657  if (!pindex->IsValid(BLOCK_VALID_TREE)) {
658  // We consider the chain that this peer is on invalid.
659  return;
660  }
661  if (!State(nodeid)->fHaveWitness && IsWitnessEnabled(pindex->pprev, consensusParams)) {
662  // We wouldn't download this block or its descendants from this peer.
663  return;
664  }
665  if (pindex->nStatus & BLOCK_HAVE_DATA || ::ChainActive().Contains(pindex)) {
666  if (pindex->HaveTxsDownloaded())
667  state->pindexLastCommonBlock = pindex;
668  } else if (mapBlocksInFlight.count(pindex->GetBlockHash()) == 0) {
669  // The block is not already downloaded, and not yet in flight.
670  if (pindex->nHeight > nWindowEnd) {
671  // We reached the end of the window.
672  if (vBlocks.size() == 0 && waitingfor != nodeid) {
673  // We aren't able to fetch anything, but we would be if the download window was one larger.
674  nodeStaller = waitingfor;
675  }
676  return;
677  }
678  vBlocks.push_back(pindex);
679  if (vBlocks.size() == count) {
680  return;
681  }
682  } else if (waitingfor == -1) {
683  // This is the first already-in-flight block.
684  waitingfor = mapBlocksInFlight[pindex->GetBlockHash()].first;
685  }
686  }
687  }
688 }
689 
690 void EraseTxRequest(const uint256& txid) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
691 {
692  g_already_asked_for.erase(txid);
693 }
694 
695 std::chrono::microseconds GetTxRequestTime(const uint256& txid) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
696 {
697  auto it = g_already_asked_for.find(txid);
698  if (it != g_already_asked_for.end()) {
699  return it->second;
700  }
701  return {};
702 }
703 
704 void UpdateTxRequestTime(const uint256& txid, std::chrono::microseconds request_time) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
705 {
706  auto it = g_already_asked_for.find(txid);
707  if (it == g_already_asked_for.end()) {
708  g_already_asked_for.insert(std::make_pair(txid, request_time));
709  } else {
710  g_already_asked_for.update(it, request_time);
711  }
712 }
713 
714 std::chrono::microseconds CalculateTxGetDataTime(const uint256& txid, std::chrono::microseconds current_time, bool use_inbound_delay) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
715 {
716  std::chrono::microseconds process_time;
717  const auto last_request_time = GetTxRequestTime(txid);
718  // First time requesting this tx
719  if (last_request_time.count() == 0) {
720  process_time = current_time;
721  } else {
722  // Randomize the delay to avoid biasing some peers over others (such as due to
723  // fixed ordering of peer processing in ThreadMessageHandler)
724  process_time = last_request_time + GETDATA_TX_INTERVAL + GetRandMicros(MAX_GETDATA_RANDOM_DELAY);
725  }
726 
727  // We delay processing announcements from inbound peers
728  if (use_inbound_delay) process_time += INBOUND_PEER_TX_DELAY;
729 
730  return process_time;
731 }
732 
733 void RequestTx(CNodeState* state, const uint256& txid, std::chrono::microseconds current_time) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
734 {
735  CNodeState::TxDownloadState& peer_download_state = state->m_tx_download;
736  if (peer_download_state.m_tx_announced.size() >= MAX_PEER_TX_ANNOUNCEMENTS ||
737  peer_download_state.m_tx_process_time.size() >= MAX_PEER_TX_ANNOUNCEMENTS ||
738  peer_download_state.m_tx_announced.count(txid)) {
739  // Too many queued announcements from this peer, or we already have
740  // this announcement
741  return;
742  }
743  peer_download_state.m_tx_announced.insert(txid);
744 
745  // Calculate the time to try requesting this transaction. Use
746  // fPreferredDownload as a proxy for outbound peers.
747  const auto process_time = CalculateTxGetDataTime(txid, current_time, !state->fPreferredDownload);
748 
749  peer_download_state.m_tx_process_time.emplace(process_time, txid);
750 }
751 
752 } // namespace
753 
754 // This function is used for testing the stale tip eviction logic, see
755 // denialofservice_tests.cpp
756 void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds)
757 {
758  LOCK(cs_main);
759  CNodeState *state = State(node);
760  if (state) state->m_last_block_announcement = time_in_seconds;
761 }
762 
763 // Returns true for outbound peers, excluding manual connections, feelers, and
764 // one-shots.
765 static bool IsOutboundDisconnectionCandidate(const CNode *node)
766 {
767  return !(node->fInbound || node->m_manual_connection || node->fFeeler || node->fOneShot);
768 }
769 
770 void PeerLogicValidation::InitializeNode(CNode *pnode) {
771  CAddress addr = pnode->addr;
772  std::string addrName = pnode->GetAddrName();
773  NodeId nodeid = pnode->GetId();
774  {
775  LOCK(cs_main);
776  mapNodeState.emplace_hint(mapNodeState.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(addr, std::move(addrName), pnode->fInbound, pnode->m_manual_connection));
777  }
778  if(!pnode->fInbound)
779  PushNodeVersion(pnode, connman, GetTime());
780 }
781 
782 void PeerLogicValidation::FinalizeNode(NodeId nodeid, bool& fUpdateConnectionTime) {
783  fUpdateConnectionTime = false;
784  LOCK(cs_main);
785  CNodeState *state = State(nodeid);
786  assert(state != nullptr);
787 
788  if (state->fSyncStarted)
789  nSyncStarted--;
790 
791  if (state->nMisbehavior == 0 && state->fCurrentlyConnected) {
792  fUpdateConnectionTime = true;
793  }
794 
795  for (const QueuedBlock& entry : state->vBlocksInFlight) {
796  mapBlocksInFlight.erase(entry.hash);
797  }
798  EraseOrphansFor(nodeid);
799  nPreferredDownload -= state->fPreferredDownload;
800  nPeersWithValidatedDownloads -= (state->nBlocksInFlightValidHeaders != 0);
801  assert(nPeersWithValidatedDownloads >= 0);
802  g_outbound_peers_with_protect_from_disconnect -= state->m_chain_sync.m_protect;
803  assert(g_outbound_peers_with_protect_from_disconnect >= 0);
804 
805  mapNodeState.erase(nodeid);
806 
807  if (mapNodeState.empty()) {
808  // Do a consistency check after the last peer is removed.
809  assert(mapBlocksInFlight.empty());
810  assert(nPreferredDownload == 0);
811  assert(nPeersWithValidatedDownloads == 0);
812  assert(g_outbound_peers_with_protect_from_disconnect == 0);
813  }
814  LogPrint(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid);
815 }
816 
817 bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) {
818  LOCK(cs_main);
819  CNodeState *state = State(nodeid);
820  if (state == nullptr)
821  return false;
822  stats.nMisbehavior = state->nMisbehavior;
823  stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1;
824  stats.nCommonHeight = state->pindexLastCommonBlock ? state->pindexLastCommonBlock->nHeight : -1;
825  for (const QueuedBlock& queue : state->vBlocksInFlight) {
826  if (queue.pindex)
827  stats.vHeightInFlight.push_back(queue.pindex->nHeight);
828  }
829  return true;
830 }
831 
833 //
834 // mapOrphanTransactions
835 //
836 
837 static void AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
838 {
839  size_t max_extra_txn = gArgs.GetArg("-blockreconstructionextratxn", DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN);
840  if (max_extra_txn <= 0)
841  return;
842  if (!vExtraTxnForCompact.size())
843  vExtraTxnForCompact.resize(max_extra_txn);
844  vExtraTxnForCompact[vExtraTxnForCompactIt] = std::make_pair(tx->GetWitnessHash(), tx);
845  vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % max_extra_txn;
846 }
847 
848 bool AddOrphanTx(const CTransactionRef& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
849 {
850  const uint256& hash = tx->GetHash();
851  if (mapOrphanTransactions.count(hash))
852  return false;
853 
854  // Ignore big transactions, to avoid a
855  // send-big-orphans memory exhaustion attack. If a peer has a legitimate
856  // large transaction with a missing parent then we assume
857  // it will rebroadcast it later, after the parent transaction(s)
858  // have been mined or received.
859  // 100 orphans, each of which is at most 100,000 bytes big is
860  // at most 10 megabytes of orphans and somewhat more in the by-prev index (in the worst case):
861  unsigned int sz = GetTransactionWeight(*tx);
862  if (sz > MAX_STANDARD_TX_WEIGHT)
863  {
864  LogPrint(BCLog::MEMPOOL, "ignoring large orphan tx (size: %u, hash: %s)\n", sz, hash.ToString());
865  return false;
866  }
867 
868  auto ret = mapOrphanTransactions.emplace(hash, COrphanTx{tx, peer, GetTime() + ORPHAN_TX_EXPIRE_TIME, g_orphan_list.size()});
869  assert(ret.second);
870  g_orphan_list.push_back(ret.first);
871  for (const CTxIn& txin : tx->vin) {
872  mapOrphanTransactionsByPrev[txin.prevout].insert(ret.first);
873  }
874 
875  AddToCompactExtraTransactions(tx);
876 
877  LogPrint(BCLog::MEMPOOL, "stored orphan tx %s (mapsz %u outsz %u)\n", hash.ToString(),
878  mapOrphanTransactions.size(), mapOrphanTransactionsByPrev.size());
879  return true;
880 }
881 
882 int static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
883 {
884  std::map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.find(hash);
885  if (it == mapOrphanTransactions.end())
886  return 0;
887  for (const CTxIn& txin : it->second.tx->vin)
888  {
889  auto itPrev = mapOrphanTransactionsByPrev.find(txin.prevout);
890  if (itPrev == mapOrphanTransactionsByPrev.end())
891  continue;
892  itPrev->second.erase(it);
893  if (itPrev->second.empty())
894  mapOrphanTransactionsByPrev.erase(itPrev);
895  }
896 
897  size_t old_pos = it->second.list_pos;
898  assert(g_orphan_list[old_pos] == it);
899  if (old_pos + 1 != g_orphan_list.size()) {
900  // Unless we're deleting the last entry in g_orphan_list, move the last
901  // entry to the position we're deleting.
902  auto it_last = g_orphan_list.back();
903  g_orphan_list[old_pos] = it_last;
904  it_last->second.list_pos = old_pos;
905  }
906  g_orphan_list.pop_back();
907 
908  mapOrphanTransactions.erase(it);
909  return 1;
910 }
911 
912 void EraseOrphansFor(NodeId peer)
913 {
914  LOCK(g_cs_orphans);
915  int nErased = 0;
916  std::map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
917  while (iter != mapOrphanTransactions.end())
918  {
919  std::map<uint256, COrphanTx>::iterator maybeErase = iter++; // increment to avoid iterator becoming invalid
920  if (maybeErase->second.fromPeer == peer)
921  {
922  nErased += EraseOrphanTx(maybeErase->second.tx->GetHash());
923  }
924  }
925  if (nErased > 0) LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx from peer=%d\n", nErased, peer);
926 }
927 
928 
929 unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans)
930 {
931  LOCK(g_cs_orphans);
932 
933  unsigned int nEvicted = 0;
934  static int64_t nNextSweep;
935  int64_t nNow = GetTime();
936  if (nNextSweep <= nNow) {
937  // Sweep out expired orphan pool entries:
938  int nErased = 0;
939  int64_t nMinExpTime = nNow + ORPHAN_TX_EXPIRE_TIME - ORPHAN_TX_EXPIRE_INTERVAL;
940  std::map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
941  while (iter != mapOrphanTransactions.end())
942  {
943  std::map<uint256, COrphanTx>::iterator maybeErase = iter++;
944  if (maybeErase->second.nTimeExpire <= nNow) {
945  nErased += EraseOrphanTx(maybeErase->second.tx->GetHash());
946  } else {
947  nMinExpTime = std::min(maybeErase->second.nTimeExpire, nMinExpTime);
948  }
949  }
950  // Sweep again 5 minutes after the next entry that expires in order to batch the linear scan.
951  nNextSweep = nMinExpTime + ORPHAN_TX_EXPIRE_INTERVAL;
952  if (nErased > 0) LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx due to expiration\n", nErased);
953  }
954  FastRandomContext rng;
955  while (mapOrphanTransactions.size() > nMaxOrphans)
956  {
957  // Evict a random orphan:
958  size_t randompos = rng.randrange(g_orphan_list.size());
959  EraseOrphanTx(g_orphan_list[randompos]->first);
960  ++nEvicted;
961  }
962  return nEvicted;
963 }
964 
968 void Misbehaving(NodeId pnode, int howmuch, const std::string& message) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
969 {
970  if (howmuch == 0)
971  return;
972 
973  CNodeState *state = State(pnode);
974  if (state == nullptr)
975  return;
976 
977  state->nMisbehavior += howmuch;
978  int banscore = gArgs.GetArg("-banscore", DEFAULT_BANSCORE_THRESHOLD);
979  std::string message_prefixed = message.empty() ? "" : (": " + message);
980  if (state->nMisbehavior >= banscore && state->nMisbehavior - howmuch < banscore)
981  {
982  LogPrint(BCLog::NET, "%s: %s peer=%d (%d -> %d) BAN THRESHOLD EXCEEDED%s\n", __func__, state->name, pnode, state->nMisbehavior-howmuch, state->nMisbehavior, message_prefixed);
983  state->fShouldBan = true;
984  } else
985  LogPrint(BCLog::NET, "%s: %s peer=%d (%d -> %d)%s\n", __func__, state->name, pnode, state->nMisbehavior-howmuch, state->nMisbehavior, message_prefixed);
986 }
987 
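995 /** Potentially mark a node as misbehaving based on the contents of a BlockValidationState object.
996  * @param via_compact_block whether the block was received via a compact block message.
997  * @return True if the peer was punished. */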
998 static bool MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState& state, bool via_compact_block, const std::string& message = "") {
999  switch (state.GetResult()) {
1000  case BlockValidationResult::BLOCK_RESULT_UNSET:
1001  break;
1002  // The node is providing invalid data:
1003  case BlockValidationResult::BLOCK_CONSENSUS:
1004  case BlockValidationResult::BLOCK_MUTATED:
1005  if (!via_compact_block) {
1006  LOCK(cs_main);
1007  Misbehaving(nodeid, 100, message);
1008  return true;
1009  }
1010  break;
1011  case BlockValidationResult::BLOCK_CACHED_INVALID:
1012  {
1013  LOCK(cs_main);
1014  CNodeState *node_state = State(nodeid);
1015  if (node_state == nullptr) {
1016  break;
1017  }
1018 
1019  // Ban outbound (but not inbound) peers if on an invalid chain.
1020  // Exempt HB compact block peers and manual connections.
1021  if (!via_compact_block && !node_state->m_is_inbound && !node_state->m_is_manual_connection) {
1022  Misbehaving(nodeid, 100, message);
1023  return true;
1024  }
1025  break;
1026  }
1027  case BlockValidationResult::BLOCK_INVALID_HEADER:
1028  case BlockValidationResult::BLOCK_CHECKPOINT:
1029  case BlockValidationResult::BLOCK_INVALID_PREV:
1030  {
1031  LOCK(cs_main);
1032  Misbehaving(nodeid, 100, message);
1033  }
1034  return true;
1035  // Conflicting (but not necessarily invalid) data or different policy:
1036  case BlockValidationResult::BLOCK_MISSING_PREV:
1037  {
1038  // TODO: Handle this much more gracefully (10 DoS points is super arbitrary)
1039  LOCK(cs_main);
1040  Misbehaving(nodeid, 10, message);
1041  }
1042  return true;
1043  case BlockValidationResult::BLOCK_RECENT_CONSENSUS_CHANGE:
1044  case BlockValidationResult::BLOCK_TIME_FUTURE:
1045  break;
1046  }
1047  if (message != "") {
1048  LogPrint(BCLog::NET, "peer=%d: %s\n", nodeid, message);
1049  }
1050  return false;
1051 }
1052 
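1056 /** Potentially mark a node as misbehaving based on the contents of a TxValidationState object.
1057  * @return True if the peer was punished. */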
1058 static bool MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state, const std::string& message = "")
1059 {
1060  switch (state.GetResult()) {
1061  case TxValidationResult::TX_RESULT_UNSET:
1062  break;
1063  // The node is providing invalid data:
1064  case TxValidationResult::TX_CONSENSUS:
1065  {
1066  LOCK(cs_main);
1067  Misbehaving(nodeid, 100, message);
1068  return true;
1069  }
1070  // Conflicting (but not necessarily invalid) data or different policy:
1071  case TxValidationResult::TX_RECENT_CONSENSUS_CHANGE:
1072  case TxValidationResult::TX_NOT_STANDARD:
1073  case TxValidationResult::TX_MISSING_INPUTS:
1074  case TxValidationResult::TX_PREMATURE_SPEND:
1075  case TxValidationResult::TX_WITNESS_MUTATED:
1076  case TxValidationResult::TX_CONFLICT:
1077  case TxValidationResult::TX_MEMPOOL_POLICY:
1078  break;
1079  }
1080  if (message != "") {
1081  LogPrint(BCLog::NET, "peer=%d: %s\n", nodeid, message);
1082  }
1083  return false;
1084 }
1085 
1086 
1088 //
1089 // blockchain -> download logic notification
1090 //
1091 
1092 // To prevent fingerprinting attacks, only send blocks/headers outside of the
1093 // active chain if they are no more than a month older (both in time, and in
1094 // best equivalent proof of work) than the best header chain we know about and
1095 // we fully-validated them at some point.
1096 static bool BlockRequestAllowed(const CBlockIndex* pindex, const Consensus::Params& consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
1097 {
1098  AssertLockHeld(cs_main);
1099  if (::ChainActive().Contains(pindex)) return true;
1100  return pindex->IsValid(BLOCK_VALID_SCRIPTS) && (pindexBestHeader != nullptr) &&
1101  (pindexBestHeader->GetBlockTime() - pindex->GetBlockTime() < STALE_RELAY_AGE_LIMIT) &&
1102  (GetBlockProofEquivalentTime(*pindexBestHeader, *pindex, *pindexBestHeader, consensusParams) < STALE_RELAY_AGE_LIMIT);
1103 }
1104 
1105 PeerLogicValidation::PeerLogicValidation(CConnman* connmanIn, BanMan* banman, CScheduler& scheduler, CTxMemPool& pool)
1106  : connman(connmanIn),
1107  m_banman(banman),
1108  m_mempool(pool),
1109  m_stale_tip_check_time(0)
1110 {
1111  // Initialize global variables that cannot be constructed at startup.
1112  recentRejects.reset(new CRollingBloomFilter(120000, 0.000001));
1113 
1114  // Blocks don't typically have more than 4000 transactions, so this should
1115  // be at least six blocks (~1 hr) worth of transactions that we can store.
1116  // If the number of transactions appearing in a block goes up, or if we are
1117  // seeing getdata requests more than an hour after initial announcement, we
1118  // can increase this number.
1119  // The false positive rate of 1/1M should come out to less than 1
1120  // transaction per day that would be inadvertently ignored (which is the
1121  // same probability that we have in the reject filter).
1122  g_recent_confirmed_transactions.reset(new CRollingBloomFilter(24000, 0.000001));
1123 
1124  const Consensus::Params& consensusParams = Params().GetConsensus();
1125  // Stale tip checking and peer eviction are on two different timers, but we
1126  // don't want them to get out of sync due to drift in the scheduler, so we
1127  // combine them in one function and schedule at the quicker (peer-eviction)
1128  // timer.
1129  static_assert(EXTRA_PEER_CHECK_INTERVAL < STALE_CHECK_INTERVAL, "peer eviction timer should be less than stale tip check timer");
1130  scheduler.scheduleEvery([this, consensusParams] { this->CheckForStaleTipAndEvictPeers(consensusParams); }, std::chrono::seconds{EXTRA_PEER_CHECK_INTERVAL});
1131 }
1132 
1137 void PeerLogicValidation::BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindex)
1138 {
1139  {
1140  LOCK(g_cs_orphans);
1141 
1142  std::vector<uint256> vOrphanErase;
1143 
1144  for (const CTransactionRef& ptx : pblock->vtx) {
1145  const CTransaction& tx = *ptx;
1146 
1147  // Which orphan pool entries must we evict?
1148  for (const auto& txin : tx.vin) {
1149  auto itByPrev = mapOrphanTransactionsByPrev.find(txin.prevout);
1150  if (itByPrev == mapOrphanTransactionsByPrev.end()) continue;
1151  for (auto mi = itByPrev->second.begin(); mi != itByPrev->second.end(); ++mi) {
1152  const CTransaction& orphanTx = *(*mi)->second.tx;
1153  const uint256& orphanHash = orphanTx.GetHash();
1154  vOrphanErase.push_back(orphanHash);
1155  }
1156  }
1157  }
1158 
1159  // Erase orphan transactions included or precluded by this block
1160  if (vOrphanErase.size()) {
1161  int nErased = 0;
1162  for (const uint256& orphanHash : vOrphanErase) {
1163  nErased += EraseOrphanTx(orphanHash);
1164  }
1165  LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx included or conflicted by block\n", nErased);
1166  }
1167 
1168  g_last_tip_update = GetTime();
1169  }
1170  {
1171  LOCK(g_cs_recent_confirmed_transactions);
1172  for (const auto& ptx : pblock->vtx) {
1173  g_recent_confirmed_transactions->insert(ptx->GetHash());
1174  }
1175  }
1176 }
1177 
1178 void PeerLogicValidation::BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex)
1179 {
1180  // To avoid relay problems with transactions that were previously
1181  // confirmed, clear our filter of recently confirmed transactions whenever
1182  // there's a reorg.
1183  // This means that in a 1-block reorg (where 1 block is disconnected and
1184  // then another block reconnected), our filter will drop to having only one
1185  // block's worth of transactions in it, but that should be fine, since
1186  // presumably the most common case of relaying a confirmed transaction
1187  // should be just after a new block containing it is found.
1188  LOCK(g_cs_recent_confirmed_transactions);
1189  g_recent_confirmed_transactions->reset();
1190 }
1191 
1192 // All of the following cache a recent block, and are protected by cs_most_recent_block
1193 static RecursiveMutex cs_most_recent_block;
1194 static std::shared_ptr<const CBlock> most_recent_block GUARDED_BY(cs_most_recent_block);
1195 static std::shared_ptr<const CBlockHeaderAndShortTxIDs> most_recent_compact_block GUARDED_BY(cs_most_recent_block);
1196 static uint256 most_recent_block_hash GUARDED_BY(cs_most_recent_block);
1197 static bool fWitnessesPresentInMostRecentCompactBlock GUARDED_BY(cs_most_recent_block);
1198 
1203 void PeerLogicValidation::NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& pblock) {
1204  std::shared_ptr<const CBlockHeaderAndShortTxIDs> pcmpctblock = std::make_shared<const CBlockHeaderAndShortTxIDs> (*pblock, true);
1205  const CNetMsgMaker msgMaker(PROTOCOL_VERSION);
1206 
1207  LOCK(cs_main);
1208 
1209  static int nHighestFastAnnounce = 0;
1210  if (pindex->nHeight <= nHighestFastAnnounce)
1211  return;
1212  nHighestFastAnnounce = pindex->nHeight;
1213 
1214  bool fWitnessEnabled = IsWitnessEnabled(pindex->pprev, Params().GetConsensus());
1215  uint256 hashBlock(pblock->GetHash());
1216 
1217  {
1218  LOCK(cs_most_recent_block);
1219  most_recent_block_hash = hashBlock;
1220  most_recent_block = pblock;
1221  most_recent_compact_block = pcmpctblock;
1222  fWitnessesPresentInMostRecentCompactBlock = fWitnessEnabled;
1223  }
1224 
1225  connman->ForEachNode([this, &pcmpctblock, pindex, &msgMaker, fWitnessEnabled, &hashBlock](CNode* pnode) {
1226  AssertLockHeld(cs_main);
1227 
1228  // TODO: Avoid the repeated-serialization here
1229  if (pnode->nVersion < INVALID_CB_NO_BAN_VERSION || pnode->fDisconnect)
1230  return;
1231  ProcessBlockAvailability(pnode->GetId());
1232  CNodeState &state = *State(pnode->GetId());
1233  // If the peer has, or we announced to them the previous block already,
1234  // but we don't think they have this one, go ahead and announce it
1235  if (state.fPreferHeaderAndIDs && (!fWitnessEnabled || state.fWantsCmpctWitness) &&
1236  !PeerHasHeader(&state, pindex) && PeerHasHeader(&state, pindex->pprev)) {
1237 
1238  LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", "PeerLogicValidation::NewPoWValidBlock",
1239  hashBlock.ToString(), pnode->GetId());
1240  connman->PushMessage(pnode, msgMaker.Make(NetMsgType::CMPCTBLOCK, *pcmpctblock));
1241  state.pindexBestHeaderSent = pindex;
1242  }
1243  });
1244 }
1245 
1250 void PeerLogicValidation::UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) {
1251  const int nNewHeight = pindexNew->nHeight;
1252  connman->SetBestHeight(nNewHeight);
1253 
1254  SetServiceFlagsIBDCache(!fInitialDownload);
1255  if (!fInitialDownload) {
1256  // Find the hashes of all blocks that weren't previously in the best chain.
1257  std::vector<uint256> vHashes;
1258  const CBlockIndex *pindexToAnnounce = pindexNew;
1259  while (pindexToAnnounce != pindexFork) {
1260  vHashes.push_back(pindexToAnnounce->GetBlockHash());
1261  pindexToAnnounce = pindexToAnnounce->pprev;
1262  if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) {
1263  // Limit announcements in case of a huge reorganization.
1264  // Rely on the peer's synchronization mechanism in that case.
1265  break;
1266  }
1267  }
1268  // Relay inventory, but don't relay old inventory during initial block download.
1269  connman->ForEachNode([nNewHeight, &vHashes](CNode* pnode) {
1270  if (nNewHeight > (pnode->nStartingHeight != -1 ? pnode->nStartingHeight - 2000 : 0)) {
1271  for (const uint256& hash : reverse_iterate(vHashes)) {
1272  pnode->PushBlockHash(hash);
1273  }
1274  }
1275  });
1276  connman->WakeMessageHandler();
1277  }
1278 }
1279 
1284 void PeerLogicValidation::BlockChecked(const CBlock& block, const BlockValidationState& state) {
1285  LOCK(cs_main);
1286 
1287  const uint256 hash(block.GetHash());
1288  std::map<uint256, std::pair<NodeId, bool>>::iterator it = mapBlockSource.find(hash);
1289 
1290  // If the block failed validation, we know where it came from and we're still connected
1291  // to that peer, maybe punish.
1292  if (state.IsInvalid() &&
1293  it != mapBlockSource.end() &&
1294  State(it->second.first)) {
1295  MaybePunishNodeForBlock(/*nodeid=*/ it->second.first, state, /*via_compact_block=*/ !it->second.second);
1296  }
1297  // Check that:
1298  // 1. The block is valid
1299  // 2. We're not in initial block download
1300  // 3. This is currently the best block we're aware of. We haven't updated
1301  // the tip yet so we have no way to check this directly here. Instead we
1302  // just check that there are currently no other blocks in flight.
1303  else if (state.IsValid() &&
1304  !::ChainstateActive().IsInitialBlockDownload() &&
1305  mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) {
1306  if (it != mapBlockSource.end()) {
1307  MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first, connman);
1308  }
1309  }
1310  if (it != mapBlockSource.end())
1311  mapBlockSource.erase(it);
1312 }
1313 
1315 //
1316 // Messages
1317 //
1318 
1319 
1320 static bool AlreadyHave(const CInv& inv, const CTxMemPool& mempool) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
1321 {
1322  switch (inv.type)
1323  {
1324  case MSG_TX:
1325  case MSG_WITNESS_TX:
1326  {
1327  assert(recentRejects);
1328  if (::ChainActive().Tip()->GetBlockHash() != hashRecentRejectsChainTip)
1329  {
1330  // If the chain tip has changed previously rejected transactions
1331  // might be now valid, e.g. due to a nLockTime'd tx becoming valid,
1332  // or a double-spend. Reset the rejects filter and give those
1333  // txs a second chance.
1334  hashRecentRejectsChainTip = ::ChainActive().Tip()->GetBlockHash();
1335  recentRejects->reset();
1336  }
1337 
1338  {
1339  LOCK(g_cs_orphans);
1340  if (mapOrphanTransactions.count(inv.hash)) return true;
1341  }
1342 
1343  {
1344  LOCK(g_cs_recent_confirmed_transactions);
1345  if (g_recent_confirmed_transactions->contains(inv.hash)) return true;
1346  }
1347 
1348  return recentRejects->contains(inv.hash) ||
1349  mempool.exists(inv.hash);
1350  }
1351  case MSG_BLOCK:
1352  case MSG_WITNESS_BLOCK:
1353  return LookupBlockIndex(inv.hash) != nullptr;
1354  }
1355  // Don't know what it is, just say we already got one
1356  return true;
1357 }
1358 
1359 void RelayTransaction(const uint256& txid, const CConnman& connman)
1360 {
1361  CInv inv(MSG_TX, txid);
1362  connman.ForEachNode([&inv](CNode* pnode)
1363  {
1364  pnode->PushInventory(inv);
1365  });
1366 }
1367 
1368 static void RelayAddress(const CAddress& addr, bool fReachable, CConnman* connman)
1369 {
1370  unsigned int nRelayNodes = fReachable ? 2 : 1; // limited relaying of addresses outside our network(s)
1371 
1372  // Relay to a limited number of other nodes
1373  // Use deterministic randomness to send to the same nodes for 24 hours
1374  // at a time so the m_addr_knowns of the chosen nodes prevent repeats
1375  uint64_t hashAddr = addr.GetHash();
1376  const CSipHasher hasher = connman->GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY).Write(hashAddr << 32).Write((GetTime() + hashAddr) / (24*60*60));
1377  FastRandomContext insecure_rand;
1378 
1379  std::array<std::pair<uint64_t, CNode*>,2> best{{{0, nullptr}, {0, nullptr}}};
1380  assert(nRelayNodes <= best.size());
1381 
1382  auto sortfunc = [&best, &hasher, nRelayNodes](CNode* pnode) {
1383  if (pnode->nVersion >= CADDR_TIME_VERSION && pnode->IsAddrRelayPeer()) {
1384  uint64_t hashKey = CSipHasher(hasher).Write(pnode->GetId()).Finalize();
1385  for (unsigned int i = 0; i < nRelayNodes; i++) {
1386  if (hashKey > best[i].first) {
1387  std::copy(best.begin() + i, best.begin() + nRelayNodes - 1, best.begin() + i + 1);
1388  best[i] = std::make_pair(hashKey, pnode);
1389  break;
1390  }
1391  }
1392  }
1393  };
1394 
1395  auto pushfunc = [&addr, &best, nRelayNodes, &insecure_rand] {
1396  for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) {
1397  best[i].second->PushAddress(addr, insecure_rand);
1398  }
1399  };
1400 
1401  connman->ForEachNodeThen(std::move(sortfunc), std::move(pushfunc));
1402 }
1403 
1404 void static ProcessGetBlockData(CNode* pfrom, const CChainParams& chainparams, const CInv& inv, CConnman* connman)
1405 {
1406  bool send = false;
1407  std::shared_ptr<const CBlock> a_recent_block;
1408  std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block;
1409  bool fWitnessesPresentInARecentCompactBlock;
1410  const Consensus::Params& consensusParams = chainparams.GetConsensus();
1411  {
1412  LOCK(cs_most_recent_block);
1413  a_recent_block = most_recent_block;
1414  a_recent_compact_block = most_recent_compact_block;
1415  fWitnessesPresentInARecentCompactBlock = fWitnessesPresentInMostRecentCompactBlock;
1416  }
1417 
1418  bool need_activate_chain = false;
1419  {
1420  LOCK(cs_main);
1421  const CBlockIndex* pindex = LookupBlockIndex(inv.hash);
1422  if (pindex) {
1423  if (pindex->HaveTxsDownloaded() && !pindex->IsValid(BLOCK_VALID_SCRIPTS) &&
1424  pindex->IsValid(BLOCK_VALID_TREE)) {
1425  // If we have the block and all of its parents, but have not yet validated it,
1426  // we might be in the middle of connecting it (ie in the unlock of cs_main
1427  // before ActivateBestChain but after AcceptBlock).
1428  // In this case, we need to run ActivateBestChain prior to checking the relay
1429  // conditions below.
1430  need_activate_chain = true;
1431  }
1432  }
1433  } // release cs_main before calling ActivateBestChain
1434  if (need_activate_chain) {
1435  BlockValidationState state;
1436  if (!ActivateBestChain(state, Params(), a_recent_block)) {
1437  LogPrint(BCLog::NET, "failed to activate chain (%s)\n", state.ToString());
1438  }
1439  }
1440 
1441  LOCK(cs_main);
1442  const CBlockIndex* pindex = LookupBlockIndex(inv.hash);
1443  if (pindex) {
1444  send = BlockRequestAllowed(pindex, consensusParams);
1445  if (!send) {
1446  LogPrint(BCLog::NET, "%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom->GetId());
1447  }
1448  }
1449  const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
1450  // disconnect node in case we have reached the outbound limit for serving historical blocks
1451  // never disconnect whitelisted nodes
1452  if (send && connman->OutboundTargetReached(true) && ( ((pindexBestHeader != nullptr) && (pindexBestHeader->GetBlockTime() - pindex->GetBlockTime() > HISTORICAL_BLOCK_AGE)) || inv.type == MSG_FILTERED_BLOCK) && !pfrom->HasPermission(PF_NOBAN))
1453  {
1454  LogPrint(BCLog::NET, "historical block serving limit reached, disconnect peer=%d\n", pfrom->GetId());
1455 
1456  //disconnect node
1457  pfrom->fDisconnect = true;
1458  send = false;
1459  }
1460  // Avoid leaking prune-height by never sending blocks below the NODE_NETWORK_LIMITED threshold
1461  if (send && !pfrom->HasPermission(PF_NOBAN) && (
1462  (((pfrom->GetLocalServices() & NODE_NETWORK_LIMITED) == NODE_NETWORK_LIMITED) && ((pfrom->GetLocalServices() & NODE_NETWORK) != NODE_NETWORK) && (::ChainActive().Tip()->nHeight - pindex->nHeight > (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2 /* add two blocks buffer extension for possible races */) )
1463  )) {
1464  LogPrint(BCLog::NET, "Ignore block request below NODE_NETWORK_LIMITED threshold from peer=%d\n", pfrom->GetId());
1465 
1466  //disconnect node and prevent it from stalling (would otherwise wait for the missing block)
1467  pfrom->fDisconnect = true;
1468  send = false;
1469  }
1470  // Pruned nodes may have deleted the block, so check whether
1471  // it's available before trying to send.
1472  if (send && (pindex->nStatus & BLOCK_HAVE_DATA))
1473  {
1474  std::shared_ptr<const CBlock> pblock;
1475  if (a_recent_block && a_recent_block->GetHash() == pindex->GetBlockHash()) {
1476  pblock = a_recent_block;
1477  } else if (inv.type == MSG_WITNESS_BLOCK) {
1478  // Fast-path: in this case it is possible to serve the block directly from disk,
1479  // as the network format matches the format on disk
1480  std::vector<uint8_t> block_data;
1481  if (!ReadRawBlockFromDisk(block_data, pindex, chainparams.MessageStart())) {
1482  assert(!"cannot load block from disk");
1483  }
1484  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::BLOCK, MakeSpan(block_data)));
1485  // Don't set pblock as we've sent the block
1486  } else {
1487  // Send block from disk
1488  std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>();
1489  if (!ReadBlockFromDisk(*pblockRead, pindex, consensusParams))
1490  assert(!"cannot load block from disk");
1491  pblock = pblockRead;
1492  }
1493  if (pblock) {
1494  if (inv.type == MSG_BLOCK)
1495  connman->PushMessage(pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, *pblock));
1496  else if (inv.type == MSG_WITNESS_BLOCK)
1497  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::BLOCK, *pblock));
1498  else if (inv.type == MSG_FILTERED_BLOCK)
1499  {
1500  bool sendMerkleBlock = false;
1501  CMerkleBlock merkleBlock;
1502  if (pfrom->m_tx_relay != nullptr) {
1503  LOCK(pfrom->m_tx_relay->cs_filter);
1504  if (pfrom->m_tx_relay->pfilter) {
1505  sendMerkleBlock = true;
1506  merkleBlock = CMerkleBlock(*pblock, *pfrom->m_tx_relay->pfilter);
1507  }
1508  }
1509  if (sendMerkleBlock) {
1510  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::MERKLEBLOCK, merkleBlock));
1511  // CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
1512  // This avoids hurting performance by pointlessly requiring a round-trip
1513  // Note that there is currently no way for a node to request any single transactions we didn't send here -
1514  // they must either disconnect and retry or request the full block.
1515  // Thus, the protocol spec allows us to provide duplicate txn here,
1516  // however we MUST always provide at least what the remote peer needs
1517  typedef std::pair<unsigned int, uint256> PairType;
1518  for (PairType& pair : merkleBlock.vMatchedTxn)
1519  connman->PushMessage(pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::TX, *pblock->vtx[pair.first]));
1520  }
1521  // else
1522  // no response
1523  }
1524  else if (inv.type == MSG_CMPCT_BLOCK)
1525  {
1526  // If a peer is asking for old blocks, we're almost guaranteed
1527  // they won't have a useful mempool to match against a compact block,
1528  // and we don't feel like constructing the object for them, so
1529  // instead we respond with the full, non-compact block.
1530  bool fPeerWantsWitness = State(pfrom->GetId())->fWantsCmpctWitness;
1531  int nSendFlags = fPeerWantsWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
1532  if (CanDirectFetch(consensusParams) && pindex->nHeight >= ::ChainActive().Height() - MAX_CMPCTBLOCK_DEPTH) {
1533  if ((fPeerWantsWitness || !fWitnessesPresentInARecentCompactBlock) && a_recent_compact_block && a_recent_compact_block->header.GetHash() == pindex->GetBlockHash()) {
1534  connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, *a_recent_compact_block));
1535  } else {
1536  CBlockHeaderAndShortTxIDs cmpctblock(*pblock, fPeerWantsWitness);
1537  connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
1538  }
1539  } else {
1540  connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCK, *pblock));
1541  }
1542  }
1543  }
1544 
1545  // Trigger the peer node to send a getblocks request for the next batch of inventory
1546  if (inv.hash == pfrom->hashContinue)
1547  {
1548  // Bypass PushInventory, this must send even if redundant,
1549  // and we want it right after the last block so they don't
1550  // wait for other stuff first.
1551  std::vector<CInv> vInv;
1552  vInv.push_back(CInv(MSG_BLOCK, ::ChainActive().Tip()->GetBlockHash()));
1553  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::INV, vInv));
1554  pfrom->hashContinue.SetNull();
1555  }
1556  }
1557 }
1558 
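// ProcessGetData drains the getdata requests queued in pfrom->vRecvGetData:
// transaction requests are answered from mapRelay or (subject to the privacy
// checks below) the mempool, at most one block-type request is handed to
// ProcessGetBlockData per call, and transactions we cannot serve are reported
// back to the peer in a single NOTFOUND message.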
1559 void static ProcessGetData(CNode* pfrom, const CChainParams& chainparams, CConnman* connman, const CTxMemPool& mempool, const std::atomic<bool>& interruptMsgProc) LOCKS_EXCLUDED(cs_main)
1560 {
1561  AssertLockNotHeld(cs_main);
1562 
1563  std::deque<CInv>::iterator it = pfrom->vRecvGetData.begin();
1564  std::vector<CInv> vNotFound;
1565  const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
1566 
1567  // Note that if we receive a getdata for a MSG_TX or MSG_WITNESS_TX from a
1568  // block-relay-only outbound peer, we will stop processing further getdata
1569  // messages from this peer (likely resulting in our peer eventually
1570  // disconnecting us).
1571  if (pfrom->m_tx_relay != nullptr) {
1572  // mempool entries added before this time have likely expired from mapRelay
1573  const std::chrono::seconds longlived_mempool_time = GetTime<std::chrono::seconds>() - RELAY_TX_CACHE_TIME;
1574  const std::chrono::seconds mempool_req = pfrom->m_tx_relay->m_last_mempool_req.load();
1575 
1576  LOCK(cs_main);
1577 
1578  while (it != pfrom->vRecvGetData.end() && (it->type == MSG_TX || it->type == MSG_WITNESS_TX)) {
1579  if (interruptMsgProc)
1580  return;
1581  // Don't bother if send buffer is too full to respond anyway
1582  if (pfrom->fPauseSend)
1583  break;
1584 
1585  const CInv &inv = *it;
1586  it++;
1587 
1588  // Send stream from relay memory
1589  bool push = false;
1590  auto mi = mapRelay.find(inv.hash);
1591  int nSendFlags = (inv.type == MSG_TX ? SERIALIZE_TRANSACTION_NO_WITNESS : 0);
1592  if (mi != mapRelay.end()) {
1593  connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *mi->second));
1594  push = true;
1595  } else {
1596  auto txinfo = mempool.info(inv.hash);
1597  // To protect privacy, do not answer getdata using the mempool when
1598  // that TX couldn't have been INVed in reply to a MEMPOOL request,
1599  // or when it's too recent to have expired from mapRelay.
1600  if (txinfo.tx && (
1601  (mempool_req.count() && txinfo.m_time <= mempool_req)
1602  || (txinfo.m_time <= longlived_mempool_time)))
1603  {
1604  connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *txinfo.tx));
1605  push = true;
1606  }
1607  }
1608  if (!push) {
1609  vNotFound.push_back(inv);
1610  }
1611  }
1612  } // release cs_main
1613 
1614  if (it != pfrom->vRecvGetData.end() && !pfrom->fPauseSend) {
1615  const CInv &inv = *it;
1616  if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK || inv.type == MSG_WITNESS_BLOCK) {
1617  it++;
1618  ProcessGetBlockData(pfrom, chainparams, inv, connman);
1619  }
1620  }
1621 
1622  // Unknown types in the GetData stay in vRecvGetData and block any future
1623  // message from this peer, see vRecvGetData check in ProcessMessages().
1624  // Depending on future p2p changes, we might either drop unknown getdata on
1625  // the floor or disconnect the peer.
1626 
1627  pfrom->vRecvGetData.erase(pfrom->vRecvGetData.begin(), it);
1628 
1629  if (!vNotFound.empty()) {
1630  // Let the peer know that we didn't find what it asked for, so it doesn't
1631  // have to wait around forever.
1632  // SPV clients care about this message: it's needed when they are
1633  // recursively walking the dependencies of relevant unconfirmed
1634  // transactions. SPV clients want to do that because they want to know
1635  // about (and store and rebroadcast and risk analyze) the dependencies
1636  // of transactions relevant to them, without having to download the
1637  // entire memory pool.
1638  // Also, other nodes can use these messages to automatically request a
1639  // transaction from some other peer that announced it, and stop
1640  // waiting for us to respond.
1641  // In normal operation, we often send NOTFOUND messages for parents of
1642  // transactions that we relay; if a peer is missing a parent, they may
1643  // assume we have them and request the parents from us.
1644  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::NOTFOUND, vNotFound));
1645  }
1646 }
1647 
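// GetFetchFlags returns MSG_WITNESS_FLAG when we advertise NODE_WITNESS and the
// peer has signalled witness support, so getdata requests built from it ask for
// the witness-serialized form of transactions and blocks.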
1648 static uint32_t GetFetchFlags(CNode* pfrom) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
1649  uint32_t nFetchFlags = 0;
1650  if ((pfrom->GetLocalServices() & NODE_WITNESS) && State(pfrom->GetId())->fHaveWitness) {
1651  nFetchFlags |= MSG_WITNESS_FLAG;
1652  }
1653  return nFetchFlags;
1654 }
1655 
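// SendBlockTransactions answers a getblocktxn request: the requested indexes are
// copied out of the block into a BLOCKTXN response (an out-of-range index is
// treated as misbehavior), serialized with or without witness data depending on
// the negotiated compact block version.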
1656 inline void static SendBlockTransactions(const CBlock& block, const BlockTransactionsRequest& req, CNode* pfrom, CConnman* connman) {
1657  BlockTransactions resp(req);
1658  for (size_t i = 0; i < req.indexes.size(); i++) {
1659  if (req.indexes[i] >= block.vtx.size()) {
1660  LOCK(cs_main);
1661  Misbehaving(pfrom->GetId(), 100, strprintf("Peer %d sent us a getblocktxn with out-of-bounds tx indices", pfrom->GetId()));
1662  return;
1663  }
1664  resp.txn[i] = block.vtx[req.indexes[i]];
1665  }
1666  LOCK(cs_main);
1667  const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
1668  int nSendFlags = State(pfrom->GetId())->fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
1669  connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCKTXN, resp));
1670 }
1671 
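// ProcessHeadersMessage handles a batch of headers received via a headers or
// cmpctblock message: headers that don't connect trigger a getheaders retry
// (rate-limited by nUnconnectingHeaders), connecting headers are validated and
// recorded, a full-size batch prompts another getheaders, and blocks near our
// tip may be direct-fetched; it also applies the outbound-peer protection and
// insufficient-work disconnection heuristics.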
1672 bool static ProcessHeadersMessage(CNode* pfrom, CConnman* connman, CTxMemPool& mempool, const std::vector<CBlockHeader>& headers, const CChainParams& chainparams, bool via_compact_block)
1673 {
1674  const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
1675  size_t nCount = headers.size();
1676 
1677  if (nCount == 0) {
1678  // Nothing interesting. Stop asking this peer for more headers.
1679  return true;
1680  }
1681 
1682  bool received_new_header = false;
1683  const CBlockIndex *pindexLast = nullptr;
1684  {
1685  LOCK(cs_main);
1686  CNodeState *nodestate = State(pfrom->GetId());
1687 
1688  // If this looks like it could be a block announcement (nCount <
1689  // MAX_BLOCKS_TO_ANNOUNCE), use special logic for handling headers that
1690  // don't connect:
1691  // - Send a getheaders message in response to try to connect the chain.
1692  // - The peer can send up to MAX_UNCONNECTING_HEADERS in a row that
1693  // don't connect before giving DoS points
1694  // - Once a headers message is received that is valid and does connect,
1695  // nUnconnectingHeaders gets reset back to 0.
1696  if (!LookupBlockIndex(headers[0].hashPrevBlock) && nCount < MAX_BLOCKS_TO_ANNOUNCE) {
1697  nodestate->nUnconnectingHeaders++;
1698  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexBestHeader), uint256()));
1699  LogPrint(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n",
1700  headers[0].GetHash().ToString(),
1701  headers[0].hashPrevBlock.ToString(),
1702  pindexBestHeader->nHeight,
1703  pfrom->GetId(), nodestate->nUnconnectingHeaders);
1704  // Set hashLastUnknownBlock for this peer, so that if we
1705  // eventually get the headers - even from a different peer -
1706  // we can use this peer to download.
1707  UpdateBlockAvailability(pfrom->GetId(), headers.back().GetHash());
1708 
1709  if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS == 0) {
1710  Misbehaving(pfrom->GetId(), 20);
1711  }
1712  return true;
1713  }
1714 
1715  uint256 hashLastBlock;
1716  for (const CBlockHeader& header : headers) {
1717  if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) {
1718  Misbehaving(pfrom->GetId(), 20, "non-continuous headers sequence");
1719  return false;
1720  }
1721  hashLastBlock = header.GetHash();
1722  }
1723 
1724  // If we don't have the last header, then they'll have given us
1725  // something new (if these headers are valid).
1726  if (!LookupBlockIndex(hashLastBlock)) {
1727  received_new_header = true;
1728  }
1729  }
1730 
1731  BlockValidationState state;
1732  if (!ProcessNewBlockHeaders(headers, state, chainparams, &pindexLast)) {
1733  if (state.IsInvalid()) {
1734  MaybePunishNodeForBlock(pfrom->GetId(), state, via_compact_block, "invalid header received");
1735  return false;
1736  }
1737  }
1738 
1739  {
1740  LOCK(cs_main);
1741  CNodeState *nodestate = State(pfrom->GetId());
1742  if (nodestate->nUnconnectingHeaders > 0) {
1743  LogPrint(BCLog::NET, "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n", pfrom->GetId(), nodestate->nUnconnectingHeaders);
1744  }
1745  nodestate->nUnconnectingHeaders = 0;
1746 
1747  assert(pindexLast);
1748  UpdateBlockAvailability(pfrom->GetId(), pindexLast->GetBlockHash());
1749 
1750  // From here, pindexBestKnownBlock should be guaranteed to be non-null,
1751  // because it is set in UpdateBlockAvailability. Some nullptr checks
1752  // are still present, however, as belt-and-suspenders.
1753 
1754  if (received_new_header && pindexLast->nChainWork > ::ChainActive().Tip()->nChainWork) {
1755  nodestate->m_last_block_announcement = GetTime();
1756  }
1757 
1758  if (nCount == MAX_HEADERS_RESULTS) {
1759  // Headers message had its maximum size; the peer may have more headers.
1760  // TODO: optimize: if pindexLast is an ancestor of ::ChainActive().Tip or pindexBestHeader, continue
1761  // from there instead.
1762  LogPrint(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom->GetId(), pfrom->nStartingHeight);
1763  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexLast), uint256()));
1764  }
1765 
1766  bool fCanDirectFetch = CanDirectFetch(chainparams.GetConsensus());
1767  // If this set of headers is valid and ends in a block with at least as
1768  // much work as our tip, download as much as possible.
1769  if (fCanDirectFetch && pindexLast->IsValid(BLOCK_VALID_TREE) && ::ChainActive().Tip()->nChainWork <= pindexLast->nChainWork) {
1770  std::vector<const CBlockIndex*> vToFetch;
1771  const CBlockIndex *pindexWalk = pindexLast;
1772  // Calculate all the blocks we'd need to switch to pindexLast, up to a limit.
1773  while (pindexWalk && !::ChainActive().Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
1774  if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
1775  !mapBlocksInFlight.count(pindexWalk->GetBlockHash()) &&
1776  (!IsWitnessEnabled(pindexWalk->pprev, chainparams.GetConsensus()) || State(pfrom->GetId())->fHaveWitness)) {
1777  // We don't have this block, and it's not yet in flight.
1778  vToFetch.push_back(pindexWalk);
1779  }
1780  pindexWalk = pindexWalk->pprev;
1781  }
1782  // If pindexWalk still isn't on our main chain, we're looking at a
1783  // very large reorg at a time we think we're close to caught up to
1784  // the main chain -- this shouldn't really happen. Bail out on the
1785  // direct fetch and rely on parallel download instead.
1786  if (!::ChainActive().Contains(pindexWalk)) {
1787  LogPrint(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
1788  pindexLast->GetBlockHash().ToString(),
1789  pindexLast->nHeight);
1790  } else {
1791  std::vector<CInv> vGetData;
1792  // Download as much as possible, from earliest to latest.
1793  for (const CBlockIndex *pindex : reverse_iterate(vToFetch)) {
1794  if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
1795  // Can't download any more from this peer
1796  break;
1797  }
1798  uint32_t nFetchFlags = GetFetchFlags(pfrom);
1799  vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
1800  MarkBlockAsInFlight(mempool, pfrom->GetId(), pindex->GetBlockHash(), pindex);
1801  LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
1802  pindex->GetBlockHash().ToString(), pfrom->GetId());
1803  }
1804  if (vGetData.size() > 1) {
1805  LogPrint(BCLog::NET, "Downloading blocks toward %s (%d) via headers direct fetch\n",
1806  pindexLast->GetBlockHash().ToString(), pindexLast->nHeight);
1807  }
1808  if (vGetData.size() > 0) {
1809  if (nodestate->fSupportsDesiredCmpctVersion && vGetData.size() == 1 && mapBlocksInFlight.size() == 1 && pindexLast->pprev->IsValid(BLOCK_VALID_CHAIN)) {
1810  // In any case, we want to download using a compact block, not a regular one
1811  vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
1812  }
1813  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData));
1814  }
1815  }
1816  }
1817  // If we're in IBD, we want outbound peers that will serve us a useful
1818  // chain. Disconnect peers that are on chains with insufficient work.
1819  if (::ChainstateActive().IsInitialBlockDownload() && nCount != MAX_HEADERS_RESULTS) {
1820  // When nCount < MAX_HEADERS_RESULTS, we know we have no more
1821  // headers to fetch from this peer.
1822  if (nodestate->pindexBestKnownBlock && nodestate->pindexBestKnownBlock->nChainWork < nMinimumChainWork) {
1823  // This peer has too little work on their headers chain to help
1824  // us sync -- disconnect if using an outbound slot (unless
1825  // whitelisted or addnode).
1826  // Note: We compare their tip to nMinimumChainWork (rather than
1827  // ::ChainActive().Tip()) because we won't start block download
1828  // until we have a headers chain that has at least
1829  // nMinimumChainWork, even if a peer has a chain past our tip,
1830  // as an anti-DoS measure.
1831  if (IsOutboundDisconnectionCandidate(pfrom)) {
1832  LogPrintf("Disconnecting outbound peer %d -- headers chain has insufficient work\n", pfrom->GetId());
1833  pfrom->fDisconnect = true;
1834  }
1835  }
1836  }
1837 
1838  if (!pfrom->fDisconnect && IsOutboundDisconnectionCandidate(pfrom) && nodestate->pindexBestKnownBlock != nullptr && pfrom->m_tx_relay != nullptr) {
1839  // If this is an outbound full-relay peer, check to see if we should protect
1840  // it from the bad/lagging chain logic.
1841  // Note that block-relay-only peers are already implicitly protected, so we
1842  // only consider setting m_protect for the full-relay peers.
1843  if (g_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= ::ChainActive().Tip()->nChainWork && !nodestate->m_chain_sync.m_protect) {
1844  LogPrint(BCLog::NET, "Protecting outbound peer=%d from eviction\n", pfrom->GetId());
1845  nodestate->m_chain_sync.m_protect = true;
1846  ++g_outbound_peers_with_protect_from_disconnect;
1847  }
1848  }
1849  }
1850 
1851  return true;
1852 }
1853 
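// ProcessOrphanTx retries orphans from orphan_work_set whose parents have now
// arrived: an accepted orphan is relayed and may unlock further orphans, an
// invalid one may get its sender punished, and the loop stops once one orphan
// has been accepted or erased.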
1854 void static ProcessOrphanTx(CConnman* connman, CTxMemPool& mempool, std::set<uint256>& orphan_work_set, std::list<CTransactionRef>& removed_txn) EXCLUSIVE_LOCKS_REQUIRED(cs_main, g_cs_orphans)
1855 {
1856  AssertLockHeld(cs_main);
1857  AssertLockHeld(g_cs_orphans);
1858  std::set<NodeId> setMisbehaving;
1859  bool done = false;
1860  while (!done && !orphan_work_set.empty()) {
1861  const uint256 orphanHash = *orphan_work_set.begin();
1862  orphan_work_set.erase(orphan_work_set.begin());
1863 
1864  auto orphan_it = mapOrphanTransactions.find(orphanHash);
1865  if (orphan_it == mapOrphanTransactions.end()) continue;
1866 
1867  const CTransactionRef porphanTx = orphan_it->second.tx;
1868  const CTransaction& orphanTx = *porphanTx;
1869  NodeId fromPeer = orphan_it->second.fromPeer;
1870  // Use a new TxValidationState because orphans come from different peers (and we call
1871  // MaybePunishNodeForTx based on the source peer from the orphan map, not based on the peer
1872  // that relayed the previous transaction).
1873  TxValidationState orphan_state;
1874 
1875  if (setMisbehaving.count(fromPeer)) continue;
1876  if (AcceptToMemoryPool(mempool, orphan_state, porphanTx, &removed_txn, false /* bypass_limits */, 0 /* nAbsurdFee */)) {
1877  LogPrint(BCLog::MEMPOOL, " accepted orphan tx %s\n", orphanHash.ToString());
1878  RelayTransaction(orphanHash, *connman);
1879  for (unsigned int i = 0; i < orphanTx.vout.size(); i++) {
1880  auto it_by_prev = mapOrphanTransactionsByPrev.find(COutPoint(orphanHash, i));
1881  if (it_by_prev != mapOrphanTransactionsByPrev.end()) {
1882  for (const auto& elem : it_by_prev->second) {
1883  orphan_work_set.insert(elem->first);
1884  }
1885  }
1886  }
1887  EraseOrphanTx(orphanHash);
1888  done = true;
1889  } else if (orphan_state.GetResult() != TxValidationResult::TX_MISSING_INPUTS) {
1890  if (orphan_state.IsInvalid()) {
1891  // Punish peer that gave us an invalid orphan tx
1892  if (MaybePunishNodeForTx(fromPeer, orphan_state)) {
1893  setMisbehaving.insert(fromPeer);
1894  }
1895  LogPrint(BCLog::MEMPOOL, " invalid orphan tx %s\n", orphanHash.ToString());
1896  }
1897  // Has inputs but not accepted to mempool
1898  // Probably non-standard or insufficient fee
1899  LogPrint(BCLog::MEMPOOL, " removed orphan tx %s\n", orphanHash.ToString());
1900  if (!orphanTx.HasWitness() && orphan_state.GetResult() != TxValidationResult::TX_WITNESS_MUTATED) {
1901  // Do not use rejection cache for witness transactions or
1902  // witness-stripped transactions, as they can have been malleated.
1903  // See https://github.com/bitcoin/bitcoin/issues/8279 for details.
1904  assert(recentRejects);
1905  recentRejects->insert(orphanHash);
1906  }
1907  EraseOrphanTx(orphanHash);
1908  done = true;
1909  }
1910  mempool.check(&::ChainstateActive().CoinsTip());
1911  }
1912 }
1913 
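// ProcessMessage dispatches a single received network message by command name.
// The VERSION/VERACK handshake is enforced before any other handler runs, so
// later handlers can rely on the negotiated send version and service flags.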
1914 bool ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStream& vRecv, int64_t nTimeReceived, const CChainParams& chainparams, CTxMemPool& mempool, CConnman* connman, BanMan* banman, const std::atomic<bool>& interruptMsgProc)
1915 {
1916  LogPrint(BCLog::NET, "received: %s (%u bytes) peer=%d\n", SanitizeString(strCommand), vRecv.size(), pfrom->GetId());
1917  if (gArgs.IsArgSet("-dropmessagestest") && GetRand(gArgs.GetArg("-dropmessagestest", 0)) == 0)
1918  {
1919  LogPrintf("dropmessagestest DROPPING RECV MESSAGE\n");
1920  return true;
1921  }
1922 
1923 
1924  if (!(pfrom->GetLocalServices() & NODE_BLOOM) &&
1925  (strCommand == NetMsgType::FILTERLOAD ||
1926  strCommand == NetMsgType::FILTERADD))
1927  {
1928  if (pfrom->nVersion >= NO_BLOOM_VERSION) {
1929  LOCK(cs_main);
1930  Misbehaving(pfrom->GetId(), 100);
1931  return false;
1932  } else {
1933  pfrom->fDisconnect = true;
1934  return false;
1935  }
1936  }
1937 
1938  if (strCommand == NetMsgType::VERSION) {
1939  // Each connection can only send one version message
1940  if (pfrom->nVersion != 0)
1941  {
1942  LOCK(cs_main);
1943  Misbehaving(pfrom->GetId(), 1);
1944  return false;
1945  }
1946 
1947  int64_t nTime;
1948  CAddress addrMe;
1949  CAddress addrFrom;
1950  uint64_t nNonce = 1;
1951  uint64_t nServiceInt;
1952  ServiceFlags nServices;
1953  int nVersion;
1954  int nSendVersion;
1955  std::string cleanSubVer;
1956  int nStartingHeight = -1;
1957  bool fRelay = true;
1958 
1959  vRecv >> nVersion >> nServiceInt >> nTime >> addrMe;
1960  nSendVersion = std::min(nVersion, PROTOCOL_VERSION);
1961  nServices = ServiceFlags(nServiceInt);
1962  if (!pfrom->fInbound)
1963  {
1964  connman->SetServices(pfrom->addr, nServices);
1965  }
1966  if (!pfrom->fInbound && !pfrom->fFeeler && !pfrom->m_manual_connection && !HasAllDesirableServiceFlags(nServices))
1967  {
1968  LogPrint(BCLog::NET, "peer=%d does not offer the expected services (%08x offered, %08x expected); disconnecting\n", pfrom->GetId(), nServices, GetDesirableServiceFlags(nServices));
1969  pfrom->fDisconnect = true;
1970  return false;
1971  }
1972 
1973  if (nVersion < MIN_PEER_PROTO_VERSION) {
1974  // disconnect from peers older than this proto version
1975  LogPrint(BCLog::NET, "peer=%d using obsolete version %i; disconnecting\n", pfrom->GetId(), nVersion);
1976  pfrom->fDisconnect = true;
1977  return false;
1978  }
1979 
1980  if (!vRecv.empty())
1981  vRecv >> addrFrom >> nNonce;
1982  if (!vRecv.empty()) {
1983  std::string strSubVer;
1984  vRecv >> LIMITED_STRING(strSubVer, MAX_SUBVERSION_LENGTH);
1985  cleanSubVer = SanitizeString(strSubVer);
1986  }
1987  if (!vRecv.empty()) {
1988  vRecv >> nStartingHeight;
1989  }
1990  if (!vRecv.empty())
1991  vRecv >> fRelay;
1992  // Disconnect if we connected to ourself
1993  if (pfrom->fInbound && !connman->CheckIncomingNonce(nNonce))
1994  {
1995  LogPrintf("connected to self at %s, disconnecting\n", pfrom->addr.ToString());
1996  pfrom->fDisconnect = true;
1997  return true;
1998  }
1999 
2000  if (pfrom->fInbound && addrMe.IsRoutable())
2001  {
2002  SeenLocal(addrMe);
2003  }
2004 
2005  // Be shy and don't send version until we hear
2006  if (pfrom->fInbound)
2007  PushNodeVersion(pfrom, connman, GetAdjustedTime());
2008 
2009  connman->PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERACK));
2010 
2011  pfrom->nServices = nServices;
2012  pfrom->SetAddrLocal(addrMe);
2013  {
2014  LOCK(pfrom->cs_SubVer);
2015  pfrom->cleanSubVer = cleanSubVer;
2016  }
2017  pfrom->nStartingHeight = nStartingHeight;
2018 
2019  // set nodes not relaying blocks and tx and not serving (parts) of the historical blockchain as "clients"
2020  pfrom->fClient = (!(nServices & NODE_NETWORK) && !(nServices & NODE_NETWORK_LIMITED));
2021 
2022  // set nodes not capable of serving the complete blockchain history as "limited nodes"
2023  pfrom->m_limited_node = (!(nServices & NODE_NETWORK) && (nServices & NODE_NETWORK_LIMITED));
2024 
2025  if (pfrom->m_tx_relay != nullptr) {
2026  LOCK(pfrom->m_tx_relay->cs_filter);
2027  pfrom->m_tx_relay->fRelayTxes = fRelay; // set to true after we get the first filter* message
2028  }
2029 
2030  // Change version
2031  pfrom->SetSendVersion(nSendVersion);
2032  pfrom->nVersion = nVersion;
2033 
2034  if((nServices & NODE_WITNESS))
2035  {
2036  LOCK(cs_main);
2037  State(pfrom->GetId())->fHaveWitness = true;
2038  }
2039 
2040  // Potentially mark this peer as a preferred download peer.
2041  {
2042  LOCK(cs_main);
2043  UpdatePreferredDownload(pfrom, State(pfrom->GetId()));
2044  }
2045 
2046  if (!pfrom->fInbound && pfrom->IsAddrRelayPeer())
2047  {
2048  // Advertise our address
2049  if (fListen && !::ChainstateActive().IsInitialBlockDownload())
2050  {
2051  CAddress addr = GetLocalAddress(&pfrom->addr, pfrom->GetLocalServices());
2052  FastRandomContext insecure_rand;
2053  if (addr.IsRoutable())
2054  {
2055  LogPrint(BCLog::NET, "ProcessMessages: advertising address %s\n", addr.ToString());
2056  pfrom->PushAddress(addr, insecure_rand);
2057  } else if (IsPeerAddrLocalGood(pfrom)) {
2058  addr.SetIP(addrMe);
2059  LogPrint(BCLog::NET, "ProcessMessages: advertising address %s\n", addr.ToString());
2060  pfrom->PushAddress(addr, insecure_rand);
2061  }
2062  }
2063 
2064  // Get recent addresses
2065  if (pfrom->fOneShot || pfrom->nVersion >= CADDR_TIME_VERSION || connman->GetAddressCount() < 1000)
2066  {
2067  connman->PushMessage(pfrom, CNetMsgMaker(nSendVersion).Make(NetMsgType::GETADDR));
2068  pfrom->fGetAddr = true;
2069  }
2070  connman->MarkAddressGood(pfrom->addr);
2071  }
2072 
2073  std::string remoteAddr;
2074  if (fLogIPs)
2075  remoteAddr = ", peeraddr=" + pfrom->addr.ToString();
2076 
2077  LogPrint(BCLog::NET, "receive version message: %s: version %d, blocks=%d, us=%s, peer=%d%s\n",
2078  cleanSubVer, pfrom->nVersion,
2079  pfrom->nStartingHeight, addrMe.ToString(), pfrom->GetId(),
2080  remoteAddr);
2081 
2082  int64_t nTimeOffset = nTime - GetTime();
2083  pfrom->nTimeOffset = nTimeOffset;
2084  AddTimeData(pfrom->addr, nTimeOffset);
2085 
2086  // If the peer is old enough to have the old alert system, send it the final alert.
2087  if (pfrom->nVersion <= 70012) {
2088  CDataStream finalAlert(ParseHex("60010000000000000000000000ffffff7f00000000ffffff7ffeffff7f01ffffff7f00000000ffffff7f00ffffff7f002f555247454e543a20416c657274206b657920636f6d70726f6d697365642c2075706772616465207265717569726564004630440220653febd6410f470f6bae11cad19c48413becb1ac2c17f908fd0fd53bdc3abd5202206d0e9c96fe88d4a0f01ed9dedae2b6f9e00da94cad0fecaae66ecf689bf71b50"), SER_NETWORK, PROTOCOL_VERSION);
2089  connman->PushMessage(pfrom, CNetMsgMaker(nSendVersion).Make("alert", finalAlert));
2090  }
2091 
2092  // Feeler connections exist only to verify if address is online.
2093  if (pfrom->fFeeler) {
2094  assert(pfrom->fInbound == false);
2095  pfrom->fDisconnect = true;
2096  }
2097  return true;
2098  }
2099 
2100  if (pfrom->nVersion == 0) {
2101  // Must have a version message before anything else
2102  LOCK(cs_main);
2103  Misbehaving(pfrom->GetId(), 1);
2104  return false;
2105  }
2106 
2107  // At this point, the outgoing message serialization version can't change.
2108  const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
2109 
2110  if (strCommand == NetMsgType::VERACK)
2111  {
2112  pfrom->SetRecvVersion(std::min(pfrom->nVersion.load(), PROTOCOL_VERSION));
2113 
2114  if (!pfrom->fInbound) {
2115  // Mark this node as currently connected, so we update its timestamp later.
2116  LOCK(cs_main);
2117  State(pfrom->GetId())->fCurrentlyConnected = true;
2118  LogPrintf("New outbound peer connected: version: %d, blocks=%d, peer=%d%s (%s)\n",
2119  pfrom->nVersion.load(), pfrom->nStartingHeight,
2120  pfrom->GetId(), (fLogIPs ? strprintf(", peeraddr=%s", pfrom->addr.ToString()) : ""),
2121  pfrom->m_tx_relay == nullptr ? "block-relay" : "full-relay");
2122  }
2123 
2124  if (pfrom->nVersion >= SENDHEADERS_VERSION) {
2125  // Tell our peer we prefer to receive headers rather than inv's
2126  // We send this to non-NODE NETWORK peers as well, because even
2127  // non-NODE NETWORK peers can announce blocks (such as pruning
2128  // nodes)
2129  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::SENDHEADERS));
2130  }
2131  if (pfrom->nVersion >= SHORT_IDS_BLOCKS_VERSION) {
2132  // Tell our peer we are willing to provide version 1 or 2 cmpctblocks
2133  // However, we do not request new block announcements using
2134  // cmpctblock messages.
2135  // We send this to non-NODE NETWORK peers as well, because
2136  // they may wish to request compact blocks from us
2137  bool fAnnounceUsingCMPCTBLOCK = false;
2138  uint64_t nCMPCTBLOCKVersion = 2;
2139  if (pfrom->GetLocalServices() & NODE_WITNESS)
2140  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
2141  nCMPCTBLOCKVersion = 1;
2142  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
2143  }
2144  pfrom->fSuccessfullyConnected = true;
2145  return true;
2146  }
2147 
2148  if (!pfrom->fSuccessfullyConnected) {
2149  // Must have a verack message before anything else
2150  LOCK(cs_main);
2151  Misbehaving(pfrom->GetId(), 1);
2152  return false;
2153  }
2154 
2155  if (strCommand == NetMsgType::ADDR) {
2156  std::vector<CAddress> vAddr;
2157  vRecv >> vAddr;
2158 
2159  // Don't want addr from older versions unless seeding
2160  if (pfrom->nVersion < CADDR_TIME_VERSION && connman->GetAddressCount() > 1000)
2161  return true;
2162  if (!pfrom->IsAddrRelayPeer()) {
2163  return true;
2164  }
2165  if (vAddr.size() > 1000)
2166  {
2167  LOCK(cs_main);
2168  Misbehaving(pfrom->GetId(), 20, strprintf("message addr size() = %u", vAddr.size()));
2169  return false;
2170  }
2171 
2172  // Store the new addresses
2173  std::vector<CAddress> vAddrOk;
2174  int64_t nNow = GetAdjustedTime();
2175  int64_t nSince = nNow - 10 * 60;
2176  for (CAddress& addr : vAddr)
2177  {
2178  if (interruptMsgProc)
2179  return true;
2180 
2181  // We only bother storing full nodes, though this may include
2182  // things which we would not make an outbound connection to, in
2183  // part because we may make feeler connections to them.
2184  if (!MayHaveUsefulAddressDB(addr.nServices) && !HasAllDesirableServiceFlags(addr.nServices))
2185  continue;
2186 
2187  if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60)
2188  addr.nTime = nNow - 5 * 24 * 60 * 60;
2189  pfrom->AddAddressKnown(addr);
2190  if (banman->IsBanned(addr)) continue; // Do not process banned addresses beyond remembering we received them
2191  bool fReachable = IsReachable(addr);
2192  if (addr.nTime > nSince && !pfrom->fGetAddr && vAddr.size() <= 10 && addr.IsRoutable())
2193  {
2194  // Relay to a limited number of other nodes
2195  RelayAddress(addr, fReachable, connman);
2196  }
2197  // Do not store addresses outside our network
2198  if (fReachable)
2199  vAddrOk.push_back(addr);
2200  }
2201  connman->AddNewAddresses(vAddrOk, pfrom->addr, 2 * 60 * 60);
2202  if (vAddr.size() < 1000)
2203  pfrom->fGetAddr = false;
2204  if (pfrom->fOneShot)
2205  pfrom->fDisconnect = true;
2206  return true;
2207  }
2208 
2209  if (strCommand == NetMsgType::SENDHEADERS) {
2210  LOCK(cs_main);
2211  State(pfrom->GetId())->fPreferHeaders = true;
2212  return true;
2213  }
2214 
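// SENDCMPCT negotiation: version 2 compact blocks carry witness data. The first
// sendcmpct received locks in which version we will send this peer, and
// fPreferHeaderAndIDs records whether it wants block announcements via
// cmpctblock rather than headers/inv.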
2215  if (strCommand == NetMsgType::SENDCMPCT) {
2216  bool fAnnounceUsingCMPCTBLOCK = false;
2217  uint64_t nCMPCTBLOCKVersion = 0;
2218  vRecv >> fAnnounceUsingCMPCTBLOCK >> nCMPCTBLOCKVersion;
2219  if (nCMPCTBLOCKVersion == 1 || ((pfrom->GetLocalServices() & NODE_WITNESS) && nCMPCTBLOCKVersion == 2)) {
2220  LOCK(cs_main);
2221  // fProvidesHeaderAndIDs is used to "lock in" version of compact blocks we send (fWantsCmpctWitness)
2222  if (!State(pfrom->GetId())->fProvidesHeaderAndIDs) {
2223  State(pfrom->GetId())->fProvidesHeaderAndIDs = true;
2224  State(pfrom->GetId())->fWantsCmpctWitness = nCMPCTBLOCKVersion == 2;
2225  }
2226  if (State(pfrom->GetId())->fWantsCmpctWitness == (nCMPCTBLOCKVersion == 2)) // ignore later version announces
2227  State(pfrom->GetId())->fPreferHeaderAndIDs = fAnnounceUsingCMPCTBLOCK;
2228  if (!State(pfrom->GetId())->fSupportsDesiredCmpctVersion) {
2229  if (pfrom->GetLocalServices() & NODE_WITNESS)
2230  State(pfrom->GetId())->fSupportsDesiredCmpctVersion = (nCMPCTBLOCKVersion == 2);
2231  else
2232  State(pfrom->GetId())->fSupportsDesiredCmpctVersion = (nCMPCTBLOCKVersion == 1);
2233  }
2234  }
2235  return true;
2236  }
2237 
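// INV: block announcements are answered with getheaders (headers-first sync);
// a transaction announcement gets a blocks-only or block-relay-only peer
// disconnected, and is otherwise scheduled for download via RequestTx.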
2238  if (strCommand == NetMsgType::INV) {
2239  std::vector<CInv> vInv;
2240  vRecv >> vInv;
2241  if (vInv.size() > MAX_INV_SZ)
2242  {
2243  LOCK(cs_main);
2244  Misbehaving(pfrom->GetId(), 20, strprintf("message inv size() = %u", vInv.size()));
2245  return false;
2246  }
2247 
2248  // We won't accept tx inv's if we're in blocks-only mode, or this is a
2249  // block-relay-only peer
2250  bool fBlocksOnly = !g_relay_txes || (pfrom->m_tx_relay == nullptr);
2251 
2252  // Allow whitelisted peers to send data other than blocks in blocks only mode if whitelistrelay is true
2253  if (pfrom->HasPermission(PF_RELAY))
2254  fBlocksOnly = false;
2255 
2256  LOCK(cs_main);
2257 
2258  uint32_t nFetchFlags = GetFetchFlags(pfrom);
2259  const auto current_time = GetTime<std::chrono::microseconds>();
2260 
2261  for (CInv &inv : vInv)
2262  {
2263  if (interruptMsgProc)
2264  return true;
2265 
2266  bool fAlreadyHave = AlreadyHave(inv, mempool);
2267  LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom->GetId());
2268 
2269  if (inv.type == MSG_TX) {
2270  inv.type |= nFetchFlags;
2271  }
2272 
2273  if (inv.type == MSG_BLOCK) {
2274  UpdateBlockAvailability(pfrom->GetId(), inv.hash);
2275  if (!fAlreadyHave && !fImporting && !fReindex && !mapBlocksInFlight.count(inv.hash)) {
2276  // We used to request the full block here, but since headers-announcements are now the
2277  // primary method of announcement on the network, and since, in the case that a node
2278  // fell back to inv we probably have a reorg which we should get the headers for first,
2279  // we now only provide a getheaders response here. When we receive the headers, we will
2280  // then ask for the blocks we need.
2281  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexBestHeader), inv.hash));
2282  LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, inv.hash.ToString(), pfrom->GetId());
2283  }
2284  }
2285  else
2286  {
2287  pfrom->AddInventoryKnown(inv);
2288  if (fBlocksOnly) {
2289  LogPrint(BCLog::NET, "transaction (%s) inv sent in violation of protocol, disconnecting peer=%d\n", inv.hash.ToString(), pfrom->GetId());
2290  pfrom->fDisconnect = true;
2291  return true;
2292  } else if (!fAlreadyHave && !fImporting && !fReindex && !::ChainstateActive().IsInitialBlockDownload()) {
2293  RequestTx(State(pfrom->GetId()), inv.hash, current_time);
2294  }
2295  }
2296  }
2297  return true;
2298  }
2299 
2300  if (strCommand == NetMsgType::GETDATA) {
2301  std::vector<CInv> vInv;
2302  vRecv >> vInv;
2303  if (vInv.size() > MAX_INV_SZ)
2304  {
2305  LOCK(cs_main);
2306  Misbehaving(pfrom->GetId(), 20, strprintf("message getdata size() = %u", vInv.size()));
2307  return false;
2308  }
2309 
2310  LogPrint(BCLog::NET, "received getdata (%u invsz) peer=%d\n", vInv.size(), pfrom->GetId());
2311 
2312  if (vInv.size() > 0) {
2313  LogPrint(BCLog::NET, "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom->GetId());
2314  }
2315 
2316  pfrom->vRecvGetData.insert(pfrom->vRecvGetData.end(), vInv.begin(), vInv.end());
2317  ProcessGetData(pfrom, chainparams, connman, mempool, interruptMsgProc);
2318  return true;
2319  }
2320 
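// GETBLOCKS: legacy block-inv sync. Starting after the fork point of the peer's
// locator, inv up to 500 block hashes, stopping early at hashStop, at pruned
// data, or at the limit (hashContinue lets the peer page through the rest).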
2321  if (strCommand == NetMsgType::GETBLOCKS) {
2322  CBlockLocator locator;
2323  uint256 hashStop;
2324  vRecv >> locator >> hashStop;
2325 
2326  if (locator.vHave.size() > MAX_LOCATOR_SZ) {
2327  LogPrint(BCLog::NET, "getblocks locator size %lld > %d, disconnect peer=%d\n", locator.vHave.size(), MAX_LOCATOR_SZ, pfrom->GetId());
2328  pfrom->fDisconnect = true;
2329  return true;
2330  }
2331 
2332  // We might have announced the currently-being-connected tip using a
2333  // compact block, which resulted in the peer sending a getblocks
2334  // request, which we would otherwise respond to without the new block.
2335  // To avoid this situation we simply verify that we are on our best
2336  // known chain now. This is super overkill, but we handle it better
2337  // for getheaders requests, and there are no known nodes which support
2338  // compact blocks but still use getblocks to request blocks.
2339  {
2340  std::shared_ptr<const CBlock> a_recent_block;
2341  {
2342  LOCK(cs_most_recent_block);
2343  a_recent_block = most_recent_block;
2344  }
2345  BlockValidationState state;
2346  if (!ActivateBestChain(state, Params(), a_recent_block)) {
2347  LogPrint(BCLog::NET, "failed to activate chain (%s)\n", state.ToString());
2348  }
2349  }
2350 
2351  LOCK(cs_main);
2352 
2353  // Find the last block the caller has in the main chain
2354  const CBlockIndex* pindex = FindForkInGlobalIndex(::ChainActive(), locator);
2355 
2356  // Send the rest of the chain
2357  if (pindex)
2358  pindex = ::ChainActive().Next(pindex);
2359  int nLimit = 500;
2360  LogPrint(BCLog::NET, "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom->GetId());
2361  for (; pindex; pindex = ::ChainActive().Next(pindex))
2362  {
2363  if (pindex->GetBlockHash() == hashStop)
2364  {
2365  LogPrint(BCLog::NET, " getblocks stopping at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
2366  break;
2367  }
2368  // If pruning, don't inv blocks unless we have them on disk and are likely to still have
2369  // them for some reasonable time window (1 hour) that block relay might require.
2370  const int nPrunedBlocksLikelyToHave = MIN_BLOCKS_TO_KEEP - 3600 / chainparams.GetConsensus().nPowTargetSpacing;
2371  if (fPruneMode && (!(pindex->nStatus & BLOCK_HAVE_DATA) || pindex->nHeight <= ::ChainActive().Tip()->nHeight - nPrunedBlocksLikelyToHave))
2372  {
2373  LogPrint(BCLog::NET, " getblocks stopping, pruned or too old block at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
2374  break;
2375  }
2376  pfrom->PushInventory(CInv(MSG_BLOCK, pindex->GetBlockHash()));
2377  if (--nLimit <= 0)
2378  {
2379  // When this block is requested, we'll send an inv that'll
2380  // trigger the peer to getblocks the next batch of inventory.
2381  LogPrint(BCLog::NET, " getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
2382  pfrom->hashContinue = pindex->GetBlockHash();
2383  break;
2384  }
2385  }
2386  return true;
2387  }
2388 
2389  if (strCommand == NetMsgType::GETBLOCKTXN) {
2390  BlockTransactionsRequest req;
2391  vRecv >> req;
2392 
2393  std::shared_ptr<const CBlock> recent_block;
2394  {
2395  LOCK(cs_most_recent_block);
2396  if (most_recent_block_hash == req.blockhash)
2397  recent_block = most_recent_block;
2398  // Unlock cs_most_recent_block to avoid cs_main lock inversion
2399  }
2400  if (recent_block) {
2401  SendBlockTransactions(*recent_block, req, pfrom, connman);
2402  return true;
2403  }
2404 
2405  LOCK(cs_main);
2406 
2407  const CBlockIndex* pindex = LookupBlockIndex(req.blockhash);
2408  if (!pindex || !(pindex->nStatus & BLOCK_HAVE_DATA)) {
2409  LogPrint(BCLog::NET, "Peer %d sent us a getblocktxn for a block we don't have\n", pfrom->GetId());
2410  return true;
2411  }
2412 
2413  if (pindex->nHeight < ::ChainActive().Height() - MAX_BLOCKTXN_DEPTH) {
2414  // If an older block is requested (should never happen in practice,
2415  // but can happen in tests) send a block response instead of a
2416  // blocktxn response. Sending a full block response instead of a
2417  // small blocktxn response is preferable in the case where a peer
2418  // might maliciously send lots of getblocktxn requests to trigger
2419  // expensive disk reads, because it will require the peer to
2420  // actually receive all the data read from disk over the network.
2421  LogPrint(BCLog::NET, "Peer %d sent us a getblocktxn for a block > %i deep\n", pfrom->GetId(), MAX_BLOCKTXN_DEPTH);
2422  CInv inv;
2423  inv.type = State(pfrom->GetId())->fWantsCmpctWitness ? MSG_WITNESS_BLOCK : MSG_BLOCK;
2424  inv.hash = req.blockhash;
2425  pfrom->vRecvGetData.push_back(inv);
2426  // The message processing loop will go around again (without pausing) and we'll respond then (without cs_main)
2427  return true;
2428  }
2429 
2430  CBlock block;
2431  bool ret = ReadBlockFromDisk(block, pindex, chainparams.GetConsensus());
2432  assert(ret);
2433 
2434  SendBlockTransactions(block, req, pfrom, connman);
2435  return true;
2436  }
2437 
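// GETHEADERS: respond with up to MAX_HEADERS_RESULTS headers following the fork
// point of the peer's locator (or just the hashStop block when the locator is
// empty); ignored during initial block download for peers without the noban
// permission.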
2438  if (strCommand == NetMsgType::GETHEADERS) {
2439  CBlockLocator locator;
2440  uint256 hashStop;
2441  vRecv >> locator >> hashStop;
2442 
2443  if (locator.vHave.size() > MAX_LOCATOR_SZ) {
2444  LogPrint(BCLog::NET, "getheaders locator size %lld > %d, disconnect peer=%d\n", locator.vHave.size(), MAX_LOCATOR_SZ, pfrom->GetId());
2445  pfrom->fDisconnect = true;
2446  return true;
2447  }
2448 
2449  LOCK(cs_main);
2450  if (::ChainstateActive().IsInitialBlockDownload() && !pfrom->HasPermission(PF_NOBAN)) {
2451  LogPrint(BCLog::NET, "Ignoring getheaders from peer=%d because node is in initial block download\n", pfrom->GetId());
2452  return true;
2453  }
2454 
2455  CNodeState *nodestate = State(pfrom->GetId());
2456  const CBlockIndex* pindex = nullptr;
2457  if (locator.IsNull())
2458  {
2459  // If locator is null, return the hashStop block
2460  pindex = LookupBlockIndex(hashStop);
2461  if (!pindex) {
2462  return true;
2463  }
2464 
2465  if (!BlockRequestAllowed(pindex, chainparams.GetConsensus())) {
2466  LogPrint(BCLog::NET, "%s: ignoring request from peer=%i for old block header that isn't in the main chain\n", __func__, pfrom->GetId());
2467  return true;
2468  }
2469  }
2470  else
2471  {
2472  // Find the last block the caller has in the main chain
2473  pindex = FindForkInGlobalIndex(::ChainActive(), locator);
2474  if (pindex)
2475  pindex = ::ChainActive().Next(pindex);
2476  }
2477 
2478  // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end
2479  std::vector<CBlock> vHeaders;
2480  int nLimit = MAX_HEADERS_RESULTS;
2481  LogPrint(BCLog::NET, "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), pfrom->GetId());
2482  for (; pindex; pindex = ::ChainActive().Next(pindex))
2483  {
2484  vHeaders.push_back(pindex->GetBlockHeader());
2485  if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop)
2486  break;
2487  }
2488  // pindex can be nullptr either if we sent ::ChainActive().Tip() OR
2489  // if our peer has ::ChainActive().Tip() (and thus we are sending an empty
2490  // headers message). In both cases it's safe to update
2491  // pindexBestHeaderSent to be our tip.
2492  //
2493  // It is important that we simply reset the BestHeaderSent value here,
2494  // and not max(BestHeaderSent, newHeaderSent). We might have announced
2495  // the currently-being-connected tip using a compact block, which
2496  // resulted in the peer sending a headers request, which we respond to
2497  // without the new block. By resetting the BestHeaderSent, we ensure we
2498  // will re-announce the new block via headers (or compact blocks again)
2499  // in the SendMessages logic.
2500  nodestate->pindexBestHeaderSent = pindex ? pindex : ::ChainActive().Tip();
2501  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
2502  return true;
2503  }
2504 
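// TX: submit the transaction to the mempool. On success it is relayed and any
// orphans waiting on it are retried; on missing inputs it is stored as an
// orphan and its parents are requested; other failures are remembered in
// recentRejects (except for witness transactions, which may have been
// malleated) so the tx is not re-requested.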
2505  if (strCommand == NetMsgType::TX) {
2506  // Stop processing the transaction early if
2507  // We are in blocks only mode and peer is either not whitelisted or whitelistrelay is off
2508  // or if this peer is supposed to be a block-relay-only peer
2509  if ((!g_relay_txes && !pfrom->HasPermission(PF_RELAY)) || (pfrom->m_tx_relay == nullptr))
2510  {
2511  LogPrint(BCLog::NET, "transaction sent in violation of protocol peer=%d\n", pfrom->GetId());
2512  pfrom->fDisconnect = true;
2513  return true;
2514  }
2515 
2516  CTransactionRef ptx;
2517  vRecv >> ptx;
2518  const CTransaction& tx = *ptx;
2519 
2520  CInv inv(MSG_TX, tx.GetHash());
2521  pfrom->AddInventoryKnown(inv);
2522 
2523  LOCK2(cs_main, g_cs_orphans);
2524 
2525  TxValidationState state;
2526 
2527  CNodeState* nodestate = State(pfrom->GetId());
2528  nodestate->m_tx_download.m_tx_announced.erase(inv.hash);
2529  nodestate->m_tx_download.m_tx_in_flight.erase(inv.hash);
2530  EraseTxRequest(inv.hash);
2531 
2532  std::list<CTransactionRef> lRemovedTxn;
2533 
2534  if (!AlreadyHave(inv, mempool) &&
2535  AcceptToMemoryPool(mempool, state, ptx, &lRemovedTxn, false /* bypass_limits */, 0 /* nAbsurdFee */)) {
2536  mempool.check(&::ChainstateActive().CoinsTip());
2537  RelayTransaction(tx.GetHash(), *connman);
2538  for (unsigned int i = 0; i < tx.vout.size(); i++) {
2539  auto it_by_prev = mapOrphanTransactionsByPrev.find(COutPoint(inv.hash, i));
2540  if (it_by_prev != mapOrphanTransactionsByPrev.end()) {
2541  for (const auto& elem : it_by_prev->second) {
2542  pfrom->orphan_work_set.insert(elem->first);
2543  }
2544  }
2545  }
2546 
2547  pfrom->nLastTXTime = GetTime();
2548 
2549  LogPrint(BCLog::MEMPOOL, "AcceptToMemoryPool: peer=%d: accepted %s (poolsz %u txn, %u kB)\n",
2550  pfrom->GetId(),
2551  tx.GetHash().ToString(),
2552  mempool.size(), mempool.DynamicMemoryUsage() / 1000);
2553 
2554  // Recursively process any orphan transactions that depended on this one
2555  ProcessOrphanTx(connman, mempool, pfrom->orphan_work_set, lRemovedTxn);
2556  }
2557  else if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS)
2558  {
2559  bool fRejectedParents = false; // It may be the case that the orphan's parents have all been rejected
2560  for (const CTxIn& txin : tx.vin) {
2561  if (recentRejects->contains(txin.prevout.hash)) {
2562  fRejectedParents = true;
2563  break;
2564  }
2565  }
2566  if (!fRejectedParents) {
2567  uint32_t nFetchFlags = GetFetchFlags(pfrom);
2568  const auto current_time = GetTime<std::chrono::microseconds>();
2569 
2570  for (const CTxIn& txin : tx.vin) {
2571  CInv _inv(MSG_TX | nFetchFlags, txin.prevout.hash);
2572  pfrom->AddInventoryKnown(_inv);
2573  if (!AlreadyHave(_inv, mempool)) RequestTx(State(pfrom->GetId()), _inv.hash, current_time);
2574  }
2575  AddOrphanTx(ptx, pfrom->GetId());
2576 
2577  // DoS prevention: do not allow mapOrphanTransactions to grow unbounded (see CVE-2012-3789)
2578  unsigned int nMaxOrphanTx = (unsigned int)std::max((int64_t)0, gArgs.GetArg("-maxorphantx", DEFAULT_MAX_ORPHAN_TRANSACTIONS));
2579  unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTx);
2580  if (nEvicted > 0) {
2581  LogPrint(BCLog::MEMPOOL, "mapOrphan overflow, removed %u tx\n", nEvicted);
2582  }
2583  } else {
2584  LogPrint(BCLog::MEMPOOL, "not keeping orphan with rejected parents %s\n",tx.GetHash().ToString());
2585  // We will continue to reject this tx since it has rejected
2586  // parents so avoid re-requesting it from other peers.
2587  recentRejects->insert(tx.GetHash());
2588  }
2589  } else {
2590  if (!tx.HasWitness() && state.GetResult() != TxValidationResult::TX_WITNESS_MUTATED) {
2591  // Do not use rejection cache for witness transactions or
2592  // witness-stripped transactions, as they can have been malleated.
2593  // See https://github.com/bitcoin/bitcoin/issues/8279 for details.
2594  assert(recentRejects);
2595  recentRejects->insert(tx.GetHash());
2596  if (RecursiveDynamicUsage(*ptx) < 100000) {
2597  AddToCompactExtraTransactions(ptx);
2598  }
2599  } else if (tx.HasWitness() && RecursiveDynamicUsage(*ptx) < 100000) {
2600  AddToCompactExtraTransactions(ptx);
2601  }
2602 
2603  if (pfrom->HasPermission(PF_FORCERELAY)) {
2604  // Always relay transactions received from whitelisted peers, even
2605  // if they were already in the mempool,
2606  // allowing the node to function as a gateway for
2607  // nodes hidden behind it.
2608  if (!mempool.exists(tx.GetHash())) {
2609  LogPrintf("Not relaying non-mempool transaction %s from whitelisted peer=%d\n", tx.GetHash().ToString(), pfrom->GetId());
2610  } else {
2611  LogPrintf("Force relaying tx %s from whitelisted peer=%d\n", tx.GetHash().ToString(), pfrom->GetId());
2612  RelayTransaction(tx.GetHash(), *connman);
2613  }
2614  }
2615  }
2616 
2617  for (const CTransactionRef& removedTx : lRemovedTxn)
2618  AddToCompactExtraTransactions(removedTx);
2619 
2620  // If a tx has been detected by recentRejects, we will have reached
2621  // this point and the tx will have been ignored. Because we haven't run
2622  // the tx through AcceptToMemoryPool, we won't have computed a DoS
2623  // score for it or determined exactly why we consider it invalid.
2624  //
2625  // This means we won't penalize any peer subsequently relaying a DoSy
2626  // tx (even if we penalized the first peer who gave it to us) because
2627  // we have to account for recentRejects showing false positives. In
2628  // other words, we shouldn't penalize a peer if we aren't *sure* they
2629  // submitted a DoSy tx.
2630  //
2631  // Note that recentRejects doesn't just record DoSy or invalid
2632  // transactions, but any tx not accepted by the mempool, which may be
2633  // due to node policy (vs. consensus). So we can't blanket penalize a
2634  // peer simply for relaying a tx that our recentRejects has caught,
2635  // regardless of false positives.
2636 
2637  if (state.IsInvalid())
2638  {
2639  LogPrint(BCLog::MEMPOOLREJ, "%s from peer=%d was not accepted: %s\n", tx.GetHash().ToString(),
2640  pfrom->GetId(),
2641  state.ToString());
2642  MaybePunishNodeForTx(pfrom->GetId(), state);
2643  }
2644  return true;
2645  }
2646 
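// CMPCTBLOCK: validate the header, then try to reconstruct the block from our
// mempool and extra transactions. If we requested it and transactions are
// missing they are fetched with getblocktxn (a fully reconstructed block jumps
// straight to the BLOCKTXN code); announcements too far ahead of our tip are
// handled like a plain headers message.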
2647  if (strCommand == NetMsgType::CMPCTBLOCK)
2648  {
2649  // Ignore cmpctblock received while importing
2650  if (fImporting || fReindex) {
2651  LogPrint(BCLog::NET, "Unexpected cmpctblock message received from peer %d\n", pfrom->GetId());
2652  return true;
2653  }
2654 
2655  CBlockHeaderAndShortTxIDs cmpctblock;
2656  vRecv >> cmpctblock;
2657 
2658  bool received_new_header = false;
2659 
2660  {
2661  LOCK(cs_main);
2662 
2663  if (!LookupBlockIndex(cmpctblock.header.hashPrevBlock)) {
2664  // Doesn't connect (or is genesis), instead of DoSing in AcceptBlockHeader, request deeper headers
2665  if (!::ChainstateActive().IsInitialBlockDownload())
2666  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexBestHeader), uint256()));
2667  return true;
2668  }
2669 
2670  if (!LookupBlockIndex(cmpctblock.header.GetHash())) {
2671  received_new_header = true;
2672  }
2673  }
2674 
2675  const CBlockIndex *pindex = nullptr;
2676  BlockValidationState state;
2677  if (!ProcessNewBlockHeaders({cmpctblock.header}, state, chainparams, &pindex)) {
2678  if (state.IsInvalid()) {
2679  MaybePunishNodeForBlock(pfrom->GetId(), state, /*via_compact_block*/ true, "invalid header via cmpctblock");
2680  return true;
2681  }
2682  }
2683 
2684  // When we succeed in decoding a block's txids from a cmpctblock
2685  // message we typically jump to the BLOCKTXN handling code, with a
2686  // dummy (empty) BLOCKTXN message, to re-use the logic there in
2687  // completing processing of the putative block (without cs_main).
2688  bool fProcessBLOCKTXN = false;
2689  CDataStream blockTxnMsg(SER_NETWORK, PROTOCOL_VERSION);
2690 
2691  // If we end up treating this as a plain headers message, call that as well
2692  // without cs_main.
2693  bool fRevertToHeaderProcessing = false;
2694 
2695  // Keep a CBlock for "optimistic" compactblock reconstructions (see
2696  // below)
2697  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
2698  bool fBlockReconstructed = false;
2699 
2700  {
2701  LOCK2(cs_main, g_cs_orphans);
2702  // If AcceptBlockHeader returned true, it set pindex
2703  assert(pindex);
2704  UpdateBlockAvailability(pfrom->GetId(), pindex->GetBlockHash());
2705 
2706  CNodeState *nodestate = State(pfrom->GetId());
2707 
2708  // If this was a new header with more work than our tip, update the
2709  // peer's last block announcement time
2710  if (received_new_header && pindex->nChainWork > ::ChainActive().Tip()->nChainWork) {
2711  nodestate->m_last_block_announcement = GetTime();
2712  }
2713 
2714  std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator blockInFlightIt = mapBlocksInFlight.find(pindex->GetBlockHash());
2715  bool fAlreadyInFlight = blockInFlightIt != mapBlocksInFlight.end();
2716 
2717  if (pindex->nStatus & BLOCK_HAVE_DATA) // Nothing to do here
2718  return true;
2719 
2720  if (pindex->nChainWork <= ::ChainActive().Tip()->nChainWork || // We know something better
2721  pindex->nTx != 0) { // We had this block at some point, but pruned it
2722  if (fAlreadyInFlight) {
2723  // We requested this block for some reason, but our mempool will probably be useless
2724  // so we just grab the block via normal getdata
2725  std::vector<CInv> vInv(1);
2726  vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom), cmpctblock.header.GetHash());
2727  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
2728  }
2729  return true;
2730  }
2731 
2732  // If we're not close to tip yet, give up and let parallel block fetch work its magic
2733  if (!fAlreadyInFlight && !CanDirectFetch(chainparams.GetConsensus()))
2734  return true;
2735 
2736  if (IsWitnessEnabled(pindex->pprev, chainparams.GetConsensus()) && !nodestate->fSupportsDesiredCmpctVersion) {
2737  // Don't bother trying to process compact blocks from v1 peers
2738  // after segwit activates.
2739  return true;
2740  }
2741 
2742  // We want to be a bit conservative just to be extra careful about DoS
2743  // possibilities in compact block processing...
2744  if (pindex->nHeight <= ::ChainActive().Height() + 2) {
2745  if ((!fAlreadyInFlight && nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) ||
2746  (fAlreadyInFlight && blockInFlightIt->second.first == pfrom->GetId())) {
2747  std::list<QueuedBlock>::iterator* queuedBlockIt = nullptr;
2748  if (!MarkBlockAsInFlight(mempool, pfrom->GetId(), pindex->GetBlockHash(), pindex, &queuedBlockIt)) {
2749  if (!(*queuedBlockIt)->partialBlock)
2750  (*queuedBlockIt)->partialBlock.reset(new PartiallyDownloadedBlock(&mempool));
2751  else {
2752  // The block was already in flight using compact blocks from the same peer
2753  LogPrint(BCLog::NET, "Peer sent us compact block we were already syncing!\n");
2754  return true;
2755  }
2756  }
2757 
2758  PartiallyDownloadedBlock& partialBlock = *(*queuedBlockIt)->partialBlock;
2759  ReadStatus status = partialBlock.InitData(cmpctblock, vExtraTxnForCompact);
2760  if (status == READ_STATUS_INVALID) {
2761  MarkBlockAsReceived(pindex->GetBlockHash()); // Reset in-flight state in case of whitelist
2762  Misbehaving(pfrom->GetId(), 100, strprintf("Peer %d sent us invalid compact block\n", pfrom->GetId()));
2763  return true;
2764  } else if (status == READ_STATUS_FAILED) {
2765  // Duplicate txindexes, the block is now in-flight, so just request it
2766  std::vector<CInv> vInv(1);
2767  vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom), cmpctblock.header.GetHash());
2768  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
2769  return true;
2770  }
2771 
2772  BlockTransactionsRequest req;
2773  for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) {
2774  if (!partialBlock.IsTxAvailable(i))
2775  req.indexes.push_back(i);
2776  }
2777  if (req.indexes.empty()) {
2778  // Dirty hack to jump to BLOCKTXN code (TODO: move message handling into their own functions)
2779  BlockTransactions txn;
2780  txn.blockhash = cmpctblock.header.GetHash();
2781  blockTxnMsg << txn;
2782  fProcessBLOCKTXN = true;
2783  } else {
2784  req.blockhash = pindex->GetBlockHash();
2785  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETBLOCKTXN, req));
2786  }
2787  } else {
2788  // This block is either already in flight from a different
2789  // peer, or this peer has too many blocks outstanding to
2790  // download from.
2791  // Optimistically try to reconstruct anyway since we might be
2792  // able to without any round trips.
2793  PartiallyDownloadedBlock tempBlock(&mempool);
2794  ReadStatus status = tempBlock.InitData(cmpctblock, vExtraTxnForCompact);
2795  if (status != READ_STATUS_OK) {
2796  // TODO: don't ignore failures
2797  return true;
2798  }
2799  std::vector<CTransactionRef> dummy;
2800  status = tempBlock.FillBlock(*pblock, dummy);
2801  if (status == READ_STATUS_OK) {
2802  fBlockReconstructed = true;
2803  }
2804  }
2805  } else {
2806  if (fAlreadyInFlight) {
2807  // We requested this block, but it's far into the future, so our
2808  // mempool will probably be useless - request the block normally
2809  std::vector<CInv> vInv(1);
2810  vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom), cmpctblock.header.GetHash());
2811  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
2812  return true;
2813  } else {
2814  // If this was an announce-cmpctblock, we want the same treatment as a header message
2815  fRevertToHeaderProcessing = true;
2816  }
2817  }
2818  } // cs_main
2819 
2820  if (fProcessBLOCKTXN)
2821  return ProcessMessage(pfrom, NetMsgType::BLOCKTXN, blockTxnMsg, nTimeReceived, chainparams, mempool, connman, banman, interruptMsgProc);
2822 
2823  if (fRevertToHeaderProcessing) {
2824  // Headers received from HB compact block peers are permitted to be
2825  // relayed before full validation (see BIP 152), so we don't want to disconnect
2826  // the peer if the header turns out to be for an invalid block.
2827  // Note that if a peer tries to build on an invalid chain, that
2828  // will be detected and the peer will be banned.
2829  return ProcessHeadersMessage(pfrom, connman, mempool, {cmpctblock.header}, chainparams, /*via_compact_block=*/true);
2830  }
2831 
2832  if (fBlockReconstructed) {
2833  // If we got here, we were able to optimistically reconstruct a
2834  // block that is in flight from some other peer.
2835  {
2836  LOCK(cs_main);
2837  mapBlockSource.emplace(pblock->GetHash(), std::make_pair(pfrom->GetId(), false));
2838  }
2839  bool fNewBlock = false;
2840  // Setting fForceProcessing to true means that we bypass some of
2841  // our anti-DoS protections in AcceptBlock, which filters
2842  // unrequested blocks that might be trying to waste our resources
2843  // (eg disk space). Because we only try to reconstruct blocks when
2844  // we're close to caught up (via the CanDirectFetch() requirement
2845  // above, combined with the behavior of not requesting blocks until
2846  // we have a chain with at least nMinimumChainWork), and we ignore
2847  // compact blocks with less work than our tip, it is safe to treat
2848  // reconstructed compact blocks as having been requested.
2849  ProcessNewBlock(chainparams, pblock, /*fForceProcessing=*/true, &fNewBlock);
2850  if (fNewBlock) {
2851  pfrom->nLastBlockTime = GetTime();
2852  } else {
2853  LOCK(cs_main);
2854  mapBlockSource.erase(pblock->GetHash());
2855  }
2856  LOCK(cs_main); // hold cs_main for CBlockIndex::IsValid()
2857  if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS)) {
2858  // Clear download state for this block, which is in
2859  // process from some other peer. We do this after calling
2860  // ProcessNewBlock so that a malleated cmpctblock announcement
2861  // can't be used to interfere with block relay.
2862  MarkBlockAsReceived(pblock->GetHash());
2863  }
2864  }
2865  return true;
2866  }
2867 
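// BLOCKTXN: fill in a partially downloaded compact block with the transactions
// the peer sent. Invalid or non-matching data is treated as misbehavior, a
// short-id collision falls back to a full getdata, and a successfully completed
// block is handed to ProcessNewBlock outside cs_main.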
2868  if (strCommand == NetMsgType::BLOCKTXN)
2869  {
2870  // Ignore blocktxn received while importing
2871  if (fImporting || fReindex) {
2872  LogPrint(BCLog::NET, "Unexpected blocktxn message received from peer %d\n", pfrom->GetId());
2873  return true;
2874  }
2875 
2876  BlockTransactions resp;
2877  vRecv >> resp;
2878 
2879  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
2880  bool fBlockRead = false;
2881  {
2882  LOCK(cs_main);
2883 
2884  std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator it = mapBlocksInFlight.find(resp.blockhash);
2885  if (it == mapBlocksInFlight.end() || !it->second.second->partialBlock ||
2886  it->second.first != pfrom->GetId()) {
2887  LogPrint(BCLog::NET, "Peer %d sent us block transactions for block we weren't expecting\n", pfrom->GetId());
2888  return true;
2889  }
2890 
2891  PartiallyDownloadedBlock& partialBlock = *it->second.second->partialBlock;
2892  ReadStatus status = partialBlock.FillBlock(*pblock, resp.txn);
2893  if (status == READ_STATUS_INVALID) {
2894  MarkBlockAsReceived(resp.blockhash); // Reset in-flight state, in case the peer is whitelisted and not disconnected
2895  Misbehaving(pfrom->GetId(), 100, strprintf("Peer %d sent us invalid compact block/non-matching block transactions\n", pfrom->GetId()));
2896  return true;
2897  } else if (status == READ_STATUS_FAILED) {
2898  // Might have collided, fall back to getdata now :(
2899  std::vector<CInv> invs;
2900  invs.push_back(CInv(MSG_BLOCK | GetFetchFlags(pfrom), resp.blockhash));
2901  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, invs));
2902  } else {
2903  // Block is either okay, or possibly we received
2904  // READ_STATUS_CHECKBLOCK_FAILED.
2905  // Note that CheckBlock can only fail for one of a few reasons:
2906  // 1. bad-proof-of-work (impossible here, because we've already
2907  // accepted the header)
2908  // 2. merkleroot doesn't match the transactions given (already
2909  // caught in FillBlock with READ_STATUS_FAILED, so
2910  // impossible here)
2911  // 3. the block is otherwise invalid (eg invalid coinbase,
2912  // block is too big, too many legacy sigops, etc).
2913  // So if CheckBlock failed, #3 is the only possibility.
2914  // Under BIP 152, we don't DoS-ban unless proof of work is
2915  // invalid (we don't require all the stateless checks to have
2916  // been run). This is handled below, so just treat this as
2917  // though the block was successfully read, and rely on the
2918  // handling in ProcessNewBlock to ensure the block index is
2919  // updated, etc.
2920  MarkBlockAsReceived(resp.blockhash); // it is now an empty pointer
2921  fBlockRead = true;
2922  // mapBlockSource is used for potentially punishing peers and
2923  // updating which peers send us compact blocks, so the race
2924  // between here and cs_main in ProcessNewBlock is fine.
2925  // BIP 152 permits peers to relay compact blocks after validating
2926  // the header only; we should not punish peers if the block turns
2927  // out to be invalid.
2928  mapBlockSource.emplace(resp.blockhash, std::make_pair(pfrom->GetId(), false));
2929  }
2930  } // Don't hold cs_main when we call into ProcessNewBlock
2931  if (fBlockRead) {
2932  bool fNewBlock = false;
2933  // Since we requested this block (it was in mapBlocksInFlight), force it to be processed,
2934  // even if it would not be a candidate for new tip (missing previous block, chain not long enough, etc)
2935  // This bypasses some anti-DoS logic in AcceptBlock (eg to prevent
2936  // disk-space attacks), but this should be safe due to the
2937  // protections in the compact block handler -- see related comment
2938  // in compact block optimistic reconstruction handling.
2939  ProcessNewBlock(chainparams, pblock, /*fForceProcessing=*/true, &fNewBlock);
2940  if (fNewBlock) {
2941  pfrom->nLastBlockTime = GetTime();
2942  } else {
2943  LOCK(cs_main);
2944  mapBlockSource.erase(pblock->GetHash());
2945  }
2946  }
2947  return true;
2948  }
2949 
2950  if (strCommand == NetMsgType::HEADERS)
2951  {
2952  // Ignore headers received while importing
2953  if (fImporting || fReindex) {
2954  LogPrint(BCLog::NET, "Unexpected headers message received from peer %d\n", pfrom->GetId());
2955  return true;
2956  }
2957 
2958  std::vector<CBlockHeader> headers;
2959 
2960  // Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks.
2961  unsigned int nCount = ReadCompactSize(vRecv);
2962  if (nCount > MAX_HEADERS_RESULTS) {
2963  LOCK(cs_main);
2964  Misbehaving(pfrom->GetId(), 20, strprintf("headers message size = %u", nCount));
2965  return false;
2966  }
2967  headers.resize(nCount);
2968  for (unsigned int n = 0; n < nCount; n++) {
2969  vRecv >> headers[n];
2970  ReadCompactSize(vRecv); // ignore tx count; assume it is 0.
2971  }
2972 
2973  return ProcessHeadersMessage(pfrom, connman, mempool, headers, chainparams, /*via_compact_block=*/false);
2974  }
2975 
2976  if (strCommand == NetMsgType::BLOCK)
2977  {
2978  // Ignore block received while importing
2979  if (fImporting || fReindex) {
2980  LogPrint(BCLog::NET, "Unexpected block message received from peer %d\n", pfrom->GetId());
2981  return true;
2982  }
2983 
2984  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
2985  vRecv >> *pblock;
2986 
2987  LogPrint(BCLog::NET, "received block %s peer=%d\n", pblock->GetHash().ToString(), pfrom->GetId());
2988 
2989  bool forceProcessing = false;
2990  const uint256 hash(pblock->GetHash());
2991  {
2992  LOCK(cs_main);
2993  // Also always process if we requested the block explicitly, as we may
2994  // need it even though it is not a candidate for a new best tip.
2995  forceProcessing |= MarkBlockAsReceived(hash);
2996  // mapBlockSource is only used for punishing peers and setting
2997  // which peers send us compact blocks, so the race between here and
2998  // cs_main in ProcessNewBlock is fine.
2999  mapBlockSource.emplace(hash, std::make_pair(pfrom->GetId(), true));
3000  }
3001  bool fNewBlock = false;
3002  ProcessNewBlock(chainparams, pblock, forceProcessing, &fNewBlock);
3003  if (fNewBlock) {
3004  pfrom->nLastBlockTime = GetTime();
3005  } else {
3006  LOCK(cs_main);
3007  mapBlockSource.erase(pblock->GetHash());
3008  }
3009  return true;
3010  }
3011 
3012  if (strCommand == NetMsgType::GETADDR) {
3013  // This asymmetric behavior for inbound and outbound connections was introduced
3014  // to prevent a fingerprinting attack: an attacker can send specific fake addresses
3015  // to users' AddrMan and later request them by sending getaddr messages.
3016  // Making nodes which are behind NAT and can only make outgoing connections ignore
3017  // the getaddr message mitigates the attack.
3018  if (!pfrom->fInbound) {
3019  LogPrint(BCLog::NET, "Ignoring \"getaddr\" from outbound connection. peer=%d\n", pfrom->GetId());
3020  return true;
3021  }
3022  if (!pfrom->IsAddrRelayPeer()) {
3023  LogPrint(BCLog::NET, "Ignoring \"getaddr\" from block-relay-only connection. peer=%d\n", pfrom->GetId());
3024  return true;
3025  }
3026 
3027  // Only send one GetAddr response per connection to reduce resource waste
3028  // and discourage addr stamping of INV announcements.
3029  if (pfrom->fSentAddr) {
3030  LogPrint(BCLog::NET, "Ignoring repeated \"getaddr\". peer=%d\n", pfrom->GetId());
3031  return true;
3032  }
3033  pfrom->fSentAddr = true;
3034 
3035  pfrom->vAddrToSend.clear();
3036  std::vector<CAddress> vAddr = connman->GetAddresses();
3037  FastRandomContext insecure_rand;
3038  for (const CAddress &addr : vAddr) {
3039  if (!banman->IsBanned(addr)) {
3040  pfrom->PushAddress(addr, insecure_rand);
3041  }
3042  }
3043  return true;
3044  }
3045 
3046  if (strCommand == NetMsgType::MEMPOOL) {
3047  if (!(pfrom->GetLocalServices() & NODE_BLOOM) && !pfrom->HasPermission(PF_MEMPOOL))
3048  {
3049  if (!pfrom->HasPermission(PF_NOBAN))
3050  {
3051  LogPrint(BCLog::NET, "mempool request with bloom filters disabled, disconnect peer=%d\n", pfrom->GetId());
3052  pfrom->fDisconnect = true;
3053  }
3054  return true;
3055  }
3056 
3057  if (connman->OutboundTargetReached(false) && !pfrom->HasPermission(PF_MEMPOOL))
3058  {
3059  if (!pfrom->HasPermission(PF_NOBAN))
3060  {
3061  LogPrint(BCLog::NET, "mempool request with bandwidth limit reached, disconnect peer=%d\n", pfrom->GetId());
3062  pfrom->fDisconnect = true;
3063  }
3064  return true;
3065  }
3066 
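 // Note the request here; the mempool inventory itself is assembled and
 // sent from SendMessages at this peer's next transaction-inv trickle.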
3067  if (pfrom->m_tx_relay != nullptr) {
3068  LOCK(pfrom->m_tx_relay->cs_tx_inventory);
3069  pfrom->m_tx_relay->fSendMempool = true;
3070  }
3071  return true;
3072  }
3073 
3074  if (strCommand == NetMsgType::PING) {
3075  if (pfrom->nVersion > BIP0031_VERSION)
3076  {
3077  uint64_t nonce = 0;
3078  vRecv >> nonce;
3079  // Echo the message back with the nonce. This allows for two useful features:
3080  //
3081  // 1) A remote node can quickly check if the connection is operational
3082  // 2) Remote nodes can measure the latency of the network thread. If this node
3083  // is overloaded it won't respond to pings quickly and the remote node can
3084  // avoid sending us more work, like chain download requests.
3085  //
3086  // The nonce stops the remote getting confused between different pings: without
3087  // it, if the remote node sends a ping once per second and this node takes 5
3088  // seconds to respond to each, the 5th ping the remote sends would appear to
3089  // return very quickly.
3090  connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::PONG, nonce));
3091  }
3092  return true;
3093  }
3094 
3095  if (strCommand == NetMsgType::PONG) {
3096  int64_t pingUsecEnd = nTimeReceived;
3097  uint64_t nonce = 0;
3098  size_t nAvail = vRecv.in_avail();
3099  bool bPingFinished = false;
3100  std::string sProblem;
3101 
3102  if (nAvail >= sizeof(nonce)) {
3103  vRecv >> nonce;
3104 
3105  // Only process pong message if there is an outstanding ping (old ping without nonce should never pong)
3106  if (pfrom->nPingNonceSent != 0) {
3107  if (nonce == pfrom->nPingNonceSent) {
3108  // Matching pong received, this ping is no longer outstanding
3109  bPingFinished = true;
3110  int64_t pingUsecTime = pingUsecEnd - pfrom->nPingUsecStart;
3111  if (pingUsecTime > 0) {
3112  // Successful ping time measurement, replace previous
3113  pfrom->nPingUsecTime = pingUsecTime;
3114  pfrom->nMinPingUsecTime = std::min(pfrom->nMinPingUsecTime.load(), pingUsecTime);
3115  } else {
3116  // This should never happen
3117  sProblem = "Timing mishap";
3118  }
3119  } else {
3120  // Nonce mismatches are normal when pings are overlapping
3121  sProblem = "Nonce mismatch";
3122  if (nonce == 0) {
3123  // This is most likely a bug in another implementation somewhere; cancel this ping
3124  bPingFinished = true;
3125  sProblem = "Nonce zero";
3126  }
3127  }
3128  } else {
3129  sProblem = "Unsolicited pong without ping";
3130  }
3131  } else {
3132  // This is most likely a bug in another implementation somewhere; cancel this ping
3133  bPingFinished = true;
3134  sProblem = "Short payload";
3135  }
3136 
3137  if (!(sProblem.empty())) {
3138  LogPrint(BCLog::NET, "pong peer=%d: %s, %x expected, %x received, %u bytes\n",
3139  pfrom->GetId(),
3140  sProblem,
3141  pfrom->nPingNonceSent,
3142  nonce,
3143  nAvail);
3144  }
3145  if (bPingFinished) {
3146  pfrom->nPingNonceSent = 0;
3147  }
3148  return true;
3149  }
3150 
3151  if (strCommand == NetMsgType::FILTERLOAD) {
3152  CBloomFilter filter;
3153  vRecv >> filter;
3154 
3155  if (!filter.IsWithinSizeConstraints())
3156  {
3157  // There is no excuse for sending a too-large filter
3158  LOCK(cs_main);
3159  Misbehaving(pfrom->GetId(), 100);
3160  }
3161  else if (pfrom->m_tx_relay != nullptr)
3162  {
3163  LOCK(pfrom->m_tx_relay->cs_filter);
3164  pfrom->m_tx_relay->pfilter.reset(new CBloomFilter(filter));
3165  pfrom->m_tx_relay->pfilter->UpdateEmptyFull();
3166  pfrom->m_tx_relay->fRelayTxes = true;
3167  }
3168  return true;
3169  }
3170 
3171  if (strCommand == NetMsgType::FILTERADD) {
3172  std::vector<unsigned char> vData;
3173  vRecv >> vData;
3174 
3175  // Nodes must NEVER send a data item > 520 bytes (the max size for a script data object,
3176  // and thus, the maximum size any matched object can have) in a filteradd message
3177  bool bad = false;
3178  if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) {
3179  bad = true;
3180  } else if (pfrom->m_tx_relay != nullptr) {
3181  LOCK(pfrom->m_tx_relay->cs_filter);
3182  if (pfrom->m_tx_relay->pfilter) {
3183  pfrom->m_tx_relay->pfilter->insert(vData);
3184  } else {
3185  bad = true;
3186  }
3187  }
3188  if (bad) {
3189  LOCK(cs_main);
3190  Misbehaving(pfrom->GetId(), 100);
3191  }
3192  return true;
3193  }
3194 
3195  if (strCommand == NetMsgType::FILTERCLEAR) {
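 // BIP 37: clearing the filter reverts this peer to being sent all
 // relayed transactions.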
3196  if (pfrom->m_tx_relay == nullptr) {
3197  return true;
3198  }
3199  LOCK(pfrom->m_tx_relay->cs_filter);
3200  if (pfrom->GetLocalServices() & NODE_BLOOM) {
3201  pfrom->m_tx_relay->pfilter.reset(new CBloomFilter());
3202  }
3203  pfrom->m_tx_relay->fRelayTxes = true;
3204  return true;
3205  }
3206 
3207  if (strCommand == NetMsgType::FEEFILTER) {
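 // BIP 133: remember the peer's announced minimum fee rate; transactions
 // below it are skipped when building this peer's inventory in SendMessages.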
3208  CAmount newFeeFilter = 0;
3209  vRecv >> newFeeFilter;
3210  if (MoneyRange(newFeeFilter)) {
3211  if (pfrom->m_tx_relay != nullptr) {
3212  LOCK(pfrom->m_tx_relay->cs_feeFilter);
3213  pfrom->m_tx_relay->minFeeFilter = newFeeFilter;
3214  }
3215  LogPrint(BCLog::NET, "received: feefilter of %s from peer=%d\n", CFeeRate(newFeeFilter).ToString(), pfrom->GetId());
3216  }
3217  return true;
3218  }
3219 
3220  if (strCommand == NetMsgType::NOTFOUND) {
3221  // Remove the NOTFOUND transactions from the peer
3222  LOCK(cs_main);
3223  CNodeState *state = State(pfrom->GetId());
3224  std::vector<CInv> vInv;
3225  vRecv >> vInv;
3226  if (vInv.size() <= MAX_PEER_TX_IN_FLIGHT + MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
3227  for (CInv &inv : vInv) {
3228  if (inv.type == MSG_TX || inv.type == MSG_WITNESS_TX) {
3229  // If we receive a NOTFOUND message for a txid we requested, erase
3230  // it from our data structures for this peer.
3231  auto in_flight_it = state->m_tx_download.m_tx_in_flight.find(inv.hash);
3232  if (in_flight_it == state->m_tx_download.m_tx_in_flight.end()) {
3233  // Skip any further work if this is a spurious NOTFOUND
3234  // message.
3235  continue;
3236  }
3237  state->m_tx_download.m_tx_in_flight.erase(in_flight_it);
3238  state->m_tx_download.m_tx_announced.erase(inv.hash);
3239  }
3240  }
3241  }
3242  return true;
3243  }
3244 
3245  // Ignore unknown commands for extensibility
3246  LogPrint(BCLog::NET, "Unknown command \"%s\" from peer=%d\n", SanitizeString(strCommand), pfrom->GetId());
3247  return true;
3248 }
3249 
3250 bool PeerLogicValidation::CheckIfBanned(CNode* pnode)
3251 {
3253  CNodeState &state = *State(pnode->GetId());
3254 
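 // fShouldBan is set by Misbehaving() once the peer's misbehavior score
 // crosses the ban threshold; act on it here, after message processing.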
3255  if (state.fShouldBan) {
3256  state.fShouldBan = false;
3257  if (pnode->HasPermission(PF_NOBAN))
3258  LogPrintf("Warning: not punishing whitelisted peer %s!\n", pnode->addr.ToString());
3259  else if (pnode->m_manual_connection)
3260  LogPrintf("Warning: not punishing manually-connected peer %s!\n", pnode->addr.ToString());
3261  else if (pnode->addr.IsLocal()) {
3262  // Disconnect but don't ban _this_ local node
3263  LogPrintf("Warning: disconnecting but not banning local peer %s!\n", pnode->addr.ToString());
3264  pnode->fDisconnect = true;
3265  } else {
3266  // Disconnect and ban all nodes sharing the address
3267  if (m_banman) {
3269  }
3270  connman->DisconnectNode(pnode->addr);
3271  }
3272  return true;
3273  }
3274  return false;
3275 }
3276 
3277 bool PeerLogicValidation::ProcessMessages(CNode* pfrom, std::atomic<bool>& interruptMsgProc)
3278 {
3279  const CChainParams& chainparams = Params();
3280  //
3281  // Message format
3282  // (4) message start
3283  // (12) command
3284  // (4) size
3285  // (4) checksum
3286  // (x) data
3287  //
3288  bool fMoreWork = false;
3289 
3290  if (!pfrom->vRecvGetData.empty())
3291  ProcessGetData(pfrom, chainparams, connman, m_mempool, interruptMsgProc);
3292 
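 // Retry accepting orphan transactions whose missing parents have since
 // been accepted to the mempool.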
3293  if (!pfrom->orphan_work_set.empty()) {
3294  std::list<CTransactionRef> removed_txn;
3295  LOCK2(cs_main, g_cs_orphans);
3296  ProcessOrphanTx(connman, m_mempool, pfrom->orphan_work_set, removed_txn);
3297  for (const CTransactionRef& removedTx : removed_txn) {
3298  AddToCompactExtraTransactions(removedTx);
3299  }
3300  }
3301 
3302  if (pfrom->fDisconnect)
3303  return false;
3304 
3305  // this maintains the order of responses
3306  // and prevents vRecvGetData from growing unbounded
3307  if (!pfrom->vRecvGetData.empty()) return true;
3308  if (!pfrom->orphan_work_set.empty()) return true;
3309 
3310  // Don't bother if send buffer is too full to respond anyway
3311  if (pfrom->fPauseSend)
3312  return false;
3313 
3314  std::list<CNetMessage> msgs;
3315  {
3316  LOCK(pfrom->cs_vProcessMsg);
3317  if (pfrom->vProcessMsg.empty())
3318  return false;
3319  // Just take one message
3320  msgs.splice(msgs.begin(), pfrom->vProcessMsg, pfrom->vProcessMsg.begin());
3321  pfrom->nProcessQueueSize -= msgs.front().m_raw_message_size;
3322  pfrom->fPauseRecv = pfrom->nProcessQueueSize > connman->GetReceiveFloodSize();
3323  fMoreWork = !pfrom->vProcessMsg.empty();
3324  }
3325  CNetMessage& msg(msgs.front());
3326 
3327  msg.SetVersion(pfrom->GetRecvVersion());
3328  // Check network magic
3329  if (!msg.m_valid_netmagic) {
3330  LogPrint(BCLog::NET, "PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d\n", SanitizeString(msg.m_command), pfrom->GetId());
3331  pfrom->fDisconnect = true;
3332  return false;
3333  }
3334 
3335  // Check header
3336  if (!msg.m_valid_header)
3337  {
3338  LogPrint(BCLog::NET, "PROCESSMESSAGE: ERRORS IN HEADER %s peer=%d\n", SanitizeString(msg.m_command), pfrom->GetId());
3339  return fMoreWork;
3340  }
3341  const std::string& strCommand = msg.m_command;
3342 
3343  // Message size
3344  unsigned int nMessageSize = msg.m_message_size;
3345 
3346  // Checksum
3347  CDataStream& vRecv = msg.m_recv;
3348  if (!msg.m_valid_checksum)
3349  {
3350  LogPrint(BCLog::NET, "%s(%s, %u bytes): CHECKSUM ERROR peer=%d\n", __func__,
3351  SanitizeString(strCommand), nMessageSize, pfrom->GetId());
3352  return fMoreWork;
3353  }
3354 
3355  // Process message
3356  bool fRet = false;
3357  try
3358  {
3359  fRet = ProcessMessage(pfrom, strCommand, vRecv, msg.m_time, chainparams, m_mempool, connman, m_banman, interruptMsgProc);
3360  if (interruptMsgProc)
3361  return false;
3362  if (!pfrom->vRecvGetData.empty())
3363  fMoreWork = true;
3364  } catch (const std::exception& e) {
3365  LogPrint(BCLog::NET, "%s(%s, %u bytes): Exception '%s' (%s) caught\n", __func__, SanitizeString(strCommand), nMessageSize, e.what(), typeid(e).name());
3366  } catch (...) {
3367  LogPrint(BCLog::NET, "%s(%s, %u bytes): Unknown exception caught\n", __func__, SanitizeString(strCommand), nMessageSize);
3368  }
3369 
3370  if (!fRet) {
3371  LogPrint(BCLog::NET, "%s(%s, %u bytes) FAILED peer=%d\n", __func__, SanitizeString(strCommand), nMessageSize, pfrom->GetId());
3372  }
3373 
3374  LOCK(cs_main);
3375  CheckIfBanned(pfrom);
3376 
3377  return fMoreWork;
3378 }
3379 
3380 void PeerLogicValidation::ConsiderEviction(CNode *pto, int64_t time_in_seconds)
3381 {
3383 
3384  CNodeState &state = *State(pto->GetId());
3385  const CNetMsgMaker msgMaker(pto->GetSendVersion());
3386 
3387  if (!state.m_chain_sync.m_protect && IsOutboundDisconnectionCandidate(pto) && state.fSyncStarted) {
3388  // This is an outbound peer subject to disconnection if they don't
3389  // announce a block with as much work as the current tip within
3390  // CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds (note: if
3391  // their chain has more work than ours, we should sync to it,
3392  // unless it's invalid, in which case we should find that out and
3393  // disconnect from them elsewhere).
3394  if (state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= ::ChainActive().Tip()->nChainWork) {
3395  if (state.m_chain_sync.m_timeout != 0) {
3396  state.m_chain_sync.m_timeout = 0;
3397  state.m_chain_sync.m_work_header = nullptr;
3398  state.m_chain_sync.m_sent_getheaders = false;
3399  }
3400  } else if (state.m_chain_sync.m_timeout == 0 || (state.m_chain_sync.m_work_header != nullptr && state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= state.m_chain_sync.m_work_header->nChainWork)) {
3401  // Our best block known by this peer is behind our tip, and we're either noticing
3402  // that for the first time, OR this peer was able to catch up to some earlier point
3403  // where we checked against our tip.
3404  // Either way, set a new timeout based on current tip.
3405  state.m_chain_sync.m_timeout = time_in_seconds + CHAIN_SYNC_TIMEOUT;
3406  state.m_chain_sync.m_work_header = ::ChainActive().Tip();
3407  state.m_chain_sync.m_sent_getheaders = false;
3408  } else if (state.m_chain_sync.m_timeout > 0 && time_in_seconds > state.m_chain_sync.m_timeout) {
3409  // No evidence yet that our peer has synced to a chain with work equal to that
3410  // of our tip, when we first detected it was behind. Send a single getheaders
3411  // message to give the peer a chance to update us.
3412  if (state.m_chain_sync.m_sent_getheaders) {
3413  // They've run out of time to catch up!
3414  LogPrintf("Disconnecting outbound peer %d for old chain, best known block = %s\n", pto->GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>");
3415  pto->fDisconnect = true;
3416  } else {
3417  assert(state.m_chain_sync.m_work_header);
3418  LogPrint(BCLog::NET, "sending getheaders to outbound peer=%d to verify chain work (current best known block:%s, benchmark blockhash: %s)\n", pto->GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>", state.m_chain_sync.m_work_header->GetBlockHash().ToString());
3419  connman->PushMessage(pto, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(state.m_chain_sync.m_work_header->pprev), uint256()));
3420  state.m_chain_sync.m_sent_getheaders = true;
3421  constexpr int64_t HEADERS_RESPONSE_TIME = 120; // 2 minutes
3422  // Bump the timeout to allow a response, which could clear the timeout
3423  // (if the response shows the peer has synced), reset the timeout (if
3424  // the peer syncs to the required work but not to our tip), or result
3425  // in disconnect (if we advance to the timeout and pindexBestKnownBlock
3426  // has not sufficiently progressed)
3427  state.m_chain_sync.m_timeout = time_in_seconds + HEADERS_RESPONSE_TIME;
3428  }
3429  }
3430  }
3431 }
3432 
3433 void PeerLogicValidation::EvictExtraOutboundPeers(int64_t time_in_seconds)
3434 {
3435  // Check whether we have too many outbound peers
3436  int extra_peers = connman->GetExtraOutboundCount();
3437  if (extra_peers > 0) {
3438  // If we have more outbound peers than we target, disconnect one.
3439  // Pick the outbound peer that least recently announced
3440  // us a new block, with ties broken by choosing the more recent
3441  // connection (higher node id)
3442  NodeId worst_peer = -1;
3443  int64_t oldest_block_announcement = std::numeric_limits<int64_t>::max();
3444 
3445  connman->ForEachNode([&](CNode* pnode) {
3447 
3448  // Ignore non-outbound peers, or nodes marked for disconnect already
3449  if (!IsOutboundDisconnectionCandidate(pnode) || pnode->fDisconnect) return;
3450  CNodeState *state = State(pnode->GetId());
3451  if (state == nullptr) return; // shouldn't be possible, but just in case
3452  // Don't evict our protected peers
3453  if (state->m_chain_sync.m_protect) return;
3454  // Don't evict our block-relay-only peers.
3455  if (pnode->m_tx_relay == nullptr) return;
3456  if (state->m_last_block_announcement < oldest_block_announcement || (state->m_last_block_announcement == oldest_block_announcement && pnode->GetId() > worst_peer)) {
3457  worst_peer = pnode->GetId();
3458  oldest_block_announcement = state->m_last_block_announcement;
3459  }
3460  });
3461  if (worst_peer != -1) {
3462  bool disconnected = connman->ForNode(worst_peer, [&](CNode *pnode) {
3464 
3465  // Only disconnect a peer that has been connected to us for
3466  // some reasonable fraction of our check-frequency, to give
3467  // it time for new information to have arrived.
3468  // Also don't disconnect any peer we're trying to download a
3469  // block from.
3470  CNodeState &state = *State(pnode->GetId());
3471  if (time_in_seconds - pnode->nTimeConnected > MINIMUM_CONNECT_TIME && state.nBlocksInFlight == 0) {
3472  LogPrint(BCLog::NET, "disconnecting extra outbound peer=%d (last block announcement received at time %d)\n", pnode->GetId(), oldest_block_announcement);
3473  pnode->fDisconnect = true;
3474  return true;
3475  } else {
3476  LogPrint(BCLog::NET, "keeping outbound peer=%d chosen for eviction (connect time: %d, blocks_in_flight: %d)\n", pnode->GetId(), pnode->nTimeConnected, state.nBlocksInFlight);
3477  return false;
3478  }
3479  });
3480  if (disconnected) {
3481  // If we disconnected an extra peer, that means we successfully
3482  // connected to at least one peer after the last time we
3483  // detected a stale tip. Don't try any more extra peers until
3484  // we next detect a stale tip, to limit the load we put on the
3485  // network from these extra connections.
3486  connman->SetTryNewOutboundPeer(false);
3487  }
3488  }
3489  }
3490 }
3491 
3492 void PeerLogicValidation::CheckForStaleTipAndEvictPeers(const Consensus::Params &consensusParams)
3493 {
3494  LOCK(cs_main);
3495 
3496  if (connman == nullptr) return;
3497 
3498  int64_t time_in_seconds = GetTime();
3499 
3500  EvictExtraOutboundPeers(time_in_seconds);
3501 
3502  if (time_in_seconds > m_stale_tip_check_time) {
3503  // Check whether our tip is stale, and if so, allow using an extra
3504  // outbound peer
3505  if (!fImporting && !fReindex && connman->GetNetworkActive() && connman->GetUseAddrmanOutgoing() && TipMayBeStale(consensusParams)) {
3506  LogPrintf("Potential stale tip detected, will try using extra outbound peer (last tip update: %d seconds ago)\n", time_in_seconds - g_last_tip_update);
3507  connman->SetTryNewOutboundPeer(true);
3508  } else if (connman->GetTryNewOutboundPeer()) {
3509  connman->SetTryNewOutboundPeer(false);
3510  }
3511  m_stale_tip_check_time = time_in_seconds + STALE_CHECK_INTERVAL;
3512  }
3513 }
3514 
3515 namespace {
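 // Heap comparator used in SendMessages to decide which mempool transactions
 // to announce first: transactions with fewer in-mempool ancestors and a
 // higher fee rate are popped (and thus announced) earlier.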
3516 class CompareInvMempoolOrder
3517 {
3518  CTxMemPool *mp;
3519 public:
3520  explicit CompareInvMempoolOrder(CTxMemPool *_mempool)
3521  {
3522  mp = _mempool;
3523  }
3524 
3525  bool operator()(std::set<uint256>::iterator a, std::set<uint256>::iterator b)
3526  {
3527  /* As std::make_heap produces a max-heap, we want the entries with the
3528  * fewest ancestors/highest fee to sort later. */
3529  return mp->CompareDepthAndScore(*b, *a);
3530  }
3531 };
3532 }
3533 
3534 bool PeerLogicValidation::SendMessages(CNode* pto)
3535 {
3536  const Consensus::Params& consensusParams = Params().GetConsensus();
3537  {
3538  // Don't send anything until the version handshake is complete
3539  if (!pto->fSuccessfullyConnected || pto->fDisconnect)
3540  return true;
3541 
3542  // If we get here, the outgoing message serialization version is set and can't change.
3543  const CNetMsgMaker msgMaker(pto->GetSendVersion());
3544 
3545  //
3546  // Message: ping
3547  //
3548  bool pingSend = false;
3549  if (pto->fPingQueued) {
3550  // RPC ping request by user
3551  pingSend = true;
3552  }
3553  if (pto->nPingNonceSent == 0 && pto->nPingUsecStart + PING_INTERVAL * 1000000 < GetTimeMicros()) {
3554  // Ping automatically sent as a latency probe & keepalive.
3555  pingSend = true;
3556  }
3557  if (pingSend) {
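 // Pick a non-zero nonce: nPingNonceSent == 0 is used to mean
 // "no ping currently outstanding".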
3558  uint64_t nonce = 0;
3559  while (nonce == 0) {
3560  GetRandBytes((unsigned char*)&nonce, sizeof(nonce));
3561  }
3562  pto->fPingQueued = false;
3563  pto->nPingUsecStart = GetTimeMicros();
3564  if (pto->nVersion > BIP0031_VERSION) {
3565  pto->nPingNonceSent = nonce;
3566  connman->PushMessage(pto, msgMaker.Make(NetMsgType::PING, nonce));
3567  } else {
3568  // Peer is too old to support ping command with nonce, pong will never arrive.
3569  pto->nPingNonceSent = 0;
3570  connman->PushMessage(pto, msgMaker.Make(NetMsgType::PING));
3571  }
3572  }
3573 
3574  TRY_LOCK(cs_main, lockMain);
3575  if (!lockMain)
3576  return true;
3577 
3578  if (CheckIfBanned(pto)) return true;
3579 
3580  CNodeState &state = *State(pto->GetId());
3581 
3582  // Address refresh broadcast
3583  int64_t nNow = GetTimeMicros();
3584  auto current_time = GetTime<std::chrono::microseconds>();
3585 
3586  if (pto->IsAddrRelayPeer() && !::ChainstateActive().IsInitialBlockDownload() && pto->nNextLocalAddrSend < nNow) {
3587  AdvertiseLocal(pto);
3588  pto->nNextLocalAddrSend = PoissonNextSend(nNow, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
3589  }
3590 
3591  //
3592  // Message: addr
3593  //
3594  if (pto->IsAddrRelayPeer() && pto->nNextAddrSend < nNow) {
3595  pto->nNextAddrSend = PoissonNextSend(nNow, AVG_ADDRESS_BROADCAST_INTERVAL);
3596  std::vector<CAddress> vAddr;
3597  vAddr.reserve(pto->vAddrToSend.size());
3598  assert(pto->m_addr_known);
3599  for (const CAddress& addr : pto->vAddrToSend)
3600  {
3601  if (!pto->m_addr_known->contains(addr.GetKey()))
3602  {
3603  pto->m_addr_known->insert(addr.GetKey());
3604  vAddr.push_back(addr);
3605  // receiver rejects addr messages larger than 1000
3606  if (vAddr.size() >= 1000)
3607  {
3608  connman->PushMessage(pto, msgMaker.Make(NetMsgType::ADDR, vAddr));
3609  vAddr.clear();
3610  }
3611  }
3612  }
3613  pto->vAddrToSend.clear();
3614  if (!vAddr.empty())
3615  connman->PushMessage(pto, msgMaker.Make(NetMsgType::ADDR, vAddr));
3616  // we only send the big addr message once
3617  if (pto->vAddrToSend.capacity() > 40)
3618  pto->vAddrToSend.shrink_to_fit();
3619  }
3620 
3621  // Start block sync
3622  if (pindexBestHeader == nullptr)
3623  pindexBestHeader = ::ChainActive().Tip();
3624  bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->fOneShot); // Download if this is a nice peer, or we have no nice peers and this one might do.
3625  if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) {
3626  // Only actively request headers from a single peer, unless we're close to today.
3627  if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) {
3628  state.fSyncStarted = true;
3630  nSyncStarted++;
3631  const CBlockIndex *pindexStart = pindexBestHeader;
3632  /* If possible, start at the block preceding the currently
3633  best known header. This ensures that we always get a
3634  non-empty list of headers back as long as the peer
3635  is up-to-date. With a non-empty response, we can initialise
3636  the peer's known best block. This wouldn't be possible
3637  if we requested starting at pindexBestHeader and
3638  got back an empty response. */
3639  if (pindexStart->pprev)
3640  pindexStart = pindexStart->pprev;
3641  LogPrint(BCLog::NET, "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->GetId(), pto->nStartingHeight);
3642  connman->PushMessage(pto, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexStart), uint256()));
3643  }
3644  }
3645 
3646  //
3647  // Try sending block announcements via headers
3648  //
3649  {
3650  // If we have less than MAX_BLOCKS_TO_ANNOUNCE in our
3651  // list of block hashes we're relaying, and our peer wants
3652  // headers announcements, then find the first header
3653  // not yet known to our peer but would connect, and send.
3654  // If no header would connect, or if we have too many
3655  // blocks, or if the peer doesn't want headers, just
3656  // add all to the inv queue.
3657  LOCK(pto->cs_inventory);
3658  std::vector<CBlock> vHeaders;
3659  bool fRevertToInv = ((!state.fPreferHeaders &&
3660  (!state.fPreferHeaderAndIDs || pto->vBlockHashesToAnnounce.size() > 1)) ||
3661  pto->vBlockHashesToAnnounce.size() > MAX_BLOCKS_TO_ANNOUNCE);
3662  const CBlockIndex *pBestIndex = nullptr; // last header queued for delivery
3663  ProcessBlockAvailability(pto->GetId()); // ensure pindexBestKnownBlock is up-to-date
3664 
3665  if (!fRevertToInv) {
3666  bool fFoundStartingHeader = false;
3667  // Try to find first header that our peer doesn't have, and
3668  // then send all headers past that one. If we come across any
3669  // headers that aren't on ::ChainActive(), give up.
3670  for (const uint256 &hash : pto->vBlockHashesToAnnounce) {
3671  const CBlockIndex* pindex = LookupBlockIndex(hash);
3672  assert(pindex);
3673  if (::ChainActive()[pindex->nHeight] != pindex) {
3674  // Bail out if we reorged away from this block
3675  fRevertToInv = true;
3676  break;
3677  }
3678  if (pBestIndex != nullptr && pindex->pprev != pBestIndex) {
3679  // This means that the list of blocks to announce doesn't
3680  // connect to each other.
3681  // This shouldn't really be possible to hit during
3682  // regular operation (because reorgs should take us to
3683  // a chain that has some block not on the prior chain,
3684  // which should be caught by the prior check), but one
3685  // way this could happen is by using invalidateblock /
3686  // reconsiderblock repeatedly on the tip, causing it to
3687  // be added multiple times to vBlockHashesToAnnounce.
3688  // Robustly deal with this rare situation by reverting
3689  // to an inv.
3690  fRevertToInv = true;
3691  break;
3692  }
3693  pBestIndex = pindex;
3694  if (fFoundStartingHeader) {
3695  // add this to the headers message
3696  vHeaders.push_back(pindex->GetBlockHeader());
3697  } else if (PeerHasHeader(&state, pindex)) {
3698  continue; // keep looking for the first new block
3699  } else if (pindex->pprev == nullptr || PeerHasHeader(&state, pindex->pprev)) {
3700  // Peer doesn't have this header but they do have the prior one.
3701  // Start sending headers.
3702  fFoundStartingHeader = true;
3703  vHeaders.push_back(pindex->GetBlockHeader());
3704  } else {
3705  // Peer doesn't have this header or the prior one -- nothing will
3706  // connect, so bail out.
3707  fRevertToInv = true;
3708  break;
3709  }
3710  }
3711  }
3712  if (!fRevertToInv && !vHeaders.empty()) {
3713  if (vHeaders.size() == 1 && state.fPreferHeaderAndIDs) {
3714  // We only send up to 1 block as header-and-ids, as otherwise it
3715  // probably means we're doing an initial-ish-sync or they're slow
3716  LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", __func__,
3717  vHeaders.front().GetHash().ToString(), pto->GetId());
3718 
3719  int nSendFlags = state.fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
3720 
3721  bool fGotBlockFromCache = false;
3722  {
3723  LOCK(cs_most_recent_block);
3724  if (most_recent_block_hash == pBestIndex->GetBlockHash()) {
3725  if (state.fWantsCmpctWitness || !fWitnessesPresentInMostRecentCompactBlock)
3726  connman->PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, *most_recent_compact_block));
3727  else {
3728  CBlockHeaderAndShortTxIDs cmpctblock(*most_recent_block, state.fWantsCmpctWitness);
3729  connman->PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
3730  }
3731  fGotBlockFromCache = true;
3732  }
3733  }
3734  if (!fGotBlockFromCache) {
3735  CBlock block;
3736  bool ret = ReadBlockFromDisk(block, pBestIndex, consensusParams);
3737  assert(ret);
3738  CBlockHeaderAndShortTxIDs cmpctblock(block, state.fWantsCmpctWitness);
3739  connman->PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
3740  }
3741  state.pindexBestHeaderSent = pBestIndex;
3742  } else if (state.fPreferHeaders) {
3743  if (vHeaders.size() > 1) {
3744  LogPrint(BCLog::NET, "%s: %u headers, range (%s, %s), to peer=%d\n", __func__,
3745  vHeaders.size(),
3746  vHeaders.front().GetHash().ToString(),
3747  vHeaders.back().GetHash().ToString(), pto->GetId());
3748  } else {
3749  LogPrint(BCLog::NET, "%s: sending header %s to peer=%d\n", __func__,
3750  vHeaders.front().GetHash().ToString(), pto->GetId());
3751  }
3752  connman->PushMessage(pto, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
3753  state.pindexBestHeaderSent = pBestIndex;
3754  } else
3755  fRevertToInv = true;
3756  }
3757  if (fRevertToInv) {
3758  // If falling back to using an inv, just try to inv the tip.
3759  // The last entry in vBlockHashesToAnnounce was our tip at some point
3760  // in the past.
3761  if (!pto->vBlockHashesToAnnounce.empty()) {
3762  const uint256 &hashToAnnounce = pto->vBlockHashesToAnnounce.back();
3763  const CBlockIndex* pindex = LookupBlockIndex(hashToAnnounce);
3764  assert(pindex);
3765 
3766  // Warn if we're announcing a block that is not on the main chain.
3767  // This should be very rare and could be optimized out.
3768  // Just log for now.
3769  if (::ChainActive()[pindex->nHeight] != pindex) {
3770  LogPrint(BCLog::NET, "Announcing block %s not on main chain (tip=%s)\n",
3771  hashToAnnounce.ToString(), ::ChainActive().Tip()->GetBlockHash().ToString());
3772  }
3773 
3774  // If the peer's chain has this block, don't inv it back.
3775  if (!PeerHasHeader(&state, pindex)) {
3776  pto->PushInventory(CInv(MSG_BLOCK, hashToAnnounce));
3777  LogPrint(BCLog::NET, "%s: sending inv peer=%d hash=%s\n", __func__,
3778  pto->GetId(), hashToAnnounce.ToString());
3779  }
3780  }
3781  }
3782  pto->vBlockHashesToAnnounce.clear();
3783  }
3784 
3785  //
3786  // Message: inventory
3787  //
3788  std::vector<CInv> vInv;
3789  {
3790  LOCK(pto->cs_inventory);
3791  vInv.reserve(std::max<size_t>(pto->vInventoryBlockToSend.size(), INVENTORY_BROADCAST_MAX));
3792 
3793  // Add blocks
3794  for (const uint256& hash : pto->vInventoryBlockToSend) {
3795  vInv.push_back(CInv(MSG_BLOCK, hash));
3796  if (vInv.size() == MAX_INV_SZ) {
3797  connman->PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
3798  vInv.clear();
3799  }
3800  }
3801  pto->vInventoryBlockToSend.clear();
3802 
3803  if (pto->m_tx_relay != nullptr) {
3804  LOCK(pto->m_tx_relay->cs_tx_inventory);
3805  // Check whether periodic sends should happen
3806  bool fSendTrickle = pto->HasPermission(PF_NOBAN);
3807  if (pto->m_tx_relay->nNextInvSend < current_time) {
3808  fSendTrickle = true;
3809  if (pto->fInbound) {
3810  pto->m_tx_relay->nNextInvSend = std::chrono::microseconds{connman->PoissonNextSendInbound(nNow, INVENTORY_BROADCAST_INTERVAL)};
3811  } else {
3812  // Use half the delay for outbound peers, as there is less privacy concern for them.
3813  pto->m_tx_relay->nNextInvSend = PoissonNextSend(current_time, std::chrono::seconds{INVENTORY_BROADCAST_INTERVAL >> 1});
3814  }
3815  }
3816 
3817  // Time to send but the peer has requested we not relay transactions.
3818  if (fSendTrickle) {
3819  LOCK(pto->m_tx_relay->cs_filter);
3820  if (!pto->m_tx_relay->fRelayTxes) pto->m_tx_relay->setInventoryTxToSend.clear();
3821  }
3822 
3823  // Respond to BIP35 mempool requests
3824  if (fSendTrickle && pto->m_tx_relay->fSendMempool) {
3825  auto vtxinfo = m_mempool.infoAll();
3826  pto->m_tx_relay->fSendMempool = false;
3827  CFeeRate filterrate;
3828  {
3829  LOCK(pto->m_tx_relay->cs_feeFilter);
3830  filterrate = CFeeRate(pto->m_tx_relay->minFeeFilter);
3831  }
3832 
3833  LOCK(pto->m_tx_relay->cs_filter);
3834 
3835  for (const auto& txinfo : vtxinfo) {
3836  const uint256& hash = txinfo.tx->GetHash();
3837  CInv inv(MSG_TX, hash);
3838  pto->m_tx_relay->setInventoryTxToSend.erase(hash);
3839  // Don't send transactions that peers will not put into their mempool
3840  if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
3841  continue;
3842  }
3843  if (pto->m_tx_relay->pfilter) {
3844  if (!pto->m_tx_relay->pfilter->IsRelevantAndUpdate(*txinfo.tx)) continue;
3845  }
3846  pto->m_tx_relay->filterInventoryKnown.insert(hash);
3847  vInv.push_back(inv);
3848  if (vInv.size() == MAX_INV_SZ) {
3849  connman->PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
3850  vInv.clear();
3851  }
3852  }
3853  pto->m_tx_relay->m_last_mempool_req = GetTime<std::chrono::seconds>();
3854  }
3855 
3856  // Determine transactions to relay
3857  if (fSendTrickle) {
3858  // Produce a vector with all candidates for sending
3859  std::vector<std::set<uint256>::iterator> vInvTx;
3860  vInvTx.reserve(pto->m_tx_relay->setInventoryTxToSend.size());
3861  for (std::set<uint256>::iterator it = pto->m_tx_relay->setInventoryTxToSend.begin(); it != pto->m_tx_relay->setInventoryTxToSend.end(); it++) {
3862  vInvTx.push_back(it);
3863  }
3864  CFeeRate filterrate;
3865  {
3866  LOCK(pto->m_tx_relay->cs_feeFilter);
3867  filterrate = CFeeRate(pto->m_tx_relay->minFeeFilter);
3868  }
3869  // Topologically and fee-rate sort the inventory we send for privacy and priority reasons.
3870  // A heap is used so that not all items need sorting if only a few are being sent.
3871  CompareInvMempoolOrder compareInvMempoolOrder(&m_mempool);
3872  std::make_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
3873  // No reason to drain out at many times the network's capacity,
3874  // especially since we have many peers and some will draw much shorter delays.
3875  unsigned int nRelayedTransactions = 0;
3876  LOCK(pto->m_tx_relay->cs_filter);
3877  while (!vInvTx.empty() && nRelayedTransactions < INVENTORY_BROADCAST_MAX) {
3878  // Fetch the top element from the heap
3879  std::pop_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
3880  std::set<uint256>::iterator it = vInvTx.back();
3881  vInvTx.pop_back();
3882  uint256 hash = *it;
3883  // Remove it from the to-be-sent set
3884  pto->m_tx_relay->setInventoryTxToSend.erase(it);
3885  // Check if not in the filter already
3886  if (pto->m_tx_relay->filterInventoryKnown.contains(hash)) {
3887  continue;
3888  }
3889  // Not in the mempool anymore? don't bother sending it.
3890  auto txinfo = m_mempool.info(hash);
3891  if (!txinfo.tx) {
3892  continue;
3893  }
3894  // Peer told you to not send transactions at that feerate? Don't bother sending it.
3895  if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
3896  continue;
3897  }
3898  if (pto->m_tx_relay->pfilter && !pto->m_tx_relay->pfilter->IsRelevantAndUpdate(*txinfo.tx)) continue;
3899  // Send
3900  vInv.push_back(CInv(MSG_TX, hash));
3901  nRelayedTransactions++;
3902  {
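 // Keep this transaction available via mapRelay for getdata requests
 // for RELAY_TX_CACHE_TIME, even if it later leaves the mempool.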
3903  // Expire old relay messages
3904  while (!vRelayExpiration.empty() && vRelayExpiration.front().first < nNow)
3905  {
3906  mapRelay.erase(vRelayExpiration.front().second);
3907  vRelayExpiration.pop_front();
3908  }
3909 
3910  auto ret = mapRelay.insert(std::make_pair(hash, std::move(txinfo.tx)));
3911  if (ret.second) {
3912  vRelayExpiration.push_back(std::make_pair(nNow + std::chrono::microseconds{RELAY_TX_CACHE_TIME}.count(), ret.first));
3913  }
3914  }
3915  if (vInv.size() == MAX_INV_SZ) {
3916  connman->PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
3917  vInv.clear();
3918  }
3919  pto->m_tx_relay->filterInventoryKnown.insert(hash);
3920  }
3921  }
3922  }
3923  }
3924  if (!vInv.empty())
3925  connman->PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
3926 
3927  // Detect whether we're stalling
3928  current_time = GetTime<std::chrono::microseconds>();
3929  // nNow is the current system time (GetTimeMicros is not mockable) and
3930  // should be replaced by the mockable current_time eventually
3931  nNow = GetTimeMicros();
3932  if (state.nStallingSince && state.nStallingSince < nNow - 1000000 * BLOCK_STALLING_TIMEOUT) {
3933  // Stalling only triggers when the block download window cannot move. During normal steady state,
3934  // the download window should be much larger than the to-be-downloaded set of blocks, so disconnection
3935  // should only happen during initial block download.
3936  LogPrintf("Peer=%d is stalling block download, disconnecting\n", pto->GetId());
3937  pto->fDisconnect = true;
3938  return true;
3939  }
3940  // In case there is a block that has been in flight from this peer for 2 + 0.5 * N times the block interval
3941  // (with N the number of peers from which we're downloading validated blocks), disconnect due to timeout.
3942  // We compensate for other peers to prevent killing off peers due to our own downstream link
3943  // being saturated. We only count validated in-flight blocks so peers can't advertise non-existing block hashes
3944  // to unreasonably increase our timeout.
3945  if (state.vBlocksInFlight.size() > 0) {
3946  QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
3947  int nOtherPeersWithValidatedDownloads = nPeersWithValidatedDownloads - (state.nBlocksInFlightValidHeaders > 0);
3948  if (nNow > state.nDownloadingSince + consensusParams.nPowTargetSpacing * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) {
3949  LogPrintf("Timeout downloading block %s from peer=%d, disconnecting\n", queuedBlock.hash.ToString(), pto->GetId());
3950  pto->fDisconnect = true;
3951  return true;
3952  }
3953  }
3954  // Check for headers sync timeouts
3955  if (state.fSyncStarted && state.nHeadersSyncTimeout < std::numeric_limits<int64_t>::max()) {
3956  // Detect whether this is a stalling initial-headers-sync peer
3957  if (pindexBestHeader->GetBlockTime() <= GetAdjustedTime() - 24*60*60) {
3958  if (nNow > state.nHeadersSyncTimeout && nSyncStarted == 1 && (nPreferredDownload - state.fPreferredDownload >= 1)) {
3959  // Disconnect a (non-whitelisted) peer if it is our only sync peer,
3960  // and we have others we could be using instead.
3961  // Note: If all our peers are inbound, then we won't
3962  // disconnect our sync peer for stalling; we have bigger
3963  // problems if we can't get any outbound peers.
3964  if (!pto->HasPermission(PF_NOBAN)) {
3965  LogPrintf("Timeout downloading headers from peer=%d, disconnecting\n", pto->GetId());
3966  pto->fDisconnect = true;
3967  return true;
3968  } else {
3969  LogPrintf("Timeout downloading headers from whitelisted peer=%d, not disconnecting\n", pto->GetId());
3970  // Reset the headers sync state so that we have a
3971  // chance to try downloading from a different peer.
3972  // Note: this will also result in at least one more
3973  // getheaders message to be sent to
3974  // this peer (eventually).
3975  state.fSyncStarted = false;
3976  nSyncStarted--;
3977  state.nHeadersSyncTimeout = 0;
3978  }
3979  }
3980  } else {
3981  // After we've caught up once, reset the timeout so we can't trigger
3982  // disconnect later.
3983  state.nHeadersSyncTimeout = std::numeric_limits<int64_t>::max();
3984  }
3985  }
3986 
3987  // Check that outbound peers have reasonable chains
3988  // GetTime() is used by this anti-DoS logic so we can test this using mocktime
3989  ConsiderEviction(pto, GetTime());
3990 
3991  //
3992  // Message: getdata (blocks)
3993  //
3994  std::vector<CInv> vGetData;
3995  if (!pto->fClient && ((fFetch && !pto->m_limited_node) || !::ChainstateActive().IsInitialBlockDownload()) && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
3996  std::vector<const CBlockIndex*> vToDownload;
3997  NodeId staller = -1;
3998  FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller, consensusParams);
3999  for (const CBlockIndex *pindex : vToDownload) {
4000  uint32_t nFetchFlags = GetFetchFlags(pto);
4001  vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
4002  MarkBlockAsInFlight(m_mempool, pto->GetId(), pindex->GetBlockHash(), pindex);
4003  LogPrint(BCLog::NET, "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(),
4004  pindex->nHeight, pto->GetId());
4005  }
4006  if (state.nBlocksInFlight == 0 && staller != -1) {
4007  if (State(staller)->nStallingSince == 0) {
4008  State(staller)->nStallingSince = nNow;
4009  LogPrint(BCLog::NET, "Stall started peer=%d\n", staller);
4010  }
4011  }
4012  }
4013 
4014  //
4015  // Message: getdata (non-blocks)
4016  //
4017 
4018  // For robustness, expire old requests after a long timeout, so that
4019  // we can resume downloading transactions from a peer even if they
4020  // were unresponsive in the past.
4021  // Eventually we should consider disconnecting peers, but this is
4022  // conservative.
4023  if (state.m_tx_download.m_check_expiry_timer <= current_time) {
4024  for (auto it=state.m_tx_download.m_tx_in_flight.begin(); it != state.m_tx_download.m_tx_in_flight.end();) {
4025  if (it->second <= current_time - TX_EXPIRY_INTERVAL) {
4026  LogPrint(BCLog::NET, "timeout of inflight tx %s from peer=%d\n", it->first.ToString(), pto->GetId());
4027  state.m_tx_download.m_tx_announced.erase(it->first);
4028  state.m_tx_download.m_tx_in_flight.erase(it++);
4029  } else {
4030  ++it;
4031  }
4032  }
4033  // On average, we do this check every TX_EXPIRY_INTERVAL. Randomize
4034  // so that we're not doing this for all peers at the same time.
4035  state.m_tx_download.m_check_expiry_timer = current_time + TX_EXPIRY_INTERVAL / 2 + GetRandMicros(TX_EXPIRY_INTERVAL);
4036  }
4037 
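 // Walk this peer's transaction download schedule: request announced
 // transactions whose scheduled process time has arrived, subject to the
 // per-peer in-flight limit and the per-txid re-request interval.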
4038  auto& tx_process_time = state.m_tx_download.m_tx_process_time;
4039  while (!tx_process_time.empty() && tx_process_time.begin()->first <= current_time && state.m_tx_download.m_tx_in_flight.size() < MAX_PEER_TX_IN_FLIGHT) {
4040  const uint256 txid = tx_process_time.begin()->second;
4041  // Erase this entry from tx_process_time (it may be added back for
4042  // processing at a later time, see below)
4043  tx_process_time.erase(tx_process_time.begin());
4044  CInv inv(MSG_TX | GetFetchFlags(pto), txid);
4045  if (!AlreadyHave(inv, m_mempool)) {
4046  // If this transaction was last requested more than 1 minute ago,
4047  // then request.
4048  const auto last_request_time = GetTxRequestTime(inv.hash);
4049  if (last_request_time <= current_time - GETDATA_TX_INTERVAL) {
4050  LogPrint(BCLog::NET, "Requesting %s peer=%d\n", inv.ToString(), pto->GetId());
4051  vGetData.push_back(inv);
4052  if (vGetData.size() >= MAX_GETDATA_SZ) {
4053  connman->PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
4054  vGetData.clear();
4055  }
4056  UpdateTxRequestTime(inv.hash, current_time);
4057  state.m_tx_download.m_tx_in_flight.emplace(inv.hash, current_time);
4058  } else {
4059  // This transaction is in flight from someone else; queue
4060  // up processing to happen after the download times out
4061  // (with a slight delay for inbound peers, to prefer
4062  // requests to outbound peers).
4063  const auto next_process_time = CalculateTxGetDataTime(txid, current_time, !state.fPreferredDownload);
4064  tx_process_time.emplace(next_process_time, txid);
4065  }
4066  } else {
4067  // We have already seen this transaction, no need to download.
4068  state.m_tx_download.m_tx_announced.erase(inv.hash);
4069  state.m_tx_download.m_tx_in_flight.erase(inv.hash);
4070  }
4071  }
4072 
4073 
4074  if (!vGetData.empty())
4075  connman->PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
4076 
4077  //
4078  // Message: feefilter
4079  //
4080  // We don't want whitelisted peers to filter txs to us if we have -whitelistforcerelay
4081  if (pto->m_tx_relay != nullptr && pto->nVersion >= FEEFILTER_VERSION && gArgs.GetBoolArg("-feefilter", DEFAULT_FEEFILTER) &&
4082  !pto->HasPermission(PF_FORCERELAY)) {
4083  CAmount currentFilter = m_mempool.GetMinFee(gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFeePerK();
4084  int64_t timeNow = GetTimeMicros();
4085  if (timeNow > pto->m_tx_relay->nextSendTimeFeeFilter) {
4086  static CFeeRate default_feerate(DEFAULT_MIN_RELAY_TX_FEE);
4087  static FeeFilterRounder filterRounder(default_feerate);
4088  CAmount filterToSend = filterRounder.round(currentFilter);
4089  // We always have a fee filter of at least minRelayTxFee
4090  filterToSend = std::max(filterToSend, ::minRelayTxFee.GetFeePerK());
4091  if (filterToSend != pto->m_tx_relay->lastSentFeeFilter) {
4092  connman->PushMessage(pto, msgMaker.Make(NetMsgType::FEEFILTER, filterToSend));
4093  pto->m_tx_relay->lastSentFeeFilter = filterToSend;
4094  }
4095  pto->m_tx_relay->nextSendTimeFeeFilter = PoissonNextSend(timeNow, AVG_FEEFILTER_BROADCAST_INTERVAL);
4096  }
4097  // If the fee filter has changed substantially and it's still more than MAX_FEEFILTER_CHANGE_DELAY
4098  // until scheduled broadcast, then move the broadcast to within MAX_FEEFILTER_CHANGE_DELAY.
4099  else if (timeNow + MAX_FEEFILTER_CHANGE_DELAY * 1000000 < pto->m_tx_relay->nextSendTimeFeeFilter &&
4100  (currentFilter < 3 * pto->m_tx_relay->lastSentFeeFilter / 4 || currentFilter > 4 * pto->m_tx_relay->lastSentFeeFilter / 3)) {
4101  pto->m_tx_relay->nextSendTimeFeeFilter = timeNow + GetRandInt(MAX_FEEFILTER_CHANGE_DELAY) * 1000000;
4102  }
4103  }
4104  }
4105  return true;
4106 }
4107 
4109 {
4110 public:
4113  // orphan transactions
4114  mapOrphanTransactions.clear();
4115  mapOrphanTransactionsByPrev.clear();
4116  }
4117 };
std::shared_ptr< const CTransaction > CTransactionRef
Definition: transaction.h:408
arith_uint256 nChainWork
(memory only) Total amount of work (expected number of hashes) in the chain up to and including this ...
Definition: chain.h:162
static int64_t GetTransactionWeight(const CTransaction &tx)
Definition: validation.h:133
static constexpr int64_t MINIMUM_CONNECT_TIME
Minimum time an outbound-peer-eviction candidate must be connected for, in order to evict...
static constexpr std::chrono::microseconds GETDATA_TX_INTERVAL
How long to wait (in microseconds) before downloading a transaction from an additional peer...
std::string SanitizeString(const std::string &str, int rule)
Remove unsafe chars.
enum ReadStatus_t ReadStatus
const char * PING
The ping message is sent periodically to help confirm that the receiving peer is still connected...
Definition: protocol.cpp:31
CTxMemPool mempool
void Misbehaving(NodeId nodeid, int howmuch, const std::string &message="") EXCLUSIVE_LOCKS_REQUIRED(cs_main)
Increase a node&#39;s misbehavior score.
std::atomic< uint64_t > nPingNonceSent
Definition: net.h:851
bool IsArgSet(const std::string &strArg) const
Return true if the given argument has been manually set.
Definition: system.cpp:362
if(expired !=0)
Definition: validation.cpp:316
const char * FILTERLOAD
The filterload message tells the receiving peer to filter all relayed transactions and requested merk...
Definition: protocol.cpp:34
const char * MERKLEBLOCK
The merkleblock message is a reply to a getdata message which requested a block using the inventory t...
Definition: protocol.cpp:23
std::atomic_bool fPauseSend
Definition: net.h:786
static const int MAX_BLOCKTXN_DEPTH
Maximum depth of blocks we&#39;re willing to respond to GETBLOCKTXN requests for.
Definition: validation.h:94
uint64_t GetRand(uint64_t nMax) noexcept
Definition: random.cpp:585
static const int SERIALIZE_TRANSACTION_NO_WITNESS
Definition: transaction.h:15
invalid by consensus rules
int GetSendVersion() const
Definition: net.cpp:619
const char * BLOCKTXN
Contains a BlockTransactions.
Definition: protocol.cpp:42
bool fPruneMode
True if we&#39;re running in -prune mode.
Definition: validation.cpp:115
static constexpr unsigned int INVENTORY_BROADCAST_MAX
Maximum number of inventory items to send per transmission.
Definition: banman.h:41
ReadStatus FillBlock(CBlock &block, const std::vector< CTransactionRef > &vtx_missing)
ServiceFlags
nServices flags
Definition: protocol.h:243
bool IsLocal() const
Definition: netaddress.cpp:229
void SetNull()
Definition: uint256.h:38
#define LogPrint(category,...)
Definition: logging.h:179
int64_t GetBlockTime() const
Definition: chain.h:247
CConnman *const connman
Describes a place in the block chain to another node such that if the other node doesn&#39;t have the sam...
Definition: block.h:126
CBlockIndex * pprev
pointer to the index of the predecessor of this block
Definition: chain.h:144
std::vector< TxMempoolInfo > infoAll() const
Definition: txmempool.cpp:789
CSipHasher & Write(uint64_t data)
Hash a 64-bit integer worth of data It is treated as if this was the little-endian interpretation of ...
Definition: siphash.cpp:28
#define TRY_LOCK(cs, name)
Definition: sync.h:222
STL-like map container that only keeps the N elements with the highest value.
Definition: limitedmap.h:13
uint32_t nStatus
Verification status of this block. See enum BlockStatus.
Definition: chain.h:174
void scheduleEvery(Function f, std::chrono::milliseconds delta)
Repeat f until the scheduler is stopped.
Definition: scheduler.cpp:123
size_t GetAddressCount() const
Definition: net.cpp:2448
void SetIP(const CNetAddr &ip)
Definition: netaddress.cpp:28
void WakeMessageHandler()
Definition: net.cpp:1454
void SetServices(const CService &addr, ServiceFlags nServices)
Definition: net.cpp:2453
std::string ToString() const
Definition: protocol.cpp:194
Definition: block.h:72
Defined in BIP144.
Definition: protocol.h:374
uint64_t ReadCompactSize(Stream &is)
Definition: serialize.h:342
We don&#39;t have the previous block the checked one is built on.
CChain & ChainActive()
Definition: validation.cpp:90
const char * GETADDR
The getaddr message requests an addr message from the receiving node, preferably one with lots of IP ...
Definition: protocol.cpp:29
void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) override
Overridden from CValidationInterface.
int64_t nTimeExpire
static const unsigned int DEFAULT_MAX_MEMPOOL_SIZE
Default for -maxmempool, maximum megabytes of mempool memory usage.
Definition: policy.h:32
Defined in BIP152.
Definition: protocol.h:372
std::vector< uint16_t > indexes
int GetRecvVersion() const
Definition: net.h:926
#define strprintf
Format arguments and return the string or write to given std::ostream (see tinyformat::format doc for...
Definition: tinyformat.h:1164
void CheckForStaleTipAndEvictPeers(const Consensus::Params &consensusParams)
Evict extra outbound peers.
bool SendMessages(CNode *pto) override EXCLUSIVE_LOCKS_REQUIRED(pto->cs_sendProcessing)
Send queued protocol messages to be sent to a given node.
bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats)
Get statistics from node state.
size_t DynamicMemoryUsage() const
Definition: txmempool.cpp:916
reverse_range< T > reverse_iterate(T &x)
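A trivial usage sketch:
    std::vector<int> v{1, 2, 3};
    for (const int x : reverse_iterate(v)) {
        LogPrintf("%d\n", x);  // prints 3, then 2, then 1
    }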
bool ReadRawBlockFromDisk(std::vector< uint8_t > &block, const FlatFilePos &pos, const CMessageHeader::MessageStartChars &message_start)
inv message data
Definition: protocol.h:379
static const int64_t BLOCK_DOWNLOAD_TIMEOUT_BASE
Block download timeout base, expressed in millionths of the block interval (i.e.
Definition: validation.h:105
invalid proof of work or time too old
bool ReadBlockFromDisk(CBlock &block, const FlatFilePos &pos, const Consensus::Params &consensusParams)
Functions for disk access for blocks.
RecursiveMutex cs_inventory
Definition: net.h:809
const char * SENDCMPCT
Contains a 1-byte bool and 8-byte LE version number.
Definition: protocol.cpp:39
static constexpr int STALE_RELAY_AGE_LIMIT
Age after which a stale block will no longer be served if requested as protection against fingerprint...
static const unsigned int MIN_BLOCKS_TO_KEEP
Block files containing a block-height within MIN_BLOCKS_TO_KEEP of ChainActive().Tip() will not be pr...
Definition: validation.h:178
const CBlockIndex * LastCommonAncestor(const CBlockIndex *pa, const CBlockIndex *pb)
Find the last common ancestor two blocks have.
Definition: chain.cpp:156
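A sketch of a typical use, assuming both indexes are non-null: measure how far the best known header has diverged from the active tip.
    const CBlockIndex* fork = LastCommonAncestor(ChainActive().Tip(), pindexBestHeader);
    // Headers on the best-header branch beyond the last common block.
    const int headers_past_fork = pindexBestHeader->nHeight - fork->nHeight;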
transaction was missing some of its inputs
CTxMemPool & m_mempool
unsigned int nHeight
TxMempoolInfo info(const uint256 &hash) const
Definition: txmempool.cpp:812
All parent headers found, difficulty matches, timestamp >= median previous, checkpoint.
Definition: chain.h:101
bool MoneyRange(const CAmount &nValue)
Definition: amount.h:26
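In other words, an amount passes only when it is non-negative and no larger than MAX_MONEY:
    const CAmount fee = 10000;                 // satoshis
    const bool ok = MoneyRange(fee);           // true: 0 <= fee <= MAX_MONEY
    const bool bad = MoneyRange(CAmount{-1});  // false: negative amounts are never in range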
CBlockHeader GetBlockHeader() const
Definition: chain.h:220
std::vector< unsigned char > ParseHex(const char *psz)
int Height() const
Return the maximal height in the chain.
Definition: chain.h:421
static void ProcessGetData(CNode *pfrom, const CChainParams &chainparams, CConnman *connman, const CTxMemPool &mempool, const std::atomic< bool > &interruptMsgProc) LOCKS_EXCLUDED(cs_main)
Defined in BIP144.
Definition: protocol.h:373
BloomFilter is a probabilistic filter which SPV clients provide so that we can filter the transaction...
Definition: bloom.h:44
static bool BlockRequestAllowed(const CBlockIndex *pindex, const Consensus::Params &consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
bool GetTryNewOutboundPeer()
Definition: net.cpp:1686
CTransactionRef tx
unsigned long size() const
Definition: txmempool.h:677
Only first tx is coinbase, 2 <= coinbase input script length <= 100, transactions valid...
Definition: chain.h:108
static const int BIP0031_VERSION
BIP 0031, pong message, is enabled for all versions AFTER this one.
Definition: version.h:28
void PushMessage(CNode *pnode, CSerializedNetMsg &&msg)
Definition: net.cpp:2734
RecursiveMutex cs_vProcessMsg
Definition: net.h:739
arith_uint256 nMinimumChainWork
Minimum work we will assume exists on some valid chain.
Definition: validation.cpp:124
void SetVersion(int nVersionIn)
Definition: net.h:631
static void LogPrintf(const char *fmt, const Args &... args)
Definition: logging.h:163
void SetServiceFlagsIBDCache(bool state)
Set the current IBD status in order to figure out the desirable service flags.
Definition: protocol.cpp:142
RollingBloomFilter is a probabilistic "keep track of most recently inserted" set. ...
Definition: bloom.h:115
static bool IsOutboundDisconnectionCandidate(const CNode *node)
std::atomic< int64_t > nPingUsecStart
Definition: net.h:853
static bool AlreadyHave(const CInv &inv, const CTxMemPool &mempool) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
CAddress GetLocalAddress(const CNetAddr *paddrPeer, ServiceFlags nLocalServices)
Definition: net.cpp:154
static const int SENDHEADERS_VERSION
"sendheaders" command and announcing blocks with headers starts with this version ...
Definition: version.h:34
CChainParams defines various tweakable parameters of a given instance of the Bitcoin system...
Definition: chainparams.h:47
violated mempool's fee/size/descendant/RBF/etc limits
bool IsNull() const
Definition: block.h:149
Double ended buffer combining vector and stream-like interfaces.
Definition: streams.h:201
bool empty() const
Definition: streams.h:293
bool GetBoolArg(const std::string &strArg, bool fDefault) const
Return boolean argument or default value.
Definition: system.cpp:384
void SetTryNewOutboundPeer(bool flag)
Definition: net.cpp:1691
const uint32_t MSG_WITNESS_FLAG
getdata message type flags
Definition: protocol.h:358
uint64_t GetLocalNonce() const
Definition: net.h:906
bool SeenLocal(const CService &addr)
vote for a local address
Definition: net.cpp:267
std::vector< CAddress > vAddrToSend
Definition: net.h:797
void GetRandBytes(unsigned char *buf, int num) noexcept
Overall design of the RNG and entropy sources.
Definition: random.cpp:578
transaction spends a coinbase too early, or violates locktime/sequence locks
std::atomic< int > nStartingHeight
Definition: net.h:794
static const unsigned int MAX_SCRIPT_ELEMENT_SIZE
Definition: script.h:23
void PushAddress(const CAddress &_addr, FastRandomContext &insecure_rand)
Definition: net.h:956
bool ProcessNewBlock(const CChainParams &chainparams, const std::shared_ptr< const CBlock > pblock, bool fForceProcessing, bool *fNewBlock)
Process an incoming block.
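A minimal sketch following the signature above; pblock is assumed to be a std::shared_ptr<const CBlock> deserialized from a "block" message.
    bool fNewBlock = false;
    const bool accepted = ProcessNewBlock(Params(), pblock,
                                          /* fForceProcessing */ true, &fNewBlock);
    if (accepted && fNewBlock) {
        // Block was not previously known and has been handed to the chainstate.
    }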
static uint32_t GetFetchFlags(CNode *pfrom) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
constexpr Span< A > MakeSpan(A(&a)[N])
Create a span to a container exposing data() and size().
Definition: span.h:58
void SetRecvVersion(int nVersionIn)
Definition: net.h:922
const char * PONG
The pong message replies to a ping message, proving to the pinging node that the ponging node is stil...
Definition: protocol.cpp:32
unsigned char * begin()
Definition: uint256.h:54
static constexpr std::chrono::seconds RELAY_TX_CACHE_TIME
How long to cache transactions in mapRelay for normal relay.
initial value. Tx has not yet been rejected
bool IsNull() const
Definition: uint256.h:30
bool ProcessMessages(CNode *pfrom, std::atomic< bool > &interrupt) override
Process protocol messages received from a given node.
const char * HEADERS
The headers message sends one or more block headers to a node which previously requested certain head...
Definition: protocol.cpp:27
void PushInventory(const CInv &inv)
Definition: net.h:980
std::set< uint256 > orphan_work_set
Definition: net.h:861
std::atomic< ServiceFlags > nServices
Definition: net.h:729
static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT
Protect at least this many outbound peers from disconnection due to slow/ behind headers chain...
const std::vector< CTxIn > vin
Definition: transaction.h:287
void SetAddrLocal(const CService &addrLocalIn)
May not be called more than once.
Definition: net.cpp:490
std::chrono::microseconds GetRandMicros(std::chrono::microseconds duration_max) noexcept
Definition: random.cpp:590
std::deque< CInv > vRecvGetData
Definition: net.h:745
const char * INV
The inv message (inventory message) transmits one or more inventories of objects known to the transmi...
Definition: protocol.cpp:21
bool AddOrphanTx(const CTransactionRef &tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
bool ForNode(NodeId id, std::function< bool(CNode *pnode)> func)
Definition: net.cpp:2767
const std::unique_ptr< CRollingBloomFilter > m_addr_known
Definition: net.h:798
static constexpr std::chrono::microseconds MAX_GETDATA_RANDOM_DELAY
Maximum delay (in microseconds) for transaction requests to avoid biasing some peers over others...
void check(const CCoinsViewCache *pcoins) const
If sanity-checking is turned on, check makes sure the pool is consistent (does not contain two transa...
Definition: txmempool.cpp:611
static constexpr int32_t MAX_PEER_TX_IN_FLIGHT
Maximum number of in-flight transactions from a peer.
bool DisconnectNode(const std::string &node)
Definition: net.cpp:2523
static bool ProcessHeadersMessage(CNode *pfrom, CConnman *connman, CTxMemPool &mempool, const std::vector< CBlockHeader > &headers, const CChainParams &chainparams, bool via_compact_block)
static const unsigned int MAX_LOCATOR_SZ
The maximum number of entries in a locator.
Definition: net.h:57
int64_t CAmount
Amount in satoshis (Can be negative)
Definition: amount.h:12
bool ProcessMessage(CNode *pfrom, const std::string &strCommand, CDataStream &vRecv, int64_t nTimeReceived, const CChainParams &chainparams, CTxMemPool &mempool, CConnman *connman, BanMan *banman, const std::atomic< bool > &interruptMsgProc)
uint256 GetBlockHash() const
Definition: chain.h:233
bool IsAddrRelayPeer() const
Definition: net.h:803
bool IsValid(enum BlockStatus nUpTo=BLOCK_VALID_TRANSACTIONS) const
Check whether this block index entry is valid up to the passed validity level.
Definition: chain.h:282
bool done
bool fSentAddr
Definition: net.h:780
bool IsValid() const
Definition: validation.h:106
std::atomic< int64_t > nPingUsecTime
Definition: net.h:855
std::string ToString(const T &t)
Locale-independent version of std::to_string.
Definition: string.h:61
static bool MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState &state, bool via_compact_block, const std::string &message="")
Potentially ban a node based on the contents of a BlockValidationState object.
std::atomic< int64_t > nMinPingUsecTime
Definition: net.h:857
int GetMyStartingHeight() const
Definition: net.h:910
#define LOCK2(cs1, cs2)
Definition: sync.h:219
initial value. Block has not yet been rejected
static constexpr int64_t EXTRA_PEER_CHECK_INTERVAL
How frequently to check for extra outbound peers and disconnect, in seconds.
ServiceFlags GetLocalServices() const
Definition: net.h:1003
Outputs do not overspend inputs, no double spends, coinbase output ok, no immature coinbase spends...
Definition: chain.h:112
static const int64_t BLOCK_DOWNLOAD_TIMEOUT_PER_PEER
Additional block download timeout per parallel downloading peer (i.e.
Definition: validation.h:107
bool fClient
Definition: net.h:773
Used to relay blocks as header + vector<merkle branch> to filtered nodes.
Definition: merkleblock.h:133
const char * GETHEADERS
The getheaders message requests a headers message that provides block headers starting from a particu...
Definition: protocol.cpp:25
static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS
Minimum blocks required to signal NODE_NETWORK_LIMITED.
Definition: validation.h:180
bool IsBanned(CNetAddr net_addr)
Definition: banman.cpp:92
void ConsiderEviction(CNode *pto, int64_t time_in_seconds) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
Consider evicting an outbound peer based on the amount of time they've been behind our tip...
BanMan *const m_banman
size_type size() const
Definition: streams.h:292
Invalid by a change to consensus rules more recent than SegWit.
static constexpr int64_t ORPHAN_TX_EXPIRE_TIME
Expiration time for orphan transactions in seconds.
size_t nProcessQueueSize
Definition: net.h:741
Scripts & signatures ok. Implies all parents are also at least SCRIPTS.
Definition: chain.h:115
Transaction might be missing a witness, have a witness prior to SegWit activation, or have a witness that was malleated (which includes non-standard witnesses).
CFeeRate minRelayTxFee
A fee rate smaller than this is considered zero fee (for relaying, mining and transaction creation) ...
Definition: validation.cpp:126
CBlockIndex * pindexBestHeader
Best header we've seen so far (used for getheaders queries' starting points).
Definition: validation.cpp:107
std::vector< CTransactionRef > txn
static RecursiveMutex cs_most_recent_block
this block was cached as being invalid and we didn't store the reason why
bool exists(const uint256 &hash) const
Definition: txmempool.h:689
bool fOneShot
Definition: net.h:771
An input of a transaction.
Definition: transaction.h:63
static bool HasAllDesirableServiceFlags(ServiceFlags services)
A shortcut for (services & GetDesirableServiceFlags(services)) == GetDesirableServiceFlags(services)...
Definition: protocol.h:309
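Spelled out, the shortcut above checks that masking the peer's services with the desirable set changes nothing (the example flags are illustrative):
    const ServiceFlags services = ServiceFlags(NODE_NETWORK | NODE_WITNESS);
    const ServiceFlags desirable = GetDesirableServiceFlags(services);
    const bool has_all = (services & desirable) == desirable;  // == HasAllDesirableServiceFlags(services)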
bool AcceptToMemoryPool(CTxMemPool &pool, TxValidationState &state, const CTransactionRef &tx, std::list< CTransactionRef > *plTxnReplaced, bool bypass_limits, const CAmount nAbsurdFee, bool test_accept)
(try to) add transaction to memory pool plTxnReplaced will be appended to with all transactions repla...
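A sketch following the signature above; mempool and ptx (a CTransactionRef received from a peer) are assumed to be in scope, and real call sites also hold cs_main.
    TxValidationState state;
    std::list<CTransactionRef> replaced;
    const bool accepted = AcceptToMemoryPool(mempool, state, ptx, &replaced,
                                             false /* bypass_limits */,
                                             0 /* nAbsurdFee */,
                                             false /* test_accept */);
    if (!accepted) {
        LogPrint(BCLog::MEMPOOLREJ, "rejected: %s\n", state.ToString());
    }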
#define LOCK(cs)
Definition: sync.h:218
const char * name
Definition: rest.cpp:40
static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER
Number of blocks that can be requested at any given time from a single peer.
Definition: validation.h:84
const uint256 & GetHash() const
Definition: transaction.h:322
std::string ToString() const
Definition: validation.h:112
the block failed to meet one of our checkpoints
bool IsPeerAddrLocalGood(CNode *pnode)
Definition: net.cpp:174
int type
Definition: protocol.h:400
static const int INIT_PROTO_VERSION
initial proto version, to be increased after version/verack negotiation
Definition: version.h:15
bool Contains(const CBlockIndex *pindex) const
Efficiently check whether a block is present in this chain.
Definition: chain.h:408
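A typical use, assuming block_hash is some uint256 of interest (the lookup requires cs_main):
    const CBlockIndex* pindex = LookupBlockIndex(block_hash);
    const bool in_active_chain = pindex && ChainActive().Contains(pindex);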
A combination of a network address (CNetAddr) and a (TCP) port.
Definition: netaddress.h:150
Fast randomness source.
Definition: random.h:106
Transport protocol agnostic message container.
Definition: net.h:618
bool g_relay_txes
Definition: net.cpp:87
static constexpr unsigned int MAX_FEEFILTER_CHANGE_DELAY
Maximum feefilter broadcast delay after significant change.
int64_t PoissonNextSendInbound(int64_t now, int average_interval_seconds)
Attempts to obfuscate transaction timing by emitting at exponentially distributed intervals.
Definition: net.cpp:2780
bool OutboundTargetReached(bool historicalBlockServingLimit)
check if the outbound target is reached if param historicalBlockServingLimit is set true...
Definition: net.cpp:2630
static const unsigned int MAX_HEADERS_RESULTS
Number of headers sent in one getheaders result.
Definition: validation.h:89
int64_t nPowTargetSpacing
Definition: params.h:78
static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_BASE
Headers download timeout expressed in microseconds. Timeout = base + per_header * (expected number of ...
std::vector< CAddress > GetAddresses()
Definition: net.cpp:2468
CBlockIndex * Next(const CBlockIndex *pindex) const
Find the successor of a block in this chain, or nullptr if the given index is not found or is the tip...
Definition: chain.h:413
const char * SENDHEADERS
Indicates that a node prefers to receive new block announcements via a "headers" message rather than ...
Definition: protocol.cpp:37
static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER
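A sketch of the timeout formula referenced above, assuming a local consensusParams; the expected header count is roughly one header per target block spacing between the best known header and the current adjusted time.
    const Consensus::Params& consensusParams = Params().GetConsensus();
    const int64_t expected_headers =
        (GetAdjustedTime() - pindexBestHeader->GetBlockTime()) / consensusParams.nPowTargetSpacing;
    const int64_t headers_sync_timeout_us =
        GetTimeMicros() + HEADERS_DOWNLOAD_TIMEOUT_BASE +
        HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER * expected_headers;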
const char * MEMPOOL
The mempool message requests the TXIDs of transactions that the receiving node has verified as valid ...
Definition: protocol.cpp:30
NodeId fromPeer
static const unsigned int DEFAULT_MIN_RELAY_TX_FEE
Default for -minrelaytxfee, minimum relay fee for transactions.
Definition: validation.h:53
bool ActivateBestChain(BlockValidationState &state, const CChainParams &chainparams, std::shared_ptr< const CBlock > pblock)
Find the best known block, and make it the tip of the block chain.
static const int SHORT_IDS_BLOCKS_VERSION
short-id-based block download starts with this version
Definition: version.h:40
void ForEachNodeThen(Callable &&pre, CallableAfter &&post)
Definition: net.h:234
RecursiveMutex cs_main
Mutex to guard access to validation specific variables, such as reading or changing the chainstate...
Definition: validation.cpp:105
bool IsProxy(const CNetAddr &addr)
Definition: netbase.cpp:765
bool m_manual_connection
Definition: net.h:772
static constexpr int64_t CHAIN_SYNC_TIMEOUT
Timeout for (unprotected) outbound peers to sync to our chainwork, in seconds.
const std::vector< CTxOut > vout
Definition: transaction.h:288
A CService with information about it as peer.
Definition: protocol.h:322
std::map< uint256, COrphanTx > mapOrphanTransactions GUARDED_BY(g_cs_orphans)
std::vector< unsigned char > GetKey() const
Definition: netaddress.cpp:726
static int EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
static const unsigned int AVG_ADDRESS_BROADCAST_INTERVAL
Average delay between peer address broadcasts in seconds.
uint256 hash
Definition: protocol.h:401
Result GetResult() const
Definition: validation.h:109
static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY
SHA256("main address relay")[0:8].
static const bool DEFAULT_FEEFILTER
Default for using fee filter.
Definition: validation.h:120
const char * ADDR
The addr (IP address) message relays connection information for peers on the network.
Definition: protocol.cpp:20
const CMessageHeader::MessageStartChars & MessageStart() const
Definition: chainparams.h:61
int64_t NodeId
Definition: net.h:93
Definition: net.h:121
void AddNewAddresses(const std::vector< CAddress > &vAddr, const CAddress &addrFrom, int64_t nTimePenalty=0)
Definition: net.cpp:2463
bool GetNetworkActive() const
Definition: net.h:203
static const unsigned int INVENTORY_BROADCAST_INTERVAL
Average delay between trickled inventory transmissions in seconds.
const char * FILTERCLEAR
The filterclear message tells the receiving peer to remove a previously-set bloom filter...
Definition: protocol.cpp:36
bool fGetAddr
Definition: net.h:799
std::atomic_bool fImporting
std::string ToString() const
Definition: uint256.cpp:60
std::vector< uint256 > vHave
Definition: block.h:128
NodeId GetId() const
Definition: net.h:902
const char * NOTFOUND
The notfound message is a reply to a getdata message which requested an object the receiving node doe...
Definition: protocol.cpp:33
CSipHasher GetDeterministicRandomizer(uint64_t id) const
Get a unique deterministic randomizer.
Definition: net.cpp:2796
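An illustrative, simplified sketch of combining such a randomizer with the address-relay salt listed on this page to derive a per-address, per-day key (connman and addr are assumed to be in scope; the real peer-selection logic in RelayAddress is more involved).
    const uint64_t addr_hash = addr.GetHash();
    const uint64_t key = connman->GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY)
                             .Write(addr_hash)
                             .Write(GetTime() / (24 * 60 * 60))  // changes once per day
                             .Finalize();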
Parameters that influence chain consensus.
Definition: params.h:45
An outpoint - a combination of a transaction hash and an index n into its vout.
Definition: transaction.h:18
const char * BLOCK
The block message transmits a single serialized block.
Definition: protocol.cpp:28
static void SendBlockTransactions(const CBlock &block, const BlockTransactionsRequest &req, CNode *pfrom, CConnman *connman)
std::atomic_bool fDisconnect
Definition: net.h:779
std::string strSubVersion
Subversion as sent to the P2P network in version messages.
Definition: net.cpp:91
const char * FEEFILTER
The feefilter message tells the receiving peer not to inv us any txs which do not meet the specified ...
Definition: protocol.cpp:38
CFeeRate GetMinFee(size_t sizelimit) const
The minimum fee to get into the mempool, which may itself not be enough for larger-sized transactions...
Definition: txmempool.cpp:992
RecursiveMutex g_cs_orphans
void ForEachNode(Callable &&func)
Definition: net.h:214
bool IsRoutable() const
Definition: netaddress.cpp:301
uint64_t GetHash() const
Definition: netaddress.cpp:546
static bool MayHaveUsefulAddressDB(ServiceFlags services)
Checks if a peer with the given service flags may be capable of having a robust address-storage DB...
Definition: protocol.h:317
unsigned int GetReceiveFloodSize() const
Definition: net.cpp:2686
RecursiveMutex cs_SubVer
Definition: net.h:758
static const int MAX_UNCONNECTING_HEADERS
Maximum number of unconnecting headers announcements before DoS score.
Definition: validation.h:126
void Ban(const CNetAddr &net_addr, const BanReason &ban_reason, int64_t ban_time_offset=0, bool since_unix_epoch=false)
Definition: banman.cpp:121
bool CheckIncomingNonce(uint64_t nonce)
Definition: net.cpp:330
void EvictExtraOutboundPeers(int64_t time_in_seconds) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
If we have extra outbound peers, try to disconnect the one with the oldest block announcement.
void RelayTransaction(const uint256 &txid, const CConnman &connman)
Relay transaction to every node.
unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans)
const CAddress addr
Definition: net.h:754
static constexpr std::chrono::microseconds TX_EXPIRY_INTERVAL
How long to wait (in microseconds) before expiring an in-flight getdata request to a peer...
const char * GETBLOCKS
The getblocks message requests an inv message that provides block header hashes starting from a parti...
Definition: protocol.cpp:24
static constexpr int32_t MAX_PEER_TX_ANNOUNCEMENTS
Maximum number of announced transactions from a peer.
const int64_t nTimeConnected
Definition: net.h:751
int64_t GetTimeMicros()
Returns the system time (not mockable)
Definition: time.cpp:65
int64_t m_stale_tip_check_time
Next time to check for stale tip.
std::atomic_bool fReindex
const char * VERACK
The verack message acknowledges a previously-received version message, informing the connecting node ...
Definition: protocol.cpp:19
uint256 GetHash() const
Definition: block.cpp:11
CBlockIndex * LookupBlockIndex(const uint256 &hash)
Definition: validation.cpp:151
std::atomic< bool > fPingQueued
Definition: net.h:859
256-bit opaque blob.
Definition: uint256.h:120
invalid by consensus rules (excluding any below reasons)
void AddInventoryKnown(const CInv &inv)
Definition: net.h:972
bool HasWitness() const
Definition: transaction.h:354
bool IsReachable(enum Network net)
Definition: net.cpp:255
CChainState & ChainstateActive()
Definition: validation.cpp:85
static const unsigned int MAX_GETDATA_SZ
Limit to avoid sending big packets.
ServiceFlags nServices
Definition: protocol.h:351
void NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr< const CBlock > &pblock) override
Overridden from CValidationInterface.
#define EXCLUSIVE_LOCKS_REQUIRED(...)
Definition: threadsafety.h:51
std::vector< CTransactionRef > vtx
Definition: block.h:76
std::set< NodeId > setMisbehaving
const char * CMPCTBLOCK
Contains a CBlockHeaderAndShortTxIDs object - providing a header and list of "short txids"...
Definition: protocol.cpp:40
void BlockDisconnected(const std::shared_ptr< const CBlock > &block, const CBlockIndex *pindex) override
Notifies listeners of a block being disconnected.
the block's data didn't match the data committed to by the PoW
bool fFeeler
Definition: net.h:770
CTxMemPool stores valid-according-to-the-current-best-chain transactions that may be included in the ...
Definition: txmempool.h:443
std::atomic< int64_t > nLastTXTime
Definition: net.h:847
const bool fInbound
Definition: net.h:775
bool CompareDepthAndScore(const uint256 &hasha, const uint256 &hashb)
Definition: txmempool.cpp:727
#define LOCKS_EXCLUDED(...)
Definition: threadsafety.h:50
static constexpr std::chrono::microseconds INBOUND_PEER_TX_DELAY
How many microseconds to delay requesting transactions from inbound peers.
void BlockConnected(const std::shared_ptr< const CBlock > &pblock, const CBlockIndex *pindexConnected) override
Overridden from CValidationInterface.
static void RelayAddress(const CAddress &addr, bool fReachable, CConnman *connman)
static const int FEEFILTER_VERSION
"feefilter" tells peers to filter invs to you by fee starts with this version
Definition: version.h:37
const char * VERSION
The version message provides information about the transmitting node to the receiving node at the beg...
Definition: protocol.cpp:18
std::vector< std::pair< unsigned int, uint256 > > vMatchedTxn
Public only for unit testing and relay testing (not relayed).
Definition: merkleblock.h:146
The block chain is a tree shaped structure starting with the genesis block at the root...
Definition: chain.h:137
const CChainParams & Params()
Return the currently selected parameters.
uint256 hashContinue
Definition: net.h:793
void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds)
static const int MIN_PEER_PROTO_VERSION
disconnect from peers older than this proto version
Definition: version.h:21
static const int PROTOCOL_VERSION
network protocol versioning
Definition: version.h:12
static const unsigned int MAX_STANDARD_TX_WEIGHT
The maximum weight for transactions we're willing to relay/mine.
Definition: policy.h:24
bool IsTxAvailable(size_t index) const
static const unsigned int DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN
Default number of orphan+recently-replaced txn to keep around for block reconstruction.
A block this one builds on is invalid.
std::string GetArg(const std::string &strArg, const std::string &strDefault) const
Return string argument or default value.
Definition: system.cpp:372
CBlockIndex * FindForkInGlobalIndex(const CChain &chain, const CBlockLocator &locator)
Find the last common block between the parameter chain and a locator.
Definition: validation.cpp:158
bool fLogIPs
Definition: logging.cpp:35
int64_t GetAdjustedTime()
Definition: timedata.cpp:35
ServiceFlags GetDesirableServiceFlags(ServiceFlags services)
Gets the set of service flags which are "desirable" for a given peer.
Definition: protocol.cpp:135
static void ProcessGetBlockData(CNode *pfrom, const CChainParams &chainparams, const CInv &inv, CConnman *connman)
void SetSendVersion(int nVersionIn)
Definition: net.cpp:605
CBlockIndex * Tip() const
Returns the index entry for the tip of this chain, or nullptr if none.
Definition: chain.h:390
void SetBestHeight(int height)
Definition: net.cpp:2676
bool IsInitialBlockDownload() const
Check whether we are doing an initial block download (synchronizing from disk or network) ...
#define LIMITED_STRING(obj, n)
Definition: serialize.h:504
static const int CADDR_TIME_VERSION
nTime field added to CAddress, starting with this version; if possible, avoid requesting addresses no...
Definition: version.h:25
void EraseOrphansFor(NodeId peer)
static void ProcessOrphanTx(CConnman *connman, CTxMemPool &mempool, std::set< uint256 > &orphan_work_set, std::list< CTransactionRef > &removed_txn) EXCLUSIVE_LOCKS_REQUIRED(cs_main
std::atomic< int64_t > nTimeOffset
Definition: net.h:752
static const unsigned int MAX_BLOCKS_TO_ANNOUNCE
Maximum number of headers to announce when relaying blocks with headers message.
Definition: validation.h:123
int64_t PoissonNextSend(int64_t now, int average_interval_seconds)
Return a timestamp in the future (in microseconds) for exponentially distributed events.
Definition: net.cpp:2791
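A sketch of the typical pattern: compute the next send time once, then only act when the clock has passed it (INVENTORY_BROADCAST_INTERVAL, listed above, is the average used for trickled transaction invs).
    int64_t next_inv_send = 0;  // per-peer state in practice
    const int64_t now = GetTimeMicros();
    if (now > next_inv_send) {
        next_inv_send = PoissonNextSend(now, INVENTORY_BROADCAST_INTERVAL);
        // ... flush queued inventory to this peer ...
    }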
ArgsManager gArgs
Definition: system.cpp:76
const char * GETDATA
The getdata message requests one or more data objects from another node.
Definition: protocol.cpp:22
bool fListen
Definition: net.cpp:86
Fee rate in satoshis per kilobyte: CAmount / kB.
Definition: feerate.h:19
std::atomic_bool fSuccessfullyConnected
Definition: net.h:776
SipHash-2-4.
Definition: siphash.h:13
#define AssertLockNotHeld(cs)
Definition: sync.h:74
bool IsInvalid() const
Definition: validation.h:107
static const unsigned int MAX_SUBVERSION_LENGTH
Maximum length of the user agent string in version message.
Definition: net.h:63
std::atomic< int > nVersion
Definition: net.h:757
Invalid by a change to consensus rules more recent than SegWit.
bool IsWitnessEnabled(const CBlockIndex *pindexPrev, const Consensus::Params &params)
Check whether witness commitments are required for a block, and whether to enforce NULLDUMMY (BIP 147...
static size_t RecursiveDynamicUsage(const CScript &script)
Definition: core_memusage.h:12
ReadStatus InitData(const CBlockHeaderAndShortTxIDs &cmpctblock, const std::vector< std::pair< uint256, CTransactionRef >> &extra_txn)
void FinalizeNode(NodeId nodeid, bool &fUpdateConnectionTime) override
Handle removal of a peer by updating various state and removing it from mapNodeState.
static const int PING_INTERVAL
Time between pings automatically sent out for latency probing and keepalive (in seconds).
Definition: net.h:49
static CNetProcessingCleanup instance_of_cnetprocessingcleanup
bool m_limited_node
Definition: net.h:774
std::string ToString() const
Definition: netaddress.cpp:750
static const int NO_BLOOM_VERSION
"filter*" commands are disabled without NODE_BLOOM after and including this version ...
Definition: version.h:31
block timestamp was > 2 hours in the future (or our clock is bad)
int GetExtraOutboundCount()
Definition: net.cpp:1703
static const unsigned int DEFAULT_BANSCORE_THRESHOLD
Definition: validation.h:116
static constexpr unsigned int AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL
Average delay between local address broadcasts in seconds.
static const unsigned int BLOCK_STALLING_TIMEOUT
Timeout in seconds during which a peer must stall block download progress before being disconnected...
Definition: validation.h:86
static const int MAX_CMPCTBLOCK_DEPTH
Maximum depth of blocks we're willing to serve as compact blocks to peers when requested.
Definition: validation.h:92
const char * TX
The tx message transmits a single transaction.
Definition: protocol.cpp:26
The basic transaction that is broadcasted on the network and contained in blocks. ...
Definition: transaction.h:270
void MarkAddressGood(const CAddress &addr)
Definition: net.cpp:2458
int nHeight
height of the entry in the chain. The genesis block has height 0
Definition: chain.h:150
Information about a peer.
Definition: net.h:721
const Consensus::Params & GetConsensus() const
Definition: chainparams.h:60
std::vector< int > vHeightInFlight
CAmount round(CAmount currentMinFee)
Quantize a minimum fee for privacy purposes before broadcast.
Definition: fees.cpp:971
bool CheckIfBanned(CNode *pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
full block available in blk*.dat
Definition: chain.h:121
bool ProcessNewBlockHeaders(const std::vector< CBlockHeader > &headers, BlockValidationState &state, const CChainParams &chainparams, const CBlockIndex **ppindex)
Process incoming block headers.
std::string GetAddrName() const
Definition: net.cpp:473
void AddTimeData(const CNetAddr &ip, int64_t nOffsetSample)
Definition: timedata.cpp:47
static void AddToCompactExtraTransactions(const CTransactionRef &tx) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
AssertLockHeld(g_cs_orphans)
void AddAddressKnown(const CAddress &_addr)
Definition: net.h:950
void InitializeNode(CNode *pnode) override
Initialize a peer by adding it to mapNodeState and pushing a message requesting its version...
int64_t GetTime()
Return system time (or mocked time, if set)
Definition: time.cpp:23
static const unsigned int DEFAULT_MAX_ORPHAN_TRANSACTIONS
Default for -maxorphantx, maximum number of orphan transactions kept in memory.
static const int INVALID_CB_NO_BAN_VERSION
not banning for invalid compact blocks starts with this version
Definition: version.h:43
COutPoint prevout
Definition: transaction.h:66
std::atomic_bool fPauseRecv
Definition: net.h:785
int GetRandInt(int nMax) noexcept
Definition: random.cpp:595
bool HasPermission(NetPermissionFlags permission) const
Definition: net.h:765
static const unsigned int MAX_INV_SZ
The maximum number of entries in an 'inv' protocol message.
Definition: net.h:55
static constexpr int HISTORICAL_BLOCK_AGE
Age after which a block is considered historical for purposes of rate limiting block relay...
std::atomic< int64_t > nLastBlockTime
Definition: net.h:846
Tx already in mempool or conflicts with a tx in the chain (if it conflicts with another tx in mempool...
static constexpr int64_t STALE_CHECK_INTERVAL
How frequently to check for stale tips, in seconds.
didn't meet our local policy rules
bool HaveTxsDownloaded() const
Check whether this block's and all previous blocks' transactions have been downloaded (and stored to ...
Definition: chain.h:245
static constexpr int64_t ORPHAN_TX_EXPIRE_INTERVAL
Minimum time between orphan transactions expire time checks in seconds.
void BlockChecked(const CBlock &block, const BlockValidationState &state) override
Overridden from CValidationInterface.
CAmount GetFeePerK() const
Return the fee in satoshis for a size of 1000 bytes.
Definition: feerate.h:41
uint64_t randrange(uint64_t range) noexcept
Generate a random integer in the range [0..range).
Definition: random.h:177
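Minimal sketches of the two bounded-randomness helpers listed on this page:
    FastRandomContext rng;
    const uint64_t r = rng.randrange(100);                                             // uniform in [0, 100)
    const std::chrono::microseconds jitter = GetRandMicros(MAX_GETDATA_RANDOM_DELAY);  // uniform in [0, max)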
void AdvertiseLocal(CNode *pnode)
Definition: net.cpp:182
bool GetUseAddrmanOutgoing() const
Definition: net.h:204
unsigned int nTx
Number of transactions in this block.
Definition: chain.h:166
Nodes collect new transactions into a block, hash them into a hash tree, and scan through nonce value...
Definition: block.h:20
static const unsigned int BLOCK_DOWNLOAD_WINDOW
Size of the "block download window": how far ahead of our current height do we fetch? Larger windows tolerate larger download speed differences between peers, but increase the potential degree of disordering of blocks on disk (which makes reindexing and pruning harder).
Definition: validation.h:99
int in_avail() const
Definition: streams.h:390
Defined in BIP37.
Definition: protocol.h:371
const char * FILTERADD
The filteradd message tells the receiving peer to add a single element to a previously-set bloom filt...
Definition: protocol.cpp:35
PeerLogicValidation(CConnman *connman, BanMan *banman, CScheduler &scheduler, CTxMemPool &pool)
int64_t GetBlockProofEquivalentTime(const CBlockIndex &to, const CBlockIndex &from, const CBlockIndex &tip, const Consensus::Params &params)
Return the time it would take to redo the work difference between from and to, assuming the current h...
Definition: chain.cpp:137
size_t list_pos
const char * GETBLOCKTXN
Contains a BlockTransactionsRequest Peer should respond with "blocktxn" message.
Definition: protocol.cpp:41
std::unique_ptr< TxRelay > m_tx_relay
Definition: net.h:840
CAmount GetFee(size_t nBytes) const
Return the fee in satoshis for the given size in bytes.
Definition: feerate.cpp:23
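A worked sketch: a feerate of 1000 satoshis per 1000 bytes (1 sat/byte) applied to a 250-byte transaction.
    const CFeeRate feerate(1000);                // satoshis per kilobyte
    const CAmount fee = feerate.GetFee(250);     // 250 satoshis
    const CAmount per_k = feerate.GetFeePerK();  // 1000 satoshis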
uint256 hash
Definition: transaction.h:21
static bool MaybePunishNodeForTx(NodeId nodeid, const TxValidationState &state, const std::string &message="")
Potentially ban a node based on the contents of a TxValidationState object.
static constexpr unsigned int AVG_FEEFILTER_BROADCAST_INTERVAL
Average delay between feefilter broadcasts in seconds.