Bitcoin Core  0.21.1
P2P Digital Currency
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Properties Friends Macros Modules
net_processing.cpp
Go to the documentation of this file.
1 // Copyright (c) 2009-2010 Satoshi Nakamoto
2 // Copyright (c) 2009-2020 The Bitcoin Core developers
3 // Distributed under the MIT software license, see the accompanying
4 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
5 
6 #include <net_processing.h>
7 
8 #include <addrman.h>
9 #include <banman.h>
10 #include <blockencodings.h>
11 #include <blockfilter.h>
12 #include <chainparams.h>
13 #include <consensus/validation.h>
14 #include <hash.h>
15 #include <index/blockfilterindex.h>
16 #include <merkleblock.h>
17 #include <netbase.h>
18 #include <netmessagemaker.h>
19 #include <policy/fees.h>
20 #include <policy/policy.h>
21 #include <primitives/block.h>
22 #include <primitives/transaction.h>
23 #include <random.h>
24 #include <reverse_iterator.h>
25 #include <scheduler.h>
26 #include <streams.h>
27 #include <tinyformat.h>
28 #include <txmempool.h>
29 #include <util/check.h> // For NDEBUG compile time check
30 #include <util/strencodings.h>
31 #include <util/system.h>
32 #include <validation.h>
33 
34 #include <memory>
35 #include <typeinfo>
36 
/** Expiration time for orphan transactions, in seconds (see COrphanTx::nTimeExpire). */
static constexpr int64_t ORPHAN_TX_EXPIRE_TIME = 20 * 60;
/** Minimum time between orphan-pool expiry sweeps, in seconds (see LimitOrphanTxSize). */
static constexpr int64_t ORPHAN_TX_EXPIRE_INTERVAL = 5 * 60;
/** How long to cache transactions in mapRelay for normal relay. */
static constexpr std::chrono::seconds RELAY_TX_CACHE_TIME = std::chrono::minutes{15};
/** How long a transaction has to be in the mempool before it can unconditionally be
 *  relayed (see the INVENTORY_MAX_RECENT_RELAY static_assert below). */
static constexpr std::chrono::seconds UNCONDITIONAL_RELAY_DELAY = std::chrono::minutes{2};
/** Headers download timeout base, expressed in microseconds. */
static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_BASE = 15 * 60 * 1000000; // 15 minutes
/** Additional headers download timeout per expected header, in microseconds. */
static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1000; // 1ms/header
/** Maximum number of outbound peers protected from disconnection for stale-chain
 *  reasons (see CNodeState::ChainSyncTimeoutState::m_protect). */
static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT = 4;
/** Timeout for (unprotected) outbound peers to sync to our chainwork, in seconds. */
static constexpr int64_t CHAIN_SYNC_TIMEOUT = 20 * 60; // 20 minutes
/** How frequently to check for stale tips, in seconds (see TipMayBeStale). */
static constexpr int64_t STALE_CHECK_INTERVAL = 10 * 60; // 10 minutes
/** How frequently to check for extra outbound peers and disconnect, in seconds. */
static constexpr int64_t EXTRA_PEER_CHECK_INTERVAL = 45;
/** Minimum time an outbound connection is left up before eligible for eviction, in seconds. */
static constexpr int64_t MINIMUM_CONNECT_TIME = 30;
/** SHA256("main address relay")[0:8] — salt for deterministic address-relay randomization. */
static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL;
/** Age after which a stale block will no longer be served if requested (seconds). */
static constexpr int STALE_RELAY_AGE_LIMIT = 30 * 24 * 60 * 60;
/** Age after which a block is considered historical for upload-purposes (seconds). */
static constexpr int HISTORICAL_BLOCK_AGE = 7 * 24 * 60 * 60;
/** Time between pings automatically sent out for latency probing and keepalive. */
static constexpr std::chrono::minutes PING_INTERVAL{2};
/** The maximum number of entries in a locator. */
static const unsigned int MAX_LOCATOR_SZ = 101;
/** The maximum number of entries in an 'inv' protocol message. */
static const unsigned int MAX_INV_SZ = 50000;
/** Maximum number of in-flight transaction requests from a peer (peers with the
 *  relay permission are exempt; see AddTxAnnouncement). */
static constexpr int32_t MAX_PEER_TX_REQUEST_IN_FLIGHT = 100;
/** Maximum number of queued transaction announcements tracked per peer
 *  (relay-permission peers are exempt; see AddTxAnnouncement). */
static constexpr int32_t MAX_PEER_TX_ANNOUNCEMENTS = 5000;
/** How long to delay requesting a tx announced by txid while wtxid-relay peers are available. */
static constexpr auto TXID_RELAY_DELAY = std::chrono::seconds{2};
/** How long to delay requesting a tx announced by a non-preferred peer. */
static constexpr auto NONPREF_PEER_TX_DELAY = std::chrono::seconds{2};
/** How long to delay requesting a tx from a peer that is over its in-flight limit. */
static constexpr auto OVERLOADED_PEER_TX_DELAY = std::chrono::seconds{2};
/** How long to wait for a requested transaction before considering the request timed out. */
static constexpr std::chrono::microseconds GETDATA_TX_INTERVAL{std::chrono::seconds{60}};
/** Limit on the number of items processed from a peer's getdata queue per pass. */
static const unsigned int MAX_GETDATA_SZ = 1000;
/** Number of blocks that can be requested at any given time from a single peer. */
static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16;
/** Timeout after which a peer stalling block download is disconnected
 *  (units not visible in this chunk — presumably seconds). */
static const unsigned int BLOCK_STALLING_TIMEOUT = 2;
/** Number of headers sent in one getheaders result. */
static const unsigned int MAX_HEADERS_RESULTS = 2000;
/** Maximum depth (from the tip) at which a compact block may be requested. */
static const int MAX_CMPCTBLOCK_DEPTH = 5;
/** Maximum depth (from the tip) at which a blocktxn request may be answered. */
static const int MAX_BLOCKTXN_DEPTH = 10;
/** Size of the "block download window": how far ahead of our common height with a
 *  peer we will fetch blocks (see FindNextBlocksToDownload). */
static const unsigned int BLOCK_DOWNLOAD_WINDOW = 1024;
/** Block download timeout base, expressed in microseconds (per upstream convention). */
static const int64_t BLOCK_DOWNLOAD_TIMEOUT_BASE = 1000000;
/** Additional block download timeout per parallel downloading peer, in microseconds. */
static const int64_t BLOCK_DOWNLOAD_TIMEOUT_PER_PEER = 500000;
/** Maximum number of headers to announce when relaying blocks with headers message. */
static const unsigned int MAX_BLOCKS_TO_ANNOUNCE = 8;
/** Maximum number of unconnecting headers announcements before applying a penalty. */
static const int MAX_UNCONNECTING_HEADERS = 10;
/** Minimum blocks a NODE_NETWORK_LIMITED peer is expected to serve (per BIP159). */
static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS = 288;
/** Average delay between local address broadcasts. */
static constexpr std::chrono::hours AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL{24};
/** Average delay between peer address broadcasts. */
static constexpr std::chrono::seconds AVG_ADDRESS_BROADCAST_INTERVAL{30};
/** Average delay between trickled inventory transmissions, in seconds. */
static const unsigned int INVENTORY_BROADCAST_INTERVAL = 5;
/** Maximum rate of inventory items to send per second. */
static constexpr unsigned int INVENTORY_BROADCAST_PER_SECOND = 7;
/** The number of most recently announced transactions a peer can request
 *  (sized to cover UNCONDITIONAL_RELAY_DELAY at the broadcast rate; checked below). */
static constexpr unsigned int INVENTORY_MAX_RECENT_RELAY = 3500;
// Compile-time check that the recent-relay filter covers the full unconditional-relay window.
static_assert(INVENTORY_MAX_RECENT_RELAY >= INVENTORY_BROADCAST_PER_SECOND * UNCONDITIONAL_RELAY_DELAY / std::chrono::seconds{1}, "INVENTORY_RELAY_MAX too low");
/** Average delay between feefilter broadcasts, in seconds. */
static constexpr unsigned int AVG_FEEFILTER_BROADCAST_INTERVAL = 10 * 60;
/** Maximum feefilter broadcast delay after a significant fee change, in seconds. */
static constexpr unsigned int MAX_FEEFILTER_CHANGE_DELAY = 5 * 60;
/** Maximum number of compact filters that may be requested with one getcfilters. */
static constexpr uint32_t MAX_GETCFILTERS_SIZE = 1000;
/** Maximum number of cf headers that may be requested with one getcfheaders. */
static constexpr uint32_t MAX_GETCFHEADERS_SIZE = 2000;
/** The maximum percentage of addresses from our addrman to return in response to a getaddr message. */
static constexpr size_t MAX_PCT_ADDR_TO_SEND = 23;
/** Data kept for each entry in the orphan-transaction pool (a tx whose inputs
 *  reference transactions we do not yet have). */
struct COrphanTx {
    // When modifying, adapt the copy of this definition in tests/DoS_tests.
    CTransactionRef tx;   //!< The orphan transaction itself.
    NodeId fromPeer;      //!< Peer the orphan was received from (see EraseOrphansFor).
    int64_t nTimeExpire;  //!< Absolute time (GetTime()) after which the entry may be swept.
    size_t list_pos;      //!< This entry's index in g_orphan_list (enables O(1) swap-remove).
};
157 
/** Orphan transactions, keyed by txid. Guarded by g_cs_orphans. */
std::map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(g_cs_orphans);
/** Index into mapOrphanTransactions by wtxid, so orphans can also be looked up
 *  by their witness hash. Guarded by g_cs_orphans. */
std::map<uint256, std::map<uint256, COrphanTx>::iterator> g_orphans_by_wtxid GUARDED_BY(g_cs_orphans);

void EraseOrphansFor(NodeId peer);
168 
// Internal stuff
namespace {
    /** Number of peers with which we have started headers synchronization (fSyncStarted). */
    int nSyncStarted GUARDED_BY(cs_main) = 0;

    /** Per-block source tracking: block hash -> (peer id, flag). The flag's
     *  semantics are defined at the use sites, which are outside this chunk. */
    std::map<uint256, std::pair<NodeId, bool>> mapBlockSource GUARDED_BY(cs_main);

    /** Rolling bloom filter of recently rejected transaction ids — presumably
     *  reset when the chain tip changes (see hashRecentRejectsChainTip); the
     *  reset logic is outside this chunk. */
    std::unique_ptr<CRollingBloomFilter> recentRejects GUARDED_BY(cs_main);
    uint256 hashRecentRejectsChainTip GUARDED_BY(cs_main);

    /*
     * Filter for transactions that have been recently confirmed.
     * We use this to avoid requesting transactions that have already been
     * confirmed.
     */
    Mutex g_cs_recent_confirmed_transactions;
    std::unique_ptr<CRollingBloomFilter> g_recent_confirmed_transactions GUARDED_BY(g_cs_recent_confirmed_transactions);

    /** Per-request state for a block that is currently being downloaded. */
    struct QueuedBlock {
        uint256 hash;
        const CBlockIndex* pindex;                              //!< May be nullptr (see MarkBlockAsInFlight's default).
        bool fValidatedHeaders;                                 //!< Whether we had a validated header (pindex != nullptr) at request time.
        std::unique_ptr<PartiallyDownloadedBlock> partialBlock; //!< Only allocated for compact-block style requests (pit != nullptr).
    };
    /** Block hash -> (requesting peer, that peer's vBlocksInFlight entry). Guarded by cs_main. */
    std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> > mapBlocksInFlight GUARDED_BY(cs_main);

    /** List of peers we have asked to announce blocks via compact blocks
     *  (oldest first; see MaybeSetPeerAsAnnouncingHeaderAndIDs). */
    std::list<NodeId> lNodesAnnouncingHeaderAndIDs GUARDED_BY(cs_main);

    /** Number of peers with fPreferredDownload set (see UpdatePreferredDownload). */
    int nPreferredDownload GUARDED_BY(cs_main) = 0;

    /** Number of peers from which we're currently downloading validated-header blocks. */
    int nPeersWithValidatedDownloads GUARDED_BY(cs_main) = 0;

    /** Number of peers with m_wtxid_relay set. */
    int g_wtxid_relay_peers GUARDED_BY(cs_main) = 0;

    /** Number of outbound peers with m_chain_sync.m_protect set. */
    int g_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0;

    /** Unix time our tip was last updated; 0 until first initialized (see TipMayBeStale). */
    std::atomic<int64_t> g_last_tip_update(0);

    /** Relay cache: transaction hash -> transaction. */
    typedef std::map<uint256, CTransactionRef> MapRelay;
    MapRelay mapRelay GUARDED_BY(cs_main);
    /** Expiration-time-ordered list of (expire time, relay map entry) pairs. */
    std::deque<std::pair<int64_t, MapRelay::iterator>> vRelayExpiration GUARDED_BY(cs_main);

    /** Orders map iterators by the address of the element they point to, giving
     *  a stable (but arbitrary) ordering for sets of iterators. */
    struct IteratorComparator
    {
        template<typename I>
        bool operator()(const I& a, const I& b) const
        {
            return &(*a) < &(*b);
        }
    };

    /** Index from spent outpoint to the set of orphans spending it. Guarded by g_cs_orphans. */
    std::map<COutPoint, std::set<std::map<uint256, COrphanTx>::iterator, IteratorComparator>> mapOrphanTransactionsByPrev GUARDED_BY(g_cs_orphans);
    /** Flat list of orphan-map iterators, enabling O(1) random eviction
     *  (each entry's index is mirrored in COrphanTx::list_pos). */
    std::vector<std::map<uint256, COrphanTx>::iterator> g_orphan_list GUARDED_BY(g_cs_orphans);

    /** Fixed-size ring buffer of (wtxid, tx) pairs kept for compact block
     *  reconstruction (see AddToCompactExtraTransactions). */
    static std::vector<std::pair<uint256, CTransactionRef>> vExtraTxnForCompact GUARDED_BY(g_cs_orphans);
    /** Next write position in the vExtraTxnForCompact ring buffer. */
    static size_t vExtraTxnForCompactIt GUARDED_BY(g_cs_orphans) = 0;
} // namespace
282 
283 namespace {
/**
 * Validation-specific state kept per peer. Protected by cs_main (via
 * mapNodeState below) rather than by CNode's own locks.
 */
struct CNodeState {
    //! The peer's address.
    const CService address;
    //! Whether we have a fully established connection.
    bool fCurrentlyConnected;
    //! The best known block this peer has announced.
    const CBlockIndex *pindexBestKnownBlock;
    //! The hash of the last block this peer announced that we do not have
    //! (resolved in ProcessBlockAvailability).
    uint256 hashLastUnknownBlock;
    //! The last block we both have (see FindNextBlocksToDownload).
    const CBlockIndex *pindexLastCommonBlock;
    //! The best header we have sent to this peer.
    const CBlockIndex *pindexBestHeaderSent;
    //! Length of the current streak of unconnecting headers announcements.
    int nUnconnectingHeaders;
    //! Whether we've started headers synchronization with this peer.
    bool fSyncStarted;
    //! When to potentially disconnect this peer for stalling headers download
    //! (microseconds, per HEADERS_DOWNLOAD_TIMEOUT_BASE above).
    int64_t nHeadersSyncTimeout;
    //! Since when we're stalling block download progress, or 0 when not stalling
    //! (see MarkBlockAsReceived).
    int64_t nStallingSince;
    //! Blocks currently in flight from this peer.
    std::list<QueuedBlock> vBlocksInFlight;
    //! When the first entry in vBlocksInFlight started downloading
    //! (microseconds); irrelevant while vBlocksInFlight is empty.
    int64_t nDownloadingSince;
    //! Count of entries in vBlocksInFlight.
    int nBlocksInFlight;
    //! How many of those in-flight blocks had validated headers when requested.
    int nBlocksInFlightValidHeaders;
    //! Whether we consider this a preferred download peer (see UpdatePreferredDownload).
    bool fPreferredDownload;
    //! Whether this peer prefers headers (rather than invs) for block announcements.
    bool fPreferHeaders;
    //! Whether this peer prefers cmpctblock announcements.
    bool fPreferHeaderAndIDs;
    //! Whether this peer can serve us compact blocks.
    bool fProvidesHeaderAndIDs;
    //! Whether this peer can serve witness data.
    bool fHaveWitness;
    //! Whether this peer wants witnesses in compact blocks / blocktxns.
    bool fWantsCmpctWitness;
    //! Whether this peer supports the compact-block version we want
    //! (gates MaybeSetPeerAsAnnouncingHeaderAndIDs).
    bool fSupportsDesiredCmpctVersion;

    /** State used to enforce CHAIN_SYNC_TIMEOUT and disconnect outbound peers
     *  on a chain with insufficient work. */
    struct ChainSyncTimeoutState {
        //! Scheduled timeout for checking the peer's sync progress; 0 = none scheduled.
        int64_t m_timeout;
        //! A header with sufficient work that the peer's chain should contain.
        const CBlockIndex * m_work_header;
        //! Whether we have sent a getheaders after the timeout was reached.
        bool m_sent_getheaders;
        //! Whether this peer is protected from chain-sync disconnection
        //! (counted in g_outbound_peers_with_protect_from_disconnect).
        bool m_protect;
    };

    ChainSyncTimeoutState m_chain_sync;

    //! Time of the last new-block announcement from this peer (seconds; see
    //! UpdateLastBlockAnnounceTime).
    int64_t m_last_block_announcement;

    //! Whether this peer is an inbound connection.
    bool m_is_inbound;

    //! Whether this peer is a manual connection.
    bool m_is_manual_connection;

    //! Rolling bloom filter of the transaction invs we recently announced to
    //! this peer (sized by INVENTORY_MAX_RECENT_RELAY).
    CRollingBloomFilter m_recently_announced_invs = CRollingBloomFilter{INVENTORY_MAX_RECENT_RELAY, 0.000001};

    //! Whether this peer relays transactions by wtxid (counted in g_wtxid_relay_peers).
    bool m_wtxid_relay{false};

    CNodeState(CAddress addrIn, bool is_inbound, bool is_manual)
        : address(addrIn), m_is_inbound(is_inbound), m_is_manual_connection(is_manual)
    {
        fCurrentlyConnected = false;
        pindexBestKnownBlock = nullptr;
        hashLastUnknownBlock.SetNull();
        pindexLastCommonBlock = nullptr;
        pindexBestHeaderSent = nullptr;
        nUnconnectingHeaders = 0;
        fSyncStarted = false;
        nHeadersSyncTimeout = 0;
        nStallingSince = 0;
        nDownloadingSince = 0;
        nBlocksInFlight = 0;
        nBlocksInFlightValidHeaders = 0;
        fPreferredDownload = false;
        fPreferHeaders = false;
        fPreferHeaderAndIDs = false;
        fProvidesHeaderAndIDs = false;
        fHaveWitness = false;
        fWantsCmpctWitness = false;
        fSupportsDesiredCmpctVersion = false;
        m_chain_sync = { 0, nullptr, false, false };
        m_last_block_announcement = 0;
        m_recently_announced_invs.reset();
    }
};
417 
/** Map maintaining per-peer validation state. Guarded by cs_main. */
static std::map<NodeId, CNodeState> mapNodeState GUARDED_BY(cs_main);
420 
421 static CNodeState *State(NodeId pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
422  std::map<NodeId, CNodeState>::iterator it = mapNodeState.find(pnode);
423  if (it == mapNodeState.end())
424  return nullptr;
425  return &it->second;
426 }
427 
/**
 * Per-peer application state not protected by cs_main; each member documents
 * its own lock (or lack thereof).
 */
struct Peer {
    //! The peer's network id.
    const NodeId m_id{0};

    //! Protects the misbehavior members below.
    Mutex m_misbehavior_mutex;
    //! Accumulated misbehavior score for this peer.
    int m_misbehavior_score GUARDED_BY(m_misbehavior_mutex){0};
    //! Whether this peer should be discouraged (flag consumed elsewhere, outside this chunk).
    bool m_should_discourage GUARDED_BY(m_misbehavior_mutex){false};

    //! Set of transaction ids queued for further work for this peer — presumably
    //! orphans whose parents arrived; exact use is outside this chunk.
    std::set<uint256> m_orphan_work_set GUARDED_BY(g_cs_orphans);

    //! Protects m_getdata_requests.
    Mutex m_getdata_requests_mutex;
    //! Work queue of inventory items requested by this peer via getdata.
    std::deque<CInv> m_getdata_requests GUARDED_BY(m_getdata_requests_mutex);

    Peer(NodeId id) : m_id(id) {}
};
459 
using PeerRef = std::shared_ptr<Peer>;

/**
 * Map of all Peer objects, keyed by peer id. Guarded by g_peer_mutex.
 * Entries are inserted in InitializeNode and erased in FinalizeNode; because
 * the map stores shared_ptrs, a Peer obtained via GetPeerRef stays alive even
 * after removal from the map.
 */
Mutex g_peer_mutex;
static std::map<NodeId, PeerRef> g_peer_map GUARDED_BY(g_peer_mutex);
470 
473 static PeerRef GetPeerRef(NodeId id)
474 {
475  LOCK(g_peer_mutex);
476  auto it = g_peer_map.find(id);
477  return it != g_peer_map.end() ? it->second : nullptr;
478 }
479 
480 static void UpdatePreferredDownload(const CNode& node, CNodeState* state) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
481 {
482  nPreferredDownload -= state->fPreferredDownload;
483 
484  // Whether this node should be marked as a preferred download node.
485  state->fPreferredDownload = (!node.IsInboundConn() || node.HasPermission(PF_NOBAN)) && !node.IsAddrFetchConn() && !node.fClient;
486 
487  nPreferredDownload += state->fPreferredDownload;
488 }
489 
/** Send our VERSION message to the given peer, advertising our services,
 *  starting height, and whether we want transaction relay from it. */
static void PushNodeVersion(CNode& pnode, CConnman& connman, int64_t nTime)
{
    // Note that pnode->GetLocalServices() is a reflection of the local
    // services we were offering when the CNode object was created for this
    // peer.
    ServiceFlags nLocalNodeServices = pnode.GetLocalServices();
    uint64_t nonce = pnode.GetLocalNonce();
    int nNodeStartingHeight = pnode.GetMyStartingHeight();
    NodeId nodeid = pnode.GetId();
    CAddress addr = pnode.addr;

    // Echo the peer's own address back only if it is routable, not a proxy,
    // and representable in the pre-BIP155 (addrv1) format; otherwise send a
    // blank address with the same service bits.
    CAddress addrYou = addr.IsRoutable() && !IsProxy(addr) && addr.IsAddrV1Compatible() ?
                           addr :
                           CAddress(CService(), addr.nServices);
    CAddress addrMe = CAddress(CService(), nLocalNodeServices);

    connman.PushMessage(&pnode, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERSION, PROTOCOL_VERSION, (uint64_t)nLocalNodeServices, nTime, addrYou, addrMe,
            nonce, strSubVersion, nNodeStartingHeight, ::g_relay_txes && pnode.m_tx_relay != nullptr));

    // Only include peer addresses in the log when -logips is enabled.
    if (fLogIPs) {
        LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, us=%s, them=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), addrYou.ToString(), nodeid);
    } else {
        LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, us=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), nodeid);
    }
}
515 
// Returns a bool indicating whether we requested this block.
// Also used if a block was /not/ received and timed out or started with another peer.
// Removes the in-flight entry and keeps all related counters consistent.
static bool MarkBlockAsReceived(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
    std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
    if (itInFlight != mapBlocksInFlight.end()) {
        CNodeState *state = State(itInFlight->second.first);
        assert(state != nullptr);
        state->nBlocksInFlightValidHeaders -= itInFlight->second.second->fValidatedHeaders;
        if (state->nBlocksInFlightValidHeaders == 0 && itInFlight->second.second->fValidatedHeaders) {
            // Last validated block on the queue was received.
            nPeersWithValidatedDownloads--;
        }
        if (state->vBlocksInFlight.begin() == itInFlight->second.second) {
            // First block on the queue was received, update the start download time for the next one
            state->nDownloadingSince = std::max(state->nDownloadingSince, count_microseconds(GetTime<std::chrono::microseconds>()));
        }
        // Drop the queue entry, decrement the in-flight count, and clear any
        // stalling marker for this peer.
        state->vBlocksInFlight.erase(itInFlight->second.second);
        state->nBlocksInFlight--;
        state->nStallingSince = 0;
        mapBlocksInFlight.erase(itInFlight);
        return true;
    }
    return false;
}
540 
// returns false, still setting pit, if the block was already in flight from the same peer
// pit will only be valid as long as the same cs_main lock is being held
static bool MarkBlockAsInFlight(CTxMemPool& mempool, NodeId nodeid, const uint256& hash, const CBlockIndex* pindex = nullptr, std::list<QueuedBlock>::iterator** pit = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
    CNodeState *state = State(nodeid);
    assert(state != nullptr);

    // Short-circuit most stuff in case it is from the same node
    std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
    if (itInFlight != mapBlocksInFlight.end() && itInFlight->second.first == nodeid) {
        if (pit) {
            *pit = &itInFlight->second.second;
        }
        return false;
    }

    // Make sure it's not listed somewhere already.
    MarkBlockAsReceived(hash);

    // fValidatedHeaders mirrors (pindex != nullptr); a PartiallyDownloadedBlock
    // is only allocated when the caller asked for a pit handle.
    std::list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(),
            {hash, pindex, pindex != nullptr, std::unique_ptr<PartiallyDownloadedBlock>(pit ? new PartiallyDownloadedBlock(&mempool) : nullptr)});
    state->nBlocksInFlight++;
    state->nBlocksInFlightValidHeaders += it->fValidatedHeaders;
    if (state->nBlocksInFlight == 1) {
        // We're starting a block download (batch) from this peer.
        state->nDownloadingSince = GetTime<std::chrono::microseconds>().count();
    }
    if (state->nBlocksInFlightValidHeaders == 1 && pindex != nullptr) {
        // First validated-header download from this peer: update the global tally.
        nPeersWithValidatedDownloads++;
    }
    itInFlight = mapBlocksInFlight.insert(std::make_pair(hash, std::make_pair(nodeid, it))).first;
    if (pit)
        *pit = &itInFlight->second.second;
    return true;
}
575 
/** Check whether the last unknown block hash a peer advertised has since become
 *  known; if so, promote it to pindexBestKnownBlock when it has at least as
 *  much chain work, and clear the pending hash. Caller must hold cs_main. */
static void ProcessBlockAvailability(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
    CNodeState *state = State(nodeid);
    assert(state != nullptr);

    if (!state->hashLastUnknownBlock.IsNull()) {
        const CBlockIndex* pindex = LookupBlockIndex(state->hashLastUnknownBlock);
        if (pindex && pindex->nChainWork > 0) {
            if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
                state->pindexBestKnownBlock = pindex;
            }
            state->hashLastUnknownBlock.SetNull();
        }
    }
}
591 
593 static void UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
594  CNodeState *state = State(nodeid);
595  assert(state != nullptr);
596 
597  ProcessBlockAvailability(nodeid);
598 
599  const CBlockIndex* pindex = LookupBlockIndex(hash);
600  if (pindex && pindex->nChainWork > 0) {
601  // An actually better block was announced.
602  if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
603  state->pindexBestKnownBlock = pindex;
604  }
605  } else {
606  // An unknown block was announced; just assume that the latest one is the best one.
607  state->hashLastUnknownBlock = hash;
608  }
609 }
610 
/** Arrange for this peer to announce new blocks to us via compact blocks
 *  (BIP152 SENDCMPCT with announce=true), keeping at most 3 peers in that
 *  role; the oldest entry in lNodesAnnouncingHeaderAndIDs is told to stop
 *  when a new peer is added. Caller must hold cs_main. */
static void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid, CConnman& connman) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    AssertLockHeld(cs_main);
    CNodeState* nodestate = State(nodeid);
    if (!nodestate || !nodestate->fSupportsDesiredCmpctVersion) {
        // Never ask from peers who can't provide witnesses.
        return;
    }
    if (nodestate->fProvidesHeaderAndIDs) {
        // If the peer is already on the list, just move it to the back (most recent).
        for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin(); it != lNodesAnnouncingHeaderAndIDs.end(); it++) {
            if (*it == nodeid) {
                lNodesAnnouncingHeaderAndIDs.erase(it);
                lNodesAnnouncingHeaderAndIDs.push_back(nodeid);
                return;
            }
        }
        connman.ForNode(nodeid, [&connman](CNode* pfrom) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
            AssertLockHeld(::cs_main);
            uint64_t nCMPCTBLOCKVersion = (pfrom->GetLocalServices() & NODE_WITNESS) ? 2 : 1;
            if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
                // As per BIP152, we only get 3 of our peers to announce
                // blocks using compact encodings.
                connman.ForNode(lNodesAnnouncingHeaderAndIDs.front(), [&connman, nCMPCTBLOCKVersion](CNode* pnodeStop){
                    connman.PushMessage(pnodeStop, CNetMsgMaker(pnodeStop->GetCommonVersion()).Make(NetMsgType::SENDCMPCT, /*fAnnounceUsingCMPCTBLOCK=*/false, nCMPCTBLOCKVersion));
                    return true;
                });
                lNodesAnnouncingHeaderAndIDs.pop_front();
            }
            connman.PushMessage(pfrom, CNetMsgMaker(pfrom->GetCommonVersion()).Make(NetMsgType::SENDCMPCT, /*fAnnounceUsingCMPCTBLOCK=*/true, nCMPCTBLOCKVersion));
            lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
            return true;
        });
    }
}
651 
652 static bool TipMayBeStale(const Consensus::Params &consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
653 {
654  AssertLockHeld(cs_main);
655  if (g_last_tip_update == 0) {
656  g_last_tip_update = GetTime();
657  }
658  return g_last_tip_update < GetTime() - consensusParams.nPowTargetSpacing * 3 && mapBlocksInFlight.empty();
659 }
660 
661 static bool CanDirectFetch(const Consensus::Params &consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
662 {
663  return ::ChainActive().Tip()->GetBlockTime() > GetAdjustedTime() - consensusParams.nPowTargetSpacing * 20;
664 }
665 
666 static bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
667 {
668  if (state->pindexBestKnownBlock && pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight))
669  return true;
670  if (state->pindexBestHeaderSent && pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight))
671  return true;
672  return false;
673 }
674 
/** Update pindexLastCommonBlock and append up to `count` successors on the
 *  peer's best-known chain — blocks not yet stored and not in flight — to
 *  vBlocks. If nothing can be fetched solely because the download window is
 *  full, nodeStaller is set to the peer whose in-flight block is occupying
 *  the window. Caller must hold cs_main. */
static void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller, const Consensus::Params& consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    if (count == 0)
        return;

    vBlocks.reserve(vBlocks.size() + count);
    CNodeState *state = State(nodeid);
    assert(state != nullptr);

    // Make sure pindexBestKnownBlock is up to date, we'll need it.
    ProcessBlockAvailability(nodeid);

    if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->nChainWork < ::ChainActive().Tip()->nChainWork || state->pindexBestKnownBlock->nChainWork < nMinimumChainWork) {
        // This peer has nothing interesting.
        return;
    }

    if (state->pindexLastCommonBlock == nullptr) {
        // Bootstrap quickly by guessing a parent of our best tip is the forking point.
        // Guessing wrong in either direction is not a problem.
        state->pindexLastCommonBlock = ::ChainActive()[std::min(state->pindexBestKnownBlock->nHeight, ::ChainActive().Height())];
    }

    // If the peer reorganized, our previous pindexLastCommonBlock may not be an ancestor
    // of its current tip anymore. Go back enough to fix that.
    state->pindexLastCommonBlock = LastCommonAncestor(state->pindexLastCommonBlock, state->pindexBestKnownBlock);
    if (state->pindexLastCommonBlock == state->pindexBestKnownBlock)
        return;

    std::vector<const CBlockIndex*> vToFetch;
    const CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
    // Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last
    // linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to
    // download that next block if the window were 1 larger.
    int nWindowEnd = state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW;
    int nMaxHeight = std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
    NodeId waitingfor = -1;
    while (pindexWalk->nHeight < nMaxHeight) {
        // Read up to 128 (or more, if more blocks than that are needed) successors of pindexWalk (towards
        // pindexBestKnownBlock) into vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as expensive
        // as iterating over ~100 CBlockIndex* entries anyway.
        int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight, std::max<int>(count - vBlocks.size(), 128));
        vToFetch.resize(nToFetch);
        pindexWalk = state->pindexBestKnownBlock->GetAncestor(pindexWalk->nHeight + nToFetch);
        vToFetch[nToFetch - 1] = pindexWalk;
        // Fill the rest of vToFetch backwards via pprev links (cheaper than
        // repeated GetAncestor calls).
        for (unsigned int i = nToFetch - 1; i > 0; i--) {
            vToFetch[i - 1] = vToFetch[i]->pprev;
        }

        // Iterate over those blocks in vToFetch (in forward direction), adding the ones that
        // are not yet downloaded and not in flight to vBlocks. In the meantime, update
        // pindexLastCommonBlock as long as all ancestors are already downloaded, or if it's
        // already part of our chain (and therefore don't need it even if pruned).
        for (const CBlockIndex* pindex : vToFetch) {
            if (!pindex->IsValid(BLOCK_VALID_TREE)) {
                // We consider the chain that this peer is on invalid.
                return;
            }
            if (!State(nodeid)->fHaveWitness && IsWitnessEnabled(pindex->pprev, consensusParams)) {
                // We wouldn't download this block or its descendants from this peer.
                return;
            }
            if (pindex->nStatus & BLOCK_HAVE_DATA || ::ChainActive().Contains(pindex)) {
                if (pindex->HaveTxsDownloaded())
                    state->pindexLastCommonBlock = pindex;
            } else if (mapBlocksInFlight.count(pindex->GetBlockHash()) == 0) {
                // The block is not already downloaded, and not yet in flight.
                if (pindex->nHeight > nWindowEnd) {
                    // We reached the end of the window.
                    if (vBlocks.size() == 0 && waitingfor != nodeid) {
                        // We aren't able to fetch anything, but we would be if the download window was one larger.
                        nodeStaller = waitingfor;
                    }
                    return;
                }
                vBlocks.push_back(pindex);
                if (vBlocks.size() == count) {
                    return;
                }
            } else if (waitingfor == -1) {
                // This is the first already-in-flight block.
                waitingfor = mapBlocksInFlight[pindex->GetBlockHash()].first;
            }
        }
    }
}
763 
764 } // namespace
765 
/** Register a transaction announcement (by txid or wtxid) from a peer with the
 *  request tracker, enforcing the per-peer announcement cap and computing the
 *  request-time delay. Requires cs_main (protects m_txrequest). */
void PeerManager::AddTxAnnouncement(const CNode& node, const GenTxid& gtxid, std::chrono::microseconds current_time)
{
    AssertLockHeld(::cs_main); // For m_txrequest
    NodeId nodeid = node.GetId();
    if (!node.HasPermission(PF_RELAY) && m_txrequest.Count(nodeid) >= MAX_PEER_TX_ANNOUNCEMENTS) {
        // Too many queued announcements from this peer
        return;
    }
    const CNodeState* state = State(nodeid);

    // Decide the TxRequestTracker parameters for this announcement:
    // - "preferred": if fPreferredDownload is set (= outbound, or PF_NOBAN permission)
    // - "reqtime": current time plus delays for:
    //   - NONPREF_PEER_TX_DELAY for announcements from non-preferred connections
    //   - TXID_RELAY_DELAY for txid announcements while wtxid peers are available
    //   - OVERLOADED_PEER_TX_DELAY for announcements from peers which have at least
    //     MAX_PEER_TX_REQUEST_IN_FLIGHT requests in flight (and don't have PF_RELAY).
    auto delay = std::chrono::microseconds{0};
    const bool preferred = state->fPreferredDownload;
    if (!preferred) delay += NONPREF_PEER_TX_DELAY;
    if (!gtxid.IsWtxid() && g_wtxid_relay_peers > 0) delay += TXID_RELAY_DELAY;
    const bool overloaded = !node.HasPermission(PF_RELAY) &&
        m_txrequest.CountInFlight(nodeid) >= MAX_PEER_TX_REQUEST_IN_FLIGHT;
    if (overloaded) delay += OVERLOADED_PEER_TX_DELAY;
    m_txrequest.ReceivedInv(nodeid, gtxid, preferred, current_time + delay);
}
792 
793 // This function is used for testing the stale tip eviction logic, see
794 // denialofservice_tests.cpp
795 void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds)
796 {
797  LOCK(cs_main);
798  CNodeState *state = State(node);
799  if (state) state->m_last_block_announcement = time_in_seconds;
800 }
801 
/** Create and register per-peer state for a newly connected node: a CNodeState
 *  entry under cs_main and a Peer entry under g_peer_mutex. For outbound
 *  connections, also start the handshake by sending our VERSION message. */
void PeerManager::InitializeNode(CNode *pnode) {
    CAddress addr = pnode->addr;
    // NOTE(review): addrName is unused in this function as written.
    std::string addrName = pnode->GetAddrName();
    NodeId nodeid = pnode->GetId();
    {
        LOCK(cs_main);
        mapNodeState.emplace_hint(mapNodeState.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(addr, pnode->IsInboundConn(), pnode->IsManualConn()));
        // A brand-new peer must not have any tx requests tracked yet.
        assert(m_txrequest.Count(nodeid) == 0);
    }
    {
        PeerRef peer = std::make_shared<Peer>(nodeid);
        LOCK(g_peer_mutex);
        g_peer_map.emplace_hint(g_peer_map.end(), nodeid, std::move(peer));
    }
    if (!pnode->IsInboundConn()) {
        // We initiate the version handshake for connections we opened.
        PushNodeVersion(*pnode, m_connman, GetTime());
    }
}
820 
{
    // NOTE(review): the enclosing function's signature line is not visible in
    // this chunk; the recursive reschedule below indicates this is
    // PeerManager::ReattemptInitialBroadcast(CScheduler& scheduler) — confirm.
    // Re-relay our own transactions that the mempool still tracks as unbroadcast.
    std::set<uint256> unbroadcast_txids = m_mempool.GetUnbroadcastTxs();

    for (const auto& txid : unbroadcast_txids) {
        CTransactionRef tx = m_mempool.get(txid);

        if (tx != nullptr) {
            // Still in our mempool: announce it to peers again.
            LOCK(cs_main);
            RelayTransaction(txid, tx->GetWitnessHash(), m_connman);
        } else {
            // No longer in the mempool: stop tracking it as unbroadcast.
            m_mempool.RemoveUnbroadcastTx(txid, true);
        }
    }

    // Schedule next run for 10-15 minutes in the future.
    // We add randomness on every cycle to avoid the possibility of P2P fingerprinting.
    const std::chrono::milliseconds delta = std::chrono::minutes{10} + GetRandMillis(std::chrono::minutes{5});
    scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); }, delta);
}
841 
/** Tear down all per-peer state for a disconnecting node and keep every global
 *  counter consistent. Sets fUpdateConnectionTime to true when the peer's
 *  addrman entry should be marked as recently connected (well-behaved,
 *  fully-connected, non-block-relay-only peers). */
void PeerManager::FinalizeNode(const CNode& node, bool& fUpdateConnectionTime) {
    NodeId nodeid = node.GetId();
    fUpdateConnectionTime = false;
    LOCK(cs_main);
    int misbehavior{0};
    {
        // Remove the Peer entry; the shared_ptr keeps it alive until here.
        PeerRef peer = GetPeerRef(nodeid);
        assert(peer != nullptr);
        misbehavior = WITH_LOCK(peer->m_misbehavior_mutex, return peer->m_misbehavior_score);
        LOCK(g_peer_mutex);
        g_peer_map.erase(nodeid);
    }
    CNodeState *state = State(nodeid);
    assert(state != nullptr);

    if (state->fSyncStarted)
        nSyncStarted--;

    if (misbehavior == 0 && state->fCurrentlyConnected && !node.IsBlockOnlyConn()) {
        // Note: we avoid changing visible addrman state for block-relay-only peers
        fUpdateConnectionTime = true;
    }

    // Drop the global in-flight entries for everything this peer was downloading.
    for (const QueuedBlock& entry : state->vBlocksInFlight) {
        mapBlocksInFlight.erase(entry.hash);
    }
    EraseOrphansFor(nodeid);
    m_txrequest.DisconnectedPeer(nodeid);
    // Undo this peer's contributions to the global counters.
    nPreferredDownload -= state->fPreferredDownload;
    nPeersWithValidatedDownloads -= (state->nBlocksInFlightValidHeaders != 0);
    assert(nPeersWithValidatedDownloads >= 0);
    g_outbound_peers_with_protect_from_disconnect -= state->m_chain_sync.m_protect;
    assert(g_outbound_peers_with_protect_from_disconnect >= 0);
    g_wtxid_relay_peers -= state->m_wtxid_relay;
    assert(g_wtxid_relay_peers >= 0);

    mapNodeState.erase(nodeid);

    if (mapNodeState.empty()) {
        // Do a consistency check after the last peer is removed.
        assert(mapBlocksInFlight.empty());
        assert(nPreferredDownload == 0);
        assert(nPeersWithValidatedDownloads == 0);
        assert(g_outbound_peers_with_protect_from_disconnect == 0);
        assert(g_wtxid_relay_peers == 0);
        assert(m_txrequest.Size() == 0);
    }
    LogPrint(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid);
}
891 
892 bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) {
893  {
894  LOCK(cs_main);
895  CNodeState* state = State(nodeid);
896  if (state == nullptr)
897  return false;
898  stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1;
899  stats.nCommonHeight = state->pindexLastCommonBlock ? state->pindexLastCommonBlock->nHeight : -1;
900  for (const QueuedBlock& queue : state->vBlocksInFlight) {
901  if (queue.pindex)
902  stats.vHeightInFlight.push_back(queue.pindex->nHeight);
903  }
904  }
905 
906  PeerRef peer = GetPeerRef(nodeid);
907  if (peer == nullptr) return false;
908  stats.m_misbehavior_score = WITH_LOCK(peer->m_misbehavior_mutex, return peer->m_misbehavior_score);
909 
910  return true;
911 }
912 
914 //
915 // mapOrphanTransactions
916 //
917 
918 static void AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
919 {
920  size_t max_extra_txn = gArgs.GetArg("-blockreconstructionextratxn", DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN);
921  if (max_extra_txn <= 0)
922  return;
923  if (!vExtraTxnForCompact.size())
924  vExtraTxnForCompact.resize(max_extra_txn);
925  vExtraTxnForCompact[vExtraTxnForCompactIt] = std::make_pair(tx->GetWitnessHash(), tx);
926  vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % max_extra_txn;
927 }
928 
/** Add an unconnected ("orphan") transaction to the orphan pool on behalf of
 *  the given peer, updating all three orphan indexes (by txid, by wtxid, and
 *  by spent outpoint). Returns false if the tx is already present or too
 *  large. Caller must hold g_cs_orphans. */
bool AddOrphanTx(const CTransactionRef& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
{
    const uint256& hash = tx->GetHash();
    if (mapOrphanTransactions.count(hash))
        return false;

    // Ignore big transactions, to avoid a
    // send-big-orphans memory exhaustion attack. If a peer has a legitimate
    // large transaction with a missing parent then we assume
    // it will rebroadcast it later, after the parent transaction(s)
    // have been mined or received.
    // 100 orphans, each of which is at most 100,000 bytes big is
    // at most 10 megabytes of orphans and somewhat more byprev index (in the worst case):
    unsigned int sz = GetTransactionWeight(*tx);
    if (sz > MAX_STANDARD_TX_WEIGHT)
    {
        LogPrint(BCLog::MEMPOOL, "ignoring large orphan tx (size: %u, hash: %s)\n", sz, hash.ToString());
        return false;
    }

    // Insert into the main map; list_pos records the slot this entry will take
    // in g_orphan_list (appended just below).
    auto ret = mapOrphanTransactions.emplace(hash, COrphanTx{tx, peer, GetTime() + ORPHAN_TX_EXPIRE_TIME, g_orphan_list.size()});
    assert(ret.second);
    g_orphan_list.push_back(ret.first);
    // Allow for lookups in the orphan pool by wtxid, as well as txid
    g_orphans_by_wtxid.emplace(tx->GetWitnessHash(), ret.first);
    // Index every input's outpoint so arriving parents can find this orphan.
    for (const CTxIn& txin : tx->vin) {
        mapOrphanTransactionsByPrev[txin.prevout].insert(ret.first);
    }

    AddToCompactExtraTransactions(tx);

    LogPrint(BCLog::MEMPOOL, "stored orphan tx %s (mapsz %u outsz %u)\n", hash.ToString(),
             mapOrphanTransactions.size(), mapOrphanTransactionsByPrev.size());
    return true;
}
964 
/** Erase one orphan (by txid) from all orphan indexes.
 *  Returns the number of entries erased (0 or 1). Caller must hold g_cs_orphans. */
int static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
{
    std::map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.find(hash);
    if (it == mapOrphanTransactions.end())
        return 0;
    // Remove this orphan from the by-outpoint index, dropping now-empty sets.
    for (const CTxIn& txin : it->second.tx->vin)
    {
        auto itPrev = mapOrphanTransactionsByPrev.find(txin.prevout);
        if (itPrev == mapOrphanTransactionsByPrev.end())
            continue;
        itPrev->second.erase(it);
        if (itPrev->second.empty())
            mapOrphanTransactionsByPrev.erase(itPrev);
    }

    // Swap-remove from g_orphan_list: move the last entry into the vacated
    // slot (updating its mirrored list_pos) so the list stays dense.
    size_t old_pos = it->second.list_pos;
    assert(g_orphan_list[old_pos] == it);
    if (old_pos + 1 != g_orphan_list.size()) {
        // Unless we're deleting the last entry in g_orphan_list, move the last
        // entry to the position we're deleting.
        auto it_last = g_orphan_list.back();
        g_orphan_list[old_pos] = it_last;
        it_last->second.list_pos = old_pos;
    }
    g_orphan_list.pop_back();
    g_orphans_by_wtxid.erase(it->second.tx->GetWitnessHash());

    mapOrphanTransactions.erase(it);
    return 1;
}
995 
996 void EraseOrphansFor(NodeId peer)
997 {
998  LOCK(g_cs_orphans);
999  int nErased = 0;
1000  std::map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
1001  while (iter != mapOrphanTransactions.end())
1002  {
1003  std::map<uint256, COrphanTx>::iterator maybeErase = iter++; // increment to avoid iterator becoming invalid
1004  if (maybeErase->second.fromPeer == peer)
1005  {
1006  nErased += EraseOrphanTx(maybeErase->second.tx->GetHash());
1007  }
1008  }
1009  if (nErased > 0) LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx from peer=%d\n", nErased, peer);
1010 }
1011 
1012 
//! Shrink the orphan pool to at most nMaxOrphans entries, first sweeping out
//! expired entries (batched on a timer), then evicting uniformly at random.
//! Returns the number of randomly evicted orphans (expired ones not counted).
unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans)
{
    LOCK(g_cs_orphans);

    unsigned int nEvicted = 0;
    // Function-local static: persists the next sweep deadline across calls.
    static int64_t nNextSweep;
    int64_t nNow = GetTime();
    if (nNextSweep <= nNow) {
        // Sweep out expired orphan pool entries:
        int nErased = 0;
        int64_t nMinExpTime = nNow + ORPHAN_TX_EXPIRE_TIME - ORPHAN_TX_EXPIRE_INTERVAL;
        std::map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
        while (iter != mapOrphanTransactions.end())
        {
            // Advance before a potential erase so the iterator stays valid.
            std::map<uint256, COrphanTx>::iterator maybeErase = iter++;
            if (maybeErase->second.nTimeExpire <= nNow) {
                nErased += EraseOrphanTx(maybeErase->second.tx->GetHash());
            } else {
                // Track the earliest surviving expiry to schedule the next sweep.
                nMinExpTime = std::min(maybeErase->second.nTimeExpire, nMinExpTime);
            }
        }
        // Sweep again 5 minutes after the next entry that expires in order to batch the linear scan.
        nNextSweep = nMinExpTime + ORPHAN_TX_EXPIRE_INTERVAL;
        if (nErased > 0) LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx due to expiration\n", nErased);
    }
    FastRandomContext rng;
    // The size check guarantees g_orphan_list is non-empty inside the loop.
    while (mapOrphanTransactions.size() > nMaxOrphans)
    {
        // Evict a random orphan:
        size_t randompos = rng.randrange(g_orphan_list.size());
        EraseOrphanTx(g_orphan_list[randompos]->first);
        ++nEvicted;
    }
    return nEvicted;
}
1048 
1049 void PeerManager::Misbehaving(const NodeId pnode, const int howmuch, const std::string& message)
1050 {
1051  assert(howmuch > 0);
1052 
1053  PeerRef peer = GetPeerRef(pnode);
1054  if (peer == nullptr) return;
1055 
1056  LOCK(peer->m_misbehavior_mutex);
1057  peer->m_misbehavior_score += howmuch;
1058  const std::string message_prefixed = message.empty() ? "" : (": " + message);
1059  if (peer->m_misbehavior_score >= DISCOURAGEMENT_THRESHOLD && peer->m_misbehavior_score - howmuch < DISCOURAGEMENT_THRESHOLD) {
1060  LogPrint(BCLog::NET, "Misbehaving: peer=%d (%d -> %d) DISCOURAGE THRESHOLD EXCEEDED%s\n", pnode, peer->m_misbehavior_score - howmuch, peer->m_misbehavior_score, message_prefixed);
1061  peer->m_should_discourage = true;
1062  } else {
1063  LogPrint(BCLog::NET, "Misbehaving: peer=%d (%d -> %d)%s\n", pnode, peer->m_misbehavior_score - howmuch, peer->m_misbehavior_score, message_prefixed);
1064  }
1065 }
1066 
1068  bool via_compact_block, const std::string& message)
1069 {
1070  switch (state.GetResult()) {
1072  break;
1073  // The node is providing invalid data:
1076  if (!via_compact_block) {
1077  Misbehaving(nodeid, 100, message);
1078  return true;
1079  }
1080  break;
1082  {
1083  LOCK(cs_main);
1084  CNodeState *node_state = State(nodeid);
1085  if (node_state == nullptr) {
1086  break;
1087  }
1088 
1089  // Discourage outbound (but not inbound) peers if on an invalid chain.
1090  // Exempt HB compact block peers and manual connections.
1091  if (!via_compact_block && !node_state->m_is_inbound && !node_state->m_is_manual_connection) {
1092  Misbehaving(nodeid, 100, message);
1093  return true;
1094  }
1095  break;
1096  }
1100  Misbehaving(nodeid, 100, message);
1101  return true;
1102  // Conflicting (but not necessarily invalid) data or different policy:
1104  // TODO: Handle this much more gracefully (10 DoS points is super arbitrary)
1105  Misbehaving(nodeid, 10, message);
1106  return true;
1109  break;
1110  }
1111  if (message != "") {
1112  LogPrint(BCLog::NET, "peer=%d: %s\n", nodeid, message);
1113  }
1114  return false;
1115 }
1116 
1117 bool PeerManager::MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state, const std::string& message)
1118 {
1119  switch (state.GetResult()) {
1121  break;
1122  // The node is providing invalid data:
1124  Misbehaving(nodeid, 100, message);
1125  return true;
1126  // Conflicting (but not necessarily invalid) data or different policy:
1136  break;
1137  }
1138  if (message != "") {
1139  LogPrint(BCLog::NET, "peer=%d: %s\n", nodeid, message);
1140  }
1141  return false;
1142 }
1143 
1144 
1146 //
1147 // blockchain -> download logic notification
1148 //
1149 
1150 // To prevent fingerprinting attacks, only send blocks/headers outside of the
1151 // active chain if they are no more than a month older (both in time, and in
1152 // best equivalent proof of work) than the best header chain we know about and
1153 // we fully-validated them at some point.
1154 static bool BlockRequestAllowed(const CBlockIndex* pindex, const Consensus::Params& consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
1155 {
1156  AssertLockHeld(cs_main);
1157  if (::ChainActive().Contains(pindex)) return true;
1158  return pindex->IsValid(BLOCK_VALID_SCRIPTS) && (pindexBestHeader != nullptr) &&
1161 }
1162 
1163 PeerManager::PeerManager(const CChainParams& chainparams, CConnman& connman, BanMan* banman,
1164  CScheduler& scheduler, ChainstateManager& chainman, CTxMemPool& pool)
1165  : m_chainparams(chainparams),
1166  m_connman(connman),
1167  m_banman(banman),
1168  m_chainman(chainman),
1169  m_mempool(pool),
1170  m_stale_tip_check_time(0)
1171 {
1172  // Initialize global variables that cannot be constructed at startup.
1173  recentRejects.reset(new CRollingBloomFilter(120000, 0.000001));
1174 
1175  // Blocks don't typically have more than 4000 transactions, so this should
1176  // be at least six blocks (~1 hr) worth of transactions that we can store,
1177  // inserting both a txid and wtxid for every observed transaction.
1178  // If the number of transactions appearing in a block goes up, or if we are
1179  // seeing getdata requests more than an hour after initial announcement, we
1180  // can increase this number.
1181  // The false positive rate of 1/1M should come out to less than 1
1182  // transaction per day that would be inadvertently ignored (which is the
1183  // same probability that we have in the reject filter).
1184  g_recent_confirmed_transactions.reset(new CRollingBloomFilter(48000, 0.000001));
1185 
1186  // Stale tip checking and peer eviction are on two different timers, but we
1187  // don't want them to get out of sync due to drift in the scheduler, so we
1188  // combine them in one function and schedule at the quicker (peer-eviction)
1189  // timer.
1190  static_assert(EXTRA_PEER_CHECK_INTERVAL < STALE_CHECK_INTERVAL, "peer eviction timer should be less than stale tip check timer");
1191  scheduler.scheduleEvery([this] { this->CheckForStaleTipAndEvictPeers(); }, std::chrono::seconds{EXTRA_PEER_CHECK_INTERVAL});
1192 
1193  // schedule next run for 10-15 minutes in the future
1194  const std::chrono::milliseconds delta = std::chrono::minutes{10} + GetRandMillis(std::chrono::minutes{5});
1195  scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); }, delta);
1196 }
1197 
1203 void PeerManager::BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindex)
1204 {
1205  {
1206  LOCK(g_cs_orphans);
1207 
1208  std::vector<uint256> vOrphanErase;
1209 
1210  for (const CTransactionRef& ptx : pblock->vtx) {
1211  const CTransaction& tx = *ptx;
1212 
1213  // Which orphan pool entries must we evict?
1214  for (const auto& txin : tx.vin) {
1215  auto itByPrev = mapOrphanTransactionsByPrev.find(txin.prevout);
1216  if (itByPrev == mapOrphanTransactionsByPrev.end()) continue;
1217  for (auto mi = itByPrev->second.begin(); mi != itByPrev->second.end(); ++mi) {
1218  const CTransaction& orphanTx = *(*mi)->second.tx;
1219  const uint256& orphanHash = orphanTx.GetHash();
1220  vOrphanErase.push_back(orphanHash);
1221  }
1222  }
1223  }
1224 
1225  // Erase orphan transactions included or precluded by this block
1226  if (vOrphanErase.size()) {
1227  int nErased = 0;
1228  for (const uint256& orphanHash : vOrphanErase) {
1229  nErased += EraseOrphanTx(orphanHash);
1230  }
1231  LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx included or conflicted by block\n", nErased);
1232  }
1233 
1234  g_last_tip_update = GetTime();
1235  }
1236  {
1237  LOCK(g_cs_recent_confirmed_transactions);
1238  for (const auto& ptx : pblock->vtx) {
1239  g_recent_confirmed_transactions->insert(ptx->GetHash());
1240  if (ptx->GetHash() != ptx->GetWitnessHash()) {
1241  g_recent_confirmed_transactions->insert(ptx->GetWitnessHash());
1242  }
1243  }
1244  }
1245  {
1246  LOCK(cs_main);
1247  for (const auto& ptx : pblock->vtx) {
1248  m_txrequest.ForgetTxHash(ptx->GetHash());
1249  m_txrequest.ForgetTxHash(ptx->GetWitnessHash());
1250  }
1251  }
1252 }
1253 
1254 void PeerManager::BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex)
1255 {
1256  // To avoid relay problems with transactions that were previously
1257  // confirmed, clear our filter of recently confirmed transactions whenever
1258  // there's a reorg.
1259  // This means that in a 1-block reorg (where 1 block is disconnected and
1260  // then another block reconnected), our filter will drop to having only one
1261  // block's worth of transactions in it, but that should be fine, since
1262  // presumably the most common case of relaying a confirmed transaction
1263  // should be just after a new block containing it is found.
1264  LOCK(g_cs_recent_confirmed_transactions);
1265  g_recent_confirmed_transactions->reset();
1266 }
1267 
// All of the following cache a recent block, and are protected by cs_most_recent_block
static RecursiveMutex cs_most_recent_block;
// The most recently announced new-PoW-valid block (set in NewPoWValidBlock).
static std::shared_ptr<const CBlock> most_recent_block GUARDED_BY(cs_most_recent_block);
// Pre-built compact-block encoding of most_recent_block, for fast relay.
static std::shared_ptr<const CBlockHeaderAndShortTxIDs> most_recent_compact_block GUARDED_BY(cs_most_recent_block);
// Hash of most_recent_block, kept separately for cheap comparisons.
static uint256 most_recent_block_hash GUARDED_BY(cs_most_recent_block);
// Whether most_recent_compact_block was built with witness data included.
static bool fWitnessesPresentInMostRecentCompactBlock GUARDED_BY(cs_most_recent_block);
1274 
//! Validation-interface callback fired once per block that passes PoW checks:
//! cache the block (and its compact encoding) and fast-announce it via
//! CMPCTBLOCK to peers that prefer header-and-ids announcements.
void PeerManager::NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& pblock) {
    // Build the compact encoding once, up front, and share it with all peers.
    std::shared_ptr<const CBlockHeaderAndShortTxIDs> pcmpctblock = std::make_shared<const CBlockHeaderAndShortTxIDs> (*pblock, true);
    const CNetMsgMaker msgMaker(PROTOCOL_VERSION);

    LOCK(cs_main);

    // Only fast-announce blocks at a new best height; avoids re-announcing
    // during reorgs to the same or lower height.
    static int nHighestFastAnnounce = 0;
    if (pindex->nHeight <= nHighestFastAnnounce)
        return;
    nHighestFastAnnounce = pindex->nHeight;

    bool fWitnessEnabled = IsWitnessEnabled(pindex->pprev, m_chainparams.GetConsensus());
    uint256 hashBlock(pblock->GetHash());

    {
        // Refresh the most-recent-block cache used by ProcessGetBlockData.
        LOCK(cs_most_recent_block);
        most_recent_block_hash = hashBlock;
        most_recent_block = pblock;
        most_recent_compact_block = pcmpctblock;
        fWitnessesPresentInMostRecentCompactBlock = fWitnessEnabled;
    }

    m_connman.ForEachNode([this, &pcmpctblock, pindex, &msgMaker, fWitnessEnabled, &hashBlock](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
        AssertLockHeld(::cs_main);

        // TODO: Avoid the repeated-serialization here
        if (pnode->GetCommonVersion() < INVALID_CB_NO_BAN_VERSION || pnode->fDisconnect)
            return;
        ProcessBlockAvailability(pnode->GetId());
        CNodeState &state = *State(pnode->GetId());
        // If the peer has, or we announced to them the previous block already,
        // but we don't think they have this one, go ahead and announce it
        if (state.fPreferHeaderAndIDs && (!fWitnessEnabled || state.fWantsCmpctWitness) &&
                !PeerHasHeader(&state, pindex) && PeerHasHeader(&state, pindex->pprev)) {

            LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", "PeerManager::NewPoWValidBlock",
                    hashBlock.ToString(), pnode->GetId());
            m_connman.PushMessage(pnode, msgMaker.Make(NetMsgType::CMPCTBLOCK, *pcmpctblock));
            state.pindexBestHeaderSent = pindex;
        }
    });
}
1321 
1326 void PeerManager::UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) {
1327  const int nNewHeight = pindexNew->nHeight;
1328  m_connman.SetBestHeight(nNewHeight);
1329 
1330  SetServiceFlagsIBDCache(!fInitialDownload);
1331  if (!fInitialDownload) {
1332  // Find the hashes of all blocks that weren't previously in the best chain.
1333  std::vector<uint256> vHashes;
1334  const CBlockIndex *pindexToAnnounce = pindexNew;
1335  while (pindexToAnnounce != pindexFork) {
1336  vHashes.push_back(pindexToAnnounce->GetBlockHash());
1337  pindexToAnnounce = pindexToAnnounce->pprev;
1338  if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) {
1339  // Limit announcements in case of a huge reorganization.
1340  // Rely on the peer's synchronization mechanism in that case.
1341  break;
1342  }
1343  }
1344  // Relay inventory, but don't relay old inventory during initial block download.
1345  m_connman.ForEachNode([nNewHeight, &vHashes](CNode* pnode) {
1346  LOCK(pnode->cs_inventory);
1347  if (nNewHeight > (pnode->nStartingHeight != -1 ? pnode->nStartingHeight - 2000 : 0)) {
1348  for (const uint256& hash : reverse_iterate(vHashes)) {
1349  pnode->vBlockHashesToAnnounce.push_back(hash);
1350  }
1351  }
1352  });
1354  }
1355 }
1356 
1361 void PeerManager::BlockChecked(const CBlock& block, const BlockValidationState& state) {
1362  LOCK(cs_main);
1363 
1364  const uint256 hash(block.GetHash());
1365  std::map<uint256, std::pair<NodeId, bool>>::iterator it = mapBlockSource.find(hash);
1366 
1367  // If the block failed validation, we know where it came from and we're still connected
1368  // to that peer, maybe punish.
1369  if (state.IsInvalid() &&
1370  it != mapBlockSource.end() &&
1371  State(it->second.first)) {
1372  MaybePunishNodeForBlock(/*nodeid=*/ it->second.first, state, /*via_compact_block=*/ !it->second.second);
1373  }
1374  // Check that:
1375  // 1. The block is valid
1376  // 2. We're not in initial block download
1377  // 3. This is currently the best block we're aware of. We haven't updated
1378  // the tip yet so we have no way to check this directly here. Instead we
1379  // just check that there are currently no other blocks in flight.
1380  else if (state.IsValid() &&
1382  mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) {
1383  if (it != mapBlockSource.end()) {
1384  MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first, m_connman);
1385  }
1386  }
1387  if (it != mapBlockSource.end())
1388  mapBlockSource.erase(it);
1389 }
1390 
1392 //
1393 // Messages
1394 //
1395 
1396 
//! Whether a transaction (identified by txid or wtxid in gtxid) is already
//! known to us: present in the orphan pool, recently confirmed, recently
//! rejected, or in the mempool. Also resets the reject filter whenever the
//! chain tip has changed since the last call.
bool static AlreadyHaveTx(const GenTxid& gtxid, const CTxMemPool& mempool) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    assert(recentRejects);
    if (::ChainActive().Tip()->GetBlockHash() != hashRecentRejectsChainTip) {
        // If the chain tip has changed previously rejected transactions
        // might be now valid, e.g. due to a nLockTime'd tx becoming valid,
        // or a double-spend. Reset the rejects filter and give those
        // txs a second chance.
        hashRecentRejectsChainTip = ::ChainActive().Tip()->GetBlockHash();
        recentRejects->reset();
    }

    const uint256& hash = gtxid.GetHash();

    {
        LOCK(g_cs_orphans);
        // The orphan pool is indexed by txid and (separately) by wtxid; probe
        // whichever index matches the identifier kind.
        if (!gtxid.IsWtxid() && mapOrphanTransactions.count(hash)) {
            return true;
        } else if (gtxid.IsWtxid() && g_orphans_by_wtxid.count(hash)) {
            return true;
        }
    }

    {
        LOCK(g_cs_recent_confirmed_transactions);
        if (g_recent_confirmed_transactions->contains(hash)) return true;
    }

    return recentRejects->contains(hash) || mempool.exists(gtxid);
}
1427 
1428 bool static AlreadyHaveBlock(const uint256& block_hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
1429 {
1430  return LookupBlockIndex(block_hash) != nullptr;
1431 }
1432 
1433 void RelayTransaction(const uint256& txid, const uint256& wtxid, const CConnman& connman)
1434 {
1435  connman.ForEachNode([&txid, &wtxid](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
1436  AssertLockHeld(::cs_main);
1437 
1438  CNodeState* state = State(pnode->GetId());
1439  if (state == nullptr) return;
1440  if (state->m_wtxid_relay) {
1441  pnode->PushTxInventory(wtxid);
1442  } else {
1443  pnode->PushTxInventory(txid);
1444  }
1445  });
1446 }
1447 
//! Relay an address to a small, deterministically chosen subset of peers.
//! The choice is stable for 24 hours (per address) so each peer's m_addr_known
//! filter naturally suppresses duplicate relays.
static void RelayAddress(const CAddress& addr, bool fReachable, const CConnman& connman)
{
    if (!fReachable && !addr.IsRelayable()) return;

    // Relay to a limited number of other nodes
    // Use deterministic randomness to send to the same nodes for 24 hours
    // at a time so the m_addr_knowns of the chosen nodes prevent repeats
    uint64_t hashAddr = addr.GetHash();
    const CSipHasher hasher = connman.GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY).Write(hashAddr << 32).Write((GetTime() + hashAddr) / (24 * 60 * 60));
    FastRandomContext insecure_rand;

    // Relay reachable addresses to 2 peers. Unreachable addresses are relayed randomly to 1 or 2 peers.
    unsigned int nRelayNodes = (fReachable || (hasher.Finalize() & 1)) ? 2 : 1;

    // best holds the top-nRelayNodes (keyed node, per-peer hash) pairs,
    // sorted descending by hash; slot .first == 0 means "unfilled".
    std::array<std::pair<uint64_t, CNode*>,2> best{{{0, nullptr}, {0, nullptr}}};
    assert(nRelayNodes <= best.size());

    // Insertion-sort each candidate peer into `best` by its keyed hash,
    // shifting lower-ranked entries down one slot.
    auto sortfunc = [&best, &hasher, nRelayNodes](CNode* pnode) {
        if (pnode->RelayAddrsWithConn()) {
            uint64_t hashKey = CSipHasher(hasher).Write(pnode->GetId()).Finalize();
            for (unsigned int i = 0; i < nRelayNodes; i++) {
                if (hashKey > best[i].first) {
                    std::copy(best.begin() + i, best.begin() + nRelayNodes - 1, best.begin() + i + 1);
                    best[i] = std::make_pair(hashKey, pnode);
                    break;
                }
            }
        }
    };

    // After ranking, push the address to every filled slot.
    auto pushfunc = [&addr, &best, nRelayNodes, &insecure_rand] {
        for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) {
            best[i].second->PushAddress(addr, insecure_rand);
        }
    };

    connman.ForEachNodeThen(std::move(sortfunc), std::move(pushfunc));
}
1486 
//! Serve a single block-type getdata item (BLOCK / WITNESS_BLOCK /
//! FILTERED_BLOCK / CMPCT_BLOCK) to a peer, enforcing the historical-serving
//! bandwidth limit and the NODE_NETWORK_LIMITED depth restriction.
void static ProcessGetBlockData(CNode& pfrom, const CChainParams& chainparams, const CInv& inv, CConnman& connman)
{
    bool send = false;
    std::shared_ptr<const CBlock> a_recent_block;
    std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block;
    bool fWitnessesPresentInARecentCompactBlock;
    const Consensus::Params& consensusParams = chainparams.GetConsensus();
    {
        // Snapshot the most-recent-block cache so we don't hold its lock
        // while doing disk I/O or network sends below.
        LOCK(cs_most_recent_block);
        a_recent_block = most_recent_block;
        a_recent_compact_block = most_recent_compact_block;
        fWitnessesPresentInARecentCompactBlock = fWitnessesPresentInMostRecentCompactBlock;
    }

    bool need_activate_chain = false;
    {
        LOCK(cs_main);
        const CBlockIndex* pindex = LookupBlockIndex(inv.hash);
        if (pindex) {
            if (pindex->HaveTxsDownloaded() && !pindex->IsValid(BLOCK_VALID_SCRIPTS) &&
                    pindex->IsValid(BLOCK_VALID_TREE)) {
                // If we have the block and all of its parents, but have not yet validated it,
                // we might be in the middle of connecting it (ie in the unlock of cs_main
                // before ActivateBestChain but after AcceptBlock).
                // In this case, we need to run ActivateBestChain prior to checking the relay
                // conditions below.
                need_activate_chain = true;
            }
        }
    } // release cs_main before calling ActivateBestChain
    if (need_activate_chain) {
        BlockValidationState state;
        if (!ActivateBestChain(state, chainparams, a_recent_block)) {
            LogPrint(BCLog::NET, "failed to activate chain (%s)\n", state.ToString());
        }
    }

    LOCK(cs_main);
    const CBlockIndex* pindex = LookupBlockIndex(inv.hash);
    if (pindex) {
        // Refuse stale off-chain blocks to avoid fingerprinting (see BlockRequestAllowed).
        send = BlockRequestAllowed(pindex, consensusParams);
        if (!send) {
            LogPrint(BCLog::NET, "%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom.GetId());
        }
    }
    const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
    // disconnect node in case we have reached the outbound limit for serving historical blocks
    if (send &&
        connman.OutboundTargetReached(true) &&
        (((pindexBestHeader != nullptr) && (pindexBestHeader->GetBlockTime() - pindex->GetBlockTime() > HISTORICAL_BLOCK_AGE)) || inv.IsMsgFilteredBlk()) &&
        !pfrom.HasPermission(PF_DOWNLOAD) // nodes with the download permission may exceed target
    ) {
        LogPrint(BCLog::NET, "historical block serving limit reached, disconnect peer=%d\n", pfrom.GetId());

        //disconnect node
        pfrom.fDisconnect = true;
        send = false;
    }
    // Avoid leaking prune-height by never sending blocks below the NODE_NETWORK_LIMITED threshold
    if (send && !pfrom.HasPermission(PF_NOBAN) && (
            (((pfrom.GetLocalServices() & NODE_NETWORK_LIMITED) == NODE_NETWORK_LIMITED) && ((pfrom.GetLocalServices() & NODE_NETWORK) != NODE_NETWORK) && (::ChainActive().Tip()->nHeight - pindex->nHeight > (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2 /* add two blocks buffer extension for possible races */) )
       )) {
        LogPrint(BCLog::NET, "Ignore block request below NODE_NETWORK_LIMITED threshold from peer=%d\n", pfrom.GetId());

        //disconnect node and prevent it from stalling (would otherwise wait for the missing block)
        pfrom.fDisconnect = true;
        send = false;
    }
    // Pruned nodes may have deleted the block, so check whether
    // it's available before trying to send.
    if (send && (pindex->nStatus & BLOCK_HAVE_DATA))
    {
        std::shared_ptr<const CBlock> pblock;
        if (a_recent_block && a_recent_block->GetHash() == pindex->GetBlockHash()) {
            // Serve the cached tip block without touching disk.
            pblock = a_recent_block;
        } else if (inv.IsMsgWitnessBlk()) {
            // Fast-path: in this case it is possible to serve the block directly from disk,
            // as the network format matches the format on disk
            std::vector<uint8_t> block_data;
            if (!ReadRawBlockFromDisk(block_data, pindex, chainparams.MessageStart())) {
                assert(!"cannot load block from disk");
            }
            connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::BLOCK, MakeSpan(block_data)));
            // Don't set pblock as we've sent the block
        } else {
            // Send block from disk
            std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>();
            if (!ReadBlockFromDisk(*pblockRead, pindex, consensusParams))
                assert(!"cannot load block from disk");
            pblock = pblockRead;
        }
        if (pblock) {
            if (inv.IsMsgBlk()) {
                // Legacy request: strip witness data.
                connman.PushMessage(&pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, *pblock));
            } else if (inv.IsMsgWitnessBlk()) {
                connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::BLOCK, *pblock));
            } else if (inv.IsMsgFilteredBlk()) {
                // BIP 37: only respond if the peer has installed a bloom filter.
                bool sendMerkleBlock = false;
                CMerkleBlock merkleBlock;
                if (pfrom.m_tx_relay != nullptr) {
                    LOCK(pfrom.m_tx_relay->cs_filter);
                    if (pfrom.m_tx_relay->pfilter) {
                        sendMerkleBlock = true;
                        merkleBlock = CMerkleBlock(*pblock, *pfrom.m_tx_relay->pfilter);
                    }
                }
                if (sendMerkleBlock) {
                    connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::MERKLEBLOCK, merkleBlock));
                    // CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
                    // This avoids hurting performance by pointlessly requiring a round-trip
                    // Note that there is currently no way for a node to request any single transactions we didn't send here -
                    // they must either disconnect and retry or request the full block.
                    // Thus, the protocol spec specified allows for us to provide duplicate txn here,
                    // however we MUST always provide at least what the remote peer needs
                    typedef std::pair<unsigned int, uint256> PairType;
                    for (PairType& pair : merkleBlock.vMatchedTxn)
                        connman.PushMessage(&pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::TX, *pblock->vtx[pair.first]));
                }
                // else
                    // no response
            } else if (inv.IsMsgCmpctBlk()) {
                // If a peer is asking for old blocks, we're almost guaranteed
                // they won't have a useful mempool to match against a compact block,
                // and we don't feel like constructing the object for them, so
                // instead we respond with the full, non-compact block.
                bool fPeerWantsWitness = State(pfrom.GetId())->fWantsCmpctWitness;
                int nSendFlags = fPeerWantsWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
                if (CanDirectFetch(consensusParams) && pindex->nHeight >= ::ChainActive().Height() - MAX_CMPCTBLOCK_DEPTH) {
                    // Reuse the cached compact encoding when it matches and has
                    // compatible witness content for this peer.
                    if ((fPeerWantsWitness || !fWitnessesPresentInARecentCompactBlock) && a_recent_compact_block && a_recent_compact_block->header.GetHash() == pindex->GetBlockHash()) {
                        connman.PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, *a_recent_compact_block));
                    } else {
                        CBlockHeaderAndShortTxIDs cmpctblock(*pblock, fPeerWantsWitness);
                        connman.PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
                    }
                } else {
                    connman.PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCK, *pblock));
                }
            }
        }

        // Trigger the peer node to send a getblocks request for the next batch of inventory
        if (inv.hash == pfrom.hashContinue)
        {
            // Send immediately. This must send even if redundant,
            // and we want it right after the last block so they don't
            // wait for other stuff first.
            std::vector<CInv> vInv;
            vInv.push_back(CInv(MSG_BLOCK, ::ChainActive().Tip()->GetBlockHash()));
            connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::INV, vInv));
            pfrom.hashContinue.SetNull();
        }
    }
}
1640 
//! Look up a transaction a peer has requested via getdata, but only if we are
//! willing to serve it: either it is old enough (or was exposed by a MEMPOOL
//! response) to be relayed unconditionally, or we recently announced it to
//! this specific peer. This limits probing of our mempool contents.
static CTransactionRef FindTxForGetData(const CTxMemPool& mempool, const CNode& peer, const GenTxid& gtxid, const std::chrono::seconds mempool_req, const std::chrono::seconds now) LOCKS_EXCLUDED(cs_main)
{
    auto txinfo = mempool.info(gtxid);
    if (txinfo.tx) {
        // If a TX could have been INVed in reply to a MEMPOOL request,
        // or is older than UNCONDITIONAL_RELAY_DELAY, permit the request
        // unconditionally.
        if ((mempool_req.count() && txinfo.m_time <= mempool_req) || txinfo.m_time <= now - UNCONDITIONAL_RELAY_DELAY) {
            return std::move(txinfo.tx);
        }
    }

    {
        LOCK(cs_main);
        // Otherwise, the transaction must have been announced recently.
        if (State(peer.GetId())->m_recently_announced_invs.contains(gtxid.GetHash())) {
            // If it was, it can be relayed from either the mempool...
            if (txinfo.tx) return std::move(txinfo.tx);
            // ... or the relay pool.
            auto mi = mapRelay.find(gtxid.GetHash());
            if (mi != mapRelay.end()) return mi->second;
        }
    }

    // Not found, or we are unwilling to serve it to this peer.
    return {};
}
1668 
//! Drain the peer's queued getdata requests: batch-serve transaction items
//! from the front of the queue, then at most one block item per call, and
//! reply NOTFOUND for anything we won't serve. Respects send-buffer
//! backpressure via pfrom.fPauseSend.
void static ProcessGetData(CNode& pfrom, Peer& peer, const CChainParams& chainparams, CConnman& connman, CTxMemPool& mempool, const std::atomic<bool>& interruptMsgProc) EXCLUSIVE_LOCKS_REQUIRED(!cs_main, peer.m_getdata_requests_mutex)
{
    AssertLockNotHeld(cs_main);

    std::deque<CInv>::iterator it = peer.m_getdata_requests.begin();
    std::vector<CInv> vNotFound;
    const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());

    const std::chrono::seconds now = GetTime<std::chrono::seconds>();
    // Get last mempool request time
    const std::chrono::seconds mempool_req = pfrom.m_tx_relay != nullptr ? pfrom.m_tx_relay->m_last_mempool_req.load()
                                                                         : std::chrono::seconds::min();

    // Process as many TX items from the front of the getdata queue as
    // possible, since they're common and it's efficient to batch process
    // them.
    while (it != peer.m_getdata_requests.end() && it->IsGenTxMsg()) {
        if (interruptMsgProc) return;
        // The send buffer provides backpressure. If there's no space in
        // the buffer, pause processing until the next call.
        if (pfrom.fPauseSend) break;

        const CInv &inv = *it++;

        if (pfrom.m_tx_relay == nullptr) {
            // Ignore GETDATA requests for transactions from blocks-only peers.
            continue;
        }

        CTransactionRef tx = FindTxForGetData(mempool, pfrom, ToGenTxid(inv), mempool_req, now);
        if (tx) {
            // WTX and WITNESS_TX imply we serialize with witness
            int nSendFlags = (inv.IsMsgTx() ? SERIALIZE_TRANSACTION_NO_WITNESS : 0);
            connman.PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *tx));
            // A peer fetched it; no need to keep rebroadcasting ourselves.
            mempool.RemoveUnbroadcastTx(tx->GetHash());
            // As we're going to send tx, make sure its unconfirmed parents are made requestable.
            std::vector<uint256> parent_ids_to_add;
            {
                LOCK(mempool.cs);
                auto txiter = mempool.GetIter(tx->GetHash());
                if (txiter) {
                    const CTxMemPoolEntry::Parents& parents = (*txiter)->GetMemPoolParentsConst();
                    parent_ids_to_add.reserve(parents.size());
                    for (const CTxMemPoolEntry& parent : parents) {
                        // Only parents too young for unconditional relay need
                        // the recently-announced exemption.
                        if (parent.GetTime() > now - UNCONDITIONAL_RELAY_DELAY) {
                            parent_ids_to_add.push_back(parent.GetTx().GetHash());
                        }
                    }
                }
            }
            for (const uint256& parent_txid : parent_ids_to_add) {
                // Relaying a transaction with a recent but unconfirmed parent.
                if (WITH_LOCK(pfrom.m_tx_relay->cs_tx_inventory, return !pfrom.m_tx_relay->filterInventoryKnown.contains(parent_txid))) {
                    LOCK(cs_main);
                    State(pfrom.GetId())->m_recently_announced_invs.insert(parent_txid);
                }
            }
        } else {
            vNotFound.push_back(inv);
        }
    }

    // Only process one BLOCK item per call, since they're uncommon and can be
    // expensive to process.
    if (it != peer.m_getdata_requests.end() && !pfrom.fPauseSend) {
        const CInv &inv = *it++;
        if (inv.IsGenBlkMsg()) {
            ProcessGetBlockData(pfrom, chainparams, inv, connman);
        }
        // else: If the first item on the queue is an unknown type, we erase it
        // and continue processing the queue on the next call.
    }

    // Drop everything we handled (or skipped) this call from the queue.
    peer.m_getdata_requests.erase(peer.m_getdata_requests.begin(), it);

    if (!vNotFound.empty()) {
        // Let the peer know that we didn't find what it asked for, so it doesn't
        // have to wait around forever.
        // SPV clients care about this message: it's needed when they are
        // recursively walking the dependencies of relevant unconfirmed
        // transactions. SPV clients want to do that because they want to know
        // about (and store and rebroadcast and risk analyze) the dependencies
        // of transactions relevant to them, without having to download the
        // entire memory pool.
        // Also, other nodes can use these messages to automatically request a
        // transaction from some other peer that annnounced it, and stop
        // waiting for us to respond.
        // In normal operation, we often send NOTFOUND messages for parents of
        // transactions that we relay; if a peer is missing a parent, they may
        // assume we have them and request the parents from us.
        connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::NOTFOUND, vNotFound));
    }
}
1762 
1763 static uint32_t GetFetchFlags(const CNode& pfrom) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
1764  uint32_t nFetchFlags = 0;
1765  if ((pfrom.GetLocalServices() & NODE_WITNESS) && State(pfrom.GetId())->fHaveWitness) {
1766  nFetchFlags |= MSG_WITNESS_FLAG;
1767  }
1768  return nFetchFlags;
1769 }
1770 
1771 void PeerManager::SendBlockTransactions(CNode& pfrom, const CBlock& block, const BlockTransactionsRequest& req) {
1772  BlockTransactions resp(req);
1773  for (size_t i = 0; i < req.indexes.size(); i++) {
1774  if (req.indexes[i] >= block.vtx.size()) {
1775  Misbehaving(pfrom.GetId(), 100, "getblocktxn with out-of-bounds tx indices");
1776  return;
1777  }
1778  resp.txn[i] = block.vtx[req.indexes[i]];
1779  }
1780  LOCK(cs_main);
1781  const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
1782  int nSendFlags = State(pfrom.GetId())->fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
1783  m_connman.PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCKTXN, resp));
1784 }
1785 
1786 void PeerManager::ProcessHeadersMessage(CNode& pfrom, const std::vector<CBlockHeader>& headers, bool via_compact_block)
1787 {
1788  const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
1789  size_t nCount = headers.size();
1790 
1791  if (nCount == 0) {
1792  // Nothing interesting. Stop asking this peers for more headers.
1793  return;
1794  }
1795 
1796  bool received_new_header = false;
1797  const CBlockIndex *pindexLast = nullptr;
1798  {
1799  LOCK(cs_main);
1800  CNodeState *nodestate = State(pfrom.GetId());
1801 
1802  // If this looks like it could be a block announcement (nCount <
1803  // MAX_BLOCKS_TO_ANNOUNCE), use special logic for handling headers that
1804  // don't connect:
1805  // - Send a getheaders message in response to try to connect the chain.
1806  // - The peer can send up to MAX_UNCONNECTING_HEADERS in a row that
1807  // don't connect before giving DoS points
1808  // - Once a headers message is received that is valid and does connect,
1809  // nUnconnectingHeaders gets reset back to 0.
1810  if (!LookupBlockIndex(headers[0].hashPrevBlock) && nCount < MAX_BLOCKS_TO_ANNOUNCE) {
1811  nodestate->nUnconnectingHeaders++;
1812  m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexBestHeader), uint256()));
1813  LogPrint(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n",
1814  headers[0].GetHash().ToString(),
1815  headers[0].hashPrevBlock.ToString(),
1817  pfrom.GetId(), nodestate->nUnconnectingHeaders);
1818  // Set hashLastUnknownBlock for this peer, so that if we
1819  // eventually get the headers - even from a different peer -
1820  // we can use this peer to download.
1821  UpdateBlockAvailability(pfrom.GetId(), headers.back().GetHash());
1822 
1823  if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS == 0) {
1824  Misbehaving(pfrom.GetId(), 20, strprintf("%d non-connecting headers", nodestate->nUnconnectingHeaders));
1825  }
1826  return;
1827  }
1828 
1829  uint256 hashLastBlock;
1830  for (const CBlockHeader& header : headers) {
1831  if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) {
1832  Misbehaving(pfrom.GetId(), 20, "non-continuous headers sequence");
1833  return;
1834  }
1835  hashLastBlock = header.GetHash();
1836  }
1837 
1838  // If we don't have the last header, then they'll have given us
1839  // something new (if these headers are valid).
1840  if (!LookupBlockIndex(hashLastBlock)) {
1841  received_new_header = true;
1842  }
1843  }
1844 
1845  BlockValidationState state;
1846  if (!m_chainman.ProcessNewBlockHeaders(headers, state, m_chainparams, &pindexLast)) {
1847  if (state.IsInvalid()) {
1848  MaybePunishNodeForBlock(pfrom.GetId(), state, via_compact_block, "invalid header received");
1849  return;
1850  }
1851  }
1852 
1853  {
1854  LOCK(cs_main);
1855  CNodeState *nodestate = State(pfrom.GetId());
1856  if (nodestate->nUnconnectingHeaders > 0) {
1857  LogPrint(BCLog::NET, "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n", pfrom.GetId(), nodestate->nUnconnectingHeaders);
1858  }
1859  nodestate->nUnconnectingHeaders = 0;
1860 
1861  assert(pindexLast);
1862  UpdateBlockAvailability(pfrom.GetId(), pindexLast->GetBlockHash());
1863 
1864  // From here, pindexBestKnownBlock should be guaranteed to be non-null,
1865  // because it is set in UpdateBlockAvailability. Some nullptr checks
1866  // are still present, however, as belt-and-suspenders.
1867 
1868  if (received_new_header && pindexLast->nChainWork > ::ChainActive().Tip()->nChainWork) {
1869  nodestate->m_last_block_announcement = GetTime();
1870  }
1871 
1872  if (nCount == MAX_HEADERS_RESULTS) {
1873  // Headers message had its maximum size; the peer may have more headers.
1874  // TODO: optimize: if pindexLast is an ancestor of ::ChainActive().Tip or pindexBestHeader, continue
1875  // from there instead.
1876  LogPrint(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom.GetId(), pfrom.nStartingHeight);
1877  m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexLast), uint256()));
1878  }
1879 
1880  bool fCanDirectFetch = CanDirectFetch(m_chainparams.GetConsensus());
1881  // If this set of headers is valid and ends in a block with at least as
1882  // much work as our tip, download as much as possible.
1883  if (fCanDirectFetch && pindexLast->IsValid(BLOCK_VALID_TREE) && ::ChainActive().Tip()->nChainWork <= pindexLast->nChainWork) {
1884  std::vector<const CBlockIndex*> vToFetch;
1885  const CBlockIndex *pindexWalk = pindexLast;
1886  // Calculate all the blocks we'd need to switch to pindexLast, up to a limit.
1887  while (pindexWalk && !::ChainActive().Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
1888  if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
1889  !mapBlocksInFlight.count(pindexWalk->GetBlockHash()) &&
1890  (!IsWitnessEnabled(pindexWalk->pprev, m_chainparams.GetConsensus()) || State(pfrom.GetId())->fHaveWitness)) {
1891  // We don't have this block, and it's not yet in flight.
1892  vToFetch.push_back(pindexWalk);
1893  }
1894  pindexWalk = pindexWalk->pprev;
1895  }
1896  // If pindexWalk still isn't on our main chain, we're looking at a
1897  // very large reorg at a time we think we're close to caught up to
1898  // the main chain -- this shouldn't really happen. Bail out on the
1899  // direct fetch and rely on parallel download instead.
1900  if (!::ChainActive().Contains(pindexWalk)) {
1901  LogPrint(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
1902  pindexLast->GetBlockHash().ToString(),
1903  pindexLast->nHeight);
1904  } else {
1905  std::vector<CInv> vGetData;
1906  // Download as much as possible, from earliest to latest.
1907  for (const CBlockIndex *pindex : reverse_iterate(vToFetch)) {
1908  if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
1909  // Can't download any more from this peer
1910  break;
1911  }
1912  uint32_t nFetchFlags = GetFetchFlags(pfrom);
1913  vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
1914  MarkBlockAsInFlight(m_mempool, pfrom.GetId(), pindex->GetBlockHash(), pindex);
1915  LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
1916  pindex->GetBlockHash().ToString(), pfrom.GetId());
1917  }
1918  if (vGetData.size() > 1) {
1919  LogPrint(BCLog::NET, "Downloading blocks toward %s (%d) via headers direct fetch\n",
1920  pindexLast->GetBlockHash().ToString(), pindexLast->nHeight);
1921  }
1922  if (vGetData.size() > 0) {
1923  if (nodestate->fSupportsDesiredCmpctVersion && vGetData.size() == 1 && mapBlocksInFlight.size() == 1 && pindexLast->pprev->IsValid(BLOCK_VALID_CHAIN)) {
1924  // In any case, we want to download using a compact block, not a regular one
1925  vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
1926  }
1927  m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData));
1928  }
1929  }
1930  }
1931  // If we're in IBD, we want outbound peers that will serve us a useful
1932  // chain. Disconnect peers that are on chains with insufficient work.
1934  // When nCount < MAX_HEADERS_RESULTS, we know we have no more
1935  // headers to fetch from this peer.
1936  if (nodestate->pindexBestKnownBlock && nodestate->pindexBestKnownBlock->nChainWork < nMinimumChainWork) {
1937  // This peer has too little work on their headers chain to help
1938  // us sync -- disconnect if it is an outbound disconnection
1939  // candidate.
1940  // Note: We compare their tip to nMinimumChainWork (rather than
1941  // ::ChainActive().Tip()) because we won't start block download
1942  // until we have a headers chain that has at least
1943  // nMinimumChainWork, even if a peer has a chain past our tip,
1944  // as an anti-DoS measure.
1945  if (pfrom.IsOutboundOrBlockRelayConn()) {
1946  LogPrintf("Disconnecting outbound peer %d -- headers chain has insufficient work\n", pfrom.GetId());
1947  pfrom.fDisconnect = true;
1948  }
1949  }
1950  }
1951 
1952  // If this is an outbound full-relay peer, check to see if we should protect
1953  // it from the bad/lagging chain logic.
1954  // Note that outbound block-relay peers are excluded from this protection, and
1955  // thus always subject to eviction under the bad/lagging chain logic.
1956  // See ChainSyncTimeoutState.
1957  if (!pfrom.fDisconnect && pfrom.IsFullOutboundConn() && nodestate->pindexBestKnownBlock != nullptr) {
1958  if (g_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= ::ChainActive().Tip()->nChainWork && !nodestate->m_chain_sync.m_protect) {
1959  LogPrint(BCLog::NET, "Protecting outbound peer=%d from eviction\n", pfrom.GetId());
1960  nodestate->m_chain_sync.m_protect = true;
1961  ++g_outbound_peers_with_protect_from_disconnect;
1962  }
1963  }
1964  }
1965 
1966  return;
1967 }
1968 
1977 void PeerManager::ProcessOrphanTx(std::set<uint256>& orphan_work_set)
1978 {
1979  AssertLockHeld(cs_main);
1980  AssertLockHeld(g_cs_orphans);
1981 
1982  while (!orphan_work_set.empty()) {
1983  const uint256 orphanHash = *orphan_work_set.begin();
1984  orphan_work_set.erase(orphan_work_set.begin());
1985 
1986  auto orphan_it = mapOrphanTransactions.find(orphanHash);
1987  if (orphan_it == mapOrphanTransactions.end()) continue;
1988 
1989  const CTransactionRef porphanTx = orphan_it->second.tx;
1990  TxValidationState state;
1991  std::list<CTransactionRef> removed_txn;
1992 
1993  if (AcceptToMemoryPool(m_mempool, state, porphanTx, &removed_txn, false /* bypass_limits */)) {
1994  LogPrint(BCLog::MEMPOOL, " accepted orphan tx %s\n", orphanHash.ToString());
1995  RelayTransaction(orphanHash, porphanTx->GetWitnessHash(), m_connman);
1996  for (unsigned int i = 0; i < porphanTx->vout.size(); i++) {
1997  auto it_by_prev = mapOrphanTransactionsByPrev.find(COutPoint(orphanHash, i));
1998  if (it_by_prev != mapOrphanTransactionsByPrev.end()) {
1999  for (const auto& elem : it_by_prev->second) {
2000  orphan_work_set.insert(elem->first);
2001  }
2002  }
2003  }
2004  EraseOrphanTx(orphanHash);
2005  for (const CTransactionRef& removedTx : removed_txn) {
2006  AddToCompactExtraTransactions(removedTx);
2007  }
2008  break;
2009  } else if (state.GetResult() != TxValidationResult::TX_MISSING_INPUTS) {
2010  if (state.IsInvalid()) {
2011  LogPrint(BCLog::MEMPOOL, " invalid orphan tx %s from peer=%d. %s\n",
2012  orphanHash.ToString(),
2013  orphan_it->second.fromPeer,
2014  state.ToString());
2015  // Maybe punish peer that gave us an invalid orphan tx
2016  MaybePunishNodeForTx(orphan_it->second.fromPeer, state);
2017  }
2018  // Has inputs but not accepted to mempool
2019  // Probably non-standard or insufficient fee
2020  LogPrint(BCLog::MEMPOOL, " removed orphan tx %s\n", orphanHash.ToString());
2022  // We can add the wtxid of this transaction to our reject filter.
2023  // Do not add txids of witness transactions or witness-stripped
2024  // transactions to the filter, as they can have been malleated;
2025  // adding such txids to the reject filter would potentially
2026  // interfere with relay of valid transactions from peers that
2027  // do not support wtxid-based relay. See
2028  // https://github.com/bitcoin/bitcoin/issues/8279 for details.
2029  // We can remove this restriction (and always add wtxids to
2030  // the filter even for witness stripped transactions) once
2031  // wtxid-based relay is broadly deployed.
2032  // See also comments in https://github.com/bitcoin/bitcoin/pull/18044#discussion_r443419034
2033  // for concerns around weakening security of unupgraded nodes
2034  // if we start doing this too early.
2035  assert(recentRejects);
2036  recentRejects->insert(porphanTx->GetWitnessHash());
2037  // If the transaction failed for TX_INPUTS_NOT_STANDARD,
2038  // then we know that the witness was irrelevant to the policy
2039  // failure, since this check depends only on the txid
2040  // (the scriptPubKey being spent is covered by the txid).
2041  // Add the txid to the reject filter to prevent repeated
2042  // processing of this transaction in the event that child
2043  // transactions are later received (resulting in
2044  // parent-fetching by txid via the orphan-handling logic).
2045  if (state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && porphanTx->GetWitnessHash() != porphanTx->GetHash()) {
2046  // We only add the txid if it differs from the wtxid, to
2047  // avoid wasting entries in the rolling bloom filter.
2048  recentRejects->insert(porphanTx->GetHash());
2049  }
2050  }
2051  EraseOrphanTx(orphanHash);
2052  break;
2053  }
2054  }
2055  m_mempool.check(&::ChainstateActive().CoinsTip());
2056 }
2057 
2073 static bool PrepareBlockFilterRequest(CNode& peer, const CChainParams& chain_params,
2074  BlockFilterType filter_type, uint32_t start_height,
2075  const uint256& stop_hash, uint32_t max_height_diff,
2076  const CBlockIndex*& stop_index,
2077  BlockFilterIndex*& filter_index)
2078 {
2079  const bool supported_filter_type =
2080  (filter_type == BlockFilterType::BASIC &&
2082  if (!supported_filter_type) {
2083  LogPrint(BCLog::NET, "peer %d requested unsupported block filter type: %d\n",
2084  peer.GetId(), static_cast<uint8_t>(filter_type));
2085  peer.fDisconnect = true;
2086  return false;
2087  }
2088 
2089  {
2090  LOCK(cs_main);
2091  stop_index = LookupBlockIndex(stop_hash);
2092 
2093  // Check that the stop block exists and the peer would be allowed to fetch it.
2094  if (!stop_index || !BlockRequestAllowed(stop_index, chain_params.GetConsensus())) {
2095  LogPrint(BCLog::NET, "peer %d requested invalid block hash: %s\n",
2096  peer.GetId(), stop_hash.ToString());
2097  peer.fDisconnect = true;
2098  return false;
2099  }
2100  }
2101 
2102  uint32_t stop_height = stop_index->nHeight;
2103  if (start_height > stop_height) {
2104  LogPrint(BCLog::NET, "peer %d sent invalid getcfilters/getcfheaders with " /* Continued */
2105  "start height %d and stop height %d\n",
2106  peer.GetId(), start_height, stop_height);
2107  peer.fDisconnect = true;
2108  return false;
2109  }
2110  if (stop_height - start_height >= max_height_diff) {
2111  LogPrint(BCLog::NET, "peer %d requested too many cfilters/cfheaders: %d / %d\n",
2112  peer.GetId(), stop_height - start_height + 1, max_height_diff);
2113  peer.fDisconnect = true;
2114  return false;
2115  }
2116 
2117  filter_index = GetBlockFilterIndex(filter_type);
2118  if (!filter_index) {
2119  LogPrint(BCLog::NET, "Filter index for supported type %s not found\n", BlockFilterTypeName(filter_type));
2120  return false;
2121  }
2122 
2123  return true;
2124 }
2125 
2136 static void ProcessGetCFilters(CNode& peer, CDataStream& vRecv, const CChainParams& chain_params,
2137  CConnman& connman)
2138 {
2139  uint8_t filter_type_ser;
2140  uint32_t start_height;
2141  uint256 stop_hash;
2142 
2143  vRecv >> filter_type_ser >> start_height >> stop_hash;
2144 
2145  const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser);
2146 
2147  const CBlockIndex* stop_index;
2148  BlockFilterIndex* filter_index;
2149  if (!PrepareBlockFilterRequest(peer, chain_params, filter_type, start_height, stop_hash,
2150  MAX_GETCFILTERS_SIZE, stop_index, filter_index)) {
2151  return;
2152  }
2153 
2154  std::vector<BlockFilter> filters;
2155  if (!filter_index->LookupFilterRange(start_height, stop_index, filters)) {
2156  LogPrint(BCLog::NET, "Failed to find block filter in index: filter_type=%s, start_height=%d, stop_hash=%s\n",
2157  BlockFilterTypeName(filter_type), start_height, stop_hash.ToString());
2158  return;
2159  }
2160 
2161  for (const auto& filter : filters) {
2163  .Make(NetMsgType::CFILTER, filter);
2164  connman.PushMessage(&peer, std::move(msg));
2165  }
2166 }
2167 
2178 static void ProcessGetCFHeaders(CNode& peer, CDataStream& vRecv, const CChainParams& chain_params,
2179  CConnman& connman)
2180 {
2181  uint8_t filter_type_ser;
2182  uint32_t start_height;
2183  uint256 stop_hash;
2184 
2185  vRecv >> filter_type_ser >> start_height >> stop_hash;
2186 
2187  const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser);
2188 
2189  const CBlockIndex* stop_index;
2190  BlockFilterIndex* filter_index;
2191  if (!PrepareBlockFilterRequest(peer, chain_params, filter_type, start_height, stop_hash,
2192  MAX_GETCFHEADERS_SIZE, stop_index, filter_index)) {
2193  return;
2194  }
2195 
2196  uint256 prev_header;
2197  if (start_height > 0) {
2198  const CBlockIndex* const prev_block =
2199  stop_index->GetAncestor(static_cast<int>(start_height - 1));
2200  if (!filter_index->LookupFilterHeader(prev_block, prev_header)) {
2201  LogPrint(BCLog::NET, "Failed to find block filter header in index: filter_type=%s, block_hash=%s\n",
2202  BlockFilterTypeName(filter_type), prev_block->GetBlockHash().ToString());
2203  return;
2204  }
2205  }
2206 
2207  std::vector<uint256> filter_hashes;
2208  if (!filter_index->LookupFilterHashRange(start_height, stop_index, filter_hashes)) {
2209  LogPrint(BCLog::NET, "Failed to find block filter hashes in index: filter_type=%s, start_height=%d, stop_hash=%s\n",
2210  BlockFilterTypeName(filter_type), start_height, stop_hash.ToString());
2211  return;
2212  }
2213 
2215  .Make(NetMsgType::CFHEADERS,
2216  filter_type_ser,
2217  stop_index->GetBlockHash(),
2218  prev_header,
2219  filter_hashes);
2220  connman.PushMessage(&peer, std::move(msg));
2221 }
2222 
2233 static void ProcessGetCFCheckPt(CNode& peer, CDataStream& vRecv, const CChainParams& chain_params,
2234  CConnman& connman)
2235 {
2236  uint8_t filter_type_ser;
2237  uint256 stop_hash;
2238 
2239  vRecv >> filter_type_ser >> stop_hash;
2240 
2241  const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser);
2242 
2243  const CBlockIndex* stop_index;
2244  BlockFilterIndex* filter_index;
2245  if (!PrepareBlockFilterRequest(peer, chain_params, filter_type, /*start_height=*/0, stop_hash,
2246  /*max_height_diff=*/std::numeric_limits<uint32_t>::max(),
2247  stop_index, filter_index)) {
2248  return;
2249  }
2250 
2251  std::vector<uint256> headers(stop_index->nHeight / CFCHECKPT_INTERVAL);
2252 
2253  // Populate headers.
2254  const CBlockIndex* block_index = stop_index;
2255  for (int i = headers.size() - 1; i >= 0; i--) {
2256  int height = (i + 1) * CFCHECKPT_INTERVAL;
2257  block_index = block_index->GetAncestor(height);
2258 
2259  if (!filter_index->LookupFilterHeader(block_index, headers[i])) {
2260  LogPrint(BCLog::NET, "Failed to find block filter header in index: filter_type=%s, block_hash=%s\n",
2261  BlockFilterTypeName(filter_type), block_index->GetBlockHash().ToString());
2262  return;
2263  }
2264  }
2265 
2267  .Make(NetMsgType::CFCHECKPT,
2268  filter_type_ser,
2269  stop_index->GetBlockHash(),
2270  headers);
2271  connman.PushMessage(&peer, std::move(msg));
2272 }
2273 
2274 void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDataStream& vRecv,
2275  const std::chrono::microseconds time_received,
2276  const std::atomic<bool>& interruptMsgProc)
2277 {
2278  LogPrint(BCLog::NET, "received: %s (%u bytes) peer=%d\n", SanitizeString(msg_type), vRecv.size(), pfrom.GetId());
2279  if (gArgs.IsArgSet("-dropmessagestest") && GetRand(gArgs.GetArg("-dropmessagestest", 0)) == 0)
2280  {
2281  LogPrintf("dropmessagestest DROPPING RECV MESSAGE\n");
2282  return;
2283  }
2284 
2285  PeerRef peer = GetPeerRef(pfrom.GetId());
2286  if (peer == nullptr) return;
2287 
2288  if (msg_type == NetMsgType::VERSION) {
2289  // Each connection can only send one version message
2290  if (pfrom.nVersion != 0)
2291  {
2292  Misbehaving(pfrom.GetId(), 1, "redundant version message");
2293  return;
2294  }
2295 
2296  int64_t nTime;
2297  CAddress addrMe;
2298  CAddress addrFrom;
2299  uint64_t nNonce = 1;
2300  uint64_t nServiceInt;
2301  ServiceFlags nServices;
2302  int nVersion;
2303  std::string cleanSubVer;
2304  int nStartingHeight = -1;
2305  bool fRelay = true;
2306 
2307  vRecv >> nVersion >> nServiceInt >> nTime >> addrMe;
2308  if (nTime < 0) {
2309  nTime = 0;
2310  }
2311  nServices = ServiceFlags(nServiceInt);
2312  if (!pfrom.IsInboundConn())
2313  {
2314  m_connman.SetServices(pfrom.addr, nServices);
2315  }
2316  if (pfrom.ExpectServicesFromConn() && !HasAllDesirableServiceFlags(nServices))
2317  {
2318  LogPrint(BCLog::NET, "peer=%d does not offer the expected services (%08x offered, %08x expected); disconnecting\n", pfrom.GetId(), nServices, GetDesirableServiceFlags(nServices));
2319  pfrom.fDisconnect = true;
2320  return;
2321  }
2322 
2323  if (nVersion < MIN_PEER_PROTO_VERSION) {
2324  // disconnect from peers older than this proto version
2325  LogPrint(BCLog::NET, "peer=%d using obsolete version %i; disconnecting\n", pfrom.GetId(), nVersion);
2326  pfrom.fDisconnect = true;
2327  return;
2328  }
2329 
2330  if (!vRecv.empty())
2331  vRecv >> addrFrom >> nNonce;
2332  if (!vRecv.empty()) {
2333  std::string strSubVer;
2334  vRecv >> LIMITED_STRING(strSubVer, MAX_SUBVERSION_LENGTH);
2335  cleanSubVer = SanitizeString(strSubVer);
2336  }
2337  if (!vRecv.empty()) {
2338  vRecv >> nStartingHeight;
2339  }
2340  if (!vRecv.empty())
2341  vRecv >> fRelay;
2342  // Disconnect if we connected to ourself
2343  if (pfrom.IsInboundConn() && !m_connman.CheckIncomingNonce(nNonce))
2344  {
2345  LogPrintf("connected to self at %s, disconnecting\n", pfrom.addr.ToString());
2346  pfrom.fDisconnect = true;
2347  return;
2348  }
2349 
2350  if (pfrom.IsInboundConn() && addrMe.IsRoutable())
2351  {
2352  SeenLocal(addrMe);
2353  }
2354 
2355  // Be shy and don't send version until we hear
2356  if (pfrom.IsInboundConn())
2357  PushNodeVersion(pfrom, m_connman, GetAdjustedTime());
2358 
2359  // Change version
2360  const int greatest_common_version = std::min(nVersion, PROTOCOL_VERSION);
2361  pfrom.SetCommonVersion(greatest_common_version);
2362  pfrom.nVersion = nVersion;
2363 
2364  const CNetMsgMaker msg_maker(greatest_common_version);
2365 
2366  if (greatest_common_version >= WTXID_RELAY_VERSION) {
2367  m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::WTXIDRELAY));
2368  }
2369 
2370  // Signal ADDRv2 support (BIP155).
2371  if (greatest_common_version >= 70016) {
2372  // BIP155 defines addrv2 and sendaddrv2 for all protocol versions, but some
2373  // implementations reject messages they don't know. As a courtesy, don't send
2374  // it to nodes with a version before 70016, as no software is known to support
2375  // BIP155 that doesn't announce at least that protocol version number.
2376  m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::SENDADDRV2));
2377  }
2378 
2379  m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::VERACK));
2380 
2381  pfrom.nServices = nServices;
2382  pfrom.SetAddrLocal(addrMe);
2383  {
2384  LOCK(pfrom.cs_SubVer);
2385  pfrom.cleanSubVer = cleanSubVer;
2386  }
2387  pfrom.nStartingHeight = nStartingHeight;
2388 
2389  // set nodes not relaying blocks and tx and not serving (parts) of the historical blockchain as "clients"
2390  pfrom.fClient = (!(nServices & NODE_NETWORK) && !(nServices & NODE_NETWORK_LIMITED));
2391 
2392  // set nodes not capable of serving the complete blockchain history as "limited nodes"
2393  pfrom.m_limited_node = (!(nServices & NODE_NETWORK) && (nServices & NODE_NETWORK_LIMITED));
2394 
2395  if (pfrom.m_tx_relay != nullptr) {
2396  LOCK(pfrom.m_tx_relay->cs_filter);
2397  pfrom.m_tx_relay->fRelayTxes = fRelay; // set to true after we get the first filter* message
2398  }
2399 
2400  if((nServices & NODE_WITNESS))
2401  {
2402  LOCK(cs_main);
2403  State(pfrom.GetId())->fHaveWitness = true;
2404  }
2405 
2406  // Potentially mark this peer as a preferred download peer.
2407  {
2408  LOCK(cs_main);
2409  UpdatePreferredDownload(pfrom, State(pfrom.GetId()));
2410  }
2411 
2412  if (!pfrom.IsInboundConn() && !pfrom.IsBlockOnlyConn()) {
2413  // For outbound peers, we try to relay our address (so that other
2414  // nodes can try to find us more quickly, as we have no guarantee
2415  // that an outbound peer is even aware of how to reach us) and do a
2416  // one-time address fetch (to help populate/update our addrman). If
2417  // we're starting up for the first time, our addrman may be pretty
2418  // empty and no one will know who we are, so these mechanisms are
2419  // important to help us connect to the network.
2420  //
2421  // We skip this for BLOCK_RELAY peers to avoid potentially leaking
2422  // information about our BLOCK_RELAY connections via address relay.
2424  {
2425  CAddress addr = GetLocalAddress(&pfrom.addr, pfrom.GetLocalServices());
2426  FastRandomContext insecure_rand;
2427  if (addr.IsRoutable())
2428  {
2429  LogPrint(BCLog::NET, "ProcessMessages: advertising address %s\n", addr.ToString());
2430  pfrom.PushAddress(addr, insecure_rand);
2431  } else if (IsPeerAddrLocalGood(&pfrom)) {
2432  addr.SetIP(addrMe);
2433  LogPrint(BCLog::NET, "ProcessMessages: advertising address %s\n", addr.ToString());
2434  pfrom.PushAddress(addr, insecure_rand);
2435  }
2436  }
2437 
2438  // Get recent addresses
2439  m_connman.PushMessage(&pfrom, CNetMsgMaker(greatest_common_version).Make(NetMsgType::GETADDR));
2440  pfrom.fGetAddr = true;
2441  }
2442 
2443  if (!pfrom.IsInboundConn()) {
2444  // For non-inbound connections, we update the addrman to record
2445  // connection success so that addrman will have an up-to-date
2446  // notion of which peers are online and available.
2447  //
2448  // While we strive to not leak information about block-relay-only
2449  // connections via the addrman, not moving an address to the tried
2450  // table is also potentially detrimental because new-table entries
2451  // are subject to eviction in the event of addrman collisions. We
2452  // mitigate the information-leak by never calling
2453  // CAddrMan::Connected() on block-relay-only peers; see
2454  // FinalizeNode().
2455  //
2456  // This moves an address from New to Tried table in Addrman,
2457  // resolves tried-table collisions, etc.
2459  }
2460 
2461  std::string remoteAddr;
2462  if (fLogIPs)
2463  remoteAddr = ", peeraddr=" + pfrom.addr.ToString();
2464 
2465  LogPrint(BCLog::NET, "receive version message: %s: version %d, blocks=%d, us=%s, peer=%d%s\n",
2466  cleanSubVer, pfrom.nVersion,
2467  pfrom.nStartingHeight, addrMe.ToString(), pfrom.GetId(),
2468  remoteAddr);
2469 
2470  int64_t nTimeOffset = nTime - GetTime();
2471  pfrom.nTimeOffset = nTimeOffset;
2472  AddTimeData(pfrom.addr, nTimeOffset);
2473 
2474  // If the peer is old enough to have the old alert system, send it the final alert.
2475  if (greatest_common_version <= 70012) {
2476  CDataStream finalAlert(ParseHex("60010000000000000000000000ffffff7f00000000ffffff7ffeffff7f01ffffff7f00000000ffffff7f00ffffff7f002f555247454e543a20416c657274206b657920636f6d70726f6d697365642c2075706772616465207265717569726564004630440220653febd6410f470f6bae11cad19c48413becb1ac2c17f908fd0fd53bdc3abd5202206d0e9c96fe88d4a0f01ed9dedae2b6f9e00da94cad0fecaae66ecf689bf71b50"), SER_NETWORK, PROTOCOL_VERSION);
2477  m_connman.PushMessage(&pfrom, CNetMsgMaker(greatest_common_version).Make("alert", finalAlert));
2478  }
2479 
2480  // Feeler connections exist only to verify if address is online.
2481  if (pfrom.IsFeelerConn()) {
2482  pfrom.fDisconnect = true;
2483  }
2484  return;
2485  }
2486 
2487  if (pfrom.nVersion == 0) {
2488  // Must have a version message before anything else
2489  Misbehaving(pfrom.GetId(), 1, "non-version message before version handshake");
2490  return;
2491  }
2492 
2493  // At this point, the outgoing message serialization version can't change.
2494  const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
2495 
2496  if (msg_type == NetMsgType::VERACK) {
2497  if (pfrom.fSuccessfullyConnected) return;
2498 
2499  if (!pfrom.IsInboundConn()) {
2500  // Mark this node as currently connected, so we update its timestamp later.
2501  LOCK(cs_main);
2502  State(pfrom.GetId())->fCurrentlyConnected = true;
2503  LogPrintf("New outbound peer connected: version: %d, blocks=%d, peer=%d%s (%s)\n",
2504  pfrom.nVersion.load(), pfrom.nStartingHeight,
2505  pfrom.GetId(), (fLogIPs ? strprintf(", peeraddr=%s", pfrom.addr.ToString()) : ""),
2506  pfrom.m_tx_relay == nullptr ? "block-relay" : "full-relay");
2507  }
2508 
2509  if (pfrom.GetCommonVersion() >= SENDHEADERS_VERSION) {
2510  // Tell our peer we prefer to receive headers rather than inv's
2511  // We send this to non-NODE NETWORK peers as well, because even
2512  // non-NODE NETWORK peers can announce blocks (such as pruning
2513  // nodes)
2514  m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::SENDHEADERS));
2515  }
2516  if (pfrom.GetCommonVersion() >= SHORT_IDS_BLOCKS_VERSION) {
2517  // Tell our peer we are willing to provide version 1 or 2 cmpctblocks
2518  // However, we do not request new block announcements using
2519  // cmpctblock messages.
2520  // We send this to non-NODE NETWORK peers as well, because
2521  // they may wish to request compact blocks from us
2522  bool fAnnounceUsingCMPCTBLOCK = false;
2523  uint64_t nCMPCTBLOCKVersion = 2;
2524  if (pfrom.GetLocalServices() & NODE_WITNESS)
2525  m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
2526  nCMPCTBLOCKVersion = 1;
2527  m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
2528  }
2529  pfrom.fSuccessfullyConnected = true;
2530  return;
2531  }
2532 
2533  // Feature negotiation of wtxidrelay should happen between VERSION and
2534  // VERACK, to avoid relay problems from switching after a connection is up
2535  if (msg_type == NetMsgType::WTXIDRELAY) {
2536  if (pfrom.fSuccessfullyConnected) {
2537  // Disconnect peers that send wtxidrelay message after VERACK; this
2538  // must be negotiated between VERSION and VERACK.
2539  pfrom.fDisconnect = true;
2540  return;
2541  }
2542  if (pfrom.GetCommonVersion() >= WTXID_RELAY_VERSION) {
2543  LOCK(cs_main);
2544  if (!State(pfrom.GetId())->m_wtxid_relay) {
2545  State(pfrom.GetId())->m_wtxid_relay = true;
2546  g_wtxid_relay_peers++;
2547  }
2548  }
2549  return;
2550  }
2551 
2552  if (msg_type == NetMsgType::SENDADDRV2) {
2553  if (pfrom.fSuccessfullyConnected) {
2554  // Disconnect peers that send SENDADDRV2 message after VERACK; this
2555  // must be negotiated between VERSION and VERACK.
2556  pfrom.fDisconnect = true;
2557  return;
2558  }
2559  pfrom.m_wants_addrv2 = true;
2560  return;
2561  }
2562 
    // Any message other than the handshake/negotiation messages handled above
    // is not acceptable before VERACK completes: log and ignore it.
    if (!pfrom.fSuccessfullyConnected) {
        LogPrint(BCLog::NET, "Unsupported message \"%s\" prior to verack from peer=%d\n", SanitizeString(msg_type), pfrom.GetId());
        return;
    }
2567 
    // ADDR and ADDRV2 share one handler; they differ only in the on-the-wire
    // serialization of the addresses (v2 format per BIP155).
    if (msg_type == NetMsgType::ADDR || msg_type == NetMsgType::ADDRV2) {
        int stream_version = vRecv.GetVersion();
        if (msg_type == NetMsgType::ADDRV2) {
            // Add ADDRV2_FORMAT to the version so that the CNetAddr and CAddress
            // unserialize methods know that an address in v2 format is coming.
            stream_version |= ADDRV2_FORMAT;
        }

        OverrideStream<CDataStream> s(&vRecv, vRecv.GetType(), stream_version);
        std::vector<CAddress> vAddr;

        s >> vAddr;

        // Connections that don't participate in address relay drop the
        // payload after deserializing it.
        if (!pfrom.RelayAddrsWithConn()) {
            return;
        }
        if (vAddr.size() > MAX_ADDR_TO_SEND)
        {
            // Oversized addr message: mildly penalize the peer and stop.
            Misbehaving(pfrom.GetId(), 20, strprintf("%s message size = %u", msg_type, vAddr.size()));
            return;
        }

        // Store the new addresses
        std::vector<CAddress> vAddrOk;
        int64_t nNow = GetAdjustedTime();
        int64_t nSince = nNow - 10 * 60;
        for (CAddress& addr : vAddr)
        {
            if (interruptMsgProc)
                return;

            // We only bother storing full nodes, though this may include
            // things which we would not make an outbound connection to, in
            // part because we may make feeler connections to them.
            // NOTE(review): the guard condition for the `continue` below
            // appears to have been dropped from this extract (a service-flags
            // check is expected here) — verify against the full source file.
                continue;

            // Clamp absent or far-future timestamps to "5 days old" so such
            // entries neither look fresh nor qualify for relay below.
            if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60)
                addr.nTime = nNow - 5 * 24 * 60 * 60;
            pfrom.AddAddressKnown(addr);
            if (m_banman && (m_banman->IsDiscouraged(addr) || m_banman->IsBanned(addr))) {
                // Do not process banned/discouraged addresses beyond remembering we received them
                continue;
            }
            bool fReachable = IsReachable(addr);
            // Only echo fresh, routable addresses from small announcements we
            // did not explicitly solicit via getaddr.
            if (addr.nTime > nSince && !pfrom.fGetAddr && vAddr.size() <= 10 && addr.IsRoutable())
            {
                // Relay to a limited number of other nodes
                RelayAddress(addr, fReachable, m_connman);
            }
            // Do not store addresses outside our network
            if (fReachable)
                vAddrOk.push_back(addr);
        }
        // Hand the accepted addresses to the connection manager / addrman.
        m_connman.AddNewAddresses(vAddrOk, pfrom.addr, 2 * 60 * 60);
        if (vAddr.size() < 1000)
            pfrom.fGetAddr = false;
        // ADDR_FETCH connections exist only to harvest addresses; once a
        // response has arrived their job is done.
        if (pfrom.IsAddrFetchConn())
            pfrom.fDisconnect = true;
        return;
    }
2629 
2630  if (msg_type == NetMsgType::SENDHEADERS) {
2631  LOCK(cs_main);
2632  State(pfrom.GetId())->fPreferHeaders = true;
2633  return;
2634  }
2635 
2636  if (msg_type == NetMsgType::SENDCMPCT) {
2637  bool fAnnounceUsingCMPCTBLOCK = false;
2638  uint64_t nCMPCTBLOCKVersion = 0;
2639  vRecv >> fAnnounceUsingCMPCTBLOCK >> nCMPCTBLOCKVersion;
2640  if (nCMPCTBLOCKVersion == 1 || ((pfrom.GetLocalServices() & NODE_WITNESS) && nCMPCTBLOCKVersion == 2)) {
2641  LOCK(cs_main);
2642  // fProvidesHeaderAndIDs is used to "lock in" version of compact blocks we send (fWantsCmpctWitness)
2643  if (!State(pfrom.GetId())->fProvidesHeaderAndIDs) {
2644  State(pfrom.GetId())->fProvidesHeaderAndIDs = true;
2645  State(pfrom.GetId())->fWantsCmpctWitness = nCMPCTBLOCKVersion == 2;
2646  }
2647  if (State(pfrom.GetId())->fWantsCmpctWitness == (nCMPCTBLOCKVersion == 2)) // ignore later version announces
2648  State(pfrom.GetId())->fPreferHeaderAndIDs = fAnnounceUsingCMPCTBLOCK;
2649  if (!State(pfrom.GetId())->fSupportsDesiredCmpctVersion) {
2650  if (pfrom.GetLocalServices() & NODE_WITNESS)
2651  State(pfrom.GetId())->fSupportsDesiredCmpctVersion = (nCMPCTBLOCKVersion == 2);
2652  else
2653  State(pfrom.GetId())->fSupportsDesiredCmpctVersion = (nCMPCTBLOCKVersion == 1);
2654  }
2655  }
2656  return;
2657  }
2658 
    if (msg_type == NetMsgType::INV) {
        std::vector<CInv> vInv;
        vRecv >> vInv;
        if (vInv.size() > MAX_INV_SZ)
        {
            // Oversized inv: mildly penalize the peer and stop processing.
            Misbehaving(pfrom.GetId(), 20, strprintf("inv message size = %u", vInv.size()));
            return;
        }

        // We won't accept tx inv's if we're in blocks-only mode, or this is a
        // block-relay-only peer
        bool fBlocksOnly = !g_relay_txes || (pfrom.m_tx_relay == nullptr);

        // Allow peers with relay permission to send data other than blocks in blocks only mode
        if (pfrom.HasPermission(PF_RELAY)) {
            fBlocksOnly = false;
        }

        LOCK(cs_main);

        const auto current_time = GetTime<std::chrono::microseconds>();
        // Tracks the last announced block we don't already have (if any);
        // a single getheaders for it is sent after the loop. This points
        // into vInv, which outlives it.
        uint256* best_block{nullptr};

        for (CInv& inv : vInv) {
            if (interruptMsgProc) return;

            // Ignore INVs that don't match wtxidrelay setting.
            // Note that orphan parent fetching always uses MSG_TX GETDATAs regardless of the wtxidrelay setting.
            // This is fine as no INV messages are involved in that process.
            if (State(pfrom.GetId())->m_wtxid_relay) {
                if (inv.IsMsgTx()) continue;
            } else {
                if (inv.IsMsgWtx()) continue;
            }

            if (inv.IsMsgBlk()) {
                const bool fAlreadyHave = AlreadyHaveBlock(inv.hash);
                LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId());

                UpdateBlockAvailability(pfrom.GetId(), inv.hash);
                if (!fAlreadyHave && !fImporting && !fReindex && !mapBlocksInFlight.count(inv.hash)) {
                    // Headers-first is the primary method of announcement on
                    // the network. If a node fell back to sending blocks by inv,
                    // it's probably for a re-org. The final block hash
                    // provided should be the highest, so send a getheaders and
                    // then fetch the blocks we need to catch up.
                    best_block = &inv.hash;
                }
            } else if (inv.IsGenTxMsg()) {
                const GenTxid gtxid = ToGenTxid(inv);
                const bool fAlreadyHave = AlreadyHaveTx(gtxid, m_mempool);
                LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId());

                pfrom.AddKnownTx(inv.hash);
                if (fBlocksOnly) {
                    // Tx announcements from a peer we told not to relay
                    // transactions are a protocol violation: disconnect.
                    LogPrint(BCLog::NET, "transaction (%s) inv sent in violation of protocol, disconnecting peer=%d\n", inv.hash.ToString(), pfrom.GetId());
                    pfrom.fDisconnect = true;
                    return;
                } else if (!fAlreadyHave && !m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
                    // Hand the announcement to the tx download manager.
                    AddTxAnnouncement(pfrom, gtxid, current_time);
                }
            } else {
                LogPrint(BCLog::NET, "Unknown inv type \"%s\" received from peer=%d\n", inv.ToString(), pfrom.GetId());
            }
        }

        if (best_block != nullptr) {
            m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexBestHeader), *best_block));
            LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, best_block->ToString(), pfrom.GetId());
        }

        return;
    }
2732 
2733  if (msg_type == NetMsgType::GETDATA) {
2734  std::vector<CInv> vInv;
2735  vRecv >> vInv;
2736  if (vInv.size() > MAX_INV_SZ)
2737  {
2738  Misbehaving(pfrom.GetId(), 20, strprintf("getdata message size = %u", vInv.size()));
2739  return;
2740  }
2741 
2742  LogPrint(BCLog::NET, "received getdata (%u invsz) peer=%d\n", vInv.size(), pfrom.GetId());
2743 
2744  if (vInv.size() > 0) {
2745  LogPrint(BCLog::NET, "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom.GetId());
2746  }
2747 
2748  {
2749  LOCK(peer->m_getdata_requests_mutex);
2750  peer->m_getdata_requests.insert(peer->m_getdata_requests.end(), vInv.begin(), vInv.end());
2751  ProcessGetData(pfrom, *peer, m_chainparams, m_connman, m_mempool, interruptMsgProc);
2752  }
2753 
2754  return;
2755  }
2756 
    if (msg_type == NetMsgType::GETBLOCKS) {
        CBlockLocator locator;
        uint256 hashStop;
        vRecv >> locator >> hashStop;

        if (locator.vHave.size() > MAX_LOCATOR_SZ) {
            // Oversized locator: disconnect rather than walk a huge list.
            LogPrint(BCLog::NET, "getblocks locator size %lld > %d, disconnect peer=%d\n", locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.GetId());
            pfrom.fDisconnect = true;
            return;
        }

        // We might have announced the currently-being-connected tip using a
        // compact block, which resulted in the peer sending a getblocks
        // request, which we would otherwise respond to without the new block.
        // To avoid this situation we simply verify that we are on our best
        // known chain now. This is super overkill, but we handle it better
        // for getheaders requests, and there are no known nodes which support
        // compact blocks but still use getblocks to request blocks.
        {
            std::shared_ptr<const CBlock> a_recent_block;
            {
                LOCK(cs_most_recent_block);
                a_recent_block = most_recent_block;
            }
            BlockValidationState state;
            if (!ActivateBestChain(state, m_chainparams, a_recent_block)) {
                LogPrint(BCLog::NET, "failed to activate chain (%s)\n", state.ToString());
            }
        }

        LOCK(cs_main);

        // Find the last block the caller has in the main chain
        const CBlockIndex* pindex = FindForkInGlobalIndex(::ChainActive(), locator);

        // Send the rest of the chain
        if (pindex)
            pindex = ::ChainActive().Next(pindex);
        int nLimit = 500;
        LogPrint(BCLog::NET, "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom.GetId());
        for (; pindex; pindex = ::ChainActive().Next(pindex))
        {
            if (pindex->GetBlockHash() == hashStop)
            {
                LogPrint(BCLog::NET, " getblocks stopping at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
                break;
            }
            // If pruning, don't inv blocks unless we have on disk and are likely to still have
            // for some reasonable time window (1 hour) that block relay might require.
            const int nPrunedBlocksLikelyToHave = MIN_BLOCKS_TO_KEEP - 3600 / m_chainparams.GetConsensus().nPowTargetSpacing;
            if (fPruneMode && (!(pindex->nStatus & BLOCK_HAVE_DATA) || pindex->nHeight <= ::ChainActive().Tip()->nHeight - nPrunedBlocksLikelyToHave))
            {
                LogPrint(BCLog::NET, " getblocks stopping, pruned or too old block at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
                break;
            }
            // Queue this block hash for inv announcement to the peer.
            WITH_LOCK(pfrom.cs_inventory, pfrom.vInventoryBlockToSend.push_back(pindex->GetBlockHash()));
            if (--nLimit <= 0)
            {
                // When this block is requested, we'll send an inv that'll
                // trigger the peer to getblocks the next batch of inventory.
                LogPrint(BCLog::NET, " getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
                pfrom.hashContinue = pindex->GetBlockHash();
                break;
            }
        }
        return;
    }
2824 
    if (msg_type == NetMsgType::GETBLOCKTXN) {
        // Compact-block peer asks for the transactions of a block it could
        // not fully reconstruct (BIP 152).
        // NOTE(review): the declaration of `req` appears to have been dropped
        // from this extract (presumably `BlockTransactionsRequest req;`) —
        // verify against the full source file.
        vRecv >> req;

        std::shared_ptr<const CBlock> recent_block;
        {
            LOCK(cs_most_recent_block);
            if (most_recent_block_hash == req.blockhash)
                recent_block = most_recent_block;
            // Unlock cs_most_recent_block to avoid cs_main lock inversion
        }
        if (recent_block) {
            // Fast path: serve from the cached most-recent block without
            // touching cs_main or disk.
            SendBlockTransactions(pfrom, *recent_block, req);
            return;
        }

        {
            LOCK(cs_main);

            const CBlockIndex* pindex = LookupBlockIndex(req.blockhash);
            if (!pindex || !(pindex->nStatus & BLOCK_HAVE_DATA)) {
                LogPrint(BCLog::NET, "Peer %d sent us a getblocktxn for a block we don't have\n", pfrom.GetId());
                return;
            }

            // Only serve blocktxn for sufficiently recent blocks; deep
            // requests fall through to the full-block response below.
            if (pindex->nHeight >= ::ChainActive().Height() - MAX_BLOCKTXN_DEPTH) {
                CBlock block;
                bool ret = ReadBlockFromDisk(block, pindex, m_chainparams.GetConsensus());
                assert(ret);

                SendBlockTransactions(pfrom, block, req);
                return;
            }
        }

        // If an older block is requested (should never happen in practice,
        // but can happen in tests) send a block response instead of a
        // blocktxn response. Sending a full block response instead of a
        // small blocktxn response is preferable in the case where a peer
        // might maliciously send lots of getblocktxn requests to trigger
        // expensive disk reads, because it will require the peer to
        // actually receive all the data read from disk over the network.
        LogPrint(BCLog::NET, "Peer %d sent us a getblocktxn for a block > %i deep\n", pfrom.GetId(), MAX_BLOCKTXN_DEPTH);
        CInv inv;
        WITH_LOCK(cs_main, inv.type = State(pfrom.GetId())->fWantsCmpctWitness ? MSG_WITNESS_BLOCK : MSG_BLOCK);
        inv.hash = req.blockhash;
        WITH_LOCK(peer->m_getdata_requests_mutex, peer->m_getdata_requests.push_back(inv));
        // The message processing loop will go around again (without pausing) and we'll respond then
        return;
    }
2875 
    if (msg_type == NetMsgType::GETHEADERS) {
        CBlockLocator locator;
        uint256 hashStop;
        vRecv >> locator >> hashStop;

        if (locator.vHave.size() > MAX_LOCATOR_SZ) {
            // Oversized locator: disconnect rather than walk a huge list.
            LogPrint(BCLog::NET, "getheaders locator size %lld > %d, disconnect peer=%d\n", locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.GetId());
            pfrom.fDisconnect = true;
            return;
        }

        LOCK(cs_main);
        // While in initial block download, only peers with the 'download'
        // permission get a headers response.
        if (::ChainstateActive().IsInitialBlockDownload() && !pfrom.HasPermission(PF_DOWNLOAD)) {
            LogPrint(BCLog::NET, "Ignoring getheaders from peer=%d because node is in initial block download\n", pfrom.GetId());
            return;
        }

        CNodeState *nodestate = State(pfrom.GetId());
        const CBlockIndex* pindex = nullptr;
        if (locator.IsNull())
        {
            // If locator is null, return the hashStop block
            pindex = LookupBlockIndex(hashStop);
            if (!pindex) {
                return;
            }

            if (!BlockRequestAllowed(pindex, m_chainparams.GetConsensus())) {
                LogPrint(BCLog::NET, "%s: ignoring request from peer=%i for old block header that isn't in the main chain\n", __func__, pfrom.GetId());
                return;
            }
        }
        else
        {
            // Find the last block the caller has in the main chain
            pindex = FindForkInGlobalIndex(::ChainActive(), locator);
            if (pindex)
                pindex = ::ChainActive().Next(pindex);
        }

        // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end
        std::vector<CBlock> vHeaders;
        int nLimit = MAX_HEADERS_RESULTS;
        LogPrint(BCLog::NET, "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), pfrom.GetId());
        for (; pindex; pindex = ::ChainActive().Next(pindex))
        {
            vHeaders.push_back(pindex->GetBlockHeader());
            if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop)
                break;
        }
        // pindex can be nullptr either if we sent ::ChainActive().Tip() OR
        // if our peer has ::ChainActive().Tip() (and thus we are sending an empty
        // headers message). In both cases it's safe to update
        // pindexBestHeaderSent to be our tip.
        //
        // It is important that we simply reset the BestHeaderSent value here,
        // and not max(BestHeaderSent, newHeaderSent). We might have announced
        // the currently-being-connected tip using a compact block, which
        // resulted in the peer sending a headers request, which we respond to
        // without the new block. By resetting the BestHeaderSent, we ensure we
        // will re-announce the new block via headers (or compact blocks again)
        // in the SendMessages logic.
        nodestate->pindexBestHeaderSent = pindex ? pindex : ::ChainActive().Tip();
        m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
        return;
    }
2942 
    if (msg_type == NetMsgType::TX) {
        // Stop processing the transaction early if
        // 1) We are in blocks only mode and peer has no relay permission
        // 2) This peer is a block-relay-only peer
        if ((!g_relay_txes && !pfrom.HasPermission(PF_RELAY)) || (pfrom.m_tx_relay == nullptr))
        {
            LogPrint(BCLog::NET, "transaction sent in violation of protocol peer=%d\n", pfrom.GetId());
            pfrom.fDisconnect = true;
            return;
        }

        CTransactionRef ptx;
        vRecv >> ptx;
        const CTransaction& tx = *ptx;

        const uint256& txid = ptx->GetHash();
        const uint256& wtxid = ptx->GetWitnessHash();

        LOCK2(cs_main, g_cs_orphans);

        CNodeState* nodestate = State(pfrom.GetId());

        // The peer "knows" the hash it announced this tx under: wtxid for
        // wtxidrelay peers, txid otherwise.
        const uint256& hash = nodestate->m_wtxid_relay ? wtxid : txid;
        pfrom.AddKnownTx(hash);
        if (nodestate->m_wtxid_relay && txid != wtxid) {
            // Insert txid into filterInventoryKnown, even for
            // wtxidrelay peers. This prevents re-adding of
            // unconfirmed parents to the recently_announced
            // filter, when a child tx is requested. See
            // ProcessGetData().
            pfrom.AddKnownTx(txid);
        }

        // Tell the tx request tracker this peer has responded.
        m_txrequest.ReceivedResponse(pfrom.GetId(), txid);
        if (tx.HasWitness()) m_txrequest.ReceivedResponse(pfrom.GetId(), wtxid);

        // We do the AlreadyHaveTx() check using wtxid, rather than txid - in the
        // absence of witness malleation, this is strictly better, because the
        // recent rejects filter may contain the wtxid but rarely contains
        // the txid of a segwit transaction that has been rejected.
        // In the presence of witness malleation, it's possible that by only
        // doing the check with wtxid, we could overlook a transaction which
        // was confirmed with a different witness, or exists in our mempool
        // with a different witness, but this has limited downside:
        // mempool validation does its own lookup of whether we have the txid
        // already; and an adversary can already relay us old transactions
        // (older than our recency filter) if trying to DoS us, without any need
        // for witness malleation.
        if (AlreadyHaveTx(GenTxid(/* is_wtxid=*/true, wtxid), m_mempool)) {
            if (pfrom.HasPermission(PF_FORCERELAY)) {
                // Always relay transactions received from peers with forcerelay
                // permission, even if they were already in the mempool, allowing
                // the node to function as a gateway for nodes hidden behind it.
                if (!m_mempool.exists(tx.GetHash())) {
                    LogPrintf("Not relaying non-mempool transaction %s from forcerelay peer=%d\n", tx.GetHash().ToString(), pfrom.GetId());
                } else {
                    LogPrintf("Force relaying tx %s from peer=%d\n", tx.GetHash().ToString(), pfrom.GetId());
                    // NOTE(review): the relay call itself appears to have been
                    // dropped from this extract here — verify against the full
                    // source file.
                }
            }
            return;
        }

        TxValidationState state;
        std::list<CTransactionRef> lRemovedTxn;

        if (AcceptToMemoryPool(m_mempool, state, ptx, &lRemovedTxn, false /* bypass_limits */)) {
            // Accepted into our mempool: sanity-check pool state.
            m_mempool.check(&::ChainstateActive().CoinsTip());
            // As this version of the transaction was acceptable, we can forget about any
            // requests for it.
            m_txrequest.ForgetTxHash(tx.GetHash());
            m_txrequest.ForgetTxHash(tx.GetWitnessHash());
            // NOTE(review): a statement appears to be missing from this
            // extract here (between the ForgetTxHash calls and the loop
            // below) — verify against the full source file.
            // Collect any orphans that were waiting on this transaction's
            // outputs so they can be reconsidered.
            for (unsigned int i = 0; i < tx.vout.size(); i++) {
                auto it_by_prev = mapOrphanTransactionsByPrev.find(COutPoint(txid, i));
                if (it_by_prev != mapOrphanTransactionsByPrev.end()) {
                    for (const auto& elem : it_by_prev->second) {
                        peer->m_orphan_work_set.insert(elem->first);
                    }
                }
            }

            pfrom.nLastTXTime = GetTime();

            LogPrint(BCLog::MEMPOOL, "AcceptToMemoryPool: peer=%d: accepted %s (poolsz %u txn, %u kB)\n",
                pfrom.GetId(),
                tx.GetHash().ToString(),
            // NOTE(review): the final argument line of this LogPrint appears
            // to have been dropped from this extract — verify against the
            // full source file.

            for (const CTransactionRef& removedTx : lRemovedTxn) {
                AddToCompactExtraTransactions(removedTx);
            }

            // Recursively process any orphan transactions that depended on this one
            ProcessOrphanTx(peer->m_orphan_work_set);
        }
        // NOTE(review): the branch header for the block below appears to be
        // missing from this extract (from its contents it handles the
        // orphan / missing-parents case) — verify against the full source.
        {
            bool fRejectedParents = false; // It may be the case that the orphans parents have all been rejected

            // Deduplicate parent txids, so that we don't have to loop over
            // the same parent txid more than once down below.
            std::vector<uint256> unique_parents;
            unique_parents.reserve(tx.vin.size());
            for (const CTxIn& txin : tx.vin) {
                // We start with all parents, and then remove duplicates below.
                unique_parents.push_back(txin.prevout.hash);
            }
            std::sort(unique_parents.begin(), unique_parents.end());
            unique_parents.erase(std::unique(unique_parents.begin(), unique_parents.end()), unique_parents.end());
            for (const uint256& parent_txid : unique_parents) {
                if (recentRejects->contains(parent_txid)) {
                    fRejectedParents = true;
                    break;
                }
            }
            if (!fRejectedParents) {
                const auto current_time = GetTime<std::chrono::microseconds>();

                for (const uint256& parent_txid : unique_parents) {
                    // Here, we only have the txid (and not wtxid) of the
                    // inputs, so we only request in txid mode, even for
                    // wtxidrelay peers.
                    // Eventually we should replace this with an improved
                    // protocol for getting all unconfirmed parents.
                    const GenTxid gtxid{/* is_wtxid=*/false, parent_txid};
                    pfrom.AddKnownTx(parent_txid);
                    if (!AlreadyHaveTx(gtxid, m_mempool)) AddTxAnnouncement(pfrom, gtxid, current_time);
                }
                AddOrphanTx(ptx, pfrom.GetId());

                // Once added to the orphan pool, a tx is considered AlreadyHave, and we shouldn't request it anymore.
                m_txrequest.ForgetTxHash(tx.GetHash());
                m_txrequest.ForgetTxHash(tx.GetWitnessHash());

                // DoS prevention: do not allow mapOrphanTransactions to grow unbounded (see CVE-2012-3789)
                unsigned int nMaxOrphanTx = (unsigned int)std::max((int64_t)0, gArgs.GetArg("-maxorphantx", DEFAULT_MAX_ORPHAN_TRANSACTIONS));
                unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTx);
                if (nEvicted > 0) {
                    LogPrint(BCLog::MEMPOOL, "mapOrphan overflow, removed %u tx\n", nEvicted);
                }
            } else {
                LogPrint(BCLog::MEMPOOL, "not keeping orphan with rejected parents %s\n",tx.GetHash().ToString());
                // We will continue to reject this tx since it has rejected
                // parents so avoid re-requesting it from other peers.
                // Here we add both the txid and the wtxid, as we know that
                // regardless of what witness is provided, we will not accept
                // this, so we don't need to allow for redownload of this txid
                // from any of our non-wtxidrelay peers.
                recentRejects->insert(tx.GetHash());
                recentRejects->insert(tx.GetWitnessHash());
                m_txrequest.ForgetTxHash(tx.GetHash());
                m_txrequest.ForgetTxHash(tx.GetWitnessHash());
            }
        } else {
            // NOTE(review): an inner `if` guarding the reject-filter
            // additions below appears to be missing from this extract (its
            // matching closing brace is still present further down) — verify
            // against the full source file.
            // We can add the wtxid of this transaction to our reject filter.
            // Do not add txids of witness transactions or witness-stripped
            // transactions to the filter, as they can have been malleated;
            // adding such txids to the reject filter would potentially
            // interfere with relay of valid transactions from peers that
            // do not support wtxid-based relay. See
            // https://github.com/bitcoin/bitcoin/issues/8279 for details.
            // We can remove this restriction (and always add wtxids to
            // the filter even for witness stripped transactions) once
            // wtxid-based relay is broadly deployed.
            // See also comments in https://github.com/bitcoin/bitcoin/pull/18044#discussion_r443419034
            // for concerns around weakening security of unupgraded nodes
            // if we start doing this too early.
            assert(recentRejects);
            recentRejects->insert(tx.GetWitnessHash());
            m_txrequest.ForgetTxHash(tx.GetWitnessHash());
            // If the transaction failed for TX_INPUTS_NOT_STANDARD,
            // then we know that the witness was irrelevant to the policy
            // failure, since this check depends only on the txid
            // (the scriptPubKey being spent is covered by the txid).
            // Add the txid to the reject filter to prevent repeated
            // processing of this transaction in the event that child
            // transactions are later received (resulting in
            // parent-fetching by txid via the orphan-handling logic).
            // NOTE(review): the condition described by the comment above
            // appears to have been dropped from this extract — verify against
            // the full source file.
            recentRejects->insert(tx.GetHash());
            m_txrequest.ForgetTxHash(tx.GetHash());
            }
            if (RecursiveDynamicUsage(*ptx) < 100000) {
                AddToCompactExtraTransactions(ptx);
            }
        }
        }

        // If a tx has been detected by recentRejects, we will have reached
        // this point and the tx will have been ignored. Because we haven't run
        // the tx through AcceptToMemoryPool, we won't have computed a DoS
        // score for it or determined exactly why we consider it invalid.
        //
        // This means we won't penalize any peer subsequently relaying a DoSy
        // tx (even if we penalized the first peer who gave it to us) because
        // we have to account for recentRejects showing false positives. In
        // other words, we shouldn't penalize a peer if we aren't *sure* they
        // submitted a DoSy tx.
        //
        // Note that recentRejects doesn't just record DoSy or invalid
        // transactions, but any tx not accepted by the mempool, which may be
        // due to node policy (vs. consensus). So we can't blanket penalize a
        // peer simply for relaying a tx that our recentRejects has caught,
        // regardless of false positives.

        if (state.IsInvalid()) {
            LogPrint(BCLog::MEMPOOLREJ, "%s from peer=%d was not accepted: %s\n", tx.GetHash().ToString(),
                pfrom.GetId(),
                state.ToString());
            // Apply any DoS penalty that the validation result warrants.
            MaybePunishNodeForTx(pfrom.GetId(), state);
        }
        return;
    }
3158 
3159  if (msg_type == NetMsgType::CMPCTBLOCK)
3160  {
3161  // Ignore cmpctblock received while importing
3162  if (fImporting || fReindex) {
3163  LogPrint(BCLog::NET, "Unexpected cmpctblock message received from peer %d\n", pfrom.GetId());
3164  return;
3165  }
3166 
3167  CBlockHeaderAndShortTxIDs cmpctblock;
3168  vRecv >> cmpctblock;
3169 
3170  bool received_new_header = false;
3171 
3172  {
3173  LOCK(cs_main);
3174 
3175  if (!LookupBlockIndex(cmpctblock.header.hashPrevBlock)) {
3176  // Doesn't connect (or is genesis), instead of DoSing in AcceptBlockHeader, request deeper headers
3177  if (!::ChainstateActive().IsInitialBlockDownload())
3178  m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexBestHeader), uint256()));
3179  return;
3180  }
3181 
3182  if (!LookupBlockIndex(cmpctblock.header.GetHash())) {
3183  received_new_header = true;
3184  }
3185  }
3186 
3187  const CBlockIndex *pindex = nullptr;
3188  BlockValidationState state;
3189  if (!m_chainman.ProcessNewBlockHeaders({cmpctblock.header}, state, m_chainparams, &pindex)) {
3190  if (state.IsInvalid()) {
3191  MaybePunishNodeForBlock(pfrom.GetId(), state, /*via_compact_block*/ true, "invalid header via cmpctblock");
3192  return;
3193  }
3194  }
3195 
3196  // When we succeed in decoding a block's txids from a cmpctblock
3197  // message we typically jump to the BLOCKTXN handling code, with a
3198  // dummy (empty) BLOCKTXN message, to re-use the logic there in
3199  // completing processing of the putative block (without cs_main).
3200  bool fProcessBLOCKTXN = false;
3202 
3203  // If we end up treating this as a plain headers message, call that as well
3204  // without cs_main.
3205  bool fRevertToHeaderProcessing = false;
3206 
3207  // Keep a CBlock for "optimistic" compactblock reconstructions (see
3208  // below)
3209  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
3210  bool fBlockReconstructed = false;
3211 
3212  {
3213  LOCK2(cs_main, g_cs_orphans);
3214  // If AcceptBlockHeader returned true, it set pindex
3215  assert(pindex);
3216  UpdateBlockAvailability(pfrom.GetId(), pindex->GetBlockHash());
3217 
3218  CNodeState *nodestate = State(pfrom.GetId());
3219 
3220  // If this was a new header with more work than our tip, update the
3221  // peer's last block announcement time
3222  if (received_new_header && pindex->nChainWork > ::ChainActive().Tip()->nChainWork) {
3223  nodestate->m_last_block_announcement = GetTime();
3224  }
3225 
3226  std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator blockInFlightIt = mapBlocksInFlight.find(pindex->GetBlockHash());
3227  bool fAlreadyInFlight = blockInFlightIt != mapBlocksInFlight.end();
3228 
3229  if (pindex->nStatus & BLOCK_HAVE_DATA) // Nothing to do here
3230  return;
3231 
3232  if (pindex->nChainWork <= ::ChainActive().Tip()->nChainWork || // We know something better
3233  pindex->nTx != 0) { // We had this block at some point, but pruned it
3234  if (fAlreadyInFlight) {
3235  // We requested this block for some reason, but our mempool will probably be useless
3236  // so we just grab the block via normal getdata
3237  std::vector<CInv> vInv(1);
3238  vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom), cmpctblock.header.GetHash());
3239  m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
3240  }
3241  return;
3242  }
3243 
3244  // If we're not close to tip yet, give up and let parallel block fetch work its magic
3245  if (!fAlreadyInFlight && !CanDirectFetch(m_chainparams.GetConsensus()))
3246  return;
3247 
3248  if (IsWitnessEnabled(pindex->pprev, m_chainparams.GetConsensus()) && !nodestate->fSupportsDesiredCmpctVersion) {
3249  // Don't bother trying to process compact blocks from v1 peers
3250  // after segwit activates.
3251  return;
3252  }
3253 
3254  // We want to be a bit conservative just to be extra careful about DoS
3255  // possibilities in compact block processing...
3256  if (pindex->nHeight <= ::ChainActive().Height() + 2) {
3257  if ((!fAlreadyInFlight && nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) ||
3258  (fAlreadyInFlight && blockInFlightIt->second.first == pfrom.GetId())) {
3259  std::list<QueuedBlock>::iterator* queuedBlockIt = nullptr;
3260  if (!MarkBlockAsInFlight(m_mempool, pfrom.GetId(), pindex->GetBlockHash(), pindex, &queuedBlockIt)) {
3261  if (!(*queuedBlockIt)->partialBlock)
3262  (*queuedBlockIt)->partialBlock.reset(new PartiallyDownloadedBlock(&m_mempool));
3263  else {
3264  // The block was already in flight using compact blocks from the same peer
3265  LogPrint(BCLog::NET, "Peer sent us compact block we were already syncing!\n");
3266  return;
3267  }
3268  }
3269 
3270  PartiallyDownloadedBlock& partialBlock = *(*queuedBlockIt)->partialBlock;
3271  ReadStatus status = partialBlock.InitData(cmpctblock, vExtraTxnForCompact);
3272  if (status == READ_STATUS_INVALID) {
3273  MarkBlockAsReceived(pindex->GetBlockHash()); // Reset in-flight state in case Misbehaving does not result in a disconnect
3274  Misbehaving(pfrom.GetId(), 100, "invalid compact block");
3275  return;
3276  } else if (status == READ_STATUS_FAILED) {
3277  // Duplicate txindexes, the block is now in-flight, so just request it
3278  std::vector<CInv> vInv(1);
3279  vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom), cmpctblock.header.GetHash());
3280  m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
3281  return;
3282  }
3283 
3285  for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) {
3286  if (!partialBlock.IsTxAvailable(i))
3287  req.indexes.push_back(i);
3288  }
3289  if (req.indexes.empty()) {
3290  // Dirty hack to jump to BLOCKTXN code (TODO: move message handling into their own functions)
3291  BlockTransactions txn;
3292  txn.blockhash = cmpctblock.header.GetHash();
3293  blockTxnMsg << txn;
3294  fProcessBLOCKTXN = true;
3295  } else {
3296  req.blockhash = pindex->GetBlockHash();
3297  m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETBLOCKTXN, req));
3298  }
3299  } else {
3300  // This block is either already in flight from a different
3301  // peer, or this peer has too many blocks outstanding to
3302  // download from.
3303  // Optimistically try to reconstruct anyway since we might be
3304  // able to without any round trips.
3305  PartiallyDownloadedBlock tempBlock(&m_mempool);
3306  ReadStatus status = tempBlock.InitData(cmpctblock, vExtraTxnForCompact);
3307  if (status != READ_STATUS_OK) {
3308  // TODO: don't ignore failures
3309  return;
3310  }
3311  std::vector<CTransactionRef> dummy;
3312  status = tempBlock.FillBlock(*pblock, dummy);
3313  if (status == READ_STATUS_OK) {
3314  fBlockReconstructed = true;
3315  }
3316  }
3317  } else {
3318  if (fAlreadyInFlight) {
3319  // We requested this block, but its far into the future, so our
3320  // mempool will probably be useless - request the block normally
3321  std::vector<CInv> vInv(1);
3322  vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom), cmpctblock.header.GetHash());
3323  m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
3324  return;
3325  } else {
3326  // If this was an announce-cmpctblock, we want the same treatment as a header message
3327  fRevertToHeaderProcessing = true;
3328  }
3329  }
3330  } // cs_main
3331 
3332  if (fProcessBLOCKTXN) {
3333  return ProcessMessage(pfrom, NetMsgType::BLOCKTXN, blockTxnMsg, time_received, interruptMsgProc);
3334  }
3335 
3336  if (fRevertToHeaderProcessing) {
3337  // Headers received from HB compact block peers are permitted to be
3338  // relayed before full validation (see BIP 152), so we don't want to disconnect
3339  // the peer if the header turns out to be for an invalid block.
3340  // Note that if a peer tries to build on an invalid chain, that
3341  // will be detected and the peer will be disconnected/discouraged.
3342  return ProcessHeadersMessage(pfrom, {cmpctblock.header}, /*via_compact_block=*/true);
3343  }
3344 
3345  if (fBlockReconstructed) {
3346  // If we got here, we were able to optimistically reconstruct a
3347  // block that is in flight from some other peer.
3348  {
3349  LOCK(cs_main);
3350  mapBlockSource.emplace(pblock->GetHash(), std::make_pair(pfrom.GetId(), false));
3351  }
3352  bool fNewBlock = false;
3353  // Setting fForceProcessing to true means that we bypass some of
3354  // our anti-DoS protections in AcceptBlock, which filters
3355  // unrequested blocks that might be trying to waste our resources
3356  // (eg disk space). Because we only try to reconstruct blocks when
3357  // we're close to caught up (via the CanDirectFetch() requirement
3358  // above, combined with the behavior of not requesting blocks until
3359  // we have a chain with at least nMinimumChainWork), and we ignore
3360  // compact blocks with less work than our tip, it is safe to treat
3361  // reconstructed compact blocks as having been requested.
3362  m_chainman.ProcessNewBlock(m_chainparams, pblock, /*fForceProcessing=*/true, &fNewBlock);
3363  if (fNewBlock) {
3364  pfrom.nLastBlockTime = GetTime();
3365  } else {
3366  LOCK(cs_main);
3367  mapBlockSource.erase(pblock->GetHash());
3368  }
3369  LOCK(cs_main); // hold cs_main for CBlockIndex::IsValid()
3370  if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS)) {
3371  // Clear download state for this block, which is in
3372  // process from some other peer. We do this after calling
3373  // ProcessNewBlock so that a malleated cmpctblock announcement
3374  // can't be used to interfere with block relay.
3375  MarkBlockAsReceived(pblock->GetHash());
3376  }
3377  }
3378  return;
3379  }
3380 
3381  if (msg_type == NetMsgType::BLOCKTXN)
3382  {
3383  // Ignore blocktxn received while importing
3384  if (fImporting || fReindex) {
3385  LogPrint(BCLog::NET, "Unexpected blocktxn message received from peer %d\n", pfrom.GetId());
3386  return;
3387  }
3388 
3389  BlockTransactions resp;
3390  vRecv >> resp;
3391 
3392  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
3393  bool fBlockRead = false;
3394  {
3395  LOCK(cs_main);
3396 
3397  std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator it = mapBlocksInFlight.find(resp.blockhash);
3398  if (it == mapBlocksInFlight.end() || !it->second.second->partialBlock ||
3399  it->second.first != pfrom.GetId()) {
3400  LogPrint(BCLog::NET, "Peer %d sent us block transactions for block we weren't expecting\n", pfrom.GetId());
3401  return;
3402  }
3403 
3404  PartiallyDownloadedBlock& partialBlock = *it->second.second->partialBlock;
3405  ReadStatus status = partialBlock.FillBlock(*pblock, resp.txn);
3406  if (status == READ_STATUS_INVALID) {
3407  MarkBlockAsReceived(resp.blockhash); // Reset in-flight state in case Misbehaving does not result in a disconnect
3408  Misbehaving(pfrom.GetId(), 100, "invalid compact block/non-matching block transactions");
3409  return;
3410  } else if (status == READ_STATUS_FAILED) {
3411  // Might have collided, fall back to getdata now :(
3412  std::vector<CInv> invs;
3413  invs.push_back(CInv(MSG_BLOCK | GetFetchFlags(pfrom), resp.blockhash));
3414  m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, invs));
3415  } else {
3416  // Block is either okay, or possibly we received
3417  // READ_STATUS_CHECKBLOCK_FAILED.
3418  // Note that CheckBlock can only fail for one of a few reasons:
3419  // 1. bad-proof-of-work (impossible here, because we've already
3420  // accepted the header)
3421  // 2. merkleroot doesn't match the transactions given (already
3422  // caught in FillBlock with READ_STATUS_FAILED, so
3423  // impossible here)
3424  // 3. the block is otherwise invalid (eg invalid coinbase,
3425  // block is too big, too many legacy sigops, etc).
3426  // So if CheckBlock failed, #3 is the only possibility.
3427  // Under BIP 152, we don't discourage the peer unless proof of work is
3428  // invalid (we don't require all the stateless checks to have
3429  // been run). This is handled below, so just treat this as
3430  // though the block was successfully read, and rely on the
3431  // handling in ProcessNewBlock to ensure the block index is
3432  // updated, etc.
3433  MarkBlockAsReceived(resp.blockhash); // it is now an empty pointer
3434  fBlockRead = true;
3435  // mapBlockSource is used for potentially punishing peers and
3436  // updating which peers send us compact blocks, so the race
3437  // between here and cs_main in ProcessNewBlock is fine.
3438  // BIP 152 permits peers to relay compact blocks after validating
3439  // the header only; we should not punish peers if the block turns
3440  // out to be invalid.
3441  mapBlockSource.emplace(resp.blockhash, std::make_pair(pfrom.GetId(), false));
3442  }
3443  } // Don't hold cs_main when we call into ProcessNewBlock
3444  if (fBlockRead) {
3445  bool fNewBlock = false;
3446  // Since we requested this block (it was in mapBlocksInFlight), force it to be processed,
3447  // even if it would not be a candidate for new tip (missing previous block, chain not long enough, etc)
3448  // This bypasses some anti-DoS logic in AcceptBlock (eg to prevent
3449  // disk-space attacks), but this should be safe due to the
3450  // protections in the compact block handler -- see related comment
3451  // in compact block optimistic reconstruction handling.
3452  m_chainman.ProcessNewBlock(m_chainparams, pblock, /*fForceProcessing=*/true, &fNewBlock);
3453  if (fNewBlock) {
3454  pfrom.nLastBlockTime = GetTime();
3455  } else {
3456  LOCK(cs_main);
3457  mapBlockSource.erase(pblock->GetHash());
3458  }
3459  }
3460  return;
3461  }
3462 
3463  if (msg_type == NetMsgType::HEADERS)
3464  {
3465  // Ignore headers received while importing
3466  if (fImporting || fReindex) {
3467  LogPrint(BCLog::NET, "Unexpected headers message received from peer %d\n", pfrom.GetId());
3468  return;
3469  }
3470 
3471  std::vector<CBlockHeader> headers;
3472 
3473  // Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks.
3474  unsigned int nCount = ReadCompactSize(vRecv);
3475  if (nCount > MAX_HEADERS_RESULTS) {
3476  Misbehaving(pfrom.GetId(), 20, strprintf("headers message size = %u", nCount));
3477  return;
3478  }
3479  headers.resize(nCount);
3480  for (unsigned int n = 0; n < nCount; n++) {
3481  vRecv >> headers[n];
3482  ReadCompactSize(vRecv); // ignore tx count; assume it is 0.
3483  }
3484 
3485  return ProcessHeadersMessage(pfrom, headers, /*via_compact_block=*/false);
3486  }
3487 
3488  if (msg_type == NetMsgType::BLOCK)
3489  {
3490  // Ignore block received while importing
3491  if (fImporting || fReindex) {
3492  LogPrint(BCLog::NET, "Unexpected block message received from peer %d\n", pfrom.GetId());
3493  return;
3494  }
3495 
3496  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
3497  vRecv >> *pblock;
3498 
3499  LogPrint(BCLog::NET, "received block %s peer=%d\n", pblock->GetHash().ToString(), pfrom.GetId());
3500 
3501  bool forceProcessing = false;
3502  const uint256 hash(pblock->GetHash());
3503  {
3504  LOCK(cs_main);
3505  // Also always process if we requested the block explicitly, as we may
3506  // need it even though it is not a candidate for a new best tip.
3507  forceProcessing |= MarkBlockAsReceived(hash);
3508  // mapBlockSource is only used for punishing peers and setting
3509  // which peers send us compact blocks, so the race between here and
3510  // cs_main in ProcessNewBlock is fine.
3511  mapBlockSource.emplace(hash, std::make_pair(pfrom.GetId(), true));
3512  }
3513  bool fNewBlock = false;
3514  m_chainman.ProcessNewBlock(m_chainparams, pblock, forceProcessing, &fNewBlock);
3515  if (fNewBlock) {
3516  pfrom.nLastBlockTime = GetTime();
3517  } else {
3518  LOCK(cs_main);
3519  mapBlockSource.erase(pblock->GetHash());
3520  }
3521  return;
3522  }
3523 
3524  if (msg_type == NetMsgType::GETADDR) {
3525  // This asymmetric behavior for inbound and outbound connections was introduced
3526  // to prevent a fingerprinting attack: an attacker can send specific fake addresses
3527  // to users' AddrMan and later request them by sending getaddr messages.
3528  // Making nodes which are behind NAT and can only make outgoing connections ignore
3529  // the getaddr message mitigates the attack.
3530  if (!pfrom.IsInboundConn()) {
3531  LogPrint(BCLog::NET, "Ignoring \"getaddr\" from %s connection. peer=%d\n", pfrom.ConnectionTypeAsString(), pfrom.GetId());
3532  return;
3533  }
3534 
3535  // Only send one GetAddr response per connection to reduce resource waste
3536  // and discourage addr stamping of INV announcements.
3537  if (pfrom.fSentAddr) {
3538  LogPrint(BCLog::NET, "Ignoring repeated \"getaddr\". peer=%d\n", pfrom.GetId());
3539  return;
3540  }
3541  pfrom.fSentAddr = true;
3542 
3543  pfrom.vAddrToSend.clear();
3544  std::vector<CAddress> vAddr;
3545  if (pfrom.HasPermission(PF_ADDR)) {
3546  vAddr = m_connman.GetAddresses(MAX_ADDR_TO_SEND, MAX_PCT_ADDR_TO_SEND);
3547  } else {
3548  vAddr = m_connman.GetAddresses(pfrom, MAX_ADDR_TO_SEND, MAX_PCT_ADDR_TO_SEND);
3549  }
3550  FastRandomContext insecure_rand;
3551  for (const CAddress &addr : vAddr) {
3552  pfrom.PushAddress(addr, insecure_rand);
3553  }
3554  return;
3555  }
3556 
3557  if (msg_type == NetMsgType::MEMPOOL) {
3558  if (!(pfrom.GetLocalServices() & NODE_BLOOM) && !pfrom.HasPermission(PF_MEMPOOL))
3559  {
3560  if (!pfrom.HasPermission(PF_NOBAN))
3561  {
3562  LogPrint(BCLog::NET, "mempool request with bloom filters disabled, disconnect peer=%d\n", pfrom.GetId());
3563  pfrom.fDisconnect = true;
3564  }
3565  return;
3566  }
3567 
3569  {
3570  if (!pfrom.HasPermission(PF_NOBAN))
3571  {
3572  LogPrint(BCLog::NET, "mempool request with bandwidth limit reached, disconnect peer=%d\n", pfrom.GetId());
3573  pfrom.fDisconnect = true;
3574  }
3575  return;
3576  }
3577 
3578  if (pfrom.m_tx_relay != nullptr) {
3579  LOCK(pfrom.m_tx_relay->cs_tx_inventory);
3580  pfrom.m_tx_relay->fSendMempool = true;
3581  }
3582  return;
3583  }
3584 
3585  if (msg_type == NetMsgType::PING) {
3586  if (pfrom.GetCommonVersion() > BIP0031_VERSION) {
3587  uint64_t nonce = 0;
3588  vRecv >> nonce;
3589  // Echo the message back with the nonce. This allows for two useful features:
3590  //
3591  // 1) A remote node can quickly check if the connection is operational
3592  // 2) Remote nodes can measure the latency of the network thread. If this node
3593  // is overloaded it won't respond to pings quickly and the remote node can
3594  // avoid sending us more work, like chain download requests.
3595  //
3596  // The nonce stops the remote getting confused between different pings: without
3597  // it, if the remote node sends a ping once per second and this node takes 5
3598  // seconds to respond to each, the 5th ping the remote sends would appear to
3599  // return very quickly.
3600  m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::PONG, nonce));
3601  }
3602  return;
3603  }
3604 
3605  if (msg_type == NetMsgType::PONG) {
3606  const auto ping_end = time_received;
3607  uint64_t nonce = 0;
3608  size_t nAvail = vRecv.in_avail();
3609  bool bPingFinished = false;
3610  std::string sProblem;
3611 
3612  if (nAvail >= sizeof(nonce)) {
3613  vRecv >> nonce;
3614 
3615  // Only process pong message if there is an outstanding ping (old ping without nonce should never pong)
3616  if (pfrom.nPingNonceSent != 0) {
3617  if (nonce == pfrom.nPingNonceSent) {
3618  // Matching pong received, this ping is no longer outstanding
3619  bPingFinished = true;
3620  const auto ping_time = ping_end - pfrom.m_ping_start.load();
3621  if (ping_time.count() >= 0) {
3622  // Successful ping time measurement, replace previous
3623  pfrom.nPingUsecTime = count_microseconds(ping_time);
3624  pfrom.nMinPingUsecTime = std::min(pfrom.nMinPingUsecTime.load(), count_microseconds(ping_time));
3625  } else {
3626  // This should never happen
3627  sProblem = "Timing mishap";
3628  }
3629  } else {
3630  // Nonce mismatches are normal when pings are overlapping
3631  sProblem = "Nonce mismatch";
3632  if (nonce == 0) {
3633  // This is most likely a bug in another implementation somewhere; cancel this ping
3634  bPingFinished = true;
3635  sProblem = "Nonce zero";
3636  }
3637  }
3638  } else {
3639  sProblem = "Unsolicited pong without ping";
3640  }
3641  } else {
3642  // This is most likely a bug in another implementation somewhere; cancel this ping
3643  bPingFinished = true;
3644  sProblem = "Short payload";
3645  }
3646 
3647  if (!(sProblem.empty())) {
3648  LogPrint(BCLog::NET, "pong peer=%d: %s, %x expected, %x received, %u bytes\n",
3649  pfrom.GetId(),
3650  sProblem,
3651  pfrom.nPingNonceSent,
3652  nonce,
3653  nAvail);
3654  }
3655  if (bPingFinished) {
3656  pfrom.nPingNonceSent = 0;
3657  }
3658  return;
3659  }
3660 
3661  if (msg_type == NetMsgType::FILTERLOAD) {
3662  if (!(pfrom.GetLocalServices() & NODE_BLOOM)) {
3663  pfrom.fDisconnect = true;
3664  return;
3665  }
3666  CBloomFilter filter;
3667  vRecv >> filter;
3668 
3669  if (!filter.IsWithinSizeConstraints())
3670  {
3671  // There is no excuse for sending a too-large filter
3672  Misbehaving(pfrom.GetId(), 100, "too-large bloom filter");
3673  }
3674  else if (pfrom.m_tx_relay != nullptr)
3675  {
3676  LOCK(pfrom.m_tx_relay->cs_filter);
3677  pfrom.m_tx_relay->pfilter.reset(new CBloomFilter(filter));
3678  pfrom.m_tx_relay->fRelayTxes = true;
3679  }
3680  return;
3681  }
3682 
3683  if (msg_type == NetMsgType::FILTERADD) {
3684  if (!(pfrom.GetLocalServices() & NODE_BLOOM)) {
3685  pfrom.fDisconnect = true;
3686  return;
3687  }
3688  std::vector<unsigned char> vData;
3689  vRecv >> vData;
3690 
3691  // Nodes must NEVER send a data item > 520 bytes (the max size for a script data object,
3692  // and thus, the maximum size any matched object can have) in a filteradd message
3693  bool bad = false;
3694  if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) {
3695  bad = true;
3696  } else if (pfrom.m_tx_relay != nullptr) {
3697  LOCK(pfrom.m_tx_relay->cs_filter);
3698  if (pfrom.m_tx_relay->pfilter) {
3699  pfrom.m_tx_relay->pfilter->insert(vData);
3700  } else {
3701  bad = true;
3702  }
3703  }
3704  if (bad) {
3705  Misbehaving(pfrom.GetId(), 100, "bad filteradd message");
3706  }
3707  return;
3708  }
3709 
3710  if (msg_type == NetMsgType::FILTERCLEAR) {
3711  if (!(pfrom.GetLocalServices() & NODE_BLOOM)) {
3712  pfrom.fDisconnect = true;
3713  return;
3714  }
3715  if (pfrom.m_tx_relay == nullptr) {
3716  return;
3717  }
3718  LOCK(pfrom.m_tx_relay->cs_filter);
3719  pfrom.m_tx_relay->pfilter = nullptr;
3720  pfrom.m_tx_relay->fRelayTxes = true;
3721  return;
3722  }
3723 
3724  if (msg_type == NetMsgType::FEEFILTER) {
3725  CAmount newFeeFilter = 0;
3726  vRecv >> newFeeFilter;
3727  if (MoneyRange(newFeeFilter)) {
3728  if (pfrom.m_tx_relay != nullptr) {
3729  LOCK(pfrom.m_tx_relay->cs_feeFilter);
3730  pfrom.m_tx_relay->minFeeFilter = newFeeFilter;
3731  }
3732  LogPrint(BCLog::NET, "received: feefilter of %s from peer=%d\n", CFeeRate(newFeeFilter).ToString(), pfrom.GetId());
3733  }
3734  return;
3735  }
3736 
3737  if (msg_type == NetMsgType::GETCFILTERS) {
3738  ProcessGetCFilters(pfrom, vRecv, m_chainparams, m_connman);
3739  return;
3740  }
3741 
3742  if (msg_type == NetMsgType::GETCFHEADERS) {
3743  ProcessGetCFHeaders(pfrom, vRecv, m_chainparams, m_connman);
3744  return;
3745  }
3746 
3747  if (msg_type == NetMsgType::GETCFCHECKPT) {
3748  ProcessGetCFCheckPt(pfrom, vRecv, m_chainparams, m_connman);
3749  return;
3750  }
3751 
3752  if (msg_type == NetMsgType::NOTFOUND) {
3753  std::vector<CInv> vInv;
3754  vRecv >> vInv;
3756  LOCK(::cs_main);
3757  for (CInv &inv : vInv) {
3758  if (inv.IsGenTxMsg()) {
3759  // If we receive a NOTFOUND message for a tx we requested, mark the announcement for it as
3760  // completed in TxRequestTracker.
3761  m_txrequest.ReceivedResponse(pfrom.GetId(), inv.hash);
3762  }
3763  }
3764  }
3765  return;
3766  }
3767 
3768  // Ignore unknown commands for extensibility
3769  LogPrint(BCLog::NET, "Unknown command \"%s\" from peer=%d\n", SanitizeString(msg_type), pfrom.GetId());
3770  return;
3771 }
3772 
// NOTE(review): the signature line for this function (original line 3773) is
// missing from this rendering. From the body it takes a CNode& named pnode and
// returns bool -- presumably PeerManager::MaybeDiscourageAndDisconnect(CNode&);
// confirm against the upstream source.
// Checks whether the per-peer m_should_discourage flag is set and, if so,
// clears it and applies punishment. Returns true iff the peer is disconnected.
3774 {
3775  const NodeId peer_id{pnode.GetId()};
3776  PeerRef peer = GetPeerRef(peer_id);
 // No Peer object for this id -> nothing to punish.
3777  if (peer == nullptr) return false;
3778 
3779  {
 // Read-and-clear the flag under its own mutex; the lock is released
 // before any disconnect/discourage action is taken below.
3780  LOCK(peer->m_misbehavior_mutex);
3781 
3782  // There's nothing to do if the m_should_discourage flag isn't set
3783  if (!peer->m_should_discourage) return false;
3784 
3785  peer->m_should_discourage = false;
3786  } // peer.m_misbehavior_mutex
3787 
3788  if (pnode.HasPermission(PF_NOBAN)) {
3789  // We never disconnect or discourage peers for bad behavior if they have the NOBAN permission flag
3790  LogPrintf("Warning: not punishing noban peer %d!\n", peer_id);
3791  return false;
3792  }
3793 
3794  if (pnode.IsManualConn()) {
3795  // We never disconnect or discourage manual peers for bad behavior
3796  LogPrintf("Warning: not punishing manually connected peer %d!\n", peer_id);
3797  return false;
3798  }
3799 
3800  if (pnode.addr.IsLocal()) {
3801  // We disconnect local peers for bad behavior but don't discourage (since that would discourage
3802  // all peers on the same local address)
3803  LogPrintf("Warning: disconnecting but not discouraging local peer %d!\n", peer_id);
3804  pnode.fDisconnect = true;
3805  return true;
3806  }
3807 
3808  // Normal case: Disconnect the peer and discourage all nodes sharing the address
3809  LogPrintf("Disconnecting and discouraging peer %d!\n", peer_id);
 // BanMan may be absent (m_banman can be null); disconnect happens regardless.
3810  if (m_banman) m_banman->Discourage(pnode.addr);
3811  m_connman.DisconnectNode(pnode.addr);
3812  return true;
3813 }
3814 
// Drain pending work for one peer: first serve queued getdata requests and
// queued orphan transactions, then process at most ONE message from the
// peer's receive queue. Returns true if more work remains for this peer
// (so the caller should schedule it again), false otherwise.
3815 bool PeerManager::ProcessMessages(CNode* pfrom, std::atomic<bool>& interruptMsgProc)
3816 {
3817  bool fMoreWork = false;
3818 
3819  PeerRef peer = GetPeerRef(pfrom->GetId());
3820  if (peer == nullptr) return false;
3821 
3822  {
3823  LOCK(peer->m_getdata_requests_mutex);
 // Serve any outstanding getdata requests before reading new messages.
3824  if (!peer->m_getdata_requests.empty()) {
3825  ProcessGetData(*pfrom, *peer, m_chainparams, m_connman, m_mempool, interruptMsgProc);
3826  }
3827  }
3828 
3829  {
3830  LOCK2(cs_main, g_cs_orphans);
 // Retry orphan transactions whose parents may have arrived.
3831  if (!peer->m_orphan_work_set.empty()) {
3832  ProcessOrphanTx(peer->m_orphan_work_set);
3833  }
3834  }
3835 
3836  if (pfrom->fDisconnect)
3837  return false;
3838 
3839  // this maintains the order of responses
3840  // and prevents m_getdata_requests to grow unbounded
3841  {
3842  LOCK(peer->m_getdata_requests_mutex);
3843  if (!peer->m_getdata_requests.empty()) return true;
3844  }
3845 
3846  {
3847  LOCK(g_cs_orphans);
3848  if (!peer->m_orphan_work_set.empty()) return true;
3849  }
3850 
3851  // Don't bother if send buffer is too full to respond anyway
3852  if (pfrom->fPauseSend)
3853  return false;
3854 
3855  std::list<CNetMessage> msgs;
3856  {
3857  LOCK(pfrom->cs_vProcessMsg);
3858  if (pfrom->vProcessMsg.empty())
3859  return false;
3860  // Just take one message
 // splice() moves the node without copying the message payload.
3861  msgs.splice(msgs.begin(), pfrom->vProcessMsg, pfrom->vProcessMsg.begin());
3862  pfrom->nProcessQueueSize -= msgs.front().m_raw_message_size;
 // NOTE(review): original line 3863 is missing from this rendering (the
 // numbering jumps 3862 -> 3864); it presumably updates pfrom->fPauseRecv
 // based on the new queue size -- verify against the upstream source.
3864  fMoreWork = !pfrom->vProcessMsg.empty();
3865  }
3866  CNetMessage& msg(msgs.front());
3867 
 // Deserialization version is fixed to the negotiated protocol version.
3868  msg.SetVersion(pfrom->GetCommonVersion());
3869  const std::string& msg_type = msg.m_command;
3870 
3871  // Message size
3872  unsigned int nMessageSize = msg.m_message_size;
3873 
 // A malformed message must not take down the node: deserialization and
 // handler exceptions are caught and logged, then processing continues.
3874  try {
3875  ProcessMessage(*pfrom, msg_type, msg.m_recv, msg.m_time, interruptMsgProc);
3876  if (interruptMsgProc) return false;
3877  {
3878  LOCK(peer->m_getdata_requests_mutex);
 // Handling the message may have queued getdata work; report it.
3879  if (!peer->m_getdata_requests.empty()) fMoreWork = true;
3880  }
3881  } catch (const std::exception& e) {
3882  LogPrint(BCLog::NET, "%s(%s, %u bytes): Exception '%s' (%s) caught\n", __func__, SanitizeString(msg_type), nMessageSize, e.what(), typeid(e).name());
3883  } catch (...) {
3884  LogPrint(BCLog::NET, "%s(%s, %u bytes): Unknown exception caught\n", __func__, SanitizeString(msg_type), nMessageSize);
3885  }
3886 
3887  return fMoreWork;
3888 }
3889 
// Decide whether to start, reset, or act on the chain-sync eviction timer for
// an outbound peer: if the peer has not announced a chain with at least as
// much work as our tip within CHAIN_SYNC_TIMEOUT, send one getheaders probe,
// and disconnect if the peer still fails to catch up within
// HEADERS_RESPONSE_TIME. Requires cs_main (asserted below).
3890 void PeerManager::ConsiderEviction(CNode& pto, int64_t time_in_seconds)
3891 {
3892  AssertLockHeld(cs_main);
3893 
3894  CNodeState &state = *State(pto.GetId());
3895  const CNetMsgMaker msgMaker(pto.GetCommonVersion());
3896 
 // Only unprotected outbound/block-relay peers that have begun header sync
 // are subject to this eviction logic.
3897  if (!state.m_chain_sync.m_protect && pto.IsOutboundOrBlockRelayConn() && state.fSyncStarted) {
3898  // This is an outbound peer subject to disconnection if they don't
3899  // announce a block with as much work as the current tip within
3900  // CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds (note: if
3901  // their chain has more work than ours, we should sync to it,
3902  // unless it's invalid, in which case we should find that out and
3903  // disconnect from them elsewhere).
3904  if (state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= ::ChainActive().Tip()->nChainWork) {
 // Peer has caught up to (or beyond) our tip: clear any pending timeout.
3905  if (state.m_chain_sync.m_timeout != 0) {
3906  state.m_chain_sync.m_timeout = 0;
3907  state.m_chain_sync.m_work_header = nullptr;
3908  state.m_chain_sync.m_sent_getheaders = false;
3909  }
3910  } else if (state.m_chain_sync.m_timeout == 0 || (state.m_chain_sync.m_work_header != nullptr && state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= state.m_chain_sync.m_work_header->nChainWork)) {
3911  // Our best block known by this peer is behind our tip, and we're either noticing
3912  // that for the first time, OR this peer was able to catch up to some earlier point
3913  // where we checked against our tip.
3914  // Either way, set a new timeout based on current tip.
3915  state.m_chain_sync.m_timeout = time_in_seconds + CHAIN_SYNC_TIMEOUT;
3916  state.m_chain_sync.m_work_header = ::ChainActive().Tip();
3917  state.m_chain_sync.m_sent_getheaders = false;
3918  } else if (state.m_chain_sync.m_timeout > 0 && time_in_seconds > state.m_chain_sync.m_timeout) {
3919  // No evidence yet that our peer has synced to a chain with work equal to that
3920  // of our tip, when we first detected it was behind. Send a single getheaders
3921  // message to give the peer a chance to update us.
3922  if (state.m_chain_sync.m_sent_getheaders) {
3923  // They've run out of time to catch up!
3924  LogPrintf("Disconnecting outbound peer %d for old chain, best known block = %s\n", pto.GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>");
3925  pto.fDisconnect = true;
3926  } else {
3927  assert(state.m_chain_sync.m_work_header);
3928  LogPrint(BCLog::NET, "sending getheaders to outbound peer=%d to verify chain work (current best known block:%s, benchmark blockhash: %s)\n", pto.GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>", state.m_chain_sync.m_work_header->GetBlockHash().ToString());
 // Locator starts at the benchmark header's parent so a compliant peer
 // returns a non-empty headers response.
3929  m_connman.PushMessage(&pto, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(state.m_chain_sync.m_work_header->pprev), uint256()));
3930  state.m_chain_sync.m_sent_getheaders = true;
3931  constexpr int64_t HEADERS_RESPONSE_TIME = 120; // 2 minutes
3932  // Bump the timeout to allow a response, which could clear the timeout
3933  // (if the response shows the peer has synced), reset the timeout (if
3934  // the peer syncs to the required work but not to our tip), or result
3935  // in disconnect (if we advance to the timeout and pindexBestKnownBlock
3936  // has not sufficiently progressed)
3937  state.m_chain_sync.m_timeout = time_in_seconds + HEADERS_RESPONSE_TIME;
3938  }
3939  }
3940  }
3941 }
3942 
// If we hold more outbound connections than our target, pick the outbound
// peer that least recently announced a new block to us (ties broken toward
// the newer connection, i.e. higher NodeId) and disconnect it, unless it is
// protected, block-relay-only, recently connected, or mid block download.
3943 void PeerManager::EvictExtraOutboundPeers(int64_t time_in_seconds)
3944 {
3945  // Check whether we have too many outbound peers
3946  int extra_peers = m_connman.GetExtraOutboundCount();
3947  if (extra_peers > 0) {
3948  // If we have more outbound peers than we target, disconnect one.
3949  // Pick the outbound peer that least recently announced
3950  // us a new block, with ties broken by choosing the more recent
3951  // connection (higher node id)
3952  NodeId worst_peer = -1;
3953  int64_t oldest_block_announcement = std::numeric_limits<int64_t>::max();
3954 
 // First pass: scan all nodes to select the eviction candidate.
3955  m_connman.ForEachNode([&](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
3956  AssertLockHeld(::cs_main);
3957 
3958  // Ignore non-outbound peers, or nodes marked for disconnect already
3959  if (!pnode->IsOutboundOrBlockRelayConn() || pnode->fDisconnect) return;
3960  CNodeState *state = State(pnode->GetId());
3961  if (state == nullptr) return; // shouldn't be possible, but just in case
3962  // Don't evict our protected peers
3963  if (state->m_chain_sync.m_protect) return;
3964  // Don't evict our block-relay-only peers.
3965  if (pnode->m_tx_relay == nullptr) return;
3966  if (state->m_last_block_announcement < oldest_block_announcement || (state->m_last_block_announcement == oldest_block_announcement && pnode->GetId() > worst_peer)) {
3967  worst_peer = pnode->GetId();
3968  oldest_block_announcement = state->m_last_block_announcement;
3969  }
3970  });
3971  if (worst_peer != -1) {
 // Second pass: revisit the candidate and disconnect if still eligible.
3972  bool disconnected = m_connman.ForNode(worst_peer, [&](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
3973  AssertLockHeld(::cs_main);
3974 
3975  // Only disconnect a peer that has been connected to us for
3976  // some reasonable fraction of our check-frequency, to give
3977  // it time for new information to have arrived.
3978  // Also don't disconnect any peer we're trying to download a
3979  // block from.
3980  CNodeState &state = *State(pnode->GetId());
3981  if (time_in_seconds - pnode->nTimeConnected > MINIMUM_CONNECT_TIME && state.nBlocksInFlight == 0) {
3982  LogPrint(BCLog::NET, "disconnecting extra outbound peer=%d (last block announcement received at time %d)\n", pnode->GetId(), oldest_block_announcement);
3983  pnode->fDisconnect = true;
3984  return true;
3985  } else {
3986  LogPrint(BCLog::NET, "keeping outbound peer=%d chosen for eviction (connect time: %d, blocks_in_flight: %d)\n", pnode->GetId(), pnode->nTimeConnected, state.nBlocksInFlight);
3987  return false;
3988  }
3989  });
3990  if (disconnected) {
3991  // If we disconnected an extra peer, that means we successfully
3992  // connected to at least one peer after the last time we
3993  // detected a stale tip. Don't try any more extra peers until
3994  // we next detect a stale tip, to limit the load we put on the
3995  // network from these extra connections.
 // NOTE(review): original line 3996 is missing from this rendering
 // (numbering jumps 3995 -> 3997); per the comment above it presumably
 // disables the extra-outbound-peer mode on m_connman -- verify
 // against the upstream source.
3997  }
3998  }
3999  }
4000 }
4001 
// NOTE(review): the signature line for this function (original line 4002) is
// missing from this rendering -- presumably
// PeerManager::CheckForStaleTipAndEvictPeers(); confirm upstream.
// Periodic housekeeping: evict surplus outbound peers, and every
// STALE_CHECK_INTERVAL seconds test whether the tip looks stale so an extra
// outbound peer may be tried.
4003 {
4004  LOCK(cs_main);
4005 
4006  int64_t time_in_seconds = GetTime();
4007 
4008  EvictExtraOutboundPeers(time_in_seconds);
4009 
 // Rate-limited by m_stale_tip_check_time (advanced at the bottom).
4010  if (time_in_seconds > m_stale_tip_check_time) {
4011  // Check whether our tip is stale, and if so, allow using an extra
4012  // outbound peer
4013  if (!fImporting && !fReindex && m_connman.GetNetworkActive() && m_connman.GetUseAddrmanOutgoing() && TipMayBeStale(m_chainparams.GetConsensus())) {
4014  LogPrintf("Potential stale tip detected, will try using extra outbound peer (last tip update: %d seconds ago)\n", time_in_seconds - g_last_tip_update);
 // NOTE(review): original line 4015 is missing from this rendering
 // (numbering jumps 4014 -> 4016); per the comment above it presumably
 // enables the extra-outbound-peer mode on m_connman -- verify upstream.
4016  } else if (m_connman.GetTryNewOutboundPeer()) {
 // NOTE(review): original line 4017 is likewise missing (jump
 // 4016 -> 4018); it presumably disables that mode again -- verify.
4018  }
4019  m_stale_tip_check_time = time_in_seconds + STALE_CHECK_INTERVAL;
4020  }
4021 }
4022 
4023 namespace {
// Comparator over iterators into a std::set of txids/wtxids, delegating to
// CTxMemPool::CompareDepthAndScore. File-local (anonymous namespace).
4024 class CompareInvMempoolOrder
4025 {
 // Non-owning pointer to the mempool used for the comparison; the caller
 // must keep the mempool alive for the comparator's lifetime.
4026  CTxMemPool *mp;
 // Whether the hashes being compared are wtxids (BIP 339) rather than txids.
4027  bool m_wtxid_relay;
4028 public:
4029  explicit CompareInvMempoolOrder(CTxMemPool *_mempool, bool use_wtxid)
4030  {
4031  mp = _mempool;
4032  m_wtxid_relay = use_wtxid;
4033  }
4034 
 // Strict-weak-order predicate; note the argument swap (*b, *a) below to
 // invert the mempool ordering for use with a max-heap.
4035  bool operator()(std::set<uint256>::iterator a, std::set<uint256>::iterator b)
4036  {
4037  /* As std::make_heap produces a max-heap, we want the entries with the
4038  * fewest ancestors/highest fee to sort later. */
4039  return mp->CompareDepthAndScore(*b, *a, m_wtxid_relay);
4040  }
4041 };
4042 }
4043 
4045 {
4046  const Consensus::Params& consensusParams = m_chainparams.GetConsensus();
4047 
4048  // We must call MaybeDiscourageAndDisconnect first, to ensure that we'll
4049  // disconnect misbehaving peers even before the version handshake is complete.
4050  if (MaybeDiscourageAndDisconnect(*pto)) return true;
4051 
4052  // Don't send anything until the version handshake is complete
4053  if (!pto->fSuccessfullyConnected || pto->fDisconnect)
4054  return true;
4055 
4056  // If we get here, the outgoing message serialization version is set and can't change.
4057  const CNetMsgMaker msgMaker(pto->GetCommonVersion());
4058 
4059  //
4060  // Message: ping
4061  //
4062  bool pingSend = false;
4063  if (pto->fPingQueued) {
4064  // RPC ping request by user
4065  pingSend = true;
4066  }
4067  if (pto->nPingNonceSent == 0 && pto->m_ping_start.load() + PING_INTERVAL < GetTime<std::chrono::microseconds>()) {
4068  // Ping automatically sent as a latency probe & keepalive.
4069  pingSend = true;
4070  }
4071  if (pingSend) {
4072  uint64_t nonce = 0;
4073  while (nonce == 0) {
4074  GetRandBytes((unsigned char*)&nonce, sizeof(nonce));
4075  }
4076  pto->fPingQueued = false;
4077  pto->m_ping_start = GetTime<std::chrono::microseconds>();
4078  if (pto->GetCommonVersion() > BIP0031_VERSION) {
4079  pto->nPingNonceSent = nonce;
4080  m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::PING, nonce));
4081  } else {
4082  // Peer is too old to support ping command with nonce, pong will never arrive.
4083  pto->nPingNonceSent = 0;
4084  m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::PING));
4085  }
4086  }
4087 
4088  {
4089  LOCK(cs_main);
4090 
4091  CNodeState &state = *State(pto->GetId());
4092 
4093  // Address refresh broadcast
4094  auto current_time = GetTime<std::chrono::microseconds>();
4095 
4096  if (pto->RelayAddrsWithConn() && !::ChainstateActive().IsInitialBlockDownload() && pto->m_next_local_addr_send < current_time) {
4097  AdvertiseLocal(pto);
4098  pto->m_next_local_addr_send = PoissonNextSend(current_time, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
4099  }
4100 
4101  //
4102  // Message: addr
4103  //
4104  if (pto->RelayAddrsWithConn() && pto->m_next_addr_send < current_time) {
4105  pto->m_next_addr_send = PoissonNextSend(current_time, AVG_ADDRESS_BROADCAST_INTERVAL);
4106  std::vector<CAddress> vAddr;
4107  vAddr.reserve(pto->vAddrToSend.size());
4108  assert(pto->m_addr_known);
4109 
4110  const char* msg_type;
4111  int make_flags;
4112  if (pto->m_wants_addrv2) {
4113  msg_type = NetMsgType::ADDRV2;
4114  make_flags = ADDRV2_FORMAT;
4115  } else {
4116  msg_type = NetMsgType::ADDR;
4117  make_flags = 0;
4118  }
4119 
4120  for (const CAddress& addr : pto->vAddrToSend)
4121  {
4122  if (!pto->m_addr_known->contains(addr.GetKey()))
4123  {
4124  pto->m_addr_known->insert(addr.GetKey());
4125  vAddr.push_back(addr);
4126  // receiver rejects addr messages larger than MAX_ADDR_TO_SEND
4127  if (vAddr.size() >= MAX_ADDR_TO_SEND)
4128  {
4129  m_connman.PushMessage(pto, msgMaker.Make(make_flags, msg_type, vAddr));
4130  vAddr.clear();
4131  }
4132  }
4133  }
4134  pto->vAddrToSend.clear();
4135  if (!vAddr.empty())
4136  m_connman.PushMessage(pto, msgMaker.Make(make_flags, msg_type, vAddr));
4137  // we only send the big addr message once
4138  if (pto->vAddrToSend.capacity() > 40)
4139  pto->vAddrToSend.shrink_to_fit();
4140  }
4141 
4142  // Start block sync
4143  if (pindexBestHeader == nullptr)
4145  bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->IsAddrFetchConn()); // Download if this is a nice peer, or we have no nice peers and this one might do.
4146  if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) {
4147  // Only actively request headers from a single peer, unless we're close to today.
4148  if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) {
4149  state.fSyncStarted = true;
4151  nSyncStarted++;
4152  const CBlockIndex *pindexStart = pindexBestHeader;
4153  /* If possible, start at the block preceding the currently
4154  best known header. This ensures that we always get a
4155  non-empty list of headers back as long as the peer
4156  is up-to-date. With a non-empty response, we can initialise
4157  the peer's known best block. This wouldn't be possible
4158  if we requested starting at pindexBestHeader and
4159  got back an empty response. */
4160  if (pindexStart->pprev)
4161  pindexStart = pindexStart->pprev;
4162  LogPrint(BCLog::NET, "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->GetId(), pto->nStartingHeight);
4163  m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexStart), uint256()));
4164  }
4165  }
4166 
4167  //
4168  // Try sending block announcements via headers
4169  //
4170  {
4171  // If we have less than MAX_BLOCKS_TO_ANNOUNCE in our
4172  // list of block hashes we're relaying, and our peer wants
4173  // headers announcements, then find the first header
4174  // not yet known to our peer but would connect, and send.
4175  // If no header would connect, or if we have too many
4176  // blocks, or if the peer doesn't want headers, just
4177  // add all to the inv queue.
4178  LOCK(pto->cs_inventory);
4179  std::vector<CBlock> vHeaders;
4180  bool fRevertToInv = ((!state.fPreferHeaders &&
4181  (!state.fPreferHeaderAndIDs || pto->vBlockHashesToAnnounce.size() > 1)) ||
4182  pto->vBlockHashesToAnnounce.size() > MAX_BLOCKS_TO_ANNOUNCE);
4183  const CBlockIndex *pBestIndex = nullptr; // last header queued for delivery
4184  ProcessBlockAvailability(pto->GetId()); // ensure pindexBestKnownBlock is up-to-date
4185 
4186  if (!fRevertToInv) {
4187  bool fFoundStartingHeader = false;
4188  // Try to find first header that our peer doesn't have, and
4189  // then send all headers past that one. If we come across any
4190  // headers that aren't on ::ChainActive(), give up.
4191  for (const uint256 &hash : pto->vBlockHashesToAnnounce) {
4192  const CBlockIndex* pindex = LookupBlockIndex(hash);
4193  assert(pindex);
4194  if (::ChainActive()[pindex->nHeight] != pindex) {
4195  // Bail out if we reorged away from this block
4196  fRevertToInv = true;
4197  break;
4198  }
4199  if (pBestIndex != nullptr && pindex->pprev != pBestIndex) {
4200  // This means that the list of blocks to announce don't
4201  // connect to each other.
4202  // This shouldn't really be possible to hit during
4203  // regular operation (because reorgs should take us to
4204  // a chain that has some block not on the prior chain,
4205  // which should be caught by the prior check), but one
4206  // way this could happen is by using invalidateblock /
4207  // reconsiderblock repeatedly on the tip, causing it to
4208  // be added multiple times to vBlockHashesToAnnounce.
4209  // Robustly deal with this rare situation by reverting
4210  // to an inv.
4211  fRevertToInv = true;
4212  break;
4213  }
4214  pBestIndex = pindex;
4215  if (fFoundStartingHeader) {
4216  // add this to the headers message
4217  vHeaders.push_back(pindex->GetBlockHeader());
4218  } else if (PeerHasHeader(&state, pindex)) {
4219  continue; // keep looking for the first new block
4220  } else if (pindex->pprev == nullptr || PeerHasHeader(&state, pindex->pprev)) {
4221  // Peer doesn't have this header but they do have the prior one.
4222  // Start sending headers.
4223  fFoundStartingHeader = true;
4224  vHeaders.push_back(pindex->GetBlockHeader());
4225  } else {
4226  // Peer doesn't have this header or the prior one -- nothing will
4227  // connect, so bail out.
4228  fRevertToInv = true;
4229  break;
4230  }
4231  }
4232  }
4233  if (!fRevertToInv && !vHeaders.empty()) {
4234  if (vHeaders.size() == 1 && state.fPreferHeaderAndIDs) {
4235  // We only send up to 1 block as header-and-ids, as otherwise
4236  // probably means we're doing an initial-ish-sync or they're slow
4237  LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", __func__,
4238  vHeaders.front().GetHash().ToString(), pto->GetId());
4239 
4240  int nSendFlags = state.fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
4241 
4242  bool fGotBlockFromCache = false;
4243  {
4244  LOCK(cs_most_recent_block);
4245  if (most_recent_block_hash == pBestIndex->GetBlockHash()) {
4246  if (state.fWantsCmpctWitness || !fWitnessesPresentInMostRecentCompactBlock)
4247  m_connman.PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, *most_recent_compact_block));
4248  else {
4249  CBlockHeaderAndShortTxIDs cmpctblock(*most_recent_block, state.fWantsCmpctWitness);
4250  m_connman.PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
4251  }
4252  fGotBlockFromCache = true;
4253  }
4254  }
4255  if (!fGotBlockFromCache) {
4256  CBlock block;
4257  bool ret = ReadBlockFromDisk(block, pBestIndex, consensusParams);
4258  assert(ret);
4259  CBlockHeaderAndShortTxIDs cmpctblock(block, state.fWantsCmpctWitness);
4260  m_connman.PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
4261  }
4262  state.pindexBestHeaderSent = pBestIndex;
4263  } else if (state.fPreferHeaders) {
4264  if (vHeaders.size() > 1) {
4265  LogPrint(BCLog::NET, "%s: %u headers, range (%s, %s), to peer=%d\n", __func__,
4266  vHeaders.size(),
4267  vHeaders.front().GetHash().ToString(),
4268  vHeaders.back().GetHash().ToString(), pto->GetId());
4269  } else {
4270  LogPrint(BCLog::NET, "%s: sending header %s to peer=%d\n", __func__,
4271  vHeaders.front().GetHash().ToString(), pto->GetId());
4272  }
4273  m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
4274  state.pindexBestHeaderSent = pBestIndex;
4275  } else
4276  fRevertToInv = true;
4277  }
4278  if (fRevertToInv) {
4279  // If falling back to using an inv, just try to inv the tip.
4280  // The last entry in vBlockHashesToAnnounce was our tip at some point
4281  // in the past.
4282  if (!pto->vBlockHashesToAnnounce.empty()) {
4283  const uint256 &hashToAnnounce = pto->vBlockHashesToAnnounce.back();
4284  const CBlockIndex* pindex = LookupBlockIndex(hashToAnnounce);
4285  assert(pindex);
4286 
4287  // Warn if we're announcing a block that is not on the main chain.
4288  // This should be very rare and could be optimized out.
4289  // Just log for now.
4290  if (::ChainActive()[pindex->nHeight] != pindex) {
4291  LogPrint(BCLog::NET, "Announcing block %s not on main chain (tip=%s)\n",
4292  hashToAnnounce.ToString(), ::ChainActive().Tip()->GetBlockHash().ToString());
4293  }
4294 
4295  // If the peer's chain has this block, don't inv it back.
4296  if (!PeerHasHeader(&state, pindex)) {
4297  pto->vInventoryBlockToSend.push_back(hashToAnnounce);
4298  LogPrint(BCLog::NET, "%s: sending inv peer=%d hash=%s\n", __func__,
4299  pto->GetId(), hashToAnnounce.ToString());
4300  }
4301  }
4302  }
4303  pto->vBlockHashesToAnnounce.clear();
4304  }
4305 
4306  //
4307  // Message: inventory
4308  //
4309  std::vector<CInv> vInv;
4310  {
4311  LOCK(pto->cs_inventory);
4312  vInv.reserve(std::max<size_t>(pto->vInventoryBlockToSend.size(), INVENTORY_BROADCAST_MAX));
4313 
4314  // Add blocks
4315  for (const uint256& hash : pto->vInventoryBlockToSend) {
4316  vInv.push_back(CInv(MSG_BLOCK, hash));
4317  if (vInv.size() == MAX_INV_SZ) {
4318  m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
4319  vInv.clear();
4320  }
4321  }
4322  pto->vInventoryBlockToSend.clear();
4323 
4324  if (pto->m_tx_relay != nullptr) {
4325  LOCK(pto->m_tx_relay->cs_tx_inventory);
4326  // Check whether periodic sends should happen
4327  bool fSendTrickle = pto->HasPermission(PF_NOBAN);
4328  if (pto->m_tx_relay->nNextInvSend < current_time) {
4329  fSendTrickle = true;
4330  if (pto->IsInboundConn()) {
4331  pto->m_tx_relay->nNextInvSend = std::chrono::microseconds{m_connman.PoissonNextSendInbound(count_microseconds(current_time), INVENTORY_BROADCAST_INTERVAL)};
4332  } else {
4333  // Use half the delay for outbound peers, as there is less privacy concern for them.
4334  pto->m_tx_relay->nNextInvSend = PoissonNextSend(current_time, std::chrono::seconds{INVENTORY_BROADCAST_INTERVAL >> 1});
4335  }
4336  }
4337 
4338  // Time to send but the peer has requested we not relay transactions.
4339  if (fSendTrickle) {
4340  LOCK(pto->m_tx_relay->cs_filter);
4341  if (!pto->m_tx_relay->fRelayTxes) pto->m_tx_relay->setInventoryTxToSend.clear();
4342  }
4343 
4344  // Respond to BIP35 mempool requests
4345  if (fSendTrickle && pto->m_tx_relay->fSendMempool) {
4346  auto vtxinfo = m_mempool.infoAll();
4347  pto->m_tx_relay->fSendMempool = false;
4348  CFeeRate filterrate;
4349  {
4350  LOCK(pto->m_tx_relay->cs_feeFilter);
4351  filterrate = CFeeRate(pto->m_tx_relay->minFeeFilter);
4352  }
4353 
4354  LOCK(pto->m_tx_relay->cs_filter);
4355 
4356  for (const auto& txinfo : vtxinfo) {
4357  const uint256& hash = state.m_wtxid_relay ? txinfo.tx->GetWitnessHash() : txinfo.tx->GetHash();
4358  CInv inv(state.m_wtxid_relay ? MSG_WTX : MSG_TX, hash);
4359  pto->m_tx_relay->setInventoryTxToSend.erase(hash);
4360  // Don't send transactions that peers will not put into their mempool
4361  if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
4362  continue;
4363  }
4364  if (pto->m_tx_relay->pfilter) {
4365  if (!pto->m_tx_relay->pfilter->IsRelevantAndUpdate(*txinfo.tx)) continue;
4366  }
4367  pto->m_tx_relay->filterInventoryKnown.insert(hash);
4368  // Responses to MEMPOOL requests bypass the m_recently_announced_invs filter.
4369  vInv.push_back(inv);
4370  if (vInv.size() == MAX_INV_SZ) {
4371  m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
4372  vInv.clear();
4373  }
4374  }
4375  pto->m_tx_relay->m_last_mempool_req = GetTime<std::chrono::seconds>();
4376  }
4377 
4378  // Determine transactions to relay
4379  if (fSendTrickle) {
4380  // Produce a vector with all candidates for sending
4381  std::vector<std::set<uint256>::iterator> vInvTx;
4382  vInvTx.reserve(pto->m_tx_relay->setInventoryTxToSend.size());
4383  for (std::set<uint256>::iterator it = pto->m_tx_relay->setInventoryTxToSend.begin(); it != pto->m_tx_relay->setInventoryTxToSend.end(); it++) {
4384  vInvTx.push_back(it);
4385  }
4386  CFeeRate filterrate;
4387  {
4388  LOCK(pto->m_tx_relay->cs_feeFilter);
4389  filterrate = CFeeRate(pto->m_tx_relay->minFeeFilter);
4390  }
4391  // Topologically and fee-rate sort the inventory we send for privacy and priority reasons.
4392  // A heap is used so that not all items need sorting if only a few are being sent.
4393  CompareInvMempoolOrder compareInvMempoolOrder(&m_mempool, state.m_wtxid_relay);
4394  std::make_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
4395  // No reason to drain out at many times the network's capacity,
4396  // especially since we have many peers and some will draw much shorter delays.
4397  unsigned int nRelayedTransactions = 0;
4398  LOCK(pto->m_tx_relay->cs_filter);
4399  while (!vInvTx.empty() && nRelayedTransactions < INVENTORY_BROADCAST_MAX) {
4400  // Fetch the top element from the heap
4401  std::pop_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
4402  std::set<uint256>::iterator it = vInvTx.back();
4403  vInvTx.pop_back();
4404  uint256 hash = *it;
4405  CInv inv(state.m_wtxid_relay ? MSG_WTX : MSG_TX, hash);
4406  // Remove it from the to-be-sent set
4407  pto->m_tx_relay->setInventoryTxToSend.erase(it);
4408  // Check if not in the filter already
4409  if (pto->m_tx_relay->filterInventoryKnown.contains(hash)) {
4410  continue;
4411  }
4412  // Not in the mempool anymore? don't bother sending it.
4413  auto txinfo = m_mempool.info(ToGenTxid(inv));
4414  if (!txinfo.tx) {
4415  continue;
4416  }
4417  auto txid = txinfo.tx->GetHash();
4418  auto wtxid = txinfo.tx->GetWitnessHash();
4419  // Peer told you to not send transactions at that feerate? Don't bother sending it.
4420  if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
4421  continue;
4422  }
4423  if (pto->m_tx_relay->pfilter && !pto->m_tx_relay->pfilter->IsRelevantAndUpdate(*txinfo.tx)) continue;
4424  // Send
4425  State(pto->GetId())->m_recently_announced_invs.insert(hash);
4426  vInv.push_back(inv);
4427  nRelayedTransactions++;
4428  {
4429  // Expire old relay messages
4430  while (!vRelayExpiration.empty() && vRelayExpiration.front().first < count_microseconds(current_time))
4431  {
4432  mapRelay.erase(vRelayExpiration.front().second);
4433  vRelayExpiration.pop_front();
4434  }
4435 
4436  auto ret = mapRelay.emplace(txid, std::move(txinfo.tx));
4437  if (ret.second) {
4438  vRelayExpiration.emplace_back(count_microseconds(current_time + std::chrono::microseconds{RELAY_TX_CACHE_TIME}), ret.first);
4439  }
4440  // Add wtxid-based lookup into mapRelay as well, so that peers can request by wtxid
4441  auto ret2 = mapRelay.emplace(wtxid, ret.first->second);
4442  if (ret2.second) {
4443  vRelayExpiration.emplace_back(count_microseconds(current_time + std::chrono::microseconds{RELAY_TX_CACHE_TIME}), ret2.first);
4444  }
4445  }
4446  if (vInv.size() == MAX_INV_SZ) {
4447  m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
4448  vInv.clear();
4449  }
4450  pto->m_tx_relay->filterInventoryKnown.insert(hash);
4451  if (hash != txid) {
4452  // Insert txid into filterInventoryKnown, even for
4453  // wtxidrelay peers. This prevents re-adding of
4454  // unconfirmed parents to the recently_announced
4455  // filter, when a child tx is requested. See
4456  // ProcessGetData().
4457  pto->m_tx_relay->filterInventoryKnown.insert(txid);
4458  }
4459  }
4460  }
4461  }
4462  }
4463  if (!vInv.empty())
4464  m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
4465 
4466  // Detect whether we're stalling
4467  current_time = GetTime<std::chrono::microseconds>();
4468  if (state.nStallingSince && state.nStallingSince < count_microseconds(current_time) - 1000000 * BLOCK_STALLING_TIMEOUT) {
4469  // Stalling only triggers when the block download window cannot move. During normal steady state,
4470  // the download window should be much larger than the to-be-downloaded set of blocks, so disconnection
4471  // should only happen during initial block download.
4472  LogPrintf("Peer=%d is stalling block download, disconnecting\n", pto->GetId());
4473  pto->fDisconnect = true;
4474  return true;
4475  }
4476  // In case there is a block that has been in flight from this peer for 2 + 0.5 * N times the block interval
4477  // (with N the number of peers from which we're downloading validated blocks), disconnect due to timeout.
4478  // We compensate for other peers to prevent killing off peers due to our own downstream link
4479  // being saturated. We only count validated in-flight blocks so peers can't advertise non-existing block hashes
4480  // to unreasonably increase our timeout.
4481  if (state.vBlocksInFlight.size() > 0) {
4482  QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
4483  int nOtherPeersWithValidatedDownloads = nPeersWithValidatedDownloads - (state.nBlocksInFlightValidHeaders > 0);
4484  if (count_microseconds(current_time) > state.nDownloadingSince + consensusParams.nPowTargetSpacing * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) {
4485  LogPrintf("Timeout downloading block %s from peer=%d, disconnecting\n", queuedBlock.hash.ToString(), pto->GetId());
4486  pto->fDisconnect = true;
4487  return true;
4488  }
4489  }
4490  // Check for headers sync timeouts
4491  if (state.fSyncStarted && state.nHeadersSyncTimeout < std::numeric_limits<int64_t>::max()) {
4492  // Detect whether this is a stalling initial-headers-sync peer
4493  if (pindexBestHeader->GetBlockTime() <= GetAdjustedTime() - 24 * 60 * 60) {
4494  if (count_microseconds(current_time) > state.nHeadersSyncTimeout && nSyncStarted == 1 && (nPreferredDownload - state.fPreferredDownload >= 1)) {
4495  // Disconnect a peer (without the noban permission) if it is our only sync peer,
4496  // and we have others we could be using instead.
4497  // Note: If all our peers are inbound, then we won't
4498  // disconnect our sync peer for stalling; we have bigger
4499  // problems if we can't get any outbound peers.
4500  if (!pto->HasPermission(PF_NOBAN)) {
4501  LogPrintf("Timeout downloading headers from peer=%d, disconnecting\n", pto->GetId());
4502  pto->fDisconnect = true;
4503  return true;
4504  } else {
4505  LogPrintf("Timeout downloading headers from noban peer=%d, not disconnecting\n", pto->GetId());
4506  // Reset the headers sync state so that we have a
4507  // chance to try downloading from a different peer.
4508  // Note: this will also result in at least one more
4509  // getheaders message to be sent to
4510  // this peer (eventually).
4511  state.fSyncStarted = false;
4512  nSyncStarted--;
4513  state.nHeadersSyncTimeout = 0;
4514  }
4515  }
4516  } else {
4517  // After we've caught up once, reset the timeout so we can't trigger
4518  // disconnect later.
4519  state.nHeadersSyncTimeout = std::numeric_limits<int64_t>::max();
4520  }
4521  }
4522 
4523  // Check that outbound peers have reasonable chains
4524  // GetTime() is used by this anti-DoS logic so we can test this using mocktime
4525  ConsiderEviction(*pto, GetTime());
4526 
4527  //
4528  // Message: getdata (blocks)
4529  //
4530  std::vector<CInv> vGetData;
4531  if (!pto->fClient && ((fFetch && !pto->m_limited_node) || !::ChainstateActive().IsInitialBlockDownload()) && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
4532  std::vector<const CBlockIndex*> vToDownload;
4533  NodeId staller = -1;
4534  FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller, consensusParams);
4535  for (const CBlockIndex *pindex : vToDownload) {
4536  uint32_t nFetchFlags = GetFetchFlags(*pto);
4537  vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
4538  MarkBlockAsInFlight(m_mempool, pto->GetId(), pindex->GetBlockHash(), pindex);
4539  LogPrint(BCLog::NET, "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(),
4540  pindex->nHeight, pto->GetId());
4541  }
4542  if (state.nBlocksInFlight == 0 && staller != -1) {
4543  if (State(staller)->nStallingSince == 0) {
4544  State(staller)->nStallingSince = count_microseconds(current_time);
4545  LogPrint(BCLog::NET, "Stall started peer=%d\n", staller);
4546  }
4547  }
4548  }
4549 
4550  //
4551  // Message: getdata (non-blocks)
4552  //
4553  std::vector<std::pair<NodeId, GenTxid>> expired;
4554  auto requestable = m_txrequest.GetRequestable(pto->GetId(), current_time, &expired);
4555  for (const auto& entry : expired) {
4556  LogPrint(BCLog::NET, "timeout of inflight %s %s from peer=%d\n", entry.second.IsWtxid() ? "wtx" : "tx",
4557  entry.second.GetHash().ToString(), entry.first);
4558  }
4559  for (const GenTxid& gtxid : requestable) {
4560  if (!AlreadyHaveTx(gtxid, m_mempool)) {
4561  LogPrint(BCLog::NET, "Requesting %s %s peer=%d\n", gtxid.IsWtxid() ? "wtx" : "tx",
4562  gtxid.GetHash().ToString(), pto->GetId());
4563  vGetData.emplace_back(gtxid.IsWtxid() ? MSG_WTX : (MSG_TX | GetFetchFlags(*pto)), gtxid.GetHash());
4564  if (vGetData.size() >= MAX_GETDATA_SZ) {
4565  m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
4566  vGetData.clear();
4567  }
4568  m_txrequest.RequestedTx(pto->GetId(), gtxid.GetHash(), current_time + GETDATA_TX_INTERVAL);
4569  } else {
4570  // We have already seen this transaction, no need to download. This is just a belt-and-suspenders, as
4571  // this should already be called whenever a transaction becomes AlreadyHaveTx().
4572  m_txrequest.ForgetTxHash(gtxid.GetHash());
4573  }
4574  }
4575 
4576 
4577  if (!vGetData.empty())
4578  m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
4579 
4580  //
4581  // Message: feefilter
4582  //
4583  if (pto->m_tx_relay != nullptr && pto->GetCommonVersion() >= FEEFILTER_VERSION && gArgs.GetBoolArg("-feefilter", DEFAULT_FEEFILTER) &&
4584  !pto->HasPermission(PF_FORCERELAY) // peers with the forcerelay permission should not filter txs to us
4585  ) {
4586  CAmount currentFilter = m_mempool.GetMinFee(gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFeePerK();
4587  static FeeFilterRounder g_filter_rounder{CFeeRate{DEFAULT_MIN_RELAY_TX_FEE}};
4589  // Received tx-inv messages are discarded when the active
4590  // chainstate is in IBD, so tell the peer to not send them.
4591  currentFilter = MAX_MONEY;
4592  } else {
4593  static const CAmount MAX_FILTER{g_filter_rounder.round(MAX_MONEY)};
4594  if (pto->m_tx_relay->lastSentFeeFilter == MAX_FILTER) {
4595  // Send the current filter if we sent MAX_FILTER previously
4596  // and made it out of IBD.
4597  pto->m_tx_relay->nextSendTimeFeeFilter = count_microseconds(current_time) - 1;
4598  }
4599  }
4600  if (count_microseconds(current_time) > pto->m_tx_relay->nextSendTimeFeeFilter) {
4601  CAmount filterToSend = g_filter_rounder.round(currentFilter);
4602  // We always have a fee filter of at least minRelayTxFee
4603  filterToSend = std::max(filterToSend, ::minRelayTxFee.GetFeePerK());
4604  if (filterToSend != pto->m_tx_relay->lastSentFeeFilter) {
4605  m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::FEEFILTER, filterToSend));
4606  pto->m_tx_relay->lastSentFeeFilter = filterToSend;
4607  }
4608  pto->m_tx_relay->nextSendTimeFeeFilter = PoissonNextSend(count_microseconds(current_time), AVG_FEEFILTER_BROADCAST_INTERVAL);
4609  }
4610  // If the fee filter has changed substantially and it's still more than MAX_FEEFILTER_CHANGE_DELAY
4611  // until scheduled broadcast, then move the broadcast to within MAX_FEEFILTER_CHANGE_DELAY.
4612  else if (count_microseconds(current_time) + MAX_FEEFILTER_CHANGE_DELAY * 1000000 < pto->m_tx_relay->nextSendTimeFeeFilter &&
4613  (currentFilter < 3 * pto->m_tx_relay->lastSentFeeFilter / 4 || currentFilter > 4 * pto->m_tx_relay->lastSentFeeFilter / 3)) {
4614  pto->m_tx_relay->nextSendTimeFeeFilter = count_microseconds(current_time) + GetRandInt(MAX_FEEFILTER_CHANGE_DELAY) * 1000000;
4615  }
4616  }
4617  } // release cs_main
4618  return true;
4619 }
4620 
4621 class CNetProcessingCleanup
4622 {
4623 public:
4624  CNetProcessingCleanup() {}
4625  ~CNetProcessingCleanup() {
4626  // orphan transactions
4627  mapOrphanTransactions.clear();
4628  mapOrphanTransactionsByPrev.clear();
4629  g_orphans_by_wtxid.clear();
4630  }
4631 };
4632 static CNetProcessingCleanup instance_of_cnetprocessingcleanup;
std::shared_ptr< const CTransaction > CTransactionRef
Definition: transaction.h:395
static constexpr unsigned int INVENTORY_BROADCAST_PER_SECOND
Maximum rate of inventory items to send per second.
const char * GETCFILTERS
getcfilters requests compact filters for a range of blocks.
Definition: protocol.cpp:41
arith_uint256 nChainWork
(memory only) Total amount of work (expected number of hashes) in the chain up to and including this ...
Definition: chain.h:162
static int64_t GetTransactionWeight(const CTransaction &tx)
Definition: validation.h:146
bool SendMessages(CNode *pto) override EXCLUSIVE_LOCKS_REQUIRED(pto-> cs_sendProcessing)
Send queued protocol messages to be sent to a given node.
static constexpr int64_t MINIMUM_CONNECT_TIME
Minimum time an outbound-peer-eviction candidate must be connected for, in order to evict...
static constexpr std::chrono::microseconds GETDATA_TX_INTERVAL
How long to wait (in microseconds) before downloading a transaction from an additional peer...
CAmount GetFeePerK() const
Return the fee in satoshis for a size of 1000 bytes.
Definition: feerate.h:60
std::string SanitizeString(const std::string &str, int rule)
Remove unsafe chars.
enum ReadStatus_t ReadStatus
const char * PING
The ping message is sent periodically to help confirm that the receiving peer is still connected...
Definition: protocol.cpp:29
static constexpr auto TXID_RELAY_DELAY
How long to delay requesting transactions via txids, if we have wtxid-relaying peers.
CFeeRate GetMinFee(size_t sizelimit) const
The minimum fee to get into the mempool, which may itself not be enough for larger-sized transactions...
Definition: txmempool.cpp:1000
bool IsMsgBlk() const
Definition: protocol.h:442
bool MaybeDiscourageAndDisconnect(CNode &pnode)
Maybe disconnect a peer and discourage future connections from its address.
std::atomic< uint64_t > nPingNonceSent
Definition: net.h:1049
BlockFilterIndex is used to store and retrieve block filters, hashes, and headers for a range of bloc...
bool HasPermission(NetPermissionFlags permission) const
Definition: net.h:884
bool IsReachable(enum Network net)
const char * FILTERLOAD
The filterload message tells the receiving peer to filter all relayed transactions and requested merk...
Definition: protocol.cpp:32
const char * MERKLEBLOCK
The merkleblock message is a reply to a getdata message which requested a block using the inventory t...
Definition: protocol.cpp:21
std::atomic_bool fPauseSend
Definition: net.h:906
uint64_t GetRand(uint64_t nMax) noexcept
Generate a uniform random integer in the range [0..nMax).
Definition: random.cpp:592
static const int SERIALIZE_TRANSACTION_NO_WITNESS
A flag that is ORed into the protocol version to designate that a transaction should be (un)serialize...
Definition: transaction.h:23
invalid by consensus rules
int GetCommonVersion() const
Definition: net.h:1127
Optional< txiter > GetIter(const uint256 &txid) const EXCLUSIVE_LOCKS_REQUIRED(cs)
Returns an iterator to the given hash, if found.
Definition: txmempool.cpp:886
const char * BLOCKTXN
Contains a BlockTransactions.
Definition: protocol.cpp:40
bool fPruneMode
True if we're running in -prune mode.
Definition: validation.cpp:139
int in_avail() const
Definition: streams.h:391
static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER
Number of blocks that can be requested at any given time from a single peer.
static constexpr unsigned int INVENTORY_BROADCAST_MAX
Maximum number of inventory items to send per transmission.
uint64_t GetLocalNonce() const
Definition: net.h:1107
int64_t m_stale_tip_check_time
Next time to check for stale tip.
static const int MAX_BLOCKTXN_DEPTH
Maximum depth of blocks we're willing to respond to GETBLOCKTXN requests for.
void Discourage(const CNetAddr &net_addr)
Definition: banman.cpp:112
const char * SENDADDRV2
The sendaddrv2 message signals support for receiving ADDRV2 messages (BIP155).
Definition: protocol.cpp:18
Definition: banman.h:57
bool contains(const std::vector< unsigned char > &vKey) const
Definition: bloom.cpp:250
ReadStatus FillBlock(CBlock &block, const std::vector< CTransactionRef > &vtx_missing)
ServiceFlags
nServices flags
Definition: protocol.h:269
void SetNull()
Definition: uint256.h:39
#define LogPrint(category,...)
Definition: logging.h:182
unsigned int GetReceiveFloodSize() const
bool fListen
Describes a place in the block chain to another node such that if the other node doesn't have the sam...
Definition: block.h:114
void InitializeNode(CNode *pnode) override
Initialize a peer by adding it to mapNodeState and pushing a message requesting its version...
CBlockIndex * pprev
pointer to the index of the predecessor of this block
Definition: chain.h:144
bool ProcessNewBlock(const CChainParams &chainparams, const std::shared_ptr< const CBlock > pblock, bool fForceProcessing, bool *fNewBlock) LOCKS_EXCLUDED(cs_main)
Process an incoming block.
CSipHasher & Write(uint64_t data)
Hash a 64-bit integer worth of data It is treated as if this was the little-endian interpretation of ...
Definition: siphash.cpp:28
uint32_t nStatus
Verification status of this block. See enum BlockStatus.
Definition: chain.h:174
void scheduleEvery(Function f, std::chrono::milliseconds delta)
Repeat f until the scheduler is stopped.
Definition: scheduler.cpp:108
void SetIP(const CNetAddr &ip)
Definition: netaddress.cpp:122
void WakeMessageHandler()
void SetServices(const CService &addr, ServiceFlags nServices)
bool GetBoolArg(const std::string &strArg, bool fDefault) const
Return boolean argument or default value.
Definition: system.cpp:479
Definition: block.h:62
We don't have the previous block the checked one is built on.
CChain & ChainActive()
Please prefer the identical ChainstateManager::ActiveChain.
Definition: validation.cpp:113
void PushTxInventory(const uint256 &hash)
Definition: net.h:1184
const char * GETADDR
The getaddr message requests an addr message from the receiving node, preferably one with lots of IP ...
Definition: protocol.cpp:27
uint64_t ReadCompactSize(Stream &is, bool range_check=true)
Decode a CompactSize-encoded variable-length integer.
Definition: serialize.h:318
uint64_t GetHash() const
Definition: netaddress.cpp:779
static const unsigned int DEFAULT_MAX_MEMPOOL_SIZE
Default for -maxmempool, maximum megabytes of mempool memory usage.
Definition: policy.h:32
static const CAmount MAX_MONEY
No amount larger than this (in satoshi) is valid.
Definition: amount.h:25
bool SeenLocal(const CService &addr)
Provides an interface for creating and interacting with one or two chainstates: an IBD chainstate gen...
Definition: validation.h:781
std::vector< uint16_t > indexes
bool IsValid() const
Definition: validation.h:119
#define strprintf
Format arguments and return the string or write to given std::ostream (see tinyformat::format doc for...
Definition: tinyformat.h:1164
int64_t count_microseconds(std::chrono::microseconds t)
Definition: time.h:26
bool IsMsgWitnessBlk() const
Definition: protocol.h:446
void ProcessOrphanTx(std::set< uint256 > &orphan_work_set) EXCLUSIVE_LOCKS_REQUIRED(cs_main
void insert(const std::vector< unsigned char > &vKey)
Definition: bloom.cpp:213
std::vector< CAddress > GetAddresses(size_t max_addresses, size_t max_pct)
reverse_range< T > reverse_iterate(T &x)
bool ReadRawBlockFromDisk(std::vector< uint8_t > &block, const FlatFilePos &pos, const CMessageHeader::MessageStartChars &message_start)
inv message data
Definition: protocol.h:427
static void LogPrintf(const char *fmt, const Args &...args)
Definition: logging.h:166
invalid proof of work or time too old
bool ReadBlockFromDisk(CBlock &block, const FlatFilePos &pos, const Consensus::Params &consensusParams)
Functions for disk access for blocks.
const char * SENDCMPCT
Contains a 1-byte bool and 8-byte LE version number.
Definition: protocol.cpp:37
BlockFilterIndex * GetBlockFilterIndex(BlockFilterType filter_type)
Get a block filter index by type.
static constexpr int STALE_RELAY_AGE_LIMIT
Age after which a stale block will no longer be served if requested as protection against fingerprint...
static const unsigned int MIN_BLOCKS_TO_KEEP
Block files containing a block-height within MIN_BLOCKS_TO_KEEP of ChainActive().Tip() will not be pr...
Definition: validation.h:84
constexpr auto GetRandMillis
Definition: random.h:84
const CBlockIndex * LastCommonAncestor(const CBlockIndex *pa, const CBlockIndex *pb)
Find the last common ancestor two blocks have.
Definition: chain.cpp:156
transaction was missing some of its inputs
unsigned int nHeight
All parent headers found, difficulty matches, timestamp >= median previous, checkpoint.
Definition: chain.h:101
bool g_relay_txes
bool MoneyRange(const CAmount &nValue)
Definition: amount.h:26
bool exists(const GenTxid &gtxid) const
Definition: txmempool.h:736
std::vector< unsigned char > ParseHex(const char *psz)
const Consensus::Params & GetConsensus() const
Definition: chainparams.h:65
std::atomic_bool m_wants_addrv2
Whether the peer has signaled support for receiving ADDRv2 (BIP155) messages, implying a preference to receive ADDRv2 instead of ADDR ones.
Definition: net.h:895
BloomFilter is a probabilistic filter which SPV clients provide so that we can filter the transaction...
Definition: bloom.h:44
Mutex cs_inventory
Definition: net.h:999
bool GetTryNewOutboundPeer()
const char * CFHEADERS
cfheaders is a response to a getcfheaders request containing a filter header and a vector of filter h...
Definition: protocol.cpp:44
bool HasWitness() const
Definition: transaction.h:341
bool IsMsgTx() const
Definition: protocol.h:441
Only first tx is coinbase, 2 <= coinbase input script length <= 100, transactions valid...
Definition: chain.h:108
void SetCommonVersion(int greatest_common_version)
Definition: net.h:1123
void ReattemptInitialBroadcast(CScheduler &scheduler) const
Retrieve unbroadcast transactions from the mempool and reattempt sending to peers.
static const int BIP0031_VERSION
BIP 0031, pong message, is enabled for all versions AFTER this one.
Definition: version.h:21
void PushMessage(CNode *pnode, CSerializedNetMsg &&msg)
CTransactionRef get(const uint256 &hash) const
Definition: txmempool.cpp:814
RecursiveMutex cs_vProcessMsg
Definition: net.h:860
Defined in BIP152.
Definition: protocol.h:418
arith_uint256 nMinimumChainWork
Minimum work we will assume exists on some valid chain.
Definition: validation.cpp:147
bool IsInitialBlockDownload() const
Check whether we are doing an initial block download (synchronizing from disk or network) ...
RecursiveMutex g_cs_orphans
bool CompareDepthAndScore(const uint256 &hasha, const uint256 &hashb, bool wtxid=false)
Definition: txmempool.cpp:738
void SetServiceFlagsIBDCache(bool state)
Set the current IBD status in order to figure out the desirable service flags.
Definition: protocol.cpp:145
bool MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState &state, bool via_compact_block, const std::string &message="")
Potentially mark a node discouraged based on the contents of a BlockValidationState object...
RollingBloomFilter is a probabilistic "keep track of most recently inserted" set. ...
Definition: bloom.h:99
void ProcessHeadersMessage(CNode &pfrom, const std::vector< CBlockHeader > &headers, bool via_compact_block)
Process a single headers message from a peer.
if(expired!=0)
Definition: validation.cpp:335
static const int SENDHEADERS_VERSION
"sendheaders" command and announcing blocks with headers starts with this version ...
Definition: version.h:27
CChainParams defines various tweakable parameters of a given instance of the Bitcoin system...
Definition: chainparams.h:52
void AdvertiseLocal(CNode *pnode)
size_t DynamicMemoryUsage() const
Definition: txmempool.cpp:929
violated mempool's fee/size/descendant/RBF/etc limits
static constexpr auto NONPREF_PEER_TX_DELAY
How long to delay requesting transactions from non-preferred peers.
PeerManager(const CChainParams &chainparams, CConnman &connman, BanMan *banman, CScheduler &scheduler, ChainstateManager &chainman, CTxMemPool &pool)
Double ended buffer combining vector and stream-like interfaces.
Definition: streams.h:202
inputs (covered by txid) failed policy rules
void SetTryNewOutboundPeer(bool flag)
const uint32_t MSG_WITNESS_FLAG
getdata message type flags
Definition: protocol.h:404
bool IsMsgCmpctBlk() const
Definition: protocol.h:445
std::vector< CAddress > vAddrToSend
Definition: net.h:989
void GetRandBytes(unsigned char *buf, int num) noexcept
Overall design of the RNG and entropy sources.
Definition: random.cpp:585
transaction spends a coinbase too early, or violates locktime/sequence locks
bool IsBlockOnlyConn() const
Definition: net.h:931
std::atomic< int > nStartingHeight
Definition: net.h:986
bool IsFeelerConn() const
Definition: net.h:935
const char * CFILTER
cfilter is a response to a getcfilters request containing a single compact filter.
Definition: protocol.cpp:42
static const unsigned int MAX_SCRIPT_ELEMENT_SIZE
Definition: script.h:23
void PushAddress(const CAddress &_addr, FastRandomContext &insecure_rand)
Definition: net.h:1155
const char * PONG
The pong message replies to a ping message, proving to the pinging node that the ponging node is stil...
Definition: protocol.cpp:30
unsigned char * begin()
Definition: uint256.h:58
State
The various states a (txhash,peer) pair can be in.
Definition: txrequest.cpp:39
bool IsLocal() const
Definition: netaddress.cpp:406
static constexpr std::chrono::seconds RELAY_TX_CACHE_TIME
How long to cache transactions in mapRelay for normal relay.
initial value. Tx has not yet been rejected
const char * WTXIDRELAY
Indicates that a node prefers to relay transactions via wtxid, rather than txid.
Definition: protocol.cpp:47
const char * HEADERS
The headers message sends one or more block headers to a node which previously requested certain head...
Definition: protocol.cpp:25
const char * GETCFCHECKPT
getcfcheckpt requests evenly spaced compact filter headers, enabling parallelized download and valida...
Definition: protocol.cpp:45
std::atomic< ServiceFlags > nServices
Definition: net.h:850
static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT
Protect at least this many outbound peers from disconnection due to slow/ behind headers chain...
const std::vector< CTxIn > vin
Definition: transaction.h:276
void SetAddrLocal(const CService &addrLocalIn)
May not be called more than once.
std::vector< TxMempoolInfo > infoAll() const
Definition: txmempool.cpp:800
const char * INV
The inv message (inventory message) transmits one or more inventories of objects known to the transmitting peer.
Definition: protocol.cpp:19
bool ForNode(NodeId id, std::function< bool(CNode *pnode)> func)
std::string ToString() const
Definition: uint256.cpp:64
CTxMemPoolEntry stores data about the corresponding transaction, as well as data about all in-mempool...
Definition: txmempool.h:78
bool DisconnectNode(const std::string &node)
bool RelayAddrsWithConn() const
Definition: net.h:948
int64_t CAmount
Amount in satoshis (Can be negative)
Definition: amount.h:12
static constexpr int ADDRV2_FORMAT
A flag that is ORed into the protocol version to designate that addresses should be serialized in (un...
Definition: netaddress.h:32
bool MaybePunishNodeForTx(NodeId nodeid, const TxValidationState &state, const std::string &message="")
Potentially disconnect and discourage a node based on the contents of a TxValidationState object...
bool AcceptToMemoryPool(CTxMemPool &pool, TxValidationState &state, const CTransactionRef &tx, std::list< CTransactionRef > *plTxnReplaced, bool bypass_limits, bool test_accept, CAmount *fee_out)
(try to) add transaction to memory pool plTxnReplaced will be appended to with all transactions repla...
bool IsDiscouraged(const CNetAddr &net_addr)
Return whether net_addr is discouraged.
Definition: banman.cpp:71
bool fSentAddr
Definition: net.h:900
void CheckForStaleTipAndEvictPeers()
Evict extra outbound peers.
std::atomic< int64_t > nPingUsecTime
Definition: net.h:1053
BlockFilterType
Definition: blockfilter.h:88
std::string ToString(const T &t)
Locale-independent version of std::to_string.
Definition: string.h:71
bool IsArgSet(const std::string &strArg) const
Return true if the given argument has been manually set.
Definition: system.cpp:371
CBlockIndex * Tip() const
Returns the index entry for the tip of this chain, or nullptr if none.
Definition: chain.h:390
bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats)
Get statistics from node state.
bool IsAddrFetchConn() const
Definition: net.h:939
std::atomic< int64_t > nMinPingUsecTime
Definition: net.h:1055
GenTxid ToGenTxid(const CInv &inv)
Convert a TX/WITNESS_TX/WTX CInv to a GenTxid.
Definition: protocol.cpp:235
#define LOCK2(cs1, cs2)
Definition: sync.h:231
initial value. Block has not yet been rejected
static constexpr int64_t EXTRA_PEER_CHECK_INTERVAL
How frequently to check for extra outbound peers and disconnect, in seconds.
Outputs do not overspend inputs, no double spends, coinbase output ok, no immature coinbase spends...
Definition: chain.h:112
void BlockConnected(const std::shared_ptr< const CBlock > &pblock, const CBlockIndex *pindexConnected) override
Overridden from CValidationInterface.
bool GetUseAddrmanOutgoing() const
Definition: net.h:269
int Height() const
Return the maximal height in the chain.
Definition: chain.h:415
std::set< CTxMemPoolEntryRef, CompareIteratorByHash > Parents
Definition: txmempool.h:83
bool fClient
Definition: net.h:889
void FinalizeNode(const CNode &node, bool &fUpdateConnectionTime) override
Handle removal of a peer by updating various state and removing it from mapNodeState.
Used to relay blocks as header + vector to filtered nodes.
Definition: merkleblock.h:124
const char * GETHEADERS
The getheaders message requests a headers message that provides block headers starting from a particu...
Definition: protocol.cpp:23
bool IsInboundConn() const
Definition: net.h:943
CBlockIndex * Next(const CBlockIndex *pindex) const
Find the successor of a block in this chain, or nullptr if the given index is not found or is the tip...
Definition: chain.h:407
Invalid by a change to consensus rules more recent than SegWit.
std::unique_ptr< CRollingBloomFilter > m_addr_known
Definition: net.h:990
static constexpr int64_t ORPHAN_TX_EXPIRE_TIME
Expiration time for orphan transactions in seconds.
void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) override
Overridden from CValidationInterface.
size_t nProcessQueueSize
Definition: net.h:862
Scripts & signatures ok. Implies all parents are also at least SCRIPTS.
Definition: chain.h:115
CBlockHeader GetBlockHeader() const
Definition: chain.h:220
Transaction might have a witness prior to SegWit activation, or witness may have been malleated (whic...
bool IsFullOutboundConn() const
Definition: net.h:923
int GetType() const
Definition: streams.h:394
CFeeRate minRelayTxFee
A fee rate smaller than this is considered zero fee (for relaying, mining and transaction creation) ...
Definition: validation.cpp:149
CBlockIndex * pindexBestHeader
Best header we've seen so far (used for getheaders queries' starting points).
Definition: validation.cpp:131
void BlockDisconnected(const std::shared_ptr< const CBlock > &block, const CBlockIndex *pindex) override
Notifies listeners of a block being disconnected.
this block was cached as being invalid and we didn't store the reason why
An input of a transaction.
Definition: transaction.h:65
static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS
Minimum blocks required to signal NODE_NETWORK_LIMITED.
static bool HasAllDesirableServiceFlags(ServiceFlags services)
A shortcut for (services & GetDesirableServiceFlags(services)) == GetDesirableServiceFlags(services)...
Definition: protocol.h:345
bool IsNull() const
Definition: uint256.h:31
TxMempoolInfo info(const uint256 &hash) const
Definition: txmempool.cpp:832
#define LOCK(cs)
Definition: sync.h:230
const char * name
Definition: rest.cpp:41
static constexpr std::chrono::minutes PING_INTERVAL
Time between pings automatically sent out for latency probing and keepalive.
const char * ADDRV2
The addrv2 message relays connection information for peers on the network just like the addr message...
Definition: protocol.cpp:17
bool IsPeerAddrLocalGood(CNode *pnode)
size_type size() const
Definition: streams.h:293
the block failed to meet one of our checkpoints
static const int INIT_PROTO_VERSION
initial proto version, to be increased after version/verack negotiation
Definition: version.h:15
CChainState & ActiveChainstate() const
The most-work chain.
bool IsTxAvailable(size_t index) const
AssertLockHeld(mempool.cs)
A combination of a network address (CNetAddr) and a (TCP) port.
Definition: netaddress.h:523
Fast randomness source.
Definition: random.h:119
Transport protocol agnostic message container.
Definition: net.h:734
bool ProcessMessages(CNode *pfrom, std::atomic< bool > &interrupt) override
Process protocol messages received from a given node.
static constexpr unsigned int INVENTORY_MAX_RECENT_RELAY
The number of most recently announced transactions a peer can request.
int64_t PoissonNextSendInbound(int64_t now, int average_interval_seconds)
Attempts to obfuscate tx time through exponentially distributed emitting.
bool OutboundTargetReached(bool historicalBlockServingLimit)
check if the outbound target is reached if param historicalBlockServingLimit is set true...
int64_t nPowTargetSpacing
Definition: params.h:89
static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_BASE
Headers download timeout expressed in microseconds. Timeout = base + per_header * (expected number of headers).
const char * SENDHEADERS
Indicates that a node prefers to receive new block announcements via a "headers" message rather than ...
Definition: protocol.cpp:35
static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER
const char * MEMPOOL
The mempool message requests the TXIDs of transactions that the receiving node has verified as valid ...
Definition: protocol.cpp:28
static const unsigned int DEFAULT_MIN_RELAY_TX_FEE
Default for -minrelaytxfee, minimum relay fee for transactions.
Definition: validation.h:56
bool ActivateBestChain(BlockValidationState &state, const CChainParams &chainparams, std::shared_ptr< const CBlock > pblock)
Find the best known block, and make it the tip of the block chain.
static const unsigned int MAX_HEADERS_RESULTS
Number of headers sent in one getheaders result.
void ConsiderEviction(CNode &pto, int64_t time_in_seconds) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
Consider evicting an outbound peer based on the amount of time they've been behind our tip...
static const int SHORT_IDS_BLOCKS_VERSION
short-id-based block download starts with this version
Definition: version.h:33
void ForEachNodeThen(Callable &&pre, CallableAfter &&post)
Definition: net.h:298
RecursiveMutex cs_main
Mutex to guard access to validation specific variables, such as reading or changing the chainstate...
Definition: validation.cpp:129
uint64_t Finalize() const
Compute the 64-bit SipHash-2-4 of the data written so far.
Definition: siphash.cpp:76
bool IsProxy(const CNetAddr &addr)
Definition: netbase.cpp:773
static constexpr int64_t CHAIN_SYNC_TIMEOUT
Timeout for (unprotected) outbound peers to sync to our chainwork, in seconds.
const std::vector< CTxOut > vout
Definition: transaction.h:277
A CService with information about it as peer.
Definition: protocol.h:360
uint256 hash
Definition: protocol.h:459
static const int64_t BLOCK_DOWNLOAD_TIMEOUT_PER_PEER
Additional block download timeout per parallel downloading peer (i.e.
std::string ToString() const
Definition: netaddress.cpp:981
static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY
SHA256("main address relay")[0:8].
bool IsInvalid() const
Definition: validation.h:120
static const bool DEFAULT_FEEFILTER
Default for using fee filter.
Definition: validation.h:80
const char * ADDR
The addr (IP address) message relays connection information for peers on the network.
Definition: protocol.cpp:16
int64_t NodeId
Definition: net.h:92
Definition: net.h:187
bool IsGenBlkMsg() const
Definition: protocol.h:453
Defined in BIP144.
Definition: protocol.h:419
const CMessageHeader::MessageStartChars & MessageStart() const
Definition: chainparams.h:66
bool IsWtxid() const
Definition: transaction.h:406
CConnman & m_connman
static const int DISCOURAGEMENT_THRESHOLD
Threshold for marking a node to be discouraged, e.g.
static const unsigned int INVENTORY_BROADCAST_INTERVAL
Average delay between trickled inventory transmissions in seconds.
const char * FILTERCLEAR
The filterclear message tells the receiving peer to remove a previously-set bloom filter...
Definition: protocol.cpp:34
bool fGetAddr
Definition: net.h:991
std::atomic_bool fImporting
std::vector< uint256 > vHave
Definition: block.h:116
bool IsGenTxMsg() const
Definition: protocol.h:449
const char * NOTFOUND
The notfound message is a reply to a getdata message which requested an object the receiving node does not have available for relay.
Definition: protocol.cpp:31
Parameters that influence chain consensus.
Definition: params.h:56
static const int MAX_CMPCTBLOCK_DEPTH
Maximum depth of blocks we're willing to serve as compact blocks to peers when requested.
int GetVersion() const
Definition: streams.h:396
An outpoint - a combination of a transaction hash and an index n into its vout.
Definition: transaction.h:26
bool AddNewAddresses(const std::vector< CAddress > &vAddr, const CAddress &addrFrom, int64_t nTimePenalty=0)
const char * BLOCK
The block message transmits a single serialized block.
Definition: protocol.cpp:26
std::atomic_bool fDisconnect
Definition: net.h:899
const char * FEEFILTER
The feefilter message tells the receiving peer not to inv us any txs which do not meet the specified ...
Definition: protocol.cpp:36
const char * GETCFHEADERS
getcfheaders requests a compact filter header and the filter hashes for a range of blocks...
Definition: protocol.cpp:43
int GetMyStartingHeight() const
Definition: net.h:1111
const uint256 & GetWitnessHash() const
Definition: transaction.h:312
ServiceFlags GetLocalServices() const
Definition: net.h:1197
bool IsRoutable() const
Definition: netaddress.cpp:466
static RPCHelpMan send()
Definition: rpcwallet.cpp:4002
void EvictExtraOutboundPeers(int64_t time_in_seconds) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
If we have extra outbound peers, try to disconnect the one with the oldest block announcement.
void SendBlockTransactions(CNode &pfrom, const CBlock &block, const BlockTransactionsRequest &req)
#define WITH_LOCK(cs, code)
Run code while locking a mutex.
Definition: sync.h:257
void AddTxAnnouncement(const CNode &node, const GenTxid &gtxid, std::chrono::microseconds current_time) EXCLUSIVE_LOCKS_REQUIRED(const CChainParams & m_chainparams
Register with TxRequestTracker that an INV has been received from a peer.
static bool MayHaveUsefulAddressDB(ServiceFlags services)
Checks if a peer with the given service flags may be capable of having a robust address-storage DB...
Definition: protocol.h:354
bool GetNetworkActive() const
Definition: net.h:268
bool IsMsgWtx() const
Definition: protocol.h:443
static const int MAX_UNCONNECTING_HEADERS
Maximum number of unconnecting headers announcements before DoS score.
void Misbehaving(const NodeId pnode, const int howmuch, const std::string &message)
Increment peer's misbehavior score.
RecursiveMutex cs_SubVer
Definition: net.h:877
const uint256 & GetHash() const
Definition: transaction.h:407
bool CheckIncomingNonce(uint64_t nonce)
bool IsAddrV1Compatible() const
Check if the current object can be serialized in pre-ADDRv2/BIP155 format.
Definition: netaddress.cpp:481
const CAddress addr
Definition: net.h:873
const char * GETBLOCKS
The getblocks message requests an inv message that provides block header hashes starting from a parti...
Definition: protocol.cpp:22
static constexpr int32_t MAX_PEER_TX_ANNOUNCEMENTS
Maximum number of transactions to consider for requesting, per peer.
static constexpr int CFCHECKPT_INTERVAL
Interval between compact filter checkpoints.
const int64_t nTimeConnected
Definition: net.h:870
Transaction is missing a witness.
bool ProcessNewBlockHeaders(const std::vector< CBlockHeader > &block, BlockValidationState &state, const CChainParams &chainparams, const CBlockIndex **ppindex=nullptr) LOCKS_EXCLUDED(cs_main)
Process incoming block headers.
void RelayTransaction(const uint256 &txid, const uint256 &wtxid, const CConnman &connman) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
Relay transaction to every node.
ChainstateManager & m_chainman
CTxMemPool & m_mempool
bool IsNull() const
Definition: block.h:135
std::atomic_bool fReindex
const char * VERACK
The verack message acknowledges a previously-received version message, informing the connecting node ...
Definition: protocol.cpp:15
CAmount GetFee(size_t nBytes) const
Return the fee in satoshis for the given size in bytes.
Definition: feerate.cpp:21
CBlockIndex * LookupBlockIndex(const uint256 &hash)
Definition: validation.cpp:173
std::atomic< bool > fPingQueued
Definition: net.h:1057
256-bit opaque blob.
Definition: uint256.h:124
invalid by consensus rules (excluding any below reasons)
bool ExpectServicesFromConn() const
Definition: net.h:953
CChainState & ChainstateActive()
Please prefer the identical ChainstateManager::ActiveChainstate.
Definition: validation.cpp:106
static const unsigned int MAX_GETDATA_SZ
Limit to avoid sending big packets.
ServiceFlags nServices
Definition: protocol.h:400
#define EXCLUSIVE_LOCKS_REQUIRED(...)
Definition: threadsafety.h:49
std::vector< CTransactionRef > vtx
Definition: block.h:66
const char * CMPCTBLOCK
Contains a CBlockHeaderAndShortTxIDs object - providing a header and list of "short txids"...
Definition: protocol.cpp:38
uint256 GetHash() const
Definition: block.cpp:11
the block's data didn't match the data committed to by the PoW
Result GetResult() const
Definition: validation.h:122
CTxMemPool stores valid-according-to-the-current-best-chain transactions that may be included in the ...
Definition: txmempool.h:488
std::string GetAddrName() const
void AddKnownTx(const uint256 &hash)
Definition: net.h:1176
void check(const CCoinsViewCache *pcoins) const
If sanity-checking is turned on, check makes sure the pool is consistent (does not contain two transa...
Definition: txmempool.cpp:620
std::atomic< int64_t > nLastTXTime
UNIX epoch time of the last transaction received from this peer that we had not yet seen (e...
Definition: net.h:1045
#define LOCKS_EXCLUDED(...)
Definition: threadsafety.h:48
bool LookupFilterHashRange(int start_height, const CBlockIndex *stop_index, std::vector< uint256 > &hashes_out) const
Get a range of filter hashes between two heights on a chain.
static const int FEEFILTER_VERSION
"feefilter" tells peers to filter invs to you by fee starts with this version
Definition: version.h:30
const char * VERSION
The version message provides information about the transmitting node to the receiving node at the beg...
Definition: protocol.cpp:14
bool IsOutboundOrBlockRelayConn() const
Definition: net.h:908
std::vector< std::pair< unsigned int, uint256 > > vMatchedTxn
Public only for unit testing and relay testing (not relayed).
Definition: merkleblock.h:137
The block chain is a tree shaped structure starting with the genesis block at the root...
Definition: chain.h:137
uint256 hashContinue
Definition: net.h:985
static const unsigned int BLOCK_DOWNLOAD_WINDOW
Size of the "block download window": how far ahead of our current height do we fetch? Larger windows tolerate larger download speed differences between peer, but increase the potential degree of disordering of blocks on disk (which make reindexing and pruning harder).
static const int MIN_PEER_PROTO_VERSION
disconnect from peers older than this proto version
Definition: version.h:18
static const unsigned int MAX_INV_SZ
The maximum number of entries in an 'inv' protocol message.
static const int PROTOCOL_VERSION
network protocol versioning
Definition: version.h:12
static const unsigned int MAX_STANDARD_TX_WEIGHT
The maximum weight for transactions we're willing to relay/mine.
Definition: policy.h:24
std::string GetArg(const std::string &strArg, const std::string &strDefault) const
Return string argument or default value.
Definition: system.cpp:467
static const unsigned int DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN
Default number of orphan+recently-replaced txn to keep around for block reconstruction.
bool LookupFilterRange(int start_height, const CBlockIndex *stop_index, std::vector< BlockFilter > &filters_out) const
Get a range of filters between two heights on a chain.
A block this one builds on is invalid.
static constexpr size_t MAX_ADDR_TO_SEND
The maximum number of addresses from our addrman to return in response to a getaddr message...
Definition: net.h:56
std::set< uint256 > GetUnbroadcastTxs() const
Returns transactions in unbroadcast set.
Definition: txmempool.h:771
CBlockIndex * FindForkInGlobalIndex(const CChain &chain, const CBlockLocator &locator)
Find the last common block between the parameter chain and a locator.
Definition: validation.cpp:180
bool fLogIPs
Definition: logging.cpp:35
int64_t GetAdjustedTime()
Definition: timedata.cpp:34
ServiceFlags GetDesirableServiceFlags(ServiceFlags services)
Gets the set of service flags which are "desirable" for a given peer.
Definition: protocol.cpp:138
NodeId GetId() const
Definition: net.h:1103
static const unsigned int BLOCK_STALLING_TIMEOUT
Timeout in seconds during which a peer must stall block download progress before being disconnected...
bool LookupFilterHeader(const CBlockIndex *block_index, uint256 &header_out)
Get a single filter header by block.
int64_t PoissonNextSend(int64_t now, int average_interval_seconds)
Return a timestamp in the future (in microseconds) for exponentially distributed events.
std::string ConnectionTypeAsString() const
void SetBestHeight(int height)
#define LIMITED_STRING(obj, n)
Definition: serialize.h:481
bool Contains(const CBlockIndex *pindex) const
Efficiently check whether a block is present in this chain.
Definition: chain.h:402
std::atomic< int64_t > nTimeOffset
Definition: net.h:871
ArgsManager gArgs
Definition: system.cpp:77
const char * GETDATA
The getdata message requests one or more data objects from another node.
Definition: protocol.cpp:20
Fee rate in satoshis per kilobyte: CAmount / kB.
Definition: feerate.h:29
static constexpr auto OVERLOADED_PEER_TX_DELAY
How long to delay requesting transactions from overloaded peers (see MAX_PEER_TX_REQUEST_IN_FLIGHT).
std::atomic_bool fSuccessfullyConnected
Definition: net.h:896
SipHash-2-4.
Definition: siphash.h:13
#define AssertLockNotHeld(cs)
Definition: sync.h:80
std::string ToString() const
Definition: validation.h:125
static int count
Definition: tests.c:35
static const unsigned int MAX_SUBVERSION_LENGTH
Maximum length of the user agent string in version message.
Definition: net.h:60
std::atomic< int > nVersion
Definition: net.h:876
Invalid by a change to consensus rules more recent than SegWit.
bool IsValid(enum BlockStatus nUpTo=BLOCK_VALID_TRANSACTIONS) const
Check whether this block index entry is valid up to the passed validity level.
Definition: chain.h:282
#define GUARDED_BY(x)
Definition: threadsafety.h:38
bool IsWitnessEnabled(const CBlockIndex *pindexPrev, const Consensus::Params &params)
Check whether witness commitments are required for a block, and whether to enforce NULLDUMMY (BIP 147...
static size_t RecursiveDynamicUsage(const CScript &script)
Definition: core_memusage.h:12
const char * CFCHECKPT
cfcheckpt is a response to a getcfcheckpt request containing a vector of evenly spaced filter headers...
Definition: protocol.cpp:46
ReadStatus InitData(const CBlockHeaderAndShortTxIDs &cmpctblock, const std::vector< std::pair< uint256, CTransactionRef >> &extra_txn)
const uint256 & GetHash() const
Definition: transaction.h:311
bool m_limited_node
Definition: net.h:890
block timestamp was > 2 hours in the future (or our clock is bad)
static const unsigned int MAX_BLOCKS_TO_ANNOUNCE
Maximum number of headers to announce when relaying blocks with headers message.
int GetExtraOutboundCount()
void RemoveUnbroadcastTx(const uint256 &txid, const bool unchecked=false)
Removes a transaction from the unbroadcast set.
Definition: txmempool.cpp:935
static constexpr std::chrono::seconds UNCONDITIONAL_RELAY_DELAY
How long a transaction has to be in the mempool before it can unconditionally be relayed (even when n...
static constexpr std::chrono::hours AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL
Average delay between local address broadcasts.
CSipHasher GetDeterministicRandomizer(uint64_t id) const
Get a unique deterministic randomizer.
bool IsBanned(const CNetAddr &net_addr)
Return whether net_addr is banned.
Definition: banman.cpp:77
std::string strSubVersion
Subversion as sent to the P2P network in version messages.
bool IsMsgFilteredBlk() const
Definition: protocol.h:444
const char * TX
The tx message transmits a single transaction.
Definition: protocol.cpp:24
static const unsigned int MAX_LOCATOR_SZ
The maximum number of entries in a locator.
The basic transaction that is broadcasted on the network and contained in blocks. ...
Definition: transaction.h:259
void MarkAddressGood(const CAddress &addr)
int nHeight
height of the entry in the chain. The genesis block has height 0
Definition: chain.h:150
Information about a peer.
Definition: net.h:840
std::vector< int > vHeightInFlight
void ForEachNode(const NodeFn &func)
Definition: net.h:279
Simple class for background tasks that should be run periodically or once "after a while"...
Definition: scheduler.h:32
static constexpr int32_t MAX_PEER_TX_REQUEST_IN_FLIGHT
Maximum number of in-flight transaction requests from a peer.
CBlockIndex * GetAncestor(int height)
Efficiently find an ancestor of this block.
Definition: chain.cpp:111
full block available in blk*.dat
Definition: chain.h:121
std::string ToString() const
Definition: protocol.cpp:181
void AddTimeData(const CNetAddr &ip, int64_t nOffsetSample)
Definition: timedata.cpp:41
int64_t GetBlockTime() const
Definition: chain.h:247
bool IsManualConn() const
Definition: net.h:927
void AddAddressKnown(const CAddress &_addr)
Definition: net.h:1149
void NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr< const CBlock > &pblock) override
Overridden from CValidationInterface.
int64_t GetTime()
Return system time (or mocked time, if set)
Definition: time.cpp:25
auto it
Definition: validation.cpp:381
static const unsigned int DEFAULT_MAX_ORPHAN_TRANSACTIONS
Default for -maxorphantx, maximum number of orphan transactions kept in memory.
Defined in BIP 339.
Definition: protocol.h:415
BanMan *const m_banman
Pointer to this node's banman.
static const int INVALID_CB_NO_BAN_VERSION
not banning for invalid compact blocks starts with this version
Definition: version.h:36
COutPoint prevout
Definition: transaction.h:68
std::atomic_bool fPauseRecv
Definition: net.h:905
void BlockChecked(const CBlock &block, const BlockValidationState &state) override
Overridden from CValidationInterface.
static const int WTXID_RELAY_VERSION
"wtxidrelay" command for wtxid-based relay starts with this version
Definition: version.h:39
int GetRandInt(int nMax) noexcept
Definition: random.cpp:597
CAddress GetLocalAddress(const CNetAddr *paddrPeer, ServiceFlags nLocalServices)
static constexpr int HISTORICAL_BLOCK_AGE
Age after which a block is considered historical for purposes of rate limiting block relay...
std::atomic< int64_t > nLastBlockTime
UNIX epoch time of the last block received from this peer that we had not yet seen (e...
Definition: net.h:1039
Tx already in mempool or conflicts with a tx in the chain (if it conflicts with another tx in mempool...
uint32_t nTime
Definition: protocol.h:398
static constexpr int64_t STALE_CHECK_INTERVAL
How frequently to check for stale tips, in seconds.
otherwise didn't meet our local policy rules
A generic txid reference (txid or wtxid).
Definition: transaction.h:400
static constexpr int64_t ORPHAN_TX_EXPIRE_INTERVAL
Minimum time between orphan transactions expire time checks in seconds.
uint64_t randrange(uint64_t range) noexcept
Generate a random integer in the range [0..range).
Definition: random.h:190
unsigned int nTx
Number of transactions in this block.
Definition: chain.h:166
void scheduleFromNow(Function f, std::chrono::milliseconds delta)
Call f once after the delta has passed.
Definition: scheduler.h:44
Nodes collect new transactions into a block, hash them into a hash tree, and scan through nonce value...
Definition: block.h:20
static const int64_t BLOCK_DOWNLOAD_TIMEOUT_BASE
Block download timeout base, expressed in millionths of the block interval (i.e.
const char * FILTERADD
The filteradd message tells the receiving peer to add a single element to a previously-set bloom filt...
Definition: protocol.cpp:33
RecursiveMutex cs
This mutex needs to be locked when accessing mapTx or other members that are guarded by it...
Definition: txmempool.h:576
void ProcessMessage(CNode &pfrom, const std::string &msg_type, CDataStream &vRecv, const std::chrono::microseconds time_received, const std::atomic< bool > &interruptMsgProc)
Process a single message from a peer.
int64_t GetBlockProofEquivalentTime(const CBlockIndex &to, const CBlockIndex &from, const CBlockIndex &tip, const Consensus::Params &params)
Return the time it would take to redo the work difference between from and to, assuming the current h...
Definition: chain.cpp:137
bool empty() const
Definition: streams.h:294
std::atomic< std::chrono::microseconds > m_ping_start
When the last ping was sent, or 0 if no ping was ever sent.
Definition: net.h:1051
bool IsRelayable() const
Whether this address should be relayed to other peers even if we can't reach it ourselves.
Definition: netaddress.h:217
uint256 GetBlockHash() const
Definition: chain.h:233
const std::string & BlockFilterTypeName(BlockFilterType filter_type)
Get the human-readable name for a filter type.
unsigned long size() const
Definition: txmempool.h:724
const char * GETBLOCKTXN
Contains a BlockTransactionsRequest Peer should respond with "blocktxn" message.
Definition: protocol.cpp:39
std::vector< unsigned char > GetKey() const
Definition: netaddress.cpp:959
std::unique_ptr< TxRelay > m_tx_relay
Definition: net.h:1029
static constexpr std::chrono::seconds AVG_ADDRESS_BROADCAST_INTERVAL
Average delay between peer address broadcasts.
uint256 hash
Definition: transaction.h:29
Span< A > constexpr MakeSpan(A(&a)[N])
MakeSpan for arrays:
Definition: span.h:193
bool HaveTxsDownloaded() const
Check whether this block's and all previous blocks' transactions have been downloaded (and stored to ...
Definition: chain.h:245