Bitcoin Core  22.0.0
P2P Digital Currency
net_processing.cpp
Go to the documentation of this file.
1 // Copyright (c) 2009-2010 Satoshi Nakamoto
2 // Copyright (c) 2009-2020 The Bitcoin Core developers
3 // Distributed under the MIT software license, see the accompanying
4 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
5 
6 #include <net_processing.h>
7 
8 #include <addrman.h>
9 #include <banman.h>
10 #include <blockencodings.h>
11 #include <blockfilter.h>
12 #include <chainparams.h>
13 #include <consensus/validation.h>
14 #include <deploymentstatus.h>
15 #include <hash.h>
16 #include <index/blockfilterindex.h>
17 #include <merkleblock.h>
18 #include <netbase.h>
19 #include <netmessagemaker.h>
20 #include <node/blockstorage.h>
21 #include <policy/fees.h>
22 #include <policy/policy.h>
23 #include <primitives/block.h>
24 #include <primitives/transaction.h>
25 #include <random.h>
26 #include <reverse_iterator.h>
27 #include <scheduler.h>
28 #include <streams.h>
29 #include <sync.h>
30 #include <tinyformat.h>
31 #include <txmempool.h>
32 #include <txorphanage.h>
33 #include <txrequest.h>
34 #include <util/check.h> // For NDEBUG compile time check
35 #include <util/strencodings.h>
36 #include <util/system.h>
37 #include <validation.h>
38 
39 #include <algorithm>
40 #include <memory>
41 #include <optional>
42 #include <typeinfo>
43 
/** How long to cache transactions in mapRelay for normal relay */
static constexpr auto RELAY_TX_CACHE_TIME = 15min;
/** How long a transaction has to be in the mempool before it can unconditionally be
 *  relayed (see static_assert on INVENTORY_MAX_RECENT_RELAY below). */
static constexpr auto UNCONDITIONAL_RELAY_DELAY = 2min;
/** Headers download timeout: base timeout, plus an increment per expected header. */
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_BASE = 15min;
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1ms;
/** Protect at least this many outbound peers from disconnection due to slow/behind chain sync. */
static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT = 4;
/** Timeout for (unprotected) outbound peers to sync to our chainwork. */
static constexpr int64_t CHAIN_SYNC_TIMEOUT = 20 * 60; // 20 minutes
/** How frequently to check for stale tips. */
static constexpr int64_t STALE_CHECK_INTERVAL = 10 * 60; // 10 minutes
/** How frequently to check for extra outbound peers and disconnect. */
static constexpr int64_t EXTRA_PEER_CHECK_INTERVAL = 45;
/** Minimum time an outbound-peer-eviction candidate must be connected for, in order to evict. */
static constexpr int64_t MINIMUM_CONNECT_TIME = 30;
/** Salt for the deterministic randomizer used when selecting peers for address relay. */
static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL;
/** Age (in seconds) after which a block is no longer served to peers, as
 *  fingerprinting protection on stale chains. */
static constexpr int STALE_RELAY_AGE_LIMIT = 30 * 24 * 60 * 60;
/** Age (in seconds) after which a block is considered historical for serving purposes. */
static constexpr int HISTORICAL_BLOCK_AGE = 7 * 24 * 60 * 60;
/** Time between pings automatically sent out for latency probing and keepalive. */
static constexpr std::chrono::minutes PING_INTERVAL{2};
/** The maximum number of entries in a locator. */
static const unsigned int MAX_LOCATOR_SZ = 101;
/** The maximum number of entries in an `inv` protocol message. */
static const unsigned int MAX_INV_SZ = 50000;
/** Maximum number of in-flight transaction requests from a peer (without Relay permission). */
static constexpr int32_t MAX_PEER_TX_REQUEST_IN_FLIGHT = 100;
/** Maximum number of queued transaction announcements tracked per peer (without Relay permission). */
static constexpr int32_t MAX_PEER_TX_ANNOUNCEMENTS = 5000;
/** Extra request delay for txid-based announcements while wtxid-relay peers are available. */
static constexpr auto TXID_RELAY_DELAY = std::chrono::seconds{2};
/** Extra request delay for announcements from non-preferred connections. */
static constexpr auto NONPREF_PEER_TX_DELAY = std::chrono::seconds{2};
/** Extra request delay for peers with too many requests already in flight. */
static constexpr auto OVERLOADED_PEER_TX_DELAY = std::chrono::seconds{2};
/** How long to wait before downloading a transaction from an additional peer. */
static constexpr std::chrono::microseconds GETDATA_TX_INTERVAL{std::chrono::seconds{60}};
/** Maximum number of inventory items we'll process in a single getdata message. */
static const unsigned int MAX_GETDATA_SZ = 1000;
/** Number of blocks that can be requested at any given time from a single peer. */
static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16;
/** Time during which a peer must stall block download progress before being disconnected. */
static constexpr auto BLOCK_STALLING_TIMEOUT = 2s;
/** Number of headers sent in one `headers` message result (also the max we accept). */
static const unsigned int MAX_HEADERS_RESULTS = 2000;
/** Maximum depth (from the tip) at which a compact block may be requested. */
static const int MAX_CMPCTBLOCK_DEPTH = 5;
/** Maximum depth (from the tip) at which a `blocktxn` response is served. */
static const int MAX_BLOCKTXN_DEPTH = 10;
/** Size of the moving block-download window past the common tip (see FindNextBlocksToDownload). */
static const unsigned int BLOCK_DOWNLOAD_WINDOW = 1024;
/** Block download timeout: base, plus increments per additional peer downloading,
 *  in units of the target block interval. */
static constexpr double BLOCK_DOWNLOAD_TIMEOUT_BASE = 1;
static constexpr double BLOCK_DOWNLOAD_TIMEOUT_PER_PEER = 0.5;
/** Maximum number of headers to announce when relaying blocks with headers messages. */
static const unsigned int MAX_BLOCKS_TO_ANNOUNCE = 8;
/** Maximum number of unconnecting headers announcements before applying a misbehavior penalty. */
static const int MAX_UNCONNECTING_HEADERS = 10;
/** Minimum blocks a NODE_NETWORK_LIMITED peer is expected to serve from its tip. */
static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS = 288;
/** Average delay between re-announcements of our own local address. */
static constexpr auto AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL = 24h;
/** Average delay between flushes of queued addr messages to peers. */
static constexpr auto AVG_ADDRESS_BROADCAST_INTERVAL = 30s;
/** Average delay between trickled inventory transmissions to inbound peers. */
static constexpr auto INBOUND_INVENTORY_BROADCAST_INTERVAL = 5s;
/** Average delay between trickled inventory transmissions to outbound peers (shorter:
 *  they are less likely to be adversarial). */
static constexpr auto OUTBOUND_INVENTORY_BROADCAST_INTERVAL = 2s;
/** Target rate of transaction inventory announcements per second. */
static constexpr unsigned int INVENTORY_BROADCAST_PER_SECOND = 7;
/** Capacity of the per-peer recently-announced-inventory rolling filter. */
static constexpr unsigned int INVENTORY_MAX_RECENT_RELAY = 3500;
/** The filter must be able to cover everything announced during the unconditional-relay
 *  window, otherwise announced-but-not-yet-requestable txs could be lost from it. */
static_assert(INVENTORY_MAX_RECENT_RELAY >= INVENTORY_BROADCAST_PER_SECOND * UNCONDITIONAL_RELAY_DELAY / std::chrono::seconds{1}, "INVENTORY_RELAY_MAX too low");
/** Average delay between feefilter broadcasts. */
static constexpr auto AVG_FEEFILTER_BROADCAST_INTERVAL = 10min;
/** Maximum feefilter broadcast delay after a significant fee-rate change. */
static constexpr auto MAX_FEEFILTER_CHANGE_DELAY = 5min;
/** Maximum number of compact filters returned for a single getcfilters request. */
static constexpr uint32_t MAX_GETCFILTERS_SIZE = 1000;
/** Maximum number of filter headers returned for a single getcfheaders request. */
static constexpr uint32_t MAX_GETCFHEADERS_SIZE = 2000;
/** Percentage of known addresses returned in response to a getaddr (capped by MAX_ADDR_TO_SEND). */
static constexpr size_t MAX_PCT_ADDR_TO_SEND = 23;
/** Maximum number of addresses in a single addr/addrv2 message, and queue cap per peer. */
static constexpr size_t MAX_ADDR_TO_SEND{1000};
/** Steady-state rate at which addr records from a peer are processed (token bucket refill). */
static constexpr double MAX_ADDR_RATE_PER_SECOND{0.1};
165 
166 // Internal stuff
167 namespace {
/** A block that has been requested from a peer and is tracked as in flight. */
struct QueuedBlock {
    /** Index entry of the requested block (never nullptr while queued). */
    const CBlockIndex* pindex;
    /** Optional: set only for compact-block downloads, where the block is
     *  reconstructed from the mempool plus a blocktxn response. */
    std::unique_ptr<PartiallyDownloadedBlock> partialBlock;
};
175 
/**
 * Network-processing state for a single peer, owned by PeerManagerImpl and
 * shared via PeerRef. Members are guarded individually as annotated; atomics
 * may be read without a lock.
 */
struct Peer {
    /** Same id as the corresponding CNode. */
    const NodeId m_id{0};

    /** Protects the misbehavior members below. */
    Mutex m_misbehavior_mutex;
    /** Accumulated misbehavior score for this peer. */
    int m_misbehavior_score GUARDED_BY(m_misbehavior_mutex){0};
    /** Whether this peer should be disconnected and its address discouraged. */
    bool m_should_discourage GUARDED_BY(m_misbehavior_mutex){false};

    /** Protects block-inventory relay members below. */
    Mutex m_block_inv_mutex;
    /** Block hashes queued for announcement via `inv`. */
    std::vector<uint256> m_blocks_for_inv_relay GUARDED_BY(m_block_inv_mutex);
    /** Block hashes queued for announcement via `headers`. */
    std::vector<uint256> m_blocks_for_headers_relay GUARDED_BY(m_block_inv_mutex);
    /** Continuation point for a getblocks sequence.
     *  NOTE(review): exact semantics live in the getblocks handler (not in this chunk) — confirm there. */
    uint256 m_continuation_block GUARDED_BY(m_block_inv_mutex) {};

    /** Peer-reported block height at connect time (-1 until version processed). */
    std::atomic<int> m_starting_height{-1};

    /** Nonce of the last `ping` we sent (0 if none outstanding). */
    std::atomic<uint64_t> m_ping_nonce_sent{0};
    /** When the outstanding ping was sent (0us if none). */
    std::atomic<std::chrono::microseconds> m_ping_start{0us};
    /** Whether a ping was explicitly requested (e.g. via RPC) and is pending. */
    std::atomic<bool> m_ping_queued{false};

    /** Addresses queued to be sent to this peer (capped at MAX_ADDR_TO_SEND by PushAddress). */
    std::vector<CAddress> m_addrs_to_send;
    /** Rolling filter of addresses this peer presumably knows; nullptr when addr
     *  relay is disabled for this peer (see RelayAddrsWithPeer / the constructor). */
    const std::unique_ptr<CRollingBloomFilter> m_addr_known;
    /** Whether we've sent a getaddr to this peer. */
    bool m_getaddr_sent{false};
    /** Protects the two send-timing members below. */
    mutable Mutex m_addr_send_times_mutex;
    /** Next scheduled time to flush m_addrs_to_send. */
    std::chrono::microseconds m_next_addr_send GUARDED_BY(m_addr_send_times_mutex){0};
    /** Next scheduled time to re-announce our own local address. */
    std::chrono::microseconds m_next_local_addr_send GUARDED_BY(m_addr_send_times_mutex){0};
    /** Whether the peer signaled support for addrv2 messages. */
    std::atomic_bool m_wants_addrv2{false};
    /** Whether this peer has already sent us a getaddr. */
    bool m_getaddr_recvd{false};
    /** Token bucket for rate-limiting addr processing (refilled at MAX_ADDR_RATE_PER_SECOND). */
    double m_addr_token_bucket{1.0};
    /** Last time the token bucket was replenished. */
    std::chrono::microseconds m_addr_token_timestamp{GetTime<std::chrono::microseconds>()};
    /** Count of addresses dropped due to rate limiting. */
    std::atomic<uint64_t> m_addr_rate_limited{0};
    /** Count of addresses accepted for processing. */
    std::atomic<uint64_t> m_addr_processed{0};

    /** Orphan transactions from this peer that are ready to be reconsidered. */
    std::set<uint256> m_orphan_work_set GUARDED_BY(g_cs_orphans);

    /** Protects m_getdata_requests. */
    Mutex m_getdata_requests_mutex;
    /** Work queue of getdata items received from this peer. */
    std::deque<CInv> m_getdata_requests GUARDED_BY(m_getdata_requests_mutex);

    /** addr_relay controls whether m_addr_known is allocated, i.e. whether we
     *  participate in address relay with this peer at all. */
    explicit Peer(NodeId id, bool addr_relay)
        : m_id(id)
        , m_addr_known{addr_relay ? std::make_unique<CRollingBloomFilter>(5000, 0.001) : nullptr}
    {}
};
266 
267 using PeerRef = std::shared_ptr<Peer>;
268 
269 class PeerManagerImpl final : public PeerManager
270 {
271 public:
272  PeerManagerImpl(const CChainParams& chainparams, CConnman& connman, CAddrMan& addrman,
273  BanMan* banman, CScheduler& scheduler, ChainstateManager& chainman,
274  CTxMemPool& pool, bool ignore_incoming_txs);
275 
277  void BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindexConnected) override;
278  void BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex) override;
279  void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) override;
280  void BlockChecked(const CBlock& block, const BlockValidationState& state) override;
281  void NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& pblock) override;
282 
284  void InitializeNode(CNode* pnode) override;
285  void FinalizeNode(const CNode& node) override;
286  bool ProcessMessages(CNode* pfrom, std::atomic<bool>& interrupt) override;
287  bool SendMessages(CNode* pto) override EXCLUSIVE_LOCKS_REQUIRED(pto->cs_sendProcessing);
288 
290  void CheckForStaleTipAndEvictPeers() override;
291  bool GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) const override;
292  bool IgnoresIncomingTxs() override { return m_ignore_incoming_txs; }
293  void SendPings() override;
294  void RelayTransaction(const uint256& txid, const uint256& wtxid) override;
295  void SetBestHeight(int height) override { m_best_height = height; };
296  void Misbehaving(const NodeId pnode, const int howmuch, const std::string& message) override;
297  void ProcessMessage(CNode& pfrom, const std::string& msg_type, CDataStream& vRecv,
298  const std::chrono::microseconds time_received, const std::atomic<bool>& interruptMsgProc) override;
299 
300 private:
301  void _RelayTransaction(const uint256& txid, const uint256& wtxid)
303 
305  void ConsiderEviction(CNode& pto, int64_t time_in_seconds) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
306 
308  void EvictExtraOutboundPeers(int64_t time_in_seconds) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
309 
311  void ReattemptInitialBroadcast(CScheduler& scheduler);
312 
315  PeerRef GetPeerRef(NodeId id) const;
316 
319  PeerRef RemovePeer(NodeId id);
320 
331  bool MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState& state,
332  bool via_compact_block, const std::string& message = "");
333 
339  bool MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state, const std::string& message = "");
340 
347  bool MaybeDiscourageAndDisconnect(CNode& pnode, Peer& peer);
348 
349  void ProcessOrphanTx(std::set<uint256>& orphan_work_set) EXCLUSIVE_LOCKS_REQUIRED(cs_main, g_cs_orphans);
351  void ProcessHeadersMessage(CNode& pfrom, const Peer& peer,
352  const std::vector<CBlockHeader>& headers,
353  bool via_compact_block);
354 
355  void SendBlockTransactions(CNode& pfrom, const CBlock& block, const BlockTransactionsRequest& req);
356 
360  void AddTxAnnouncement(const CNode& node, const GenTxid& gtxid, std::chrono::microseconds current_time)
362 
364  void PushNodeVersion(CNode& pnode, int64_t nTime);
365 
370  void MaybeSendPing(CNode& node_to, Peer& peer, std::chrono::microseconds now);
371 
373  void MaybeSendAddr(CNode& node, Peer& peer, std::chrono::microseconds current_time);
374 
382  void RelayAddress(NodeId originator, const CAddress& addr, bool fReachable);
383 
385  void MaybeSendFeefilter(CNode& node, std::chrono::microseconds current_time) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
386 
387  const CChainParams& m_chainparams;
388  CConnman& m_connman;
389  CAddrMan& m_addrman;
391  BanMan* const m_banman;
392  ChainstateManager& m_chainman;
393  CTxMemPool& m_mempool;
394  TxRequestTracker m_txrequest GUARDED_BY(::cs_main);
395 
397  std::atomic<int> m_best_height{-1};
398 
399  int64_t m_stale_tip_check_time;
400 
402  const bool m_ignore_incoming_txs;
403 
406  bool m_initial_sync_finished{false};
407 
410  mutable Mutex m_peer_mutex;
417  std::map<NodeId, PeerRef> m_peer_map GUARDED_BY(m_peer_mutex);
418 
420  int nSyncStarted GUARDED_BY(cs_main) = 0;
421 
428  std::map<uint256, std::pair<NodeId, bool>> mapBlockSource GUARDED_BY(cs_main);
429 
431  int m_wtxid_relay_peers GUARDED_BY(cs_main) = 0;
432 
434  int m_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0;
435 
436  bool AlreadyHaveTx(const GenTxid& gtxid) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
437 
472  std::unique_ptr<CRollingBloomFilter> recentRejects GUARDED_BY(cs_main);
473  uint256 hashRecentRejectsChainTip GUARDED_BY(cs_main);
474 
475  /*
476  * Filter for transactions that have been recently confirmed.
477  * We use this to avoid requesting transactions that have already been
478  * confirnmed.
479  */
480  Mutex m_recent_confirmed_transactions_mutex;
481  std::unique_ptr<CRollingBloomFilter> m_recent_confirmed_transactions GUARDED_BY(m_recent_confirmed_transactions_mutex);
482 
484  bool IsBlockRequested(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
485 
490  void RemoveBlockRequest(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
491 
492  /* Mark a block as in flight
493  * Returns false, still setting pit, if the block was already in flight from the same peer
494  * pit will only be valid as long as the same cs_main lock is being held
495  */
496  bool BlockRequested(NodeId nodeid, const CBlockIndex& block, std::list<QueuedBlock>::iterator** pit = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
497 
498  bool TipMayBeStale() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
499 
503  void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
504 
505  std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> > mapBlocksInFlight GUARDED_BY(cs_main);
506 
508  std::atomic<int64_t> m_last_tip_update{0};
509 
511  CTransactionRef FindTxForGetData(const CNode& peer, const GenTxid& gtxid, const std::chrono::seconds mempool_req, const std::chrono::seconds now) LOCKS_EXCLUDED(cs_main);
512 
513  void ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic<bool>& interruptMsgProc) EXCLUSIVE_LOCKS_REQUIRED(peer.m_getdata_requests_mutex) LOCKS_EXCLUDED(::cs_main);
514 
516  void ProcessBlock(CNode& node, const std::shared_ptr<const CBlock>& block, bool force_processing);
517 
519  typedef std::map<uint256, CTransactionRef> MapRelay;
520  MapRelay mapRelay GUARDED_BY(cs_main);
522  std::deque<std::pair<std::chrono::microseconds, MapRelay::iterator>> g_relay_expiration GUARDED_BY(cs_main);
523 
530  void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
531 
533  std::list<NodeId> lNodesAnnouncingHeaderAndIDs GUARDED_BY(cs_main);
534 
536  int m_peers_downloading_from GUARDED_BY(cs_main) = 0;
537 
539  TxOrphanage m_orphanage;
540 
541  void AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans);
542 
546  std::vector<std::pair<uint256, CTransactionRef>> vExtraTxnForCompact GUARDED_BY(g_cs_orphans);
548  size_t vExtraTxnForCompactIt GUARDED_BY(g_cs_orphans) = 0;
549 
551  void ProcessBlockAvailability(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
553  void UpdateBlockAvailability(NodeId nodeid, const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
554  bool CanDirectFetch() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
555 
562  bool BlockRequestAllowed(const CBlockIndex* pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
563  bool AlreadyHaveBlock(const uint256& block_hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
564  void ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& inv);
565 
580  bool PrepareBlockFilterRequest(CNode& peer,
581  BlockFilterType filter_type, uint32_t start_height,
582  const uint256& stop_hash, uint32_t max_height_diff,
583  const CBlockIndex*& stop_index,
584  BlockFilterIndex*& filter_index);
585 
594  void ProcessGetCFilters(CNode& peer, CDataStream& vRecv);
595 
604  void ProcessGetCFHeaders(CNode& peer, CDataStream& vRecv);
605 
614  void ProcessGetCFCheckPt(CNode& peer, CDataStream& vRecv);
615 };
616 } // namespace
617 
namespace {
    /** Number of preferred download peers currently connected (maintained by UpdatePreferredDownload). */
    int nPreferredDownload GUARDED_BY(cs_main) = 0;
} // namespace
622 
623 namespace {
/** Validation-relevant, cs_main-guarded state for each connected node. */
struct CNodeState {
    //! The best known block we know this peer has announced.
    const CBlockIndex* pindexBestKnownBlock{nullptr};
    //! Hash of the last announced block we don't have an index entry for yet.
    uint256 hashLastUnknownBlock{};
    //! The last full block we both have.
    const CBlockIndex* pindexLastCommonBlock{nullptr};
    //! The best header we have sent to this peer.
    const CBlockIndex* pindexBestHeaderSent{nullptr};
    //! Length of the current run of unconnecting headers announcements.
    int nUnconnectingHeaders{0};
    //! Whether we've started headers synchronization with this peer.
    bool fSyncStarted{false};
    //! When to potentially disconnect this peer for stalling headers download.
    std::chrono::microseconds m_headers_sync_timeout{0us};
    //! Since when this peer has been stalling block download (0 = not stalling).
    std::chrono::microseconds m_stalling_since{0us};
    //! Blocks we have requested from this peer that are still in flight.
    std::list<QueuedBlock> vBlocksInFlight;
    //! When the first entry in vBlocksInFlight started downloading (0 = nothing in flight).
    std::chrono::microseconds m_downloading_since{0us};
    //! Number of blocks in flight from this peer (mirrors vBlocksInFlight.size()).
    int nBlocksInFlight{0};
    //! Whether we consider this a preferred download peer (see UpdatePreferredDownload).
    bool fPreferredDownload{false};
    //! Whether this peer wants block announcements via `headers` instead of `inv`.
    bool fPreferHeaders{false};
    //! Whether this peer asked us to announce blocks via cmpctblock.
    bool fPreferHeaderAndIDs{false};
    //! Whether this peer can serve us compact blocks.
    bool fProvidesHeaderAndIDs{false};
    //! Whether this peer can serve witness data.
    bool fHaveWitness{false};
    //! Whether this peer wants witnesses in its compact blocks.
    bool fWantsCmpctWitness{false};
    //! Whether this peer supports the compact block version we prefer
    //! (witness-capable when applicable; see MaybeSetPeerAsAnnouncingHeaderAndIDs).
    bool fSupportsDesiredCmpctVersion{false};

    /** State for enforcing the chain-sync timeout and eviction protection. */
    struct ChainSyncTimeoutState {
        //! Time at which we may disconnect the peer (0 = no timeout scheduled).
        int64_t m_timeout{0};
        //! Header the peer is expected to at least match in work by m_timeout.
        const CBlockIndex* m_work_header{nullptr};
        //! Whether we already sent a getheaders for this timeout round.
        bool m_sent_getheaders{false};
        //! Whether this peer is protected from chain-sync-based disconnection.
        bool m_protect{false};
    };

    ChainSyncTimeoutState m_chain_sync;

    //! Time of this peer's last block announcement (used by stale-tip eviction).
    int64_t m_last_block_announcement{0};

    //! Whether this peer is an inbound connection (set once at construction).
    const bool m_is_inbound;

    //! Rolling filter of inventory items we recently announced to this peer.
    CRollingBloomFilter m_recently_announced_invs = CRollingBloomFilter{INVENTORY_MAX_RECENT_RELAY, 0.000001};

    //! Whether this peer relays transactions by wtxid.
    bool m_wtxid_relay{false};

    CNodeState(bool is_inbound) : m_is_inbound(is_inbound) {}
};
724 
726 static std::map<NodeId, CNodeState> mapNodeState GUARDED_BY(cs_main);
727 
728 static CNodeState *State(NodeId pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
729  std::map<NodeId, CNodeState>::iterator it = mapNodeState.find(pnode);
730  if (it == mapNodeState.end())
731  return nullptr;
732  return &it->second;
733 }
734 
735 static bool RelayAddrsWithPeer(const Peer& peer)
736 {
737  return peer.m_addr_known != nullptr;
738 }
739 
745 static bool IsAddrCompatible(const Peer& peer, const CAddress& addr)
746 {
747  return peer.m_wants_addrv2 || addr.IsAddrV1Compatible();
748 }
749 
750 static void AddAddressKnown(Peer& peer, const CAddress& addr)
751 {
752  assert(peer.m_addr_known);
753  peer.m_addr_known->insert(addr.GetKey());
754 }
755 
756 static void PushAddress(Peer& peer, const CAddress& addr, FastRandomContext& insecure_rand)
757 {
758  // Known checking here is only to save space from duplicates.
759  // Before sending, we'll filter it again for known addresses that were
760  // added after addresses were pushed.
761  assert(peer.m_addr_known);
762  if (addr.IsValid() && !peer.m_addr_known->contains(addr.GetKey()) && IsAddrCompatible(peer, addr)) {
763  if (peer.m_addrs_to_send.size() >= MAX_ADDR_TO_SEND) {
764  peer.m_addrs_to_send[insecure_rand.randrange(peer.m_addrs_to_send.size())] = addr;
765  } else {
766  peer.m_addrs_to_send.push_back(addr);
767  }
768  }
769 }
770 
771 static void UpdatePreferredDownload(const CNode& node, CNodeState* state) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
772 {
773  nPreferredDownload -= state->fPreferredDownload;
774 
775  // Whether this node should be marked as a preferred download node.
776  state->fPreferredDownload = (!node.IsInboundConn() || node.HasPermission(NetPermissionFlags::NoBan)) && !node.IsAddrFetchConn() && !node.fClient;
777 
778  nPreferredDownload += state->fPreferredDownload;
779 }
780 
781 bool PeerManagerImpl::IsBlockRequested(const uint256& hash)
782 {
783  return mapBlocksInFlight.find(hash) != mapBlocksInFlight.end();
784 }
785 
/** Forget an in-flight block request (if any) and update the owning peer's
 *  download bookkeeping: queue entry, in-flight count, download/stall timers. */
void PeerManagerImpl::RemoveBlockRequest(const uint256& hash)
{
    auto it = mapBlocksInFlight.find(hash);
    if (it == mapBlocksInFlight.end()) {
        // Block was not requested
        return;
    }

    auto [node_id, list_it] = it->second;
    CNodeState *state = State(node_id);
    assert(state != nullptr);

    if (state->vBlocksInFlight.begin() == list_it) {
        // First block on the queue was received, update the start download time for the next one
        state->m_downloading_since = std::max(state->m_downloading_since, GetTime<std::chrono::microseconds>());
    }
    state->vBlocksInFlight.erase(list_it);

    state->nBlocksInFlight--;
    if (state->nBlocksInFlight == 0) {
        // Last validated block on the queue was received.
        m_peers_downloading_from--;
    }
    // Receiving any requested block clears the stall measurement for this peer.
    state->m_stalling_since = 0us;
    mapBlocksInFlight.erase(it);
}
812 
/* Mark a block as in flight from this peer.
 * Returns false, still setting pit, if the block was already in flight from the same peer.
 * pit will only be valid as long as the same cs_main lock is being held.
 */
bool PeerManagerImpl::BlockRequested(NodeId nodeid, const CBlockIndex& block, std::list<QueuedBlock>::iterator** pit)
{
    const uint256& hash{block.GetBlockHash()};

    CNodeState *state = State(nodeid);
    assert(state != nullptr);

    // Short-circuit most stuff in case it is from the same node
    std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
    if (itInFlight != mapBlocksInFlight.end() && itInFlight->second.first == nodeid) {
        if (pit) {
            *pit = &itInFlight->second.second;
        }
        return false;
    }

    // Make sure it's not listed somewhere already.
    RemoveBlockRequest(hash);

    // Only allocate a PartiallyDownloadedBlock when the caller wants the
    // iterator back (pit != nullptr), i.e. for compact-block reconstruction.
    std::list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(),
            {&block, std::unique_ptr<PartiallyDownloadedBlock>(pit ? new PartiallyDownloadedBlock(&m_mempool) : nullptr)});
    state->nBlocksInFlight++;
    if (state->nBlocksInFlight == 1) {
        // We're starting a block download (batch) from this peer.
        state->m_downloading_since = GetTime<std::chrono::microseconds>();
        m_peers_downloading_from++;
    }
    itInFlight = mapBlocksInFlight.insert(std::make_pair(hash, std::make_pair(nodeid, it))).first;
    if (pit) {
        *pit = &itInFlight->second.second;
    }
    return true;
}
846 
/** Possibly promote this peer to a BIP152 high-bandwidth compact-block
 *  announcer, demoting the oldest of the (at most 3) current announcers. */
void PeerManagerImpl::MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid)
{
    CNodeState* nodestate = State(nodeid);
    if (!nodestate || !nodestate->fSupportsDesiredCmpctVersion) {
        // Never ask from peers who can't provide witnesses.
        return;
    }
    if (nodestate->fProvidesHeaderAndIDs) {
        int num_outbound_hb_peers = 0;
        for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin(); it != lNodesAnnouncingHeaderAndIDs.end(); it++) {
            if (*it == nodeid) {
                // Already selected: just move this peer to the back (most recent).
                lNodesAnnouncingHeaderAndIDs.erase(it);
                lNodesAnnouncingHeaderAndIDs.push_back(nodeid);
                return;
            }
            CNodeState *state = State(*it);
            if (state != nullptr && !state->m_is_inbound) ++num_outbound_hb_peers;
        }
        if (nodestate->m_is_inbound) {
            // If we're adding an inbound HB peer, make sure we're not removing
            // our last outbound HB peer in the process.
            if (lNodesAnnouncingHeaderAndIDs.size() >= 3 && num_outbound_hb_peers == 1) {
                CNodeState *remove_node = State(lNodesAnnouncingHeaderAndIDs.front());
                if (remove_node != nullptr && !remove_node->m_is_inbound) {
                    // Put the HB outbound peer in the second slot, so that it
                    // doesn't get removed.
                    std::swap(lNodesAnnouncingHeaderAndIDs.front(), *std::next(lNodesAnnouncingHeaderAndIDs.begin()));
                }
            }
        }
        m_connman.ForNode(nodeid, [this](CNode* pfrom) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
            // Version 2 compact blocks carry witnesses; use it when we offer NODE_WITNESS.
            uint64_t nCMPCTBLOCKVersion = (pfrom->GetLocalServices() & NODE_WITNESS) ? 2 : 1;
            if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
                // As per BIP152, we only get 3 of our peers to announce
                // blocks using compact encodings.
                m_connman.ForNode(lNodesAnnouncingHeaderAndIDs.front(), [this, nCMPCTBLOCKVersion](CNode* pnodeStop){
                    m_connman.PushMessage(pnodeStop, CNetMsgMaker(pnodeStop->GetCommonVersion()).Make(NetMsgType::SENDCMPCT, /*fAnnounceUsingCMPCTBLOCK=*/false, nCMPCTBLOCKVersion));
                    // save BIP152 bandwidth state: we select peer to be low-bandwidth
                    pnodeStop->m_bip152_highbandwidth_to = false;
                    return true;
                });
                lNodesAnnouncingHeaderAndIDs.pop_front();
            }
            m_connman.PushMessage(pfrom, CNetMsgMaker(pfrom->GetCommonVersion()).Make(NetMsgType::SENDCMPCT, /*fAnnounceUsingCMPCTBLOCK=*/true, nCMPCTBLOCKVersion));
            // save BIP152 bandwidth state: we select peer to be high-bandwidth
            pfrom->m_bip152_highbandwidth_to = true;
            lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
            return true;
        });
    }
}
900 
901 bool PeerManagerImpl::TipMayBeStale()
902 {
904  const Consensus::Params& consensusParams = m_chainparams.GetConsensus();
905  if (m_last_tip_update == 0) {
906  m_last_tip_update = GetTime();
907  }
908  return m_last_tip_update < GetTime() - consensusParams.nPowTargetSpacing * 3 && mapBlocksInFlight.empty();
909 }
910 
911 bool PeerManagerImpl::CanDirectFetch()
912 {
913  return m_chainman.ActiveChain().Tip()->GetBlockTime() > GetAdjustedTime() - m_chainparams.GetConsensus().nPowTargetSpacing * 20;
914 }
915 
916 static bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
917 {
918  if (state->pindexBestKnownBlock && pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight))
919  return true;
920  if (state->pindexBestHeaderSent && pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight))
921  return true;
922  return false;
923 }
924 
925 void PeerManagerImpl::ProcessBlockAvailability(NodeId nodeid) {
926  CNodeState *state = State(nodeid);
927  assert(state != nullptr);
928 
929  if (!state->hashLastUnknownBlock.IsNull()) {
930  const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(state->hashLastUnknownBlock);
931  if (pindex && pindex->nChainWork > 0) {
932  if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
933  state->pindexBestKnownBlock = pindex;
934  }
935  state->hashLastUnknownBlock.SetNull();
936  }
937  }
938 }
939 
940 void PeerManagerImpl::UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) {
941  CNodeState *state = State(nodeid);
942  assert(state != nullptr);
943 
944  ProcessBlockAvailability(nodeid);
945 
946  const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(hash);
947  if (pindex && pindex->nChainWork > 0) {
948  // An actually better block was announced.
949  if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
950  state->pindexBestKnownBlock = pindex;
951  }
952  } else {
953  // An unknown block was announced; just assume that the latest one is the best one.
954  state->hashLastUnknownBlock = hash;
955  }
956 }
957 
/** Fill vBlocks with up to `count` blocks worth requesting from `nodeid`,
 *  walking forward from pindexLastCommonBlock within the download window.
 *  If nothing is fetchable only because another peer holds the window-edge
 *  block in flight, that peer's id is reported via nodeStaller. */
void PeerManagerImpl::FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller)
{
    if (count == 0)
        return;

    vBlocks.reserve(vBlocks.size() + count);
    CNodeState *state = State(nodeid);
    assert(state != nullptr);

    // Make sure pindexBestKnownBlock is up to date, we'll need it.
    ProcessBlockAvailability(nodeid);

    if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->nChainWork < m_chainman.ActiveChain().Tip()->nChainWork || state->pindexBestKnownBlock->nChainWork < nMinimumChainWork) {
        // This peer has nothing interesting.
        return;
    }

    if (state->pindexLastCommonBlock == nullptr) {
        // Bootstrap quickly by guessing a parent of our best tip is the forking point.
        // Guessing wrong in either direction is not a problem.
        state->pindexLastCommonBlock = m_chainman.ActiveChain()[std::min(state->pindexBestKnownBlock->nHeight, m_chainman.ActiveChain().Height())];
    }

    // If the peer reorganized, our previous pindexLastCommonBlock may not be an ancestor
    // of its current tip anymore. Go back enough to fix that.
    state->pindexLastCommonBlock = LastCommonAncestor(state->pindexLastCommonBlock, state->pindexBestKnownBlock);
    if (state->pindexLastCommonBlock == state->pindexBestKnownBlock)
        return;

    const Consensus::Params& consensusParams = m_chainparams.GetConsensus();
    std::vector<const CBlockIndex*> vToFetch;
    const CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
    // Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last
    // linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to
    // download that next block if the window were 1 larger.
    int nWindowEnd = state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW;
    int nMaxHeight = std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
    NodeId waitingfor = -1;
    while (pindexWalk->nHeight < nMaxHeight) {
        // Read up to 128 (or more, if more blocks than that are needed) successors of pindexWalk (towards
        // pindexBestKnownBlock) into vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as expensive
        // as iterating over ~100 CBlockIndex* entries anyway.
        int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight, std::max<int>(count - vBlocks.size(), 128));
        vToFetch.resize(nToFetch);
        // Jump ahead with one GetAncestor call, then walk pprev links backwards
        // to fill the batch in forward order.
        pindexWalk = state->pindexBestKnownBlock->GetAncestor(pindexWalk->nHeight + nToFetch);
        vToFetch[nToFetch - 1] = pindexWalk;
        for (unsigned int i = nToFetch - 1; i > 0; i--) {
            vToFetch[i - 1] = vToFetch[i]->pprev;
        }

        // Iterate over those blocks in vToFetch (in forward direction), adding the ones that
        // are not yet downloaded and not in flight to vBlocks. In the meantime, update
        // pindexLastCommonBlock as long as all ancestors are already downloaded, or if it's
        // already part of our chain (and therefore don't need it even if pruned).
        for (const CBlockIndex* pindex : vToFetch) {
            if (!pindex->IsValid(BLOCK_VALID_TREE)) {
                // We consider the chain that this peer is on invalid.
                return;
            }
            if (!State(nodeid)->fHaveWitness && DeploymentActiveAt(*pindex, consensusParams, Consensus::DEPLOYMENT_SEGWIT)) {
                // We wouldn't download this block or its descendants from this peer.
                return;
            }
            if (pindex->nStatus & BLOCK_HAVE_DATA || m_chainman.ActiveChain().Contains(pindex)) {
                if (pindex->HaveTxsDownloaded())
                    state->pindexLastCommonBlock = pindex;
            } else if (!IsBlockRequested(pindex->GetBlockHash())) {
                // The block is not already downloaded, and not yet in flight.
                if (pindex->nHeight > nWindowEnd) {
                    // We reached the end of the window.
                    if (vBlocks.size() == 0 && waitingfor != nodeid) {
                        // We aren't able to fetch anything, but we would be if the download window was one larger.
                        nodeStaller = waitingfor;
                    }
                    return;
                }
                vBlocks.push_back(pindex);
                if (vBlocks.size() == count) {
                    return;
                }
            } else if (waitingfor == -1) {
                // This is the first already-in-flight block.
                waitingfor = mapBlocksInFlight[pindex->GetBlockHash()].first;
            }
        }
    }
}
1045 
1046 } // namespace
1047 
/** Send our `version` handshake message to a peer. */
void PeerManagerImpl::PushNodeVersion(CNode& pnode, int64_t nTime)
{
    // Note that pnode->GetLocalServices() is a reflection of the local
    // services we were offering when the CNode object was created for this
    // peer.
    ServiceFlags nLocalNodeServices = pnode.GetLocalServices();
    uint64_t nonce = pnode.GetLocalNonce();
    const int nNodeStartingHeight{m_best_height};
    NodeId nodeid = pnode.GetId();
    CAddress addr = pnode.addr;

    // Only echo the peer's own address back if it is routable, not proxied, and
    // representable in the V1 addr format; otherwise send an empty address
    // carrying the peer's service bits.
    CAddress addrYou = addr.IsRoutable() && !IsProxy(addr) && addr.IsAddrV1Compatible() ?
                           addr :
                           CAddress(CService(), addr.nServices);
    CAddress addrMe = CAddress(CService(), nLocalNodeServices);

    // Signal tx relay only if we accept incoming transactions and the peer has
    // a tx-relay data structure (i.e. is not block-relay-only).
    const bool tx_relay = !m_ignore_incoming_txs && pnode.m_tx_relay != nullptr;
    m_connman.PushMessage(&pnode, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERSION, PROTOCOL_VERSION, (uint64_t)nLocalNodeServices, nTime, addrYou, addrMe,
            nonce, strSubVersion, nNodeStartingHeight, tx_relay));

    if (fLogIPs) {
        LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, us=%s, them=%s, txrelay=%d, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), addrYou.ToString(), tx_relay, nodeid);
    } else {
        LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, us=%s, txrelay=%d, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), tx_relay, nodeid);
    }
}
1074 
/** Register a transaction INV from a peer with the TxRequestTracker.
 *  Computes the request-time delay from the peer's preferredness, whether the
 *  announcement is by txid while wtxid-relay peers exist, and the peer's
 *  in-flight load. Drops the announcement entirely once the peer has
 *  MAX_PEER_TX_ANNOUNCEMENTS queued, unless it has Relay permission. */
void PeerManagerImpl::AddTxAnnouncement(const CNode& node, const GenTxid& gtxid, std::chrono::microseconds current_time)
{
    AssertLockHeld(::cs_main); // For m_txrequest
    NodeId nodeid = node.GetId();
    if (!node.HasPermission(NetPermissionFlags::Relay) && m_txrequest.Count(nodeid) >= MAX_PEER_TX_ANNOUNCEMENTS) {
        // Too many queued announcements from this peer
        return;
    }
    const CNodeState* state = State(nodeid);

    // Decide the TxRequestTracker parameters for this announcement:
    // - "preferred": if fPreferredDownload is set (= outbound, or NetPermissionFlags::NoBan permission)
    // - "reqtime": current time plus delays for:
    //   - NONPREF_PEER_TX_DELAY for announcements from non-preferred connections
    //   - TXID_RELAY_DELAY for txid announcements while wtxid peers are available
    //   - OVERLOADED_PEER_TX_DELAY for announcements from peers which have at least
    //     MAX_PEER_TX_REQUEST_IN_FLIGHT requests in flight (and don't have NetPermissionFlags::Relay).
    auto delay = std::chrono::microseconds{0};
    const bool preferred = state->fPreferredDownload;
    if (!preferred) delay += NONPREF_PEER_TX_DELAY;
    if (!gtxid.IsWtxid() && m_wtxid_relay_peers > 0) delay += TXID_RELAY_DELAY;
    const bool overloaded = !node.HasPermission(NetPermissionFlags::Relay) &&
        m_txrequest.CountInFlight(nodeid) >= MAX_PEER_TX_REQUEST_IN_FLIGHT;
    if (overloaded) delay += OVERLOADED_PEER_TX_DELAY;
    m_txrequest.ReceivedInv(nodeid, gtxid, preferred, current_time + delay);
}
1101 
1102 // This function is used for testing the stale tip eviction logic, see
1103 // denialofservice_tests.cpp
1104 void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds)
1105 {
1106  LOCK(cs_main);
1107  CNodeState *state = State(node);
1108  if (state) state->m_last_block_announcement = time_in_seconds;
1109 }
1110 
/** Set up the per-peer bookkeeping for a newly connected node: a CNodeState
 *  entry (guarded by cs_main) and a Peer entry (guarded by m_peer_mutex). For
 *  outbound connections we also send our VERSION message immediately. */
void PeerManagerImpl::InitializeNode(CNode *pnode)
{
    NodeId nodeid = pnode->GetId();
    {
        LOCK(cs_main);
        mapNodeState.emplace_hint(mapNodeState.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(pnode->IsInboundConn()));
        // A brand-new peer must have no pending tx announcements.
        assert(m_txrequest.Count(nodeid) == 0);
    }
    {
        // Addr relay is disabled for outbound block-relay-only peers to
        // prevent adversaries from inferring these links from addr traffic.
        PeerRef peer = std::make_shared<Peer>(nodeid, /* addr_relay = */ !pnode->IsBlockOnlyConn());
        LOCK(m_peer_mutex);
        m_peer_map.emplace_hint(m_peer_map.end(), nodeid, std::move(peer));
    }
    if (!pnode->IsInboundConn()) {
        PushNodeVersion(*pnode, GetTime());
    }
}
1130 
/** Periodically re-announce transactions the mempool still marks as
 *  "unbroadcast", and reschedule itself with a randomized interval. */
void PeerManagerImpl::ReattemptInitialBroadcast(CScheduler& scheduler)
{
    std::set<uint256> unbroadcast_txids = m_mempool.GetUnbroadcastTxs();

    for (const auto& txid : unbroadcast_txids) {
        CTransactionRef tx = m_mempool.get(txid);

        if (tx != nullptr) {
            LOCK(cs_main);
            _RelayTransaction(txid, tx->GetWitnessHash());
        } else {
            // The tx has left the mempool; stop tracking it as unbroadcast.
            m_mempool.RemoveUnbroadcastTx(txid, true);
        }
    }

    // Schedule next run for 10-15 minutes in the future.
    // We add randomness on every cycle to avoid the possibility of P2P fingerprinting.
    const std::chrono::milliseconds delta = std::chrono::minutes{10} + GetRandMillis(std::chrono::minutes{5});
    scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); }, delta);
}
1151 
/** Tear down all per-peer state when a connection closes: remove the Peer and
 *  CNodeState entries, unwind the aggregate counters they contributed to, and
 *  run consistency checks once the last peer is gone. */
void PeerManagerImpl::FinalizeNode(const CNode& node)
{
    NodeId nodeid = node.GetId();
    int misbehavior{0};
    {
        LOCK(cs_main);
        {
            // We remove the PeerRef from g_peer_map here, but we don't always
            // destruct the Peer. Sometimes another thread is still holding a
            // PeerRef, so the refcount is >= 1. Be careful not to do any
            // processing here that assumes Peer won't be changed before it's
            // destructed.
            PeerRef peer = RemovePeer(nodeid);
            assert(peer != nullptr);
            misbehavior = WITH_LOCK(peer->m_misbehavior_mutex, return peer->m_misbehavior_score);
        }
        CNodeState *state = State(nodeid);
        assert(state != nullptr);

        if (state->fSyncStarted)
            nSyncStarted--;

        // Forget any blocks this peer was downloading for us.
        for (const QueuedBlock& entry : state->vBlocksInFlight) {
            mapBlocksInFlight.erase(entry.pindex->GetBlockHash());
        }
        WITH_LOCK(g_cs_orphans, m_orphanage.EraseForPeer(nodeid));
        m_txrequest.DisconnectedPeer(nodeid);
        // Undo this peer's contributions to the aggregate counters.
        nPreferredDownload -= state->fPreferredDownload;
        m_peers_downloading_from -= (state->nBlocksInFlight != 0);
        assert(m_peers_downloading_from >= 0);
        m_outbound_peers_with_protect_from_disconnect -= state->m_chain_sync.m_protect;
        assert(m_outbound_peers_with_protect_from_disconnect >= 0);
        m_wtxid_relay_peers -= state->m_wtxid_relay;
        assert(m_wtxid_relay_peers >= 0);

        mapNodeState.erase(nodeid);

        if (mapNodeState.empty()) {
            // Do a consistency check after the last peer is removed.
            assert(mapBlocksInFlight.empty());
            assert(nPreferredDownload == 0);
            assert(m_peers_downloading_from == 0);
            assert(m_outbound_peers_with_protect_from_disconnect == 0);
            assert(m_wtxid_relay_peers == 0);
            assert(m_txrequest.Size() == 0);
        }
    } // cs_main
    if (node.fSuccessfullyConnected && misbehavior == 0 &&
        !node.IsBlockOnlyConn() && !node.IsInboundConn()) {
        // Only change visible addrman state for full outbound peers. We don't
        // call Connected() for feeler connections since they don't have
        // fSuccessfullyConnected set.
        m_addrman.Connected(node.addr);
    }
    LogPrint(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid);
}
1208 
1209 PeerRef PeerManagerImpl::GetPeerRef(NodeId id) const
1210 {
1211  LOCK(m_peer_mutex);
1212  auto it = m_peer_map.find(id);
1213  return it != m_peer_map.end() ? it->second : nullptr;
1214 }
1215 
1216 PeerRef PeerManagerImpl::RemovePeer(NodeId id)
1217 {
1218  PeerRef ret;
1219  LOCK(m_peer_mutex);
1220  auto it = m_peer_map.find(id);
1221  if (it != m_peer_map.end()) {
1222  ret = std::move(it->second);
1223  m_peer_map.erase(it);
1224  }
1225  return ret;
1226 }
1227 
/** Fill `stats` from both the cs_main-guarded CNodeState and the Peer object.
 *  Returns false if either structure no longer exists for this peer (e.g. it
 *  disconnected while we were querying). */
bool PeerManagerImpl::GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) const
{
    {
        LOCK(cs_main);
        CNodeState* state = State(nodeid);
        if (state == nullptr)
            return false;
        stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1;
        stats.nCommonHeight = state->pindexLastCommonBlock ? state->pindexLastCommonBlock->nHeight : -1;
        for (const QueuedBlock& queue : state->vBlocksInFlight) {
            if (queue.pindex)
                stats.vHeightInFlight.push_back(queue.pindex->nHeight);
        }
    }

    PeerRef peer = GetPeerRef(nodeid);
    if (peer == nullptr) return false;
    stats.m_starting_height = peer->m_starting_height;
    // It is common for nodes with good ping times to suddenly become lagged,
    // due to a new block arriving or other large transfer.
    // Merely reporting pingtime might fool the caller into thinking the node was still responsive,
    // since pingtime does not update until the ping is complete, which might take a while.
    // So, if a ping is taking an unusually long time in flight,
    // the caller can immediately detect that this is happening.
    std::chrono::microseconds ping_wait{0};
    if ((0 != peer->m_ping_nonce_sent) && (0 != peer->m_ping_start.load().count())) {
        // A ping is outstanding: report how long it has been in flight.
        ping_wait = GetTime<std::chrono::microseconds>() - peer->m_ping_start.load();
    }

    stats.m_ping_wait = ping_wait;
    stats.m_addr_processed = peer->m_addr_processed.load();
    stats.m_addr_rate_limited = peer->m_addr_rate_limited.load();

    return true;
}
1263 
1264 void PeerManagerImpl::AddToCompactExtraTransactions(const CTransactionRef& tx)
1265 {
1266  size_t max_extra_txn = gArgs.GetArg("-blockreconstructionextratxn", DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN);
1267  if (max_extra_txn <= 0)
1268  return;
1269  if (!vExtraTxnForCompact.size())
1270  vExtraTxnForCompact.resize(max_extra_txn);
1271  vExtraTxnForCompact[vExtraTxnForCompactIt] = std::make_pair(tx->GetWitnessHash(), tx);
1272  vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % max_extra_txn;
1273 }
1274 
1275 void PeerManagerImpl::Misbehaving(const NodeId pnode, const int howmuch, const std::string& message)
1276 {
1277  assert(howmuch > 0);
1278 
1279  PeerRef peer = GetPeerRef(pnode);
1280  if (peer == nullptr) return;
1281 
1282  LOCK(peer->m_misbehavior_mutex);
1283  peer->m_misbehavior_score += howmuch;
1284  const std::string message_prefixed = message.empty() ? "" : (": " + message);
1285  if (peer->m_misbehavior_score >= DISCOURAGEMENT_THRESHOLD && peer->m_misbehavior_score - howmuch < DISCOURAGEMENT_THRESHOLD) {
1286  LogPrint(BCLog::NET, "Misbehaving: peer=%d (%d -> %d) DISCOURAGE THRESHOLD EXCEEDED%s\n", pnode, peer->m_misbehavior_score - howmuch, peer->m_misbehavior_score, message_prefixed);
1287  peer->m_should_discourage = true;
1288  } else {
1289  LogPrint(BCLog::NET, "Misbehaving: peer=%d (%d -> %d)%s\n", pnode, peer->m_misbehavior_score - howmuch, peer->m_misbehavior_score, message_prefixed);
1290  }
1291 }
1292 
1293 bool PeerManagerImpl::MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState& state,
1294  bool via_compact_block, const std::string& message)
1295 {
1296  switch (state.GetResult()) {
1298  break;
1299  // The node is providing invalid data:
1302  if (!via_compact_block) {
1303  Misbehaving(nodeid, 100, message);
1304  return true;
1305  }
1306  break;
1308  {
1309  LOCK(cs_main);
1310  CNodeState *node_state = State(nodeid);
1311  if (node_state == nullptr) {
1312  break;
1313  }
1314 
1315  // Discourage outbound (but not inbound) peers if on an invalid chain.
1316  // Exempt HB compact block peers. Manual connections are always protected from discouragement.
1317  if (!via_compact_block && !node_state->m_is_inbound) {
1318  Misbehaving(nodeid, 100, message);
1319  return true;
1320  }
1321  break;
1322  }
1326  Misbehaving(nodeid, 100, message);
1327  return true;
1328  // Conflicting (but not necessarily invalid) data or different policy:
1330  // TODO: Handle this much more gracefully (10 DoS points is super arbitrary)
1331  Misbehaving(nodeid, 10, message);
1332  return true;
1335  break;
1336  }
1337  if (message != "") {
1338  LogPrint(BCLog::NET, "peer=%d: %s\n", nodeid, message);
1339  }
1340  return false;
1341 }
1342 
1343 bool PeerManagerImpl::MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state, const std::string& message)
1344 {
1345  switch (state.GetResult()) {
1347  break;
1348  // The node is providing invalid data:
1350  Misbehaving(nodeid, 100, message);
1351  return true;
1352  // Conflicting (but not necessarily invalid) data or different policy:
1362  break;
1363  }
1364  if (message != "") {
1365  LogPrint(BCLog::NET, "peer=%d: %s\n", nodeid, message);
1366  }
1367  return false;
1368 }
1369 
1370 bool PeerManagerImpl::BlockRequestAllowed(const CBlockIndex* pindex)
1371 {
1373  if (m_chainman.ActiveChain().Contains(pindex)) return true;
1374  return pindex->IsValid(BLOCK_VALID_SCRIPTS) && (pindexBestHeader != nullptr) &&
1377 }
1378 
/** Factory: construct the concrete PeerManagerImpl behind the abstract
 *  PeerManager interface, keeping the implementation out of the header. */
std::unique_ptr<PeerManager> PeerManager::make(const CChainParams& chainparams, CConnman& connman, CAddrMan& addrman,
                                               BanMan* banman, CScheduler& scheduler, ChainstateManager& chainman,
                                               CTxMemPool& pool, bool ignore_incoming_txs)
{
    return std::make_unique<PeerManagerImpl>(chainparams, connman, addrman, banman, scheduler, chainman, pool, ignore_incoming_txs);
}
1385 
/** Construct the peer manager: set up the rolling bloom filters used to
 *  suppress duplicate tx processing and schedule the recurring stale-tip /
 *  peer-eviction and unbroadcast-rebroadcast tasks. */
PeerManagerImpl::PeerManagerImpl(const CChainParams& chainparams, CConnman& connman, CAddrMan& addrman,
                                 BanMan* banman, CScheduler& scheduler, ChainstateManager& chainman,
                                 CTxMemPool& pool, bool ignore_incoming_txs)
    : m_chainparams(chainparams),
      m_connman(connman),
      m_addrman(addrman),
      m_banman(banman),
      m_chainman(chainman),
      m_mempool(pool),
      m_stale_tip_check_time(0),
      m_ignore_incoming_txs(ignore_incoming_txs)
{
    // Initialize global variables that cannot be constructed at startup.
    recentRejects.reset(new CRollingBloomFilter(120000, 0.000001));

    // Blocks don't typically have more than 4000 transactions, so this should
    // be at least six blocks (~1 hr) worth of transactions that we can store,
    // inserting both a txid and wtxid for every observed transaction.
    // If the number of transactions appearing in a block goes up, or if we are
    // seeing getdata requests more than an hour after initial announcement, we
    // can increase this number.
    // The false positive rate of 1/1M should come out to less than 1
    // transaction per day that would be inadvertently ignored (which is the
    // same probability that we have in the reject filter).
    m_recent_confirmed_transactions.reset(new CRollingBloomFilter(48000, 0.000001));

    // Stale tip checking and peer eviction are on two different timers, but we
    // don't want them to get out of sync due to drift in the scheduler, so we
    // combine them in one function and schedule at the quicker (peer-eviction)
    // timer.
    static_assert(EXTRA_PEER_CHECK_INTERVAL < STALE_CHECK_INTERVAL, "peer eviction timer should be less than stale tip check timer");
    scheduler.scheduleEvery([this] { this->CheckForStaleTipAndEvictPeers(); }, std::chrono::seconds{EXTRA_PEER_CHECK_INTERVAL});

    // schedule next run for 10-15 minutes in the future
    const std::chrono::milliseconds delta = std::chrono::minutes{10} + GetRandMillis(std::chrono::minutes{5});
    scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); }, delta);
}
1423 
/** Validation interface callback for a newly connected block: erase orphans
 *  associated with the block, record the tip-update time (used by stale-tip
 *  detection), remember confirmed txids/wtxids, and drop those hashes from
 *  the tx request tracker. */
void PeerManagerImpl::BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindex)
{
    m_orphanage.EraseForBlock(*pblock);
    m_last_tip_update = GetTime();

    {
        LOCK(m_recent_confirmed_transactions_mutex);
        for (const auto& ptx : pblock->vtx) {
            // Insert both hash flavors when they differ (segwit txs have
            // distinct txid and wtxid).
            m_recent_confirmed_transactions->insert(ptx->GetHash());
            if (ptx->GetHash() != ptx->GetWitnessHash()) {
                m_recent_confirmed_transactions->insert(ptx->GetWitnessHash());
            }
        }
    }
    {
        LOCK(cs_main);
        for (const auto& ptx : pblock->vtx) {
            // Drop now-confirmed transactions from the request tracker.
            m_txrequest.ForgetTxHash(ptx->GetHash());
            m_txrequest.ForgetTxHash(ptx->GetWitnessHash());
        }
    }
}
1451 
/** Validation interface callback for a disconnected (reorged-out) block. */
void PeerManagerImpl::BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex)
{
    // To avoid relay problems with transactions that were previously
    // confirmed, clear our filter of recently confirmed transactions whenever
    // there's a reorg.
    // This means that in a 1-block reorg (where 1 block is disconnected and
    // then another block reconnected), our filter will drop to having only one
    // block's worth of transactions in it, but that should be fine, since
    // presumably the most common case of relaying a confirmed transaction
    // should be just after a new block containing it is found.
    LOCK(m_recent_confirmed_transactions_mutex);
    m_recent_confirmed_transactions->reset();
}
1465 
1466 // All of the following cache a recent block, and are protected by cs_most_recent_block
1468 static std::shared_ptr<const CBlock> most_recent_block GUARDED_BY(cs_most_recent_block);
1469 static std::shared_ptr<const CBlockHeaderAndShortTxIDs> most_recent_compact_block GUARDED_BY(cs_most_recent_block);
1470 static uint256 most_recent_block_hash GUARDED_BY(cs_most_recent_block);
1471 static bool fWitnessesPresentInMostRecentCompactBlock GUARDED_BY(cs_most_recent_block);
1472 
1477 void PeerManagerImpl::NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& pblock)
1478 {
1479  std::shared_ptr<const CBlockHeaderAndShortTxIDs> pcmpctblock = std::make_shared<const CBlockHeaderAndShortTxIDs> (*pblock, true);
1480  const CNetMsgMaker msgMaker(PROTOCOL_VERSION);
1481 
1482  LOCK(cs_main);
1483 
1484  static int nHighestFastAnnounce = 0;
1485  if (pindex->nHeight <= nHighestFastAnnounce)
1486  return;
1487  nHighestFastAnnounce = pindex->nHeight;
1488 
1489  bool fWitnessEnabled = DeploymentActiveAt(*pindex, m_chainparams.GetConsensus(), Consensus::DEPLOYMENT_SEGWIT);
1490  uint256 hashBlock(pblock->GetHash());
1491 
1492  {
1494  most_recent_block_hash = hashBlock;
1495  most_recent_block = pblock;
1496  most_recent_compact_block = pcmpctblock;
1497  fWitnessesPresentInMostRecentCompactBlock = fWitnessEnabled;
1498  }
1499 
1500  m_connman.ForEachNode([this, &pcmpctblock, pindex, &msgMaker, fWitnessEnabled, &hashBlock](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
1502 
1503  // TODO: Avoid the repeated-serialization here
1504  if (pnode->GetCommonVersion() < INVALID_CB_NO_BAN_VERSION || pnode->fDisconnect)
1505  return;
1506  ProcessBlockAvailability(pnode->GetId());
1507  CNodeState &state = *State(pnode->GetId());
1508  // If the peer has, or we announced to them the previous block already,
1509  // but we don't think they have this one, go ahead and announce it
1510  if (state.fPreferHeaderAndIDs && (!fWitnessEnabled || state.fWantsCmpctWitness) &&
1511  !PeerHasHeader(&state, pindex) && PeerHasHeader(&state, pindex->pprev)) {
1512 
1513  LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", "PeerManager::NewPoWValidBlock",
1514  hashBlock.ToString(), pnode->GetId());
1515  m_connman.PushMessage(pnode, msgMaker.Make(NetMsgType::CMPCTBLOCK, *pcmpctblock));
1516  state.pindexBestHeaderSent = pindex;
1517  }
1518  });
1519 }
1520 
/**
 * Update our best height and queue for announcement any block hashes which
 * weren't previously in the active chain. Skips announcement entirely during
 * initial block download.
 */
void PeerManagerImpl::UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload)
{
    SetBestHeight(pindexNew->nHeight);
    SetServiceFlagsIBDCache(!fInitialDownload);

    // Don't relay inventory during initial block download.
    if (fInitialDownload) return;

    // Find the hashes of all blocks that weren't previously in the best chain.
    std::vector<uint256> vHashes;
    const CBlockIndex *pindexToAnnounce = pindexNew;
    while (pindexToAnnounce != pindexFork) {
        vHashes.push_back(pindexToAnnounce->GetBlockHash());
        pindexToAnnounce = pindexToAnnounce->pprev;
        if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) {
            // Limit announcements in case of a huge reorganization.
            // Rely on the peer's synchronization mechanism in that case.
            break;
        }
    }

    {
        LOCK(m_peer_mutex);
        for (auto& it : m_peer_map) {
            Peer& peer = *it.second;
            LOCK(peer.m_block_inv_mutex);
            // vHashes was collected tip-first; push oldest-first for relay.
            for (const uint256& hash : reverse_iterate(vHashes)) {
                peer.m_blocks_for_headers_relay.push_back(hash);
            }
        }
    }

    m_connman.WakeMessageHandler();
}
1559 
/**
 * Handle the results of a block being checked: possibly punish the peer that
 * sent an invalid block, and consider upgrading the source of a valid,
 * best-candidate block to a high-bandwidth compact block peer.
 */
void PeerManagerImpl::BlockChecked(const CBlock& block, const BlockValidationState& state)
{
    LOCK(cs_main);

    const uint256 hash(block.GetHash());
    std::map<uint256, std::pair<NodeId, bool>>::iterator it = mapBlockSource.find(hash);

    // If the block failed validation, we know where it came from and we're still connected
    // to that peer, maybe punish.
    if (state.IsInvalid() &&
        it != mapBlockSource.end() &&
        State(it->second.first)) {
        MaybePunishNodeForBlock(/*nodeid=*/ it->second.first, state, /*via_compact_block=*/ !it->second.second);
    }
    // Check that:
    // 1. The block is valid
    // 2. We're not in initial block download
    // 3. This is currently the best block we're aware of. We haven't updated
    //    the tip yet so we have no way to check this directly here. Instead we
    //    just check that there are currently no other blocks in flight.
    else if (state.IsValid() &&
             !m_chainman.ActiveChainstate().IsInitialBlockDownload() &&
             mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) {
        if (it != mapBlockSource.end()) {
            MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first);
        }
    }
    // The source entry is only needed for the checks above; drop it now.
    if (it != mapBlockSource.end())
        mapBlockSource.erase(it);
}
1594 
1596 //
1597 // Messages
1598 //
1599 
1600 
/** Whether we already have (or have recently rejected/confirmed) this
 *  transaction: checks the orphanage, the recently-confirmed filter, the
 *  recent-rejects filter, and the mempool. */
bool PeerManagerImpl::AlreadyHaveTx(const GenTxid& gtxid)
{
    assert(recentRejects);
    if (m_chainman.ActiveChain().Tip()->GetBlockHash() != hashRecentRejectsChainTip) {
        // If the chain tip has changed previously rejected transactions
        // might be now valid, e.g. due to a nLockTime'd tx becoming valid,
        // or a double-spend. Reset the rejects filter and give those
        // txs a second chance.
        hashRecentRejectsChainTip = m_chainman.ActiveChain().Tip()->GetBlockHash();
        recentRejects->reset();
    }

    const uint256& hash = gtxid.GetHash();

    if (m_orphanage.HaveTx(gtxid)) return true;

    {
        LOCK(m_recent_confirmed_transactions_mutex);
        if (m_recent_confirmed_transactions->contains(hash)) return true;
    }

    return recentRejects->contains(hash) || m_mempool.exists(gtxid);
}
1624 
1625 bool PeerManagerImpl::AlreadyHaveBlock(const uint256& block_hash)
1626 {
1627  return m_chainman.m_blockman.LookupBlockIndex(block_hash) != nullptr;
1628 }
1629 
1630 void PeerManagerImpl::SendPings()
1631 {
1632  LOCK(m_peer_mutex);
1633  for(auto& it : m_peer_map) it.second->m_ping_queued = true;
1634 }
1635 
/** Public relay entry point: take cs_main and delegate to _RelayTransaction. */
void PeerManagerImpl::RelayTransaction(const uint256& txid, const uint256& wtxid)
{
    WITH_LOCK(cs_main, _RelayTransaction(txid, wtxid););
}
1640 
1641 void PeerManagerImpl::_RelayTransaction(const uint256& txid, const uint256& wtxid)
1642 {
1643  m_connman.ForEachNode([&txid, &wtxid](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
1645 
1646  CNodeState* state = State(pnode->GetId());
1647  if (state == nullptr) return;
1648  if (state->m_wtxid_relay) {
1649  pnode->PushTxInventory(wtxid);
1650  } else {
1651  pnode->PushTxInventory(txid);
1652  }
1653  });
1654 }
1655 
/** Relay an address to a small, deterministically chosen subset of peers
 *  (excluding the originator), so repeated announcements within a 24h window
 *  hit the same peers and are deduplicated by their m_addr_known filters. */
void PeerManagerImpl::RelayAddress(NodeId originator,
                                   const CAddress& addr,
                                   bool fReachable)
{
    // We choose the same nodes within a given 24h window (if the list of connected
    // nodes does not change) and we don't relay to nodes that already know an
    // address. So within 24h we will likely relay a given address once. This is to
    // prevent a peer from unjustly giving their address better propagation by sending
    // it to us repeatedly.

    if (!fReachable && !addr.IsRelayable()) return;

    // Relay to a limited number of other nodes
    // Use deterministic randomness to send to the same nodes for 24 hours
    // at a time so the m_addr_knowns of the chosen nodes prevent repeats
    uint64_t hashAddr = addr.GetHash();
    const CSipHasher hasher = m_connman.GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY).Write(hashAddr << 32).Write((GetTime() + hashAddr) / (24 * 60 * 60));
    FastRandomContext insecure_rand;

    // Relay reachable addresses to 2 peers. Unreachable addresses are relayed randomly to 1 or 2 peers.
    unsigned int nRelayNodes = (fReachable || (hasher.Finalize() & 1)) ? 2 : 1;

    // Keep the nRelayNodes peers with the highest per-peer hash keys.
    std::array<std::pair<uint64_t, Peer*>, 2> best{{{0, nullptr}, {0, nullptr}}};
    assert(nRelayNodes <= best.size());

    LOCK(m_peer_mutex);

    for (auto& [id, peer] : m_peer_map) {
        if (RelayAddrsWithPeer(*peer) && id != originator && IsAddrCompatible(*peer, addr)) {
            uint64_t hashKey = CSipHasher(hasher).Write(id).Finalize();
            for (unsigned int i = 0; i < nRelayNodes; i++) {
                if (hashKey > best[i].first) {
                    // Shift lower-ranked entries down and insert at slot i.
                    std::copy(best.begin() + i, best.begin() + nRelayNodes - 1, best.begin() + i + 1);
                    best[i] = std::make_pair(hashKey, peer.get());
                    break;
                }
            }
        }
    };

    for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) {
        PushAddress(*best[i].second, addr, insecure_rand);
    }
}
1700 
/** Serve a single block-type getdata entry (BLOCK, WITNESS_BLOCK,
 *  FILTERED_BLOCK or CMPCT_BLOCK). Enforces the historical-block serving
 *  limit and the NODE_NETWORK_LIMITED depth limit (both of which may
 *  disconnect the peer), and finally triggers the peer's next getblocks
 *  batch if this was its continuation block. */
void PeerManagerImpl::ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& inv)
{
    std::shared_ptr<const CBlock> a_recent_block;
    std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block;
    bool fWitnessesPresentInARecentCompactBlock;
    {
        // NOTE(review): this scope snapshots the most_recent_block* cache,
        // which is GUARDED_BY(cs_most_recent_block); the LOCK statement
        // appears to have been lost in extraction — confirm against upstream.
        a_recent_block = most_recent_block;
        a_recent_compact_block = most_recent_compact_block;
        fWitnessesPresentInARecentCompactBlock = fWitnessesPresentInMostRecentCompactBlock;
    }

    bool need_activate_chain = false;
    {
        LOCK(cs_main);
        const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(inv.hash);
        if (pindex) {
            if (pindex->HaveTxsDownloaded() && !pindex->IsValid(BLOCK_VALID_SCRIPTS) &&
                    pindex->IsValid(BLOCK_VALID_TREE)) {
                // If we have the block and all of its parents, but have not yet validated it,
                // we might be in the middle of connecting it (ie in the unlock of cs_main
                // before ActivateBestChain but after AcceptBlock).
                // In this case, we need to run ActivateBestChain prior to checking the relay
                // conditions below.
                need_activate_chain = true;
            }
        }
    } // release cs_main before calling ActivateBestChain
    if (need_activate_chain) {
        BlockValidationState state;
        if (!m_chainman.ActiveChainstate().ActivateBestChain(state, a_recent_block)) {
            LogPrint(BCLog::NET, "failed to activate chain (%s)\n", state.ToString());
        }
    }

    LOCK(cs_main);
    const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(inv.hash);
    if (!pindex) {
        return;
    }
    if (!BlockRequestAllowed(pindex)) {
        LogPrint(BCLog::NET, "%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom.GetId());
        return;
    }
    const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
    // disconnect node in case we have reached the outbound limit for serving historical blocks
    if (m_connman.OutboundTargetReached(true) &&
        (((pindexBestHeader != nullptr) && (pindexBestHeader->GetBlockTime() - pindex->GetBlockTime() > HISTORICAL_BLOCK_AGE)) || inv.IsMsgFilteredBlk()) &&
        !pfrom.HasPermission(NetPermissionFlags::Download) // nodes with the download permission may exceed target
    ) {
        LogPrint(BCLog::NET, "historical block serving limit reached, disconnect peer=%d\n", pfrom.GetId());
        pfrom.fDisconnect = true;
        return;
    }
    // Avoid leaking prune-height by never sending blocks below the NODE_NETWORK_LIMITED threshold
    if (!pfrom.HasPermission(NetPermissionFlags::NoBan) && (
            (((pfrom.GetLocalServices() & NODE_NETWORK_LIMITED) == NODE_NETWORK_LIMITED) && ((pfrom.GetLocalServices() & NODE_NETWORK) != NODE_NETWORK) && (m_chainman.ActiveChain().Tip()->nHeight - pindex->nHeight > (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2 /* add two blocks buffer extension for possible races */) )
       )) {
        LogPrint(BCLog::NET, "Ignore block request below NODE_NETWORK_LIMITED threshold, disconnect peer=%d\n", pfrom.GetId());
        //disconnect node and prevent it from stalling (would otherwise wait for the missing block)
        pfrom.fDisconnect = true;
        return;
    }
    // Pruned nodes may have deleted the block, so check whether
    // it's available before trying to send.
    if (!(pindex->nStatus & BLOCK_HAVE_DATA)) {
        return;
    }
    std::shared_ptr<const CBlock> pblock;
    if (a_recent_block && a_recent_block->GetHash() == pindex->GetBlockHash()) {
        pblock = a_recent_block;
    } else if (inv.IsMsgWitnessBlk()) {
        // Fast-path: in this case it is possible to serve the block directly from disk,
        // as the network format matches the format on disk
        std::vector<uint8_t> block_data;
        if (!ReadRawBlockFromDisk(block_data, pindex, m_chainparams.MessageStart())) {
            assert(!"cannot load block from disk");
        }
        m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::BLOCK, MakeSpan(block_data)));
        // Don't set pblock as we've sent the block
    } else {
        // Send block from disk
        std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>();
        if (!ReadBlockFromDisk(*pblockRead, pindex, m_chainparams.GetConsensus())) {
            assert(!"cannot load block from disk");
        }
        pblock = pblockRead;
    }
    if (pblock) {
        if (inv.IsMsgBlk()) {
            // Plain BLOCK request: strip witness data when serializing.
            m_connman.PushMessage(&pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, *pblock));
        } else if (inv.IsMsgWitnessBlk()) {
            m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::BLOCK, *pblock));
        } else if (inv.IsMsgFilteredBlk()) {
            bool sendMerkleBlock = false;
            CMerkleBlock merkleBlock;
            if (pfrom.m_tx_relay != nullptr) {
                LOCK(pfrom.m_tx_relay->cs_filter);
                if (pfrom.m_tx_relay->pfilter) {
                    sendMerkleBlock = true;
                    merkleBlock = CMerkleBlock(*pblock, *pfrom.m_tx_relay->pfilter);
                }
            }
            if (sendMerkleBlock) {
                m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::MERKLEBLOCK, merkleBlock));
                // CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
                // This avoids hurting performance by pointlessly requiring a round-trip
                // Note that there is currently no way for a node to request any single transactions we didn't send here -
                // they must either disconnect and retry or request the full block.
                // Thus, the protocol spec specified allows for us to provide duplicate txn here,
                // however we MUST always provide at least what the remote peer needs
                typedef std::pair<unsigned int, uint256> PairType;
                for (PairType& pair : merkleBlock.vMatchedTxn)
                    m_connman.PushMessage(&pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::TX, *pblock->vtx[pair.first]));
            }
            // else
            // no response
        } else if (inv.IsMsgCmpctBlk()) {
            // If a peer is asking for old blocks, we're almost guaranteed
            // they won't have a useful mempool to match against a compact block,
            // and we don't feel like constructing the object for them, so
            // instead we respond with the full, non-compact block.
            bool fPeerWantsWitness = State(pfrom.GetId())->fWantsCmpctWitness;
            int nSendFlags = fPeerWantsWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
            if (CanDirectFetch() && pindex->nHeight >= m_chainman.ActiveChain().Height() - MAX_CMPCTBLOCK_DEPTH) {
                if ((fPeerWantsWitness || !fWitnessesPresentInARecentCompactBlock) && a_recent_compact_block && a_recent_compact_block->header.GetHash() == pindex->GetBlockHash()) {
                    m_connman.PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, *a_recent_compact_block));
                } else {
                    CBlockHeaderAndShortTxIDs cmpctblock(*pblock, fPeerWantsWitness);
                    m_connman.PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
                }
            } else {
                m_connman.PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCK, *pblock));
            }
        }
    }

    {
        LOCK(peer.m_block_inv_mutex);
        // Trigger the peer node to send a getblocks request for the next batch of inventory
        if (inv.hash == peer.m_continuation_block) {
            // Send immediately. This must send even if redundant,
            // and we want it right after the last block so they don't
            // wait for other stuff first.
            std::vector<CInv> vInv;
            vInv.push_back(CInv(MSG_BLOCK, m_chainman.ActiveChain().Tip()->GetBlockHash()));
            m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::INV, vInv));
            peer.m_continuation_block.SetNull();
        }
    }
}
1852 
/** Look up a transaction to serve in response to a GETDATA: from the mempool
 *  when the tx is old enough (or covered by a prior MEMPOOL request), or —
 *  for transactions we announced to this peer recently — from the mempool or
 *  the relay pool. Returns an empty ref if the request is not permitted. */
CTransactionRef PeerManagerImpl::FindTxForGetData(const CNode& peer, const GenTxid& gtxid, const std::chrono::seconds mempool_req, const std::chrono::seconds now)
{
    auto txinfo = m_mempool.info(gtxid);
    if (txinfo.tx) {
        // If a TX could have been INVed in reply to a MEMPOOL request,
        // or is older than UNCONDITIONAL_RELAY_DELAY, permit the request
        // unconditionally.
        if ((mempool_req.count() && txinfo.m_time <= mempool_req) || txinfo.m_time <= now - UNCONDITIONAL_RELAY_DELAY) {
            return std::move(txinfo.tx);
        }
    }

    {
        LOCK(cs_main);
        // Otherwise, the transaction must have been announced recently.
        if (State(peer.GetId())->m_recently_announced_invs.contains(gtxid.GetHash())) {
            // If it was, it can be relayed from either the mempool...
            if (txinfo.tx) return std::move(txinfo.tx);
            // ... or the relay pool.
            auto mi = mapRelay.find(gtxid.GetHash());
            if (mi != mapRelay.end()) return mi->second;
        }
    }

    return {};
}
1879 
1880 void PeerManagerImpl::ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic<bool>& interruptMsgProc)
1881 {
1883 
1884  std::deque<CInv>::iterator it = peer.m_getdata_requests.begin();
1885  std::vector<CInv> vNotFound;
1886  const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
1887 
1888  const std::chrono::seconds now = GetTime<std::chrono::seconds>();
1889  // Get last mempool request time
1890  const std::chrono::seconds mempool_req = pfrom.m_tx_relay != nullptr ? pfrom.m_tx_relay->m_last_mempool_req.load()
1891  : std::chrono::seconds::min();
1892 
1893  // Process as many TX items from the front of the getdata queue as
1894  // possible, since they're common and it's efficient to batch process
1895  // them.
1896  while (it != peer.m_getdata_requests.end() && it->IsGenTxMsg()) {
1897  if (interruptMsgProc) return;
1898  // The send buffer provides backpressure. If there's no space in
1899  // the buffer, pause processing until the next call.
1900  if (pfrom.fPauseSend) break;
1901 
1902  const CInv &inv = *it++;
1903 
1904  if (pfrom.m_tx_relay == nullptr) {
1905  // Ignore GETDATA requests for transactions from blocks-only peers.
1906  continue;
1907  }
1908 
1909  CTransactionRef tx = FindTxForGetData(pfrom, ToGenTxid(inv), mempool_req, now);
1910  if (tx) {
1911  // WTX and WITNESS_TX imply we serialize with witness
1912  int nSendFlags = (inv.IsMsgTx() ? SERIALIZE_TRANSACTION_NO_WITNESS : 0);
1913  m_connman.PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *tx));
1914  m_mempool.RemoveUnbroadcastTx(tx->GetHash());
1915  // As we're going to send tx, make sure its unconfirmed parents are made requestable.
1916  std::vector<uint256> parent_ids_to_add;
1917  {
1918  LOCK(m_mempool.cs);
1919  auto txiter = m_mempool.GetIter(tx->GetHash());
1920  if (txiter) {
1921  const CTxMemPoolEntry::Parents& parents = (*txiter)->GetMemPoolParentsConst();
1922  parent_ids_to_add.reserve(parents.size());
1923  for (const CTxMemPoolEntry& parent : parents) {
1924  if (parent.GetTime() > now - UNCONDITIONAL_RELAY_DELAY) {
1925  parent_ids_to_add.push_back(parent.GetTx().GetHash());
1926  }
1927  }
1928  }
1929  }
1930  for (const uint256& parent_txid : parent_ids_to_add) {
1931  // Relaying a transaction with a recent but unconfirmed parent.
1932  if (WITH_LOCK(pfrom.m_tx_relay->cs_tx_inventory, return !pfrom.m_tx_relay->filterInventoryKnown.contains(parent_txid))) {
1933  LOCK(cs_main);
1934  State(pfrom.GetId())->m_recently_announced_invs.insert(parent_txid);
1935  }
1936  }
1937  } else {
1938  vNotFound.push_back(inv);
1939  }
1940  }
1941 
1942  // Only process one BLOCK item per call, since they're uncommon and can be
1943  // expensive to process.
1944  if (it != peer.m_getdata_requests.end() && !pfrom.fPauseSend) {
1945  const CInv &inv = *it++;
1946  if (inv.IsGenBlkMsg()) {
1947  ProcessGetBlockData(pfrom, peer, inv);
1948  }
1949  // else: If the first item on the queue is an unknown type, we erase it
1950  // and continue processing the queue on the next call.
1951  }
1952 
1953  peer.m_getdata_requests.erase(peer.m_getdata_requests.begin(), it);
1954 
1955  if (!vNotFound.empty()) {
1956  // Let the peer know that we didn't find what it asked for, so it doesn't
1957  // have to wait around forever.
1958  // SPV clients care about this message: it's needed when they are
1959  // recursively walking the dependencies of relevant unconfirmed
1960  // transactions. SPV clients want to do that because they want to know
1961  // about (and store and rebroadcast and risk analyze) the dependencies
1962  // of transactions relevant to them, without having to download the
1963  // entire memory pool.
1964  // Also, other nodes can use these messages to automatically request a
1965  // transaction from some other peer that annnounced it, and stop
1966  // waiting for us to respond.
1967  // In normal operation, we often send NOTFOUND messages for parents of
1968  // transactions that we relay; if a peer is missing a parent, they may
1969  // assume we have them and request the parents from us.
1970  m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::NOTFOUND, vNotFound));
1971  }
1972 }
1973 
1974 static uint32_t GetFetchFlags(const CNode& pfrom) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
1975  uint32_t nFetchFlags = 0;
1976  if ((pfrom.GetLocalServices() & NODE_WITNESS) && State(pfrom.GetId())->fHaveWitness) {
1977  nFetchFlags |= MSG_WITNESS_FLAG;
1978  }
1979  return nFetchFlags;
1980 }
1981 
1982 void PeerManagerImpl::SendBlockTransactions(CNode& pfrom, const CBlock& block, const BlockTransactionsRequest& req)
1983 {
1984  BlockTransactions resp(req);
1985  for (size_t i = 0; i < req.indexes.size(); i++) {
1986  if (req.indexes[i] >= block.vtx.size()) {
1987  Misbehaving(pfrom.GetId(), 100, "getblocktxn with out-of-bounds tx indices");
1988  return;
1989  }
1990  resp.txn[i] = block.vtx[req.indexes[i]];
1991  }
1992  LOCK(cs_main);
1993  const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
1994  int nSendFlags = State(pfrom.GetId())->fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
1995  m_connman.PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCKTXN, resp));
1996 }
1997 
1998 void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, const Peer& peer,
1999  const std::vector<CBlockHeader>& headers,
2000  bool via_compact_block)
2001 {
2002  const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
2003  size_t nCount = headers.size();
2004 
2005  if (nCount == 0) {
2006  // Nothing interesting. Stop asking this peers for more headers.
2007  return;
2008  }
2009 
2010  bool received_new_header = false;
2011  const CBlockIndex *pindexLast = nullptr;
2012  {
2013  LOCK(cs_main);
2014  CNodeState *nodestate = State(pfrom.GetId());
2015 
2016  // If this looks like it could be a block announcement (nCount <
2017  // MAX_BLOCKS_TO_ANNOUNCE), use special logic for handling headers that
2018  // don't connect:
2019  // - Send a getheaders message in response to try to connect the chain.
2020  // - The peer can send up to MAX_UNCONNECTING_HEADERS in a row that
2021  // don't connect before giving DoS points
2022  // - Once a headers message is received that is valid and does connect,
2023  // nUnconnectingHeaders gets reset back to 0.
2024  if (!m_chainman.m_blockman.LookupBlockIndex(headers[0].hashPrevBlock) && nCount < MAX_BLOCKS_TO_ANNOUNCE) {
2025  nodestate->nUnconnectingHeaders++;
2026  m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, m_chainman.ActiveChain().GetLocator(pindexBestHeader), uint256()));
2027  LogPrint(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n",
2028  headers[0].GetHash().ToString(),
2029  headers[0].hashPrevBlock.ToString(),
2031  pfrom.GetId(), nodestate->nUnconnectingHeaders);
2032  // Set hashLastUnknownBlock for this peer, so that if we
2033  // eventually get the headers - even from a different peer -
2034  // we can use this peer to download.
2035  UpdateBlockAvailability(pfrom.GetId(), headers.back().GetHash());
2036 
2037  if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS == 0) {
2038  Misbehaving(pfrom.GetId(), 20, strprintf("%d non-connecting headers", nodestate->nUnconnectingHeaders));
2039  }
2040  return;
2041  }
2042 
2043  uint256 hashLastBlock;
2044  for (const CBlockHeader& header : headers) {
2045  if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) {
2046  Misbehaving(pfrom.GetId(), 20, "non-continuous headers sequence");
2047  return;
2048  }
2049  hashLastBlock = header.GetHash();
2050  }
2051 
2052  // If we don't have the last header, then they'll have given us
2053  // something new (if these headers are valid).
2054  if (!m_chainman.m_blockman.LookupBlockIndex(hashLastBlock)) {
2055  received_new_header = true;
2056  }
2057  }
2058 
2059  BlockValidationState state;
2060  if (!m_chainman.ProcessNewBlockHeaders(headers, state, m_chainparams, &pindexLast)) {
2061  if (state.IsInvalid()) {
2062  MaybePunishNodeForBlock(pfrom.GetId(), state, via_compact_block, "invalid header received");
2063  return;
2064  }
2065  }
2066 
2067  {
2068  LOCK(cs_main);
2069  CNodeState *nodestate = State(pfrom.GetId());
2070  if (nodestate->nUnconnectingHeaders > 0) {
2071  LogPrint(BCLog::NET, "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n", pfrom.GetId(), nodestate->nUnconnectingHeaders);
2072  }
2073  nodestate->nUnconnectingHeaders = 0;
2074 
2075  assert(pindexLast);
2076  UpdateBlockAvailability(pfrom.GetId(), pindexLast->GetBlockHash());
2077 
2078  // From here, pindexBestKnownBlock should be guaranteed to be non-null,
2079  // because it is set in UpdateBlockAvailability. Some nullptr checks
2080  // are still present, however, as belt-and-suspenders.
2081 
2082  if (received_new_header && pindexLast->nChainWork > m_chainman.ActiveChain().Tip()->nChainWork) {
2083  nodestate->m_last_block_announcement = GetTime();
2084  }
2085 
2086  if (nCount == MAX_HEADERS_RESULTS) {
2087  // Headers message had its maximum size; the peer may have more headers.
2088  // TODO: optimize: if pindexLast is an ancestor of m_chainman.ActiveChain().Tip or pindexBestHeader, continue
2089  // from there instead.
2090  LogPrint(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n",
2091  pindexLast->nHeight, pfrom.GetId(), peer.m_starting_height);
2092  m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, m_chainman.ActiveChain().GetLocator(pindexLast), uint256()));
2093  }
2094 
2095  // If this set of headers is valid and ends in a block with at least as
2096  // much work as our tip, download as much as possible.
2097  if (CanDirectFetch() && pindexLast->IsValid(BLOCK_VALID_TREE) && m_chainman.ActiveChain().Tip()->nChainWork <= pindexLast->nChainWork) {
2098  std::vector<const CBlockIndex*> vToFetch;
2099  const CBlockIndex *pindexWalk = pindexLast;
2100  // Calculate all the blocks we'd need to switch to pindexLast, up to a limit.
2101  while (pindexWalk && !m_chainman.ActiveChain().Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
2102  if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
2103  !IsBlockRequested(pindexWalk->GetBlockHash()) &&
2104  (!DeploymentActiveAt(*pindexWalk, m_chainparams.GetConsensus(), Consensus::DEPLOYMENT_SEGWIT) || State(pfrom.GetId())->fHaveWitness)) {
2105  // We don't have this block, and it's not yet in flight.
2106  vToFetch.push_back(pindexWalk);
2107  }
2108  pindexWalk = pindexWalk->pprev;
2109  }
2110  // If pindexWalk still isn't on our main chain, we're looking at a
2111  // very large reorg at a time we think we're close to caught up to
2112  // the main chain -- this shouldn't really happen. Bail out on the
2113  // direct fetch and rely on parallel download instead.
2114  if (!m_chainman.ActiveChain().Contains(pindexWalk)) {
2115  LogPrint(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
2116  pindexLast->GetBlockHash().ToString(),
2117  pindexLast->nHeight);
2118  } else {
2119  std::vector<CInv> vGetData;
2120  // Download as much as possible, from earliest to latest.
2121  for (const CBlockIndex *pindex : reverse_iterate(vToFetch)) {
2122  if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
2123  // Can't download any more from this peer
2124  break;
2125  }
2126  uint32_t nFetchFlags = GetFetchFlags(pfrom);
2127  vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
2128  BlockRequested(pfrom.GetId(), *pindex);
2129  LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
2130  pindex->GetBlockHash().ToString(), pfrom.GetId());
2131  }
2132  if (vGetData.size() > 1) {
2133  LogPrint(BCLog::NET, "Downloading blocks toward %s (%d) via headers direct fetch\n",
2134  pindexLast->GetBlockHash().ToString(), pindexLast->nHeight);
2135  }
2136  if (vGetData.size() > 0) {
2137  if (nodestate->fSupportsDesiredCmpctVersion && vGetData.size() == 1 && mapBlocksInFlight.size() == 1 && pindexLast->pprev->IsValid(BLOCK_VALID_CHAIN)) {
2138  // In any case, we want to download using a compact block, not a regular one
2139  vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
2140  }
2141  m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData));
2142  }
2143  }
2144  }
2145  // If we're in IBD, we want outbound peers that will serve us a useful
2146  // chain. Disconnect peers that are on chains with insufficient work.
2147  if (m_chainman.ActiveChainstate().IsInitialBlockDownload() && nCount != MAX_HEADERS_RESULTS) {
2148  // When nCount < MAX_HEADERS_RESULTS, we know we have no more
2149  // headers to fetch from this peer.
2150  if (nodestate->pindexBestKnownBlock && nodestate->pindexBestKnownBlock->nChainWork < nMinimumChainWork) {
2151  // This peer has too little work on their headers chain to help
2152  // us sync -- disconnect if it is an outbound disconnection
2153  // candidate.
2154  // Note: We compare their tip to nMinimumChainWork (rather than
2155  // m_chainman.ActiveChain().Tip()) because we won't start block download
2156  // until we have a headers chain that has at least
2157  // nMinimumChainWork, even if a peer has a chain past our tip,
2158  // as an anti-DoS measure.
2159  if (pfrom.IsOutboundOrBlockRelayConn()) {
2160  LogPrintf("Disconnecting outbound peer %d -- headers chain has insufficient work\n", pfrom.GetId());
2161  pfrom.fDisconnect = true;
2162  }
2163  }
2164  }
2165 
2166  // If this is an outbound full-relay peer, check to see if we should protect
2167  // it from the bad/lagging chain logic.
2168  // Note that outbound block-relay peers are excluded from this protection, and
2169  // thus always subject to eviction under the bad/lagging chain logic.
2170  // See ChainSyncTimeoutState.
2171  if (!pfrom.fDisconnect && pfrom.IsFullOutboundConn() && nodestate->pindexBestKnownBlock != nullptr) {
2172  if (m_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= m_chainman.ActiveChain().Tip()->nChainWork && !nodestate->m_chain_sync.m_protect) {
2173  LogPrint(BCLog::NET, "Protecting outbound peer=%d from eviction\n", pfrom.GetId());
2174  nodestate->m_chain_sync.m_protect = true;
2175  ++m_outbound_peers_with_protect_from_disconnect;
2176  }
2177  }
2178  }
2179 
2180  return;
2181 }
2182 
2191 void PeerManagerImpl::ProcessOrphanTx(std::set<uint256>& orphan_work_set)
2192 {
2195 
2196  while (!orphan_work_set.empty()) {
2197  const uint256 orphanHash = *orphan_work_set.begin();
2198  orphan_work_set.erase(orphan_work_set.begin());
2199 
2200  const auto [porphanTx, from_peer] = m_orphanage.GetTx(orphanHash);
2201  if (porphanTx == nullptr) continue;
2202 
2203  const MempoolAcceptResult result = AcceptToMemoryPool(m_chainman.ActiveChainstate(), m_mempool, porphanTx, false /* bypass_limits */);
2204  const TxValidationState& state = result.m_state;
2205 
2207  LogPrint(BCLog::MEMPOOL, " accepted orphan tx %s\n", orphanHash.ToString());
2208  _RelayTransaction(orphanHash, porphanTx->GetWitnessHash());
2209  m_orphanage.AddChildrenToWorkSet(*porphanTx, orphan_work_set);
2210  m_orphanage.EraseTx(orphanHash);
2211  for (const CTransactionRef& removedTx : result.m_replaced_transactions.value()) {
2212  AddToCompactExtraTransactions(removedTx);
2213  }
2214  break;
2215  } else if (state.GetResult() != TxValidationResult::TX_MISSING_INPUTS) {
2216  if (state.IsInvalid()) {
2217  LogPrint(BCLog::MEMPOOL, " invalid orphan tx %s from peer=%d. %s\n",
2218  orphanHash.ToString(),
2219  from_peer,
2220  state.ToString());
2221  // Maybe punish peer that gave us an invalid orphan tx
2222  MaybePunishNodeForTx(from_peer, state);
2223  }
2224  // Has inputs but not accepted to mempool
2225  // Probably non-standard or insufficient fee
2226  LogPrint(BCLog::MEMPOOL, " removed orphan tx %s\n", orphanHash.ToString());
2227  if (state.GetResult() != TxValidationResult::TX_WITNESS_STRIPPED) {
2228  // We can add the wtxid of this transaction to our reject filter.
2229  // Do not add txids of witness transactions or witness-stripped
2230  // transactions to the filter, as they can have been malleated;
2231  // adding such txids to the reject filter would potentially
2232  // interfere with relay of valid transactions from peers that
2233  // do not support wtxid-based relay. See
2234  // https://github.com/bitcoin/bitcoin/issues/8279 for details.
2235  // We can remove this restriction (and always add wtxids to
2236  // the filter even for witness stripped transactions) once
2237  // wtxid-based relay is broadly deployed.
2238  // See also comments in https://github.com/bitcoin/bitcoin/pull/18044#discussion_r443419034
2239  // for concerns around weakening security of unupgraded nodes
2240  // if we start doing this too early.
2241  assert(recentRejects);
2242  recentRejects->insert(porphanTx->GetWitnessHash());
2243  // If the transaction failed for TX_INPUTS_NOT_STANDARD,
2244  // then we know that the witness was irrelevant to the policy
2245  // failure, since this check depends only on the txid
2246  // (the scriptPubKey being spent is covered by the txid).
2247  // Add the txid to the reject filter to prevent repeated
2248  // processing of this transaction in the event that child
2249  // transactions are later received (resulting in
2250  // parent-fetching by txid via the orphan-handling logic).
2251  if (state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && porphanTx->GetWitnessHash() != porphanTx->GetHash()) {
2252  // We only add the txid if it differs from the wtxid, to
2253  // avoid wasting entries in the rolling bloom filter.
2254  recentRejects->insert(porphanTx->GetHash());
2255  }
2256  }
2257  m_orphanage.EraseTx(orphanHash);
2258  break;
2259  }
2260  }
2261  m_mempool.check(m_chainman.ActiveChainstate());
2262 }
2263 
2264 bool PeerManagerImpl::PrepareBlockFilterRequest(CNode& peer,
2265  BlockFilterType filter_type, uint32_t start_height,
2266  const uint256& stop_hash, uint32_t max_height_diff,
2267  const CBlockIndex*& stop_index,
2268  BlockFilterIndex*& filter_index)
2269 {
2270  const bool supported_filter_type =
2271  (filter_type == BlockFilterType::BASIC &&
2273  if (!supported_filter_type) {
2274  LogPrint(BCLog::NET, "peer %d requested unsupported block filter type: %d\n",
2275  peer.GetId(), static_cast<uint8_t>(filter_type));
2276  peer.fDisconnect = true;
2277  return false;
2278  }
2279 
2280  {
2281  LOCK(cs_main);
2282  stop_index = m_chainman.m_blockman.LookupBlockIndex(stop_hash);
2283 
2284  // Check that the stop block exists and the peer would be allowed to fetch it.
2285  if (!stop_index || !BlockRequestAllowed(stop_index)) {
2286  LogPrint(BCLog::NET, "peer %d requested invalid block hash: %s\n",
2287  peer.GetId(), stop_hash.ToString());
2288  peer.fDisconnect = true;
2289  return false;
2290  }
2291  }
2292 
2293  uint32_t stop_height = stop_index->nHeight;
2294  if (start_height > stop_height) {
2295  LogPrint(BCLog::NET, "peer %d sent invalid getcfilters/getcfheaders with " /* Continued */
2296  "start height %d and stop height %d\n",
2297  peer.GetId(), start_height, stop_height);
2298  peer.fDisconnect = true;
2299  return false;
2300  }
2301  if (stop_height - start_height >= max_height_diff) {
2302  LogPrint(BCLog::NET, "peer %d requested too many cfilters/cfheaders: %d / %d\n",
2303  peer.GetId(), stop_height - start_height + 1, max_height_diff);
2304  peer.fDisconnect = true;
2305  return false;
2306  }
2307 
2308  filter_index = GetBlockFilterIndex(filter_type);
2309  if (!filter_index) {
2310  LogPrint(BCLog::NET, "Filter index for supported type %s not found\n", BlockFilterTypeName(filter_type));
2311  return false;
2312  }
2313 
2314  return true;
2315 }
2316 
2317 void PeerManagerImpl::ProcessGetCFilters(CNode& peer, CDataStream& vRecv)
2318 {
2319  uint8_t filter_type_ser;
2320  uint32_t start_height;
2321  uint256 stop_hash;
2322 
2323  vRecv >> filter_type_ser >> start_height >> stop_hash;
2324 
2325  const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser);
2326 
2327  const CBlockIndex* stop_index;
2328  BlockFilterIndex* filter_index;
2329  if (!PrepareBlockFilterRequest(peer, filter_type, start_height, stop_hash,
2330  MAX_GETCFILTERS_SIZE, stop_index, filter_index)) {
2331  return;
2332  }
2333 
2334  std::vector<BlockFilter> filters;
2335  if (!filter_index->LookupFilterRange(start_height, stop_index, filters)) {
2336  LogPrint(BCLog::NET, "Failed to find block filter in index: filter_type=%s, start_height=%d, stop_hash=%s\n",
2337  BlockFilterTypeName(filter_type), start_height, stop_hash.ToString());
2338  return;
2339  }
2340 
2341  for (const auto& filter : filters) {
2343  .Make(NetMsgType::CFILTER, filter);
2344  m_connman.PushMessage(&peer, std::move(msg));
2345  }
2346 }
2347 
2348 void PeerManagerImpl::ProcessGetCFHeaders(CNode& peer, CDataStream& vRecv)
2349 {
2350  uint8_t filter_type_ser;
2351  uint32_t start_height;
2352  uint256 stop_hash;
2353 
2354  vRecv >> filter_type_ser >> start_height >> stop_hash;
2355 
2356  const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser);
2357 
2358  const CBlockIndex* stop_index;
2359  BlockFilterIndex* filter_index;
2360  if (!PrepareBlockFilterRequest(peer, filter_type, start_height, stop_hash,
2361  MAX_GETCFHEADERS_SIZE, stop_index, filter_index)) {
2362  return;
2363  }
2364 
2365  uint256 prev_header;
2366  if (start_height > 0) {
2367  const CBlockIndex* const prev_block =
2368  stop_index->GetAncestor(static_cast<int>(start_height - 1));
2369  if (!filter_index->LookupFilterHeader(prev_block, prev_header)) {
2370  LogPrint(BCLog::NET, "Failed to find block filter header in index: filter_type=%s, block_hash=%s\n",
2371  BlockFilterTypeName(filter_type), prev_block->GetBlockHash().ToString());
2372  return;
2373  }
2374  }
2375 
2376  std::vector<uint256> filter_hashes;
2377  if (!filter_index->LookupFilterHashRange(start_height, stop_index, filter_hashes)) {
2378  LogPrint(BCLog::NET, "Failed to find block filter hashes in index: filter_type=%s, start_height=%d, stop_hash=%s\n",
2379  BlockFilterTypeName(filter_type), start_height, stop_hash.ToString());
2380  return;
2381  }
2382 
2384  .Make(NetMsgType::CFHEADERS,
2385  filter_type_ser,
2386  stop_index->GetBlockHash(),
2387  prev_header,
2388  filter_hashes);
2389  m_connman.PushMessage(&peer, std::move(msg));
2390 }
2391 
2392 void PeerManagerImpl::ProcessGetCFCheckPt(CNode& peer, CDataStream& vRecv)
2393 {
2394  uint8_t filter_type_ser;
2395  uint256 stop_hash;
2396 
2397  vRecv >> filter_type_ser >> stop_hash;
2398 
2399  const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser);
2400 
2401  const CBlockIndex* stop_index;
2402  BlockFilterIndex* filter_index;
2403  if (!PrepareBlockFilterRequest(peer, filter_type, /*start_height=*/0, stop_hash,
2404  /*max_height_diff=*/std::numeric_limits<uint32_t>::max(),
2405  stop_index, filter_index)) {
2406  return;
2407  }
2408 
2409  std::vector<uint256> headers(stop_index->nHeight / CFCHECKPT_INTERVAL);
2410 
2411  // Populate headers.
2412  const CBlockIndex* block_index = stop_index;
2413  for (int i = headers.size() - 1; i >= 0; i--) {
2414  int height = (i + 1) * CFCHECKPT_INTERVAL;
2415  block_index = block_index->GetAncestor(height);
2416 
2417  if (!filter_index->LookupFilterHeader(block_index, headers[i])) {
2418  LogPrint(BCLog::NET, "Failed to find block filter header in index: filter_type=%s, block_hash=%s\n",
2419  BlockFilterTypeName(filter_type), block_index->GetBlockHash().ToString());
2420  return;
2421  }
2422  }
2423 
2425  .Make(NetMsgType::CFCHECKPT,
2426  filter_type_ser,
2427  stop_index->GetBlockHash(),
2428  headers);
2429  m_connman.PushMessage(&peer, std::move(msg));
2430 }
2431 
2432 void PeerManagerImpl::ProcessBlock(CNode& node, const std::shared_ptr<const CBlock>& block, bool force_processing)
2433 {
2434  bool new_block{false};
2435  m_chainman.ProcessNewBlock(m_chainparams, block, force_processing, &new_block);
2436  if (new_block) {
2437  node.nLastBlockTime = GetTime();
2438  } else {
2439  LOCK(cs_main);
2440  mapBlockSource.erase(block->GetHash());
2441  }
2442 }
2443 
2444 void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDataStream& vRecv,
2445  const std::chrono::microseconds time_received,
2446  const std::atomic<bool>& interruptMsgProc)
2447 {
2448  LogPrint(BCLog::NET, "received: %s (%u bytes) peer=%d\n", SanitizeString(msg_type), vRecv.size(), pfrom.GetId());
2449 
2450  PeerRef peer = GetPeerRef(pfrom.GetId());
2451  if (peer == nullptr) return;
2452 
2453  if (msg_type == NetMsgType::VERSION) {
2454  if (pfrom.nVersion != 0) {
2455  LogPrint(BCLog::NET, "redundant version message from peer=%d\n", pfrom.GetId());
2456  return;
2457  }
2458 
2459  int64_t nTime;
2460  CAddress addrMe;
2461  CAddress addrFrom;
2462  uint64_t nNonce = 1;
2463  uint64_t nServiceInt;
2464  ServiceFlags nServices;
2465  int nVersion;
2466  std::string cleanSubVer;
2467  int starting_height = -1;
2468  bool fRelay = true;
2469 
2470  vRecv >> nVersion >> nServiceInt >> nTime >> addrMe;
2471  if (nTime < 0) {
2472  nTime = 0;
2473  }
2474  nServices = ServiceFlags(nServiceInt);
2475  if (!pfrom.IsInboundConn())
2476  {
2477  m_addrman.SetServices(pfrom.addr, nServices);
2478  }
2479  if (pfrom.ExpectServicesFromConn() && !HasAllDesirableServiceFlags(nServices))
2480  {
2481  LogPrint(BCLog::NET, "peer=%d does not offer the expected services (%08x offered, %08x expected); disconnecting\n", pfrom.GetId(), nServices, GetDesirableServiceFlags(nServices));
2482  pfrom.fDisconnect = true;
2483  return;
2484  }
2485 
2486  if (nVersion < MIN_PEER_PROTO_VERSION) {
2487  // disconnect from peers older than this proto version
2488  LogPrint(BCLog::NET, "peer=%d using obsolete version %i; disconnecting\n", pfrom.GetId(), nVersion);
2489  pfrom.fDisconnect = true;
2490  return;
2491  }
2492 
2493  if (!vRecv.empty())
2494  vRecv >> addrFrom >> nNonce;
2495  if (!vRecv.empty()) {
2496  std::string strSubVer;
2497  vRecv >> LIMITED_STRING(strSubVer, MAX_SUBVERSION_LENGTH);
2498  cleanSubVer = SanitizeString(strSubVer);
2499  }
2500  if (!vRecv.empty()) {
2501  vRecv >> starting_height;
2502  }
2503  if (!vRecv.empty())
2504  vRecv >> fRelay;
2505  // Disconnect if we connected to ourself
2506  if (pfrom.IsInboundConn() && !m_connman.CheckIncomingNonce(nNonce))
2507  {
2508  LogPrintf("connected to self at %s, disconnecting\n", pfrom.addr.ToString());
2509  pfrom.fDisconnect = true;
2510  return;
2511  }
2512 
2513  if (pfrom.IsInboundConn() && addrMe.IsRoutable())
2514  {
2515  SeenLocal(addrMe);
2516  }
2517 
2518  // Inbound peers send us their version message when they connect.
2519  // We send our version message in response.
2520  if (pfrom.IsInboundConn()) PushNodeVersion(pfrom, GetAdjustedTime());
2521 
2522  // Change version
2523  const int greatest_common_version = std::min(nVersion, PROTOCOL_VERSION);
2524  pfrom.SetCommonVersion(greatest_common_version);
2525  pfrom.nVersion = nVersion;
2526 
2527  const CNetMsgMaker msg_maker(greatest_common_version);
2528 
2529  if (greatest_common_version >= WTXID_RELAY_VERSION) {
2530  m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::WTXIDRELAY));
2531  }
2532 
2533  // Signal ADDRv2 support (BIP155).
2534  if (greatest_common_version >= 70016) {
2535  // BIP155 defines addrv2 and sendaddrv2 for all protocol versions, but some
2536  // implementations reject messages they don't know. As a courtesy, don't send
2537  // it to nodes with a version before 70016, as no software is known to support
2538  // BIP155 that doesn't announce at least that protocol version number.
2539  m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::SENDADDRV2));
2540  }
2541 
2542  m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::VERACK));
2543 
2544  pfrom.nServices = nServices;
2545  pfrom.SetAddrLocal(addrMe);
2546  {
2547  LOCK(pfrom.cs_SubVer);
2548  pfrom.cleanSubVer = cleanSubVer;
2549  }
2550  peer->m_starting_height = starting_height;
2551 
2552  // set nodes not relaying blocks and tx and not serving (parts) of the historical blockchain as "clients"
2553  pfrom.fClient = (!(nServices & NODE_NETWORK) && !(nServices & NODE_NETWORK_LIMITED));
2554 
2555  // set nodes not capable of serving the complete blockchain history as "limited nodes"
2556  pfrom.m_limited_node = (!(nServices & NODE_NETWORK) && (nServices & NODE_NETWORK_LIMITED));
2557 
2558  if (pfrom.m_tx_relay != nullptr) {
2559  LOCK(pfrom.m_tx_relay->cs_filter);
2560  pfrom.m_tx_relay->fRelayTxes = fRelay; // set to true after we get the first filter* message
2561  }
2562 
2563  if((nServices & NODE_WITNESS))
2564  {
2565  LOCK(cs_main);
2566  State(pfrom.GetId())->fHaveWitness = true;
2567  }
2568 
2569  // Potentially mark this peer as a preferred download peer.
2570  {
2571  LOCK(cs_main);
2572  UpdatePreferredDownload(pfrom, State(pfrom.GetId()));
2573  }
2574 
2575  if (!pfrom.IsInboundConn() && !pfrom.IsBlockOnlyConn()) {
2576  // For outbound peers, we try to relay our address (so that other
2577  // nodes can try to find us more quickly, as we have no guarantee
2578  // that an outbound peer is even aware of how to reach us) and do a
2579  // one-time address fetch (to help populate/update our addrman). If
2580  // we're starting up for the first time, our addrman may be pretty
2581  // empty and no one will know who we are, so these mechanisms are
2582  // important to help us connect to the network.
2583  //
2584  // We skip this for block-relay-only peers to avoid potentially leaking
2585  // information about our block-relay-only connections via address relay.
2586  if (fListen && !m_chainman.ActiveChainstate().IsInitialBlockDownload())
2587  {
2588  CAddress addr = GetLocalAddress(&pfrom.addr, pfrom.GetLocalServices());
2589  FastRandomContext insecure_rand;
2590  if (addr.IsRoutable())
2591  {
2592  LogPrint(BCLog::NET, "ProcessMessages: advertising address %s\n", addr.ToString());
2593  PushAddress(*peer, addr, insecure_rand);
2594  } else if (IsPeerAddrLocalGood(&pfrom)) {
2595  addr.SetIP(addrMe);
2596  LogPrint(BCLog::NET, "ProcessMessages: advertising address %s\n", addr.ToString());
2597  PushAddress(*peer, addr, insecure_rand);
2598  }
2599  }
2600 
2601  // Get recent addresses
2602  m_connman.PushMessage(&pfrom, CNetMsgMaker(greatest_common_version).Make(NetMsgType::GETADDR));
2603  peer->m_getaddr_sent = true;
2604  // When requesting a getaddr, accept an additional MAX_ADDR_TO_SEND addresses in response
2605  // (bypassing the MAX_ADDR_PROCESSING_TOKEN_BUCKET limit).
2606  peer->m_addr_token_bucket += MAX_ADDR_TO_SEND;
2607  }
2608 
2609  if (!pfrom.IsInboundConn()) {
2610  // For non-inbound connections, we update the addrman to record
2611  // connection success so that addrman will have an up-to-date
2612  // notion of which peers are online and available.
2613  //
2614  // While we strive to not leak information about block-relay-only
2615  // connections via the addrman, not moving an address to the tried
2616  // table is also potentially detrimental because new-table entries
2617  // are subject to eviction in the event of addrman collisions. We
2618  // mitigate the information-leak by never calling
2619  // CAddrMan::Connected() on block-relay-only peers; see
2620  // FinalizeNode().
2621  //
2622  // This moves an address from New to Tried table in Addrman,
2623  // resolves tried-table collisions, etc.
2624  m_addrman.Good(pfrom.addr);
2625  }
2626 
2627  std::string remoteAddr;
2628  if (fLogIPs)
2629  remoteAddr = ", peeraddr=" + pfrom.addr.ToString();
2630 
2631  LogPrint(BCLog::NET, "receive version message: %s: version %d, blocks=%d, us=%s, txrelay=%d, peer=%d%s\n",
2632  cleanSubVer, pfrom.nVersion,
2633  peer->m_starting_height, addrMe.ToString(), fRelay, pfrom.GetId(),
2634  remoteAddr);
2635 
2636  int64_t nTimeOffset = nTime - GetTime();
2637  pfrom.nTimeOffset = nTimeOffset;
2638  AddTimeData(pfrom.addr, nTimeOffset);
2639 
2640  // If the peer is old enough to have the old alert system, send it the final alert.
2641  if (greatest_common_version <= 70012) {
2642  CDataStream finalAlert(ParseHex("60010000000000000000000000ffffff7f00000000ffffff7ffeffff7f01ffffff7f00000000ffffff7f00ffffff7f002f555247454e543a20416c657274206b657920636f6d70726f6d697365642c2075706772616465207265717569726564004630440220653febd6410f470f6bae11cad19c48413becb1ac2c17f908fd0fd53bdc3abd5202206d0e9c96fe88d4a0f01ed9dedae2b6f9e00da94cad0fecaae66ecf689bf71b50"), SER_NETWORK, PROTOCOL_VERSION);
2643  m_connman.PushMessage(&pfrom, CNetMsgMaker(greatest_common_version).Make("alert", finalAlert));
2644  }
2645 
2646  // Feeler connections exist only to verify if address is online.
2647  if (pfrom.IsFeelerConn()) {
2648  LogPrint(BCLog::NET, "feeler connection completed peer=%d; disconnecting\n", pfrom.GetId());
2649  pfrom.fDisconnect = true;
2650  }
2651  return;
2652  }
2653 
// Reject any non-VERSION message until the peer has sent us its VERSION
// (nVersion stays 0 until then). This enforces the handshake ordering:
// VERSION must come first on every connection.
2654  if (pfrom.nVersion == 0) {
2655  // Must have a version message before anything else
2656  LogPrint(BCLog::NET, "non-version message before version handshake. Message \"%s\" from peer=%d\n", SanitizeString(msg_type), pfrom.GetId());
2657  return;
2658  }
2659 
2660  // At this point, the outgoing message serialization version can't change.
// msgMaker serializes every outbound message below with the version
// negotiated during the VERSION exchange (min of ours and the peer's).
2661  const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
2662 
// VERACK: the peer acknowledges our VERSION. Completes the handshake
// (fSuccessfullyConnected) and kicks off post-handshake feature
// negotiation (SENDHEADERS, SENDCMPCT).
2663  if (msg_type == NetMsgType::VERACK) {
2664  if (pfrom.fSuccessfullyConnected) {
// Duplicate VERACKs are tolerated (logged and ignored), unlike
// duplicate wtxidrelay/sendaddrv2 which disconnect.
2665  LogPrint(BCLog::NET, "ignoring redundant verack message from peer=%d\n", pfrom.GetId());
2666  return;
2667  }
2668 
2669  if (!pfrom.IsInboundConn()) {
// Log outbound connections unconditionally (LogPrintf, not LogPrint):
// successful outbound peering is rare/important enough to always record.
2670  LogPrintf("New outbound peer connected: version: %d, blocks=%d, peer=%d%s (%s)\n",
2671  pfrom.nVersion.load(), peer->m_starting_height,
2672  pfrom.GetId(), (fLogIPs ? strprintf(", peeraddr=%s", pfrom.addr.ToString()) : ""),
2673  pfrom.ConnectionTypeAsString());
2674  }
2675 
2676  if (pfrom.GetCommonVersion() >= SENDHEADERS_VERSION) {
2677  // Tell our peer we prefer to receive headers rather than inv's
2678  // We send this to non-NODE NETWORK peers as well, because even
2679  // non-NODE NETWORK peers can announce blocks (such as pruning
2680  // nodes)
2681  m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::SENDHEADERS));
2682  }
2683  if (pfrom.GetCommonVersion() >= SHORT_IDS_BLOCKS_VERSION) {
2684  // Tell our peer we are willing to provide version 1 or 2 cmpctblocks
2685  // However, we do not request new block announcements using
2686  // cmpctblock messages.
2687  // We send this to non-NODE NETWORK peers as well, because
2688  // they may wish to request compact blocks from us
2689  bool fAnnounceUsingCMPCTBLOCK = false;
2690  uint64_t nCMPCTBLOCKVersion = 2;
// Version 2 (witness) is offered first, then version 1; BIP152 peers
// honor the first sendcmpct whose version they support.
2691  if (pfrom.GetLocalServices() & NODE_WITNESS)
2692  m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
2693  nCMPCTBLOCKVersion = 1;
2694  m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
2695  }
// Handshake complete: from here on, non-negotiation messages are accepted.
2696  pfrom.fSuccessfullyConnected = true;
2697  return;
2698  }
2699 
// SENDHEADERS (BIP130): the peer asks us to announce new blocks with
// headers messages instead of invs. Just records the preference.
2700  if (msg_type == NetMsgType::SENDHEADERS) {
2701  LOCK(cs_main);
2702  State(pfrom.GetId())->fPreferHeaders = true;
2703  return;
2704  }
2705 
// SENDCMPCT (BIP152): the peer tells us which compact-block version it
// supports and whether it wants us to announce blocks via cmpctblock
// (high-bandwidth mode). Updates three per-peer state bits:
// fProvidesHeaderAndIDs / fWantsCmpctWitness (locked in on first valid
// message), fPreferHeaderAndIDs, and fSupportsDesiredCmpctVersion.
2706  if (msg_type == NetMsgType::SENDCMPCT) {
2707  bool fAnnounceUsingCMPCTBLOCK = false;
2708  uint64_t nCMPCTBLOCKVersion = 0;
2709  vRecv >> fAnnounceUsingCMPCTBLOCK >> nCMPCTBLOCKVersion;
// Version 2 (witness) is only meaningful if we are a witness node;
// any other version value is silently ignored.
2710  if (nCMPCTBLOCKVersion == 1 || ((pfrom.GetLocalServices() & NODE_WITNESS) && nCMPCTBLOCKVersion == 2)) {
2711  LOCK(cs_main);
2712  // fProvidesHeaderAndIDs is used to "lock in" version of compact blocks we send (fWantsCmpctWitness)
2713  if (!State(pfrom.GetId())->fProvidesHeaderAndIDs) {
2714  State(pfrom.GetId())->fProvidesHeaderAndIDs = true;
2715  State(pfrom.GetId())->fWantsCmpctWitness = nCMPCTBLOCKVersion == 2;
2716  }
2717  if (State(pfrom.GetId())->fWantsCmpctWitness == (nCMPCTBLOCKVersion == 2)) { // ignore later version announces
2718  State(pfrom.GetId())->fPreferHeaderAndIDs = fAnnounceUsingCMPCTBLOCK;
2719  // save whether peer selects us as BIP152 high-bandwidth peer
2720  // (receiving sendcmpct(1) signals high-bandwidth, sendcmpct(0) low-bandwidth)
2721  pfrom.m_bip152_highbandwidth_from = fAnnounceUsingCMPCTBLOCK;
2722  }
2723  if (!State(pfrom.GetId())->fSupportsDesiredCmpctVersion) {
// "Desired" version depends on whether we serve witness data:
// witness nodes want v2, non-witness nodes want v1.
2724  if (pfrom.GetLocalServices() & NODE_WITNESS)
2725  State(pfrom.GetId())->fSupportsDesiredCmpctVersion = (nCMPCTBLOCKVersion == 2);
2726  else
2727  State(pfrom.GetId())->fSupportsDesiredCmpctVersion = (nCMPCTBLOCKVersion == 1);
2728  }
2729  }
2730  return;
2731  }
2732 
2733  // BIP339 defines feature negotiation of wtxidrelay, which must happen between
2734  // VERSION and VERACK to avoid relay problems from switching after a connection is up.
// WTXIDRELAY: the peer opts in to announcing/requesting transactions by
// wtxid rather than txid. Disconnects if sent after the handshake.
2735  if (msg_type == NetMsgType::WTXIDRELAY) {
2736  if (pfrom.fSuccessfullyConnected) {
2737  // Disconnect peers that send a wtxidrelay message after VERACK.
2738  LogPrint(BCLog::NET, "wtxidrelay received after verack from peer=%d; disconnecting\n", pfrom.GetId());
2739  pfrom.fDisconnect = true;
2740  return;
2741  }
2742  if (pfrom.GetCommonVersion() >= WTXID_RELAY_VERSION) {
2743  LOCK(cs_main);
2744  if (!State(pfrom.GetId())->m_wtxid_relay) {
2745  State(pfrom.GetId())->m_wtxid_relay = true;
// Global counter of wtxid-relay peers, used elsewhere in this file.
2746  m_wtxid_relay_peers++;
2747  } else {
2748  LogPrint(BCLog::NET, "ignoring duplicate wtxidrelay from peer=%d\n", pfrom.GetId());
2749  }
2750  } else {
2751  LogPrint(BCLog::NET, "ignoring wtxidrelay due to old common version=%d from peer=%d\n", pfrom.GetCommonVersion(), pfrom.GetId());
2752  }
2753  return;
2754  }
2755 
2756  // BIP155 defines feature negotiation of addrv2 and sendaddrv2, which must happen
2757  // between VERSION and VERACK.
// SENDADDRV2: the peer signals it can receive addrv2 messages (BIP155
// address format, e.g. Tor v3). Disconnects if sent after the handshake.
2758  if (msg_type == NetMsgType::SENDADDRV2) {
2759  if (pfrom.fSuccessfullyConnected) {
2760  // Disconnect peers that send a SENDADDRV2 message after VERACK.
2761  LogPrint(BCLog::NET, "sendaddrv2 received after verack from peer=%d; disconnecting\n", pfrom.GetId());
2762  pfrom.fDisconnect = true;
2763  return;
2764  }
2765  peer->m_wants_addrv2 = true;
2766  return;
2767  }
2768 
// Any other message type before VERACK is ignored (not a disconnect):
// only the negotiation messages handled above are valid pre-verack.
2769  if (!pfrom.fSuccessfullyConnected) {
2770  LogPrint(BCLog::NET, "Unsupported message \"%s\" prior to verack from peer=%d\n", SanitizeString(msg_type), pfrom.GetId());
2771  return;
2772  }
2773 
// ADDR / ADDRV2: the peer sends us network addresses. We rate-limit
// processing (token bucket), remember/relay a subset, and store the
// reachable ones in addrman.
2774  if (msg_type == NetMsgType::ADDR || msg_type == NetMsgType::ADDRV2) {
2775  int stream_version = vRecv.GetVersion();
2776  if (msg_type == NetMsgType::ADDRV2) {
2777  // Add ADDRV2_FORMAT to the version so that the CNetAddr and CAddress
2778  // unserialize methods know that an address in v2 format is coming.
2779  stream_version |= ADDRV2_FORMAT;
2780  }
2781 
2782  OverrideStream<CDataStream> s(&vRecv, vRecv.GetType(), stream_version);
2783  std::vector<CAddress> vAddr;
2784 
2785  s >> vAddr;
2786 
// Peers we don't relay addresses with (e.g. block-relay-only) get their
// addr messages dropped entirely.
2787  if (!RelayAddrsWithPeer(*peer)) {
2788  LogPrint(BCLog::NET, "ignoring %s message from %s peer=%d\n", msg_type, pfrom.ConnectionTypeAsString(), pfrom.GetId());
2789  return;
2790  }
2791  if (vAddr.size() > MAX_ADDR_TO_SEND)
2792  {
2793  Misbehaving(pfrom.GetId(), 20, strprintf("%s message size = %u", msg_type, vAddr.size()));
2794  return;
2795  }
2796 
2797  // Store the new addresses
2798  std::vector<CAddress> vAddrOk;
2799  int64_t nNow = GetAdjustedTime();
// Addresses with timestamps older than 10 minutes are not relayed onward.
2800  int64_t nSince = nNow - 10 * 60;
2801 
2802  // Update/increment addr rate limiting bucket.
2803  const auto current_time = GetTime<std::chrono::microseconds>();
2804  if (peer->m_addr_token_bucket < MAX_ADDR_PROCESSING_TOKEN_BUCKET) {
2805  // Don't increment bucket if it's already full
2806  const auto time_diff = std::max(current_time - peer->m_addr_token_timestamp, 0us);
2807  const double increment = CountSecondsDouble(time_diff) * MAX_ADDR_RATE_PER_SECOND;
2808  peer->m_addr_token_bucket = std::min<double>(peer->m_addr_token_bucket + increment, MAX_ADDR_PROCESSING_TOKEN_BUCKET);
2809  }
2810  peer->m_addr_token_timestamp = current_time;
2811 
// Peers with the Addr permission bypass rate limiting entirely.
2812  const bool rate_limited = !pfrom.HasPermission(NetPermissionFlags::Addr);
2813  uint64_t num_proc = 0;
2814  uint64_t num_rate_limit = 0;
// Shuffle so that rate-limit drops fall on a random subset, not a
// sender-chosen prefix of the message.
2815  Shuffle(vAddr.begin(), vAddr.end(), FastRandomContext());
2816  for (CAddress& addr : vAddr)
2817  {
2818  if (interruptMsgProc)
2819  return;
2820 
2821  // Apply rate limiting.
2822  if (rate_limited) {
2823  if (peer->m_addr_token_bucket < 1.0) {
2824  ++num_rate_limit;
2825  continue;
2826  }
2827  peer->m_addr_token_bucket -= 1.0;
2828  }
2829  // We only bother storing full nodes, though this may include
2830  // things which we would not make an outbound connection to, in
2831  // part because we may make feeler connections to them.
// NOTE(review): the condition guarding this `continue` (original line
// 2832, a service-flag check on addr.nServices per the comment above)
// is missing from this excerpt — verify against the full source.
2833  continue;
2834 
// Clamp nonsense timestamps (too old or in the future) to ~5 days ago.
2835  if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60)
2836  addr.nTime = nNow - 5 * 24 * 60 * 60;
2837  AddAddressKnown(*peer, addr);
2838  if (m_banman && (m_banman->IsDiscouraged(addr) || m_banman->IsBanned(addr))) {
2839  // Do not process banned/discouraged addresses beyond remembering we received them
2840  continue;
2841  }
2842  ++num_proc;
2843  bool fReachable = IsReachable(addr);
// Only gossip fresh addresses from small unsolicited messages onward.
2844  if (addr.nTime > nSince && !peer->m_getaddr_sent && vAddr.size() <= 10 && addr.IsRoutable()) {
2845  // Relay to a limited number of other nodes
2846  RelayAddress(pfrom.GetId(), addr, fReachable);
2847  }
2848  // Do not store addresses outside our network
2849  if (fReachable)
2850  vAddrOk.push_back(addr);
2851  }
2852  peer->m_addr_processed += num_proc;
2853  peer->m_addr_rate_limited += num_rate_limit;
2854  LogPrint(BCLog::NET, "Received addr: %u addresses (%u processed, %u rate-limited) from peer=%d%s\n",
2855  vAddr.size(),
2856  num_proc,
2857  num_rate_limit,
2858  pfrom.GetId(),
2859  fLogIPs ? ", peeraddr=" + pfrom.addr.ToString() : "");
2860 
2861  m_addrman.Add(vAddrOk, pfrom.addr, 2 * 60 * 60);
// A response smaller than 1000 is treated as the end of a GETADDR reply.
2862  if (vAddr.size() < 1000) peer->m_getaddr_sent = false;
2863 
2864  // AddrFetch: Require multiple addresses to avoid disconnecting on self-announcements
2865  if (pfrom.IsAddrFetchConn() && vAddr.size() > 1) {
2866  LogPrint(BCLog::NET, "addrfetch connection completed peer=%d; disconnecting\n", pfrom.GetId());
2867  pfrom.fDisconnect = true;
2868  }
2869  return;
2870  }
2871 
// INV: inventory announcements (blocks and/or transactions). Tx invs
// become download requests via AddTxAnnouncement; an unknown block inv
// triggers a getheaders for the announced tip.
2872  if (msg_type == NetMsgType::INV) {
2873  std::vector<CInv> vInv;
2874  vRecv >> vInv;
2875  if (vInv.size() > MAX_INV_SZ)
2876  {
2877  Misbehaving(pfrom.GetId(), 20, strprintf("inv message size = %u", vInv.size()));
2878  return;
2879  }
2880 
2881  // We won't accept tx inv's if we're in blocks-only mode, or this is a
2882  // block-relay-only peer
2883  bool fBlocksOnly = m_ignore_incoming_txs || (pfrom.m_tx_relay == nullptr);
2884 
2885  // Allow peers with relay permission to send data other than blocks in blocks only mode
// NOTE(review): the `if` guarding this reset (original line 2886 — a
// NetPermissionFlags::Relay permission check, per the comment above) is
// missing from this excerpt — verify against the full source.
2887  fBlocksOnly = false;
2888  }
2889 
2890  LOCK(cs_main);
2891 
2892  const auto current_time = GetTime<std::chrono::microseconds>();
// Remembers the last unknown block hash in the message (blocks are
// announced with the highest last), used for the getheaders below.
2893  uint256* best_block{nullptr};
2894 
2895  for (CInv& inv : vInv) {
2896  if (interruptMsgProc) return;
2897 
2898  // Ignore INVs that don't match wtxidrelay setting.
2899  // Note that orphan parent fetching always uses MSG_TX GETDATAs regardless of the wtxidrelay setting.
2900  // This is fine as no INV messages are involved in that process.
2901  if (State(pfrom.GetId())->m_wtxid_relay) {
2902  if (inv.IsMsgTx()) continue;
2903  } else {
2904  if (inv.IsMsgWtx()) continue;
2905  }
2906 
2907  if (inv.IsMsgBlk()) {
2908  const bool fAlreadyHave = AlreadyHaveBlock(inv.hash);
2909  LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId());
2910 
2911  UpdateBlockAvailability(pfrom.GetId(), inv.hash);
2912  if (!fAlreadyHave && !fImporting && !fReindex && !IsBlockRequested(inv.hash)) {
2913  // Headers-first is the primary method of announcement on
2914  // the network. If a node fell back to sending blocks by inv,
2915  // it's probably for a re-org. The final block hash
2916  // provided should be the highest, so send a getheaders and
2917  // then fetch the blocks we need to catch up.
2918  best_block = &inv.hash;
2919  }
2920  } else if (inv.IsGenTxMsg()) {
2921  const GenTxid gtxid = ToGenTxid(inv);
2922  const bool fAlreadyHave = AlreadyHaveTx(gtxid);
2923  LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId());
2924 
2925  pfrom.AddKnownTx(inv.hash);
// A tx inv from a blocks-only relationship is a protocol violation.
2926  if (fBlocksOnly) {
2927  LogPrint(BCLog::NET, "transaction (%s) inv sent in violation of protocol, disconnecting peer=%d\n", inv.hash.ToString(), pfrom.GetId());
2928  pfrom.fDisconnect = true;
2929  return;
// Don't schedule tx downloads during initial block download.
2930  } else if (!fAlreadyHave && !m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
2931  AddTxAnnouncement(pfrom, gtxid, current_time);
2932  }
2933  } else {
2934  LogPrint(BCLog::NET, "Unknown inv type \"%s\" received from peer=%d\n", inv.ToString(), pfrom.GetId());
2935  }
2936  }
2937 
2938  if (best_block != nullptr) {
2939  m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, m_chainman.ActiveChain().GetLocator(pindexBestHeader), *best_block));
2940  LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, best_block->ToString(), pfrom.GetId());
2941  }
2942 
2943  return;
2944  }
2945 
// GETDATA: the peer requests inventory items. Requests are queued on the
// peer and served (possibly across multiple message-loop iterations) by
// ProcessGetData.
2946  if (msg_type == NetMsgType::GETDATA) {
2947  std::vector<CInv> vInv;
2948  vRecv >> vInv;
2949  if (vInv.size() > MAX_INV_SZ)
2950  {
2951  Misbehaving(pfrom.GetId(), 20, strprintf("getdata message size = %u", vInv.size()));
2952  return;
2953  }
2954 
2955  LogPrint(BCLog::NET, "received getdata (%u invsz) peer=%d\n", vInv.size(), pfrom.GetId());
2956 
2957  if (vInv.size() > 0) {
2958  LogPrint(BCLog::NET, "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom.GetId());
2959  }
2960 
2961  {
2962  LOCK(peer->m_getdata_requests_mutex);
2963  peer->m_getdata_requests.insert(peer->m_getdata_requests.end(), vInv.begin(), vInv.end());
2964  ProcessGetData(pfrom, *peer, interruptMsgProc);
2965  }
2966 
2967  return;
2968  }
2969 
// GETBLOCKS (legacy, pre-headers-first): find the peer's fork point from
// its locator and queue up to 500 block invs after it; a continuation
// hash is recorded when the limit is hit.
2970  if (msg_type == NetMsgType::GETBLOCKS) {
2971  CBlockLocator locator;
2972  uint256 hashStop;
2973  vRecv >> locator >> hashStop;
2974 
2975  if (locator.vHave.size() > MAX_LOCATOR_SZ) {
2976  LogPrint(BCLog::NET, "getblocks locator size %lld > %d, disconnect peer=%d\n", locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.GetId());
2977  pfrom.fDisconnect = true;
2978  return;
2979  }
2980 
2981  // We might have announced the currently-being-connected tip using a
2982  // compact block, which resulted in the peer sending a getblocks
2983  // request, which we would otherwise respond to without the new block.
2984  // To avoid this situation we simply verify that we are on our best
2985  // known chain now. This is super overkill, but we handle it better
2986  // for getheaders requests, and there are no known nodes which support
2987  // compact blocks but still use getblocks to request blocks.
2988  {
2989  std::shared_ptr<const CBlock> a_recent_block;
2990  {
// NOTE(review): the lock acquisition guarding most_recent_block
// (original line 2991) is missing from this excerpt — verify against
// the full source.
2992  a_recent_block = most_recent_block;
2993  }
2994  BlockValidationState state;
2995  if (!m_chainman.ActiveChainstate().ActivateBestChain(state, a_recent_block)) {
2996  LogPrint(BCLog::NET, "failed to activate chain (%s)\n", state.ToString());
2997  }
2998  }
2999 
3000  LOCK(cs_main);
3001 
3002  // Find the last block the caller has in the main chain
3003  const CBlockIndex* pindex = m_chainman.m_blockman.FindForkInGlobalIndex(m_chainman.ActiveChain(), locator);
3004 
3005  // Send the rest of the chain
3006  if (pindex)
3007  pindex = m_chainman.ActiveChain().Next(pindex);
3008  int nLimit = 500;
3009  LogPrint(BCLog::NET, "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom.GetId());
3010  for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex))
3011  {
3012  if (pindex->GetBlockHash() == hashStop)
3013  {
3014  LogPrint(BCLog::NET, " getblocks stopping at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
3015  break;
3016  }
3017  // If pruning, don't inv blocks unless we have on disk and are likely to still have
3018  // for some reasonable time window (1 hour) that block relay might require.
3019  const int nPrunedBlocksLikelyToHave = MIN_BLOCKS_TO_KEEP - 3600 / m_chainparams.GetConsensus().nPowTargetSpacing;
3020  if (fPruneMode && (!(pindex->nStatus & BLOCK_HAVE_DATA) || pindex->nHeight <= m_chainman.ActiveChain().Tip()->nHeight - nPrunedBlocksLikelyToHave))
3021  {
3022  LogPrint(BCLog::NET, " getblocks stopping, pruned or too old block at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
3023  break;
3024  }
3025  WITH_LOCK(peer->m_block_inv_mutex, peer->m_blocks_for_inv_relay.push_back(pindex->GetBlockHash()));
3026  if (--nLimit <= 0) {
3027  // When this block is requested, we'll send an inv that'll
3028  // trigger the peer to getblocks the next batch of inventory.
3029  LogPrint(BCLog::NET, " getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
3030  WITH_LOCK(peer->m_block_inv_mutex, {peer->m_continuation_block = pindex->GetBlockHash();});
3031  break;
3032  }
3033  }
3034  return;
3035  }
3036 
// GETBLOCKTXN (BIP152): the peer asks for specific transactions of a
// block it received as a compact block. Served from the most-recent-block
// cache or from disk for recent blocks; older requests are converted into
// a full-block getdata (cheaper against disk-read DoS, see below).
3037  if (msg_type == NetMsgType::GETBLOCKTXN) {
// NOTE(review): the declaration of `req` (original line 3038, a
// BlockTransactionsRequest per its usage below) is missing from this
// excerpt — verify against the full source.
3039  vRecv >> req;
3040 
3041  std::shared_ptr<const CBlock> recent_block;
3042  {
// NOTE(review): the lock acquisition guarding most_recent_block*
// (original line 3043) is missing from this excerpt — verify against
// the full source.
3044  if (most_recent_block_hash == req.blockhash)
3045  recent_block = most_recent_block;
3046  // Unlock cs_most_recent_block to avoid cs_main lock inversion
3047  }
3048  if (recent_block) {
3049  SendBlockTransactions(pfrom, *recent_block, req);
3050  return;
3051  }
3052 
3053  {
3054  LOCK(cs_main);
3055 
3056  const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(req.blockhash);
3057  if (!pindex || !(pindex->nStatus & BLOCK_HAVE_DATA)) {
3058  LogPrint(BCLog::NET, "Peer %d sent us a getblocktxn for a block we don't have\n", pfrom.GetId());
3059  return;
3060  }
3061 
3062  if (pindex->nHeight >= m_chainman.ActiveChain().Height() - MAX_BLOCKTXN_DEPTH) {
3063  CBlock block;
3064  bool ret = ReadBlockFromDisk(block, pindex, m_chainparams.GetConsensus());
// We just verified BLOCK_HAVE_DATA above, so the read must succeed.
3065  assert(ret);
3066 
3067  SendBlockTransactions(pfrom, block, req);
3068  return;
3069  }
3070  }
3071 
3072  // If an older block is requested (should never happen in practice,
3073  // but can happen in tests) send a block response instead of a
3074  // blocktxn response. Sending a full block response instead of a
3075  // small blocktxn response is preferable in the case where a peer
3076  // might maliciously send lots of getblocktxn requests to trigger
3077  // expensive disk reads, because it will require the peer to
3078  // actually receive all the data read from disk over the network.
3079  LogPrint(BCLog::NET, "Peer %d sent us a getblocktxn for a block > %i deep\n", pfrom.GetId(), MAX_BLOCKTXN_DEPTH);
3080  CInv inv;
3081  WITH_LOCK(cs_main, inv.type = State(pfrom.GetId())->fWantsCmpctWitness ? MSG_WITNESS_BLOCK : MSG_BLOCK);
3082  inv.hash = req.blockhash;
3083  WITH_LOCK(peer->m_getdata_requests_mutex, peer->m_getdata_requests.push_back(inv));
3084  // The message processing loop will go around again (without pausing) and we'll respond then
3085  return;
3086  }
3087 
// GETHEADERS: reply with up to MAX_HEADERS_RESULTS headers following the
// peer's fork point (or the single hashStop header when the locator is
// null), and reset pindexBestHeaderSent so re-announcement works.
3088  if (msg_type == NetMsgType::GETHEADERS) {
3089  CBlockLocator locator;
3090  uint256 hashStop;
3091  vRecv >> locator >> hashStop;
3092 
3093  if (locator.vHave.size() > MAX_LOCATOR_SZ) {
3094  LogPrint(BCLog::NET, "getheaders locator size %lld > %d, disconnect peer=%d\n", locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.GetId());
3095  pfrom.fDisconnect = true;
3096  return;
3097  }
3098 
3099  LOCK(cs_main);
// NOTE(review): the `if` guarding this early-out during initial block
// download (original line 3100) is missing from this excerpt — verify
// against the full source.
3101  LogPrint(BCLog::NET, "Ignoring getheaders from peer=%d because node is in initial block download\n", pfrom.GetId());
3102  return;
3103  }
3104 
3105  CNodeState *nodestate = State(pfrom.GetId());
3106  const CBlockIndex* pindex = nullptr;
3107  if (locator.IsNull())
3108  {
3109  // If locator is null, return the hashStop block
3110  pindex = m_chainman.m_blockman.LookupBlockIndex(hashStop);
3111  if (!pindex) {
3112  return;
3113  }
3114 
// Refuse to serve old headers off the main chain (fingerprint defense).
3115  if (!BlockRequestAllowed(pindex)) {
3116  LogPrint(BCLog::NET, "%s: ignoring request from peer=%i for old block header that isn't in the main chain\n", __func__, pfrom.GetId());
3117  return;
3118  }
3119  }
3120  else
3121  {
3122  // Find the last block the caller has in the main chain
3123  pindex = m_chainman.m_blockman.FindForkInGlobalIndex(m_chainman.ActiveChain(), locator);
3124  if (pindex)
3125  pindex = m_chainman.ActiveChain().Next(pindex);
3126  }
3127 
3128  // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end
3129  std::vector<CBlock> vHeaders;
3130  int nLimit = MAX_HEADERS_RESULTS;
3131  LogPrint(BCLog::NET, "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), pfrom.GetId());
3132  for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex))
3133  {
3134  vHeaders.push_back(pindex->GetBlockHeader());
3135  if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop)
3136  break;
3137  }
3138  // pindex can be nullptr either if we sent m_chainman.ActiveChain().Tip() OR
3139  // if our peer has m_chainman.ActiveChain().Tip() (and thus we are sending an empty
3140  // headers message). In both cases it's safe to update
3141  // pindexBestHeaderSent to be our tip.
3142  //
3143  // It is important that we simply reset the BestHeaderSent value here,
3144  // and not max(BestHeaderSent, newHeaderSent). We might have announced
3145  // the currently-being-connected tip using a compact block, which
3146  // resulted in the peer sending a headers request, which we respond to
3147  // without the new block. By resetting the BestHeaderSent, we ensure we
3148  // will re-announce the new block via headers (or compact blocks again)
3149  // in the SendMessages logic.
3150  nodestate->pindexBestHeaderSent = pindex ? pindex : m_chainman.ActiveChain().Tip();
3151  m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
3152  return;
3153  }
3154 
// TX: a transaction the peer is relaying to us. Runs it through mempool
// acceptance; on missing inputs it may be stored as an orphan and its
// parents requested; on rejection its hash may be added to recentRejects.
3155  if (msg_type == NetMsgType::TX) {
3156  // Stop processing the transaction early if
3157  // 1) We are in blocks only mode and peer has no relay permission
3158  // 2) This peer is a block-relay-only peer
3159  if ((m_ignore_incoming_txs && !pfrom.HasPermission(NetPermissionFlags::Relay)) || (pfrom.m_tx_relay == nullptr))
3160  {
3161  LogPrint(BCLog::NET, "transaction sent in violation of protocol peer=%d\n", pfrom.GetId());
3162  pfrom.fDisconnect = true;
3163  return;
3164  }
3165 
3166  CTransactionRef ptx;
3167  vRecv >> ptx;
3168  const CTransaction& tx = *ptx;
3169 
3170  const uint256& txid = ptx->GetHash();
3171  const uint256& wtxid = ptx->GetWitnessHash();
3172 
// NOTE(review): a line is missing here (original line 3173, a
// LOCK(cs_main) acquisition given the State() access below) — verify
// against the full source.
3174 
3175  CNodeState* nodestate = State(pfrom.GetId());
3176 
// Track the hash this peer would have announced it under (wtxid for
// wtxidrelay peers, txid otherwise).
3177  const uint256& hash = nodestate->m_wtxid_relay ? wtxid : txid;
3178  pfrom.AddKnownTx(hash);
3179  if (nodestate->m_wtxid_relay && txid != wtxid) {
3180  // Insert txid into filterInventoryKnown, even for
3181  // wtxidrelay peers. This prevents re-adding of
3182  // unconfirmed parents to the recently_announced
3183  // filter, when a child tx is requested. See
3184  // ProcessGetData().
3185  pfrom.AddKnownTx(txid);
3186  }
3187 
// Tell the tx request tracker that this peer delivered (by both hashes
// for segwit txs, since either may have been requested).
3188  m_txrequest.ReceivedResponse(pfrom.GetId(), txid);
3189  if (tx.HasWitness()) m_txrequest.ReceivedResponse(pfrom.GetId(), wtxid);
3190 
3191  // We do the AlreadyHaveTx() check using wtxid, rather than txid - in the
3192  // absence of witness malleation, this is strictly better, because the
3193  // recent rejects filter may contain the wtxid but rarely contains
3194  // the txid of a segwit transaction that has been rejected.
3195  // In the presence of witness malleation, it's possible that by only
3196  // doing the check with wtxid, we could overlook a transaction which
3197  // was confirmed with a different witness, or exists in our mempool
3198  // with a different witness, but this has limited downside:
3199  // mempool validation does its own lookup of whether we have the txid
3200  // already; and an adversary can already relay us old transactions
3201  // (older than our recency filter) if trying to DoS us, without any need
3202  // for witness malleation.
3203  if (AlreadyHaveTx(GenTxid(/* is_wtxid=*/true, wtxid))) {
// NOTE(review): the `if` guarding this branch (original line 3204, a
// NetPermissionFlags::ForceRelay permission check, per the comment
// below) is missing from this excerpt — verify against the full source.
3205  // Always relay transactions received from peers with forcerelay
3206  // permission, even if they were already in the mempool, allowing
3207  // the node to function as a gateway for nodes hidden behind it.
3208  if (!m_mempool.exists(tx.GetHash())) {
3209  LogPrintf("Not relaying non-mempool transaction %s from forcerelay peer=%d\n", tx.GetHash().ToString(), pfrom.GetId());
3210  } else {
3211  LogPrintf("Force relaying tx %s from peer=%d\n", tx.GetHash().ToString(), pfrom.GetId());
3212  _RelayTransaction(tx.GetHash(), tx.GetWitnessHash());
3213  }
3214  }
3215  return;
3216  }
3217 
3218  const MempoolAcceptResult result = AcceptToMemoryPool(m_chainman.ActiveChainstate(), m_mempool, ptx, false /* bypass_limits */);
3219  const TxValidationState& state = result.m_state;
3220 
// NOTE(review): the `if` opening this accepted-transaction branch
// (original line 3221, a check on result.m_result_type given the
// success-path code that follows) is missing from this excerpt —
// verify against the full source.
3222  m_mempool.check(m_chainman.ActiveChainstate());
3223  // As this version of the transaction was acceptable, we can forget about any
3224  // requests for it.
3225  m_txrequest.ForgetTxHash(tx.GetHash());
3226  m_txrequest.ForgetTxHash(tx.GetWitnessHash());
3227  _RelayTransaction(tx.GetHash(), tx.GetWitnessHash());
// Any orphans waiting on this tx become eligible for reprocessing.
3228  m_orphanage.AddChildrenToWorkSet(tx, peer->m_orphan_work_set);
3229 
3230  pfrom.nLastTXTime = GetTime();
3231 
3232  LogPrint(BCLog::MEMPOOL, "AcceptToMemoryPool: peer=%d: accepted %s (poolsz %u txn, %u kB)\n",
3233  pfrom.GetId(),
3234  tx.GetHash().ToString(),
3235  m_mempool.size(), m_mempool.DynamicMemoryUsage() / 1000);
3236 
// Transactions evicted by RBF replacement become compact-block "extra"
// candidates, since peers may still refer to them in short IDs.
3237  for (const CTransactionRef& removedTx : result.m_replaced_transactions.value()) {
3238  AddToCompactExtraTransactions(removedTx);
3239  }
3240 
3241  // Recursively process any orphan transactions that depended on this one
3242  ProcessOrphanTx(peer->m_orphan_work_set);
3243  }
3244  else if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS)
3245  {
3246  bool fRejectedParents = false; // It may be the case that the orphans parents have all been rejected
3247 
3248  // Deduplicate parent txids, so that we don't have to loop over
3249  // the same parent txid more than once down below.
3250  std::vector<uint256> unique_parents;
3251  unique_parents.reserve(tx.vin.size());
3252  for (const CTxIn& txin : tx.vin) {
3253  // We start with all parents, and then remove duplicates below.
3254  unique_parents.push_back(txin.prevout.hash);
3255  }
3256  std::sort(unique_parents.begin(), unique_parents.end());
3257  unique_parents.erase(std::unique(unique_parents.begin(), unique_parents.end()), unique_parents.end());
3258  for (const uint256& parent_txid : unique_parents) {
3259  if (recentRejects->contains(parent_txid)) {
3260  fRejectedParents = true;
3261  break;
3262  }
3263  }
3264  if (!fRejectedParents) {
3265  const auto current_time = GetTime<std::chrono::microseconds>();
3266 
3267  for (const uint256& parent_txid : unique_parents) {
3268  // Here, we only have the txid (and not wtxid) of the
3269  // inputs, so we only request in txid mode, even for
3270  // wtxidrelay peers.
3271  // Eventually we should replace this with an improved
3272  // protocol for getting all unconfirmed parents.
3273  const GenTxid gtxid{/* is_wtxid=*/false, parent_txid};
3274  pfrom.AddKnownTx(parent_txid);
3275  if (!AlreadyHaveTx(gtxid)) AddTxAnnouncement(pfrom, gtxid, current_time);
3276  }
3277 
3278  if (m_orphanage.AddTx(ptx, pfrom.GetId())) {
3279  AddToCompactExtraTransactions(ptx);
3280  }
3281 
3282  // Once added to the orphan pool, a tx is considered AlreadyHave, and we shouldn't request it anymore.
3283  m_txrequest.ForgetTxHash(tx.GetHash());
3284  m_txrequest.ForgetTxHash(tx.GetWitnessHash());
3285 
3286  // DoS prevention: do not allow m_orphanage to grow unbounded (see CVE-2012-3789)
3287  unsigned int nMaxOrphanTx = (unsigned int)std::max((int64_t)0, gArgs.GetArg("-maxorphantx", DEFAULT_MAX_ORPHAN_TRANSACTIONS));
3288  unsigned int nEvicted = m_orphanage.LimitOrphans(nMaxOrphanTx);
3289  if (nEvicted > 0) {
3290  LogPrint(BCLog::MEMPOOL, "orphanage overflow, removed %u tx\n", nEvicted);
3291  }
3292  } else {
3293  LogPrint(BCLog::MEMPOOL, "not keeping orphan with rejected parents %s\n",tx.GetHash().ToString());
3294  // We will continue to reject this tx since it has rejected
3295  // parents so avoid re-requesting it from other peers.
3296  // Here we add both the txid and the wtxid, as we know that
3297  // regardless of what witness is provided, we will not accept
3298  // this, so we don't need to allow for redownload of this txid
3299  // from any of our non-wtxidrelay peers.
3300  recentRejects->insert(tx.GetHash());
3301  recentRejects->insert(tx.GetWitnessHash());
3302  m_txrequest.ForgetTxHash(tx.GetHash());
3303  m_txrequest.ForgetTxHash(tx.GetWitnessHash());
3304  }
3305  } else {
3306  if (state.GetResult() != TxValidationResult::TX_WITNESS_STRIPPED) {
3307  // We can add the wtxid of this transaction to our reject filter.
3308  // Do not add txids of witness transactions or witness-stripped
3309  // transactions to the filter, as they can have been malleated;
3310  // adding such txids to the reject filter would potentially
3311  // interfere with relay of valid transactions from peers that
3312  // do not support wtxid-based relay. See
3313  // https://github.com/bitcoin/bitcoin/issues/8279 for details.
3314  // We can remove this restriction (and always add wtxids to
3315  // the filter even for witness stripped transactions) once
3316  // wtxid-based relay is broadly deployed.
3317  // See also comments in https://github.com/bitcoin/bitcoin/pull/18044#discussion_r443419034
3318  // for concerns around weakening security of unupgraded nodes
3319  // if we start doing this too early.
3320  assert(recentRejects);
3321  recentRejects->insert(tx.GetWitnessHash());
3322  m_txrequest.ForgetTxHash(tx.GetWitnessHash());
3323  // If the transaction failed for TX_INPUTS_NOT_STANDARD,
3324  // then we know that the witness was irrelevant to the policy
3325  // failure, since this check depends only on the txid
3326  // (the scriptPubKey being spent is covered by the txid).
3327  // Add the txid to the reject filter to prevent repeated
3328  // processing of this transaction in the event that child
3329  // transactions are later received (resulting in
3330  // parent-fetching by txid via the orphan-handling logic).
3331  if (state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && tx.GetWitnessHash() != tx.GetHash()) {
3332  recentRejects->insert(tx.GetHash());
3333  m_txrequest.ForgetTxHash(tx.GetHash());
3334  }
// Keep small rejected txs around as compact-block reconstruction
// candidates (100 kB recursive-memory-usage cutoff).
3335  if (RecursiveDynamicUsage(*ptx) < 100000) {
3336  AddToCompactExtraTransactions(ptx);
3337  }
3338  }
3339  }
3340 
3341  // If a tx has been detected by recentRejects, we will have reached
3342  // this point and the tx will have been ignored. Because we haven't run
3343  // the tx through AcceptToMemoryPool, we won't have computed a DoS
3344  // score for it or determined exactly why we consider it invalid.
3345  //
3346  // This means we won't penalize any peer subsequently relaying a DoSy
3347  // tx (even if we penalized the first peer who gave it to us) because
3348  // we have to account for recentRejects showing false positives. In
3349  // other words, we shouldn't penalize a peer if we aren't *sure* they
3350  // submitted a DoSy tx.
3351  //
3352  // Note that recentRejects doesn't just record DoSy or invalid
3353  // transactions, but any tx not accepted by the mempool, which may be
3354  // due to node policy (vs. consensus). So we can't blanket penalize a
3355  // peer simply for relaying a tx that our recentRejects has caught,
3356  // regardless of false positives.
3357 
3358  if (state.IsInvalid()) {
3359  LogPrint(BCLog::MEMPOOLREJ, "%s from peer=%d was not accepted: %s\n", tx.GetHash().ToString(),
3360  pfrom.GetId(),
3361  state.ToString());
3362  MaybePunishNodeForTx(pfrom.GetId(), state);
3363  }
3364  return;
3365  }
3366 
3367  if (msg_type == NetMsgType::CMPCTBLOCK)
3368  {
3369  // Ignore cmpctblock received while importing
3370  if (fImporting || fReindex) {
3371  LogPrint(BCLog::NET, "Unexpected cmpctblock message received from peer %d\n", pfrom.GetId());
3372  return;
3373  }
3374 
3375  CBlockHeaderAndShortTxIDs cmpctblock;
3376  vRecv >> cmpctblock;
3377 
3378  bool received_new_header = false;
3379 
3380  {
3381  LOCK(cs_main);
3382 
3383  if (!m_chainman.m_blockman.LookupBlockIndex(cmpctblock.header.hashPrevBlock)) {
3384  // Doesn't connect (or is genesis), instead of DoSing in AcceptBlockHeader, request deeper headers
3385  if (!m_chainman.ActiveChainstate().IsInitialBlockDownload())
3386  m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, m_chainman.ActiveChain().GetLocator(pindexBestHeader), uint256()));
3387  return;
3388  }
3389 
3390  if (!m_chainman.m_blockman.LookupBlockIndex(cmpctblock.header.GetHash())) {
3391  received_new_header = true;
3392  }
3393  }
3394 
3395  const CBlockIndex *pindex = nullptr;
3396  BlockValidationState state;
3397  if (!m_chainman.ProcessNewBlockHeaders({cmpctblock.header}, state, m_chainparams, &pindex)) {
3398  if (state.IsInvalid()) {
3399  MaybePunishNodeForBlock(pfrom.GetId(), state, /*via_compact_block*/ true, "invalid header via cmpctblock");
3400  return;
3401  }
3402  }
3403 
3404  // When we succeed in decoding a block's txids from a cmpctblock
3405  // message we typically jump to the BLOCKTXN handling code, with a
3406  // dummy (empty) BLOCKTXN message, to re-use the logic there in
3407  // completing processing of the putative block (without cs_main).
3408  bool fProcessBLOCKTXN = false;
3410 
3411  // If we end up treating this as a plain headers message, call that as well
3412  // without cs_main.
3413  bool fRevertToHeaderProcessing = false;
3414 
3415  // Keep a CBlock for "optimistic" compactblock reconstructions (see
3416  // below)
3417  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
3418  bool fBlockReconstructed = false;
3419 
3420  {
3422  // If AcceptBlockHeader returned true, it set pindex
3423  assert(pindex);
3424  UpdateBlockAvailability(pfrom.GetId(), pindex->GetBlockHash());
3425 
3426  CNodeState *nodestate = State(pfrom.GetId());
3427 
3428  // If this was a new header with more work than our tip, update the
3429  // peer's last block announcement time
3430  if (received_new_header && pindex->nChainWork > m_chainman.ActiveChain().Tip()->nChainWork) {
3431  nodestate->m_last_block_announcement = GetTime();
3432  }
3433 
3434  std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator blockInFlightIt = mapBlocksInFlight.find(pindex->GetBlockHash());
3435  bool fAlreadyInFlight = blockInFlightIt != mapBlocksInFlight.end();
3436 
3437  if (pindex->nStatus & BLOCK_HAVE_DATA) // Nothing to do here
3438  return;
3439 
3440  if (pindex->nChainWork <= m_chainman.ActiveChain().Tip()->nChainWork || // We know something better
3441  pindex->nTx != 0) { // We had this block at some point, but pruned it
3442  if (fAlreadyInFlight) {
3443  // We requested this block for some reason, but our mempool will probably be useless
3444  // so we just grab the block via normal getdata
3445  std::vector<CInv> vInv(1);
3446  vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom), cmpctblock.header.GetHash());
3447  m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
3448  }
3449  return;
3450  }
3451 
3452  // If we're not close to tip yet, give up and let parallel block fetch work its magic
3453  if (!fAlreadyInFlight && !CanDirectFetch()) {
3454  return;
3455  }
3456 
3457  if (DeploymentActiveAt(*pindex, m_chainparams.GetConsensus(), Consensus::DEPLOYMENT_SEGWIT) && !nodestate->fSupportsDesiredCmpctVersion) {
3458  // Don't bother trying to process compact blocks from v1 peers
3459  // after segwit activates.
3460  return;
3461  }
3462 
3463  // We want to be a bit conservative just to be extra careful about DoS
3464  // possibilities in compact block processing...
3465  if (pindex->nHeight <= m_chainman.ActiveChain().Height() + 2) {
3466  if ((!fAlreadyInFlight && nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) ||
3467  (fAlreadyInFlight && blockInFlightIt->second.first == pfrom.GetId())) {
3468  std::list<QueuedBlock>::iterator* queuedBlockIt = nullptr;
3469  if (!BlockRequested(pfrom.GetId(), *pindex, &queuedBlockIt)) {
3470  if (!(*queuedBlockIt)->partialBlock)
3471  (*queuedBlockIt)->partialBlock.reset(new PartiallyDownloadedBlock(&m_mempool));
3472  else {
3473  // The block was already in flight using compact blocks from the same peer
3474  LogPrint(BCLog::NET, "Peer sent us compact block we were already syncing!\n");
3475  return;
3476  }
3477  }
3478 
3479  PartiallyDownloadedBlock& partialBlock = *(*queuedBlockIt)->partialBlock;
3480  ReadStatus status = partialBlock.InitData(cmpctblock, vExtraTxnForCompact);
3481  if (status == READ_STATUS_INVALID) {
3482  RemoveBlockRequest(pindex->GetBlockHash()); // Reset in-flight state in case Misbehaving does not result in a disconnect
3483  Misbehaving(pfrom.GetId(), 100, "invalid compact block");
3484  return;
3485  } else if (status == READ_STATUS_FAILED) {
3486  // Duplicate txindexes, the block is now in-flight, so just request it
3487  std::vector<CInv> vInv(1);
3488  vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom), cmpctblock.header.GetHash());
3489  m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
3490  return;
3491  }
3492 
3494  for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) {
3495  if (!partialBlock.IsTxAvailable(i))
3496  req.indexes.push_back(i);
3497  }
3498  if (req.indexes.empty()) {
3499  // Dirty hack to jump to BLOCKTXN code (TODO: move message handling into their own functions)
3500  BlockTransactions txn;
3501  txn.blockhash = cmpctblock.header.GetHash();
3502  blockTxnMsg << txn;
3503  fProcessBLOCKTXN = true;
3504  } else {
3505  req.blockhash = pindex->GetBlockHash();
3506  m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETBLOCKTXN, req));
3507  }
3508  } else {
3509  // This block is either already in flight from a different
3510  // peer, or this peer has too many blocks outstanding to
3511  // download from.
3512  // Optimistically try to reconstruct anyway since we might be
3513  // able to without any round trips.
3514  PartiallyDownloadedBlock tempBlock(&m_mempool);
3515  ReadStatus status = tempBlock.InitData(cmpctblock, vExtraTxnForCompact);
3516  if (status != READ_STATUS_OK) {
3517  // TODO: don't ignore failures
3518  return;
3519  }
3520  std::vector<CTransactionRef> dummy;
3521  status = tempBlock.FillBlock(*pblock, dummy);
3522  if (status == READ_STATUS_OK) {
3523  fBlockReconstructed = true;
3524  }
3525  }
3526  } else {
3527  if (fAlreadyInFlight) {
3528  // We requested this block, but its far into the future, so our
3529  // mempool will probably be useless - request the block normally
3530  std::vector<CInv> vInv(1);
3531  vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom), cmpctblock.header.GetHash());
3532  m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
3533  return;
3534  } else {
3535  // If this was an announce-cmpctblock, we want the same treatment as a header message
3536  fRevertToHeaderProcessing = true;
3537  }
3538  }
3539  } // cs_main
3540 
3541  if (fProcessBLOCKTXN) {
3542  return ProcessMessage(pfrom, NetMsgType::BLOCKTXN, blockTxnMsg, time_received, interruptMsgProc);
3543  }
3544 
3545  if (fRevertToHeaderProcessing) {
3546  // Headers received from HB compact block peers are permitted to be
3547  // relayed before full validation (see BIP 152), so we don't want to disconnect
3548  // the peer if the header turns out to be for an invalid block.
3549  // Note that if a peer tries to build on an invalid chain, that
3550  // will be detected and the peer will be disconnected/discouraged.
3551  return ProcessHeadersMessage(pfrom, *peer, {cmpctblock.header}, /*via_compact_block=*/true);
3552  }
3553 
3554  if (fBlockReconstructed) {
3555  // If we got here, we were able to optimistically reconstruct a
3556  // block that is in flight from some other peer.
3557  {
3558  LOCK(cs_main);
3559  mapBlockSource.emplace(pblock->GetHash(), std::make_pair(pfrom.GetId(), false));
3560  }
3561  // Setting force_processing to true means that we bypass some of
3562  // our anti-DoS protections in AcceptBlock, which filters
3563  // unrequested blocks that might be trying to waste our resources
3564  // (eg disk space). Because we only try to reconstruct blocks when
3565  // we're close to caught up (via the CanDirectFetch() requirement
3566  // above, combined with the behavior of not requesting blocks until
3567  // we have a chain with at least nMinimumChainWork), and we ignore
3568  // compact blocks with less work than our tip, it is safe to treat
3569  // reconstructed compact blocks as having been requested.
3570  ProcessBlock(pfrom, pblock, /*force_processing=*/true);
3571  LOCK(cs_main); // hold cs_main for CBlockIndex::IsValid()
3572  if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS)) {
3573  // Clear download state for this block, which is in
3574  // process from some other peer. We do this after calling
3575  // ProcessNewBlock so that a malleated cmpctblock announcement
3576  // can't be used to interfere with block relay.
3577  RemoveBlockRequest(pblock->GetHash());
3578  }
3579  }
3580  return;
3581  }
3582 
3583  if (msg_type == NetMsgType::BLOCKTXN)
3584  {
3585  // Ignore blocktxn received while importing
3586  if (fImporting || fReindex) {
3587  LogPrint(BCLog::NET, "Unexpected blocktxn message received from peer %d\n", pfrom.GetId());
3588  return;
3589  }
3590 
3591  BlockTransactions resp;
3592  vRecv >> resp;
3593 
3594  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
3595  bool fBlockRead = false;
3596  {
3597  LOCK(cs_main);
3598 
3599  std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator it = mapBlocksInFlight.find(resp.blockhash);
3600  if (it == mapBlocksInFlight.end() || !it->second.second->partialBlock ||
3601  it->second.first != pfrom.GetId()) {
3602  LogPrint(BCLog::NET, "Peer %d sent us block transactions for block we weren't expecting\n", pfrom.GetId());
3603  return;
3604  }
3605 
3606  PartiallyDownloadedBlock& partialBlock = *it->second.second->partialBlock;
3607  ReadStatus status = partialBlock.FillBlock(*pblock, resp.txn);
3608  if (status == READ_STATUS_INVALID) {
3609  RemoveBlockRequest(resp.blockhash); // Reset in-flight state in case Misbehaving does not result in a disconnect
3610  Misbehaving(pfrom.GetId(), 100, "invalid compact block/non-matching block transactions");
3611  return;
3612  } else if (status == READ_STATUS_FAILED) {
3613  // Might have collided, fall back to getdata now :(
3614  std::vector<CInv> invs;
3615  invs.push_back(CInv(MSG_BLOCK | GetFetchFlags(pfrom), resp.blockhash));
3616  m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, invs));
3617  } else {
3618  // Block is either okay, or possibly we received
3619  // READ_STATUS_CHECKBLOCK_FAILED.
3620  // Note that CheckBlock can only fail for one of a few reasons:
3621  // 1. bad-proof-of-work (impossible here, because we've already
3622  // accepted the header)
3623  // 2. merkleroot doesn't match the transactions given (already
3624  // caught in FillBlock with READ_STATUS_FAILED, so
3625  // impossible here)
3626  // 3. the block is otherwise invalid (eg invalid coinbase,
3627  // block is too big, too many legacy sigops, etc).
3628  // So if CheckBlock failed, #3 is the only possibility.
3629  // Under BIP 152, we don't discourage the peer unless proof of work is
3630  // invalid (we don't require all the stateless checks to have
3631  // been run). This is handled below, so just treat this as
3632  // though the block was successfully read, and rely on the
3633  // handling in ProcessNewBlock to ensure the block index is
3634  // updated, etc.
3635  RemoveBlockRequest(resp.blockhash); // it is now an empty pointer
3636  fBlockRead = true;
3637  // mapBlockSource is used for potentially punishing peers and
3638  // updating which peers send us compact blocks, so the race
3639  // between here and cs_main in ProcessNewBlock is fine.
3640  // BIP 152 permits peers to relay compact blocks after validating
3641  // the header only; we should not punish peers if the block turns
3642  // out to be invalid.
3643  mapBlockSource.emplace(resp.blockhash, std::make_pair(pfrom.GetId(), false));
3644  }
3645  } // Don't hold cs_main when we call into ProcessNewBlock
3646  if (fBlockRead) {
3647  // Since we requested this block (it was in mapBlocksInFlight), force it to be processed,
3648  // even if it would not be a candidate for new tip (missing previous block, chain not long enough, etc)
3649  // This bypasses some anti-DoS logic in AcceptBlock (eg to prevent
3650  // disk-space attacks), but this should be safe due to the
3651  // protections in the compact block handler -- see related comment
3652  // in compact block optimistic reconstruction handling.
3653  ProcessBlock(pfrom, pblock, /*force_processing=*/true);
3654  }
3655  return;
3656  }
3657 
3658  if (msg_type == NetMsgType::HEADERS)
3659  {
3660  // Ignore headers received while importing
3661  if (fImporting || fReindex) {
3662  LogPrint(BCLog::NET, "Unexpected headers message received from peer %d\n", pfrom.GetId());
3663  return;
3664  }
3665 
3666  std::vector<CBlockHeader> headers;
3667 
3668  // Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks.
3669  unsigned int nCount = ReadCompactSize(vRecv);
3670  if (nCount > MAX_HEADERS_RESULTS) {
3671  Misbehaving(pfrom.GetId(), 20, strprintf("headers message size = %u", nCount));
3672  return;
3673  }
3674  headers.resize(nCount);
3675  for (unsigned int n = 0; n < nCount; n++) {
3676  vRecv >> headers[n];
3677  ReadCompactSize(vRecv); // ignore tx count; assume it is 0.
3678  }
3679 
3680  return ProcessHeadersMessage(pfrom, *peer, headers, /*via_compact_block=*/false);
3681  }
3682 
3683  if (msg_type == NetMsgType::BLOCK)
3684  {
3685  // Ignore block received while importing
3686  if (fImporting || fReindex) {
3687  LogPrint(BCLog::NET, "Unexpected block message received from peer %d\n", pfrom.GetId());
3688  return;
3689  }
3690 
3691  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
3692  vRecv >> *pblock;
3693 
3694  LogPrint(BCLog::NET, "received block %s peer=%d\n", pblock->GetHash().ToString(), pfrom.GetId());
3695 
3696  bool forceProcessing = false;
3697  const uint256 hash(pblock->GetHash());
3698  {
3699  LOCK(cs_main);
3700  // Always process the block if we requested it, since we may
3701  // need it even when it's not a candidate for a new best tip.
3702  forceProcessing = IsBlockRequested(hash);
3703  RemoveBlockRequest(hash);
3704  // mapBlockSource is only used for punishing peers and setting
3705  // which peers send us compact blocks, so the race between here and
3706  // cs_main in ProcessNewBlock is fine.
3707  mapBlockSource.emplace(hash, std::make_pair(pfrom.GetId(), true));
3708  }
3709  ProcessBlock(pfrom, pblock, forceProcessing);
3710  return;
3711  }
3712 
3713  if (msg_type == NetMsgType::GETADDR) {
3714  // This asymmetric behavior for inbound and outbound connections was introduced
3715  // to prevent a fingerprinting attack: an attacker can send specific fake addresses
3716  // to users' AddrMan and later request them by sending getaddr messages.
3717  // Making nodes which are behind NAT and can only make outgoing connections ignore
3718  // the getaddr message mitigates the attack.
3719  if (!pfrom.IsInboundConn()) {
3720  LogPrint(BCLog::NET, "Ignoring \"getaddr\" from %s connection. peer=%d\n", pfrom.ConnectionTypeAsString(), pfrom.GetId());
3721  return;
3722  }
3723 
3724  // Only send one GetAddr response per connection to reduce resource waste
3725  // and discourage addr stamping of INV announcements.
3726  if (peer->m_getaddr_recvd) {
3727  LogPrint(BCLog::NET, "Ignoring repeated \"getaddr\". peer=%d\n", pfrom.GetId());
3728  return;
3729  }
3730  peer->m_getaddr_recvd = true;
3731 
3732  peer->m_addrs_to_send.clear();
3733  std::vector<CAddress> vAddr;
3735  vAddr = m_connman.GetAddresses(MAX_ADDR_TO_SEND, MAX_PCT_ADDR_TO_SEND, /* network */ std::nullopt);
3736  } else {
3737  vAddr = m_connman.GetAddresses(pfrom, MAX_ADDR_TO_SEND, MAX_PCT_ADDR_TO_SEND);
3738  }
3739  FastRandomContext insecure_rand;
3740  for (const CAddress &addr : vAddr) {
3741  PushAddress(*peer, addr, insecure_rand);
3742  }
3743  return;
3744  }
3745 
3746  if (msg_type == NetMsgType::MEMPOOL) {
3748  {
3750  {
3751  LogPrint(BCLog::NET, "mempool request with bloom filters disabled, disconnect peer=%d\n", pfrom.GetId());
3752  pfrom.fDisconnect = true;
3753  }
3754  return;
3755  }
3756 
3757  if (m_connman.OutboundTargetReached(false) && !pfrom.HasPermission(NetPermissionFlags::Mempool))
3758  {
3760  {
3761  LogPrint(BCLog::NET, "mempool request with bandwidth limit reached, disconnect peer=%d\n", pfrom.GetId());
3762  pfrom.fDisconnect = true;
3763  }
3764  return;
3765  }
3766 
3767  if (pfrom.m_tx_relay != nullptr) {
3768  LOCK(pfrom.m_tx_relay->cs_tx_inventory);
3769  pfrom.m_tx_relay->fSendMempool = true;
3770  }
3771  return;
3772  }
3773 
3774  if (msg_type == NetMsgType::PING) {
3775  if (pfrom.GetCommonVersion() > BIP0031_VERSION) {
3776  uint64_t nonce = 0;
3777  vRecv >> nonce;
3778  // Echo the message back with the nonce. This allows for two useful features:
3779  //
3780  // 1) A remote node can quickly check if the connection is operational
3781  // 2) Remote nodes can measure the latency of the network thread. If this node
3782  // is overloaded it won't respond to pings quickly and the remote node can
3783  // avoid sending us more work, like chain download requests.
3784  //
3785  // The nonce stops the remote getting confused between different pings: without
3786  // it, if the remote node sends a ping once per second and this node takes 5
3787  // seconds to respond to each, the 5th ping the remote sends would appear to
3788  // return very quickly.
3789  m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::PONG, nonce));
3790  }
3791  return;
3792  }
3793 
3794  if (msg_type == NetMsgType::PONG) {
3795  const auto ping_end = time_received;
3796  uint64_t nonce = 0;
3797  size_t nAvail = vRecv.in_avail();
3798  bool bPingFinished = false;
3799  std::string sProblem;
3800 
3801  if (nAvail >= sizeof(nonce)) {
3802  vRecv >> nonce;
3803 
3804  // Only process pong message if there is an outstanding ping (old ping without nonce should never pong)
3805  if (peer->m_ping_nonce_sent != 0) {
3806  if (nonce == peer->m_ping_nonce_sent) {
3807  // Matching pong received, this ping is no longer outstanding
3808  bPingFinished = true;
3809  const auto ping_time = ping_end - peer->m_ping_start.load();
3810  if (ping_time.count() >= 0) {
3811  // Let connman know about this successful ping-pong
3812  pfrom.PongReceived(ping_time);
3813  } else {
3814  // This should never happen
3815  sProblem = "Timing mishap";
3816  }
3817  } else {
3818  // Nonce mismatches are normal when pings are overlapping
3819  sProblem = "Nonce mismatch";
3820  if (nonce == 0) {
3821  // This is most likely a bug in another implementation somewhere; cancel this ping
3822  bPingFinished = true;
3823  sProblem = "Nonce zero";
3824  }
3825  }
3826  } else {
3827  sProblem = "Unsolicited pong without ping";
3828  }
3829  } else {
3830  // This is most likely a bug in another implementation somewhere; cancel this ping
3831  bPingFinished = true;
3832  sProblem = "Short payload";
3833  }
3834 
3835  if (!(sProblem.empty())) {
3836  LogPrint(BCLog::NET, "pong peer=%d: %s, %x expected, %x received, %u bytes\n",
3837  pfrom.GetId(),
3838  sProblem,
3839  peer->m_ping_nonce_sent,
3840  nonce,
3841  nAvail);
3842  }
3843  if (bPingFinished) {
3844  peer->m_ping_nonce_sent = 0;
3845  }
3846  return;
3847  }
3848 
3849  if (msg_type == NetMsgType::FILTERLOAD) {
3850  if (!(pfrom.GetLocalServices() & NODE_BLOOM)) {
3851  LogPrint(BCLog::NET, "filterload received despite not offering bloom services from peer=%d; disconnecting\n", pfrom.GetId());
3852  pfrom.fDisconnect = true;
3853  return;
3854  }
3855  CBloomFilter filter;
3856  vRecv >> filter;
3857 
3858  if (!filter.IsWithinSizeConstraints())
3859  {
3860  // There is no excuse for sending a too-large filter
3861  Misbehaving(pfrom.GetId(), 100, "too-large bloom filter");
3862  }
3863  else if (pfrom.m_tx_relay != nullptr)
3864  {
3865  LOCK(pfrom.m_tx_relay->cs_filter);
3866  pfrom.m_tx_relay->pfilter.reset(new CBloomFilter(filter));
3867  pfrom.m_tx_relay->fRelayTxes = true;
3868  }
3869  return;
3870  }
3871 
3872  if (msg_type == NetMsgType::FILTERADD) {
3873  if (!(pfrom.GetLocalServices() & NODE_BLOOM)) {
3874  LogPrint(BCLog::NET, "filteradd received despite not offering bloom services from peer=%d; disconnecting\n", pfrom.GetId());
3875  pfrom.fDisconnect = true;
3876  return;
3877  }
3878  std::vector<unsigned char> vData;
3879  vRecv >> vData;
3880 
3881  // Nodes must NEVER send a data item > 520 bytes (the max size for a script data object,
3882  // and thus, the maximum size any matched object can have) in a filteradd message
3883  bool bad = false;
3884  if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) {
3885  bad = true;
3886  } else if (pfrom.m_tx_relay != nullptr) {
3887  LOCK(pfrom.m_tx_relay->cs_filter);
3888  if (pfrom.m_tx_relay->pfilter) {
3889  pfrom.m_tx_relay->pfilter->insert(vData);
3890  } else {
3891  bad = true;
3892  }
3893  }
3894  if (bad) {
3895  Misbehaving(pfrom.GetId(), 100, "bad filteradd message");
3896  }
3897  return;
3898  }
3899 
3900  if (msg_type == NetMsgType::FILTERCLEAR) {
3901  if (!(pfrom.GetLocalServices() & NODE_BLOOM)) {
3902  LogPrint(BCLog::NET, "filterclear received despite not offering bloom services from peer=%d; disconnecting\n", pfrom.GetId());
3903  pfrom.fDisconnect = true;
3904  return;
3905  }
3906  if (pfrom.m_tx_relay == nullptr) {
3907  return;
3908  }
3909  LOCK(pfrom.m_tx_relay->cs_filter);
3910  pfrom.m_tx_relay->pfilter = nullptr;
3911  pfrom.m_tx_relay->fRelayTxes = true;
3912  return;
3913  }
3914 
3915  if (msg_type == NetMsgType::FEEFILTER) {
3916  CAmount newFeeFilter = 0;
3917  vRecv >> newFeeFilter;
3918  if (MoneyRange(newFeeFilter)) {
3919  if (pfrom.m_tx_relay != nullptr) {
3920  pfrom.m_tx_relay->minFeeFilter = newFeeFilter;
3921  }
3922  LogPrint(BCLog::NET, "received: feefilter of %s from peer=%d\n", CFeeRate(newFeeFilter).ToString(), pfrom.GetId());
3923  }
3924  return;
3925  }
3926 
3927  if (msg_type == NetMsgType::GETCFILTERS) {
3928  ProcessGetCFilters(pfrom, vRecv);
3929  return;
3930  }
3931 
3932  if (msg_type == NetMsgType::GETCFHEADERS) {
3933  ProcessGetCFHeaders(pfrom, vRecv);
3934  return;
3935  }
3936 
3937  if (msg_type == NetMsgType::GETCFCHECKPT) {
3938  ProcessGetCFCheckPt(pfrom, vRecv);
3939  return;
3940  }
3941 
3942  if (msg_type == NetMsgType::NOTFOUND) {
3943  std::vector<CInv> vInv;
3944  vRecv >> vInv;
3946  LOCK(::cs_main);
3947  for (CInv &inv : vInv) {
3948  if (inv.IsGenTxMsg()) {
3949  // If we receive a NOTFOUND message for a tx we requested, mark the announcement for it as
3950  // completed in TxRequestTracker.
3951  m_txrequest.ReceivedResponse(pfrom.GetId(), inv.hash);
3952  }
3953  }
3954  }
3955  return;
3956  }
3957 
3958  // Ignore unknown commands for extensibility
3959  LogPrint(BCLog::NET, "Unknown command \"%s\" from peer=%d\n", SanitizeString(msg_type), pfrom.GetId());
3960  return;
3961 }
3962 
3963 bool PeerManagerImpl::MaybeDiscourageAndDisconnect(CNode& pnode, Peer& peer)
3964 {
3965  {
3966  LOCK(peer.m_misbehavior_mutex);
3967 
3968  // There's nothing to do if the m_should_discourage flag isn't set
3969  if (!peer.m_should_discourage) return false;
3970 
3971  peer.m_should_discourage = false;
3972  } // peer.m_misbehavior_mutex
3973 
3975  // We never disconnect or discourage peers for bad behavior if they have NetPermissionFlags::NoBan permission
3976  LogPrintf("Warning: not punishing noban peer %d!\n", peer.m_id);
3977  return false;
3978  }
3979 
3980  if (pnode.IsManualConn()) {
3981  // We never disconnect or discourage manual peers for bad behavior
3982  LogPrintf("Warning: not punishing manually connected peer %d!\n", peer.m_id);
3983  return false;
3984  }
3985 
3986  if (pnode.addr.IsLocal()) {
3987  // We disconnect local peers for bad behavior but don't discourage (since that would discourage
3988  // all peers on the same local address)
3989  LogPrint(BCLog::NET, "Warning: disconnecting but not discouraging %s peer %d!\n",
3990  pnode.m_inbound_onion ? "inbound onion" : "local", peer.m_id);
3991  pnode.fDisconnect = true;
3992  return true;
3993  }
3994 
3995  // Normal case: Disconnect the peer and discourage all nodes sharing the address
3996  LogPrint(BCLog::NET, "Disconnecting and discouraging peer %d!\n", peer.m_id);
3997  if (m_banman) m_banman->Discourage(pnode.addr);
3998  m_connman.DisconnectNode(pnode.addr);
3999  return true;
4000 }
4001 
4002 bool PeerManagerImpl::ProcessMessages(CNode* pfrom, std::atomic<bool>& interruptMsgProc)
4003 {
4004  bool fMoreWork = false;
4005 
4006  PeerRef peer = GetPeerRef(pfrom->GetId());
4007  if (peer == nullptr) return false;
4008 
4009  {
4010  LOCK(peer->m_getdata_requests_mutex);
4011  if (!peer->m_getdata_requests.empty()) {
4012  ProcessGetData(*pfrom, *peer, interruptMsgProc);
4013  }
4014  }
4015 
4016  {
4018  if (!peer->m_orphan_work_set.empty()) {
4019  ProcessOrphanTx(peer->m_orphan_work_set);
4020  }
4021  }
4022 
4023  if (pfrom->fDisconnect)
4024  return false;
4025 
4026  // this maintains the order of responses
4027  // and prevents m_getdata_requests to grow unbounded
4028  {
4029  LOCK(peer->m_getdata_requests_mutex);
4030  if (!peer->m_getdata_requests.empty()) return true;
4031  }
4032 
4033  {
4034  LOCK(g_cs_orphans);
4035  if (!peer->m_orphan_work_set.empty()) return true;
4036  }
4037 
4038  // Don't bother if send buffer is too full to respond anyway
4039  if (pfrom->fPauseSend) return false;
4040 
4041  std::list<CNetMessage> msgs;
4042  {
4043  LOCK(pfrom->cs_vProcessMsg);
4044  if (pfrom->vProcessMsg.empty()) return false;
4045  // Just take one message
4046  msgs.splice(msgs.begin(), pfrom->vProcessMsg, pfrom->vProcessMsg.begin());
4047  pfrom->nProcessQueueSize -= msgs.front().m_raw_message_size;
4048  pfrom->fPauseRecv = pfrom->nProcessQueueSize > m_connman.GetReceiveFloodSize();
4049  fMoreWork = !pfrom->vProcessMsg.empty();
4050  }
4051  CNetMessage& msg(msgs.front());
4052 
4053  if (gArgs.GetBoolArg("-capturemessages", false)) {
4054  CaptureMessage(pfrom->addr, msg.m_command, MakeUCharSpan(msg.m_recv), /* incoming */ true);
4055  }
4056 
4057  msg.SetVersion(pfrom->GetCommonVersion());
4058  const std::string& msg_type = msg.m_command;
4059 
4060  // Message size
4061  unsigned int nMessageSize = msg.m_message_size;
4062 
4063  try {
4064  ProcessMessage(*pfrom, msg_type, msg.m_recv, msg.m_time, interruptMsgProc);
4065  if (interruptMsgProc) return false;
4066  {
4067  LOCK(peer->m_getdata_requests_mutex);
4068  if (!peer->m_getdata_requests.empty()) fMoreWork = true;
4069  }
4070  } catch (const std::exception& e) {
4071  LogPrint(BCLog::NET, "%s(%s, %u bytes): Exception '%s' (%s) caught\n", __func__, SanitizeString(msg_type), nMessageSize, e.what(), typeid(e).name());
4072  } catch (...) {
4073  LogPrint(BCLog::NET, "%s(%s, %u bytes): Unknown exception caught\n", __func__, SanitizeString(msg_type), nMessageSize);
4074  }
4075 
4076  return fMoreWork;
4077 }
4078 
4079 void PeerManagerImpl::ConsiderEviction(CNode& pto, int64_t time_in_seconds)
4080 {
4082 
4083  CNodeState &state = *State(pto.GetId());
4084  const CNetMsgMaker msgMaker(pto.GetCommonVersion());
4085 
4086  if (!state.m_chain_sync.m_protect && pto.IsOutboundOrBlockRelayConn() && state.fSyncStarted) {
4087  // This is an outbound peer subject to disconnection if they don't
4088  // announce a block with as much work as the current tip within
4089  // CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds (note: if
4090  // their chain has more work than ours, we should sync to it,
4091  // unless it's invalid, in which case we should find that out and
4092  // disconnect from them elsewhere).
4093  if (state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= m_chainman.ActiveChain().Tip()->nChainWork) {
4094  if (state.m_chain_sync.m_timeout != 0) {
4095  state.m_chain_sync.m_timeout = 0;
4096  state.m_chain_sync.m_work_header = nullptr;
4097  state.m_chain_sync.m_sent_getheaders = false;
4098  }
4099  } else if (state.m_chain_sync.m_timeout == 0 || (state.m_chain_sync.m_work_header != nullptr && state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= state.m_chain_sync.m_work_header->nChainWork)) {
4100  // Our best block known by this peer is behind our tip, and we're either noticing
4101  // that for the first time, OR this peer was able to catch up to some earlier point
4102  // where we checked against our tip.
4103  // Either way, set a new timeout based on current tip.
4104  state.m_chain_sync.m_timeout = time_in_seconds + CHAIN_SYNC_TIMEOUT;
4105  state.m_chain_sync.m_work_header = m_chainman.ActiveChain().Tip();
4106  state.m_chain_sync.m_sent_getheaders = false;
4107  } else if (state.m_chain_sync.m_timeout > 0 && time_in_seconds > state.m_chain_sync.m_timeout) {
4108  // No evidence yet that our peer has synced to a chain with work equal to that
4109  // of our tip, when we first detected it was behind. Send a single getheaders
4110  // message to give the peer a chance to update us.
4111  if (state.m_chain_sync.m_sent_getheaders) {
4112  // They've run out of time to catch up!
4113  LogPrintf("Disconnecting outbound peer %d for old chain, best known block = %s\n", pto.GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>");
4114  pto.fDisconnect = true;
4115  } else {
4116  assert(state.m_chain_sync.m_work_header);
4117  LogPrint(BCLog::NET, "sending getheaders to outbound peer=%d to verify chain work (current best known block:%s, benchmark blockhash: %s)\n", pto.GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>", state.m_chain_sync.m_work_header->GetBlockHash().ToString());
4118  m_connman.PushMessage(&pto, msgMaker.Make(NetMsgType::GETHEADERS, m_chainman.ActiveChain().GetLocator(state.m_chain_sync.m_work_header->pprev), uint256()));
4119  state.m_chain_sync.m_sent_getheaders = true;
4120  constexpr int64_t HEADERS_RESPONSE_TIME = 120; // 2 minutes
4121  // Bump the timeout to allow a response, which could clear the timeout
4122  // (if the response shows the peer has synced), reset the timeout (if
4123  // the peer syncs to the required work but not to our tip), or result
4124  // in disconnect (if we advance to the timeout and pindexBestKnownBlock
4125  // has not sufficiently progressed)
4126  state.m_chain_sync.m_timeout = time_in_seconds + HEADERS_RESPONSE_TIME;
4127  }
4128  }
4129  }
4130 }
4131 
4132 void PeerManagerImpl::EvictExtraOutboundPeers(int64_t time_in_seconds)
4133 {
4134  // If we have any extra block-relay-only peers, disconnect the youngest unless
4135  // it's given us a block -- in which case, compare with the second-youngest, and
4136  // out of those two, disconnect the peer who least recently gave us a block.
4137  // The youngest block-relay-only peer would be the extra peer we connected
4138  // to temporarily in order to sync our tip; see net.cpp.
4139  // Note that we use higher nodeid as a measure for most recent connection.
4140  if (m_connman.GetExtraBlockRelayCount() > 0) {
4141  std::pair<NodeId, int64_t> youngest_peer{-1, 0}, next_youngest_peer{-1, 0};
4142 
4143  m_connman.ForEachNode([&](CNode* pnode) {
4144  if (!pnode->IsBlockOnlyConn() || pnode->fDisconnect) return;
4145  if (pnode->GetId() > youngest_peer.first) {
4146  next_youngest_peer = youngest_peer;
4147  youngest_peer.first = pnode->GetId();
4148  youngest_peer.second = pnode->nLastBlockTime;
4149  }
4150  });
4151  NodeId to_disconnect = youngest_peer.first;
4152  if (youngest_peer.second > next_youngest_peer.second) {
4153  // Our newest block-relay-only peer gave us a block more recently;
4154  // disconnect our second youngest.
4155  to_disconnect = next_youngest_peer.first;
4156  }
4157  m_connman.ForNode(to_disconnect, [&](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
4159  // Make sure we're not getting a block right now, and that
4160  // we've been connected long enough for this eviction to happen
4161  // at all.
4162  // Note that we only request blocks from a peer if we learn of a
4163  // valid headers chain with at least as much work as our tip.
4164  CNodeState *node_state = State(pnode->GetId());
4165  if (node_state == nullptr ||
4166  (time_in_seconds - pnode->nTimeConnected >= MINIMUM_CONNECT_TIME && node_state->nBlocksInFlight == 0)) {
4167  pnode->fDisconnect = true;
4168  LogPrint(BCLog::NET, "disconnecting extra block-relay-only peer=%d (last block received at time %d)\n", pnode->GetId(), pnode->nLastBlockTime);
4169  return true;
4170  } else {
4171  LogPrint(BCLog::NET, "keeping block-relay-only peer=%d chosen for eviction (connect time: %d, blocks_in_flight: %d)\n",
4172  pnode->GetId(), pnode->nTimeConnected, node_state->nBlocksInFlight);
4173  }
4174  return false;
4175  });
4176  }
4177 
4178  // Check whether we have too many outbound-full-relay peers
4179  if (m_connman.GetExtraFullOutboundCount() > 0) {
4180  // If we have more outbound-full-relay peers than we target, disconnect one.
4181  // Pick the outbound-full-relay peer that least recently announced
4182  // us a new block, with ties broken by choosing the more recent
4183  // connection (higher node id)
4184  NodeId worst_peer = -1;
4185  int64_t oldest_block_announcement = std::numeric_limits<int64_t>::max();
4186 
4187  m_connman.ForEachNode([&](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
4189 
4190  // Only consider outbound-full-relay peers that are not already
4191  // marked for disconnection
4192  if (!pnode->IsFullOutboundConn() || pnode->fDisconnect) return;
4193  CNodeState *state = State(pnode->GetId());
4194  if (state == nullptr) return; // shouldn't be possible, but just in case
4195  // Don't evict our protected peers
4196  if (state->m_chain_sync.m_protect) return;
4197  if (state->m_last_block_announcement < oldest_block_announcement || (state->m_last_block_announcement == oldest_block_announcement && pnode->GetId() > worst_peer)) {
4198  worst_peer = pnode->GetId();
4199  oldest_block_announcement = state->m_last_block_announcement;
4200  }
4201  });
4202  if (worst_peer != -1) {
4203  bool disconnected = m_connman.ForNode(worst_peer, [&](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
4205 
4206  // Only disconnect a peer that has been connected to us for
4207  // some reasonable fraction of our check-frequency, to give
4208  // it time for new information to have arrived.
4209  // Also don't disconnect any peer we're trying to download a
4210  // block from.
4211  CNodeState &state = *State(pnode->GetId());
4212  if (time_in_seconds - pnode->nTimeConnected > MINIMUM_CONNECT_TIME && state.nBlocksInFlight == 0) {
4213  LogPrint(BCLog::NET, "disconnecting extra outbound peer=%d (last block announcement received at time %d)\n", pnode->GetId(), oldest_block_announcement);
4214  pnode->fDisconnect = true;
4215  return true;
4216  } else {
4217  LogPrint(BCLog::NET, "keeping outbound peer=%d chosen for eviction (connect time: %d, blocks_in_flight: %d)\n", pnode->GetId(), pnode->nTimeConnected, state.nBlocksInFlight);
4218  return false;
4219  }
4220  });
4221  if (disconnected) {
4222  // If we disconnected an extra peer, that means we successfully
4223  // connected to at least one peer after the last time we
4224  // detected a stale tip. Don't try any more extra peers until
4225  // we next detect a stale tip, to limit the load we put on the
4226  // network from these extra connections.
4227  m_connman.SetTryNewOutboundPeer(false);
4228  }
4229  }
4230  }
4231 }
4232 
4233 void PeerManagerImpl::CheckForStaleTipAndEvictPeers()
4234 {
4235  LOCK(cs_main);
4236 
4237  int64_t time_in_seconds = GetTime();
4238 
4239  EvictExtraOutboundPeers(time_in_seconds);
4240 
4241  if (time_in_seconds > m_stale_tip_check_time) {
4242  // Check whether our tip is stale, and if so, allow using an extra
4243  // outbound peer
4244  if (!fImporting && !fReindex && m_connman.GetNetworkActive() && m_connman.GetUseAddrmanOutgoing() && TipMayBeStale()) {
4245  LogPrintf("Potential stale tip detected, will try using extra outbound peer (last tip update: %d seconds ago)\n", time_in_seconds - m_last_tip_update);
4246  m_connman.SetTryNewOutboundPeer(true);
4247  } else if (m_connman.GetTryNewOutboundPeer()) {
4248  m_connman.SetTryNewOutboundPeer(false);
4249  }
4250  m_stale_tip_check_time = time_in_seconds + STALE_CHECK_INTERVAL;
4251  }
4252 
4253  if (!m_initial_sync_finished && CanDirectFetch()) {
4254  m_connman.StartExtraBlockRelayPeers();
4255  m_initial_sync_finished = true;
4256  }
4257 }
4258 
4259 void PeerManagerImpl::MaybeSendPing(CNode& node_to, Peer& peer, std::chrono::microseconds now)
4260 {
4261  if (m_connman.ShouldRunInactivityChecks(node_to) && peer.m_ping_nonce_sent &&
4262  now > peer.m_ping_start.load() + std::chrono::seconds{TIMEOUT_INTERVAL}) {
4263  LogPrint(BCLog::NET, "ping timeout: %fs peer=%d\n", 0.000001 * count_microseconds(now - peer.m_ping_start.load()), peer.m_id);
4264  node_to.fDisconnect = true;
4265  return;
4266  }
4267 
4268  const CNetMsgMaker msgMaker(node_to.GetCommonVersion());
4269  bool pingSend = false;
4270 
4271  if (peer.m_ping_queued) {
4272  // RPC ping request by user
4273  pingSend = true;
4274  }
4275 
4276  if (peer.m_ping_nonce_sent == 0 && now > peer.m_ping_start.load() + PING_INTERVAL) {
4277  // Ping automatically sent as a latency probe & keepalive.
4278  pingSend = true;
4279  }
4280 
4281  if (pingSend) {
4282  uint64_t nonce = 0;
4283  while (nonce == 0) {
4284  GetRandBytes((unsigned char*)&nonce, sizeof(nonce));
4285  }
4286  peer.m_ping_queued = false;
4287  peer.m_ping_start = now;
4288  if (node_to.GetCommonVersion() > BIP0031_VERSION) {
4289  peer.m_ping_nonce_sent = nonce;
4290  m_connman.PushMessage(&node_to, msgMaker.Make(NetMsgType::PING, nonce));
4291  } else {
4292  // Peer is too old to support ping command with nonce, pong will never arrive.
4293  peer.m_ping_nonce_sent = 0;
4294  m_connman.PushMessage(&node_to, msgMaker.Make(NetMsgType::PING));
4295  }
4296  }
4297 }
4298 
/** Send pending address gossip to this peer.
 *
 *  Does two things, both rate-limited by Poisson timers stored on the Peer
 *  (guarded by m_addr_send_times_mutex):
 *  - periodically self-announces our own local address, and
 *  - flushes the queued m_addrs_to_send as an ADDR or ADDRV2 message,
 *    after filtering out addresses the peer already knows.
 *
 *  No-op for peers we do not relay addresses with.
 */
void PeerManagerImpl::MaybeSendAddr(CNode& node, Peer& peer, std::chrono::microseconds current_time)
{
    // Nothing to do for non-address-relay peers
    if (!RelayAddrsWithPeer(peer)) return;

    LOCK(peer.m_addr_send_times_mutex);
    // Periodically advertise our local address to the peer.
    // Skipped while in IBD or when not listening for inbound connections.
    if (fListen && !m_chainman.ActiveChainstate().IsInitialBlockDownload() &&
        peer.m_next_local_addr_send < current_time) {
        // If we've sent before, clear the bloom filter for the peer, so that our
        // self-announcement will actually go out.
        // This might be unnecessary if the bloom filter has already rolled
        // over since our last self-announcement, but there is only a small
        // bandwidth cost that we can incur by doing this (which happens
        // once a day on average).
        // (m_next_local_addr_send == 0us marks "never sent before".)
        if (peer.m_next_local_addr_send != 0us) {
            peer.m_addr_known->reset();
        }
        if (std::optional<CAddress> local_addr = GetLocalAddrForPeer(&node)) {
            FastRandomContext insecure_rand;
            PushAddress(peer, *local_addr, insecure_rand);
        }
        peer.m_next_local_addr_send = PoissonNextSend(current_time, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
    }

    // We sent an `addr` message to this peer recently. Nothing more to do.
    if (current_time <= peer.m_next_addr_send) return;

    peer.m_next_addr_send = PoissonNextSend(current_time, AVG_ADDRESS_BROADCAST_INTERVAL);

    if (!Assume(peer.m_addrs_to_send.size() <= MAX_ADDR_TO_SEND)) {
        // Should be impossible since we always check size before adding to
        // m_addrs_to_send. Recover by trimming the vector.
        peer.m_addrs_to_send.resize(MAX_ADDR_TO_SEND);
    }

    // Remove addr records that the peer already knows about, and add new
    // addrs to the m_addr_known filter on the same pass.
    // Note: the predicate deliberately has a side effect -- it inserts each
    // previously-unknown address into m_addr_known while remove_if walks the
    // vector, so one pass both filters and updates the known-set.
    auto addr_already_known = [&peer](const CAddress& addr) {
        bool ret = peer.m_addr_known->contains(addr.GetKey());
        if (!ret) peer.m_addr_known->insert(addr.GetKey());
        return ret;
    };
    peer.m_addrs_to_send.erase(std::remove_if(peer.m_addrs_to_send.begin(), peer.m_addrs_to_send.end(), addr_already_known),
                               peer.m_addrs_to_send.end());

    // No addr messages to send
    if (peer.m_addrs_to_send.empty()) return;

    // Choose wire format based on the peer's sendaddrv2 preference.
    const char* msg_type;
    int make_flags;
    if (peer.m_wants_addrv2) {
        msg_type = NetMsgType::ADDRV2;
        make_flags = ADDRV2_FORMAT;
    } else {
        msg_type = NetMsgType::ADDR;
        make_flags = 0;
    }
    m_connman.PushMessage(&node, CNetMsgMaker(node.GetCommonVersion()).Make(make_flags, msg_type, peer.m_addrs_to_send));
    peer.m_addrs_to_send.clear();

    // we only send the big addr message once
    // (release the oversized capacity left behind after a large send)
    if (peer.m_addrs_to_send.capacity() > 40) {
        peer.m_addrs_to_send.shrink_to_fit();
    }
}
4365 
4366 void PeerManagerImpl::MaybeSendFeefilter(CNode& pto, std::chrono::microseconds current_time)
4367 {
4369 
4370  if (m_ignore_incoming_txs) return;
4371  if (!pto.m_tx_relay) return;
4372  if (pto.GetCommonVersion() < FEEFILTER_VERSION) return;
4373  // peers with the forcerelay permission should not filter txs to us
4375 
4376  CAmount currentFilter = m_mempool.GetMinFee(gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFeePerK();
4377  static FeeFilterRounder g_filter_rounder{CFeeRate{DEFAULT_MIN_RELAY_TX_FEE}};
4378 
4379  if (m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
4380  // Received tx-inv messages are discarded when the active
4381  // chainstate is in IBD, so tell the peer to not send them.
4382  currentFilter = MAX_MONEY;
4383  } else {
4384  static const CAmount MAX_FILTER{g_filter_rounder.round(MAX_MONEY)};
4385  if (pto.m_tx_relay->lastSentFeeFilter == MAX_FILTER) {
4386  // Send the current filter if we sent MAX_FILTER previously
4387  // and made it out of IBD.
4388  pto.m_tx_relay->m_next_send_feefilter = 0us;
4389  }
4390  }
4391  if (current_time > pto.m_tx_relay->m_next_send_feefilter) {
4392  CAmount filterToSend = g_filter_rounder.round(currentFilter);
4393  // We always have a fee filter of at least minRelayTxFee
4394  filterToSend = std::max(filterToSend, ::minRelayTxFee.GetFeePerK());
4395  if (filterToSend != pto.m_tx_relay->lastSentFeeFilter) {
4396  m_connman.PushMessage(&pto, CNetMsgMaker(pto.GetCommonVersion()).Make(NetMsgType::FEEFILTER, filterToSend));
4397  pto.m_tx_relay->lastSentFeeFilter = filterToSend;
4398  }
4399  pto.m_tx_relay->m_next_send_feefilter = PoissonNextSend(current_time, AVG_FEEFILTER_BROADCAST_INTERVAL);
4400  }
4401  // If the fee filter has changed substantially and it's still more than MAX_FEEFILTER_CHANGE_DELAY
4402  // until scheduled broadcast, then move the broadcast to within MAX_FEEFILTER_CHANGE_DELAY.
4403  else if (current_time + MAX_FEEFILTER_CHANGE_DELAY < pto.m_tx_relay->m_next_send_feefilter &&
4404  (currentFilter < 3 * pto.m_tx_relay->lastSentFeeFilter / 4 || currentFilter > 4 * pto.m_tx_relay->lastSentFeeFilter / 3)) {
4405  pto.m_tx_relay->m_next_send_feefilter = current_time + GetRandomDuration<std::chrono::microseconds>(MAX_FEEFILTER_CHANGE_DELAY);
4406  }
4407 }
4408 
4409 namespace {
4410 class CompareInvMempoolOrder
4411 {
4412  CTxMemPool *mp;
4413  bool m_wtxid_relay;
4414 public:
4415  explicit CompareInvMempoolOrder(CTxMemPool *_mempool, bool use_wtxid)
4416  {
4417  mp = _mempool;
4418  m_wtxid_relay = use_wtxid;
4419  }
4420 
4421  bool operator()(std::set<uint256>::iterator a, std::set<uint256>::iterator b)
4422  {
4423  /* As std::make_heap produces a max-heap, we want the entries with the
4424  * fewest ancestors/highest fee to sort later. */
4425  return mp->CompareDepthAndScore(*b, *a, m_wtxid_relay);
4426  }
4427 };
4428 }
4429 
4430 bool PeerManagerImpl::SendMessages(CNode* pto)
4431 {
4432  PeerRef peer = GetPeerRef(pto->GetId());
4433  if (!peer) return false;
4434  const Consensus::Params& consensusParams = m_chainparams.GetConsensus();
4435 
4436  // We must call MaybeDiscourageAndDisconnect first, to ensure that we'll
4437  // disconnect misbehaving peers even before the version handshake is complete.
4438  if (MaybeDiscourageAndDisconnect(*pto, *peer)) return true;
4439 
4440  // Don't send anything until the version handshake is complete
4441  if (!pto->fSuccessfullyConnected || pto->fDisconnect)
4442  return true;
4443 
4444  // If we get here, the outgoing message serialization version is set and can't change.
4445  const CNetMsgMaker msgMaker(pto->GetCommonVersion());
4446 
4447  const auto current_time = GetTime<std::chrono::microseconds>();
4448 
4449  if (pto->IsAddrFetchConn() && current_time - std::chrono::seconds(pto->nTimeConnected) > 10 * AVG_ADDRESS_BROADCAST_INTERVAL) {
4450  LogPrint(BCLog::NET, "addrfetch connection timeout; disconnecting peer=%d\n", pto->GetId());
4451  pto->fDisconnect = true;
4452  return true;
4453  }
4454 
4455  MaybeSendPing(*pto, *peer, current_time);
4456 
4457  // MaybeSendPing may have marked peer for disconnection
4458  if (pto->fDisconnect) return true;
4459 
4460  MaybeSendAddr(*pto, *peer, current_time);
4461 
4462  {
4463  LOCK(cs_main);
4464 
4465  CNodeState &state = *State(pto->GetId());
4466 
4467  // Start block sync
4468  if (pindexBestHeader == nullptr)
4469  pindexBestHeader = m_chainman.ActiveChain().Tip();
4470  bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->IsAddrFetchConn()); // Download if this is a nice peer, or we have no nice peers and this one might do.
4471  if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) {
4472  // Only actively request headers from a single peer, unless we're close to today.
4473  if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) {
4474  state.fSyncStarted = true;
4475  state.m_headers_sync_timeout = current_time + HEADERS_DOWNLOAD_TIMEOUT_BASE +
4476  (
4477  // Convert HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER to microseconds before scaling
4478  // to maintain precision
4479  std::chrono::microseconds{HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER} *
4481  );
4482  nSyncStarted++;
4483  const CBlockIndex *pindexStart = pindexBestHeader;
4484  /* If possible, start at the block preceding the currently
4485  best known header. This ensures that we always get a
4486  non-empty list of headers back as long as the peer
4487  is up-to-date. With a non-empty response, we can initialise
4488  the peer's known best block. This wouldn't be possible
4489  if we requested starting at pindexBestHeader and
4490  got back an empty response. */
4491  if (pindexStart->pprev)
4492  pindexStart = pindexStart->pprev;
4493  LogPrint(BCLog::NET, "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->GetId(), peer->m_starting_height);
4494  m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETHEADERS, m_chainman.ActiveChain().GetLocator(pindexStart), uint256()));
4495  }
4496  }
4497 
4498  //
4499  // Try sending block announcements via headers
4500  //
4501  {
4502  // If we have less than MAX_BLOCKS_TO_ANNOUNCE in our
4503  // list of block hashes we're relaying, and our peer wants
4504  // headers announcements, then find the first header
4505  // not yet known to our peer but would connect, and send.
4506  // If no header would connect, or if we have too many
4507  // blocks, or if the peer doesn't want headers, just
4508  // add all to the inv queue.
4509  LOCK(peer->m_block_inv_mutex);
4510  std::vector<CBlock> vHeaders;
4511  bool fRevertToInv = ((!state.fPreferHeaders &&
4512  (!state.fPreferHeaderAndIDs || peer->m_blocks_for_headers_relay.size() > 1)) ||
4513  peer->m_blocks_for_headers_relay.size() > MAX_BLOCKS_TO_ANNOUNCE);
4514  const CBlockIndex *pBestIndex = nullptr; // last header queued for delivery
4515  ProcessBlockAvailability(pto->GetId()); // ensure pindexBestKnownBlock is up-to-date
4516 
4517  if (!fRevertToInv) {
4518  bool fFoundStartingHeader = false;
4519  // Try to find first header that our peer doesn't have, and
4520  // then send all headers past that one. If we come across any
4521  // headers that aren't on m_chainman.ActiveChain(), give up.
4522  for (const uint256& hash : peer->m_blocks_for_headers_relay) {
4523  const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(hash);
4524  assert(pindex);
4525  if (m_chainman.ActiveChain()[pindex->nHeight] != pindex) {
4526  // Bail out if we reorged away from this block
4527  fRevertToInv = true;
4528  break;
4529  }
4530  if (pBestIndex != nullptr && pindex->pprev != pBestIndex) {
4531  // This means that the list of blocks to announce don't
4532  // connect to each other.
4533  // This shouldn't really be possible to hit during
4534  // regular operation (because reorgs should take us to
4535  // a chain that has some block not on the prior chain,
4536  // which should be caught by the prior check), but one
4537  // way this could happen is by using invalidateblock /
4538  // reconsiderblock repeatedly on the tip, causing it to
4539  // be added multiple times to m_blocks_for_headers_relay.
4540  // Robustly deal with this rare situation by reverting
4541  // to an inv.
4542  fRevertToInv = true;
4543  break;
4544  }
4545  pBestIndex = pindex;
4546  if (fFoundStartingHeader) {
4547  // add this to the headers message
4548  vHeaders.push_back(pindex->GetBlockHeader());
4549  } else if (PeerHasHeader(&state, pindex)) {
4550  continue; // keep looking for the first new block
4551  } else if (pindex->pprev == nullptr || PeerHasHeader(&state, pindex->pprev)) {
4552  // Peer doesn't have this header but they do have the prior one.
4553  // Start sending headers.
4554  fFoundStartingHeader = true;
4555  vHeaders.push_back(pindex->GetBlockHeader());
4556  } else {
4557  // Peer doesn't have this header or the prior one -- nothing will
4558  // connect, so bail out.
4559  fRevertToInv = true;
4560  break;
4561  }
4562  }
4563  }
4564  if (!fRevertToInv && !vHeaders.empty()) {
4565  if (vHeaders.size() == 1 && state.fPreferHeaderAndIDs) {
4566  // We only send up to 1 block as header-and-ids, as otherwise
4567  // probably means we're doing an initial-ish-sync or they're slow
4568  LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", __func__,
4569  vHeaders.front().GetHash().ToString(), pto->GetId());
4570 
4571  int nSendFlags = state.fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
4572 
4573  bool fGotBlockFromCache = false;
4574  {
4576  if (most_recent_block_hash == pBestIndex->GetBlockHash()) {
4577  if (state.fWantsCmpctWitness || !fWitnessesPresentInMostRecentCompactBlock)
4578  m_connman.PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, *most_recent_compact_block));
4579  else {
4580  CBlockHeaderAndShortTxIDs cmpctblock(*most_recent_block, state.fWantsCmpctWitness);
4581  m_connman.PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
4582  }
4583  fGotBlockFromCache = true;
4584  }
4585  }
4586  if (!fGotBlockFromCache) {
4587  CBlock block;
4588  bool ret = ReadBlockFromDisk(block, pBestIndex, consensusParams);
4589  assert(ret);
4590  CBlockHeaderAndShortTxIDs cmpctblock(block, state.fWantsCmpctWitness);
4591  m_connman.PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
4592  }
4593  state.pindexBestHeaderSent = pBestIndex;
4594  } else if (state.fPreferHeaders) {
4595  if (vHeaders.size() > 1) {
4596  LogPrint(BCLog::NET, "%s: %u headers, range (%s, %s), to peer=%d\n", __func__,
4597  vHeaders.size(),
4598  vHeaders.front().GetHash().ToString(),
4599  vHeaders.back().GetHash().ToString(), pto->GetId());
4600  } else {
4601  LogPrint(BCLog::NET, "%s: sending header %s to peer=%d\n", __func__,
4602  vHeaders.front().GetHash().ToString(), pto->GetId());
4603  }
4604  m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
4605  state.pindexBestHeaderSent = pBestIndex;
4606  } else
4607  fRevertToInv = true;
4608  }
4609  if (fRevertToInv) {
4610  // If falling back to using an inv, just try to inv the tip.
4611  // The last entry in m_blocks_for_headers_relay was our tip at some point
4612  // in the past.
4613  if (!peer->m_blocks_for_headers_relay.empty()) {
4614  const uint256& hashToAnnounce = peer->m_blocks_for_headers_relay.back();
4615  const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(hashToAnnounce);
4616  assert(pindex);
4617 
4618  // Warn if we're announcing a block that is not on the main chain.
4619  // This should be very rare and could be optimized out.
4620  // Just log for now.
4621  if (m_chainman.ActiveChain()[pindex->nHeight] != pindex) {
4622  LogPrint(BCLog::NET, "Announcing block %s not on main chain (tip=%s)\n",
4623  hashToAnnounce.ToString(), m_chainman.ActiveChain().Tip()->GetBlockHash().ToString());
4624  }
4625 
4626  // If the peer's chain has this block, don't inv it back.
4627  if (!PeerHasHeader(&state, pindex)) {
4628  peer->m_blocks_for_inv_relay.push_back(hashToAnnounce);
4629  LogPrint(BCLog::NET, "%s: sending inv peer=%d hash=%s\n", __func__,
4630  pto->GetId(), hashToAnnounce.ToString());
4631  }
4632  }
4633  }
4634  peer->m_blocks_for_headers_relay.clear();
4635  }
4636 
4637  //
4638  // Message: inventory
4639  //
4640  std::vector<CInv> vInv;
4641  {
4642  LOCK(peer->m_block_inv_mutex);
4643  vInv.reserve(std::max<size_t>(peer->m_blocks_for_inv_relay.size(), INVENTORY_BROADCAST_MAX));
4644 
4645  // Add blocks
4646  for (const uint256& hash : peer->m_blocks_for_inv_relay) {
4647  vInv.push_back(CInv(MSG_BLOCK, hash));
4648  if (vInv.size() == MAX_INV_SZ) {
4649  m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
4650  vInv.clear();
4651  }
4652  }
4653  peer->m_blocks_for_inv_relay.clear();
4654  }
4655 
4656  if (pto->m_tx_relay != nullptr) {
4657  LOCK(pto->m_tx_relay->cs_tx_inventory);
4658  // Check whether periodic sends should happen
4659  bool fSendTrickle = pto->HasPermission(NetPermissionFlags::NoBan);
4660  if (pto->m_tx_relay->nNextInvSend < current_time) {
4661  fSendTrickle = true;
4662  if (pto->IsInboundConn()) {
4663  pto->m_tx_relay->nNextInvSend = m_connman.PoissonNextSendInbound(current_time, INBOUND_INVENTORY_BROADCAST_INTERVAL);
4664  } else {
4665  pto->m_tx_relay->nNextInvSend = PoissonNextSend(current_time, OUTBOUND_INVENTORY_BROADCAST_INTERVAL);
4666  }
4667  }
4668 
4669  // Time to send but the peer has requested we not relay transactions.
4670  if (fSendTrickle) {
4671  LOCK(pto->m_tx_relay->cs_filter);
4672  if (!pto->m_tx_relay->fRelayTxes) pto->m_tx_relay->setInventoryTxToSend.clear();
4673  }
4674 
4675  // Respond to BIP35 mempool requests
4676  if (fSendTrickle && pto->m_tx_relay->fSendMempool) {
4677  auto vtxinfo = m_mempool.infoAll();
4678  pto->m_tx_relay->fSendMempool = false;
4679  const CFeeRate filterrate{pto->m_tx_relay->minFeeFilter.load()};
4680 
4681  LOCK(pto->m_tx_relay->cs_filter);
4682 
4683  for (const auto& txinfo : vtxinfo) {
4684  const uint256& hash = state.m_wtxid_relay ? txinfo.tx->GetWitnessHash() : txinfo.tx->GetHash();
4685  CInv inv(state.m_wtxid_relay ? MSG_WTX : MSG_TX, hash);
4686  pto->m_tx_relay->setInventoryTxToSend.erase(hash);
4687  // Don't send transactions that peers will not put into their mempool
4688  if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
4689  continue;
4690  }
4691  if (pto->m_tx_relay->pfilter) {
4692  if (!pto->m_tx_relay->pfilter->IsRelevantAndUpdate(*txinfo.tx)) continue;
4693  }
4694  pto->m_tx_relay->filterInventoryKnown.insert(hash);
4695  // Responses to MEMPOOL requests bypass the m_recently_announced_invs filter.
4696  vInv.push_back(inv);
4697  if (vInv.size() == MAX_INV_SZ) {
4698  m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
4699  vInv.clear();
4700  }
4701  }
4702  pto->m_tx_relay->m_last_mempool_req = std::chrono::duration_cast<std::chrono::seconds>(current_time);
4703  }
4704 
4705  // Determine transactions to relay
4706  if (fSendTrickle) {
4707  // Produce a vector with all candidates for sending
4708  std::vector<std::set<uint256>::iterator> vInvTx;
4709  vInvTx.reserve(pto->m_tx_relay->setInventoryTxToSend.size());
4710  for (std::set<uint256>::iterator it = pto->m_tx_relay->setInventoryTxToSend.begin(); it != pto->m_tx_relay->setInventoryTxToSend.end(); it++) {
4711  vInvTx.push_back(it);
4712  }
4713  const CFeeRate filterrate{pto->m_tx_relay->minFeeFilter.load()};
4714  // Topologically and fee-rate sort the inventory we send for privacy and priority reasons.
4715  // A heap is used so that not all items need sorting if only a few are being sent.
4716  CompareInvMempoolOrder compareInvMempoolOrder(&m_mempool, state.m_wtxid_relay);
4717  std::make_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
4718  // No reason to drain out at many times the network's capacity,
4719  // especially since we have many peers and some will draw much shorter delays.
4720  unsigned int nRelayedTransactions = 0;
4721  LOCK(pto->m_tx_relay->cs_filter);
4722  while (!vInvTx.empty() && nRelayedTransactions < INVENTORY_BROADCAST_MAX) {
4723  // Fetch the top element from the heap
4724  std::pop_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
4725  std::set<uint256>::iterator it = vInvTx.back();
4726  vInvTx.pop_back();
4727  uint256 hash = *it;
4728  CInv inv(state.m_wtxid_relay ? MSG_WTX : MSG_TX, hash);
4729  // Remove it from the to-be-sent set
4730  pto->m_tx_relay->setInventoryTxToSend.erase(it);
4731  // Check if not in the filter already
4732  if (pto->m_tx_relay->filterInventoryKnown.contains(hash)) {
4733  continue;
4734  }
4735  // Not in the mempool anymore? don't bother sending it.
4736  auto txinfo = m_mempool.info(ToGenTxid(inv));
4737  if (!txinfo.tx) {
4738  continue;
4739  }
4740  auto txid = txinfo.tx->GetHash();
4741  auto wtxid = txinfo.tx->GetWitnessHash();
4742  // Peer told you to not send transactions at that feerate? Don't bother sending it.
4743  if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
4744  continue;
4745  }
4746  if (pto->m_tx_relay->pfilter && !pto->m_tx_relay->pfilter->IsRelevantAndUpdate(*txinfo.tx)) continue;
4747  // Send
4748  State(pto->GetId())->m_recently_announced_invs.insert(hash);
4749  vInv.push_back(inv);
4750  nRelayedTransactions++;
4751  {
4752  // Expire old relay messages
4753  while (!g_relay_expiration.empty() && g_relay_expiration.front().first < current_time)
4754  {
4755  mapRelay.erase(g_relay_expiration.front().second);
4756  g_relay_expiration.pop_front();
4757  }
4758 
4759  auto ret = mapRelay.emplace(txid, std::move(txinfo.tx));
4760  if (ret.second) {
4761  g_relay_expiration.emplace_back(current_time + RELAY_TX_CACHE_TIME, ret.first);
4762  }
4763  // Add wtxid-based lookup into mapRelay as well, so that peers can request by wtxid
4764  auto ret2 = mapRelay.emplace(wtxid, ret.first->second);
4765  if (ret2.second) {
4766  g_relay_expiration.emplace_back(current_time + RELAY_TX_CACHE_TIME, ret2.first);
4767  }
4768  }
4769  if (vInv.size() == MAX_INV_SZ) {
4770  m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
4771  vInv.clear();
4772  }
4773  pto->m_tx_relay->filterInventoryKnown.insert(hash);
4774  if (hash != txid) {
4775  // Insert txid into filterInventoryKnown, even for
4776  // wtxidrelay peers. This prevents re-adding of
4777  // unconfirmed parents to the recently_announced
4778  // filter, when a child tx is requested. See
4779  // ProcessGetData().
4780  pto->m_tx_relay->filterInventoryKnown.insert(txid);
4781  }
4782  }
4783  }
4784  }
4785  if (!vInv.empty())
4786  m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
4787 
4788  // Detect whether we're stalling
4789  if (state.m_stalling_since.count() && state.m_stalling_since < current_time - BLOCK_STALLING_TIMEOUT) {
4790  // Stalling only triggers when the block download window cannot move. During normal steady state,
4791  // the download window should be much larger than the to-be-downloaded set of blocks, so disconnection
4792  // should only happen during initial block download.
4793  LogPrintf("Peer=%d is stalling block download, disconnecting\n", pto->GetId());
4794  pto->fDisconnect = true;
4795  return true;
4796  }
4797  // In case there is a block that has been in flight from this peer for block_interval * (1 + 0.5 * N)
4798  // (with N the number of peers from which we're downloading validated blocks), disconnect due to timeout.
4799  // We compensate for other peers to prevent killing off peers due to our own downstream link
4800  // being saturated. We only count validated in-flight blocks so peers can't advertise non-existing block hashes
4801  // to unreasonably increase our timeout.
4802  if (state.vBlocksInFlight.size() > 0) {
4803  QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
4804  int nOtherPeersWithValidatedDownloads = m_peers_downloading_from - 1;
4805  if (current_time > state.m_downloading_since + std::chrono::seconds{consensusParams.nPowTargetSpacing} * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) {
4806  LogPrintf("Timeout downloading block %s from peer=%d, disconnecting\n", queuedBlock.pindex->GetBlockHash().ToString(), pto->GetId());
4807  pto->fDisconnect = true;
4808  return true;
4809  }
4810  }
4811  // Check for headers sync timeouts
4812  if (state.fSyncStarted && state.m_headers_sync_timeout < std::chrono::microseconds::max()) {
4813  // Detect whether this is a stalling initial-headers-sync peer
4814  if (pindexBestHeader->GetBlockTime() <= GetAdjustedTime() - 24 * 60 * 60) {
4815  if (current_time > state.m_headers_sync_timeout && nSyncStarted == 1 && (nPreferredDownload - state.fPreferredDownload >= 1)) {
4816  // Disconnect a peer (without NetPermissionFlags::NoBan permission) if it is our only sync peer,
4817  // and we have others we could be using instead.
4818  // Note: If all our peers are inbound, then we won't
4819  // disconnect our sync peer for stalling; we have bigger
4820  // problems if we can't get any outbound peers.
4822  LogPrintf("Timeout downloading headers from peer=%d, disconnecting\n", pto->GetId());
4823  pto->fDisconnect = true;
4824  return true;
4825  } else {
4826  LogPrintf("Timeout downloading headers from noban peer=%d, not disconnecting\n", pto->GetId());
4827  // Reset the headers sync state so that we have a
4828  // chance to try downloading from a different peer.
4829  // Note: this will also result in at least one more
4830  // getheaders message to be sent to
4831  // this peer (eventually).
4832  state.fSyncStarted = false;
4833  nSyncStarted--;
4834  state.m_headers_sync_timeout = 0us;
4835  }
4836  }
4837  } else {
4838  // After we've caught up once, reset the timeout so we can't trigger
4839  // disconnect later.
4840  state.m_headers_sync_timeout = std::chrono::microseconds::max();
4841  }
4842  }
4843 
4844  // Check that outbound peers have reasonable chains
4845  // GetTime() is used by this anti-DoS logic so we can test this using mocktime
4846  ConsiderEviction(*pto, GetTime());
4847 
4848  //
4849  // Message: getdata (blocks)
4850  //
4851  std::vector<CInv> vGetData;
4852  if (!pto->fClient && ((fFetch && !pto->m_limited_node) || !m_chainman.ActiveChainstate().IsInitialBlockDownload()) && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
4853  std::vector<const CBlockIndex*> vToDownload;
4854  NodeId staller = -1;
4855  FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller);
4856  for (const CBlockIndex *pindex : vToDownload) {
4857  uint32_t nFetchFlags = GetFetchFlags(*pto);
4858  vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
4859  BlockRequested(pto->GetId(), *pindex);
4860  LogPrint(BCLog::NET, "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(),
4861  pindex->nHeight, pto->GetId());
4862  }
4863  if (state.nBlocksInFlight == 0 && staller != -1) {
4864  if (State(staller)->m_stalling_since == 0us) {
4865  State(staller)->m_stalling_since = current_time;
4866  LogPrint(BCLog::NET, "Stall started peer=%d\n", staller);
4867  }
4868  }
4869  }
4870 
4871  //
4872  // Message: getdata (transactions)
4873  //
4874  std::vector<std::pair<NodeId, GenTxid>> expired;
4875  auto requestable = m_txrequest.GetRequestable(pto->GetId(), current_time, &expired);
4876  for (const auto& entry : expired) {
4877  LogPrint(BCLog::NET, "timeout of inflight %s %s from peer=%d\n", entry.second.IsWtxid() ? "wtx" : "tx",
4878  entry.second.GetHash().ToString(), entry.first);
4879  }
4880  for (const GenTxid& gtxid : requestable) {
4881  if (!AlreadyHaveTx(gtxid)) {
4882  LogPrint(BCLog::NET, "Requesting %s %s peer=%d\n", gtxid.IsWtxid() ? "wtx" : "tx",
4883  gtxid.GetHash().ToString(), pto->GetId());
4884  vGetData.emplace_back(gtxid.IsWtxid() ? MSG_WTX : (MSG_TX | GetFetchFlags(*pto)), gtxid.GetHash());
4885  if (vGetData.size() >= MAX_GETDATA_SZ) {
4886  m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
4887  vGetData.clear();
4888  }
4889  m_txrequest.RequestedTx(pto->GetId(), gtxid.GetHash(), current_time + GETDATA_TX_INTERVAL);
4890  } else {
4891  // We have already seen this transaction, no need to download. This is just a belt-and-suspenders, as
4892  // this should already be called whenever a transaction becomes AlreadyHaveTx().
4893  m_txrequest.ForgetTxHash(gtxid.GetHash());
4894  }
4895  }
4896 
4897 
4898  if (!vGetData.empty())
4899  m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
4900 
4901  MaybeSendFeefilter(*pto, current_time);
4902  } // release cs_main
4903  return true;
4904 }
std::shared_ptr< const CTransaction > CTransactionRef
Definition: transaction.h:386
static constexpr unsigned int INVENTORY_BROADCAST_PER_SECOND
Maximum rate of inventory items to send per second.
const char * GETCFILTERS
getcfilters requests compact filters for a range of blocks.
Definition: protocol.cpp:40
arith_uint256 nChainWork
(memory only) Total amount of work (expected number of hashes) in the chain up to and including this ...
Definition: chain.h:162
RecursiveMutex g_cs_orphans
Guards orphan transactions and extra txs for compact blocks.
Definition: txorphanage.cpp:18
static constexpr int64_t MINIMUM_CONNECT_TIME
Minimum time an outbound-peer-eviction candidate must be connected for, in order to evict...
bool IsMsgWtx() const
Definition: protocol.h:501
static constexpr std::chrono::microseconds GETDATA_TX_INTERVAL
How long to wait (in microseconds) before downloading a transaction from an additional peer...
std::string SanitizeString(const std::string &str, int rule)
Remove unsafe chars.
enum ReadStatus_t ReadStatus
const char * PING
The ping message is sent periodically to help confirm that the receiving peer is still connected...
Definition: protocol.cpp:28
virtual void Misbehaving(const NodeId pnode, const int howmuch, const std::string &message)=0
Increment peer's misbehavior score.
static constexpr auto TXID_RELAY_DELAY
How long to delay requesting transactions via txids, if we have wtxid-relaying peers.
BlockFilterIndex is used to store and retrieve block filters, hashes, and headers for a range of bloc...
static constexpr auto INBOUND_INVENTORY_BROADCAST_INTERVAL
Average delay between trickled inventory transmissions for inbound peers.
if(expired !=0)
Definition: validation.cpp:310
const char * FILTERLOAD
The filterload message tells the receiving peer to filter all relayed transactions and requested merk...
Definition: protocol.cpp:31
const char * MERKLEBLOCK
The merkleblock message is a reply to a getdata message which requested a block using the inventory t...
Definition: protocol.cpp:20
std::atomic_bool fPauseSend
Definition: net.h:458
static const int SERIALIZE_TRANSACTION_NO_WITNESS
A flag that is ORed into the protocol version to designate that a transaction should be (un)serialize...
Definition: transaction.h:23
invalid by consensus rules
const char * BLOCKTXN
Contains a BlockTransactions.
Definition: protocol.cpp:39
AssertLockHeld(pool.cs)
bool IsMsgTx() const
Definition: protocol.h:499
static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER
Number of blocks that can be requested at any given time from a single peer.
static constexpr auto UNCONDITIONAL_RELAY_DELAY
How long a transaction has to be in the mempool before it can unconditionally be relayed (even when n...
static constexpr unsigned int INVENTORY_BROADCAST_MAX
Maximum number of inventory items to send per transmission.
static const int MAX_BLOCKTXN_DEPTH
Maximum depth of blocks we're willing to respond to GETBLOCKTXN requests for.
void Discourage(const CNetAddr &net_addr)
Definition: banman.cpp:110
const char * SENDADDRV2
The sendaddrv2 message signals support for receiving ADDRV2 messages (BIP155).
Definition: protocol.cpp:17
Definition: banman.h:58
ReadStatus FillBlock(CBlock &block, const std::vector< CTransactionRef > &vtx_missing)
ServiceFlags
nServices flags
Definition: protocol.h:271
static constexpr size_t MAX_ADDR_PROCESSING_TOKEN_BUCKET
The soft limit of the address processing token bucket (the regular MAX_ADDR_RATE_PER_SECOND based inc...
bool IsLocal() const
Definition: netaddress.cpp:425
#define LogPrint(category,...)
Definition: logging.h:188
int64_t GetBlockTime() const
Definition: chain.h:260
assert(!tx.IsCoinBase())
Describes a place in the block chain to another node such that if the other node doesn't have the sam...
Definition: block.h:114
int GetVersion() const
Definition: streams.h:363
virtual void BlockDisconnected(const std::shared_ptr< const CBlock > &block, const CBlockIndex *pindex)
Notifies listeners of a block being disconnected.
CBlockIndex * pprev
pointer to the index of the predecessor of this block
Definition: chain.h:144
std::vector< TxMempoolInfo > infoAll() const
Definition: txmempool.cpp:808
CSipHasher & Write(uint64_t data)
Hash a 64-bit integer worth of data It is treated as if this was the little-endian interpretation of ...
Definition: siphash.cpp:28
uint32_t nStatus
Verification status of this block.
Definition: chain.h:187
void scheduleEvery(Function f, std::chrono::milliseconds delta)
Repeat f until the scheduler is stopped.
Definition: scheduler.cpp:110
void SetIP(const CNetAddr &ip)
Definition: netaddress.cpp:107
uint64_t m_addr_rate_limited
void WakeMessageHandler()
Definition: net.cpp:1640
static std::shared_ptr< const CBlock > most_recent_block GUARDED_BY(cs_most_recent_block)
std::string ToString() const
Definition: protocol.cpp:170
bool exists(const GenTxid &gtxid) const
Definition: txmempool.h:736
Definition: block.h:62
We don&#39;t have the previous block the checked one is built on.
Data structure to keep track of, and schedule, transaction downloads from peers.
Definition: txrequest.h:96
void PushTxInventory(const uint256 &hash)
Definition: net.h:643
const char * GETADDR
The getaddr message requests an addr message from the receiving node, preferably one with lots of IP ...
Definition: protocol.cpp:26
uint64_t ReadCompactSize(Stream &is, bool range_check=true)
Decode a CompactSize-encoded variable-length integer.
Definition: serialize.h:282
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER
static const unsigned int DEFAULT_MAX_MEMPOOL_SIZE
Default for -maxmempool, maximum megabytes of mempool memory usage.
Definition: policy.h:32
int GetType() const
Definition: streams.h:361
static const CAmount MAX_MONEY
No amount larger than this (in satoshi) is valid.
Definition: amount.h:25
Provides an interface for creating and interacting with one or two chainstates: an IBD chainstate gen...
Definition: validation.h:866
std::vector< uint16_t > indexes
#define strprintf
Format arguments and return the string or write to given std::ostream (see tinyformat::format doc for...
Definition: tinyformat.h:1164
bool IsOutboundOrBlockRelayConn() const
Definition: net.h:460
bool IsMsgFilteredBlk() const
Definition: protocol.h:502
size_t DynamicMemoryUsage() const
Definition: txmempool.cpp:951
const std::optional< std::list< CTransactionRef > > m_replaced_transactions
Mempool transactions replaced by the tx per BIP 125 rules.
Definition: validation.h:180
static constexpr auto AVG_FEEFILTER_BROADCAST_INTERVAL
Verify that INVENTORY_MAX_RECENT_RELAY is enough to cache everything typically relayed before uncondi...
reverse_range< T > reverse_iterate(T &x)
inv message data
Definition: protocol.h:485
invalid proof of work or time too old
const char * SENDCMPCT
Contains a 1-byte bool and 8-byte LE version number.
Definition: protocol.cpp:36
MempoolAcceptResult AcceptToMemoryPool(CChainState &active_chainstate, CTxMemPool &pool, const CTransactionRef &tx, bool bypass_limits, bool test_accept)
(Try to) add a transaction to the memory pool.
A class to track orphan transactions (failed on TX_MISSING_INPUTS) Since we cannot distinguish orphan...
Definition: txorphanage.h:21
BlockFilterIndex * GetBlockFilterIndex(BlockFilterType filter_type)
Get a block filter index by type.
static constexpr int STALE_RELAY_AGE_LIMIT
Age after which a stale block will no longer be served if requested as protection against fingerprint...
static const unsigned int MIN_BLOCKS_TO_KEEP
Block files containing a block-height within MIN_BLOCKS_TO_KEEP of ::ChainActive().Tip() will not be pruned.
Definition: validation.h:88
constexpr auto GetRandMillis
Definition: random.h:84
const CBlockIndex * LastCommonAncestor(const CBlockIndex *pa, const CBlockIndex *pb)
Find the last common ancestor two blocks have.
Definition: chain.cpp:156
transaction was missing some of its inputs
bool IsMsgCmpctBlk() const
Definition: protocol.h:503
virtual void NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr< const CBlock > &block)
Notifies listeners that a block which builds directly on our current tip has been received and connec...
bool IsFeelerConn() const
Definition: net.h:487
TxMempoolInfo info(const uint256 &hash) const
Definition: txmempool.cpp:840
All parent headers found, difficulty matches, timestamp >= median previous, checkpoint.
Definition: chain.h:101
bool MoneyRange(const CAmount &nValue)
Definition: amount.h:26
CBlockHeader GetBlockHeader() const
Definition: chain.h:233
std::vector< unsigned char > ParseHex(const char *psz)
int Height() const
Return the maximal height in the chain.
Definition: chain.h:428
BloomFilter is a probabilistic filter which SPV clients provide so that we can filter the transaction...
Definition: bloom.h:44
const uint256 & GetHash() const
Definition: transaction.h:397
unsigned long size() const
Definition: txmempool.h:718
const char * CFHEADERS
cfheaders is a response to a getcfheaders request containing a filter header and a vector of filter h...
Definition: protocol.cpp:43
Only first tx is coinbase, 2 <= coinbase input script length <= 100, transactions valid...
Definition: chain.h:108
void SetCommonVersion(int greatest_common_version)
Definition: net.h:610
static const int BIP0031_VERSION
BIP 0031, pong message, is enabled for all versions AFTER this one.
Definition: version.h:21
void PushMessage(CNode *pnode, CSerializedNetMsg &&msg)
Definition: net.cpp:3017
RecursiveMutex cs_vProcessMsg
Definition: net.h:416
Defined in BIP152.
Definition: protocol.h:476
arith_uint256 nMinimumChainWork
Minimum work we will assume exists on some valid chain.
Definition: validation.cpp:128
bool CompareDepthAndScore(const uint256 &hasha, const uint256 &hashb, bool wtxid=false)
Definition: txmempool.cpp:746
void SetServiceFlagsIBDCache(bool state)
Set the current IBD status in order to figure out the desirable service flags.
Definition: protocol.cpp:134
RollingBloomFilter is a probabilistic "keep track of most recently inserted" set. ...
Definition: bloom.h:110
static constexpr auto AVG_ADDRESS_BROADCAST_INTERVAL
Average delay between peer address broadcasts.
const TxValidationState m_state
Definition: validation.h:176
CAddress GetLocalAddress(const CNetAddr *paddrPeer, ServiceFlags nLocalServices)
Definition: net.cpp:177
static const int SENDHEADERS_VERSION
"sendheaders" command and announcing blocks with headers starts with this version ...
Definition: version.h:27
static constexpr size_t MAX_PCT_ADDR_TO_SEND
the maximum percentage of addresses from our addrman to return in response to a getaddr message...
CChainParams defines various tweakable parameters of a given instance of the Bitcoin system...
Definition: chainparams.h:69
violated mempool's fee/size/descendant/RBF/etc limits
static constexpr auto NONPREF_PEER_TX_DELAY
How long to delay requesting transactions from non-preferred peers.
bool IsNull() const
Definition: block.h:135
Double ended buffer combining vector and stream-like interfaces.
Definition: streams.h:204
inputs (covered by txid) failed policy rules
bool empty() const
Definition: streams.h:256
bool GetBoolArg(const std::string &strArg, bool fDefault) const
Return boolean argument or default value.
Definition: system.cpp:600
void SetTryNewOutboundPeer(bool flag)
Definition: net.cpp:1792
const uint32_t MSG_WITNESS_FLAG
getdata message type flags
Definition: protocol.h:462
const ResultType m_result_type
Definition: validation.h:175
uint64_t GetLocalNonce() const
Definition: net.h:589
bool SeenLocal(const CService &addr)
vote for a local address
Definition: net.cpp:288
void GetRandBytes(unsigned char *buf, int num) noexcept
Overall design of the RNG and entropy sources.
Definition: random.cpp:584
transaction spends a coinbase too early, or violates locktime/sequence locks
static constexpr auto RELAY_TX_CACHE_TIME
How long to cache transactions in mapRelay for normal relay.
const char * CFILTER
cfilter is a response to a getcfilters request containing a single compact filter.
Definition: protocol.cpp:41
static const unsigned int MAX_SCRIPT_ELEMENT_SIZE
Definition: script.h:23
std::chrono::microseconds PoissonNextSend(std::chrono::microseconds now, std::chrono::seconds average_interval)
Return a timestamp in the future (in microseconds) for exponentially distributed events.
Definition: net.cpp:3073
bool ActivateBestChain(BlockValidationState &state, std::shared_ptr< const CBlock > pblock=nullptr) LOCKS_EXCLUDED(cs_main)
Find the best known block, and make it the tip of the block chain.
static const int TIMEOUT_INTERVAL
Time after which to disconnect, after waiting for a ping response (or inactivity).
Definition: net.h:52
const char * PONG
The pong message replies to a ping message, proving to the pinging node that the ponging node is stil...
Definition: protocol.cpp:29
unsigned char * begin()
Definition: uint256.h:58
std::vector< CAddress > GetAddresses(size_t max_addresses, size_t max_pct, std::optional< Network > network) const
Return all or many randomly selected addresses, optionally by network.
Definition: net.cpp:2734
State
The various states a (txhash,peer) pair can be in.
Definition: txrequest.cpp:38
static constexpr double BLOCK_DOWNLOAD_TIMEOUT_PER_PEER
Additional block download timeout per parallel downloading peer (i.e.
initial value. Tx has not yet been rejected
virtual void BlockChecked(const CBlock &, const BlockValidationState &)
Notifies listeners of a block validation result.
const char * WTXIDRELAY
Indicates that a node prefers to relay transactions via wtxid, rather than txid.
Definition: protocol.cpp:46
bool GetTryNewOutboundPeer() const
Definition: net.cpp:1787
static constexpr uint32_t MAX_GETCFILTERS_SIZE
Maximum number of compact filters that may be requested with one getcfilters.
bool IsNull() const
Definition: uint256.h:31
const char * HEADERS
The headers message sends one or more block headers to a node which previously requested certain head...
Definition: protocol.cpp:24
const char * GETCFCHECKPT
getcfcheckpt requests evenly spaced compact filter headers, enabling parallelized download and valida...
Definition: protocol.cpp:44
bool IsRelayable() const
Whether this address should be relayed to other peers even if we can't reach it ourselves.
Definition: netaddress.h:228
std::atomic< ServiceFlags > nServices
Definition: net.h:404
static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT
Protect at least this many outbound peers from disconnection due to slow/ behind headers chain...
const std::vector< CTxIn > vin
Definition: transaction.h:270
void SetAddrLocal(const CService &addrLocalIn)
May not be called more than once.
Definition: net.cpp:553
const char * INV
The inv message (inventory message) transmits one or more inventories of objects known to the transmi...
Definition: protocol.cpp:18
static constexpr double BLOCK_DOWNLOAD_TIMEOUT_BASE
Block download timeout base, expressed in multiples of the block interval (i.e.
bool ForNode(NodeId id, std::function< bool(CNode *pnode)> func)
Definition: net.cpp:3049
CTxMemPoolEntry stores data about the corresponding transaction, as well as data about all in-mempool...
Definition: txmempool.h:81
virtual void BlockConnected(const std::shared_ptr< const CBlock > &block, const CBlockIndex *pindex)
Notifies listeners of a block being connected.
bool IsValid() const
Definition: netaddress.cpp:451
bool DisconnectNode(const std::string &node)
Definition: net.cpp:2836
Stochastical (IP) address manager.
Definition: addrman.h:173
int GetExtraBlockRelayCount() const
Definition: net.cpp:1818
int64_t CAmount
Amount in satoshis (Can be negative)
Definition: amount.h:12
static constexpr int ADDRV2_FORMAT
A flag that is ORed into the protocol version to designate that addresses should be serialized in (un...
Definition: netaddress.h:34
uint256 GetBlockHash() const
Definition: chain.h:246
bool IsValid(enum BlockStatus nUpTo=BLOCK_VALID_TRANSACTIONS) const
Check whether this block index entry is valid up to the passed validity level.
Definition: chain.h:295
bool IsDiscouraged(const CNetAddr &net_addr)
Return whether net_addr is discouraged.
Definition: banman.cpp:69
bool IsValid() const
Definition: validation.h:119
std::chrono::microseconds PoissonNextSendInbound(std::chrono::microseconds now, std::chrono::seconds average_interval)
Attempts to obfuscate tx time through exponentially distributed emitting.
Definition: net.cpp:3062
BlockFilterType
Definition: blockfilter.h:88
std::string ToString(const T &t)
Locale-independent version of std::to_string.
Definition: string.h:79
GenTxid ToGenTxid(const CInv &inv)
Convert a TX/WITNESS_TX/WTX CInv to a GenTxid.
Definition: protocol.cpp:223
#define LOCK2(cs1, cs2)
Definition: sync.h:233
initial value. Block has not yet been rejected
bool IsGenBlkMsg() const
Definition: protocol.h:511
static constexpr int64_t EXTRA_PEER_CHECK_INTERVAL
How frequently to check for extra outbound peers and disconnect, in seconds.
ServiceFlags GetLocalServices() const
Definition: net.h:656
Outputs do not overspend inputs, no double spends, coinbase output ok, no immature coinbase spends...
Definition: chain.h:112
std::set< CTxMemPoolEntryRef, CompareIteratorByHash > Parents
Definition: txmempool.h:86
bool fClient
Definition: net.h:446
Used to relay blocks as header + vector<merkle branch> to filtered nodes.
Definition: merkleblock.h:124
const char * GETHEADERS
The getheaders message requests a headers message that provides block headers starting from a particu...
Definition: protocol.cpp:22
constexpr auto MakeUCharSpan(V &&v) -> decltype(UCharSpanCast(MakeSpan(std::forward< V >(v))))
Like MakeSpan, but for (const) unsigned char member types only.
Definition: span.h:249
uint64_t m_addr_processed
size_type size() const
Definition: streams.h:255
Invalid by a change to consensus rules more recent than SegWit.
bool fPruneMode
True if we&#39;re running in -prune mode.
virtual void ProcessMessage(CNode &pfrom, const std::string &msg_type, CDataStream &vRecv, const std::chrono::microseconds time_received, const std::atomic< bool > &interruptMsgProc)=0
Process a single message from a peer.
size_t nProcessQueueSize
Definition: net.h:418
Scripts & signatures ok. Implies all parents are also at least SCRIPTS.
Definition: chain.h:115
Transaction might have a witness prior to SegWit activation, or witness may have been malleated (whic...
bool Add(const CAddress &addr, const CNetAddr &source, int64_t nTimePenalty=0) EXCLUSIVE_LOCKS_REQUIRED(!cs)
Add a single address.
Definition: addrman.h:508
CFeeRate minRelayTxFee
A fee rate smaller than this is considered zero fee (for relaying, mining and transaction creation) ...
Definition: validation.cpp:130
CBlockIndex * pindexBestHeader
Best header we've seen so far (used for getheaders queries' starting points).
Definition: validation.cpp:117
std::optional< CAddress > GetLocalAddrForPeer(CNode *pnode)
Returns a local address that we should advertise to this peer.
Definition: net.cpp:204
static RecursiveMutex cs_most_recent_block
this block was cached as being invalid and we didn't store the reason why
An input of a transaction.
Definition: transaction.h:65
static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS
Minimum blocks required to signal NODE_NETWORK_LIMITED.
static constexpr auto MAX_FEEFILTER_CHANGE_DELAY
Maximum feefilter broadcast delay after significant change.
void Connected(const CService &addr, int64_t nTime=GetAdjustedTime()) EXCLUSIVE_LOCKS_REQUIRED(!cs)
Outer function for Connected_()
Definition: addrman.h:611
static bool HasAllDesirableServiceFlags(ServiceFlags services)
A shortcut for (services & GetDesirableServiceFlags(services)) == GetDesirableServiceFlags(services)...
Definition: protocol.h:343
const uint256 & GetWitnessHash() const
Definition: transaction.h:303
void SetServices(const CService &addr, ServiceFlags nServices) EXCLUSIVE_LOCKS_REQUIRED(!cs)
Definition: addrman.h:620
#define LOCK(cs)
Definition: sync.h:232
void StartExtraBlockRelayPeers()
Definition: net.h:872
const char * name
Definition: rest.cpp:43
static constexpr std::chrono::minutes PING_INTERVAL
Time between pings automatically sent out for latency probing and keepalive.
const char * ADDRV2
The addrv2 message relays connection information for peers on the network just like the addr message...
Definition: protocol.cpp:16
const uint256 & GetHash() const
Definition: transaction.h:302
std::string ToString() const
Definition: validation.h:125
the block failed to meet one of our checkpoints
bool IsPeerAddrLocalGood(CNode *pnode)
Definition: net.cpp:197
static const int INIT_PROTO_VERSION
initial proto version, to be increased after version/verack negotiation
Definition: version.h:15
bool Contains(const CBlockIndex *pindex) const
Efficiently check whether a block is present in this chain.
Definition: chain.h:415
A combination of a network address (CNetAddr) and a (TCP) port.
Definition: netaddress.h:560
Fast randomness source.
Definition: random.h:119
Transport protocol agnostic message container.
Definition: net.h:283
static constexpr unsigned int INVENTORY_MAX_RECENT_RELAY
The number of most recently announced transactions a peer can request.
int64_t nPowTargetSpacing
Definition: params.h:103
CBlockIndex * Next(const CBlockIndex *pindex) const
Find the successor of a block in this chain, or nullptr if the given index is not found or is the tip...
Definition: chain.h:420
const char * SENDHEADERS
Indicates that a node prefers to receive new block announcements via a "headers" message rather than ...
Definition: protocol.cpp:34
const char * MEMPOOL
The mempool message requests the TXIDs of transactions that the receiving node has verified as valid ...
Definition: protocol.cpp:27
static const unsigned int DEFAULT_MIN_RELAY_TX_FEE
Default for -minrelaytxfee, minimum relay fee for transactions.
Definition: validation.h:63
constexpr int64_t count_microseconds(std::chrono::microseconds t)
Definition: time.h:31
static const unsigned int MAX_HEADERS_RESULTS
Number of headers sent in one getheaders result.
static const int SHORT_IDS_BLOCKS_VERSION
short-id-based block download starts with this version
Definition: version.h:33
bool IsProxy(const CNetAddr &addr)
Definition: netbase.cpp:646
bool IsGenTxMsg() const
Definition: protocol.h:507
virtual void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload)
Notifies listeners when the block chain tip advances.
void CaptureMessage(const CAddress &addr, const std::string &msg_type, const Span< const unsigned char > &data, bool is_incoming)
Dump binary message to file, with timestamp.
Definition: net.cpp:3091
static constexpr int64_t CHAIN_SYNC_TIMEOUT
Timeout for (unprotected) outbound peers to sync to our chainwork, in seconds.
bool IsManualConn() const
Definition: net.h:479
virtual void FinalizeNode(const CNode &node)=0
Handle removal of a peer (clear state)
int GetExtraFullOutboundCount() const
Definition: net.cpp:1804
A CService with information about it as peer.
Definition: protocol.h:358
virtual bool IgnoresIncomingTxs()=0
Whether this node ignores txs received over p2p.
static constexpr size_t MAX_ADDR_TO_SEND
The maximum number of address records permitted in an ADDR message.
std::vector< unsigned char > GetKey() const
static constexpr auto OUTBOUND_INVENTORY_BROADCAST_INTERVAL
Average delay between trickled inventory transmissions for outbound peers.
uint256 hash
Definition: protocol.h:517
Result GetResult() const
Definition: validation.h:122
static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY
SHA256("main address relay")[0:8].
static constexpr uint32_t MAX_GETCFHEADERS_SIZE
Maximum number of cf hashes that may be requested with one getcfheaders.
bool LookupFilterHashRange(int start_height, const CBlockIndex *stop_index, std::vector< uint256 > &hashes_out) const
Get a range of filter hashes between two heights on a chain.
const char * ADDR
The addr (IP address) message relays connection information for peers on the network.
Definition: protocol.cpp:15
bool ExpectServicesFromConn() const
Definition: net.h:499
const CMessageHeader::MessageStartChars & MessageStart() const
Definition: chainparams.h:83
int64_t NodeId
Definition: net.h:88
Definition: net.h:747
Defined in BIP144.
Definition: protocol.h:477
bool GetNetworkActive() const
Definition: net.h:821
static const int DISCOURAGEMENT_THRESHOLD
Threshold for marking a node to be discouraged, e.g.
uint64_t Finalize() const
Compute the 64-bit SipHash-2-4 of the data written so far.
Definition: siphash.cpp:76
const char * FILTERCLEAR
The filterclear message tells the receiving peer to remove a previously-set bloom filter...
Definition: protocol.cpp:33
std::string ToString() const
Definition: uint256.cpp:64
std::atomic< bool > m_bip152_highbandwidth_to
Definition: net.h:527
std::vector< uint256 > vHave
Definition: block.h:116
NodeId GetId() const
Definition: net.h:585
const char * NOTFOUND
The notfound message is a reply to a getdata message which requested an object the receiving node doe...
Definition: protocol.cpp:30
CSipHasher GetDeterministicRandomizer(uint64_t id) const
Get a unique deterministic randomizer.
Definition: net.cpp:3079
const bool m_inbound_onion
Whether this peer is an inbound onion, i.e. connected via our Tor onion service.
Definition: net.h:434
Parameters that influence chain consensus.
Definition: params.h:70
static const int MAX_CMPCTBLOCK_DEPTH
Maximum depth of blocks we're willing to serve as compact blocks to peers when requested.
bool ProcessNewBlock(const CChainParams &chainparams, const std::shared_ptr< const CBlock > &block, bool force_processing, bool *new_block) LOCKS_EXCLUDED(cs_main)
Process an incoming block.
void PongReceived(std::chrono::microseconds ping_time)
A ping-pong round trip has completed successfully.
Definition: net.h:668
const char * BLOCK
The block message transmits a single serialized block.
Definition: protocol.cpp:25
std::atomic_bool fDisconnect
Definition: net.h:452
std::string strSubVersion
Subversion as sent to the P2P network in version messages.
Definition: net.cpp:113
const char * FEEFILTER
The feefilter message tells the receiving peer not to inv us any txs which do not meet the specified ...
Definition: protocol.cpp:35
const char * GETCFHEADERS
getcfheaders requests a compact filter header and the filter hashes for a range of blocks...
Definition: protocol.cpp:42
CFeeRate GetMinFee(size_t sizelimit) const
The minimum fee to get into the mempool, which may itself not be enough for larger-sized transactions...
Definition: txmempool.cpp:1022
bool IsMsgWitnessBlk() const
Definition: protocol.h:504
Validation result for a single transaction mempool acceptance.
Definition: validation.h:169
#define WITH_LOCK(cs, code)
Run code while locking a mutex.
Definition: sync.h:276
bool IsRoutable() const
Definition: netaddress.cpp:490
#define Assume(val)
Assume is the identity function.
Definition: check.h:72
uint64_t GetHash() const
Definition: netaddress.cpp:830
bool IsWtxid() const
Definition: transaction.h:396
static bool MayHaveUsefulAddressDB(ServiceFlags services)
Checks if a peer with the given service flags may be capable of having a robust address-storage DB...
Definition: protocol.h:352
unsigned int GetReceiveFloodSize() const
Definition: net.cpp:2973
static const int MAX_UNCONNECTING_HEADERS
Maximum number of unconnecting headers announcements before DoS score.
constexpr int64_t count_seconds(std::chrono::seconds t)
Helper to count the seconds of a duration.
Definition: time.h:29
RecursiveMutex cs_SubVer
Definition: net.h:436
std::chrono::microseconds m_ping_wait
bool CheckIncomingNonce(uint64_t nonce)
Definition: net.cpp:356
const CAddress addr
Definition: net.h:430
RecursiveMutex cs_main
Mutex to guard access to validation specific variables, such as reading or changing the chainstate...
Definition: validation.cpp:115
const char * GETBLOCKS
The getblocks message requests an inv message that provides block header hashes starting from a parti...
Definition: protocol.cpp:21
static constexpr int32_t MAX_PEER_TX_ANNOUNCEMENTS
Maximum number of transactions to consider for requesting, per peer.
static constexpr int CFCHECKPT_INTERVAL
Interval between compact filter checkpoints.
bool IsBlockOnlyConn() const
Definition: net.h:483
const int64_t nTimeConnected
Unix epoch time at peer connection, in seconds.
Definition: net.h:427
Transaction is missing a witness.
bool ProcessNewBlockHeaders(const std::vector< CBlockHeader > &block, BlockValidationState &state, const CChainParams &chainparams, const CBlockIndex **ppindex=nullptr) LOCKS_EXCLUDED(cs_main)
Process incoming block headers.
const char * VERACK
The verack message acknowledges a previously-received version message, informing the connecting node ...
Definition: protocol.cpp:14
bool IsMsgBlk() const
Definition: protocol.h:500
std::optional< txiter > GetIter(const uint256 &txid) const EXCLUSIVE_LOCKS_REQUIRED(cs)
Returns an iterator to the given hash, if found.
Definition: txmempool.cpp:894
uint256 GetHash() const
Definition: block.cpp:11
256-bit opaque blob.
Definition: uint256.h:124
invalid by consensus rules (excluding any below reasons)
bool HasWitness() const
Definition: transaction.h:332
bool IsReachable(enum Network net)
Definition: net.cpp:276
std::atomic_bool fReindex
void Shuffle(I first, I last, R &&rng)
More efficient than using std::shuffle on a FastRandomContext.
Definition: random.h:231
static const unsigned int MAX_GETDATA_SZ
Limit to avoid sending big packets.
ServiceFlags nServices
Serialized as uint64_t in V1, and as CompactSize in V2.
Definition: protocol.h:451
#define EXCLUSIVE_LOCKS_REQUIRED(...)
Definition: threadsafety.h:49
std::vector< CTransactionRef > vtx
Definition: block.h:66
bool ShouldRunInactivityChecks(const CNode &node, std::optional< int64_t > now=std::nullopt) const
Return true if we should disconnect the peer for failing an inactivity check.
Definition: net.cpp:1311
const char * CMPCTBLOCK
Contains a CBlockHeaderAndShortTxIDs object - providing a header and list of "short txids"...
Definition: protocol.cpp:37
virtual void CheckForStaleTipAndEvictPeers()=0
Evict extra outbound peers.
static constexpr auto BLOCK_STALLING_TIMEOUT
Time during which a peer must stall block download progress before being disconnected.
the block's data didn't match the data committed to by the PoW
virtual bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) const =0
Get statistics from node state.
CTxMemPool stores valid-according-to-the-current-best-chain transactions that may be included in the ...
Definition: txmempool.h:477
void AddKnownTx(const uint256 &hash)
Definition: net.h:635
std::atomic< int64_t > nLastTXTime
UNIX epoch time of the last transaction received from this peer that we had not yet seen (e...
Definition: net.h:571
#define LOCKS_EXCLUDED(...)
Definition: threadsafety.h:48
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_BASE
Headers download timeout.
virtual void SendPings()=0
Send ping message to all peers.
static const int FEEFILTER_VERSION
"feefilter" tells peers to filter invs to you by fee starts with this version
Definition: version.h:30
std::set< uint256 > GetUnbroadcastTxs() const
Returns transactions in unbroadcast set.
Definition: txmempool.h:771
const char * VERSION
The version message provides information about the transmitting node to the receiving node at the beg...
Definition: protocol.cpp:13
std::vector< std::pair< unsigned int, uint256 > > vMatchedTxn
Public only for unit testing and relay testing (not relayed).
Definition: merkleblock.h:137
The block chain is a tree shaped structure starting with the genesis block at the root...
Definition: chain.h:137
void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds)
static const unsigned int BLOCK_DOWNLOAD_WINDOW
Size of the "block download window": how far ahead of our current height do we fetch? Larger windows tolerate larger download speed differences between peer, but increase the potential degree of disordering of blocks on disk (which make reindexing and pruning harder).
static const int MIN_PEER_PROTO_VERSION
disconnect from peers older than this proto version
Definition: version.h:18
static const unsigned int MAX_INV_SZ
The maximum number of entries in an 'inv' protocol message.
static const int PROTOCOL_VERSION
network protocol versioning
Definition: version.h:12
virtual void RelayTransaction(const uint256 &txid, const uint256 &wtxid)=0
Relay transaction to all peers.
bool IsTxAvailable(size_t index) const
static const unsigned int DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN
Default number of orphan+recently-replaced txn to keep around for block reconstruction.
A block this one builds on is invalid.
std::string GetArg(const std::string &strArg, const std::string &strDefault) const
Return string argument or default value.
Definition: system.cpp:588
bool fLogIPs
Definition: logging.cpp:36
int64_t GetAdjustedTime()
Definition: timedata.cpp:34
ServiceFlags GetDesirableServiceFlags(ServiceFlags services)
Gets the set of service flags which are "desirable" for a given peer.
Definition: protocol.cpp:127
bool LookupFilterHeader(const CBlockIndex *block_index, uint256 &header_out)
Get a single filter header by block.
CBlockIndex * Tip() const
Returns the index entry for the tip of this chain, or nullptr if none.
Definition: chain.h:403
bool IsInitialBlockDownload() const
Check whether we are doing an initial block download (synchronizing from disk or network) ...
#define LIMITED_STRING(obj, n)
Definition: serialize.h:445
static uint32_t GetFetchFlags(const CNode &pfrom) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
std::atomic< int64_t > nTimeOffset
Definition: net.h:428
ArgsManager gArgs
Definition: system.cpp:84
const char * GETDATA
The getdata message requests one or more data objects from another node.
Definition: protocol.cpp:19
bool fListen
Definition: net.cpp:109
Fee rate in satoshis per kilobyte: CAmount / kB.
Definition: feerate.h:29
static constexpr auto OVERLOADED_PEER_TX_DELAY
How long to delay requesting transactions from overloaded peers (see MAX_PEER_TX_REQUEST_IN_FLIGHT).
std::atomic_bool fSuccessfullyConnected
fSuccessfullyConnected is set to true on receiving VERACK from the peer.
Definition: net.h:449
CBlockLocator GetLocator(const CBlockIndex *pindex=nullptr) const
Return a CBlockLocator that refers to a block in this chain (by default the tip). ...
Definition: chain.cpp:23
SipHash-2-4.
Definition: siphash.h:13
#define AssertLockNotHeld(cs)
Definition: sync.h:82
bool IsInvalid() const
Definition: validation.h:120
static int count
Definition: tests.c:41
bool OutboundTargetReached(bool historicalBlockServingLimit) const
check if the outbound target is reached if param historicalBlockServingLimit is set true...
Definition: net.cpp:2927
static const unsigned int MAX_SUBVERSION_LENGTH
Maximum length of the user agent string in version message.
Definition: net.h:60
std::atomic< int > nVersion
Definition: net.h:435
Invalid by a change to consensus rules more recent than SegWit.
std::string ConnectionTypeAsString() const
Definition: net.h:665
bool ReadRawBlockFromDisk(std::vector< uint8_t > &block, const FlatFilePos &pos, const CMessageHeader::MessageStartChars &message_start)
virtual void SetBestHeight(int height)=0
Set the best height.
static size_t RecursiveDynamicUsage(const CScript &script)
Definition: core_memusage.h:12
const char * CFCHECKPT
cfcheckpt is a response to a getcfcheckpt request containing a vector of evenly spaced filter headers...
Definition: protocol.cpp:45
ReadStatus InitData(const CBlockHeaderAndShortTxIDs &cmpctblock, const std::vector< std::pair< uint256, CTransactionRef >> &extra_txn)
bool m_limited_node
Definition: net.h:447
std::string ToString() const
CTransactionRef get(const uint256 &hash) const
Definition: txmempool.cpp:822
block timestamp was > 2 hours in the future (or our clock is bad)
static const unsigned int MAX_BLOCKS_TO_ANNOUNCE
Maximum number of headers to announce when relaying blocks with headers message.
std::atomic< bool > m_bip152_highbandwidth_from
Definition: net.h:529
double CountSecondsDouble(SecondsDouble t)
Helper to count the seconds in any std::chrono::duration type.
Definition: time.h:38
void RemoveUnbroadcastTx(const uint256 &txid, const bool unchecked=false)
Removes a transaction from the unbroadcast set.
Definition: txmempool.cpp:957
bool IsAddrFetchConn() const
Definition: net.h:491
CChainState & ActiveChainstate() const
The most-work chain.
bool IsBanned(const CNetAddr &net_addr)
Return whether net_addr is banned.
Definition: banman.cpp:75
const char * TX
The tx message transmits a single transaction.
Definition: protocol.cpp:23
static const unsigned int MAX_LOCATOR_SZ
The maximum number of entries in a locator.
The basic transaction that is broadcasted on the network and contained in blocks. ...
Definition: transaction.h:259
int nHeight
height of the entry in the chain. The genesis block has height 0
Definition: chain.h:150
Information about a peer.
Definition: net.h:394
static std::unique_ptr< PeerManager > make(const CChainParams &chainparams, CConnman &connman, CAddrMan &addrman, BanMan *banman, CScheduler &scheduler, ChainstateManager &chainman, CTxMemPool &pool, bool ignore_incoming_txs)
const Consensus::Params & GetConsensus() const
Definition: chainparams.h:82
std::vector< int > vHeightInFlight
void ForEachNode(const NodeFn &func)
Definition: net.h:832
Simple class for background tasks that should be run periodically or once "after a while"...
Definition: scheduler.h:33
static constexpr int32_t MAX_PEER_TX_REQUEST_IN_FLIGHT
Maximum number of in-flight transaction requests from a peer.
CBlockIndex * GetAncestor(int height)
Efficiently find an ancestor of this block.
Definition: chain.cpp:111
full block available in blk*.dat
Definition: chain.h:121
bool DeploymentActiveAt(const CBlockIndex &index, const Consensus::Params &params, Consensus::BuriedDeployment dep)
Determine if a deployment is active for this block.
virtual void InitializeNode(CNode *pnode)=0
Initialize a peer (setup state, queue any initial messages)
void AddTimeData(const CNetAddr &ip, int64_t nOffsetSample)
Definition: timedata.cpp:41
#define LogPrintf(...)
Definition: logging.h:184
void Good(const CService &addr, bool test_before_evict=true, int64_t nTime=GetAdjustedTime()) EXCLUSIVE_LOCKS_REQUIRED(!cs)
Mark an entry as accessible.
Definition: addrman.h:539
int64_t GetTime()
Return system time (or mocked time, if set)
Definition: time.cpp:26
std::atomic_bool fImporting
static const unsigned int DEFAULT_MAX_ORPHAN_TRANSACTIONS
Default for -maxorphantx, maximum number of orphan transactions kept in memory.
Defined in BIP 339.
Definition: protocol.h:473
int GetCommonVersion() const
Definition: net.h:615
static const int INVALID_CB_NO_BAN_VERSION
not banning for invalid compact blocks starts with this version
Definition: version.h:36
COutPoint prevout
Definition: transaction.h:68
std::atomic_bool fPauseRecv
Definition: net.h:457
bool IsAddrV1Compatible() const
Check if the current object can be serialized in pre-ADDRv2/BIP155 format.
Definition: netaddress.cpp:505
static const int WTXID_RELAY_VERSION
"wtxidrelay" command for wtxid-based relay starts with this version
Definition: version.h:39
bool HasPermission(NetPermissionFlags permission) const
Definition: net.h:443
bool IsInboundConn() const
Definition: net.h:495
static constexpr int HISTORICAL_BLOCK_AGE
Age after which a block is considered historical for purposes of rate limiting block relay...
bool LookupFilterRange(int start_height, const CBlockIndex *stop_index, std::vector< BlockFilter > &filters_out) const
Get a range of filters between two heights on a chain.
std::atomic< int64_t > nLastBlockTime
UNIX epoch time of the last block received from this peer that we had not yet seen (e...
Definition: net.h:565
static constexpr double MAX_ADDR_RATE_PER_SECOND
The maximum rate of address records we're willing to process on average.
Tx already in mempool or conflicts with a tx in the chain (if it conflicts with another tx in mempool...
uint32_t nTime
Always included in serialization, except in the network format on INIT_PROTO_VERSION.
Definition: protocol.h:449
bool ReadBlockFromDisk(CBlock &block, const FlatFilePos &pos, const Consensus::Params &consensusParams)
Functions for disk access for blocks.
static constexpr int64_t STALE_CHECK_INTERVAL
How frequently to check for stale tips, in seconds.
otherwise didn't meet our local policy rules
A generic txid reference (txid or wtxid).
Definition: transaction.h:390
bool HaveTxsDownloaded() const
Check whether this block&#39;s and all previous blocks&#39; transactions have been downloaded (and stored to ...
Definition: chain.h:258
CAmount GetFeePerK() const
Return the fee in satoshis for a size of 1000 bytes.
Definition: feerate.h:56
uint64_t randrange(uint64_t range) noexcept
Generate a random integer in the range [0..range).
Definition: random.h:190
bool GetUseAddrmanOutgoing() const
Definition: net.h:822
static constexpr auto AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL
Average delay between local address broadcasts.
unsigned int nTx
Number of transactions in this block.
Definition: chain.h:169
void scheduleFromNow(Function f, std::chrono::milliseconds delta)
Call f once after the delta has passed.
Definition: scheduler.h:47
Nodes collect new transactions into a block, hash them into a hash tree, and scan through nonce value...
Definition: block.h:20
CChain & ActiveChain() const
Definition: validation.h:965
int in_avail() const
Definition: streams.h:358
virtual bool ProcessMessages(CNode *pnode, std::atomic< bool > &interrupt)=0
Process protocol messages received from a given node.
const char * FILTERADD
The filteradd message tells the receiving peer to add a single element to a previously-set bloom filt...
Definition: protocol.cpp:32
RecursiveMutex cs
This mutex needs to be locked when accessing mapTx or other members that are guarded by it...
Definition: txmempool.h:565
bool IsFullOutboundConn() const
Definition: net.h:475
int64_t GetBlockProofEquivalentTime(const CBlockIndex &to, const CBlockIndex &from, const CBlockIndex &tip, const Consensus::Params &params)
Return the time it would take to redo the work difference between from and to, assuming the current h...
Definition: chain.cpp:137
const std::string & BlockFilterTypeName(BlockFilterType filter_type)
Get the human-readable name for a filter type.
const char * GETBLOCKTXN
Contains a BlockTransactionsRequest Peer should respond with "blocktxn" message.
Definition: protocol.cpp:38
std::unique_ptr< TxRelay > m_tx_relay
Definition: net.h:558
virtual bool SendMessages(CNode *pnode) EXCLUSIVE_LOCKS_REQUIRED(pnode -> cs_sendProcessing)=0
Send queued protocol messages to a given node.
uint256 hash
Definition: transaction.h:29
Span< A > constexpr MakeSpan(A(&a)[N])
MakeSpan for arrays:
Definition: span.h:222