// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <net_processing.h>

#include <addrman.h>
#include <banman.h>
#include <blockencodings.h>
#include <blockfilter.h>
#include <chainparams.h>
#include <consensus/validation.h>
#include <hash.h>
#include <index/blockfilterindex.h>
#include <merkleblock.h>
#include <netbase.h>
#include <netmessagemaker.h>
#include <policy/fees.h>
#include <policy/policy.h>
#include <primitives/block.h>
#include <primitives/transaction.h>
#include <random.h>
#include <reverse_iterator.h>
#include <scheduler.h>
#include <streams.h>
#include <tinyformat.h>
#include <txmempool.h>
#include <util/check.h> // For NDEBUG compile time check
#include <util/strencodings.h>
#include <util/system.h>
#include <validation.h>

#include <memory>
#include <typeinfo>

/** Expiration time for orphan transactions in seconds */
static constexpr int64_t ORPHAN_TX_EXPIRE_TIME = 20 * 60;
/** Minimum time between orphan transactions expire time checks in seconds */
static constexpr int64_t ORPHAN_TX_EXPIRE_INTERVAL = 5 * 60;
/** How long to cache transactions in mapRelay for normal relay */
static constexpr std::chrono::seconds RELAY_TX_CACHE_TIME = std::chrono::minutes{15};
/** How long a transaction has to be in the mempool before it can unconditionally be relayed (even when not in mapRelay). */
static constexpr std::chrono::seconds UNCONDITIONAL_RELAY_DELAY = std::chrono::minutes{2};
/** Headers download timeout expressed in microseconds
 * Timeout = base + per_header * (expected number of headers) */
static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_BASE = 15 * 60 * 1000000; // 15 minutes
static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1000; // 1ms/header
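// Illustrative arithmetic (not a separate tunable): syncing a chain of
// ~600,000 headers would give 15 min + 600,000 * 1 ms = 25 minutes.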
/** Protect at least this many outbound peers from disconnection due to slow/
 * behind headers chain.
 */
static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT = 4;
/** Timeout for (unprotected) outbound peers to sync to our chainwork, in seconds */
static constexpr int64_t CHAIN_SYNC_TIMEOUT = 20 * 60; // 20 minutes
/** How frequently to check for stale tips, in seconds */
static constexpr int64_t STALE_CHECK_INTERVAL = 10 * 60; // 10 minutes
/** How frequently to check for extra outbound peers and disconnect, in seconds */
static constexpr int64_t EXTRA_PEER_CHECK_INTERVAL = 45;
/** Minimum time an outbound-peer-eviction candidate must be connected for, in order to evict, in seconds */
static constexpr int64_t MINIMUM_CONNECT_TIME = 30;
/** SHA256("main address relay")[0:8] */
static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL;
/// Age after which a stale block will no longer be served if requested as
/// protection against fingerprinting. Set to one month, denominated in seconds.
static constexpr int STALE_RELAY_AGE_LIMIT = 30 * 24 * 60 * 60;
/// Age after which a block is considered historical for purposes of rate
/// limiting block relay. Set to one week, denominated in seconds.
static constexpr int HISTORICAL_BLOCK_AGE = 7 * 24 * 60 * 60;
/** Time between pings automatically sent out for latency probing and keepalive */
static constexpr std::chrono::minutes PING_INTERVAL{2};
/** The maximum number of entries in a locator */
static const unsigned int MAX_LOCATOR_SZ = 101;
/** The maximum number of entries in an 'inv' protocol message */
static const unsigned int MAX_INV_SZ = 50000;
/** Maximum number of in-flight transaction requests from a peer. It is not a hard limit,
 * but the threshold at which OVERLOADED_PEER_TX_DELAY kicks in. */
static constexpr int32_t MAX_PEER_TX_REQUEST_IN_FLIGHT = 100;
/** Maximum number of transactions to consider for requesting, per peer. It provides a reasonable DoS limit to
 * per-peer memory usage spent on announcements, while covering peers continuously sending INVs at the maximum
 * rate (by our own policy, see INVENTORY_BROADCAST_PER_SECOND) for several minutes, while not receiving
 * the actual transaction (from any peer) in response to requests for them. */
static constexpr int32_t MAX_PEER_TX_ANNOUNCEMENTS = 5000;
/** How long to delay requesting transactions via txids, if we have wtxid-relaying peers */
static constexpr auto TXID_RELAY_DELAY = std::chrono::seconds{2};
/** How long to delay requesting transactions from non-preferred peers */
static constexpr auto NONPREF_PEER_TX_DELAY = std::chrono::seconds{2};
/** How long to delay requesting transactions from overloaded peers (see MAX_PEER_TX_REQUEST_IN_FLIGHT). */
static constexpr auto OVERLOADED_PEER_TX_DELAY = std::chrono::seconds{2};
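// Illustrative arithmetic: the three delays above are additive, so a txid
// announcement from a non-preferred, overloaded peer while wtxid peers are
// available is not requested for 2 + 2 + 2 = 6 seconds (see AddTxAnnouncement).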
/** How long to wait (in microseconds) before downloading a transaction from an additional peer */
static constexpr std::chrono::microseconds GETDATA_TX_INTERVAL{std::chrono::seconds{60}};
/** Limit to avoid sending big packets. Not used in processing incoming GETDATA for compatibility */
static const unsigned int MAX_GETDATA_SZ = 1000;
/** Number of blocks that can be requested at any given time from a single peer. */
static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16;
/** Timeout in seconds during which a peer must stall block download progress before being disconnected. */
static const unsigned int BLOCK_STALLING_TIMEOUT = 2;
/** Number of headers sent in one getheaders result. We rely on the assumption that if a peer sends
 * fewer than this number, we reached its tip. Changing this value is a protocol upgrade.
 *
 * With a protocol upgrade, we now enforce an additional restriction on the
 * total size of a "headers" message (see below). The absolute limit
 * on the number of headers still applies as well, so that we are not
 * overloaded either by many small headers or by a few very large ones.
 */
static const unsigned int MAX_HEADERS_RESULTS = 2000;
/** Maximum size of a "headers" message. This is enforced starting with
 * SIZE_HEADERS_LIMIT_VERSION peers and prevents overloading if we have
 * very large headers (due to auxpow).
 */
static const unsigned int MAX_HEADERS_SIZE = (6 << 20); // 6 MiB
/** Size of a headers message that is the threshold for assuming that the
 * peer has more headers (even if we received fewer than MAX_HEADERS_RESULTS).
 * This is used starting with SIZE_HEADERS_LIMIT_VERSION peers.
 */
static const unsigned int THRESHOLD_HEADERS_SIZE = (4 << 20); // 4 MiB
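// Illustrative sizing, assuming plain 80-byte headers: a full batch of 2000
// headers is only ~160 KB, so the count limit binds; with multi-kilobyte
// auxpow headers the size limits above bind first.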
/** Maximum depth of blocks we're willing to serve as compact blocks to peers
 * when requested. For older blocks, a regular BLOCK response will be sent. */
static const int MAX_CMPCTBLOCK_DEPTH = 5;
/** Maximum depth of blocks we're willing to respond to GETBLOCKTXN requests for. */
static const int MAX_BLOCKTXN_DEPTH = 10;
/** Size of the "block download window": how far ahead of our current height do we fetch?
 * Larger windows tolerate larger download speed differences between peers, but increase the potential
 * degree of disordering of blocks on disk (which makes reindexing and pruning harder). We'll probably
 * want to make this a per-peer adaptive value at some point. */
static const unsigned int BLOCK_DOWNLOAD_WINDOW = 1024;
/** Block download timeout base, expressed in millionths of the block interval (i.e. 10 min) */
static const int64_t BLOCK_DOWNLOAD_TIMEOUT_BASE = 1000000;
/** Additional block download timeout per parallel downloading peer (i.e. 5 min) */
static const int64_t BLOCK_DOWNLOAD_TIMEOUT_PER_PEER = 500000;
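// Illustrative arithmetic: with two peers downloading in parallel, a block
// times out after (1000000 + 2 * 500000) millionths of the block interval,
// i.e. 2 * 10 min = 20 minutes.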
/** Maximum number of headers to announce when relaying blocks with headers message. */
static const unsigned int MAX_BLOCKS_TO_ANNOUNCE = 8;
/** Maximum number of unconnecting headers announcements before DoS score */
static const int MAX_UNCONNECTING_HEADERS = 10;
/** Minimum blocks required to signal NODE_NETWORK_LIMITED */
static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS = 288;
/** Average delay between local address broadcasts */
static constexpr std::chrono::hours AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL{24};
/** Average delay between peer address broadcasts */
static constexpr std::chrono::seconds AVG_ADDRESS_BROADCAST_INTERVAL{30};
/** Average delay between trickled inventory transmissions in seconds.
 * Blocks and peers with noban permission bypass this, outbound peers get half this delay. */
static const unsigned int INVENTORY_BROADCAST_INTERVAL = 5;
/** Maximum rate of inventory items to send per second.
 * Limits the impact of low-fee transaction floods. */
static constexpr unsigned int INVENTORY_BROADCAST_PER_SECOND = 7;
/** Maximum number of inventory items to send per transmission. */
static constexpr unsigned int INVENTORY_BROADCAST_MAX = INVENTORY_BROADCAST_PER_SECOND * INVENTORY_BROADCAST_INTERVAL;
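// Illustrative arithmetic: 7 items/s * 5 s = 35 items per trickled transmission.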
/** The number of most recently announced transactions a peer can request. */
static constexpr unsigned int INVENTORY_MAX_RECENT_RELAY = 3500;
/** Verify that INVENTORY_MAX_RECENT_RELAY is enough to cache everything typically
 * relayed before unconditional relay from the mempool kicks in. This is only a
 * lower bound, and it should be larger to account for higher inv rate to outbound
 * peers, and random variations in the broadcast mechanism. */
static_assert(INVENTORY_MAX_RECENT_RELAY >= INVENTORY_BROADCAST_PER_SECOND * UNCONDITIONAL_RELAY_DELAY / std::chrono::seconds{1}, "INVENTORY_MAX_RECENT_RELAY too low");
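// Illustrative arithmetic: the bound checked above works out to
// 7 items/s * 120 s = 840, comfortably below the 3500 entries kept.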
/** Average delay between feefilter broadcasts in seconds. */
static constexpr unsigned int AVG_FEEFILTER_BROADCAST_INTERVAL = 10 * 60;
/** Maximum feefilter broadcast delay after significant change. */
static constexpr unsigned int MAX_FEEFILTER_CHANGE_DELAY = 5 * 60;
/** Maximum number of compact filters that may be requested with one getcfilters. See BIP 157. */
static constexpr uint32_t MAX_GETCFILTERS_SIZE = 1000;
/** Maximum number of cf hashes that may be requested with one getcfheaders. See BIP 157. */
static constexpr uint32_t MAX_GETCFHEADERS_SIZE = 2000;
/** The maximum percentage of addresses from our addrman to return in response to a getaddr message. */
static constexpr size_t MAX_PCT_ADDR_TO_SEND = 23;
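// Illustrative arithmetic: an addrman holding 20,000 addresses would yield at
// most 23%, i.e. 4,600 addresses, in one getaddr response.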

struct COrphanTx {
    // When modifying, adapt the copy of this definition in tests/DoS_tests.
    CTransactionRef tx;
    NodeId fromPeer;
    int64_t nTimeExpire;
    size_t list_pos;
};

/** Guards orphan transactions and extra txs for compact blocks */
RecursiveMutex g_cs_orphans;
/** Map from txid to orphan transaction record. Limited by
 * -maxorphantx/DEFAULT_MAX_ORPHAN_TRANSACTIONS */
std::map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(g_cs_orphans);
/** Index from wtxid into the mapOrphanTransactions to look up orphan
 * transactions using their witness ids. */
std::map<uint256, std::map<uint256, COrphanTx>::iterator> g_orphans_by_wtxid GUARDED_BY(g_cs_orphans);

void EraseOrphansFor(NodeId peer);

// Internal stuff
namespace {
/** Number of nodes with fSyncStarted. */
int nSyncStarted GUARDED_BY(cs_main) = 0;

/**
 * Sources of received blocks, saved to be able to punish them when processing
 * happens afterwards.
 * Set mapBlockSource[hash].second to false if the node should not be
 * punished if the block is invalid.
 */
std::map<uint256, std::pair<NodeId, bool>> mapBlockSource GUARDED_BY(cs_main);

/**
 * Filter for transactions that were recently rejected by
 * AcceptToMemoryPool. These are not rerequested until the chain tip
 * changes, at which point the entire filter is reset.
 *
 * Without this filter we'd be re-requesting txs from each of our peers,
 * increasing bandwidth consumption considerably. For instance, with 100
 * peers, half of which relay a tx we don't accept, that might be a 50x
 * bandwidth increase. A flooding attacker attempting to roll over the
 * filter using minimum-sized, 60-byte, transactions might manage to send
 * 1000/sec if we have fast peers, so we pick 120,000 to give our peers a
 * two minute window to send invs to us.
 *
 * Decreasing the false positive rate is fairly cheap, so we pick one in a
 * million to make it highly unlikely for users to have issues with this
 * filter.
 *
 * We typically only add wtxids to this filter. For non-segwit
 * transactions, the txid == wtxid, so this only prevents us from
 * re-downloading non-segwit transactions when communicating with
 * non-wtxidrelay peers -- which is important for avoiding malleation
 * attacks that could otherwise interfere with transaction relay from
 * non-wtxidrelay peers. For communicating with wtxidrelay peers, having
 * the reject filter store wtxids is exactly what we want, as it avoids
 * re-downloading a rejected transaction.
 *
 * In cases where we can tell that a segwit transaction will fail
 * validation no matter the witness, we may add the txid of such a
 * transaction to the filter as well. This can be helpful when
 * communicating with txid-relay peers or if we were to otherwise fetch a
 * transaction via txid (eg in our orphan handling).
 *
 * Memory used: 1.3 MB
 */
std::unique_ptr<CRollingBloomFilter> recentRejects GUARDED_BY(cs_main);
uint256 hashRecentRejectsChainTip GUARDED_BY(cs_main);
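// Illustrative usage, a sketch only (the real call sites appear later in this
// file, assuming the standard CRollingBloomFilter API):
//   if (recentRejects->contains(gtxid.GetHash())) return true; // don't re-request
//   ...
//   recentRejects->insert(tx.GetWitnessHash()); // on AcceptToMemoryPool rejection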

/*
 * Filter for transactions that have been recently confirmed.
 * We use this to avoid requesting transactions that have already been
 * confirmed.
 */
Mutex g_cs_recent_confirmed_transactions;
std::unique_ptr<CRollingBloomFilter> g_recent_confirmed_transactions GUARDED_BY(g_cs_recent_confirmed_transactions);

/** Blocks that are in flight, and that are in the queue to be downloaded. */
struct QueuedBlock {
    uint256 hash;
    const CBlockIndex* pindex;                               //!< Optional.
    bool fValidatedHeaders;                                  //!< Whether this block has validated headers at the time of request.
    std::unique_ptr<PartiallyDownloadedBlock> partialBlock;  //!< Optional, used for CMPCTBLOCK downloads
};
std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> > mapBlocksInFlight GUARDED_BY(cs_main);

/** Stack of nodes which we have set to announce using compact blocks */
std::list<NodeId> lNodesAnnouncingHeaderAndIDs GUARDED_BY(cs_main);

/** Number of preferable block download peers. */
int nPreferredDownload GUARDED_BY(cs_main) = 0;

/** Number of peers from which we're downloading blocks. */
int nPeersWithValidatedDownloads GUARDED_BY(cs_main) = 0;

/** Number of peers with wtxid relay. */
int g_wtxid_relay_peers GUARDED_BY(cs_main) = 0;

/** Number of outbound peers with m_chain_sync.m_protect. */
int g_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0;

/** When our tip was last updated. */
std::atomic<int64_t> g_last_tip_update(0);

/** Relay map (txid or wtxid -> CTransactionRef) */
typedef std::map<uint256, CTransactionRef> MapRelay;
MapRelay mapRelay GUARDED_BY(cs_main);
/** Expiration-time ordered list of (expire time, relay map entry) pairs. */
std::deque<std::pair<int64_t, MapRelay::iterator>> vRelayExpiration GUARDED_BY(cs_main);
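
// Orders map iterators by the address of the element they point to: an
// arbitrary but stable ordering, which is all that is needed to keep
// iterators (not otherwise comparable) in the std::set below.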
struct IteratorComparator
{
    template<typename I>
    bool operator()(const I& a, const I& b) const
    {
        return &(*a) < &(*b);
    }
};

/** Index from the parents' COutPoint into the mapOrphanTransactions. Used
 * to remove orphan transactions from the mapOrphanTransactions */
std::map<COutPoint, std::set<std::map<uint256, COrphanTx>::iterator, IteratorComparator>> mapOrphanTransactionsByPrev GUARDED_BY(g_cs_orphans);
/** Orphan transactions in vector for quick random eviction */
std::vector<std::map<uint256, COrphanTx>::iterator> g_orphan_list GUARDED_BY(g_cs_orphans);

/** Orphan/conflicted/etc transactions that are kept for compact block reconstruction.
 * The last -blockreconstructionextratxn/DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN of
 * these are kept in a ring buffer */
static std::vector<std::pair<uint256, CTransactionRef>> vExtraTxnForCompact GUARDED_BY(g_cs_orphans);
/** Offset into vExtraTxnForCompact to insert the next tx */
static size_t vExtraTxnForCompactIt GUARDED_BY(g_cs_orphans) = 0;
} // namespace

namespace {
/**
 * Maintain validation-specific state about nodes, protected by cs_main, instead
 * of by CNode's own locks. This simplifies asynchronous operation, where
 * processing of incoming data is done after the ProcessMessage call returns,
 * and we're no longer holding the node's locks.
 */
struct CNodeState {
    //! The peer's address
    const CService address;
    //! Whether we have a fully established connection.
    bool fCurrentlyConnected;
    //! The best known block we know this peer has announced.
    const CBlockIndex *pindexBestKnownBlock;
    //! The hash of the last unknown block this peer has announced.
    uint256 hashLastUnknownBlock;
    //! The last full block we both have.
    const CBlockIndex *pindexLastCommonBlock;
    //! The best header we have sent our peer.
    const CBlockIndex *pindexBestHeaderSent;
    //! Length of the current streak of unconnecting headers announcements
    int nUnconnectingHeaders;
    //! Whether we've started headers synchronization with this peer.
    bool fSyncStarted;
    //! When to potentially disconnect peer for stalling headers download
    int64_t nHeadersSyncTimeout;
    //! Since when we're stalling block download progress (in microseconds), or 0.
    int64_t nStallingSince;
    std::list<QueuedBlock> vBlocksInFlight;
    //! When the first entry in vBlocksInFlight started downloading. Don't care when vBlocksInFlight is empty.
    int64_t nDownloadingSince;
    int nBlocksInFlight;
    int nBlocksInFlightValidHeaders;
    //! Whether we consider this a preferred download peer.
    bool fPreferredDownload;
    //! Whether this peer wants invs or headers (when possible) for block announcements.
    bool fPreferHeaders;
    //! Whether this peer wants invs or cmpctblocks (when possible) for block announcements.
    bool fPreferHeaderAndIDs;
    /**
     * Whether this peer will send us cmpctblocks if we request them.
     * This is not used to gate request logic, as we really only care about fSupportsDesiredCmpctVersion,
     * but is used as a flag to "lock in" the version of compact blocks (fWantsCmpctWitness) we send.
     */
    bool fProvidesHeaderAndIDs;
    //! Whether this peer can give us witnesses
    bool fHaveWitness;
    //! Whether this peer wants witnesses in cmpctblocks/blocktxns
    bool fWantsCmpctWitness;
    /**
     * If we've announced NODE_WITNESS to this peer: whether the peer sends witnesses in cmpctblocks/blocktxns,
     * otherwise: whether this peer sends non-witnesses in cmpctblocks/blocktxns.
     */
    bool fSupportsDesiredCmpctVersion;

    /** State used to enforce CHAIN_SYNC_TIMEOUT and EXTRA_PEER_CHECK_INTERVAL logic.
     *
     * Both are only in effect for outbound, non-manual, non-protected connections.
     * Any peer protected (m_protect = true) is not chosen for eviction. A peer is
     * marked as protected if all of these are true:
     *   - its connection type is IsBlockOnlyConn() == false
     *   - it gave us a valid connecting header
     *   - we haven't reached MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT yet
     *   - it has a better chain than we have
     *
     * CHAIN_SYNC_TIMEOUT: if a peer's best known block has less work than our tip,
     * set a timeout CHAIN_SYNC_TIMEOUT seconds in the future:
     *   - If at timeout their best known block now has more work than our tip
     *     when the timeout was set, then either reset the timeout or clear it
     *     (after comparing against our current tip's work)
     *   - If at timeout their best known block still has less work than our
     *     tip did when the timeout was set, then send a getheaders message,
     *     and set a shorter timeout, HEADERS_RESPONSE_TIME seconds in the future.
     *     If their best known block is still behind when that new timeout is
     *     reached, disconnect.
     *
     * EXTRA_PEER_CHECK_INTERVAL: after each interval, if we have too many outbound peers,
     * drop the outbound one that least recently announced a new block to us.
     */
    struct ChainSyncTimeoutState {
        //! A timeout used for checking whether our peer has sufficiently synced
        int64_t m_timeout;
        //! A header with the work we require on our peer's chain
        const CBlockIndex* m_work_header;
        //! After timeout is reached, set to true after sending getheaders
        bool m_sent_getheaders;
        //! Whether this peer is protected from disconnection due to a bad/slow chain
        bool m_protect;
    };

    ChainSyncTimeoutState m_chain_sync;

    //! Time of last new block announcement
    int64_t m_last_block_announcement;

    //! Whether this peer is an inbound connection
    bool m_is_inbound;

    //! Whether this peer is a manual connection
    bool m_is_manual_connection;

    //! A rolling bloom filter of all announced tx CInvs to this peer.
    CRollingBloomFilter m_recently_announced_invs = CRollingBloomFilter{INVENTORY_MAX_RECENT_RELAY, 0.000001};

    //! Whether this peer relays txs via wtxid
    bool m_wtxid_relay{false};

    CNodeState(CAddress addrIn, bool is_inbound, bool is_manual)
        : address(addrIn), m_is_inbound(is_inbound), m_is_manual_connection(is_manual)
    {
        fCurrentlyConnected = false;
        pindexBestKnownBlock = nullptr;
        hashLastUnknownBlock.SetNull();
        pindexLastCommonBlock = nullptr;
        pindexBestHeaderSent = nullptr;
        nUnconnectingHeaders = 0;
        fSyncStarted = false;
        nHeadersSyncTimeout = 0;
        nStallingSince = 0;
        nDownloadingSince = 0;
        nBlocksInFlight = 0;
        nBlocksInFlightValidHeaders = 0;
        fPreferredDownload = false;
        fPreferHeaders = false;
        fPreferHeaderAndIDs = false;
        fProvidesHeaderAndIDs = false;
        fHaveWitness = false;
        fWantsCmpctWitness = false;
        fSupportsDesiredCmpctVersion = false;
        m_chain_sync = { 0, nullptr, false, false };
        m_last_block_announcement = 0;
        m_recently_announced_invs.reset();
    }
};

/** Map maintaining per-node state. */
static std::map<NodeId, CNodeState> mapNodeState GUARDED_BY(cs_main);

static CNodeState *State(NodeId pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
    std::map<NodeId, CNodeState>::iterator it = mapNodeState.find(pnode);
    if (it == mapNodeState.end())
        return nullptr;
    return &it->second;
}

/**
 * Data structure for an individual peer. This struct is not protected by
 * cs_main since it does not contain validation-critical data.
 *
 * Memory is owned by shared pointers and this object is destructed when
 * the refcount drops to zero.
 *
 * TODO: move most members from CNodeState to this structure.
 * TODO: move remaining application-layer data members from CNode to this structure.
 */
struct Peer {
    /** Same id as the CNode object for this peer */
    const NodeId m_id{0};

    /** Protects misbehavior data members */
    Mutex m_misbehavior_mutex;
    /** Accumulated misbehavior score for this peer */
    int m_misbehavior_score GUARDED_BY(m_misbehavior_mutex){0};
    /** Whether this peer should be disconnected and marked as discouraged (unless it has the noban permission). */
    bool m_should_discourage GUARDED_BY(m_misbehavior_mutex){false};

    /** Set of txids to reconsider once their parent transactions have been accepted **/
    std::set<uint256> m_orphan_work_set GUARDED_BY(g_cs_orphans);

    /** Protects m_getdata_requests **/
    Mutex m_getdata_requests_mutex;
    /** Work queue of items requested by this peer **/
    std::deque<CInv> m_getdata_requests GUARDED_BY(m_getdata_requests_mutex);

    Peer(NodeId id) : m_id(id) {}
};

using PeerRef = std::shared_ptr<Peer>;

/**
 * Map of all Peer objects, keyed by peer id. This map is protected
 * by the global g_peer_mutex. Once a shared pointer reference is
 * taken, the lock may be released. Individual fields are protected by
 * their own locks.
 */
Mutex g_peer_mutex;
static std::map<NodeId, PeerRef> g_peer_map GUARDED_BY(g_peer_mutex);

/** Get a shared pointer to the Peer object.
 *  May return nullptr if the Peer object can't be found. */
static PeerRef GetPeerRef(NodeId id)
{
    LOCK(g_peer_mutex);
    auto it = g_peer_map.find(id);
    return it != g_peer_map.end() ? it->second : nullptr;
}
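
// Illustrative usage, a sketch mirroring Misbehaving() below: the returned
// shared pointer keeps the Peer alive even if it is concurrently erased from
// g_peer_map:
//   PeerRef peer = GetPeerRef(nodeid);
//   if (peer == nullptr) return; // peer already finalized
//   LOCK(peer->m_misbehavior_mutex);
//   peer->m_misbehavior_score += howmuch;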

static void UpdatePreferredDownload(const CNode& node, CNodeState* state) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    nPreferredDownload -= state->fPreferredDownload;

    // Whether this node should be marked as a preferred download node.
    state->fPreferredDownload = (!node.IsInboundConn() || node.HasPermission(PF_NOBAN)) && !node.IsAddrFetchConn() && !node.fClient;

    nPreferredDownload += state->fPreferredDownload;
}

static void PushNodeVersion(CNode& pnode, CConnman& connman, int64_t nTime)
{
    // Note that pnode->GetLocalServices() is a reflection of the local
    // services we were offering when the CNode object was created for this
    // peer.
    ServiceFlags nLocalNodeServices = pnode.GetLocalServices();
    uint64_t nonce = pnode.GetLocalNonce();
    int nNodeStartingHeight = pnode.GetMyStartingHeight();
    NodeId nodeid = pnode.GetId();
    CAddress addr = pnode.addr;

    CAddress addrYou = addr.IsRoutable() && !IsProxy(addr) && addr.IsAddrV1Compatible() ?
                           addr :
                           CAddress(CService(), addr.nServices);
    CAddress addrMe = CAddress(CService(), nLocalNodeServices);

    connman.PushMessage(&pnode, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERSION, PROTOCOL_VERSION, (uint64_t)nLocalNodeServices, nTime, addrYou, addrMe,
            nonce, strSubVersion, nNodeStartingHeight, ::g_relay_txes && pnode.m_tx_relay != nullptr));

    if (fLogIPs) {
        LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, us=%s, them=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), addrYou.ToString(), nodeid);
    } else {
        LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, us=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), nodeid);
    }
}

// Returns a bool indicating whether we requested this block.
// Also used if a block was /not/ received and timed out or started with another peer
static bool MarkBlockAsReceived(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
    std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
    if (itInFlight != mapBlocksInFlight.end()) {
        CNodeState *state = State(itInFlight->second.first);
        assert(state != nullptr);
        state->nBlocksInFlightValidHeaders -= itInFlight->second.second->fValidatedHeaders;
        if (state->nBlocksInFlightValidHeaders == 0 && itInFlight->second.second->fValidatedHeaders) {
            // Last validated block on the queue was received.
            nPeersWithValidatedDownloads--;
        }
        if (state->vBlocksInFlight.begin() == itInFlight->second.second) {
            // First block on the queue was received, update the start download time for the next one
            state->nDownloadingSince = std::max(state->nDownloadingSince, count_microseconds(GetTime<std::chrono::microseconds>()));
        }
        state->vBlocksInFlight.erase(itInFlight->second.second);
        state->nBlocksInFlight--;
        state->nStallingSince = 0;
        mapBlocksInFlight.erase(itInFlight);
        return true;
    }
    return false;
}

// returns false, still setting pit, if the block was already in flight from the same peer
// pit will only be valid as long as the same cs_main lock is being held
static bool MarkBlockAsInFlight(CTxMemPool& mempool, NodeId nodeid, const uint256& hash, const CBlockIndex* pindex = nullptr, std::list<QueuedBlock>::iterator** pit = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
    CNodeState *state = State(nodeid);
    assert(state != nullptr);

    // Short-circuit most stuff in case it is from the same node
    std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
    if (itInFlight != mapBlocksInFlight.end() && itInFlight->second.first == nodeid) {
        if (pit) {
            *pit = &itInFlight->second.second;
        }
        return false;
    }

    // Make sure it's not listed somewhere already.
    MarkBlockAsReceived(hash);

    std::list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(),
            {hash, pindex, pindex != nullptr, std::unique_ptr<PartiallyDownloadedBlock>(pit ? new PartiallyDownloadedBlock(&mempool) : nullptr)});
    state->nBlocksInFlight++;
    state->nBlocksInFlightValidHeaders += it->fValidatedHeaders;
    if (state->nBlocksInFlight == 1) {
        // We're starting a block download (batch) from this peer.
        state->nDownloadingSince = GetTime<std::chrono::microseconds>().count();
    }
    if (state->nBlocksInFlightValidHeaders == 1 && pindex != nullptr) {
        nPeersWithValidatedDownloads++;
    }
    itInFlight = mapBlocksInFlight.insert(std::make_pair(hash, std::make_pair(nodeid, it))).first;
    if (pit)
        *pit = &itInFlight->second.second;
    return true;
}

/** Check whether the last unknown block a peer advertised is not yet known. */
static void ProcessBlockAvailability(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
    CNodeState *state = State(nodeid);
    assert(state != nullptr);

    if (!state->hashLastUnknownBlock.IsNull()) {
        const CBlockIndex* pindex = LookupBlockIndex(state->hashLastUnknownBlock);
        if (pindex && pindex->nChainWork > 0) {
            if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
                state->pindexBestKnownBlock = pindex;
            }
            state->hashLastUnknownBlock.SetNull();
        }
    }
}

/** Update tracking information about which blocks a peer is assumed to have. */
static void UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
    CNodeState *state = State(nodeid);
    assert(state != nullptr);

    ProcessBlockAvailability(nodeid);

    const CBlockIndex* pindex = LookupBlockIndex(hash);
    if (pindex && pindex->nChainWork > 0) {
        // An actually better block was announced.
        if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
            state->pindexBestKnownBlock = pindex;
        }
    } else {
        // An unknown block was announced; just assume that the latest one is the best one.
        state->hashLastUnknownBlock = hash;
    }
}

/**
 * When a peer sends us a valid block, instruct it to announce blocks to us
 * using CMPCTBLOCK if possible by adding its nodeid to the end of
 * lNodesAnnouncingHeaderAndIDs, and keeping that list under a certain size by
 * removing the first element if necessary.
 */
static void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid, CConnman& connman) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    AssertLockHeld(cs_main);
    CNodeState* nodestate = State(nodeid);
    if (!nodestate || !nodestate->fSupportsDesiredCmpctVersion) {
        // Never ask from peers who can't provide witnesses.
        return;
    }
    if (nodestate->fProvidesHeaderAndIDs) {
        for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin(); it != lNodesAnnouncingHeaderAndIDs.end(); it++) {
            if (*it == nodeid) {
                lNodesAnnouncingHeaderAndIDs.erase(it);
                lNodesAnnouncingHeaderAndIDs.push_back(nodeid);
                return;
            }
        }
        connman.ForNode(nodeid, [&connman](CNode* pfrom) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
            AssertLockHeld(::cs_main);
            uint64_t nCMPCTBLOCKVersion = (pfrom->GetLocalServices() & NODE_WITNESS) ? 2 : 1;
            if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
                // As per BIP152, we only get 3 of our peers to announce
                // blocks using compact encodings.
                connman.ForNode(lNodesAnnouncingHeaderAndIDs.front(), [&connman, nCMPCTBLOCKVersion](CNode* pnodeStop){
                    connman.PushMessage(pnodeStop, CNetMsgMaker(pnodeStop->GetCommonVersion()).Make(NetMsgType::SENDCMPCT, /*fAnnounceUsingCMPCTBLOCK=*/false, nCMPCTBLOCKVersion));
                    return true;
                });
                lNodesAnnouncingHeaderAndIDs.pop_front();
            }
            connman.PushMessage(pfrom, CNetMsgMaker(pfrom->GetCommonVersion()).Make(NetMsgType::SENDCMPCT, /*fAnnounceUsingCMPCTBLOCK=*/true, nCMPCTBLOCKVersion));
            lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
            return true;
        });
    }
}

static bool TipMayBeStale(const Consensus::Params &consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    AssertLockHeld(cs_main);
    if (g_last_tip_update == 0) {
        g_last_tip_update = GetTime();
    }
    return g_last_tip_update < GetTime() - consensusParams.nPowTargetSpacing * 3 && mapBlocksInFlight.empty();
}

static bool CanDirectFetch(const Consensus::Params &consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    return ::ChainActive().Tip()->GetBlockTime() > GetAdjustedTime() - consensusParams.nPowTargetSpacing * 20;
}

static bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    if (state->pindexBestKnownBlock && pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight))
        return true;
    if (state->pindexBestHeaderSent && pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight))
        return true;
    return false;
}

/** Update pindexLastCommonBlock and add not-in-flight missing successors to vBlocks, until it has
 *  at most count entries. */
static void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller, const Consensus::Params& consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    if (count == 0)
        return;

    vBlocks.reserve(vBlocks.size() + count);
    CNodeState *state = State(nodeid);
    assert(state != nullptr);

    // Make sure pindexBestKnownBlock is up to date, we'll need it.
    ProcessBlockAvailability(nodeid);

    if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->nChainWork < ::ChainActive().Tip()->nChainWork || state->pindexBestKnownBlock->nChainWork < nMinimumChainWork) {
        // This peer has nothing interesting.
        return;
    }

    if (state->pindexLastCommonBlock == nullptr) {
        // Bootstrap quickly by guessing a parent of our best tip is the forking point.
        // Guessing wrong in either direction is not a problem.
        state->pindexLastCommonBlock = ::ChainActive()[std::min(state->pindexBestKnownBlock->nHeight, ::ChainActive().Height())];
    }

    // If the peer reorganized, our previous pindexLastCommonBlock may not be an ancestor
    // of its current tip anymore. Go back enough to fix that.
    state->pindexLastCommonBlock = LastCommonAncestor(state->pindexLastCommonBlock, state->pindexBestKnownBlock);
    if (state->pindexLastCommonBlock == state->pindexBestKnownBlock)
        return;

    std::vector<const CBlockIndex*> vToFetch;
    const CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
    // Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last
    // linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to
    // download that next block if the window were 1 larger.
    int nWindowEnd = state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW;
    int nMaxHeight = std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
    NodeId waitingfor = -1;
    while (pindexWalk->nHeight < nMaxHeight) {
        // Read up to 128 (or more, if more blocks than that are needed) successors of pindexWalk (towards
        // pindexBestKnownBlock) into vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as expensive
        // as iterating over ~100 CBlockIndex* entries anyway.
        int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight, std::max<int>(count - vBlocks.size(), 128));
        vToFetch.resize(nToFetch);
        pindexWalk = state->pindexBestKnownBlock->GetAncestor(pindexWalk->nHeight + nToFetch);
        vToFetch[nToFetch - 1] = pindexWalk;
        for (unsigned int i = nToFetch - 1; i > 0; i--) {
            vToFetch[i - 1] = vToFetch[i]->pprev;
        }

        // Iterate over those blocks in vToFetch (in forward direction), adding the ones that
        // are not yet downloaded and not in flight to vBlocks. In the meantime, update
        // pindexLastCommonBlock as long as all ancestors are already downloaded, or if it's
        // already part of our chain (and therefore don't need it even if pruned).
        for (const CBlockIndex* pindex : vToFetch) {
            if (!pindex->IsValid(BLOCK_VALID_TREE)) {
                // We consider the chain that this peer is on invalid.
                return;
            }
            if (!State(nodeid)->fHaveWitness && IsWitnessEnabled(pindex->pprev, consensusParams)) {
                // We wouldn't download this block or its descendants from this peer.
                return;
            }
            if (pindex->nStatus & BLOCK_HAVE_DATA || ::ChainActive().Contains(pindex)) {
                if (pindex->HaveTxsDownloaded())
                    state->pindexLastCommonBlock = pindex;
            } else if (mapBlocksInFlight.count(pindex->GetBlockHash()) == 0) {
                // The block is not already downloaded, and not yet in flight.
                if (pindex->nHeight > nWindowEnd) {
                    // We reached the end of the window.
                    if (vBlocks.size() == 0 && waitingfor != nodeid) {
                        // We aren't able to fetch anything, but we would be if the download window was one larger.
                        nodeStaller = waitingfor;
                    }
                    return;
                }
                vBlocks.push_back(pindex);
                if (vBlocks.size() == count) {
                    return;
                }
            } else if (waitingfor == -1) {
                // This is the first already-in-flight block.
                waitingfor = mapBlocksInFlight[pindex->GetBlockHash()].first;
            }
        }
    }
}

} // namespace

void PeerManager::AddTxAnnouncement(const CNode& node, const GenTxid& gtxid, std::chrono::microseconds current_time)
{
    AssertLockHeld(::cs_main); // For m_txrequest
    NodeId nodeid = node.GetId();
    if (!node.HasPermission(PF_RELAY) && m_txrequest.Count(nodeid) >= MAX_PEER_TX_ANNOUNCEMENTS) {
        // Too many queued announcements from this peer
        return;
    }
    const CNodeState* state = State(nodeid);

    // Decide the TxRequestTracker parameters for this announcement:
    // - "preferred": if fPreferredDownload is set (= outbound, or PF_NOBAN permission)
    // - "reqtime": current time plus delays for:
    //   - NONPREF_PEER_TX_DELAY for announcements from non-preferred connections
    //   - TXID_RELAY_DELAY for txid announcements while wtxid peers are available
    //   - OVERLOADED_PEER_TX_DELAY for announcements from peers which have at least
    //     MAX_PEER_TX_REQUEST_IN_FLIGHT requests in flight (and don't have PF_RELAY).
    auto delay = std::chrono::microseconds{0};
    const bool preferred = state->fPreferredDownload;
    if (!preferred) delay += NONPREF_PEER_TX_DELAY;
    if (!gtxid.IsWtxid() && g_wtxid_relay_peers > 0) delay += TXID_RELAY_DELAY;
    const bool overloaded = !node.HasPermission(PF_RELAY) &&
        m_txrequest.CountInFlight(nodeid) >= MAX_PEER_TX_REQUEST_IN_FLIGHT;
    if (overloaded) delay += OVERLOADED_PEER_TX_DELAY;
    m_txrequest.ReceivedInv(nodeid, gtxid, preferred, current_time + delay);
}

// This function is used for testing the stale tip eviction logic, see
// denialofservice_tests.cpp
void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds)
{
    LOCK(cs_main);
    CNodeState *state = State(node);
    if (state) state->m_last_block_announcement = time_in_seconds;
}

void PeerManager::InitializeNode(CNode *pnode) {
    CAddress addr = pnode->addr;
    std::string addrName = pnode->GetAddrName();
    NodeId nodeid = pnode->GetId();
    {
        LOCK(cs_main);
        mapNodeState.emplace_hint(mapNodeState.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(addr, pnode->IsInboundConn(), pnode->IsManualConn()));
        assert(m_txrequest.Count(nodeid) == 0);
    }
    {
        PeerRef peer = std::make_shared<Peer>(nodeid);
        LOCK(g_peer_mutex);
        g_peer_map.emplace_hint(g_peer_map.end(), nodeid, std::move(peer));
    }
    if (!pnode->IsInboundConn()) {
        PushNodeVersion(*pnode, m_connman, GetTime());
    }
}

void PeerManager::ReattemptInitialBroadcast(CScheduler& scheduler) const
{
    std::set<uint256> unbroadcast_txids = m_mempool.GetUnbroadcastTxs();

    for (const auto& txid : unbroadcast_txids) {
        CTransactionRef tx = m_mempool.get(txid);

        if (tx != nullptr) {
            LOCK(cs_main);
            RelayTransaction(txid, tx->GetWitnessHash(), m_connman);
        } else {
            m_mempool.RemoveUnbroadcastTx(txid, true);
        }
    }

    // Schedule next run for 10-15 minutes in the future.
    // We add randomness on every cycle to avoid the possibility of P2P fingerprinting.
    const std::chrono::milliseconds delta = std::chrono::minutes{10} + GetRandMillis(std::chrono::minutes{5});
    scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); }, delta);
}

void PeerManager::FinalizeNode(const CNode& node, bool& fUpdateConnectionTime) {
    NodeId nodeid = node.GetId();
    fUpdateConnectionTime = false;
    LOCK(cs_main);
    int misbehavior{0};
    {
        PeerRef peer = GetPeerRef(nodeid);
        assert(peer != nullptr);
        misbehavior = WITH_LOCK(peer->m_misbehavior_mutex, return peer->m_misbehavior_score);
        LOCK(g_peer_mutex);
        g_peer_map.erase(nodeid);
    }
    CNodeState *state = State(nodeid);
    assert(state != nullptr);

    if (state->fSyncStarted)
        nSyncStarted--;

    if (misbehavior == 0 && state->fCurrentlyConnected && !node.IsBlockOnlyConn()) {
        // Note: we avoid changing visible addrman state for block-relay-only peers
        fUpdateConnectionTime = true;
    }

    for (const QueuedBlock& entry : state->vBlocksInFlight) {
        mapBlocksInFlight.erase(entry.hash);
    }
    EraseOrphansFor(nodeid);
    m_txrequest.DisconnectedPeer(nodeid);
    nPreferredDownload -= state->fPreferredDownload;
    nPeersWithValidatedDownloads -= (state->nBlocksInFlightValidHeaders != 0);
    assert(nPeersWithValidatedDownloads >= 0);
    g_outbound_peers_with_protect_from_disconnect -= state->m_chain_sync.m_protect;
    assert(g_outbound_peers_with_protect_from_disconnect >= 0);
    g_wtxid_relay_peers -= state->m_wtxid_relay;
    assert(g_wtxid_relay_peers >= 0);

    mapNodeState.erase(nodeid);

    if (mapNodeState.empty()) {
        // Do a consistency check after the last peer is removed.
        assert(mapBlocksInFlight.empty());
        assert(nPreferredDownload == 0);
        assert(nPeersWithValidatedDownloads == 0);
        assert(g_outbound_peers_with_protect_from_disconnect == 0);
        assert(g_wtxid_relay_peers == 0);
        assert(m_txrequest.Size() == 0);
    }
    LogPrint(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid);
}

bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) {
    {
        LOCK(cs_main);
        CNodeState* state = State(nodeid);
        if (state == nullptr)
            return false;
        stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1;
        stats.nCommonHeight = state->pindexLastCommonBlock ? state->pindexLastCommonBlock->nHeight : -1;
        for (const QueuedBlock& queue : state->vBlocksInFlight) {
            if (queue.pindex)
                stats.vHeightInFlight.push_back(queue.pindex->nHeight);
        }
    }

    PeerRef peer = GetPeerRef(nodeid);
    if (peer == nullptr) return false;
    stats.m_misbehavior_score = WITH_LOCK(peer->m_misbehavior_mutex, return peer->m_misbehavior_score);

    return true;
}

//////////////////////////////////////////////////////////////////////////////
//
// mapOrphanTransactions
//

static void AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
{
    size_t max_extra_txn = gArgs.GetArg("-blockreconstructionextratxn", DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN);
    if (max_extra_txn <= 0)
        return;
    if (!vExtraTxnForCompact.size())
        vExtraTxnForCompact.resize(max_extra_txn);
    vExtraTxnForCompact[vExtraTxnForCompactIt] = std::make_pair(tx->GetWitnessHash(), tx);
    vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % max_extra_txn;
}

bool AddOrphanTx(const CTransactionRef& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
{
    const uint256& hash = tx->GetHash();
    if (mapOrphanTransactions.count(hash))
        return false;

    // Ignore big transactions, to avoid a
    // send-big-orphans memory exhaustion attack. If a peer has a legitimate
    // large transaction with a missing parent then we assume
    // it will rebroadcast it later, after the parent transaction(s)
    // have been mined or received.
    // 100 orphans, each of which is at most 100,000 bytes big is
    // at most 10 megabytes of orphans and somewhat more for the by-prev
    // index (in the worst case):
    unsigned int sz = GetTransactionWeight(*tx);
    if (sz > MAX_STANDARD_TX_WEIGHT)
    {
        LogPrint(BCLog::MEMPOOL, "ignoring large orphan tx (size: %u, hash: %s)\n", sz, hash.ToString());
        return false;
    }

    auto ret = mapOrphanTransactions.emplace(hash, COrphanTx{tx, peer, GetTime() + ORPHAN_TX_EXPIRE_TIME, g_orphan_list.size()});
    assert(ret.second);
    g_orphan_list.push_back(ret.first);
    // Allow for lookups in the orphan pool by wtxid, as well as txid
    g_orphans_by_wtxid.emplace(tx->GetWitnessHash(), ret.first);
    for (const CTxIn& txin : tx->vin) {
        mapOrphanTransactionsByPrev[txin.prevout].insert(ret.first);
    }

    AddToCompactExtraTransactions(tx);

    LogPrint(BCLog::MEMPOOL, "stored orphan tx %s (mapsz %u outsz %u)\n", hash.ToString(),
             mapOrphanTransactions.size(), mapOrphanTransactionsByPrev.size());
    return true;
}

int static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
{
    std::map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.find(hash);
    if (it == mapOrphanTransactions.end())
        return 0;
    for (const CTxIn& txin : it->second.tx->vin)
    {
        auto itPrev = mapOrphanTransactionsByPrev.find(txin.prevout);
        if (itPrev == mapOrphanTransactionsByPrev.end())
            continue;
        itPrev->second.erase(it);
        if (itPrev->second.empty())
            mapOrphanTransactionsByPrev.erase(itPrev);
    }

    size_t old_pos = it->second.list_pos;
    assert(g_orphan_list[old_pos] == it);
    if (old_pos + 1 != g_orphan_list.size()) {
        // Unless we're deleting the last entry in g_orphan_list, move the last
        // entry to the position we're deleting.
        auto it_last = g_orphan_list.back();
        g_orphan_list[old_pos] = it_last;
        it_last->second.list_pos = old_pos;
    }
    g_orphan_list.pop_back();
    g_orphans_by_wtxid.erase(it->second.tx->GetWitnessHash());

    mapOrphanTransactions.erase(it);
    return 1;
}

void EraseOrphansFor(NodeId peer)
{
    LOCK(g_cs_orphans);
    int nErased = 0;
    std::map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
    while (iter != mapOrphanTransactions.end())
    {
        std::map<uint256, COrphanTx>::iterator maybeErase = iter++; // increment to avoid iterator becoming invalid
        if (maybeErase->second.fromPeer == peer)
        {
            nErased += EraseOrphanTx(maybeErase->second.tx->GetHash());
        }
    }
    if (nErased > 0) LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx from peer=%d\n", nErased, peer);
}


unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans)
{
    LOCK(g_cs_orphans);

    unsigned int nEvicted = 0;
    static int64_t nNextSweep;
    int64_t nNow = GetTime();
    if (nNextSweep <= nNow) {
        // Sweep out expired orphan pool entries:
        int nErased = 0;
        int64_t nMinExpTime = nNow + ORPHAN_TX_EXPIRE_TIME - ORPHAN_TX_EXPIRE_INTERVAL;
        std::map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
        while (iter != mapOrphanTransactions.end())
        {
            std::map<uint256, COrphanTx>::iterator maybeErase = iter++;
            if (maybeErase->second.nTimeExpire <= nNow) {
                nErased += EraseOrphanTx(maybeErase->second.tx->GetHash());
            } else {
                nMinExpTime = std::min(maybeErase->second.nTimeExpire, nMinExpTime);
            }
        }
        // Sweep again 5 minutes after the next entry that expires in order to batch the linear scan.
        nNextSweep = nMinExpTime + ORPHAN_TX_EXPIRE_INTERVAL;
        if (nErased > 0) LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx due to expiration\n", nErased);
    }
    FastRandomContext rng;
    while (mapOrphanTransactions.size() > nMaxOrphans)
    {
        // Evict a random orphan:
        size_t randompos = rng.randrange(g_orphan_list.size());
        EraseOrphanTx(g_orphan_list[randompos]->first);
        ++nEvicted;
    }
    return nEvicted;
}

void PeerManager::Misbehaving(const NodeId pnode, const int howmuch, const std::string& message)
{
    assert(howmuch > 0);

    PeerRef peer = GetPeerRef(pnode);
    if (peer == nullptr) return;

    LOCK(peer->m_misbehavior_mutex);
    peer->m_misbehavior_score += howmuch;
    const std::string message_prefixed = message.empty() ? "" : (": " + message);
    if (peer->m_misbehavior_score >= DISCOURAGEMENT_THRESHOLD && peer->m_misbehavior_score - howmuch < DISCOURAGEMENT_THRESHOLD) {
        LogPrint(BCLog::NET, "Misbehaving: peer=%d (%d -> %d) DISCOURAGE THRESHOLD EXCEEDED%s\n", pnode, peer->m_misbehavior_score - howmuch, peer->m_misbehavior_score, message_prefixed);
        peer->m_should_discourage = true;
    } else {
        LogPrint(BCLog::NET, "Misbehaving: peer=%d (%d -> %d)%s\n", pnode, peer->m_misbehavior_score - howmuch, peer->m_misbehavior_score, message_prefixed);
    }
}

bool PeerManager::MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState& state,
                                          bool via_compact_block, const std::string& message)
{
    switch (state.GetResult()) {
    case BlockValidationResult::BLOCK_RESULT_UNSET:
        break;
    // The node is providing invalid data:
    case BlockValidationResult::BLOCK_CONSENSUS:
    case BlockValidationResult::BLOCK_MUTATED:
        if (!via_compact_block) {
            Misbehaving(nodeid, 100, message);
            return true;
        }
        break;
    case BlockValidationResult::BLOCK_CACHED_INVALID:
        {
            LOCK(cs_main);
            CNodeState *node_state = State(nodeid);
            if (node_state == nullptr) {
                break;
            }

            // Discourage outbound (but not inbound) peers if on an invalid chain.
            // Exempt HB compact block peers and manual connections.
            if (!via_compact_block && !node_state->m_is_inbound && !node_state->m_is_manual_connection) {
                Misbehaving(nodeid, 100, message);
                return true;
            }
            break;
        }
    case BlockValidationResult::BLOCK_INVALID_HEADER:
    case BlockValidationResult::BLOCK_CHECKPOINT:
    case BlockValidationResult::BLOCK_INVALID_PREV:
        Misbehaving(nodeid, 100, message);
        return true;
    // Conflicting (but not necessarily invalid) data or different policy:
    case BlockValidationResult::BLOCK_MISSING_PREV:
        // TODO: Handle this much more gracefully (10 DoS points is super arbitrary)
        Misbehaving(nodeid, 10, message);
        return true;
    case BlockValidationResult::BLOCK_RECENT_CONSENSUS_CHANGE:
    case BlockValidationResult::BLOCK_TIME_FUTURE:
        break;
    }
    if (message != "") {
        LogPrint(BCLog::NET, "peer=%d: %s\n", nodeid, message);
    }
    return false;
}

bool PeerManager::MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state, const std::string& message)
{
    switch (state.GetResult()) {
    case TxValidationResult::TX_RESULT_UNSET:
        break;
    // The node is providing invalid data:
    case TxValidationResult::TX_CONSENSUS:
        Misbehaving(nodeid, 100, message);
        return true;
    // Conflicting (but not necessarily invalid) data or different policy:
    case TxValidationResult::TX_RECENT_CONSENSUS_CHANGE:
    case TxValidationResult::TX_INPUTS_NOT_STANDARD:
    case TxValidationResult::TX_NOT_STANDARD:
    case TxValidationResult::TX_MISSING_INPUTS:
    case TxValidationResult::TX_PREMATURE_SPEND:
    case TxValidationResult::TX_WITNESS_MUTATED:
    case TxValidationResult::TX_WITNESS_STRIPPED:
    case TxValidationResult::TX_CONFLICT:
    case TxValidationResult::TX_MEMPOOL_POLICY:
        break;
    }
    if (message != "") {
        LogPrint(BCLog::NET, "peer=%d: %s\n", nodeid, message);
    }
    return false;
}


//////////////////////////////////////////////////////////////////////////////
//
// blockchain -> download logic notification
//

// To prevent fingerprinting attacks, only send blocks/headers outside of the
// active chain if they are no more than a month older (both in time, and in
// best equivalent proof of work) than the best header chain we know about and
// we fully-validated them at some point.
static bool BlockRequestAllowed(const CBlockIndex* pindex, const Consensus::Params& consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    AssertLockHeld(cs_main);
    if (::ChainActive().Contains(pindex)) return true;
    return pindex->IsValid(BLOCK_VALID_SCRIPTS) && (pindexBestHeader != nullptr) &&
           (pindexBestHeader->GetBlockTime() - pindex->GetBlockTime() < STALE_RELAY_AGE_LIMIT) &&
           (GetBlockProofEquivalentTime(*pindexBestHeader, *pindex, *pindexBestHeader, consensusParams) < STALE_RELAY_AGE_LIMIT);
}
1178
PeerManager(const CChainParams & chainparams,CConnman & connman,BanMan * banman,CScheduler & scheduler,ChainstateManager & chainman,CTxMemPool & pool)1179 PeerManager::PeerManager(const CChainParams& chainparams, CConnman& connman, BanMan* banman,
1180 CScheduler& scheduler, ChainstateManager& chainman, CTxMemPool& pool)
1181 : m_chainparams(chainparams),
1182 m_connman(connman),
1183 m_banman(banman),
1184 m_chainman(chainman),
1185 m_mempool(pool),
1186 m_stale_tip_check_time(0)
1187 {
1188 // Initialize global variables that cannot be constructed at startup.
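// The rejection filter holds 120,000 entries at a 1-in-a-million
// false-positive rate (roughly 1.3 MB of memory) and is reset whenever
// the chain tip changes; see AlreadyHaveTx() below.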
1189 recentRejects.reset(new CRollingBloomFilter(120000, 0.000001));
1190
1191 // Blocks don't typically have more than 4000 transactions, so this should
1192 // be at least six blocks (~1 hr) worth of transactions that we can store,
1193 // inserting both a txid and wtxid for every observed transaction.
1194 // If the number of transactions appearing in a block goes up, or if we are
1195 // seeing getdata requests more than an hour after initial announcement, we
1196 // can increase this number.
1197 // The false positive rate of 1/1M should come out to less than 1
1198 // transaction per day that would be inadvertently ignored (which is the
1199 // same probability that we have in the reject filter).
1200 g_recent_confirmed_transactions.reset(new CRollingBloomFilter(48000, 0.000001));
1201
1202 // Stale tip checking and peer eviction are on two different timers, but we
1203 // don't want them to get out of sync due to drift in the scheduler, so we
1204 // combine them in one function and schedule at the quicker (peer-eviction)
1205 // timer.
1206 static_assert(EXTRA_PEER_CHECK_INTERVAL < STALE_CHECK_INTERVAL, "peer eviction timer should be less than stale tip check timer");
1207 scheduler.scheduleEvery([this] { this->CheckForStaleTipAndEvictPeers(); }, std::chrono::seconds{EXTRA_PEER_CHECK_INTERVAL});
1208
1209 // schedule next run for 10-15 minutes in the future
1210 const std::chrono::milliseconds delta = std::chrono::minutes{10} + GetRandMillis(std::chrono::minutes{5});
1211 scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); }, delta);
1212 }
1213
1214 /**
1215 * Evict orphan txn pool entries (EraseOrphanTx) based on a newly connected
1216 * block, remember the recently confirmed transactions, and delete tracked
1217 * announcements for them. Also save the time of the last tip update.
1218 */
1219 void PeerManager::BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindex)
1220 {
1221 {
1222 LOCK(g_cs_orphans);
1223
1224 std::vector<uint256> vOrphanErase;
1225
1226 for (const CTransactionRef& ptx : pblock->vtx) {
1227 const CTransaction& tx = *ptx;
1228
1229 // Which orphan pool entries must we evict?
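// Any orphan that spends an outpoint also spent by this block is either
// included in the block or conflicted by it, so it can never be
// accepted and is evicted below.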
1230 for (const auto& txin : tx.vin) {
1231 auto itByPrev = mapOrphanTransactionsByPrev.find(txin.prevout);
1232 if (itByPrev == mapOrphanTransactionsByPrev.end()) continue;
1233 for (auto mi = itByPrev->second.begin(); mi != itByPrev->second.end(); ++mi) {
1234 const CTransaction& orphanTx = *(*mi)->second.tx;
1235 const uint256& orphanHash = orphanTx.GetHash();
1236 vOrphanErase.push_back(orphanHash);
1237 }
1238 }
1239 }
1240
1241 // Erase orphan transactions included or precluded by this block
1242 if (vOrphanErase.size()) {
1243 int nErased = 0;
1244 for (const uint256& orphanHash : vOrphanErase) {
1245 nErased += EraseOrphanTx(orphanHash);
1246 }
1247 LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx included or conflicted by block\n", nErased);
1248 }
1249
1250 g_last_tip_update = GetTime();
1251 }
1252 {
1253 LOCK(g_cs_recent_confirmed_transactions);
1254 for (const auto& ptx : pblock->vtx) {
1255 g_recent_confirmed_transactions->insert(ptx->GetHash());
1256 if (ptx->GetHash() != ptx->GetWitnessHash()) {
1257 g_recent_confirmed_transactions->insert(ptx->GetWitnessHash());
1258 }
1259 }
1260 }
1261 {
1262 LOCK(cs_main);
1263 for (const auto& ptx : pblock->vtx) {
1264 m_txrequest.ForgetTxHash(ptx->GetHash());
1265 m_txrequest.ForgetTxHash(ptx->GetWitnessHash());
1266 }
1267 }
1268 }
1269
1270 void PeerManager::BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex)
1271 {
1272 // To avoid relay problems with transactions that were previously
1273 // confirmed, clear our filter of recently confirmed transactions whenever
1274 // there's a reorg.
1275 // This means that in a 1-block reorg (where 1 block is disconnected and
1276 // then another block reconnected), our filter will drop to having only one
1277 // block's worth of transactions in it, but that should be fine, since
1278 // presumably the most common case of relaying a confirmed transaction
1279 // should be just after a new block containing it is found.
1280 LOCK(g_cs_recent_confirmed_transactions);
1281 g_recent_confirmed_transactions->reset();
1282 }
1283
1284 // All of the following cache a recent block, and are protected by cs_most_recent_block
1285 static RecursiveMutex cs_most_recent_block;
1286 static std::shared_ptr<const CBlock> most_recent_block GUARDED_BY(cs_most_recent_block);
1287 static std::shared_ptr<const CBlockHeaderAndShortTxIDs> most_recent_compact_block GUARDED_BY(cs_most_recent_block);
1288 static uint256 most_recent_block_hash GUARDED_BY(cs_most_recent_block);
1289 static bool fWitnessesPresentInMostRecentCompactBlock GUARDED_BY(cs_most_recent_block);
1290
1291 /**
1292 * Maintain state about the best-seen block and fast-announce a compact block
1293 * to compatible peers.
1294 */
1295 void PeerManager::NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& pblock) {
1296 std::shared_ptr<const CBlockHeaderAndShortTxIDs> pcmpctblock = std::make_shared<const CBlockHeaderAndShortTxIDs> (*pblock, true);
1297 const CNetMsgMaker msgMaker(PROTOCOL_VERSION);
1298
1299 LOCK(cs_main);
1300
1301 static int nHighestFastAnnounce = 0;
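// Only fast-announce blocks at strictly increasing heights; a competing
// block at the same or a lower height goes through the regular
// headers/INV announcement path instead.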
1302 if (pindex->nHeight <= nHighestFastAnnounce)
1303 return;
1304 nHighestFastAnnounce = pindex->nHeight;
1305
1306 bool fWitnessEnabled = IsWitnessEnabled(pindex->pprev, m_chainparams.GetConsensus());
1307 uint256 hashBlock(pblock->GetHash());
1308
1309 {
1310 LOCK(cs_most_recent_block);
1311 most_recent_block_hash = hashBlock;
1312 most_recent_block = pblock;
1313 most_recent_compact_block = pcmpctblock;
1314 fWitnessesPresentInMostRecentCompactBlock = fWitnessEnabled;
1315 }
1316
1317 m_connman.ForEachNode([this, &pcmpctblock, pindex, &msgMaker, fWitnessEnabled, &hashBlock](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
1318 AssertLockHeld(::cs_main);
1319
1320 // TODO: Avoid the repeated-serialization here
1321 if (pnode->GetCommonVersion() < INVALID_CB_NO_BAN_VERSION || pnode->fDisconnect)
1322 return;
1323 ProcessBlockAvailability(pnode->GetId());
1324 CNodeState &state = *State(pnode->GetId());
1325 // If the peer has, or we announced to them the previous block already,
1326 // but we don't think they have this one, go ahead and announce it
1327 if (state.fPreferHeaderAndIDs && (!fWitnessEnabled || state.fWantsCmpctWitness) &&
1328 !PeerHasHeader(&state, pindex) && PeerHasHeader(&state, pindex->pprev)) {
1329
1330 LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", "PeerManager::NewPoWValidBlock",
1331 hashBlock.ToString(), pnode->GetId());
1332 m_connman.PushMessage(pnode, msgMaker.Make(NetMsgType::CMPCTBLOCK, *pcmpctblock));
1333 state.pindexBestHeaderSent = pindex;
1334 }
1335 });
1336 }
1337
1338 /**
1339 * Update our best height and announce any block hashes which weren't previously
1340 * in ::ChainActive() to our peers.
1341 */
1342 void PeerManager::UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) {
1343 const int nNewHeight = pindexNew->nHeight;
1344 m_connman.SetBestHeight(nNewHeight);
1345
1346 SetServiceFlagsIBDCache(!fInitialDownload);
1347 if (!fInitialDownload) {
1348 // Find the hashes of all blocks that weren't previously in the best chain.
1349 std::vector<uint256> vHashes;
1350 const CBlockIndex *pindexToAnnounce = pindexNew;
1351 while (pindexToAnnounce != pindexFork) {
1352 vHashes.push_back(pindexToAnnounce->GetBlockHash());
1353 pindexToAnnounce = pindexToAnnounce->pprev;
1354 if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) {
1355 // Limit announcements in case of a huge reorganization.
1356 // Rely on the peer's synchronization mechanism in that case.
1357 break;
1358 }
1359 }
1360 // Relay inventory, but don't relay old inventory during initial block download.
1361 m_connman.ForEachNode([nNewHeight, &vHashes](CNode* pnode) {
1362 LOCK(pnode->cs_inventory);
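// Skip announcing to peers whose starting height is more than 2000
// blocks above our new tip, since such blocks are stale from their
// point of view; if the starting height is unknown, announce anyway.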
1363 if (nNewHeight > (pnode->nStartingHeight != -1 ? pnode->nStartingHeight - 2000 : 0)) {
1364 for (const uint256& hash : reverse_iterate(vHashes)) {
1365 pnode->vBlockHashesToAnnounce.push_back(hash);
1366 }
1367 }
1368 });
1369 m_connman.WakeMessageHandler();
1370 }
1371 }
1372
1373 /**
1374 * Handle invalid block rejection and consequent peer discouragement, maintain which
1375 * peers announce compact blocks.
1376 */
1377 void PeerManager::BlockChecked(const CBlock& block, const BlockValidationState& state) {
1378 LOCK(cs_main);
1379
1380 const uint256 hash(block.GetHash());
1381 std::map<uint256, std::pair<NodeId, bool>>::iterator it = mapBlockSource.find(hash);
1382
1383 // If the block failed validation, we know where it came from and we're still connected
1384 // to that peer, maybe punish.
1385 if (state.IsInvalid() &&
1386 it != mapBlockSource.end() &&
1387 State(it->second.first)) {
1388 MaybePunishNodeForBlock(/*nodeid=*/ it->second.first, state, /*via_compact_block=*/ !it->second.second);
1389 }
1390 // Check that:
1391 // 1. The block is valid
1392 // 2. We're not in initial block download
1393 // 3. This is currently the best block we're aware of. We haven't updated
1394 // the tip yet so we have no way to check this directly here. Instead we
1395 // just check that there are currently no other blocks in flight.
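// (The count(hash) == size() comparison is true exactly when this block
// is the only one in flight, 1 == 1, or nothing is in flight, 0 == 0.)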
1396 else if (state.IsValid() &&
1397 !::ChainstateActive().IsInitialBlockDownload() &&
1398 mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) {
1399 if (it != mapBlockSource.end()) {
1400 MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first, m_connman);
1401 }
1402 }
1403 if (it != mapBlockSource.end())
1404 mapBlockSource.erase(it);
1405 }
1406
1407 //////////////////////////////////////////////////////////////////////////////
1408 //
1409 // Messages
1410 //
1411
1412
1413 bool static AlreadyHaveTx(const GenTxid& gtxid, const CTxMemPool& mempool) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
1414 {
1415 assert(recentRejects);
1416 if (::ChainActive().Tip()->GetBlockHash() != hashRecentRejectsChainTip) {
1417 // If the chain tip has changed previously rejected transactions
1418 // might be now valid, e.g. due to a nLockTime'd tx becoming valid,
1419 // or a double-spend. Reset the rejects filter and give those
1420 // txs a second chance.
1421 hashRecentRejectsChainTip = ::ChainActive().Tip()->GetBlockHash();
1422 recentRejects->reset();
1423 }
1424
1425 const uint256& hash = gtxid.GetHash();
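// `hash` is a txid or a wtxid, depending on gtxid. The orphan maps are
// keyed accordingly, while the rolling filters checked below contain
// both txids and wtxids.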
1426
1427 {
1428 LOCK(g_cs_orphans);
1429 if (!gtxid.IsWtxid() && mapOrphanTransactions.count(hash)) {
1430 return true;
1431 } else if (gtxid.IsWtxid() && g_orphans_by_wtxid.count(hash)) {
1432 return true;
1433 }
1434 }
1435
1436 {
1437 LOCK(g_cs_recent_confirmed_transactions);
1438 if (g_recent_confirmed_transactions->contains(hash)) return true;
1439 }
1440
1441 return recentRejects->contains(hash) || mempool.exists(gtxid);
1442 }
1443
1444 bool static AlreadyHaveBlock(const uint256& block_hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
1445 {
1446 return LookupBlockIndex(block_hash) != nullptr;
1447 }
1448
1449 void RelayTransaction(const uint256& txid, const uint256& wtxid, const CConnman& connman)
1450 {
1451 connman.ForEachNode([&txid, &wtxid](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
1452 AssertLockHeld(::cs_main);
1453
1454 CNodeState* state = State(pnode->GetId());
1455 if (state == nullptr) return;
1456 if (state->m_wtxid_relay) {
1457 pnode->PushTxInventory(wtxid);
1458 } else {
1459 pnode->PushTxInventory(txid);
1460 }
1461 });
1462 }
1463
1464 static void RelayAddress(const CAddress& addr, bool fReachable, const CConnman& connman)
1465 {
1466 if (!fReachable && !addr.IsRelayable()) return;
1467
1468 // Relay to a limited number of other nodes
1469 // Use deterministic randomness to send to the same nodes for 24 hours
1470 // at a time so the m_addr_knowns of the chosen nodes prevent repeats
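// The second value hashed in, (GetTime() + hashAddr) / (24 * 60 * 60),
// advances once per day, offset per address so that not all addresses
// rotate to new relay targets at the same moment.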
1471 uint64_t hashAddr = addr.GetHash();
1472 const CSipHasher hasher = connman.GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY).Write(hashAddr << 32).Write((GetTime() + hashAddr) / (24 * 60 * 60));
1473 FastRandomContext insecure_rand;
1474
1475 // Relay reachable addresses to 2 peers. Unreachable addresses are relayed randomly to 1 or 2 peers.
1476 unsigned int nRelayNodes = (fReachable || (hasher.Finalize() & 1)) ? 2 : 1;
1477
1478 std::array<std::pair<uint64_t, CNode*>,2> best{{{0, nullptr}, {0, nullptr}}};
1479 assert(nRelayNodes <= best.size());
1480
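// sortfunc keeps in `best` the nRelayNodes address-relaying peers with the
// highest per-peer SipHash keys, in descending order (a bounded insertion
// sort); pushfunc then pushes the address to exactly those peers.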
1481 auto sortfunc = [&best, &hasher, nRelayNodes](CNode* pnode) {
1482 if (pnode->RelayAddrsWithConn()) {
1483 uint64_t hashKey = CSipHasher(hasher).Write(pnode->GetId()).Finalize();
1484 for (unsigned int i = 0; i < nRelayNodes; i++) {
1485 if (hashKey > best[i].first) {
1486 std::copy(best.begin() + i, best.begin() + nRelayNodes - 1, best.begin() + i + 1);
1487 best[i] = std::make_pair(hashKey, pnode);
1488 break;
1489 }
1490 }
1491 }
1492 };
1493
1494 auto pushfunc = [&addr, &best, nRelayNodes, &insecure_rand] {
1495 for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) {
1496 best[i].second->PushAddress(addr, insecure_rand);
1497 }
1498 };
1499
1500 connman.ForEachNodeThen(std::move(sortfunc), std::move(pushfunc));
1501 }
1502
1503 void static ProcessGetBlockData(CNode& pfrom, const CChainParams& chainparams, const CInv& inv, CConnman& connman)
1504 {
1505 bool send = false;
1506 std::shared_ptr<const CBlock> a_recent_block;
1507 std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block;
1508 bool fWitnessesPresentInARecentCompactBlock;
1509 const Consensus::Params& consensusParams = chainparams.GetConsensus();
1510 {
1511 LOCK(cs_most_recent_block);
1512 a_recent_block = most_recent_block;
1513 a_recent_compact_block = most_recent_compact_block;
1514 fWitnessesPresentInARecentCompactBlock = fWitnessesPresentInMostRecentCompactBlock;
1515 }
1516
1517 bool need_activate_chain = false;
1518 {
1519 LOCK(cs_main);
1520 const CBlockIndex* pindex = LookupBlockIndex(inv.hash);
1521 if (pindex) {
1522 if (pindex->HaveTxsDownloaded() && !pindex->IsValid(BLOCK_VALID_SCRIPTS) &&
1523 pindex->IsValid(BLOCK_VALID_TREE)) {
1524 // If we have the block and all of its parents, but have not yet validated it,
1525 // we might be in the middle of connecting it (ie in the unlock of cs_main
1526 // before ActivateBestChain but after AcceptBlock).
1527 // In this case, we need to run ActivateBestChain prior to checking the relay
1528 // conditions below.
1529 need_activate_chain = true;
1530 }
1531 }
1532 } // release cs_main before calling ActivateBestChain
1533 if (need_activate_chain) {
1534 BlockValidationState state;
1535 if (!ActivateBestChain(state, chainparams, a_recent_block)) {
1536 LogPrint(BCLog::NET, "failed to activate chain (%s)\n", state.ToString());
1537 }
1538 }
1539
1540 LOCK(cs_main);
1541 const CBlockIndex* pindex = LookupBlockIndex(inv.hash);
1542 if (pindex) {
1543 send = BlockRequestAllowed(pindex, consensusParams);
1544 if (!send) {
1545 LogPrint(BCLog::NET, "%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom.GetId());
1546 }
1547 }
1548 const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
1549 // disconnect node in case we have reached the outbound limit for serving historical blocks
1550 if (send &&
1551 connman.OutboundTargetReached(true) &&
1552 (((pindexBestHeader != nullptr) && (pindexBestHeader->GetBlockTime() - pindex->GetBlockTime() > HISTORICAL_BLOCK_AGE)) || inv.IsMsgFilteredBlk()) &&
1553 !pfrom.HasPermission(PF_DOWNLOAD) // nodes with the download permission may exceed target
1554 ) {
1555 LogPrint(BCLog::NET, "historical block serving limit reached, disconnect peer=%d\n", pfrom.GetId());
1556
1557 //disconnect node
1558 pfrom.fDisconnect = true;
1559 send = false;
1560 }
1561 // Avoid leaking prune-height by never sending blocks below the NODE_NETWORK_LIMITED threshold
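// That is: if we signal NODE_NETWORK_LIMITED but not NODE_NETWORK, refuse
// requests for blocks more than NODE_NETWORK_LIMITED_MIN_BLOCKS (plus a
// two-block buffer against tip races) below our tip.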
1562 if (send && !pfrom.HasPermission(PF_NOBAN) && (
1563 (((pfrom.GetLocalServices() & NODE_NETWORK_LIMITED) == NODE_NETWORK_LIMITED) && ((pfrom.GetLocalServices() & NODE_NETWORK) != NODE_NETWORK) && (::ChainActive().Tip()->nHeight - pindex->nHeight > (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2 /* add two blocks buffer extension for possible races */) )
1564 )) {
1565 LogPrint(BCLog::NET, "Ignore block request below NODE_NETWORK_LIMITED threshold from peer=%d\n", pfrom.GetId());
1566
1567 //disconnect node and prevent it from stalling (would otherwise wait for the missing block)
1568 pfrom.fDisconnect = true;
1569 send = false;
1570 }
1571 // Pruned nodes may have deleted the block, so check whether
1572 // it's available before trying to send.
1573 if (send && (pindex->nStatus & BLOCK_HAVE_DATA))
1574 {
1575 std::shared_ptr<const CBlock> pblock;
1576 if (a_recent_block && a_recent_block->GetHash() == pindex->GetBlockHash()) {
1577 pblock = a_recent_block;
1578 } else if (inv.IsMsgWitnessBlk()) {
1579 // Fast-path: in this case it is possible to serve the block directly from disk,
1580 // as the network format matches the format on disk
1581 std::vector<uint8_t> block_data;
1582 if (!ReadRawBlockFromDisk(block_data, pindex, chainparams.MessageStart())) {
1583 assert(!"cannot load block from disk");
1584 }
1585 connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::BLOCK, MakeSpan(block_data)));
1586 // Don't set pblock as we've sent the block
1587 } else {
1588 // Send block from disk
1589 std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>();
1590 if (!ReadBlockFromDisk(*pblockRead, pindex, consensusParams))
1591 assert(!"cannot load block from disk");
1592 pblock = pblockRead;
1593 }
1594 if (pblock) {
1595 if (inv.IsMsgBlk()) {
1596 connman.PushMessage(&pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, *pblock));
1597 } else if (inv.IsMsgWitnessBlk()) {
1598 connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::BLOCK, *pblock));
1599 } else if (inv.IsMsgFilteredBlk()) {
1600 bool sendMerkleBlock = false;
1601 CMerkleBlock merkleBlock;
1602 if (pfrom.m_tx_relay != nullptr) {
1603 LOCK(pfrom.m_tx_relay->cs_filter);
1604 if (pfrom.m_tx_relay->pfilter) {
1605 sendMerkleBlock = true;
1606 merkleBlock = CMerkleBlock(*pblock, *pfrom.m_tx_relay->pfilter);
1607 }
1608 }
1609 if (sendMerkleBlock) {
1610 connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::MERKLEBLOCK, merkleBlock));
1611 // CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
1612 // This avoids hurting performance by pointlessly requiring a round-trip
1613 // Note that there is currently no way for a node to request any single transactions we didn't send here -
1614 // they must either disconnect and retry or request the full block.
1615 // Thus, the protocol spec, as specified, allows us to provide duplicate txn here,
1616 // however we MUST always provide at least what the remote peer needs
1617 typedef std::pair<unsigned int, uint256> PairType;
1618 for (PairType& pair : merkleBlock.vMatchedTxn)
1619 connman.PushMessage(&pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::TX, *pblock->vtx[pair.first]));
1620 }
1621 // else
1622 // no response
1623 } else if (inv.IsMsgCmpctBlk()) {
1624 // If a peer is asking for old blocks, we're almost guaranteed
1625 // they won't have a useful mempool to match against a compact block,
1626 // and we don't feel like constructing the object for them, so
1627 // instead we respond with the full, non-compact block.
1628 bool fPeerWantsWitness = State(pfrom.GetId())->fWantsCmpctWitness;
1629 int nSendFlags = fPeerWantsWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
1630 if (CanDirectFetch(consensusParams) && pindex->nHeight >= ::ChainActive().Height() - MAX_CMPCTBLOCK_DEPTH) {
1631 if ((fPeerWantsWitness || !fWitnessesPresentInARecentCompactBlock) && a_recent_compact_block && a_recent_compact_block->header.GetHash() == pindex->GetBlockHash()) {
1632 connman.PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, *a_recent_compact_block));
1633 } else {
1634 CBlockHeaderAndShortTxIDs cmpctblock(*pblock, fPeerWantsWitness);
1635 connman.PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
1636 }
1637 } else {
1638 connman.PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCK, *pblock));
1639 }
1640 }
1641 }
1642
1643 // Trigger the peer node to send a getblocks request for the next batch of inventory
1644 if (inv.hash == pfrom.hashContinue)
1645 {
1646 // Send immediately. This must send even if redundant,
1647 // and we want it right after the last block so they don't
1648 // wait for other stuff first.
1649 std::vector<CInv> vInv;
1650 vInv.push_back(CInv(MSG_BLOCK, ::ChainActive().Tip()->GetBlockHash()));
1651 connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::INV, vInv));
1652 pfrom.hashContinue.SetNull();
1653 }
1654 }
1655 }
1656
1657 //! Determine whether or not a peer can request a transaction, and return it (or nullptr if not found or not allowed).
1658 static CTransactionRef FindTxForGetData(const CTxMemPool& mempool, const CNode& peer, const GenTxid& gtxid, const std::chrono::seconds mempool_req, const std::chrono::seconds now) LOCKS_EXCLUDED(cs_main)
1659 {
1660 auto txinfo = mempool.info(gtxid);
1661 if (txinfo.tx) {
1662 // If a TX could have been INVed in reply to a MEMPOOL request,
1663 // or is older than UNCONDITIONAL_RELAY_DELAY, permit the request
1664 // unconditionally.
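// (UNCONDITIONAL_RELAY_DELAY is two minutes: a transaction that has been
// in the mempool that long is assumed to have propagated widely, so
// serving it reveals nothing about what we chose to announce.)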
1665 if ((mempool_req.count() && txinfo.m_time <= mempool_req) || txinfo.m_time <= now - UNCONDITIONAL_RELAY_DELAY) {
1666 return std::move(txinfo.tx);
1667 }
1668 }
1669
1670 {
1671 LOCK(cs_main);
1672 // Otherwise, the transaction must have been announced recently.
1673 if (State(peer.GetId())->m_recently_announced_invs.contains(gtxid.GetHash())) {
1674 // If it was, it can be relayed from either the mempool...
1675 if (txinfo.tx) return std::move(txinfo.tx);
1676 // ... or the relay pool.
1677 auto mi = mapRelay.find(gtxid.GetHash());
1678 if (mi != mapRelay.end()) return mi->second;
1679 }
1680 }
1681
1682 return {};
1683 }
1684
1685 void static ProcessGetData(CNode& pfrom, Peer& peer, const CChainParams& chainparams, CConnman& connman, CTxMemPool& mempool, const std::atomic<bool>& interruptMsgProc) EXCLUSIVE_LOCKS_REQUIRED(!cs_main, peer.m_getdata_requests_mutex)
1686 {
1687 AssertLockNotHeld(cs_main);
1688
1689 std::deque<CInv>::iterator it = peer.m_getdata_requests.begin();
1690 std::vector<CInv> vNotFound;
1691 const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
1692
1693 const std::chrono::seconds now = GetTime<std::chrono::seconds>();
1694 // Get last mempool request time
1695 const std::chrono::seconds mempool_req = pfrom.m_tx_relay != nullptr ? pfrom.m_tx_relay->m_last_mempool_req.load()
1696 : std::chrono::seconds::min();
1697
1698 // Process as many TX items from the front of the getdata queue as
1699 // possible, since they're common and it's efficient to batch process
1700 // them.
1701 while (it != peer.m_getdata_requests.end() && it->IsGenTxMsg()) {
1702 if (interruptMsgProc) return;
1703 // The send buffer provides backpressure. If there's no space in
1704 // the buffer, pause processing until the next call.
1705 if (pfrom.fPauseSend) break;
1706
1707 const CInv &inv = *it++;
1708
1709 if (pfrom.m_tx_relay == nullptr) {
1710 // Ignore GETDATA requests for transactions from blocks-only peers.
1711 continue;
1712 }
1713
1714 CTransactionRef tx = FindTxForGetData(mempool, pfrom, ToGenTxid(inv), mempool_req, now);
1715 if (tx) {
1716 // WTX and WITNESS_TX imply we serialize with witness
1717 int nSendFlags = (inv.IsMsgTx() ? SERIALIZE_TRANSACTION_NO_WITNESS : 0);
1718 connman.PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *tx));
1719 mempool.RemoveUnbroadcastTx(tx->GetHash());
1720 // As we're going to send tx, make sure its unconfirmed parents are made requestable.
1721 std::vector<uint256> parent_ids_to_add;
1722 {
1723 LOCK(mempool.cs);
1724 auto txiter = mempool.GetIter(tx->GetHash());
1725 if (txiter) {
1726 const CTxMemPoolEntry::Parents& parents = (*txiter)->GetMemPoolParentsConst();
1727 parent_ids_to_add.reserve(parents.size());
1728 for (const CTxMemPoolEntry& parent : parents) {
1729 if (parent.GetTime() > now - UNCONDITIONAL_RELAY_DELAY) {
1730 parent_ids_to_add.push_back(parent.GetTx().GetHash());
1731 }
1732 }
1733 }
1734 }
1735 for (const uint256& parent_txid : parent_ids_to_add) {
1736 // Relaying a transaction with a recent but unconfirmed parent.
1737 if (WITH_LOCK(pfrom.m_tx_relay->cs_tx_inventory, return !pfrom.m_tx_relay->filterInventoryKnown.contains(parent_txid))) {
1738 LOCK(cs_main);
1739 State(pfrom.GetId())->m_recently_announced_invs.insert(parent_txid);
1740 }
1741 }
1742 } else {
1743 vNotFound.push_back(inv);
1744 }
1745 }
1746
1747 // Only process one BLOCK item per call, since they're uncommon and can be
1748 // expensive to process.
1749 if (it != peer.m_getdata_requests.end() && !pfrom.fPauseSend) {
1750 const CInv &inv = *it++;
1751 if (inv.IsGenBlkMsg()) {
1752 ProcessGetBlockData(pfrom, chainparams, inv, connman);
1753 }
1754 // else: If the first item on the queue is an unknown type, we erase it
1755 // and continue processing the queue on the next call.
1756 }
1757
1758 peer.m_getdata_requests.erase(peer.m_getdata_requests.begin(), it);
1759
1760 if (!vNotFound.empty()) {
1761 // Let the peer know that we didn't find what it asked for, so it doesn't
1762 // have to wait around forever.
1763 // SPV clients care about this message: it's needed when they are
1764 // recursively walking the dependencies of relevant unconfirmed
1765 // transactions. SPV clients want to do that because they want to know
1766 // about (and store and rebroadcast and risk analyze) the dependencies
1767 // of transactions relevant to them, without having to download the
1768 // entire memory pool.
1769 // Also, other nodes can use these messages to automatically request a
1770 // transaction from some other peer that announced it, and stop
1771 // waiting for us to respond.
1772 // In normal operation, we often send NOTFOUND messages for parents of
1773 // transactions that we relay; if a peer is missing a parent, they may
1774 // assume we have them and request the parents from us.
1775 connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::NOTFOUND, vNotFound));
1776 }
1777 }
1778
1779 static uint32_t GetFetchFlags(const CNode& pfrom) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
1780 uint32_t nFetchFlags = 0;
1781 if ((pfrom.GetLocalServices() & NODE_WITNESS) && State(pfrom.GetId())->fHaveWitness) {
1782 nFetchFlags |= MSG_WITNESS_FLAG;
1783 }
1784 return nFetchFlags;
1785 }
1786
1787 void PeerManager::SendBlockTransactions(CNode& pfrom, const CBlock& block, const BlockTransactionsRequest& req) {
1788 BlockTransactions resp(req);
1789 for (size_t i = 0; i < req.indexes.size(); i++) {
1790 if (req.indexes[i] >= block.vtx.size()) {
1791 Misbehaving(pfrom.GetId(), 100, "getblocktxn with out-of-bounds tx indices");
1792 return;
1793 }
1794 resp.txn[i] = block.vtx[req.indexes[i]];
1795 }
1796 LOCK(cs_main);
1797 const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
1798 int nSendFlags = State(pfrom.GetId())->fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
1799 m_connman.PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCKTXN, resp));
1800 }
1801
1802 void PeerManager::ProcessHeadersMessage(CNode& pfrom, const std::vector<CBlockHeader>& headers, bool via_compact_block)
1803 {
1804 const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
1805 size_t nCount = headers.size();
1806
1807 if (nCount == 0) {
1808 // Nothing interesting. Stop asking this peer for more headers.
1809 return;
1810 }
1811
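// For peers that negotiated SIZE_HEADERS_LIMIT_VERSION, bound the
// cumulative serialized size of the headers message rather than only
// the header count, since individual header size can vary (e.g. on
// merge-mined chains).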
1812 size_t nSize = 0;
1813 for (const auto& header : headers) {
1814 nSize += GetSerializeSize(header, PROTOCOL_VERSION);
1815 if (pfrom.nVersion >= SIZE_HEADERS_LIMIT_VERSION
1816 && nSize > MAX_HEADERS_SIZE) {
1817 LOCK(cs_main);
1818 Misbehaving(pfrom.GetId(), 20, strprintf("headers message size = %u", nSize));
1819 return;
1820 }
1821 }
1822
1823 bool received_new_header = false;
1824 const CBlockIndex *pindexLast = nullptr;
1825 {
1826 LOCK(cs_main);
1827 CNodeState *nodestate = State(pfrom.GetId());
1828
1829 // If this looks like it could be a block announcement (nCount <
1830 // MAX_BLOCKS_TO_ANNOUNCE), use special logic for handling headers that
1831 // don't connect:
1832 // - Send a getheaders message in response to try to connect the chain.
1833 // - The peer can send up to MAX_UNCONNECTING_HEADERS in a row that
1834 // don't connect before giving DoS points
1835 // - Once a headers message is received that is valid and does connect,
1836 // nUnconnectingHeaders gets reset back to 0.
1837 if (!LookupBlockIndex(headers[0].hashPrevBlock) && nCount < MAX_BLOCKS_TO_ANNOUNCE) {
1838 nodestate->nUnconnectingHeaders++;
1839 m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexBestHeader), uint256()));
1840 LogPrint(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n",
1841 headers[0].GetHash().ToString(),
1842 headers[0].hashPrevBlock.ToString(),
1843 pindexBestHeader->nHeight,
1844 pfrom.GetId(), nodestate->nUnconnectingHeaders);
1845 // Set hashLastUnknownBlock for this peer, so that if we
1846 // eventually get the headers - even from a different peer -
1847 // we can use this peer to download.
1848 UpdateBlockAvailability(pfrom.GetId(), headers.back().GetHash());
1849
1850 if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS == 0) {
1851 Misbehaving(pfrom.GetId(), 20, strprintf("%d non-connecting headers", nodestate->nUnconnectingHeaders));
1852 }
1853 return;
1854 }
1855
1856 uint256 hashLastBlock;
1857 for (const CBlockHeader& header : headers) {
1858 if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) {
1859 Misbehaving(pfrom.GetId(), 20, "non-continuous headers sequence");
1860 return;
1861 }
1862 hashLastBlock = header.GetHash();
1863 }
1864
1865 // If we don't have the last header, then they'll have given us
1866 // something new (if these headers are valid).
1867 if (!LookupBlockIndex(hashLastBlock)) {
1868 received_new_header = true;
1869 }
1870 }
1871
1872 BlockValidationState state;
1873 if (!m_chainman.ProcessNewBlockHeaders(headers, state, m_chainparams, &pindexLast)) {
1874 if (state.IsInvalid()) {
1875 MaybePunishNodeForBlock(pfrom.GetId(), state, via_compact_block, "invalid header received");
1876 return;
1877 }
1878 }
1879
1880 {
1881 LOCK(cs_main);
1882 CNodeState *nodestate = State(pfrom.GetId());
1883 if (nodestate->nUnconnectingHeaders > 0) {
1884 LogPrint(BCLog::NET, "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n", pfrom.GetId(), nodestate->nUnconnectingHeaders);
1885 }
1886 nodestate->nUnconnectingHeaders = 0;
1887
1888 assert(pindexLast);
1889 UpdateBlockAvailability(pfrom.GetId(), pindexLast->GetBlockHash());
1890
1891 // From here, pindexBestKnownBlock should be guaranteed to be non-null,
1892 // because it is set in UpdateBlockAvailability. Some nullptr checks
1893 // are still present, however, as belt-and-suspenders.
1894
1895 if (received_new_header && pindexLast->nChainWork > ::ChainActive().Tip()->nChainWork) {
1896 nodestate->m_last_block_announcement = GetTime();
1897 }
1898
1899 bool maxSize = (nCount == MAX_HEADERS_RESULTS);
1900 if (pfrom.nVersion >= SIZE_HEADERS_LIMIT_VERSION
1901 && nSize >= THRESHOLD_HEADERS_SIZE)
1902 maxSize = true;
1903 // FIXME: This change (using received_new_header) is rolled back in Bitcoin,
1904 // but I think it should stay here for merge-mined coins. Try to get
1905 // it fixed again upstream and then update the fix.
1906 if (maxSize && received_new_header) {
1907 // Headers message had its maximum size; the peer may have more headers.
1908 // TODO: optimize: if pindexLast is an ancestor of ::ChainActive().Tip or pindexBestHeader, continue
1909 // from there instead.
1910 LogPrint(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom.GetId(), pfrom.nStartingHeight);
1911 m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexLast), uint256()));
1912 }
1913
1914 bool fCanDirectFetch = CanDirectFetch(m_chainparams.GetConsensus());
1915 // If this set of headers is valid and ends in a block with at least as
1916 // much work as our tip, download as much as possible.
1917 if (fCanDirectFetch && pindexLast->IsValid(BLOCK_VALID_TREE) && ::ChainActive().Tip()->nChainWork <= pindexLast->nChainWork) {
1918 std::vector<const CBlockIndex*> vToFetch;
1919 const CBlockIndex *pindexWalk = pindexLast;
1920 // Calculate all the blocks we'd need to switch to pindexLast, up to a limit.
1921 while (pindexWalk && !::ChainActive().Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
1922 if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
1923 !mapBlocksInFlight.count(pindexWalk->GetBlockHash()) &&
1924 (!IsWitnessEnabled(pindexWalk->pprev, m_chainparams.GetConsensus()) || State(pfrom.GetId())->fHaveWitness)) {
1925 // We don't have this block, and it's not yet in flight.
1926 vToFetch.push_back(pindexWalk);
1927 }
1928 pindexWalk = pindexWalk->pprev;
1929 }
1930 // If pindexWalk still isn't on our main chain, we're looking at a
1931 // very large reorg at a time we think we're close to caught up to
1932 // the main chain -- this shouldn't really happen. Bail out on the
1933 // direct fetch and rely on parallel download instead.
1934 if (!::ChainActive().Contains(pindexWalk)) {
1935 LogPrint(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
1936 pindexLast->GetBlockHash().ToString(),
1937 pindexLast->nHeight);
1938 } else {
1939 std::vector<CInv> vGetData;
1940 // Download as much as possible, from earliest to latest.
1941 for (const CBlockIndex *pindex : reverse_iterate(vToFetch)) {
1942 if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
1943 // Can't download any more from this peer
1944 break;
1945 }
1946 uint32_t nFetchFlags = GetFetchFlags(pfrom);
1947 vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
1948 MarkBlockAsInFlight(m_mempool, pfrom.GetId(), pindex->GetBlockHash(), pindex);
1949 LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
1950 pindex->GetBlockHash().ToString(), pfrom.GetId());
1951 }
1952 if (vGetData.size() > 1) {
1953 LogPrint(BCLog::NET, "Downloading blocks toward %s (%d) via headers direct fetch\n",
1954 pindexLast->GetBlockHash().ToString(), pindexLast->nHeight);
1955 }
1956 if (vGetData.size() > 0) {
1957 if (nodestate->fSupportsDesiredCmpctVersion && vGetData.size() == 1 && mapBlocksInFlight.size() == 1 && pindexLast->pprev->IsValid(BLOCK_VALID_CHAIN)) {
1958 // In any case, we want to download using a compact block, not a regular one
1959 vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
1960 }
1961 m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData));
1962 }
1963 }
1964 }
1965 // If we're in IBD, we want outbound peers that will serve us a useful
1966 // chain. Disconnect peers that are on chains with insufficient work.
1967 if (::ChainstateActive().IsInitialBlockDownload() && nCount != MAX_HEADERS_RESULTS) {
1968 // When nCount < MAX_HEADERS_RESULTS, we know we have no more
1969 // headers to fetch from this peer.
1970 if (nodestate->pindexBestKnownBlock && nodestate->pindexBestKnownBlock->nChainWork < nMinimumChainWork) {
1971 // This peer has too little work on their headers chain to help
1972 // us sync -- disconnect if it is an outbound disconnection
1973 // candidate.
1974 // Note: We compare their tip to nMinimumChainWork (rather than
1975 // ::ChainActive().Tip()) because we won't start block download
1976 // until we have a headers chain that has at least
1977 // nMinimumChainWork, even if a peer has a chain past our tip,
1978 // as an anti-DoS measure.
1979 if (pfrom.IsOutboundOrBlockRelayConn()) {
1980 LogPrintf("Disconnecting outbound peer %d -- headers chain has insufficient work\n", pfrom.GetId());
1981 pfrom.fDisconnect = true;
1982 }
1983 }
1984 }
1985
1986 // If this is an outbound full-relay peer, check to see if we should protect
1987 // it from the bad/lagging chain logic.
1988 // Note that outbound block-relay peers are excluded from this protection, and
1989 // thus always subject to eviction under the bad/lagging chain logic.
1990 // See ChainSyncTimeoutState.
1991 if (!pfrom.fDisconnect && pfrom.IsFullOutboundConn() && nodestate->pindexBestKnownBlock != nullptr) {
1992 if (g_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= ::ChainActive().Tip()->nChainWork && !nodestate->m_chain_sync.m_protect) {
1993 LogPrint(BCLog::NET, "Protecting outbound peer=%d from eviction\n", pfrom.GetId());
1994 nodestate->m_chain_sync.m_protect = true;
1995 ++g_outbound_peers_with_protect_from_disconnect;
1996 }
1997 }
1998 }
1999
2000 return;
2001 }
2002
2003 /**
2004 * Reconsider orphan transactions after a parent has been accepted to the mempool.
2005 *
2006 * @param[in,out] orphan_work_set The set of orphan transactions to reconsider. Generally only one
2007 * orphan will be reconsidered on each call of this function. This set
2008 * may be added to if accepting an orphan causes its children to be
2009 * reconsidered.
2010 */
2011 void PeerManager::ProcessOrphanTx(std::set<uint256>& orphan_work_set)
2012 {
2013 AssertLockHeld(cs_main);
2014 AssertLockHeld(g_cs_orphans);
2015
2016 while (!orphan_work_set.empty()) {
2017 const uint256 orphanHash = *orphan_work_set.begin();
2018 orphan_work_set.erase(orphan_work_set.begin());
2019
2020 auto orphan_it = mapOrphanTransactions.find(orphanHash);
2021 if (orphan_it == mapOrphanTransactions.end()) continue;
2022
2023 const CTransactionRef porphanTx = orphan_it->second.tx;
2024 TxValidationState state;
2025 std::list<CTransactionRef> removed_txn;
2026
2027 if (AcceptToMemoryPool(m_mempool, state, porphanTx, &removed_txn, false /* bypass_limits */)) {
2028 LogPrint(BCLog::MEMPOOL, " accepted orphan tx %s\n", orphanHash.ToString());
2029 RelayTransaction(orphanHash, porphanTx->GetWitnessHash(), m_connman);
2030 for (unsigned int i = 0; i < porphanTx->vout.size(); i++) {
2031 auto it_by_prev = mapOrphanTransactionsByPrev.find(COutPoint(orphanHash, i));
2032 if (it_by_prev != mapOrphanTransactionsByPrev.end()) {
2033 for (const auto& elem : it_by_prev->second) {
2034 orphan_work_set.insert(elem->first);
2035 }
2036 }
2037 }
2038 EraseOrphanTx(orphanHash);
2039 for (const CTransactionRef& removedTx : removed_txn) {
2040 AddToCompactExtraTransactions(removedTx);
2041 }
2042 break;
2043 } else if (state.GetResult() != TxValidationResult::TX_MISSING_INPUTS) {
2044 if (state.IsInvalid()) {
2045 LogPrint(BCLog::MEMPOOL, " invalid orphan tx %s from peer=%d. %s\n",
2046 orphanHash.ToString(),
2047 orphan_it->second.fromPeer,
2048 state.ToString());
2049 // Maybe punish peer that gave us an invalid orphan tx
2050 MaybePunishNodeForTx(orphan_it->second.fromPeer, state);
2051 }
2052 // Has inputs but not accepted to mempool
2053 // Probably non-standard or insufficient fee
2054 LogPrint(BCLog::MEMPOOL, " removed orphan tx %s\n", orphanHash.ToString());
2055 if (state.GetResult() != TxValidationResult::TX_WITNESS_STRIPPED) {
2056 // We can add the wtxid of this transaction to our reject filter.
2057 // Do not add txids of witness transactions or witness-stripped
2058 // transactions to the filter, as they can have been malleated;
2059 // adding such txids to the reject filter would potentially
2060 // interfere with relay of valid transactions from peers that
2061 // do not support wtxid-based relay. See
2062 // https://github.com/bitcoin/bitcoin/issues/8279 for details.
2063 // We can remove this restriction (and always add wtxids to
2064 // the filter even for witness stripped transactions) once
2065 // wtxid-based relay is broadly deployed.
2066 // See also comments in https://github.com/bitcoin/bitcoin/pull/18044#discussion_r443419034
2067 // for concerns around weakening security of unupgraded nodes
2068 // if we start doing this too early.
2069 assert(recentRejects);
2070 recentRejects->insert(porphanTx->GetWitnessHash());
2071 // If the transaction failed for TX_INPUTS_NOT_STANDARD,
2072 // then we know that the witness was irrelevant to the policy
2073 // failure, since this check depends only on the txid
2074 // (the scriptPubKey being spent is covered by the txid).
2075 // Add the txid to the reject filter to prevent repeated
2076 // processing of this transaction in the event that child
2077 // transactions are later received (resulting in
2078 // parent-fetching by txid via the orphan-handling logic).
2079 if (state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && porphanTx->GetWitnessHash() != porphanTx->GetHash()) {
2080 // We only add the txid if it differs from the wtxid, to
2081 // avoid wasting entries in the rolling bloom filter.
2082 recentRejects->insert(porphanTx->GetHash());
2083 }
2084 }
2085 EraseOrphanTx(orphanHash);
2086 break;
2087 }
2088 }
2089 m_mempool.check(m_chainman, &::ChainstateActive().CoinsTip());
2090 }
2091
2092 /**
2093 * Validation logic for compact filters request handling.
2094 *
2095 * May disconnect from the peer in the case of a bad request.
2096 *
2097 * @param[in] peer The peer that we received the request from
2098 * @param[in] chain_params Chain parameters
2099 * @param[in] filter_type The filter type the request is for. Must be basic filters.
2100 * @param[in] start_height The start height for the request
2101 * @param[in] stop_hash The stop_hash for the request
2102 * @param[in] max_height_diff The maximum number of items permitted to request, as specified in BIP 157
2103 * @param[out] stop_index The CBlockIndex for the stop_hash block, if the request can be serviced.
2104 * @param[out] filter_index The filter index, if the request can be serviced.
2105 * @return True if the request can be serviced.
2106 */
2107 static bool PrepareBlockFilterRequest(CNode& peer, const CChainParams& chain_params,
2108 BlockFilterType filter_type, uint32_t start_height,
2109 const uint256& stop_hash, uint32_t max_height_diff,
2110 const CBlockIndex*& stop_index,
2111 BlockFilterIndex*& filter_index)
2112 {
2113 const bool supported_filter_type =
2114 (filter_type == BlockFilterType::BASIC &&
2115 (peer.GetLocalServices() & NODE_COMPACT_FILTERS));
2116 if (!supported_filter_type) {
2117 LogPrint(BCLog::NET, "peer %d requested unsupported block filter type: %d\n",
2118 peer.GetId(), static_cast<uint8_t>(filter_type));
2119 peer.fDisconnect = true;
2120 return false;
2121 }
2122
2123 {
2124 LOCK(cs_main);
2125 stop_index = LookupBlockIndex(stop_hash);
2126
2127 // Check that the stop block exists and the peer would be allowed to fetch it.
2128 if (!stop_index || !BlockRequestAllowed(stop_index, chain_params.GetConsensus())) {
2129 LogPrint(BCLog::NET, "peer %d requested invalid block hash: %s\n",
2130 peer.GetId(), stop_hash.ToString());
2131 peer.fDisconnect = true;
2132 return false;
2133 }
2134 }
2135
2136 uint32_t stop_height = stop_index->nHeight;
2137 if (start_height > stop_height) {
2138 LogPrint(BCLog::NET, "peer %d sent invalid getcfilters/getcfheaders with " /* Continued */
2139 "start height %d and stop height %d\n",
2140 peer.GetId(), start_height, stop_height);
2141 peer.fDisconnect = true;
2142 return false;
2143 }
2144 if (stop_height - start_height >= max_height_diff) {
2145 LogPrint(BCLog::NET, "peer %d requested too many cfilters/cfheaders: %d / %d\n",
2146 peer.GetId(), stop_height - start_height + 1, max_height_diff);
2147 peer.fDisconnect = true;
2148 return false;
2149 }
2150
2151 filter_index = GetBlockFilterIndex(filter_type);
2152 if (!filter_index) {
2153 LogPrint(BCLog::NET, "Filter index for supported type %s not found\n", BlockFilterTypeName(filter_type));
2154 return false;
2155 }
2156
2157 return true;
2158 }
2159
2160 /**
2161 * Handle a cfilters request.
2162 *
2163 * May disconnect from the peer in the case of a bad request.
2164 *
2165 * @param[in] peer The peer that we received the request from
2166 * @param[in] vRecv The raw message received
2167 * @param[in] chain_params Chain parameters
2168 * @param[in] connman Pointer to the connection manager
2169 */
2170 static void ProcessGetCFilters(CNode& peer, CDataStream& vRecv, const CChainParams& chain_params,
2171 CConnman& connman)
2172 {
2173 uint8_t filter_type_ser;
2174 uint32_t start_height;
2175 uint256 stop_hash;
2176
2177 vRecv >> filter_type_ser >> start_height >> stop_hash;
2178
2179 const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser);
2180
2181 const CBlockIndex* stop_index;
2182 BlockFilterIndex* filter_index;
2183 if (!PrepareBlockFilterRequest(peer, chain_params, filter_type, start_height, stop_hash,
2184 MAX_GETCFILTERS_SIZE, stop_index, filter_index)) {
2185 return;
2186 }
2187
2188 std::vector<BlockFilter> filters;
2189 if (!filter_index->LookupFilterRange(start_height, stop_index, filters)) {
2190 LogPrint(BCLog::NET, "Failed to find block filter in index: filter_type=%s, start_height=%d, stop_hash=%s\n",
2191 BlockFilterTypeName(filter_type), start_height, stop_hash.ToString());
2192 return;
2193 }
2194
2195 for (const auto& filter : filters) {
2196 CSerializedNetMsg msg = CNetMsgMaker(peer.GetCommonVersion())
2197 .Make(NetMsgType::CFILTER, filter);
2198 connman.PushMessage(&peer, std::move(msg));
2199 }
2200 }
2201
2202 /**
2203 * Handle a cfheaders request.
2204 *
2205 * May disconnect from the peer in the case of a bad request.
2206 *
2207 * @param[in] peer The peer that we received the request from
2208 * @param[in] vRecv The raw message received
2209 * @param[in] chain_params Chain parameters
2210 * @param[in] connman Pointer to the connection manager
2211 */
2212 static void ProcessGetCFHeaders(CNode& peer, CDataStream& vRecv, const CChainParams& chain_params,
2213 CConnman& connman)
2214 {
2215 uint8_t filter_type_ser;
2216 uint32_t start_height;
2217 uint256 stop_hash;
2218
2219 vRecv >> filter_type_ser >> start_height >> stop_hash;
2220
2221 const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser);
2222
2223 const CBlockIndex* stop_index;
2224 BlockFilterIndex* filter_index;
2225 if (!PrepareBlockFilterRequest(peer, chain_params, filter_type, start_height, stop_hash,
2226 MAX_GETCFHEADERS_SIZE, stop_index, filter_index)) {
2227 return;
2228 }
2229
2230 uint256 prev_header;
2231 if (start_height > 0) {
2232 const CBlockIndex* const prev_block =
2233 stop_index->GetAncestor(static_cast<int>(start_height - 1));
2234 if (!filter_index->LookupFilterHeader(prev_block, prev_header)) {
2235 LogPrint(BCLog::NET, "Failed to find block filter header in index: filter_type=%s, block_hash=%s\n",
2236 BlockFilterTypeName(filter_type), prev_block->GetBlockHash().ToString());
2237 return;
2238 }
2239 }
2240
2241 std::vector<uint256> filter_hashes;
2242 if (!filter_index->LookupFilterHashRange(start_height, stop_index, filter_hashes)) {
2243 LogPrint(BCLog::NET, "Failed to find block filter hashes in index: filter_type=%s, start_height=%d, stop_hash=%s\n",
2244 BlockFilterTypeName(filter_type), start_height, stop_hash.ToString());
2245 return;
2246 }
2247
2248 CSerializedNetMsg msg = CNetMsgMaker(peer.GetCommonVersion())
2249 .Make(NetMsgType::CFHEADERS,
2250 filter_type_ser,
2251 stop_index->GetBlockHash(),
2252 prev_header,
2253 filter_hashes);
2254 connman.PushMessage(&peer, std::move(msg));
2255 }
2256
2257 /**
2258 * Handle a getcfcheckpt request.
2259 *
2260 * May disconnect from the peer in the case of a bad request.
2261 *
2262 * @param[in] peer The peer that we received the request from
2263 * @param[in] vRecv The raw message received
2264 * @param[in] chain_params Chain parameters
2265 * @param[in] connman Pointer to the connection manager
2266 */
2267 static void ProcessGetCFCheckPt(CNode& peer, CDataStream& vRecv, const CChainParams& chain_params,
2268 CConnman& connman)
2269 {
2270 uint8_t filter_type_ser;
2271 uint256 stop_hash;
2272
2273 vRecv >> filter_type_ser >> stop_hash;
2274
2275 const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser);
2276
2277 const CBlockIndex* stop_index;
2278 BlockFilterIndex* filter_index;
2279 if (!PrepareBlockFilterRequest(peer, chain_params, filter_type, /*start_height=*/0, stop_hash,
2280 /*max_height_diff=*/std::numeric_limits<uint32_t>::max(),
2281 stop_index, filter_index)) {
2282 return;
2283 }
2284
2285 std::vector<uint256> headers(stop_index->nHeight / CFCHECKPT_INTERVAL);
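// For example, with CFCHECKPT_INTERVAL == 1000 (its BIP157 value) and a
// stop block at height 2500, this allocates two slots and returns the
// filter headers at heights 1000 and 2000.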
2286
2287 // Populate headers.
2288 const CBlockIndex* block_index = stop_index;
2289 for (int i = headers.size() - 1; i >= 0; i--) {
2290 int height = (i + 1) * CFCHECKPT_INTERVAL;
2291 block_index = block_index->GetAncestor(height);
2292
2293 if (!filter_index->LookupFilterHeader(block_index, headers[i])) {
2294 LogPrint(BCLog::NET, "Failed to find block filter header in index: filter_type=%s, block_hash=%s\n",
2295 BlockFilterTypeName(filter_type), block_index->GetBlockHash().ToString());
2296 return;
2297 }
2298 }
2299
2300 CSerializedNetMsg msg = CNetMsgMaker(peer.GetCommonVersion())
2301 .Make(NetMsgType::CFCHECKPT,
2302 filter_type_ser,
2303 stop_index->GetBlockHash(),
2304 headers);
2305 connman.PushMessage(&peer, std::move(msg));
2306 }
2307
2308 void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDataStream& vRecv,
2309 const std::chrono::microseconds time_received,
2310 const std::atomic<bool>& interruptMsgProc)
2311 {
2312 LogPrint(BCLog::NET, "received: %s (%u bytes) peer=%d\n", SanitizeString(msg_type), vRecv.size(), pfrom.GetId());
2313 if (gArgs.IsArgSet("-dropmessagestest") && GetRand(gArgs.GetArg("-dropmessagestest", 0)) == 0)
2314 {
2315 LogPrintf("dropmessagestest DROPPING RECV MESSAGE\n");
2316 return;
2317 }
2318
2319 PeerRef peer = GetPeerRef(pfrom.GetId());
2320 if (peer == nullptr) return;
2321
2322 if (msg_type == NetMsgType::VERSION) {
2323 // Each connection can only send one version message
2324 if (pfrom.nVersion != 0)
2325 {
2326 Misbehaving(pfrom.GetId(), 1, "redundant version message");
2327 return;
2328 }
2329
2330 int64_t nTime;
2331 CAddress addrMe;
2332 CAddress addrFrom;
2333 uint64_t nNonce = 1;
2334 uint64_t nServiceInt;
2335 ServiceFlags nServices;
2336 int nVersion;
2337 std::string cleanSubVer;
2338 int nStartingHeight = -1;
2339 bool fRelay = true;
2340
2341 vRecv >> nVersion >> nServiceInt >> nTime >> addrMe;
2342 nServices = ServiceFlags(nServiceInt);
2343 if (!pfrom.IsInboundConn())
2344 {
2345 m_connman.SetServices(pfrom.addr, nServices);
2346 }
2347 if (pfrom.ExpectServicesFromConn() && !HasAllDesirableServiceFlags(nServices))
2348 {
2349 LogPrint(BCLog::NET, "peer=%d does not offer the expected services (%08x offered, %08x expected); disconnecting\n", pfrom.GetId(), nServices, GetDesirableServiceFlags(nServices));
2350 pfrom.fDisconnect = true;
2351 return;
2352 }
2353
2354 if (nVersion < MIN_PEER_PROTO_VERSION) {
2355 // disconnect from peers older than this proto version
2356 LogPrint(BCLog::NET, "peer=%d using obsolete version %i; disconnecting\n", pfrom.GetId(), nVersion);
2357 pfrom.fDisconnect = true;
2358 return;
2359 }
2360
2361 if (!vRecv.empty())
2362 vRecv >> addrFrom >> nNonce;
2363 if (!vRecv.empty()) {
2364 std::string strSubVer;
2365 vRecv >> LIMITED_STRING(strSubVer, MAX_SUBVERSION_LENGTH);
2366 cleanSubVer = SanitizeString(strSubVer);
2367 }
2368 if (!vRecv.empty()) {
2369 vRecv >> nStartingHeight;
2370 }
2371 if (!vRecv.empty())
2372 vRecv >> fRelay;
2373 // Disconnect if we connected to ourself
2374 if (pfrom.IsInboundConn() && !m_connman.CheckIncomingNonce(nNonce))
2375 {
2376 LogPrintf("connected to self at %s, disconnecting\n", pfrom.addr.ToString());
2377 pfrom.fDisconnect = true;
2378 return;
2379 }
2380
2381 if (pfrom.IsInboundConn() && addrMe.IsRoutable())
2382 {
2383 SeenLocal(addrMe);
2384 }
2385
2386 // Be shy and don't send version until we hear
2387 if (pfrom.IsInboundConn())
2388 PushNodeVersion(pfrom, m_connman, GetAdjustedTime());
2389
2390 // Negotiate the common protocol version: the lesser of ours and the peer's.
2391 const int greatest_common_version = std::min(nVersion, PROTOCOL_VERSION);
2392 pfrom.SetCommonVersion(greatest_common_version);
2393 pfrom.nVersion = nVersion;
2394
2395 const CNetMsgMaker msg_maker(greatest_common_version);
2396
2397 if (greatest_common_version >= WTXID_RELAY_VERSION) {
2398 m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::WTXIDRELAY));
2399 }
2400
2401 // Signal ADDRv2 support (BIP155).
2402 if (greatest_common_version >= 70016) {
2403 // BIP155 defines addrv2 and sendaddrv2 for all protocol versions, but some
2404 // implementations reject messages they don't know. As a courtesy, don't send
2405 // it to nodes with a version before 70016, as no software is known to support
2406 // BIP155 that doesn't announce at least that protocol version number.
2407 m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::SENDADDRV2));
2408 }
2409
2410 m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::VERACK));
2411
2412 pfrom.nServices = nServices;
2413 pfrom.SetAddrLocal(addrMe);
2414 {
2415 LOCK(pfrom.cs_SubVer);
2416 pfrom.cleanSubVer = cleanSubVer;
2417 }
2418 pfrom.nStartingHeight = nStartingHeight;
2419
2420 // set nodes not relaying blocks and tx and not serving (parts of) the historical blockchain as "clients"
2421 pfrom.fClient = (!(nServices & NODE_NETWORK) && !(nServices & NODE_NETWORK_LIMITED));
2422
2423 // set nodes not capable of serving the complete blockchain history as "limited nodes"
2424 pfrom.m_limited_node = (!(nServices & NODE_NETWORK) && (nServices & NODE_NETWORK_LIMITED));
2425
2426 if (pfrom.m_tx_relay != nullptr) {
2427 LOCK(pfrom.m_tx_relay->cs_filter);
2428 pfrom.m_tx_relay->fRelayTxes = fRelay; // set to true after we get the first filter* message
2429 }
2430
2431 if((nServices & NODE_WITNESS))
2432 {
2433 LOCK(cs_main);
2434 State(pfrom.GetId())->fHaveWitness = true;
2435 }
2436
2437 // Potentially mark this peer as a preferred download peer.
2438 {
2439 LOCK(cs_main);
2440 UpdatePreferredDownload(pfrom, State(pfrom.GetId()));
2441 }
2442
2443 if (!pfrom.IsInboundConn() && !pfrom.IsBlockOnlyConn()) {
2444 // For outbound peers, we try to relay our address (so that other
2445 // nodes can try to find us more quickly, as we have no guarantee
2446 // that an outbound peer is even aware of how to reach us) and do a
2447 // one-time address fetch (to help populate/update our addrman). If
2448 // we're starting up for the first time, our addrman may be pretty
2449 // empty and no one will know who we are, so these mechanisms are
2450 // important to help us connect to the network.
2451 //
2452 // We skip this for BLOCK_RELAY peers to avoid potentially leaking
2453 // information about our BLOCK_RELAY connections via address relay.
2454 if (fListen && !::ChainstateActive().IsInitialBlockDownload())
2455 {
2456 CAddress addr = GetLocalAddress(&pfrom.addr, pfrom.GetLocalServices());
2457 FastRandomContext insecure_rand;
2458 if (addr.IsRoutable())
2459 {
2460 LogPrint(BCLog::NET, "ProcessMessages: advertising address %s\n", addr.ToString());
2461 pfrom.PushAddress(addr, insecure_rand);
2462 } else if (IsPeerAddrLocalGood(&pfrom)) {
2463 addr.SetIP(addrMe);
2464 LogPrint(BCLog::NET, "ProcessMessages: advertising address %s\n", addr.ToString());
2465 pfrom.PushAddress(addr, insecure_rand);
2466 }
2467 }
2468
2469 // Get recent addresses
2470 m_connman.PushMessage(&pfrom, CNetMsgMaker(greatest_common_version).Make(NetMsgType::GETADDR));
2471 pfrom.fGetAddr = true;
2472 }
2473
2474 if (!pfrom.IsInboundConn()) {
2475 // For non-inbound connections, we update the addrman to record
2476 // connection success so that addrman will have an up-to-date
2477 // notion of which peers are online and available.
2478 //
2479 // While we strive to not leak information about block-relay-only
2480 // connections via the addrman, not moving an address to the tried
2481 // table is also potentially detrimental because new-table entries
2482 // are subject to eviction in the event of addrman collisions. We
2483 // mitigate the information-leak by never calling
2484 // CAddrMan::Connected() on block-relay-only peers; see
2485 // FinalizeNode().
2486 //
2487 // This moves an address from New to Tried table in Addrman,
2488 // resolves tried-table collisions, etc.
2489 m_connman.MarkAddressGood(pfrom.addr);
2490 }
2491
2492 std::string remoteAddr;
2493 if (fLogIPs)
2494 remoteAddr = ", peeraddr=" + pfrom.addr.ToString();
2495
2496 LogPrint(BCLog::NET, "receive version message: %s: version %d, blocks=%d, us=%s, peer=%d%s\n",
2497 cleanSubVer, pfrom.nVersion,
2498 pfrom.nStartingHeight, addrMe.ToString(), pfrom.GetId(),
2499 remoteAddr);
2500
2501 int64_t nTimeOffset = nTime - GetTime();
2502 pfrom.nTimeOffset = nTimeOffset;
2503 AddTimeData(pfrom.addr, nTimeOffset);
2504
2505 // If the peer is old enough to have the old alert system, send it the final alert.
2506 if (greatest_common_version <= 70012) {
2507 CDataStream finalAlert(ParseHex("60010000000000000000000000ffffff7f00000000ffffff7ffeffff7f01ffffff7f00000000ffffff7f00ffffff7f002f555247454e543a20416c657274206b657920636f6d70726f6d697365642c2075706772616465207265717569726564004630440220653febd6410f470f6bae11cad19c48413becb1ac2c17f908fd0fd53bdc3abd5202206d0e9c96fe88d4a0f01ed9dedae2b6f9e00da94cad0fecaae66ecf689bf71b50"), SER_NETWORK, PROTOCOL_VERSION);
2508 m_connman.PushMessage(&pfrom, CNetMsgMaker(greatest_common_version).Make("alert", finalAlert));
2509 }
2510
2511 // Feeler connections exist only to verify that an address is online.
2512 if (pfrom.IsFeelerConn()) {
2513 pfrom.fDisconnect = true;
2514 }
2515 return;
2516 }
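// [Editor's sketch] The VERSION handler above derives two peer classes purely
// from the advertised service bits (lines 2420-2424). This is a minimal,
// self-contained model of that truth table; the bit values mirror
// NODE_NETWORK / NODE_NETWORK_LIMITED, while PeerClass and Classify() are
// hypothetical names invented for this sketch.
#include <cassert>
#include <cstdint>

namespace sketch {
constexpr uint64_t SKETCH_NODE_NETWORK = 1 << 0;          // serves full history
constexpr uint64_t SKETCH_NODE_NETWORK_LIMITED = 1 << 10; // serves recent blocks only

struct PeerClass {
    bool is_client;       // serves no part of the historical chain
    bool is_limited_node; // serves only the most recent blocks
};

PeerClass Classify(uint64_t services)
{
    const bool full = services & SKETCH_NODE_NETWORK;
    const bool limited = services & SKETCH_NODE_NETWORK_LIMITED;
    return PeerClass{/*is_client=*/!full && !limited,
                     /*is_limited_node=*/!full && limited};
}

void ClassifyDemo()
{
    assert(!Classify(SKETCH_NODE_NETWORK).is_client);
    assert(Classify(SKETCH_NODE_NETWORK_LIMITED).is_limited_node);
    assert(Classify(0).is_client); // advertises no block service at all
}
} // namespace sketch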
2517
2518 if (pfrom.nVersion == 0) {
2519 // Must have a version message before anything else
2520 Misbehaving(pfrom.GetId(), 1, "non-version message before version handshake");
2521 return;
2522 }
2523
2524 // At this point, the outgoing message serialization version can't change.
2525 const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
2526
2527 if (msg_type == NetMsgType::VERACK) {
2528 if (pfrom.fSuccessfullyConnected) return;
2529
2530 if (!pfrom.IsInboundConn()) {
2531 // Mark this node as currently connected, so we update its timestamp later.
2532 LOCK(cs_main);
2533 State(pfrom.GetId())->fCurrentlyConnected = true;
2534 LogPrintf("New outbound peer connected: version: %d, blocks=%d, peer=%d%s (%s)\n",
2535 pfrom.nVersion.load(), pfrom.nStartingHeight,
2536 pfrom.GetId(), (fLogIPs ? strprintf(", peeraddr=%s", pfrom.addr.ToString()) : ""),
2537 pfrom.m_tx_relay == nullptr ? "block-relay" : "full-relay");
2538 }
2539
2540 if (pfrom.GetCommonVersion() >= SENDHEADERS_VERSION) {
2541 // Tell our peer we prefer to receive headers rather than inv's
2542 // We send this to non-NODE NETWORK peers as well, because even
2543 // non-NODE NETWORK peers can announce blocks (such as pruning
2544 // nodes)
2545 m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::SENDHEADERS));
2546 }
2547 if (pfrom.GetCommonVersion() >= SHORT_IDS_BLOCKS_VERSION) {
2548 // Tell our peer we are willing to provide version 1 or 2 cmpctblocks
2549 // However, we do not request new block announcements using
2550 // cmpctblock messages.
2551 // We send this to non-NODE NETWORK peers as well, because
2552 // they may wish to request compact blocks from us
2553 bool fAnnounceUsingCMPCTBLOCK = false;
2554 uint64_t nCMPCTBLOCKVersion = 2;
2555 if (pfrom.GetLocalServices() & NODE_WITNESS)
2556 m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
2557 nCMPCTBLOCKVersion = 1;
2558 m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
2559 }
2560 pfrom.fSuccessfullyConnected = true;
2561 return;
2562 }
2563
2564 // Feature negotiation of wtxidrelay should happen between VERSION and
2565 // VERACK, to avoid relay problems from switching after a connection is up
2566 if (msg_type == NetMsgType::WTXIDRELAY) {
2567 if (pfrom.fSuccessfullyConnected) {
2568 // Disconnect peers that send wtxidrelay message after VERACK; this
2569 // must be negotiated between VERSION and VERACK.
2570 pfrom.fDisconnect = true;
2571 return;
2572 }
2573 if (pfrom.GetCommonVersion() >= WTXID_RELAY_VERSION) {
2574 LOCK(cs_main);
2575 if (!State(pfrom.GetId())->m_wtxid_relay) {
2576 State(pfrom.GetId())->m_wtxid_relay = true;
2577 g_wtxid_relay_peers++;
2578 }
2579 }
2580 return;
2581 }
2582
2583 if (msg_type == NetMsgType::SENDADDRV2) {
2584 if (pfrom.fSuccessfullyConnected) {
2585 // Disconnect peers that send SENDADDRV2 message after VERACK; this
2586 // must be negotiated between VERSION and VERACK.
2587 pfrom.fDisconnect = true;
2588 return;
2589 }
2590 pfrom.m_wants_addrv2 = true;
2591 return;
2592 }
2593
2594 if (!pfrom.fSuccessfullyConnected) {
2595 LogPrint(BCLog::NET, "Unsupported message \"%s\" prior to verack from peer=%d\n", SanitizeString(msg_type), pfrom.GetId());
2596 return;
2597 }
2598
2599 if (msg_type == NetMsgType::ADDR || msg_type == NetMsgType::ADDRV2) {
2600 int stream_version = vRecv.GetVersion();
2601 if (msg_type == NetMsgType::ADDRV2) {
2602 // Add ADDRV2_FORMAT to the version so that the CNetAddr and CAddress
2603 // unserialize methods know that an address in v2 format is coming.
2604 stream_version |= ADDRV2_FORMAT;
2605 }
2606
2607 OverrideStream<CDataStream> s(&vRecv, vRecv.GetType(), stream_version);
2608 std::vector<CAddress> vAddr;
2609
2610 s >> vAddr;
2611
2612 if (!pfrom.RelayAddrsWithConn()) {
2613 return;
2614 }
2615 if (vAddr.size() > MAX_ADDR_TO_SEND)
2616 {
2617 Misbehaving(pfrom.GetId(), 20, strprintf("%s message size = %u", msg_type, vAddr.size()));
2618 return;
2619 }
2620
2621 // Store the new addresses
2622 std::vector<CAddress> vAddrOk;
2623 int64_t nNow = GetAdjustedTime();
2624 int64_t nSince = nNow - 10 * 60;
2625 for (CAddress& addr : vAddr)
2626 {
2627 if (interruptMsgProc)
2628 return;
2629
2630 // We only bother storing full nodes, though this may include
2631 // things which we would not make an outbound connection to, in
2632 // part because we may make feeler connections to them.
2633 if (!MayHaveUsefulAddressDB(addr.nServices) && !HasAllDesirableServiceFlags(addr.nServices))
2634 continue;
2635
2636 if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60)
2637 addr.nTime = nNow - 5 * 24 * 60 * 60;
2638 pfrom.AddAddressKnown(addr);
2639 if (m_banman && (m_banman->IsDiscouraged(addr) || m_banman->IsBanned(addr))) {
2640 // Do not process banned/discouraged addresses beyond remembering we received them
2641 continue;
2642 }
2643 bool fReachable = IsReachable(addr);
2644 if (addr.nTime > nSince && !pfrom.fGetAddr && vAddr.size() <= 10 && addr.IsRoutable())
2645 {
2646 // Relay to a limited number of other nodes
2647 RelayAddress(addr, fReachable, m_connman);
2648 }
2649 // Do not store addresses outside our network
2650 if (fReachable)
2651 vAddrOk.push_back(addr);
2652 }
2653 m_connman.AddNewAddresses(vAddrOk, pfrom.addr, 2 * 60 * 60);
2654 if (vAddr.size() < 1000)
2655 pfrom.fGetAddr = false;
2656 if (pfrom.IsAddrFetchConn())
2657 pfrom.fDisconnect = true;
2658 return;
2659 }
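// [Editor's sketch] The ADDR handler above clamps nonsensical address
// timestamps (values too small to be a plausible epoch time, or more than ten
// minutes in the future) to "five days ago" before storing or relaying them
// (lines 2636-2637). A standalone restatement of that clamp; SanitizeAddrTime
// is a hypothetical name.
#include <cstdint>

static int64_t SanitizeAddrTime(int64_t addr_time, int64_t now)
{
    // 100000000 (~1973) is treated as "obviously not a real timestamp", and
    // anything more than 10 minutes ahead of our adjusted clock is distrusted.
    if (addr_time <= 100000000 || addr_time > now + 10 * 60) {
        return now - 5 * 24 * 60 * 60; // pretend we heard of it 5 days ago
    }
    return addr_time;
}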
2660
2661 if (msg_type == NetMsgType::SENDHEADERS) {
2662 LOCK(cs_main);
2663 State(pfrom.GetId())->fPreferHeaders = true;
2664 return;
2665 }
2666
2667 if (msg_type == NetMsgType::SENDCMPCT) {
2668 bool fAnnounceUsingCMPCTBLOCK = false;
2669 uint64_t nCMPCTBLOCKVersion = 0;
2670 vRecv >> fAnnounceUsingCMPCTBLOCK >> nCMPCTBLOCKVersion;
2671 if (nCMPCTBLOCKVersion == 1 || ((pfrom.GetLocalServices() & NODE_WITNESS) && nCMPCTBLOCKVersion == 2)) {
2672 LOCK(cs_main);
2673 // fProvidesHeaderAndIDs is used to "lock in" the version of compact blocks we send (fWantsCmpctWitness); see the sketch after this handler
2674 if (!State(pfrom.GetId())->fProvidesHeaderAndIDs) {
2675 State(pfrom.GetId())->fProvidesHeaderAndIDs = true;
2676 State(pfrom.GetId())->fWantsCmpctWitness = nCMPCTBLOCKVersion == 2;
2677 }
2678 if (State(pfrom.GetId())->fWantsCmpctWitness == (nCMPCTBLOCKVersion == 2)) // ignore later version announces
2679 State(pfrom.GetId())->fPreferHeaderAndIDs = fAnnounceUsingCMPCTBLOCK;
2680 if (!State(pfrom.GetId())->fSupportsDesiredCmpctVersion) {
2681 if (pfrom.GetLocalServices() & NODE_WITNESS)
2682 State(pfrom.GetId())->fSupportsDesiredCmpctVersion = (nCMPCTBLOCKVersion == 2);
2683 else
2684 State(pfrom.GetId())->fSupportsDesiredCmpctVersion = (nCMPCTBLOCKVersion == 1);
2685 }
2686 }
2687 return;
2688 }
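// [Editor's sketch] The SENDCMPCT handler above locks in the first compact
// block version a peer announces: later announcements may still toggle
// high-bandwidth mode, but only for the already-negotiated version. A
// condensed model of that state machine; CmpctState and OnSendCmpct are
// hypothetical names.
#include <cstdint>

struct CmpctState {
    bool provides_header_and_ids{false};
    bool wants_cmpct_witness{false};
    bool prefer_header_and_ids{false};
};

static void OnSendCmpct(CmpctState& s, bool announce, uint64_t version, bool we_have_witness)
{
    if (version != 1 && !(we_have_witness && version == 2)) return; // unsupported
    if (!s.provides_header_and_ids) {
        s.provides_header_and_ids = true;       // first announcement locks in...
        s.wants_cmpct_witness = (version == 2); // ...the compact block version
    }
    if (s.wants_cmpct_witness == (version == 2)) {
        s.prefer_header_and_ids = announce;     // ignore announces for other versions
    }
}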
2689
2690 if (msg_type == NetMsgType::INV) {
2691 std::vector<CInv> vInv;
2692 vRecv >> vInv;
2693 if (vInv.size() > MAX_INV_SZ)
2694 {
2695 Misbehaving(pfrom.GetId(), 20, strprintf("inv message size = %u", vInv.size()));
2696 return;
2697 }
2698
2699 // We won't accept tx inv's if we're in blocks-only mode, or this is a
2700 // block-relay-only peer
2701 bool fBlocksOnly = !g_relay_txes || (pfrom.m_tx_relay == nullptr);
2702
2703 // Allow peers with relay permission to send data other than blocks in blocks only mode
2704 if (pfrom.HasPermission(PF_RELAY)) {
2705 fBlocksOnly = false;
2706 }
2707
2708 LOCK(cs_main);
2709
2710 const auto current_time = GetTime<std::chrono::microseconds>();
2711 uint256* best_block{nullptr};
2712
2713 for (CInv& inv : vInv) {
2714 if (interruptMsgProc) return;
2715
2716 // Ignore INVs that don't match wtxidrelay setting.
2717 // Note that orphan parent fetching always uses MSG_TX GETDATAs regardless of the wtxidrelay setting.
2718 // This is fine as no INV messages are involved in that process.
2719 if (State(pfrom.GetId())->m_wtxid_relay) {
2720 if (inv.IsMsgTx()) continue;
2721 } else {
2722 if (inv.IsMsgWtx()) continue;
2723 }
2724
2725 if (inv.IsMsgBlk()) {
2726 const bool fAlreadyHave = AlreadyHaveBlock(inv.hash);
2727 LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId());
2728
2729 UpdateBlockAvailability(pfrom.GetId(), inv.hash);
2730 if (!fAlreadyHave && !fImporting && !fReindex && !mapBlocksInFlight.count(inv.hash)) {
2731 // Headers-first is the primary method of announcement on
2732 // the network. If a node fell back to sending blocks by inv,
2733 // it's probably for a re-org. The final block hash
2734 // provided should be the highest, so send a getheaders and
2735 // then fetch the blocks we need to catch up.
2736 best_block = &inv.hash;
2737 }
2738 } else if (inv.IsGenTxMsg()) {
2739 const GenTxid gtxid = ToGenTxid(inv);
2740 const bool fAlreadyHave = AlreadyHaveTx(gtxid, m_mempool);
2741 LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId());
2742
2743 pfrom.AddKnownTx(inv.hash);
2744 if (fBlocksOnly) {
2745 LogPrint(BCLog::NET, "transaction (%s) inv sent in violation of protocol, disconnecting peer=%d\n", inv.hash.ToString(), pfrom.GetId());
2746 pfrom.fDisconnect = true;
2747 return;
2748 } else if (!fAlreadyHave && !m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
2749 AddTxAnnouncement(pfrom, gtxid, current_time);
2750 }
2751 } else {
2752 LogPrint(BCLog::NET, "Unknown inv type \"%s\" received from peer=%d\n", inv.ToString(), pfrom.GetId());
2753 }
2754 }
2755
2756 if (best_block != nullptr) {
2757 m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexBestHeader), *best_block));
2758 LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, best_block->ToString(), pfrom.GetId());
2759 }
2760
2761 return;
2762 }
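// [Editor's sketch] For block INVs, the loop above never requests blocks
// directly; it remembers only the *last* unknown block hash and sends a
// single getheaders afterwards, because headers-first sync will pull in
// everything between our tip and that target. A tiny illustration of the
// pattern, using strings in place of uint256; names are hypothetical.
#include <optional>
#include <string>
#include <utility>
#include <vector>

static std::optional<std::string> PickHeadersTarget(
    const std::vector<std::pair<std::string, bool>>& invs)
{
    // invs: (block hash, already_have). Mirrors the best_block pointer above.
    std::optional<std::string> best_block;
    for (const auto& [hash, already_have] : invs) {
        if (!already_have) best_block = hash; // keep overwriting; last one wins
    }
    return best_block; // caller sends one getheaders for this target, if any
}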
2763
2764 if (msg_type == NetMsgType::GETDATA) {
2765 std::vector<CInv> vInv;
2766 vRecv >> vInv;
2767 if (vInv.size() > MAX_INV_SZ)
2768 {
2769 Misbehaving(pfrom.GetId(), 20, strprintf("getdata message size = %u", vInv.size()));
2770 return;
2771 }
2772
2773 LogPrint(BCLog::NET, "received getdata (%u invsz) peer=%d\n", vInv.size(), pfrom.GetId());
2774
2775 if (vInv.size() > 0) {
2776 LogPrint(BCLog::NET, "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom.GetId());
2777 }
2778
2779 {
2780 LOCK(peer->m_getdata_requests_mutex);
2781 peer->m_getdata_requests.insert(peer->m_getdata_requests.end(), vInv.begin(), vInv.end());
2782 ProcessGetData(pfrom, *peer, m_chainparams, m_connman, m_mempool, interruptMsgProc);
2783 }
2784
2785 return;
2786 }
2787
2788 if (msg_type == NetMsgType::GETBLOCKS) {
2789 CBlockLocator locator;
2790 uint256 hashStop;
2791 vRecv >> locator >> hashStop;
2792
2793 if (locator.vHave.size() > MAX_LOCATOR_SZ) {
2794 LogPrint(BCLog::NET, "getblocks locator size %lld > %d, disconnect peer=%d\n", locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.GetId());
2795 pfrom.fDisconnect = true;
2796 return;
2797 }
2798
2799 // We might have announced the currently-being-connected tip using a
2800 // compact block, which resulted in the peer sending a getblocks
2801 // request, which we would otherwise respond to without the new block.
2802 // To avoid this situation we simply verify that we are on our best
2803 // known chain now. This is super overkill, but we handle it better
2804 // for getheaders requests, and there are no known nodes which support
2805 // compact blocks but still use getblocks to request blocks.
2806 {
2807 std::shared_ptr<const CBlock> a_recent_block;
2808 {
2809 LOCK(cs_most_recent_block);
2810 a_recent_block = most_recent_block;
2811 }
2812 BlockValidationState state;
2813 if (!ActivateBestChain(state, m_chainparams, a_recent_block)) {
2814 LogPrint(BCLog::NET, "failed to activate chain (%s)\n", state.ToString());
2815 }
2816 }
2817
2818 LOCK(cs_main);
2819
2820 // Find the last block the caller has in the main chain
2821 const CBlockIndex* pindex = FindForkInGlobalIndex(::ChainActive(), locator);
2822
2823 // Send the rest of the chain
2824 if (pindex)
2825 pindex = ::ChainActive().Next(pindex);
2826 int nLimit = 500;
2827 LogPrint(BCLog::NET, "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom.GetId());
2828 for (; pindex; pindex = ::ChainActive().Next(pindex))
2829 {
2830 if (pindex->GetBlockHash() == hashStop)
2831 {
2832 LogPrint(BCLog::NET, " getblocks stopping at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
2833 break;
2834 }
2835 // If pruning, don't inv blocks unless we have them on disk and are likely to still have
2836 // them for some reasonable time window (1 hour) that block relay might require.
2837 const int nPrunedBlocksLikelyToHave = MIN_BLOCKS_TO_KEEP - 3600 / m_chainparams.GetConsensus().nPowTargetSpacing;
2838 if (fPruneMode && (!(pindex->nStatus & BLOCK_HAVE_DATA) || pindex->nHeight <= ::ChainActive().Tip()->nHeight - nPrunedBlocksLikelyToHave))
2839 {
2840 LogPrint(BCLog::NET, " getblocks stopping, pruned or too old block at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
2841 break;
2842 }
2843 WITH_LOCK(pfrom.cs_inventory, pfrom.vInventoryBlockToSend.push_back(pindex->GetBlockHash()));
2844 if (--nLimit <= 0)
2845 {
2846 // When this block is requested, we'll send an inv that'll
2847 // trigger the peer to getblocks the next batch of inventory.
2848 LogPrint(BCLog::NET, " getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
2849 pfrom.hashContinue = pindex->GetBlockHash();
2850 break;
2851 }
2852 }
2853 return;
2854 }
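// [Editor's sketch] The getblocks loop above refuses to announce blocks that a
// pruning node might delete within the next hour (line 2837). Assuming the
// mainnet values MIN_BLOCKS_TO_KEEP = 288 and 10-minute block spacing, that
// reserve works out to 288 - 6 = 282 blocks below the tip.
#include <cstdint>

static int PrunedBlocksLikelyToHave(int min_blocks_to_keep, int64_t pow_target_spacing)
{
    // Blocks deeper than this below the tip might be pruned before the peer
    // finishes downloading them, so keep a one-hour safety margin.
    return min_blocks_to_keep - static_cast<int>(3600 / pow_target_spacing);
}
// e.g. PrunedBlocksLikelyToHave(288, 600) == 282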
2855
2856 if (msg_type == NetMsgType::GETBLOCKTXN) {
2857 BlockTransactionsRequest req;
2858 vRecv >> req;
2859
2860 std::shared_ptr<const CBlock> recent_block;
2861 {
2862 LOCK(cs_most_recent_block);
2863 if (most_recent_block_hash == req.blockhash)
2864 recent_block = most_recent_block;
2865 // Unlock cs_most_recent_block to avoid cs_main lock inversion
2866 }
2867 if (recent_block) {
2868 SendBlockTransactions(pfrom, *recent_block, req);
2869 return;
2870 }
2871
2872 {
2873 LOCK(cs_main);
2874
2875 const CBlockIndex* pindex = LookupBlockIndex(req.blockhash);
2876 if (!pindex || !(pindex->nStatus & BLOCK_HAVE_DATA)) {
2877 LogPrint(BCLog::NET, "Peer %d sent us a getblocktxn for a block we don't have\n", pfrom.GetId());
2878 return;
2879 }
2880
2881 if (pindex->nHeight >= ::ChainActive().Height() - MAX_BLOCKTXN_DEPTH) {
2882 CBlock block;
2883 bool ret = ReadBlockFromDisk(block, pindex, m_chainparams.GetConsensus());
2884 assert(ret);
2885
2886 SendBlockTransactions(pfrom, block, req);
2887 return;
2888 }
2889 }
2890
2891 // If an older block is requested (should never happen in practice,
2892 // but can happen in tests) send a block response instead of a
2893 // blocktxn response. Sending a full block response instead of a
2894 // small blocktxn response is preferable in the case where a peer
2895 // might maliciously send lots of getblocktxn requests to trigger
2896 // expensive disk reads, because it will require the peer to
2897 // actually receive all the data read from disk over the network.
2898 LogPrint(BCLog::NET, "Peer %d sent us a getblocktxn for a block > %i deep\n", pfrom.GetId(), MAX_BLOCKTXN_DEPTH);
2899 CInv inv;
2900 WITH_LOCK(cs_main, inv.type = State(pfrom.GetId())->fWantsCmpctWitness ? MSG_WITNESS_BLOCK : MSG_BLOCK);
2901 inv.hash = req.blockhash;
2902 WITH_LOCK(peer->m_getdata_requests_mutex, peer->m_getdata_requests.push_back(inv));
2903 // The message processing loop will go around again (without pausing) and we'll respond then
2904 return;
2905 }
2906
2907 if (msg_type == NetMsgType::GETHEADERS) {
2908 CBlockLocator locator;
2909 uint256 hashStop;
2910 vRecv >> locator >> hashStop;
2911
2912 if (locator.vHave.size() > MAX_LOCATOR_SZ) {
2913 LogPrint(BCLog::NET, "getheaders locator size %lld > %d, disconnect peer=%d\n", locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.GetId());
2914 pfrom.fDisconnect = true;
2915 return;
2916 }
2917
2918 LOCK(cs_main);
2919 if (::ChainstateActive().IsInitialBlockDownload() && !pfrom.HasPermission(PF_DOWNLOAD)) {
2920 LogPrint(BCLog::NET, "Ignoring getheaders from peer=%d because node is in initial block download\n", pfrom.GetId());
2921 return;
2922 }
2923
2924 CNodeState *nodestate = State(pfrom.GetId());
2925 const CBlockIndex* pindex = nullptr;
2926 if (locator.IsNull())
2927 {
2928 // If locator is null, return the hashStop block
2929 pindex = LookupBlockIndex(hashStop);
2930 if (!pindex) {
2931 return;
2932 }
2933
2934 if (!BlockRequestAllowed(pindex, m_chainparams.GetConsensus())) {
2935 LogPrint(BCLog::NET, "%s: ignoring request from peer=%i for old block header that isn't in the main chain\n", __func__, pfrom.GetId());
2936 return;
2937 }
2938 }
2939 else
2940 {
2941 // Find the last block the caller has in the main chain
2942 pindex = FindForkInGlobalIndex(::ChainActive(), locator);
2943 if (pindex)
2944 pindex = ::ChainActive().Next(pindex);
2945 }
2946
2947 // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end
2948 std::vector<CBlock> vHeaders;
2949 unsigned nCount = 0;
2950 unsigned nSize = 0;
2951 LogPrint(BCLog::NET, "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), pfrom.GetId());
2952 for (; pindex; pindex = ::ChainActive().Next(pindex))
2953 {
2954 const CBlockHeader header = pindex->GetBlockHeader(m_chainparams.GetConsensus());
2955 ++nCount;
2956 nSize += GetSerializeSize(header, PROTOCOL_VERSION);
2957 vHeaders.push_back(header);
2958 if (nCount >= MAX_HEADERS_RESULTS
2959 || pindex->GetBlockHash() == hashStop)
2960 break;
2961 if (pfrom.nVersion >= SIZE_HEADERS_LIMIT_VERSION
2962 && nSize >= THRESHOLD_HEADERS_SIZE)
2963 break;
2964 }
2965
2966 /* Check maximum headers size before pushing the message
2967 if the peer enforces it. This should not fail since we
2968 break above in the loop at the threshold and the threshold
2969 should be small enough in comparison to the hard max size.
2970 Do it nevertheless to be sure. */
2971 if (pfrom.nVersion >= SIZE_HEADERS_LIMIT_VERSION
2972 && nSize > MAX_HEADERS_SIZE)
2973 LogPrintf("ERROR: not pushing 'headers', too large\n");
2974 else
2975 {
2976 LogPrint(BCLog::NET, "pushing %u headers, %u bytes\n", nCount, nSize);
2977 // pindex can be nullptr either if we sent ::ChainActive().Tip() OR
2978 // if our peer has ::ChainActive().Tip() (and thus we are sending an empty
2979 // headers message). In both cases it's safe to update
2980 // pindexBestHeaderSent to be our tip.
2981 //
2982 // It is important that we simply reset the BestHeaderSent value here,
2983 // and not max(BestHeaderSent, newHeaderSent). We might have announced
2984 // the currently-being-connected tip using a compact block, which
2985 // resulted in the peer sending a headers request, which we respond to
2986 // without the new block. By resetting the BestHeaderSent, we ensure we
2987 // will re-announce the new block via headers (or compact blocks again)
2988 // in the SendMessages logic.
2989 nodestate->pindexBestHeaderSent = pindex ? pindex : ::ChainActive().Tip();
2990 m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
2991 }
2992
2993 return;
2994 }
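// [Editor's sketch] The getheaders response loop above exits at whichever
// limit is hit first: the header-count cap, the stop hash, or, for peers new
// enough to enforce a byte limit, a size threshold. A compact restatement of
// those exit conditions; ShouldStop is a hypothetical helper.
static bool ShouldStop(unsigned count, unsigned size_bytes, bool at_stop_hash,
                       bool peer_enforces_size, unsigned max_count, unsigned size_threshold)
{
    if (count >= max_count || at_stop_hash) return true;
    return peer_enforces_size && size_bytes >= size_threshold;
}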
2995
2996 if (msg_type == NetMsgType::TX) {
2997 // Stop processing the transaction early if
2998 // 1) We are in blocks only mode and peer has no relay permission
2999 // 2) This peer is a block-relay-only peer
3000 if ((!g_relay_txes && !pfrom.HasPermission(PF_RELAY)) || (pfrom.m_tx_relay == nullptr))
3001 {
3002 LogPrint(BCLog::NET, "transaction sent in violation of protocol peer=%d\n", pfrom.GetId());
3003 pfrom.fDisconnect = true;
3004 return;
3005 }
3006
3007 CTransactionRef ptx;
3008 vRecv >> ptx;
3009 const CTransaction& tx = *ptx;
3010
3011 const uint256& txid = ptx->GetHash();
3012 const uint256& wtxid = ptx->GetWitnessHash();
3013
3014 LOCK2(cs_main, g_cs_orphans);
3015
3016 CNodeState* nodestate = State(pfrom.GetId());
3017
3018 const uint256& hash = nodestate->m_wtxid_relay ? wtxid : txid;
3019 pfrom.AddKnownTx(hash);
3020 if (nodestate->m_wtxid_relay && txid != wtxid) {
3021 // Insert txid into filterInventoryKnown, even for
3022 // wtxidrelay peers. This prevents re-adding of
3023 // unconfirmed parents to the recently_announced
3024 // filter, when a child tx is requested. See
3025 // ProcessGetData().
3026 pfrom.AddKnownTx(txid);
3027 }
3028
3029 m_txrequest.ReceivedResponse(pfrom.GetId(), txid);
3030 if (tx.HasWitness()) m_txrequest.ReceivedResponse(pfrom.GetId(), wtxid);
3031
3032 // We do the AlreadyHaveTx() check using wtxid, rather than txid - in the
3033 // absence of witness malleation, this is strictly better, because the
3034 // recent rejects filter may contain the wtxid but rarely contains
3035 // the txid of a segwit transaction that has been rejected.
3036 // In the presence of witness malleation, it's possible that by only
3037 // doing the check with wtxid, we could overlook a transaction which
3038 // was confirmed with a different witness, or exists in our mempool
3039 // with a different witness, but this has limited downside:
3040 // mempool validation does its own lookup of whether we have the txid
3041 // already; and an adversary can already relay us old transactions
3042 // (older than our recency filter) if trying to DoS us, without any need
3043 // for witness malleation.
3044 if (AlreadyHaveTx(GenTxid(/* is_wtxid=*/true, wtxid), m_mempool)) {
3045 if (pfrom.HasPermission(PF_FORCERELAY)) {
3046 // Always relay transactions received from peers with forcerelay
3047 // permission, even if they were already in the mempool, allowing
3048 // the node to function as a gateway for nodes hidden behind it.
3049 if (!m_mempool.exists(tx.GetHash())) {
3050 LogPrintf("Not relaying non-mempool transaction %s from forcerelay peer=%d\n", tx.GetHash().ToString(), pfrom.GetId());
3051 } else {
3052 LogPrintf("Force relaying tx %s from peer=%d\n", tx.GetHash().ToString(), pfrom.GetId());
3053 RelayTransaction(tx.GetHash(), tx.GetWitnessHash(), m_connman);
3054 }
3055 }
3056 return;
3057 }
3058
3059 TxValidationState state;
3060 std::list<CTransactionRef> lRemovedTxn;
3061
3062 if (AcceptToMemoryPool(m_mempool, state, ptx, &lRemovedTxn, false /* bypass_limits */)) {
3063 m_mempool.check(m_chainman, &::ChainstateActive().CoinsTip());
3064 // As this version of the transaction was acceptable, we can forget about any
3065 // requests for it.
3066 m_txrequest.ForgetTxHash(tx.GetHash());
3067 m_txrequest.ForgetTxHash(tx.GetWitnessHash());
3068 RelayTransaction(tx.GetHash(), tx.GetWitnessHash(), m_connman);
3069 for (unsigned int i = 0; i < tx.vout.size(); i++) {
3070 auto it_by_prev = mapOrphanTransactionsByPrev.find(COutPoint(txid, i));
3071 if (it_by_prev != mapOrphanTransactionsByPrev.end()) {
3072 for (const auto& elem : it_by_prev->second) {
3073 peer->m_orphan_work_set.insert(elem->first);
3074 }
3075 }
3076 }
3077
3078 pfrom.nLastTXTime = GetTime();
3079
3080 LogPrint(BCLog::MEMPOOL, "AcceptToMemoryPool: peer=%d: accepted %s (poolsz %u txn, %u kB)\n",
3081 pfrom.GetId(),
3082 tx.GetHash().ToString(),
3083 m_mempool.size(), m_mempool.DynamicMemoryUsage() / 1000);
3084
3085 for (const CTransactionRef& removedTx : lRemovedTxn) {
3086 AddToCompactExtraTransactions(removedTx);
3087 }
3088
3089 // Recursively process any orphan transactions that depended on this one
3090 ProcessOrphanTx(peer->m_orphan_work_set);
3091 }
3092 else if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS)
3093 {
3094 bool fRejectedParents = false; // It may be the case that the orphan's parents have all been rejected
3095
3096 // Deduplicate parent txids, so that we don't have to loop over
3097 // the same parent txid more than once down below.
3098 std::vector<uint256> unique_parents;
3099 unique_parents.reserve(tx.vin.size());
3100 for (const CTxIn& txin : tx.vin) {
3101 // We start with all parents, and then remove duplicates below.
3102 unique_parents.push_back(txin.prevout.hash);
3103 }
3104 std::sort(unique_parents.begin(), unique_parents.end());
3105 unique_parents.erase(std::unique(unique_parents.begin(), unique_parents.end()), unique_parents.end());
3106 for (const uint256& parent_txid : unique_parents) {
3107 if (recentRejects->contains(parent_txid)) {
3108 fRejectedParents = true;
3109 break;
3110 }
3111 }
3112 if (!fRejectedParents) {
3113 const auto current_time = GetTime<std::chrono::microseconds>();
3114
3115 for (const uint256& parent_txid : unique_parents) {
3116 // Here, we only have the txid (and not wtxid) of the
3117 // inputs, so we only request in txid mode, even for
3118 // wtxidrelay peers.
3119 // Eventually we should replace this with an improved
3120 // protocol for getting all unconfirmed parents.
3121 const GenTxid gtxid{/* is_wtxid=*/false, parent_txid};
3122 pfrom.AddKnownTx(parent_txid);
3123 if (!AlreadyHaveTx(gtxid, m_mempool)) AddTxAnnouncement(pfrom, gtxid, current_time);
3124 }
3125 AddOrphanTx(ptx, pfrom.GetId());
3126
3127 // Once added to the orphan pool, a tx is considered AlreadyHave, and we shouldn't request it anymore.
3128 m_txrequest.ForgetTxHash(tx.GetHash());
3129 m_txrequest.ForgetTxHash(tx.GetWitnessHash());
3130
3131 // DoS prevention: do not allow mapOrphanTransactions to grow unbounded (see CVE-2012-3789)
3132 unsigned int nMaxOrphanTx = (unsigned int)std::max((int64_t)0, gArgs.GetArg("-maxorphantx", DEFAULT_MAX_ORPHAN_TRANSACTIONS));
3133 unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTx);
3134 if (nEvicted > 0) {
3135 LogPrint(BCLog::MEMPOOL, "mapOrphan overflow, removed %u tx\n", nEvicted);
3136 }
3137 } else {
3138 LogPrint(BCLog::MEMPOOL, "not keeping orphan with rejected parents %s\n", tx.GetHash().ToString());
3139 // We will continue to reject this tx since it has rejected
3140 // parents so avoid re-requesting it from other peers.
3141 // Here we add both the txid and the wtxid, as we know that
3142 // regardless of what witness is provided, we will not accept
3143 // this, so we don't need to allow for redownload of this txid
3144 // from any of our non-wtxidrelay peers.
3145 recentRejects->insert(tx.GetHash());
3146 recentRejects->insert(tx.GetWitnessHash());
3147 m_txrequest.ForgetTxHash(tx.GetHash());
3148 m_txrequest.ForgetTxHash(tx.GetWitnessHash());
3149 }
3150 } else {
3151 if (state.GetResult() != TxValidationResult::TX_WITNESS_STRIPPED) {
3152 // We can add the wtxid of this transaction to our reject filter.
3153 // Do not add txids of witness transactions or witness-stripped
3154 // transactions to the filter, as they can have been malleated;
3155 // adding such txids to the reject filter would potentially
3156 // interfere with relay of valid transactions from peers that
3157 // do not support wtxid-based relay. See
3158 // https://github.com/bitcoin/bitcoin/issues/8279 for details.
3159 // We can remove this restriction (and always add wtxids to
3160 // the filter even for witness stripped transactions) once
3161 // wtxid-based relay is broadly deployed.
3162 // See also comments in https://github.com/bitcoin/bitcoin/pull/18044#discussion_r443419034
3163 // for concerns around weakening security of unupgraded nodes
3164 // if we start doing this too early.
3165 assert(recentRejects);
3166 recentRejects->insert(tx.GetWitnessHash());
3167 m_txrequest.ForgetTxHash(tx.GetWitnessHash());
3168 // If the transaction failed for TX_INPUTS_NOT_STANDARD,
3169 // then we know that the witness was irrelevant to the policy
3170 // failure, since this check depends only on the txid
3171 // (the scriptPubKey being spent is covered by the txid).
3172 // Add the txid to the reject filter to prevent repeated
3173 // processing of this transaction in the event that child
3174 // transactions are later received (resulting in
3175 // parent-fetching by txid via the orphan-handling logic).
3176 if (state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && tx.GetWitnessHash() != tx.GetHash()) {
3177 recentRejects->insert(tx.GetHash());
3178 m_txrequest.ForgetTxHash(tx.GetHash());
3179 }
3180 if (RecursiveDynamicUsage(*ptx) < 100000) {
3181 AddToCompactExtraTransactions(ptx);
3182 }
3183 }
3184 }
3185
3186 // If a tx has been detected by recentRejects, we will have reached
3187 // this point and the tx will have been ignored. Because we haven't run
3188 // the tx through AcceptToMemoryPool, we won't have computed a DoS
3189 // score for it or determined exactly why we consider it invalid.
3190 //
3191 // This means we won't penalize any peer subsequently relaying a DoSy
3192 // tx (even if we penalized the first peer who gave it to us) because
3193 // we have to account for recentRejects showing false positives. In
3194 // other words, we shouldn't penalize a peer if we aren't *sure* they
3195 // submitted a DoSy tx.
3196 //
3197 // Note that recentRejects doesn't just record DoSy or invalid
3198 // transactions, but any tx not accepted by the mempool, which may be
3199 // due to node policy (vs. consensus). So we can't blanket penalize a
3200 // peer simply for relaying a tx that our recentRejects has caught,
3201 // regardless of false positives.
3202
3203 if (state.IsInvalid()) {
3204 LogPrint(BCLog::MEMPOOLREJ, "%s from peer=%d was not accepted: %s\n", tx.GetHash().ToString(),
3205 pfrom.GetId(),
3206 state.ToString());
3207 MaybePunishNodeForTx(pfrom.GetId(), state);
3208 }
3209 return;
3210 }
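// [Editor's sketch] When a transaction is rejected, the handler above adds
// its *wtxid* to the reject filter, and adds the txid as well only when the
// failure provably did not depend on the witness. A minimal model of that
// decision; the enum cases mirror the TxValidationResult values used above,
// everything else is a hypothetical name.
#include <set>
#include <string>

enum class RejectReason { WITNESS_STRIPPED, INPUTS_NOT_STANDARD, OTHER };

static void RecordReject(std::set<std::string>& recent_rejects,
                         const std::string& txid, const std::string& wtxid,
                         RejectReason reason)
{
    if (reason == RejectReason::WITNESS_STRIPPED) return; // can't judge without the witness
    recent_rejects.insert(wtxid); // always safe: the wtxid commits to the witness
    if (reason == RejectReason::INPUTS_NOT_STANDARD && wtxid != txid) {
        // The failure depended only on the inputs (covered by the txid), so
        // the txid can be rejected outright regardless of witness malleation.
        recent_rejects.insert(txid);
    }
}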
3211
3212 if (msg_type == NetMsgType::CMPCTBLOCK)
3213 {
3214 // Ignore cmpctblock received while importing
3215 if (fImporting || fReindex) {
3216 LogPrint(BCLog::NET, "Unexpected cmpctblock message received from peer %d\n", pfrom.GetId());
3217 return;
3218 }
3219
3220 CBlockHeaderAndShortTxIDs cmpctblock;
3221 vRecv >> cmpctblock;
3222
3223 bool received_new_header = false;
3224
3225 {
3226 LOCK(cs_main);
3227
3228 if (!LookupBlockIndex(cmpctblock.header.hashPrevBlock)) {
3229 // The header doesn't connect (or is genesis); instead of DoSing in AcceptBlockHeader, request deeper headers
3230 if (!::ChainstateActive().IsInitialBlockDownload())
3231 m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexBestHeader), uint256()));
3232 return;
3233 }
3234
3235 if (!LookupBlockIndex(cmpctblock.header.GetHash())) {
3236 received_new_header = true;
3237 }
3238 }
3239
3240 const CBlockIndex *pindex = nullptr;
3241 BlockValidationState state;
3242 if (!m_chainman.ProcessNewBlockHeaders({cmpctblock.header}, state, m_chainparams, &pindex)) {
3243 if (state.IsInvalid()) {
3244 MaybePunishNodeForBlock(pfrom.GetId(), state, /*via_compact_block*/ true, "invalid header via cmpctblock");
3245 return;
3246 }
3247 }
3248
3249 // When we succeed in decoding a block's txids from a cmpctblock
3250 // message we typically jump to the BLOCKTXN handling code, with a
3251 // dummy (empty) BLOCKTXN message, to re-use the logic there in
3252 // completing processing of the putative block (without cs_main).
3253 bool fProcessBLOCKTXN = false;
3254 CDataStream blockTxnMsg(SER_NETWORK, PROTOCOL_VERSION);
3255
3256 // If we end up treating this as a plain headers message, call that as well
3257 // without cs_main.
3258 bool fRevertToHeaderProcessing = false;
3259
3260 // Keep a CBlock for "optimistic" compactblock reconstructions (see
3261 // below)
3262 std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
3263 bool fBlockReconstructed = false;
3264
3265 {
3266 LOCK2(cs_main, g_cs_orphans);
3267 // If AcceptBlockHeader returned true, it set pindex
3268 assert(pindex);
3269 UpdateBlockAvailability(pfrom.GetId(), pindex->GetBlockHash());
3270
3271 CNodeState *nodestate = State(pfrom.GetId());
3272
3273 // If this was a new header with more work than our tip, update the
3274 // peer's last block announcement time
3275 if (received_new_header && pindex->nChainWork > ::ChainActive().Tip()->nChainWork) {
3276 nodestate->m_last_block_announcement = GetTime();
3277 }
3278
3279 std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator blockInFlightIt = mapBlocksInFlight.find(pindex->GetBlockHash());
3280 bool fAlreadyInFlight = blockInFlightIt != mapBlocksInFlight.end();
3281
3282 if (pindex->nStatus & BLOCK_HAVE_DATA) // Nothing to do here
3283 return;
3284
3285 if (pindex->nChainWork <= ::ChainActive().Tip()->nChainWork || // We know something better
3286 pindex->nTx != 0) { // We had this block at some point, but pruned it
3287 if (fAlreadyInFlight) {
3288 // We requested this block for some reason, but our mempool will probably be useless,
3289 // so we just grab the block via normal getdata
3290 std::vector<CInv> vInv(1);
3291 vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom), cmpctblock.header.GetHash());
3292 m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
3293 }
3294 return;
3295 }
3296
3297 // If we're not close to tip yet, give up and let parallel block fetch work its magic
3298 if (!fAlreadyInFlight && !CanDirectFetch(m_chainparams.GetConsensus()))
3299 return;
3300
3301 if (IsWitnessEnabled(pindex->pprev, m_chainparams.GetConsensus()) && !nodestate->fSupportsDesiredCmpctVersion) {
3302 // Don't bother trying to process compact blocks from v1 peers
3303 // after segwit activates.
3304 return;
3305 }
3306
3307 // We want to be a bit conservative just to be extra careful about DoS
3308 // possibilities in compact block processing...
3309 if (pindex->nHeight <= ::ChainActive().Height() + 2) {
3310 if ((!fAlreadyInFlight && nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) ||
3311 (fAlreadyInFlight && blockInFlightIt->second.first == pfrom.GetId())) {
3312 std::list<QueuedBlock>::iterator* queuedBlockIt = nullptr;
3313 if (!MarkBlockAsInFlight(m_mempool, pfrom.GetId(), pindex->GetBlockHash(), pindex, &queuedBlockIt)) {
3314 if (!(*queuedBlockIt)->partialBlock)
3315 (*queuedBlockIt)->partialBlock.reset(new PartiallyDownloadedBlock(&m_mempool));
3316 else {
3317 // The block was already in flight using compact blocks from the same peer
3318 LogPrint(BCLog::NET, "Peer sent us compact block we were already syncing!\n");
3319 return;
3320 }
3321 }
3322
3323 PartiallyDownloadedBlock& partialBlock = *(*queuedBlockIt)->partialBlock;
3324 ReadStatus status = partialBlock.InitData(cmpctblock, vExtraTxnForCompact);
3325 if (status == READ_STATUS_INVALID) {
3326 MarkBlockAsReceived(pindex->GetBlockHash()); // Reset in-flight state in case Misbehaving does not result in a disconnect
3327 Misbehaving(pfrom.GetId(), 100, "invalid compact block");
3328 return;
3329 } else if (status == READ_STATUS_FAILED) {
3330 // Duplicate txindexes, the block is now in-flight, so just request it
3331 std::vector<CInv> vInv(1);
3332 vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom), cmpctblock.header.GetHash());
3333 m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
3334 return;
3335 }
3336
3337 BlockTransactionsRequest req;
3338 for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) {
3339 if (!partialBlock.IsTxAvailable(i))
3340 req.indexes.push_back(i);
3341 }
3342 if (req.indexes.empty()) {
3343 // Dirty hack to jump to BLOCKTXN code (TODO: move message handling into their own functions)
3344 BlockTransactions txn;
3345 txn.blockhash = cmpctblock.header.GetHash();
3346 blockTxnMsg << txn;
3347 fProcessBLOCKTXN = true;
3348 } else {
3349 req.blockhash = pindex->GetBlockHash();
3350 m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETBLOCKTXN, req));
3351 }
3352 } else {
3353 // This block is either already in flight from a different
3354 // peer, or this peer has too many blocks outstanding to
3355 // download from.
3356 // Optimistically try to reconstruct anyway since we might be
3357 // able to without any round trips.
3358 PartiallyDownloadedBlock tempBlock(&m_mempool);
3359 ReadStatus status = tempBlock.InitData(cmpctblock, vExtraTxnForCompact);
3360 if (status != READ_STATUS_OK) {
3361 // TODO: don't ignore failures
3362 return;
3363 }
3364 std::vector<CTransactionRef> dummy;
3365 status = tempBlock.FillBlock(*pblock, dummy);
3366 if (status == READ_STATUS_OK) {
3367 fBlockReconstructed = true;
3368 }
3369 }
3370 } else {
3371 if (fAlreadyInFlight) {
3372 // We requested this block, but it's far ahead of our tip, so our
3373 // mempool will probably be useless - request the block normally
3374 std::vector<CInv> vInv(1);
3375 vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom), cmpctblock.header.GetHash());
3376 m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
3377 return;
3378 } else {
3379 // If this was an announce-cmpctblock, we want the same treatment as a header message
3380 fRevertToHeaderProcessing = true;
3381 }
3382 }
3383 } // cs_main
3384
3385 if (fProcessBLOCKTXN) {
3386 return ProcessMessage(pfrom, NetMsgType::BLOCKTXN, blockTxnMsg, time_received, interruptMsgProc);
3387 }
3388
3389 if (fRevertToHeaderProcessing) {
3390 // Headers received from HB compact block peers are permitted to be
3391 // relayed before full validation (see BIP 152), so we don't want to disconnect
3392 // the peer if the header turns out to be for an invalid block.
3393 // Note that if a peer tries to build on an invalid chain, that
3394 // will be detected and the peer will be disconnected/discouraged.
3395 return ProcessHeadersMessage(pfrom, {cmpctblock.header}, /*via_compact_block=*/true);
3396 }
3397
3398 if (fBlockReconstructed) {
3399 // If we got here, we were able to optimistically reconstruct a
3400 // block that is in flight from some other peer.
3401 {
3402 LOCK(cs_main);
3403 mapBlockSource.emplace(pblock->GetHash(), std::make_pair(pfrom.GetId(), false));
3404 }
3405 bool fNewBlock = false;
3406 // Setting fForceProcessing to true means that we bypass some of
3407 // our anti-DoS protections in AcceptBlock, which filters
3408 // unrequested blocks that might be trying to waste our resources
3409 // (eg disk space). Because we only try to reconstruct blocks when
3410 // we're close to caught up (via the CanDirectFetch() requirement
3411 // above, combined with the behavior of not requesting blocks until
3412 // we have a chain with at least nMinimumChainWork), and we ignore
3413 // compact blocks with less work than our tip, it is safe to treat
3414 // reconstructed compact blocks as having been requested.
3415 m_chainman.ProcessNewBlock(m_chainparams, pblock, /*fForceProcessing=*/true, &fNewBlock);
3416 if (fNewBlock) {
3417 pfrom.nLastBlockTime = GetTime();
3418 } else {
3419 LOCK(cs_main);
3420 mapBlockSource.erase(pblock->GetHash());
3421 }
3422 LOCK(cs_main); // hold cs_main for CBlockIndex::IsValid()
3423 if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS)) {
3424 // Clear download state for this block, which is in
3425 // process from some other peer. We do this after calling
3426 // ProcessNewBlock so that a malleated cmpctblock announcement
3427 // can't be used to interfere with block relay.
3428 MarkBlockAsReceived(pblock->GetHash());
3429 }
3430 }
3431 return;
3432 }
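// [Editor's sketch] The compact block path above boils down to: prefill
// whatever the mempool already has, then request only the missing transaction
// indexes via getblocktxn. A sketch of that request-building step;
// MissingIndexes is a hypothetical stand-in for iterating
// PartiallyDownloadedBlock::IsTxAvailable().
#include <cstddef>
#include <vector>

static std::vector<size_t> MissingIndexes(const std::vector<bool>& tx_available)
{
    std::vector<size_t> req;
    for (size_t i = 0; i < tx_available.size(); ++i) {
        if (!tx_available[i]) req.push_back(i); // these go into GETBLOCKTXN
    }
    return req; // empty => block fully reconstructed, no round trip needed
}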
3433
3434 if (msg_type == NetMsgType::BLOCKTXN)
3435 {
3436 // Ignore blocktxn received while importing
3437 if (fImporting || fReindex) {
3438 LogPrint(BCLog::NET, "Unexpected blocktxn message received from peer %d\n", pfrom.GetId());
3439 return;
3440 }
3441
3442 BlockTransactions resp;
3443 vRecv >> resp;
3444
3445 std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
3446 bool fBlockRead = false;
3447 {
3448 LOCK(cs_main);
3449
3450 std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator it = mapBlocksInFlight.find(resp.blockhash);
3451 if (it == mapBlocksInFlight.end() || !it->second.second->partialBlock ||
3452 it->second.first != pfrom.GetId()) {
3453 LogPrint(BCLog::NET, "Peer %d sent us block transactions for block we weren't expecting\n", pfrom.GetId());
3454 return;
3455 }
3456
3457 PartiallyDownloadedBlock& partialBlock = *it->second.second->partialBlock;
3458 ReadStatus status = partialBlock.FillBlock(*pblock, resp.txn);
3459 if (status == READ_STATUS_INVALID) {
3460 MarkBlockAsReceived(resp.blockhash); // Reset in-flight state in case Misbehaving does not result in a disconnect
3461 Misbehaving(pfrom.GetId(), 100, "invalid compact block/non-matching block transactions");
3462 return;
3463 } else if (status == READ_STATUS_FAILED) {
3464 // Might have collided, fall back to getdata now :(
3465 std::vector<CInv> invs;
3466 invs.push_back(CInv(MSG_BLOCK | GetFetchFlags(pfrom), resp.blockhash));
3467 m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, invs));
3468 } else {
3469 // Block is either okay, or possibly we received
3470 // READ_STATUS_CHECKBLOCK_FAILED.
3471 // Note that CheckBlock can only fail for one of a few reasons:
3472 // 1. bad-proof-of-work (impossible here, because we've already
3473 // accepted the header)
3474 // 2. merkleroot doesn't match the transactions given (already
3475 // caught in FillBlock with READ_STATUS_FAILED, so
3476 // impossible here)
3477 // 3. the block is otherwise invalid (eg invalid coinbase,
3478 // block is too big, too many legacy sigops, etc).
3479 // So if CheckBlock failed, #3 is the only possibility.
3480 // Under BIP 152, we don't discourage the peer unless proof of work is
3481 // invalid (we don't require all the stateless checks to have
3482 // been run). This is handled below, so just treat this as
3483 // though the block was successfully read, and rely on the
3484 // handling in ProcessNewBlock to ensure the block index is
3485 // updated, etc.
3486 MarkBlockAsReceived(resp.blockhash); // it is now an empty pointer
3487 fBlockRead = true;
3488 // mapBlockSource is used for potentially punishing peers and
3489 // updating which peers send us compact blocks, so the race
3490 // between here and cs_main in ProcessNewBlock is fine.
3491 // BIP 152 permits peers to relay compact blocks after validating
3492 // the header only; we should not punish peers if the block turns
3493 // out to be invalid.
3494 mapBlockSource.emplace(resp.blockhash, std::make_pair(pfrom.GetId(), false));
3495 }
3496 } // Don't hold cs_main when we call into ProcessNewBlock
3497 if (fBlockRead) {
3498 bool fNewBlock = false;
3499 // Since we requested this block (it was in mapBlocksInFlight), force it to be processed,
3500 // even if it would not be a candidate for new tip (missing previous block, chain not long enough, etc)
3501 // This bypasses some anti-DoS logic in AcceptBlock (eg to prevent
3502 // disk-space attacks), but this should be safe due to the
3503 // protections in the compact block handler -- see related comment
3504 // in compact block optimistic reconstruction handling.
3505 m_chainman.ProcessNewBlock(m_chainparams, pblock, /*fForceProcessing=*/true, &fNewBlock);
3506 if (fNewBlock) {
3507 pfrom.nLastBlockTime = GetTime();
3508 } else {
3509 LOCK(cs_main);
3510 mapBlockSource.erase(pblock->GetHash());
3511 }
3512 }
3513 return;
3514 }
3515
3516 if (msg_type == NetMsgType::HEADERS)
3517 {
3518 // Ignore headers received while importing
3519 if (fImporting || fReindex) {
3520 LogPrint(BCLog::NET, "Unexpected headers message received from peer %d\n", pfrom.GetId());
3521 return;
3522 }
3523
3524 std::vector<CBlockHeader> headers;
3525
3526 // Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks.
3527 unsigned int nCount = ReadCompactSize(vRecv);
3528 if (nCount > MAX_HEADERS_RESULTS) {
3529 Misbehaving(pfrom.GetId(), 20, strprintf("headers message size = %u", nCount));
3530 return;
3531 }
3532 headers.resize(nCount);
3533 for (unsigned int n = 0; n < nCount; n++) {
3534 vRecv >> headers[n];
3535 ReadCompactSize(vRecv); // ignore tx count; assume it is 0.
3536 }
3537
3538 return ProcessHeadersMessage(pfrom, headers, /*via_compact_block=*/false);
3539 }
3540
3541 if (msg_type == NetMsgType::BLOCK)
3542 {
3543 // Ignore block received while importing
3544 if (fImporting || fReindex) {
3545 LogPrint(BCLog::NET, "Unexpected block message received from peer %d\n", pfrom.GetId());
3546 return;
3547 }
3548
3549 std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
3550 vRecv >> *pblock;
3551
3552 LogPrint(BCLog::NET, "received block %s peer=%d\n", pblock->GetHash().ToString(), pfrom.GetId());
3553
3554 bool forceProcessing = false;
3555 const uint256 hash(pblock->GetHash());
3556 {
3557 LOCK(cs_main);
3558 // Also always process if we requested the block explicitly, as we may
3559 // need it even though it is not a candidate for a new best tip.
3560 forceProcessing |= MarkBlockAsReceived(hash);
3561 // mapBlockSource is only used for punishing peers and setting
3562 // which peers send us compact blocks, so the race between here and
3563 // cs_main in ProcessNewBlock is fine.
3564 mapBlockSource.emplace(hash, std::make_pair(pfrom.GetId(), true));
3565 }
3566 bool fNewBlock = false;
3567 m_chainman.ProcessNewBlock(m_chainparams, pblock, forceProcessing, &fNewBlock);
3568 if (fNewBlock) {
3569 pfrom.nLastBlockTime = GetTime();
3570 } else {
3571 LOCK(cs_main);
3572 mapBlockSource.erase(pblock->GetHash());
3573 }
3574 return;
3575 }
3576
3577 if (msg_type == NetMsgType::GETADDR) {
3578 // This asymmetric behavior for inbound and outbound connections was introduced
3579 // to prevent a fingerprinting attack: an attacker can send specific fake addresses
3580 // to users' AddrMan and later request them by sending getaddr messages.
3581 // Making nodes which are behind NAT and can only make outgoing connections ignore
3582 // the getaddr message mitigates the attack.
3583 if (!pfrom.IsInboundConn()) {
3584 LogPrint(BCLog::NET, "Ignoring \"getaddr\" from %s connection. peer=%d\n", pfrom.ConnectionTypeAsString(), pfrom.GetId());
3585 return;
3586 }
3587
3588 // Only send one GetAddr response per connection to reduce resource waste
3589 // and discourage addr stamping of INV announcements.
3590 if (pfrom.fSentAddr) {
3591 LogPrint(BCLog::NET, "Ignoring repeated \"getaddr\". peer=%d\n", pfrom.GetId());
3592 return;
3593 }
3594 pfrom.fSentAddr = true;
3595
3596 pfrom.vAddrToSend.clear();
3597 std::vector<CAddress> vAddr;
3598 if (pfrom.HasPermission(PF_ADDR)) {
3599 vAddr = m_connman.GetAddresses(MAX_ADDR_TO_SEND, MAX_PCT_ADDR_TO_SEND);
3600 } else {
3601 vAddr = m_connman.GetAddresses(pfrom, MAX_ADDR_TO_SEND, MAX_PCT_ADDR_TO_SEND);
3602 }
3603 FastRandomContext insecure_rand;
3604 for (const CAddress &addr : vAddr) {
3605 pfrom.PushAddress(addr, insecure_rand);
3606 }
3607 return;
3608 }
3609
3610 if (msg_type == NetMsgType::MEMPOOL) {
3611 if (!(pfrom.GetLocalServices() & NODE_BLOOM) && !pfrom.HasPermission(PF_MEMPOOL))
3612 {
3613 if (!pfrom.HasPermission(PF_NOBAN))
3614 {
3615 LogPrint(BCLog::NET, "mempool request with bloom filters disabled, disconnect peer=%d\n", pfrom.GetId());
3616 pfrom.fDisconnect = true;
3617 }
3618 return;
3619 }
3620
3621 if (m_connman.OutboundTargetReached(false) && !pfrom.HasPermission(PF_MEMPOOL))
3622 {
3623 if (!pfrom.HasPermission(PF_NOBAN))
3624 {
3625 LogPrint(BCLog::NET, "mempool request with bandwidth limit reached, disconnect peer=%d\n", pfrom.GetId());
3626 pfrom.fDisconnect = true;
3627 }
3628 return;
3629 }
3630
3631 if (pfrom.m_tx_relay != nullptr) {
3632 LOCK(pfrom.m_tx_relay->cs_tx_inventory);
3633 pfrom.m_tx_relay->fSendMempool = true;
3634 }
3635 return;
3636 }
3637
3638 if (msg_type == NetMsgType::PING) {
3639 if (pfrom.GetCommonVersion() > BIP0031_VERSION) {
3640 uint64_t nonce = 0;
3641 vRecv >> nonce;
3642 // Echo the message back with the nonce. This allows for two useful features:
3643 //
3644 // 1) A remote node can quickly check if the connection is operational
3645 // 2) Remote nodes can measure the latency of the network thread. If this node
3646 // is overloaded it won't respond to pings quickly and the remote node can
3647 // avoid sending us more work, like chain download requests.
3648 //
3649 // The nonce stops the remote getting confused between different pings: without
3650 // it, if the remote node sends a ping once per second and this node takes 5
3651 // seconds to respond to each, the 5th ping the remote sends would appear to
3652 // return very quickly.
3653 m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::PONG, nonce));
3654 }
3655 return;
3656 }
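// [Editor's sketch] The ping/pong exchange above is a plain nonce echo. The
// toy restatement below shows why the nonce matters: each pong can be
// attributed to exactly one outstanding ping, so overlapping pings don't
// corrupt the latency measurement. Names are hypothetical.
#include <cstdint>
#include <optional>

struct PingState {
    std::optional<uint64_t> outstanding_nonce;
};

static uint64_t HandlePing(uint64_t nonce) { return nonce; } // echo back as pong

static bool HandlePong(PingState& s, uint64_t nonce)
{
    // Only a pong matching the outstanding nonce completes the measurement.
    if (s.outstanding_nonce && *s.outstanding_nonce == nonce) {
        s.outstanding_nonce.reset();
        return true;
    }
    return false; // overlapping pings produce benign mismatches
}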
3657
3658 if (msg_type == NetMsgType::PONG) {
3659 const auto ping_end = time_received;
3660 uint64_t nonce = 0;
3661 size_t nAvail = vRecv.in_avail();
3662 bool bPingFinished = false;
3663 std::string sProblem;
3664
3665 if (nAvail >= sizeof(nonce)) {
3666 vRecv >> nonce;
3667
3668 // Only process pong message if there is an outstanding ping (old ping without nonce should never pong)
3669 if (pfrom.nPingNonceSent != 0) {
3670 if (nonce == pfrom.nPingNonceSent) {
3671 // Matching pong received, this ping is no longer outstanding
3672 bPingFinished = true;
3673 const auto ping_time = ping_end - pfrom.m_ping_start.load();
3674 if (ping_time.count() >= 0) {
3675 // Successful ping time measurement, replace previous
3676 pfrom.nPingUsecTime = count_microseconds(ping_time);
3677 pfrom.nMinPingUsecTime = std::min(pfrom.nMinPingUsecTime.load(), count_microseconds(ping_time));
3678 } else {
3679 // This should never happen
3680 sProblem = "Timing mishap";
3681 }
3682 } else {
3683 // Nonce mismatches are normal when pings are overlapping
3684 sProblem = "Nonce mismatch";
3685 if (nonce == 0) {
3686 // This is most likely a bug in another implementation somewhere; cancel this ping
3687 bPingFinished = true;
3688 sProblem = "Nonce zero";
3689 }
3690 }
3691 } else {
3692 sProblem = "Unsolicited pong without ping";
3693 }
3694 } else {
3695 // This is most likely a bug in another implementation somewhere; cancel this ping
3696 bPingFinished = true;
3697 sProblem = "Short payload";
3698 }
3699
3700 if (!(sProblem.empty())) {
3701 LogPrint(BCLog::NET, "pong peer=%d: %s, %x expected, %x received, %u bytes\n",
3702 pfrom.GetId(),
3703 sProblem,
3704 pfrom.nPingNonceSent,
3705 nonce,
3706 nAvail);
3707 }
3708 if (bPingFinished) {
3709 pfrom.nPingNonceSent = 0;
3710 }
3711 return;
3712 }
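// [Editor's sketch] On a matching pong, the handler above stores the most
// recent round-trip time and keeps a running minimum, the best estimate of
// the link's latency floor. A sketch under that assumption; the struct and
// function names are hypothetical.
#include <algorithm>
#include <cstdint>

struct PingTimes {
    int64_t last_usec{0};
    int64_t min_usec{INT64_MAX};
};

static void UpdatePingTimes(PingTimes& t, int64_t sample_usec)
{
    if (sample_usec < 0) return; // "Timing mishap": ignore impossible samples
    t.last_usec = sample_usec;
    t.min_usec = std::min(t.min_usec, sample_usec);
}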
3713
3714 if (msg_type == NetMsgType::FILTERLOAD) {
3715 if (!(pfrom.GetLocalServices() & NODE_BLOOM)) {
3716 pfrom.fDisconnect = true;
3717 return;
3718 }
3719 CBloomFilter filter;
3720 vRecv >> filter;
3721
3722 if (!filter.IsWithinSizeConstraints())
3723 {
3724 // There is no excuse for sending a too-large filter
3725 Misbehaving(pfrom.GetId(), 100, "too-large bloom filter");
3726 }
3727 else if (pfrom.m_tx_relay != nullptr)
3728 {
3729 LOCK(pfrom.m_tx_relay->cs_filter);
3730 pfrom.m_tx_relay->pfilter.reset(new CBloomFilter(filter));
3731 pfrom.m_tx_relay->fRelayTxes = true;
3732 }
3733 return;
3734 }
3735
3736 if (msg_type == NetMsgType::FILTERADD) {
3737 if (!(pfrom.GetLocalServices() & NODE_BLOOM)) {
3738 pfrom.fDisconnect = true;
3739 return;
3740 }
3741 std::vector<unsigned char> vData;
3742 vRecv >> vData;
3743
3744 // Nodes must NEVER send a data item > 520 bytes (the max size for a script data object,
3745 // and thus, the maximum size any matched object can have) in a filteradd message
3746 bool bad = false;
3747 if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) {
3748 bad = true;
3749 } else if (pfrom.m_tx_relay != nullptr) {
3750 LOCK(pfrom.m_tx_relay->cs_filter);
3751 if (pfrom.m_tx_relay->pfilter) {
3752 pfrom.m_tx_relay->pfilter->insert(vData);
3753 } else {
3754 bad = true;
3755 }
3756 }
3757 if (bad) {
3758 Misbehaving(pfrom.GetId(), 100, "bad filteradd message");
3759 }
3760 return;
3761 }
3762
3763 if (msg_type == NetMsgType::FILTERCLEAR) {
3764 if (!(pfrom.GetLocalServices() & NODE_BLOOM)) {
3765 pfrom.fDisconnect = true;
3766 return;
3767 }
3768 if (pfrom.m_tx_relay == nullptr) {
3769 return;
3770 }
3771 LOCK(pfrom.m_tx_relay->cs_filter);
3772 pfrom.m_tx_relay->pfilter = nullptr;
3773 pfrom.m_tx_relay->fRelayTxes = true;
3774 return;
3775 }
3776
3777 if (msg_type == NetMsgType::FEEFILTER) {
3778 CAmount newFeeFilter = 0;
3779 vRecv >> newFeeFilter;
3780 if (MoneyRange(newFeeFilter)) {
3781 if (pfrom.m_tx_relay != nullptr) {
3782 LOCK(pfrom.m_tx_relay->cs_feeFilter);
3783 pfrom.m_tx_relay->minFeeFilter = newFeeFilter;
3784 }
3785 LogPrint(BCLog::NET, "received: feefilter of %s from peer=%d\n", CFeeRate(newFeeFilter).ToString(), pfrom.GetId());
3786 }
3787 return;
3788 }
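// [Editor's sketch] feefilter above only records the peer's minimum fee rate;
// the send path (not shown in this section) then skips announcing any
// transaction whose fee rate falls below it. A one-line model of that test;
// the function name is hypothetical.
#include <cstdint>

static bool ShouldAnnounceTx(int64_t tx_feerate_per_kb, int64_t peer_min_fee_filter)
{
    return tx_feerate_per_kb >= peer_min_fee_filter;
}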
3789
3790 if (msg_type == NetMsgType::GETCFILTERS) {
3791 ProcessGetCFilters(pfrom, vRecv, m_chainparams, m_connman);
3792 return;
3793 }
3794
3795 if (msg_type == NetMsgType::GETCFHEADERS) {
3796 ProcessGetCFHeaders(pfrom, vRecv, m_chainparams, m_connman);
3797 return;
3798 }
3799
3800 if (msg_type == NetMsgType::GETCFCHECKPT) {
3801 ProcessGetCFCheckPt(pfrom, vRecv, m_chainparams, m_connman);
3802 return;
3803 }
3804
3805 if (msg_type == NetMsgType::NOTFOUND) {
3806 std::vector<CInv> vInv;
3807 vRecv >> vInv;
3808 if (vInv.size() <= MAX_PEER_TX_ANNOUNCEMENTS + MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
3809 LOCK(::cs_main);
3810 for (CInv &inv : vInv) {
3811 if (inv.IsGenTxMsg()) {
3812 // If we receive a NOTFOUND message for a tx we requested, mark the announcement for it as
3813 // completed in TxRequestTracker.
3814 m_txrequest.ReceivedResponse(pfrom.GetId(), inv.hash);
3815 }
3816 }
3817 }
3818 return;
3819 }
3820
3821 // Ignore unknown commands for extensibility
3822 LogPrint(BCLog::NET, "Unknown command \"%s\" from peer=%d\n", SanitizeString(msg_type), pfrom.GetId());
3823 return;
3824 }
3825
3826 bool PeerManager::MaybeDiscourageAndDisconnect(CNode& pnode)
3827 {
3828 const NodeId peer_id{pnode.GetId()};
3829 PeerRef peer = GetPeerRef(peer_id);
3830 if (peer == nullptr) return false;
3831
3832 {
3833 LOCK(peer->m_misbehavior_mutex);
3834
3835 // There's nothing to do if the m_should_discourage flag isn't set
3836 if (!peer->m_should_discourage) return false;
3837
3838 peer->m_should_discourage = false;
3839 } // peer.m_misbehavior_mutex
3840
3841 if (pnode.HasPermission(PF_NOBAN)) {
3842 // We never disconnect or discourage peers for bad behavior if they have the NOBAN permission flag
3843 LogPrintf("Warning: not punishing noban peer %d!\n", peer_id);
3844 return false;
3845 }
3846
3847 if (pnode.IsManualConn()) {
3848 // We never disconnect or discourage manual peers for bad behavior
3849 LogPrintf("Warning: not punishing manually connected peer %d!\n", peer_id);
3850 return false;
3851 }
3852
3853 if (pnode.addr.IsLocal()) {
3854 // We disconnect local peers for bad behavior but don't discourage (since that would discourage
3855 // all peers on the same local address)
3856 LogPrintf("Warning: disconnecting but not discouraging local peer %d!\n", peer_id);
3857 pnode.fDisconnect = true;
3858 return true;
3859 }
3860
3861 // Normal case: Disconnect the peer and discourage all nodes sharing the address
3862 LogPrintf("Disconnecting and discouraging peer %d!\n", peer_id);
3863 if (m_banman) m_banman->Discourage(pnode.addr);
3864 m_connman.DisconnectNode(pnode.addr);
3865 return true;
3866 }
3867
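/** Dequeue and dispatch at most one queued network message for this peer.
 *  A rough sketch of the flow below: service any pending getdata requests and
 *  orphan work first, then splice a single message off vProcessMsg and hand
 *  it to ProcessMessage(). Returns true if more work remains for this peer.
 */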
bool PeerManager::ProcessMessages(CNode* pfrom, std::atomic<bool>& interruptMsgProc)
{
    bool fMoreWork = false;

    PeerRef peer = GetPeerRef(pfrom->GetId());
    if (peer == nullptr) return false;

    {
        LOCK(peer->m_getdata_requests_mutex);
        if (!peer->m_getdata_requests.empty()) {
            ProcessGetData(*pfrom, *peer, m_chainparams, m_connman, m_mempool, interruptMsgProc);
        }
    }

    {
        LOCK2(cs_main, g_cs_orphans);
        if (!peer->m_orphan_work_set.empty()) {
            ProcessOrphanTx(peer->m_orphan_work_set);
        }
    }

    if (pfrom->fDisconnect)
        return false;

    // this maintains the order of responses
    // and prevents m_getdata_requests from growing unbounded
    {
        LOCK(peer->m_getdata_requests_mutex);
        if (!peer->m_getdata_requests.empty()) return true;
    }

    {
        LOCK(g_cs_orphans);
        if (!peer->m_orphan_work_set.empty()) return true;
    }

    // Don't bother if send buffer is too full to respond anyway
    if (pfrom->fPauseSend)
        return false;

    std::list<CNetMessage> msgs;
    {
        LOCK(pfrom->cs_vProcessMsg);
        if (pfrom->vProcessMsg.empty())
            return false;
        // Just take one message
        msgs.splice(msgs.begin(), pfrom->vProcessMsg, pfrom->vProcessMsg.begin());
        pfrom->nProcessQueueSize -= msgs.front().m_raw_message_size;
        pfrom->fPauseRecv = pfrom->nProcessQueueSize > m_connman.GetReceiveFloodSize();
        fMoreWork = !pfrom->vProcessMsg.empty();
    }
    CNetMessage& msg(msgs.front());

    msg.SetVersion(pfrom->GetCommonVersion());
    const std::string& msg_type = msg.m_command;

    // Message size
    unsigned int nMessageSize = msg.m_message_size;

    try {
        ProcessMessage(*pfrom, msg_type, msg.m_recv, msg.m_time, interruptMsgProc);
        if (interruptMsgProc) return false;
        {
            LOCK(peer->m_getdata_requests_mutex);
            if (!peer->m_getdata_requests.empty()) fMoreWork = true;
        }
    } catch (const std::exception& e) {
        LogPrint(BCLog::NET, "%s(%s, %u bytes): Exception '%s' (%s) caught\n", __func__, SanitizeString(msg_type), nMessageSize, e.what(), typeid(e).name());
    } catch (...) {
        LogPrint(BCLog::NET, "%s(%s, %u bytes): Unknown exception caught\n", __func__, SanitizeString(msg_type), nMessageSize);
    }

    return fMoreWork;
}

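/** Outbound peer anti-DoS check, called from SendMessages() with cs_main
 *  held. Roughly: if an unprotected outbound peer's best known block has less
 *  work than our tip, start a CHAIN_SYNC_TIMEOUT countdown; when it expires,
 *  send one final getheaders, and disconnect if the peer still fails to
 *  demonstrate sufficient chain work within HEADERS_RESPONSE_TIME.
 */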
void PeerManager::ConsiderEviction(CNode& pto, int64_t time_in_seconds)
{
    AssertLockHeld(cs_main);

    CNodeState &state = *State(pto.GetId());
    const CNetMsgMaker msgMaker(pto.GetCommonVersion());

    if (!state.m_chain_sync.m_protect && pto.IsOutboundOrBlockRelayConn() && state.fSyncStarted) {
        // This is an outbound peer subject to disconnection if they don't
        // announce a block with as much work as the current tip within
        // CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds (note: if
        // their chain has more work than ours, we should sync to it,
        // unless it's invalid, in which case we should find that out and
        // disconnect from them elsewhere).
        if (state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= ::ChainActive().Tip()->nChainWork) {
            if (state.m_chain_sync.m_timeout != 0) {
                state.m_chain_sync.m_timeout = 0;
                state.m_chain_sync.m_work_header = nullptr;
                state.m_chain_sync.m_sent_getheaders = false;
            }
        } else if (state.m_chain_sync.m_timeout == 0 || (state.m_chain_sync.m_work_header != nullptr && state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= state.m_chain_sync.m_work_header->nChainWork)) {
            // Our best block known by this peer is behind our tip, and we're either noticing
            // that for the first time, OR this peer was able to catch up to some earlier point
            // where we checked against our tip.
            // Either way, set a new timeout based on current tip.
            state.m_chain_sync.m_timeout = time_in_seconds + CHAIN_SYNC_TIMEOUT;
            state.m_chain_sync.m_work_header = ::ChainActive().Tip();
            state.m_chain_sync.m_sent_getheaders = false;
        } else if (state.m_chain_sync.m_timeout > 0 && time_in_seconds > state.m_chain_sync.m_timeout) {
            // No evidence yet that our peer has synced to a chain with work equal to that
            // of our tip, when we first detected it was behind. Send a single getheaders
            // message to give the peer a chance to update us.
            if (state.m_chain_sync.m_sent_getheaders) {
                // They've run out of time to catch up!
                LogPrintf("Disconnecting outbound peer %d for old chain, best known block = %s\n", pto.GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>");
                pto.fDisconnect = true;
            } else {
                assert(state.m_chain_sync.m_work_header);
                LogPrint(BCLog::NET, "sending getheaders to outbound peer=%d to verify chain work (current best known block:%s, benchmark blockhash: %s)\n", pto.GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>", state.m_chain_sync.m_work_header->GetBlockHash().ToString());
                m_connman.PushMessage(&pto, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(state.m_chain_sync.m_work_header->pprev), uint256()));
                state.m_chain_sync.m_sent_getheaders = true;
                constexpr int64_t HEADERS_RESPONSE_TIME = 120; // 2 minutes
                // Bump the timeout to allow a response, which could clear the timeout
                // (if the response shows the peer has synced), reset the timeout (if
                // the peer syncs to the required work but not to our tip), or result
                // in disconnect (if we advance to the timeout and pindexBestKnownBlock
                // has not sufficiently progressed)
                state.m_chain_sync.m_timeout = time_in_seconds + HEADERS_RESPONSE_TIME;
            }
        }
    }
}

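/** If we have more outbound connections than our target, pick the full-relay
 *  outbound peer that least recently announced a new block to us (ties broken
 *  toward the higher node id) and disconnect it, provided it has been
 *  connected longer than MINIMUM_CONNECT_TIME and has no blocks in flight.
 */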
void PeerManager::EvictExtraOutboundPeers(int64_t time_in_seconds)
{
    // Check whether we have too many outbound peers
    int extra_peers = m_connman.GetExtraOutboundCount();
    if (extra_peers > 0) {
        // If we have more outbound peers than we target, disconnect one.
        // Pick the outbound peer that least recently announced
        // us a new block, with ties broken by choosing the more recent
        // connection (higher node id)
        NodeId worst_peer = -1;
        int64_t oldest_block_announcement = std::numeric_limits<int64_t>::max();

        m_connman.ForEachNode([&](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
            AssertLockHeld(::cs_main);

            // Ignore non-outbound peers, or nodes marked for disconnect already
            if (!pnode->IsOutboundOrBlockRelayConn() || pnode->fDisconnect) return;
            CNodeState *state = State(pnode->GetId());
            if (state == nullptr) return; // shouldn't be possible, but just in case
            // Don't evict our protected peers
            if (state->m_chain_sync.m_protect) return;
            // Don't evict our block-relay-only peers.
            if (pnode->m_tx_relay == nullptr) return;
            if (state->m_last_block_announcement < oldest_block_announcement || (state->m_last_block_announcement == oldest_block_announcement && pnode->GetId() > worst_peer)) {
                worst_peer = pnode->GetId();
                oldest_block_announcement = state->m_last_block_announcement;
            }
        });
        if (worst_peer != -1) {
            bool disconnected = m_connman.ForNode(worst_peer, [&](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
                AssertLockHeld(::cs_main);

                // Only disconnect a peer that has been connected to us for
                // some reasonable fraction of our check-frequency, to give
                // it time for new information to have arrived.
                // Also don't disconnect any peer we're trying to download a
                // block from.
                CNodeState &state = *State(pnode->GetId());
                if (time_in_seconds - pnode->nTimeConnected > MINIMUM_CONNECT_TIME && state.nBlocksInFlight == 0) {
                    LogPrint(BCLog::NET, "disconnecting extra outbound peer=%d (last block announcement received at time %d)\n", pnode->GetId(), oldest_block_announcement);
                    pnode->fDisconnect = true;
                    return true;
                } else {
                    LogPrint(BCLog::NET, "keeping outbound peer=%d chosen for eviction (connect time: %d, blocks_in_flight: %d)\n", pnode->GetId(), pnode->nTimeConnected, state.nBlocksInFlight);
                    return false;
                }
            });
            if (disconnected) {
                // If we disconnected an extra peer, that means we successfully
                // connected to at least one peer after the last time we
                // detected a stale tip. Don't try any more extra peers until
                // we next detect a stale tip, to limit the load we put on the
                // network from these extra connections.
                m_connman.SetTryNewOutboundPeer(false);
            }
        }
    }
}

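/** Scheduler callback: evict extra outbound peers, and, at most once per
 *  STALE_CHECK_INTERVAL, check whether our tip looks stale; if it does, allow
 *  the connection manager to try one additional outbound peer.
 */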
void PeerManager::CheckForStaleTipAndEvictPeers()
{
    LOCK(cs_main);

    int64_t time_in_seconds = GetTime();

    EvictExtraOutboundPeers(time_in_seconds);

    if (time_in_seconds > m_stale_tip_check_time) {
        // Check whether our tip is stale, and if so, allow using an extra
        // outbound peer
        if (!fImporting && !fReindex && m_connman.GetNetworkActive() && m_connman.GetUseAddrmanOutgoing() && TipMayBeStale(m_chainparams.GetConsensus())) {
            LogPrintf("Potential stale tip detected, will try using extra outbound peer (last tip update: %d seconds ago)\n", time_in_seconds - g_last_tip_update);
            m_connman.SetTryNewOutboundPeer(true);
        } else if (m_connman.GetTryNewOutboundPeer()) {
            m_connman.SetTryNewOutboundPeer(false);
        }
        m_stale_tip_check_time = time_in_seconds + STALE_CHECK_INTERVAL;
    }
}

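/** Comparator for ordering the transaction inventory we are about to
 *  announce. A sketch of how it is used in SendMessages() below: the
 *  candidate iterators are arranged into a max-heap so the most attractive
 *  announcement (fewest ancestors, highest fee rate) can be popped first
 *  without fully sorting the set:
 *
 *      std::make_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
 *      std::pop_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
 *      std::set<uint256>::iterator it = vInvTx.back(); // best remaining entry
 */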
namespace {
class CompareInvMempoolOrder
{
    CTxMemPool *mp;
    bool m_wtxid_relay;
public:
    explicit CompareInvMempoolOrder(CTxMemPool *_mempool, bool use_wtxid)
    {
        mp = _mempool;
        m_wtxid_relay = use_wtxid;
    }

    bool operator()(std::set<uint256>::iterator a, std::set<uint256>::iterator b)
    {
        /* As std::make_heap produces a max-heap, we want the entries with the
         * fewest ancestors/highest fee to sort later. */
        return mp->CompareDepthAndScore(*b, *a, m_wtxid_relay);
    }
};
}

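/** Top-level per-peer send routine, run periodically by the message handler
 *  thread. In rough order it covers: discouragement/disconnection, pings,
 *  address relay, kicking off headers sync, block announcements (headers,
 *  compact block, or inv), transaction inventory, stall and timeout checks,
 *  getdata requests for blocks and transactions, and feefilter updates.
 */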
bool PeerManager::SendMessages(CNode* pto)
{
    const Consensus::Params& consensusParams = m_chainparams.GetConsensus();

    // We must call MaybeDiscourageAndDisconnect first, to ensure that we'll
    // disconnect misbehaving peers even before the version handshake is complete.
    if (MaybeDiscourageAndDisconnect(*pto)) return true;

    // Don't send anything until the version handshake is complete
    if (!pto->fSuccessfullyConnected || pto->fDisconnect)
        return true;

    // If we get here, the outgoing message serialization version is set and can't change.
    const CNetMsgMaker msgMaker(pto->GetCommonVersion());

    //
    // Message: ping
    //
    bool pingSend = false;
    if (pto->fPingQueued) {
        // RPC ping request by user
        pingSend = true;
    }
    if (pto->nPingNonceSent == 0 && pto->m_ping_start.load() + PING_INTERVAL < GetTime<std::chrono::microseconds>()) {
        // Ping automatically sent as a latency probe & keepalive.
        pingSend = true;
    }
    if (pingSend) {
        uint64_t nonce = 0;
        while (nonce == 0) {
            GetRandBytes((unsigned char*)&nonce, sizeof(nonce));
        }
        pto->fPingQueued = false;
        pto->m_ping_start = GetTime<std::chrono::microseconds>();
        if (pto->GetCommonVersion() > BIP0031_VERSION) {
            pto->nPingNonceSent = nonce;
            m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::PING, nonce));
        } else {
            // Peer is too old to support ping command with nonce, pong will never arrive.
            pto->nPingNonceSent = 0;
            m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::PING));
        }
    }

    {
        LOCK(cs_main);

        CNodeState &state = *State(pto->GetId());

        // Address refresh broadcast
        auto current_time = GetTime<std::chrono::microseconds>();

        if (pto->RelayAddrsWithConn() && !::ChainstateActive().IsInitialBlockDownload() && pto->m_next_local_addr_send < current_time) {
            AdvertiseLocal(pto);
            pto->m_next_local_addr_send = PoissonNextSend(current_time, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
        }

        //
        // Message: addr
        //
        if (pto->RelayAddrsWithConn() && pto->m_next_addr_send < current_time) {
            pto->m_next_addr_send = PoissonNextSend(current_time, AVG_ADDRESS_BROADCAST_INTERVAL);
            std::vector<CAddress> vAddr;
            vAddr.reserve(pto->vAddrToSend.size());
            assert(pto->m_addr_known);

            const char* msg_type;
            int make_flags;
            if (pto->m_wants_addrv2) {
                msg_type = NetMsgType::ADDRV2;
                make_flags = ADDRV2_FORMAT;
            } else {
                msg_type = NetMsgType::ADDR;
                make_flags = 0;
            }

            for (const CAddress& addr : pto->vAddrToSend)
            {
                if (!pto->m_addr_known->contains(addr.GetKey()))
                {
                    pto->m_addr_known->insert(addr.GetKey());
                    vAddr.push_back(addr);
                    // receiver rejects addr messages larger than MAX_ADDR_TO_SEND
                    if (vAddr.size() >= MAX_ADDR_TO_SEND)
                    {
                        m_connman.PushMessage(pto, msgMaker.Make(make_flags, msg_type, vAddr));
                        vAddr.clear();
                    }
                }
            }
            pto->vAddrToSend.clear();
            if (!vAddr.empty())
                m_connman.PushMessage(pto, msgMaker.Make(make_flags, msg_type, vAddr));
            // we only send the big addr message once
            if (pto->vAddrToSend.capacity() > 40)
                pto->vAddrToSend.shrink_to_fit();
        }

        // Start block sync
        if (pindexBestHeader == nullptr)
            pindexBestHeader = ::ChainActive().Tip();
        bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->IsAddrFetchConn()); // Download if this is a nice peer, or we have no nice peers and this one might do.
        if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) {
            // Only actively request headers from a single peer, unless we're close to today.
            if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) {
                state.fSyncStarted = true;
                state.nHeadersSyncTimeout = count_microseconds(current_time) + HEADERS_DOWNLOAD_TIMEOUT_BASE + HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER * (GetAdjustedTime() - pindexBestHeader->GetBlockTime())/(consensusParams.nPowTargetSpacing);
                nSyncStarted++;
                const CBlockIndex *pindexStart = pindexBestHeader;
                /* If possible, start at the block preceding the currently
                   best known header. This ensures that we always get a
                   non-empty list of headers back as long as the peer
                   is up-to-date. With a non-empty response, we can initialise
                   the peer's known best block. This wouldn't be possible
                   if we requested starting at pindexBestHeader and
                   got back an empty response. */
                if (pindexStart->pprev)
                    pindexStart = pindexStart->pprev;
                LogPrint(BCLog::NET, "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->GetId(), pto->nStartingHeight);
                m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexStart), uint256()));
            }
        }

        //
        // Try sending block announcements via headers
        //
        {
            // If we have less than MAX_BLOCKS_TO_ANNOUNCE in our
            // list of block hashes we're relaying, and our peer wants
            // headers announcements, then find the first header
            // not yet known to our peer but would connect, and send.
            // If no header would connect, or if we have too many
            // blocks, or if the peer doesn't want headers, just
            // add all to the inv queue.
            LOCK(pto->cs_inventory);
            std::vector<CBlock> vHeaders;
            bool fRevertToInv = ((!state.fPreferHeaders &&
                                 (!state.fPreferHeaderAndIDs || pto->vBlockHashesToAnnounce.size() > 1)) ||
                                 pto->vBlockHashesToAnnounce.size() > MAX_BLOCKS_TO_ANNOUNCE);
            const CBlockIndex *pBestIndex = nullptr; // last header queued for delivery
            ProcessBlockAvailability(pto->GetId()); // ensure pindexBestKnownBlock is up-to-date

            if (!fRevertToInv) {
                bool fFoundStartingHeader = false;
                // Try to find first header that our peer doesn't have, and
                // then send all headers past that one. If we come across any
                // headers that aren't on ::ChainActive(), give up.
                for (const uint256 &hash : pto->vBlockHashesToAnnounce) {
                    const CBlockIndex* pindex = LookupBlockIndex(hash);
                    assert(pindex);
                    if (::ChainActive()[pindex->nHeight] != pindex) {
                        // Bail out if we reorged away from this block
                        fRevertToInv = true;
                        break;
                    }
                    if (pBestIndex != nullptr && pindex->pprev != pBestIndex) {
                        // This means that the blocks we are announcing don't
                        // connect to each other.
                        // This shouldn't really be possible to hit during
                        // regular operation (because reorgs should take us to
                        // a chain that has some block not on the prior chain,
                        // which should be caught by the prior check), but one
                        // way this could happen is by using invalidateblock /
                        // reconsiderblock repeatedly on the tip, causing it to
                        // be added multiple times to vBlockHashesToAnnounce.
                        // Robustly deal with this rare situation by reverting
                        // to an inv.
                        fRevertToInv = true;
                        break;
                    }
                    pBestIndex = pindex;
                    if (fFoundStartingHeader) {
                        // add this to the headers message
                        vHeaders.push_back(pindex->GetBlockHeader(consensusParams));
                    } else if (PeerHasHeader(&state, pindex)) {
                        continue; // keep looking for the first new block
                    } else if (pindex->pprev == nullptr || PeerHasHeader(&state, pindex->pprev)) {
                        // Peer doesn't have this header but they do have the prior one.
                        // Start sending headers.
                        fFoundStartingHeader = true;
                        vHeaders.push_back(pindex->GetBlockHeader(consensusParams));
                    } else {
                        // Peer doesn't have this header or the prior one -- nothing will
                        // connect, so bail out.
                        fRevertToInv = true;
                        break;
                    }
                }
            }
            if (!fRevertToInv && !vHeaders.empty()) {
                if (vHeaders.size() == 1 && state.fPreferHeaderAndIDs) {
                    // We only send up to 1 block as header-and-ids, as otherwise
                    // probably means we're doing an initial-ish-sync or they're slow
                    LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", __func__,
                            vHeaders.front().GetHash().ToString(), pto->GetId());

                    int nSendFlags = state.fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;

                    bool fGotBlockFromCache = false;
                    {
                        LOCK(cs_most_recent_block);
                        if (most_recent_block_hash == pBestIndex->GetBlockHash()) {
                            if (state.fWantsCmpctWitness || !fWitnessesPresentInMostRecentCompactBlock)
                                m_connman.PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, *most_recent_compact_block));
                            else {
                                CBlockHeaderAndShortTxIDs cmpctblock(*most_recent_block, state.fWantsCmpctWitness);
                                m_connman.PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
                            }
                            fGotBlockFromCache = true;
                        }
                    }
                    if (!fGotBlockFromCache) {
                        CBlock block;
                        bool ret = ReadBlockFromDisk(block, pBestIndex, consensusParams);
                        assert(ret);
                        CBlockHeaderAndShortTxIDs cmpctblock(block, state.fWantsCmpctWitness);
                        m_connman.PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
                    }
                    state.pindexBestHeaderSent = pBestIndex;
                } else if (state.fPreferHeaders) {
                    if (vHeaders.size() > 1) {
                        LogPrint(BCLog::NET, "%s: %u headers, range (%s, %s), to peer=%d\n", __func__,
                                vHeaders.size(),
                                vHeaders.front().GetHash().ToString(),
                                vHeaders.back().GetHash().ToString(), pto->GetId());
                    } else {
                        LogPrint(BCLog::NET, "%s: sending header %s to peer=%d\n", __func__,
                                vHeaders.front().GetHash().ToString(), pto->GetId());
                    }
                    m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
                    state.pindexBestHeaderSent = pBestIndex;
                } else
                    fRevertToInv = true;
            }
            if (fRevertToInv) {
                // If falling back to using an inv, just try to inv the tip.
                // The last entry in vBlockHashesToAnnounce was our tip at some point
                // in the past.
                if (!pto->vBlockHashesToAnnounce.empty()) {
                    const uint256 &hashToAnnounce = pto->vBlockHashesToAnnounce.back();
                    const CBlockIndex* pindex = LookupBlockIndex(hashToAnnounce);
                    assert(pindex);

                    // Warn if we're announcing a block that is not on the main chain.
                    // This should be very rare and could be optimized out.
                    // Just log for now.
                    if (::ChainActive()[pindex->nHeight] != pindex) {
                        LogPrint(BCLog::NET, "Announcing block %s not on main chain (tip=%s)\n",
                            hashToAnnounce.ToString(), ::ChainActive().Tip()->GetBlockHash().ToString());
                    }

                    // If the peer's chain has this block, don't inv it back.
                    if (!PeerHasHeader(&state, pindex)) {
                        pto->vInventoryBlockToSend.push_back(hashToAnnounce);
                        LogPrint(BCLog::NET, "%s: sending inv peer=%d hash=%s\n", __func__,
                            pto->GetId(), hashToAnnounce.ToString());
                    }
                }
            }
            pto->vBlockHashesToAnnounce.clear();
        }

        //
        // Message: inventory
        //
        std::vector<CInv> vInv;
        {
            LOCK(pto->cs_inventory);
            vInv.reserve(std::max<size_t>(pto->vInventoryBlockToSend.size(), INVENTORY_BROADCAST_MAX));

            // Add blocks
            for (const uint256& hash : pto->vInventoryBlockToSend) {
                vInv.push_back(CInv(MSG_BLOCK, hash));
                if (vInv.size() == MAX_INV_SZ) {
                    m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
                    vInv.clear();
                }
            }
            pto->vInventoryBlockToSend.clear();

            if (pto->m_tx_relay != nullptr) {
                LOCK(pto->m_tx_relay->cs_tx_inventory);
                // Check whether periodic sends should happen
                bool fSendTrickle = pto->HasPermission(PF_NOBAN);
                if (pto->m_tx_relay->nNextInvSend < current_time) {
                    fSendTrickle = true;
                    if (pto->IsInboundConn()) {
                        pto->m_tx_relay->nNextInvSend = std::chrono::microseconds{m_connman.PoissonNextSendInbound(count_microseconds(current_time), INVENTORY_BROADCAST_INTERVAL)};
                    } else {
                        // Use half the delay for outbound peers, as there is less privacy concern for them.
                        pto->m_tx_relay->nNextInvSend = PoissonNextSend(current_time, std::chrono::seconds{INVENTORY_BROADCAST_INTERVAL >> 1});
                    }
                }

                // Time to send but the peer has requested we not relay transactions.
                if (fSendTrickle) {
                    LOCK(pto->m_tx_relay->cs_filter);
                    if (!pto->m_tx_relay->fRelayTxes) pto->m_tx_relay->setInventoryTxToSend.clear();
                }

                // Respond to BIP35 mempool requests
                if (fSendTrickle && pto->m_tx_relay->fSendMempool) {
                    auto vtxinfo = m_mempool.infoAll();
                    pto->m_tx_relay->fSendMempool = false;
                    CFeeRate filterrate;
                    {
                        LOCK(pto->m_tx_relay->cs_feeFilter);
                        filterrate = CFeeRate(pto->m_tx_relay->minFeeFilter);
                    }

                    LOCK(pto->m_tx_relay->cs_filter);

                    for (const auto& txinfo : vtxinfo) {
                        const uint256& hash = state.m_wtxid_relay ? txinfo.tx->GetWitnessHash() : txinfo.tx->GetHash();
                        CInv inv(state.m_wtxid_relay ? MSG_WTX : MSG_TX, hash);
                        pto->m_tx_relay->setInventoryTxToSend.erase(hash);
                        // Don't send transactions that peers will not put into their mempool
                        if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
                            continue;
                        }
                        if (pto->m_tx_relay->pfilter) {
                            if (!pto->m_tx_relay->pfilter->IsRelevantAndUpdate(*txinfo.tx)) continue;
                        }
                        pto->m_tx_relay->filterInventoryKnown.insert(hash);
                        // Responses to MEMPOOL requests bypass the m_recently_announced_invs filter.
                        vInv.push_back(inv);
                        if (vInv.size() == MAX_INV_SZ) {
                            m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
                            vInv.clear();
                        }
                    }
                    pto->m_tx_relay->m_last_mempool_req = GetTime<std::chrono::seconds>();
                }

                // Determine transactions to relay
                if (fSendTrickle) {
                    // Produce a vector with all candidates for sending
                    std::vector<std::set<uint256>::iterator> vInvTx;
                    vInvTx.reserve(pto->m_tx_relay->setInventoryTxToSend.size());
                    for (std::set<uint256>::iterator it = pto->m_tx_relay->setInventoryTxToSend.begin(); it != pto->m_tx_relay->setInventoryTxToSend.end(); it++) {
                        vInvTx.push_back(it);
                    }
                    CFeeRate filterrate;
                    {
                        LOCK(pto->m_tx_relay->cs_feeFilter);
                        filterrate = CFeeRate(pto->m_tx_relay->minFeeFilter);
                    }
                    // Topologically and fee-rate sort the inventory we send for privacy and priority reasons.
                    // A heap is used so that not all items need sorting if only a few are being sent.
                    CompareInvMempoolOrder compareInvMempoolOrder(&m_mempool, state.m_wtxid_relay);
                    std::make_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
                    // No reason to drain out at many times the network's capacity,
                    // especially since we have many peers and some will draw much shorter delays.
                    unsigned int nRelayedTransactions = 0;
                    LOCK(pto->m_tx_relay->cs_filter);
                    while (!vInvTx.empty() && nRelayedTransactions < INVENTORY_BROADCAST_MAX) {
                        // Fetch the top element from the heap
                        std::pop_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
                        std::set<uint256>::iterator it = vInvTx.back();
                        vInvTx.pop_back();
                        uint256 hash = *it;
                        CInv inv(state.m_wtxid_relay ? MSG_WTX : MSG_TX, hash);
                        // Remove it from the to-be-sent set
                        pto->m_tx_relay->setInventoryTxToSend.erase(it);
                        // Check if not in the filter already
                        if (pto->m_tx_relay->filterInventoryKnown.contains(hash)) {
                            continue;
                        }
                        // Not in the mempool anymore? don't bother sending it.
                        auto txinfo = m_mempool.info(ToGenTxid(inv));
                        if (!txinfo.tx) {
                            continue;
                        }
                        auto txid = txinfo.tx->GetHash();
                        auto wtxid = txinfo.tx->GetWitnessHash();
                        // Peer told you to not send transactions at that feerate? Don't bother sending it.
                        if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
                            continue;
                        }
                        if (pto->m_tx_relay->pfilter && !pto->m_tx_relay->pfilter->IsRelevantAndUpdate(*txinfo.tx)) continue;
                        // Send
                        State(pto->GetId())->m_recently_announced_invs.insert(hash);
                        vInv.push_back(inv);
                        nRelayedTransactions++;
                        {
                            // Expire old relay messages
                            while (!vRelayExpiration.empty() && vRelayExpiration.front().first < count_microseconds(current_time))
                            {
                                mapRelay.erase(vRelayExpiration.front().second);
                                vRelayExpiration.pop_front();
                            }

                            auto ret = mapRelay.emplace(txid, std::move(txinfo.tx));
                            if (ret.second) {
                                vRelayExpiration.emplace_back(count_microseconds(current_time + std::chrono::microseconds{RELAY_TX_CACHE_TIME}), ret.first);
                            }
                            // Add wtxid-based lookup into mapRelay as well, so that peers can request by wtxid
                            auto ret2 = mapRelay.emplace(wtxid, ret.first->second);
                            if (ret2.second) {
                                vRelayExpiration.emplace_back(count_microseconds(current_time + std::chrono::microseconds{RELAY_TX_CACHE_TIME}), ret2.first);
                            }
                        }
                        if (vInv.size() == MAX_INV_SZ) {
                            m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
                            vInv.clear();
                        }
                        pto->m_tx_relay->filterInventoryKnown.insert(hash);
                        if (hash != txid) {
                            // Insert txid into filterInventoryKnown, even for
                            // wtxidrelay peers. This prevents re-adding of
                            // unconfirmed parents to the recently_announced
                            // filter, when a child tx is requested. See
                            // ProcessGetData().
                            pto->m_tx_relay->filterInventoryKnown.insert(txid);
                        }
                    }
                }
            }
        }
        if (!vInv.empty())
            m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));

        // Detect whether we're stalling
        current_time = GetTime<std::chrono::microseconds>();
        if (state.nStallingSince && state.nStallingSince < count_microseconds(current_time) - 1000000 * BLOCK_STALLING_TIMEOUT) {
            // Stalling only triggers when the block download window cannot move. During normal steady state,
            // the download window should be much larger than the to-be-downloaded set of blocks, so disconnection
            // should only happen during initial block download.
            LogPrintf("Peer=%d is stalling block download, disconnecting\n", pto->GetId());
            pto->fDisconnect = true;
            return true;
        }
        // In case there is a block that has been in flight from this peer for 2 + 0.5 * N times the block interval
        // (with N the number of peers from which we're downloading validated blocks), disconnect due to timeout.
        // We compensate for other peers to prevent killing off peers due to our own downstream link
        // being saturated. We only count validated in-flight blocks so peers can't advertise non-existing block hashes
        // to unreasonably increase our timeout.
        if (state.vBlocksInFlight.size() > 0) {
            QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
            int nOtherPeersWithValidatedDownloads = nPeersWithValidatedDownloads - (state.nBlocksInFlightValidHeaders > 0);
            if (count_microseconds(current_time) > state.nDownloadingSince + consensusParams.nPowTargetSpacing * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) {
                LogPrintf("Timeout downloading block %s from peer=%d, disconnecting\n", queuedBlock.hash.ToString(), pto->GetId());
                pto->fDisconnect = true;
                return true;
            }
        }
        // Check for headers sync timeouts
        if (state.fSyncStarted && state.nHeadersSyncTimeout < std::numeric_limits<int64_t>::max()) {
            // Detect whether this is a stalling initial-headers-sync peer
            if (pindexBestHeader->GetBlockTime() <= GetAdjustedTime() - 24 * 60 * 60) {
                if (count_microseconds(current_time) > state.nHeadersSyncTimeout && nSyncStarted == 1 && (nPreferredDownload - state.fPreferredDownload >= 1)) {
                    // Disconnect a peer (without the noban permission) if it is our only sync peer,
                    // and we have others we could be using instead.
                    // Note: If all our peers are inbound, then we won't
                    // disconnect our sync peer for stalling; we have bigger
                    // problems if we can't get any outbound peers.
                    if (!pto->HasPermission(PF_NOBAN)) {
                        LogPrintf("Timeout downloading headers from peer=%d, disconnecting\n", pto->GetId());
                        pto->fDisconnect = true;
                        return true;
                    } else {
                        LogPrintf("Timeout downloading headers from noban peer=%d, not disconnecting\n", pto->GetId());
                        // Reset the headers sync state so that we have a
                        // chance to try downloading from a different peer.
                        // Note: this will also result in at least one more
                        // getheaders message to be sent to
                        // this peer (eventually).
                        state.fSyncStarted = false;
                        nSyncStarted--;
                        state.nHeadersSyncTimeout = 0;
                    }
                }
            } else {
                // After we've caught up once, reset the timeout so we can't trigger
                // disconnect later.
                state.nHeadersSyncTimeout = std::numeric_limits<int64_t>::max();
            }
        }

        // Check that outbound peers have reasonable chains
        // GetTime() is used by this anti-DoS logic so we can test this using mocktime
        ConsiderEviction(*pto, GetTime());

        //
        // Message: getdata (blocks)
        //
        std::vector<CInv> vGetData;
        if (!pto->fClient && ((fFetch && !pto->m_limited_node) || !::ChainstateActive().IsInitialBlockDownload()) && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
            std::vector<const CBlockIndex*> vToDownload;
            NodeId staller = -1;
            FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller, consensusParams);
            for (const CBlockIndex *pindex : vToDownload) {
                uint32_t nFetchFlags = GetFetchFlags(*pto);
                vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
                MarkBlockAsInFlight(m_mempool, pto->GetId(), pindex->GetBlockHash(), pindex);
                LogPrint(BCLog::NET, "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(),
                    pindex->nHeight, pto->GetId());
            }
            if (state.nBlocksInFlight == 0 && staller != -1) {
                if (State(staller)->nStallingSince == 0) {
                    State(staller)->nStallingSince = count_microseconds(current_time);
                    LogPrint(BCLog::NET, "Stall started peer=%d\n", staller);
                }
            }
        }

        //
        // Message: getdata (non-blocks)
        //
        std::vector<std::pair<NodeId, GenTxid>> expired;
        auto requestable = m_txrequest.GetRequestable(pto->GetId(), current_time, &expired);
        for (const auto& entry : expired) {
            LogPrint(BCLog::NET, "timeout of inflight %s %s from peer=%d\n", entry.second.IsWtxid() ? "wtx" : "tx",
                entry.second.GetHash().ToString(), entry.first);
        }
        for (const GenTxid& gtxid : requestable) {
            if (!AlreadyHaveTx(gtxid, m_mempool)) {
                LogPrint(BCLog::NET, "Requesting %s %s peer=%d\n", gtxid.IsWtxid() ? "wtx" : "tx",
                    gtxid.GetHash().ToString(), pto->GetId());
                vGetData.emplace_back(gtxid.IsWtxid() ? MSG_WTX : (MSG_TX | GetFetchFlags(*pto)), gtxid.GetHash());
                if (vGetData.size() >= MAX_GETDATA_SZ) {
                    m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
                    vGetData.clear();
                }
                m_txrequest.RequestedTx(pto->GetId(), gtxid.GetHash(), current_time + GETDATA_TX_INTERVAL);
            } else {
                // We have already seen this transaction, no need to download. This is just a belt-and-suspenders, as
                // this should already be called whenever a transaction becomes AlreadyHaveTx().
                m_txrequest.ForgetTxHash(gtxid.GetHash());
            }
        }

        if (!vGetData.empty())
            m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));

        //
        // Message: feefilter
        //
        if (pto->m_tx_relay != nullptr && pto->GetCommonVersion() >= FEEFILTER_VERSION && gArgs.GetBoolArg("-feefilter", DEFAULT_FEEFILTER) &&
            !pto->HasPermission(PF_FORCERELAY) // peers with the forcerelay permission should not filter txs to us
        ) {
            CAmount currentFilter = m_mempool.GetMinFee(gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFeePerK();
            static FeeFilterRounder g_filter_rounder{CFeeRate{DEFAULT_MIN_RELAY_TX_FEE}};
            if (m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
                // Received tx-inv messages are discarded when the active
                // chainstate is in IBD, so tell the peer to not send them.
                currentFilter = MAX_MONEY;
            } else {
                static const CAmount MAX_FILTER{g_filter_rounder.round(MAX_MONEY)};
                if (pto->m_tx_relay->lastSentFeeFilter == MAX_FILTER) {
                    // Send the current filter if we sent MAX_FILTER previously
                    // and made it out of IBD.
                    pto->m_tx_relay->nextSendTimeFeeFilter = count_microseconds(current_time) - 1;
                }
            }
            if (count_microseconds(current_time) > pto->m_tx_relay->nextSendTimeFeeFilter) {
                CAmount filterToSend = g_filter_rounder.round(currentFilter);
                // We always have a fee filter of at least minRelayTxFee
                filterToSend = std::max(filterToSend, ::minRelayTxFee.GetFeePerK());
                if (filterToSend != pto->m_tx_relay->lastSentFeeFilter) {
                    m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::FEEFILTER, filterToSend));
                    pto->m_tx_relay->lastSentFeeFilter = filterToSend;
                }
                pto->m_tx_relay->nextSendTimeFeeFilter = PoissonNextSend(count_microseconds(current_time), AVG_FEEFILTER_BROADCAST_INTERVAL);
            }
            // If the fee filter has changed substantially and it's still more than MAX_FEEFILTER_CHANGE_DELAY
            // until scheduled broadcast, then move the broadcast to within MAX_FEEFILTER_CHANGE_DELAY.
            else if (count_microseconds(current_time) + MAX_FEEFILTER_CHANGE_DELAY * 1000000 < pto->m_tx_relay->nextSendTimeFeeFilter &&
                (currentFilter < 3 * pto->m_tx_relay->lastSentFeeFilter / 4 || currentFilter > 4 * pto->m_tx_relay->lastSentFeeFilter / 3)) {
                pto->m_tx_relay->nextSendTimeFeeFilter = count_microseconds(current_time) + GetRandInt(MAX_FEEFILTER_CHANGE_DELAY) * 1000000;
            }
        }
    } // release cs_main
    return true;
}

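/** Shutdown shim: a single static instance whose destructor clears the
 *  global orphan-transaction maps during static destruction.
 */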
class CNetProcessingCleanup
{
public:
    CNetProcessingCleanup() {}
    ~CNetProcessingCleanup() {
        // orphan transactions
        mapOrphanTransactions.clear();
        mapOrphanTransactionsByPrev.clear();
        g_orphans_by_wtxid.clear();
    }
};
static CNetProcessingCleanup instance_of_cnetprocessingcleanup;