1 // Copyright (c) 2009-2010 Satoshi Nakamoto
2 // Copyright (c) 2009-2018 The Bitcoin Core developers
3 // Distributed under the MIT software license, see the accompanying
4 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
5
6 #include <net_processing.h>
7
8 #include <addrman.h>
9 #include <banman.h>
10 #include <arith_uint256.h>
11 #include <blockencodings.h>
12 #include <chainparams.h>
13 #include <consensus/validation.h>
14 #include <hash.h>
15 #include <validation.h>
16 #include <merkleblock.h>
17 #include <netmessagemaker.h>
18 #include <netbase.h>
19 #include <policy/fees.h>
20 #include <policy/policy.h>
21 #include <primitives/block.h>
22 #include <primitives/transaction.h>
23 #include <random.h>
24 #include <reverse_iterator.h>
25 #include <scheduler.h>
26 #include <tinyformat.h>
27 #include <txmempool.h>
28 #include <ui_interface.h>
29 #include <util/system.h>
30 #include <util/moneystr.h>
31 #include <util/strencodings.h>
32
33 #include <memory>
34
35 #if defined(NDEBUG)
36 # error "Litecoin cannot be compiled without assertions."
37 #endif
38
/** Expiration time for orphan transactions in seconds */
static constexpr int64_t ORPHAN_TX_EXPIRE_TIME = 20 * 60;
/** Minimum time between orphan transactions expire time checks in seconds */
static constexpr int64_t ORPHAN_TX_EXPIRE_INTERVAL = 5 * 60;
/** Headers download timeout expressed in microseconds
 * Timeout = base + per_header * (expected number of headers) */
static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_BASE = 15 * 60 * 1000000; // 15 minutes
static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1000; // 1ms/header
/** Protect at least this many outbound peers from disconnection due to slow/
 * behind headers chain.
 */
static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT = 4;
/** Timeout for (unprotected) outbound peers to sync to our chainwork, in seconds */
static constexpr int64_t CHAIN_SYNC_TIMEOUT = 20 * 60; // 20 minutes
/** How frequently to check for stale tips, in seconds */
static constexpr int64_t STALE_CHECK_INTERVAL = 2.5 * 60; // 2.5 minutes
/** How frequently to check for extra outbound peers and disconnect, in seconds */
static constexpr int64_t EXTRA_PEER_CHECK_INTERVAL = 45;
/** Minimum time an outbound-peer-eviction candidate must be connected for, in order to evict, in seconds */
static constexpr int64_t MINIMUM_CONNECT_TIME = 30;
/** SHA256("main address relay")[0:8] */
static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL;
/// Age after which a stale block will no longer be served if requested as
/// protection against fingerprinting. Set to one month, denominated in seconds.
static constexpr int STALE_RELAY_AGE_LIMIT = 30 * 24 * 60 * 60;
/// Age after which a block is considered historical for purposes of rate
/// limiting block relay. Set to one week, denominated in seconds.
static constexpr int HISTORICAL_BLOCK_AGE = 7 * 24 * 60 * 60;
67
struct COrphanTx {
    // When modifying, adapt the copy of this definition in tests/DoS_tests.
    CTransactionRef tx;   //!< The orphan transaction itself
    NodeId fromPeer;      //!< Peer the orphan was received from
    int64_t nTimeExpire;  //!< Absolute time (seconds) after which this orphan may be expired
    size_t list_pos;      //!< Index of this entry in g_orphan_list (kept in sync for random eviction)
};
//! Lock protecting the orphan-transaction maps below.
CCriticalSection g_cs_orphans;
std::map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(g_cs_orphans);

//! Erase all orphans received from the given peer (defined later in this file).
void EraseOrphansFor(NodeId peer);

/** Increase a node's misbehavior score. */
void Misbehaving(NodeId nodeid, int howmuch, const std::string& message="") EXCLUSIVE_LOCKS_REQUIRED(cs_main);

/** Average delay between local address broadcasts in seconds. */
static constexpr unsigned int AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL = 24 * 60 * 60;
/** Average delay between peer address broadcasts in seconds. */
static const unsigned int AVG_ADDRESS_BROADCAST_INTERVAL = 30;
/** Average delay between trickled inventory transmissions in seconds.
 *  Blocks and whitelisted receivers bypass this, outbound peers get half this delay. */
static const unsigned int INVENTORY_BROADCAST_INTERVAL = 5;
/** Maximum number of inventory items to send per transmission.
 *  Limits the impact of low-fee transaction floods. */
static constexpr unsigned int INVENTORY_BROADCAST_MAX = 7 * INVENTORY_BROADCAST_INTERVAL;
/** Average delay between feefilter broadcasts in seconds. */
static constexpr unsigned int AVG_FEEFILTER_BROADCAST_INTERVAL = 10 * 60;
/** Maximum feefilter broadcast delay after significant change. */
static constexpr unsigned int MAX_FEEFILTER_CHANGE_DELAY = 5 * 60;
97
// Internal stuff
namespace {
    /** Number of nodes with fSyncStarted. */
    int nSyncStarted GUARDED_BY(cs_main) = 0;

    /**
     * Sources of received blocks, saved to be able to send them reject
     * messages or ban them when processing happens afterwards.
     * Set mapBlockSource[hash].second to false if the node should not be
     * punished if the block is invalid.
     */
    std::map<uint256, std::pair<NodeId, bool>> mapBlockSource GUARDED_BY(cs_main);

    /**
     * Filter for transactions that were recently rejected by
     * AcceptToMemoryPool. These are not rerequested until the chain tip
     * changes, at which point the entire filter is reset.
     *
     * Without this filter we'd be re-requesting txs from each of our peers,
     * increasing bandwidth consumption considerably. For instance, with 100
     * peers, half of which relay a tx we don't accept, that might be a 50x
     * bandwidth increase. A flooding attacker attempting to roll-over the
     * filter using minimum-sized, 60byte, transactions might manage to send
     * 1000/sec if we have fast peers, so we pick 120,000 to give our peers a
     * two minute window to send invs to us.
     *
     * Decreasing the false positive rate is fairly cheap, so we pick one in a
     * million to make it highly unlikely for users to have issues with this
     * filter.
     *
     * Memory used: 1.3 MB
     */
    std::unique_ptr<CRollingBloomFilter> recentRejects GUARDED_BY(cs_main);
    uint256 hashRecentRejectsChainTip GUARDED_BY(cs_main);

    /** Blocks that are in flight, and that are in the queue to be downloaded. */
    struct QueuedBlock {
        uint256 hash;
        const CBlockIndex* pindex;                               //!< Optional.
        bool fValidatedHeaders;                                  //!< Whether this block has validated headers at the time of request.
        std::unique_ptr<PartiallyDownloadedBlock> partialBlock;  //!< Optional, used for CMPCTBLOCK downloads
    };
    std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> > mapBlocksInFlight GUARDED_BY(cs_main);

    /** Stack of nodes which we have set to announce using compact blocks */
    std::list<NodeId> lNodesAnnouncingHeaderAndIDs GUARDED_BY(cs_main);

    /** Number of preferable block download peers. */
    int nPreferredDownload GUARDED_BY(cs_main) = 0;

    /** Number of peers from which we're downloading blocks. */
    int nPeersWithValidatedDownloads GUARDED_BY(cs_main) = 0;

    /** Number of outbound peers with m_chain_sync.m_protect. */
    int g_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0;

    /** When our tip was last updated. */
    std::atomic<int64_t> g_last_tip_update(0);

    /** Relay map */
    typedef std::map<uint256, CTransactionRef> MapRelay;
    MapRelay mapRelay GUARDED_BY(cs_main);
    /** Expiration-time ordered list of (expire time, relay map entry) pairs. */
    std::deque<std::pair<int64_t, MapRelay::iterator>> vRelayExpiration GUARDED_BY(cs_main);

    std::atomic<int64_t> nTimeBestReceived(0); // Used only to inform the wallet of when we last received a block

    /** Orders map iterators by the address of the pointed-to element, so that
     *  iterators into the same map can be stored in a std::set. */
    struct IteratorComparator
    {
        template<typename I>
        bool operator()(const I& a, const I& b) const
        {
            return &(*a) < &(*b);
        }
    };
    /** Index from outpoint to the orphan transactions that spend it. */
    std::map<COutPoint, std::set<std::map<uint256, COrphanTx>::iterator, IteratorComparator>> mapOrphanTransactionsByPrev GUARDED_BY(g_cs_orphans);

    std::vector<std::map<uint256, COrphanTx>::iterator> g_orphan_list GUARDED_BY(g_cs_orphans); //! For random eviction

    //! Ring buffer of recently-seen extra transactions used for compact-block reconstruction.
    static size_t vExtraTxnForCompactIt GUARDED_BY(g_cs_orphans) = 0;
    static std::vector<std::pair<uint256, CTransactionRef>> vExtraTxnForCompact GUARDED_BY(g_cs_orphans);
} // namespace
180
181 namespace {
/** A deferred "reject" notification for an invalid block, queued per-peer. */
struct CBlockReject {
    unsigned char chRejectCode;   //!< BIP61 reject code to send
    std::string strRejectReason;  //!< Human-readable rejection reason
    uint256 hashBlock;            //!< Hash of the rejected block
};
187
188 /**
189 * Maintain validation-specific state about nodes, protected by cs_main, instead
190 * by CNode's own locks. This simplifies asynchronous operation, where
191 * processing of incoming data is done after the ProcessMessage call returns,
192 * and we're no longer holding the node's locks.
193 */
194 struct CNodeState {
195 //! The peer's address
196 const CService address;
197 //! Whether we have a fully established connection.
198 bool fCurrentlyConnected;
199 //! Accumulated misbehaviour score for this peer.
200 int nMisbehavior;
201 //! Whether this peer should be disconnected and banned (unless whitelisted).
202 bool fShouldBan;
203 //! String name of this peer (debugging/logging purposes).
204 const std::string name;
205 //! List of asynchronously-determined block rejections to notify this peer about.
206 std::vector<CBlockReject> rejects;
207 //! The best known block we know this peer has announced.
208 const CBlockIndex *pindexBestKnownBlock;
209 //! The hash of the last unknown block this peer has announced.
210 uint256 hashLastUnknownBlock;
211 //! The last full block we both have.
212 const CBlockIndex *pindexLastCommonBlock;
213 //! The best header we have sent our peer.
214 const CBlockIndex *pindexBestHeaderSent;
215 //! Length of current-streak of unconnecting headers announcements
216 int nUnconnectingHeaders;
217 //! Whether we've started headers synchronization with this peer.
218 bool fSyncStarted;
219 //! When to potentially disconnect peer for stalling headers download
220 int64_t nHeadersSyncTimeout;
221 //! Since when we're stalling block download progress (in microseconds), or 0.
222 int64_t nStallingSince;
223 std::list<QueuedBlock> vBlocksInFlight;
224 //! When the first entry in vBlocksInFlight started downloading. Don't care when vBlocksInFlight is empty.
225 int64_t nDownloadingSince;
226 int nBlocksInFlight;
227 int nBlocksInFlightValidHeaders;
228 //! Whether we consider this a preferred download peer.
229 bool fPreferredDownload;
230 //! Whether this peer wants invs or headers (when possible) for block announcements.
231 bool fPreferHeaders;
232 //! Whether this peer wants invs or cmpctblocks (when possible) for block announcements.
233 bool fPreferHeaderAndIDs;
234 /**
235 * Whether this peer will send us cmpctblocks if we request them.
236 * This is not used to gate request logic, as we really only care about fSupportsDesiredCmpctVersion,
237 * but is used as a flag to "lock in" the version of compact blocks (fWantsCmpctWitness) we send.
238 */
239 bool fProvidesHeaderAndIDs;
240 //! Whether this peer can give us witnesses
241 bool fHaveWitness;
242 //! Whether this peer wants witnesses in cmpctblocks/blocktxns
243 bool fWantsCmpctWitness;
244 /**
245 * If we've announced NODE_WITNESS to this peer: whether the peer sends witnesses in cmpctblocks/blocktxns,
246 * otherwise: whether this peer sends non-witnesses in cmpctblocks/blocktxns.
247 */
248 bool fSupportsDesiredCmpctVersion;
249
250 /** State used to enforce CHAIN_SYNC_TIMEOUT
251 * Only in effect for outbound, non-manual connections, with
252 * m_protect == false
253 * Algorithm: if a peer's best known block has less work than our tip,
254 * set a timeout CHAIN_SYNC_TIMEOUT seconds in the future:
255 * - If at timeout their best known block now has more work than our tip
256 * when the timeout was set, then either reset the timeout or clear it
257 * (after comparing against our current tip's work)
258 * - If at timeout their best known block still has less work than our
259 * tip did when the timeout was set, then send a getheaders message,
260 * and set a shorter timeout, HEADERS_RESPONSE_TIME seconds in future.
261 * If their best known block is still behind when that new timeout is
262 * reached, disconnect.
263 */
264 struct ChainSyncTimeoutState {
265 //! A timeout used for checking whether our peer has sufficiently synced
266 int64_t m_timeout;
267 //! A header with the work we require on our peer's chain
268 const CBlockIndex * m_work_header;
269 //! After timeout is reached, set to true after sending getheaders
270 bool m_sent_getheaders;
271 //! Whether this peer is protected from disconnection due to a bad/slow chain
272 bool m_protect;
273 };
274
275 ChainSyncTimeoutState m_chain_sync;
276
277 //! Time of last new block announcement
278 int64_t m_last_block_announcement;
279
CNodeState__anon599f788a0211::CNodeState280 CNodeState(CAddress addrIn, std::string addrNameIn) : address(addrIn), name(addrNameIn) {
281 fCurrentlyConnected = false;
282 nMisbehavior = 0;
283 fShouldBan = false;
284 pindexBestKnownBlock = nullptr;
285 hashLastUnknownBlock.SetNull();
286 pindexLastCommonBlock = nullptr;
287 pindexBestHeaderSent = nullptr;
288 nUnconnectingHeaders = 0;
289 fSyncStarted = false;
290 nHeadersSyncTimeout = 0;
291 nStallingSince = 0;
292 nDownloadingSince = 0;
293 nBlocksInFlight = 0;
294 nBlocksInFlightValidHeaders = 0;
295 fPreferredDownload = false;
296 fPreferHeaders = false;
297 fPreferHeaderAndIDs = false;
298 fProvidesHeaderAndIDs = false;
299 fHaveWitness = false;
300 fWantsCmpctWitness = false;
301 fSupportsDesiredCmpctVersion = false;
302 m_chain_sync = { 0, nullptr, false, false };
303 m_last_block_announcement = 0;
304 }
305 };
306
307 /** Map maintaining per-node state. */
308 static std::map<NodeId, CNodeState> mapNodeState GUARDED_BY(cs_main);
309
State(NodeId pnode)310 static CNodeState *State(NodeId pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
311 std::map<NodeId, CNodeState>::iterator it = mapNodeState.find(pnode);
312 if (it == mapNodeState.end())
313 return nullptr;
314 return &it->second;
315 }
316
UpdatePreferredDownload(CNode * node,CNodeState * state)317 static void UpdatePreferredDownload(CNode* node, CNodeState* state) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
318 {
319 nPreferredDownload -= state->fPreferredDownload;
320
321 // Whether this node should be marked as a preferred download node.
322 state->fPreferredDownload = (!node->fInbound || node->fWhitelisted) && !node->fOneShot && !node->fClient;
323
324 nPreferredDownload += state->fPreferredDownload;
325 }
326
PushNodeVersion(CNode * pnode,CConnman * connman,int64_t nTime)327 static void PushNodeVersion(CNode *pnode, CConnman* connman, int64_t nTime)
328 {
329 ServiceFlags nLocalNodeServices = pnode->GetLocalServices();
330 uint64_t nonce = pnode->GetLocalNonce();
331 int nNodeStartingHeight = pnode->GetMyStartingHeight();
332 NodeId nodeid = pnode->GetId();
333 CAddress addr = pnode->addr;
334
335 CAddress addrYou = (addr.IsRoutable() && !IsProxy(addr) ? addr : CAddress(CService(), addr.nServices));
336 CAddress addrMe = CAddress(CService(), nLocalNodeServices);
337
338 connman->PushMessage(pnode, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERSION, PROTOCOL_VERSION, (uint64_t)nLocalNodeServices, nTime, addrYou, addrMe,
339 nonce, strSubVersion, nNodeStartingHeight, ::g_relay_txes));
340
341 if (fLogIPs) {
342 LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, us=%s, them=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), addrYou.ToString(), nodeid);
343 } else {
344 LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, us=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), nodeid);
345 }
346 }
347
// Returns a bool indicating whether we requested this block.
// Also used if a block was /not/ received and timed out or started with another peer
static bool MarkBlockAsReceived(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
    std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
    if (itInFlight != mapBlocksInFlight.end()) {
        CNodeState *state = State(itInFlight->second.first);
        assert(state != nullptr);
        state->nBlocksInFlightValidHeaders -= itInFlight->second.second->fValidatedHeaders;
        if (state->nBlocksInFlightValidHeaders == 0 && itInFlight->second.second->fValidatedHeaders) {
            // Last validated block on the queue was received.
            nPeersWithValidatedDownloads--;
        }
        if (state->vBlocksInFlight.begin() == itInFlight->second.second) {
            // First block on the queue was received, update the start download time for the next one
            state->nDownloadingSince = std::max(state->nDownloadingSince, GetTimeMicros());
        }
        state->vBlocksInFlight.erase(itInFlight->second.second);
        state->nBlocksInFlight--;
        // Receiving any block counts as progress: reset the stalling timer.
        state->nStallingSince = 0;
        mapBlocksInFlight.erase(itInFlight);
        return true;
    }
    return false;
}
372
// returns false, still setting pit, if the block was already in flight from the same peer
// pit will only be valid as long as the same cs_main lock is being held
static bool MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const CBlockIndex* pindex = nullptr, std::list<QueuedBlock>::iterator** pit = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
    CNodeState *state = State(nodeid);
    assert(state != nullptr);

    // Short-circuit most stuff in case it is from the same node
    std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
    if (itInFlight != mapBlocksInFlight.end() && itInFlight->second.first == nodeid) {
        if (pit) {
            *pit = &itInFlight->second.second;
        }
        return false;
    }

    // Make sure it's not listed somewhere already.
    MarkBlockAsReceived(hash);

    // A PartiallyDownloadedBlock is only allocated when the caller asked for
    // an iterator back (pit != nullptr), i.e. for compact block downloads.
    std::list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(),
            {hash, pindex, pindex != nullptr, std::unique_ptr<PartiallyDownloadedBlock>(pit ? new PartiallyDownloadedBlock(&mempool) : nullptr)});
    state->nBlocksInFlight++;
    state->nBlocksInFlightValidHeaders += it->fValidatedHeaders;
    if (state->nBlocksInFlight == 1) {
        // We're starting a block download (batch) from this peer.
        state->nDownloadingSince = GetTimeMicros();
    }
    if (state->nBlocksInFlightValidHeaders == 1 && pindex != nullptr) {
        nPeersWithValidatedDownloads++;
    }
    itInFlight = mapBlocksInFlight.insert(std::make_pair(hash, std::make_pair(nodeid, it))).first;
    if (pit)
        *pit = &itInFlight->second.second;
    return true;
}
407
408 /** Check whether the last unknown block a peer advertised is not yet known. */
ProcessBlockAvailability(NodeId nodeid)409 static void ProcessBlockAvailability(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
410 CNodeState *state = State(nodeid);
411 assert(state != nullptr);
412
413 if (!state->hashLastUnknownBlock.IsNull()) {
414 const CBlockIndex* pindex = LookupBlockIndex(state->hashLastUnknownBlock);
415 if (pindex && pindex->nChainWork > 0) {
416 if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
417 state->pindexBestKnownBlock = pindex;
418 }
419 state->hashLastUnknownBlock.SetNull();
420 }
421 }
422 }
423
424 /** Update tracking information about which blocks a peer is assumed to have. */
UpdateBlockAvailability(NodeId nodeid,const uint256 & hash)425 static void UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
426 CNodeState *state = State(nodeid);
427 assert(state != nullptr);
428
429 ProcessBlockAvailability(nodeid);
430
431 const CBlockIndex* pindex = LookupBlockIndex(hash);
432 if (pindex && pindex->nChainWork > 0) {
433 // An actually better block was announced.
434 if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
435 state->pindexBestKnownBlock = pindex;
436 }
437 } else {
438 // An unknown block was announced; just assume that the latest one is the best one.
439 state->hashLastUnknownBlock = hash;
440 }
441 }
442
443 /**
444 * When a peer sends us a valid block, instruct it to announce blocks to us
445 * using CMPCTBLOCK if possible by adding its nodeid to the end of
446 * lNodesAnnouncingHeaderAndIDs, and keeping that list under a certain size by
447 * removing the first element if necessary.
448 */
MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid,CConnman * connman)449 static void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid, CConnman* connman) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
450 {
451 AssertLockHeld(cs_main);
452 CNodeState* nodestate = State(nodeid);
453 if (!nodestate || !nodestate->fSupportsDesiredCmpctVersion) {
454 // Never ask from peers who can't provide witnesses.
455 return;
456 }
457 if (nodestate->fProvidesHeaderAndIDs) {
458 for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin(); it != lNodesAnnouncingHeaderAndIDs.end(); it++) {
459 if (*it == nodeid) {
460 lNodesAnnouncingHeaderAndIDs.erase(it);
461 lNodesAnnouncingHeaderAndIDs.push_back(nodeid);
462 return;
463 }
464 }
465 connman->ForNode(nodeid, [connman](CNode* pfrom){
466 AssertLockHeld(cs_main);
467 uint64_t nCMPCTBLOCKVersion = (pfrom->GetLocalServices() & NODE_WITNESS) ? 2 : 1;
468 if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
469 // As per BIP152, we only get 3 of our peers to announce
470 // blocks using compact encodings.
471 connman->ForNode(lNodesAnnouncingHeaderAndIDs.front(), [connman, nCMPCTBLOCKVersion](CNode* pnodeStop){
472 AssertLockHeld(cs_main);
473 connman->PushMessage(pnodeStop, CNetMsgMaker(pnodeStop->GetSendVersion()).Make(NetMsgType::SENDCMPCT, /*fAnnounceUsingCMPCTBLOCK=*/false, nCMPCTBLOCKVersion));
474 return true;
475 });
476 lNodesAnnouncingHeaderAndIDs.pop_front();
477 }
478 connman->PushMessage(pfrom, CNetMsgMaker(pfrom->GetSendVersion()).Make(NetMsgType::SENDCMPCT, /*fAnnounceUsingCMPCTBLOCK=*/true, nCMPCTBLOCKVersion));
479 lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
480 return true;
481 });
482 }
483 }
484
TipMayBeStale(const Consensus::Params & consensusParams)485 static bool TipMayBeStale(const Consensus::Params &consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
486 {
487 AssertLockHeld(cs_main);
488 if (g_last_tip_update == 0) {
489 g_last_tip_update = GetTime();
490 }
491 return g_last_tip_update < GetTime() - consensusParams.nPowTargetSpacing * 3 && mapBlocksInFlight.empty();
492 }
493
CanDirectFetch(const Consensus::Params & consensusParams)494 static bool CanDirectFetch(const Consensus::Params &consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
495 {
496 return chainActive.Tip()->GetBlockTime() > GetAdjustedTime() - consensusParams.nPowTargetSpacing * 20;
497 }
498
PeerHasHeader(CNodeState * state,const CBlockIndex * pindex)499 static bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
500 {
501 if (state->pindexBestKnownBlock && pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight))
502 return true;
503 if (state->pindexBestHeaderSent && pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight))
504 return true;
505 return false;
506 }
507
/** Update pindexLastCommonBlock and add not-in-flight missing successors to vBlocks, until it has
 *  at most count entries. */
static void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller, const Consensus::Params& consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    if (count == 0)
        return;

    vBlocks.reserve(vBlocks.size() + count);
    CNodeState *state = State(nodeid);
    assert(state != nullptr);

    // Make sure pindexBestKnownBlock is up to date, we'll need it.
    ProcessBlockAvailability(nodeid);

    if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->nChainWork < chainActive.Tip()->nChainWork || state->pindexBestKnownBlock->nChainWork < nMinimumChainWork) {
        // This peer has nothing interesting.
        return;
    }

    if (state->pindexLastCommonBlock == nullptr) {
        // Bootstrap quickly by guessing a parent of our best tip is the forking point.
        // Guessing wrong in either direction is not a problem.
        state->pindexLastCommonBlock = chainActive[std::min(state->pindexBestKnownBlock->nHeight, chainActive.Height())];
    }

    // If the peer reorganized, our previous pindexLastCommonBlock may not be an ancestor
    // of its current tip anymore. Go back enough to fix that.
    state->pindexLastCommonBlock = LastCommonAncestor(state->pindexLastCommonBlock, state->pindexBestKnownBlock);
    if (state->pindexLastCommonBlock == state->pindexBestKnownBlock)
        return;

    std::vector<const CBlockIndex*> vToFetch;
    const CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
    // Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last
    // linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to
    // download that next block if the window were 1 larger.
    int nWindowEnd = state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW;
    int nMaxHeight = std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
    NodeId waitingfor = -1;
    while (pindexWalk->nHeight < nMaxHeight) {
        // Read up to 128 (or more, if more blocks than that are needed) successors of pindexWalk (towards
        // pindexBestKnownBlock) into vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as expensive
        // as iterating over ~100 CBlockIndex* entries anyway.
        int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight, std::max<int>(count - vBlocks.size(), 128));
        vToFetch.resize(nToFetch);
        // Fill vToFetch back-to-front by walking pprev from the ancestor at
        // height pindexWalk->nHeight + nToFetch.
        pindexWalk = state->pindexBestKnownBlock->GetAncestor(pindexWalk->nHeight + nToFetch);
        vToFetch[nToFetch - 1] = pindexWalk;
        for (unsigned int i = nToFetch - 1; i > 0; i--) {
            vToFetch[i - 1] = vToFetch[i]->pprev;
        }

        // Iterate over those blocks in vToFetch (in forward direction), adding the ones that
        // are not yet downloaded and not in flight to vBlocks. In the meantime, update
        // pindexLastCommonBlock as long as all ancestors are already downloaded, or if it's
        // already part of our chain (and therefore don't need it even if pruned).
        for (const CBlockIndex* pindex : vToFetch) {
            if (!pindex->IsValid(BLOCK_VALID_TREE)) {
                // We consider the chain that this peer is on invalid.
                return;
            }
            if (!State(nodeid)->fHaveWitness && IsWitnessEnabled(pindex->pprev, consensusParams)) {
                // We wouldn't download this block or its descendants from this peer.
                return;
            }
            if (pindex->nStatus & BLOCK_HAVE_DATA || chainActive.Contains(pindex)) {
                if (pindex->HaveTxsDownloaded())
                    state->pindexLastCommonBlock = pindex;
            } else if (mapBlocksInFlight.count(pindex->GetBlockHash()) == 0) {
                // The block is not already downloaded, and not yet in flight.
                if (pindex->nHeight > nWindowEnd) {
                    // We reached the end of the window.
                    if (vBlocks.size() == 0 && waitingfor != nodeid) {
                        // We aren't able to fetch anything, but we would be if the download window was one larger.
                        nodeStaller = waitingfor;
                    }
                    return;
                }
                vBlocks.push_back(pindex);
                if (vBlocks.size() == count) {
                    return;
                }
            } else if (waitingfor == -1) {
                // This is the first already-in-flight block.
                waitingfor = mapBlocksInFlight[pindex->GetBlockHash()].first;
            }
        }
    }
}
596
597 } // namespace
598
599 // This function is used for testing the stale tip eviction logic, see
600 // denialofservice_tests.cpp
UpdateLastBlockAnnounceTime(NodeId node,int64_t time_in_seconds)601 void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds)
602 {
603 LOCK(cs_main);
604 CNodeState *state = State(node);
605 if (state) state->m_last_block_announcement = time_in_seconds;
606 }
607
608 // Returns true for outbound peers, excluding manual connections, feelers, and
609 // one-shots
IsOutboundDisconnectionCandidate(const CNode * node)610 static bool IsOutboundDisconnectionCandidate(const CNode *node)
611 {
612 return !(node->fInbound || node->m_manual_connection || node->fFeeler || node->fOneShot);
613 }
614
InitializeNode(CNode * pnode)615 void PeerLogicValidation::InitializeNode(CNode *pnode) {
616 CAddress addr = pnode->addr;
617 std::string addrName = pnode->GetAddrName();
618 NodeId nodeid = pnode->GetId();
619 {
620 LOCK(cs_main);
621 mapNodeState.emplace_hint(mapNodeState.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(addr, std::move(addrName)));
622 }
623 if(!pnode->fInbound)
624 PushNodeVersion(pnode, connman, GetTime());
625 }
626
FinalizeNode(NodeId nodeid,bool & fUpdateConnectionTime)627 void PeerLogicValidation::FinalizeNode(NodeId nodeid, bool& fUpdateConnectionTime) {
628 fUpdateConnectionTime = false;
629 LOCK(cs_main);
630 CNodeState *state = State(nodeid);
631 assert(state != nullptr);
632
633 if (state->fSyncStarted)
634 nSyncStarted--;
635
636 if (state->nMisbehavior == 0 && state->fCurrentlyConnected) {
637 fUpdateConnectionTime = true;
638 }
639
640 for (const QueuedBlock& entry : state->vBlocksInFlight) {
641 mapBlocksInFlight.erase(entry.hash);
642 }
643 EraseOrphansFor(nodeid);
644 nPreferredDownload -= state->fPreferredDownload;
645 nPeersWithValidatedDownloads -= (state->nBlocksInFlightValidHeaders != 0);
646 assert(nPeersWithValidatedDownloads >= 0);
647 g_outbound_peers_with_protect_from_disconnect -= state->m_chain_sync.m_protect;
648 assert(g_outbound_peers_with_protect_from_disconnect >= 0);
649
650 mapNodeState.erase(nodeid);
651
652 if (mapNodeState.empty()) {
653 // Do a consistency check after the last peer is removed.
654 assert(mapBlocksInFlight.empty());
655 assert(nPreferredDownload == 0);
656 assert(nPeersWithValidatedDownloads == 0);
657 assert(g_outbound_peers_with_protect_from_disconnect == 0);
658 }
659 LogPrint(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid);
660 }
661
GetNodeStateStats(NodeId nodeid,CNodeStateStats & stats)662 bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) {
663 LOCK(cs_main);
664 CNodeState *state = State(nodeid);
665 if (state == nullptr)
666 return false;
667 stats.nMisbehavior = state->nMisbehavior;
668 stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1;
669 stats.nCommonHeight = state->pindexLastCommonBlock ? state->pindexLastCommonBlock->nHeight : -1;
670 for (const QueuedBlock& queue : state->vBlocksInFlight) {
671 if (queue.pindex)
672 stats.vHeightInFlight.push_back(queue.pindex->nHeight);
673 }
674 return true;
675 }
676
677 //////////////////////////////////////////////////////////////////////////////
678 //
679 // mapOrphanTransactions
680 //
681
AddToCompactExtraTransactions(const CTransactionRef & tx)682 static void AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
683 {
684 size_t max_extra_txn = gArgs.GetArg("-blockreconstructionextratxn", DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN);
685 if (max_extra_txn <= 0)
686 return;
687 if (!vExtraTxnForCompact.size())
688 vExtraTxnForCompact.resize(max_extra_txn);
689 vExtraTxnForCompact[vExtraTxnForCompactIt] = std::make_pair(tx->GetWitnessHash(), tx);
690 vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % max_extra_txn;
691 }
692
// Add a transaction whose parents are unknown to the orphan pool, indexed by
// txid (mapOrphanTransactions), by each spent prevout
// (mapOrphanTransactionsByPrev), and by insertion position (g_orphan_list).
// Returns false if the orphan is already known or exceeds the weight limit.
// Caller must hold g_cs_orphans.
bool AddOrphanTx(const CTransactionRef& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
{
    const uint256& hash = tx->GetHash();
    if (mapOrphanTransactions.count(hash))
        return false;

    // Ignore big transactions, to avoid a
    // send-big-orphans memory exhaustion attack. If a peer has a legitimate
    // large transaction with a missing parent then we assume
    // it will rebroadcast it later, after the parent transaction(s)
    // have been mined or received.
    // 100 orphans, each of which is at most 100,000 bytes big is
    // at most 10 megabytes of orphans and somewhat more byprev index (in the worst case):
    unsigned int sz = GetTransactionWeight(*tx);
    if (sz > MAX_STANDARD_TX_WEIGHT)
    {
        LogPrint(BCLog::MEMPOOL, "ignoring large orphan tx (size: %u, hash: %s)\n", sz, hash.ToString());
        return false;
    }

    // Record the orphan along with its slot in g_orphan_list; the stored
    // list_pos enables O(1) random eviction in LimitOrphanTxSize.
    auto ret = mapOrphanTransactions.emplace(hash, COrphanTx{tx, peer, GetTime() + ORPHAN_TX_EXPIRE_TIME, g_orphan_list.size()});
    assert(ret.second);
    g_orphan_list.push_back(ret.first);
    // Index by every outpoint this orphan spends, so it can be found when a
    // parent transaction arrives or is confirmed in a block.
    for (const CTxIn& txin : tx->vin) {
        mapOrphanTransactionsByPrev[txin.prevout].insert(ret.first);
    }

    // Also keep a copy available for compact block reconstruction.
    AddToCompactExtraTransactions(tx);

    LogPrint(BCLog::MEMPOOL, "stored orphan tx %s (mapsz %u outsz %u)\n", hash.ToString(),
             mapOrphanTransactions.size(), mapOrphanTransactionsByPrev.size());
    return true;
}
726
// Remove a single orphan transaction (by txid) from all orphan-pool indexes.
// Returns the number of entries erased: 0 if the hash was not an orphan,
// 1 otherwise. Caller must hold g_cs_orphans.
int static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
{
    std::map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.find(hash);
    if (it == mapOrphanTransactions.end())
        return 0;
    // Drop this orphan from the by-prevout index, pruning any prevout entry
    // whose set of dependent orphans becomes empty.
    for (const CTxIn& txin : it->second.tx->vin)
    {
        auto itPrev = mapOrphanTransactionsByPrev.find(txin.prevout);
        if (itPrev == mapOrphanTransactionsByPrev.end())
            continue;
        itPrev->second.erase(it);
        if (itPrev->second.empty())
            mapOrphanTransactionsByPrev.erase(itPrev);
    }

    // g_orphan_list is kept dense so LimitOrphanTxSize can evict a random
    // element in O(1): overwrite the erased slot with the last entry (fixing
    // that entry's stored list_pos), then shrink the vector.
    size_t old_pos = it->second.list_pos;
    assert(g_orphan_list[old_pos] == it);
    if (old_pos + 1 != g_orphan_list.size()) {
        // Unless we're deleting the last entry in g_orphan_list, move the last
        // entry to the position we're deleting.
        auto it_last = g_orphan_list.back();
        g_orphan_list[old_pos] = it_last;
        it_last->second.list_pos = old_pos;
    }
    g_orphan_list.pop_back();

    mapOrphanTransactions.erase(it);
    return 1;
}
756
EraseOrphansFor(NodeId peer)757 void EraseOrphansFor(NodeId peer)
758 {
759 LOCK(g_cs_orphans);
760 int nErased = 0;
761 std::map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
762 while (iter != mapOrphanTransactions.end())
763 {
764 std::map<uint256, COrphanTx>::iterator maybeErase = iter++; // increment to avoid iterator becoming invalid
765 if (maybeErase->second.fromPeer == peer)
766 {
767 nErased += EraseOrphanTx(maybeErase->second.tx->GetHash());
768 }
769 }
770 if (nErased > 0) LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx from peer=%d\n", nErased, peer);
771 }
772
773
// Trim the orphan pool down to at most nMaxOrphans entries. First sweeps out
// expired entries (the linear scan is batched to run at most roughly once per
// ORPHAN_TX_EXPIRE_INTERVAL), then evicts uniformly-random orphans until the
// limit is met. Returns the number of randomly evicted orphans (expired
// entries are not counted).
unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans)
{
    LOCK(g_cs_orphans);

    unsigned int nEvicted = 0;
    // Time of the next expiration sweep; persists across calls so the full
    // scan is not repeated on every invocation.
    static int64_t nNextSweep;
    int64_t nNow = GetTime();
    if (nNextSweep <= nNow) {
        // Sweep out expired orphan pool entries:
        int nErased = 0;
        int64_t nMinExpTime = nNow + ORPHAN_TX_EXPIRE_TIME - ORPHAN_TX_EXPIRE_INTERVAL;
        std::map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
        while (iter != mapOrphanTransactions.end())
        {
            // Advance before possibly erasing so the iterator stays valid.
            std::map<uint256, COrphanTx>::iterator maybeErase = iter++;
            if (maybeErase->second.nTimeExpire <= nNow) {
                nErased += EraseOrphanTx(maybeErase->second.tx->GetHash());
            } else {
                nMinExpTime = std::min(maybeErase->second.nTimeExpire, nMinExpTime);
            }
        }
        // Sweep again 5 minutes after the next entry that expires in order to batch the linear scan.
        nNextSweep = nMinExpTime + ORPHAN_TX_EXPIRE_INTERVAL;
        if (nErased > 0) LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx due to expiration\n", nErased);
    }
    FastRandomContext rng;
    while (mapOrphanTransactions.size() > nMaxOrphans)
    {
        // Evict a random orphan:
        size_t randompos = rng.randrange(g_orphan_list.size());
        EraseOrphanTx(g_orphan_list[randompos]->first);
        ++nEvicted;
    }
    return nEvicted;
}
809
810 /**
811 * Mark a misbehaving peer to be banned depending upon the value of `-banscore`.
812 */
Misbehaving(NodeId pnode,int howmuch,const std::string & message)813 void Misbehaving(NodeId pnode, int howmuch, const std::string& message) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
814 {
815 if (howmuch == 0)
816 return;
817
818 CNodeState *state = State(pnode);
819 if (state == nullptr)
820 return;
821
822 state->nMisbehavior += howmuch;
823 int banscore = gArgs.GetArg("-banscore", DEFAULT_BANSCORE_THRESHOLD);
824 std::string message_prefixed = message.empty() ? "" : (": " + message);
825 if (state->nMisbehavior >= banscore && state->nMisbehavior - howmuch < banscore)
826 {
827 LogPrint(BCLog::NET, "%s: %s peer=%d (%d -> %d) BAN THRESHOLD EXCEEDED%s\n", __func__, state->name, pnode, state->nMisbehavior-howmuch, state->nMisbehavior, message_prefixed);
828 state->fShouldBan = true;
829 } else
830 LogPrint(BCLog::NET, "%s: %s peer=%d (%d -> %d)%s\n", __func__, state->name, pnode, state->nMisbehavior-howmuch, state->nMisbehavior, message_prefixed);
831 }
832
833
834
835
836
837
838
839
840 //////////////////////////////////////////////////////////////////////////////
841 //
842 // blockchain -> download logic notification
843 //
844
845 // To prevent fingerprinting attacks, only send blocks/headers outside of the
846 // active chain if they are no more than a month older (both in time, and in
847 // best equivalent proof of work) than the best header chain we know about and
848 // we fully-validated them at some point.
BlockRequestAllowed(const CBlockIndex * pindex,const Consensus::Params & consensusParams)849 static bool BlockRequestAllowed(const CBlockIndex* pindex, const Consensus::Params& consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
850 {
851 AssertLockHeld(cs_main);
852 if (chainActive.Contains(pindex)) return true;
853 return pindex->IsValid(BLOCK_VALID_SCRIPTS) && (pindexBestHeader != nullptr) &&
854 (pindexBestHeader->GetBlockTime() - pindex->GetBlockTime() < STALE_RELAY_AGE_LIMIT) &&
855 (GetBlockProofEquivalentTime(*pindexBestHeader, *pindex, *pindexBestHeader, consensusParams) < STALE_RELAY_AGE_LIMIT);
856 }
857
// Construct the peer logic: wire in the connection manager and ban manager,
// set up the recent-rejects filter, and schedule the recurring combined
// stale-tip-check / extra-peer-eviction task.
PeerLogicValidation::PeerLogicValidation(CConnman* connmanIn, BanMan* banman, CScheduler &scheduler, bool enable_bip61)
    : connman(connmanIn), m_banman(banman), m_stale_tip_check_time(0), m_enable_bip61(enable_bip61) {
    // Initialize global variables that cannot be constructed at startup.
    recentRejects.reset(new CRollingBloomFilter(120000, 0.000001));

    const Consensus::Params& consensusParams = Params().GetConsensus();
    // Stale tip checking and peer eviction are on two different timers, but we
    // don't want them to get out of sync due to drift in the scheduler, so we
    // combine them in one function and schedule at the quicker (peer-eviction)
    // timer.
    static_assert(EXTRA_PEER_CHECK_INTERVAL < STALE_CHECK_INTERVAL, "peer eviction timer should be less than stale tip check timer");
    scheduler.scheduleEvery(std::bind(&PeerLogicValidation::CheckForStaleTipAndEvictPeers, this, consensusParams), EXTRA_PEER_CHECK_INTERVAL * 1000);
}
871
872 /**
873 * Evict orphan txn pool entries (EraseOrphanTx) based on a newly connected
874 * block. Also save the time of the last tip update.
875 */
BlockConnected(const std::shared_ptr<const CBlock> & pblock,const CBlockIndex * pindex,const std::vector<CTransactionRef> & vtxConflicted)876 void PeerLogicValidation::BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindex, const std::vector<CTransactionRef>& vtxConflicted) {
877 LOCK(g_cs_orphans);
878
879 std::vector<uint256> vOrphanErase;
880
881 for (const CTransactionRef& ptx : pblock->vtx) {
882 const CTransaction& tx = *ptx;
883
884 // Which orphan pool entries must we evict?
885 for (const auto& txin : tx.vin) {
886 auto itByPrev = mapOrphanTransactionsByPrev.find(txin.prevout);
887 if (itByPrev == mapOrphanTransactionsByPrev.end()) continue;
888 for (auto mi = itByPrev->second.begin(); mi != itByPrev->second.end(); ++mi) {
889 const CTransaction& orphanTx = *(*mi)->second.tx;
890 const uint256& orphanHash = orphanTx.GetHash();
891 vOrphanErase.push_back(orphanHash);
892 }
893 }
894 }
895
896 // Erase orphan transactions included or precluded by this block
897 if (vOrphanErase.size()) {
898 int nErased = 0;
899 for (const uint256& orphanHash : vOrphanErase) {
900 nErased += EraseOrphanTx(orphanHash);
901 }
902 LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx included or conflicted by block\n", nErased);
903 }
904
905 g_last_tip_update = GetTime();
906 }
907
// All of the following cache a recent block, and are protected by cs_most_recent_block
static CCriticalSection cs_most_recent_block;
static std::shared_ptr<const CBlock> most_recent_block GUARDED_BY(cs_most_recent_block);
static std::shared_ptr<const CBlockHeaderAndShortTxIDs> most_recent_compact_block GUARDED_BY(cs_most_recent_block);
static uint256 most_recent_block_hash GUARDED_BY(cs_most_recent_block);
// Whether most_recent_compact_block was built with witness data included.
static bool fWitnessesPresentInMostRecentCompactBlock GUARDED_BY(cs_most_recent_block);
914
915 /**
916 * Maintain state about the best-seen block and fast-announce a compact block
917 * to compatible peers.
918 */
void PeerLogicValidation::NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& pblock) {
    // Build the compact block once up front so it can be pushed to every
    // eligible peer without re-constructing it per peer.
    std::shared_ptr<const CBlockHeaderAndShortTxIDs> pcmpctblock = std::make_shared<const CBlockHeaderAndShortTxIDs> (*pblock, true);
    const CNetMsgMaker msgMaker(PROTOCOL_VERSION);

    LOCK(cs_main);

    // Fast-announce each height at most once.
    static int nHighestFastAnnounce = 0;
    if (pindex->nHeight <= nHighestFastAnnounce)
        return;
    nHighestFastAnnounce = pindex->nHeight;

    bool fWitnessEnabled = IsWitnessEnabled(pindex->pprev, Params().GetConsensus());
    uint256 hashBlock(pblock->GetHash());

    {
        // Cache the block and its compact form so later getdata requests can
        // be served without hitting disk.
        LOCK(cs_most_recent_block);
        most_recent_block_hash = hashBlock;
        most_recent_block = pblock;
        most_recent_compact_block = pcmpctblock;
        fWitnessesPresentInMostRecentCompactBlock = fWitnessEnabled;
    }

    connman->ForEachNode([this, &pcmpctblock, pindex, &msgMaker, fWitnessEnabled, &hashBlock](CNode* pnode) {
        AssertLockHeld(cs_main);

        // TODO: Avoid the repeated-serialization here
        if (pnode->nVersion < INVALID_CB_NO_BAN_VERSION || pnode->fDisconnect)
            return;
        ProcessBlockAvailability(pnode->GetId());
        CNodeState &state = *State(pnode->GetId());
        // If the peer has, or we announced to them the previous block already,
        // but we don't think they have this one, go ahead and announce it
        if (state.fPreferHeaderAndIDs && (!fWitnessEnabled || state.fWantsCmpctWitness) &&
                !PeerHasHeader(&state, pindex) && PeerHasHeader(&state, pindex->pprev)) {

            LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", "PeerLogicValidation::NewPoWValidBlock",
                    hashBlock.ToString(), pnode->GetId());
            connman->PushMessage(pnode, msgMaker.Make(NetMsgType::CMPCTBLOCK, *pcmpctblock));
            state.pindexBestHeaderSent = pindex;
        }
    });
}
961
962 /**
963 * Update our best height and announce any block hashes which weren't previously
964 * in chainActive to our peers.
965 */
UpdatedBlockTip(const CBlockIndex * pindexNew,const CBlockIndex * pindexFork,bool fInitialDownload)966 void PeerLogicValidation::UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) {
967 const int nNewHeight = pindexNew->nHeight;
968 connman->SetBestHeight(nNewHeight);
969
970 SetServiceFlagsIBDCache(!fInitialDownload);
971 if (!fInitialDownload) {
972 // Find the hashes of all blocks that weren't previously in the best chain.
973 std::vector<uint256> vHashes;
974 const CBlockIndex *pindexToAnnounce = pindexNew;
975 while (pindexToAnnounce != pindexFork) {
976 vHashes.push_back(pindexToAnnounce->GetBlockHash());
977 pindexToAnnounce = pindexToAnnounce->pprev;
978 if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) {
979 // Limit announcements in case of a huge reorganization.
980 // Rely on the peer's synchronization mechanism in that case.
981 break;
982 }
983 }
984 // Relay inventory, but don't relay old inventory during initial block download.
985 connman->ForEachNode([nNewHeight, &vHashes](CNode* pnode) {
986 if (nNewHeight > (pnode->nStartingHeight != -1 ? pnode->nStartingHeight - 2000 : 0)) {
987 for (const uint256& hash : reverse_iterate(vHashes)) {
988 pnode->PushBlockHash(hash);
989 }
990 }
991 });
992 connman->WakeMessageHandler();
993 }
994
995 nTimeBestReceived = GetTime();
996 }
997
998 /**
999 * Handle invalid block rejection and consequent peer banning, maintain which
1000 * peers announce compact blocks.
1001 */
void PeerLogicValidation::BlockChecked(const CBlock& block, const CValidationState& state) {
    LOCK(cs_main);

    const uint256 hash(block.GetHash());
    // Look up which peer (if any) provided this block, and whether that peer
    // is eligible for punishment (the bool in the mapped pair).
    std::map<uint256, std::pair<NodeId, bool>>::iterator it = mapBlockSource.find(hash);

    int nDoS = 0;
    if (state.IsInvalid(nDoS)) {
        // Don't send reject message with code 0 or an internal reject code.
        if (it != mapBlockSource.end() && State(it->second.first) && state.GetRejectCode() > 0 && state.GetRejectCode() < REJECT_INTERNAL) {
            CBlockReject reject = {(unsigned char)state.GetRejectCode(), state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), hash};
            State(it->second.first)->rejects.push_back(reject);
            // Only punish when the failure scores DoS points and the source
            // peer is marked punishable.
            if (nDoS > 0 && it->second.second)
                Misbehaving(it->second.first, nDoS);
        }
    }
    // Check that:
    // 1. The block is valid
    // 2. We're not in initial block download
    // 3. This is currently the best block we're aware of. We haven't updated
    //    the tip yet so we have no way to check this directly here. Instead we
    //    just check that there are currently no other blocks in flight.
    else if (state.IsValid() &&
             !IsInitialBlockDownload() &&
             mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) {
        if (it != mapBlockSource.end()) {
            // Consider asking this peer to announce future blocks via
            // compact-block headers.
            MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first, connman);
        }
    }
    // The block-source entry is single-use: drop it now that validation ran.
    if (it != mapBlockSource.end())
        mapBlockSource.erase(it);
}
1034
1035 //////////////////////////////////////////////////////////////////////////////
1036 //
1037 // Messages
1038 //
1039
1040
// Return whether we already have (or deliberately do not want) the object
// named by this inventory entry, i.e. whether a getdata request is pointless.
bool static AlreadyHave(const CInv& inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    switch (inv.type)
    {
    case MSG_TX:
    case MSG_WITNESS_TX:
        {
            assert(recentRejects);
            if (chainActive.Tip()->GetBlockHash() != hashRecentRejectsChainTip)
            {
                // If the chain tip has changed previously rejected transactions
                // might be now valid, e.g. due to a nLockTime'd tx becoming valid,
                // or a double-spend. Reset the rejects filter and give those
                // txs a second chance.
                hashRecentRejectsChainTip = chainActive.Tip()->GetBlockHash();
                recentRejects->reset();
            }

            {
                // Orphans count as "have": we are waiting for parents, not the
                // transaction itself.
                LOCK(g_cs_orphans);
                if (mapOrphanTransactions.count(inv.hash)) return true;
            }

            return recentRejects->contains(inv.hash) ||
                   mempool.exists(inv.hash) ||
                   pcoinsTip->HaveCoinInCache(COutPoint(inv.hash, 0)) || // Best effort: only try output 0 and 1
                   pcoinsTip->HaveCoinInCache(COutPoint(inv.hash, 1));
        }
    case MSG_BLOCK:
    case MSG_WITNESS_BLOCK:
        // Any known block index entry means we have (or had) the block.
        return LookupBlockIndex(inv.hash) != nullptr;
    }
    // Don't know what it is, just say we already got one
    return true;
}
1076
RelayTransaction(const CTransaction & tx,CConnman * connman)1077 static void RelayTransaction(const CTransaction& tx, CConnman* connman)
1078 {
1079 CInv inv(MSG_TX, tx.GetHash());
1080 connman->ForEachNode([&inv](CNode* pnode)
1081 {
1082 pnode->PushInventory(inv);
1083 });
1084 }
1085
// Relay an address to a small, deterministically chosen subset of peers.
static void RelayAddress(const CAddress& addr, bool fReachable, CConnman* connman)
{
    unsigned int nRelayNodes = fReachable ? 2 : 1; // limited relaying of addresses outside our network(s)

    // Relay to a limited number of other nodes
    // Use deterministic randomness to send to the same nodes for 24 hours
    // at a time so the addrKnowns of the chosen nodes prevent repeats
    uint64_t hashAddr = addr.GetHash();
    const CSipHasher hasher = connman->GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY).Write(hashAddr << 32).Write((GetTime() + hashAddr) / (24*60*60));
    FastRandomContext insecure_rand;

    // best[] holds the nRelayNodes candidates with the highest hash keys,
    // kept sorted in descending key order; key 0 marks an empty slot.
    std::array<std::pair<uint64_t, CNode*>,2> best{{{0, nullptr}, {0, nullptr}}};
    assert(nRelayNodes <= best.size());

    auto sortfunc = [&best, &hasher, nRelayNodes](CNode* pnode) {
        if (pnode->nVersion >= CADDR_TIME_VERSION) {
            uint64_t hashKey = CSipHasher(hasher).Write(pnode->GetId()).Finalize();
            // Insertion-sort this candidate into the top-N list, shifting
            // lower-keyed entries down.
            for (unsigned int i = 0; i < nRelayNodes; i++) {
                if (hashKey > best[i].first) {
                    std::copy(best.begin() + i, best.begin() + nRelayNodes - 1, best.begin() + i + 1);
                    best[i] = std::make_pair(hashKey, pnode);
                    break;
                }
            }
        }
    };

    auto pushfunc = [&addr, &best, nRelayNodes, &insecure_rand] {
        // Push the address to each selected peer (stop at the first empty slot).
        for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) {
            best[i].second->PushAddress(addr, insecure_rand);
        }
    };

    connman->ForEachNodeThen(std::move(sortfunc), std::move(pushfunc));
}
1121
// Serve a single block-type inventory request (BLOCK, FILTERED_BLOCK,
// CMPCT_BLOCK or WITNESS_BLOCK): apply relay-policy checks (staleness,
// bandwidth limits, prune-height leak avoidance), then send the block in the
// requested form, answering from the most-recent-block cache when possible.
void static ProcessGetBlockData(CNode* pfrom, const CChainParams& chainparams, const CInv& inv, CConnman* connman)
{
    bool send = false;
    std::shared_ptr<const CBlock> a_recent_block;
    std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block;
    bool fWitnessesPresentInARecentCompactBlock;
    const Consensus::Params& consensusParams = chainparams.GetConsensus();
    {
        // Snapshot the most-recent-block cache under its dedicated lock.
        LOCK(cs_most_recent_block);
        a_recent_block = most_recent_block;
        a_recent_compact_block = most_recent_compact_block;
        fWitnessesPresentInARecentCompactBlock = fWitnessesPresentInMostRecentCompactBlock;
    }

    bool need_activate_chain = false;
    {
        LOCK(cs_main);
        const CBlockIndex* pindex = LookupBlockIndex(inv.hash);
        if (pindex) {
            if (pindex->HaveTxsDownloaded() && !pindex->IsValid(BLOCK_VALID_SCRIPTS) &&
                    pindex->IsValid(BLOCK_VALID_TREE)) {
                // If we have the block and all of its parents, but have not yet validated it,
                // we might be in the middle of connecting it (ie in the unlock of cs_main
                // before ActivateBestChain but after AcceptBlock).
                // In this case, we need to run ActivateBestChain prior to checking the relay
                // conditions below.
                need_activate_chain = true;
            }
        }
    } // release cs_main before calling ActivateBestChain
    if (need_activate_chain) {
        CValidationState state;
        if (!ActivateBestChain(state, Params(), a_recent_block)) {
            LogPrint(BCLog::NET, "failed to activate chain (%s)\n", FormatStateMessage(state));
        }
    }

    LOCK(cs_main);
    const CBlockIndex* pindex = LookupBlockIndex(inv.hash);
    if (pindex) {
        send = BlockRequestAllowed(pindex, consensusParams);
        if (!send) {
            LogPrint(BCLog::NET, "%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom->GetId());
        }
    }
    const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
    // disconnect node in case we have reached the outbound limit for serving historical blocks
    // never disconnect whitelisted nodes
    if (send && connman->OutboundTargetReached(true) && ( ((pindexBestHeader != nullptr) && (pindexBestHeader->GetBlockTime() - pindex->GetBlockTime() > HISTORICAL_BLOCK_AGE)) || inv.type == MSG_FILTERED_BLOCK) && !pfrom->fWhitelisted)
    {
        LogPrint(BCLog::NET, "historical block serving limit reached, disconnect peer=%d\n", pfrom->GetId());

        //disconnect node
        pfrom->fDisconnect = true;
        send = false;
    }
    // Avoid leaking prune-height by never sending blocks below the NODE_NETWORK_LIMITED threshold
    if (send && !pfrom->fWhitelisted && (
            (((pfrom->GetLocalServices() & NODE_NETWORK_LIMITED) == NODE_NETWORK_LIMITED) && ((pfrom->GetLocalServices() & NODE_NETWORK) != NODE_NETWORK) && (chainActive.Tip()->nHeight - pindex->nHeight > (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2 /* add two blocks buffer extension for possible races */) )
       )) {
        LogPrint(BCLog::NET, "Ignore block request below NODE_NETWORK_LIMITED threshold from peer=%d\n", pfrom->GetId());

        //disconnect node and prevent it from stalling (would otherwise wait for the missing block)
        pfrom->fDisconnect = true;
        send = false;
    }
    // Pruned nodes may have deleted the block, so check whether
    // it's available before trying to send.
    if (send && (pindex->nStatus & BLOCK_HAVE_DATA))
    {
        std::shared_ptr<const CBlock> pblock;
        if (a_recent_block && a_recent_block->GetHash() == pindex->GetBlockHash()) {
            // Serve straight from the cached most-recent block.
            pblock = a_recent_block;
        } else if (inv.type == MSG_WITNESS_BLOCK) {
            // Fast-path: in this case it is possible to serve the block directly from disk,
            // as the network format matches the format on disk
            std::vector<uint8_t> block_data;
            if (!ReadRawBlockFromDisk(block_data, pindex, chainparams.MessageStart())) {
                assert(!"cannot load block from disk");
            }
            connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::BLOCK, MakeSpan(block_data)));
            // Don't set pblock as we've sent the block
        } else {
            // Send block from disk
            std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>();
            if (!ReadBlockFromDisk(*pblockRead, pindex, consensusParams))
                assert(!"cannot load block from disk");
            pblock = pblockRead;
        }
        if (pblock) {
            if (inv.type == MSG_BLOCK)
                connman->PushMessage(pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, *pblock));
            else if (inv.type == MSG_WITNESS_BLOCK)
                connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::BLOCK, *pblock));
            else if (inv.type == MSG_FILTERED_BLOCK)
            {
                bool sendMerkleBlock = false;
                CMerkleBlock merkleBlock;
                {
                    LOCK(pfrom->cs_filter);
                    if (pfrom->pfilter) {
                        sendMerkleBlock = true;
                        merkleBlock = CMerkleBlock(*pblock, *pfrom->pfilter);
                    }
                }
                if (sendMerkleBlock) {
                    connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::MERKLEBLOCK, merkleBlock));
                    // CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
                    // This avoids hurting performance by pointlessly requiring a round-trip
                    // Note that there is currently no way for a node to request any single transactions we didn't send here -
                    // they must either disconnect and retry or request the full block.
                    // Thus, the protocol spec specified allows for us to provide duplicate txn here,
                    // however we MUST always provide at least what the remote peer needs
                    typedef std::pair<unsigned int, uint256> PairType;
                    for (PairType& pair : merkleBlock.vMatchedTxn)
                        connman->PushMessage(pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::TX, *pblock->vtx[pair.first]));
                }
                // else
                    // no response
            }
            else if (inv.type == MSG_CMPCT_BLOCK)
            {
                // If a peer is asking for old blocks, we're almost guaranteed
                // they won't have a useful mempool to match against a compact block,
                // and we don't feel like constructing the object for them, so
                // instead we respond with the full, non-compact block.
                bool fPeerWantsWitness = State(pfrom->GetId())->fWantsCmpctWitness;
                int nSendFlags = fPeerWantsWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
                if (CanDirectFetch(consensusParams) && pindex->nHeight >= chainActive.Height() - MAX_CMPCTBLOCK_DEPTH) {
                    // Reuse the cached compact block when it matches and its
                    // witness-ness is acceptable to the peer.
                    if ((fPeerWantsWitness || !fWitnessesPresentInARecentCompactBlock) && a_recent_compact_block && a_recent_compact_block->header.GetHash() == pindex->GetBlockHash()) {
                        connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, *a_recent_compact_block));
                    } else {
                        CBlockHeaderAndShortTxIDs cmpctblock(*pblock, fPeerWantsWitness);
                        connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
                    }
                } else {
                    connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCK, *pblock));
                }
            }
        }

        // Trigger the peer node to send a getblocks request for the next batch of inventory
        if (inv.hash == pfrom->hashContinue)
        {
            // Bypass PushInventory, this must send even if redundant,
            // and we want it right after the last block so they don't
            // wait for other stuff first.
            std::vector<CInv> vInv;
            vInv.push_back(CInv(MSG_BLOCK, chainActive.Tip()->GetBlockHash()));
            connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::INV, vInv));
            pfrom->hashContinue.SetNull();
        }
    }
}
1276
// Service the peer's queued "getdata" requests: all consecutive transaction
// requests first (under cs_main), then at most one block-type request per
// call (ProcessGetBlockData takes its own locks). Transaction requests we
// cannot satisfy are answered with a NOTFOUND message.
void static ProcessGetData(CNode* pfrom, const CChainParams& chainparams, CConnman* connman, const std::atomic<bool>& interruptMsgProc) LOCKS_EXCLUDED(cs_main)
{
    AssertLockNotHeld(cs_main);

    std::deque<CInv>::iterator it = pfrom->vRecvGetData.begin();
    std::vector<CInv> vNotFound;
    const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
    {
        LOCK(cs_main);

        // Process as many consecutive TX requests as possible in one pass.
        while (it != pfrom->vRecvGetData.end() && (it->type == MSG_TX || it->type == MSG_WITNESS_TX)) {
            if (interruptMsgProc)
                return;
            // Don't bother if send buffer is too full to respond anyway
            if (pfrom->fPauseSend)
                break;

            const CInv &inv = *it;
            it++;

            // Send stream from relay memory
            bool push = false;
            auto mi = mapRelay.find(inv.hash);
            // Plain MSG_TX requests are served without witness data.
            int nSendFlags = (inv.type == MSG_TX ? SERIALIZE_TRANSACTION_NO_WITNESS : 0);
            if (mi != mapRelay.end()) {
                connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *mi->second));
                push = true;
            } else if (pfrom->timeLastMempoolReq) {
                auto txinfo = mempool.info(inv.hash);
                // To protect privacy, do not answer getdata using the mempool when
                // that TX couldn't have been INVed in reply to a MEMPOOL request.
                if (txinfo.tx && txinfo.nTime <= pfrom->timeLastMempoolReq) {
                    connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *txinfo.tx));
                    push = true;
                }
            }
            if (!push) {
                vNotFound.push_back(inv);
            }
        }
    } // release cs_main

    // Handle at most one block-type request; any further queued requests are
    // left for the next call.
    if (it != pfrom->vRecvGetData.end() && !pfrom->fPauseSend) {
        const CInv &inv = *it;
        if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK || inv.type == MSG_WITNESS_BLOCK) {
            it++;
            ProcessGetBlockData(pfrom, chainparams, inv, connman);
        }
    }

    // Drop the handled requests from the peer's queue.
    pfrom->vRecvGetData.erase(pfrom->vRecvGetData.begin(), it);

    if (!vNotFound.empty()) {
        // Let the peer know that we didn't find what it asked for, so it doesn't
        // have to wait around forever. Currently only SPV clients actually care
        // about this message: it's needed when they are recursively walking the
        // dependencies of relevant unconfirmed transactions. SPV clients want to
        // do that because they want to know about (and store and rebroadcast and
        // risk analyze) the dependencies of transactions relevant to them, without
        // having to download the entire memory pool.
        connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::NOTFOUND, vNotFound));
    }
}
1340
GetFetchFlags(CNode * pfrom)1341 static uint32_t GetFetchFlags(CNode* pfrom) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
1342 uint32_t nFetchFlags = 0;
1343 if ((pfrom->GetLocalServices() & NODE_WITNESS) && State(pfrom->GetId())->fHaveWitness) {
1344 nFetchFlags |= MSG_WITNESS_FLAG;
1345 }
1346 return nFetchFlags;
1347 }
1348
SendBlockTransactions(const CBlock & block,const BlockTransactionsRequest & req,CNode * pfrom,CConnman * connman)1349 inline void static SendBlockTransactions(const CBlock& block, const BlockTransactionsRequest& req, CNode* pfrom, CConnman* connman) {
1350 BlockTransactions resp(req);
1351 for (size_t i = 0; i < req.indexes.size(); i++) {
1352 if (req.indexes[i] >= block.vtx.size()) {
1353 LOCK(cs_main);
1354 Misbehaving(pfrom->GetId(), 100, strprintf("Peer %d sent us a getblocktxn with out-of-bounds tx indices", pfrom->GetId()));
1355 return;
1356 }
1357 resp.txn[i] = block.vtx[req.indexes[i]];
1358 }
1359 LOCK(cs_main);
1360 const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
1361 int nSendFlags = State(pfrom->GetId())->fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
1362 connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCKTXN, resp));
1363 }
1364
ProcessHeadersMessage(CNode * pfrom,CConnman * connman,const std::vector<CBlockHeader> & headers,const CChainParams & chainparams,bool punish_duplicate_invalid)1365 bool static ProcessHeadersMessage(CNode *pfrom, CConnman *connman, const std::vector<CBlockHeader>& headers, const CChainParams& chainparams, bool punish_duplicate_invalid)
1366 {
1367 const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
1368 size_t nCount = headers.size();
1369
1370 if (nCount == 0) {
1371 // Nothing interesting. Stop asking this peers for more headers.
1372 return true;
1373 }
1374
1375 bool received_new_header = false;
1376 const CBlockIndex *pindexLast = nullptr;
1377 {
1378 LOCK(cs_main);
1379 CNodeState *nodestate = State(pfrom->GetId());
1380
1381 // If this looks like it could be a block announcement (nCount <
1382 // MAX_BLOCKS_TO_ANNOUNCE), use special logic for handling headers that
1383 // don't connect:
1384 // - Send a getheaders message in response to try to connect the chain.
1385 // - The peer can send up to MAX_UNCONNECTING_HEADERS in a row that
1386 // don't connect before giving DoS points
1387 // - Once a headers message is received that is valid and does connect,
1388 // nUnconnectingHeaders gets reset back to 0.
1389 if (!LookupBlockIndex(headers[0].hashPrevBlock) && nCount < MAX_BLOCKS_TO_ANNOUNCE) {
1390 nodestate->nUnconnectingHeaders++;
1391 connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256()));
1392 LogPrint(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n",
1393 headers[0].GetHash().ToString(),
1394 headers[0].hashPrevBlock.ToString(),
1395 pindexBestHeader->nHeight,
1396 pfrom->GetId(), nodestate->nUnconnectingHeaders);
1397 // Set hashLastUnknownBlock for this peer, so that if we
1398 // eventually get the headers - even from a different peer -
1399 // we can use this peer to download.
1400 UpdateBlockAvailability(pfrom->GetId(), headers.back().GetHash());
1401
1402 if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS == 0) {
1403 Misbehaving(pfrom->GetId(), 20);
1404 }
1405 return true;
1406 }
1407
1408 uint256 hashLastBlock;
1409 for (const CBlockHeader& header : headers) {
1410 if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) {
1411 Misbehaving(pfrom->GetId(), 20, "non-continuous headers sequence");
1412 return false;
1413 }
1414 hashLastBlock = header.GetHash();
1415 }
1416
1417 // If we don't have the last header, then they'll have given us
1418 // something new (if these headers are valid).
1419 if (!LookupBlockIndex(hashLastBlock)) {
1420 received_new_header = true;
1421 }
1422 }
1423
1424 CValidationState state;
1425 CBlockHeader first_invalid_header;
1426 if (!ProcessNewBlockHeaders(headers, state, chainparams, &pindexLast, &first_invalid_header)) {
1427 int nDoS;
1428 if (state.IsInvalid(nDoS)) {
1429 LOCK(cs_main);
1430 if (nDoS > 0) {
1431 Misbehaving(pfrom->GetId(), nDoS, "invalid header received");
1432 } else {
1433 LogPrint(BCLog::NET, "peer=%d: invalid header received\n", pfrom->GetId());
1434 }
1435 if (punish_duplicate_invalid && LookupBlockIndex(first_invalid_header.GetHash())) {
1436 // Goal: don't allow outbound peers to use up our outbound
1437 // connection slots if they are on incompatible chains.
1438 //
1439 // We ask the caller to set punish_invalid appropriately based
1440 // on the peer and the method of header delivery (compact
1441 // blocks are allowed to be invalid in some circumstances,
1442 // under BIP 152).
1443 // Here, we try to detect the narrow situation that we have a
1444 // valid block header (ie it was valid at the time the header
1445 // was received, and hence stored in mapBlockIndex) but know the
1446 // block is invalid, and that a peer has announced that same
1447 // block as being on its active chain.
1448 // Disconnect the peer in such a situation.
1449 //
1450 // Note: if the header that is invalid was not accepted to our
1451 // mapBlockIndex at all, that may also be grounds for
1452 // disconnecting the peer, as the chain they are on is likely
1453 // to be incompatible. However, there is a circumstance where
1454 // that does not hold: if the header's timestamp is more than
1455 // 2 hours ahead of our current time. In that case, the header
1456 // may become valid in the future, and we don't want to
1457 // disconnect a peer merely for serving us one too-far-ahead
1458 // block header, to prevent an attacker from splitting the
1459 // network by mining a block right at the 2 hour boundary.
1460 //
1461 // TODO: update the DoS logic (or, rather, rewrite the
1462 // DoS-interface between validation and net_processing) so that
1463 // the interface is cleaner, and so that we disconnect on all the
1464 // reasons that a peer's headers chain is incompatible
1465 // with ours (eg block->nVersion softforks, MTP violations,
1466 // etc), and not just the duplicate-invalid case.
1467 pfrom->fDisconnect = true;
1468 }
1469 return false;
1470 }
1471 }
1472
1473 {
1474 LOCK(cs_main);
1475 CNodeState *nodestate = State(pfrom->GetId());
1476 if (nodestate->nUnconnectingHeaders > 0) {
1477 LogPrint(BCLog::NET, "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n", pfrom->GetId(), nodestate->nUnconnectingHeaders);
1478 }
1479 nodestate->nUnconnectingHeaders = 0;
1480
1481 assert(pindexLast);
1482 UpdateBlockAvailability(pfrom->GetId(), pindexLast->GetBlockHash());
1483
1484 // From here, pindexBestKnownBlock should be guaranteed to be non-null,
1485 // because it is set in UpdateBlockAvailability. Some nullptr checks
1486 // are still present, however, as belt-and-suspenders.
1487
1488 if (received_new_header && pindexLast->nChainWork > chainActive.Tip()->nChainWork) {
1489 nodestate->m_last_block_announcement = GetTime();
1490 }
1491
1492 if (nCount == MAX_HEADERS_RESULTS) {
1493 // Headers message had its maximum size; the peer may have more headers.
1494 // TODO: optimize: if pindexLast is an ancestor of chainActive.Tip or pindexBestHeader, continue
1495 // from there instead.
1496 LogPrint(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom->GetId(), pfrom->nStartingHeight);
1497 connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexLast), uint256()));
1498 }
1499
1500 bool fCanDirectFetch = CanDirectFetch(chainparams.GetConsensus());
1501 // If this set of headers is valid and ends in a block with at least as
1502 // much work as our tip, download as much as possible.
1503 if (fCanDirectFetch && pindexLast->IsValid(BLOCK_VALID_TREE) && chainActive.Tip()->nChainWork <= pindexLast->nChainWork) {
1504 std::vector<const CBlockIndex*> vToFetch;
1505 const CBlockIndex *pindexWalk = pindexLast;
1506 // Calculate all the blocks we'd need to switch to pindexLast, up to a limit.
1507 while (pindexWalk && !chainActive.Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
1508 if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
1509 !mapBlocksInFlight.count(pindexWalk->GetBlockHash()) &&
1510 (!IsWitnessEnabled(pindexWalk->pprev, chainparams.GetConsensus()) || State(pfrom->GetId())->fHaveWitness)) {
1511 // We don't have this block, and it's not yet in flight.
1512 vToFetch.push_back(pindexWalk);
1513 }
1514 pindexWalk = pindexWalk->pprev;
1515 }
1516 // If pindexWalk still isn't on our main chain, we're looking at a
1517 // very large reorg at a time we think we're close to caught up to
1518 // the main chain -- this shouldn't really happen. Bail out on the
1519 // direct fetch and rely on parallel download instead.
1520 if (!chainActive.Contains(pindexWalk)) {
1521 LogPrint(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
1522 pindexLast->GetBlockHash().ToString(),
1523 pindexLast->nHeight);
1524 } else {
1525 std::vector<CInv> vGetData;
1526 // Download as much as possible, from earliest to latest.
1527 for (const CBlockIndex *pindex : reverse_iterate(vToFetch)) {
1528 if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
1529 // Can't download any more from this peer
1530 break;
1531 }
1532 uint32_t nFetchFlags = GetFetchFlags(pfrom);
1533 vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
1534 MarkBlockAsInFlight(pfrom->GetId(), pindex->GetBlockHash(), pindex);
1535 LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
1536 pindex->GetBlockHash().ToString(), pfrom->GetId());
1537 }
1538 if (vGetData.size() > 1) {
1539 LogPrint(BCLog::NET, "Downloading blocks toward %s (%d) via headers direct fetch\n",
1540 pindexLast->GetBlockHash().ToString(), pindexLast->nHeight);
1541 }
1542 if (vGetData.size() > 0) {
1543 if (nodestate->fSupportsDesiredCmpctVersion && vGetData.size() == 1 && mapBlocksInFlight.size() == 1 && pindexLast->pprev->IsValid(BLOCK_VALID_CHAIN)) {
1544 // In any case, we want to download using a compact block, not a regular one
1545 vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
1546 }
1547 connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData));
1548 }
1549 }
1550 }
1551 // If we're in IBD, we want outbound peers that will serve us a useful
1552 // chain. Disconnect peers that are on chains with insufficient work.
1553 if (IsInitialBlockDownload() && nCount != MAX_HEADERS_RESULTS) {
1554 // When nCount < MAX_HEADERS_RESULTS, we know we have no more
1555 // headers to fetch from this peer.
1556 if (nodestate->pindexBestKnownBlock && nodestate->pindexBestKnownBlock->nChainWork < nMinimumChainWork) {
1557 // This peer has too little work on their headers chain to help
1558 // us sync -- disconnect if using an outbound slot (unless
1559 // whitelisted or addnode).
1560 // Note: We compare their tip to nMinimumChainWork (rather than
1561 // chainActive.Tip()) because we won't start block download
1562 // until we have a headers chain that has at least
1563 // nMinimumChainWork, even if a peer has a chain past our tip,
1564 // as an anti-DoS measure.
1565 if (IsOutboundDisconnectionCandidate(pfrom)) {
1566 LogPrintf("Disconnecting outbound peer %d -- headers chain has insufficient work\n", pfrom->GetId());
1567 pfrom->fDisconnect = true;
1568 }
1569 }
1570 }
1571
1572 if (!pfrom->fDisconnect && IsOutboundDisconnectionCandidate(pfrom) && nodestate->pindexBestKnownBlock != nullptr) {
1573 // If this is an outbound peer, check to see if we should protect
1574 // it from the bad/lagging chain logic.
1575 if (g_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= chainActive.Tip()->nChainWork && !nodestate->m_chain_sync.m_protect) {
1576 LogPrint(BCLog::NET, "Protecting outbound peer=%d from eviction\n", pfrom->GetId());
1577 nodestate->m_chain_sync.m_protect = true;
1578 ++g_outbound_peers_with_protect_from_disconnect;
1579 }
1580 }
1581 }
1582
1583 return true;
1584 }
1585
/**
 * Try to resolve queued orphan transactions now that a new parent may have arrived.
 *
 * Pops hashes off orphan_work_set one at a time and re-submits the corresponding
 * orphan to the mempool. Processes entries until either the work set is drained
 * or exactly one orphan is definitively resolved (accepted, or rejected with all
 * inputs present) — entries that are stale, from a misbehaving peer, or still
 * missing inputs are skipped/left without terminating the loop.
 *
 * @param connman          Connection manager used to relay newly accepted orphans.
 * @param orphan_work_set  Hashes of orphans to (re)try; consumed from the front,
 *                         and extended with descendants of any accepted orphan.
 * @param removed_txn      Out-param collecting transactions evicted from the
 *                         mempool as a side effect of acceptance.
 *
 * Requires cs_main and g_cs_orphans to be held by the caller.
 */
void static ProcessOrphanTx(CConnman* connman, std::set<uint256>& orphan_work_set, std::list<CTransactionRef>& removed_txn) EXCLUSIVE_LOCKS_REQUIRED(cs_main, g_cs_orphans)
{
    AssertLockHeld(cs_main);
    AssertLockHeld(g_cs_orphans);
    // Peers already punished during this call; their remaining orphans are skipped.
    std::set<NodeId> setMisbehaving;
    bool done = false;
    while (!done && !orphan_work_set.empty()) {
        // Take the next candidate and remove it from the set before processing,
        // so it is not retried even if we bail out early below.
        const uint256 orphanHash = *orphan_work_set.begin();
        orphan_work_set.erase(orphan_work_set.begin());

        auto orphan_it = mapOrphanTransactions.find(orphanHash);
        // Entry may have been erased (expired/evicted) since it was queued.
        if (orphan_it == mapOrphanTransactions.end()) continue;

        const CTransactionRef porphanTx = orphan_it->second.tx;
        const CTransaction& orphanTx = *porphanTx;
        NodeId fromPeer = orphan_it->second.fromPeer;
        bool fMissingInputs2 = false;
        // Use a dummy CValidationState so someone can't setup nodes to counter-DoS based on orphan
        // resolution (that is, feeding people an invalid transaction based on LegitTxX in order to get
        // anyone relaying LegitTxX banned)
        CValidationState stateDummy;

        if (setMisbehaving.count(fromPeer)) continue;
        if (AcceptToMemoryPool(mempool, stateDummy, porphanTx, &fMissingInputs2, &removed_txn, false /* bypass_limits */, 0 /* nAbsurdFee */)) {
            LogPrint(BCLog::MEMPOOL, "   accepted orphan tx %s\n", orphanHash.ToString());
            RelayTransaction(orphanTx, connman);
            // This orphan may itself be the missing parent of further orphans:
            // queue every orphan that spends one of its outputs for retry.
            for (unsigned int i = 0; i < orphanTx.vout.size(); i++) {
                auto it_by_prev = mapOrphanTransactionsByPrev.find(COutPoint(orphanHash, i));
                if (it_by_prev != mapOrphanTransactionsByPrev.end()) {
                    for (const auto& elem : it_by_prev->second) {
                        orphan_work_set.insert(elem->first);
                    }
                }
            }
            EraseOrphanTx(orphanHash);
            done = true;
        } else if (!fMissingInputs2) {
            int nDos = 0;
            if (stateDummy.IsInvalid(nDos) && nDos > 0) {
                // Punish peer that gave us an invalid orphan tx
                Misbehaving(fromPeer, nDos);
                setMisbehaving.insert(fromPeer);
                LogPrint(BCLog::MEMPOOL, "   invalid orphan tx %s\n", orphanHash.ToString());
            }
            // Has inputs but not accepted to mempool
            // Probably non-standard or insufficient fee
            LogPrint(BCLog::MEMPOOL, "   removed orphan tx %s\n", orphanHash.ToString());
            if (!orphanTx.HasWitness() && !stateDummy.CorruptionPossible()) {
                // Do not use rejection cache for witness transactions or
                // witness-stripped transactions, as they can have been malleated.
                // See https://github.com/bitcoin/bitcoin/issues/8279 for details.
                assert(recentRejects);
                recentRejects->insert(orphanHash);
            }
            EraseOrphanTx(orphanHash);
            done = true;
        }
        // Note: if inputs are still missing the orphan stays in the map for a
        // later retry; nothing is erased and the loop continues.
        mempool.check(pcoinsTip.get());
    }
}
1646
ProcessMessage(CNode * pfrom,const std::string & strCommand,CDataStream & vRecv,int64_t nTimeReceived,const CChainParams & chainparams,CConnman * connman,const std::atomic<bool> & interruptMsgProc,bool enable_bip61)1647 bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStream& vRecv, int64_t nTimeReceived, const CChainParams& chainparams, CConnman* connman, const std::atomic<bool>& interruptMsgProc, bool enable_bip61)
1648 {
1649 LogPrint(BCLog::NET, "received: %s (%u bytes) peer=%d\n", SanitizeString(strCommand), vRecv.size(), pfrom->GetId());
1650 if (gArgs.IsArgSet("-dropmessagestest") && GetRand(gArgs.GetArg("-dropmessagestest", 0)) == 0)
1651 {
1652 LogPrintf("dropmessagestest DROPPING RECV MESSAGE\n");
1653 return true;
1654 }
1655
1656
1657 if (!(pfrom->GetLocalServices() & NODE_BLOOM) &&
1658 (strCommand == NetMsgType::FILTERLOAD ||
1659 strCommand == NetMsgType::FILTERADD))
1660 {
1661 if (pfrom->nVersion >= NO_BLOOM_VERSION) {
1662 LOCK(cs_main);
1663 Misbehaving(pfrom->GetId(), 100);
1664 return false;
1665 } else {
1666 pfrom->fDisconnect = true;
1667 return false;
1668 }
1669 }
1670
1671 if (strCommand == NetMsgType::REJECT)
1672 {
1673 if (LogAcceptCategory(BCLog::NET)) {
1674 try {
1675 std::string strMsg; unsigned char ccode; std::string strReason;
1676 vRecv >> LIMITED_STRING(strMsg, CMessageHeader::COMMAND_SIZE) >> ccode >> LIMITED_STRING(strReason, MAX_REJECT_MESSAGE_LENGTH);
1677
1678 std::ostringstream ss;
1679 ss << strMsg << " code " << itostr(ccode) << ": " << strReason;
1680
1681 if (strMsg == NetMsgType::BLOCK || strMsg == NetMsgType::TX)
1682 {
1683 uint256 hash;
1684 vRecv >> hash;
1685 ss << ": hash " << hash.ToString();
1686 }
1687 LogPrint(BCLog::NET, "Reject %s\n", SanitizeString(ss.str()));
1688 } catch (const std::ios_base::failure&) {
1689 // Avoid feedback loops by preventing reject messages from triggering a new reject message.
1690 LogPrint(BCLog::NET, "Unparseable reject message received\n");
1691 }
1692 }
1693 return true;
1694 }
1695
1696 if (strCommand == NetMsgType::VERSION) {
1697 // Each connection can only send one version message
1698 if (pfrom->nVersion != 0)
1699 {
1700 if (enable_bip61) {
1701 connman->PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, strCommand, REJECT_DUPLICATE, std::string("Duplicate version message")));
1702 }
1703 LOCK(cs_main);
1704 Misbehaving(pfrom->GetId(), 1);
1705 return false;
1706 }
1707
1708 int64_t nTime;
1709 CAddress addrMe;
1710 CAddress addrFrom;
1711 uint64_t nNonce = 1;
1712 uint64_t nServiceInt;
1713 ServiceFlags nServices;
1714 int nVersion;
1715 int nSendVersion;
1716 std::string strSubVer;
1717 std::string cleanSubVer;
1718 int nStartingHeight = -1;
1719 bool fRelay = true;
1720
1721 vRecv >> nVersion >> nServiceInt >> nTime >> addrMe;
1722 nSendVersion = std::min(nVersion, PROTOCOL_VERSION);
1723 nServices = ServiceFlags(nServiceInt);
1724 if (!pfrom->fInbound)
1725 {
1726 connman->SetServices(pfrom->addr, nServices);
1727 }
1728 if (!pfrom->fInbound && !pfrom->fFeeler && !pfrom->m_manual_connection && !HasAllDesirableServiceFlags(nServices))
1729 {
1730 LogPrint(BCLog::NET, "peer=%d does not offer the expected services (%08x offered, %08x expected); disconnecting\n", pfrom->GetId(), nServices, GetDesirableServiceFlags(nServices));
1731 if (enable_bip61) {
1732 connman->PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, strCommand, REJECT_NONSTANDARD,
1733 strprintf("Expected to offer services %08x", GetDesirableServiceFlags(nServices))));
1734 }
1735 pfrom->fDisconnect = true;
1736 return false;
1737 }
1738
1739 if (nVersion < MIN_PEER_PROTO_VERSION) {
1740 // disconnect from peers older than this proto version
1741 LogPrint(BCLog::NET, "peer=%d using obsolete version %i; disconnecting\n", pfrom->GetId(), nVersion);
1742 if (enable_bip61) {
1743 connman->PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, strCommand, REJECT_OBSOLETE,
1744 strprintf("Version must be %d or greater", MIN_PEER_PROTO_VERSION)));
1745 }
1746 pfrom->fDisconnect = true;
1747 return false;
1748 }
1749
1750 if (!vRecv.empty())
1751 vRecv >> addrFrom >> nNonce;
1752 if (!vRecv.empty()) {
1753 vRecv >> LIMITED_STRING(strSubVer, MAX_SUBVERSION_LENGTH);
1754 cleanSubVer = SanitizeString(strSubVer);
1755 }
1756 if (!vRecv.empty()) {
1757 vRecv >> nStartingHeight;
1758 }
1759 if (!vRecv.empty())
1760 vRecv >> fRelay;
1761 // Disconnect if we connected to ourself
1762 if (pfrom->fInbound && !connman->CheckIncomingNonce(nNonce))
1763 {
1764 LogPrintf("connected to self at %s, disconnecting\n", pfrom->addr.ToString());
1765 pfrom->fDisconnect = true;
1766 return true;
1767 }
1768
1769 if (pfrom->fInbound && addrMe.IsRoutable())
1770 {
1771 SeenLocal(addrMe);
1772 }
1773
1774 // Be shy and don't send version until we hear
1775 if (pfrom->fInbound)
1776 PushNodeVersion(pfrom, connman, GetAdjustedTime());
1777
1778 connman->PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERACK));
1779
1780 pfrom->nServices = nServices;
1781 pfrom->SetAddrLocal(addrMe);
1782 {
1783 LOCK(pfrom->cs_SubVer);
1784 pfrom->strSubVer = strSubVer;
1785 pfrom->cleanSubVer = cleanSubVer;
1786 }
1787 pfrom->nStartingHeight = nStartingHeight;
1788
1789 // set nodes not relaying blocks and tx and not serving (parts) of the historical blockchain as "clients"
1790 pfrom->fClient = (!(nServices & NODE_NETWORK) && !(nServices & NODE_NETWORK_LIMITED));
1791
1792 // set nodes not capable of serving the complete blockchain history as "limited nodes"
1793 pfrom->m_limited_node = (!(nServices & NODE_NETWORK) && (nServices & NODE_NETWORK_LIMITED));
1794
1795 {
1796 LOCK(pfrom->cs_filter);
1797 pfrom->fRelayTxes = fRelay; // set to true after we get the first filter* message
1798 }
1799
1800 // Change version
1801 pfrom->SetSendVersion(nSendVersion);
1802 pfrom->nVersion = nVersion;
1803
1804 if((nServices & NODE_WITNESS))
1805 {
1806 LOCK(cs_main);
1807 State(pfrom->GetId())->fHaveWitness = true;
1808 }
1809
1810 // Potentially mark this peer as a preferred download peer.
1811 {
1812 LOCK(cs_main);
1813 UpdatePreferredDownload(pfrom, State(pfrom->GetId()));
1814 }
1815
1816 if (!pfrom->fInbound)
1817 {
1818 // Advertise our address
1819 if (fListen && !IsInitialBlockDownload())
1820 {
1821 CAddress addr = GetLocalAddress(&pfrom->addr, pfrom->GetLocalServices());
1822 FastRandomContext insecure_rand;
1823 if (addr.IsRoutable())
1824 {
1825 LogPrint(BCLog::NET, "ProcessMessages: advertising address %s\n", addr.ToString());
1826 pfrom->PushAddress(addr, insecure_rand);
1827 } else if (IsPeerAddrLocalGood(pfrom)) {
1828 addr.SetIP(addrMe);
1829 LogPrint(BCLog::NET, "ProcessMessages: advertising address %s\n", addr.ToString());
1830 pfrom->PushAddress(addr, insecure_rand);
1831 }
1832 }
1833
1834 // Get recent addresses
1835 if (pfrom->fOneShot || pfrom->nVersion >= CADDR_TIME_VERSION || connman->GetAddressCount() < 1000)
1836 {
1837 connman->PushMessage(pfrom, CNetMsgMaker(nSendVersion).Make(NetMsgType::GETADDR));
1838 pfrom->fGetAddr = true;
1839 }
1840 connman->MarkAddressGood(pfrom->addr);
1841 }
1842
1843 std::string remoteAddr;
1844 if (fLogIPs)
1845 remoteAddr = ", peeraddr=" + pfrom->addr.ToString();
1846
1847 LogPrint(BCLog::NET, "receive version message: %s: version %d, blocks=%d, us=%s, peer=%d%s\n",
1848 cleanSubVer, pfrom->nVersion,
1849 pfrom->nStartingHeight, addrMe.ToString(), pfrom->GetId(),
1850 remoteAddr);
1851
1852 int64_t nTimeOffset = nTime - GetTime();
1853 pfrom->nTimeOffset = nTimeOffset;
1854 AddTimeData(pfrom->addr, nTimeOffset);
1855
1856 // If the peer is old enough to have the old alert system, send it the final alert.
1857 if (pfrom->nVersion <= 70012) {
1858 CDataStream finalAlert(ParseHex("5c0100000015f7675900000000ffffff7f00000000ffffff7ffeffff7f0000000000ffffff7f00ffffff7f002f555247454e543a20416c657274206b657920636f6d70726f6d697365642c2075706772616465207265717569726564004630440220405f7e7572b176f3316d4e12deab75ad4ff978844f7a7bcd5ed06f6aa094eb6602207880fcc07d0a78e0f46f188d115e04ed4ad48980ea3572cb0e0cb97921048095"), SER_NETWORK, PROTOCOL_VERSION);
1859 connman->PushMessage(pfrom, CNetMsgMaker(nSendVersion).Make("alert", finalAlert));
1860 }
1861
1862 // Feeler connections exist only to verify if address is online.
1863 if (pfrom->fFeeler) {
1864 assert(pfrom->fInbound == false);
1865 pfrom->fDisconnect = true;
1866 }
1867 return true;
1868 }
1869
1870 if (pfrom->nVersion == 0) {
1871 // Must have a version message before anything else
1872 LOCK(cs_main);
1873 Misbehaving(pfrom->GetId(), 1);
1874 return false;
1875 }
1876
1877 // At this point, the outgoing message serialization version can't change.
1878 const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
1879
1880 if (strCommand == NetMsgType::VERACK)
1881 {
1882 pfrom->SetRecvVersion(std::min(pfrom->nVersion.load(), PROTOCOL_VERSION));
1883
1884 if (!pfrom->fInbound) {
1885 // Mark this node as currently connected, so we update its timestamp later.
1886 LOCK(cs_main);
1887 State(pfrom->GetId())->fCurrentlyConnected = true;
1888 LogPrintf("New outbound peer connected: version: %d, blocks=%d, peer=%d%s\n",
1889 pfrom->nVersion.load(), pfrom->nStartingHeight, pfrom->GetId(),
1890 (fLogIPs ? strprintf(", peeraddr=%s", pfrom->addr.ToString()) : ""));
1891 }
1892
1893 if (pfrom->nVersion >= SENDHEADERS_VERSION) {
1894 // Tell our peer we prefer to receive headers rather than inv's
1895 // We send this to non-NODE NETWORK peers as well, because even
1896 // non-NODE NETWORK peers can announce blocks (such as pruning
1897 // nodes)
1898 connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::SENDHEADERS));
1899 }
1900 if (pfrom->nVersion >= SHORT_IDS_BLOCKS_VERSION) {
1901 // Tell our peer we are willing to provide version 1 or 2 cmpctblocks
1902 // However, we do not request new block announcements using
1903 // cmpctblock messages.
1904 // We send this to non-NODE NETWORK peers as well, because
1905 // they may wish to request compact blocks from us
1906 bool fAnnounceUsingCMPCTBLOCK = false;
1907 uint64_t nCMPCTBLOCKVersion = 2;
1908 if (pfrom->GetLocalServices() & NODE_WITNESS)
1909 connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
1910 nCMPCTBLOCKVersion = 1;
1911 connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
1912 }
1913 pfrom->fSuccessfullyConnected = true;
1914 return true;
1915 }
1916
1917 if (!pfrom->fSuccessfullyConnected) {
1918 // Must have a verack message before anything else
1919 LOCK(cs_main);
1920 Misbehaving(pfrom->GetId(), 1);
1921 return false;
1922 }
1923
1924 if (strCommand == NetMsgType::ADDR) {
1925 std::vector<CAddress> vAddr;
1926 vRecv >> vAddr;
1927
1928 // Don't want addr from older versions unless seeding
1929 if (pfrom->nVersion < CADDR_TIME_VERSION && connman->GetAddressCount() > 1000)
1930 return true;
1931 if (vAddr.size() > 1000)
1932 {
1933 LOCK(cs_main);
1934 Misbehaving(pfrom->GetId(), 20, strprintf("message addr size() = %u", vAddr.size()));
1935 return false;
1936 }
1937
1938 // Store the new addresses
1939 std::vector<CAddress> vAddrOk;
1940 int64_t nNow = GetAdjustedTime();
1941 int64_t nSince = nNow - 10 * 60;
1942 for (CAddress& addr : vAddr)
1943 {
1944 if (interruptMsgProc)
1945 return true;
1946
1947 // We only bother storing full nodes, though this may include
1948 // things which we would not make an outbound connection to, in
1949 // part because we may make feeler connections to them.
1950 if (!MayHaveUsefulAddressDB(addr.nServices) && !HasAllDesirableServiceFlags(addr.nServices))
1951 continue;
1952
1953 if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60)
1954 addr.nTime = nNow - 5 * 24 * 60 * 60;
1955 pfrom->AddAddressKnown(addr);
1956 if (g_banman->IsBanned(addr)) continue; // Do not process banned addresses beyond remembering we received them
1957 bool fReachable = IsReachable(addr);
1958 if (addr.nTime > nSince && !pfrom->fGetAddr && vAddr.size() <= 10 && addr.IsRoutable())
1959 {
1960 // Relay to a limited number of other nodes
1961 RelayAddress(addr, fReachable, connman);
1962 }
1963 // Do not store addresses outside our network
1964 if (fReachable)
1965 vAddrOk.push_back(addr);
1966 }
1967 connman->AddNewAddresses(vAddrOk, pfrom->addr, 2 * 60 * 60);
1968 if (vAddr.size() < 1000)
1969 pfrom->fGetAddr = false;
1970 if (pfrom->fOneShot)
1971 pfrom->fDisconnect = true;
1972 return true;
1973 }
1974
1975 if (strCommand == NetMsgType::SENDHEADERS) {
1976 LOCK(cs_main);
1977 State(pfrom->GetId())->fPreferHeaders = true;
1978 return true;
1979 }
1980
1981 if (strCommand == NetMsgType::SENDCMPCT) {
1982 bool fAnnounceUsingCMPCTBLOCK = false;
1983 uint64_t nCMPCTBLOCKVersion = 0;
1984 vRecv >> fAnnounceUsingCMPCTBLOCK >> nCMPCTBLOCKVersion;
1985 if (nCMPCTBLOCKVersion == 1 || ((pfrom->GetLocalServices() & NODE_WITNESS) && nCMPCTBLOCKVersion == 2)) {
1986 LOCK(cs_main);
1987 // fProvidesHeaderAndIDs is used to "lock in" version of compact blocks we send (fWantsCmpctWitness)
1988 if (!State(pfrom->GetId())->fProvidesHeaderAndIDs) {
1989 State(pfrom->GetId())->fProvidesHeaderAndIDs = true;
1990 State(pfrom->GetId())->fWantsCmpctWitness = nCMPCTBLOCKVersion == 2;
1991 }
1992 if (State(pfrom->GetId())->fWantsCmpctWitness == (nCMPCTBLOCKVersion == 2)) // ignore later version announces
1993 State(pfrom->GetId())->fPreferHeaderAndIDs = fAnnounceUsingCMPCTBLOCK;
1994 if (!State(pfrom->GetId())->fSupportsDesiredCmpctVersion) {
1995 if (pfrom->GetLocalServices() & NODE_WITNESS)
1996 State(pfrom->GetId())->fSupportsDesiredCmpctVersion = (nCMPCTBLOCKVersion == 2);
1997 else
1998 State(pfrom->GetId())->fSupportsDesiredCmpctVersion = (nCMPCTBLOCKVersion == 1);
1999 }
2000 }
2001 return true;
2002 }
2003
2004 if (strCommand == NetMsgType::INV) {
2005 std::vector<CInv> vInv;
2006 vRecv >> vInv;
2007 if (vInv.size() > MAX_INV_SZ)
2008 {
2009 LOCK(cs_main);
2010 Misbehaving(pfrom->GetId(), 20, strprintf("message inv size() = %u", vInv.size()));
2011 return false;
2012 }
2013
2014 bool fBlocksOnly = !g_relay_txes;
2015
2016 // Allow whitelisted peers to send data other than blocks in blocks only mode if whitelistrelay is true
2017 if (pfrom->fWhitelisted && gArgs.GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY))
2018 fBlocksOnly = false;
2019
2020 LOCK(cs_main);
2021
2022 uint32_t nFetchFlags = GetFetchFlags(pfrom);
2023
2024 for (CInv &inv : vInv)
2025 {
2026 if (interruptMsgProc)
2027 return true;
2028
2029 bool fAlreadyHave = AlreadyHave(inv);
2030 LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom->GetId());
2031
2032 if (inv.type == MSG_TX) {
2033 inv.type |= nFetchFlags;
2034 }
2035
2036 if (inv.type == MSG_BLOCK) {
2037 UpdateBlockAvailability(pfrom->GetId(), inv.hash);
2038 if (!fAlreadyHave && !fImporting && !fReindex && !mapBlocksInFlight.count(inv.hash)) {
2039 // We used to request the full block here, but since headers-announcements are now the
2040 // primary method of announcement on the network, and since, in the case that a node
2041 // fell back to inv we probably have a reorg which we should get the headers for first,
2042 // we now only provide a getheaders response here. When we receive the headers, we will
2043 // then ask for the blocks we need.
2044 connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), inv.hash));
2045 LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, inv.hash.ToString(), pfrom->GetId());
2046 }
2047 }
2048 else
2049 {
2050 pfrom->AddInventoryKnown(inv);
2051 if (fBlocksOnly) {
2052 LogPrint(BCLog::NET, "transaction (%s) inv sent in violation of protocol peer=%d\n", inv.hash.ToString(), pfrom->GetId());
2053 } else if (!fAlreadyHave && !fImporting && !fReindex && !IsInitialBlockDownload()) {
2054 pfrom->AskFor(inv);
2055 }
2056 }
2057 }
2058 return true;
2059 }
2060
2061 if (strCommand == NetMsgType::GETDATA) {
2062 std::vector<CInv> vInv;
2063 vRecv >> vInv;
2064 if (vInv.size() > MAX_INV_SZ)
2065 {
2066 LOCK(cs_main);
2067 Misbehaving(pfrom->GetId(), 20, strprintf("message getdata size() = %u", vInv.size()));
2068 return false;
2069 }
2070
2071 LogPrint(BCLog::NET, "received getdata (%u invsz) peer=%d\n", vInv.size(), pfrom->GetId());
2072
2073 if (vInv.size() > 0) {
2074 LogPrint(BCLog::NET, "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom->GetId());
2075 }
2076
2077 pfrom->vRecvGetData.insert(pfrom->vRecvGetData.end(), vInv.begin(), vInv.end());
2078 ProcessGetData(pfrom, chainparams, connman, interruptMsgProc);
2079 return true;
2080 }
2081
2082 if (strCommand == NetMsgType::GETBLOCKS) {
2083 CBlockLocator locator;
2084 uint256 hashStop;
2085 vRecv >> locator >> hashStop;
2086
2087 if (locator.vHave.size() > MAX_LOCATOR_SZ) {
2088 LogPrint(BCLog::NET, "getblocks locator size %lld > %d, disconnect peer=%d\n", locator.vHave.size(), MAX_LOCATOR_SZ, pfrom->GetId());
2089 pfrom->fDisconnect = true;
2090 return true;
2091 }
2092
2093 // We might have announced the currently-being-connected tip using a
2094 // compact block, which resulted in the peer sending a getblocks
2095 // request, which we would otherwise respond to without the new block.
2096 // To avoid this situation we simply verify that we are on our best
2097 // known chain now. This is super overkill, but we handle it better
2098 // for getheaders requests, and there are no known nodes which support
2099 // compact blocks but still use getblocks to request blocks.
2100 {
2101 std::shared_ptr<const CBlock> a_recent_block;
2102 {
2103 LOCK(cs_most_recent_block);
2104 a_recent_block = most_recent_block;
2105 }
2106 CValidationState state;
2107 if (!ActivateBestChain(state, Params(), a_recent_block)) {
2108 LogPrint(BCLog::NET, "failed to activate chain (%s)\n", FormatStateMessage(state));
2109 }
2110 }
2111
2112 LOCK(cs_main);
2113
2114 // Find the last block the caller has in the main chain
2115 const CBlockIndex* pindex = FindForkInGlobalIndex(chainActive, locator);
2116
2117 // Send the rest of the chain
2118 if (pindex)
2119 pindex = chainActive.Next(pindex);
2120 int nLimit = 500;
2121 LogPrint(BCLog::NET, "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom->GetId());
2122 for (; pindex; pindex = chainActive.Next(pindex))
2123 {
2124 if (pindex->GetBlockHash() == hashStop)
2125 {
2126 LogPrint(BCLog::NET, " getblocks stopping at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
2127 break;
2128 }
2129 // If pruning, don't inv blocks unless we have on disk and are likely to still have
2130 // for some reasonable time window (1 hour) that block relay might require.
2131 const int nPrunedBlocksLikelyToHave = MIN_BLOCKS_TO_KEEP - 3600 / chainparams.GetConsensus().nPowTargetSpacing;
2132 if (fPruneMode && (!(pindex->nStatus & BLOCK_HAVE_DATA) || pindex->nHeight <= chainActive.Tip()->nHeight - nPrunedBlocksLikelyToHave))
2133 {
2134 LogPrint(BCLog::NET, " getblocks stopping, pruned or too old block at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
2135 break;
2136 }
2137 pfrom->PushInventory(CInv(MSG_BLOCK, pindex->GetBlockHash()));
2138 if (--nLimit <= 0)
2139 {
2140 // When this block is requested, we'll send an inv that'll
2141 // trigger the peer to getblocks the next batch of inventory.
2142 LogPrint(BCLog::NET, " getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
2143 pfrom->hashContinue = pindex->GetBlockHash();
2144 break;
2145 }
2146 }
2147 return true;
2148 }
2149
2150 if (strCommand == NetMsgType::GETBLOCKTXN) {
2151 BlockTransactionsRequest req;
2152 vRecv >> req;
2153
2154 std::shared_ptr<const CBlock> recent_block;
2155 {
2156 LOCK(cs_most_recent_block);
2157 if (most_recent_block_hash == req.blockhash)
2158 recent_block = most_recent_block;
2159 // Unlock cs_most_recent_block to avoid cs_main lock inversion
2160 }
2161 if (recent_block) {
2162 SendBlockTransactions(*recent_block, req, pfrom, connman);
2163 return true;
2164 }
2165
2166 LOCK(cs_main);
2167
2168 const CBlockIndex* pindex = LookupBlockIndex(req.blockhash);
2169 if (!pindex || !(pindex->nStatus & BLOCK_HAVE_DATA)) {
2170 LogPrint(BCLog::NET, "Peer %d sent us a getblocktxn for a block we don't have\n", pfrom->GetId());
2171 return true;
2172 }
2173
2174 if (pindex->nHeight < chainActive.Height() - MAX_BLOCKTXN_DEPTH) {
2175 // If an older block is requested (should never happen in practice,
2176 // but can happen in tests) send a block response instead of a
2177 // blocktxn response. Sending a full block response instead of a
2178 // small blocktxn response is preferable in the case where a peer
2179 // might maliciously send lots of getblocktxn requests to trigger
2180 // expensive disk reads, because it will require the peer to
2181 // actually receive all the data read from disk over the network.
2182 LogPrint(BCLog::NET, "Peer %d sent us a getblocktxn for a block > %i deep\n", pfrom->GetId(), MAX_BLOCKTXN_DEPTH);
2183 CInv inv;
2184 inv.type = State(pfrom->GetId())->fWantsCmpctWitness ? MSG_WITNESS_BLOCK : MSG_BLOCK;
2185 inv.hash = req.blockhash;
2186 pfrom->vRecvGetData.push_back(inv);
2187 // The message processing loop will go around again (without pausing) and we'll respond then (without cs_main)
2188 return true;
2189 }
2190
2191 CBlock block;
2192 bool ret = ReadBlockFromDisk(block, pindex, chainparams.GetConsensus());
2193 assert(ret);
2194
2195 SendBlockTransactions(block, req, pfrom, connman);
2196 return true;
2197 }
2198
2199 if (strCommand == NetMsgType::GETHEADERS) {
2200 CBlockLocator locator;
2201 uint256 hashStop;
2202 vRecv >> locator >> hashStop;
2203
2204 if (locator.vHave.size() > MAX_LOCATOR_SZ) {
2205 LogPrint(BCLog::NET, "getheaders locator size %lld > %d, disconnect peer=%d\n", locator.vHave.size(), MAX_LOCATOR_SZ, pfrom->GetId());
2206 pfrom->fDisconnect = true;
2207 return true;
2208 }
2209
2210 LOCK(cs_main);
2211 if (IsInitialBlockDownload() && !pfrom->fWhitelisted) {
2212 LogPrint(BCLog::NET, "Ignoring getheaders from peer=%d because node is in initial block download\n", pfrom->GetId());
2213 return true;
2214 }
2215
2216 CNodeState *nodestate = State(pfrom->GetId());
2217 const CBlockIndex* pindex = nullptr;
2218 if (locator.IsNull())
2219 {
2220 // If locator is null, return the hashStop block
2221 pindex = LookupBlockIndex(hashStop);
2222 if (!pindex) {
2223 return true;
2224 }
2225
2226 if (!BlockRequestAllowed(pindex, chainparams.GetConsensus())) {
2227 LogPrint(BCLog::NET, "%s: ignoring request from peer=%i for old block header that isn't in the main chain\n", __func__, pfrom->GetId());
2228 return true;
2229 }
2230 }
2231 else
2232 {
2233 // Find the last block the caller has in the main chain
2234 pindex = FindForkInGlobalIndex(chainActive, locator);
2235 if (pindex)
2236 pindex = chainActive.Next(pindex);
2237 }
2238
2239 // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end
2240 std::vector<CBlock> vHeaders;
2241 int nLimit = MAX_HEADERS_RESULTS;
2242 LogPrint(BCLog::NET, "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), pfrom->GetId());
2243 for (; pindex; pindex = chainActive.Next(pindex))
2244 {
2245 vHeaders.push_back(pindex->GetBlockHeader());
2246 if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop)
2247 break;
2248 }
2249 // pindex can be nullptr either if we sent chainActive.Tip() OR
2250 // if our peer has chainActive.Tip() (and thus we are sending an empty
2251 // headers message). In both cases it's safe to update
2252 // pindexBestHeaderSent to be our tip.
2253 //
2254 // It is important that we simply reset the BestHeaderSent value here,
2255 // and not max(BestHeaderSent, newHeaderSent). We might have announced
2256 // the currently-being-connected tip using a compact block, which
2257 // resulted in the peer sending a headers request, which we respond to
2258 // without the new block. By resetting the BestHeaderSent, we ensure we
2259 // will re-announce the new block via headers (or compact blocks again)
2260 // in the SendMessages logic.
2261 nodestate->pindexBestHeaderSent = pindex ? pindex : chainActive.Tip();
2262 connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
2263 return true;
2264 }
2265
2266 if (strCommand == NetMsgType::TX) {
2267 // Stop processing the transaction early if
2268 // We are in blocks only mode and peer is either not whitelisted or whitelistrelay is off
2269 if (!g_relay_txes && (!pfrom->fWhitelisted || !gArgs.GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY)))
2270 {
2271 LogPrint(BCLog::NET, "transaction sent in violation of protocol peer=%d\n", pfrom->GetId());
2272 return true;
2273 }
2274
2275 CTransactionRef ptx;
2276 vRecv >> ptx;
2277 const CTransaction& tx = *ptx;
2278
2279 CInv inv(MSG_TX, tx.GetHash());
2280 pfrom->AddInventoryKnown(inv);
2281
2282 LOCK2(cs_main, g_cs_orphans);
2283
2284 bool fMissingInputs = false;
2285 CValidationState state;
2286
2287 pfrom->setAskFor.erase(inv.hash);
2288 mapAlreadyAskedFor.erase(inv.hash);
2289
2290 std::list<CTransactionRef> lRemovedTxn;
2291
2292 if (!AlreadyHave(inv) &&
2293 AcceptToMemoryPool(mempool, state, ptx, &fMissingInputs, &lRemovedTxn, false /* bypass_limits */, 0 /* nAbsurdFee */)) {
2294 mempool.check(pcoinsTip.get());
2295 RelayTransaction(tx, connman);
2296 for (unsigned int i = 0; i < tx.vout.size(); i++) {
2297 auto it_by_prev = mapOrphanTransactionsByPrev.find(COutPoint(inv.hash, i));
2298 if (it_by_prev != mapOrphanTransactionsByPrev.end()) {
2299 for (const auto& elem : it_by_prev->second) {
2300 pfrom->orphan_work_set.insert(elem->first);
2301 }
2302 }
2303 }
2304
2305 pfrom->nLastTXTime = GetTime();
2306
2307 LogPrint(BCLog::MEMPOOL, "AcceptToMemoryPool: peer=%d: accepted %s (poolsz %u txn, %u kB)\n",
2308 pfrom->GetId(),
2309 tx.GetHash().ToString(),
2310 mempool.size(), mempool.DynamicMemoryUsage() / 1000);
2311
2312 // Recursively process any orphan transactions that depended on this one
2313 ProcessOrphanTx(connman, pfrom->orphan_work_set, lRemovedTxn);
2314 }
2315 else if (fMissingInputs)
2316 {
2317 bool fRejectedParents = false; // It may be the case that the orphans parents have all been rejected
2318 for (const CTxIn& txin : tx.vin) {
2319 if (recentRejects->contains(txin.prevout.hash)) {
2320 fRejectedParents = true;
2321 break;
2322 }
2323 }
2324 if (!fRejectedParents) {
2325 uint32_t nFetchFlags = GetFetchFlags(pfrom);
2326 for (const CTxIn& txin : tx.vin) {
2327 CInv _inv(MSG_TX | nFetchFlags, txin.prevout.hash);
2328 pfrom->AddInventoryKnown(_inv);
2329 if (!AlreadyHave(_inv)) pfrom->AskFor(_inv);
2330 }
2331 AddOrphanTx(ptx, pfrom->GetId());
2332
2333 // DoS prevention: do not allow mapOrphanTransactions to grow unbounded
2334 unsigned int nMaxOrphanTx = (unsigned int)std::max((int64_t)0, gArgs.GetArg("-maxorphantx", DEFAULT_MAX_ORPHAN_TRANSACTIONS));
2335 unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTx);
2336 if (nEvicted > 0) {
2337 LogPrint(BCLog::MEMPOOL, "mapOrphan overflow, removed %u tx\n", nEvicted);
2338 }
2339 } else {
2340 LogPrint(BCLog::MEMPOOL, "not keeping orphan with rejected parents %s\n",tx.GetHash().ToString());
2341 // We will continue to reject this tx since it has rejected
2342 // parents so avoid re-requesting it from other peers.
2343 recentRejects->insert(tx.GetHash());
2344 }
2345 } else {
2346 if (!tx.HasWitness() && !state.CorruptionPossible()) {
2347 // Do not use rejection cache for witness transactions or
2348 // witness-stripped transactions, as they can have been malleated.
2349 // See https://github.com/bitcoin/bitcoin/issues/8279 for details.
2350 assert(recentRejects);
2351 recentRejects->insert(tx.GetHash());
2352 if (RecursiveDynamicUsage(*ptx) < 100000) {
2353 AddToCompactExtraTransactions(ptx);
2354 }
2355 } else if (tx.HasWitness() && RecursiveDynamicUsage(*ptx) < 100000) {
2356 AddToCompactExtraTransactions(ptx);
2357 }
2358
2359 if (pfrom->fWhitelisted && gArgs.GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY)) {
2360 // Always relay transactions received from whitelisted peers, even
2361 // if they were already in the mempool or rejected from it due
2362 // to policy, allowing the node to function as a gateway for
2363 // nodes hidden behind it.
2364 //
2365 // Never relay transactions that we would assign a non-zero DoS
2366 // score for, as we expect peers to do the same with us in that
2367 // case.
2368 int nDoS = 0;
2369 if (!state.IsInvalid(nDoS) || nDoS == 0) {
2370 LogPrintf("Force relaying tx %s from whitelisted peer=%d\n", tx.GetHash().ToString(), pfrom->GetId());
2371 RelayTransaction(tx, connman);
2372 } else {
2373 LogPrintf("Not relaying invalid transaction %s from whitelisted peer=%d (%s)\n", tx.GetHash().ToString(), pfrom->GetId(), FormatStateMessage(state));
2374 }
2375 }
2376 }
2377
2378 for (const CTransactionRef& removedTx : lRemovedTxn)
2379 AddToCompactExtraTransactions(removedTx);
2380
2381 // If a tx has been detected by recentRejects, we will have reached
2382 // this point and the tx will have been ignored. Because we haven't run
2383 // the tx through AcceptToMemoryPool, we won't have computed a DoS
2384 // score for it or determined exactly why we consider it invalid.
2385 //
2386 // This means we won't penalize any peer subsequently relaying a DoSy
2387 // tx (even if we penalized the first peer who gave it to us) because
2388 // we have to account for recentRejects showing false positives. In
2389 // other words, we shouldn't penalize a peer if we aren't *sure* they
2390 // submitted a DoSy tx.
2391 //
2392 // Note that recentRejects doesn't just record DoSy or invalid
2393 // transactions, but any tx not accepted by the mempool, which may be
2394 // due to node policy (vs. consensus). So we can't blanket penalize a
2395 // peer simply for relaying a tx that our recentRejects has caught,
2396 // regardless of false positives.
2397
2398 int nDoS = 0;
2399 if (state.IsInvalid(nDoS))
2400 {
2401 LogPrint(BCLog::MEMPOOLREJ, "%s from peer=%d was not accepted: %s\n", tx.GetHash().ToString(),
2402 pfrom->GetId(),
2403 FormatStateMessage(state));
2404 if (enable_bip61 && state.GetRejectCode() > 0 && state.GetRejectCode() < REJECT_INTERNAL) { // Never send AcceptToMemoryPool's internal codes over P2P
2405 connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::REJECT, strCommand, (unsigned char)state.GetRejectCode(),
2406 state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), inv.hash));
2407 }
2408 if (nDoS > 0) {
2409 Misbehaving(pfrom->GetId(), nDoS);
2410 }
2411 }
2412 return true;
2413 }
2414
2415 if (strCommand == NetMsgType::CMPCTBLOCK && !fImporting && !fReindex) // Ignore blocks received while importing
2416 {
2417 CBlockHeaderAndShortTxIDs cmpctblock;
2418 vRecv >> cmpctblock;
2419
2420 bool received_new_header = false;
2421
2422 {
2423 LOCK(cs_main);
2424
2425 if (!LookupBlockIndex(cmpctblock.header.hashPrevBlock)) {
2426 // Doesn't connect (or is genesis), instead of DoSing in AcceptBlockHeader, request deeper headers
2427 if (!IsInitialBlockDownload())
2428 connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256()));
2429 return true;
2430 }
2431
2432 if (!LookupBlockIndex(cmpctblock.header.GetHash())) {
2433 received_new_header = true;
2434 }
2435 }
2436
2437 const CBlockIndex *pindex = nullptr;
2438 CValidationState state;
2439 if (!ProcessNewBlockHeaders({cmpctblock.header}, state, chainparams, &pindex)) {
2440 int nDoS;
2441 if (state.IsInvalid(nDoS)) {
2442 if (nDoS > 0) {
2443 LOCK(cs_main);
2444 Misbehaving(pfrom->GetId(), nDoS, strprintf("Peer %d sent us invalid header via cmpctblock\n", pfrom->GetId()));
2445 } else {
2446 LogPrint(BCLog::NET, "Peer %d sent us invalid header via cmpctblock\n", pfrom->GetId());
2447 }
2448 return true;
2449 }
2450 }
2451
2452 // When we succeed in decoding a block's txids from a cmpctblock
2453 // message we typically jump to the BLOCKTXN handling code, with a
2454 // dummy (empty) BLOCKTXN message, to re-use the logic there in
2455 // completing processing of the putative block (without cs_main).
2456 bool fProcessBLOCKTXN = false;
2457 CDataStream blockTxnMsg(SER_NETWORK, PROTOCOL_VERSION);
2458
2459 // If we end up treating this as a plain headers message, call that as well
2460 // without cs_main.
2461 bool fRevertToHeaderProcessing = false;
2462
2463 // Keep a CBlock for "optimistic" compactblock reconstructions (see
2464 // below)
2465 std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
2466 bool fBlockReconstructed = false;
2467
2468 {
2469 LOCK2(cs_main, g_cs_orphans);
2470 // If AcceptBlockHeader returned true, it set pindex
2471 assert(pindex);
2472 UpdateBlockAvailability(pfrom->GetId(), pindex->GetBlockHash());
2473
2474 CNodeState *nodestate = State(pfrom->GetId());
2475
2476 // If this was a new header with more work than our tip, update the
2477 // peer's last block announcement time
2478 if (received_new_header && pindex->nChainWork > chainActive.Tip()->nChainWork) {
2479 nodestate->m_last_block_announcement = GetTime();
2480 }
2481
2482 std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator blockInFlightIt = mapBlocksInFlight.find(pindex->GetBlockHash());
2483 bool fAlreadyInFlight = blockInFlightIt != mapBlocksInFlight.end();
2484
2485 if (pindex->nStatus & BLOCK_HAVE_DATA) // Nothing to do here
2486 return true;
2487
2488 if (pindex->nChainWork <= chainActive.Tip()->nChainWork || // We know something better
2489 pindex->nTx != 0) { // We had this block at some point, but pruned it
2490 if (fAlreadyInFlight) {
2491 // We requested this block for some reason, but our mempool will probably be useless
2492 // so we just grab the block via normal getdata
2493 std::vector<CInv> vInv(1);
2494 vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom), cmpctblock.header.GetHash());
2495 connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
2496 }
2497 return true;
2498 }
2499
2500 // If we're not close to tip yet, give up and let parallel block fetch work its magic
2501 if (!fAlreadyInFlight && !CanDirectFetch(chainparams.GetConsensus()))
2502 return true;
2503
2504 if (IsWitnessEnabled(pindex->pprev, chainparams.GetConsensus()) && !nodestate->fSupportsDesiredCmpctVersion) {
2505 // Don't bother trying to process compact blocks from v1 peers
2506 // after segwit activates.
2507 return true;
2508 }
2509
2510 // We want to be a bit conservative just to be extra careful about DoS
2511 // possibilities in compact block processing...
2512 if (pindex->nHeight <= chainActive.Height() + 2) {
2513 if ((!fAlreadyInFlight && nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) ||
2514 (fAlreadyInFlight && blockInFlightIt->second.first == pfrom->GetId())) {
2515 std::list<QueuedBlock>::iterator* queuedBlockIt = nullptr;
2516 if (!MarkBlockAsInFlight(pfrom->GetId(), pindex->GetBlockHash(), pindex, &queuedBlockIt)) {
2517 if (!(*queuedBlockIt)->partialBlock)
2518 (*queuedBlockIt)->partialBlock.reset(new PartiallyDownloadedBlock(&mempool));
2519 else {
2520 // The block was already in flight using compact blocks from the same peer
2521 LogPrint(BCLog::NET, "Peer sent us compact block we were already syncing!\n");
2522 return true;
2523 }
2524 }
2525
2526 PartiallyDownloadedBlock& partialBlock = *(*queuedBlockIt)->partialBlock;
2527 ReadStatus status = partialBlock.InitData(cmpctblock, vExtraTxnForCompact);
2528 if (status == READ_STATUS_INVALID) {
2529 MarkBlockAsReceived(pindex->GetBlockHash()); // Reset in-flight state in case of whitelist
2530 Misbehaving(pfrom->GetId(), 100, strprintf("Peer %d sent us invalid compact block\n", pfrom->GetId()));
2531 return true;
2532 } else if (status == READ_STATUS_FAILED) {
2533 // Duplicate txindexes, the block is now in-flight, so just request it
2534 std::vector<CInv> vInv(1);
2535 vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom), cmpctblock.header.GetHash());
2536 connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
2537 return true;
2538 }
2539
2540 BlockTransactionsRequest req;
2541 for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) {
2542 if (!partialBlock.IsTxAvailable(i))
2543 req.indexes.push_back(i);
2544 }
2545 if (req.indexes.empty()) {
2546 // Dirty hack to jump to BLOCKTXN code (TODO: move message handling into their own functions)
2547 BlockTransactions txn;
2548 txn.blockhash = cmpctblock.header.GetHash();
2549 blockTxnMsg << txn;
2550 fProcessBLOCKTXN = true;
2551 } else {
2552 req.blockhash = pindex->GetBlockHash();
2553 connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETBLOCKTXN, req));
2554 }
2555 } else {
2556 // This block is either already in flight from a different
2557 // peer, or this peer has too many blocks outstanding to
2558 // download from.
2559 // Optimistically try to reconstruct anyway since we might be
2560 // able to without any round trips.
2561 PartiallyDownloadedBlock tempBlock(&mempool);
2562 ReadStatus status = tempBlock.InitData(cmpctblock, vExtraTxnForCompact);
2563 if (status != READ_STATUS_OK) {
2564 // TODO: don't ignore failures
2565 return true;
2566 }
2567 std::vector<CTransactionRef> dummy;
2568 status = tempBlock.FillBlock(*pblock, dummy);
2569 if (status == READ_STATUS_OK) {
2570 fBlockReconstructed = true;
2571 }
2572 }
2573 } else {
2574 if (fAlreadyInFlight) {
2575                 // We requested this block, but it's far into the future, so our
2576 // mempool will probably be useless - request the block normally
2577 std::vector<CInv> vInv(1);
2578 vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom), cmpctblock.header.GetHash());
2579 connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
2580 return true;
2581 } else {
2582 // If this was an announce-cmpctblock, we want the same treatment as a header message
2583 fRevertToHeaderProcessing = true;
2584 }
2585 }
2586 } // cs_main
2587
2588 if (fProcessBLOCKTXN)
2589 return ProcessMessage(pfrom, NetMsgType::BLOCKTXN, blockTxnMsg, nTimeReceived, chainparams, connman, interruptMsgProc, enable_bip61);
2590
2591 if (fRevertToHeaderProcessing) {
2592 // Headers received from HB compact block peers are permitted to be
2593 // relayed before full validation (see BIP 152), so we don't want to disconnect
2594 // the peer if the header turns out to be for an invalid block.
2595 // Note that if a peer tries to build on an invalid chain, that
2596 // will be detected and the peer will be banned.
2597 return ProcessHeadersMessage(pfrom, connman, {cmpctblock.header}, chainparams, /*punish_duplicate_invalid=*/false);
2598 }
2599
2600 if (fBlockReconstructed) {
2601 // If we got here, we were able to optimistically reconstruct a
2602 // block that is in flight from some other peer.
2603 {
2604 LOCK(cs_main);
2605 mapBlockSource.emplace(pblock->GetHash(), std::make_pair(pfrom->GetId(), false));
2606 }
2607 bool fNewBlock = false;
2608 // Setting fForceProcessing to true means that we bypass some of
2609 // our anti-DoS protections in AcceptBlock, which filters
2610 // unrequested blocks that might be trying to waste our resources
2611 // (eg disk space). Because we only try to reconstruct blocks when
2612 // we're close to caught up (via the CanDirectFetch() requirement
2613 // above, combined with the behavior of not requesting blocks until
2614 // we have a chain with at least nMinimumChainWork), and we ignore
2615 // compact blocks with less work than our tip, it is safe to treat
2616 // reconstructed compact blocks as having been requested.
2617 ProcessNewBlock(chainparams, pblock, /*fForceProcessing=*/true, &fNewBlock);
2618 if (fNewBlock) {
2619 pfrom->nLastBlockTime = GetTime();
2620 } else {
2621 LOCK(cs_main);
2622 mapBlockSource.erase(pblock->GetHash());
2623 }
2624 LOCK(cs_main); // hold cs_main for CBlockIndex::IsValid()
2625 if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS)) {
2626 // Clear download state for this block, which is in
2627 // process from some other peer. We do this after calling
2628 // ProcessNewBlock so that a malleated cmpctblock announcement
2629 // can't be used to interfere with block relay.
2630 MarkBlockAsReceived(pblock->GetHash());
2631 }
2632 }
2633 return true;
2634 }
2635
2636 if (strCommand == NetMsgType::BLOCKTXN && !fImporting && !fReindex) // Ignore blocks received while importing
2637 {
2638 BlockTransactions resp;
2639 vRecv >> resp;
2640
2641 std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
2642 bool fBlockRead = false;
2643 {
2644 LOCK(cs_main);
2645
2646 std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator it = mapBlocksInFlight.find(resp.blockhash);
2647 if (it == mapBlocksInFlight.end() || !it->second.second->partialBlock ||
2648 it->second.first != pfrom->GetId()) {
2649 LogPrint(BCLog::NET, "Peer %d sent us block transactions for block we weren't expecting\n", pfrom->GetId());
2650 return true;
2651 }
2652
2653 PartiallyDownloadedBlock& partialBlock = *it->second.second->partialBlock;
2654 ReadStatus status = partialBlock.FillBlock(*pblock, resp.txn);
2655 if (status == READ_STATUS_INVALID) {
2656 MarkBlockAsReceived(resp.blockhash); // Reset in-flight state in case of whitelist
2657 Misbehaving(pfrom->GetId(), 100, strprintf("Peer %d sent us invalid compact block/non-matching block transactions\n", pfrom->GetId()));
2658 return true;
2659 } else if (status == READ_STATUS_FAILED) {
2660 // Might have collided, fall back to getdata now :(
2661 std::vector<CInv> invs;
2662 invs.push_back(CInv(MSG_BLOCK | GetFetchFlags(pfrom), resp.blockhash));
2663 connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, invs));
2664 } else {
2665 // Block is either okay, or possibly we received
2666 // READ_STATUS_CHECKBLOCK_FAILED.
2667 // Note that CheckBlock can only fail for one of a few reasons:
2668 // 1. bad-proof-of-work (impossible here, because we've already
2669 // accepted the header)
2670 // 2. merkleroot doesn't match the transactions given (already
2671 // caught in FillBlock with READ_STATUS_FAILED, so
2672 // impossible here)
2673 // 3. the block is otherwise invalid (eg invalid coinbase,
2674 // block is too big, too many legacy sigops, etc).
2675 // So if CheckBlock failed, #3 is the only possibility.
2676 // Under BIP 152, we don't DoS-ban unless proof of work is
2677 // invalid (we don't require all the stateless checks to have
2678 // been run). This is handled below, so just treat this as
2679 // though the block was successfully read, and rely on the
2680 // handling in ProcessNewBlock to ensure the block index is
2681 // updated, reject messages go out, etc.
2682 MarkBlockAsReceived(resp.blockhash); // it is now an empty pointer
2683 fBlockRead = true;
2684 // mapBlockSource is only used for sending reject messages and DoS scores,
2685 // so the race between here and cs_main in ProcessNewBlock is fine.
2686 // BIP 152 permits peers to relay compact blocks after validating
2687 // the header only; we should not punish peers if the block turns
2688 // out to be invalid.
2689 mapBlockSource.emplace(resp.blockhash, std::make_pair(pfrom->GetId(), false));
2690 }
2691 } // Don't hold cs_main when we call into ProcessNewBlock
2692 if (fBlockRead) {
2693 bool fNewBlock = false;
2694 // Since we requested this block (it was in mapBlocksInFlight), force it to be processed,
2695 // even if it would not be a candidate for new tip (missing previous block, chain not long enough, etc)
2696 // This bypasses some anti-DoS logic in AcceptBlock (eg to prevent
2697 // disk-space attacks), but this should be safe due to the
2698 // protections in the compact block handler -- see related comment
2699 // in compact block optimistic reconstruction handling.
2700 ProcessNewBlock(chainparams, pblock, /*fForceProcessing=*/true, &fNewBlock);
2701 if (fNewBlock) {
2702 pfrom->nLastBlockTime = GetTime();
2703 } else {
2704 LOCK(cs_main);
2705 mapBlockSource.erase(pblock->GetHash());
2706 }
2707 }
2708 return true;
2709 }
2710
2711 if (strCommand == NetMsgType::HEADERS && !fImporting && !fReindex) // Ignore headers received while importing
2712 {
2713 std::vector<CBlockHeader> headers;
2714
2715 // Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks.
2716 unsigned int nCount = ReadCompactSize(vRecv);
2717 if (nCount > MAX_HEADERS_RESULTS) {
2718 LOCK(cs_main);
2719 Misbehaving(pfrom->GetId(), 20, strprintf("headers message size = %u", nCount));
2720 return false;
2721 }
2722 headers.resize(nCount);
2723 for (unsigned int n = 0; n < nCount; n++) {
2724 vRecv >> headers[n];
2725 ReadCompactSize(vRecv); // ignore tx count; assume it is 0.
2726 }
2727
2728 // Headers received via a HEADERS message should be valid, and reflect
2729 // the chain the peer is on. If we receive a known-invalid header,
2730 // disconnect the peer if it is using one of our outbound connection
2731 // slots.
2732 bool should_punish = !pfrom->fInbound && !pfrom->m_manual_connection;
2733 return ProcessHeadersMessage(pfrom, connman, headers, chainparams, should_punish);
2734 }
2735
2736 if (strCommand == NetMsgType::BLOCK && !fImporting && !fReindex) // Ignore blocks received while importing
2737 {
2738 std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
2739 vRecv >> *pblock;
2740
2741 LogPrint(BCLog::NET, "received block %s peer=%d\n", pblock->GetHash().ToString(), pfrom->GetId());
2742
2743 bool forceProcessing = false;
2744 const uint256 hash(pblock->GetHash());
2745 {
2746 LOCK(cs_main);
2747 // Also always process if we requested the block explicitly, as we may
2748 // need it even though it is not a candidate for a new best tip.
2749 forceProcessing |= MarkBlockAsReceived(hash);
2750 // mapBlockSource is only used for sending reject messages and DoS scores,
2751 // so the race between here and cs_main in ProcessNewBlock is fine.
2752 mapBlockSource.emplace(hash, std::make_pair(pfrom->GetId(), true));
2753 }
2754 bool fNewBlock = false;
2755 ProcessNewBlock(chainparams, pblock, forceProcessing, &fNewBlock);
2756 if (fNewBlock) {
2757 pfrom->nLastBlockTime = GetTime();
2758 } else {
2759 LOCK(cs_main);
2760 mapBlockSource.erase(pblock->GetHash());
2761 }
2762 return true;
2763 }
2764
2765 if (strCommand == NetMsgType::GETADDR) {
2766 // This asymmetric behavior for inbound and outbound connections was introduced
2767 // to prevent a fingerprinting attack: an attacker can send specific fake addresses
2768 // to users' AddrMan and later request them by sending getaddr messages.
2769 // Making nodes which are behind NAT and can only make outgoing connections ignore
2770 // the getaddr message mitigates the attack.
2771 if (!pfrom->fInbound) {
2772 LogPrint(BCLog::NET, "Ignoring \"getaddr\" from outbound connection. peer=%d\n", pfrom->GetId());
2773 return true;
2774 }
2775
2776 // Only send one GetAddr response per connection to reduce resource waste
2777 // and discourage addr stamping of INV announcements.
2778 if (pfrom->fSentAddr) {
2779 LogPrint(BCLog::NET, "Ignoring repeated \"getaddr\". peer=%d\n", pfrom->GetId());
2780 return true;
2781 }
2782 pfrom->fSentAddr = true;
2783
2784 pfrom->vAddrToSend.clear();
2785 std::vector<CAddress> vAddr = connman->GetAddresses();
2786 FastRandomContext insecure_rand;
2787 for (const CAddress &addr : vAddr) {
2788 if (!g_banman->IsBanned(addr)) {
2789 pfrom->PushAddress(addr, insecure_rand);
2790 }
2791 }
2792 return true;
2793 }
2794
2795 if (strCommand == NetMsgType::MEMPOOL) {
2796 if (!(pfrom->GetLocalServices() & NODE_BLOOM) && !pfrom->fWhitelisted)
2797 {
2798 LogPrint(BCLog::NET, "mempool request with bloom filters disabled, disconnect peer=%d\n", pfrom->GetId());
2799 pfrom->fDisconnect = true;
2800 return true;
2801 }
2802
2803 if (connman->OutboundTargetReached(false) && !pfrom->fWhitelisted)
2804 {
2805 LogPrint(BCLog::NET, "mempool request with bandwidth limit reached, disconnect peer=%d\n", pfrom->GetId());
2806 pfrom->fDisconnect = true;
2807 return true;
2808 }
2809
2810 LOCK(pfrom->cs_inventory);
2811 pfrom->fSendMempool = true;
2812 return true;
2813 }
2814
2815 if (strCommand == NetMsgType::PING) {
2816 if (pfrom->nVersion > BIP0031_VERSION)
2817 {
2818 uint64_t nonce = 0;
2819 vRecv >> nonce;
2820 // Echo the message back with the nonce. This allows for two useful features:
2821 //
2822 // 1) A remote node can quickly check if the connection is operational
2823 // 2) Remote nodes can measure the latency of the network thread. If this node
2824 // is overloaded it won't respond to pings quickly and the remote node can
2825 // avoid sending us more work, like chain download requests.
2826 //
2827 // The nonce stops the remote getting confused between different pings: without
2828 // it, if the remote node sends a ping once per second and this node takes 5
2829 // seconds to respond to each, the 5th ping the remote sends would appear to
2830 // return very quickly.
2831 connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::PONG, nonce));
2832 }
2833 return true;
2834 }
2835
2836 if (strCommand == NetMsgType::PONG) {
2837 int64_t pingUsecEnd = nTimeReceived;
2838 uint64_t nonce = 0;
2839 size_t nAvail = vRecv.in_avail();
2840 bool bPingFinished = false;
2841 std::string sProblem;
2842
2843 if (nAvail >= sizeof(nonce)) {
2844 vRecv >> nonce;
2845
2846 // Only process pong message if there is an outstanding ping (old ping without nonce should never pong)
2847 if (pfrom->nPingNonceSent != 0) {
2848 if (nonce == pfrom->nPingNonceSent) {
2849 // Matching pong received, this ping is no longer outstanding
2850 bPingFinished = true;
2851 int64_t pingUsecTime = pingUsecEnd - pfrom->nPingUsecStart;
2852 if (pingUsecTime > 0) {
2853 // Successful ping time measurement, replace previous
2854 pfrom->nPingUsecTime = pingUsecTime;
2855 pfrom->nMinPingUsecTime = std::min(pfrom->nMinPingUsecTime.load(), pingUsecTime);
2856 } else {
2857 // This should never happen
2858 sProblem = "Timing mishap";
2859 }
2860 } else {
2861 // Nonce mismatches are normal when pings are overlapping
2862 sProblem = "Nonce mismatch";
2863 if (nonce == 0) {
2864 // This is most likely a bug in another implementation somewhere; cancel this ping
2865 bPingFinished = true;
2866 sProblem = "Nonce zero";
2867 }
2868 }
2869 } else {
2870 sProblem = "Unsolicited pong without ping";
2871 }
2872 } else {
2873 // This is most likely a bug in another implementation somewhere; cancel this ping
2874 bPingFinished = true;
2875 sProblem = "Short payload";
2876 }
2877
2878 if (!(sProblem.empty())) {
2879 LogPrint(BCLog::NET, "pong peer=%d: %s, %x expected, %x received, %u bytes\n",
2880 pfrom->GetId(),
2881 sProblem,
2882 pfrom->nPingNonceSent,
2883 nonce,
2884 nAvail);
2885 }
2886 if (bPingFinished) {
2887 pfrom->nPingNonceSent = 0;
2888 }
2889 return true;
2890 }
2891
2892 if (strCommand == NetMsgType::FILTERLOAD) {
2893 CBloomFilter filter;
2894 vRecv >> filter;
2895
2896 if (!filter.IsWithinSizeConstraints())
2897 {
2898 // There is no excuse for sending a too-large filter
2899 LOCK(cs_main);
2900 Misbehaving(pfrom->GetId(), 100);
2901 }
2902 else
2903 {
2904 LOCK(pfrom->cs_filter);
2905 pfrom->pfilter.reset(new CBloomFilter(filter));
2906 pfrom->pfilter->UpdateEmptyFull();
2907 pfrom->fRelayTxes = true;
2908 }
2909 return true;
2910 }
2911
2912 if (strCommand == NetMsgType::FILTERADD) {
2913 std::vector<unsigned char> vData;
2914 vRecv >> vData;
2915
2916 // Nodes must NEVER send a data item > 520 bytes (the max size for a script data object,
2917 // and thus, the maximum size any matched object can have) in a filteradd message
2918 bool bad = false;
2919 if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) {
2920 bad = true;
2921 } else {
2922 LOCK(pfrom->cs_filter);
2923 if (pfrom->pfilter) {
2924 pfrom->pfilter->insert(vData);
2925 } else {
2926 bad = true;
2927 }
2928 }
2929 if (bad) {
2930 LOCK(cs_main);
2931 Misbehaving(pfrom->GetId(), 100);
2932 }
2933 return true;
2934 }
2935
2936 if (strCommand == NetMsgType::FILTERCLEAR) {
2937 LOCK(pfrom->cs_filter);
2938 if (pfrom->GetLocalServices() & NODE_BLOOM) {
2939 pfrom->pfilter.reset(new CBloomFilter());
2940 }
2941 pfrom->fRelayTxes = true;
2942 return true;
2943 }
2944
2945 if (strCommand == NetMsgType::FEEFILTER) {
2946 CAmount newFeeFilter = 0;
2947 vRecv >> newFeeFilter;
2948 if (MoneyRange(newFeeFilter)) {
2949 {
2950 LOCK(pfrom->cs_feeFilter);
2951 pfrom->minFeeFilter = newFeeFilter;
2952 }
2953 LogPrint(BCLog::NET, "received: feefilter of %s from peer=%d\n", CFeeRate(newFeeFilter).ToString(), pfrom->GetId());
2954 }
2955 return true;
2956 }
2957
2958 if (strCommand == NetMsgType::NOTFOUND) {
2959 // We do not care about the NOTFOUND message, but logging an Unknown Command
2960 // message would be undesirable as we transmit it ourselves.
2961 return true;
2962 }
2963
2964 // Ignore unknown commands for extensibility
2965 LogPrint(BCLog::NET, "Unknown command \"%s\" from peer=%d\n", SanitizeString(strCommand), pfrom->GetId());
2966 return true;
2967 }
2968
SendRejectsAndCheckIfBanned(CNode * pnode,bool enable_bip61)2969 bool PeerLogicValidation::SendRejectsAndCheckIfBanned(CNode* pnode, bool enable_bip61) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
2970 {
2971 AssertLockHeld(cs_main);
2972 CNodeState &state = *State(pnode->GetId());
2973
2974 if (enable_bip61) {
2975 for (const CBlockReject& reject : state.rejects) {
2976 connman->PushMessage(pnode, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, std::string(NetMsgType::BLOCK), reject.chRejectCode, reject.strRejectReason, reject.hashBlock));
2977 }
2978 }
2979 state.rejects.clear();
2980
2981 if (state.fShouldBan) {
2982 state.fShouldBan = false;
2983 if (pnode->fWhitelisted)
2984 LogPrintf("Warning: not punishing whitelisted peer %s!\n", pnode->addr.ToString());
2985 else if (pnode->m_manual_connection)
2986 LogPrintf("Warning: not punishing manually-connected peer %s!\n", pnode->addr.ToString());
2987 else if (pnode->addr.IsLocal()) {
2988 // Disconnect but don't ban _this_ local node
2989 LogPrintf("Warning: disconnecting but not banning local peer %s!\n", pnode->addr.ToString());
2990 pnode->fDisconnect = true;
2991 } else {
2992 // Disconnect and ban all nodes sharing the address
2993 if (m_banman) {
2994 m_banman->Ban(pnode->addr, BanReasonNodeMisbehaving);
2995 }
2996 connman->DisconnectNode(pnode->addr);
2997 }
2998 return true;
2999 }
3000 return false;
3001 }
3002
/**
 * Dequeue and dispatch at most one queued network message from a peer.
 *
 * Also drains any pending getdata replies and retries any orphan
 * transactions queued for this peer before taking a new message, so that
 * responses keep their order.
 *
 * @param pfrom            Peer whose receive queue is serviced.
 * @param interruptMsgProc Set externally to abort message processing.
 * @return true if more work remains for this peer (more queued messages,
 *         or pending getdata/orphan work), false if the caller need not
 *         reschedule it immediately.
 */
bool PeerLogicValidation::ProcessMessages(CNode* pfrom, std::atomic<bool>& interruptMsgProc)
{
    const CChainParams& chainparams = Params();
    //
    // Message format
    // (4) message start
    // (12) command
    // (4) size
    // (4) checksum
    // (x) data
    //
    bool fMoreWork = false;

    // Finish serving any getdata requests left over from a previous call
    // before dispatching a new message.
    if (!pfrom->vRecvGetData.empty())
        ProcessGetData(pfrom, chainparams, connman, interruptMsgProc);

    // Retry orphan transactions whose missing parents may have arrived;
    // evicted ones are kept around as compact-block "extra" transactions.
    if (!pfrom->orphan_work_set.empty()) {
        std::list<CTransactionRef> removed_txn;
        LOCK2(cs_main, g_cs_orphans);
        ProcessOrphanTx(connman, pfrom->orphan_work_set, removed_txn);
        for (const CTransactionRef& removedTx : removed_txn) {
            AddToCompactExtraTransactions(removedTx);
        }
    }

    if (pfrom->fDisconnect)
        return false;

    // this maintains the order of responses
    if (!pfrom->vRecvGetData.empty()) return true;
    if (!pfrom->orphan_work_set.empty()) return true;

    // Don't bother if send buffer is too full to respond anyway
    if (pfrom->fPauseSend)
        return false;

    std::list<CNetMessage> msgs;
    {
        LOCK(pfrom->cs_vProcessMsg);
        if (pfrom->vProcessMsg.empty())
            return false;
        // Just take one message
        msgs.splice(msgs.begin(), pfrom->vProcessMsg, pfrom->vProcessMsg.begin());
        // Account for the removed message and re-evaluate receive throttling.
        pfrom->nProcessQueueSize -= msgs.front().vRecv.size() + CMessageHeader::HEADER_SIZE;
        pfrom->fPauseRecv = pfrom->nProcessQueueSize > connman->GetReceiveFloodSize();
        fMoreWork = !pfrom->vProcessMsg.empty();
    }
    CNetMessage& msg(msgs.front());

    msg.SetVersion(pfrom->GetRecvVersion());
    // Scan for message start
    if (memcmp(msg.hdr.pchMessageStart, chainparams.MessageStart(), CMessageHeader::MESSAGE_START_SIZE) != 0) {
        // Wrong network magic: this peer is on a different network (or
        // garbage); disconnect rather than keep parsing its stream.
        LogPrint(BCLog::NET, "PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d\n", SanitizeString(msg.hdr.GetCommand()), pfrom->GetId());
        pfrom->fDisconnect = true;
        return false;
    }

    // Read header
    CMessageHeader& hdr = msg.hdr;
    if (!hdr.IsValid(chainparams.MessageStart()))
    {
        // Malformed header: drop this message but keep the connection.
        LogPrint(BCLog::NET, "PROCESSMESSAGE: ERRORS IN HEADER %s peer=%d\n", SanitizeString(hdr.GetCommand()), pfrom->GetId());
        return fMoreWork;
    }
    std::string strCommand = hdr.GetCommand();

    // Message size
    unsigned int nMessageSize = hdr.nMessageSize;

    // Checksum
    CDataStream& vRecv = msg.vRecv;
    const uint256& hash = msg.GetMessageHash();
    if (memcmp(hash.begin(), hdr.pchChecksum, CMessageHeader::CHECKSUM_SIZE) != 0)
    {
        // Payload checksum mismatch: skip this message, keep the peer.
        LogPrint(BCLog::NET, "%s(%s, %u bytes): CHECKSUM ERROR expected %s was %s\n", __func__,
           SanitizeString(strCommand), nMessageSize,
           HexStr(hash.begin(), hash.begin()+CMessageHeader::CHECKSUM_SIZE),
           HexStr(hdr.pchChecksum, hdr.pchChecksum+CMessageHeader::CHECKSUM_SIZE));
        return fMoreWork;
    }

    // Process message
    bool fRet = false;
    try
    {
        fRet = ProcessMessage(pfrom, strCommand, vRecv, msg.nTime, chainparams, connman, interruptMsgProc, m_enable_bip61);
        if (interruptMsgProc)
            return false;
        // The handler may have queued getdata replies; come back for them.
        if (!pfrom->vRecvGetData.empty())
            fMoreWork = true;
    } catch (const std::exception& e) {
        // Handler exceptions are logged and swallowed so one bad message
        // cannot take down the message-processing thread.
        LogPrint(BCLog::NET, "%s(%s, %u bytes): Exception '%s' (%s) caught\n", __func__, SanitizeString(strCommand), nMessageSize, e.what(), typeid(e).name());
    } catch (...) {
        LogPrint(BCLog::NET, "%s(%s, %u bytes): Unknown exception caught\n", __func__, SanitizeString(strCommand), nMessageSize);
    }

    if (!fRet) {
        LogPrint(BCLog::NET, "%s(%s, %u bytes) FAILED peer=%d\n", __func__, SanitizeString(strCommand), nMessageSize, pfrom->GetId());
    }

    // Apply any misbehavior (rejects / ban flag) accumulated by the handler.
    LOCK(cs_main);
    SendRejectsAndCheckIfBanned(pfrom, m_enable_bip61);

    return fMoreWork;
}
3108
/**
 * Per-peer state machine that evicts outbound peers whose best known block
 * never demonstrates chainwork >= our tip's.
 *
 * Only applies to unprotected outbound peers with headers sync started.
 * Sequence: arm a CHAIN_SYNC_TIMEOUT deadline against the current tip; on
 * expiry send one getheaders probe and extend the deadline by
 * HEADERS_RESPONSE_TIME; if the probe also times out, disconnect.
 * Requires cs_main (AssertLockHeld below).
 *
 * @param pto             Peer under consideration.
 * @param time_in_seconds Current time supplied by the caller (mockable).
 */
void PeerLogicValidation::ConsiderEviction(CNode *pto, int64_t time_in_seconds)
{
    AssertLockHeld(cs_main);

    CNodeState &state = *State(pto->GetId());
    const CNetMsgMaker msgMaker(pto->GetSendVersion());

    if (!state.m_chain_sync.m_protect && IsOutboundDisconnectionCandidate(pto) && state.fSyncStarted) {
        // This is an outbound peer subject to disconnection if they don't
        // announce a block with as much work as the current tip within
        // CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds (note: if
        // their chain has more work than ours, we should sync to it,
        // unless it's invalid, in which case we should find that out and
        // disconnect from them elsewhere).
        if (state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= chainActive.Tip()->nChainWork) {
            // Peer has proven sufficient work: clear any pending timeout.
            if (state.m_chain_sync.m_timeout != 0) {
                state.m_chain_sync.m_timeout = 0;
                state.m_chain_sync.m_work_header = nullptr;
                state.m_chain_sync.m_sent_getheaders = false;
            }
        } else if (state.m_chain_sync.m_timeout == 0 || (state.m_chain_sync.m_work_header != nullptr && state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= state.m_chain_sync.m_work_header->nChainWork)) {
            // Our best block known by this peer is behind our tip, and we're either noticing
            // that for the first time, OR this peer was able to catch up to some earlier point
            // where we checked against our tip.
            // Either way, set a new timeout based on current tip.
            state.m_chain_sync.m_timeout = time_in_seconds + CHAIN_SYNC_TIMEOUT;
            state.m_chain_sync.m_work_header = chainActive.Tip();
            state.m_chain_sync.m_sent_getheaders = false;
        } else if (state.m_chain_sync.m_timeout > 0 && time_in_seconds > state.m_chain_sync.m_timeout) {
            // No evidence yet that our peer has synced to a chain with work equal to that
            // of our tip, when we first detected it was behind. Send a single getheaders
            // message to give the peer a chance to update us.
            if (state.m_chain_sync.m_sent_getheaders) {
                // They've run out of time to catch up!
                LogPrintf("Disconnecting outbound peer %d for old chain, best known block = %s\n", pto->GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>");
                pto->fDisconnect = true;
            } else {
                assert(state.m_chain_sync.m_work_header);
                LogPrint(BCLog::NET, "sending getheaders to outbound peer=%d to verify chain work (current best known block:%s, benchmark blockhash: %s)\n", pto->GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>", state.m_chain_sync.m_work_header->GetBlockHash().ToString());
                // Locator starts at the benchmark header's parent so the peer
                // replies with at least the benchmark header itself.
                connman->PushMessage(pto, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(state.m_chain_sync.m_work_header->pprev), uint256()));
                state.m_chain_sync.m_sent_getheaders = true;
                constexpr int64_t HEADERS_RESPONSE_TIME = 120; // 2 minutes
                // Bump the timeout to allow a response, which could clear the timeout
                // (if the response shows the peer has synced), reset the timeout (if
                // the peer syncs to the required work but not to our tip), or result
                // in disconnect (if we advance to the timeout and pindexBestKnownBlock
                // has not sufficiently progressed)
                state.m_chain_sync.m_timeout = time_in_seconds + HEADERS_RESPONSE_TIME;
            }
        }
    }
}
3161
/**
 * If we have more full-relay outbound connections than our target,
 * disconnect the one that least recently announced a new block to us
 * (ties broken toward the higher/more recent node id).
 *
 * Protected peers, peers already marked for disconnect, recently connected
 * peers, and peers we are downloading blocks from are never evicted.
 * On a successful eviction, extra-outbound-peer attempts are disabled
 * until the next stale-tip detection.
 *
 * @param time_in_seconds Current time supplied by the caller (mockable).
 */
void PeerLogicValidation::EvictExtraOutboundPeers(int64_t time_in_seconds)
{
    // Check whether we have too many outbound peers
    int extra_peers = connman->GetExtraOutboundCount();
    if (extra_peers > 0) {
        // If we have more outbound peers than we target, disconnect one.
        // Pick the outbound peer that least recently announced
        // us a new block, with ties broken by choosing the more recent
        // connection (higher node id)
        NodeId worst_peer = -1;
        int64_t oldest_block_announcement = std::numeric_limits<int64_t>::max();

        connman->ForEachNode([&](CNode* pnode) {
            AssertLockHeld(cs_main);

            // Ignore non-outbound peers, or nodes marked for disconnect already
            if (!IsOutboundDisconnectionCandidate(pnode) || pnode->fDisconnect) return;
            CNodeState *state = State(pnode->GetId());
            if (state == nullptr) return; // shouldn't be possible, but just in case
            // Don't evict our protected peers
            if (state->m_chain_sync.m_protect) return;
            // Track the stalest announcer seen so far; on equal staleness,
            // prefer evicting the newer connection (larger node id).
            if (state->m_last_block_announcement < oldest_block_announcement || (state->m_last_block_announcement == oldest_block_announcement && pnode->GetId() > worst_peer)) {
                worst_peer = pnode->GetId();
                oldest_block_announcement = state->m_last_block_announcement;
            }
        });
        if (worst_peer != -1) {
            bool disconnected = connman->ForNode(worst_peer, [&](CNode *pnode) {
                AssertLockHeld(cs_main);

                // Only disconnect a peer that has been connected to us for
                // some reasonable fraction of our check-frequency, to give
                // it time for new information to have arrived.
                // Also don't disconnect any peer we're trying to download a
                // block from.
                CNodeState &state = *State(pnode->GetId());
                if (time_in_seconds - pnode->nTimeConnected > MINIMUM_CONNECT_TIME && state.nBlocksInFlight == 0) {
                    LogPrint(BCLog::NET, "disconnecting extra outbound peer=%d (last block announcement received at time %d)\n", pnode->GetId(), oldest_block_announcement);
                    pnode->fDisconnect = true;
                    return true;
                } else {
                    LogPrint(BCLog::NET, "keeping outbound peer=%d chosen for eviction (connect time: %d, blocks_in_flight: %d)\n", pnode->GetId(), pnode->nTimeConnected, state.nBlocksInFlight);
                    return false;
                }
            });
            if (disconnected) {
                // If we disconnected an extra peer, that means we successfully
                // connected to at least one peer after the last time we
                // detected a stale tip. Don't try any more extra peers until
                // we next detect a stale tip, to limit the load we put on the
                // network from these extra connections.
                connman->SetTryNewOutboundPeer(false);
            }
        }
    }
}
3218
CheckForStaleTipAndEvictPeers(const Consensus::Params & consensusParams)3219 void PeerLogicValidation::CheckForStaleTipAndEvictPeers(const Consensus::Params &consensusParams)
3220 {
3221 LOCK(cs_main);
3222
3223 if (connman == nullptr) return;
3224
3225 int64_t time_in_seconds = GetTime();
3226
3227 EvictExtraOutboundPeers(time_in_seconds);
3228
3229 if (time_in_seconds > m_stale_tip_check_time) {
3230 // Check whether our tip is stale, and if so, allow using an extra
3231 // outbound peer
3232 if (!fImporting && !fReindex && connman->GetNetworkActive() && connman->GetUseAddrmanOutgoing() && TipMayBeStale(consensusParams)) {
3233 LogPrintf("Potential stale tip detected, will try using extra outbound peer (last tip update: %d seconds ago)\n", time_in_seconds - g_last_tip_update);
3234 connman->SetTryNewOutboundPeer(true);
3235 } else if (connman->GetTryNewOutboundPeer()) {
3236 connman->SetTryNewOutboundPeer(false);
3237 }
3238 m_stale_tip_check_time = time_in_seconds + STALE_CHECK_INTERVAL;
3239 }
3240 }
3241
3242 namespace {
3243 class CompareInvMempoolOrder
3244 {
3245 CTxMemPool *mp;
3246 public:
CompareInvMempoolOrder(CTxMemPool * _mempool)3247 explicit CompareInvMempoolOrder(CTxMemPool *_mempool)
3248 {
3249 mp = _mempool;
3250 }
3251
operator ()(std::set<uint256>::iterator a,std::set<uint256>::iterator b)3252 bool operator()(std::set<uint256>::iterator a, std::set<uint256>::iterator b)
3253 {
3254 /* As std::make_heap produces a max-heap, we want the entries with the
3255 * fewest ancestors/highest fee to sort later. */
3256 return mp->CompareDepthAndScore(*b, *a);
3257 }
3258 };
3259 }
3260
SendMessages(CNode * pto)3261 bool PeerLogicValidation::SendMessages(CNode* pto)
3262 {
3263 const Consensus::Params& consensusParams = Params().GetConsensus();
3264 {
3265 // Don't send anything until the version handshake is complete
3266 if (!pto->fSuccessfullyConnected || pto->fDisconnect)
3267 return true;
3268
3269 // If we get here, the outgoing message serialization version is set and can't change.
3270 const CNetMsgMaker msgMaker(pto->GetSendVersion());
3271
3272 //
3273 // Message: ping
3274 //
3275 bool pingSend = false;
3276 if (pto->fPingQueued) {
3277 // RPC ping request by user
3278 pingSend = true;
3279 }
3280 if (pto->nPingNonceSent == 0 && pto->nPingUsecStart + PING_INTERVAL * 1000000 < GetTimeMicros()) {
3281 // Ping automatically sent as a latency probe & keepalive.
3282 pingSend = true;
3283 }
3284 if (pingSend) {
3285 uint64_t nonce = 0;
3286 while (nonce == 0) {
3287 GetRandBytes((unsigned char*)&nonce, sizeof(nonce));
3288 }
3289 pto->fPingQueued = false;
3290 pto->nPingUsecStart = GetTimeMicros();
3291 if (pto->nVersion > BIP0031_VERSION) {
3292 pto->nPingNonceSent = nonce;
3293 connman->PushMessage(pto, msgMaker.Make(NetMsgType::PING, nonce));
3294 } else {
3295 // Peer is too old to support ping command with nonce, pong will never arrive.
3296 pto->nPingNonceSent = 0;
3297 connman->PushMessage(pto, msgMaker.Make(NetMsgType::PING));
3298 }
3299 }
3300
3301 TRY_LOCK(cs_main, lockMain); // Acquire cs_main for IsInitialBlockDownload() and CNodeState()
3302 if (!lockMain)
3303 return true;
3304
3305 if (SendRejectsAndCheckIfBanned(pto, m_enable_bip61)) return true;
3306 CNodeState &state = *State(pto->GetId());
3307
3308 // Address refresh broadcast
3309 int64_t nNow = GetTimeMicros();
3310 if (!IsInitialBlockDownload() && pto->nNextLocalAddrSend < nNow) {
3311 AdvertiseLocal(pto);
3312 pto->nNextLocalAddrSend = PoissonNextSend(nNow, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
3313 }
3314
3315 //
3316 // Message: addr
3317 //
3318 if (pto->nNextAddrSend < nNow) {
3319 pto->nNextAddrSend = PoissonNextSend(nNow, AVG_ADDRESS_BROADCAST_INTERVAL);
3320 std::vector<CAddress> vAddr;
3321 vAddr.reserve(pto->vAddrToSend.size());
3322 for (const CAddress& addr : pto->vAddrToSend)
3323 {
3324 if (!pto->addrKnown.contains(addr.GetKey()))
3325 {
3326 pto->addrKnown.insert(addr.GetKey());
3327 vAddr.push_back(addr);
3328 // receiver rejects addr messages larger than 1000
3329 if (vAddr.size() >= 1000)
3330 {
3331 connman->PushMessage(pto, msgMaker.Make(NetMsgType::ADDR, vAddr));
3332 vAddr.clear();
3333 }
3334 }
3335 }
3336 pto->vAddrToSend.clear();
3337 if (!vAddr.empty())
3338 connman->PushMessage(pto, msgMaker.Make(NetMsgType::ADDR, vAddr));
3339 // we only send the big addr message once
3340 if (pto->vAddrToSend.capacity() > 40)
3341 pto->vAddrToSend.shrink_to_fit();
3342 }
3343
3344 // Start block sync
3345 if (pindexBestHeader == nullptr)
3346 pindexBestHeader = chainActive.Tip();
3347 bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->fOneShot); // Download if this is a nice peer, or we have no nice peers and this one might do.
3348 if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) {
3349 // Only actively request headers from a single peer, unless we're close to today.
3350 if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) {
3351 state.fSyncStarted = true;
3352 state.nHeadersSyncTimeout = GetTimeMicros() + HEADERS_DOWNLOAD_TIMEOUT_BASE + HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER * (GetAdjustedTime() - pindexBestHeader->GetBlockTime())/(consensusParams.nPowTargetSpacing);
3353 nSyncStarted++;
3354 const CBlockIndex *pindexStart = pindexBestHeader;
3355 /* If possible, start at the block preceding the currently
3356 best known header. This ensures that we always get a
3357 non-empty list of headers back as long as the peer
3358 is up-to-date. With a non-empty response, we can initialise
3359 the peer's known best block. This wouldn't be possible
3360 if we requested starting at pindexBestHeader and
3361 got back an empty response. */
3362 if (pindexStart->pprev)
3363 pindexStart = pindexStart->pprev;
3364 LogPrint(BCLog::NET, "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->GetId(), pto->nStartingHeight);
3365 connman->PushMessage(pto, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexStart), uint256()));
3366 }
3367 }
3368
3369 // Resend wallet transactions that haven't gotten in a block yet
3370 // Except during reindex, importing and IBD, when old wallet
3371 // transactions become unconfirmed and spams other nodes.
3372 if (!fReindex && !fImporting && !IsInitialBlockDownload())
3373 {
3374 GetMainSignals().Broadcast(nTimeBestReceived, connman);
3375 }
3376
3377 //
3378 // Try sending block announcements via headers
3379 //
3380 {
3381 // If we have less than MAX_BLOCKS_TO_ANNOUNCE in our
3382 // list of block hashes we're relaying, and our peer wants
3383 // headers announcements, then find the first header
3384 // not yet known to our peer but would connect, and send.
3385 // If no header would connect, or if we have too many
3386 // blocks, or if the peer doesn't want headers, just
3387 // add all to the inv queue.
3388 LOCK(pto->cs_inventory);
3389 std::vector<CBlock> vHeaders;
3390 bool fRevertToInv = ((!state.fPreferHeaders &&
3391 (!state.fPreferHeaderAndIDs || pto->vBlockHashesToAnnounce.size() > 1)) ||
3392 pto->vBlockHashesToAnnounce.size() > MAX_BLOCKS_TO_ANNOUNCE);
3393 const CBlockIndex *pBestIndex = nullptr; // last header queued for delivery
3394 ProcessBlockAvailability(pto->GetId()); // ensure pindexBestKnownBlock is up-to-date
3395
3396 if (!fRevertToInv) {
3397 bool fFoundStartingHeader = false;
3398 // Try to find first header that our peer doesn't have, and
3399 // then send all headers past that one. If we come across any
3400 // headers that aren't on chainActive, give up.
3401 for (const uint256 &hash : pto->vBlockHashesToAnnounce) {
3402 const CBlockIndex* pindex = LookupBlockIndex(hash);
3403 assert(pindex);
3404 if (chainActive[pindex->nHeight] != pindex) {
3405 // Bail out if we reorged away from this block
3406 fRevertToInv = true;
3407 break;
3408 }
3409 if (pBestIndex != nullptr && pindex->pprev != pBestIndex) {
3410 // This means that the list of blocks to announce don't
3411 // connect to each other.
3412 // This shouldn't really be possible to hit during
3413 // regular operation (because reorgs should take us to
3414 // a chain that has some block not on the prior chain,
3415 // which should be caught by the prior check), but one
3416 // way this could happen is by using invalidateblock /
3417 // reconsiderblock repeatedly on the tip, causing it to
3418 // be added multiple times to vBlockHashesToAnnounce.
3419 // Robustly deal with this rare situation by reverting
3420 // to an inv.
3421 fRevertToInv = true;
3422 break;
3423 }
3424 pBestIndex = pindex;
3425 if (fFoundStartingHeader) {
3426 // add this to the headers message
3427 vHeaders.push_back(pindex->GetBlockHeader());
3428 } else if (PeerHasHeader(&state, pindex)) {
3429 continue; // keep looking for the first new block
3430 } else if (pindex->pprev == nullptr || PeerHasHeader(&state, pindex->pprev)) {
3431 // Peer doesn't have this header but they do have the prior one.
3432 // Start sending headers.
3433 fFoundStartingHeader = true;
3434 vHeaders.push_back(pindex->GetBlockHeader());
3435 } else {
3436 // Peer doesn't have this header or the prior one -- nothing will
3437 // connect, so bail out.
3438 fRevertToInv = true;
3439 break;
3440 }
3441 }
3442 }
3443 if (!fRevertToInv && !vHeaders.empty()) {
3444 if (vHeaders.size() == 1 && state.fPreferHeaderAndIDs) {
3445 // We only send up to 1 block as header-and-ids, as otherwise
3446 // probably means we're doing an initial-ish-sync or they're slow
3447 LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", __func__,
3448 vHeaders.front().GetHash().ToString(), pto->GetId());
3449
3450 int nSendFlags = state.fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
3451
3452 bool fGotBlockFromCache = false;
3453 {
3454 LOCK(cs_most_recent_block);
3455 if (most_recent_block_hash == pBestIndex->GetBlockHash()) {
3456 if (state.fWantsCmpctWitness || !fWitnessesPresentInMostRecentCompactBlock)
3457 connman->PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, *most_recent_compact_block));
3458 else {
3459 CBlockHeaderAndShortTxIDs cmpctblock(*most_recent_block, state.fWantsCmpctWitness);
3460 connman->PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
3461 }
3462 fGotBlockFromCache = true;
3463 }
3464 }
3465 if (!fGotBlockFromCache) {
3466 CBlock block;
3467 bool ret = ReadBlockFromDisk(block, pBestIndex, consensusParams);
3468 assert(ret);
3469 CBlockHeaderAndShortTxIDs cmpctblock(block, state.fWantsCmpctWitness);
3470 connman->PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
3471 }
3472 state.pindexBestHeaderSent = pBestIndex;
3473 } else if (state.fPreferHeaders) {
3474 if (vHeaders.size() > 1) {
3475 LogPrint(BCLog::NET, "%s: %u headers, range (%s, %s), to peer=%d\n", __func__,
3476 vHeaders.size(),
3477 vHeaders.front().GetHash().ToString(),
3478 vHeaders.back().GetHash().ToString(), pto->GetId());
3479 } else {
3480 LogPrint(BCLog::NET, "%s: sending header %s to peer=%d\n", __func__,
3481 vHeaders.front().GetHash().ToString(), pto->GetId());
3482 }
3483 connman->PushMessage(pto, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
3484 state.pindexBestHeaderSent = pBestIndex;
3485 } else
3486 fRevertToInv = true;
3487 }
3488 if (fRevertToInv) {
3489 // If falling back to using an inv, just try to inv the tip.
3490 // The last entry in vBlockHashesToAnnounce was our tip at some point
3491 // in the past.
3492 if (!pto->vBlockHashesToAnnounce.empty()) {
3493 const uint256 &hashToAnnounce = pto->vBlockHashesToAnnounce.back();
3494 const CBlockIndex* pindex = LookupBlockIndex(hashToAnnounce);
3495 assert(pindex);
3496
3497 // Warn if we're announcing a block that is not on the main chain.
3498 // This should be very rare and could be optimized out.
3499 // Just log for now.
3500 if (chainActive[pindex->nHeight] != pindex) {
3501 LogPrint(BCLog::NET, "Announcing block %s not on main chain (tip=%s)\n",
3502 hashToAnnounce.ToString(), chainActive.Tip()->GetBlockHash().ToString());
3503 }
3504
3505 // If the peer's chain has this block, don't inv it back.
3506 if (!PeerHasHeader(&state, pindex)) {
3507 pto->PushInventory(CInv(MSG_BLOCK, hashToAnnounce));
3508 LogPrint(BCLog::NET, "%s: sending inv peer=%d hash=%s\n", __func__,
3509 pto->GetId(), hashToAnnounce.ToString());
3510 }
3511 }
3512 }
3513 pto->vBlockHashesToAnnounce.clear();
3514 }
3515
3516 //
3517 // Message: inventory
3518 //
3519 std::vector<CInv> vInv;
3520 {
3521 LOCK(pto->cs_inventory);
3522 vInv.reserve(std::max<size_t>(pto->vInventoryBlockToSend.size(), INVENTORY_BROADCAST_MAX));
3523
3524 // Add blocks
3525 for (const uint256& hash : pto->vInventoryBlockToSend) {
3526 vInv.push_back(CInv(MSG_BLOCK, hash));
3527 if (vInv.size() == MAX_INV_SZ) {
3528 connman->PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
3529 vInv.clear();
3530 }
3531 }
3532 pto->vInventoryBlockToSend.clear();
3533
3534 // Check whether periodic sends should happen
3535 bool fSendTrickle = pto->fWhitelisted;
3536 if (pto->nNextInvSend < nNow) {
3537 fSendTrickle = true;
3538 if (pto->fInbound) {
3539 pto->nNextInvSend = connman->PoissonNextSendInbound(nNow, INVENTORY_BROADCAST_INTERVAL);
3540 } else {
3541 // Use half the delay for outbound peers, as there is less privacy concern for them.
3542 pto->nNextInvSend = PoissonNextSend(nNow, INVENTORY_BROADCAST_INTERVAL >> 1);
3543 }
3544 }
3545
3546 // Time to send but the peer has requested we not relay transactions.
3547 if (fSendTrickle) {
3548 LOCK(pto->cs_filter);
3549 if (!pto->fRelayTxes) pto->setInventoryTxToSend.clear();
3550 }
3551
3552 // Respond to BIP35 mempool requests
3553 if (fSendTrickle && pto->fSendMempool) {
3554 auto vtxinfo = mempool.infoAll();
3555 pto->fSendMempool = false;
3556 CAmount filterrate = 0;
3557 {
3558 LOCK(pto->cs_feeFilter);
3559 filterrate = pto->minFeeFilter;
3560 }
3561
3562 LOCK(pto->cs_filter);
3563
3564 for (const auto& txinfo : vtxinfo) {
3565 const uint256& hash = txinfo.tx->GetHash();
3566 CInv inv(MSG_TX, hash);
3567 pto->setInventoryTxToSend.erase(hash);
3568 if (filterrate) {
3569 if (txinfo.feeRate.GetFeePerK() < filterrate)
3570 continue;
3571 }
3572 if (pto->pfilter) {
3573 if (!pto->pfilter->IsRelevantAndUpdate(*txinfo.tx)) continue;
3574 }
3575 pto->filterInventoryKnown.insert(hash);
3576 vInv.push_back(inv);
3577 if (vInv.size() == MAX_INV_SZ) {
3578 connman->PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
3579 vInv.clear();
3580 }
3581 }
3582 pto->timeLastMempoolReq = GetTime();
3583 }
3584
3585 // Determine transactions to relay
3586 if (fSendTrickle) {
3587 // Produce a vector with all candidates for sending
3588 std::vector<std::set<uint256>::iterator> vInvTx;
3589 vInvTx.reserve(pto->setInventoryTxToSend.size());
3590 for (std::set<uint256>::iterator it = pto->setInventoryTxToSend.begin(); it != pto->setInventoryTxToSend.end(); it++) {
3591 vInvTx.push_back(it);
3592 }
3593 CAmount filterrate = 0;
3594 {
3595 LOCK(pto->cs_feeFilter);
3596 filterrate = pto->minFeeFilter;
3597 }
3598 // Topologically and fee-rate sort the inventory we send for privacy and priority reasons.
3599 // A heap is used so that not all items need sorting if only a few are being sent.
3600 CompareInvMempoolOrder compareInvMempoolOrder(&mempool);
3601 std::make_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
3602 // No reason to drain out at many times the network's capacity,
3603 // especially since we have many peers and some will draw much shorter delays.
3604 unsigned int nRelayedTransactions = 0;
3605 LOCK(pto->cs_filter);
3606 while (!vInvTx.empty() && nRelayedTransactions < INVENTORY_BROADCAST_MAX) {
3607 // Fetch the top element from the heap
3608 std::pop_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
3609 std::set<uint256>::iterator it = vInvTx.back();
3610 vInvTx.pop_back();
3611 uint256 hash = *it;
3612 // Remove it from the to-be-sent set
3613 pto->setInventoryTxToSend.erase(it);
3614 // Check if not in the filter already
3615 if (pto->filterInventoryKnown.contains(hash)) {
3616 continue;
3617 }
3618 // Not in the mempool anymore? don't bother sending it.
3619 auto txinfo = mempool.info(hash);
3620 if (!txinfo.tx) {
3621 continue;
3622 }
3623 if (filterrate && txinfo.feeRate.GetFeePerK() < filterrate) {
3624 continue;
3625 }
3626 if (pto->pfilter && !pto->pfilter->IsRelevantAndUpdate(*txinfo.tx)) continue;
3627 // Send
3628 vInv.push_back(CInv(MSG_TX, hash));
3629 nRelayedTransactions++;
3630 {
3631 // Expire old relay messages
3632 while (!vRelayExpiration.empty() && vRelayExpiration.front().first < nNow)
3633 {
3634 mapRelay.erase(vRelayExpiration.front().second);
3635 vRelayExpiration.pop_front();
3636 }
3637
3638 auto ret = mapRelay.insert(std::make_pair(hash, std::move(txinfo.tx)));
3639 if (ret.second) {
3640 vRelayExpiration.push_back(std::make_pair(nNow + 15 * 60 * 1000000, ret.first));
3641 }
3642 }
3643 if (vInv.size() == MAX_INV_SZ) {
3644 connman->PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
3645 vInv.clear();
3646 }
3647 pto->filterInventoryKnown.insert(hash);
3648 }
3649 }
3650 }
3651 if (!vInv.empty())
3652 connman->PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
3653
3654 // Detect whether we're stalling
3655 nNow = GetTimeMicros();
3656 if (state.nStallingSince && state.nStallingSince < nNow - 1000000 * BLOCK_STALLING_TIMEOUT) {
3657 // Stalling only triggers when the block download window cannot move. During normal steady state,
3658 // the download window should be much larger than the to-be-downloaded set of blocks, so disconnection
3659 // should only happen during initial block download.
3660 LogPrintf("Peer=%d is stalling block download, disconnecting\n", pto->GetId());
3661 pto->fDisconnect = true;
3662 return true;
3663 }
        // In case there is a block that has been in flight from this peer for 2 + 0.5 * N times the block interval
        // (with N the number of peers from which we're downloading validated blocks), disconnect due to timeout.
        // We compensate for other peers to prevent killing off peers due to our own downstream link
        // being saturated. We only count validated in-flight blocks so peers can't advertise non-existing block hashes
        // to unreasonably increase our timeout.
        if (state.vBlocksInFlight.size() > 0) {
            // Only the front of the queue matters: it is the request that has
            // been outstanding the longest.
            QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
            // Exclude this peer itself from the count of "other" peers with
            // validated in-flight downloads.
            int nOtherPeersWithValidatedDownloads = nPeersWithValidatedDownloads - (state.nBlocksInFlightValidHeaders > 0);
            if (nNow > state.nDownloadingSince + consensusParams.nPowTargetSpacing * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) {
                LogPrintf("Timeout downloading block %s from peer=%d, disconnecting\n", queuedBlock.hash.ToString(), pto->GetId());
                pto->fDisconnect = true;
                return true;
            }
        }
        // Check for headers sync timeouts
        // (nHeadersSyncTimeout == int64 max means the timeout was satisfied once
        // already and is permanently disabled for this peer; see below.)
        if (state.fSyncStarted && state.nHeadersSyncTimeout < std::numeric_limits<int64_t>::max()) {
            // Detect whether this is a stalling initial-headers-sync peer:
            // our best known header is more than a day old, so we are clearly
            // still performing the initial headers sync.
            if (pindexBestHeader->GetBlockTime() <= GetAdjustedTime() - 24*60*60) {
                // Only act when this is our sole sync peer AND at least one
                // other preferred-download peer exists to take over.
                if (nNow > state.nHeadersSyncTimeout && nSyncStarted == 1 && (nPreferredDownload - state.fPreferredDownload >= 1)) {
                    // Disconnect a (non-whitelisted) peer if it is our only sync peer,
                    // and we have others we could be using instead.
                    // Note: If all our peers are inbound, then we won't
                    // disconnect our sync peer for stalling; we have bigger
                    // problems if we can't get any outbound peers.
                    if (!pto->fWhitelisted) {
                        LogPrintf("Timeout downloading headers from peer=%d, disconnecting\n", pto->GetId());
                        pto->fDisconnect = true;
                        return true;
                    } else {
                        LogPrintf("Timeout downloading headers from whitelisted peer=%d, not disconnecting\n", pto->GetId());
                        // Reset the headers sync state so that we have a
                        // chance to try downloading from a different peer.
                        // Note: this will also result in at least one more
                        // getheaders message to be sent to
                        // this peer (eventually).
                        state.fSyncStarted = false;
                        nSyncStarted--;
                        state.nHeadersSyncTimeout = 0;
                    }
                }
            } else {
                // After we've caught up once, reset the timeout so we can't trigger
                // disconnect later.
                state.nHeadersSyncTimeout = std::numeric_limits<int64_t>::max();
            }
        }

        // Check that outbound peers have reasonable chains
        // GetTime() is used by this anti-DoS logic so we can test this using mocktime
        ConsiderEviction(pto, GetTime());

        //
        // Message: getdata (blocks)
        //
        std::vector<CInv> vGetData;
        // Only request blocks from peers that serve them (!fClient), and only
        // when either this peer is a preferred fetch target that is not a
        // limited node, or we are past initial block download; never exceed
        // the per-peer in-flight limit. (fFetch is computed earlier in this
        // function, outside this view.)
        if (!pto->fClient && ((fFetch && !pto->m_limited_node) || !IsInitialBlockDownload()) && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
            std::vector<const CBlockIndex*> vToDownload;
            NodeId staller = -1;
            FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller, consensusParams);
            for (const CBlockIndex *pindex : vToDownload) {
                uint32_t nFetchFlags = GetFetchFlags(pto);
                vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
                MarkBlockAsInFlight(pto->GetId(), pindex->GetBlockHash(), pindex);
                LogPrint(BCLog::NET, "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(),
                    pindex->nHeight, pto->GetId());
            }
            // If nothing could be requested from this peer and another peer
            // ("staller") is blocking the download window, start that peer's
            // stalling timer (if not already running).
            if (state.nBlocksInFlight == 0 && staller != -1) {
                if (State(staller)->nStallingSince == 0) {
                    State(staller)->nStallingSince = nNow;
                    LogPrint(BCLog::NET, "Stall started peer=%d\n", staller);
                }
            }
        }

        //
        // Message: getdata (non-blocks)
        //
        // Drain every queued request whose scheduled send time has arrived.
        while (!pto->mapAskFor.empty() && (*pto->mapAskFor.begin()).first <= nNow)
        {
            const CInv& inv = (*pto->mapAskFor.begin()).second;
            if (!AlreadyHave(inv))
            {
                LogPrint(BCLog::NET, "Requesting %s peer=%d\n", inv.ToString(), pto->GetId());
                vGetData.push_back(inv);
                // Flush in batches of at most 1000 inv entries per message.
                if (vGetData.size() >= 1000)
                {
                    connman->PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
                    vGetData.clear();
                }
            } else {
                //If we're not going to ask, don't expect a response.
                pto->setAskFor.erase(inv.hash);
            }
            pto->mapAskFor.erase(pto->mapAskFor.begin());
        }
        // Send any remaining getdata entries (blocks and/or non-blocks).
        if (!vGetData.empty())
            connman->PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));

        //
        // Message: feefilter
        //
        // We don't want white listed peers to filter txs to us if we have -whitelistforcerelay
        if (pto->nVersion >= FEEFILTER_VERSION && gArgs.GetBoolArg("-feefilter", DEFAULT_FEEFILTER) &&
            !(pto->fWhitelisted && gArgs.GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY))) {
            // Current mempool minimum fee-per-KB, derived from the configured
            // -maxmempool size (in MB, hence * 1000000).
            CAmount currentFilter = mempool.GetMinFee(gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFeePerK();
            int64_t timeNow = GetTimeMicros();
            if (timeNow > pto->nextSendTimeFeeFilter) {
                static CFeeRate default_feerate(DEFAULT_MIN_RELAY_TX_FEE);
                static FeeFilterRounder filterRounder(default_feerate);
                // Quantize the filter value before sending it to the peer.
                CAmount filterToSend = filterRounder.round(currentFilter);
                // We always have a fee filter of at least minRelayTxFee
                filterToSend = std::max(filterToSend, ::minRelayTxFee.GetFeePerK());
                // Only send a feefilter message when the value actually changed.
                if (filterToSend != pto->lastSentFeeFilter) {
                    connman->PushMessage(pto, msgMaker.Make(NetMsgType::FEEFILTER, filterToSend));
                    pto->lastSentFeeFilter = filterToSend;
                }
                // Next broadcast time is randomized (Poisson) around the
                // average feefilter broadcast interval.
                pto->nextSendTimeFeeFilter = PoissonNextSend(timeNow, AVG_FEEFILTER_BROADCAST_INTERVAL);
            }
            // If the fee filter has changed substantially and it's still more than MAX_FEEFILTER_CHANGE_DELAY
            // until scheduled broadcast, then move the broadcast to within MAX_FEEFILTER_CHANGE_DELAY.
            else if (timeNow + MAX_FEEFILTER_CHANGE_DELAY * 1000000 < pto->nextSendTimeFeeFilter &&
                (currentFilter < 3 * pto->lastSentFeeFilter / 4 || currentFilter > 4 * pto->lastSentFeeFilter / 3)) {
                pto->nextSendTimeFeeFilter = timeNow + GetRandInt(MAX_FEEFILTER_CHANGE_DELAY) * 1000000;
            }
        }
    }
    return true;
}
3792
3793 class CNetProcessingCleanup
3794 {
3795 public:
CNetProcessingCleanup()3796 CNetProcessingCleanup() {}
~CNetProcessingCleanup()3797 ~CNetProcessingCleanup() {
3798 // orphan transactions
3799 mapOrphanTransactions.clear();
3800 mapOrphanTransactionsByPrev.clear();
3801 }
3802 } instance_of_cnetprocessingcleanup;
3803