1 // Copyright (c) 2009-2010 Satoshi Nakamoto
2 // Copyright (c) 2009-2020 The Bitcoin Core developers
3 // Distributed under the MIT software license, see the accompanying
4 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
5
6 #include <validation.h>
7
8 #include <arith_uint256.h>
9 #include <auxpow.h>
10 #include <chain.h>
11 #include <chainparams.h>
12 #include <checkqueue.h>
13 #include <consensus/consensus.h>
14 #include <consensus/merkle.h>
15 #include <consensus/tx_check.h>
16 #include <consensus/tx_verify.h>
17 #include <consensus/validation.h>
18 #include <cuckoocache.h>
19 #include <flatfile.h>
20 #include <hash.h>
21 #include <index/txindex.h>
22 #include <logging.h>
23 #include <logging/timer.h>
24 #include <names/main.h>
25 #include <names/mempool.h>
26 #include <node/ui_interface.h>
27 #include <optional.h>
28 #include <policy/fees.h>
29 #include <policy/policy.h>
30 #include <policy/settings.h>
31 #include <pow.h>
32 #include <primitives/block.h>
33 #include <primitives/transaction.h>
34 #include <random.h>
35 #include <reverse_iterator.h>
36 #include <script/script.h>
37 #include <script/sigcache.h>
38 #include <shutdown.h>
39 #include <signet.h>
40 #include <timedata.h>
41 #include <tinyformat.h>
42 #include <txdb.h>
43 #include <txmempool.h>
44 #include <uint256.h>
45 #include <undo.h>
46 #include <util/check.h> // For NDEBUG compile time check
47 #include <util/moneystr.h>
48 #include <util/rbf.h>
49 #include <util/strencodings.h>
50 #include <util/system.h>
51 #include <util/translation.h>
52 #include <validationinterface.h>
53 #include <warnings.h>
54
55 #include <string>
56
57 #include <boost/algorithm/string/replace.hpp>
58
// Scale factors used when reporting bench timings in logs.
#define MICRO 0.000001
#define MILLI 0.001

/**
 * An extra transaction can be added to a package, as long as it only has one
 * ancestor and is no larger than this. Not really any reason to make this
 * configurable as it doesn't materially change DoS parameters.
 */
static const unsigned int EXTRA_DESCENDANT_TX_SIZE_LIMIT = 10000;
/** Maximum kilobytes for transactions to store for processing during reorg */
static const unsigned int MAX_DISCONNECTED_TX_POOL_SIZE = 20000;
/** The pre-allocation chunk size for blk?????.dat files (since 0.8) */
static const unsigned int BLOCKFILE_CHUNK_SIZE = 0x1000000; // 16 MiB
/** The pre-allocation chunk size for rev?????.dat files (since 0.8) */
static const unsigned int UNDOFILE_CHUNK_SIZE = 0x100000; // 1 MiB
/** Time to wait between writing blocks/block index to disk. */
static constexpr std::chrono::hours DATABASE_WRITE_INTERVAL{1};
/** Time to wait between flushing chainstate to disk. */
static constexpr std::chrono::hours DATABASE_FLUSH_INTERVAL{24};
/** Maximum age of our tip for us to be considered current for fee estimation */
static constexpr std::chrono::hours MAX_FEE_ESTIMATION_TIP_AGE{3};
/** Human-readable descriptions of the block verification levels (each level
 *  includes the checks of all lower levels). */
const std::vector<std::string> CHECKLEVEL_DOC {
    "level 0 reads the blocks from disk",
    "level 1 verifies block validity",
    "level 2 verifies undo data",
    "level 3 checks disconnection of tip blocks",
    "level 4 tries to reconnect the blocks",
    "each level includes the checks of the previous levels",
};
88
operator ()(const CBlockIndex * pa,const CBlockIndex * pb) const89 bool CBlockIndexWorkComparator::operator()(const CBlockIndex *pa, const CBlockIndex *pb) const {
90 // First sort by most total work, ...
91 if (pa->nChainWork > pb->nChainWork) return false;
92 if (pa->nChainWork < pb->nChainWork) return true;
93
94 // ... then by earliest time received, ...
95 if (pa->nSequenceId < pb->nSequenceId) return false;
96 if (pa->nSequenceId > pb->nSequenceId) return true;
97
98 // Use pointer address as tie breaker (should only happen with blocks
99 // loaded from disk, as those all have id 0).
100 if (pa < pb) return false;
101 if (pa > pb) return true;
102
103 // Identical blocks.
104 return false;
105 }
106
107 ChainstateManager g_chainman;
108
ChainstateActive()109 CChainState& ChainstateActive()
110 {
111 LOCK(::cs_main);
112 assert(g_chainman.m_active_chainstate);
113 return *g_chainman.m_active_chainstate;
114 }
115
ChainActive()116 CChain& ChainActive()
117 {
118 LOCK(::cs_main);
119 return ::ChainstateActive().m_chain;
120 }
121
/**
 * Mutex to guard access to validation specific variables, such as reading
 * or changing the chainstate.
 *
 * This may also need to be locked when updating the transaction pool, e.g. on
 * AcceptToMemoryPool. See CTxMemPool::cs comment for details.
 *
 * The transaction pool has a separate lock to allow reading from it and the
 * chainstate at the same time.
 */
RecursiveMutex cs_main;

// Best known block header; compared against the active chain height in
// IsCurrentForFeeEstimation() below to judge how far the tip lags behind.
CBlockIndex *pindexBestHeader = nullptr;
// g_best_block holds a block hash; guarded by g_best_block_mutex, with
// waiters notified through g_best_block_cv (presumably on new tips —
// the notification site is outside this chunk; confirm against callers).
Mutex g_best_block_mutex;
std::condition_variable g_best_block_cv;
uint256 g_best_block;
bool g_parallel_script_checks{false};
std::atomic_bool fImporting(false);
std::atomic_bool fReindex(false);
bool fHavePruned = false;
bool fPruneMode = false;
// Whether non-standard transactions are rejected from the mempool
// (checked in MemPoolAccept::PreChecks).
bool fRequireStandard = true;
bool fCheckBlockIndex = false;
bool fCheckpointsEnabled = DEFAULT_CHECKPOINTS_ENABLED;
uint64_t nPruneTarget = 0;
int64_t nMaxTipAge = DEFAULT_MAX_TIP_AGE;

uint256 hashAssumeValid;
arith_uint256 nMinimumChainWork;

// Minimum feerate for mempool acceptance; enforced in
// MemPoolAccept::CheckFeeRate().
CFeeRate minRelayTxFee = CFeeRate(DEFAULT_MIN_RELAY_TX_FEE);

CBlockPolicyEstimator feeEstimator;

// Internal stuff
namespace {
    CBlockIndex* pindexBestInvalid = nullptr;

    // Guards the block/undo file bookkeeping state below.
    RecursiveMutex cs_LastBlockFile;
    std::vector<CBlockFileInfo> vinfoBlockFile;
    int nLastBlockFile = 0;
    /** Global flag to indicate we should check to see if there are
    *  block/undo files that should be deleted.  Set on startup
    *  or if we allocate more file space when we're in prune mode
    */
    bool fCheckForPruning = false;

    /** Dirty block index entries. */
    std::set<CBlockIndex*> setDirtyBlockIndex;

    /** Dirty block file entries. */
    std::set<int> setDirtyFileInfo;
} // anon namespace
175
LookupBlockIndex(const uint256 & hash)176 CBlockIndex* LookupBlockIndex(const uint256& hash)
177 {
178 AssertLockHeld(cs_main);
179 BlockMap::const_iterator it = g_chainman.BlockIndex().find(hash);
180 return it == g_chainman.BlockIndex().end() ? nullptr : it->second;
181 }
182
FindForkInGlobalIndex(const CChain & chain,const CBlockLocator & locator)183 CBlockIndex* FindForkInGlobalIndex(const CChain& chain, const CBlockLocator& locator)
184 {
185 AssertLockHeld(cs_main);
186
187 // Find the latest block common to locator and chain - we expect that
188 // locator.vHave is sorted descending by height.
189 for (const uint256& hash : locator.vHave) {
190 CBlockIndex* pindex = LookupBlockIndex(hash);
191 if (pindex) {
192 if (chain.Contains(pindex))
193 return pindex;
194 if (pindex->GetAncestor(chain.Height()) == chain.Tip()) {
195 return chain.Tip();
196 }
197 }
198 }
199 return chain.Genesis();
200 }
201
// Global access point for the block tree database (see txdb.h).
std::unique_ptr<CBlockTreeDB> pblocktree;

// Forward declarations for helpers defined later in this file.
bool CheckInputScripts(const CTransaction& tx, TxValidationState &state, const CCoinsViewCache &inputs, unsigned int flags, bool cacheSigStore, bool cacheFullScriptStore, PrecomputedTransactionData& txdata, std::vector<CScriptCheck> *pvChecks = nullptr);
static FILE* OpenUndoFile(const FlatFilePos &pos, bool fReadOnly = false);
static FlatFileSeq BlockFileSeq();
static FlatFileSeq UndoFileSeq();
208
CheckFinalTx(const CTransaction & tx,int flags)209 bool CheckFinalTx(const CTransaction &tx, int flags)
210 {
211 AssertLockHeld(cs_main);
212
213 // By convention a negative value for flags indicates that the
214 // current network-enforced consensus rules should be used. In
215 // a future soft-fork scenario that would mean checking which
216 // rules would be enforced for the next block and setting the
217 // appropriate flags. At the present time no soft-forks are
218 // scheduled, so no flags are set.
219 flags = std::max(flags, 0);
220
221 // CheckFinalTx() uses ::ChainActive().Height()+1 to evaluate
222 // nLockTime because when IsFinalTx() is called within
223 // CBlock::AcceptBlock(), the height of the block *being*
224 // evaluated is what is used. Thus if we want to know if a
225 // transaction can be part of the *next* block, we need to call
226 // IsFinalTx() with one more than ::ChainActive().Height().
227 const int nBlockHeight = ::ChainActive().Height() + 1;
228
229 // BIP113 requires that time-locked transactions have nLockTime set to
230 // less than the median time of the previous block they're contained in.
231 // When the next block is created its previous block will be the current
232 // chain tip, so we use that to calculate the median time passed to
233 // IsFinalTx() if LOCKTIME_MEDIAN_TIME_PAST is set.
234 const int64_t nBlockTime = (flags & LOCKTIME_MEDIAN_TIME_PAST)
235 ? ::ChainActive().Tip()->GetMedianTimePast()
236 : GetAdjustedTime();
237
238 return IsFinalTx(tx, nBlockHeight, nBlockTime);
239 }
240
TestLockPointValidity(const LockPoints * lp)241 bool TestLockPointValidity(const LockPoints* lp)
242 {
243 AssertLockHeld(cs_main);
244 assert(lp);
245 // If there are relative lock times then the maxInputBlock will be set
246 // If there are no relative lock times, the LockPoints don't depend on the chain
247 if (lp->maxInputBlock) {
248 // Check whether ::ChainActive() is an extension of the block at which the LockPoints
249 // calculation was valid. If not LockPoints are no longer valid
250 if (!::ChainActive().Contains(lp->maxInputBlock)) {
251 return false;
252 }
253 }
254
255 // LockPoints still valid
256 return true;
257 }
258
/**
 * Check whether tx's sequence locks (BIP68 relative lock times) would be
 * satisfied if it were mined in the next block.
 *
 * @param pool                   Mempool used to resolve unconfirmed inputs.
 * @param tx                     Transaction to evaluate.
 * @param flags                  Lock-time verification flags, passed to
 *                               CalculateSequenceLocks().
 * @param lp                     If useExistingLockPoints, the precomputed
 *                               LockPoints to reuse; otherwise (when
 *                               non-null) filled with the freshly computed
 *                               LockPoints for later reuse.
 * @param useExistingLockPoints  Skip recomputation and trust *lp.
 * @return true if the sequence locks pass for the next block.
 */
bool CheckSequenceLocks(const CTxMemPool& pool, const CTransaction& tx, int flags, LockPoints* lp, bool useExistingLockPoints)
{
    AssertLockHeld(cs_main);
    AssertLockHeld(pool.cs);

    CBlockIndex* tip = ::ChainActive().Tip();
    assert(tip != nullptr);

    // Build a phantom index for the (not yet mined) next block.
    CBlockIndex index;
    index.pprev = tip;
    // CheckSequenceLocks() uses ::ChainActive().Height()+1 to evaluate
    // height based locks because when SequenceLocks() is called within
    // ConnectBlock(), the height of the block *being*
    // evaluated is what is used.
    // Thus if we want to know if a transaction can be part of the
    // *next* block, we need to use one more than ::ChainActive().Height()
    index.nHeight = tip->nHeight + 1;

    std::pair<int, int64_t> lockPair;
    if (useExistingLockPoints) {
        assert(lp);
        lockPair.first = lp->height;
        lockPair.second = lp->time;
    }
    else {
        // CoinsTip() contains the UTXO set for ::ChainActive().Tip()
        CCoinsViewMemPool viewMemPool(&::ChainstateActive().CoinsTip(), pool);
        std::vector<int> prevheights;
        prevheights.resize(tx.vin.size());
        for (size_t txinIndex = 0; txinIndex < tx.vin.size(); txinIndex++) {
            const CTxIn& txin = tx.vin[txinIndex];
            Coin coin;
            if (!viewMemPool.GetCoin(txin.prevout, coin)) {
                return error("%s: Missing input", __func__);
            }
            if (coin.nHeight == MEMPOOL_HEIGHT) {
                // Assume all mempool transaction confirm in the next block
                prevheights[txinIndex] = tip->nHeight + 1;
            } else {
                prevheights[txinIndex] = coin.nHeight;
            }
        }
        lockPair = CalculateSequenceLocks(tx, flags, prevheights, index);
        if (lp) {
            lp->height = lockPair.first;
            lp->time = lockPair.second;
            // Also store the hash of the block with the highest height of
            // all the blocks which have sequence locked prevouts.
            // This hash needs to still be on the chain
            // for these LockPoint calculations to be valid
            // Note: It is impossible to correctly calculate a maxInputBlock
            // if any of the sequence locked inputs depend on unconfirmed txs,
            // except in the special case where the relative lock time/height
            // is 0, which is equivalent to no sequence lock. Since we assume
            // input height of tip+1 for mempool txs and test the resulting
            // lockPair from CalculateSequenceLocks against tip+1. We know
            // EvaluateSequenceLocks will fail if there was a non-zero sequence
            // lock on a mempool input, so we can use the return value of
            // CheckSequenceLocks to indicate the LockPoints validity
            int maxInputHeight = 0;
            for (const int height : prevheights) {
                // Can ignore mempool inputs since we'll fail if they had non-zero locks
                if (height != tip->nHeight+1) {
                    maxInputHeight = std::max(maxInputHeight, height);
                }
            }
            lp->maxInputBlock = tip->GetAncestor(maxInputHeight);
        }
    }
    return EvaluateSequenceLocks(index, lockPair);
}
330
// Returns the script flags which should be checked for a given block
// (forward declaration; defined later in this file).
static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consensus::Params& chainparams);
333
LimitMempoolSize(CTxMemPool & pool,size_t limit,std::chrono::seconds age)334 static void LimitMempoolSize(CTxMemPool& pool, size_t limit, std::chrono::seconds age)
335 EXCLUSIVE_LOCKS_REQUIRED(pool.cs, ::cs_main)
336 {
337 int expired = pool.Expire(GetTime<std::chrono::seconds>() - age);
338 if (expired != 0) {
339 LogPrint(BCLog::MEMPOOL, "Expired %i transactions from the memory pool\n", expired);
340 }
341
342 std::vector<COutPoint> vNoSpendsRemaining;
343 pool.TrimToSize(limit, &vNoSpendsRemaining);
344 for (const COutPoint& removed : vNoSpendsRemaining)
345 ::ChainstateActive().CoinsTip().Uncache(removed);
346 }
347
IsCurrentForFeeEstimation()348 static bool IsCurrentForFeeEstimation() EXCLUSIVE_LOCKS_REQUIRED(cs_main)
349 {
350 AssertLockHeld(cs_main);
351 if (::ChainstateActive().IsInitialBlockDownload())
352 return false;
353 if (::ChainActive().Tip()->GetBlockTime() < count_seconds(GetTime<std::chrono::seconds>() - MAX_FEE_ESTIMATION_TIP_AGE))
354 return false;
355 if (::ChainActive().Height() < pindexBestHeader->nHeight - 1)
356 return false;
357 return true;
358 }
359
/* Make mempool consistent after a reorg, by re-adding or recursively erasing
 * disconnected block transactions from the mempool, and also removing any
 * other transactions from the mempool that are no longer valid given the new
 * tip/height.
 *
 * Note: we assume that disconnectpool only contains transactions that are NOT
 * confirmed in the current chain nor already in the mempool (otherwise,
 * in-mempool descendants of such transactions would be removed).
 *
 * Passing fAddToMempool=false will skip trying to add the transactions back,
 * and instead just erase from the mempool as needed.
 */

static void UpdateMempoolForReorg(CTxMemPool& mempool, DisconnectedBlockTransactions& disconnectpool, bool fAddToMempool) EXCLUSIVE_LOCKS_REQUIRED(cs_main, mempool.cs)
{
    AssertLockHeld(cs_main);
    AssertLockHeld(mempool.cs);
    std::vector<uint256> vHashUpdate;
    // disconnectpool's insertion_order index sorts the entries from
    // oldest to newest, but the oldest entry will be the last tx from the
    // latest mined block that was disconnected.
    // Iterate disconnectpool in reverse, so that we add transactions
    // back to the mempool starting with the earliest transaction that had
    // been previously seen in a block.
    auto it = disconnectpool.queuedTx.get<insertion_order>().rbegin();
    while (it != disconnectpool.queuedTx.get<insertion_order>().rend()) {
        // ignore validation errors in resurrected transactions
        TxValidationState stateDummy;
        if (!fAddToMempool || (*it)->IsCoinBase() ||
            !AcceptToMemoryPool(mempool, stateDummy, *it,
                nullptr /* plTxnReplaced */, true /* bypass_limits */)) {
            // If the transaction doesn't make it in to the mempool, remove any
            // transactions that depend on it (which would now be orphans).
            mempool.removeRecursive(**it, MemPoolRemovalReason::REORG);
        } else if (mempool.exists((*it)->GetHash())) {
            // Successfully re-added: remember it so descendant state can be
            // fixed up below.
            vHashUpdate.push_back((*it)->GetHash());
        }
        ++it;
    }
    disconnectpool.queuedTx.clear();
    // AcceptToMemoryPool/addUnchecked all assume that new mempool entries have
    // no in-mempool children, which is generally not true when adding
    // previously-confirmed transactions back to the mempool.
    // UpdateTransactionsFromBlock finds descendants of any transactions in
    // the disconnectpool that were added back and cleans up the mempool state.
    mempool.UpdateTransactionsFromBlock(vHashUpdate);

    // We also need to remove any now-immature transactions
    mempool.removeForReorg(&::ChainstateActive().CoinsTip(), ::ChainActive().Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS);
    // Re-limit mempool size, in case we added any transactions
    LimitMempoolSize(mempool, gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, std::chrono::hours{gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY)});
}
412
// Used to avoid mempool polluting consensus critical paths if CCoinsViewMempool
// were somehow broken and returning the wrong scriptPubKeys.
// Re-runs script checks against the confirmed-UTXO/mempool view and caches the
// results, after asserting that every input coin seen through `view` matches
// what the mempool or the chainstate coins cache reports.
static bool CheckInputsFromMempoolAndCache(const CTransaction& tx, TxValidationState& state, const CCoinsViewCache& view, const CTxMemPool& pool,
                                           unsigned int flags, PrecomputedTransactionData& txdata) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
    AssertLockHeld(cs_main);

    // pool.cs should be locked already, but go ahead and re-take the lock here
    // to enforce that mempool doesn't change between when we check the view
    // and when we actually call through to CheckInputScripts
    LOCK(pool.cs);

    assert(!tx.IsCoinBase());
    for (const CTxIn& txin : tx.vin) {
        const Coin& coin = view.AccessCoin(txin.prevout);

        // AcceptToMemoryPoolWorker has already checked that the coins are
        // available, so this shouldn't fail. If the inputs are not available
        // here then return false.
        if (coin.IsSpent()) return false;

        // Check equivalence for available inputs.
        const CTransactionRef& txFrom = pool.get(txin.prevout.hash);
        if (txFrom) {
            // Input is an unconfirmed mempool transaction: its output must
            // match the coin the view handed us.
            assert(txFrom->GetHash() == txin.prevout.hash);
            assert(txFrom->vout.size() > txin.prevout.n);
            assert(txFrom->vout[txin.prevout.n] == coin.out);
        } else {
            // Input is confirmed: cross-check against the chainstate coins cache.
            const Coin& coinFromDisk = ::ChainstateActive().CoinsTip().AccessCoin(txin.prevout);
            assert(!coinFromDisk.IsSpent());
            assert(coinFromDisk.out == coin.out);
        }
    }

    // Call CheckInputScripts() to cache signature and script validity against current tip consensus rules.
    return CheckInputScripts(tx, state, view, flags, /* cacheSigStore = */ true, /* cacheFullSciptStore = */ true, txdata);
}
449
450 namespace {
451
/**
 * Carries out the stages of accepting a single transaction into the mempool:
 * fast policy checks (PreChecks), policy-flag script checks
 * (PolicyScriptChecks), consensus-flag script checks (ConsensusScriptChecks),
 * and final insertion (Finalize).
 */
class MemPoolAccept
{
public:
    MemPoolAccept(CTxMemPool& mempool) : m_pool(mempool), m_view(&m_dummy), m_viewmempool(&::ChainstateActive().CoinsTip(), m_pool),
        m_limit_ancestors(gArgs.GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT)),
        m_limit_ancestor_size(gArgs.GetArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT)*1000),
        m_limit_descendants(gArgs.GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT)),
        m_limit_descendant_size(gArgs.GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT)*1000) {}

    // We put the arguments we're handed into a struct, so we can pass them
    // around easier.
    struct ATMPArgs {
        const CChainParams& m_chainparams;
        // Result of validation; filled in on failure.
        TxValidationState &m_state;
        const int64_t m_accept_time;
        std::list<CTransactionRef>* m_replaced_transactions;
        // When set, skip fee-based limits (used for reorg resurrection).
        const bool m_bypass_limits;
        /*
         * Return any outpoints which were not previously present in the coins
         * cache, but were added as a result of validating the tx for mempool
         * acceptance. This allows the caller to optionally remove the cache
         * additions if the associated transaction ends up being rejected by
         * the mempool.
         */
        std::vector<COutPoint>& m_coins_to_uncache;
        // When set, run all checks but do not actually add to the mempool.
        const bool m_test_accept;
        // If non-null, the transaction's total fee is written here.
        CAmount* m_fee_out;
    };

    // Single transaction acceptance
    bool AcceptSingleTransaction(const CTransactionRef& ptx, ATMPArgs& args) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

private:
    // All the intermediate state that gets passed between the various levels
    // of checking a given transaction.
    struct Workspace {
        Workspace(const CTransactionRef& ptx) : m_ptx(ptx), m_hash(ptx->GetHash()) {}
        // Txids of in-mempool transactions conflicting with this tx's inputs.
        std::set<uint256> m_conflicts;
        CTxMemPool::setEntries m_all_conflicting;
        CTxMemPool::setEntries m_ancestors;
        std::unique_ptr<CTxMemPoolEntry> m_entry;

        bool m_replacement_transaction;
        // Fee after any PrioritiseTransaction deltas are applied.
        CAmount m_modified_fees;
        CAmount m_conflicting_fees;
        size_t m_conflicting_size;

        const CTransactionRef& m_ptx;
        const uint256& m_hash;
    };

    // Run the policy checks on a given transaction, excluding any script checks.
    // Looks up inputs, calculates feerate, considers replacement, evaluates
    // package limits, etc. As this function can be invoked for "free" by a peer,
    // only tests that are fast should be done here (to avoid CPU DoS).
    bool PreChecks(ATMPArgs& args, Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);

    // Run the script checks using our policy flags. As this can be slow, we should
    // only invoke this on transactions that have otherwise passed policy checks.
    bool PolicyScriptChecks(ATMPArgs& args, Workspace& ws, PrecomputedTransactionData& txdata) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    // Re-run the script checks, using consensus flags, and try to cache the
    // result in the scriptcache. This should be done after
    // PolicyScriptChecks(). This requires that all inputs either be in our
    // utxo set or in the mempool.
    bool ConsensusScriptChecks(ATMPArgs& args, Workspace& ws, PrecomputedTransactionData &txdata) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    // Try to add the transaction to the mempool, removing any conflicts first.
    // Returns true if the transaction is in the mempool after any size
    // limiting is performed, false otherwise.
    bool Finalize(ATMPArgs& args, Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);

    // Compare a package's feerate against minimum allowed.
    bool CheckFeeRate(size_t package_size, CAmount package_fee, TxValidationState& state)
    {
        // Dynamic floor: rises as the mempool approaches -maxmempool bytes.
        CAmount mempoolRejectFee = m_pool.GetMinFee(gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFee(package_size);
        if (mempoolRejectFee > 0 && package_fee < mempoolRejectFee) {
            return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "mempool min fee not met", strprintf("%d < %d", package_fee, mempoolRejectFee));
        }

        // Static floor: the configured minimum relay feerate.
        if (package_fee < ::minRelayTxFee.GetFee(package_size)) {
            return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "min relay fee not met", strprintf("%d < %d", package_fee, ::minRelayTxFee.GetFee(package_size)));
        }
        return true;
    }

private:
    CTxMemPool& m_pool;
    CCoinsViewCache m_view;
    CCoinsViewMemPool m_viewmempool;
    CCoinsView m_dummy;

    // The package limits in effect at the time of invocation.
    const size_t m_limit_ancestors;
    const size_t m_limit_ancestor_size;
    // These may be modified while evaluating a transaction (eg to account for
    // in-mempool conflicts; see below).
    size_t m_limit_descendants;
    size_t m_limit_descendant_size;
};
552
PreChecks(ATMPArgs & args,Workspace & ws)553 bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws)
554 {
555 const CTransactionRef& ptx = ws.m_ptx;
556 const CTransaction& tx = *ws.m_ptx;
557 const uint256& hash = ws.m_hash;
558
559 // Copy/alias what we need out of args
560 TxValidationState &state = args.m_state;
561 const int64_t nAcceptTime = args.m_accept_time;
562 const bool bypass_limits = args.m_bypass_limits;
563 std::vector<COutPoint>& coins_to_uncache = args.m_coins_to_uncache;
564
565 // Alias what we need out of ws
566 std::set<uint256>& setConflicts = ws.m_conflicts;
567 CTxMemPool::setEntries& allConflicting = ws.m_all_conflicting;
568 CTxMemPool::setEntries& setAncestors = ws.m_ancestors;
569 std::unique_ptr<CTxMemPoolEntry>& entry = ws.m_entry;
570 bool& fReplacementTransaction = ws.m_replacement_transaction;
571 CAmount& nModifiedFees = ws.m_modified_fees;
572 CAmount& nConflictingFees = ws.m_conflicting_fees;
573 size_t& nConflictingSize = ws.m_conflicting_size;
574
575 if (!CheckTransaction(tx, state)) {
576 return false; // state filled in by CheckTransaction
577 }
578
579 // Coinbase is only valid in a block, not as a loose transaction
580 if (tx.IsCoinBase())
581 return state.Invalid(TxValidationResult::TX_CONSENSUS, "coinbase");
582
583 // Rather not work on nonstandard transactions (unless -testnet/-regtest)
584 std::string reason;
585 if (fRequireStandard && !IsStandardTx(tx, reason))
586 return state.Invalid(TxValidationResult::TX_NOT_STANDARD, reason);
587
588 // Do not work on transactions that are too small.
589 // A transaction with 1 segwit input and 1 P2WPHK output has non-witness size of 82 bytes.
590 // Transactions smaller than this are not relayed to mitigate CVE-2017-12842 by not relaying
591 // 64-byte transactions.
592 if (::GetSerializeSize(tx, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) < MIN_STANDARD_TX_NONWITNESS_SIZE)
593 return state.Invalid(TxValidationResult::TX_NOT_STANDARD, "tx-size-small");
594
595 // Only accept nLockTime-using transactions that can be mined in the next
596 // block; we don't want our mempool filled up with transactions that can't
597 // be mined yet.
598 if (!CheckFinalTx(tx, STANDARD_LOCKTIME_VERIFY_FLAGS))
599 return state.Invalid(TxValidationResult::TX_PREMATURE_SPEND, "non-final");
600
601 // is it already in the memory pool?
602 if (m_pool.exists(hash)) {
603 return state.Invalid(TxValidationResult::TX_CONFLICT, "txn-already-in-mempool");
604 }
605
606 // Check for conflicts with in-memory transactions
607 for (const CTxIn &txin : tx.vin)
608 {
609 const CTransaction* ptxConflicting = m_pool.GetConflictTx(txin.prevout);
610 if (ptxConflicting) {
611 if (!setConflicts.count(ptxConflicting->GetHash()))
612 {
613 // Allow opt-out of transaction replacement by setting
614 // nSequence > MAX_BIP125_RBF_SEQUENCE (SEQUENCE_FINAL-2) on all inputs.
615 //
616 // SEQUENCE_FINAL-1 is picked to still allow use of nLockTime by
617 // non-replaceable transactions. All inputs rather than just one
618 // is for the sake of multi-party protocols, where we don't
619 // want a single party to be able to disable replacement.
620 //
621 // The opt-out ignores descendants as anyone relying on
622 // first-seen mempool behavior should be checking all
623 // unconfirmed ancestors anyway; doing otherwise is hopelessly
624 // insecure.
625 bool fReplacementOptOut = true;
626 for (const CTxIn &_txin : ptxConflicting->vin)
627 {
628 if (_txin.nSequence <= MAX_BIP125_RBF_SEQUENCE)
629 {
630 fReplacementOptOut = false;
631 break;
632 }
633 }
634 if (fReplacementOptOut) {
635 return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "txn-mempool-conflict");
636 }
637
638 setConflicts.insert(ptxConflicting->GetHash());
639 }
640 }
641 }
642
643 if (!m_pool.checkNameOps(tx))
644 return state.Invalid(TxValidationResult::TX_CONFLICT,
645 "txn-mempool-name-error");
646
647 LockPoints lp;
648 m_view.SetBackend(m_viewmempool);
649
650 CCoinsViewCache& coins_cache = ::ChainstateActive().CoinsTip();
651 // do all inputs exist?
652 for (const CTxIn& txin : tx.vin) {
653 if (!coins_cache.HaveCoinInCache(txin.prevout)) {
654 coins_to_uncache.push_back(txin.prevout);
655 }
656
657 // Note: this call may add txin.prevout to the coins cache
658 // (coins_cache.cacheCoins) by way of FetchCoin(). It should be removed
659 // later (via coins_to_uncache) if this tx turns out to be invalid.
660 if (!m_view.HaveCoin(txin.prevout)) {
661 // Are inputs missing because we already have the tx?
662 for (size_t out = 0; out < tx.vout.size(); out++) {
663 // See if we have any output in the UTXO set.
664 if (coins_cache.HaveCoin(COutPoint(hash, out))) {
665 return state.Invalid(TxValidationResult::TX_CONFLICT, "txn-already-known");
666 }
667 }
668 // Otherwise assume this might be an orphan tx for which we just haven't seen parents yet
669 return state.Invalid(TxValidationResult::TX_MISSING_INPUTS, "bad-txns-inputs-missingorspent");
670 }
671 }
672
673 // Bring the best block into scope
674 m_view.GetBestBlock();
675
676 /* If this is a name update (or firstupdate), make sure that the
677 existing name entry (if any) is in the dummy cache. Otherwise
678 tx validation done below (in CheckInputs) will not be correct. */
679 for (const auto& txout : tx.vout)
680 {
681 const CNameScript nameOp(txout.scriptPubKey);
682 if (nameOp.isNameOp() && nameOp.isAnyUpdate())
683 {
684 const valtype& name = nameOp.getOpName();
685 CNameData data;
686 if (m_view.GetName(name, data))
687 m_view.SetName(name, data, false);
688 }
689 }
690
691 // we have all inputs cached now, so switch back to dummy (to protect
692 // against bugs where we pull more inputs from disk that miss being added
693 // to coins_to_uncache)
694 m_view.SetBackend(m_dummy);
695
696 // Only accept BIP68 sequence locked transactions that can be mined in the next
697 // block; we don't want our mempool filled up with transactions that can't
698 // be mined yet.
699 // Must keep pool.cs for this unless we change CheckSequenceLocks to take a
700 // CoinsViewCache instead of create its own
701 if (!CheckSequenceLocks(m_pool, tx, STANDARD_LOCKTIME_VERIFY_FLAGS, &lp))
702 return state.Invalid(TxValidationResult::TX_PREMATURE_SPEND, "non-BIP68-final");
703
704 CAmount nFees = 0;
705 if (!Consensus::CheckTxInputs(tx, state, m_view, GetSpendHeight(m_view), SCRIPT_VERIFY_NAMES_MEMPOOL, nFees)) {
706 return false; // state filled in by CheckTxInputs
707 }
708
709 // If fee_out is passed, return the fee to the caller
710 if (args.m_fee_out) {
711 *args.m_fee_out = nFees;
712 }
713
714 // Check for non-standard pay-to-script-hash in inputs
715 const auto& params = args.m_chainparams.GetConsensus();
716 auto taproot_state = VersionBitsState(::ChainActive().Tip(), params, Consensus::DEPLOYMENT_TAPROOT, versionbitscache);
717 if (fRequireStandard && !AreInputsStandard(tx, m_view, taproot_state == ThresholdState::ACTIVE)) {
718 return state.Invalid(TxValidationResult::TX_INPUTS_NOT_STANDARD, "bad-txns-nonstandard-inputs");
719 }
720
721 // Check for non-standard witness in P2WSH
722 if (tx.HasWitness() && fRequireStandard && !IsWitnessStandard(tx, m_view))
723 return state.Invalid(TxValidationResult::TX_WITNESS_MUTATED, "bad-witness-nonstandard");
724
725 int64_t nSigOpsCost = GetTransactionSigOpCost(tx, m_view, STANDARD_SCRIPT_VERIFY_FLAGS);
726
727 // nModifiedFees includes any fee deltas from PrioritiseTransaction
728 nModifiedFees = nFees;
729 m_pool.ApplyDelta(hash, nModifiedFees);
730
731 // Keep track of transactions that spend a coinbase, which we re-scan
732 // during reorgs to ensure COINBASE_MATURITY is still met.
733 bool fSpendsCoinbase = false;
734 for (const CTxIn &txin : tx.vin) {
735 const Coin &coin = m_view.AccessCoin(txin.prevout);
736 if (coin.IsCoinBase()) {
737 fSpendsCoinbase = true;
738 break;
739 }
740 }
741
742 entry.reset(new CTxMemPoolEntry(ptx, nFees, nAcceptTime, ::ChainActive().Height(),
743 fSpendsCoinbase, nSigOpsCost, lp));
744 unsigned int nSize = entry->GetTxSize();
745
746 if (nSigOpsCost > MAX_STANDARD_TX_SIGOPS_COST)
747 return state.Invalid(TxValidationResult::TX_NOT_STANDARD, "bad-txns-too-many-sigops",
748 strprintf("%d", nSigOpsCost));
749
750 // No transactions are allowed below minRelayTxFee except from disconnected
751 // blocks
752 if (!bypass_limits && !CheckFeeRate(nSize, nModifiedFees, state)) return false;
753
754 const CTxMemPool::setEntries setIterConflicting = m_pool.GetIterSet(setConflicts);
755 // Calculate in-mempool ancestors, up to a limit.
756 if (setConflicts.size() == 1) {
757 // In general, when we receive an RBF transaction with mempool conflicts, we want to know whether we
758 // would meet the chain limits after the conflicts have been removed. However, there isn't a practical
759 // way to do this short of calculating the ancestor and descendant sets with an overlay cache of
760 // changed mempool entries. Due to both implementation and runtime complexity concerns, this isn't
761 // very realistic, thus we only ensure a limited set of transactions are RBF'able despite mempool
762 // conflicts here. Importantly, we need to ensure that some transactions which were accepted using
763 // the below carve-out are able to be RBF'ed, without impacting the security the carve-out provides
764 // for off-chain contract systems (see link in the comment below).
765 //
766 // Specifically, the subset of RBF transactions which we allow despite chain limits are those which
767 // conflict directly with exactly one other transaction (but may evict children of said transaction),
768 // and which are not adding any new mempool dependencies. Note that the "no new mempool dependencies"
769 // check is accomplished later, so we don't bother doing anything about it here, but if BIP 125 is
770 // amended, we may need to move that check to here instead of removing it wholesale.
771 //
772 // Such transactions are clearly not merging any existing packages, so we are only concerned with
773 // ensuring that (a) no package is growing past the package size (not count) limits and (b) we are
774 // not allowing something to effectively use the (below) carve-out spot when it shouldn't be allowed
775 // to.
776 //
777 // To check these we first check if we meet the RBF criteria, above, and increment the descendant
778 // limits by the direct conflict and its descendants (as these are recalculated in
779 // CalculateMempoolAncestors by assuming the new transaction being added is a new descendant, with no
780 // removals, of each parent's existing dependent set). The ancestor count limits are unmodified (as
781 // the ancestor limits should be the same for both our new transaction and any conflicts).
782 // We don't bother incrementing m_limit_descendants by the full removal count as that limit never comes
783 // into force here (as we're only adding a single transaction).
784 assert(setIterConflicting.size() == 1);
785 CTxMemPool::txiter conflict = *setIterConflicting.begin();
786
787 m_limit_descendants += 1;
788 m_limit_descendant_size += conflict->GetSizeWithDescendants();
789 }
790
791 std::string errString;
792 if (!m_pool.CalculateMemPoolAncestors(*entry, setAncestors, m_limit_ancestors, m_limit_ancestor_size, m_limit_descendants, m_limit_descendant_size, errString)) {
793 setAncestors.clear();
794 // If CalculateMemPoolAncestors fails second time, we want the original error string.
795 std::string dummy_err_string;
796 // Contracting/payment channels CPFP carve-out:
797 // If the new transaction is relatively small (up to 40k weight)
798 // and has at most one ancestor (ie ancestor limit of 2, including
799 // the new transaction), allow it if its parent has exactly the
800 // descendant limit descendants.
801 //
802 // This allows protocols which rely on distrusting counterparties
803 // being able to broadcast descendants of an unconfirmed transaction
804 // to be secure by simply only having two immediately-spendable
805 // outputs - one for each counterparty. For more info on the uses for
806 // this, see https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2018-November/016518.html
807 if (nSize > EXTRA_DESCENDANT_TX_SIZE_LIMIT ||
808 !m_pool.CalculateMemPoolAncestors(*entry, setAncestors, 2, m_limit_ancestor_size, m_limit_descendants + 1, m_limit_descendant_size + EXTRA_DESCENDANT_TX_SIZE_LIMIT, dummy_err_string)) {
809 return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "too-long-mempool-chain", errString);
810 }
811 }
812
813 // A transaction that spends outputs that would be replaced by it is invalid. Now
814 // that we have the set of all ancestors we can detect this
815 // pathological case by making sure setConflicts and setAncestors don't
816 // intersect.
817 for (CTxMemPool::txiter ancestorIt : setAncestors)
818 {
819 const uint256 &hashAncestor = ancestorIt->GetTx().GetHash();
820 if (setConflicts.count(hashAncestor))
821 {
822 return state.Invalid(TxValidationResult::TX_CONSENSUS, "bad-txns-spends-conflicting-tx",
823 strprintf("%s spends conflicting transaction %s",
824 hash.ToString(),
825 hashAncestor.ToString()));
826 }
827 }
828
829 // Check if it's economically rational to mine this transaction rather
830 // than the ones it replaces.
831 nConflictingFees = 0;
832 nConflictingSize = 0;
833 uint64_t nConflictingCount = 0;
834
835 // If we don't hold the lock allConflicting might be incomplete; the
836 // subsequent RemoveStaged() and addUnchecked() calls don't guarantee
837 // mempool consistency for us.
838 fReplacementTransaction = setConflicts.size();
839 if (fReplacementTransaction)
840 {
841 CFeeRate newFeeRate(nModifiedFees, nSize);
842 std::set<uint256> setConflictsParents;
843 const int maxDescendantsToVisit = 100;
844 for (const auto& mi : setIterConflicting) {
845 // Don't allow the replacement to reduce the feerate of the
846 // mempool.
847 //
848 // We usually don't want to accept replacements with lower
849 // feerates than what they replaced as that would lower the
850 // feerate of the next block. Requiring that the feerate always
851 // be increased is also an easy-to-reason about way to prevent
852 // DoS attacks via replacements.
853 //
854 // We only consider the feerates of transactions being directly
855 // replaced, not their indirect descendants. While that does
856 // mean high feerate children are ignored when deciding whether
857 // or not to replace, we do require the replacement to pay more
858 // overall fees too, mitigating most cases.
859 CFeeRate oldFeeRate(mi->GetModifiedFee(), mi->GetTxSize());
860 if (newFeeRate <= oldFeeRate)
861 {
862 return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "insufficient fee",
863 strprintf("rejecting replacement %s; new feerate %s <= old feerate %s",
864 hash.ToString(),
865 newFeeRate.ToString(),
866 oldFeeRate.ToString()));
867 }
868
869 for (const CTxIn &txin : mi->GetTx().vin)
870 {
871 setConflictsParents.insert(txin.prevout.hash);
872 }
873
874 nConflictingCount += mi->GetCountWithDescendants();
875 }
876 // This potentially overestimates the number of actual descendants
877 // but we just want to be conservative to avoid doing too much
878 // work.
879 if (nConflictingCount <= maxDescendantsToVisit) {
880 // If not too many to replace, then calculate the set of
881 // transactions that would have to be evicted
882 for (CTxMemPool::txiter it : setIterConflicting) {
883 m_pool.CalculateDescendants(it, allConflicting);
884 }
885 for (CTxMemPool::txiter it : allConflicting) {
886 nConflictingFees += it->GetModifiedFee();
887 nConflictingSize += it->GetTxSize();
888 }
889 } else {
890 return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "too many potential replacements",
891 strprintf("rejecting replacement %s; too many potential replacements (%d > %d)\n",
892 hash.ToString(),
893 nConflictingCount,
894 maxDescendantsToVisit));
895 }
896
897 for (unsigned int j = 0; j < tx.vin.size(); j++)
898 {
899 // We don't want to accept replacements that require low
900 // feerate junk to be mined first. Ideally we'd keep track of
901 // the ancestor feerates and make the decision based on that,
902 // but for now requiring all new inputs to be confirmed works.
903 //
904 // Note that if you relax this to make RBF a little more useful,
905 // this may break the CalculateMempoolAncestors RBF relaxation,
906 // above. See the comment above the first CalculateMempoolAncestors
907 // call for more info.
908 if (!setConflictsParents.count(tx.vin[j].prevout.hash))
909 {
910 // Rather than check the UTXO set - potentially expensive -
911 // it's cheaper to just check if the new input refers to a
912 // tx that's in the mempool.
913 if (m_pool.exists(tx.vin[j].prevout.hash)) {
914 return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "replacement-adds-unconfirmed",
915 strprintf("replacement %s adds unconfirmed input, idx %d",
916 hash.ToString(), j));
917 }
918 }
919 }
920
921 // The replacement must pay greater fees than the transactions it
922 // replaces - if we did the bandwidth used by those conflicting
923 // transactions would not be paid for.
924 if (nModifiedFees < nConflictingFees)
925 {
926 return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "insufficient fee",
927 strprintf("rejecting replacement %s, less fees than conflicting txs; %s < %s",
928 hash.ToString(), FormatMoney(nModifiedFees), FormatMoney(nConflictingFees)));
929 }
930
931 // Finally in addition to paying more fees than the conflicts the
932 // new transaction must pay for its own bandwidth.
933 CAmount nDeltaFees = nModifiedFees - nConflictingFees;
934 if (nDeltaFees < ::incrementalRelayFee.GetFee(nSize))
935 {
936 return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "insufficient fee",
937 strprintf("rejecting replacement %s, not enough additional fees to relay; %s < %s",
938 hash.ToString(),
939 FormatMoney(nDeltaFees),
940 FormatMoney(::incrementalRelayFee.GetFee(nSize))));
941 }
942 }
943 return true;
944 }
945
// Run the (policy-level) input script/signature checks for a candidate
// mempool transaction, using the standard relay flags. On failure, also
// probes whether the failure was caused purely by a stripped witness so
// that the caller can report TX_WITNESS_STRIPPED instead of a generic
// script error.
bool MemPoolAccept::PolicyScriptChecks(ATMPArgs& args, Workspace& ws, PrecomputedTransactionData& txdata)
{
    const CTransaction& tx = *ws.m_ptx;

    TxValidationState &state = args.m_state;

    // Standard relay rules plus the Namecoin-specific mempool flag
    // (SCRIPT_VERIFY_NAMES_MEMPOOL) used elsewhere in this file.
    constexpr unsigned int scriptVerifyFlags = STANDARD_SCRIPT_VERIFY_FLAGS | SCRIPT_VERIFY_NAMES_MEMPOOL;

    // Check input scripts and signatures.
    // This is done last to help prevent CPU exhaustion denial-of-service attacks.
    if (!CheckInputScripts(tx, state, m_view, scriptVerifyFlags, true, false, txdata)) {
        // SCRIPT_VERIFY_CLEANSTACK requires SCRIPT_VERIFY_WITNESS, so we
        // need to turn both off, and compare against just turning off CLEANSTACK
        // to see if the failure is specifically due to witness validation.
        TxValidationState state_dummy; // Want reported failures to be from first CheckInputScripts
        if (!tx.HasWitness() && CheckInputScripts(tx, state_dummy, m_view, scriptVerifyFlags & ~(SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_CLEANSTACK), true, false, txdata) &&
                !CheckInputScripts(tx, state_dummy, m_view, scriptVerifyFlags & ~SCRIPT_VERIFY_CLEANSTACK, true, false, txdata)) {
            // Only the witness is missing, so the transaction itself may be fine.
            state.Invalid(TxValidationResult::TX_WITNESS_STRIPPED,
                    state.GetRejectReason(), state.GetDebugMessage());
        }
        return false; // state filled in by CheckInputScripts
    }

    return true;
}
972
// Re-run the input script checks against the current tip's consensus flags
// (plus the names-mempool flag) so that the script execution cache is warmed
// with flags matching what block validation will use.
bool MemPoolAccept::ConsensusScriptChecks(ATMPArgs& args, Workspace& ws, PrecomputedTransactionData& txdata)
{
    const CTransaction& tx = *ws.m_ptx;
    const uint256& hash = ws.m_hash;

    TxValidationState &state = args.m_state;
    const CChainParams& chainparams = args.m_chainparams;

    // Check again against the current block tip's script verification
    // flags to cache our script execution flags. This is, of course,
    // useless if the next block has different script flags from the
    // previous one, but because the cache tracks script flags for us it
    // will auto-invalidate and we'll just have a few blocks of extra
    // misses on soft-fork activation.
    //
    // This is also useful in case of bugs in the standard flags that cause
    // transactions to pass as valid when they're actually invalid. For
    // instance the STRICTENC flag was incorrectly allowing certain
    // CHECKSIG NOT scripts to pass, even though they were invalid.
    //
    // There is a similar check in CreateNewBlock() to prevent creating
    // invalid blocks (using TestBlockValidity), however allowing such
    // transactions into the mempool can be exploited as a DoS attack.
    //
    // Namecoin actually allows some scripts into the mempool that would
    // not (yet) be valid in a block, namely premature NAME_FIRSTUPDATE's.
    // Thus add the mempool-flag here.
    unsigned int currentBlockScriptVerifyFlags = GetBlockScriptFlags(::ChainActive().Tip(), chainparams.GetConsensus());
    currentBlockScriptVerifyFlags |= SCRIPT_VERIFY_NAMES_MEMPOOL;
    if (!CheckInputsFromMempoolAndCache(tx, state, m_view, m_pool, currentBlockScriptVerifyFlags, txdata)) {
        // Passing the standard-flag check but failing the tip-flag check
        // should be impossible; treat it as an internal error.
        return error("%s: BUG! PLEASE REPORT THIS! CheckInputScripts failed against latest-block but not STANDARD flags %s, %s",
                __func__, hash.ToString(), state.ToString());
    }

    return true;
}
1009
// Commit an accepted transaction to the mempool: evict the transactions it
// replaces, add the new entry, then enforce the mempool size limit (which
// may evict the transaction itself, in which case acceptance fails).
bool MemPoolAccept::Finalize(ATMPArgs& args, Workspace& ws)
{
    const CTransaction& tx = *ws.m_ptx;
    const uint256& hash = ws.m_hash;
    TxValidationState &state = args.m_state;
    const bool bypass_limits = args.m_bypass_limits;

    CTxMemPool::setEntries& allConflicting = ws.m_all_conflicting;
    CTxMemPool::setEntries& setAncestors = ws.m_ancestors;
    const CAmount& nModifiedFees = ws.m_modified_fees;
    const CAmount& nConflictingFees = ws.m_conflicting_fees;
    const size_t& nConflictingSize = ws.m_conflicting_size;
    const bool fReplacementTransaction = ws.m_replacement_transaction;
    std::unique_ptr<CTxMemPoolEntry>& entry = ws.m_entry;

    // Remove conflicting transactions from the mempool
    for (CTxMemPool::txiter it : allConflicting)
    {
        LogPrint(BCLog::MEMPOOL, "replacing tx %s with %s for %s additional fees, %d delta bytes\n",
                it->GetTx().GetHash().ToString(),
                hash.ToString(),
                FormatMoney(nModifiedFees - nConflictingFees),
                (int)entry->GetTxSize() - (int)nConflictingSize);
        if (args.m_replaced_transactions)
            args.m_replaced_transactions->push_back(it->GetSharedTx());
    }
    m_pool.RemoveStaged(allConflicting, false, MemPoolRemovalReason::REPLACED);

    // This transaction should only count for fee estimation if:
    // - it isn't a BIP 125 replacement transaction (may not be widely supported)
    // - it's not being re-added during a reorg which bypasses typical mempool fee limits
    // - the node is not behind
    // - the transaction is not dependent on any other transactions in the mempool
    bool validForFeeEstimation = !fReplacementTransaction && !bypass_limits && IsCurrentForFeeEstimation() && m_pool.HasNoInputsOf(tx);

    // Store transaction in memory
    m_pool.addUnchecked(*entry, setAncestors, validForFeeEstimation);

    // trim mempool and check if tx was trimmed
    if (!bypass_limits) {
        LimitMempoolSize(m_pool, gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, std::chrono::hours{gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY)});
        // The trim above may have evicted the very transaction we just added.
        if (!m_pool.exists(hash))
            return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "mempool full");
    }
    return true;
}
1056
// Full acceptance pipeline for one transaction: cheap policy checks, then
// script checks, then (unless test_accept) commitment to the mempool and
// notification of validation-interface listeners.
bool MemPoolAccept::AcceptSingleTransaction(const CTransactionRef& ptx, ATMPArgs& args)
{
    AssertLockHeld(cs_main);
    LOCK(m_pool.cs); // mempool "read lock" (held through GetMainSignals().TransactionAddedToMempool())

    Workspace workspace(ptx);

    if (!PreChecks(args, workspace)) return false;

    // Only compute the precomputed transaction data if we need to verify
    // scripts (ie, other policy checks pass). We perform the inexpensive
    // checks first and avoid hashing and signature verification unless those
    // checks pass, to mitigate CPU exhaustion denial-of-service attacks.
    PrecomputedTransactionData txdata;

    if (!PolicyScriptChecks(args, workspace, txdata)) return false;

    if (!ConsensusScriptChecks(args, workspace, txdata)) return false;

    // Tx was accepted, but not added
    if (args.m_test_accept) return true;

    if (!Finalize(args, workspace)) return false;

    GetMainSignals().TransactionAddedToMempool(ptx, m_pool.GetAndIncrementSequence());

    return true;
}
1085
1086 } // anon namespace
1087
1088 /** (try to) add transaction to memory pool with a specified acceptance time **/
static bool AcceptToMemoryPoolWithTime(const CChainParams& chainparams, CTxMemPool& pool, TxValidationState &state, const CTransactionRef &tx,
                        int64_t nAcceptTime, std::list<CTransactionRef>* plTxnReplaced,
                        bool bypass_limits, bool test_accept, CAmount* fee_out=nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    // Coins pulled into the cache solely for validating this transaction;
    // on failure they are evicted again below.
    std::vector<COutPoint> coins_to_uncache;
    MemPoolAccept::ATMPArgs args { chainparams, state, nAcceptTime, plTxnReplaced, bypass_limits, coins_to_uncache, test_accept, fee_out };
    bool res = MemPoolAccept(pool).AcceptSingleTransaction(tx, args);
    if (!res) {
        // Remove coins that were not present in the coins cache before calling ATMPW;
        // this is to prevent memory DoS in case we receive a large number of
        // invalid transactions that attempt to overrun the in-memory coins cache
        // (`CCoinsViewCache::cacheCoins`).

        for (const COutPoint& hashTx : coins_to_uncache)
            ::ChainstateActive().CoinsTip().Uncache(hashTx);
    }
    // After we've (potentially) uncached entries, ensure our coins cache is still within its size limits
    BlockValidationState state_dummy;
    ::ChainstateActive().FlushStateToDisk(chainparams, state_dummy, FlushStateMode::PERIODIC);
    return res;
}
1110
AcceptToMemoryPool(CTxMemPool & pool,TxValidationState & state,const CTransactionRef & tx,std::list<CTransactionRef> * plTxnReplaced,bool bypass_limits,bool test_accept,CAmount * fee_out)1111 bool AcceptToMemoryPool(CTxMemPool& pool, TxValidationState &state, const CTransactionRef &tx,
1112 std::list<CTransactionRef>* plTxnReplaced,
1113 bool bypass_limits, bool test_accept, CAmount* fee_out)
1114 {
1115 const CChainParams& chainparams = Params();
1116 return AcceptToMemoryPoolWithTime(chainparams, pool, state, tx, GetTime(), plTxnReplaced, bypass_limits, test_accept, fee_out);
1117 }
1118
GetTransaction(const CBlockIndex * const block_index,const CTxMemPool * const mempool,const uint256 & hash,const Consensus::Params & consensusParams,uint256 & hashBlock)1119 CTransactionRef GetTransaction(const CBlockIndex* const block_index, const CTxMemPool* const mempool, const uint256& hash, const Consensus::Params& consensusParams, uint256& hashBlock)
1120 {
1121 LOCK(cs_main);
1122
1123 if (block_index) {
1124 CBlock block;
1125 if (ReadBlockFromDisk(block, block_index, consensusParams)) {
1126 for (const auto& tx : block.vtx) {
1127 if (tx->GetHash() == hash) {
1128 hashBlock = block_index->GetBlockHash();
1129 return tx;
1130 }
1131 }
1132 }
1133 return nullptr;
1134 }
1135 if (mempool) {
1136 CTransactionRef ptx = mempool->get(hash);
1137 if (ptx) return ptx;
1138 }
1139 if (g_txindex) {
1140 CTransactionRef tx;
1141 if (g_txindex->FindTx(hash, hashBlock, tx)) return tx;
1142 }
1143 return nullptr;
1144 }
1145
1146 //////////////////////////////////////////////////////////////////////////////
1147 //
1148 // CBlock and CBlockIndex
1149 //
1150
// Validate a block header's proof of work, dispatching between plain PoW
// (no auxpow attached) and merged-mining (auxpow) validation.
bool CheckProofOfWork(const CBlockHeader& block, const Consensus::Params& params)
{
    /* Except for legacy blocks with full version 1, ensure that
       the chain ID is correct.  Legacy blocks are not allowed since
       the merge-mining start, which is checked in AcceptBlockHeader
       where the height is known.  */
    if (!block.IsLegacy() && params.fStrictChainId
        && block.GetChainId() != params.nAuxpowChainId)
        return error("%s : block does not have our chain ID"
                     " (got %d, expected %d, full nVersion %d)",
                     __func__, block.GetChainId(),
                     params.nAuxpowChainId, block.nVersion);

    /* If there is no auxpow, just check the block hash.  */
    if (!block.auxpow)
    {
        // The version bit claiming auxpow must agree with the (absent) data.
        if (block.IsAuxpow())
            return error("%s : no auxpow on block with auxpow version",
                         __func__);

        if (!CheckProofOfWork(block.GetHash(), block.nBits, params))
            return error("%s : non-AUX proof of work failed", __func__);

        return true;
    }

    /* We have auxpow.  Check it.  */

    if (!block.IsAuxpow())
        return error("%s : auxpow on block with non-auxpow version", __func__);

    /* Temporary check:  Disallow parent blocks with auxpow version.  This is
       for compatibility with the old client.  */
    /* FIXME: Remove this check with a hardfork later on.  */
    if (block.auxpow->getParentBlock().IsAuxpow())
        return error("%s : auxpow parent block has auxpow version", __func__);

    // The PoW target applies to the *parent* chain's block hash; the auxpow
    // commitment then ties that parent block to this block's hash.
    if (!CheckProofOfWork(block.auxpow->getParentBlockHash(), block.nBits, params))
        return error("%s : AUX proof of work failed", __func__);
    if (!block.auxpow->check(block.GetHash(), block.GetChainId(), params))
        return error("%s : AUX POW is not valid", __func__);

    return true;
}
1195
WriteBlockToDisk(const CBlock & block,FlatFilePos & pos,const CMessageHeader::MessageStartChars & messageStart)1196 static bool WriteBlockToDisk(const CBlock& block, FlatFilePos& pos, const CMessageHeader::MessageStartChars& messageStart)
1197 {
1198 // Open history file to append
1199 CAutoFile fileout(OpenBlockFile(pos), SER_DISK, CLIENT_VERSION);
1200 if (fileout.IsNull())
1201 return error("WriteBlockToDisk: OpenBlockFile failed");
1202
1203 // Write index header
1204 unsigned int nSize = GetSerializeSize(block, fileout.GetVersion());
1205 fileout << messageStart << nSize;
1206
1207 // Write block
1208 long fileOutPos = ftell(fileout.Get());
1209 if (fileOutPos < 0)
1210 return error("WriteBlockToDisk: ftell failed");
1211 pos.nPos = (unsigned int)fileOutPos;
1212 fileout << block;
1213
1214 return true;
1215 }
1216
1217 /* Generic implementation of block reading that can handle
1218 both a block and its header. */
1219
// Read a block (T = CBlock) or just a header (T = CBlockHeader) from disk at
// the given file position, and validate its proof of work (and, on signet,
// its block solution).
template<typename T>
static bool ReadBlockOrHeader(T& block, const FlatFilePos& pos, const Consensus::Params& consensusParams)
{
    block.SetNull();

    // Open history file to read
    CAutoFile filein(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION);
    if (filein.IsNull())
        return error("ReadBlockFromDisk: OpenBlockFile failed for %s", pos.ToString());

    // Read block
    try {
        filein >> block;
    }
    catch (const std::exception& e) {
        return error("%s: Deserialize or I/O error - %s at %s", __func__, e.what(), pos.ToString());
    }

    // Check the header
    if (!CheckProofOfWork(block, consensusParams))
        return error("ReadBlockFromDisk: Errors in block header at %s", pos.ToString());

    // Signet only: check block solution
    if (consensusParams.signet_blocks && !CheckSignetBlockSolution(block, consensusParams)) {
        return error("ReadBlockFromDisk: Errors in block solution at %s", pos.ToString());
    }

    return true;
}
1249
1250 template<typename T>
ReadBlockOrHeader(T & block,const CBlockIndex * pindex,const Consensus::Params & consensusParams)1251 static bool ReadBlockOrHeader(T& block, const CBlockIndex* pindex, const Consensus::Params& consensusParams)
1252 {
1253 FlatFilePos blockPos;
1254 {
1255 LOCK(cs_main);
1256 blockPos = pindex->GetBlockPos();
1257 }
1258
1259 if (!ReadBlockOrHeader(block, blockPos, consensusParams))
1260 return false;
1261 if (block.GetHash() != pindex->GetBlockHash())
1262 return error("ReadBlockFromDisk(CBlock&, CBlockIndex*): GetHash() doesn't match index for %s at %s",
1263 pindex->ToString(), pindex->GetBlockPos().ToString());
1264 return true;
1265 }
1266
// Read a full block from a raw file position (with PoW/signet validation).
bool ReadBlockFromDisk(CBlock& block, const FlatFilePos& pos, const Consensus::Params& consensusParams)
{
    return ReadBlockOrHeader(block, pos, consensusParams);
}
1271
// Read a full block via its index entry, checking the hash matches the index.
bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex, const Consensus::Params& consensusParams)
{
    return ReadBlockOrHeader(block, pindex, consensusParams);
}
1276
// Read only a block header via its index entry (cheaper than the full block).
bool ReadBlockHeaderFromDisk(CBlockHeader& block, const CBlockIndex* pindex, const Consensus::Params& consensusParams)
{
    return ReadBlockOrHeader(block, pindex, consensusParams);
}
1281
// Read a block's raw serialized bytes from disk without deserializing it.
// pos points at the block payload; the 8-byte meta header (4-byte network
// magic + 4-byte size) immediately precedes it and is used to validate the
// read and size the output buffer.
bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const FlatFilePos& pos, const CMessageHeader::MessageStartChars& message_start)
{
    FlatFilePos hpos = pos;
    hpos.nPos -= 8; // Seek back 8 bytes for meta header
    CAutoFile filein(OpenBlockFile(hpos, true), SER_DISK, CLIENT_VERSION);
    if (filein.IsNull()) {
        return error("%s: OpenBlockFile failed for %s", __func__, pos.ToString());
    }

    try {
        CMessageHeader::MessageStartChars blk_start;
        unsigned int blk_size;

        filein >> blk_start >> blk_size;

        // The magic on disk must match this network; otherwise pos points
        // at garbage or a file from a different chain.
        if (memcmp(blk_start, message_start, CMessageHeader::MESSAGE_START_SIZE)) {
            return error("%s: Block magic mismatch for %s: %s versus expected %s", __func__, pos.ToString(),
                    HexStr(blk_start),
                    HexStr(message_start));
        }

        // Bound the allocation below by the serialization size cap.
        if (blk_size > MAX_SIZE) {
            return error("%s: Block data is larger than maximum deserialization size for %s: %s versus %s", __func__, pos.ToString(),
                    blk_size, MAX_SIZE);
        }

        block.resize(blk_size); // Zeroing of memory is intentional here
        filein.read((char*)block.data(), blk_size);
    } catch(const std::exception& e) {
        return error("%s: Read from block file failed: %s for %s", __func__, e.what(), pos.ToString());
    }

    return true;
}
1316
ReadRawBlockFromDisk(std::vector<uint8_t> & block,const CBlockIndex * pindex,const CMessageHeader::MessageStartChars & message_start)1317 bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const CBlockIndex* pindex, const CMessageHeader::MessageStartChars& message_start)
1318 {
1319 FlatFilePos block_pos;
1320 {
1321 LOCK(cs_main);
1322 block_pos = pindex->GetBlockPos();
1323 }
1324
1325 return ReadRawBlockFromDisk(block, block_pos, message_start);
1326 }
1327
GetBlockSubsidy(int nHeight,const Consensus::Params & consensusParams)1328 CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams)
1329 {
1330 int halvings = nHeight / consensusParams.nSubsidyHalvingInterval;
1331 // Force block reward to zero when right shift is undefined.
1332 if (halvings >= 64)
1333 return 0;
1334
1335 CAmount nSubsidy = 50 * COIN;
1336 // Subsidy is cut in half every 210,000 blocks which will occur approximately every 4 years.
1337 nSubsidy >>= halvings;
1338 return nSubsidy;
1339 }
1340
// Construct the layered coins views: a LevelDB-backed base view (under the
// data directory) wrapped by an error-catching view. The in-memory cache
// view is created separately via InitCache().
CoinsViews::CoinsViews(
    std::string ldb_name,
    size_t cache_size_bytes,
    bool in_memory,
    bool should_wipe) : m_dbview(
                            GetDataDir() / ldb_name, cache_size_bytes, in_memory, should_wipe),
                        m_catcherview(&m_dbview) {}
1348
// Create the in-memory cache view on top of the error-catching view.
void CoinsViews::InitCache()
{
    m_cacheview = MakeUnique<CCoinsViewCache>(&m_catcherview);
}
1353
// Construct a chainstate bound to a mempool and the shared block index
// manager; from_snapshot_blockhash is non-null for snapshot-based states.
CChainState::CChainState(CTxMemPool& mempool, BlockManager& blockman, uint256 from_snapshot_blockhash)
    : m_blockman(blockman),
      m_mempool(mempool),
      m_from_snapshot_blockhash(from_snapshot_blockhash) {}
1358
// Create this chainstate's on-disk coins database (and its view stack).
// Snapshot-based chainstates get a distinct database name, suffixed with
// the snapshot's base blockhash, so they don't collide with the primary one.
void CChainState::InitCoinsDB(
    size_t cache_size_bytes,
    bool in_memory,
    bool should_wipe,
    std::string leveldb_name)
{
    if (!m_from_snapshot_blockhash.IsNull()) {
        leveldb_name += "_" + m_from_snapshot_blockhash.ToString();
    }

    m_coins_views = MakeUnique<CoinsViews>(
        leveldb_name, cache_size_bytes, in_memory, should_wipe);
}
1372
// Create the in-memory coins cache and record its configured size limit.
// Must be called after InitCoinsDB().
void CChainState::InitCoinsCache(size_t cache_size_bytes)
{
    assert(m_coins_views != nullptr);
    m_coinstip_cache_size_bytes = cache_size_bytes;
    m_coins_views->InitCache();
}
1379
1380 // Note that though this is marked const, we may end up modifying `m_cached_finished_ibd`, which
1381 // is a performance-related implementation detail. This function must be marked
1382 // `const` so that `CValidationInterface` clients (which are given a `const CChainState*`)
1383 // can call it.
1384 //
bool CChainState::IsInitialBlockDownload() const
{
    // Optimization: pre-test latch before taking the lock.
    if (m_cached_finished_ibd.load(std::memory_order_relaxed))
        return false;

    LOCK(cs_main);
    // Re-check under the lock (double-checked latch): another thread may
    // have latched to false while we were waiting for cs_main.
    if (m_cached_finished_ibd.load(std::memory_order_relaxed))
        return false;
    if (fImporting || fReindex)
        return true;
    if (m_chain.Tip() == nullptr)
        return true;
    // Still in IBD if our chain has less cumulative work than the
    // hard-coded minimum, or if the tip is older than the max tip age.
    if (m_chain.Tip()->nChainWork < nMinimumChainWork)
        return true;
    if (m_chain.Tip()->GetBlockTime() < (GetTime() - nMaxTipAge))
        return true;
    // One-way latch: once we leave IBD we never report being in it again.
    LogPrintf("Leaving InitialBlockDownload (latching to false)\n");
    m_cached_finished_ibd.store(true, std::memory_order_relaxed);
    return false;
}
1406
1407 static CBlockIndex *pindexBestForkTip = nullptr, *pindexBestForkBase = nullptr;
1408
// Notify the UI of an alert and, if -alertnotify is configured, run the
// user's command with %s substituted by a sanitized, quoted alert message.
static void AlertNotify(const std::string& strMessage)
{
    uiInterface.NotifyAlertChanged();
#if HAVE_SYSTEM
    std::string strCmd = gArgs.GetArg("-alertnotify", "");
    if (strCmd.empty()) return;

    // Alert text should be plain ascii coming from a trusted source, but to
    // be safe we first strip anything not in safeChars, then add single quotes around
    // the whole string before passing it to the shell:
    std::string singleQuote("'");
    std::string safeStatus = SanitizeString(strMessage);
    safeStatus = singleQuote+safeStatus+singleQuote;
    boost::replace_all(strCmd, "%s", safeStatus);

    std::thread t(runCommand, strCmd);
    t.detach(); // thread runs free
#endif
}
1428
// Raise or clear the large-work-fork / invalid-chain warning flags based on
// the tracked best fork tip and best invalid chain.
static void CheckForkWarningConditions() EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    AssertLockHeld(cs_main);
    // Before we get past initial download, we cannot reliably alert about forks
    // (we assume we don't get stuck on a fork before finishing our initial sync)
    if (::ChainstateActive().IsInitialBlockDownload())
        return;

    // If our best fork is no longer within 72 blocks (+/- 12 hours if no one mines it)
    // of our head, drop it
    if (pindexBestForkTip && ::ChainActive().Height() - pindexBestForkTip->nHeight >= 72)
        pindexBestForkTip = nullptr;

    // Warn either about a tracked fork, or about a known-invalid chain with
    // at least ~6 blocks' more work than our tip.
    if (pindexBestForkTip || (pindexBestInvalid && pindexBestInvalid->nChainWork > ::ChainActive().Tip()->nChainWork + (GetBlockProof(*::ChainActive().Tip()) * 6)))
    {
        if (!GetfLargeWorkForkFound() && pindexBestForkBase)
        {
            std::string warning = std::string("'Warning: Large-work fork detected, forking after block ") +
                pindexBestForkBase->phashBlock->ToString() + std::string("'");
            AlertNotify(warning);
        }
        if (pindexBestForkTip && pindexBestForkBase)
        {
            LogPrintf("%s: Warning: Large valid fork found\n  forking the chain at height %d (%s)\n  lasting to height %d (%s).\nChain state database corruption likely.\n", __func__,
                   pindexBestForkBase->nHeight, pindexBestForkBase->phashBlock->ToString(),
                   pindexBestForkTip->nHeight, pindexBestForkTip->phashBlock->ToString());
            SetfLargeWorkForkFound(true);
        }
        else
        {
            LogPrintf("%s: Warning: Found invalid chain at least ~6 blocks longer than our best chain.\nChain state database corruption likely.\n", __func__);
            SetfLargeWorkInvalidChainFound(true);
        }
    }
    else
    {
        // No concerning fork or invalid chain: clear both warning flags.
        SetfLargeWorkForkFound(false);
        SetfLargeWorkInvalidChainFound(false);
    }
}
1469
CheckForkWarningConditionsOnNewFork(CBlockIndex * pindexNewForkTip)1470 static void CheckForkWarningConditionsOnNewFork(CBlockIndex* pindexNewForkTip) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
1471 {
1472 AssertLockHeld(cs_main);
1473 // If we are on a fork that is sufficiently large, set a warning flag
1474 CBlockIndex* pfork = pindexNewForkTip;
1475 CBlockIndex* plonger = ::ChainActive().Tip();
1476 while (pfork && pfork != plonger)
1477 {
1478 while (plonger && plonger->nHeight > pfork->nHeight)
1479 plonger = plonger->pprev;
1480 if (pfork == plonger)
1481 break;
1482 pfork = pfork->pprev;
1483 }
1484
1485 // We define a condition where we should warn the user about as a fork of at least 7 blocks
1486 // with a tip within 72 blocks (+/- 12 hours if no one mines it) of ours
1487 // We use 7 blocks rather arbitrarily as it represents just under 10% of sustained network
1488 // hash rate operating on the fork.
1489 // or a chain that is entirely longer than ours and invalid (note that this should be detected by both)
1490 // We define it this way because it allows us to only store the highest fork tip (+ base) which meets
1491 // the 7-block condition and from this always have the most-likely-to-cause-warning fork
1492 if (pfork && (!pindexBestForkTip || pindexNewForkTip->nHeight > pindexBestForkTip->nHeight) &&
1493 pindexNewForkTip->nChainWork - pfork->nChainWork > (GetBlockProof(*pfork) * 7) &&
1494 ::ChainActive().Height() - pindexNewForkTip->nHeight < 72)
1495 {
1496 pindexBestForkTip = pindexNewForkTip;
1497 pindexBestForkBase = pfork;
1498 }
1499
1500 CheckForkWarningConditions();
1501 }
1502
1503 // Called both upon regular invalid block discovery *and* InvalidateBlock
InvalidChainFound(CBlockIndex * pindexNew)1504 void static InvalidChainFound(CBlockIndex* pindexNew) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
1505 {
1506 if (!pindexBestInvalid || pindexNew->nChainWork > pindexBestInvalid->nChainWork)
1507 pindexBestInvalid = pindexNew;
1508 if (pindexBestHeader != nullptr && pindexBestHeader->GetAncestor(pindexNew->nHeight) == pindexNew) {
1509 pindexBestHeader = ::ChainActive().Tip();
1510 }
1511
1512 LogPrintf("%s: invalid block=%s height=%d log2_work=%f date=%s\n", __func__,
1513 pindexNew->GetBlockHash().ToString(), pindexNew->nHeight,
1514 log(pindexNew->nChainWork.getdouble())/log(2.0), FormatISO8601DateTime(pindexNew->GetBlockTime()));
1515 CBlockIndex *tip = ::ChainActive().Tip();
1516 assert (tip);
1517 LogPrintf("%s: current best=%s height=%d log2_work=%f date=%s\n", __func__,
1518 tip->GetBlockHash().ToString(), ::ChainActive().Height(), log(tip->nChainWork.getdouble())/log(2.0),
1519 FormatISO8601DateTime(tip->GetBlockTime()));
1520 CheckForkWarningConditions();
1521 }
1522
// Same as InvalidChainFound, above, except not called directly from InvalidateBlock,
// which does its own setBlockIndexCandidates management.
InvalidBlockFound(CBlockIndex * pindex,const BlockValidationState & state)1525 void CChainState::InvalidBlockFound(CBlockIndex *pindex, const BlockValidationState &state) {
1526 if (state.GetResult() != BlockValidationResult::BLOCK_MUTATED) {
1527 pindex->nStatus |= BLOCK_FAILED_VALID;
1528 m_blockman.m_failed_blocks.insert(pindex);
1529 setDirtyBlockIndex.insert(pindex);
1530 setBlockIndexCandidates.erase(pindex);
1531 InvalidChainFound(pindex);
1532 }
1533 }
1534
UpdateCoins(const CTransaction & tx,CCoinsViewCache & inputs,CTxUndo & txundo,int nHeight)1535 void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, CTxUndo &txundo, int nHeight)
1536 {
1537 // mark inputs spent
1538 if (!tx.IsCoinBase()) {
1539 txundo.vprevout.reserve(tx.vin.size());
1540 for (const CTxIn &txin : tx.vin) {
1541 txundo.vprevout.emplace_back();
1542 bool is_spent = inputs.SpendCoin(txin.prevout, &txundo.vprevout.back());
1543 assert(is_spent);
1544 }
1545 }
1546 // add outputs
1547 AddCoins(inputs, tx, nHeight);
1548 }
1549
UpdateCoins(const CTransaction & tx,CCoinsViewCache & inputs,int nHeight)1550 void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, int nHeight)
1551 {
1552 CTxUndo txundo;
1553 UpdateCoins(tx, inputs, txundo, nHeight);
1554 }
1555
operator ()()1556 bool CScriptCheck::operator()() {
1557 const CScript &scriptSig = ptxTo->vin[nIn].scriptSig;
1558 const CScriptWitness *witness = &ptxTo->vin[nIn].scriptWitness;
1559 return VerifyScript(scriptSig, m_tx_out.scriptPubKey, witness, nFlags, CachingTransactionSignatureChecker(ptxTo, nIn, m_tx_out.nValue, cacheStore, *txdata), &error);
1560 }
1561
GetSpendHeight(const CCoinsViewCache & inputs)1562 int GetSpendHeight(const CCoinsViewCache& inputs)
1563 {
1564 LOCK(cs_main);
1565 CBlockIndex* pindexPrev = LookupBlockIndex(inputs.GetBestBlock());
1566 return pindexPrev->nHeight + 1;
1567 }
1568
1569
// Cache of (wtxid, flags) hashes for transactions whose scripts have fully
// verified (see CheckInputScripts); the hasher is salted at startup by
// InitScriptExecutionCache.
static CuckooCache::cache<uint256, SignatureCacheHasher> g_scriptExecutionCache;
static CSHA256 g_scriptExecutionCacheHasher;
1572
InitScriptExecutionCache()1573 void InitScriptExecutionCache() {
1574 // Setup the salted hasher
1575 uint256 nonce = GetRandHash();
1576 // We want the nonce to be 64 bytes long to force the hasher to process
1577 // this chunk, which makes later hash computations more efficient. We
1578 // just write our 32-byte entropy twice to fill the 64 bytes.
1579 g_scriptExecutionCacheHasher.Write(nonce.begin(), 32);
1580 g_scriptExecutionCacheHasher.Write(nonce.begin(), 32);
1581 // nMaxCacheSize is unsigned. If -maxsigcachesize is set to zero,
1582 // setup_bytes creates the minimum possible cache (2 elements).
1583 size_t nMaxCacheSize = std::min(std::max((int64_t)0, gArgs.GetArg("-maxsigcachesize", DEFAULT_MAX_SIG_CACHE_SIZE) / 2), MAX_MAX_SIG_CACHE_SIZE) * ((size_t) 1 << 20);
1584 size_t nElems = g_scriptExecutionCache.setup_bytes(nMaxCacheSize);
1585 LogPrintf("Using %zu MiB out of %zu/2 requested for script execution cache, able to store %zu elements\n",
1586 (nElems*sizeof(uint256)) >>20, (nMaxCacheSize*2)>>20, nElems);
1587 }
1588
1589 /**
1590 * Check whether all of this transaction's input scripts succeed.
1591 *
1592 * This involves ECDSA signature checks so can be computationally intensive. This function should
1593 * only be called after the cheap sanity checks in CheckTxInputs passed.
1594 *
1595 * If pvChecks is not nullptr, script checks are pushed onto it instead of being performed inline. Any
1596 * script checks which are not necessary (eg due to script execution cache hits) are, obviously,
1597 * not pushed onto pvChecks/run.
1598 *
1599 * Setting cacheSigStore/cacheFullScriptStore to false will remove elements from the corresponding cache
1600 * which are matched. This is useful for checking blocks where we will likely never need the cache
1601 * entry again.
1602 *
1603 * Note that we may set state.reason to NOT_STANDARD for extra soft-fork flags in flags, block-checking
1604 * callers should probably reset it to CONSENSUS in such cases.
1605 *
1606 * Non-static (and re-declared) in src/test/txvalidationcache_tests.cpp
1607 */
bool CheckInputScripts(const CTransaction& tx, TxValidationState &state, const CCoinsViewCache &inputs, unsigned int flags, bool cacheSigStore, bool cacheFullScriptStore, PrecomputedTransactionData& txdata, std::vector<CScriptCheck> *pvChecks) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    // Coinbase transactions have no input scripts to verify.
    if (tx.IsCoinBase()) return true;

    if (pvChecks) {
        pvChecks->reserve(tx.vin.size());
    }

    // First check if script executions have been cached with the same
    // flags. Note that this assumes that the inputs provided are
    // correct (ie that the transaction hash which is in tx's prevouts
    // properly commits to the scriptPubKey in the inputs view of that
    // transaction).
    uint256 hashCacheEntry;
    CSHA256 hasher = g_scriptExecutionCacheHasher;
    hasher.Write(tx.GetWitnessHash().begin(), 32).Write((unsigned char*)&flags, sizeof(flags)).Finalize(hashCacheEntry.begin());
    AssertLockHeld(cs_main); //TODO: Remove this requirement by making CuckooCache not require external locks
    // A cache hit means this (wtxid, flags) pair already verified. When
    // cacheFullScriptStore is false the matching entry is erased on hit.
    if (g_scriptExecutionCache.contains(hashCacheEntry, !cacheFullScriptStore)) {
        return true;
    }

    // Lazily gather the outputs being spent, needed for signature hashing.
    if (!txdata.m_spent_outputs_ready) {
        std::vector<CTxOut> spent_outputs;
        spent_outputs.reserve(tx.vin.size());

        for (const auto& txin : tx.vin) {
            const COutPoint& prevout = txin.prevout;
            const Coin& coin = inputs.AccessCoin(prevout);
            assert(!coin.IsSpent());
            spent_outputs.emplace_back(coin.out);
        }
        txdata.Init(tx, std::move(spent_outputs));
    }
    assert(txdata.m_spent_outputs.size() == tx.vin.size());

    for (unsigned int i = 0; i < tx.vin.size(); i++) {

        // We very carefully only pass in things to CScriptCheck which
        // are clearly committed to by tx' witness hash. This provides
        // a sanity check that our caching is not introducing consensus
        // failures through additional data in, eg, the coins being
        // spent being checked as a part of CScriptCheck.

        // Verify signature
        CScriptCheck check(txdata.m_spent_outputs[i], tx, i, flags, cacheSigStore, &txdata);
        if (pvChecks) {
            // Deferred mode: hand the check to the caller instead of running
            // it inline (used for parallel script verification).
            pvChecks->push_back(CScriptCheck());
            check.swap(pvChecks->back());
        } else if (!check()) {
            if (flags & STANDARD_NOT_MANDATORY_VERIFY_FLAGS) {
                // Check whether the failure was caused by a
                // non-mandatory script verification check, such as
                // non-standard DER encodings or non-null dummy
                // arguments; if so, ensure we return NOT_STANDARD
                // instead of CONSENSUS to avoid downstream users
                // splitting the network between upgraded and
                // non-upgraded nodes by banning CONSENSUS-failing
                // data providers.
                CScriptCheck check2(txdata.m_spent_outputs[i], tx, i,
                        flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, cacheSigStore, &txdata);
                if (check2())
                    return state.Invalid(TxValidationResult::TX_NOT_STANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check.GetScriptError())));
            }
            // MANDATORY flag failures correspond to
            // TxValidationResult::TX_CONSENSUS. Because CONSENSUS
            // failures are the most serious case of validation
            // failures, we may need to consider using
            // RECENT_CONSENSUS_CHANGE for any script failure that
            // could be due to non-upgraded nodes which we may want to
            // support, to avoid splitting the network (but this
            // depends on the details of how net_processing handles
            // such errors).
            return state.Invalid(TxValidationResult::TX_CONSENSUS, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(check.GetScriptError())));
        }
    }

    if (cacheFullScriptStore && !pvChecks) {
        // We executed all of the provided scripts, and were told to
        // cache the result. Do so now.
        g_scriptExecutionCache.insert(hashCacheEntry);
    }

    return true;
}
1692
UndoWriteToDisk(const CBlockUndo & blockundo,FlatFilePos & pos,const uint256 & hashBlock,const CMessageHeader::MessageStartChars & messageStart)1693 static bool UndoWriteToDisk(const CBlockUndo& blockundo, FlatFilePos& pos, const uint256& hashBlock, const CMessageHeader::MessageStartChars& messageStart)
1694 {
1695 // Open history file to append
1696 CAutoFile fileout(OpenUndoFile(pos), SER_DISK, CLIENT_VERSION);
1697 if (fileout.IsNull())
1698 return error("%s: OpenUndoFile failed", __func__);
1699
1700 // Write index header
1701 unsigned int nSize = GetSerializeSize(blockundo, fileout.GetVersion());
1702 fileout << messageStart << nSize;
1703
1704 // Write undo data
1705 long fileOutPos = ftell(fileout.Get());
1706 if (fileOutPos < 0)
1707 return error("%s: ftell failed", __func__);
1708 pos.nPos = (unsigned int)fileOutPos;
1709 fileout << blockundo;
1710
1711 // calculate & write checksum
1712 CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION);
1713 hasher << hashBlock;
1714 hasher << blockundo;
1715 fileout << hasher.GetHash();
1716
1717 return true;
1718 }
1719
UndoReadFromDisk(CBlockUndo & blockundo,const CBlockIndex * pindex)1720 bool UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex* pindex)
1721 {
1722 FlatFilePos pos = pindex->GetUndoPos();
1723 if (pos.IsNull()) {
1724 return error("%s: no undo data available", __func__);
1725 }
1726
1727 // Open history file to read
1728 CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION);
1729 if (filein.IsNull())
1730 return error("%s: OpenUndoFile failed", __func__);
1731
1732 // Read block
1733 uint256 hashChecksum;
1734 CHashVerifier<CAutoFile> verifier(&filein); // We need a CHashVerifier as reserializing may lose data
1735 try {
1736 verifier << pindex->pprev->GetBlockHash();
1737 verifier >> blockundo;
1738 filein >> hashChecksum;
1739 }
1740 catch (const std::exception& e) {
1741 return error("%s: Deserialize or I/O error - %s", __func__, e.what());
1742 }
1743
1744 // Verify checksum
1745 if (hashChecksum != verifier.GetHash())
1746 return error("%s: Checksum mismatch", __func__);
1747
1748 return true;
1749 }
1750
/**
 * Abort with a message.
 *
 * Records the message as a misc warning, logs it, reports it via
 * AbortError (falling back to a generic fatal-error text when no user
 * message is supplied) and initiates shutdown.
 *
 * @return always false, so callers can write `return AbortNode(...)`.
 */
static bool AbortNode(const std::string& strMessage, bilingual_str user_message = bilingual_str())
{
    SetMiscWarning(Untranslated(strMessage));
    LogPrintf("*** %s\n", strMessage);
    if (user_message.empty()) {
        user_message = _("A fatal internal error occurred, see debug.log for details");
    }
    AbortError(user_message);
    StartShutdown();
    return false;
}
1763
/**
 * Variant of AbortNode that additionally records the message as an error
 * on the given validation state; returns that state's Error() result.
 */
static bool AbortNode(BlockValidationState& state, const std::string& strMessage, const bilingual_str& userMessage = bilingual_str())
{
    AbortNode(strMessage, userMessage);
    return state.Error(strMessage);
}
1769
1770 /**
1771 * Restore the UTXO in a Coin at a given COutPoint
1772 * @param undo The Coin to be restored.
1773 * @param view The coins view to which to apply the changes.
1774 * @param out The out point that corresponds to the tx input.
1775 * @return A DisconnectResult as an int
1776 */
int ApplyTxInUndo(Coin&& undo, CCoinsViewCache& view, const COutPoint& out)
{
    bool fClean = true;

    if (view.HaveCoin(out)) fClean = false; // overwriting transaction output

    if (undo.nHeight == 0) {
        // Missing undo metadata (height and coinbase). Older versions included this
        // information only in undo records for the last spend of a transactions'
        // outputs. This implies that it must be present for some other output of the same tx.
        const Coin& alternate = AccessByTxid(view, out.hash);
        if (!alternate.IsSpent()) {
            // Recover the metadata from an unspent sibling output of the same txid.
            undo.nHeight = alternate.nHeight;
            undo.fCoinBase = alternate.fCoinBase;
        } else {
            return DISCONNECT_FAILED; // adding output for transaction without known metadata
        }
    }
    // If the coin already exists as an unspent coin in the cache, then the
    // possible_overwrite parameter to AddCoin must be set to true. We have
    // already checked whether an unspent coin exists above using HaveCoin, so
    // we don't need to guess. When fClean is false, an unspent coin already
    // existed and it is an overwrite.
    view.AddCoin(out, std::move(undo), !fClean);

    return fClean ? DISCONNECT_OK : DISCONNECT_UNCLEAN;
}
1804
/** Undo the effects of this block (with given index) on the UTXO set represented by coins.
 *  When FAILED is returned, view is left in an indeterminate state.
 *  Names whose expiration is undone are collected into unexpiredNames. */
DisconnectResult CChainState::DisconnectBlock(const CBlock& block, const CBlockIndex* pindex, CCoinsViewCache& view, std::set<valtype>& unexpiredNames)
{
    bool fClean = true;

    // Load the undo (rev) data written when this block was connected.
    CBlockUndo blockUndo;
    if (!UndoReadFromDisk(blockUndo, pindex)) {
        error("DisconnectBlock(): failure reading undo data");
        return DISCONNECT_FAILED;
    }

    // Exactly one undo record per transaction, except the coinbase.
    if (blockUndo.vtxundo.size() + 1 != block.vtx.size()) {
        error("DisconnectBlock(): block and undo data inconsistent");
        return DISCONNECT_FAILED;
    }

    /* Undo name expirations. We use nHeight+1 here in sync with
       the call to ExpireNames, because that's the height at which a
       possible name_update could be (thus it counts for spendability
       of the name). This is done first to match the order
       in which names are expired when connecting blocks. */
    if (!UnexpireNames (pindex->nHeight + 1, blockUndo, view, unexpiredNames))
        fClean = false;

    // undo transactions in reverse order
    for (int i = block.vtx.size() - 1; i >= 0; i--) {
        const CTransaction &tx = *(block.vtx[i]);
        uint256 hash = tx.GetHash();
        bool is_coinbase = tx.IsCoinBase();

        // Check that all outputs are available and match the outputs in the block itself
        // exactly.
        for (size_t o = 0; o < tx.vout.size(); o++) {
            if (!tx.vout[o].scriptPubKey.IsUnspendable()) {
                COutPoint out(hash, o);
                Coin coin;
                bool is_spent = view.SpendCoin(out, &coin);
                if (!is_spent || tx.vout[o] != coin.out || pindex->nHeight != coin.nHeight || is_coinbase != coin.fCoinBase) {
                    /* This may be due to a historic bug. For them, some names
                       are marked immediately as unspendable. They fail this check
                       when undoing, thus ignore them here. */
                    CChainParams::BugType type;
                    if (!Params ().IsHistoricBug (tx.GetHash (), pindex->nHeight, type) || type != CChainParams::BUG_FULLY_IGNORE) {
                        fClean = false; // transaction output mismatch
                    }
                }
            }
        }

        // restore inputs
        if (i > 0) { // not coinbases
            CTxUndo &txundo = blockUndo.vtxundo[i-1];
            if (txundo.vprevout.size() != tx.vin.size()) {
                error("DisconnectBlock(): transaction and undo data inconsistent");
                return DISCONNECT_FAILED;
            }
            // Re-create the spent coins in reverse input order.
            for (unsigned int j = tx.vin.size(); j-- > 0;) {
                const COutPoint &out = tx.vin[j].prevout;
                int res = ApplyTxInUndo(std::move(txundo.vprevout[j]), view, out);
                if (res == DISCONNECT_FAILED) return DISCONNECT_FAILED;
                fClean = fClean && res != DISCONNECT_UNCLEAN;
            }
            // At this point, all of txundo.vprevout should have been moved out.
        }
    }

    // undo name operations in reverse order
    std::vector<CNameTxUndo>::const_reverse_iterator nameUndoIter;
    for (nameUndoIter = blockUndo.vnameundo.rbegin ();
         nameUndoIter != blockUndo.vnameundo.rend (); ++nameUndoIter)
        nameUndoIter->apply (view);

    // move best block pointer to prevout block
    view.SetBestBlock(pindex->pprev->GetBlockHash());

    return fClean ? DISCONNECT_OK : DISCONNECT_UNCLEAN;
}
1883
FlushUndoFile(int block_file,bool finalize=false)1884 static void FlushUndoFile(int block_file, bool finalize = false)
1885 {
1886 FlatFilePos undo_pos_old(block_file, vinfoBlockFile[block_file].nUndoSize);
1887 if (!UndoFileSeq().Flush(undo_pos_old, finalize)) {
1888 AbortNode("Flushing undo file to disk failed. This is likely the result of an I/O error.");
1889 }
1890 }
1891
FlushBlockFile(bool fFinalize=false,bool finalize_undo=false)1892 static void FlushBlockFile(bool fFinalize = false, bool finalize_undo = false)
1893 {
1894 LOCK(cs_LastBlockFile);
1895 FlatFilePos block_pos_old(nLastBlockFile, vinfoBlockFile[nLastBlockFile].nSize);
1896 if (!BlockFileSeq().Flush(block_pos_old, fFinalize)) {
1897 AbortNode("Flushing block file to disk failed. This is likely the result of an I/O error.");
1898 }
1899 // we do not always flush the undo file, as the chain tip may be lagging behind the incoming blocks,
1900 // e.g. during IBD or a sync after a node going offline
1901 if (!fFinalize || finalize_undo) FlushUndoFile(nLastBlockFile, finalize_undo);
1902 }
1903
/** Forward declaration: allocate space in the undo file for nAddSize bytes (defined later in this file). */
static bool FindUndoPos(BlockValidationState &state, int nFile, FlatFilePos &pos, unsigned int nAddSize);
1905
WriteUndoDataForBlock(const CBlockUndo & blockundo,BlockValidationState & state,CBlockIndex * pindex,const CChainParams & chainparams)1906 static bool WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex* pindex, const CChainParams& chainparams)
1907 {
1908 // Write undo information to disk
1909 if (pindex->GetUndoPos().IsNull()) {
1910 FlatFilePos _pos;
1911 if (!FindUndoPos(state, pindex->nFile, _pos, ::GetSerializeSize(blockundo, CLIENT_VERSION) + 40))
1912 return error("ConnectBlock(): FindUndoPos failed");
1913 if (!UndoWriteToDisk(blockundo, _pos, pindex->pprev->GetBlockHash(), chainparams.MessageStart()))
1914 return AbortNode(state, "Failed to write undo data");
1915 // rev files are written in block height order, whereas blk files are written as blocks come in (often out of order)
1916 // we want to flush the rev (undo) file once we've written the last block, which is indicated by the last height
1917 // in the block file info as below; note that this does not catch the case where the undo writes are keeping up
1918 // with the block writes (usually when a synced up node is getting newly mined blocks) -- this case is caught in
1919 // the FindBlockPos function
1920 if (_pos.nFile < nLastBlockFile && static_cast<uint32_t>(pindex->nHeight) == vinfoBlockFile[_pos.nFile].nHeightLast) {
1921 FlushUndoFile(_pos.nFile, true);
1922 }
1923
1924 // update nUndoPos in block index
1925 pindex->nUndoPos = _pos.nPos;
1926 pindex->nStatus |= BLOCK_HAVE_UNDO;
1927 setDirtyBlockIndex.insert(pindex);
1928 }
1929
1930 return true;
1931 }
1932
// Queue serviced by the script-verification worker threads (batch size 128).
static CCheckQueue<CScriptCheck> scriptcheckqueue(128);
1934
/** Worker-thread entry point: name the thread "scriptch.N" and service the script-check queue. */
void ThreadScriptCheck(int worker_num) {
    util::ThreadRename(strprintf("scriptch.%i", worker_num));
    scriptcheckqueue.Thread();
}
1939
// Cache of versionbits deployment threshold states, protected by cs_main.
VersionBitsCache versionbitscache GUARDED_BY(cs_main);
1941
ComputeBlockVersion(const CBlockIndex * pindexPrev,const Consensus::Params & params)1942 int32_t ComputeBlockVersion(const CBlockIndex* pindexPrev, const Consensus::Params& params)
1943 {
1944 LOCK(cs_main);
1945 int32_t nVersion = VERSIONBITS_TOP_BITS;
1946
1947 for (int i = 0; i < (int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS; i++) {
1948 ThresholdState state = VersionBitsState(pindexPrev, params, static_cast<Consensus::DeploymentPos>(i), versionbitscache);
1949 if (state == ThresholdState::LOCKED_IN || state == ThresholdState::STARTED) {
1950 nVersion |= VersionBitsMask(params, static_cast<Consensus::DeploymentPos>(i));
1951 }
1952 }
1953
1954 return nVersion;
1955 }
1956
1957 /**
1958 * Threshold condition checker that triggers when unknown versionbits are seen on the network.
1959 */
1960 class WarningBitsConditionChecker : public AbstractThresholdConditionChecker
1961 {
1962 private:
1963 int bit;
1964
1965 public:
WarningBitsConditionChecker(int bitIn)1966 explicit WarningBitsConditionChecker(int bitIn) : bit(bitIn) {}
1967
BeginTime(const Consensus::Params & params) const1968 int64_t BeginTime(const Consensus::Params& params) const override { return 0; }
EndTime(const Consensus::Params & params) const1969 int64_t EndTime(const Consensus::Params& params) const override { return std::numeric_limits<int64_t>::max(); }
Period(const Consensus::Params & params) const1970 int Period(const Consensus::Params& params) const override { return params.nMinerConfirmationWindow; }
Threshold(const Consensus::Params & params) const1971 int Threshold(const Consensus::Params& params) const override { return params.nRuleChangeActivationThreshold; }
1972
Condition(const CBlockIndex * pindex,const Consensus::Params & params) const1973 bool Condition(const CBlockIndex* pindex, const Consensus::Params& params) const override
1974 {
1975 return pindex->nHeight >= params.MinBIP9WarningHeight &&
1976 ((pindex->nVersion & VERSIONBITS_TOP_MASK) == VERSIONBITS_TOP_BITS) &&
1977 ((pindex->nVersion >> bit) & 1) != 0 &&
1978 ((ComputeBlockVersion(pindex->pprev, params) >> bit) & 1) == 0;
1979 }
1980 };
1981
// Per-bit threshold state caches used by WarningBitsConditionChecker.
static ThresholdConditionCache warningcache[VERSIONBITS_NUM_BITS] GUARDED_BY(cs_main);
1983
GetBlockScriptFlags(const CBlockIndex * pindex,const Consensus::Params & consensusparams)1984 static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consensus::Params& consensusparams) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
1985 AssertLockHeld(cs_main);
1986
1987 unsigned int flags = SCRIPT_VERIFY_NONE;
1988
1989 if (pindex->nHeight >= consensusparams.BIP16Height) {
1990 flags |= SCRIPT_VERIFY_P2SH;
1991 }
1992
1993 // Start enforcing the DERSIG (BIP66) rule
1994 if (pindex->nHeight >= consensusparams.BIP66Height) {
1995 flags |= SCRIPT_VERIFY_DERSIG;
1996 }
1997
1998 // Start enforcing CHECKLOCKTIMEVERIFY (BIP65) rule
1999 if (pindex->nHeight >= consensusparams.BIP65Height) {
2000 flags |= SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY;
2001 }
2002
2003 // Start enforcing BIP112 (CHECKSEQUENCEVERIFY)
2004 if (pindex->nHeight >= consensusparams.CSVHeight) {
2005 flags |= SCRIPT_VERIFY_CHECKSEQUENCEVERIFY;
2006 }
2007
2008 // Start enforcing Taproot using versionbits logic.
2009 if (VersionBitsState(pindex->pprev, consensusparams, Consensus::DEPLOYMENT_TAPROOT, versionbitscache) == ThresholdState::ACTIVE) {
2010 flags |= SCRIPT_VERIFY_TAPROOT;
2011 }
2012
2013 // Start enforcing BIP147 NULLDUMMY (activated simultaneously with segwit)
2014 if (IsWitnessEnabled(pindex->pprev, consensusparams)) {
2015 flags |= SCRIPT_VERIFY_NULLDUMMY;
2016 flags |= SCRIPT_VERIFY_WITNESS;
2017 }
2018
2019 return flags;
2020 }
2021
2022
2023
// Cumulative timing counters (microseconds) and a block counter for the
// BCLog::BENCH statistics printed from ConnectBlock and related code.
static int64_t nTimeCheck = 0;
static int64_t nTimeForks = 0;
static int64_t nTimeVerify = 0;
static int64_t nTimeConnect = 0;
static int64_t nTimeIndex = 0;
static int64_t nTimeCallbacks = 0;
static int64_t nTimeTotal = 0;
static int64_t nBlocksTotal = 0;
2032
2033 /** Apply the effects of this block (with given index) on the UTXO set represented by coins.
2034 * Validity checks that depend on the UTXO set are also done; ConnectBlock()
2035 * can fail if those validity checks fail (among other reasons). */
ConnectBlock(const CBlock & block,BlockValidationState & state,CBlockIndex * pindex,CCoinsViewCache & view,std::set<valtype> & expiredNames,const CChainParams & chainparams,bool fJustCheck)2036 bool CChainState::ConnectBlock(const CBlock& block, BlockValidationState& state, CBlockIndex* pindex,
2037 CCoinsViewCache& view,
2038 std::set<valtype>& expiredNames,
2039 const CChainParams& chainparams, bool fJustCheck)
2040 {
2041 AssertLockHeld(cs_main);
2042 assert(pindex);
2043 assert(*pindex->phashBlock == block.GetHash());
2044 int64_t nTimeStart = GetTimeMicros();
2045
2046 // Check it again in case a previous version let a bad block in
2047 // NOTE: We don't currently (re-)invoke ContextualCheckBlock() or
2048 // ContextualCheckBlockHeader() here. This means that if we add a new
2049 // consensus rule that is enforced in one of those two functions, then we
2050 // may have let in a block that violates the rule prior to updating the
2051 // software, and we would NOT be enforcing the rule here. Fully solving
2052 // upgrade from one software version to the next after a consensus rule
2053 // change is potentially tricky and issue-specific (see RewindBlockIndex()
2054 // for one general approach that was used for BIP 141 deployment).
2055 // Also, currently the rule against blocks more than 2 hours in the future
2056 // is enforced in ContextualCheckBlockHeader(); we wouldn't want to
2057 // re-enforce that rule here (at least until we make it impossible for
2058 // GetAdjustedTime() to go backward).
2059 if (!CheckBlock(block, state, chainparams.GetConsensus(), !fJustCheck, !fJustCheck)) {
2060 if (state.GetResult() == BlockValidationResult::BLOCK_MUTATED) {
2061 // We don't write down blocks to disk if they may have been
2062 // corrupted, so this should be impossible unless we're having hardware
2063 // problems.
2064 return AbortNode(state, "Corrupt block found indicating potential hardware failure; shutting down");
2065 }
2066 return error("%s: Consensus::CheckBlock: %s", __func__, state.ToString());
2067 }
2068
2069 // verify that the view's current state corresponds to the previous block
2070 uint256 hashPrevBlock = pindex->pprev == nullptr ? uint256() : pindex->pprev->GetBlockHash();
2071 assert(hashPrevBlock == view.GetBestBlock());
2072
2073 nBlocksTotal++;
2074
2075 // Special case for the genesis block, skipping connection of its transactions
2076 // (its coinbase is unspendable)
2077 if (block.GetHash() == chainparams.GetConsensus().hashGenesisBlock) {
2078 if (!fJustCheck)
2079 view.SetBestBlock(pindex->GetBlockHash());
2080 return true;
2081 }
2082
2083 bool fScriptChecks = true;
2084 if (!hashAssumeValid.IsNull()) {
2085 // We've been configured with the hash of a block which has been externally verified to have a valid history.
2086 // A suitable default value is included with the software and updated from time to time. Because validity
2087 // relative to a piece of software is an objective fact these defaults can be easily reviewed.
2088 // This setting doesn't force the selection of any particular chain but makes validating some faster by
2089 // effectively caching the result of part of the verification.
2090 BlockMap::const_iterator it = m_blockman.m_block_index.find(hashAssumeValid);
2091 if (it != m_blockman.m_block_index.end()) {
2092 if (it->second->GetAncestor(pindex->nHeight) == pindex &&
2093 pindexBestHeader->GetAncestor(pindex->nHeight) == pindex &&
2094 pindexBestHeader->nChainWork >= nMinimumChainWork) {
2095 // This block is a member of the assumed verified chain and an ancestor of the best header.
2096 // Script verification is skipped when connecting blocks under the
2097 // assumevalid block. Assuming the assumevalid block is valid this
2098 // is safe because block merkle hashes are still computed and checked,
2099 // Of course, if an assumed valid block is invalid due to false scriptSigs
2100 // this optimization would allow an invalid chain to be accepted.
2101 // The equivalent time check discourages hash power from extorting the network via DOS attack
2102 // into accepting an invalid block through telling users they must manually set assumevalid.
2103 // Requiring a software change or burying the invalid block, regardless of the setting, makes
2104 // it hard to hide the implication of the demand. This also avoids having release candidates
2105 // that are hardly doing any signature verification at all in testing without having to
2106 // artificially set the default assumed verified block further back.
2107 // The test against nMinimumChainWork prevents the skipping when denied access to any chain at
2108 // least as good as the expected chain.
2109 fScriptChecks = (GetBlockProofEquivalentTime(*pindexBestHeader, *pindex, *pindexBestHeader, chainparams.GetConsensus()) <= 60 * 60 * 24 * 7 * 2);
2110 }
2111 }
2112 }
2113
2114 int64_t nTime1 = GetTimeMicros(); nTimeCheck += nTime1 - nTimeStart;
2115 LogPrint(BCLog::BENCH, " - Sanity checks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime1 - nTimeStart), nTimeCheck * MICRO, nTimeCheck * MILLI / nBlocksTotal);
2116
2117 // Do not allow blocks that contain transactions which 'overwrite' older transactions,
2118 // unless those are already completely spent.
2119 // If such overwrites are allowed, coinbases and transactions depending upon those
2120 // can be duplicated to remove the ability to spend the first instance -- even after
2121 // being sent to another address.
2122 // See BIP30, CVE-2012-1909, and http://r6.ca/blog/20120206T005236Z.html for more information.
2123 // This logic is not necessary for memory pool transactions, as AcceptToMemoryPool
2124 // already refuses previously-known transaction ids entirely.
2125 // FIXME: Enable strict check after appropriate fork.
2126 bool fEnforceBIP30 = (!pindex->phashBlock) || // Enforce on CreateNewBlock invocations which don't have a hash.
2127 !(true);
2128 assert(pindex->pprev);
2129
2130 if (fEnforceBIP30) {
2131 for (const auto& tx : block.vtx) {
2132 for (size_t o = 0; o < tx->vout.size(); o++) {
2133 if (view.HaveCoin(COutPoint(tx->GetHash(), o))) {
2134 LogPrintf("ERROR: ConnectBlock(): tried to overwrite transaction\n");
2135 return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-BIP30");
2136 }
2137 }
2138 }
2139 }
2140
2141 // Start enforcing BIP68 (sequence locks)
2142 int nLockTimeFlags = 0;
2143 if (pindex->nHeight >= chainparams.GetConsensus().CSVHeight) {
2144 nLockTimeFlags |= LOCKTIME_VERIFY_SEQUENCE;
2145 }
2146
2147 // Get the script flags for this block
2148 unsigned int flags = GetBlockScriptFlags(pindex, chainparams.GetConsensus());
2149
2150 int64_t nTime2 = GetTimeMicros(); nTimeForks += nTime2 - nTime1;
2151 LogPrint(BCLog::BENCH, " - Fork checks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime2 - nTime1), nTimeForks * MICRO, nTimeForks * MILLI / nBlocksTotal);
2152
2153 CBlockUndo blockundo;
2154
2155 // Precomputed transaction data pointers must not be invalidated
2156 // until after `control` has run the script checks (potentially
2157 // in multiple threads). Preallocate the vector size so a new allocation
2158 // doesn't invalidate pointers into the vector, and keep txsdata in scope
2159 // for as long as `control`.
2160 CCheckQueueControl<CScriptCheck> control(fScriptChecks && g_parallel_script_checks ? &scriptcheckqueue : nullptr);
2161 std::vector<PrecomputedTransactionData> txsdata(block.vtx.size());
2162
2163 std::vector<int> prevheights;
2164 CAmount nFees = 0;
2165 int nInputs = 0;
2166 int64_t nSigOpsCost = 0;
2167 blockundo.vtxundo.reserve(block.vtx.size() - 1);
2168 for (unsigned int i = 0; i < block.vtx.size(); i++)
2169 {
2170 const CTransaction &tx = *(block.vtx[i]);
2171
2172 nInputs += tx.vin.size();
2173
2174 if (!tx.IsCoinBase())
2175 {
2176 CAmount txfee = 0;
2177 TxValidationState tx_state;
2178 if (!Consensus::CheckTxInputs(tx, tx_state, view, pindex->nHeight, flags, txfee)) {
2179 // Any transaction validation failure in ConnectBlock is a block consensus failure
2180 state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
2181 tx_state.GetRejectReason(), tx_state.GetDebugMessage());
2182 return error("%s: Consensus::CheckTxInputs: %s, %s", __func__, tx.GetHash().ToString(), state.ToString());
2183 }
2184 nFees += txfee;
2185 if (!MoneyRange(nFees)) {
2186 LogPrintf("ERROR: %s: accumulated fee in the block out of range.\n", __func__);
2187 return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-accumulated-fee-outofrange");
2188 }
2189
2190 // Check that transaction is BIP68 final
2191 // BIP68 lock checks (as opposed to nLockTime checks) must
2192 // be in ConnectBlock because they require the UTXO set
2193 prevheights.resize(tx.vin.size());
2194 for (size_t j = 0; j < tx.vin.size(); j++) {
2195 prevheights[j] = view.AccessCoin(tx.vin[j].prevout).nHeight;
2196 }
2197
2198 if (!SequenceLocks(tx, nLockTimeFlags, prevheights, *pindex)) {
2199 LogPrintf("ERROR: %s: contains a non-BIP68-final transaction\n", __func__);
2200 return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-nonfinal");
2201 }
2202 }
2203
2204 // GetTransactionSigOpCost counts 3 types of sigops:
2205 // * legacy (always)
2206 // * p2sh (when P2SH enabled in flags and excludes coinbase)
2207 // * witness (when witness enabled in flags and excludes coinbase)
2208 nSigOpsCost += GetTransactionSigOpCost(tx, view, flags);
2209 if (nSigOpsCost > MAX_BLOCK_SIGOPS_COST) {
2210 LogPrintf("ERROR: ConnectBlock(): too many sigops\n");
2211 return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-sigops");
2212 }
2213
2214 if (!tx.IsCoinBase())
2215 {
2216 std::vector<CScriptCheck> vChecks;
2217 bool fCacheResults = fJustCheck; /* Don't cache results if we're actually connecting blocks (still consult the cache, though) */
2218 TxValidationState tx_state;
2219 if (fScriptChecks && !CheckInputScripts(tx, tx_state, view, flags, fCacheResults, fCacheResults, txsdata[i], g_parallel_script_checks ? &vChecks : nullptr)) {
2220 // Any transaction validation failure in ConnectBlock is a block consensus failure
2221 state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
2222 tx_state.GetRejectReason(), tx_state.GetDebugMessage());
2223 return error("ConnectBlock(): CheckInputScripts on %s failed with %s",
2224 tx.GetHash().ToString(), state.ToString());
2225 }
2226 control.Add(vChecks);
2227 }
2228
2229 CTxUndo undoDummy;
2230 if (i > 0) {
2231 blockundo.vtxundo.push_back(CTxUndo());
2232 }
2233 UpdateCoins(tx, view, i == 0 ? undoDummy : blockundo.vtxundo.back(), pindex->nHeight);
2234 ApplyNameTransaction(tx, pindex->nHeight, view, blockundo);
2235 }
2236 int64_t nTime3 = GetTimeMicros(); nTimeConnect += nTime3 - nTime2;
2237 LogPrint(BCLog::BENCH, " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs (%.2fms/blk)]\n", (unsigned)block.vtx.size(), MILLI * (nTime3 - nTime2), MILLI * (nTime3 - nTime2) / block.vtx.size(), nInputs <= 1 ? 0 : MILLI * (nTime3 - nTime2) / (nInputs-1), nTimeConnect * MICRO, nTimeConnect * MILLI / nBlocksTotal);
2238
2239 CAmount blockReward = nFees + GetBlockSubsidy(pindex->nHeight, chainparams.GetConsensus());
2240 if (block.vtx[0]->GetValueOut() > blockReward) {
2241 LogPrintf("ERROR: ConnectBlock(): coinbase pays too much (actual=%d vs limit=%d)\n", block.vtx[0]->GetValueOut(), blockReward);
2242 return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-amount");
2243 }
2244
2245 if (!control.Wait()) {
2246 LogPrintf("ERROR: %s: CheckQueue failed\n", __func__);
2247 return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "block-validation-failed");
2248 }
2249 int64_t nTime4 = GetTimeMicros(); nTimeVerify += nTime4 - nTime2;
2250 LogPrint(BCLog::BENCH, " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs (%.2fms/blk)]\n", nInputs - 1, MILLI * (nTime4 - nTime2), nInputs <= 1 ? 0 : MILLI * (nTime4 - nTime2) / (nInputs-1), nTimeVerify * MICRO, nTimeVerify * MILLI / nBlocksTotal);
2251
2252 if (fJustCheck)
2253 return true;
2254
2255 /* Remove expired names from the UTXO set. They become permanently
2256 unspendable. Note that we use nHeight+1 here because a possible
2257 spending transaction would be at least at that height. This has
2258 to be done after checking the transactions themselves, because
2259 spending a name would still be valid in the current block. */
2260 if (!ExpireNames(pindex->nHeight + 1, view, blockundo, expiredNames))
2261 return error("%s : ExpireNames failed", __func__);
2262
2263 if (!WriteUndoDataForBlock(blockundo, state, pindex, chainparams))
2264 return false;
2265
2266 if (!pindex->IsValid(BLOCK_VALID_SCRIPTS)) {
2267 pindex->RaiseValidity(BLOCK_VALID_SCRIPTS);
2268 setDirtyBlockIndex.insert(pindex);
2269 }
2270
2271 assert(pindex->phashBlock);
2272 // add this block to the view's block chain
2273 view.SetBestBlock(pindex->GetBlockHash());
2274
2275 int64_t nTime5 = GetTimeMicros(); nTimeIndex += nTime5 - nTime4;
2276 LogPrint(BCLog::BENCH, " - Index writing: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime5 - nTime4), nTimeIndex * MICRO, nTimeIndex * MILLI / nBlocksTotal);
2277
2278 int64_t nTime6 = GetTimeMicros(); nTimeCallbacks += nTime6 - nTime5;
2279 LogPrint(BCLog::BENCH, " - Callbacks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime6 - nTime5), nTimeCallbacks * MICRO, nTimeCallbacks * MILLI / nBlocksTotal);
2280
2281 return true;
2282 }
2283
GetCoinsCacheSizeState(const CTxMemPool * tx_pool)2284 CoinsCacheSizeState CChainState::GetCoinsCacheSizeState(const CTxMemPool* tx_pool)
2285 {
2286 return this->GetCoinsCacheSizeState(
2287 tx_pool,
2288 m_coinstip_cache_size_bytes,
2289 gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000);
2290 }
2291
GetCoinsCacheSizeState(const CTxMemPool * tx_pool,size_t max_coins_cache_size_bytes,size_t max_mempool_size_bytes)2292 CoinsCacheSizeState CChainState::GetCoinsCacheSizeState(
2293 const CTxMemPool* tx_pool,
2294 size_t max_coins_cache_size_bytes,
2295 size_t max_mempool_size_bytes)
2296 {
2297 const int64_t nMempoolUsage = tx_pool ? tx_pool->DynamicMemoryUsage() : 0;
2298 int64_t cacheSize = CoinsTip().DynamicMemoryUsage();
2299 int64_t nTotalSpace =
2300 max_coins_cache_size_bytes + std::max<int64_t>(max_mempool_size_bytes - nMempoolUsage, 0);
2301
2302 //! No need to periodic flush if at least this much space still available.
2303 static constexpr int64_t MAX_BLOCK_COINSDB_USAGE_BYTES = 10 * 1024 * 1024; // 10MB
2304 int64_t large_threshold =
2305 std::max((9 * nTotalSpace) / 10, nTotalSpace - MAX_BLOCK_COINSDB_USAGE_BYTES);
2306
2307 if (cacheSize > nTotalSpace) {
2308 LogPrintf("Cache size (%s) exceeds total space (%s)\n", cacheSize, nTotalSpace);
2309 return CoinsCacheSizeState::CRITICAL;
2310 } else if (cacheSize > large_threshold) {
2311 return CoinsCacheSizeState::LARGE;
2312 }
2313 return CoinsCacheSizeState::OK;
2314 }
2315
/**
 * Update the on-disk state: write dirty block index entries and block file
 * info, optionally prune block files, and (depending on `mode` and cache
 * pressure) flush the in-memory coins cache to the coins database.
 *
 * @param state             filled with the failure reason when returning false
 * @param mode              ALWAYS: force a full flush. PERIODIC: write/flush
 *                          on timers or when the cache is large. IF_NEEDED:
 *                          flush only when the cache is critically over
 *                          budget. NONE: flush only if required for pruning.
 * @param nManualPruneHeight if > 0, prune block files up to this height
 *                          (manual pruning path)
 * @return false only on a system error (low disk space, DB write failure);
 *         in every such case AbortNode() has already been called.
 */
bool CChainState::FlushStateToDisk(
    const CChainParams& chainparams,
    BlockValidationState &state,
    FlushStateMode mode,
    int nManualPruneHeight)
{
    LOCK(cs_main);
    assert(this->CanFlushToDisk());
    // Timestamps of the last block-index write and last coins flush; static
    // so they persist across calls and throttle the periodic work below.
    static std::chrono::microseconds nLastWrite{0};
    static std::chrono::microseconds nLastFlush{0};
    std::set<int> setFilesToPrune;
    bool full_flush_completed = false;

    // Snapshot cache stats before flushing, for the log message below.
    const size_t coins_count = CoinsTip().GetCacheSize();
    const size_t coins_mem_usage = CoinsTip().DynamicMemoryUsage();

    try {
    {
        bool fFlushForPrune = false;
        bool fDoFullFlush = false;
        CoinsCacheSizeState cache_state = GetCoinsCacheSizeState(&m_mempool);
        LOCK(cs_LastBlockFile);
        // Determine which block files can be deleted (pruning); a non-empty
        // result forces a full flush below so the chainstate never references
        // data in files we are about to unlink.
        if (fPruneMode && (fCheckForPruning || nManualPruneHeight > 0) && !fReindex) {
            if (nManualPruneHeight > 0) {
                LOG_TIME_MILLIS_WITH_CATEGORY("find files to prune (manual)", BCLog::BENCH);

                m_blockman.FindFilesToPruneManual(setFilesToPrune, nManualPruneHeight, m_chain.Height());
            } else {
                LOG_TIME_MILLIS_WITH_CATEGORY("find files to prune", BCLog::BENCH);

                m_blockman.FindFilesToPrune(setFilesToPrune, chainparams.PruneAfterHeight(), m_chain.Height(), IsInitialBlockDownload());
                fCheckForPruning = false;
            }
            if (!setFilesToPrune.empty()) {
                fFlushForPrune = true;
                // Record (once) that this node has pruned, so a later restart
                // knows the block data is incomplete.
                if (!fHavePruned) {
                    pblocktree->WriteFlag("prunedblockfiles", true);
                    fHavePruned = true;
                }
            }
        }
        const auto nNow = GetTime<std::chrono::microseconds>();
        // Avoid writing/flushing immediately after startup.
        if (nLastWrite.count() == 0) {
            nLastWrite = nNow;
        }
        if (nLastFlush.count() == 0) {
            nLastFlush = nNow;
        }
        // The cache is large and we're within 10% and 10 MiB of the limit, but we have time now (not in the middle of a block processing).
        bool fCacheLarge = mode == FlushStateMode::PERIODIC && cache_state >= CoinsCacheSizeState::LARGE;
        // The cache is over the limit, we have to write now.
        bool fCacheCritical = mode == FlushStateMode::IF_NEEDED && cache_state >= CoinsCacheSizeState::CRITICAL;
        // It's been a while since we wrote the block index to disk. Do this frequently, so we don't need to redownload after a crash.
        bool fPeriodicWrite = mode == FlushStateMode::PERIODIC && nNow > nLastWrite + DATABASE_WRITE_INTERVAL;
        // It's been very long since we flushed the cache. Do this infrequently, to optimize cache usage.
        bool fPeriodicFlush = mode == FlushStateMode::PERIODIC && nNow > nLastFlush + DATABASE_FLUSH_INTERVAL;
        // Combine all conditions that result in a full cache flush.
        fDoFullFlush = (mode == FlushStateMode::ALWAYS) || fCacheLarge || fCacheCritical || fPeriodicFlush || fFlushForPrune;
        // Write blocks and block index to disk.
        if (fDoFullFlush || fPeriodicWrite) {
            // Depend on nMinDiskSpace to ensure we can write block index
            if (!CheckDiskSpace(GetBlocksDir())) {
                return AbortNode(state, "Disk space is too low!", _("Disk space is too low!"));
            }
            {
                LOG_TIME_MILLIS_WITH_CATEGORY("write block and undo data to disk", BCLog::BENCH);

                // First make sure all block and undo data is flushed to disk.
                FlushBlockFile();
            }

            // Then update all block file information (which may refer to block and undo files).
            {
                LOG_TIME_MILLIS_WITH_CATEGORY("write block index to disk", BCLog::BENCH);

                // Drain the dirty sets while collecting the entries to write;
                // erase-as-we-go keeps them consistent if WriteBatchSync throws.
                std::vector<std::pair<int, const CBlockFileInfo*> > vFiles;
                vFiles.reserve(setDirtyFileInfo.size());
                for (std::set<int>::iterator it = setDirtyFileInfo.begin(); it != setDirtyFileInfo.end(); ) {
                    vFiles.push_back(std::make_pair(*it, &vinfoBlockFile[*it]));
                    setDirtyFileInfo.erase(it++);
                }
                std::vector<const CBlockIndex*> vBlocks;
                vBlocks.reserve(setDirtyBlockIndex.size());
                for (std::set<CBlockIndex*>::iterator it = setDirtyBlockIndex.begin(); it != setDirtyBlockIndex.end(); ) {
                    vBlocks.push_back(*it);
                    setDirtyBlockIndex.erase(it++);
                }
                if (!pblocktree->WriteBatchSync(vFiles, nLastBlockFile, vBlocks)) {
                    return AbortNode(state, "Failed to write to block index database");
                }
            }
            // Finally remove any pruned files
            if (fFlushForPrune) {
                LOG_TIME_MILLIS_WITH_CATEGORY("unlink pruned files", BCLog::BENCH);

                UnlinkPrunedFiles(setFilesToPrune);
            }
            nLastWrite = nNow;
        }
        // Flush best chain related state. This can only be done if the blocks / block index write was also done.
        if (fDoFullFlush && !CoinsTip().GetBestBlock().IsNull()) {
            LOG_TIME_SECONDS(strprintf("write coins cache to disk (%d coins, %.2fkB)",
                coins_count, coins_mem_usage / 1000));

            // Typical Coin structures on disk are around 48 bytes in size.
            // Pushing a new one to the database can cause it to be written
            // twice (once in the log, and once in the tables). This is already
            // an overestimation, as most will delete an existing entry or
            // overwrite one. Still, use a conservative safety factor of 2.
            if (!CheckDiskSpace(GetDataDir(), 48 * 2 * 2 * CoinsTip().GetCacheSize())) {
                return AbortNode(state, "Disk space is too low!", _("Disk space is too low!"));
            }
            // Flush the chainstate (which may refer to block index entries).
            if (!CoinsTip().Flush())
                return AbortNode(state, "Failed to write to coin database");
            nLastFlush = nNow;
            full_flush_completed = true;
        }
    }
    if (full_flush_completed) {
        // Update best block in wallet (so we can detect restored wallets).
        GetMainSignals().ChainStateFlushed(m_chain.GetLocator());
    }
    } catch (const std::runtime_error& e) {
        // Any filesystem/DB runtime error during the flush is fatal.
        return AbortNode(state, std::string("System error while flushing: ") + e.what());
    }
    return true;
}
2445
ForceFlushStateToDisk()2446 void CChainState::ForceFlushStateToDisk() {
2447 BlockValidationState state;
2448 const CChainParams& chainparams = Params();
2449 if (!this->FlushStateToDisk(chainparams, state, FlushStateMode::ALWAYS)) {
2450 LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString());
2451 }
2452 }
2453
PruneAndFlush()2454 void CChainState::PruneAndFlush() {
2455 BlockValidationState state;
2456 fCheckForPruning = true;
2457 const CChainParams& chainparams = Params();
2458
2459 if (!this->FlushStateToDisk(chainparams, state, FlushStateMode::NONE)) {
2460 LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString());
2461 }
2462 }
2463
DoWarning(const bilingual_str & warning)2464 static void DoWarning(const bilingual_str& warning)
2465 {
2466 static bool fWarned = false;
2467 SetMiscWarning(warning);
2468 if (!fWarned) {
2469 AlertNotify(warning.original);
2470 fWarned = true;
2471 }
2472 }
2473
2474 /** Private helper function that concatenates warning messages. */
AppendWarning(bilingual_str & res,const bilingual_str & warn)2475 static void AppendWarning(bilingual_str& res, const bilingual_str& warn)
2476 {
2477 if (!res.empty()) res += Untranslated(", ");
2478 res += warn;
2479 }
2480
/** Check warning conditions and do some notifications on new chain tip set.
 *
 *  Bumps the mempool's transactions-updated counter, publishes the new best
 *  block hash (waking anyone blocked on g_best_block_cv), emits versionbits /
 *  unexpected-version warnings (skipped during initial block download), and
 *  logs a summary line for the new tip.
 */
static void UpdateTip(CTxMemPool& mempool, const CBlockIndex* pindexNew, const CChainParams& chainParams)
    EXCLUSIVE_LOCKS_REQUIRED(::cs_main)
{
    // New best block
    mempool.AddTransactionsUpdated(1);

    // Publish the new tip hash and wake any threads waiting on it.
    {
        LOCK(g_best_block_mutex);
        g_best_block = pindexNew->GetBlockHash();
        g_best_block_cv.notify_all();
    }

    bilingual_str warning_messages;
    int num_unexpected_version = 0;
    // Warning checks are suppressed during initial block download.
    if (!::ChainstateActive().IsInitialBlockDownload())
    {
        const CBlockIndex* pindex = pindexNew;
        // Unknown versionbits deployments: ACTIVE triggers a hard warning via
        // DoWarning; LOCKED_IN is only appended to the log message below.
        for (int bit = 0; bit < VERSIONBITS_NUM_BITS; bit++) {
            WarningBitsConditionChecker checker(bit);
            ThresholdState state = checker.GetStateFor(pindex, chainParams.GetConsensus(), warningcache[bit]);
            if (state == ThresholdState::ACTIVE || state == ThresholdState::LOCKED_IN) {
                const bilingual_str warning = strprintf(_("Warning: unknown new rules activated (versionbit %i)"), bit);
                if (state == ThresholdState::ACTIVE) {
                    DoWarning(warning);
                } else {
                    AppendWarning(warning_messages, warning);
                }
            }
        }
        // Check the version of the last 100 blocks to see if we need to upgrade:
        for (int i = 0; i < 100 && pindex != nullptr; i++)
        {
            int32_t nExpectedVersion = ComputeBlockVersion(pindex->pprev, chainParams.GetConsensus());
            // Count blocks whose version sets bits we do not expect (possible
            // unknown upgrade in the network).
            if (pindex->GetBaseVersion() > VERSIONBITS_LAST_OLD_BLOCK_VERSION && (pindex->GetBaseVersion() & ~nExpectedVersion) != 0)
                ++num_unexpected_version;
            pindex = pindex->pprev;
        }
    }
    LogPrintf("%s: new best=%s height=%d version=0x%08x log2_work=%f tx=%lu date='%s' progress=%f cache=%.1fMiB(%utxo)%s\n", __func__,
      pindexNew->GetBlockHash().ToString(), pindexNew->nHeight, pindexNew->nVersion,
      log(pindexNew->nChainWork.getdouble())/log(2.0), (unsigned long)pindexNew->nChainTx,
      FormatISO8601DateTime(pindexNew->GetBlockTime()),
      GuessVerificationProgress(chainParams.TxData(), pindexNew), ::ChainstateActive().CoinsTip().DynamicMemoryUsage() * (1.0 / (1<<20)), ::ChainstateActive().CoinsTip().GetCacheSize(),
      !warning_messages.empty() ? strprintf(" warning='%s'", warning_messages.original) : "");

    if (num_unexpected_version > 0) {
        LogPrint(BCLog::VALIDATION, "%d of last 100 blocks have unexpected version\n", num_unexpected_version);
    }
}
2531
2532 /** Disconnect m_chain's tip.
2533 * After calling, the mempool will be in an inconsistent state, with
2534 * transactions from disconnected blocks being added to disconnectpool. You
 * should make the mempool consistent again by calling UpdateMempoolForReorg,
 * with cs_main held.
2537 *
2538 * If disconnectpool is nullptr, then no disconnected transactions are added to
2539 * disconnectpool (note that the caller is responsible for mempool consistency
2540 * in any case).
2541 */
bool CChainState::DisconnectTip(BlockValidationState& state, const CChainParams& chainparams, DisconnectedBlockTransactions* disconnectpool)
{
    AssertLockHeld(cs_main);
    AssertLockHeld(m_mempool.cs);

    CBlockIndex *pindexDelete = m_chain.Tip();
    assert(pindexDelete);
    // Namecoin: consistency check of the name database around the reorg step
    // (see CheckNameDB for the exact semantics of the flag).
    CheckNameDB (g_chainman, true);
    // Read block from disk.
    std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
    CBlock& block = *pblock;
    if (!ReadBlockFromDisk(block, pindexDelete, chainparams.GetConsensus()))
        return error("DisconnectTip(): Failed to read block");
    // Apply the block atomically to the chain state.
    // Names whose expiration is undone by this disconnect; filled in by
    // DisconnectBlock and used below to purge conflicting mempool entries.
    std::set<valtype> unexpiredNames;
    int64_t nStart = GetTimeMicros();
    {
        CCoinsViewCache view(&CoinsTip());
        assert(view.GetBestBlock() == pindexDelete->GetBlockHash());
        if (DisconnectBlock(block, pindexDelete, view, unexpiredNames) != DISCONNECT_OK)
            return error("DisconnectTip(): DisconnectBlock %s failed", pindexDelete->GetBlockHash().ToString());
        // Flush the local cache into the parent (CoinsTip) view; this must
        // succeed for the chainstate to stay consistent.
        bool flushed = view.Flush();
        assert(flushed);
    }
    LogPrint(BCLog::BENCH, "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart) * MILLI);
    // Write the chain state to disk, if necessary.
    if (!FlushStateToDisk(chainparams, state, FlushStateMode::IF_NEEDED))
        return false;

    AssertLockHeld(cs_main);

    // Fix the mempool for conflicts due to unexpired names.
    m_mempool.removeUnexpireConflicts(unexpiredNames);

    if (disconnectpool) {
        // Save transactions to re-add to mempool at end of reorg
        for (auto it = block.vtx.rbegin(); it != block.vtx.rend(); ++it) {
            disconnectpool->addTransaction(*it);
        }
        // Bound the disconnectpool's memory usage.
        while (disconnectpool->DynamicMemoryUsage() > MAX_DISCONNECTED_TX_POOL_SIZE * 1000) {
            // Drop the earliest entry, and remove its children from the mempool.
            auto it = disconnectpool->queuedTx.get<insertion_order>().begin();
            m_mempool.removeRecursive(**it, MemPoolRemovalReason::REORG);
            disconnectpool->removeEntry(it);
        }
    }

    m_chain.SetTip(pindexDelete->pprev);

    UpdateTip(m_mempool, pindexDelete->pprev, chainparams);
    CheckNameDB (g_chainman, true);
    // Let wallets know transactions went from 1-confirmed to
    // 0-confirmed or conflicted:
    GetMainSignals().BlockDisconnected(pblock, pindexDelete);
    return true;
}
2598
// Cumulative per-stage benchmark timers (microseconds) for ConnectTip below,
// reported under the BCLog::BENCH log category.
static int64_t nTimeReadFromDisk = 0;
static int64_t nTimeConnectTotal = 0;
static int64_t nTimeFlush = 0;
static int64_t nTimeChainState = 0;
static int64_t nTimePostConnect = 0;
2604
2605 struct PerBlockConnectTrace {
2606 CBlockIndex* pindex = nullptr;
2607 std::shared_ptr<const CBlock> pblock;
PerBlockConnectTracePerBlockConnectTrace2608 PerBlockConnectTrace() {}
2609 };
2610 /**
2611 * Used to track blocks whose transactions were applied to the UTXO state as a
2612 * part of a single ActivateBestChainStep call.
2613 *
2614 * This class is single-use, once you call GetBlocksConnected() you have to throw
2615 * it away and make a new one.
2616 */
2617 class ConnectTrace {
2618 private:
2619 std::vector<PerBlockConnectTrace> blocksConnected;
2620
2621 public:
ConnectTrace()2622 explicit ConnectTrace() : blocksConnected(1) {}
2623
BlockConnected(CBlockIndex * pindex,std::shared_ptr<const CBlock> pblock)2624 void BlockConnected(CBlockIndex* pindex, std::shared_ptr<const CBlock> pblock) {
2625 assert(!blocksConnected.back().pindex);
2626 assert(pindex);
2627 assert(pblock);
2628 blocksConnected.back().pindex = pindex;
2629 blocksConnected.back().pblock = std::move(pblock);
2630 blocksConnected.emplace_back();
2631 }
2632
GetBlocksConnected()2633 std::vector<PerBlockConnectTrace>& GetBlocksConnected() {
2634 // We always keep one extra block at the end of our list because
2635 // blocks are added after all the conflicted transactions have
2636 // been filled in. Thus, the last entry should always be an empty
2637 // one waiting for the transactions from the next block. We pop
2638 // the last entry here to make sure the list we return is sane.
2639 assert(!blocksConnected.back().pindex);
2640 blocksConnected.pop_back();
2641 return blocksConnected;
2642 }
2643 };
2644
2645 /**
2646 * Connect a new block to m_chain. pblock is either nullptr or a pointer to a CBlock
2647 * corresponding to pindexNew, to bypass loading it again from disk.
2648 *
2649 * The block is added to connectTrace if connection succeeds.
2650 */
bool CChainState::ConnectTip(BlockValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexNew, const std::shared_ptr<const CBlock>& pblock, ConnectTrace& connectTrace, DisconnectedBlockTransactions &disconnectpool)
{
    AssertLockHeld(cs_main);
    AssertLockHeld(m_mempool.cs);

    // This function only connects a direct successor of the current tip.
    assert(pindexNew->pprev == m_chain.Tip());
    // Namecoin: consistency check of the name database (see CheckNameDB).
    CheckNameDB (g_chainman, true);
    // Read block from disk.
    int64_t nTime1 = GetTimeMicros();
    std::shared_ptr<const CBlock> pthisBlock;
    if (!pblock) {
        std::shared_ptr<CBlock> pblockNew = std::make_shared<CBlock>();
        if (!ReadBlockFromDisk(*pblockNew, pindexNew, chainparams.GetConsensus()))
            return AbortNode(state, "Failed to read block");
        pthisBlock = pblockNew;
    } else {
        pthisBlock = pblock;
    }
    const CBlock& blockConnecting = *pthisBlock;
    // Apply the block atomically to the chain state.
    // Names expired by this block; filled in by ConnectBlock and used below
    // to evict now-conflicting transactions from the mempool.
    std::set<valtype> expiredNames;
    int64_t nTime2 = GetTimeMicros(); nTimeReadFromDisk += nTime2 - nTime1;
    int64_t nTime3;
    LogPrint(BCLog::BENCH, "  - Load block from disk: %.2fms [%.2fs]\n", (nTime2 - nTime1) * MILLI, nTimeReadFromDisk * MICRO);
    {
        CCoinsViewCache view(&CoinsTip());
        bool rv = ConnectBlock(blockConnecting, state, pindexNew, view, expiredNames, chainparams);
        // Notify listeners of the validation result (success or failure).
        GetMainSignals().BlockChecked(blockConnecting, state);
        if (!rv) {
            if (state.IsInvalid())
                InvalidBlockFound(pindexNew, state);
            return error("%s: ConnectBlock %s failed, %s", __func__, pindexNew->GetBlockHash().ToString(), state.ToString());
        }
        nTime3 = GetTimeMicros(); nTimeConnectTotal += nTime3 - nTime2;
        assert(nBlocksTotal > 0);
        LogPrint(BCLog::BENCH, "  - Connect total: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime3 - nTime2) * MILLI, nTimeConnectTotal * MICRO, nTimeConnectTotal * MILLI / nBlocksTotal);
        // Flush the per-block cache into the chainstate view.
        bool flushed = view.Flush();
        assert(flushed);
    }
    int64_t nTime4 = GetTimeMicros(); nTimeFlush += nTime4 - nTime3;
    LogPrint(BCLog::BENCH, "  - Flush: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime4 - nTime3) * MILLI, nTimeFlush * MICRO, nTimeFlush * MILLI / nBlocksTotal);
    // Write the chain state to disk, if necessary.
    if (!FlushStateToDisk(chainparams, state, FlushStateMode::IF_NEEDED))
        return false;
    int64_t nTime5 = GetTimeMicros(); nTimeChainState += nTime5 - nTime4;
    LogPrint(BCLog::BENCH, "  - Writing chainstate: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime5 - nTime4) * MILLI, nTimeChainState * MICRO, nTimeChainState * MILLI / nBlocksTotal);
    // Remove conflicting transactions from the mempool.
    m_mempool.removeForBlock(blockConnecting.vtx, pindexNew->nHeight);
    m_mempool.removeExpireConflicts(expiredNames);
    disconnectpool.removeForBlock(blockConnecting.vtx);
    // Update m_chain & related variables.
    m_chain.SetTip(pindexNew);
    UpdateTip(m_mempool, pindexNew, chainparams);
    CheckNameDB (g_chainman, false);

    int64_t nTime6 = GetTimeMicros(); nTimePostConnect += nTime6 - nTime5; nTimeTotal += nTime6 - nTime1;
    LogPrint(BCLog::BENCH, "  - Connect postprocess: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime6 - nTime5) * MILLI, nTimePostConnect * MICRO, nTimePostConnect * MILLI / nBlocksTotal);
    LogPrint(BCLog::BENCH, "- Connect block: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime6 - nTime1) * MILLI, nTimeTotal * MICRO, nTimeTotal * MILLI / nBlocksTotal);

    // Record the connected block for post-reorg notifications.
    connectTrace.BlockConnected(pindexNew, std::move(pthisBlock));
    return true;
}
2713
2714 /**
2715 * Return the tip of the chain with the most work in it, that isn't
2716 * known to be invalid (it's however far from certain to be valid).
2717 */
CBlockIndex* CChainState::FindMostWorkChain() {
    // Repeatedly take the best candidate (per CBlockIndexWorkComparator
    // ordering) and verify its path back to the active chain is usable;
    // unusable candidate chains are pruned from the set and we retry.
    do {
        CBlockIndex *pindexNew = nullptr;

        // Find the best candidate header.
        {
            std::set<CBlockIndex*, CBlockIndexWorkComparator>::reverse_iterator it = setBlockIndexCandidates.rbegin();
            if (it == setBlockIndexCandidates.rend())
                return nullptr;
            pindexNew = *it;
        }

        // Check whether all blocks on the path between the currently active chain and the candidate are valid.
        // Just going until the active chain is an optimization, as we know all blocks in it are valid already.
        CBlockIndex *pindexTest = pindexNew;
        bool fInvalidAncestor = false;
        while (pindexTest && !m_chain.Contains(pindexTest)) {
            assert(pindexTest->HaveTxsDownloaded() || pindexTest->nHeight == 0);

            // Pruned nodes may have entries in setBlockIndexCandidates for
            // which block files have been deleted. Remove those as candidates
            // for the most work chain if we come across them; we can't switch
            // to a chain unless we have all the non-active-chain parent blocks.
            bool fFailedChain = pindexTest->nStatus & BLOCK_FAILED_MASK;
            bool fMissingData = !(pindexTest->nStatus & BLOCK_HAVE_DATA);
            if (fFailedChain || fMissingData) {
                // Candidate chain is not usable (either invalid or missing data)
                if (fFailedChain && (pindexBestInvalid == nullptr || pindexNew->nChainWork > pindexBestInvalid->nChainWork))
                    pindexBestInvalid = pindexNew;
                CBlockIndex *pindexFailed = pindexNew;
                // Remove the entire chain from the set.
                while (pindexTest != pindexFailed) {
                    if (fFailedChain) {
                        // Mark descendants of an invalid block as failed children.
                        pindexFailed->nStatus |= BLOCK_FAILED_CHILD;
                    } else if (fMissingData) {
                        // If we're missing data, then add back to m_blocks_unlinked,
                        // so that if the block arrives in the future we can try adding
                        // to setBlockIndexCandidates again.
                        m_blockman.m_blocks_unlinked.insert(
                            std::make_pair(pindexFailed->pprev, pindexFailed));
                    }
                    setBlockIndexCandidates.erase(pindexFailed);
                    pindexFailed = pindexFailed->pprev;
                }
                setBlockIndexCandidates.erase(pindexTest);
                fInvalidAncestor = true;
                break;
            }
            pindexTest = pindexTest->pprev;
        }
        // Candidate's path back to the active chain is fully usable: done.
        // Otherwise loop and try the next-best candidate.
        if (!fInvalidAncestor)
            return pindexNew;
    } while(true);
}
2772
2773 /** Delete all entries in setBlockIndexCandidates that are worse than the current tip. */
PruneBlockIndexCandidates()2774 void CChainState::PruneBlockIndexCandidates() {
2775 // Note that we can't delete the current block itself, as we may need to return to it later in case a
2776 // reorganization to a better block fails.
2777 std::set<CBlockIndex*, CBlockIndexWorkComparator>::iterator it = setBlockIndexCandidates.begin();
2778 while (it != setBlockIndexCandidates.end() && setBlockIndexCandidates.value_comp()(*it, m_chain.Tip())) {
2779 setBlockIndexCandidates.erase(it++);
2780 }
2781 // Either the current tip or a successor of it we're working towards is left in setBlockIndexCandidates.
2782 assert(!setBlockIndexCandidates.empty());
2783 }
2784
2785 /**
2786 * Try to make some progress towards making pindexMostWork the active block.
2787 * pblock is either nullptr or a pointer to a CBlock corresponding to pindexMostWork.
2788 *
2789 * @returns true unless a system error occurred
2790 */
bool CChainState::ActivateBestChainStep(BlockValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexMostWork, const std::shared_ptr<const CBlock>& pblock, bool& fInvalidFound, ConnectTrace& connectTrace)
{
    AssertLockHeld(cs_main);
    AssertLockHeld(m_mempool.cs);

    const CBlockIndex *pindexOldTip = m_chain.Tip();
    // Last common ancestor of the current tip and the target chain; everything
    // past it on the active chain must be disconnected first.
    const CBlockIndex *pindexFork = m_chain.FindFork(pindexMostWork);

    // Disconnect active blocks which are no longer in the best chain.
    bool fBlocksDisconnected = false;
    DisconnectedBlockTransactions disconnectpool;
    while (m_chain.Tip() && m_chain.Tip() != pindexFork) {
        if (!DisconnectTip(state, chainparams, &disconnectpool)) {
            // This is likely a fatal error, but keep the mempool consistent,
            // just in case. Only remove from the mempool in this case.
            UpdateMempoolForReorg(m_mempool, disconnectpool, false);

            // If we're unable to disconnect a block during normal operation,
            // then that is a failure of our local system -- we should abort
            // rather than stay on a less work chain.
            AbortNode(state, "Failed to disconnect block; see debug.log for details");
            return false;
        }
        fBlocksDisconnected = true;
    }

    // Build list of new blocks to connect.
    std::vector<CBlockIndex*> vpindexToConnect;
    bool fContinue = true;
    int nHeight = pindexFork ? pindexFork->nHeight : -1;
    while (fContinue && nHeight != pindexMostWork->nHeight) {
        // Don't iterate the entire list of potential improvements toward the best tip, as we likely only need
        // a few blocks along the way.
        int nTargetHeight = std::min(nHeight + 32, pindexMostWork->nHeight);
        vpindexToConnect.clear();
        vpindexToConnect.reserve(nTargetHeight - nHeight);
        // Walk back from the target ancestor, collecting blocks in
        // descending-height order; they are connected in reverse below.
        CBlockIndex *pindexIter = pindexMostWork->GetAncestor(nTargetHeight);
        while (pindexIter && pindexIter->nHeight != nHeight) {
            vpindexToConnect.push_back(pindexIter);
            pindexIter = pindexIter->pprev;
        }
        nHeight = nTargetHeight;

        // Connect new blocks.
        for (CBlockIndex *pindexConnect : reverse_iterate(vpindexToConnect)) {
            if (!ConnectTip(state, chainparams, pindexConnect, pindexConnect == pindexMostWork ? pblock : std::shared_ptr<const CBlock>(), connectTrace, disconnectpool)) {
                if (state.IsInvalid()) {
                    // The block violates a consensus rule.
                    // A BLOCK_MUTATED result suggests the block data we received
                    // was corrupted rather than the header chain being invalid,
                    // so don't mark the whole branch invalid in that case.
                    if (state.GetResult() != BlockValidationResult::BLOCK_MUTATED) {
                        InvalidChainFound(vpindexToConnect.front());
                    }
                    // Reset the state so subsequent activation attempts start clean.
                    state = BlockValidationState();
                    fInvalidFound = true;
                    fContinue = false;
                    break;
                } else {
                    // A system error occurred (disk space, database error, ...).
                    // Make the mempool consistent with the current tip, just in case
                    // any observers try to use it before shutdown.
                    UpdateMempoolForReorg(m_mempool, disconnectpool, false);
                    return false;
                }
            } else {
                PruneBlockIndexCandidates();
                if (!pindexOldTip || m_chain.Tip()->nChainWork > pindexOldTip->nChainWork) {
                    // We're in a better position than we were. Return temporarily to release the lock.
                    fContinue = false;
                    break;
                }
            }
        }
    }

    if (fBlocksDisconnected) {
        // If any blocks were disconnected, disconnectpool may be non empty. Add
        // any disconnected transactions back to the mempool.
        UpdateMempoolForReorg(m_mempool, disconnectpool, true);
    }
    m_mempool.check(g_chainman, &CoinsTip());

    // Callbacks/notifications for a new best chain.
    if (fInvalidFound)
        CheckForkWarningConditionsOnNewFork(vpindexToConnect.back());
    else
        CheckForkWarningConditions();

    return true;
}
2879
GetSynchronizationState(bool init)2880 static SynchronizationState GetSynchronizationState(bool init)
2881 {
2882 if (!init) return SynchronizationState::POST_INIT;
2883 if (::fReindex) return SynchronizationState::INIT_REINDEX;
2884 return SynchronizationState::INIT_DOWNLOAD;
2885 }
2886
NotifyHeaderTip()2887 static bool NotifyHeaderTip() LOCKS_EXCLUDED(cs_main) {
2888 bool fNotify = false;
2889 bool fInitialBlockDownload = false;
2890 static CBlockIndex* pindexHeaderOld = nullptr;
2891 CBlockIndex* pindexHeader = nullptr;
2892 {
2893 LOCK(cs_main);
2894 pindexHeader = pindexBestHeader;
2895
2896 if (pindexHeader != pindexHeaderOld) {
2897 fNotify = true;
2898 fInitialBlockDownload = ::ChainstateActive().IsInitialBlockDownload();
2899 pindexHeaderOld = pindexHeader;
2900 }
2901 }
2902 // Send block tip changed notifications without cs_main
2903 if (fNotify) {
2904 uiInterface.NotifyHeaderTip(GetSynchronizationState(fInitialBlockDownload), pindexHeader);
2905 }
2906 return fNotify;
2907 }
2908
LimitValidationInterfaceQueue()2909 static void LimitValidationInterfaceQueue() LOCKS_EXCLUDED(cs_main) {
2910 AssertLockNotHeld(cs_main);
2911
2912 if (GetMainSignals().CallbacksPending() > 10) {
2913 SyncWithValidationInterfaceQueue();
2914 }
2915 }
2916
bool CChainState::ActivateBestChain(BlockValidationState &state, const CChainParams& chainparams, std::shared_ptr<const CBlock> pblock) {
    // Note that while we're often called here from ProcessNewBlock, this is
    // far from a guarantee. Things in the P2P/RPC will often end up calling
    // us in the middle of ProcessNewBlock - do not assume pblock is set
    // sanely for performance or correctness!
    AssertLockHeld(cs_main);

    // ABC maintains a fair degree of expensive-to-calculate internal state
    // because this function periodically releases cs_main so that it does not lock up other threads for too long
    // during large connects - and to allow for e.g. the callback queue to drain
    // we use m_cs_chainstate to enforce mutual exclusion so that only one caller may execute this function at a time
    LOCK(m_cs_chainstate);

    CBlockIndex *pindexMostWork = nullptr;
    CBlockIndex *pindexNewTip = nullptr;
    // Optional user-configured height at which to request a clean shutdown.
    int nStopAtHeight = gArgs.GetArg("-stopatheight", DEFAULT_STOPATHEIGHT);
    do {
        // Block until the validation queue drains. This should largely
        // never happen in normal operation, however may happen during
        // reindex, causing memory blowup if we run too far ahead.
        // Note that if a validationinterface callback ends up calling
        // ActivateBestChain this may lead to a deadlock! We should
        // probably have a DEBUG_LOCKORDER test for this in the future.
        LimitValidationInterfaceQueue();

        {
            LOCK(cs_main);
            LOCK(m_mempool.cs); // Lock transaction pool for at least as long as it takes for connectTrace to be consumed
            CBlockIndex* starting_tip = m_chain.Tip();
            bool blocks_connected = false;
            do {
                // We absolutely may not unlock cs_main until we've made forward progress
                // (with the exception of shutdown due to hardware issues, low disk space, etc).
                ConnectTrace connectTrace; // Destructed before cs_main is unlocked

                if (pindexMostWork == nullptr) {
                    pindexMostWork = FindMostWorkChain();
                }

                // Whether we have anything to do at all.
                if (pindexMostWork == nullptr || pindexMostWork == m_chain.Tip()) {
                    break;
                }

                bool fInvalidFound = false;
                std::shared_ptr<const CBlock> nullBlockPtr;
                // Only pass pblock along when it is actually the block being
                // activated; otherwise ConnectTip reads it from disk.
                if (!ActivateBestChainStep(state, chainparams, pindexMostWork, pblock && pblock->GetHash() == pindexMostWork->GetBlockHash() ? pblock : nullBlockPtr, fInvalidFound, connectTrace)) {
                    // A system error occurred
                    return false;
                }
                blocks_connected = true;

                if (fInvalidFound) {
                    // Wipe cache, we may need another branch now.
                    pindexMostWork = nullptr;
                }
                pindexNewTip = m_chain.Tip();

                for (const PerBlockConnectTrace& trace : connectTrace.GetBlocksConnected()) {
                    assert(trace.pblock && trace.pindex);
                    GetMainSignals().BlockConnected(trace.pblock, trace.pindex);
                }
            // Repeat until the tip has at least as much work as the tip we
            // started from (or there is no tip at all yet).
            } while (!m_chain.Tip() || (starting_tip && CBlockIndexWorkComparator()(m_chain.Tip(), starting_tip)));
            if (!blocks_connected) return true;

            const CBlockIndex* pindexFork = m_chain.FindFork(starting_tip);
            bool fInitialDownload = IsInitialBlockDownload();

            // Notify external listeners about the new tip.
            // Enqueue while holding cs_main to ensure that UpdatedBlockTip is called in the order in which blocks are connected
            if (pindexFork != pindexNewTip) {
                // Notify ValidationInterface subscribers
                GetMainSignals().UpdatedBlockTip(pindexNewTip, pindexFork, fInitialDownload);

                // Always notify the UI if a new block tip was connected
                uiInterface.NotifyBlockTip(GetSynchronizationState(fInitialDownload), pindexNewTip);
            }
        }
        // When we reach this point, we switched to a new tip (stored in pindexNewTip).

        if (nStopAtHeight && pindexNewTip && pindexNewTip->nHeight >= nStopAtHeight) StartShutdown();

        // We check shutdown only after giving ActivateBestChainStep a chance to run once so that we
        // never shutdown before connecting the genesis block during LoadChainTip(). Previously this
        // caused an assert() failure during shutdown in such cases as the UTXO DB flushing checks
        // that the best block hash is non-null.
        if (ShutdownRequested()) break;
    } while (pindexNewTip != pindexMostWork);
    CheckBlockIndex(chainparams.GetConsensus());

    // Write changes periodically to disk, after relay.
    if (!FlushStateToDisk(chainparams, state, FlushStateMode::PERIODIC)) {
        return false;
    }

    return true;
}
3014
// Convenience wrapper forwarding to the active chainstate's ActivateBestChain.
bool ActivateBestChain(BlockValidationState &state, const CChainParams& chainparams, std::shared_ptr<const CBlock> pblock) {
    return ::ChainstateActive().ActivateBestChain(state, chainparams, std::move(pblock));
}
3018
bool CChainState::PreciousBlock(BlockValidationState& state, const CChainParams& params, CBlockIndex *pindex)
{
    {
        LOCK(cs_main);
        if (pindex->nChainWork < m_chain.Tip()->nChainWork) {
            // Nothing to do, this block is not at the tip.
            return true;
        }
        if (m_chain.Tip()->nChainWork > nLastPreciousChainwork) {
            // The chain has been extended since the last call, reset the counter.
            nBlockReverseSequenceId = -1;
        }
        nLastPreciousChainwork = m_chain.Tip()->nChainWork;
        setBlockIndexCandidates.erase(pindex);
        // Give this block a negative sequence id so it sorts ahead of any
        // equal-work competitor in the candidate set ordering.
        pindex->nSequenceId = nBlockReverseSequenceId;
        if (nBlockReverseSequenceId > std::numeric_limits<int32_t>::min()) {
            // We can't keep reducing the counter if somebody really wants to
            // call preciousblock 2**31-1 times on the same set of tips...
            nBlockReverseSequenceId--;
        }
        // Re-insert with the new sequence id so the comparator sees it.
        if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && pindex->HaveTxsDownloaded()) {
            setBlockIndexCandidates.insert(pindex);
            PruneBlockIndexCandidates();
        }
    }

    // Let the normal best-chain logic pick up the re-prioritized candidate.
    return ActivateBestChain(state, params, std::shared_ptr<const CBlock>());
}
// Convenience wrapper forwarding to the active chainstate's PreciousBlock.
bool PreciousBlock(BlockValidationState& state, const CChainParams& params, CBlockIndex *pindex) {
    return ::ChainstateActive().PreciousBlock(state, params, pindex);
}
3050
bool CChainState::InvalidateBlock(BlockValidationState& state, const CChainParams& chainparams, CBlockIndex *pindex)
{
    // The block that will finally carry BLOCK_FAILED_VALID; updated as we
    // walk the disconnected tips downward.
    CBlockIndex* to_mark_failed = pindex;
    bool pindex_was_in_chain = false;
    int disconnected = 0;

    // We do not allow ActivateBestChain() to run while InvalidateBlock() is
    // running, as that could cause the tip to change while we disconnect
    // blocks.
    LOCK(m_cs_chainstate);

    // We'll be acquiring and releasing cs_main below, to allow the validation
    // callbacks to run. However, we should keep the block index in a
    // consistent state as we disconnect blocks -- in particular we need to
    // add equal-work blocks to setBlockIndexCandidates as we disconnect.
    // To avoid walking the block index repeatedly in search of candidates,
    // build a map once so that we can look up candidate blocks by chain
    // work as we go.
    std::multimap<const arith_uint256, CBlockIndex *> candidate_blocks_by_work;

    {
        LOCK(cs_main);
        for (const auto& entry : m_blockman.m_block_index) {
            CBlockIndex *candidate = entry.second;
            // We don't need to put anything in our active chain into the
            // multimap, because those candidates will be found and considered
            // as we disconnect.
            // Instead, consider only non-active-chain blocks that have at
            // least as much work as where we expect the new tip to end up.
            if (!m_chain.Contains(candidate) &&
                    !CBlockIndexWorkComparator()(candidate, pindex->pprev) &&
                    candidate->IsValid(BLOCK_VALID_TRANSACTIONS) &&
                    candidate->HaveTxsDownloaded()) {
                candidate_blocks_by_work.insert(std::make_pair(candidate->nChainWork, candidate));
            }
        }
    }

    // Disconnect (descendants of) pindex, and mark them invalid.
    while (true) {
        if (ShutdownRequested()) break;

        // Make sure the queue of validation callbacks doesn't grow unboundedly.
        LimitValidationInterfaceQueue();

        LOCK(cs_main);
        LOCK(m_mempool.cs); // Lock for as long as disconnectpool is in scope to make sure UpdateMempoolForReorg is called after DisconnectTip without unlocking in between
        if (!m_chain.Contains(pindex)) break;
        pindex_was_in_chain = true;
        CBlockIndex *invalid_walk_tip = m_chain.Tip();

        // ActivateBestChain considers blocks already in m_chain
        // unconditionally valid already, so force disconnect away from it.
        DisconnectedBlockTransactions disconnectpool;
        bool ret = DisconnectTip(state, chainparams, &disconnectpool);
        // DisconnectTip will add transactions to disconnectpool.
        // Adjust the mempool to be consistent with the new tip, adding
        // transactions back to the mempool if disconnecting was successful,
        // and we're not doing a very deep invalidation (in which case
        // keeping the mempool up to date is probably futile anyway).
        UpdateMempoolForReorg(m_mempool, disconnectpool, /* fAddToMempool = */ (++disconnected <= 10) && ret);
        if (!ret) return false;
        assert(invalid_walk_tip->pprev == m_chain.Tip());

        // We immediately mark the disconnected blocks as invalid.
        // This prevents a case where pruned nodes may fail to invalidateblock
        // and be left unable to start as they have no tip candidates (as there
        // are no blocks that meet the "have data and are not invalid per
        // nStatus" criteria for inclusion in setBlockIndexCandidates).
        invalid_walk_tip->nStatus |= BLOCK_FAILED_VALID;
        setDirtyBlockIndex.insert(invalid_walk_tip);
        setBlockIndexCandidates.erase(invalid_walk_tip);
        setBlockIndexCandidates.insert(invalid_walk_tip->pprev);
        if (invalid_walk_tip->pprev == to_mark_failed && (to_mark_failed->nStatus & BLOCK_FAILED_VALID)) {
            // We only want to mark the last disconnected block as BLOCK_FAILED_VALID; its children
            // need to be BLOCK_FAILED_CHILD instead.
            to_mark_failed->nStatus = (to_mark_failed->nStatus ^ BLOCK_FAILED_VALID) | BLOCK_FAILED_CHILD;
            setDirtyBlockIndex.insert(to_mark_failed);
        }

        // Add any equal or more work headers to setBlockIndexCandidates
        auto candidate_it = candidate_blocks_by_work.lower_bound(invalid_walk_tip->pprev->nChainWork);
        while (candidate_it != candidate_blocks_by_work.end()) {
            if (!CBlockIndexWorkComparator()(candidate_it->second, invalid_walk_tip->pprev)) {
                setBlockIndexCandidates.insert(candidate_it->second);
                candidate_it = candidate_blocks_by_work.erase(candidate_it);
            } else {
                ++candidate_it;
            }
        }

        // Track the last disconnected block, so we can correct its BLOCK_FAILED_CHILD status in future
        // iterations, or, if it's the last one, call InvalidChainFound on it.
        to_mark_failed = invalid_walk_tip;
    }

    CheckBlockIndex(chainparams.GetConsensus());

    {
        LOCK(cs_main);
        if (m_chain.Contains(to_mark_failed)) {
            // If the to-be-marked invalid block is in the active chain, something is interfering and we can't proceed.
            return false;
        }

        // Mark pindex (or the last disconnected block) as invalid, even when it never was in the main chain
        to_mark_failed->nStatus |= BLOCK_FAILED_VALID;
        setDirtyBlockIndex.insert(to_mark_failed);
        setBlockIndexCandidates.erase(to_mark_failed);
        m_blockman.m_failed_blocks.insert(to_mark_failed);

        // If any new blocks somehow arrived while we were disconnecting
        // (above), then the pre-calculation of what should go into
        // setBlockIndexCandidates may have missed entries. This would
        // technically be an inconsistency in the block index, but if we clean
        // it up here, this should be an essentially unobservable error.
        // Loop back over all block index entries and add any missing entries
        // to setBlockIndexCandidates.
        BlockMap::iterator it = m_blockman.m_block_index.begin();
        while (it != m_blockman.m_block_index.end()) {
            if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->HaveTxsDownloaded() && !setBlockIndexCandidates.value_comp()(it->second, m_chain.Tip())) {
                setBlockIndexCandidates.insert(it->second);
            }
            it++;
        }

        InvalidChainFound(to_mark_failed);
    }

    // Only notify about a new block tip if the active chain was modified.
    if (pindex_was_in_chain) {
        uiInterface.NotifyBlockTip(GetSynchronizationState(IsInitialBlockDownload()), to_mark_failed->pprev);
    }
    return true;
}
3186
// Convenience wrapper forwarding to the active chainstate's InvalidateBlock.
bool InvalidateBlock(BlockValidationState& state, const CChainParams& chainparams, CBlockIndex *pindex) {
    return ::ChainstateActive().InvalidateBlock(state, chainparams, pindex);
}
3190
void CChainState::ResetBlockFailureFlags(CBlockIndex *pindex) {
    AssertLockHeld(cs_main);

    int nHeight = pindex->nHeight;

    // Remove the invalidity flag from this block and all its descendants.
    // (Any block whose ancestor at nHeight is pindex descends from it.)
    BlockMap::iterator it = m_blockman.m_block_index.begin();
    while (it != m_blockman.m_block_index.end()) {
        if (!it->second->IsValid() && it->second->GetAncestor(nHeight) == pindex) {
            it->second->nStatus &= ~BLOCK_FAILED_MASK;
            setDirtyBlockIndex.insert(it->second);
            // Re-admit the block as a tip candidate if it has its
            // transactions and at least as much work as the current tip.
            if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->HaveTxsDownloaded() && setBlockIndexCandidates.value_comp()(m_chain.Tip(), it->second)) {
                setBlockIndexCandidates.insert(it->second);
            }
            if (it->second == pindexBestInvalid) {
                // Reset invalid block marker if it was pointing to one of those.
                pindexBestInvalid = nullptr;
            }
            m_blockman.m_failed_blocks.erase(it->second);
        }
        it++;
    }

    // Remove the invalidity flag from all ancestors too.
    while (pindex != nullptr) {
        if (pindex->nStatus & BLOCK_FAILED_MASK) {
            pindex->nStatus &= ~BLOCK_FAILED_MASK;
            setDirtyBlockIndex.insert(pindex);
            m_blockman.m_failed_blocks.erase(pindex);
        }
        pindex = pindex->pprev;
    }
}
3224
// Convenience wrapper forwarding to the active chainstate's ResetBlockFailureFlags.
void ResetBlockFailureFlags(CBlockIndex *pindex) {
    return ::ChainstateActive().ResetBlockFailureFlags(pindex);
}
3228
CBlockIndex* BlockManager::AddToBlockIndex(const CBlockHeader& block)
{
    AssertLockHeld(cs_main);

    // Check for duplicate
    uint256 hash = block.GetHash();
    BlockMap::iterator it = m_block_index.find(hash);
    if (it != m_block_index.end())
        return it->second;

    // Construct new block index object
    CBlockIndex* pindexNew = new CBlockIndex(block);
    // We assign the sequence id to blocks only when the full data is available,
    // to avoid miners withholding blocks but broadcasting headers, to get a
    // competitive advantage.
    pindexNew->nSequenceId = 0;
    BlockMap::iterator mi = m_block_index.insert(std::make_pair(hash, pindexNew)).first;
    // Point phashBlock at the map key so the index entry does not need to
    // store its own copy of the hash.
    pindexNew->phashBlock = &((*mi).first);
    BlockMap::iterator miPrev = m_block_index.find(block.hashPrevBlock);
    if (miPrev != m_block_index.end())
    {
        // Link into the tree and precompute the skiplist pointer.
        pindexNew->pprev = (*miPrev).second;
        pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
        pindexNew->BuildSkip();
    }
    // nTimeMax is the maximum timestamp along the chain up to this block.
    pindexNew->nTimeMax = (pindexNew->pprev ? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime) : pindexNew->nTime);
    pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew);
    pindexNew->RaiseValidity(BLOCK_VALID_TREE);
    if (pindexBestHeader == nullptr || pindexBestHeader->nChainWork < pindexNew->nChainWork)
        pindexBestHeader = pindexNew;

    // Queue the new entry for persistence to the block index database.
    setDirtyBlockIndex.insert(pindexNew);

    return pindexNew;
}
3264
3265 /** Mark a block as having its data received and checked (up to BLOCK_VALID_TRANSACTIONS). */
void CChainState::ReceivedBlockTransactions(const CBlock& block, CBlockIndex* pindexNew, const FlatFilePos& pos, const Consensus::Params& consensusParams)
{
    // Record where the block data lives on disk and how many txs it has.
    pindexNew->nTx = block.vtx.size();
    pindexNew->nChainTx = 0;
    pindexNew->nFile = pos.nFile;
    pindexNew->nDataPos = pos.nPos;
    pindexNew->nUndoPos = 0;
    pindexNew->nStatus |= BLOCK_HAVE_DATA;
    if (IsWitnessEnabled(pindexNew->pprev, consensusParams)) {
        pindexNew->nStatus |= BLOCK_OPT_WITNESS;
    }
    pindexNew->RaiseValidity(BLOCK_VALID_TRANSACTIONS);
    setDirtyBlockIndex.insert(pindexNew);

    if (pindexNew->pprev == nullptr || pindexNew->pprev->HaveTxsDownloaded()) {
        // If pindexNew is the genesis block or all parents are BLOCK_VALID_TRANSACTIONS.
        std::deque<CBlockIndex*> queue;
        queue.push_back(pindexNew);

        // Recursively process any descendant blocks that now may be eligible to be connected.
        // (Breadth-first walk through m_blocks_unlinked starting at pindexNew.)
        while (!queue.empty()) {
            CBlockIndex *pindex = queue.front();
            queue.pop_front();
            pindex->nChainTx = (pindex->pprev ? pindex->pprev->nChainTx : 0) + pindex->nTx;
            {
                // Sequence ids are assigned in download-completion order.
                LOCK(cs_nBlockSequenceId);
                pindex->nSequenceId = nBlockSequenceId++;
            }
            // Eligible as a tip candidate if it has at least as much work as the current tip.
            if (m_chain.Tip() == nullptr || !setBlockIndexCandidates.value_comp()(pindex, m_chain.Tip())) {
                setBlockIndexCandidates.insert(pindex);
            }
            std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = m_blockman.m_blocks_unlinked.equal_range(pindex);
            while (range.first != range.second) {
                std::multimap<CBlockIndex*, CBlockIndex*>::iterator it = range.first;
                queue.push_back(it->second);
                range.first++;
                m_blockman.m_blocks_unlinked.erase(it);
            }
        }
    } else {
        // A parent is still missing transactions: park this block in
        // m_blocks_unlinked until the parent's data arrives.
        if (pindexNew->pprev && pindexNew->pprev->IsValid(BLOCK_VALID_TREE)) {
            m_blockman.m_blocks_unlinked.insert(std::make_pair(pindexNew->pprev, pindexNew));
        }
    }
}
3311
//! Find a position in the block files where a block of nAddSize bytes can be
//! stored, rolling over to a new file when the current one would exceed
//! MAX_BLOCKFILE_SIZE. With fKnown, pos already identifies the location
//! (reindex) and only the bookkeeping is updated.
static bool FindBlockPos(FlatFilePos &pos, unsigned int nAddSize, unsigned int nHeight, uint64_t nTime, bool fKnown = false)
{
    LOCK(cs_LastBlockFile);

    unsigned int nFile = fKnown ? pos.nFile : nLastBlockFile;
    if (vinfoBlockFile.size() <= nFile) {
        vinfoBlockFile.resize(nFile + 1);
    }

    bool finalize_undo = false;
    if (!fKnown) {
        while (vinfoBlockFile[nFile].nSize + nAddSize >= MAX_BLOCKFILE_SIZE) {
            // when the undo file is keeping up with the block file, we want to flush it explicitly
            // when it is lagging behind (more blocks arrive than are being connected), we let the
            // undo block write case handle it
            finalize_undo = (vinfoBlockFile[nFile].nHeightLast == (unsigned int)ChainActive().Tip()->nHeight);
            nFile++;
            if (vinfoBlockFile.size() <= nFile) {
                vinfoBlockFile.resize(nFile + 1);
            }
        }
        pos.nFile = nFile;
        pos.nPos = vinfoBlockFile[nFile].nSize;
    }

    if ((int)nFile != nLastBlockFile) {
        if (!fKnown) {
            LogPrintf("Leaving block file %i: %s\n", nLastBlockFile, vinfoBlockFile[nLastBlockFile].ToString());
        }
        // Flush the file we are leaving behind before switching.
        FlushBlockFile(!fKnown, finalize_undo);
        nLastBlockFile = nFile;
    }

    vinfoBlockFile[nFile].AddBlock(nHeight, nTime);
    if (fKnown)
        vinfoBlockFile[nFile].nSize = std::max(pos.nPos + nAddSize, vinfoBlockFile[nFile].nSize);
    else
        vinfoBlockFile[nFile].nSize += nAddSize;

    if (!fKnown) {
        bool out_of_space;
        size_t bytes_allocated = BlockFileSeq().Allocate(pos, nAddSize, out_of_space);
        if (out_of_space) {
            return AbortNode("Disk space is too low!", _("Disk space is too low!"));
        }
        if (bytes_allocated != 0 && fPruneMode) {
            // Newly allocated space may push us past the prune target.
            fCheckForPruning = true;
        }
    }

    // Queue the file info for persistence to the block index database.
    setDirtyFileInfo.insert(nFile);
    return true;
}
3365
FindUndoPos(BlockValidationState & state,int nFile,FlatFilePos & pos,unsigned int nAddSize)3366 static bool FindUndoPos(BlockValidationState &state, int nFile, FlatFilePos &pos, unsigned int nAddSize)
3367 {
3368 pos.nFile = nFile;
3369
3370 LOCK(cs_LastBlockFile);
3371
3372 pos.nPos = vinfoBlockFile[nFile].nUndoSize;
3373 vinfoBlockFile[nFile].nUndoSize += nAddSize;
3374 setDirtyFileInfo.insert(nFile);
3375
3376 bool out_of_space;
3377 size_t bytes_allocated = UndoFileSeq().Allocate(pos, nAddSize, out_of_space);
3378 if (out_of_space) {
3379 return AbortNode(state, "Disk space is too low!", _("Disk space is too low!"));
3380 }
3381 if (bytes_allocated != 0 && fPruneMode) {
3382 fCheckForPruning = true;
3383 }
3384
3385 return true;
3386 }
3387
3388 /* Temporary check that blocks are compatible with BDB's 10,000 lock limit.
3389 This is based on Bitcoin's commit 8c222dca4f961ad13ec64d690134a40d09b20813.
3390 Each "object" touched in the DB may cause two locks (one read and one
3391 write lock). Objects are transaction IDs and names. Thus, count the
3392 total number of transaction IDs (tx themselves plus all distinct inputs).
3393 In addition, each Namecoin transaction could touch at most one name,
3394 so add them as well. */
CheckDbLockLimit(const std::vector<CTransactionRef> & vtx)3395 bool CheckDbLockLimit(const std::vector<CTransactionRef>& vtx)
3396 {
3397 std::set<uint256> setTxIds;
3398 unsigned nNames = 0;
3399 for (const auto& tx : vtx)
3400 {
3401 setTxIds.insert(tx->GetHash());
3402 if (tx->IsNamecoin())
3403 ++nNames;
3404
3405 for (const auto& txIn : tx->vin)
3406 setTxIds.insert(txIn.prevout.hash);
3407 }
3408
3409 const unsigned nTotalIds = setTxIds.size() + nNames;
3410 if (nTotalIds > 4500)
3411 return error("%s : %u locks estimated, that is too much for BDB",
3412 __func__, nTotalIds);
3413
3414 //LogPrintf ("%s : need %u locks\n", __func__, nTotalIds);
3415 return true;
3416 }
3417
CheckBlockHeader(const CBlockHeader & block,BlockValidationState & state,const Consensus::Params & consensusParams,bool fCheckPOW=true)3418 static bool CheckBlockHeader(const CBlockHeader& block, BlockValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW = true)
3419 {
3420 // Check proof of work matches claimed amount
3421 if (fCheckPOW && !CheckProofOfWork(block, consensusParams))
3422 return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, "high-hash", "proof of work failed");
3423
3424 return true;
3425 }
3426
bool CheckBlock(const CBlock& block, BlockValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW, bool fCheckMerkleRoot)
{
    // These are checks that are independent of context.

    // fChecked caches a previous successful full check of this block object.
    if (block.fChecked)
        return true;

    // Check that the header is valid (particularly PoW). This is mostly
    // redundant with the call in AcceptBlockHeader.
    if (!CheckBlockHeader(block, state, consensusParams, fCheckPOW))
        return false;

    // Signet only: check block solution
    if (consensusParams.signet_blocks && fCheckPOW && !CheckSignetBlockSolution(block, consensusParams)) {
        return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-signet-blksig", "signet block signature validation failure");
    }

    // Check the merkle root.
    if (fCheckMerkleRoot) {
        bool mutated;
        uint256 hashMerkleRoot2 = BlockMerkleRoot(block, &mutated);
        if (block.hashMerkleRoot != hashMerkleRoot2)
            return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-txnmrklroot", "hashMerkleRoot mismatch");

        // Check for merkle tree malleability (CVE-2012-2459): repeating sequences
        // of transactions in a block without affecting the merkle root of a block,
        // while still invalidating it.
        if (mutated)
            return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-txns-duplicate", "duplicate transaction");
    }

    // All potential-corruption validation must be done before we do any
    // transaction validation, as otherwise we may mark the header as invalid
    // because we receive the wrong transactions for it.
    // Note that witness malleability is checked in ContextualCheckBlock, so no
    // checks that use witness data may be performed here.

    // Size limits
    // (Serialized without witness data, scaled by WITNESS_SCALE_FACTOR.)
    if (block.vtx.empty() || block.vtx.size() * WITNESS_SCALE_FACTOR > MAX_BLOCK_WEIGHT || ::GetSerializeSize(block, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) * WITNESS_SCALE_FACTOR > MAX_BLOCK_WEIGHT)
        return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-length", "size limits failed");

    // Enforce the temporary DB lock limit.
    // TODO: Remove with a hardfork in the future.
    if (!CheckDbLockLimit(block.vtx))
        return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
                             "bad-db-locks",
                             "DB lock limit exceeded");

    // First transaction must be coinbase, the rest must not be
    if (block.vtx.empty() || !block.vtx[0]->IsCoinBase())
        return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-missing", "first tx is not coinbase");
    for (unsigned int i = 1; i < block.vtx.size(); i++)
        if (block.vtx[i]->IsCoinBase())
            return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-multiple", "more than one coinbase");

    // Check transactions
    // Must check for duplicate inputs (see CVE-2018-17144)
    for (const auto& tx : block.vtx) {
        TxValidationState tx_state;
        if (!CheckTransaction(*tx, tx_state)) {
            // CheckBlock() does context-free validation checks. The only
            // possible failures are consensus failures.
            assert(tx_state.GetResult() == TxValidationResult::TX_CONSENSUS);
            return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, tx_state.GetRejectReason(),
                                 strprintf("Transaction check failed (tx hash %s) %s", tx->GetHash().ToString(), tx_state.GetDebugMessage()));
        }
    }
    // Bound the total legacy sigop count for the block.
    unsigned int nSigOps = 0;
    for (const auto& tx : block.vtx)
    {
        nSigOps += GetLegacySigOpCount(*tx);
    }
    if (nSigOps * WITNESS_SCALE_FACTOR > MAX_BLOCK_SIGOPS_COST)
        return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-sigops", "out-of-bounds SigOpCount");

    // Only cache the result when all checks were actually performed.
    if (fCheckPOW && fCheckMerkleRoot)
        block.fChecked = true;

    return true;
}
3507
IsWitnessEnabled(const CBlockIndex * pindexPrev,const Consensus::Params & params)3508 bool IsWitnessEnabled(const CBlockIndex* pindexPrev, const Consensus::Params& params)
3509 {
3510 int height = pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1;
3511 return (height >= params.SegwitHeight);
3512 }
3513
UpdateUncommittedBlockStructures(CBlock & block,const CBlockIndex * pindexPrev,const Consensus::Params & consensusParams)3514 void UpdateUncommittedBlockStructures(CBlock& block, const CBlockIndex* pindexPrev, const Consensus::Params& consensusParams)
3515 {
3516 int commitpos = GetWitnessCommitmentIndex(block);
3517 static const std::vector<unsigned char> nonce(32, 0x00);
3518 if (commitpos != NO_WITNESS_COMMITMENT && IsWitnessEnabled(pindexPrev, consensusParams) && !block.vtx[0]->HasWitness()) {
3519 CMutableTransaction tx(*block.vtx[0]);
3520 tx.vin[0].scriptWitness.stack.resize(1);
3521 tx.vin[0].scriptWitness.stack[0] = nonce;
3522 block.vtx[0] = MakeTransactionRef(std::move(tx));
3523 }
3524 }
3525
/** Add a BIP141 witness commitment output to the block's coinbase, if segwit
 *  is deployed on this chain and no commitment is present yet. Returns the
 *  commitment script bytes (empty if no output was added). */
std::vector<unsigned char> GenerateCoinbaseCommitment(CBlock& block, const CBlockIndex* pindexPrev, const Consensus::Params& consensusParams)
{
    std::vector<unsigned char> commitment;
    int commitpos = GetWitnessCommitmentIndex(block);
    // 32 zero bytes: the witness reserved value hashed into the commitment.
    std::vector<unsigned char> ret(32, 0x00);
    if (consensusParams.SegwitHeight != std::numeric_limits<int>::max()) {
        if (commitpos == NO_WITNESS_COMMITMENT) {
            // commitment = SHA256d(witness merkle root || reserved value)
            uint256 witnessroot = BlockWitnessMerkleRoot(block, nullptr);
            CHash256().Write(witnessroot).Write(ret).Finalize(witnessroot);
            CTxOut out;
            out.nValue = 0;
            out.scriptPubKey.resize(MINIMUM_WITNESS_COMMITMENT);
            // Script layout: OP_RETURN, 0x24 (push 36 bytes), the 4-byte
            // commitment header 0xaa21a9ed, then the 32-byte commitment hash.
            out.scriptPubKey[0] = OP_RETURN;
            out.scriptPubKey[1] = 0x24;
            out.scriptPubKey[2] = 0xaa;
            out.scriptPubKey[3] = 0x21;
            out.scriptPubKey[4] = 0xa9;
            out.scriptPubKey[5] = 0xed;
            memcpy(&out.scriptPubKey[6], witnessroot.begin(), 32);
            commitment = std::vector<unsigned char>(out.scriptPubKey.begin(), out.scriptPubKey.end());
            // Append the commitment output to a copy of the coinbase and
            // swap it into the block.
            CMutableTransaction tx(*block.vtx[0]);
            tx.vout.push_back(out);
            block.vtx[0] = MakeTransactionRef(std::move(tx));
        }
    }
    // Make sure the coinbase also carries the witness reserved value in its
    // scriptWitness when a commitment exists.
    UpdateUncommittedBlockStructures(block, pindexPrev, consensusParams);
    return commitment;
}
3554
3555 //! Returns last CBlockIndex* that is a checkpoint
GetLastCheckpoint(const CCheckpointData & data)3556 static CBlockIndex* GetLastCheckpoint(const CCheckpointData& data) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
3557 {
3558 const MapCheckpoints& checkpoints = data.mapCheckpoints;
3559
3560 for (const MapCheckpoints::value_type& i : reverse_iterate(checkpoints))
3561 {
3562 const uint256& hash = i.second;
3563 CBlockIndex* pindex = LookupBlockIndex(hash);
3564 if (pindex) {
3565 return pindex;
3566 }
3567 }
3568 return nullptr;
3569 }
3570
3571 /** Context-dependent validity checks.
3572 * By "context", we mean only the previous block headers, but not the UTXO
3573 * set; UTXO-related validity checks are done in ConnectBlock().
3574 * NOTE: This function is not currently invoked by ConnectBlock(), so we
3575 * should consider upgrade issues if we change which consensus rules are
3576 * enforced in this function (eg by adding a new consensus rule). See comment
3577 * in ConnectBlock().
3578 * Note that -reindex-chainstate skips the validation that happens here!
3579 */
static bool ContextualCheckBlockHeader(const CBlockHeader& block, BlockValidationState& state, const CChainParams& params, const CBlockIndex* pindexPrev, int64_t nAdjustedTime) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    // Genesis never reaches here; callers pass a real predecessor.
    assert(pindexPrev != nullptr);
    const int nHeight = pindexPrev->nHeight + 1;

    // Disallow legacy blocks after merge-mining start.
    // (Merge-mining / auxpow rule specific to this chain, not upstream Bitcoin.)
    const Consensus::Params& consensusParams = params.GetConsensus();
    if (!consensusParams.AllowLegacyBlocks(nHeight) && block.IsLegacy())
        return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER,
                             "late-legacy-block",
                             "legacy block after auxpow start");

    // Check proof of work: nBits must exactly equal the required difficulty
    // for this height (the PoW hash itself is checked elsewhere).
    if (block.nBits != GetNextWorkRequired(pindexPrev, &block, consensusParams))
        return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, "bad-diffbits", "incorrect proof of work");

    // Check against checkpoints
    if (fCheckpointsEnabled) {
        // Don't accept any forks from the main chain prior to last checkpoint.
        // GetLastCheckpoint finds the last checkpoint in MapCheckpoints that's in our
        // BlockIndex().
        CBlockIndex* pcheckpoint = GetLastCheckpoint(params.Checkpoints());
        if (pcheckpoint && nHeight < pcheckpoint->nHeight) {
            LogPrintf("ERROR: %s: forked chain older than last checkpoint (height %d)\n", __func__, nHeight);
            return state.Invalid(BlockValidationResult::BLOCK_CHECKPOINT, "bad-fork-prior-to-checkpoint");
        }
    }

    // Check timestamp against prev: must be strictly greater than the median
    // time of the past blocks (BIP113-style lower bound on header time).
    if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast())
        return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, "time-too-old", "block's timestamp is too early");

    // Check timestamp upper bound against our adjusted clock.
    if (block.GetBlockTime() > nAdjustedTime + MAX_FUTURE_BLOCK_TIME)
        return state.Invalid(BlockValidationResult::BLOCK_TIME_FUTURE, "time-too-new", "block timestamp too far in the future");

    // Reject outdated version blocks when 95% (75% on testnet) of the network has upgraded:
    // check for version 2, 3 and 4 upgrades (BIP34, BIP66, BIP65 respectively).
    if((block.GetBaseVersion() < 2 && nHeight >= consensusParams.BIP34Height) ||
       (block.GetBaseVersion() < 3 && nHeight >= consensusParams.BIP66Height) ||
       (block.GetBaseVersion() < 4 && nHeight >= consensusParams.BIP65Height))
            return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, strprintf("bad-version(0x%08x)", block.nVersion),
                                 strprintf("rejected nVersion=0x%08x block", block.nVersion));

    return true;
}
3626
3627 /** NOTE: This function is not currently invoked by ConnectBlock(), so we
3628 * should consider upgrade issues if we change which consensus rules are
3629 * enforced in this function (eg by adding a new consensus rule). See comment
3630 * in ConnectBlock().
3631 * Note that -reindex-chainstate skips the validation that happens here!
3632 */
static bool ContextualCheckBlock(const CBlock& block, BlockValidationState& state, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev)
{
    // pindexPrev == nullptr only for the genesis block (height 0).
    const int nHeight = pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1;

    // Start enforcing BIP113 (Median Time Past).
    int nLockTimeFlags = 0;
    if (nHeight >= consensusParams.CSVHeight) {
        assert(pindexPrev != nullptr);
        nLockTimeFlags |= LOCKTIME_MEDIAN_TIME_PAST;
    }

    // Locktime cutoff: median time past once BIP113 is active, otherwise the
    // block's own timestamp.
    int64_t nLockTimeCutoff = (nLockTimeFlags & LOCKTIME_MEDIAN_TIME_PAST)
                              ? pindexPrev->GetMedianTimePast()
                              : block.GetBlockTime();

    // Check that all transactions are finalized
    for (const auto& tx : block.vtx) {
        if (!IsFinalTx(*tx, nHeight, nLockTimeCutoff)) {
            return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-nonfinal", "non-final transaction");
        }
    }

    // Enforce rule that the coinbase starts with serialized block height (BIP34)
    if (nHeight >= consensusParams.BIP34Height)
    {
        CScript expect = CScript() << nHeight;
        // The scriptSig must begin with the height push; extra data after it
        // is allowed.
        if (block.vtx[0]->vin[0].scriptSig.size() < expect.size() ||
            !std::equal(expect.begin(), expect.end(), block.vtx[0]->vin[0].scriptSig.begin())) {
            return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-height", "block height mismatch in coinbase");
        }
    }

    // Validation for witness commitments.
    // * We compute the witness hash (which is the hash including witnesses) of all the block's transactions, except the
    //   coinbase (where 0x0000....0000 is used instead).
    // * The coinbase scriptWitness is a stack of a single 32-byte vector, containing a witness reserved value (unconstrained).
    // * We build a merkle tree with all those witness hashes as leaves (similar to the hashMerkleRoot in the block header).
    // * There must be at least one output whose scriptPubKey is a single 36-byte push, the first 4 bytes of which are
    //   {0xaa, 0x21, 0xa9, 0xed}, and the following 32 bytes are SHA256^2(witness root, witness reserved value). In case there are
    //   multiple, the last one is used.
    bool fHaveWitness = false;
    if (nHeight >= consensusParams.SegwitHeight) {
        int commitpos = GetWitnessCommitmentIndex(block);
        if (commitpos != NO_WITNESS_COMMITMENT) {
            bool malleated = false;
            uint256 hashWitness = BlockWitnessMerkleRoot(block, &malleated);
            // The malleation check is ignored; as the transaction tree itself
            // already does not permit it, it is impossible to trigger in the
            // witness tree.
            if (block.vtx[0]->vin[0].scriptWitness.stack.size() != 1 || block.vtx[0]->vin[0].scriptWitness.stack[0].size() != 32) {
                return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-witness-nonce-size", strprintf("%s : invalid witness reserved value size", __func__));
            }
            // Recompute the commitment and compare to the 32 bytes stored at
            // offset 6 of the commitment output's scriptPubKey.
            CHash256().Write(hashWitness).Write(block.vtx[0]->vin[0].scriptWitness.stack[0]).Finalize(hashWitness);
            if (memcmp(hashWitness.begin(), &block.vtx[0]->vout[commitpos].scriptPubKey[6], 32)) {
                return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-witness-merkle-match", strprintf("%s : witness merkle commitment mismatch", __func__));
            }
            fHaveWitness = true;
        }
    }

    // No witness data is allowed in blocks that don't commit to witness data, as this would otherwise leave room for spam
    if (!fHaveWitness) {
      for (const auto& tx : block.vtx) {
            if (tx->HasWitness()) {
                return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "unexpected-witness", strprintf("%s : unexpected witness data found", __func__));
            }
        }
    }

    // After the coinbase witness reserved value and commitment are verified,
    // we can check if the block weight passes (before we've checked the
    // coinbase witness, it would be possible for the weight to be too
    // large by filling up the coinbase witness, which doesn't change
    // the block hash, so we couldn't mark the block as permanently
    // failed).
    if (GetBlockWeight(block) > MAX_BLOCK_WEIGHT) {
        return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-weight", strprintf("%s : weight limit failed", __func__));
    }

    return true;
}
3714
/** Validate a header contextually and add it to the block index. On success
 *  (*ppindex, if given) points at the index entry; returns false with state
 *  set on validation failure. Requires cs_main. */
bool BlockManager::AcceptBlockHeader(const CBlockHeader& block, BlockValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex)
{
    AssertLockHeld(cs_main);
    // Check for duplicate
    uint256 hash = block.GetHash();
    BlockMap::iterator miSelf = m_block_index.find(hash);
    CBlockIndex *pindex = nullptr;
    // The genesis header bypasses all checks below and is added directly.
    if (hash != chainparams.GetConsensus().hashGenesisBlock) {
        if (miSelf != m_block_index.end()) {
            // Block header is already known.
            pindex = miSelf->second;
            if (ppindex)
                *ppindex = pindex;
            if (pindex->nStatus & BLOCK_FAILED_MASK) {
                LogPrintf("ERROR: %s: block %s is marked invalid\n", __func__, hash.ToString());
                return state.Invalid(BlockValidationResult::BLOCK_CACHED_INVALID, "duplicate");
            }
            return true;
        }

        // Context-free header checks (PoW etc.).
        if (!CheckBlockHeader(block, state, chainparams.GetConsensus())) {
            LogPrint(BCLog::VALIDATION, "%s: Consensus::CheckBlockHeader: %s, %s\n", __func__, hash.ToString(), state.ToString());
            return false;
        }

        // Get prev block index
        CBlockIndex* pindexPrev = nullptr;
        BlockMap::iterator mi = m_block_index.find(block.hashPrevBlock);
        if (mi == m_block_index.end()) {
            LogPrintf("ERROR: %s: prev block not found\n", __func__);
            return state.Invalid(BlockValidationResult::BLOCK_MISSING_PREV, "prev-blk-not-found");
        }
        pindexPrev = (*mi).second;
        if (pindexPrev->nStatus & BLOCK_FAILED_MASK) {
            LogPrintf("ERROR: %s: prev block invalid\n", __func__);
            return state.Invalid(BlockValidationResult::BLOCK_INVALID_PREV, "bad-prevblk");
        }
        if (!ContextualCheckBlockHeader(block, state, chainparams, pindexPrev, GetAdjustedTime()))
            return error("%s: Consensus::ContextualCheckBlockHeader: %s, %s", __func__, hash.ToString(), state.ToString());

        /* Determine if this block descends from any block which has been found
         * invalid (m_failed_blocks), then mark pindexPrev and any blocks between
         * them as failed. For example:
         *
         *                D3
         *              /
         *      B2 - C2
         *    /         \
         *  A             D2 - E2 - F2
         *    \
         *      B1 - C1 - D1 - E1
         *
         * In the case that we attempted to reorg from E1 to F2, only to find
         * C2 to be invalid, we would mark D2, E2, and F2 as BLOCK_FAILED_CHILD
         * but NOT D3 (it was not in any of our candidate sets at the time).
         *
         * In any case D3 will also be marked as BLOCK_FAILED_CHILD at restart
         * in LoadBlockIndex.
         */
        if (!pindexPrev->IsValid(BLOCK_VALID_SCRIPTS)) {
            // The above does not mean "invalid": it checks if the previous block
            // hasn't been validated up to BLOCK_VALID_SCRIPTS. This is a performance
            // optimization, in the common case of adding a new block to the tip,
            // we don't need to iterate over the failed blocks list.
            for (const CBlockIndex* failedit : m_failed_blocks) {
                if (pindexPrev->GetAncestor(failedit->nHeight) == failedit) {
                    assert(failedit->nStatus & BLOCK_FAILED_VALID);
                    // Walk back from pindexPrev to the known-invalid ancestor,
                    // flagging every intermediate block as a failed child.
                    CBlockIndex* invalid_walk = pindexPrev;
                    while (invalid_walk != failedit) {
                        invalid_walk->nStatus |= BLOCK_FAILED_CHILD;
                        setDirtyBlockIndex.insert(invalid_walk);
                        invalid_walk = invalid_walk->pprev;
                    }
                    LogPrintf("ERROR: %s: prev block invalid\n", __func__);
                    return state.Invalid(BlockValidationResult::BLOCK_INVALID_PREV, "bad-prevblk");
                }
            }
        }
    }
    if (pindex == nullptr)
        pindex = AddToBlockIndex(block);

    if (ppindex)
        *ppindex = pindex;

    return true;
}
3802
// Exposed wrapper for AcceptBlockHeader
bool ChainstateManager::ProcessNewBlockHeaders(const std::vector<CBlockHeader>& headers, BlockValidationState& state, const CChainParams& chainparams, const CBlockIndex** ppindex)
{
    AssertLockNotHeld(cs_main);
    {
        // Take cs_main only for the duration of header acceptance; the tip
        // notification below must run without it held.
        LOCK(cs_main);
        for (const CBlockHeader& header : headers) {
            CBlockIndex *pindex = nullptr; // Use a temp pindex instead of ppindex to avoid a const_cast
            bool accepted = m_blockman.AcceptBlockHeader(
                header, state, chainparams, &pindex);
            ::ChainstateActive().CheckBlockIndex(chainparams.GetConsensus());

            // Stop at the first rejected header; state carries the reason.
            if (!accepted) {
                return false;
            }
            if (ppindex) {
                *ppindex = pindex;
            }
        }
    }
    if (NotifyHeaderTip()) {
        // During IBD, log sync progress estimated from the last header's
        // timestamp and the target block spacing.
        if (::ChainstateActive().IsInitialBlockDownload() && ppindex && *ppindex) {
            LogPrintf("Synchronizing blockheaders, height: %d (~%.2f%%)\n", (*ppindex)->nHeight, 100.0/((*ppindex)->nHeight+(GetAdjustedTime() - (*ppindex)->GetBlockTime()) / Params().GetConsensus().nPowTargetSpacing) * (*ppindex)->nHeight);
        }
    }
    return true;
}
3830
3831 /** Store block on disk. If dbp is non-nullptr, the file is known to already reside on disk */
SaveBlockToDisk(const CBlock & block,int nHeight,const CChainParams & chainparams,const FlatFilePos * dbp)3832 static FlatFilePos SaveBlockToDisk(const CBlock& block, int nHeight, const CChainParams& chainparams, const FlatFilePos* dbp) {
3833 unsigned int nBlockSize = ::GetSerializeSize(block, CLIENT_VERSION);
3834 FlatFilePos blockPos;
3835 if (dbp != nullptr)
3836 blockPos = *dbp;
3837 if (!FindBlockPos(blockPos, nBlockSize+8, nHeight, block.GetBlockTime(), dbp != nullptr)) {
3838 error("%s: FindBlockPos failed", __func__);
3839 return FlatFilePos();
3840 }
3841 if (dbp == nullptr) {
3842 if (!WriteBlockToDisk(block, blockPos, chainparams.MessageStart())) {
3843 AbortNode("Failed to write block");
3844 return FlatFilePos();
3845 }
3846 }
3847 return blockPos;
3848 }
3849
/** Store a received block on disk and accept it into the block index, subject to anti-DoS checks; if dbp is non-nullptr, the block data is known to already reside on disk */
bool CChainState::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, BlockValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex, bool fRequested, const FlatFilePos* dbp, bool* fNewBlock)
{
    const CBlock& block = *pblock;

    // *fNewBlock reports whether we stored this block's data for the first time.
    if (fNewBlock) *fNewBlock = false;
    AssertLockHeld(cs_main);

    CBlockIndex *pindexDummy = nullptr;
    CBlockIndex *&pindex = ppindex ? *ppindex : pindexDummy;

    bool accepted_header = m_blockman.AcceptBlockHeader(block, state, chainparams, &pindex);
    CheckBlockIndex(chainparams.GetConsensus());

    if (!accepted_header)
        return false;

    // Try to process all requested blocks that we don't have, but only
    // process an unrequested block if it's new and has enough work to
    // advance our tip, and isn't too many blocks ahead.
    bool fAlreadyHave = pindex->nStatus & BLOCK_HAVE_DATA;
    bool fHasMoreOrSameWork = (m_chain.Tip() ? pindex->nChainWork >= m_chain.Tip()->nChainWork : true);
    // Blocks that are too out-of-order needlessly limit the effectiveness of
    // pruning, because pruning will not delete block files that contain any
    // blocks which are too close in height to the tip.  Apply this test
    // regardless of whether pruning is enabled; it should generally be safe to
    // not process unrequested blocks.
    bool fTooFarAhead = (pindex->nHeight > int(m_chain.Height() + MIN_BLOCKS_TO_KEEP));

    // TODO: Decouple this function from the block download logic by removing fRequested
    // This requires some new chain data structure to efficiently look up if a
    // block is in a chain leading to a candidate for best tip, despite not
    // being such a candidate itself.

    // TODO: deal better with return value and error conditions for duplicate
    // and unrequested blocks.
    if (fAlreadyHave) return true;
    if (!fRequested) {  // If we didn't ask for it:
        if (pindex->nTx != 0) return true;    // This is a previously-processed block that was pruned
        if (!fHasMoreOrSameWork) return true; // Don't process less-work chains
        if (fTooFarAhead) return true;        // Block height is too high

        // Protect against DoS attacks from low-work chains.
        // If our tip is behind, a peer could try to send us
        // low-work blocks on a fake chain that we would never
        // request; don't process these.
        if (pindex->nChainWork < nMinimumChainWork) return true;
    }

    if (!CheckBlock(block, state, chainparams.GetConsensus()) ||
        !ContextualCheckBlock(block, state, chainparams.GetConsensus(), pindex->pprev)) {
        // Permanently mark the header invalid, except for BLOCK_MUTATED:
        // a mutated payload is not the header's fault and the real block
        // may still arrive later.
        if (state.IsInvalid() && state.GetResult() != BlockValidationResult::BLOCK_MUTATED) {
            pindex->nStatus |= BLOCK_FAILED_VALID;
            setDirtyBlockIndex.insert(pindex);
        }
        return error("%s: %s", __func__, state.ToString());
    }

    // Header is valid/has work, merkle tree and segwit merkle tree are good...RELAY NOW
    // (but if it does not build on our best tip, let the SendMessages loop relay it)
    if (!IsInitialBlockDownload() && m_chain.Tip() == pindex->pprev)
        GetMainSignals().NewPoWValidBlock(pindex, pblock);

    // Write block to history file
    if (fNewBlock) *fNewBlock = true;
    try {
        FlatFilePos blockPos = SaveBlockToDisk(block, pindex->nHeight, chainparams, dbp);
        if (blockPos.IsNull()) {
            state.Error(strprintf("%s: Failed to find position to write new block to disk", __func__));
            return false;
        }
        ReceivedBlockTransactions(block, pindex, blockPos, chainparams.GetConsensus());
    } catch (const std::runtime_error& e) {
        // Disk errors are fatal: shut the node down rather than continue
        // with inconsistent state.
        return AbortNode(state, std::string("System error: ") + e.what());
    }

    FlushStateToDisk(chainparams, state, FlushStateMode::NONE);

    CheckBlockIndex(chainparams.GetConsensus());

    return true;
}
3932
bool ChainstateManager::ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<const CBlock> pblock, bool fForceProcessing, bool* fNewBlock)
{
    AssertLockNotHeld(cs_main);

    {
        CBlockIndex *pindex = nullptr;
        if (fNewBlock) *fNewBlock = false;
        BlockValidationState state;

        // CheckBlock() does not support multi-threaded block validation because CBlock::fChecked can cause data race.
        // Therefore, the following critical section must include the CheckBlock() call as well.
        LOCK(cs_main);

        // Ensure that CheckBlock() passes before calling AcceptBlock, as
        // belt-and-suspenders.
        bool ret = CheckBlock(*pblock, state, chainparams.GetConsensus());
        if (ret) {
            // Store to disk
            ret = ::ChainstateActive().AcceptBlock(pblock, state, chainparams, &pindex, fForceProcessing, nullptr, fNewBlock);
        }
        if (!ret) {
            // Notify listeners (e.g. miners, wallet) of the rejection before
            // returning the error.
            GetMainSignals().BlockChecked(*pblock, state);
            return error("%s: AcceptBlock FAILED (%s)", __func__, state.ToString());
        }
    }

    NotifyHeaderTip();

    BlockValidationState state; // Only used to report errors, not invalidity - ignore it
    if (!::ChainstateActive().ActivateBestChain(state, chainparams, pblock))
        return error("%s: ActivateBestChain failed (%s)", __func__, state.ToString());

    return true;
}
3967
/** Fully validate a candidate block against the current tip without making
 *  any permanent state changes (uses throwaway coins view / index entry).
 *  Used e.g. by mining code to verify block templates. Requires cs_main. */
bool TestBlockValidity(BlockValidationState& state, const CChainParams& chainparams, const CBlock& block, CBlockIndex* pindexPrev, bool fCheckPOW, bool fCheckMerkleRoot)
{
    AssertLockHeld(cs_main);
    assert(pindexPrev && pindexPrev == ::ChainActive().Tip());
    // Throwaway name-set and coins cache so ConnectBlock leaves no trace.
    // (Name handling is specific to this chain's name operations.)
    std::set<valtype> namesDummy;
    CCoinsViewCache viewNew(&::ChainstateActive().CoinsTip());
    uint256 block_hash(block.GetHash());
    CBlockIndex indexDummy(block);
    indexDummy.pprev = pindexPrev;
    indexDummy.nHeight = pindexPrev->nHeight + 1;
    indexDummy.phashBlock = &block_hash;

    // NOTE: CheckBlockHeader is called by CheckBlock
    if (!ContextualCheckBlockHeader(block, state, chainparams, pindexPrev, GetAdjustedTime()))
        return error("%s: Consensus::ContextualCheckBlockHeader: %s", __func__, state.ToString());
    if (!CheckBlock(block, state, chainparams.GetConsensus(), fCheckPOW, fCheckMerkleRoot))
        return error("%s: Consensus::CheckBlock: %s", __func__, state.ToString());
    if (!ContextualCheckBlock(block, state, chainparams.GetConsensus(), pindexPrev))
        return error("%s: Consensus::ContextualCheckBlock: %s", __func__, state.ToString());
    // fJustCheck=true: validate scripts/UTXO effects but do not write anything.
    if (!::ChainstateActive().ConnectBlock(block, state, &indexDummy, viewNew, namesDummy, chainparams, true))
        return false;
    assert(state.IsValid());

    return true;
}
3993
3994 /**
3995 * BLOCK PRUNING CODE
3996 */
3997
3998 /* Calculate the amount of disk space the block & undo files currently use */
CalculateCurrentUsage()3999 uint64_t CalculateCurrentUsage()
4000 {
4001 LOCK(cs_LastBlockFile);
4002
4003 uint64_t retval = 0;
4004 for (const CBlockFileInfo &file : vinfoBlockFile) {
4005 retval += file.nSize + file.nUndoSize;
4006 }
4007 return retval;
4008 }
4009
/** Mark one block file's contents as pruned: strip data/undo flags from all
 *  index entries pointing into the file and reset the file's bookkeeping. */
void BlockManager::PruneOneBlockFile(const int fileNumber)
{
    AssertLockHeld(cs_main);
    LOCK(cs_LastBlockFile);

    for (const auto& entry : m_block_index) {
        CBlockIndex* pindex = entry.second;
        if (pindex->nFile == fileNumber) {
            // The block data and undo data are gone; clear positions so they
            // are not accidentally read, and schedule the entry for rewrite.
            pindex->nStatus &= ~BLOCK_HAVE_DATA;
            pindex->nStatus &= ~BLOCK_HAVE_UNDO;
            pindex->nFile = 0;
            pindex->nDataPos = 0;
            pindex->nUndoPos = 0;
            setDirtyBlockIndex.insert(pindex);

            // Prune from m_blocks_unlinked -- any block we prune would have
            // to be downloaded again in order to consider its chain, at which
            // point it would be considered as a candidate for
            // m_blocks_unlinked or setBlockIndexCandidates.
            // Note: advance the iterator before a possible erase so iteration
            // over the multimap range stays valid.
            auto range = m_blocks_unlinked.equal_range(pindex->pprev);
            while (range.first != range.second) {
                std::multimap<CBlockIndex *, CBlockIndex *>::iterator _it = range.first;
                range.first++;
                if (_it->second == pindex) {
                    m_blocks_unlinked.erase(_it);
                }
            }
        }
    }

    // Reset per-file stats and mark the file info dirty for persistence.
    vinfoBlockFile[fileNumber].SetNull();
    setDirtyFileInfo.insert(fileNumber);
}
4043
4044
UnlinkPrunedFiles(const std::set<int> & setFilesToPrune)4045 void UnlinkPrunedFiles(const std::set<int>& setFilesToPrune)
4046 {
4047 for (std::set<int>::iterator it = setFilesToPrune.begin(); it != setFilesToPrune.end(); ++it) {
4048 FlatFilePos pos(*it, 0);
4049 fs::remove(BlockFileSeq().FileName(pos));
4050 fs::remove(UndoFileSeq().FileName(pos));
4051 LogPrintf("Prune: %s deleted blk/rev (%05u)\n", __func__, *it);
4052 }
4053 }
4054
/** Select block files whose highest block is at or below the user-requested
 *  prune height (capped to keep MIN_BLOCKS_TO_KEEP below the tip). */
void BlockManager::FindFilesToPruneManual(std::set<int>& setFilesToPrune, int nManualPruneHeight, int chain_tip_height)
{
    assert(fPruneMode && nManualPruneHeight > 0);

    LOCK2(cs_main, cs_LastBlockFile);
    if (chain_tip_height < 0) {
        return;
    }

    // last block to prune is the lesser of (user-specified height, MIN_BLOCKS_TO_KEEP from the tip)
    // NOTE(review): chain_tip_height - MIN_BLOCKS_TO_KEEP is evaluated in
    // unsigned arithmetic; presumably the tip is always well above
    // MIN_BLOCKS_TO_KEEP when manual pruning is invoked -- confirm callers.
    unsigned int nLastBlockWeCanPrune = std::min((unsigned)nManualPruneHeight, chain_tip_height - MIN_BLOCKS_TO_KEEP);
    int count = 0;
    for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
        // Skip empty files and files containing blocks above the prune limit.
        if (vinfoBlockFile[fileNumber].nSize == 0 || vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune) {
            continue;
        }
        PruneOneBlockFile(fileNumber);
        setFilesToPrune.insert(fileNumber);
        count++;
    }
    LogPrintf("Prune (Manual): prune_height=%d removed %d blk/rev pairs\n", nLastBlockWeCanPrune, count);
}
4077
4078 /* This function is called from the RPC code for pruneblockchain */
PruneBlockFilesManual(int nManualPruneHeight)4079 void PruneBlockFilesManual(int nManualPruneHeight)
4080 {
4081 BlockValidationState state;
4082 const CChainParams& chainparams = Params();
4083 if (!::ChainstateActive().FlushStateToDisk(
4084 chainparams, state, FlushStateMode::NONE, nManualPruneHeight)) {
4085 LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString());
4086 }
4087 }
4088
/** Select block files to delete so total block/undo usage drops back under
 *  the configured prune target, never pruning within MIN_BLOCKS_TO_KEEP of
 *  the tip or before nPruneAfterHeight. */
void BlockManager::FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight, int chain_tip_height, bool is_ibd)
{
    LOCK2(cs_main, cs_LastBlockFile);
    // No chain yet, or pruning disabled (target 0): nothing to do.
    if (chain_tip_height < 0 || nPruneTarget == 0) {
        return;
    }
    if ((uint64_t)chain_tip_height <= nPruneAfterHeight) {
        return;
    }

    unsigned int nLastBlockWeCanPrune = chain_tip_height - MIN_BLOCKS_TO_KEEP;
    uint64_t nCurrentUsage = CalculateCurrentUsage();
    // We don't check to prune until after we've allocated new space for files
    // So we should leave a buffer under our target to account for another allocation
    // before the next pruning.
    uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
    uint64_t nBytesToPrune;
    int count = 0;

    if (nCurrentUsage + nBuffer >= nPruneTarget) {
        // On a prune event, the chainstate DB is flushed.
        // To avoid excessive prune events negating the benefit of high dbcache
        // values, we should not prune too rapidly.
        // So when pruning in IBD, increase the buffer a bit to avoid a re-prune too soon.
        if (is_ibd) {
            // Since this is only relevant during IBD, we use a fixed 10%
            nBuffer += nPruneTarget / 10;
        }

        for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
            nBytesToPrune = vinfoBlockFile[fileNumber].nSize + vinfoBlockFile[fileNumber].nUndoSize;

            // Empty files contribute nothing; skip them.
            if (vinfoBlockFile[fileNumber].nSize == 0) {
                continue;
            }

            if (nCurrentUsage + nBuffer < nPruneTarget) {  // are we below our target?
                break;
            }

            // don't prune files that could have a block within MIN_BLOCKS_TO_KEEP of the main chain's tip but keep scanning
            if (vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune) {
                continue;
            }

            PruneOneBlockFile(fileNumber);
            // Queue up the files for removal
            setFilesToPrune.insert(fileNumber);
            nCurrentUsage -= nBytesToPrune;
            count++;
        }
    }

    LogPrint(BCLog::PRUNE, "Prune: target=%dMiB actual=%dMiB diff=%dMiB max_prune_height=%d removed %d blk/rev pairs\n",
           nPruneTarget/1024/1024, nCurrentUsage/1024/1024,
           ((int64_t)nPruneTarget - (int64_t)nCurrentUsage)/1024/1024,
           nLastBlockWeCanPrune, count);
}
4147
BlockFileSeq()4148 static FlatFileSeq BlockFileSeq()
4149 {
4150 return FlatFileSeq(GetBlocksDir(), "blk", BLOCKFILE_CHUNK_SIZE);
4151 }
4152
UndoFileSeq()4153 static FlatFileSeq UndoFileSeq()
4154 {
4155 return FlatFileSeq(GetBlocksDir(), "rev", UNDOFILE_CHUNK_SIZE);
4156 }
4157
OpenBlockFile(const FlatFilePos & pos,bool fReadOnly)4158 FILE* OpenBlockFile(const FlatFilePos &pos, bool fReadOnly) {
4159 return BlockFileSeq().Open(pos, fReadOnly);
4160 }
4161
4162 /** Open an undo file (rev?????.dat) */
OpenUndoFile(const FlatFilePos & pos,bool fReadOnly)4163 static FILE* OpenUndoFile(const FlatFilePos &pos, bool fReadOnly) {
4164 return UndoFileSeq().Open(pos, fReadOnly);
4165 }
4166
GetBlockPosFilename(const FlatFilePos & pos)4167 fs::path GetBlockPosFilename(const FlatFilePos &pos)
4168 {
4169 return BlockFileSeq().FileName(pos);
4170 }
4171
InsertBlockIndex(const uint256 & hash)4172 CBlockIndex * BlockManager::InsertBlockIndex(const uint256& hash)
4173 {
4174 AssertLockHeld(cs_main);
4175
4176 if (hash.IsNull())
4177 return nullptr;
4178
4179 // Return existing
4180 BlockMap::iterator mi = m_block_index.find(hash);
4181 if (mi != m_block_index.end())
4182 return (*mi).second;
4183
4184 // Create new
4185 CBlockIndex* pindexNew = new CBlockIndex();
4186 mi = m_block_index.insert(std::make_pair(hash, pindexNew)).first;
4187 pindexNew->phashBlock = &((*mi).first);
4188
4189 return pindexNew;
4190 }
4191
// Load all block-index entries from the database and recompute in-memory
// derived state (chain work, chain tx counts, best header/invalid pointers).
// Candidates for the best chain are collected into block_index_candidates.
bool BlockManager::LoadBlockIndex(
    const Consensus::Params& consensus_params,
    CBlockTreeDB& blocktree,
    std::set<CBlockIndex*, CBlockIndexWorkComparator>& block_index_candidates)
{
    // Populate m_block_index from the block tree DB; InsertBlockIndex creates
    // the entries (including placeholder parents) as they are read.
    if (!blocktree.LoadBlockIndexGuts(consensus_params, [this](const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return this->InsertBlockIndex(hash); }))
        return false;

    // Calculate nChainWork
    // Sort by height so that every block's parent is processed before it.
    std::vector<std::pair<int, CBlockIndex*> > vSortedByHeight;
    vSortedByHeight.reserve(m_block_index.size());
    for (const std::pair<const uint256, CBlockIndex*>& item : m_block_index)
    {
        CBlockIndex* pindex = item.second;
        vSortedByHeight.push_back(std::make_pair(pindex->nHeight, pindex));
    }
    sort(vSortedByHeight.begin(), vSortedByHeight.end());
    for (const std::pair<int, CBlockIndex*>& item : vSortedByHeight)
    {
        if (ShutdownRequested()) return false;
        CBlockIndex* pindex = item.second;
        // Cumulative work and max timestamp derive from the parent (valid
        // because of the height ordering above).
        pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex);
        pindex->nTimeMax = (pindex->pprev ? std::max(pindex->pprev->nTimeMax, pindex->nTime) : pindex->nTime);
        // We can link the chain of blocks for which we've received transactions at some point.
        // Pruned nodes may have deleted the block.
        if (pindex->nTx > 0) {
            if (pindex->pprev) {
                if (pindex->pprev->HaveTxsDownloaded()) {
                    pindex->nChainTx = pindex->pprev->nChainTx + pindex->nTx;
                } else {
                    // Parent's chain of transactions is incomplete: remember
                    // this block so it can be linked once the parent is.
                    pindex->nChainTx = 0;
                    m_blocks_unlinked.insert(std::make_pair(pindex->pprev, pindex));
                }
            } else {
                pindex->nChainTx = pindex->nTx;
            }
        }
        // Propagate failure from an invalid ancestor to its descendants.
        if (!(pindex->nStatus & BLOCK_FAILED_MASK) && pindex->pprev && (pindex->pprev->nStatus & BLOCK_FAILED_MASK)) {
            pindex->nStatus |= BLOCK_FAILED_CHILD;
            setDirtyBlockIndex.insert(pindex);
        }
        // Blocks with all transactions available (or genesis) are candidates
        // for becoming the chain tip.
        if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && (pindex->HaveTxsDownloaded() || pindex->pprev == nullptr)) {
            block_index_candidates.insert(pindex);
        }
        // Track the most-work invalid block and the best known header.
        if (pindex->nStatus & BLOCK_FAILED_MASK && (!pindexBestInvalid || pindex->nChainWork > pindexBestInvalid->nChainWork))
            pindexBestInvalid = pindex;
        if (pindex->pprev)
            pindex->BuildSkip();
        if (pindex->IsValid(BLOCK_VALID_TREE) && (pindexBestHeader == nullptr || CBlockIndexWorkComparator()(pindexBestHeader, pindex)))
            pindexBestHeader = pindex;
    }

    return true;
}
4246
Unload()4247 void BlockManager::Unload() {
4248 m_failed_blocks.clear();
4249 m_blocks_unlinked.clear();
4250
4251 for (const BlockMap::value_type& entry : m_block_index) {
4252 delete entry.second;
4253 }
4254
4255 m_block_index.clear();
4256 }
4257
// Load the persisted block index plus block-file metadata and flags from the
// block tree database into global state. Returns false if the index cannot be
// loaded or a blk file referenced by the index cannot be opened.
bool static LoadBlockIndexDB(ChainstateManager& chainman, const CChainParams& chainparams) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    if (!chainman.m_blockman.LoadBlockIndex(
            chainparams.GetConsensus(), *pblocktree,
            ::ChainstateActive().setBlockIndexCandidates)) {
        return false;
    }

    // Load block file info
    pblocktree->ReadLastBlockFile(nLastBlockFile);
    vinfoBlockFile.resize(nLastBlockFile + 1);
    LogPrintf("%s: last block file = %i\n", __func__, nLastBlockFile);
    for (int nFile = 0; nFile <= nLastBlockFile; nFile++) {
        pblocktree->ReadBlockFileInfo(nFile, vinfoBlockFile[nFile]);
    }
    LogPrintf("%s: last block file info: %s\n", __func__, vinfoBlockFile[nLastBlockFile].ToString());
    // Pick up any file-info records beyond nLastBlockFile, stopping at the
    // first missing entry.
    for (int nFile = nLastBlockFile + 1; true; nFile++) {
        CBlockFileInfo info;
        if (pblocktree->ReadBlockFileInfo(nFile, info)) {
            vinfoBlockFile.push_back(info);
        } else {
            break;
        }
    }

    // Check presence of blk files
    LogPrintf("Checking all blk files are present...\n");
    std::set<int> setBlkDataFiles;
    // Collect the set of file numbers that the index says hold block data.
    for (const std::pair<const uint256, CBlockIndex*>& item : chainman.BlockIndex()) {
        CBlockIndex* pindex = item.second;
        if (pindex->nStatus & BLOCK_HAVE_DATA) {
            setBlkDataFiles.insert(pindex->nFile);
        }
    }
    // Try opening each referenced blk file once; failure to open means data
    // the index depends on is missing.
    for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++)
    {
        FlatFilePos pos(*it, 0);
        if (CAutoFile(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION).IsNull()) {
            return false;
        }
    }

    // Check whether we have ever pruned block & undo files
    pblocktree->ReadFlag("prunedblockfiles", fHavePruned);
    if (fHavePruned)
        LogPrintf("LoadBlockIndexDB(): Block files have previously been pruned\n");

    // Check whether we need to continue reindexing
    bool fReindexing = false;
    pblocktree->ReadReindexing(fReindexing);
    if(fReindexing) fReindex = true;

    // Check whether we have the name history
    pblocktree->ReadFlag("namehistory", fNameHistory);
    LogPrintf("LoadBlockIndexDB(): name history %s\n", fNameHistory ? "enabled" : "disabled");

    return true;
}
4316
LoadMempool(const ArgsManager & args)4317 void CChainState::LoadMempool(const ArgsManager& args)
4318 {
4319 if (args.GetArg("-persistmempool", DEFAULT_PERSIST_MEMPOOL)) {
4320 ::LoadMempool(m_mempool);
4321 }
4322 m_mempool.SetIsLoaded(!ShutdownRequested());
4323 }
4324
// Set m_chain's tip to the block the coins database says is best.
// Returns false only if that block is unknown to the block index.
bool CChainState::LoadChainTip(const CChainParams& chainparams)
{
    AssertLockHeld(cs_main);
    const CCoinsViewCache& coins_cache = CoinsTip();
    assert(!coins_cache.GetBestBlock().IsNull()); // Never called when the coins view is empty
    const CBlockIndex* tip = m_chain.Tip();

    // Nothing to do if the in-memory tip already matches the coins view.
    if (tip && tip->GetBlockHash() == coins_cache.GetBestBlock()) {
        return true;
    }

    // Load pointer to end of best chain
    CBlockIndex* pindex = LookupBlockIndex(coins_cache.GetBestBlock());
    if (!pindex) {
        return false;
    }
    m_chain.SetTip(pindex);
    // Drop candidates that can no longer improve on the (re)loaded tip.
    PruneBlockIndexCandidates();

    tip = m_chain.Tip();
    LogPrintf("Loaded best chain: hashBestChain=%s height=%d date=%s progress=%f\n",
              tip->GetBlockHash().ToString(),
              m_chain.Height(),
              FormatISO8601DateTime(tip->GetBlockTime()),
              GuessVerificationProgress(chainparams.TxData(), tip));
    return true;
}
4352
CVerifyDB()4353 CVerifyDB::CVerifyDB()
4354 {
4355 uiInterface.ShowProgress(_("Verifying blocks...").translated, 0, false);
4356 }
4357
~CVerifyDB()4358 CVerifyDB::~CVerifyDB()
4359 {
4360 uiInterface.ShowProgress("", 100, false);
4361 }
4362
// Verify the last nCheckDepth blocks of the active chain at increasing levels
// of thoroughness (0 = read, 1 = block validity, 2 = undo data, 3 = memory
// disconnect consistency, 4 = full reconnection). Returns true on success or
// on early shutdown; returns error(...) (false) on any detected corruption.
bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview, int nCheckLevel, int nCheckDepth)
{
    LOCK(cs_main);
    // An empty or single-block chain has nothing to verify.
    if (::ChainActive().Tip() == nullptr || ::ChainActive().Tip()->pprev == nullptr)
        return true;

    // Verify blocks in the best chain
    if (nCheckDepth <= 0 || nCheckDepth > ::ChainActive().Height())
        nCheckDepth = ::ChainActive().Height();
    // Clamp the check level to the supported range [0, 4].
    nCheckLevel = std::max(0, std::min(4, nCheckLevel));
    LogPrintf("Verifying last %i blocks at level %i\n", nCheckDepth, nCheckLevel);
    CCoinsViewCache coins(coinsview);
    std::set<valtype> dummyNames;
    CBlockIndex* pindex;
    CBlockIndex* pindexFailure = nullptr;
    int nGoodTransactions = 0;
    BlockValidationState state;
    int reportDone = 0;
    LogPrintf("[0%%]..."); /* Continued */
    // Walk backwards from the tip, applying the progressively deeper checks.
    for (pindex = ::ChainActive().Tip(); pindex && pindex->pprev; pindex = pindex->pprev) {
        // Levels >= 4 reserve the second half of the progress bar for the
        // reconnection pass below.
        const int percentageDone = std::max(1, std::min(99, (int)(((double)(::ChainActive().Height() - pindex->nHeight)) / (double)nCheckDepth * (nCheckLevel >= 4 ? 50 : 100))));
        if (reportDone < percentageDone/10) {
            // report every 10% step
            LogPrintf("[%d%%]...", percentageDone); /* Continued */
            reportDone = percentageDone/10;
        }
        uiInterface.ShowProgress(_("Verifying blocks...").translated, percentageDone, false);
        if (pindex->nHeight <= ::ChainActive().Height()-nCheckDepth)
            break;
        if (fPruneMode && !(pindex->nStatus & BLOCK_HAVE_DATA)) {
            // If pruning, only go back as far as we have data.
            LogPrintf("VerifyDB(): block verification stopping at height %d (pruning, no data)\n", pindex->nHeight);
            break;
        }
        CBlock block;
        // check level 0: read from disk
        if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus()))
            return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
        // check level 1: verify block validity
        if (nCheckLevel >= 1 && !CheckBlock(block, state, chainparams.GetConsensus()))
            return error("%s: *** found bad block at %d, hash=%s (%s)\n", __func__,
                         pindex->nHeight, pindex->GetBlockHash().ToString(), state.ToString());
        // check level 2: verify undo validity
        if (nCheckLevel >= 2 && pindex) {
            CBlockUndo undo;
            if (!pindex->GetUndoPos().IsNull()) {
                if (!UndoReadFromDisk(undo, pindex)) {
                    return error("VerifyDB(): *** found bad undo data at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
                }
            }
        }
        // check level 3: check for inconsistencies during memory-only disconnect of tip blocks
        // (only while the combined cache usage still fits in the coinstip budget)
        if (nCheckLevel >= 3 && (coins.DynamicMemoryUsage() + ::ChainstateActive().CoinsTip().DynamicMemoryUsage()) <= ::ChainstateActive().m_coinstip_cache_size_bytes) {
            assert(coins.GetBestBlock() == pindex->GetBlockHash());
            DisconnectResult res = ::ChainstateActive().DisconnectBlock(block, pindex, coins, dummyNames);
            if (res == DISCONNECT_FAILED) {
                return error("VerifyDB(): *** irrecoverable inconsistency in block data at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
            }
            if (res == DISCONNECT_UNCLEAN) {
                // Unclean disconnect: remember the failure but keep scanning;
                // deeper blocks may still disconnect cleanly.
                nGoodTransactions = 0;
                pindexFailure = pindex;
            } else {
                nGoodTransactions += block.vtx.size();
            }
        }
        if (ShutdownRequested()) return true;
    }
    if (pindexFailure)
        return error("VerifyDB(): *** coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", ::ChainActive().Height() - pindexFailure->nHeight + 1, nGoodTransactions);

    // store block count as we move pindex at check level >= 4
    int block_count = ::ChainActive().Height() - pindex->nHeight;

    // check level 4: try reconnecting blocks
    if (nCheckLevel >= 4) {
        while (pindex != ::ChainActive().Tip()) {
            const int percentageDone = std::max(1, std::min(99, 100 - (int)(((double)(::ChainActive().Height() - pindex->nHeight)) / (double)nCheckDepth * 50)));
            if (reportDone < percentageDone/10) {
                // report every 10% step
                LogPrintf("[%d%%]...", percentageDone); /* Continued */
                reportDone = percentageDone/10;
            }
            uiInterface.ShowProgress(_("Verifying blocks...").translated, percentageDone, false);
            pindex = ::ChainActive().Next(pindex);
            CBlock block;
            if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus()))
                return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
            if (!::ChainstateActive().ConnectBlock(block, state, pindex, coins, dummyNames, chainparams))
                return error("VerifyDB(): *** found unconnectable block at %d, hash=%s (%s)", pindex->nHeight, pindex->GetBlockHash().ToString(), state.ToString());
            if (ShutdownRequested()) return true;
        }
    }

    LogPrintf("[DONE].\n");
    LogPrintf("No coin database inconsistencies in last %i blocks (%i transactions)\n", block_count, nGoodTransactions);

    return true;
}
4461
4462 /** Apply the effects of a block on the utxo cache, ignoring that it may already have been applied. */
RollforwardBlock(const CBlockIndex * pindex,CCoinsViewCache & inputs,const CChainParams & params)4463 bool CChainState::RollforwardBlock(const CBlockIndex* pindex, CCoinsViewCache& inputs, const CChainParams& params)
4464 {
4465 // TODO: merge with ConnectBlock
4466 CBlock block;
4467 if (!ReadBlockFromDisk(block, pindex, params.GetConsensus())) {
4468 return error("ReplayBlock(): ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
4469 }
4470
4471 for (const CTransactionRef& tx : block.vtx) {
4472 if (!tx->IsCoinBase()) {
4473 for (const CTxIn &txin : tx->vin) {
4474 inputs.SpendCoin(txin.prevout);
4475 }
4476 }
4477 // Pass check = true as every addition may be an overwrite.
4478 AddCoins(inputs, *tx, pindex->nHeight, true);
4479 }
4480 return true;
4481 }
4482
// Bring the coins database back to a consistent state after an interrupted
// flush: disconnect blocks along the old branch recorded in the DB head
// markers, then roll forward to the new tip.
bool CChainState::ReplayBlocks(const CChainParams& params)
{
    LOCK(cs_main);

    CCoinsView& db = this->CoinsDB();
    CCoinsViewCache cache(&db);

    // GetHeadBlocks() returns {new tip, old tip} only while a flush is
    // incomplete; an empty result means no replay is needed.
    std::vector<uint256> hashHeads = db.GetHeadBlocks();
    if (hashHeads.empty()) return true; // We're already in a consistent state.
    if (hashHeads.size() != 2) return error("ReplayBlocks(): unknown inconsistent state");

    uiInterface.ShowProgress(_("Replaying blocks...").translated, 0, false);
    LogPrintf("Replaying blocks\n");

    const CBlockIndex* pindexOld = nullptr;  // Old tip during the interrupted flush.
    const CBlockIndex* pindexNew;            // New tip during the interrupted flush.
    const CBlockIndex* pindexFork = nullptr; // Latest block common to both the old and the new tip.

    if (m_blockman.m_block_index.count(hashHeads[0]) == 0) {
        return error("ReplayBlocks(): reorganization to unknown block requested");
    }
    pindexNew = m_blockman.m_block_index[hashHeads[0]];

    if (!hashHeads[1].IsNull()) { // The old tip is allowed to be 0, indicating it's the first flush.
        if (m_blockman.m_block_index.count(hashHeads[1]) == 0) {
            return error("ReplayBlocks(): reorganization from unknown block requested");
        }
        pindexOld = m_blockman.m_block_index[hashHeads[1]];
        pindexFork = LastCommonAncestor(pindexOld, pindexNew);
        assert(pindexFork != nullptr);
    }

    // Rollback along the old branch.
    while (pindexOld != pindexFork) {
        if (pindexOld->nHeight > 0) { // Never disconnect the genesis block.
            CBlock block;
            if (!ReadBlockFromDisk(block, pindexOld, params.GetConsensus())) {
                return error("RollbackBlock(): ReadBlockFromDisk() failed at %d, hash=%s", pindexOld->nHeight, pindexOld->GetBlockHash().ToString());
            }
            LogPrintf("Rolling back %s (%i)\n", pindexOld->GetBlockHash().ToString(), pindexOld->nHeight);
            std::set<valtype> dummyNames;
            DisconnectResult res = DisconnectBlock(block, pindexOld, cache, dummyNames);
            if (res == DISCONNECT_FAILED) {
                return error("RollbackBlock(): DisconnectBlock failed at %d, hash=%s", pindexOld->nHeight, pindexOld->GetBlockHash().ToString());
            }
            // If DISCONNECT_UNCLEAN is returned, it means a non-existing UTXO was deleted, or an existing UTXO was
            // overwritten. It corresponds to cases where the block-to-be-disconnect never had all its operations
            // applied to the UTXO set. However, as both writing a UTXO and deleting a UTXO are idempotent operations,
            // the result is still a version of the UTXO set with the effects of that block undone.
        }
        pindexOld = pindexOld->pprev;
    }

    // Roll forward from the forking point to the new tip.
    int nForkHeight = pindexFork ? pindexFork->nHeight : 0;
    for (int nHeight = nForkHeight + 1; nHeight <= pindexNew->nHeight; ++nHeight) {
        const CBlockIndex* pindex = pindexNew->GetAncestor(nHeight);
        LogPrintf("Rolling forward %s (%i)\n", pindex->GetBlockHash().ToString(), nHeight);
        uiInterface.ShowProgress(_("Replaying blocks...").translated, (int) ((nHeight - nForkHeight) * 100.0 / (pindexNew->nHeight - nForkHeight)) , false);
        if (!RollforwardBlock(pindex, cache, params)) return false;
    }

    // Persist the now-consistent view; this clears the DB head markers.
    cache.SetBestBlock(pindexNew->GetBlockHash());
    cache.Flush();
    uiInterface.ShowProgress("", 100, false);
    return true;
}
4550
//! Helper for CChainState::RewindBlockIndex
// Strip an (inactive) block index entry of its data: demote validity to at
// most BLOCK_VALID_TREE, drop have-data flags and storage location, and fix
// up the candidate/unlinked bookkeeping accordingly.
void CChainState::EraseBlockData(CBlockIndex* index)
{
    AssertLockHeld(cs_main);
    assert(!m_chain.Contains(index)); // Make sure this block isn't active

    // Reduce validity
    index->nStatus = std::min<unsigned int>(index->nStatus & BLOCK_VALID_MASK, BLOCK_VALID_TREE) | (index->nStatus & ~BLOCK_VALID_MASK);
    // Remove have-data flags.
    index->nStatus &= ~(BLOCK_HAVE_DATA | BLOCK_HAVE_UNDO);
    // Remove storage location.
    index->nFile = 0;
    index->nDataPos = 0;
    index->nUndoPos = 0;
    // Remove various other things
    index->nTx = 0;
    index->nChainTx = 0;
    index->nSequenceId = 0;
    // Make sure it gets written.
    setDirtyBlockIndex.insert(index);
    // Update indexes
    setBlockIndexCandidates.erase(index);
    // Drop any "unlinked" records pointing from the parent to this block.
    auto ret = m_blockman.m_blocks_unlinked.equal_range(index->pprev);
    while (ret.first != ret.second) {
        if (ret.first->second == index) {
            m_blockman.m_blocks_unlinked.erase(ret.first++);
        } else {
            ++ret.first;
        }
    }
    // Mark parent as eligible for main chain again
    if (index->pprev && index->pprev->IsValid(BLOCK_VALID_TRANSACTIONS) && index->pprev->HaveTxsDownloaded()) {
        setBlockIndexCandidates.insert(index->pprev);
    }
}
4586
// Rewind the chain so blocks accepted without now-required validation rules
// (segwit witness data) are disconnected and re-downloaded/re-validated.
// Returns false on disconnect or flush failure.
bool CChainState::RewindBlockIndex(const CChainParams& params)
{
    // Note that during -reindex-chainstate we are called with an empty m_chain!

    // First erase all post-segwit blocks without witness not in the main chain,
    // as this can we done without costly DisconnectTip calls. Active
    // blocks will be dealt with below (releasing cs_main in between).
    {
        LOCK(cs_main);
        for (const auto& entry : m_blockman.m_block_index) {
            if (IsWitnessEnabled(entry.second->pprev, params.GetConsensus()) && !(entry.second->nStatus & BLOCK_OPT_WITNESS) && !m_chain.Contains(entry.second)) {
                EraseBlockData(entry.second);
            }
        }
    }

    // Find what height we need to reorganize to.
    CBlockIndex *tip;
    int nHeight = 1;
    {
        LOCK(cs_main);
        while (nHeight <= m_chain.Height()) {
            // Although SCRIPT_VERIFY_WITNESS is now generally enforced on all
            // blocks in ConnectBlock, we don't need to go back and
            // re-download/re-verify blocks from before segwit actually activated.
            if (IsWitnessEnabled(m_chain[nHeight - 1], params.GetConsensus()) && !(m_chain[nHeight]->nStatus & BLOCK_OPT_WITNESS)) {
                break;
            }
            nHeight++;
        }

        tip = m_chain.Tip();
    }
    // nHeight is now the height of the first insufficiently-validated block, or tipheight + 1

    BlockValidationState state;
    // Loop until the tip is below nHeight, or we reach a pruned block.
    // cs_main is released every iteration so the validation queue can drain.
    while (!ShutdownRequested()) {
        {
            LOCK(cs_main);
            LOCK(m_mempool.cs);
            // Make sure nothing changed from under us (this won't happen because RewindBlockIndex runs before importing/network are active)
            assert(tip == m_chain.Tip());
            if (tip == nullptr || tip->nHeight < nHeight) break;
            if (fPruneMode && !(tip->nStatus & BLOCK_HAVE_DATA)) {
                // If pruning, don't try rewinding past the HAVE_DATA point;
                // since older blocks can't be served anyway, there's
                // no need to walk further, and trying to DisconnectTip()
                // will fail (and require a needless reindex/redownload
                // of the blockchain).
                break;
            }

            // Disconnect block
            if (!DisconnectTip(state, params, nullptr)) {
                return error("RewindBlockIndex: unable to disconnect block at height %i (%s)", tip->nHeight, state.ToString());
            }

            // Reduce validity flag and have-data flags.
            // We do this after actual disconnecting, otherwise we'll end up writing the lack of data
            // to disk before writing the chainstate, resulting in a failure to continue if interrupted.
            // Note: If we encounter an insufficiently validated block that
            // is on m_chain, it must be because we are a pruning node, and
            // this block or some successor doesn't HAVE_DATA, so we were unable to
            // rewind all the way. Blocks remaining on m_chain at this point
            // must not have their validity reduced.
            EraseBlockData(tip);

            tip = tip->pprev;
        }
        // Make sure the queue of validation callbacks doesn't grow unboundedly.
        LimitValidationInterfaceQueue();

        // Occasionally flush state to disk.
        if (!FlushStateToDisk(params, state, FlushStateMode::PERIODIC)) {
            LogPrintf("RewindBlockIndex: unable to flush state to disk (%s)\n", state.ToString());
            return false;
        }
    }

    {
        LOCK(cs_main);
        if (m_chain.Tip() != nullptr) {
            // We can't prune block index candidates based on our tip if we have
            // no tip due to m_chain being empty!
            PruneBlockIndexCandidates();

            CheckBlockIndex(params.GetConsensus());

            // FlushStateToDisk can possibly read ::ChainActive(). Be conservative
            // and skip it here, we're about to -reindex-chainstate anyway, so
            // it'll get called a bunch real soon.
            BlockValidationState state;
            if (!FlushStateToDisk(params, state, FlushStateMode::ALWAYS)) {
                LogPrintf("RewindBlockIndex: unable to flush state to disk (%s)\n", state.ToString());
                return false;
            }
        }
    }

    return true;
}
4689
UnloadBlockIndex()4690 void CChainState::UnloadBlockIndex() {
4691 nBlockSequenceId = 1;
4692 setBlockIndexCandidates.clear();
4693 }
4694
// May NOT be used after any connections are up as much
// of the peer-processing logic assumes a consistent
// block index state
// Clear the in-memory block index and all related global caches so a fresh
// index can be loaded (e.g. for reindexing or in tests).
void UnloadBlockIndex(CTxMemPool* mempool, ChainstateManager& chainman)
{
    LOCK(cs_main);
    chainman.Unload();
    pindexBestInvalid = nullptr;
    pindexBestHeader = nullptr;
    // mempool may be null (e.g. when called without a mempool instance).
    if (mempool) mempool->clear();
    // Reset block-file tracking and dirty-entry sets.
    vinfoBlockFile.clear();
    nLastBlockFile = 0;
    setDirtyBlockIndex.clear();
    setDirtyFileInfo.clear();
    // Drop versionbits state and per-bit warning caches.
    versionbitscache.Clear();
    for (int b = 0; b < VERSIONBITS_NUM_BITS; b++) {
        warningcache[b].clear();
    }
    fHavePruned = false;
}
4715
LoadBlockIndex(const CChainParams & chainparams)4716 bool ChainstateManager::LoadBlockIndex(const CChainParams& chainparams)
4717 {
4718 AssertLockHeld(cs_main);
4719 // Load block index from databases
4720 bool needs_init = fReindex;
4721 if (!fReindex) {
4722 bool ret = LoadBlockIndexDB(*this, chainparams);
4723 if (!ret) return false;
4724 needs_init = m_blockman.m_block_index.empty();
4725 }
4726
4727 if (needs_init) {
4728 // Everything here is for *new* reindex/DBs. Thus, though
4729 // LoadBlockIndexDB may have set fReindex if we shut down
4730 // mid-reindex previously, we don't check fReindex and
4731 // instead only check it prior to LoadBlockIndexDB to set
4732 // needs_init.
4733
4734 LogPrintf("Initializing databases...\n");
4735 fNameHistory = gArgs.GetBoolArg("-namehistory", false);
4736 pblocktree->WriteFlag("namehistory", fNameHistory);
4737 }
4738 return true;
4739 }
4740
LoadGenesisBlock(const CChainParams & chainparams)4741 bool CChainState::LoadGenesisBlock(const CChainParams& chainparams)
4742 {
4743 LOCK(cs_main);
4744
4745 // Check whether we're already initialized by checking for genesis in
4746 // m_blockman.m_block_index. Note that we can't use m_chain here, since it is
4747 // set based on the coins db, not the block index db, which is the only
4748 // thing loaded at this point.
4749 if (m_blockman.m_block_index.count(chainparams.GenesisBlock().GetHash()))
4750 return true;
4751
4752 try {
4753 const CBlock& block = chainparams.GenesisBlock();
4754 FlatFilePos blockPos = SaveBlockToDisk(block, 0, chainparams, nullptr);
4755 if (blockPos.IsNull())
4756 return error("%s: writing genesis block to disk failed", __func__);
4757 CBlockIndex *pindex = m_blockman.AddToBlockIndex(block);
4758 ReceivedBlockTransactions(block, pindex, blockPos, chainparams.GetConsensus());
4759 } catch (const std::runtime_error& e) {
4760 return error("%s: failed to write genesis block: %s", __func__, e.what());
4761 }
4762
4763 return true;
4764 }
4765
LoadGenesisBlock(const CChainParams & chainparams)4766 bool LoadGenesisBlock(const CChainParams& chainparams)
4767 {
4768 return ::ChainstateActive().LoadGenesisBlock(chainparams);
4769 }
4770
// Import blocks from an external blk file (used by -reindex and -loadblock).
// Scans for network magic, deserializes each block, accepts it if its parent
// is known, and defers out-of-order blocks (keyed by parent hash) until the
// parent arrives. dbp, when non-null, carries the file/position for reindex.
void LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, FlatFilePos* dbp)
{
    // Map of disk positions for blocks with unknown parent (only used for reindex)
    static std::multimap<uint256, FlatFilePos> mapBlocksUnknownParent;
    int64_t nStart = GetTimeMillis();

    int nLoaded = 0;
    try {
        // This takes over fileIn and calls fclose() on it in the CBufferedFile destructor
        CBufferedFile blkdat(fileIn, 2*MAX_BLOCK_SERIALIZED_SIZE, MAX_BLOCK_SERIALIZED_SIZE+8, SER_DISK, CLIENT_VERSION);
        uint64_t nRewind = blkdat.GetPos();
        while (!blkdat.eof()) {
            if (ShutdownRequested()) return;

            blkdat.SetPos(nRewind);
            nRewind++; // start one byte further next time, in case of failure
            blkdat.SetLimit(); // remove former limit
            unsigned int nSize = 0;
            try {
                // locate a header
                unsigned char buf[CMessageHeader::MESSAGE_START_SIZE];
                blkdat.FindByte(chainparams.MessageStart()[0]);
                nRewind = blkdat.GetPos()+1;
                blkdat >> buf;
                if (memcmp(buf, chainparams.MessageStart(), CMessageHeader::MESSAGE_START_SIZE))
                    continue;
                // read size
                blkdat >> nSize;
                // Reject sizes smaller than a bare header or larger than the
                // maximum serialized block.
                if (nSize < 80 || nSize > MAX_BLOCK_SERIALIZED_SIZE)
                    continue;
            } catch (const std::exception&) {
                // no valid block header found; don't complain
                break;
            }
            try {
                // read block
                uint64_t nBlockPos = blkdat.GetPos();
                if (dbp)
                    dbp->nPos = nBlockPos;
                blkdat.SetLimit(nBlockPos + nSize);
                std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
                CBlock& block = *pblock;
                blkdat >> block;
                nRewind = blkdat.GetPos();

                uint256 hash = block.GetHash();
                {
                    LOCK(cs_main);
                    // detect out of order blocks, and store them for later
                    if (hash != chainparams.GetConsensus().hashGenesisBlock && !LookupBlockIndex(block.hashPrevBlock)) {
                        LogPrint(BCLog::REINDEX, "%s: Out of order block %s, parent %s not known\n", __func__, hash.ToString(),
                                 block.hashPrevBlock.ToString());
                        if (dbp)
                            mapBlocksUnknownParent.insert(std::make_pair(block.hashPrevBlock, *dbp));
                        continue;
                    }

                    // process in case the block isn't known yet
                    CBlockIndex* pindex = LookupBlockIndex(hash);
                    if (!pindex || (pindex->nStatus & BLOCK_HAVE_DATA) == 0) {
                        BlockValidationState state;
                        if (::ChainstateActive().AcceptBlock(pblock, state, chainparams, nullptr, true, dbp, nullptr)) {
                            nLoaded++;
                        }
                        if (state.IsError()) {
                            break;
                        }
                    } else if (hash != chainparams.GetConsensus().hashGenesisBlock && pindex->nHeight % 1000 == 0) {
                        LogPrint(BCLog::REINDEX, "Block Import: already had block %s at height %d\n", hash.ToString(), pindex->nHeight);
                    }
                }

                // Activate the genesis block so normal node progress can continue
                if (hash == chainparams.GetConsensus().hashGenesisBlock) {
                    BlockValidationState state;
                    if (!ActivateBestChain(state, chainparams, nullptr)) {
                        break;
                    }
                }

                NotifyHeaderTip();

                // Recursively process earlier encountered successors of this block
                std::deque<uint256> queue;
                queue.push_back(hash);
                while (!queue.empty()) {
                    uint256 head = queue.front();
                    queue.pop_front();
                    // All deferred children of `head` become processable now.
                    std::pair<std::multimap<uint256, FlatFilePos>::iterator, std::multimap<uint256, FlatFilePos>::iterator> range = mapBlocksUnknownParent.equal_range(head);
                    while (range.first != range.second) {
                        std::multimap<uint256, FlatFilePos>::iterator it = range.first;
                        std::shared_ptr<CBlock> pblockrecursive = std::make_shared<CBlock>();
                        if (ReadBlockFromDisk(*pblockrecursive, it->second, chainparams.GetConsensus()))
                        {
                            LogPrint(BCLog::REINDEX, "%s: Processing out of order child %s of %s\n", __func__, pblockrecursive->GetHash().ToString(),
                                     head.ToString());
                            LOCK(cs_main);
                            BlockValidationState dummy;
                            if (::ChainstateActive().AcceptBlock(pblockrecursive, dummy, chainparams, nullptr, true, &it->second, nullptr))
                            {
                                nLoaded++;
                                queue.push_back(pblockrecursive->GetHash());
                            }
                        }
                        range.first++;
                        mapBlocksUnknownParent.erase(it);
                        NotifyHeaderTip();
                    }
                }
            } catch (const std::exception& e) {
                // Deserialization failure of one block: log and keep scanning
                // from the rewind position.
                LogPrintf("%s: Deserialize or I/O error - %s\n", __func__, e.what());
            }
        }
    } catch (const std::runtime_error& e) {
        AbortNode(std::string("System error: ") + e.what());
    }
    LogPrintf("Loaded %i blocks from external file in %dms\n", nLoaded, GetTimeMillis() - nStart);
}
4889
/**
 * Integrity check of the in-memory block index against the structures derived
 * from it (m_chain, setBlockIndexCandidates, m_blocks_unlinked). Walks the
 * entire block tree depth-first, tracking for the current root-to-node path
 * the oldest ancestor that first exhibits each property of interest, and
 * asserts the documented invariants at every node. No-op unless
 * -checkblockindex is enabled (fCheckBlockIndex); the cost is linear in the
 * number of known block headers, so this is intended for tests/debugging.
 */
void CChainState::CheckBlockIndex(const Consensus::Params& consensusParams)
{
    if (!fCheckBlockIndex) {
        return;
    }

    LOCK(cs_main);

    // During a reindex, we read the genesis block and call CheckBlockIndex before ActivateBestChain,
    // so we have the genesis block in m_blockman.m_block_index but no active chain. (A few of the
    // tests when iterating the block tree require that m_chain has been initialized.)
    if (m_chain.Height() < 0) {
        assert(m_blockman.m_block_index.size() <= 1);
        return;
    }

    // Build forward-pointing map of the entire block tree.
    // (The index itself only stores pprev back-pointers; the multimap lets the
    // DFS below enumerate each node's children.)
    std::multimap<CBlockIndex*,CBlockIndex*> forward;
    for (const std::pair<const uint256, CBlockIndex*>& entry : m_blockman.m_block_index) {
        forward.insert(std::make_pair(entry.second->pprev, entry.second));
    }

    assert(forward.size() == m_blockman.m_block_index.size());

    // The DFS starts at the unique entry whose parent is nullptr: the genesis block.
    std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeGenesis = forward.equal_range(nullptr);
    CBlockIndex *pindex = rangeGenesis.first->second;
    rangeGenesis.first++;
    assert(rangeGenesis.first == rangeGenesis.second); // There is only one index entry with parent nullptr.

    // Iterate over the entire block tree, using depth-first search.
    // Along the way, remember whether there are blocks on the path from genesis
    // block being explored which are the first to have certain properties.
    size_t nNodes = 0;
    int nHeight = 0;
    CBlockIndex* pindexFirstInvalid = nullptr; // Oldest ancestor of pindex which is invalid.
    CBlockIndex* pindexFirstMissing = nullptr; // Oldest ancestor of pindex which does not have BLOCK_HAVE_DATA.
    CBlockIndex* pindexFirstNeverProcessed = nullptr; // Oldest ancestor of pindex for which nTx == 0.
    CBlockIndex* pindexFirstNotTreeValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_TREE (regardless of being valid or not).
    CBlockIndex* pindexFirstNotTransactionsValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS (regardless of being valid or not).
    CBlockIndex* pindexFirstNotChainValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN (regardless of being valid or not).
    CBlockIndex* pindexFirstNotScriptsValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS (regardless of being valid or not).
    while (pindex != nullptr) {
        nNodes++;
        // Update the "first ancestor with property X" trackers for the path
        // that ends at the node being visited. They are unset again on ascent.
        if (pindexFirstInvalid == nullptr && pindex->nStatus & BLOCK_FAILED_VALID) pindexFirstInvalid = pindex;
        if (pindexFirstMissing == nullptr && !(pindex->nStatus & BLOCK_HAVE_DATA)) pindexFirstMissing = pindex;
        if (pindexFirstNeverProcessed == nullptr && pindex->nTx == 0) pindexFirstNeverProcessed = pindex;
        if (pindex->pprev != nullptr && pindexFirstNotTreeValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TREE) pindexFirstNotTreeValid = pindex;
        if (pindex->pprev != nullptr && pindexFirstNotTransactionsValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TRANSACTIONS) pindexFirstNotTransactionsValid = pindex;
        if (pindex->pprev != nullptr && pindexFirstNotChainValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_CHAIN) pindexFirstNotChainValid = pindex;
        if (pindex->pprev != nullptr && pindexFirstNotScriptsValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_SCRIPTS) pindexFirstNotScriptsValid = pindex;

        // Begin: actual consistency checks.
        if (pindex->pprev == nullptr) {
            // Genesis block checks.
            assert(pindex->GetBlockHash() == consensusParams.hashGenesisBlock); // Genesis block's hash must match.
            assert(pindex == m_chain.Genesis()); // The current active chain's genesis block must be this block.
        }
        if (!pindex->HaveTxsDownloaded()) assert(pindex->nSequenceId <= 0); // nSequenceId can't be set positive for blocks that aren't linked (negative is used for preciousblock)
        // VALID_TRANSACTIONS is equivalent to nTx > 0 for all nodes (whether or not pruning has occurred).
        // HAVE_DATA is only equivalent to nTx > 0 (or VALID_TRANSACTIONS) if no pruning has occurred.
        if (!fHavePruned) {
            // If we've never pruned, then HAVE_DATA should be equivalent to nTx > 0
            assert(!(pindex->nStatus & BLOCK_HAVE_DATA) == (pindex->nTx == 0));
            assert(pindexFirstMissing == pindexFirstNeverProcessed);
        } else {
            // If we have pruned, then we can only say that HAVE_DATA implies nTx > 0
            if (pindex->nStatus & BLOCK_HAVE_DATA) assert(pindex->nTx > 0);
        }
        if (pindex->nStatus & BLOCK_HAVE_UNDO) assert(pindex->nStatus & BLOCK_HAVE_DATA);
        assert(((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TRANSACTIONS) == (pindex->nTx > 0)); // This is pruning-independent.
        // All parents having had data (at some point) is equivalent to all parents being VALID_TRANSACTIONS, which is equivalent to HaveTxsDownloaded().
        assert((pindexFirstNeverProcessed == nullptr) == pindex->HaveTxsDownloaded());
        assert((pindexFirstNotTransactionsValid == nullptr) == pindex->HaveTxsDownloaded());
        assert(pindex->nHeight == nHeight); // nHeight must be consistent.
        assert(pindex->pprev == nullptr || pindex->nChainWork >= pindex->pprev->nChainWork); // For every block except the genesis block, the chainwork must be larger than the parent's.
        assert(nHeight < 2 || (pindex->pskip && (pindex->pskip->nHeight < nHeight))); // The pskip pointer must point back for all but the first 2 blocks.
        assert(pindexFirstNotTreeValid == nullptr); // All m_blockman.m_block_index entries must at least be TREE valid
        if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TREE) assert(pindexFirstNotTreeValid == nullptr); // TREE valid implies all parents are TREE valid
        if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_CHAIN) assert(pindexFirstNotChainValid == nullptr); // CHAIN valid implies all parents are CHAIN valid
        if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_SCRIPTS) assert(pindexFirstNotScriptsValid == nullptr); // SCRIPTS valid implies all parents are SCRIPTS valid
        if (pindexFirstInvalid == nullptr) {
            // Checks for not-invalid blocks.
            assert((pindex->nStatus & BLOCK_FAILED_MASK) == 0); // The failed mask cannot be set for blocks without invalid parents.
        }
        if (!CBlockIndexWorkComparator()(pindex, m_chain.Tip()) && pindexFirstNeverProcessed == nullptr) {
            if (pindexFirstInvalid == nullptr) {
                // If this block sorts at least as good as the current tip and
                // is valid and we have all data for its parents, it must be in
                // setBlockIndexCandidates. m_chain.Tip() must also be there
                // even if some data has been pruned.
                if (pindexFirstMissing == nullptr || pindex == m_chain.Tip()) {
                    assert(setBlockIndexCandidates.count(pindex));
                }
                // If some parent is missing, then it could be that this block was in
                // setBlockIndexCandidates but had to be removed because of the missing data.
                // In this case it must be in m_blocks_unlinked -- see test below.
            }
        } else { // If this block sorts worse than the current tip or some ancestor's block has never been seen, it cannot be in setBlockIndexCandidates.
            assert(setBlockIndexCandidates.count(pindex) == 0);
        }
        // Check whether this block is in m_blocks_unlinked.
        std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeUnlinked = m_blockman.m_blocks_unlinked.equal_range(pindex->pprev);
        bool foundInUnlinked = false;
        while (rangeUnlinked.first != rangeUnlinked.second) {
            assert(rangeUnlinked.first->first == pindex->pprev);
            if (rangeUnlinked.first->second == pindex) {
                foundInUnlinked = true;
                break;
            }
            rangeUnlinked.first++;
        }
        if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed != nullptr && pindexFirstInvalid == nullptr) {
            // If this block has block data available, some parent was never received, and has no invalid parents, it must be in m_blocks_unlinked.
            assert(foundInUnlinked);
        }
        if (!(pindex->nStatus & BLOCK_HAVE_DATA)) assert(!foundInUnlinked); // Can't be in m_blocks_unlinked if we don't HAVE_DATA
        if (pindexFirstMissing == nullptr) assert(!foundInUnlinked); // We aren't missing data for any parent -- cannot be in m_blocks_unlinked.
        if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed == nullptr && pindexFirstMissing != nullptr) {
            // We HAVE_DATA for this block, have received data for all parents at some point, but we're currently missing data for some parent.
            assert(fHavePruned); // We must have pruned.
            // This block may have entered m_blocks_unlinked if:
            //  - it has a descendant that at some point had more work than the
            //    tip, and
            //  - we tried switching to that descendant but were missing
            //    data for some intermediate block between m_chain and the
            //    tip.
            // So if this block is itself better than m_chain.Tip() and it wasn't in
            // setBlockIndexCandidates, then it must be in m_blocks_unlinked.
            if (!CBlockIndexWorkComparator()(pindex, m_chain.Tip()) && setBlockIndexCandidates.count(pindex) == 0) {
                if (pindexFirstInvalid == nullptr) {
                    assert(foundInUnlinked);
                }
            }
        }
        // assert(pindex->GetBlockHash() == pindex->GetBlockHeader().GetHash()); // Perhaps too slow
        // End: actual consistency checks.

        // Try descending into the first subnode.
        std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> range = forward.equal_range(pindex);
        if (range.first != range.second) {
            // A subnode was found.
            pindex = range.first->second;
            nHeight++;
            continue;
        }
        // This is a leaf node.
        // Move upwards until we reach a node of which we have not yet visited the last child.
        while (pindex) {
            // We are going to either move to a parent or a sibling of pindex.
            // If pindex was the first with a certain property, unset the corresponding variable.
            if (pindex == pindexFirstInvalid) pindexFirstInvalid = nullptr;
            if (pindex == pindexFirstMissing) pindexFirstMissing = nullptr;
            if (pindex == pindexFirstNeverProcessed) pindexFirstNeverProcessed = nullptr;
            if (pindex == pindexFirstNotTreeValid) pindexFirstNotTreeValid = nullptr;
            if (pindex == pindexFirstNotTransactionsValid) pindexFirstNotTransactionsValid = nullptr;
            if (pindex == pindexFirstNotChainValid) pindexFirstNotChainValid = nullptr;
            if (pindex == pindexFirstNotScriptsValid) pindexFirstNotScriptsValid = nullptr;
            // Find our parent.
            CBlockIndex* pindexPar = pindex->pprev;
            // Find which child we just visited.
            std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangePar = forward.equal_range(pindexPar);
            while (rangePar.first->second != pindex) {
                assert(rangePar.first != rangePar.second); // Our parent must have at least the node we're coming from as child.
                rangePar.first++;
            }
            // Proceed to the next one.
            rangePar.first++;
            if (rangePar.first != rangePar.second) {
                // Move to the sibling.
                pindex = rangePar.first->second;
                break;
            } else {
                // Move up further.
                pindex = pindexPar;
                nHeight--;
                continue;
            }
        }
    }

    // Check that we actually traversed the entire map.
    assert(nNodes == forward.size());
}
5073
ToString()5074 std::string CChainState::ToString()
5075 {
5076 CBlockIndex* tip = m_chain.Tip();
5077 return strprintf("Chainstate [%s] @ height %d (%s)",
5078 m_from_snapshot_blockhash.IsNull() ? "ibd" : "snapshot",
5079 tip ? tip->nHeight : -1, tip ? tip->GetBlockHash().ToString() : "null");
5080 }
5081
ResizeCoinsCaches(size_t coinstip_size,size_t coinsdb_size)5082 bool CChainState::ResizeCoinsCaches(size_t coinstip_size, size_t coinsdb_size)
5083 {
5084 if (coinstip_size == m_coinstip_cache_size_bytes &&
5085 coinsdb_size == m_coinsdb_cache_size_bytes) {
5086 // Cache sizes are unchanged, no need to continue.
5087 return true;
5088 }
5089 size_t old_coinstip_size = m_coinstip_cache_size_bytes;
5090 m_coinstip_cache_size_bytes = coinstip_size;
5091 m_coinsdb_cache_size_bytes = coinsdb_size;
5092 CoinsDB().ResizeCache(coinsdb_size);
5093
5094 LogPrintf("[%s] resized coinsdb cache to %.1f MiB\n",
5095 this->ToString(), coinsdb_size * (1.0 / 1024 / 1024));
5096 LogPrintf("[%s] resized coinstip cache to %.1f MiB\n",
5097 this->ToString(), coinstip_size * (1.0 / 1024 / 1024));
5098
5099 BlockValidationState state;
5100 const CChainParams& chainparams = Params();
5101
5102 bool ret;
5103
5104 if (coinstip_size > old_coinstip_size) {
5105 // Likely no need to flush if cache sizes have grown.
5106 ret = FlushStateToDisk(chainparams, state, FlushStateMode::IF_NEEDED);
5107 } else {
5108 // Otherwise, flush state to disk and deallocate the in-memory coins map.
5109 ret = FlushStateToDisk(chainparams, state, FlushStateMode::ALWAYS);
5110 CoinsTip().ReallocateCache();
5111 }
5112 return ret;
5113 }
5114
ToString() const5115 std::string CBlockFileInfo::ToString() const
5116 {
5117 return strprintf("CBlockFileInfo(blocks=%u, size=%u, heights=%u...%u, time=%s...%s)", nBlocks, nSize, nHeightFirst, nHeightLast, FormatISO8601Date(nTimeFirst), FormatISO8601Date(nTimeLast));
5118 }
5119
GetBlockFileInfo(size_t n)5120 CBlockFileInfo* GetBlockFileInfo(size_t n)
5121 {
5122 LOCK(cs_LastBlockFile);
5123
5124 return &vinfoBlockFile.at(n);
5125 }
5126
VersionBitsTipState(const Consensus::Params & params,Consensus::DeploymentPos pos)5127 ThresholdState VersionBitsTipState(const Consensus::Params& params, Consensus::DeploymentPos pos)
5128 {
5129 LOCK(cs_main);
5130 return VersionBitsState(::ChainActive().Tip(), params, pos, versionbitscache);
5131 }
5132
VersionBitsTipStatistics(const Consensus::Params & params,Consensus::DeploymentPos pos)5133 BIP9Stats VersionBitsTipStatistics(const Consensus::Params& params, Consensus::DeploymentPos pos)
5134 {
5135 LOCK(cs_main);
5136 return VersionBitsStatistics(::ChainActive().Tip(), params, pos);
5137 }
5138
VersionBitsTipStateSinceHeight(const Consensus::Params & params,Consensus::DeploymentPos pos)5139 int VersionBitsTipStateSinceHeight(const Consensus::Params& params, Consensus::DeploymentPos pos)
5140 {
5141 LOCK(cs_main);
5142 return VersionBitsStateSinceHeight(::ChainActive().Tip(), params, pos, versionbitscache);
5143 }
5144
// Serialization format version of mempool.dat; bumped when the layout changes.
static const uint64_t MEMPOOL_DUMP_VERSION = 1;

/**
 * Load persisted mempool contents from <datadir>/mempool.dat and resubmit
 * them to the mempool. Transactions older than -mempoolexpiry are skipped;
 * stored fee deltas are re-applied. Returns false on a missing/unreadable
 * file, a version mismatch, a deserialization failure, or a shutdown request
 * mid-load; callers treat all of these as non-fatal ("Continuing anyway").
 */
bool LoadMempool(CTxMemPool& pool)
{
    const CChainParams& chainparams = Params();
    // -mempoolexpiry is in hours; convert to seconds.
    int64_t nExpiryTimeout = gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60;
    FILE* filestr = fsbridge::fopen(GetDataDir() / "mempool.dat", "rb");
    CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);
    if (file.IsNull()) {
        LogPrintf("Failed to open mempool file from disk. Continuing anyway.\n");
        return false;
    }

    // Counters for the summary log line at the end.
    int64_t count = 0;
    int64_t expired = 0;
    int64_t failed = 0;
    int64_t already_there = 0;
    int64_t unbroadcast = 0;
    int64_t nNow = GetTime();

    try {
        uint64_t version;
        file >> version;
        if (version != MEMPOOL_DUMP_VERSION) {
            return false;
        }
        uint64_t num;
        file >> num;
        while (num--) {
            CTransactionRef tx;
            int64_t nTime;
            int64_t nFeeDelta;
            file >> tx;
            file >> nTime;
            file >> nFeeDelta;

            // Re-apply the stored fee delta before (re-)submission so that it
            // is in place when the transaction is evaluated below.
            CAmount amountdelta = nFeeDelta;
            if (amountdelta) {
                pool.PrioritiseTransaction(tx->GetHash(), amountdelta);
            }
            TxValidationState state;
            if (nTime > nNow - nExpiryTimeout) {
                LOCK(cs_main);
                // Resubmit with the original acceptance time so in-pool expiry
                // accounting stays consistent across restarts.
                AcceptToMemoryPoolWithTime(chainparams, pool, state, tx, nTime,
                                           nullptr /* plTxnReplaced */, false /* bypass_limits */,
                                           false /* test_accept */);
                if (state.IsValid()) {
                    ++count;
                } else {
                    // mempool may contain the transaction already, e.g. from
                    // wallet(s) having loaded it while we were processing
                    // mempool transactions; consider these as valid, instead of
                    // failed, but mark them as 'already there'
                    if (pool.exists(tx->GetHash())) {
                        ++already_there;
                    } else {
                        ++failed;
                    }
                }
            } else {
                ++expired;
            }
            // Abort promptly on shutdown; the partially loaded mempool is fine.
            if (ShutdownRequested())
                return false;
        }
        // Fee deltas for transactions not currently in the file's tx list.
        std::map<uint256, CAmount> mapDeltas;
        file >> mapDeltas;

        for (const auto& i : mapDeltas) {
            pool.PrioritiseTransaction(i.first, i.second);
        }

        // TODO: remove this try except in v0.22
        std::set<uint256> unbroadcast_txids;
        try {
            file >> unbroadcast_txids;
            unbroadcast = unbroadcast_txids.size();
        } catch (const std::exception&) {
            // mempool.dat files created prior to v0.21 will not have an
            // unbroadcast set. No need to log a failure if parsing fails here.
        }
        for (const auto& txid : unbroadcast_txids) {
            // Ensure transactions were accepted to mempool then add to
            // unbroadcast set.
            if (pool.get(txid) != nullptr) pool.AddUnbroadcastTx(txid);
        }
    } catch (const std::exception& e) {
        LogPrintf("Failed to deserialize mempool data on disk: %s. Continuing anyway.\n", e.what());
        return false;
    }

    LogPrintf("Imported mempool transactions from disk: %i succeeded, %i failed, %i expired, %i already there, %i waiting for initial broadcast\n", count, failed, expired, already_there, unbroadcast);
    return true;
}
5239
/**
 * Persist the current mempool contents to <datadir>/mempool.dat. A snapshot
 * of the pool is taken under pool.cs, then written out without holding the
 * pool lock. The file is written to mempool.dat.new and atomically renamed
 * over mempool.dat after a successful FileCommit, so a crash mid-write never
 * corrupts an existing dump. Returns false on any I/O failure.
 */
bool DumpMempool(const CTxMemPool& pool)
{
    int64_t start = GetTimeMicros();

    std::map<uint256, CAmount> mapDeltas;
    std::vector<TxMempoolInfo> vinfo;
    std::set<uint256> unbroadcast_txids;

    // Serialize concurrent DumpMempool calls against each other (the pool
    // lock is released before the file is written).
    static Mutex dump_mutex;
    LOCK(dump_mutex);

    {
        // Copy everything we need out of the pool while holding its lock,
        // so the dump below works on a consistent snapshot.
        LOCK(pool.cs);
        for (const auto &i : pool.mapDeltas) {
            mapDeltas[i.first] = i.second;
        }
        vinfo = pool.infoAll();
        unbroadcast_txids = pool.GetUnbroadcastTxs();
    }

    int64_t mid = GetTimeMicros();

    try {
        FILE* filestr = fsbridge::fopen(GetDataDir() / "mempool.dat.new", "wb");
        if (!filestr) {
            return false;
        }

        CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);

        uint64_t version = MEMPOOL_DUMP_VERSION;
        file << version;

        file << (uint64_t)vinfo.size();
        for (const auto& i : vinfo) {
            file << *(i.tx);
            file << int64_t{count_seconds(i.m_time)};
            file << int64_t{i.nFeeDelta};
            // Deltas for in-pool transactions are stored inline above, so drop
            // them here; only deltas for absent transactions remain.
            mapDeltas.erase(i.tx->GetHash());
        }

        file << mapDeltas;

        LogPrintf("Writing %d unbroadcast transactions to disk.\n", unbroadcast_txids.size());
        file << unbroadcast_txids;

        // Flush to stable storage before the rename makes the dump visible.
        if (!FileCommit(file.Get()))
            throw std::runtime_error("FileCommit failed");
        file.fclose();
        RenameOver(GetDataDir() / "mempool.dat.new", GetDataDir() / "mempool.dat");
        int64_t last = GetTimeMicros();
        LogPrintf("Dumped mempool: %gs to copy, %gs to dump\n", (mid-start)*MICRO, (last-mid)*MICRO);
    } catch (const std::exception& e) {
        LogPrintf("Failed to dump mempool: %s. Continuing anyway.\n", e.what());
        return false;
    }
    return true;
}
5298
5299 //! Guess how far we are in the verification process at the given block index
5300 //! require cs_main if pindex has not been validated yet (because nChainTx might be unset)
GuessVerificationProgress(const ChainTxData & data,const CBlockIndex * pindex)5301 double GuessVerificationProgress(const ChainTxData& data, const CBlockIndex *pindex) {
5302 if (pindex == nullptr)
5303 return 0.0;
5304
5305 int64_t nNow = time(nullptr);
5306
5307 double fTxTotal;
5308
5309 if (pindex->nChainTx <= data.nTxCount) {
5310 fTxTotal = data.nTxCount + (nNow - data.nTime) * data.dTxRate;
5311 } else {
5312 fTxTotal = pindex->nChainTx + (nNow - pindex->GetBlockTime()) * data.dTxRate;
5313 }
5314
5315 return std::min<double>(pindex->nChainTx / fTxTotal, 1.0);
5316 }
5317
SnapshotBlockhash() const5318 Optional<uint256> ChainstateManager::SnapshotBlockhash() const {
5319 if (m_active_chainstate != nullptr) {
5320 // If a snapshot chainstate exists, it will always be our active.
5321 return m_active_chainstate->m_from_snapshot_blockhash;
5322 }
5323 return {};
5324 }
5325
GetAll()5326 std::vector<CChainState*> ChainstateManager::GetAll()
5327 {
5328 std::vector<CChainState*> out;
5329
5330 if (!IsSnapshotValidated() && m_ibd_chainstate) {
5331 out.push_back(m_ibd_chainstate.get());
5332 }
5333
5334 if (m_snapshot_chainstate) {
5335 out.push_back(m_snapshot_chainstate.get());
5336 }
5337
5338 return out;
5339 }
5340
InitializeChainstate(CTxMemPool & mempool,const uint256 & snapshot_blockhash)5341 CChainState& ChainstateManager::InitializeChainstate(CTxMemPool& mempool, const uint256& snapshot_blockhash)
5342 {
5343 bool is_snapshot = !snapshot_blockhash.IsNull();
5344 std::unique_ptr<CChainState>& to_modify =
5345 is_snapshot ? m_snapshot_chainstate : m_ibd_chainstate;
5346
5347 if (to_modify) {
5348 throw std::logic_error("should not be overwriting a chainstate");
5349 }
5350 to_modify.reset(new CChainState(mempool, m_blockman, snapshot_blockhash));
5351
5352 // Snapshot chainstates and initial IBD chaintates always become active.
5353 if (is_snapshot || (!is_snapshot && !m_active_chainstate)) {
5354 LogPrintf("Switching active chainstate to %s\n", to_modify->ToString());
5355 m_active_chainstate = to_modify.get();
5356 } else {
5357 throw std::logic_error("unexpected chainstate activation");
5358 }
5359
5360 return *to_modify;
5361 }
5362
ActiveChainstate() const5363 CChainState& ChainstateManager::ActiveChainstate() const
5364 {
5365 assert(m_active_chainstate);
5366 return *m_active_chainstate;
5367 }
5368
IsSnapshotActive() const5369 bool ChainstateManager::IsSnapshotActive() const
5370 {
5371 return m_snapshot_chainstate && m_active_chainstate == m_snapshot_chainstate.get();
5372 }
5373
ValidatedChainstate() const5374 CChainState& ChainstateManager::ValidatedChainstate() const
5375 {
5376 if (m_snapshot_chainstate && IsSnapshotValidated()) {
5377 return *m_snapshot_chainstate.get();
5378 }
5379 assert(m_ibd_chainstate);
5380 return *m_ibd_chainstate.get();
5381 }
5382
IsBackgroundIBD(CChainState * chainstate) const5383 bool ChainstateManager::IsBackgroundIBD(CChainState* chainstate) const
5384 {
5385 return (m_snapshot_chainstate && chainstate == m_ibd_chainstate.get());
5386 }
5387
Unload()5388 void ChainstateManager::Unload()
5389 {
5390 for (CChainState* chainstate : this->GetAll()) {
5391 chainstate->m_chain.SetTip(nullptr);
5392 chainstate->UnloadBlockIndex();
5393 }
5394
5395 m_blockman.Unload();
5396 }
5397
Reset()5398 void ChainstateManager::Reset()
5399 {
5400 m_ibd_chainstate.reset();
5401 m_snapshot_chainstate.reset();
5402 m_active_chainstate = nullptr;
5403 m_snapshot_validated = false;
5404 }
5405
MaybeRebalanceCaches()5406 void ChainstateManager::MaybeRebalanceCaches()
5407 {
5408 if (m_ibd_chainstate && !m_snapshot_chainstate) {
5409 LogPrintf("[snapshot] allocating all cache to the IBD chainstate\n");
5410 // Allocate everything to the IBD chainstate.
5411 m_ibd_chainstate->ResizeCoinsCaches(m_total_coinstip_cache, m_total_coinsdb_cache);
5412 }
5413 else if (m_snapshot_chainstate && !m_ibd_chainstate) {
5414 LogPrintf("[snapshot] allocating all cache to the snapshot chainstate\n");
5415 // Allocate everything to the snapshot chainstate.
5416 m_snapshot_chainstate->ResizeCoinsCaches(m_total_coinstip_cache, m_total_coinsdb_cache);
5417 }
5418 else if (m_ibd_chainstate && m_snapshot_chainstate) {
5419 // If both chainstates exist, determine who needs more cache based on IBD status.
5420 //
5421 // Note: shrink caches first so that we don't inadvertently overwhelm available memory.
5422 if (m_snapshot_chainstate->IsInitialBlockDownload()) {
5423 m_ibd_chainstate->ResizeCoinsCaches(
5424 m_total_coinstip_cache * 0.05, m_total_coinsdb_cache * 0.05);
5425 m_snapshot_chainstate->ResizeCoinsCaches(
5426 m_total_coinstip_cache * 0.95, m_total_coinsdb_cache * 0.95);
5427 } else {
5428 m_snapshot_chainstate->ResizeCoinsCaches(
5429 m_total_coinstip_cache * 0.05, m_total_coinsdb_cache * 0.05);
5430 m_ibd_chainstate->ResizeCoinsCaches(
5431 m_total_coinstip_cache * 0.95, m_total_coinsdb_cache * 0.95);
5432 }
5433 }
5434 }
5435