// Copyright (c) 2017-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <chainparams.h>
#include <index/base.h>
#include <node/blockstorage.h>
#include <node/ui_interface.h>
#include <shutdown.h>
#include <tinyformat.h>
#include <util/thread.h>
#include <util/translation.h>
#include <validation.h> // For g_chainman
#include <warnings.h>

constexpr uint8_t DB_BEST_BLOCK{'B'};

constexpr int64_t SYNC_LOG_INTERVAL = 30; // seconds
constexpr int64_t SYNC_LOCATOR_WRITE_INTERVAL = 30; // seconds

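// Report a fatal internal index error: record the misc warning, write it to the
// log, show an error to the user, and request shutdown.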
template <typename... Args>
static void FatalError(const char* fmt, const Args&... args)
{
    std::string strMessage = tfm::format(fmt, args...);
    SetMiscWarning(Untranslated(strMessage));
    LogPrintf("*** %s\n", strMessage);
    AbortError(_("A fatal internal error occurred, see debug.log for details"));
    StartShutdown();
}

BaseIndex::DB::DB(const fs::path& path, size_t n_cache_size, bool f_memory, bool f_wipe, bool f_obfuscate) :
    CDBWrapper(path, n_cache_size, f_memory, f_wipe, f_obfuscate)
{}

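// Read the locator of the best block the index has been synced to. On failure,
// the locator is set to null and false is returned.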
bool BaseIndex::DB::ReadBestBlock(CBlockLocator& locator) const
{
    bool success = Read(DB_BEST_BLOCK, locator);
    if (!success) {
        locator.SetNull();
    }
    return success;
}

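// Queue a write of the best block locator into the given database batch.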
void BaseIndex::DB::WriteBestBlock(CDBBatch& batch, const CBlockLocator& locator)
{
    batch.Write(DB_BEST_BLOCK, locator);
}

BaseIndex::~BaseIndex()
{
    Interrupt();
    Stop();
}

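// Initialize the index state from the locator stored in its database, then verify
// that the block data needed to catch up to the active chain tip has not been pruned.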
bool BaseIndex::Init()
{
    CBlockLocator locator;
    if (!GetDB().ReadBestBlock(locator)) {
        locator.SetNull();
    }

    LOCK(cs_main);
    CChain& active_chain = m_chainstate->m_chain;
    if (locator.IsNull()) {
        m_best_block_index = nullptr;
    } else {
        m_best_block_index = m_chainstate->m_blockman.FindForkInGlobalIndex(active_chain, locator);
    }
    m_synced = m_best_block_index.load() == active_chain.Tip();
    if (!m_synced) {
        bool prune_violation = false;
        if (!m_best_block_index) {
            // The index is not built yet.
            // Make sure we have all block data back to the genesis block.
            const CBlockIndex* block = active_chain.Tip();
            while (block->pprev && (block->pprev->nStatus & BLOCK_HAVE_DATA)) {
                block = block->pprev;
            }
            prune_violation = block != active_chain.Genesis();
        }
        // In case the index has a best block set and is not fully synced,
        // check if we have the required blocks to continue building the index.
        else {
            const CBlockIndex* block_to_test = m_best_block_index.load();
            if (!active_chain.Contains(block_to_test)) {
                // If the best block is not part of the main chain, find the fork
                // and make sure we have all data down to the fork.
                block_to_test = active_chain.FindFork(block_to_test);
            }
            const CBlockIndex* block = active_chain.Tip();
            prune_violation = true;
            // Check backwards from the tip whether we have all block data until we reach the index's best block.
            while (block_to_test && block->pprev && (block->pprev->nStatus & BLOCK_HAVE_DATA)) {
                if (block_to_test == block) {
                    prune_violation = false;
                    break;
                }
                block = block->pprev;
            }
        }
        if (prune_violation) {
            return InitError(strprintf(Untranslated("%s best block of the index goes beyond pruned data. Please disable the index or reindex (which will download the whole blockchain again)"), GetName()));
        }
    }
    return true;
}

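// Return the next block for the sync thread to process: the genesis block when
// starting from scratch, the successor of pindex_prev when it is on the active
// chain, or the first block past the fork point if the chain has reorged away.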
static const CBlockIndex* NextSyncBlock(const CBlockIndex* pindex_prev, CChain& chain) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    AssertLockHeld(cs_main);

    if (!pindex_prev) {
        return chain.Genesis();
    }

    const CBlockIndex* pindex = chain.Next(pindex_prev);
    if (pindex) {
        return pindex;
    }

    return chain.Next(chain.FindFork(pindex_prev));
}

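// Background sync loop: read and index blocks one by one until the active chain
// tip is reached, periodically logging progress and committing the best block
// locator, then leave further updates to the validation interface callbacks.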
void BaseIndex::ThreadSync()
{
    const CBlockIndex* pindex = m_best_block_index.load();
    if (!m_synced) {
        auto& consensus_params = Params().GetConsensus();

        int64_t last_log_time = 0;
        int64_t last_locator_write_time = 0;
        while (true) {
            if (m_interrupt) {
                m_best_block_index = pindex;
                // No need to handle errors in Commit. If it fails, the error will already be
                // logged. The best way to recover is to continue, as the index cannot be corrupted
                // by a missed commit to disk for an advanced index state.
                Commit();
                return;
            }

            {
                LOCK(cs_main);
                const CBlockIndex* pindex_next = NextSyncBlock(pindex, m_chainstate->m_chain);
                if (!pindex_next) {
                    m_best_block_index = pindex;
                    m_synced = true;
                    // No need to handle errors in Commit. See rationale above.
                    Commit();
                    break;
                }
                if (pindex_next->pprev != pindex && !Rewind(pindex, pindex_next->pprev)) {
                    FatalError("%s: Failed to rewind index %s to a previous chain tip",
                               __func__, GetName());
                    return;
                }
                pindex = pindex_next;
            }

            int64_t current_time = GetTime();
            if (last_log_time + SYNC_LOG_INTERVAL < current_time) {
                LogPrintf("Syncing %s with block chain from height %d\n",
                          GetName(), pindex->nHeight);
                last_log_time = current_time;
            }

            if (last_locator_write_time + SYNC_LOCATOR_WRITE_INTERVAL < current_time) {
                m_best_block_index = pindex;
                last_locator_write_time = current_time;
                // No need to handle errors in Commit. See rationale above.
                Commit();
            }

            CBlock block;
            if (!ReadBlockFromDisk(block, pindex, consensus_params)) {
                FatalError("%s: Failed to read block %s from disk",
                           __func__, pindex->GetBlockHash().ToString());
                return;
            }
            if (!WriteBlock(block, pindex)) {
                FatalError("%s: Failed to write block %s to index database",
                           __func__, pindex->GetBlockHash().ToString());
                return;
            }
        }
    }

    if (pindex) {
        LogPrintf("%s is enabled at height %d\n", GetName(), pindex->nHeight);
    } else {
        LogPrintf("%s is enabled\n", GetName());
    }
}

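// Flush the current index state to the database atomically via a single batch.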
bool BaseIndex::Commit()
{
    CDBBatch batch(GetDB());
    if (!CommitInternal(batch) || !GetDB().WriteBatch(batch)) {
        return error("%s: Failed to commit latest %s state", __func__, GetName());
    }
    return true;
}

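// Write the best block locator for the index into the given batch under cs_main.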
bool BaseIndex::CommitInternal(CDBBatch& batch)
{
    LOCK(cs_main);
    GetDB().WriteBestBlock(batch, m_chainstate->m_chain.GetLocator(m_best_block_index));
    return true;
}

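// Roll the index's best block back from current_tip to its ancestor new_tip
// (e.g. during a reorg) and persist the new locator.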
bool BaseIndex::Rewind(const CBlockIndex* current_tip, const CBlockIndex* new_tip)
{
    assert(current_tip == m_best_block_index);
    assert(current_tip->GetAncestor(new_tip->nHeight) == new_tip);

    // In the case of a reorg, ensure the persisted block locator is not stale.
    // Pruning has a minimum of 288 blocks-to-keep, so getting the index
    // out of sync may be possible, but it would be the user's fault.
    // In case we reorg beyond the pruned depth, ReadBlockFromDisk would
    // throw and lead to a graceful shutdown.
    m_best_block_index = new_tip;
    if (!Commit()) {
        // If commit fails, revert the best block index to avoid corruption.
        m_best_block_index = current_tip;
        return false;
    }

    return true;
}

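// ValidationInterface callback: once the initial sync is done, write each newly
// connected block to the index, rewinding first if the new block does not extend
// the index's current best block.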
void BaseIndex::BlockConnected(const std::shared_ptr<const CBlock>& block, const CBlockIndex* pindex)
{
    if (!m_synced) {
        return;
    }

    const CBlockIndex* best_block_index = m_best_block_index.load();
    if (!best_block_index) {
        if (pindex->nHeight != 0) {
            FatalError("%s: First block connected is not the genesis block (height=%d)",
                       __func__, pindex->nHeight);
            return;
        }
    } else {
        // Ensure block connects to an ancestor of the current best block. This should be the case
        // most of the time, but may not be immediately after the sync thread catches up and sets
        // m_synced. Consider the case where there is a reorg and the blocks on the stale branch are
        // in the ValidationInterface queue backlog even after the sync thread has caught up to the
        // new chain tip. In this unlikely event, log a warning and let the queue clear.
        if (best_block_index->GetAncestor(pindex->nHeight - 1) != pindex->pprev) {
            LogPrintf("%s: WARNING: Block %s does not connect to an ancestor of " /* Continued */
                      "known best chain (tip=%s); not updating index\n",
                      __func__, pindex->GetBlockHash().ToString(),
                      best_block_index->GetBlockHash().ToString());
            return;
        }
        if (best_block_index != pindex->pprev && !Rewind(best_block_index, pindex->pprev)) {
            FatalError("%s: Failed to rewind index %s to a previous chain tip",
                       __func__, GetName());
            return;
        }
    }

    if (WriteBlock(*block, pindex)) {
        m_best_block_index = pindex;
    } else {
        FatalError("%s: Failed to write block %s to index",
                   __func__, pindex->GetBlockHash().ToString());
        return;
    }
}

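// ValidationInterface callback: after the chainstate is flushed, commit the index
// state if the flushed locator tip is on the index's known best chain.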
void BaseIndex::ChainStateFlushed(const CBlockLocator& locator)
{
    if (!m_synced) {
        return;
    }

    const uint256& locator_tip_hash = locator.vHave.front();
    const CBlockIndex* locator_tip_index;
    {
        LOCK(cs_main);
        locator_tip_index = m_chainstate->m_blockman.LookupBlockIndex(locator_tip_hash);
    }

    if (!locator_tip_index) {
        FatalError("%s: First block (hash=%s) in locator was not found",
                   __func__, locator_tip_hash.ToString());
        return;
    }

    // This checks that ChainStateFlushed callbacks are received after BlockConnected. The check may fail
    // immediately after the sync thread catches up and sets m_synced. Consider the case where
    // there is a reorg and the blocks on the stale branch are in the ValidationInterface queue
    // backlog even after the sync thread has caught up to the new chain tip. In this unlikely
    // event, log a warning and let the queue clear.
    const CBlockIndex* best_block_index = m_best_block_index.load();
    if (best_block_index->GetAncestor(locator_tip_index->nHeight) != locator_tip_index) {
        LogPrintf("%s: WARNING: Locator contains block (hash=%s) not on known best " /* Continued */
                  "chain (tip=%s); not writing index locator\n",
                  __func__, locator_tip_hash.ToString(),
                  best_block_index->GetBlockHash().ToString());
        return;
    }

    // No need to handle errors in Commit. If it fails, the error will already be logged. The
    // best way to recover is to continue, as the index cannot be corrupted by a missed commit
    // to disk for an advanced index state.
    Commit();
}

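// If the initial sync is still running, return false. Otherwise, return true,
// first draining the validation interface queue if block notifications up to the
// current chain tip are still pending, so callers see an up-to-date index.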
bool BaseIndex::BlockUntilSyncedToCurrentChain() const
{
    AssertLockNotHeld(cs_main);

    if (!m_synced) {
        return false;
    }

    {
        // Skip the queue-draining if we know we're already caught up with
        // m_chainstate->m_chain.Tip().
        LOCK(cs_main);
        const CBlockIndex* chain_tip = m_chainstate->m_chain.Tip();
        const CBlockIndex* best_block_index = m_best_block_index.load();
        if (best_block_index->GetAncestor(chain_tip->nHeight) == chain_tip) {
            return true;
        }
    }

    LogPrintf("%s: %s is catching up on block notifications\n", __func__, GetName());
    SyncWithValidationInterfaceQueue();
    return true;
}

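// Signal the sync thread to stop at its next check of m_interrupt.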
void BaseIndex::Interrupt()
{
    m_interrupt();
}

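// Register the index as a validation interface client, initialize it from its
// database, and launch the background sync thread.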
bool BaseIndex::Start(CChainState& active_chainstate)
{
    m_chainstate = &active_chainstate;
    // Need to register this ValidationInterface before running Init(), so that
    // callbacks are not missed if Init sets m_synced to true.
    RegisterValidationInterface(this);
    if (!Init()) {
        return false;
    }

    m_thread_sync = std::thread(&util::TraceThread, GetName(), [this] { ThreadSync(); });
    return true;
}

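// Unregister from validation notifications and wait for the sync thread to finish.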
void BaseIndex::Stop()
{
    UnregisterValidationInterface(this);

    if (m_thread_sync.joinable()) {
        m_thread_sync.join();
    }
}

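// Return a snapshot of the index name, sync status, and best block height.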
IndexSummary BaseIndex::GetSummary() const
{
    IndexSummary summary{};
    summary.name = GetName();
    summary.synced = m_synced;
    summary.best_block_height = m_best_block_index ? m_best_block_index.load()->nHeight : 0;
    return summary;
}