1#!/usr/bin/env python3
2# Copyright (c) 2014-2016 The Bitcoin Core developers
3# Distributed under the MIT software license, see the accompanying
4# file COPYING or http://www.opensource.org/licenses/mit-license.php.
5
6#
7# Test pruning code
8# ********
9# WARNING:
10# This test uses 4GB of disk space.
11# This test takes 30 mins or more (up to 2 hours)
12# ********
13
14from test_framework.test_framework import BitcoinTestFramework
15from test_framework.util import *
16
def calc_usage(blockdir):
    """Return the combined size, in MiB, of the regular files directly in *blockdir*.

    Subdirectories (e.g. the block index leveldb directory) are ignored; only
    top-level files such as blk*.dat / rev*.dat are counted.

    Args:
        blockdir: path to a node's blocks directory, with or without a
            trailing path separator.

    Returns:
        float: total size of the directory's files in MiB.
    """
    # os.path.join tolerates a missing trailing separator on blockdir,
    # unlike the raw "blockdir + f" concatenation it replaces.
    return sum(
        os.path.getsize(os.path.join(blockdir, f))
        for f in os.listdir(blockdir)
        if os.path.isfile(os.path.join(blockdir, f))
    ) / (1024. * 1024.)
19
class PruneTest(BitcoinTestFramework):
    """Functional test for block-file pruning (-prune=550).

    Mines a chain large enough (>550MiB of block data) to trigger pruning on
    node 2, then verifies that:
      * pruning does not start below the prune-after height,
      * disk usage stays near the 550MiB target (stale blocks permitting),
      * a pruning node survives a 288-block reorg, and
      * a block that was pruned away can be re-downloaded when a later reorg
        needs it.
    See run_test() for the chain-shape diagrams.
    """

    def __init__(self):
        super().__init__()
        self.setup_clean_chain = True
        self.num_nodes = 3

        # Cached listunspent() results, consumed by mine_full_block()
        self.utxo = []
        # One receiving address per mining node; filled in setup_network()
        self.address = ["",""]
        # Pre-generated blob of 128 txouts used to bloat transactions to ~66kB
        self.txouts = gen_return_txouts()

    def setup_network(self):
        """Start two mining nodes (0, 1) and a pruning node (2), connected in a ring."""
        self.nodes = []
        self.is_network_split = False

        # Create nodes 0 and 1 to mine
        self.nodes.append(start_node(0, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900))
        self.nodes.append(start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900))

        # Create node 2 to test pruning
        self.nodes.append(start_node(2, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-prune=550"], timewait=900))
        # Directory whose blk*.dat files we watch to observe pruning
        self.prunedir = self.options.tmpdir+"/node2/regtest/blocks/"

        self.address[0] = self.nodes[0].getnewaddress()
        self.address[1] = self.nodes[1].getnewaddress()

        # Determine default relay fee
        self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]

        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[1], 2)
        connect_nodes(self.nodes[2], 0)
        sync_blocks(self.nodes[0:3])

    def create_big_chain(self):
        """Mine an initial chain with enough full blocks to exceed the 550MiB prune target."""
        # Start by creating some coinbases we can spend later
        self.nodes[1].generate(200)
        sync_blocks(self.nodes[0:2])
        self.nodes[0].generate(150)
        # Then mine enough full blocks to create more than 550MiB of data
        for i in range(645):
            self.mine_full_block(self.nodes[0], self.address[0])

        sync_blocks(self.nodes[0:3])

    def test_height_min(self):
        """Check pruning has not started yet, then mine past the cutoff and wait for the first block file to be pruned.

        Raises:
            AssertionError: if blk00000.dat was pruned too early, is not
                pruned within 30s once expected, or usage exceeds 550MiB.
        """
        if not os.path.isfile(self.prunedir+"blk00000.dat"):
            raise AssertionError("blk00000.dat is missing, pruning too early")
        print("Success")
        print("Though we're already using more than 550MiB, current usage:", calc_usage(self.prunedir))
        print("Mining 25 more blocks should cause the first block file to be pruned")
        # Pruning doesn't run until we're allocating another chunk, 20 full blocks past the height cutoff will ensure this
        for i in range(25):
            self.mine_full_block(self.nodes[0],self.address[0])

        # Poll for deletion of the first block file; pruning is asynchronous
        waitstart = time.time()
        while os.path.isfile(self.prunedir+"blk00000.dat"):
            time.sleep(0.1)
            if time.time() - waitstart > 30:
                raise AssertionError("blk00000.dat not pruned when it should be")

        print("Success")
        usage = calc_usage(self.prunedir)
        print("Usage should be below target:", usage)
        if (usage > 550):
            raise AssertionError("Pruning target not being met")

    def create_chain_with_staleblocks(self):
        """Repeatedly fork the chain so node 2 stores many stale blocks alongside the main chain.

        Twelve rounds of: node 1 mines 24 soon-to-be-stale blocks while node 0
        is disconnected, then node 0 mines a 25-block chain that wins the reorg.
        """
        # Create stale blocks in manageable sized chunks
        print("Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds")

        for j in range(12):
            # Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
            # Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects
            # Stopping node 0 also clears its mempool, so it doesn't have node1's transactions to accidentally mine
            stop_node(self.nodes[0],0)
            self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900)
            # Mine 24 blocks in node 1
            self.utxo = self.nodes[1].listunspent()
            for i in range(24):
                if j == 0:
                    self.mine_full_block(self.nodes[1],self.address[1])
                else:
                    self.nodes[1].generate(1) #tx's already in mempool from previous disconnects

            # Reorg back with 25 block chain from node 0
            self.utxo = self.nodes[0].listunspent()
            for i in range(25):
                self.mine_full_block(self.nodes[0],self.address[0])

            # Create connections in the order so both nodes can see the reorg at the same time
            connect_nodes(self.nodes[1], 0)
            connect_nodes(self.nodes[2], 0)
            sync_blocks(self.nodes[0:3])

        print("Usage can be over target because of high stale rate:", calc_usage(self.prunedir))

    def reorg_test(self):
        """Force a 288-block reorg on node 2 and verify pruning still meets its target afterwards.

        Node 1 invalidates back to 288 blocks below the tip, mines a longer
        300-block replacement chain, and everyone reorgs onto it.

        Returns:
            tuple: (invalidheight, badhash) — the height and hash of the
            first invalidated main-chain block (the old fork point + 1).
        """
        # Node 1 will mine a 300 block chain starting 287 blocks back from Node 0 and Node 2's tip
        # This will cause Node 2 to do a reorg requiring 288 blocks of undo data to the reorg_test chain
        # Reboot node 1 to clear its mempool (hopefully make the invalidate faster)
        # Lower the block max size so we don't keep mining all our big mempool transactions (from disconnected blocks)
        stop_node(self.nodes[1],1)
        self.nodes[1]=start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900)

        height = self.nodes[1].getblockcount()
        print("Current block height:", height)

        invalidheight = height-287
        badhash = self.nodes[1].getblockhash(invalidheight)
        print("Invalidating block at height:",invalidheight,badhash)
        self.nodes[1].invalidateblock(badhash)

        # We've now switched to our previously mined-24 block fork on node 1, but that's not what we want
        # So invalidate that fork as well, until we're on the same chain as node 0/2 (but at an ancestor 288 blocks ago)
        mainchainhash = self.nodes[0].getblockhash(invalidheight - 1)
        curhash = self.nodes[1].getblockhash(invalidheight - 1)
        while curhash != mainchainhash:
            self.nodes[1].invalidateblock(curhash)
            curhash = self.nodes[1].getblockhash(invalidheight - 1)

        assert(self.nodes[1].getblockcount() == invalidheight - 1)
        print("New best height", self.nodes[1].getblockcount())

        # Reboot node1 to clear those giant tx's from mempool
        stop_node(self.nodes[1],1)
        self.nodes[1]=start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900)

        print("Generating new longer chain of 300 more blocks")
        self.nodes[1].generate(300)

        print("Reconnect nodes")
        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[2], 1)
        sync_blocks(self.nodes[0:3], timeout=120)

        print("Verify height on node 2:",self.nodes[2].getblockcount())
        print("Usage possibly still high bc of stale blocks in block files:", calc_usage(self.prunedir))

        print("Mine 220 more blocks so we have requisite history (some blocks will be big and cause pruning of previous chain)")
        for i in range(22):
            # This can be slow, so do this in multiple RPC calls to avoid
            # RPC timeouts.
            self.nodes[0].generate(10) #node 0 has many large tx's in its mempool from the disconnects
        sync_blocks(self.nodes[0:3], timeout=300)

        usage = calc_usage(self.prunedir)
        print("Usage should be below target:", usage)
        if (usage > 550):
            raise AssertionError("Pruning target not being met")

        return invalidheight,badhash

    def reorg_back(self):
        """Verify node 2 can reorg back to the old main chain by re-downloading blocks it pruned.

        Relies on self.forkheight/self.forkhash (from reorg_test) and
        self.mainchainheight/self.mainchainhash2 (saved in run_test).

        Raises:
            AssertionError: if the old block was never pruned, or node 2
                fails to reorg to the expected chain within 900s.
        """
        # Verify that a block on the old main chain fork has been pruned away
        try:
            self.nodes[2].getblock(self.forkhash)
            raise AssertionError("Old block wasn't pruned so can't test redownload")
        except JSONRPCException as e:
            # Expected: the pruned block's data is gone, so getblock fails
            print("Will need to redownload block",self.forkheight)

        # Verify that we have enough history to reorg back to the fork point
        # Although this is more than 288 blocks, because this chain was written more recently
        # and only its other 299 small and 220 large block are in the block files after it,
        # it's expected to still be retained
        self.nodes[2].getblock(self.nodes[2].getblockhash(self.forkheight))

        first_reorg_height = self.nodes[2].getblockcount()
        curchainhash = self.nodes[2].getblockhash(self.mainchainheight)
        self.nodes[2].invalidateblock(curchainhash)
        goalbestheight = self.mainchainheight
        goalbesthash = self.mainchainhash2

        # As of 0.10 the current block download logic is not able to reorg to the original chain created in
        # create_chain_with_stale_blocks because it doesn't know of any peer that's on that chain from which to
        # redownload its missing blocks.
        # Invalidate the reorg_test chain in node 0 as well, it can successfully switch to the original chain
        # because it has all the block data.
        # However it must mine enough blocks to have a more work chain than the reorg_test chain in order
        # to trigger node 2's block download logic.
        # At this point node 2 is within 288 blocks of the fork point so it will preserve its ability to reorg
        if self.nodes[2].getblockcount() < self.mainchainheight:
            blocks_to_mine = first_reorg_height + 1 - self.mainchainheight
            print("Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed:", blocks_to_mine)
            self.nodes[0].invalidateblock(curchainhash)
            assert(self.nodes[0].getblockcount() == self.mainchainheight)
            assert(self.nodes[0].getbestblockhash() == self.mainchainhash2)
            goalbesthash = self.nodes[0].generate(blocks_to_mine)[-1]
            goalbestheight = first_reorg_height + 1

        print("Verify node 2 reorged back to the main chain, some blocks of which it had to redownload")
        # Poll until node 2 has redownloaded and reorged; allow up to 15 minutes
        waitstart = time.time()
        while self.nodes[2].getblockcount() < goalbestheight:
            time.sleep(0.1)
            if time.time() - waitstart > 900:
                raise AssertionError("Node 2 didn't reorg to proper height")
        assert(self.nodes[2].getbestblockhash() == goalbesthash)
        # Verify we can now have the data for a block previously pruned
        assert(self.nodes[2].getblock(self.forkhash)["height"] == self.forkheight)

    def mine_full_block(self, node, address):
        """Fill *node*'s mempool with 14 ~66kB transactions paying *address*, then mine one block.

        Consumes entries from self.utxo (refreshed from listunspent() when it
        runs low) and relies on self.txouts / self.relayfee set up elsewhere.
        """
        # Want to create a full block
        # We'll generate a 66k transaction below, and 14 of them is close to the 1MB block limit
        for j in range(14):
            if len(self.utxo) < 14:
                self.utxo = node.listunspent()
            inputs=[]
            outputs = {}
            t = self.utxo.pop()
            inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
            remchange = t["amount"] - 100*self.relayfee # Fee must be above min relay rate for 66kb tx
            outputs[address]=remchange
            # Create a basic transaction that will send change back to ourself after accounting for a fee
            # And then insert the 128 generated transaction outs in the middle rawtx[92] is where the #
            # of txouts is stored and is the only thing we overwrite from the original transaction
            rawtx = node.createrawtransaction(inputs, outputs)
            newtx = rawtx[0:92]
            newtx = newtx + self.txouts
            newtx = newtx + rawtx[94:]
            # Appears to be ever so slightly faster to sign with SIGHASH_NONE
            signresult = node.signrawtransaction(newtx,None,None,"NONE")
            txid = node.sendrawtransaction(signresult["hex"], True)
        # Mine a full sized block which will be these transactions we just created
        node.generate(1)


    def run_test(self):
        """Drive the full pruning scenario; the diagrams below track each node's chain shape."""
        print("Warning! This test requires 4GB of disk space and takes over 30 mins (up to 2 hours)")
        print("Mining a big blockchain of 995 blocks")
        self.create_big_chain()
        # Chain diagram key:
        # *   blocks on main chain
        # +,&,$,@ blocks on other forks
        # X   invalidated block
        # N1  Node 1
        #
        # Start by mining a simple chain that all nodes have
        # N0=N1=N2 **...*(995)

        print("Check that we haven't started pruning yet because we're below PruneAfterHeight")
        self.test_height_min()
        # Extend this chain past the PruneAfterHeight
        # N0=N1=N2 **...*(1020)

        print("Check that we'll exceed disk space target if we have a very high stale block rate")
        self.create_chain_with_staleblocks()
        # Disconnect N0
        # And mine a 24 block chain on N1 and a separate 25 block chain on N0
        # N1=N2 **...*+...+(1044)
        # N0    **...**...**(1045)
        #
        # reconnect nodes causing reorg on N1 and N2
        # N1=N2 **...*(1020) *...**(1045)
        #                   \
        #                    +...+(1044)
        #
        # repeat this process until you have 12 stale forks hanging off the
        # main chain on N1 and N2
        # N0    *************************...***************************(1320)
        #
        # N1=N2 **...*(1020) *...**(1045) *..         ..**(1295) *...**(1320)
        #                   \            \                      \
        #                    +...+(1044)  &..                    $...$(1319)

        # Save some current chain state for later use
        self.mainchainheight = self.nodes[2].getblockcount()   #1320
        self.mainchainhash2 = self.nodes[2].getblockhash(self.mainchainheight)

        print("Check that we can survive a 288 block reorg still")
        (self.forkheight,self.forkhash) = self.reorg_test() #(1033, )
        # Now create a 288 block reorg by mining a longer chain on N1
        # First disconnect N1
        # Then invalidate 1033 on main chain and 1032 on fork so height is 1032 on main chain
        # N1   **...*(1020) **...**(1032)X..
        #                  \
        #                   ++...+(1031)X..
        #
        # Now mine 300 more blocks on N1
        # N1    **...*(1020) **...**(1032) @@...@(1332)
        #                 \               \
        #                  \               X...
        #                   \                 \
        #                    ++...+(1031)X..   ..
        #
        # Reconnect nodes and mine 220 more blocks on N1
        # N1    **...*(1020) **...**(1032) @@...@@@(1552)
        #                 \               \
        #                  \               X...
        #                   \                 \
        #                    ++...+(1031)X..   ..
        #
        # N2    **...*(1020) **...**(1032) @@...@@@(1552)
        #                 \               \
        #                  \               *...**(1320)
        #                   \                 \
        #                    ++...++(1044)     ..
        #
        # N0    ********************(1032) @@...@@@(1552)
        #                                 \
        #                                  *...**(1320)

        print("Test that we can rerequest a block we previously pruned if needed for a reorg")
        self.reorg_back()
        # Verify that N2 still has block 1033 on current chain (@), but not on main chain (*)
        # Invalidate 1033 on current chain (@) on N2 and we should be able to reorg to
        # original main chain (*), but will require redownload of some blocks
        # In order to have a peer we think we can download from, must also perform this invalidation
        # on N0 and mine a new longest chain to trigger.
        # Final result:
        # N0    ********************(1032) **...****(1553)
        #                                 \
        #                                  X@...@@@(1552)
        #
        # N2    **...*(1020) **...**(1032) **...****(1553)
        #                 \               \
        #                  \               X@...@@@(1552)
        #                   \
        #                    +..
        #
        # N1 doesn't change because 1033 on main chain (*) is invalid

        print("Done")
342
# Script entry point: run the pruning test when executed directly.
if __name__ == "__main__":
    PruneTest().main()
345