// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package trie

import (
	"errors"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/prque"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
)

// ErrNotRequested is returned by the trie sync when it's requested to process a
// node it did not request.
var ErrNotRequested = errors.New("not requested")

// ErrAlreadyProcessed is returned by the trie sync when it's requested to process a
// node it already processed previously.
var ErrAlreadyProcessed = errors.New("already processed")

// maxFetchesPerDepth is the maximum number of pending trie nodes per depth. The
// role of this value is to limit the number of trie nodes that get expanded in
// memory if the node was configured with a significant number of peers.
const maxFetchesPerDepth = 16384

// request represents a scheduled or already in-flight state retrieval request.
type request struct {
	path []byte      // Merkle path leading to this node for prioritization
	hash common.Hash // Hash of the node data content to retrieve
	data []byte      // Data content of the node, cached until all subtrees complete
	code bool        // Whether this is a code entry

	parents []*request // Parent state nodes referencing this entry (notify all upon completion)
	deps    int        // Number of dependencies before allowed to commit this node

	callback LeafCallback // Callback to invoke if a leaf node is reached on this branch
}

// SyncPath is a path tuple identifying a particular trie node either in a single
// trie (account) or a layered trie (account -> storage).
//
// Content-wise the tuple either has 1 element if it addresses a node in a single
// trie or 2 elements if it addresses a node in a stacked trie.
//
// To support targeting arbitrary trie nodes, the path needs to support odd nibble
// lengths. To avoid transferring expanded hex form over the network, the last
// part of the tuple (which needs to index into the middle of a trie) is compact
// encoded. In case of a 2-tuple, the first item is always 32 bytes, so it is
// simply binary encoded.
//
// Examples:
//   - Path 0x9  -> {0x19}
//   - Path 0x99 -> {0x0099}
//   - Path 0x01234567890123456789012345678901012345678901234567890123456789019  -> {0x0123456789012345678901234567890101234567890123456789012345678901, 0x19}
//   - Path 0x012345678901234567890123456789010123456789012345678901234567890199 -> {0x0123456789012345678901234567890101234567890123456789012345678901, 0x0099}
type SyncPath [][]byte

// newSyncPath converts an expanded trie path from nibble form into a compact
// version that can be sent over the network.
func newSyncPath(path []byte) SyncPath {
	// If the hash is from the account trie, append a single item; if it
	// is from a storage trie, append a tuple. Note, the length 64 is
	// clashing between an account leaf and a storage root. It's fine though,
	// because having a trie node at depth 64 means a hash collision was
	// found and we're long dead.
	if len(path) < 64 {
		return SyncPath{hexToCompact(path)}
	}
	return SyncPath{hexToKeybytes(path[:64]), hexToCompact(path[64:])}
}

// SyncResult is a response with requested data along with its hash.
type SyncResult struct {
	Hash common.Hash // Hash of the originally unknown trie node
	Data []byte      // Data content of the retrieved node
}

// syncMemBatch is an in-memory buffer of successfully downloaded but not yet
// persisted data items.
type syncMemBatch struct {
	nodes map[common.Hash][]byte // In-memory membatch of recently completed nodes
	codes map[common.Hash][]byte // In-memory membatch of recently completed codes
}

// newSyncMemBatch allocates a new memory-buffer for not-yet persisted trie nodes.
func newSyncMemBatch() *syncMemBatch {
	return &syncMemBatch{
		nodes: make(map[common.Hash][]byte),
		codes: make(map[common.Hash][]byte),
	}
}

// hasNode reports whether the trie node with the specified hash is already cached.
func (batch *syncMemBatch) hasNode(hash common.Hash) bool {
	_, ok := batch.nodes[hash]
	return ok
}

// hasCode reports whether the contract code with the specified hash is already cached.
func (batch *syncMemBatch) hasCode(hash common.Hash) bool {
	_, ok := batch.codes[hash]
	return ok
}

// Sync is the main state trie synchronisation scheduler, which provides yet
// unknown trie hashes to retrieve, accepts node data associated with said hashes
// and reconstructs the trie step by step until all is done.
type Sync struct {
	database ethdb.KeyValueReader     // Persistent database to check for existing entries
	membatch *syncMemBatch            // Memory buffer to avoid frequent database writes
	nodeReqs map[common.Hash]*request // Pending requests pertaining to a trie node hash
	codeReqs map[common.Hash]*request // Pending requests pertaining to a code hash
	queue    *prque.Prque             // Priority queue with the pending requests
	fetches  map[int]int              // Number of active fetches per trie node depth
}

// NewSync creates a new trie data download scheduler.
func NewSync(root common.Hash, database ethdb.KeyValueReader, callback LeafCallback) *Sync {
	ts := &Sync{
		database: database,
		membatch: newSyncMemBatch(),
		nodeReqs: make(map[common.Hash]*request),
		codeReqs: make(map[common.Hash]*request),
		queue:    prque.New(nil),
		fetches:  make(map[int]int),
	}
	ts.AddSubTrie(root, nil, common.Hash{}, callback)
	return ts
}
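
// A minimal usage sketch of the scheduler (illustrative only, not part of the
// API; the fetch helper and diskdb, a full ethdb.Database, are assumed): the
// caller repeatedly drains Missing, retrieves the data from the network, feeds
// the results back via Process and flushes the membatch with Commit until
// nothing is Pending anymore.
//
//	sched := NewSync(root, diskdb, callback)
//	for sched.Pending() > 0 {
//		nodes, _, codes := sched.Missing(128)
//		for _, result := range fetch(nodes, codes) { // hypothetical network retrieval
//			if err := sched.Process(result); err != nil {
//				// handle ErrNotRequested / ErrAlreadyProcessed / decode errors
//			}
//		}
//		batch := diskdb.NewBatch()
//		if err := sched.Commit(batch); err != nil {
//			// handle the database write error
//		}
//		batch.Write()
//	}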

// AddSubTrie registers a new trie to the sync code, rooted at the designated parent.
func (s *Sync) AddSubTrie(root common.Hash, path []byte, parent common.Hash, callback LeafCallback) {
	// Short circuit if the trie is empty or already known
	if root == emptyRoot {
		return
	}
	if s.membatch.hasNode(root) {
		return
	}
	// If the database says this is a duplicate, then at least the trie node is
	// present, and we hold the assumption that it's NOT legacy contract code.
	blob := rawdb.ReadTrieNode(s.database, root)
	if len(blob) > 0 {
		return
	}
	// Assemble the new sub-trie sync request
	req := &request{
		path:     path,
		hash:     root,
		callback: callback,
	}
	// If this sub-trie has a designated parent, link them together
	if parent != (common.Hash{}) {
		ancestor := s.nodeReqs[parent]
		if ancestor == nil {
			panic(fmt.Sprintf("sub-trie ancestor not found: %x", parent))
		}
		ancestor.deps++
		req.parents = append(req.parents, ancestor)
	}
	s.schedule(req)
}

// AddCodeEntry schedules the direct retrieval of a contract code that should not
// be interpreted as a trie node, but rather accepted and stored into the database
// as is.
func (s *Sync) AddCodeEntry(hash common.Hash, path []byte, parent common.Hash) {
	// Short circuit if the entry is empty or already known
	if hash == emptyState {
		return
	}
	if s.membatch.hasCode(hash) {
		return
	}
	// If the database says it's a duplicate, the blob is present for sure.
	// Note we only check existence with the new code scheme, since fast
	// sync is expected to run on a fresh node. Even if the code exists in
	// the legacy format, fetch and store it with the new scheme anyway.
	if blob := rawdb.ReadCodeWithPrefix(s.database, hash); len(blob) > 0 {
		return
	}
	// Assemble the new code entry sync request
	req := &request{
		path: path,
		hash: hash,
		code: true,
	}
	// If this code entry has a designated parent, link them together
	if parent != (common.Hash{}) {
		ancestor := s.nodeReqs[parent] // the parent of a code request can ONLY be a node request
		if ancestor == nil {
			panic(fmt.Sprintf("raw-entry ancestor not found: %x", parent))
		}
		ancestor.deps++
		req.parents = append(req.parents, ancestor)
	}
	s.schedule(req)
}

// Missing retrieves the known missing nodes from the trie for retrieval. To aid
// both eth/6x style fast sync and snap/1x style state sync, the paths of trie
// nodes are returned too, as well as a separate hash list for codes.
func (s *Sync) Missing(max int) (nodes []common.Hash, paths []SyncPath, codes []common.Hash) {
	var (
		nodeHashes []common.Hash
		nodePaths  []SyncPath
		codeHashes []common.Hash
	)
	for !s.queue.Empty() && (max == 0 || len(nodeHashes)+len(codeHashes) < max) {
		// Retrieve the next item in line
		item, prio := s.queue.Peek()

		// If we have too many already-pending tasks for this depth, throttle
		depth := int(prio >> 56)
		if s.fetches[depth] > maxFetchesPerDepth {
			break
		}
		// Item is allowed to be scheduled, add it to the task list
		s.queue.Pop()
		s.fetches[depth]++

		hash := item.(common.Hash)
		if req, ok := s.nodeReqs[hash]; ok {
			nodeHashes = append(nodeHashes, hash)
			nodePaths = append(nodePaths, newSyncPath(req.path))
		} else {
			codeHashes = append(codeHashes, hash)
		}
	}
	return nodeHashes, nodePaths, codeHashes
}

// Process injects the received data for the requested item. Note that a single
// response can complete two pending requests (e.g. there are two requests, one
// for code and one for a node, but the hash is the same). In this case the
// second response for the same hash will be treated as a "non-requested" or
// "already-processed" item, but there is no downside.
func (s *Sync) Process(result SyncResult) error {
	// If the item was not requested either for code or node, bail out
	if s.nodeReqs[result.Hash] == nil && s.codeReqs[result.Hash] == nil {
		return ErrNotRequested
	}
	// If there is a pending code request for this data, commit it directly
	var filled bool
	if req := s.codeReqs[result.Hash]; req != nil && req.data == nil {
		filled = true
		req.data = result.Data
		s.commit(req)
	}
	// If there is a pending node request for this data, fill it.
	if req := s.nodeReqs[result.Hash]; req != nil && req.data == nil {
		filled = true
		// Decode the node data content and update the request
		node, err := decodeNode(result.Hash[:], result.Data)
		if err != nil {
			return err
		}
		req.data = result.Data

		// Create and schedule a request for all the children nodes
		requests, err := s.children(req, node)
		if err != nil {
			return err
		}
		if len(requests) == 0 && req.deps == 0 {
			s.commit(req)
		} else {
			req.deps += len(requests)
			for _, child := range requests {
				s.schedule(child)
			}
		}
	}
	if !filled {
		return ErrAlreadyProcessed
	}
	return nil
}

// Commit flushes the data stored in the internal membatch out to persistent
// storage, returning any error that occurred.
func (s *Sync) Commit(dbw ethdb.Batch) error {
	// Dump the membatch into the database batch dbw
	for key, value := range s.membatch.nodes {
		rawdb.WriteTrieNode(dbw, key, value)
	}
	for key, value := range s.membatch.codes {
		rawdb.WriteCode(dbw, key, value)
	}
	// Drop the membatch data and return
	s.membatch = newSyncMemBatch()
	return nil
}

// Pending returns the number of state entries currently pending for download.
func (s *Sync) Pending() int {
	return len(s.nodeReqs) + len(s.codeReqs)
}

// schedule inserts a new state retrieval request into the fetch queue. If there
// is already a pending request for this node, the new request will be discarded
// and only a parent reference added to the old one.
func (s *Sync) schedule(req *request) {
	var reqset = s.nodeReqs
	if req.code {
		reqset = s.codeReqs
	}
	// If we're already requesting this node, add a new reference and stop
	if old, ok := reqset[req.hash]; ok {
		old.parents = append(old.parents, req.parents...)
		return
	}
	reqset[req.hash] = req

	// Schedule the request for future retrieval. This queue is shared
	// by both node requests and code requests. It can happen that a trie
	// node and contract code share the same hash: in that case two elements
	// with the same hash (and the same or a different depth) will be pushed.
	// That's fine, since in the worst case the second response is simply
	// treated as a duplicate.
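	// For illustration, a request at hex path {0x1, 0x2} is encoded as
	//   prio = 2<<56 | (15-0x1)<<52 | (15-0x2)<<48
	// i.e. the top byte carries the depth (recovered in Missing via prio>>56),
	// while the following nibbles break ties so that same-depth requests are
	// popped in lexicographic path order.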
	prio := int64(len(req.path)) << 56 // depth >= 128 will never happen, storage leaves will be included in their parents
	for i := 0; i < 14 && i < len(req.path); i++ {
		prio |= int64(15-req.path[i]) << (52 - i*4) // 15-nibble => lexicographic order
	}
	s.queue.Push(req.hash, prio)
}

// children retrieves all the missing children of a state trie entry for future
// retrieval scheduling.
func (s *Sync) children(req *request, object node) ([]*request, error) {
	// Gather all the children of the node, irrespective of whether they are known or not
	type child struct {
		path []byte
		node node
	}
	var children []child

	switch node := (object).(type) {
	case *shortNode:
		key := node.Key
		if hasTerm(key) {
			key = key[:len(key)-1]
		}
		children = []child{{
			node: node.Val,
			path: append(append([]byte(nil), req.path...), key...),
		}}
	case *fullNode:
		for i := 0; i < 17; i++ {
			if node.Children[i] != nil {
				children = append(children, child{
					node: node.Children[i],
					path: append(append([]byte(nil), req.path...), byte(i)),
				})
			}
		}
	default:
		panic(fmt.Sprintf("unknown node: %+v", node))
	}
	// Iterate over the children, and request all unknown ones
	requests := make([]*request, 0, len(children))
	for _, child := range children {
		// Notify any external watcher of a new key/value node
		if req.callback != nil {
			if node, ok := (child.node).(valueNode); ok {
				var paths [][]byte
				if len(child.path) == 2*common.HashLength {
					paths = append(paths, hexToKeybytes(child.path))
				} else if len(child.path) == 4*common.HashLength {
					paths = append(paths, hexToKeybytes(child.path[:2*common.HashLength]))
					paths = append(paths, hexToKeybytes(child.path[2*common.HashLength:]))
				}
				if err := req.callback(paths, child.path, node, req.hash); err != nil {
					return nil, err
				}
			}
		}
		// If the child references another node, resolve or schedule
		if node, ok := (child.node).(hashNode); ok {
			// Try to resolve the node from the local database
			hash := common.BytesToHash(node)
			if s.membatch.hasNode(hash) {
				continue
			}
			// If the database says duplicate, then at least the trie node is present
			// and we hold the assumption that it's NOT legacy contract code.
			if blob := rawdb.ReadTrieNode(s.database, hash); len(blob) > 0 {
				continue
			}
			// Locally unknown node, schedule for retrieval
			requests = append(requests, &request{
				path:     child.path,
				hash:     hash,
				parents:  []*request{req},
				callback: req.callback,
			})
		}
	}
	return requests, nil
}

// commit finalizes a retrieval request and stores it into the membatch. If any
// of the referencing parent requests complete due to this commit, they are also
// committed themselves.
func (s *Sync) commit(req *request) (err error) {
	// Write the node content to the membatch
	if req.code {
		s.membatch.codes[req.hash] = req.data
		delete(s.codeReqs, req.hash)
		s.fetches[len(req.path)]--
	} else {
		s.membatch.nodes[req.hash] = req.data
		delete(s.nodeReqs, req.hash)
		s.fetches[len(req.path)]--
	}
	// Check all parents for completion
	for _, parent := range req.parents {
		parent.deps--
		if parent.deps == 0 {
			if err := s.commit(parent); err != nil {
				return err
			}
		}
	}
	return nil
}