// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// This is a temporary package whilst working on the eth/66 blocking refactors.
// After that work is done, les needs to be refactored to use the new package,
// or alternatively use a stripped-down version of it. Either way, we need to
// keep the changes scoped, so temporarily duplicating the code seems the sanest approach.
package fetcher

import (
	"errors"
	"math/rand"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/prque"
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/trie"
)

const (
	lightTimeout  = time.Millisecond       // Time allowance before an announced header is explicitly requested
	arriveTimeout = 500 * time.Millisecond // Time allowance before an announced block/transaction is explicitly requested
	gatherSlack   = 100 * time.Millisecond // Interval used to collate almost-expired announces with fetches
	fetchTimeout  = 5 * time.Second        // Maximum allotted time to return an explicitly requested block/transaction
)

const (
	maxUncleDist = 7   // Maximum allowed backward distance from the chain head
	maxQueueDist = 32  // Maximum allowed distance from the chain head to queue
	hashLimit    = 256 // Maximum number of unique blocks or headers a peer may have announced
	blockLimit   = 64  // Maximum number of unique blocks a peer may have delivered
)

var (
	blockAnnounceInMeter   = metrics.NewRegisteredMeter("eth/fetcher/block/announces/in", nil)
	blockAnnounceOutTimer  = metrics.NewRegisteredTimer("eth/fetcher/block/announces/out", nil)
	blockAnnounceDropMeter = metrics.NewRegisteredMeter("eth/fetcher/block/announces/drop", nil)
	blockAnnounceDOSMeter  = metrics.NewRegisteredMeter("eth/fetcher/block/announces/dos", nil)

	blockBroadcastInMeter   = metrics.NewRegisteredMeter("eth/fetcher/block/broadcasts/in", nil)
	blockBroadcastOutTimer  = metrics.NewRegisteredTimer("eth/fetcher/block/broadcasts/out", nil)
	blockBroadcastDropMeter = metrics.NewRegisteredMeter("eth/fetcher/block/broadcasts/drop", nil)
	blockBroadcastDOSMeter  = metrics.NewRegisteredMeter("eth/fetcher/block/broadcasts/dos", nil)

	headerFetchMeter = metrics.NewRegisteredMeter("eth/fetcher/block/headers", nil)
	bodyFetchMeter   = metrics.NewRegisteredMeter("eth/fetcher/block/bodies", nil)

	headerFilterInMeter  = metrics.NewRegisteredMeter("eth/fetcher/block/filter/headers/in", nil)
	headerFilterOutMeter = metrics.NewRegisteredMeter("eth/fetcher/block/filter/headers/out", nil)
	bodyFilterInMeter    = metrics.NewRegisteredMeter("eth/fetcher/block/filter/bodies/in", nil)
	bodyFilterOutMeter   = metrics.NewRegisteredMeter("eth/fetcher/block/filter/bodies/out", nil)
)

var errTerminated = errors.New("terminated")

// HeaderRetrievalFn is a callback type for retrieving a header from the local chain.
type HeaderRetrievalFn func(common.Hash) *types.Header

// blockRetrievalFn is a callback type for retrieving a block from the local chain.
type blockRetrievalFn func(common.Hash) *types.Block

// headerRequesterFn is a callback type for sending a header retrieval request.
type headerRequesterFn func(common.Hash) error

// bodyRequesterFn is a callback type for sending a body retrieval request.
type bodyRequesterFn func([]common.Hash) error

// headerVerifierFn is a callback type to verify a block's header for fast propagation.
type headerVerifierFn func(header *types.Header) error

// blockBroadcasterFn is a callback type for broadcasting a block to connected peers.
type blockBroadcasterFn func(block *types.Block, propagate bool)

// chainHeightFn is a callback type to retrieve the current chain height.
type chainHeightFn func() uint64

// headersInsertFn is a callback type to insert a batch of headers into the local chain.
type headersInsertFn func(headers []*types.Header) (int, error)

// chainInsertFn is a callback type to insert a batch of blocks into the local chain.
type chainInsertFn func(types.Blocks) (int, error)

// peerDropFn is a callback type for dropping a peer detected as malicious.
type peerDropFn func(id string)

// blockAnnounce is the hash notification of the availability of a new block in the
// network.
type blockAnnounce struct {
	hash   common.Hash   // Hash of the block being announced
	number uint64        // Number of the block being announced (0 = unknown | old protocol)
	header *types.Header // Header of the block partially reassembled (new protocol)
	time   time.Time     // Timestamp of the announcement

	origin string // Identifier of the peer originating the notification

	fetchHeader headerRequesterFn // Fetcher function to retrieve the header of an announced block
	fetchBodies bodyRequesterFn   // Fetcher function to retrieve the body of an announced block
}

// headerFilterTask represents a batch of headers needing fetcher filtering.
type headerFilterTask struct {
	peer    string          // The source peer of block headers
	headers []*types.Header // Collection of headers to filter
	time    time.Time       // Arrival time of the headers
}

// bodyFilterTask represents a batch of block bodies (transactions and uncles)
// needing fetcher filtering.
type bodyFilterTask struct {
	peer         string                 // The source peer of block bodies
	transactions [][]*types.Transaction // Collection of transactions per block body
	uncles       [][]*types.Header      // Collection of uncles per block body
	time         time.Time              // Arrival time of the blocks' contents
}

// blockOrHeaderInject represents a scheduled import operation.
type blockOrHeaderInject struct {
	origin string

	header *types.Header // Used by the light-mode fetcher, which only cares about the header.
	block  *types.Block  // Used by the normal-mode fetcher, which imports the full block.
}

// number returns the block number of the injected object.
func (inject *blockOrHeaderInject) number() uint64 {
	if inject.header != nil {
		return inject.header.Number.Uint64()
	}
	return inject.block.NumberU64()
}

// hash returns the block hash of the injected object.
func (inject *blockOrHeaderInject) hash() common.Hash {
	if inject.header != nil {
		return inject.header.Hash()
	}
	return inject.block.Hash()
}

// BlockFetcher is responsible for accumulating block announcements from various peers
// and scheduling them for retrieval.
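//
// Internally an announcement moves through a small pipeline of states:
// announced (waiting for its fetch timer), fetching (header requested),
// fetched (header received, body still pending), completing (body requested)
// and finally queued for import once the block is fully reassembled.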
type BlockFetcher struct {
	light bool // Indicator of whether it's a light fetcher or a normal one.

	// Various event channels
	notify chan *blockAnnounce
	inject chan *blockOrHeaderInject

	headerFilter chan chan *headerFilterTask
	bodyFilter   chan chan *bodyFilterTask

	done chan common.Hash
	quit chan struct{}

	// Announce states
	announces  map[string]int                   // Per peer blockAnnounce counts to prevent memory exhaustion
	announced  map[common.Hash][]*blockAnnounce // Announced blocks, scheduled for fetching
	fetching   map[common.Hash]*blockAnnounce   // Announced blocks, currently fetching
	fetched    map[common.Hash][]*blockAnnounce // Blocks with headers fetched, scheduled for body retrieval
	completing map[common.Hash]*blockAnnounce   // Blocks with headers, currently body-completing

	// Block cache
	queue  *prque.Prque                         // Queue containing the import operations (block number sorted)
	queues map[string]int                       // Per peer block counts to prevent memory exhaustion
	queued map[common.Hash]*blockOrHeaderInject // Set of already queued blocks (to dedup imports)

	// Callbacks
	getHeader      HeaderRetrievalFn  // Retrieves a header from the local chain
	getBlock       blockRetrievalFn   // Retrieves a block from the local chain
	verifyHeader   headerVerifierFn   // Checks if a block's header has a valid proof of work
	broadcastBlock blockBroadcasterFn // Broadcasts a block to connected peers
	chainHeight    chainHeightFn      // Retrieves the current chain's height
	insertHeaders  headersInsertFn    // Injects a batch of headers into the chain
	insertChain    chainInsertFn      // Injects a batch of blocks into the chain
	dropPeer       peerDropFn         // Drops a peer for misbehaving

	// Testing hooks
	announceChangeHook func(common.Hash, bool)           // Method to call upon adding or deleting a hash from the blockAnnounce list
	queueChangeHook    func(common.Hash, bool)           // Method to call upon adding or deleting a block from the import queue
	fetchingHook       func([]common.Hash)               // Method to call upon starting a block (eth/61) or header (eth/62) fetch
	completingHook     func([]common.Hash)               // Method to call upon starting a block body fetch (eth/62)
	importedHook       func(*types.Header, *types.Block) // Method to call upon successful header or block import (both eth/61 and eth/62)
}

// NewBlockFetcher creates a block fetcher to retrieve blocks based on hash announcements.
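//
// A minimal wiring sketch, assuming the caller supplies callbacks with the
// signatures declared in this file (all names below are illustrative, not part
// of this package):
//
//	fetcher := NewBlockFetcher(false,
//		getHeader,      // HeaderRetrievalFn: look a header up in the local chain
//		getBlock,       // blockRetrievalFn: look a block up in the local chain
//		verifyHeader,   // headerVerifierFn: validate an announced header
//		broadcastBlock, // blockBroadcasterFn: propagate a block to peers
//		chainHeight,    // chainHeightFn: report the current chain height
//		insertHeaders,  // headersInsertFn: import header batches (light mode)
//		insertChain,    // chainInsertFn: import full block batches
//		dropPeer,       // peerDropFn: disconnect a misbehaving peer
//	)
//	fetcher.Start()
//	defer fetcher.Stop()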
func NewBlockFetcher(light bool, getHeader HeaderRetrievalFn, getBlock blockRetrievalFn, verifyHeader headerVerifierFn, broadcastBlock blockBroadcasterFn, chainHeight chainHeightFn, insertHeaders headersInsertFn, insertChain chainInsertFn, dropPeer peerDropFn) *BlockFetcher {
	return &BlockFetcher{
		light:          light,
		notify:         make(chan *blockAnnounce),
		inject:         make(chan *blockOrHeaderInject),
		headerFilter:   make(chan chan *headerFilterTask),
		bodyFilter:     make(chan chan *bodyFilterTask),
		done:           make(chan common.Hash),
		quit:           make(chan struct{}),
		announces:      make(map[string]int),
		announced:      make(map[common.Hash][]*blockAnnounce),
		fetching:       make(map[common.Hash]*blockAnnounce),
		fetched:        make(map[common.Hash][]*blockAnnounce),
		completing:     make(map[common.Hash]*blockAnnounce),
		queue:          prque.New(nil),
		queues:         make(map[string]int),
		queued:         make(map[common.Hash]*blockOrHeaderInject),
		getHeader:      getHeader,
		getBlock:       getBlock,
		verifyHeader:   verifyHeader,
		broadcastBlock: broadcastBlock,
		chainHeight:    chainHeight,
		insertHeaders:  insertHeaders,
		insertChain:    insertChain,
		dropPeer:       dropPeer,
	}
}

// Start boots up the announcement based synchroniser, accepting and processing
// hash notifications and block fetches until termination is requested.
func (f *BlockFetcher) Start() {
	go f.loop()
}

// Stop terminates the announcement based synchroniser, canceling all pending
// operations.
func (f *BlockFetcher) Stop() {
	close(f.quit)
}

// Notify announces to the fetcher the potential availability of a new block in
// the network.
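//
// A caller-side sketch, assuming a hypothetical block announcement handler
// (the peer identifier and request helpers below are illustrative):
//
//	if err := fetcher.Notify(peer.ID(), hash, number, time.Now(),
//		func(h common.Hash) error { return peer.RequestOneHeader(h) },
//		func(hs []common.Hash) error { return peer.RequestBodies(hs) },
//	); err != nil {
//		// The fetcher is shutting down; the announcement is dropped.
//	}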
func (f *BlockFetcher) Notify(peer string, hash common.Hash, number uint64, time time.Time,
	headerFetcher headerRequesterFn, bodyFetcher bodyRequesterFn) error {
	block := &blockAnnounce{
		hash:        hash,
		number:      number,
		time:        time,
		origin:      peer,
		fetchHeader: headerFetcher,
		fetchBodies: bodyFetcher,
	}
	select {
	case f.notify <- block:
		return nil
	case <-f.quit:
		return errTerminated
	}
}

// Enqueue tries to fill gaps in the fetcher's future import queue.
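//
// A caller-side sketch, assuming a hypothetical NewBlock message handler (the
// peer identifier below is illustrative):
//
//	if err := fetcher.Enqueue(peer.ID(), block); err != nil {
//		// The fetcher is shutting down; the block is dropped.
//	}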
func (f *BlockFetcher) Enqueue(peer string, block *types.Block) error {
	op := &blockOrHeaderInject{
		origin: peer,
		block:  block,
	}
	select {
	case f.inject <- op:
		return nil
	case <-f.quit:
		return errTerminated
	}
}

// FilterHeaders extracts all the headers that were explicitly requested by the fetcher,
// returning those that should be handled differently.
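//
// A caller-side sketch, assuming a hypothetical BlockHeaders message handler:
// headers the fetcher did not claim are handed to the rest of the system
// (the peer and downloader identifiers below are illustrative):
//
//	unclaimed := fetcher.FilterHeaders(peer.ID(), headers, time.Now())
//	if len(unclaimed) > 0 {
//		// Deliver the leftover headers to other sync components.
//		downloader.DeliverHeaders(peer.ID(), unclaimed)
//	}
//
// FilterBodies below follows the same request/filter/response pattern for
// block bodies.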
func (f *BlockFetcher) FilterHeaders(peer string, headers []*types.Header, time time.Time) []*types.Header {
	log.Trace("Filtering headers", "peer", peer, "headers", len(headers))

	// Send the filter channel to the fetcher
	filter := make(chan *headerFilterTask)

	select {
	case f.headerFilter <- filter:
	case <-f.quit:
		return nil
	}
	// Request the filtering of the header list
	select {
	case filter <- &headerFilterTask{peer: peer, headers: headers, time: time}:
	case <-f.quit:
		return nil
	}
	// Retrieve the headers remaining after filtering
	select {
	case task := <-filter:
		return task.headers
	case <-f.quit:
		return nil
	}
}

// FilterBodies extracts all the block bodies that were explicitly requested by
// the fetcher, returning those that should be handled differently.
func (f *BlockFetcher) FilterBodies(peer string, transactions [][]*types.Transaction, uncles [][]*types.Header, time time.Time) ([][]*types.Transaction, [][]*types.Header) {
	log.Trace("Filtering bodies", "peer", peer, "txs", len(transactions), "uncles", len(uncles))

	// Send the filter channel to the fetcher
	filter := make(chan *bodyFilterTask)

	select {
	case f.bodyFilter <- filter:
	case <-f.quit:
		return nil, nil
	}
	// Request the filtering of the body list
	select {
	case filter <- &bodyFilterTask{peer: peer, transactions: transactions, uncles: uncles, time: time}:
	case <-f.quit:
		return nil, nil
	}
	// Retrieve the bodies remaining after filtering
	select {
	case task := <-filter:
		return task.transactions, task.uncles
	case <-f.quit:
		return nil, nil
	}
}

// loop is the main fetcher loop, checking and processing various notification
// events.
func (f *BlockFetcher) loop() {
	// Iterate the block fetching until a quit is requested
	var (
		fetchTimer    = time.NewTimer(0)
		completeTimer = time.NewTimer(0)
	)
	<-fetchTimer.C // clear out the channel
	<-completeTimer.C
	defer fetchTimer.Stop()
	defer completeTimer.Stop()

	for {
		// Clean up any expired block fetches
		for hash, announce := range f.fetching {
			if time.Since(announce.time) > fetchTimeout {
				f.forgetHash(hash)
			}
		}
		// Import any queued blocks that could potentially fit
		height := f.chainHeight()
		for !f.queue.Empty() {
			op := f.queue.PopItem().(*blockOrHeaderInject)
			hash := op.hash()
			if f.queueChangeHook != nil {
				f.queueChangeHook(hash, false)
			}
			// If too high up the chain or phase, continue later
			number := op.number()
			if number > height+1 {
				f.queue.Push(op, -int64(number))
				if f.queueChangeHook != nil {
					f.queueChangeHook(hash, true)
				}
				break
			}
			// Otherwise if fresh and still unknown, try and import
			if (number+maxUncleDist < height) || (f.light && f.getHeader(hash) != nil) || (!f.light && f.getBlock(hash) != nil) {
				f.forgetBlock(hash)
				continue
			}
			if f.light {
				f.importHeaders(op.origin, op.header)
			} else {
				f.importBlocks(op.origin, op.block)
			}
		}
		// Wait for an outside event to occur
		select {
		case <-f.quit:
			// BlockFetcher terminating, abort all operations
			return

		case notification := <-f.notify:
			// A block was announced, make sure the peer isn't DOSing us
			blockAnnounceInMeter.Mark(1)

			count := f.announces[notification.origin] + 1
			if count > hashLimit {
				log.Debug("Peer exceeded outstanding announces", "peer", notification.origin, "limit", hashLimit)
				blockAnnounceDOSMeter.Mark(1)
				break
			}
			// If we have a valid block number, check that it's potentially useful
			if notification.number > 0 {
				if dist := int64(notification.number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
					log.Debug("Peer discarded announcement", "peer", notification.origin, "number", notification.number, "hash", notification.hash, "distance", dist)
					blockAnnounceDropMeter.Mark(1)
					break
				}
			}
			// All is well, schedule the announce if block's not yet downloading
			if _, ok := f.fetching[notification.hash]; ok {
				break
			}
			if _, ok := f.completing[notification.hash]; ok {
				break
			}
			f.announces[notification.origin] = count
			f.announced[notification.hash] = append(f.announced[notification.hash], notification)
			if f.announceChangeHook != nil && len(f.announced[notification.hash]) == 1 {
				f.announceChangeHook(notification.hash, true)
			}
			if len(f.announced) == 1 {
				f.rescheduleFetch(fetchTimer)
			}

		case op := <-f.inject:
			// A direct block insertion was requested, try and fill any pending gaps
			blockBroadcastInMeter.Mark(1)

			// Only full blocks arrive via direct injection; the light fetcher
			// cannot import them, so drop the request silently here.
			if f.light {
				continue
			}
			f.enqueue(op.origin, nil, op.block)

		case hash := <-f.done:
			// A pending import finished, remove all traces of the notification
			f.forgetHash(hash)
			f.forgetBlock(hash)

		case <-fetchTimer.C:
			// At least one block's timer ran out, check for needing retrieval
			request := make(map[string][]common.Hash)

			for hash, announces := range f.announced {
				// In the current LES protocol (les2/les3) only header announcements are
				// available, so there is no need to wait long for a header broadcast.
				timeout := arriveTimeout - gatherSlack
				if f.light {
					timeout = 0
				}
				if time.Since(announces[0].time) > timeout {
					// Pick a random peer to retrieve from, reset all others
					announce := announces[rand.Intn(len(announces))]
					f.forgetHash(hash)

					// If the block still didn't arrive, queue for fetching
					if (f.light && f.getHeader(hash) == nil) || (!f.light && f.getBlock(hash) == nil) {
						request[announce.origin] = append(request[announce.origin], hash)
						f.fetching[hash] = announce
					}
				}
			}
			// Send out all block header requests
			for peer, hashes := range request {
				log.Trace("Fetching scheduled headers", "peer", peer, "list", hashes)

				// Create a closure of the fetch and schedule it on a new thread
				fetchHeader, hashes := f.fetching[hashes[0]].fetchHeader, hashes
				go func() {
					if f.fetchingHook != nil {
						f.fetchingHook(hashes)
					}
					for _, hash := range hashes {
						headerFetchMeter.Mark(1)
						fetchHeader(hash) // Suboptimal, but protocol doesn't allow batch header retrievals
					}
				}()
			}
			// Schedule the next fetch if blocks are still pending
			f.rescheduleFetch(fetchTimer)

		case <-completeTimer.C:
			// At least one header's timer ran out, retrieve everything
			request := make(map[string][]common.Hash)

			for hash, announces := range f.fetched {
				// Pick a random peer to retrieve from, reset all others
				announce := announces[rand.Intn(len(announces))]
				f.forgetHash(hash)

				// If the block still didn't arrive, queue for completion
				if f.getBlock(hash) == nil {
					request[announce.origin] = append(request[announce.origin], hash)
					f.completing[hash] = announce
				}
			}
			// Send out all block body requests
			for peer, hashes := range request {
				log.Trace("Fetching scheduled bodies", "peer", peer, "list", hashes)

				// Create a closure of the fetch and schedule it on a new thread
				if f.completingHook != nil {
					f.completingHook(hashes)
				}
				bodyFetchMeter.Mark(int64(len(hashes)))
				go f.completing[hashes[0]].fetchBodies(hashes)
			}
			// Schedule the next fetch if blocks are still pending
			f.rescheduleComplete(completeTimer)

		case filter := <-f.headerFilter:
			// Headers arrived from a remote peer. Extract those that were explicitly
			// requested by the fetcher, and return everything else so it's delivered
			// to other parts of the system.
			var task *headerFilterTask
			select {
			case task = <-filter:
			case <-f.quit:
				return
			}
			headerFilterInMeter.Mark(int64(len(task.headers)))

			// Split the batch of headers into unknown ones (to return to the caller),
			// known incomplete ones (requiring body retrievals) and completed blocks.
			unknown, incomplete, complete, lightHeaders := []*types.Header{}, []*blockAnnounce{}, []*types.Block{}, []*blockAnnounce{}
			for _, header := range task.headers {
				hash := header.Hash()

				// Filter fetcher-requested headers from other synchronisation algorithms
				if announce := f.fetching[hash]; announce != nil && announce.origin == task.peer && f.fetched[hash] == nil && f.completing[hash] == nil && f.queued[hash] == nil {
					// If the delivered header does not match the promised number, drop the announcer
					if header.Number.Uint64() != announce.number {
						log.Trace("Invalid block number fetched", "peer", announce.origin, "hash", header.Hash(), "announced", announce.number, "provided", header.Number)
						f.dropPeer(announce.origin)
						f.forgetHash(hash)
						continue
					}
					// Collect all headers only if we are running in light
					// mode and the headers are not imported by other means.
					if f.light {
						if f.getHeader(hash) == nil {
							announce.header = header
							lightHeaders = append(lightHeaders, announce)
						}
						f.forgetHash(hash)
						continue
					}
					// Only keep if not imported by other means
					if f.getBlock(hash) == nil {
						announce.header = header
						announce.time = task.time

						// If the block is empty (header only), short circuit into the final import queue
						if header.TxHash == types.EmptyRootHash && header.UncleHash == types.EmptyUncleHash {
							log.Trace("Block empty, skipping body retrieval", "peer", announce.origin, "number", header.Number, "hash", header.Hash())

							block := types.NewBlockWithHeader(header)
							block.ReceivedAt = task.time

							complete = append(complete, block)
							f.completing[hash] = announce
							continue
						}
						// Otherwise add to the list of blocks needing completion
						incomplete = append(incomplete, announce)
					} else {
						log.Trace("Block already imported, discarding header", "peer", announce.origin, "number", header.Number, "hash", header.Hash())
						f.forgetHash(hash)
					}
				} else {
					// BlockFetcher doesn't know about it, add to the return list
					unknown = append(unknown, header)
				}
			}
			headerFilterOutMeter.Mark(int64(len(unknown)))
			select {
			case filter <- &headerFilterTask{headers: unknown, time: task.time}:
			case <-f.quit:
				return
			}
			// Schedule the retrieved headers for body completion
			for _, announce := range incomplete {
				hash := announce.header.Hash()
				if _, ok := f.completing[hash]; ok {
					continue
				}
				f.fetched[hash] = append(f.fetched[hash], announce)
				if len(f.fetched) == 1 {
					f.rescheduleComplete(completeTimer)
				}
			}
			// Schedule the header for light fetcher import
			for _, announce := range lightHeaders {
				f.enqueue(announce.origin, announce.header, nil)
			}
			// Schedule the header-only blocks for import
			for _, block := range complete {
				if announce := f.completing[block.Hash()]; announce != nil {
					f.enqueue(announce.origin, nil, block)
				}
			}

		case filter := <-f.bodyFilter:
			// Block bodies arrived, extract any explicitly requested blocks, return the rest
			var task *bodyFilterTask
			select {
			case task = <-filter:
			case <-f.quit:
				return
			}
			bodyFilterInMeter.Mark(int64(len(task.transactions)))
			blocks := []*types.Block{}
			// Abort early if there's nothing explicitly requested
			if len(f.completing) > 0 {
				for i := 0; i < len(task.transactions) && i < len(task.uncles); i++ {
					// Match up a body to any possible completion request
					var (
						matched   = false
						uncleHash common.Hash // calculated lazily and reused
						txnHash   common.Hash // calculated lazily and reused
					)
					for hash, announce := range f.completing {
						if f.queued[hash] != nil || announce.origin != task.peer {
							continue
						}
						if uncleHash == (common.Hash{}) {
							uncleHash = types.CalcUncleHash(task.uncles[i])
						}
						if uncleHash != announce.header.UncleHash {
							continue
						}
						if txnHash == (common.Hash{}) {
							txnHash = types.DeriveSha(types.Transactions(task.transactions[i]), trie.NewStackTrie(nil))
						}
						if txnHash != announce.header.TxHash {
							continue
						}
						// Mark the body matched, reassemble if still unknown
						matched = true
						if f.getBlock(hash) == nil {
							block := types.NewBlockWithHeader(announce.header).WithBody(task.transactions[i], task.uncles[i])
							block.ReceivedAt = task.time
							blocks = append(blocks, block)
						} else {
							f.forgetHash(hash)
						}

					}
					if matched {
						task.transactions = append(task.transactions[:i], task.transactions[i+1:]...)
						task.uncles = append(task.uncles[:i], task.uncles[i+1:]...)
						i--
						continue
					}
				}
			}
			bodyFilterOutMeter.Mark(int64(len(task.transactions)))
			select {
			case filter <- task:
			case <-f.quit:
				return
			}
			// Schedule the retrieved blocks for ordered import
			for _, block := range blocks {
				if announce := f.completing[block.Hash()]; announce != nil {
					f.enqueue(announce.origin, nil, block)
				}
			}
		}
	}
}

// rescheduleFetch resets the specified fetch timer to the next blockAnnounce timeout.
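// For example, with arriveTimeout = 500ms, an announcement that arrived 200ms
// ago causes the timer to be reset to fire in roughly 300ms, just as that
// announcement reaches its request deadline.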
func (f *BlockFetcher) rescheduleFetch(fetch *time.Timer) {
	// Short circuit if no blocks are announced
	if len(f.announced) == 0 {
		return
	}
	// Schedule announcement retrieval quickly for light mode since the
	// server won't send any headers to the client.
	if f.light {
		fetch.Reset(lightTimeout)
		return
	}
	// Otherwise find the earliest expiring announcement
	earliest := time.Now()
	for _, announces := range f.announced {
		if earliest.After(announces[0].time) {
			earliest = announces[0].time
		}
	}
	fetch.Reset(arriveTimeout - time.Since(earliest))
}

// rescheduleComplete resets the specified completion timer to the next fetch timeout.
func (f *BlockFetcher) rescheduleComplete(complete *time.Timer) {
	// Short circuit if no headers are fetched
	if len(f.fetched) == 0 {
		return
	}
	// Otherwise find the earliest expiring announcement
	earliest := time.Now()
	for _, announces := range f.fetched {
		if earliest.After(announces[0].time) {
			earliest = announces[0].time
		}
	}
	complete.Reset(gatherSlack - time.Since(earliest))
}

// enqueue schedules a new header or block import operation, if the component
// to be imported has not yet been seen.
func (f *BlockFetcher) enqueue(peer string, header *types.Header, block *types.Block) {
	var (
		hash   common.Hash
		number uint64
	)
	if header != nil {
		hash, number = header.Hash(), header.Number.Uint64()
	} else {
		hash, number = block.Hash(), block.NumberU64()
	}
	// Ensure the peer isn't DOSing us
	count := f.queues[peer] + 1
	if count > blockLimit {
		log.Debug("Discarded delivered header or block, exceeded allowance", "peer", peer, "number", number, "hash", hash, "limit", blockLimit)
		blockBroadcastDOSMeter.Mark(1)
		f.forgetHash(hash)
		return
	}
	// Discard any past or too distant blocks
	if dist := int64(number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
		log.Debug("Discarded delivered header or block, too far away", "peer", peer, "number", number, "hash", hash, "distance", dist)
		blockBroadcastDropMeter.Mark(1)
		f.forgetHash(hash)
		return
	}
	// Schedule the block for future importing
	if _, ok := f.queued[hash]; !ok {
		op := &blockOrHeaderInject{origin: peer}
		if header != nil {
			op.header = header
		} else {
			op.block = block
		}
		f.queues[peer] = count
		f.queued[hash] = op
		f.queue.Push(op, -int64(number))
		if f.queueChangeHook != nil {
			f.queueChangeHook(hash, true)
		}
		log.Debug("Queued delivered header or block", "peer", peer, "number", number, "hash", hash, "queued", f.queue.Size())
	}
}

// importHeaders spawns a new goroutine to run a header insertion into the chain.
// When the insertion finishes, the hash is signalled back on the done channel so
// the main loop can clean up the associated announcement state.
func (f *BlockFetcher) importHeaders(peer string, header *types.Header) {
	hash := header.Hash()
	log.Debug("Importing propagated header", "peer", peer, "number", header.Number, "hash", hash)

	go func() {
		defer func() { f.done <- hash }()
		// If the parent's unknown, abort insertion
		parent := f.getHeader(header.ParentHash)
		if parent == nil {
			log.Debug("Unknown parent of propagated header", "peer", peer, "number", header.Number, "hash", hash, "parent", header.ParentHash)
			return
		}
		// Validate the header and if something went wrong, drop the peer
		if err := f.verifyHeader(header); err != nil && err != consensus.ErrFutureBlock {
			log.Debug("Propagated header verification failed", "peer", peer, "number", header.Number, "hash", hash, "err", err)
			f.dropPeer(peer)
			return
		}
		// Run the actual import and log any issues
		if _, err := f.insertHeaders([]*types.Header{header}); err != nil {
			log.Debug("Propagated header import failed", "peer", peer, "number", header.Number, "hash", hash, "err", err)
			return
		}
		// Invoke the testing hook if needed
		if f.importedHook != nil {
			f.importedHook(header, nil)
		}
	}()
}

// importBlocks spawns a new goroutine to run a block insertion into the chain.
// When the insertion finishes, the hash is signalled back on the done channel so
// the main loop can clean up the associated announcement state.
func (f *BlockFetcher) importBlocks(peer string, block *types.Block) {
	hash := block.Hash()

	// Run the import on a new thread
	log.Debug("Importing propagated block", "peer", peer, "number", block.Number(), "hash", hash)
	go func() {
		defer func() { f.done <- hash }()

		// If the parent's unknown, abort insertion
		parent := f.getBlock(block.ParentHash())
		if parent == nil {
			log.Debug("Unknown parent of propagated block", "peer", peer, "number", block.Number(), "hash", hash, "parent", block.ParentHash())
			return
		}
		// Quickly validate the header and propagate the block if it passes
		switch err := f.verifyHeader(block.Header()); err {
		case nil:
			// All ok, quickly propagate to our peers
			blockBroadcastOutTimer.UpdateSince(block.ReceivedAt)
			go f.broadcastBlock(block, true)

		case consensus.ErrFutureBlock:
			// Weird future block, don't fail, but neither propagate

		default:
			// Something went very wrong, drop the peer
			log.Debug("Propagated block verification failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err)
			f.dropPeer(peer)
			return
		}
		// Run the actual import and log any issues
		if _, err := f.insertChain(types.Blocks{block}); err != nil {
			log.Debug("Propagated block import failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err)
			return
		}
		// If import succeeded, broadcast the block
		blockAnnounceOutTimer.UpdateSince(block.ReceivedAt)
		go f.broadcastBlock(block, false)

		// Invoke the testing hook if needed
		if f.importedHook != nil {
			f.importedHook(nil, block)
		}
	}()
}

// forgetHash removes all traces of a block announcement from the fetcher's
// internal state.
func (f *BlockFetcher) forgetHash(hash common.Hash) {
	// Remove all pending announces and decrement DOS counters
	if announceMap, ok := f.announced[hash]; ok {
		for _, announce := range announceMap {
			f.announces[announce.origin]--
			if f.announces[announce.origin] <= 0 {
				delete(f.announces, announce.origin)
			}
		}
		delete(f.announced, hash)
		if f.announceChangeHook != nil {
			f.announceChangeHook(hash, false)
		}
	}
	// Remove any pending fetches and decrement the DOS counters
	if announce := f.fetching[hash]; announce != nil {
		f.announces[announce.origin]--
		if f.announces[announce.origin] <= 0 {
			delete(f.announces, announce.origin)
		}
		delete(f.fetching, hash)
	}

	// Remove any pending completion requests and decrement the DOS counters
	for _, announce := range f.fetched[hash] {
		f.announces[announce.origin]--
		if f.announces[announce.origin] <= 0 {
			delete(f.announces, announce.origin)
		}
	}
	delete(f.fetched, hash)

	// Remove any pending completions and decrement the DOS counters
	if announce := f.completing[hash]; announce != nil {
		f.announces[announce.origin]--
		if f.announces[announce.origin] <= 0 {
			delete(f.announces, announce.origin)
		}
		delete(f.completing, hash)
	}
}

// forgetBlock removes all traces of a queued block from the fetcher's internal
// state.
func (f *BlockFetcher) forgetBlock(hash common.Hash) {
	if insert := f.queued[hash]; insert != nil {
		f.queues[insert.origin]--
		if f.queues[insert.origin] == 0 {
			delete(f.queues, insert.origin)
		}
		delete(f.queued, hash)
	}
}
890