// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Contains the block download scheduler to collect download tasks and schedule
// them in an ordered and throttled way.

package downloader

import (
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/prque"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
)

const (
	bodyType    = uint(0)
	receiptType = uint(1)
)

var (
	blockCacheMaxItems     = 8192              // Maximum number of blocks to cache before throttling the download
	blockCacheInitialItems = 2048              // Initial number of blocks to start fetching, before we know the sizes of the blocks
	blockCacheMemory       = 256 * 1024 * 1024 // Maximum amount of memory to use for block caching
	blockCacheSizeWeight   = 0.1               // Multiplier to approximate the average block size based on past ones
)

var (
	errNoFetchesPending = errors.New("no fetches pending")
	errStaleDelivery    = errors.New("stale delivery")
)

// fetchRequest is a currently running data retrieval operation.
type fetchRequest struct {
	Peer    *peerConnection // Peer to which the request was sent
	From    uint64          // Requested chain element index (used for skeleton fills only)
	Headers []*types.Header // Requested headers, sorted by request order
	Time    time.Time       // Time when the request was made
}

// fetchResult is a struct collecting partial results from data fetchers until
// all outstanding pieces complete and the result as a whole can be processed.
type fetchResult struct {
	pending int32 // Bit flags telling which deliveries are still outstanding

	Header       *types.Header
	Uncles       []*types.Header
	Transactions types.Transactions
	Receipts     types.Receipts
}

func newFetchResult(header *types.Header, fastSync bool) *fetchResult {
	item := &fetchResult{
		Header: header,
	}
	if !header.EmptyBody() {
		item.pending |= (1 << bodyType)
	}
	if fastSync && !header.EmptyReceipts() {
		item.pending |= (1 << receiptType)
	}
	return item
}

// SetBodyDone flags the body as finished.
func (f *fetchResult) SetBodyDone() {
	if v := atomic.LoadInt32(&f.pending); (v & (1 << bodyType)) != 0 {
		atomic.AddInt32(&f.pending, -1)
	}
}

// AllDone checks if the item is done.
func (f *fetchResult) AllDone() bool {
	return atomic.LoadInt32(&f.pending) == 0
}

// SetReceiptsDone flags the receipts as finished.
func (f *fetchResult) SetReceiptsDone() {
	if v := atomic.LoadInt32(&f.pending); (v & (1 << receiptType)) != 0 {
		atomic.AddInt32(&f.pending, -2)
	}
}

// Done checks if the given type is done already.
func (f *fetchResult) Done(kind uint) bool {
	v := atomic.LoadInt32(&f.pending)
	return v&(1<<kind) == 0
}
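
// For illustration, during a fast/snap sync a header with a non-empty body
// and non-empty receipts starts out with both pending bits set, and the
// Set*Done methods clear them one at a time:
//
//	r := newFetchResult(header, true) // pending == 0b11
//	r.SetBodyDone()                   // subtracts 1 (1<<bodyType):    pending == 0b10
//	r.SetReceiptsDone()               // subtracts 2 (1<<receiptType): pending == 0b00
//	r.AllDone()                       // true
//
// The subtraction is equivalent to clearing the bit, since the guarding load
// in each setter ensures the bit is currently set.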

// queue represents hashes that either need fetching or are being fetched.
type queue struct {
	mode SyncMode // Synchronisation mode to decide on the block parts to schedule for fetching

	// Headers are "special", they download in batches, supported by a skeleton chain
	headerHead      common.Hash                    // Hash of the last queued header to verify order
	headerTaskPool  map[uint64]*types.Header       // Pending header retrieval tasks, mapping starting indexes to skeleton headers
	headerTaskQueue *prque.Prque                   // Priority queue of the skeleton indexes to fetch the filling headers for
	headerPeerMiss  map[string]map[uint64]struct{} // Set of per-peer header batches known to be unavailable
	headerPendPool  map[string]*fetchRequest       // Currently pending header retrieval operations
	headerResults   []*types.Header                // Result cache accumulating the completed headers
	headerHashes    []common.Hash                  // Result cache accumulating the completed header hashes
	headerProced    int                            // Number of headers already processed from the results
	headerOffset    uint64                         // Number of the first header in the result cache
	headerContCh    chan bool                      // Channel to notify when header download finishes

	// All data retrievals below are based on an already assembled header chain
	blockTaskPool  map[common.Hash]*types.Header // Pending block (body) retrieval tasks, mapping hashes to headers
	blockTaskQueue *prque.Prque                  // Priority queue of the headers to fetch the blocks (bodies) for
	blockPendPool  map[string]*fetchRequest      // Currently pending block (body) retrieval operations
	blockWakeCh    chan bool                     // Channel to notify the block fetcher of new tasks

	receiptTaskPool  map[common.Hash]*types.Header // Pending receipt retrieval tasks, mapping hashes to headers
	receiptTaskQueue *prque.Prque                  // Priority queue of the headers to fetch the receipts for
	receiptPendPool  map[string]*fetchRequest      // Currently pending receipt retrieval operations
	receiptWakeCh    chan bool                     // Channel to notify the receipt fetcher of new tasks

	resultCache *resultStore       // Downloaded but not yet delivered fetch results
	resultSize  common.StorageSize // Approximate size of a block (exponential moving average)

	lock   *sync.RWMutex
	active *sync.Cond
	closed bool

	lastStatLog time.Time
}

// newQueue creates a new download queue for scheduling block retrieval.
func newQueue(blockCacheLimit int, thresholdInitialSize int) *queue {
	lock := new(sync.RWMutex)
	q := &queue{
		headerContCh:     make(chan bool, 1),
		blockTaskQueue:   prque.New(nil),
		blockWakeCh:      make(chan bool, 1),
		receiptTaskQueue: prque.New(nil),
		receiptWakeCh:    make(chan bool, 1),
		active:           sync.NewCond(lock),
		lock:             lock,
	}
	q.Reset(blockCacheLimit, thresholdInitialSize)
	return q
}
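
// A rough usage sketch (origin is a hypothetical sync starting point; the
// real call sites live in the downloader itself):
//
//	q := newQueue(blockCacheMaxItems, blockCacheInitialItems)
//	q.Prepare(origin+1, SnapSync) // accept results from origin+1 onwards
//	// ... Schedule headers, Reserve* fetches, Deliver* responses, Results(true) ...
//	q.Close() // unblocks any blocked Results caller when the sync ends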

// Reset clears out the queue contents.
func (q *queue) Reset(blockCacheLimit int, thresholdInitialSize int) {
	q.lock.Lock()
	defer q.lock.Unlock()

	q.closed = false
	q.mode = FullSync

	q.headerHead = common.Hash{}
	q.headerPendPool = make(map[string]*fetchRequest)

	q.blockTaskPool = make(map[common.Hash]*types.Header)
	q.blockTaskQueue.Reset()
	q.blockPendPool = make(map[string]*fetchRequest)

	q.receiptTaskPool = make(map[common.Hash]*types.Header)
	q.receiptTaskQueue.Reset()
	q.receiptPendPool = make(map[string]*fetchRequest)

	q.resultCache = newResultStore(blockCacheLimit)
	q.resultCache.SetThrottleThreshold(uint64(thresholdInitialSize))
}

// Close marks the end of the sync, unblocking Results.
// It may be called even if the queue is already closed.
func (q *queue) Close() {
	q.lock.Lock()
	q.closed = true
	q.active.Signal()
	q.lock.Unlock()
}

// PendingHeaders retrieves the number of header requests pending for retrieval.
func (q *queue) PendingHeaders() int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.headerTaskQueue.Size()
}

// PendingBodies retrieves the number of block body requests pending for retrieval.
func (q *queue) PendingBodies() int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.blockTaskQueue.Size()
}

// PendingReceipts retrieves the number of block receipts pending for retrieval.
func (q *queue) PendingReceipts() int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.receiptTaskQueue.Size()
}

// InFlightBlocks retrieves whether there are block fetch requests currently in
// flight.
func (q *queue) InFlightBlocks() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	return len(q.blockPendPool) > 0
}

// InFlightReceipts retrieves whether there are receipt fetch requests currently
// in flight.
func (q *queue) InFlightReceipts() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	return len(q.receiptPendPool) > 0
}

// Idle returns whether the queue is fully idle or has some data still inside.
func (q *queue) Idle() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	queued := q.blockTaskQueue.Size() + q.receiptTaskQueue.Size()
	pending := len(q.blockPendPool) + len(q.receiptPendPool)

	return (queued + pending) == 0
}

// ScheduleSkeleton adds a batch of header retrieval tasks to the queue to fill
// up an already retrieved header skeleton.
func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) {
	q.lock.Lock()
	defer q.lock.Unlock()

	// No skeleton retrieval can be in progress, fail hard if so (huge implementation bug)
	if q.headerResults != nil {
		panic("skeleton assembly already in progress")
	}
	// Schedule all the header retrieval tasks for the skeleton assembly
	q.headerTaskPool = make(map[uint64]*types.Header)
	q.headerTaskQueue = prque.New(nil)
	q.headerPeerMiss = make(map[string]map[uint64]struct{}) // Reset availability to correct invalid chains
	q.headerResults = make([]*types.Header, len(skeleton)*MaxHeaderFetch)
	q.headerHashes = make([]common.Hash, len(skeleton)*MaxHeaderFetch)
	q.headerProced = 0
	q.headerOffset = from
	q.headerContCh = make(chan bool, 1)

	for i, header := range skeleton {
		index := from + uint64(i*MaxHeaderFetch)

		q.headerTaskPool[index] = header
		q.headerTaskQueue.Push(index, -int64(index))
	}
}
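
// As a worked example, assuming MaxHeaderFetch is 192: for from == 1 and a
// three-entry skeleton, tasks are scheduled at indexes 1, 193 and 385. Each
// task covers the MaxHeaderFetch headers starting at its index, and the last
// header of a delivered batch must hash to the matching skeleton entry (see
// DeliverHeaders below).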

// RetrieveHeaders retrieves the header chain assembled based on the scheduled
// skeleton.
func (q *queue) RetrieveHeaders() ([]*types.Header, []common.Hash, int) {
	q.lock.Lock()
	defer q.lock.Unlock()

	headers, hashes, proced := q.headerResults, q.headerHashes, q.headerProced
	q.headerResults, q.headerHashes, q.headerProced = nil, nil, 0

	return headers, hashes, proced
}

// Schedule adds a set of headers to the download queue for scheduling,
// returning the new headers encountered.
func (q *queue) Schedule(headers []*types.Header, hashes []common.Hash, from uint64) []*types.Header {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Insert all the headers prioritised by the contained block number
	inserts := make([]*types.Header, 0, len(headers))
	for i, header := range headers {
		// Make sure chain order is honoured and preserved throughout
		hash := hashes[i]
		if header.Number == nil || header.Number.Uint64() != from {
			log.Warn("Header broke chain ordering", "number", header.Number, "hash", hash, "expected", from)
			break
		}
		if q.headerHead != (common.Hash{}) && q.headerHead != header.ParentHash {
			log.Warn("Header broke chain ancestry", "number", header.Number, "hash", hash)
			break
		}
		// Make sure no duplicate requests are executed
		// We cannot skip this, even if the block is empty, since this is
		// what triggers the fetchResult creation.
		if _, ok := q.blockTaskPool[hash]; ok {
			log.Warn("Header already scheduled for block fetch", "number", header.Number, "hash", hash)
		} else {
			q.blockTaskPool[hash] = header
			q.blockTaskQueue.Push(header, -int64(header.Number.Uint64()))
		}
		// Queue for receipt retrieval
		if q.mode == SnapSync && !header.EmptyReceipts() {
			if _, ok := q.receiptTaskPool[hash]; ok {
				log.Warn("Header already scheduled for receipt fetch", "number", header.Number, "hash", hash)
			} else {
				q.receiptTaskPool[hash] = header
				q.receiptTaskQueue.Push(header, -int64(header.Number.Uint64()))
			}
		}
		inserts = append(inserts, header)
		q.headerHead = hash
		from++
	}
	return inserts
}
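
// Note that a batch is only accepted up to the first ordering or ancestry
// violation: for headers [n, n+1, n+2] where n+1 has a mismatched ParentHash,
// only header n is scheduled and returned, and the remainder is dropped for
// this call.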

// Results retrieves and permanently removes a batch of fetch results from
// the cache. The result slice will be empty if the queue has been closed.
// Results can be called concurrently with Deliver and Schedule, but assumes
// that there are not two simultaneous callers to Results.
func (q *queue) Results(block bool) []*fetchResult {
	// Abort early if there are no items and non-blocking requested
	if !block && !q.resultCache.HasCompletedItems() {
		return nil
	}
	closed := false
	for !closed && !q.resultCache.HasCompletedItems() {
		// In order to wait on 'active', we need to obtain the lock.
		// That may take a while, if someone is delivering at the same
		// time, so after obtaining the lock, we check again if there
		// are any results to fetch.
		// Also, in between asking for the lock and obtaining it, someone
		// can have closed the queue. In that case, we should return the
		// available results and stop blocking.
		q.lock.Lock()
		if q.resultCache.HasCompletedItems() || q.closed {
			q.lock.Unlock()
			break
		}
		// No items available, and not closed
		q.active.Wait()
		closed = q.closed
		q.lock.Unlock()
	}
	// Regardless of whether the queue is closed or not, we can still deliver whatever we have
	results := q.resultCache.GetCompleted(maxResultsProcess)
	for _, result := range results {
		// Recalculate the result item weights to prevent memory exhaustion
		size := result.Header.Size()
		for _, uncle := range result.Uncles {
			size += uncle.Size()
		}
		for _, receipt := range result.Receipts {
			size += receipt.Size()
		}
		for _, tx := range result.Transactions {
			size += tx.Size()
		}
		q.resultSize = common.StorageSize(blockCacheSizeWeight)*size +
			(1-common.StorageSize(blockCacheSizeWeight))*q.resultSize
	}
	// Using the newly calibrated result size, figure out the new throttle limit
	// on the result cache
	throttleThreshold := uint64((common.StorageSize(blockCacheMemory) + q.resultSize - 1) / q.resultSize)
	throttleThreshold = q.resultCache.SetThrottleThreshold(throttleThreshold)

	// With results removed from the cache, wake throttled fetchers
	for _, ch := range []chan bool{q.blockWakeCh, q.receiptWakeCh} {
		select {
		case ch <- true:
		default:
		}
	}
	// Log some info at certain times
	if time.Since(q.lastStatLog) > 60*time.Second {
		q.lastStatLog = time.Now()
		info := q.Stats()
		info = append(info, "throttle", throttleThreshold)
		log.Info("Downloader queue stats", info...)
	}
	return results
}
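
// As a worked example of the throttle calibration above: resultSize is an
// exponential moving average weighted by blockCacheSizeWeight (0.1), and the
// threshold is the ceiling division blockCacheMemory / resultSize. With a
// calibrated resultSize of 64 KiB, that yields 256 MiB / 64 KiB = 4096 result
// slots; the resultStore may cap the value further at its own item limit.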

func (q *queue) Stats() []interface{} {
	q.lock.RLock()
	defer q.lock.RUnlock()

	return q.stats()
}

func (q *queue) stats() []interface{} {
	return []interface{}{
		"receiptTasks", q.receiptTaskQueue.Size(),
		"blockTasks", q.blockTaskQueue.Size(),
		"itemSize", q.resultSize,
	}
}

// ReserveHeaders reserves a set of headers for the given peer, skipping any
// previously failed batches.
func (q *queue) ReserveHeaders(p *peerConnection, count int) *fetchRequest {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Short circuit if the peer's already downloading something (sanity check to
	// not corrupt state)
	if _, ok := q.headerPendPool[p.id]; ok {
		return nil
	}
	// Retrieve a batch of hashes, skipping previously failed ones
	send, skip := uint64(0), []uint64{}
	for send == 0 && !q.headerTaskQueue.Empty() {
		from, _ := q.headerTaskQueue.Pop()
		if q.headerPeerMiss[p.id] != nil {
			if _, ok := q.headerPeerMiss[p.id][from.(uint64)]; ok {
				skip = append(skip, from.(uint64))
				continue
			}
		}
		send = from.(uint64)
	}
	// Merge all the skipped batches back
	for _, from := range skip {
		q.headerTaskQueue.Push(from, -int64(from))
	}
	// Assemble and return the block download request
	if send == 0 {
		return nil
	}
	request := &fetchRequest{
		Peer: p,
		From: send,
		Time: time.Now(),
	}
	q.headerPendPool[p.id] = request
	return request
}

// ReserveBodies reserves a set of body fetches for the given peer, skipping any
// previously failed downloads. Besides the next batch of needed fetches, it also
// returns a flag whether empty blocks were queued requiring processing.
func (q *queue) ReserveBodies(p *peerConnection, count int) (*fetchRequest, bool, bool) {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.reserveHeaders(p, count, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, bodyType)
}

// ReserveReceipts reserves a set of receipt fetches for the given peer, skipping
// any previously failed downloads. Besides the next batch of needed fetches, it
// also returns a flag whether empty receipts were queued requiring importing.
func (q *queue) ReserveReceipts(p *peerConnection, count int) (*fetchRequest, bool, bool) {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.reserveHeaders(p, count, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool, receiptType)
}

// reserveHeaders reserves a set of data download operations for a given peer,
// skipping any previously failed ones. This method is a generic version used
// by the individual special reservation functions.
//
// Note, this method expects the queue lock to be already held for writing. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
//
// Returns:
//   item     - the fetchRequest
//   progress - whether any progress was made
//   throttle - if the caller should throttle for a while
func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque,
	pendPool map[string]*fetchRequest, kind uint) (*fetchRequest, bool, bool) {
	// Short circuit if the pool has been depleted, or if the peer's already
	// downloading something (sanity check not to corrupt state)
	if taskQueue.Empty() {
		return nil, false, true
	}
	if _, ok := pendPool[p.id]; ok {
		return nil, false, false
	}
	// Retrieve a batch of tasks, skipping previously failed ones
	send := make([]*types.Header, 0, count)
	skip := make([]*types.Header, 0)
	progress := false
	throttled := false
	for proc := 0; len(send) < count && !taskQueue.Empty(); proc++ {
		// The task queue will pop items in order, so the highest priority block
		// is also the one with the lowest block number.
		h, _ := taskQueue.Peek()
		header := h.(*types.Header)
		// We can ask the result cache if this header is within the
		// "prioritized" segment of blocks. If it is not, we need to throttle.

		stale, throttle, item, err := q.resultCache.AddFetch(header, q.mode == SnapSync)
		if stale {
			// Don't put back in the task queue, this item has already been
			// delivered upstream
			taskQueue.PopItem()
			progress = true
			delete(taskPool, header.Hash())
			proc = proc - 1
			log.Error("Fetch reservation already delivered", "number", header.Number.Uint64())
			continue
		}
		if throttle {
			// There are no result slots available. Leave it in the task queue.
			// However, if there are any left as 'skipped', we should not tell
			// the caller to throttle, since we still want some other
			// peer to fetch those for us
			throttled = len(skip) == 0
			break
		}
		if err != nil {
			// This most definitely should _not_ happen
			log.Warn("Failed to reserve headers", "err", err)
			// There are no result slots available. Leave it in the task queue.
			break
		}
		if item.Done(kind) {
			// If it's a no-op, we can skip this task
			delete(taskPool, header.Hash())
			taskQueue.PopItem()
			proc = proc - 1
			progress = true
			continue
		}
		// Remove it from the task queue
		taskQueue.PopItem()
		// Otherwise unless the peer is known not to have the data, add to the retrieve list
		if p.Lacks(header.Hash()) {
			skip = append(skip, header)
		} else {
			send = append(send, header)
		}
	}
	// Merge all the skipped headers back
	for _, header := range skip {
		taskQueue.Push(header, -int64(header.Number.Uint64()))
	}
	if q.resultCache.HasCompletedItems() {
		// Wake Results, resultCache was modified
		q.active.Signal()
	}
	// Assemble and return the block download request
	if len(send) == 0 {
		return nil, progress, throttled
	}
	request := &fetchRequest{
		Peer:    p,
		Headers: send,
		Time:    time.Now(),
	}
	pendPool[p.id] = request
	return request, progress, throttled
}
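
// From the caller's perspective: a nil request with throttle == true means
// the result cache is full and the fetcher should back off until Results
// frees up slots, whereas a nil request with progress == true means only
// stale or no-op tasks were consumed and another reservation attempt may
// succeed immediately.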

// Revoke cancels all pending requests belonging to a given peer. This method is
// meant to be called during a peer drop to quickly reassign owned data fetches
// to remaining nodes.
func (q *queue) Revoke(peerID string) {
	q.lock.Lock()
	defer q.lock.Unlock()

	if request, ok := q.headerPendPool[peerID]; ok {
		q.headerTaskQueue.Push(request.From, -int64(request.From))
		delete(q.headerPendPool, peerID)
	}
	if request, ok := q.blockPendPool[peerID]; ok {
		for _, header := range request.Headers {
			q.blockTaskQueue.Push(header, -int64(header.Number.Uint64()))
		}
		delete(q.blockPendPool, peerID)
	}
	if request, ok := q.receiptPendPool[peerID]; ok {
		for _, header := range request.Headers {
			q.receiptTaskQueue.Push(header, -int64(header.Number.Uint64()))
		}
		delete(q.receiptPendPool, peerID)
	}
}

// ExpireHeaders cancels a request that timed out and moves the pending fetch
// task back into the queue for rescheduling.
func (q *queue) ExpireHeaders(peer string) int {
	q.lock.Lock()
	defer q.lock.Unlock()

	headerTimeoutMeter.Mark(1)
	return q.expire(peer, q.headerPendPool, q.headerTaskQueue)
}

// ExpireBodies cancels a block body request that timed out, moving the pending
// fetch tasks back into the queue for rescheduling and returning their count.
func (q *queue) ExpireBodies(peer string) int {
	q.lock.Lock()
	defer q.lock.Unlock()

	bodyTimeoutMeter.Mark(1)
	return q.expire(peer, q.blockPendPool, q.blockTaskQueue)
}

// ExpireReceipts cancels a receipt request that timed out, moving the pending
// fetch tasks back into the queue for rescheduling and returning their count.
func (q *queue) ExpireReceipts(peer string) int {
	q.lock.Lock()
	defer q.lock.Unlock()

	receiptTimeoutMeter.Mark(1)
	return q.expire(peer, q.receiptPendPool, q.receiptTaskQueue)
}

// expire is the generic check that moves a specific expired task from a pending
// pool back into a task queue.
//
// Note, this method expects the queue lock to be already held. The reason the
// lock is not obtained in here is that the parameters already need to access
// the queue, so they already need a lock anyway.
func (q *queue) expire(peer string, pendPool map[string]*fetchRequest, taskQueue *prque.Prque) int {
	// Retrieve the request being expired and log an error if it's non-existent,
	// as there's no order of events that should lead to such expirations.
	req := pendPool[peer]
	if req == nil {
		log.Error("Expired request does not exist", "peer", peer)
		return 0
	}
	delete(pendPool, peer)

	// Return any non-satisfied requests to the pool
	if req.From > 0 {
		taskQueue.Push(req.From, -int64(req.From))
	}
	for _, header := range req.Headers {
		taskQueue.Push(header, -int64(header.Number.Uint64()))
	}
	return len(req.Headers)
}
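
// Note that header fetch requests carry their task in From rather than in
// Headers, so an expired header request is re-queued via the From branch
// above and the returned count is 0; body and receipt requests return the
// number of header tasks pushed back for rescheduling.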

// DeliverHeaders injects a header retrieval response into the header results
// cache. This method either accepts all headers it received, or none of them
// if they do not map correctly to the skeleton.
//
// If the headers are accepted, the method makes an attempt to deliver the set
// of ready headers to the processor to keep the pipeline full. However, it will
// not block to prevent stalling other pending deliveries.
func (q *queue) DeliverHeaders(id string, headers []*types.Header, hashes []common.Hash, headerProcCh chan *headerTask) (int, error) {
	q.lock.Lock()
	defer q.lock.Unlock()

	var logger log.Logger
	if len(id) < 16 {
		// Tests use short IDs, don't choke on them
		logger = log.New("peer", id)
	} else {
		logger = log.New("peer", id[:16])
	}
	// Short circuit if the data was never requested
	request := q.headerPendPool[id]
	if request == nil {
		headerDropMeter.Mark(int64(len(headers)))
		return 0, errNoFetchesPending
	}
	delete(q.headerPendPool, id)

	headerReqTimer.UpdateSince(request.Time)
	headerInMeter.Mark(int64(len(headers)))

	// Ensure headers can be mapped onto the skeleton chain
	target := q.headerTaskPool[request.From].Hash()

	accepted := len(headers) == MaxHeaderFetch
	if accepted {
		if headers[0].Number.Uint64() != request.From {
			logger.Trace("First header broke chain ordering", "number", headers[0].Number, "hash", hashes[0], "expected", request.From)
			accepted = false
		} else if hashes[len(headers)-1] != target {
			logger.Trace("Last header broke skeleton structure", "number", headers[len(headers)-1].Number, "hash", hashes[len(headers)-1], "expected", target)
			accepted = false
		}
	}
	if accepted {
		parentHash := hashes[0]
		for i, header := range headers[1:] {
			hash := hashes[i+1]
			if want := request.From + 1 + uint64(i); header.Number.Uint64() != want {
				logger.Warn("Header broke chain ordering", "number", header.Number, "hash", hash, "expected", want)
				accepted = false
				break
			}
			if parentHash != header.ParentHash {
				logger.Warn("Header broke chain ancestry", "number", header.Number, "hash", hash)
				accepted = false
				break
			}
			// Set up parent hash for next round
			parentHash = hash
		}
	}
	// If the batch of headers wasn't accepted, mark as unavailable
	if !accepted {
		logger.Trace("Skeleton filling not accepted", "from", request.From)
		headerDropMeter.Mark(int64(len(headers)))

		miss := q.headerPeerMiss[id]
		if miss == nil {
			q.headerPeerMiss[id] = make(map[uint64]struct{})
			miss = q.headerPeerMiss[id]
		}
		miss[request.From] = struct{}{}

		q.headerTaskQueue.Push(request.From, -int64(request.From))
		return 0, errors.New("delivery not accepted")
	}
	// Clean up a successful fetch and try to deliver any sub-results
	copy(q.headerResults[request.From-q.headerOffset:], headers)
	copy(q.headerHashes[request.From-q.headerOffset:], hashes)

	delete(q.headerTaskPool, request.From)

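	// Results are filled one complete batch at a time (see the copy above), so
	// counting ready headers in MaxHeaderFetch steps cannot overshoot into a
	// partially filled batch.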
	ready := 0
	for q.headerProced+ready < len(q.headerResults) && q.headerResults[q.headerProced+ready] != nil {
		ready += MaxHeaderFetch
	}
	if ready > 0 {
		// Headers are ready for delivery, gather them and push forward (non blocking)
		processHeaders := make([]*types.Header, ready)
		copy(processHeaders, q.headerResults[q.headerProced:q.headerProced+ready])

		processHashes := make([]common.Hash, ready)
		copy(processHashes, q.headerHashes[q.headerProced:q.headerProced+ready])

		select {
		case headerProcCh <- &headerTask{
			headers: processHeaders,
			hashes:  processHashes,
		}:
			logger.Trace("Pre-scheduled new headers", "count", len(processHeaders), "from", processHeaders[0].Number)
			q.headerProced += len(processHeaders)
		default:
		}
	}
	// Check for termination and return
	if len(q.headerTaskPool) == 0 {
		q.headerContCh <- false
	}
	return len(headers), nil
}

// DeliverBodies injects a block body retrieval response into the results queue.
// The method returns the number of block bodies accepted from the delivery and
// also wakes any threads waiting for data delivery.
func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, txListHashes []common.Hash, uncleLists [][]*types.Header, uncleListHashes []common.Hash) (int, error) {
	q.lock.Lock()
	defer q.lock.Unlock()

	validate := func(index int, header *types.Header) error {
		if txListHashes[index] != header.TxHash {
			return errInvalidBody
		}
		if uncleListHashes[index] != header.UncleHash {
			return errInvalidBody
		}
		return nil
	}

	reconstruct := func(index int, result *fetchResult) {
		result.Transactions = txLists[index]
		result.Uncles = uncleLists[index]
		result.SetBodyDone()
	}
	return q.deliver(id, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool,
		bodyReqTimer, bodyInMeter, bodyDropMeter, len(txLists), validate, reconstruct)
}

// DeliverReceipts injects a receipt retrieval response into the results queue.
// The method returns the number of transaction receipts accepted from the delivery
// and also wakes any threads waiting for data delivery.
func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt, receiptListHashes []common.Hash) (int, error) {
	q.lock.Lock()
	defer q.lock.Unlock()

	validate := func(index int, header *types.Header) error {
		if receiptListHashes[index] != header.ReceiptHash {
			return errInvalidReceipt
		}
		return nil
	}
	reconstruct := func(index int, result *fetchResult) {
		result.Receipts = receiptList[index]
		result.SetReceiptsDone()
	}
	return q.deliver(id, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool,
		receiptReqTimer, receiptInMeter, receiptDropMeter, len(receiptList), validate, reconstruct)
}

// deliver injects a data retrieval response into the results queue.
//
// Note, this method expects the queue lock to be already held for writing. The
// reason this lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header,
	taskQueue *prque.Prque, pendPool map[string]*fetchRequest,
	reqTimer metrics.Timer, resInMeter metrics.Meter, resDropMeter metrics.Meter,
	results int, validate func(index int, header *types.Header) error,
	reconstruct func(index int, result *fetchResult)) (int, error) {

	// Short circuit if the data was never requested
	request := pendPool[id]
	if request == nil {
		resDropMeter.Mark(int64(results))
		return 0, errNoFetchesPending
	}
	delete(pendPool, id)

	reqTimer.UpdateSince(request.Time)
	resInMeter.Mark(int64(results))

	// If no data items were retrieved, mark them as unavailable for the origin peer
	if results == 0 {
		for _, header := range request.Headers {
			request.Peer.MarkLacking(header.Hash())
		}
	}
	// Assemble each of the results with their headers and retrieved data parts
	var (
		accepted int
		failure  error
		i        int
		hashes   []common.Hash
	)
	for _, header := range request.Headers {
		// Short circuit assembly if no more fetch results are found
		if i >= results {
			break
		}
		// Validate the fields
		if err := validate(i, header); err != nil {
			failure = err
			break
		}
		hashes = append(hashes, header.Hash())
		i++
	}

	for _, header := range request.Headers[:i] {
		if res, stale, err := q.resultCache.GetDeliverySlot(header.Number.Uint64()); err == nil {
			reconstruct(accepted, res)
		} else {
			// else: between here and above, some other peer filled this result,
			// or it was indeed a no-op. This should not happen, but if it does
			// it's not something to panic about.
			log.Error("Delivery stale", "stale", stale, "number", header.Number.Uint64(), "err", err)
			failure = errStaleDelivery
		}
		// Clean up a successful fetch
		delete(taskPool, hashes[accepted])
		accepted++
	}
	resDropMeter.Mark(int64(results - accepted))

	// Return all failed or missing fetches to the queue
	for _, header := range request.Headers[accepted:] {
		taskQueue.Push(header, -int64(header.Number.Uint64()))
	}
	// Wake up Results
	if accepted > 0 {
		q.active.Signal()
	}
	if failure == nil {
		return accepted, nil
	}
	// If none of the data was good, it's a stale delivery
	if accepted > 0 {
		return accepted, fmt.Errorf("partial failure: %v", failure)
	}
	return accepted, fmt.Errorf("%w: %v", failure, errStaleDelivery)
}

// Prepare configures the result cache to allow accepting and caching inbound
// fetch results.
func (q *queue) Prepare(offset uint64, mode SyncMode) {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Prepare the queue for sync results
	q.resultCache.Prepare(offset)
	q.mode = mode
}
