// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package server

import (
	"math"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/common/prque"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/nodestate"
)

const (
	lazyQueueRefresh = time.Second * 10 // refresh period of the active queue
)

// priorityPool handles a set of nodes where each node has a capacity (a scalar value)
// and a priority (which can change over time and can also depend on the capacity).
// A node is active if it has at least the necessary minimal amount of capacity while
// inactive nodes have 0 capacity (values between 0 and the minimum are not allowed).
// The pool ensures that the number and total capacity of all active nodes are limited
// and the highest priority nodes are active at all times (limits can be changed
// during operation with immediate effect).
//
// When activating clients a priority bias is applied in favor of the already active
// nodes in order to avoid nodes quickly alternating between active and inactive states
// when their priorities are close to each other. The bias is specified in terms of
// duration (time) because priorities are expected to usually get lower over time and
// therefore a future minimum prediction (see estimatePriority) should decrease
// monotonically with the specified time parameter.
// This time bias can be interpreted as minimum expected active time at the given
// capacity (if the threshold priority stays the same).
//
// Nodes in the pool always have either inactiveFlag or activeFlag set. A new node is
// added to the pool by externally setting inactiveFlag. priorityPool can switch a node
// between inactiveFlag and activeFlag at any time. Nodes can be removed from the pool
// by externally resetting both flags. activeFlag should not be set externally.
//
// The highest priority nodes in "inactive" state are moved to "active" state as soon as
// the minimum capacity can be granted for them. The capacity of lower priority active
// nodes is reduced or they are demoted to "inactive" state if their priority is
// insufficient even at minimal capacity.
type priorityPool struct {
	setup                        *serverSetup
	ns                           *nodestate.NodeStateMachine
	clock                        mclock.Clock
	lock                         sync.Mutex
	maxCount, maxCap             uint64
	minCap                       uint64
	activeBias                   time.Duration
	capacityStepDiv, fineStepDiv uint64

	// Cached snapshot of the priority pool, used to answer capacity queries.
	cachedCurve    *capacityCurve
	ccUpdatedAt    mclock.AbsTime
	ccUpdateForced bool

	// Runtime status of the priority pool, representing the temporary state
	// if tempState is not empty
	tempState              []*ppNodeInfo
	activeCount, activeCap uint64
	activeQueue            *prque.LazyQueue
	inactiveQueue          *prque.Prque
}

// ppNodeInfo is the internal node descriptor of priorityPool
type ppNodeInfo struct {
	nodePriority               nodePriority
	node                       *enode.Node
	connected                  bool
	capacity                   uint64 // only changed when temporary state is committed
	activeIndex, inactiveIndex int

	tempState    bool   // should only be true while the priorityPool lock is held
	tempCapacity uint64 // equals capacity when tempState is false

	// the following fields only affect the temporary state and they are set to their
	// default value when leaving the temp state
	minTarget, stepDiv uint64
	bias               time.Duration
}

// newPriorityPool creates a new priorityPool
func newPriorityPool(ns *nodestate.NodeStateMachine, setup *serverSetup, clock mclock.Clock, minCap uint64, activeBias time.Duration, capacityStepDiv, fineStepDiv uint64) *priorityPool {
	pp := &priorityPool{
		setup:           setup,
		ns:              ns,
		clock:           clock,
		inactiveQueue:   prque.New(inactiveSetIndex),
		minCap:          minCap,
		activeBias:      activeBias,
		capacityStepDiv: capacityStepDiv,
		fineStepDiv:     fineStepDiv,
	}
	if pp.activeBias < time.Duration(1) {
		pp.activeBias = time.Duration(1)
	}
	pp.activeQueue = prque.NewLazyQueue(activeSetIndex, activePriority, pp.activeMaxPriority, clock, lazyQueueRefresh)

	ns.SubscribeField(pp.setup.balanceField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) {
		if newValue != nil {
			c := &ppNodeInfo{
				node:          node,
				nodePriority:  newValue.(nodePriority),
				activeIndex:   -1,
				inactiveIndex: -1,
			}
			ns.SetFieldSub(node, pp.setup.queueField, c)
			ns.SetStateSub(node, setup.inactiveFlag, nodestate.Flags{}, 0)
		} else {
			ns.SetStateSub(node, nodestate.Flags{}, pp.setup.activeFlag.Or(pp.setup.inactiveFlag), 0)
			if n, _ := pp.ns.GetField(node, pp.setup.queueField).(*ppNodeInfo); n != nil {
				pp.disconnectNode(n)
			}
			ns.SetFieldSub(node, pp.setup.capacityField, nil)
			ns.SetFieldSub(node, pp.setup.queueField, nil)
		}
	})
	ns.SubscribeState(pp.setup.activeFlag.Or(pp.setup.inactiveFlag), func(node *enode.Node, oldState, newState nodestate.Flags) {
		if c, _ := pp.ns.GetField(node, pp.setup.queueField).(*ppNodeInfo); c != nil {
			if oldState.IsEmpty() {
				pp.connectNode(c)
			}
			if newState.IsEmpty() {
				pp.disconnectNode(c)
			}
		}
	})
	ns.SubscribeState(pp.setup.updateFlag, func(node *enode.Node, oldState, newState nodestate.Flags) {
		if !newState.IsEmpty() {
			pp.updatePriority(node)
		}
	})
	return pp
}
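
// An illustrative sketch (not part of the production wiring) of how the pool is
// created and how a node enters it. The ns, setup, node and balance values are
// assumed to come from the caller's context and the numeric limits are made up;
// balance must implement the nodePriority interface:
//
//	pp := newPriorityPool(ns, setup, mclock.System{}, 100, time.Second*10, 2, 16)
//	pp.SetLimits(10, 10000) // at most 10 active nodes, 10000 total active capacity
//	// setting the balance field registers the node and puts it into the pool
//	// with inactiveFlag set (see the SubscribeField callback above)
//	ns.SetField(node, setup.balanceField, balance)
//
// From this point on the pool decides on its own when the node is switched
// between inactiveFlag and activeFlag.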

// requestCapacity tries to set the capacity of a connected node to the highest possible
// value inside the given target range. If maxTarget is not reachable then the capacity is
// iteratively reduced in fine steps based on the fineStepDiv parameter until minTarget is reached.
// The function returns the new capacity if successful and the original capacity otherwise.
// Note: this function should run inside a NodeStateMachine operation
func (pp *priorityPool) requestCapacity(node *enode.Node, minTarget, maxTarget uint64, bias time.Duration) uint64 {
	pp.lock.Lock()
	pp.activeQueue.Refresh()

	if minTarget < pp.minCap {
		minTarget = pp.minCap
	}
	if maxTarget < minTarget {
		maxTarget = minTarget
	}
	if bias < pp.activeBias {
		bias = pp.activeBias
	}
	c, _ := pp.ns.GetField(node, pp.setup.queueField).(*ppNodeInfo)
	if c == nil {
		log.Error("requestCapacity called for unknown node", "id", node.ID())
		pp.lock.Unlock()
		return 0
	}
	pp.setTempState(c)
	if maxTarget > c.capacity {
		pp.setTempStepDiv(c, pp.fineStepDiv)
		pp.setTempBias(c, bias)
	}
	pp.setTempCapacity(c, maxTarget)
	c.minTarget = minTarget
	pp.activeQueue.Remove(c.activeIndex)
	pp.inactiveQueue.Remove(c.inactiveIndex)
	pp.activeQueue.Push(c)
	pp.enforceLimits()
	updates := pp.finalizeChanges(c.tempCapacity >= minTarget && c.tempCapacity <= maxTarget && c.tempCapacity != c.capacity)
	pp.lock.Unlock()
	pp.updateFlags(updates)
	return c.capacity
}
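
// Illustrative call only (the concrete numbers are made up; the actual callers
// live elsewhere in this package): requesting anything between the pool minimum
// and 1000 capacity units with a 10 second activation bias, from inside a
// NodeStateMachine operation as required above. A minTarget of 0 is clamped to
// pp.minCap internally.
//
//	ns.Operation(func() {
//		granted := pp.requestCapacity(node, 0, 1000, time.Second*10)
//		if granted >= 1000 {
//			// the full request was satisfied
//		}
//	})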

// SetLimits sets the maximum number and total capacity of simultaneously active nodes
func (pp *priorityPool) SetLimits(maxCount, maxCap uint64) {
	pp.lock.Lock()
	pp.activeQueue.Refresh()
	inc := (maxCount > pp.maxCount) || (maxCap > pp.maxCap)
	dec := (maxCount < pp.maxCount) || (maxCap < pp.maxCap)
	pp.maxCount, pp.maxCap = maxCount, maxCap

	var updates []capUpdate
	if dec {
		pp.enforceLimits()
		updates = pp.finalizeChanges(true)
	}
	if inc {
		updates = append(updates, pp.tryActivate(false)...)
	}
	pp.lock.Unlock()
	pp.ns.Operation(func() { pp.updateFlags(updates) })
}

// setActiveBias sets the bias applied when trying to activate inactive nodes
func (pp *priorityPool) setActiveBias(bias time.Duration) {
	pp.lock.Lock()
	pp.activeBias = bias
	if pp.activeBias < time.Duration(1) {
		pp.activeBias = time.Duration(1)
	}
	updates := pp.tryActivate(false)
	pp.lock.Unlock()
	pp.ns.Operation(func() { pp.updateFlags(updates) })
}

// Active returns the number and total capacity of currently active nodes
func (pp *priorityPool) Active() (uint64, uint64) {
	pp.lock.Lock()
	defer pp.lock.Unlock()

	return pp.activeCount, pp.activeCap
}

// Inactive returns the number of currently inactive nodes
func (pp *priorityPool) Inactive() int {
	pp.lock.Lock()
	defer pp.lock.Unlock()

	return pp.inactiveQueue.Size()
}

// Limits returns the maximum allowed number and total capacity of active nodes
func (pp *priorityPool) Limits() (uint64, uint64) {
	pp.lock.Lock()
	defer pp.lock.Unlock()

	return pp.maxCount, pp.maxCap
}

// inactiveSetIndex callback updates ppNodeInfo item index in inactiveQueue
func inactiveSetIndex(a interface{}, index int) {
	a.(*ppNodeInfo).inactiveIndex = index
}

// activeSetIndex callback updates ppNodeInfo item index in activeQueue
func activeSetIndex(a interface{}, index int) {
	a.(*ppNodeInfo).activeIndex = index
}

// invertPriority inverts a priority value. The active queue uses inverted priorities
// because the node on the top is the first to be deactivated.
func invertPriority(p int64) int64 {
	if p == math.MinInt64 {
		return math.MaxInt64
	}
	return -p
}

// activePriority callback returns actual priority of ppNodeInfo item in activeQueue
func activePriority(a interface{}) int64 {
	c := a.(*ppNodeInfo)
	if c.bias == 0 {
		return invertPriority(c.nodePriority.priority(c.tempCapacity))
	} else {
		return invertPriority(c.nodePriority.estimatePriority(c.tempCapacity, 0, 0, c.bias, true))
	}
}

// activeMaxPriority callback returns estimated maximum priority of ppNodeInfo item in activeQueue
func (pp *priorityPool) activeMaxPriority(a interface{}, until mclock.AbsTime) int64 {
	c := a.(*ppNodeInfo)
	future := time.Duration(until - pp.clock.Now())
	if future < 0 {
		future = 0
	}
	return invertPriority(c.nodePriority.estimatePriority(c.tempCapacity, 0, future, c.bias, false))
}

// inactivePriority callback returns actual priority of ppNodeInfo item in inactiveQueue
func (pp *priorityPool) inactivePriority(p *ppNodeInfo) int64 {
	return p.nodePriority.priority(pp.minCap)
}

// connectNode is called when a new node has been added to the pool (inactiveFlag set)
// Note: this function should run inside a NodeStateMachine operation
func (pp *priorityPool) connectNode(c *ppNodeInfo) {
	pp.lock.Lock()
	pp.activeQueue.Refresh()
	if c.connected {
		pp.lock.Unlock()
		return
	}
	c.connected = true
	pp.inactiveQueue.Push(c, pp.inactivePriority(c))
	updates := pp.tryActivate(false)
	pp.lock.Unlock()
	pp.updateFlags(updates)
}

// disconnectNode is called when a node has been removed from the pool (both inactiveFlag
// and activeFlag reset)
// Note: this function should run inside a NodeStateMachine operation
func (pp *priorityPool) disconnectNode(c *ppNodeInfo) {
	pp.lock.Lock()
	pp.activeQueue.Refresh()
	if !c.connected {
		pp.lock.Unlock()
		return
	}
	c.connected = false
	pp.activeQueue.Remove(c.activeIndex)
	pp.inactiveQueue.Remove(c.inactiveIndex)

	var updates []capUpdate
	if c.capacity != 0 {
		pp.setTempState(c)
		pp.setTempCapacity(c, 0)
		updates = pp.tryActivate(true)
	}
	pp.lock.Unlock()
	pp.updateFlags(updates)
}

// setTempState internally puts a node in a temporary state that can either be reverted
// or confirmed later. This temporary state allows changing the capacity of a node and
// moving it between the active and inactive queue. activeFlag/inactiveFlag and
// capacityField are not changed while the changes are still temporary.
func (pp *priorityPool) setTempState(c *ppNodeInfo) {
	if c.tempState {
		return
	}
	c.tempState = true
	if c.tempCapacity != c.capacity { // should never happen
		log.Error("tempCapacity != capacity when entering tempState")
	}
	// Assign all the defaults to the temp state.
	c.minTarget = pp.minCap
	c.stepDiv = pp.capacityStepDiv
	c.bias = 0
	pp.tempState = append(pp.tempState, c)
}

// unsetTempState revokes the temp status of the node and resets all internal
// fields to their default values.
func (pp *priorityPool) unsetTempState(c *ppNodeInfo) {
	if !c.tempState {
		return
	}
	c.tempState = false
	if c.tempCapacity != c.capacity { // should never happen
		log.Error("tempCapacity != capacity when leaving tempState")
	}
	c.minTarget = pp.minCap
	c.stepDiv = pp.capacityStepDiv
	c.bias = 0
}

// setTempCapacity changes the capacity of a node in the temporary state and adjusts
// activeCap and activeCount accordingly. Since this change is performed in the temporary
// state it should be called after setTempState and before finalizeChanges.
func (pp *priorityPool) setTempCapacity(c *ppNodeInfo, cap uint64) {
	if !c.tempState { // should never happen
		log.Error("Node is not in temporary state")
		return
	}
	pp.activeCap += cap - c.tempCapacity
	if c.tempCapacity == 0 {
		pp.activeCount++
	}
	if cap == 0 {
		pp.activeCount--
	}
	c.tempCapacity = cap
}
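
// A worked example of the bookkeeping above (values are arbitrary): with
// activeCap=900 and activeCount=3, temporarily raising a node from
// tempCapacity=0 to 300 gives activeCap=1200 and activeCount=4; setting it back
// to 0 restores both counters. Note that when the new cap is smaller than the
// current tempCapacity the unsigned subtraction wraps around, but adding the
// wrapped value to activeCap still produces the correct, reduced total.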

// setTempBias changes the connection bias of a node in the temporary state.
func (pp *priorityPool) setTempBias(c *ppNodeInfo, bias time.Duration) {
	if !c.tempState { // should never happen
		log.Error("Node is not in temporary state")
		return
	}
	c.bias = bias
}

// setTempStepDiv changes the capacity divisor of a node in the temporary state.
func (pp *priorityPool) setTempStepDiv(c *ppNodeInfo, stepDiv uint64) {
	if !c.tempState { // should never happen
		log.Error("Node is not in temporary state")
		return
	}
	c.stepDiv = stepDiv
}

// enforceLimits enforces active node count and total capacity limits. It returns the
// lowest active node priority. Note that this function is performed on the temporary
// internal state.
func (pp *priorityPool) enforceLimits() (*ppNodeInfo, int64) {
	if pp.activeCap <= pp.maxCap && pp.activeCount <= pp.maxCount {
		return nil, math.MinInt64
	}
	var (
		c                 *ppNodeInfo
		maxActivePriority int64
	)
	pp.activeQueue.MultiPop(func(data interface{}, priority int64) bool {
		c = data.(*ppNodeInfo)
		pp.setTempState(c)
		maxActivePriority = priority
		if c.tempCapacity == c.minTarget || pp.activeCount > pp.maxCount {
			pp.setTempCapacity(c, 0)
		} else {
			sub := c.tempCapacity / c.stepDiv
			if sub == 0 {
				sub = 1
			}
			if c.tempCapacity-sub < c.minTarget {
				sub = c.tempCapacity - c.minTarget
			}
			pp.setTempCapacity(c, c.tempCapacity-sub)
			pp.activeQueue.Push(c)
		}
		return pp.activeCap > pp.maxCap || pp.activeCount > pp.maxCount
	})
	return c, invertPriority(maxActivePriority)
}
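
// A small worked example of the reduction step above (values chosen purely for
// illustration): with stepDiv=4, a node at tempCapacity=1000 and minTarget=100
// first drops by 1000/4=250 to 750, then by 750/4=187 to 563, and so on. Once a
// step would fall below minTarget the subtraction is clamped to exactly reach
// minTarget, and a node already at minTarget (or any node while the count limit
// is exceeded) is dropped to capacity 0, i.e. deactivated.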

// finalizeChanges either commits or reverts temporary changes. The necessary capacity
// field and corresponding flag updates are not performed here but returned in a list
// because they should be performed while the mutex is not held.
func (pp *priorityPool) finalizeChanges(commit bool) (updates []capUpdate) {
	for _, c := range pp.tempState {
		// always remove and push back in order to update biased priority
		pp.activeQueue.Remove(c.activeIndex)
		pp.inactiveQueue.Remove(c.inactiveIndex)
		oldCapacity := c.capacity
		if commit {
			c.capacity = c.tempCapacity
		} else {
			pp.setTempCapacity(c, c.capacity) // revert activeCount/activeCap
		}
		pp.unsetTempState(c)

		if c.connected {
			if c.capacity != 0 {
				pp.activeQueue.Push(c)
			} else {
				pp.inactiveQueue.Push(c, pp.inactivePriority(c))
			}
			if c.capacity != oldCapacity {
				updates = append(updates, capUpdate{c.node, oldCapacity, c.capacity})
			}
		}
	}
	pp.tempState = nil
	if commit {
		pp.ccUpdateForced = true
	}
	return
}

// capUpdate describes a capacityField and activeFlag/inactiveFlag update
type capUpdate struct {
	node           *enode.Node
	oldCap, newCap uint64
}

// updateFlags performs capacityField and activeFlag/inactiveFlag updates while the
// pool mutex is not held
// Note: this function should run inside a NodeStateMachine operation
func (pp *priorityPool) updateFlags(updates []capUpdate) {
	for _, f := range updates {
		if f.oldCap == 0 {
			pp.ns.SetStateSub(f.node, pp.setup.activeFlag, pp.setup.inactiveFlag, 0)
		}
		if f.newCap == 0 {
			pp.ns.SetStateSub(f.node, pp.setup.inactiveFlag, pp.setup.activeFlag, 0)
			pp.ns.SetFieldSub(f.node, pp.setup.capacityField, nil)
		} else {
			pp.ns.SetFieldSub(f.node, pp.setup.capacityField, f.newCap)
		}
	}
}

// tryActivate tries to activate inactive nodes if possible
func (pp *priorityPool) tryActivate(commit bool) []capUpdate {
	for pp.inactiveQueue.Size() > 0 {
		c := pp.inactiveQueue.PopItem().(*ppNodeInfo)
		pp.setTempState(c)
		pp.setTempBias(c, pp.activeBias)
		pp.setTempCapacity(c, pp.minCap)
		pp.activeQueue.Push(c)
		pp.enforceLimits()
		if c.tempCapacity > 0 {
			commit = true
			pp.setTempBias(c, 0)
		} else {
			break
		}
	}
	pp.ccUpdateForced = true
	return pp.finalizeChanges(commit)
}
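
// Illustration of the resulting update flow (made-up values): if an inactive
// node can be granted pp.minCap=100 without violating the limits, tryActivate
// commits the change and the returned slice contains
//
//	capUpdate{node: n, oldCap: 0, newCap: 100}
//
// which updateFlags later turns into "set activeFlag, reset inactiveFlag" plus
// a capacityField value of 100 for that node.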

// updatePriority gets the current priority value of the given node from the nodePriority
// interface and performs the necessary changes. It is triggered by updateFlag.
// Note: this function should run inside a NodeStateMachine operation
func (pp *priorityPool) updatePriority(node *enode.Node) {
	pp.lock.Lock()
	pp.activeQueue.Refresh()
	c, _ := pp.ns.GetField(node, pp.setup.queueField).(*ppNodeInfo)
	if c == nil || !c.connected {
		pp.lock.Unlock()
		return
	}
	pp.activeQueue.Remove(c.activeIndex)
	pp.inactiveQueue.Remove(c.inactiveIndex)
	if c.capacity != 0 {
		pp.activeQueue.Push(c)
	} else {
		pp.inactiveQueue.Push(c, pp.inactivePriority(c))
	}
	updates := pp.tryActivate(false)
	pp.lock.Unlock()
	pp.updateFlags(updates)
}

// capacityCurve is a snapshot of the priority pool contents in a format that can efficiently
// estimate how much capacity could be granted to a given node at a given priority level.
type capacityCurve struct {
	points       []curvePoint       // curve points sorted in descending order of priority
	index        map[enode.ID][]int // curve point indexes belonging to each node
	excludeList  []int              // curve point indexes of excluded node
	excludeFirst bool               // true if activeCount == maxCount
}

type curvePoint struct {
	freeCap uint64 // available capacity and node count at the current priority level
	nextPri int64  // next priority level where more capacity will be available
}

// getCapacityCurve returns a new or recently cached capacityCurve based on the contents of the pool
func (pp *priorityPool) getCapacityCurve() *capacityCurve {
	pp.lock.Lock()
	defer pp.lock.Unlock()

	now := pp.clock.Now()
	dt := time.Duration(now - pp.ccUpdatedAt)
	if !pp.ccUpdateForced && pp.cachedCurve != nil && dt < time.Second*10 {
		return pp.cachedCurve
	}

	pp.ccUpdateForced = false
	pp.ccUpdatedAt = now
	curve := &capacityCurve{
		index: make(map[enode.ID][]int),
	}
	pp.cachedCurve = curve

	var excludeID enode.ID
	excludeFirst := pp.maxCount == pp.activeCount
	// reduce node capacities or remove nodes until nothing is left in the queue;
	// record the available capacity and the necessary priority after each step
	lastPri := int64(math.MinInt64)
	for pp.activeCap > 0 {
		cp := curvePoint{}
		if pp.activeCap > pp.maxCap {
			log.Error("Active capacity is greater than allowed maximum", "active", pp.activeCap, "maximum", pp.maxCap)
		} else {
			cp.freeCap = pp.maxCap - pp.activeCap
		}
		// temporarily increase activeCap to enforce reducing or removing a node capacity
		tempCap := cp.freeCap + 1
		pp.activeCap += tempCap
		var next *ppNodeInfo
		// enforceLimits removes the lowest priority node if it has minimal capacity,
		// otherwise reduces its capacity
		next, cp.nextPri = pp.enforceLimits()
		if cp.nextPri < lastPri {
			// enforce monotonicity which may be broken by continuously changing priorities
			cp.nextPri = lastPri
		} else {
			lastPri = cp.nextPri
		}
		pp.activeCap -= tempCap
		if next == nil {
			log.Error("getCapacityCurve: cannot remove next element from the priority queue")
			break
		}
		id := next.node.ID()
		if excludeFirst {
			// if the node count limit is already reached then mark the node with the
			// lowest priority for exclusion
			curve.excludeFirst = true
			excludeID = id
			excludeFirst = false
		}
		// multiple curve points and therefore multiple indexes may belong to a node
		// if it was removed in multiple steps (if its capacity was more than the minimum)
		curve.index[id] = append(curve.index[id], len(curve.points))
		curve.points = append(curve.points, cp)
	}
	// restore original state of the queue
	pp.finalizeChanges(false)
	curve.points = append(curve.points, curvePoint{
		freeCap: pp.maxCap,
		nextPri: math.MaxInt64,
	})
	if curve.excludeFirst {
		curve.excludeList = curve.index[excludeID]
	}
	return curve
}
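
// To make the shape of the result concrete: the curve always ends with the point
// {freeCap: maxCap, nextPri: math.MaxInt64}, meaning the entire pool capacity is
// only obtainable by outbidding every currently active node, while earlier points
// record progressively smaller amounts of free capacity that can be had by
// exceeding progressively lower priority thresholds (nextPri is kept monotonic
// in the loop above).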

// exclude returns a capacityCurve with the given node excluded from the original curve
func (cc *capacityCurve) exclude(id enode.ID) *capacityCurve {
	if excludeList, ok := cc.index[id]; ok {
		// return a new version of the curve (only one excluded node can be selected)
		// Note: if the first node was excluded by default (excludeFirst == true) then
		// we can forget about that and exclude the node with the given id instead.
		return &capacityCurve{
			points:      cc.points,
			index:       cc.index,
			excludeList: excludeList,
		}
	}
	return cc
}

func (cc *capacityCurve) getPoint(i int) curvePoint {
	cp := cc.points[i]
	if i == 0 && cc.excludeFirst {
		cp.freeCap = 0
		return cp
	}
	for ii := len(cc.excludeList) - 1; ii >= 0; ii-- {
		ei := cc.excludeList[ii]
		if ei < i {
			break
		}
		e1, e2 := cc.points[ei], cc.points[ei+1]
		cp.freeCap += e2.freeCap - e1.freeCap
	}
	return cp
}

// maxCapacity calculates the maximum capacity available for a node with a given
// (monotonically decreasing) priority vs. capacity function. Note that if the requesting
// node is already in the pool then it should be excluded from the curve in order to get
// the correct result.
func (cc *capacityCurve) maxCapacity(priority func(cap uint64) int64) uint64 {
	min, max := 0, len(cc.points)-1 // the curve always has at least one point
	for min < max {
		mid := (min + max) / 2
		cp := cc.getPoint(mid)
		if cp.freeCap == 0 || priority(cp.freeCap) > cp.nextPri {
			min = mid + 1
		} else {
			max = mid
		}
	}
	cp2 := cc.getPoint(min)
	if cp2.freeCap == 0 || min == 0 {
		return cp2.freeCap
	}
	cp1 := cc.getPoint(min - 1)
	if priority(cp2.freeCap) > cp1.nextPri {
		return cp2.freeCap
	}
	minc, maxc := cp1.freeCap, cp2.freeCap-1
	for minc < maxc {
		midc := (minc + maxc + 1) / 2
		if midc == 0 || priority(midc) > cp1.nextPri {
			minc = midc
		} else {
			maxc = midc - 1
		}
	}
	return maxc
}
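
// An illustrative query combining the snapshot with exclusion (balance and bias
// are stand-ins for whatever the caller actually uses; the priority callback
// must be monotonically decreasing in the capacity argument, as noted above):
//
//	curve := pp.getCapacityCurve().exclude(node.ID())
//	maxCap := curve.maxCapacity(func(cap uint64) int64 {
//		return balance.estimatePriority(cap, 0, 0, bias, false)
//	})
//
// Excluding the requesting node first matters because its already granted
// capacity would otherwise be counted as unavailable to itself.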