// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package trace

import (
	"container/heap"
	"math"
	"sort"
	"strings"
	"time"
)

// MutatorUtil is a change in mutator utilization at a particular
// time. Mutator utilization functions are represented as a
// time-ordered []MutatorUtil.
type MutatorUtil struct {
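	// Time is the time at which this change in mutator
	// utilization takes effect.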
	Time int64
	// Util is the mean mutator utilization starting at Time. This
	// is in the range [0, 1].
	Util float64
}

// UtilFlags controls the behavior of MutatorUtilization.
type UtilFlags int

const (
	// UtilSTW means utilization should account for STW events.
	UtilSTW UtilFlags = 1 << iota
	// UtilBackground means utilization should account for
	// background mark workers.
	UtilBackground
	// UtilAssist means utilization should account for mark
	// assists.
	UtilAssist
	// UtilSweep means utilization should account for sweeping.
	UtilSweep

	// UtilPerProc means each P should be given a separate
	// utilization function. Otherwise, there is a single function
	// and each P is given a fraction of the utilization.
	UtilPerProc
)

// MutatorUtilization returns a set of mutator utilization functions
// for the given trace. Each function will always end with 0
// utilization. The bounds of each function are implicit in the first
// and last event; outside of these bounds each function is undefined.
//
// If the UtilPerProc flag is not given, this always returns a single
// utilization function. Otherwise, it returns one function per P.
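//
// For example, a typical use is to feed the result into NewMMUCurve
// (an illustrative sketch; obtaining the []*Event slice from a parsed
// trace is assumed):
//
//	util := MutatorUtilization(events, UtilSTW|UtilBackground|UtilAssist|UtilSweep)
//	curve := NewMMUCurve(util)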
func MutatorUtilization(events []*Event, flags UtilFlags) [][]MutatorUtil {
	if len(events) == 0 {
		return nil
	}

	type perP struct {
		// gc > 0 indicates that GC is active on this P.
		gc int
		// series is the logical series number for this P. This
		// is necessary because Ps may be removed and then
		// re-added, in which case the new P needs a new series.
		series int
	}
	ps := []perP{}
	stw := 0

	out := [][]MutatorUtil{}
	assists := map[uint64]bool{}
	block := map[uint64]*Event{}
	bgMark := map[uint64]bool{}

	for _, ev := range events {
		switch ev.Type {
		case EvGomaxprocs:
			gomaxprocs := int(ev.Args[0])
			if len(ps) > gomaxprocs {
				if flags&UtilPerProc != 0 {
					// End each P's series.
					for _, p := range ps[gomaxprocs:] {
						out[p.series] = addUtil(out[p.series], MutatorUtil{ev.Ts, 0})
					}
				}
				ps = ps[:gomaxprocs]
			}
			for len(ps) < gomaxprocs {
				// Start new P's series.
				series := 0
				if flags&UtilPerProc != 0 || len(out) == 0 {
					series = len(out)
					out = append(out, []MutatorUtil{{ev.Ts, 1}})
				}
				ps = append(ps, perP{series: series})
			}
		case EvGCSTWStart:
			if flags&UtilSTW != 0 {
				stw++
			}
		case EvGCSTWDone:
			if flags&UtilSTW != 0 {
				stw--
			}
		case EvGCMarkAssistStart:
			if flags&UtilAssist != 0 {
				ps[ev.P].gc++
				assists[ev.G] = true
			}
		case EvGCMarkAssistDone:
			if flags&UtilAssist != 0 {
				ps[ev.P].gc--
				delete(assists, ev.G)
			}
		case EvGCSweepStart:
			if flags&UtilSweep != 0 {
				ps[ev.P].gc++
			}
		case EvGCSweepDone:
			if flags&UtilSweep != 0 {
				ps[ev.P].gc--
			}
		case EvGoStartLabel:
			if flags&UtilBackground != 0 && strings.HasPrefix(ev.SArgs[0], "GC ") && ev.SArgs[0] != "GC (idle)" {
				// Background mark worker.
				//
				// If we're in per-proc mode, we don't
				// count dedicated workers because they
				// kick all of the goroutines off that P,
				// so they don't directly contribute to
				// goroutine latency.
				if !(flags&UtilPerProc != 0 && ev.SArgs[0] == "GC (dedicated)") {
					bgMark[ev.G] = true
					ps[ev.P].gc++
				}
			}
			fallthrough
		case EvGoStart:
			if assists[ev.G] {
				// Unblocked during assist.
				ps[ev.P].gc++
			}
			block[ev.G] = ev.Link
		default:
			if ev != block[ev.G] {
				continue
			}

			if assists[ev.G] {
				// Blocked during assist.
				ps[ev.P].gc--
			}
			if bgMark[ev.G] {
				// Background mark worker done.
				ps[ev.P].gc--
				delete(bgMark, ev.G)
			}
			delete(block, ev.G)
		}

		if flags&UtilPerProc == 0 {
			// Compute the current average utilization.
			if len(ps) == 0 {
				continue
			}
			gcPs := 0
			if stw > 0 {
				gcPs = len(ps)
			} else {
				for i := range ps {
					if ps[i].gc > 0 {
						gcPs++
					}
				}
			}
			mu := MutatorUtil{ev.Ts, 1 - float64(gcPs)/float64(len(ps))}

			// Record the utilization change. (Since len(ps) > 0
			// here, at least one series was created, so
			// len(out) > 0.)
			out[0] = addUtil(out[0], mu)
		} else {
			// Check for per-P utilization changes.
			for i := range ps {
				p := &ps[i]
				util := 1.0
				if stw > 0 || p.gc > 0 {
					util = 0.0
				}
				out[p.series] = addUtil(out[p.series], MutatorUtil{ev.Ts, util})
			}
		}
	}

	// Add final 0 utilization event to any remaining series. This
	// is important to mark the end of the trace. The exact value
	// shouldn't matter since no window should extend beyond this,
	// but using 0 is symmetric with the start of the trace.
	mu := MutatorUtil{events[len(events)-1].Ts, 0}
	for i := range ps {
		out[ps[i].series] = addUtil(out[ps[i].series], mu)
	}
	return out
}

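// addUtil appends the utilization point mu to util. Consecutive
// points with the same utilization are merged, and if mu shares a
// time stamp with the last point, only the lower utilization is kept.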
func addUtil(util []MutatorUtil, mu MutatorUtil) []MutatorUtil {
	if len(util) > 0 {
		if mu.Util == util[len(util)-1].Util {
			// No change.
			return util
		}
		if mu.Time == util[len(util)-1].Time {
			// Take the lowest utilization at a time stamp.
			if mu.Util < util[len(util)-1].Util {
				util[len(util)-1] = mu
			}
			return util
		}
	}
	return append(util, mu)
}

// totalUtil is total utilization, measured in nanoseconds. This is a
// separate type primarily to distinguish it from mean utilization,
// which is also a float64.
type totalUtil float64

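// totalUtilOf returns the total utilization accumulated by a mean
// utilization of meanUtil held for dur nanoseconds.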
func totalUtilOf(meanUtil float64, dur int64) totalUtil {
	return totalUtil(meanUtil * float64(dur))
}

// mean returns the mean utilization over dur.
func (u totalUtil) mean(dur time.Duration) float64 {
	return float64(u) / float64(dur)
}

// An MMUCurve is the minimum mutator utilization curve across
// multiple window sizes.
type MMUCurve struct {
	series []mmuSeries
}

type mmuSeries struct {
	util []MutatorUtil
	// sums[j] is the cumulative sum of util[:j].
	sums []totalUtil
	// bands summarizes util in non-overlapping bands of duration
	// bandDur.
	bands []mmuBand
	// bandDur is the duration of each band.
	bandDur int64
}

type mmuBand struct {
	// minUtil is the minimum instantaneous mutator utilization in
	// this band.
	minUtil float64
	// cumUtil is the cumulative total mutator utilization between
	// time 0 and the left edge of this band.
	cumUtil totalUtil

	// integrator is the integrator for the left edge of this
	// band.
	integrator integrator
}

// NewMMUCurve returns an MMU curve for the given mutator utilization
// function.
func NewMMUCurve(utils [][]MutatorUtil) *MMUCurve {
	series := make([]mmuSeries, len(utils))
	for i, util := range utils {
		series[i] = newMMUSeries(util)
	}
	return &MMUCurve{series}
}

// bandsPerSeries is the number of bands to divide each series into.
// This is only changed by tests.
var bandsPerSeries = 1000

func newMMUSeries(util []MutatorUtil) mmuSeries {
	// Compute cumulative sum.
	sums := make([]totalUtil, len(util))
	var prev MutatorUtil
	var sum totalUtil
	for j, u := range util {
		sum += totalUtilOf(prev.Util, u.Time-prev.Time)
		sums[j] = sum
		prev = u
	}

	// Divide the utilization curve up into equal size
	// non-overlapping "bands" and compute a summary for each of
	// these bands.
	//
	// Compute the duration of each band.
	numBands := bandsPerSeries
	if numBands > len(util) {
		// There's no point in having lots of bands if there
		// aren't many events.
		numBands = len(util)
	}
	dur := util[len(util)-1].Time - util[0].Time
	bandDur := (dur + int64(numBands) - 1) / int64(numBands)
	if bandDur < 1 {
		bandDur = 1
	}
	// Compute the bands. There are numBands+1 bands in order to
	// record the final cumulative sum.
	bands := make([]mmuBand, numBands+1)
	s := mmuSeries{util, sums, bands, bandDur}
	leftSum := integrator{&s, 0}
	for i := range bands {
		startTime, endTime := s.bandTime(i)
		cumUtil := leftSum.advance(startTime)
		predIdx := leftSum.pos
		minUtil := 1.0
		for i := predIdx; i < len(util) && util[i].Time < endTime; i++ {
			minUtil = math.Min(minUtil, util[i].Util)
		}
		bands[i] = mmuBand{minUtil, cumUtil, leftSum}
	}

	return s
}

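// bandTime returns the start and end times of band i in s.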
func (s *mmuSeries) bandTime(i int) (start, end int64) {
	start = int64(i)*s.bandDur + s.util[0].Time
	end = start + s.bandDur
	return
}

type bandUtil struct {
	// Utilization series index
	series int
	// Band index
	i int
	// Lower bound of mutator utilization for all windows
	// with a left edge in this band.
	utilBound float64
}

type bandUtilHeap []bandUtil

func (h bandUtilHeap) Len() int {
	return len(h)
}

func (h bandUtilHeap) Less(i, j int) bool {
	return h[i].utilBound < h[j].utilBound
}

func (h bandUtilHeap) Swap(i, j int) {
	h[i], h[j] = h[j], h[i]
}

func (h *bandUtilHeap) Push(x any) {
	*h = append(*h, x.(bandUtil))
}

func (h *bandUtilHeap) Pop() any {
	x := (*h)[len(*h)-1]
	*h = (*h)[:len(*h)-1]
	return x
}

// UtilWindow is a specific window at Time.
type UtilWindow struct {
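	// Time is the start time of the window.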
	Time int64
	// MutatorUtil is the mean mutator utilization in this window.
	MutatorUtil float64
}

type utilHeap []UtilWindow

func (h utilHeap) Len() int {
	return len(h)
}

func (h utilHeap) Less(i, j int) bool {
	if h[i].MutatorUtil != h[j].MutatorUtil {
		return h[i].MutatorUtil > h[j].MutatorUtil
	}
	return h[i].Time > h[j].Time
}

func (h utilHeap) Swap(i, j int) {
	h[i], h[j] = h[j], h[i]
}

func (h *utilHeap) Push(x any) {
	*h = append(*h, x.(UtilWindow))
}

func (h *utilHeap) Pop() any {
	x := (*h)[len(*h)-1]
	*h = (*h)[:len(*h)-1]
	return x
}

// An accumulator takes a windowed mutator utilization function and
// tracks various statistics for that function.
type accumulator struct {
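	// mmu is the minimum mutator utilization seen so far.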
	mmu float64

	// bound is the mutator utilization bound where adding any
	// mutator utilization above this bound cannot affect the
	// accumulated statistics.
	bound float64

	// Worst N window tracking
	nWorst int
	wHeap  utilHeap

	// Mutator utilization distribution tracking
	mud *mud
	// preciseMass is the distribution mass that must be precise
	// before accumulation is stopped.
	preciseMass float64
	// lastTime and lastMU are the previous point added to the
	// windowed mutator utilization function.
	lastTime int64
	lastMU   float64
}

// resetTime declares a discontinuity in the windowed mutator
// utilization function by resetting the current time.
func (acc *accumulator) resetTime() {
	// This only matters for distribution collection, since that's
	// the only thing that depends on the progression of the
	// windowed mutator utilization function.
	acc.lastTime = math.MaxInt64
}

// addMU adds a point to the windowed mutator utilization function at
// (time, mu). This must be called for monotonically increasing values
// of time.
//
// It returns true if further calls to addMU would be pointless.
func (acc *accumulator) addMU(time int64, mu float64, window time.Duration) bool {
	if mu < acc.mmu {
		acc.mmu = mu
	}
	acc.bound = acc.mmu

	if acc.nWorst == 0 {
		// If the minimum has reached zero, it can't go any
		// lower, so we can stop early.
		return mu == 0
	}

	// Consider adding this window to the n worst.
	if len(acc.wHeap) < acc.nWorst || mu < acc.wHeap[0].MutatorUtil {
		// This window is lower than the Nth worst window.
		//
		// Check if there's any overlapping window
		// already in the heap and keep whichever is
		// worse.
		for i, ui := range acc.wHeap {
			if time+int64(window) > ui.Time && ui.Time+int64(window) > time {
				if ui.MutatorUtil <= mu {
					// Keep the first window.
					goto keep
				} else {
					// Replace it with this window.
					heap.Remove(&acc.wHeap, i)
					break
				}
			}
		}

		heap.Push(&acc.wHeap, UtilWindow{time, mu})
		if len(acc.wHeap) > acc.nWorst {
			heap.Pop(&acc.wHeap)
		}
	keep:
	}

	if len(acc.wHeap) < acc.nWorst {
		// We don't have N windows yet, so keep accumulating.
		acc.bound = 1.0
	} else {
		// Anything above the least worst window has no effect.
		acc.bound = math.Max(acc.bound, acc.wHeap[0].MutatorUtil)
	}

	if acc.mud != nil {
		if acc.lastTime != math.MaxInt64 {
			// Update distribution.
			acc.mud.add(acc.lastMU, mu, float64(time-acc.lastTime))
		}
		acc.lastTime, acc.lastMU = time, mu
		if _, mudBound, ok := acc.mud.approxInvCumulativeSum(); ok {
			acc.bound = math.Max(acc.bound, mudBound)
		} else {
			// We haven't accumulated enough total precise
			// mass yet to even reach our goal, so keep
			// accumulating.
			acc.bound = 1
		}
		// It's not worth checking percentiles every time, so
		// just keep accumulating this band.
		return false
	}

	// If we've found enough 0 utilizations, we can stop immediately.
	return len(acc.wHeap) == acc.nWorst && acc.wHeap[0].MutatorUtil == 0
}

// MMU returns the minimum mutator utilization for the given time
// window. This is the minimum utilization for all windows of this
// duration across the execution. The returned value is in the range
// [0, 1].
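//
// For example (an illustrative sketch; c is an MMUCurve constructed
// by NewMMUCurve from the output of MutatorUtilization):
//
//	fmt.Printf("MMU(10ms) = %v\n", c.MMU(10*time.Millisecond))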
func (c *MMUCurve) MMU(window time.Duration) (mmu float64) {
	acc := accumulator{mmu: 1.0, bound: 1.0}
	c.mmu(window, &acc)
	return acc.mmu
}

// Examples returns n specific examples of the lowest mutator
// utilization for the given window size. The returned windows will be
// disjoint (otherwise there would be a huge number of
// mostly-overlapping windows at the single lowest point). There are
// no guarantees on which set of disjoint windows this returns.
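//
// For example (an illustrative sketch), to print the three worst
// 10ms windows:
//
//	for _, w := range c.Examples(10*time.Millisecond, 3) {
//		fmt.Printf("%d: %v\n", w.Time, w.MutatorUtil)
//	}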
func (c *MMUCurve) Examples(window time.Duration, n int) (worst []UtilWindow) {
	acc := accumulator{mmu: 1.0, bound: 1.0, nWorst: n}
	c.mmu(window, &acc)
	sort.Sort(sort.Reverse(acc.wHeap))
	return ([]UtilWindow)(acc.wHeap)
}

// MUD returns mutator utilization distribution quantiles for the
// given window size.
//
// The mutator utilization distribution is the distribution of mean
// mutator utilization across all windows of the given window size in
// the trace.
//
// The minimum mutator utilization is the minimum (0th percentile) of
// this distribution. (However, if only the minimum is desired, it's
// more efficient to use the MMU method.)
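//
// For example (an illustrative sketch), the median and worst-case
// mean utilization over 10ms windows:
//
//	q := c.MUD(10*time.Millisecond, []float64{0.5, 0})
//	median, worst := q[0], q[1]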
func (c *MMUCurve) MUD(window time.Duration, quantiles []float64) []float64 {
	if len(quantiles) == 0 {
		return []float64{}
	}

	// Each unrefined band contributes a known total mass to the
	// distribution (bandDur except at the end), but in an unknown
	// way. However, we know that all the mass it contributes must
	// be at or above its worst-case mean mutator utilization.
	//
	// Hence, we refine bands until the highest desired
	// distribution quantile is less than the next worst-case mean
	// mutator utilization. At this point, all further
	// contributions to the distribution must be beyond the
	// desired quantile and hence cannot affect it.
	//
	// First, find the highest desired distribution quantile.
	maxQ := quantiles[0]
	for _, q := range quantiles {
		if q > maxQ {
			maxQ = q
		}
	}
	// The distribution's mass is in units of time (it's not
	// normalized because this would make it more annoying to
	// account for future contributions of unrefined bands). The
	// total final mass will be the duration of the trace itself
	// minus the window size. Using this, we can compute the mass
	// corresponding to quantile maxQ.
	var duration int64
	for _, s := range c.series {
		duration1 := s.util[len(s.util)-1].Time - s.util[0].Time
		if duration1 >= int64(window) {
			duration += duration1 - int64(window)
		}
	}
	qMass := float64(duration) * maxQ

	// Accumulate the MUD until we have precise information for
	// everything to the left of qMass.
	acc := accumulator{mmu: 1.0, bound: 1.0, preciseMass: qMass, mud: new(mud)}
	acc.mud.setTrackMass(qMass)
	c.mmu(window, &acc)

	// Evaluate the quantiles on the accumulated MUD.
	out := make([]float64, len(quantiles))
	for i := range out {
		mu, _ := acc.mud.invCumulativeSum(float64(duration) * quantiles[i])
		if math.IsNaN(mu) {
			// There are a few legitimate ways this can
			// happen:
			//
			// 1. If the window is the full trace
			// duration, then the windowed MU function is
			// only defined at a single point, so the MU
			// distribution is not well-defined.
			//
			// 2. If there are no events, then the MU
			// distribution has no mass.
			//
			// Either way, all of the quantiles will have
			// converged toward the MMU at this point.
			mu = acc.mmu
		}
		out[i] = mu
	}
	return out
}

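// mmu refines the windowed mutator utilization function for window
// into acc, processing bands in order of increasing utilization lower
// bound and stopping once further bands cannot affect acc's
// statistics.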
func (c *MMUCurve) mmu(window time.Duration, acc *accumulator) {
	if window <= 0 {
		acc.mmu = 0
		return
	}

	var bandU bandUtilHeap
	windows := make([]time.Duration, len(c.series))
	for i, s := range c.series {
		windows[i] = window
		if max := time.Duration(s.util[len(s.util)-1].Time - s.util[0].Time); window > max {
			windows[i] = max
		}

		bandU1 := bandUtilHeap(s.mkBandUtil(i, windows[i]))
		if bandU == nil {
			bandU = bandU1
		} else {
			bandU = append(bandU, bandU1...)
		}
	}

	// Process bands from lowest utilization bound to highest.
	heap.Init(&bandU)

	// Refine each band into a precise window and MMU until
	// refining the next lowest band can no longer affect the MMU
	// or windows.
	for len(bandU) > 0 && bandU[0].utilBound < acc.bound {
		i := bandU[0].series
		c.series[i].bandMMU(bandU[0].i, windows[i], acc)
		heap.Pop(&bandU)
	}
}

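// mkBandUtil returns, for each band of series, a lower bound on the
// mean mutator utilization of every window whose left edge falls in
// that band.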
func (c *mmuSeries) mkBandUtil(series int, window time.Duration) []bandUtil {
	// For each band, compute the worst-possible total mutator
	// utilization for all windows that start in that band.

	// minBands is the minimum number of bands a window can span
	// and maxBands is the maximum number of bands a window can
	// span in any alignment.
	minBands := int((int64(window) + c.bandDur - 1) / c.bandDur)
	maxBands := int((int64(window) + 2*(c.bandDur-1)) / c.bandDur)
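	// For example (illustrative numbers), with bandDur 10 and a
	// window of 25, a window covers parts of at least 3 bands
	// (minBands) and at most 4 bands (maxBands), depending on how
	// it is aligned relative to the band boundaries.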
	if window > 1 && maxBands < 2 {
		panic("maxBands < 2")
	}
	tailDur := int64(window) % c.bandDur
	nUtil := len(c.bands) - maxBands + 1
	if nUtil < 0 {
		nUtil = 0
	}
	bandU := make([]bandUtil, nUtil)
	for i := range bandU {
		// To compute the worst-case MU, we assume the minimum
		// for any bands that are only partially overlapped by
		// some window and the mean for any bands that are
		// completely covered by all windows.
		var util totalUtil

		// Find the lowest and second lowest of the partial
		// bands.
		l := c.bands[i].minUtil
		r1 := c.bands[i+minBands-1].minUtil
		r2 := c.bands[i+maxBands-1].minUtil
		minBand := math.Min(l, math.Min(r1, r2))
		// Assume the worst window maximally overlaps the
		// worst minimum and then the rest overlaps the second
		// worst minimum.
		if minBands == 1 {
			util += totalUtilOf(minBand, int64(window))
		} else {
			util += totalUtilOf(minBand, c.bandDur)
			midBand := 0.0
			switch {
			case minBand == l:
				midBand = math.Min(r1, r2)
			case minBand == r1:
				midBand = math.Min(l, r2)
			case minBand == r2:
				midBand = math.Min(l, r1)
			}
			util += totalUtilOf(midBand, tailDur)
		}

		// Add the total mean MU of bands that are completely
		// overlapped by all windows.
		if minBands > 2 {
			util += c.bands[i+minBands-1].cumUtil - c.bands[i+1].cumUtil
		}

		bandU[i] = bandUtil{series, i, util.mean(window)}
	}

	return bandU
}

// bandMMU computes the precise minimum mutator utilization for
// windows with a left edge in band bandIdx.
func (c *mmuSeries) bandMMU(bandIdx int, window time.Duration, acc *accumulator) {
	util := c.util

	// We think of the mutator utilization over time as the
	// box-filtered utilization function, which we call the
	// "windowed mutator utilization function". The resulting
	// function is continuous and piecewise linear (unless
	// window==0, which we handle elsewhere), where the boundaries
	// between segments occur when either edge of the window
	// encounters a change in the instantaneous mutator
	// utilization function. Hence, the minimum of this function
	// will always occur when one of the edges of the window
	// aligns with a utilization change, so these are the only
	// points we need to consider.
	//
	// We compute the mutator utilization function incrementally
	// by tracking the integral from t=0 to the left edge of the
	// window and to the right edge of the window.
	left := c.bands[bandIdx].integrator
	right := left
	time, endTime := c.bandTime(bandIdx)
	if utilEnd := util[len(util)-1].Time - int64(window); utilEnd < endTime {
		endTime = utilEnd
	}
	acc.resetTime()
	for {
		// Advance edges to time and time+window.
		mu := (right.advance(time+int64(window)) - left.advance(time)).mean(window)
		if acc.addMU(time, mu, window) {
			break
		}
		if time == endTime {
			break
		}

		// The maximum slope of the windowed mutator
		// utilization function is 1/window, so we can always
		// advance the time by at least (mu - mmu) * window
		// without dropping below mmu.
		minTime := time + int64((mu-acc.bound)*float64(window))

		// Advance the window to the next time where either
		// the left or right edge of the window encounters a
		// change in the utilization curve.
		if t1, t2 := left.next(time), right.next(time+int64(window))-int64(window); t1 < t2 {
			time = t1
		} else {
			time = t2
		}
		if time < minTime {
			time = minTime
		}
		if time >= endTime {
			// For MMUs we could stop here, but for MUDs
			// it's important that we span the entire
			// band.
			time = endTime
		}
	}
}

// An integrator tracks a position in a utilization function and
// integrates it.
type integrator struct {
	u *mmuSeries
	// pos is the index in u.util of the current time's non-strict
	// predecessor.
	pos int
}

// advance returns the integral of the utilization function from 0 to
// time. advance must be called on monotonically increasing values of
// time.
func (in *integrator) advance(time int64) totalUtil {
	util, pos := in.u.util, in.pos
	// Advance pos until pos+1 is time's strict successor (making
	// pos time's non-strict predecessor).
	//
	// Very often, this will be nearby, so we optimize that case,
	// but it may be arbitrarily far away, so we handle that
	// efficiently, too.
	const maxSeq = 8
	if pos+maxSeq < len(util) && util[pos+maxSeq].Time > time {
		// Nearby. Use a linear scan.
		for pos+1 < len(util) && util[pos+1].Time <= time {
			pos++
		}
	} else {
		// Far. Binary search for time's strict successor.
		l, r := pos, len(util)
		for l < r {
			h := int(uint(l+r) >> 1)
			if util[h].Time <= time {
				l = h + 1
			} else {
				r = h
			}
		}
		pos = l - 1 // Non-strict predecessor.
	}
	in.pos = pos
	var partial totalUtil
	if time != util[pos].Time {
		partial = totalUtilOf(util[pos].Util, time-util[pos].Time)
	}
	return in.u.sums[pos] + partial
}

// next returns the smallest time t' > time of a change in the
// utilization function.
func (in *integrator) next(time int64) int64 {
	for _, u := range in.u.util[in.pos:] {
		if u.Time > time {
			return u.Time
		}
	}
	return 1<<63 - 1
}