/*
 * Copyright 2019 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Package balancergroup implements a utility struct to bind multiple balancers
// into one balancer.
package balancergroup

import (
	"fmt"
	"sync"
	"time"

	orcapb "github.com/cncf/udpa/go/udpa/data/orca/v1"
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/internal/cache"
	"google.golang.org/grpc/internal/grpclog"
	"google.golang.org/grpc/internal/wrr"
	"google.golang.org/grpc/resolver"
	"google.golang.org/grpc/xds/internal"
	"google.golang.org/grpc/xds/internal/balancer/lrs"
)
// subBalancerWithConfig keeps the configuration that will be used to start the
// underlying balancer. It can be called to start/stop the underlying balancer.
//
// When the config changes, it passes the update to the underlying balancer if
// it exists.
//
// TODO: rename to subBalancerWrapper (and move to a separate file?)
type subBalancerWithConfig struct {
	// subBalancerWithConfig is passed to the sub-balancer as a ClientConn
	// wrapper, only to keep the state and picker. When the sub-balancer is
	// restarted while in cache, the picker needs to be resent.
	//
	// It also contains the sub-balancer ID, so the parent balancer group can
	// keep track of SubConns/pickers and the sub-balancers they belong to.
	// Some of the actions are forwarded to the parent ClientConn with no
	// change. Some are forwarded to the balancer group with the sub-balancer
	// ID.
	balancer.ClientConn
	id    internal.Locality
	group *BalancerGroup

	mu    sync.Mutex
	state balancer.State

	// The static part of the sub-balancer. Keeps the balancer builder and
	// addresses. To be used when restarting the sub-balancer.
	builder balancer.Builder
	addrs   []resolver.Address
	// The dynamic part of the sub-balancer. Only used when the balancer group
	// is started. Gets cleared when the sub-balancer is closed.
	balancer balancer.Balancer
}

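// UpdateBalancerState overrides balancer.ClientConn. It is left as a no-op:
// sub-balancers in this group report their state and picker through
// UpdateState below.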
func (sbc *subBalancerWithConfig) UpdateBalancerState(state connectivity.State, picker balancer.Picker) {
}

// UpdateState overrides balancer.ClientConn, to keep state and picker.
func (sbc *subBalancerWithConfig) UpdateState(state balancer.State) {
	sbc.mu.Lock()
	sbc.state = state
	sbc.group.updateBalancerState(sbc.id, state)
	sbc.mu.Unlock()
}

// NewSubConn overrides balancer.ClientConn, so balancer group can keep track of
// the relation between subconns and sub-balancers.
func (sbc *subBalancerWithConfig) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
	return sbc.group.newSubConn(sbc, addrs, opts)
}

func (sbc *subBalancerWithConfig) updateBalancerStateWithCachedPicker() {
	sbc.mu.Lock()
	if sbc.state.Picker != nil {
		sbc.group.updateBalancerState(sbc.id, sbc.state)
	}
	sbc.mu.Unlock()
}

func (sbc *subBalancerWithConfig) startBalancer() {
	b := sbc.builder.Build(sbc, balancer.BuildOptions{})
	sbc.group.logger.Infof("Created child policy %p of type %v", b, sbc.builder.Name())
	sbc.balancer = b
	if ub, ok := b.(balancer.V2Balancer); ok {
		ub.UpdateClientConnState(balancer.ClientConnState{ResolverState: resolver.State{Addresses: sbc.addrs}})
	} else {
		b.HandleResolvedAddrs(sbc.addrs, nil)
	}
}

func (sbc *subBalancerWithConfig) handleSubConnStateChange(sc balancer.SubConn, state connectivity.State) {
	b := sbc.balancer
	if b == nil {
		// This sub-balancer was closed. This can happen when EDS removes a
		// locality. The balancer for this locality was already closed, and the
		// SubConns are being deleted. But SubConn state changes can still
		// happen.
		return
	}
	if ub, ok := b.(balancer.V2Balancer); ok {
		ub.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: state})
	} else {
		b.HandleSubConnStateChange(sc, state)
	}
}

func (sbc *subBalancerWithConfig) updateAddrs(addrs []resolver.Address) {
	sbc.addrs = addrs
	b := sbc.balancer
	if b == nil {
		// This sub-balancer was closed. This should never happen because
		// sub-balancers are closed when the locality is removed from EDS, or
		// when the balancer group is closed. There should be no further
		// address updates when either of these happens.
		//
		// This will be a common case with priority support, because a
		// sub-balancer (and the whole balancer group) could be closed because
		// it's at a lower priority, but it can still get address updates.
		return
	}
	if ub, ok := b.(balancer.V2Balancer); ok {
		ub.UpdateClientConnState(balancer.ClientConnState{ResolverState: resolver.State{Addresses: addrs}})
	} else {
		b.HandleResolvedAddrs(addrs, nil)
	}
}

func (sbc *subBalancerWithConfig) stopBalancer() {
	sbc.balancer.Close()
	sbc.balancer = nil
}

type pickerState struct {
	weight uint32
	picker balancer.V2Picker
	state  connectivity.State
}

func (s *pickerState) String() string {
	return fmt.Sprintf("weight:%v,picker:%p,state:%v", s.weight, s.picker, s.state)
}

// BalancerGroup takes a list of balancers, and makes them into one balancer.
//
// Note that this struct doesn't implement balancer.Balancer, because it's not
// intended to be used directly as a balancer. It's expected to be used as a
// sub-balancer manager by a high level balancer.
//
// Updates from the ClientConn are forwarded to sub-balancers
//  - service config update
//     - Not implemented
//  - address update
//  - subConn state change
//     - find the corresponding balancer and forward
//
// Actions from sub-balancers are forwarded to the parent ClientConn
//  - new/remove SubConn
//  - picker update and health state change
//     - sub-pickers are grouped into a group-picker
//     - aggregated connectivity state is the overall state of all pickers
//  - resolveNow
//
// Sub-balancers are only built when the balancer group is started. If the
// balancer group is closed, the sub-balancers are also closed. And it's
// guaranteed that no updates will be sent to the parent ClientConn from a
// closed balancer group.
type BalancerGroup struct {
	cc        balancer.ClientConn
	logger    *grpclog.PrefixLogger
	loadStore lrs.Store

	// outgoingMu guards all operations in the direction:
	// ClientConn-->Sub-balancer. Including start, stop, resolver updates and
	// SubConn state changes.
	//
	// The corresponding boolean outgoingStarted is used to stop further updates
	// to sub-balancers after they are closed.
	outgoingMu         sync.Mutex
	outgoingStarted    bool
	idToBalancerConfig map[internal.Locality]*subBalancerWithConfig
	// Cache for sub-balancers when they are removed.
	balancerCache *cache.TimeoutCache

	// incomingMu is to make sure this balancer group doesn't send updates to
	// cc after it's closed.
	//
	// We don't share a mutex with outgoingMu, to avoid deadlocks (e.g. a call
	// to a sub-balancer may call back into the balancer group inline, which
	// causes a deadlock if both directions require the same mutex).
	//
	// We should never need to hold multiple locks at the same time in this
	// struct. The case where two locks are held can only happen when the
	// underlying balancer calls back into the balancer group inline. So
	// there's an implicit lock acquisition order: outgoingMu is locked before
	// incomingMu.

	// incomingMu guards all operations in the direction:
	// Sub-balancer-->ClientConn. Including NewSubConn, RemoveSubConn, and
	// updatePicker. It also guards the map from SubConn to sub-balancer, so
	// handleSubConnStateChange needs to hold it briefly to find the
	// sub-balancer to forward the update to.
	//
	// The corresponding boolean incomingStarted is used to stop further updates
	// from sub-balancers after they are closed.
	incomingMu      sync.Mutex
	incomingStarted bool // This boolean only guards calls back to ClientConn.
	scToSubBalancer map[balancer.SubConn]*subBalancerWithConfig
	// All balancer IDs exist as keys in this map, even if the balancer group
	// is not started.
	//
	// If an ID is not in the map, it was either removed or never added.
	idToPickerState map[internal.Locality]*pickerState
}

// DefaultSubBalancerCloseTimeout is defined as a variable instead of const for
// testing.
//
// TODO: make it a parameter for New().
var DefaultSubBalancerCloseTimeout = 15 * time.Minute

// New creates a new BalancerGroup. Note that the BalancerGroup
// needs to be started to work.
func New(cc balancer.ClientConn, loadStore lrs.Store, logger *grpclog.PrefixLogger) *BalancerGroup {
	return &BalancerGroup{
		cc:        cc,
		logger:    logger,
		loadStore: loadStore,

		idToBalancerConfig: make(map[internal.Locality]*subBalancerWithConfig),
		balancerCache:      cache.NewTimeoutCache(DefaultSubBalancerCloseTimeout),
		scToSubBalancer:    make(map[balancer.SubConn]*subBalancerWithConfig),
		idToPickerState:    make(map[internal.Locality]*pickerState),
	}
}
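
// A rough usage sketch for a parent balancer (illustrative only; ccWrapper,
// logger, localityA and addrsA are hypothetical values owned by the caller):
//
//	bg := balancergroup.New(ccWrapper, nil, logger)
//	bg.Add(localityA, 1, balancer.Get(roundrobin.Name))
//	bg.Start()                                // builds sub-balancers, sends cached addresses
//	bg.HandleResolvedAddrs(localityA, addrsA) // forwarded to locality A's sub-balancer
//	// ...
//	bg.Close()                                // stops sub-balancers; weights are kept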

// Start starts the balancer group, including building all the sub-balancers,
// and sending the existing addresses to them.
//
// A BalancerGroup can be closed and started later. When a BalancerGroup is
// closed, it can still receive address updates, which will be applied when
// restarted.
func (bg *BalancerGroup) Start() {
	bg.incomingMu.Lock()
	bg.incomingStarted = true
	bg.incomingMu.Unlock()

	bg.outgoingMu.Lock()
	if bg.outgoingStarted {
		bg.outgoingMu.Unlock()
		return
	}

	for _, config := range bg.idToBalancerConfig {
		config.startBalancer()
	}
	bg.outgoingStarted = true
	bg.outgoingMu.Unlock()
}

// Add adds a balancer built by builder to the group, with the given id and
// weight.
//
// weight should never be zero.
func (bg *BalancerGroup) Add(id internal.Locality, weight uint32, builder balancer.Builder) {
	if weight == 0 {
		bg.logger.Errorf("BalancerGroup.Add called with weight 0, locality: %v. Locality is not added to balancer group", id)
		return
	}

	// First, add things to the picker map. Do this even if incomingStarted is
	// false, because the data is static.
	bg.incomingMu.Lock()
	bg.idToPickerState[id] = &pickerState{
		weight: weight,
		// Start everything in IDLE. It doesn't affect the overall state
		// because IDLE is not counted when aggregating (as opposed to e.g.
		// READY, where one READY results in an overall READY).
		state: connectivity.Idle,
	}
	bg.incomingMu.Unlock()

	// Store data in the static map, and then check whether bg is started.
	bg.outgoingMu.Lock()
	var sbc *subBalancerWithConfig
	// If outgoingStarted is true, search in the cache. Otherwise, the cache is
	// guaranteed to be empty, so searching is unnecessary.
	if bg.outgoingStarted {
		if old, ok := bg.balancerCache.Remove(id); ok {
			sbc, _ = old.(*subBalancerWithConfig)
			if sbc != nil && sbc.builder != builder {
				// If the sub-balancer in the cache was built with a different
				// balancer builder, don't use it; clean up the old balancer,
				// and behave as if the sub-balancer was not found in the
				// cache.
				//
				// NOTE that this also drops the cached addresses for this
				// sub-balancer, which seems reasonable.
				sbc.stopBalancer()
				// cleanupSubConns must be done before the new balancer starts,
				// otherwise new SubConns created by the new balancer might be
				// removed by mistake.
				bg.cleanupSubConns(sbc)
				sbc = nil
			}
		}
	}
	if sbc == nil {
		sbc = &subBalancerWithConfig{
			ClientConn: bg.cc,
			id:         id,
			group:      bg,
			builder:    builder,
		}
		if bg.outgoingStarted {
			// Only start the balancer if bg is started. Otherwise, we only keep the
			// static data.
			sbc.startBalancer()
		}
	} else {
		// When bringing back a sub-balancer from the cache, re-send the cached
		// picker and state.
		sbc.updateBalancerStateWithCachedPicker()
	}
	bg.idToBalancerConfig[id] = sbc
	bg.outgoingMu.Unlock()
}

// Remove removes the balancer with id from the group.
//
// It doesn't close the balancer immediately. The balancer is kept in a cache,
// and the cleanup work (closing the sub-balancer and removing its subconns) is
// done after a timeout.
//
// It also removes the picker generated from this balancer from the picker
// group. It always results in a picker update.
func (bg *BalancerGroup) Remove(id internal.Locality) {
	bg.outgoingMu.Lock()
	if sbToRemove, ok := bg.idToBalancerConfig[id]; ok {
		if bg.outgoingStarted {
			bg.balancerCache.Add(id, sbToRemove, func() {
				// After the timeout, when the sub-balancer is removed from the
				// cache, close the underlying sub-balancer and remove all its
				// subconns.
				bg.outgoingMu.Lock()
				if bg.outgoingStarted {
					sbToRemove.stopBalancer()
				}
				bg.outgoingMu.Unlock()
				bg.cleanupSubConns(sbToRemove)
			})
		}
		delete(bg.idToBalancerConfig, id)
	} else {
		bg.logger.Infof("balancer group: trying to remove a non-existent locality: %v", id)
	}
	bg.outgoingMu.Unlock()

	bg.incomingMu.Lock()
	// Remove id and picker from the picker map. This also causes future
	// updates for this ID to be ignored.
	delete(bg.idToPickerState, id)
	if bg.incomingStarted {
		// Normally a picker update is triggered by a SubConn state change, but
		// we also want the state and picker to reflect this removal, because
		// the ClientConn should not pick this sub-balancer anymore.
		bg.cc.UpdateState(buildPickerAndState(bg.idToPickerState))
	}
	bg.incomingMu.Unlock()
}

// Remove(id) doesn't do cleanup for the sub-balancer. This function does the
// cleanup after the timeout.
func (bg *BalancerGroup) cleanupSubConns(config *subBalancerWithConfig) {
	bg.incomingMu.Lock()
	// Remove SubConns. This is only done after the balancer is
	// actually closed.
	//
	// NOTE: if NewSubConn is called by this (closed) balancer later, the
	// SubConn will be leaked. This shouldn't happen if the balancer
	// implementation is correct. To make sure this never happens, we need to
	// add another layer (balancer manager) between balancer group and the
	// sub-balancers.
	for sc, b := range bg.scToSubBalancer {
		if b == config {
			bg.cc.RemoveSubConn(sc)
			delete(bg.scToSubBalancer, sc)
		}
	}
	bg.incomingMu.Unlock()
}

// ChangeWeight changes the weight of the balancer with the given id.
//
// newWeight should never be zero.
//
// NOTE: It always results in a picker update now. This probably isn't
// necessary. But it seems better to do the update because it's a change in the
// picker (which is the balancer's snapshot).
func (bg *BalancerGroup) ChangeWeight(id internal.Locality, newWeight uint32) {
	if newWeight == 0 {
		bg.logger.Errorf("BalancerGroup.ChangeWeight called with newWeight 0. Weight is not changed")
		return
	}
	bg.incomingMu.Lock()
	defer bg.incomingMu.Unlock()
	pState, ok := bg.idToPickerState[id]
	if !ok {
		return
	}
	if pState.weight == newWeight {
		return
	}
	pState.weight = newWeight
	if bg.incomingStarted {
		// Normally a picker update is triggered by a SubConn state change, but
		// we also want the state and picker to reflect this change, because
		// the ClientConn should now pick with the new weights.
		bg.cc.UpdateState(buildPickerAndState(bg.idToPickerState))
	}
}

// Following are actions from the parent grpc.ClientConn, forwarded to sub-balancers.

// HandleSubConnStateChange handles the state for the subconn. It finds the
// corresponding balancer and forwards the update.
func (bg *BalancerGroup) HandleSubConnStateChange(sc balancer.SubConn, state connectivity.State) {
	bg.incomingMu.Lock()
	config, ok := bg.scToSubBalancer[sc]
	if !ok {
		bg.incomingMu.Unlock()
		return
	}
	if state == connectivity.Shutdown {
		// Only delete sc from the map when the state changes to Shutdown.
		delete(bg.scToSubBalancer, sc)
	}
	bg.incomingMu.Unlock()

	bg.outgoingMu.Lock()
	config.handleSubConnStateChange(sc, state)
	bg.outgoingMu.Unlock()
}

// HandleResolvedAddrs handles addresses from resolver. It finds the balancer
// and forwards the update.
//
// TODO: change this to UpdateClientConnState to handle addresses and balancer
// config.
func (bg *BalancerGroup) HandleResolvedAddrs(id internal.Locality, addrs []resolver.Address) {
	bg.outgoingMu.Lock()
	if config, ok := bg.idToBalancerConfig[id]; ok {
		config.updateAddrs(addrs)
	}
	bg.outgoingMu.Unlock()
}

// TODO: handleServiceConfig()
//
// The BNS address for the slicer comes from endpoint.Metadata. It will be sent
// from the parent to sub-balancers as service config.

// Following are actions from sub-balancers, forwarded to the ClientConn.

// newSubConn forwards to the ClientConn, and also records the mapping from sc
// to sub-balancer, so state updates can find the right balancer.
//
// One note about removing SubConn: it is only forwarded to the ClientConn; the
// entry is not deleted from the map until the state changes to Shutdown. Since
// removal is just forwarding the action, there's no need for a removeSubConn()
// wrapper function.
func (bg *BalancerGroup) newSubConn(config *subBalancerWithConfig, addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
	// NOTE: if the balancer with this id was already removed, this should also
	// return an error. But since we call stopBalancer when removing the
	// balancer, this shouldn't happen.
	bg.incomingMu.Lock()
	if !bg.incomingStarted {
		bg.incomingMu.Unlock()
		return nil, fmt.Errorf("NewSubConn is called after balancer group is closed")
	}
	sc, err := bg.cc.NewSubConn(addrs, opts)
	if err != nil {
		bg.incomingMu.Unlock()
		return nil, err
	}
	bg.scToSubBalancer[sc] = config
	bg.incomingMu.Unlock()
	return sc, nil
}

// updateBalancerState: create an aggregated picker and an aggregated
// connectivity state, then forward to ClientConn.
func (bg *BalancerGroup) updateBalancerState(id internal.Locality, state balancer.State) {
	bg.logger.Infof("Balancer state update from locality %v, new state: %+v", id, state)

	bg.incomingMu.Lock()
	defer bg.incomingMu.Unlock()
	pickerSt, ok := bg.idToPickerState[id]
	if !ok {
		// All state starts in IDLE. If the ID is not in the map, it was either
		// removed or never existed.
		bg.logger.Warningf("balancer group: pickerState for %v not found when updating picker/state", id)
		return
	}
	pickerSt.picker = newLoadReportPicker(state.Picker, id, bg.loadStore)
	pickerSt.state = state.ConnectivityState
	if bg.incomingStarted {
		bg.logger.Infof("Child pickers with weight: %+v", bg.idToPickerState)
		bg.cc.UpdateState(buildPickerAndState(bg.idToPickerState))
	}
}

// Close closes the balancer. It stops sub-balancers, and removes the subconns.
// The BalancerGroup can be restarted later.
func (bg *BalancerGroup) Close() {
	bg.incomingMu.Lock()
	if bg.incomingStarted {
		bg.incomingStarted = false

		for _, pState := range bg.idToPickerState {
			// Reset everything to IDLE but keep the entry in the map (to keep
			// the weight).
			pState.picker = nil
			pState.state = connectivity.Idle
		}

		// Also remove all SubConns.
		for sc := range bg.scToSubBalancer {
			bg.cc.RemoveSubConn(sc)
			delete(bg.scToSubBalancer, sc)
		}
	}
	bg.incomingMu.Unlock()

	bg.outgoingMu.Lock()
	if bg.outgoingStarted {
		bg.outgoingStarted = false
		for _, config := range bg.idToBalancerConfig {
			config.stopBalancer()
		}
	}
	bg.outgoingMu.Unlock()
	// Clear(true) runs the clear callback to close sub-balancers in the cache.
	// It must be called outside of the outgoing mutex.
	bg.balancerCache.Clear(true)
}

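// buildPickerAndState aggregates the child pickers and connectivity states.
// The overall state is READY if at least one child is READY, otherwise
// CONNECTING if at least one child is CONNECTING, otherwise TRANSIENT_FAILURE.
// Only READY pickers are included in the aggregated picker, weighted by their
// configured weights.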
func buildPickerAndState(m map[internal.Locality]*pickerState) balancer.State {
	var readyN, connectingN int
	readyPickerWithWeights := make([]pickerState, 0, len(m))
	for _, ps := range m {
		switch ps.state {
		case connectivity.Ready:
			readyN++
			readyPickerWithWeights = append(readyPickerWithWeights, *ps)
		case connectivity.Connecting:
			connectingN++
		}
	}
	var aggregatedState connectivity.State
	switch {
	case readyN > 0:
		aggregatedState = connectivity.Ready
	case connectingN > 0:
		aggregatedState = connectivity.Connecting
	default:
		aggregatedState = connectivity.TransientFailure
	}
	if aggregatedState == connectivity.TransientFailure {
		return balancer.State{ConnectivityState: aggregatedState, Picker: base.NewErrPickerV2(balancer.ErrTransientFailure)}
	}
	return balancer.State{ConnectivityState: aggregatedState, Picker: newPickerGroup(readyPickerWithWeights)}
}

// NewRandomWRR is the WRR constructor used to pick sub-pickers from
// sub-balancers. It's to be modified in tests.
var NewRandomWRR = wrr.NewRandom
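
// For example, a test might install a deterministic WRR before exercising the
// group (sketch only; testWRR is a hypothetical test type implementing
// wrr.WRR):
//
//	NewRandomWRR = func() wrr.WRR { return &testWRR{} }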

type pickerGroup struct {
	length int
	w      wrr.WRR
}

// newPickerGroup takes pickers with weights, and groups them into one picker.
//
// Note it only takes ready pickers. The input shouldn't contain non-ready
// pickers.
//
// TODO: (bg) confirm this is the expected behavior: non-ready balancers should
// be ignored when picking. Only ready balancers are picked.
func newPickerGroup(readyPickerWithWeights []pickerState) *pickerGroup {
	w := NewRandomWRR()
	for _, ps := range readyPickerWithWeights {
		w.Add(ps.picker, int64(ps.weight))
	}

	return &pickerGroup{
		length: len(readyPickerWithWeights),
		w:      w,
	}
}
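
// For example, if two READY localities are added with weights 2 and 1, the
// group picker delegates roughly two thirds of the picks to the first
// locality's picker and one third to the second, subject to the randomness of
// the WRR implementation.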

func (pg *pickerGroup) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
	if pg.length <= 0 {
		return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
	}
	p := pg.w.Next().(balancer.V2Picker)
	return p.Pick(info)
}

const (
	serverLoadCPUName    = "cpu_utilization"
	serverLoadMemoryName = "mem_utilization"
)

type loadReportPicker struct {
	p balancer.V2Picker

	id        internal.Locality
	loadStore lrs.Store
}

func newLoadReportPicker(p balancer.V2Picker, id internal.Locality, loadStore lrs.Store) *loadReportPicker {
	return &loadReportPicker{
		p:         p,
		id:        id,
		loadStore: loadStore,
	}
}

func (lrp *loadReportPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
	res, err := lrp.p.Pick(info)
	if lrp.loadStore != nil && err == nil {
		lrp.loadStore.CallStarted(lrp.id)
		td := res.Done
		res.Done = func(info balancer.DoneInfo) {
			lrp.loadStore.CallFinished(lrp.id, info.Err)
			if load, ok := info.ServerLoad.(*orcapb.OrcaLoadReport); ok {
				lrp.loadStore.CallServerLoad(lrp.id, serverLoadCPUName, load.CpuUtilization)
				lrp.loadStore.CallServerLoad(lrp.id, serverLoadMemoryName, load.MemUtilization)
				for n, d := range load.RequestCost {
					lrp.loadStore.CallServerLoad(lrp.id, n, d)
				}
				for n, d := range load.Utilization {
					lrp.loadStore.CallServerLoad(lrp.id, n, d)
				}
			}
			if td != nil {
				td(info)
			}
		}
	}
	return res, err
}