xref: /linux/net/rxrpc/conn_client.c (revision 021bc4b9)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* Client connection-specific management code.
3  *
4  * Copyright (C) 2016, 2020 Red Hat, Inc. All Rights Reserved.
5  * Written by David Howells (dhowells@redhat.com)
6  *
7  * Client connections need to be cached for a little while after they've made a
8  * call so as to handle retransmitted DATA packets in case the server didn't
9  * receive the final ACK or terminating ABORT we sent it.
10  *
11  * There is one flag of relevance to the cache:
12  *
13  *  (1) DONT_REUSE - The connection should be discarded as soon as possible and
14  *      should not be reused.  This is set when an exclusive connection is used
15  *      or a call ID counter overflows.
16  *
17  * The caching state may only be changed if the cache lock is held.
18  *
19  * There are two idle client connection expiry durations.  If the total number
20  * of connections is below the reap threshold, we use the normal duration; if
21  * it's above, we use the fast duration.
22  */
23 
24 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
25 
26 #include <linux/slab.h>
27 #include <linux/idr.h>
28 #include <linux/timer.h>
29 #include <linux/sched/signal.h>
30 
31 #include "ar-internal.h"
32 
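/* Client connection cache tunables (described at the top of this file).  The
 * reap threshold is a connection count; the two idle expiry durations are in
 * jiffies.
 */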
33 __read_mostly unsigned int rxrpc_reap_client_connections = 900;
34 __read_mostly unsigned long rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
35 __read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ;
36 
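/* Bump the active count on a bundle.  ->active counts the connections and
 * calls currently making use of the bundle, as distinct from ->ref, which
 * counts pointers to it; when ->active falls to zero the bundle is detached
 * from the local endpoint's tree by rxrpc_deactivate_bundle().
 */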
37 static void rxrpc_activate_bundle(struct rxrpc_bundle *bundle)
38 {
39 	atomic_inc(&bundle->active);
40 }
41 
42 /*
43  * Release a connection ID for a client connection.
44  */
45 static void rxrpc_put_client_connection_id(struct rxrpc_local *local,
46 					   struct rxrpc_connection *conn)
47 {
48 	idr_remove(&local->conn_ids, conn->proto.cid >> RXRPC_CIDSHIFT);
49 }
50 
51 /*
52  * Destroy the client connection ID tree.
53  */
54 static void rxrpc_destroy_client_conn_ids(struct rxrpc_local *local)
55 {
56 	struct rxrpc_connection *conn;
57 	int id;
58 
59 	if (!idr_is_empty(&local->conn_ids)) {
60 		idr_for_each_entry(&local->conn_ids, conn, id) {
61 			pr_err("AF_RXRPC: Leaked client conn %p {%d}\n",
62 			       conn, refcount_read(&conn->ref));
63 		}
64 		BUG();
65 	}
66 
67 	idr_destroy(&local->conn_ids);
68 }
69 
70 /*
71  * Allocate a connection bundle.
72  */
73 static struct rxrpc_bundle *rxrpc_alloc_bundle(struct rxrpc_call *call,
74 					       gfp_t gfp)
75 {
76 	static atomic_t rxrpc_bundle_id;
77 	struct rxrpc_bundle *bundle;
78 
79 	bundle = kzalloc(sizeof(*bundle), gfp);
80 	if (bundle) {
81 		bundle->local		= call->local;
82 		bundle->peer		= rxrpc_get_peer(call->peer, rxrpc_peer_get_bundle);
83 		bundle->key		= key_get(call->key);
84 		bundle->security	= call->security;
85 		bundle->exclusive	= test_bit(RXRPC_CALL_EXCLUSIVE, &call->flags);
86 		bundle->upgrade		= test_bit(RXRPC_CALL_UPGRADE, &call->flags);
87 		bundle->service_id	= call->dest_srx.srx_service;
88 		bundle->security_level	= call->security_level;
89 		bundle->debug_id	= atomic_inc_return(&rxrpc_bundle_id);
90 		refcount_set(&bundle->ref, 1);
91 		atomic_set(&bundle->active, 1);
92 		INIT_LIST_HEAD(&bundle->waiting_calls);
93 		trace_rxrpc_bundle(bundle->debug_id, 1, rxrpc_bundle_new);
94 
95 		write_lock(&bundle->local->rxnet->conn_lock);
96 		list_add_tail(&bundle->proc_link, &bundle->local->rxnet->bundle_proc_list);
97 		write_unlock(&bundle->local->rxnet->conn_lock);
98 	}
99 	return bundle;
100 }
101 
102 struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *bundle,
103 				      enum rxrpc_bundle_trace why)
104 {
105 	int r;
106 
107 	__refcount_inc(&bundle->ref, &r);
108 	trace_rxrpc_bundle(bundle->debug_id, r + 1, why);
109 	return bundle;
110 }
111 
112 static void rxrpc_free_bundle(struct rxrpc_bundle *bundle)
113 {
114 	trace_rxrpc_bundle(bundle->debug_id, refcount_read(&bundle->ref),
115 			   rxrpc_bundle_free);
116 	write_lock(&bundle->local->rxnet->conn_lock);
117 	list_del(&bundle->proc_link);
118 	write_unlock(&bundle->local->rxnet->conn_lock);
119 	rxrpc_put_peer(bundle->peer, rxrpc_peer_put_bundle);
120 	key_put(bundle->key);
121 	kfree(bundle);
122 }
123 
124 void rxrpc_put_bundle(struct rxrpc_bundle *bundle, enum rxrpc_bundle_trace why)
125 {
126 	unsigned int id;
127 	bool dead;
128 	int r;
129 
130 	if (bundle) {
131 		id = bundle->debug_id;
132 		dead = __refcount_dec_and_test(&bundle->ref, &r);
133 		trace_rxrpc_bundle(id, r - 1, why);
134 		if (dead)
135 			rxrpc_free_bundle(bundle);
136 	}
137 }
138 
139 /*
140  * Get rid of outstanding client connection preallocations when a local
141  * endpoint is destroyed.
142  */
143 void rxrpc_purge_client_connections(struct rxrpc_local *local)
144 {
145 	rxrpc_destroy_client_conn_ids(local);
146 }
147 
148 /*
149  * Allocate a client connection.
150  */
151 static struct rxrpc_connection *
152 rxrpc_alloc_client_connection(struct rxrpc_bundle *bundle)
153 {
154 	struct rxrpc_connection *conn;
155 	struct rxrpc_local *local = bundle->local;
156 	struct rxrpc_net *rxnet = local->rxnet;
157 	int id;
158 
159 	_enter("");
160 
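	/* This is reached from the I/O thread (see rxrpc_connect_client_calls()),
	 * so the allocations here must not sleep - hence GFP_ATOMIC with the
	 * allocation-failure warning suppressed.
	 */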
161 	conn = rxrpc_alloc_connection(rxnet, GFP_ATOMIC | __GFP_NOWARN);
162 	if (!conn)
163 		return ERR_PTR(-ENOMEM);
164 
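	/* Allocate a client connection ID.  The low RXRPC_CIDSHIFT bits of the
	 * CID carry the channel number, so the IDR key is the CID shifted down.
	 * IDs are handed out cyclically; rxrpc_may_reuse_conn() discards conns
	 * whose IDs stray too far from the cursor to keep the tree compact.
	 */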
165 	id = idr_alloc_cyclic(&local->conn_ids, conn, 1, 0x40000000,
166 			      GFP_ATOMIC | __GFP_NOWARN);
167 	if (id < 0) {
168 		kfree(conn);
169 		return ERR_PTR(id);
170 	}
171 
172 	refcount_set(&conn->ref, 1);
173 	conn->proto.cid		= id << RXRPC_CIDSHIFT;
174 	conn->proto.epoch	= local->rxnet->epoch;
175 	conn->out_clientflag	= RXRPC_CLIENT_INITIATED;
176 	conn->bundle		= rxrpc_get_bundle(bundle, rxrpc_bundle_get_client_conn);
177 	conn->local		= rxrpc_get_local(bundle->local, rxrpc_local_get_client_conn);
178 	conn->peer		= rxrpc_get_peer(bundle->peer, rxrpc_peer_get_client_conn);
179 	conn->key		= key_get(bundle->key);
180 	conn->security		= bundle->security;
181 	conn->exclusive		= bundle->exclusive;
182 	conn->upgrade		= bundle->upgrade;
183 	conn->orig_service_id	= bundle->service_id;
184 	conn->security_level	= bundle->security_level;
185 	conn->state		= RXRPC_CONN_CLIENT_UNSECURED;
186 	conn->service_id	= conn->orig_service_id;
187 
188 	if (conn->security == &rxrpc_no_security)
189 		conn->state	= RXRPC_CONN_CLIENT;
190 
191 	atomic_inc(&rxnet->nr_conns);
192 	write_lock(&rxnet->conn_lock);
193 	list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
194 	write_unlock(&rxnet->conn_lock);
195 
196 	rxrpc_see_connection(conn, rxrpc_conn_new_client);
197 
198 	atomic_inc(&rxnet->nr_client_conns);
199 	trace_rxrpc_client(conn, -1, rxrpc_client_alloc);
200 	return conn;
201 }
202 
203 /*
204  * Determine if a connection may be reused.
205  */
206 static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
207 {
208 	struct rxrpc_net *rxnet;
209 	int id_cursor, id, distance, limit;
210 
211 	if (!conn)
212 		goto dont_reuse;
213 
214 	rxnet = conn->rxnet;
215 	if (test_bit(RXRPC_CONN_DONT_REUSE, &conn->flags))
216 		goto dont_reuse;
217 
218 	if ((conn->state != RXRPC_CONN_CLIENT_UNSECURED &&
219 	     conn->state != RXRPC_CONN_CLIENT) ||
220 	    conn->proto.epoch != rxnet->epoch)
221 		goto mark_dont_reuse;
222 
223 	/* The IDR tree gets very expensive on memory if the connection IDs are
224 	 * widely scattered throughout the number space, so we want to kill off
225 	 * connections whose ID is more than about four times the current number
226 	 * of conns away from the allocation cursor, to try to keep the IDs
227 	 * concentrated.
228 	 */
229 	id_cursor = idr_get_cursor(&conn->local->conn_ids);
230 	id = conn->proto.cid >> RXRPC_CIDSHIFT;
231 	distance = id - id_cursor;
232 	if (distance < 0)
233 		distance = -distance;
234 	limit = max_t(unsigned long, atomic_read(&rxnet->nr_conns) * 4, 1024);
235 	if (distance > limit)
236 		goto mark_dont_reuse;
237 
238 	return true;
239 
240 mark_dont_reuse:
241 	set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
242 dont_reuse:
243 	return false;
244 }
245 
246 /*
247  * Look up the conn bundle that matches the connection parameters, adding it if
248  * it doesn't yet exist.
249  */
250 int rxrpc_look_up_bundle(struct rxrpc_call *call, gfp_t gfp)
251 {
252 	struct rxrpc_bundle *bundle, *candidate;
253 	struct rxrpc_local *local = call->local;
254 	struct rb_node *p, **pp, *parent;
255 	long diff;
256 	bool upgrade = test_bit(RXRPC_CALL_UPGRADE, &call->flags);
257 
258 	_enter("{%px,%x,%u,%u}",
259 	       call->peer, key_serial(call->key), call->security_level,
260 	       upgrade);
261 
262 	if (test_bit(RXRPC_CALL_EXCLUSIVE, &call->flags)) {
263 		call->bundle = rxrpc_alloc_bundle(call, gfp);
264 		return call->bundle ? 0 : -ENOMEM;
265 	}
266 
267 	/* First, see if the bundle is already there. */
268 	_debug("search 1");
269 	spin_lock(&local->client_bundles_lock);
270 	p = local->client_bundles.rb_node;
271 	while (p) {
272 		bundle = rb_entry(p, struct rxrpc_bundle, local_node);
273 
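		/* The lookup key is the tuple {peer, key, security_level,
		 * upgrade}.  cmp() only has to produce a stable ordering for
		 * the rbtree, so raw pointer and integer values are compared
		 * directly.
		 */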
274 #define cmp(X, Y) ((long)(X) - (long)(Y))
275 		diff = (cmp(bundle->peer, call->peer) ?:
276 			cmp(bundle->key, call->key) ?:
277 			cmp(bundle->security_level, call->security_level) ?:
278 			cmp(bundle->upgrade, upgrade));
279 #undef cmp
280 		if (diff < 0)
281 			p = p->rb_left;
282 		else if (diff > 0)
283 			p = p->rb_right;
284 		else
285 			goto found_bundle;
286 	}
287 	spin_unlock(&local->client_bundles_lock);
288 	_debug("not found");
289 
290 	/* It wasn't.  We need to add one. */
291 	candidate = rxrpc_alloc_bundle(call, gfp);
292 	if (!candidate)
293 		return -ENOMEM;
294 
295 	_debug("search 2");
296 	spin_lock(&local->client_bundles_lock);
297 	pp = &local->client_bundles.rb_node;
298 	parent = NULL;
299 	while (*pp) {
300 		parent = *pp;
301 		bundle = rb_entry(parent, struct rxrpc_bundle, local_node);
302 
303 #define cmp(X, Y) ((long)(X) - (long)(Y))
304 		diff = (cmp(bundle->peer, call->peer) ?:
305 			cmp(bundle->key, call->key) ?:
306 			cmp(bundle->security_level, call->security_level) ?:
307 			cmp(bundle->upgrade, upgrade));
308 #undef cmp
309 		if (diff < 0)
310 			pp = &(*pp)->rb_left;
311 		else if (diff > 0)
312 			pp = &(*pp)->rb_right;
313 		else
314 			goto found_bundle_free;
315 	}
316 
317 	_debug("new bundle");
318 	rb_link_node(&candidate->local_node, parent, pp);
319 	rb_insert_color(&candidate->local_node, &local->client_bundles);
320 	call->bundle = rxrpc_get_bundle(candidate, rxrpc_bundle_get_client_call);
321 	spin_unlock(&local->client_bundles_lock);
322 	_leave(" = B=%u [new]", call->bundle->debug_id);
323 	return 0;
324 
325 found_bundle_free:
326 	rxrpc_free_bundle(candidate);
327 found_bundle:
328 	call->bundle = rxrpc_get_bundle(bundle, rxrpc_bundle_get_client_call);
329 	rxrpc_activate_bundle(bundle);
330 	spin_unlock(&local->client_bundles_lock);
331 	_leave(" = B=%u [found]", call->bundle->debug_id);
332 	return 0;
333 }
334 
335 /*
336  * Allocate a new connection and add it into a bundle.
337  */
338 static bool rxrpc_add_conn_to_bundle(struct rxrpc_bundle *bundle,
339 				     unsigned int slot)
340 {
341 	struct rxrpc_connection *conn, *old;
342 	unsigned int shift = slot * RXRPC_MAXCALLS;
343 	unsigned int i;
344 
345 	old = bundle->conns[slot];
346 	if (old) {
347 		bundle->conns[slot] = NULL;
348 		bundle->conn_ids[slot] = 0;
349 		trace_rxrpc_client(old, -1, rxrpc_client_replace);
350 		rxrpc_put_connection(old, rxrpc_conn_put_noreuse);
351 	}
352 
353 	conn = rxrpc_alloc_client_connection(bundle);
354 	if (IS_ERR(conn)) {
355 		bundle->alloc_error = PTR_ERR(conn);
356 		return false;
357 	}
358 
359 	rxrpc_activate_bundle(bundle);
360 	conn->bundle_shift = shift;
361 	bundle->conns[slot] = conn;
362 	bundle->conn_ids[slot] = conn->debug_id;
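	/* Each connection provides RXRPC_MAXCALLS channels; mark all of this
	 * slot's channels as available in the bundle's bitmap.
	 */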
363 	for (i = 0; i < RXRPC_MAXCALLS; i++)
364 		set_bit(shift + i, &bundle->avail_chans);
365 	return true;
366 }
367 
368 /*
369  * Add a connection to a bundle if there are no usable connections or we have
370  * connections waiting for extra capacity.
371  */
372 static bool rxrpc_bundle_has_space(struct rxrpc_bundle *bundle)
373 {
374 	int slot = -1, i, usable;
375 
376 	_enter("");
377 
378 	bundle->alloc_error = 0;
379 
380 	/* See if there are any usable connections. */
381 	usable = 0;
382 	for (i = 0; i < ARRAY_SIZE(bundle->conns); i++) {
383 		if (rxrpc_may_reuse_conn(bundle->conns[i]))
384 			usable++;
385 		else if (slot == -1)
386 			slot = i;
387 	}
388 
389 	if (!usable && bundle->upgrade)
390 		bundle->try_upgrade = true;
391 
392 	if (!usable)
393 		goto alloc_conn;
394 
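	/* All the usable connections' channels are busy, so add another
	 * connection if a slot is still free - but not while we're probing for
	 * a service upgrade, which is confined to a single connection until
	 * the probe completes (see rxrpc_activate_channels()).
	 */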
395 	if (!bundle->avail_chans &&
396 	    !bundle->try_upgrade &&
397 	    usable < ARRAY_SIZE(bundle->conns))
398 		goto alloc_conn;
399 
400 	_leave("");
401 	return usable;
402 
403 alloc_conn:
404 	return slot >= 0 ? rxrpc_add_conn_to_bundle(bundle, slot) : false;
405 }
406 
407 /*
408  * Assign a channel to the call at the front of the queue and wake the call up.
409  * We don't increment the callNumber counter until this number has been exposed
410  * to the world.
411  */
412 static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
413 				       unsigned int channel)
414 {
415 	struct rxrpc_channel *chan = &conn->channels[channel];
416 	struct rxrpc_bundle *bundle = conn->bundle;
417 	struct rxrpc_call *call = list_entry(bundle->waiting_calls.next,
418 					     struct rxrpc_call, wait_link);
419 	u32 call_id = chan->call_counter + 1;
420 
421 	_enter("C=%x,%u", conn->debug_id, channel);
422 
423 	list_del_init(&call->wait_link);
424 
425 	trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate);
426 
427 	/* Cancel the final ACK on the previous call if it hasn't been sent yet
428 	 * as the DATA packet will implicitly ACK it.
429 	 */
430 	clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
431 	clear_bit(conn->bundle_shift + channel, &bundle->avail_chans);
432 
433 	rxrpc_see_call(call, rxrpc_call_see_activate_client);
434 	call->conn	= rxrpc_get_connection(conn, rxrpc_conn_get_activate_call);
435 	call->cid	= conn->proto.cid | channel;
436 	call->call_id	= call_id;
437 	call->dest_srx.srx_service = conn->service_id;
438 	call->cong_ssthresh = call->peer->cong_ssthresh;
439 	if (call->cong_cwnd >= call->cong_ssthresh)
440 		call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
441 	else
442 		call->cong_mode = RXRPC_CALL_SLOW_START;
443 
444 	chan->call_id		= call_id;
445 	chan->call_debug_id	= call->debug_id;
446 	chan->call		= call;
447 
448 	rxrpc_see_call(call, rxrpc_call_see_connected);
449 	trace_rxrpc_connect_call(call);
450 	call->tx_last_sent = ktime_get_real();
451 	rxrpc_start_call_timer(call);
452 	rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_SEND_REQUEST);
453 	wake_up(&call->waitq);
454 }
455 
456 /*
457  * Remove a connection from the idle list if it's on it.
458  */
459 static void rxrpc_unidle_conn(struct rxrpc_connection *conn)
460 {
461 	if (!list_empty(&conn->cache_link)) {
462 		list_del_init(&conn->cache_link);
463 		rxrpc_put_connection(conn, rxrpc_conn_put_unidle);
464 	}
465 }
466 
467 /*
468  * Assign channels and callNumbers to waiting calls.
469  */
470 static void rxrpc_activate_channels(struct rxrpc_bundle *bundle)
471 {
472 	struct rxrpc_connection *conn;
473 	unsigned long avail, mask;
474 	unsigned int channel, slot;
475 
476 	trace_rxrpc_client(NULL, -1, rxrpc_client_activate_chans);
477 
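	/* While a service upgrade probe is in flight, restrict activation to
	 * channel 0 of the first connection so that only the probing call is
	 * exposed until the outcome is known.
	 */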
478 	if (bundle->try_upgrade)
479 		mask = 1;
480 	else
481 		mask = ULONG_MAX;
482 
483 	while (!list_empty(&bundle->waiting_calls)) {
484 		avail = bundle->avail_chans & mask;
485 		if (!avail)
486 			break;
487 		channel = __ffs(avail);
488 		clear_bit(channel, &bundle->avail_chans);
489 
490 		slot = channel / RXRPC_MAXCALLS;
491 		conn = bundle->conns[slot];
492 		if (!conn)
493 			break;
494 
495 		if (bundle->try_upgrade)
496 			set_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags);
497 		rxrpc_unidle_conn(conn);
498 
499 		channel &= (RXRPC_MAXCALLS - 1);
500 		conn->act_chans	|= 1 << channel;
501 		rxrpc_activate_one_channel(conn, channel);
502 	}
503 }
504 
505 /*
506  * Connect waiting channels (called from the I/O thread).
507  */
508 void rxrpc_connect_client_calls(struct rxrpc_local *local)
509 {
510 	struct rxrpc_call *call;
511 
512 	while ((call = list_first_entry_or_null(&local->new_client_calls,
513 						struct rxrpc_call, wait_link))
514 	       ) {
515 		struct rxrpc_bundle *bundle = call->bundle;
516 
517 		spin_lock(&local->client_call_lock);
518 		list_move_tail(&call->wait_link, &bundle->waiting_calls);
519 		spin_unlock(&local->client_call_lock);
520 
521 		if (rxrpc_bundle_has_space(bundle))
522 			rxrpc_activate_channels(bundle);
523 	}
524 }
525 
526 /*
527  * Note that a call, and thus a connection, is about to be exposed to the
528  * world.
529  */
530 void rxrpc_expose_client_call(struct rxrpc_call *call)
531 {
532 	unsigned int channel = call->cid & RXRPC_CHANNELMASK;
533 	struct rxrpc_connection *conn = call->conn;
534 	struct rxrpc_channel *chan = &conn->channels[channel];
535 
536 	if (!test_and_set_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
537 		/* Mark the call ID as being used.  If the callNumber counter
538 		 * exceeds ~2 billion, we kill the connection after its
539 		 * outstanding calls have finished so that the counter doesn't
540 		 * wrap.
541 		 */
542 		chan->call_counter++;
543 		if (chan->call_counter >= INT_MAX)
544 			set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
545 		trace_rxrpc_client(conn, channel, rxrpc_client_exposed);
546 
547 		spin_lock(&call->peer->lock);
548 		hlist_add_head(&call->error_link, &call->peer->error_targets);
549 		spin_unlock(&call->peer->lock);
550 	}
551 }
552 
553 /*
554  * Set the reap timer.
555  */
556 static void rxrpc_set_client_reap_timer(struct rxrpc_local *local)
557 {
558 	if (!local->kill_all_client_conns) {
559 		unsigned long now = jiffies;
560 		unsigned long reap_at = now + rxrpc_conn_idle_client_expiry;
561 
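		/* timer_reduce() only moves the expiry time earlier, so setting
		 * the timer for each newly-idled connection can never postpone
		 * an already-pending, sooner reap.
		 */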
562 		if (local->rxnet->live)
563 			timer_reduce(&local->client_conn_reap_timer, reap_at);
564 	}
565 }
566 
567 /*
568  * Disconnect a client call.
569  */
570 void rxrpc_disconnect_client_call(struct rxrpc_bundle *bundle, struct rxrpc_call *call)
571 {
572 	struct rxrpc_connection *conn;
573 	struct rxrpc_channel *chan = NULL;
574 	struct rxrpc_local *local = bundle->local;
575 	unsigned int channel;
576 	bool may_reuse;
577 	u32 cid;
578 
579 	_enter("c=%x", call->debug_id);
580 
581 	/* Calls that have never actually been assigned a channel can simply be
582 	 * discarded.
583 	 */
584 	conn = call->conn;
585 	if (!conn) {
586 		_debug("call is waiting");
587 		ASSERTCMP(call->call_id, ==, 0);
588 		ASSERT(!test_bit(RXRPC_CALL_EXPOSED, &call->flags));
589 		list_del_init(&call->wait_link);
590 		return;
591 	}
592 
593 	cid = call->cid;
594 	channel = cid & RXRPC_CHANNELMASK;
595 	chan = &conn->channels[channel];
596 	trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);
597 
598 	if (WARN_ON(chan->call != call))
599 		return;
600 
601 	may_reuse = rxrpc_may_reuse_conn(conn);
602 
603 	/* If a client call was exposed to the world, we save the result for
604 	 * retransmission.
605 	 *
606 	 * We use a barrier here so that the call number and abort code can be
607 	 * read without needing to take a lock.
608 	 *
609 	 * TODO: Make the incoming packet handler check this and handle
610 	 * terminal retransmission without requiring access to the call.
611 	 */
612 	if (test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
613 		_debug("exposed %u,%u", call->call_id, call->abort_code);
614 		__rxrpc_disconnect_call(conn, call);
615 
616 		if (test_and_clear_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags)) {
617 			trace_rxrpc_client(conn, channel, rxrpc_client_to_active);
618 			bundle->try_upgrade = false;
619 			if (may_reuse)
620 				rxrpc_activate_channels(bundle);
621 		}
622 	}
623 
624 	/* See if we can pass the channel directly to another call. */
625 	if (may_reuse && !list_empty(&bundle->waiting_calls)) {
626 		trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass);
627 		rxrpc_activate_one_channel(conn, channel);
628 		return;
629 	}
630 
631 	/* Schedule the final ACK to be transmitted in a short while so that it
632 	 * can be skipped if we find a follow-on call.  The first DATA packet
633 	 * of the follow-on call will implicitly ACK this call.
634 	 */
635 	if (call->completion == RXRPC_CALL_SUCCEEDED &&
636 	    test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
637 		unsigned long final_ack_at = jiffies + 2;
638 
639 		WRITE_ONCE(chan->final_ack_at, final_ack_at);
640 		smp_wmb(); /* vs rxrpc_process_delayed_final_acks() */
641 		set_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
642 		rxrpc_reduce_conn_timer(conn, final_ack_at);
643 	}
644 
645 	/* Deactivate the channel. */
646 	chan->call = NULL;
647 	set_bit(conn->bundle_shift + channel, &conn->bundle->avail_chans);
648 	conn->act_chans	&= ~(1 << channel);
649 
650 	/* If no channels remain active, then put the connection on the idle
651 	 * list for a short while.  Give it a ref to stop it going away if it
652 	 * becomes unbundled.
653 	 */
654 	if (!conn->act_chans) {
655 		trace_rxrpc_client(conn, channel, rxrpc_client_to_idle);
656 		conn->idle_timestamp = jiffies;
657 
658 		rxrpc_get_connection(conn, rxrpc_conn_get_idle);
659 		list_move_tail(&conn->cache_link, &local->idle_client_conns);
660 
661 		rxrpc_set_client_reap_timer(local);
662 	}
663 }
664 
665 /*
666  * Remove a connection from a bundle.
667  */
668 static void rxrpc_unbundle_conn(struct rxrpc_connection *conn)
669 {
670 	struct rxrpc_bundle *bundle = conn->bundle;
671 	unsigned int bindex;
672 	int i;
673 
674 	_enter("C=%x", conn->debug_id);
675 
676 	if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK)
677 		rxrpc_process_delayed_final_acks(conn, true);
678 
679 	bindex = conn->bundle_shift / RXRPC_MAXCALLS;
680 	if (bundle->conns[bindex] == conn) {
681 		_debug("clear slot %u", bindex);
682 		bundle->conns[bindex] = NULL;
683 		bundle->conn_ids[bindex] = 0;
684 		for (i = 0; i < RXRPC_MAXCALLS; i++)
685 			clear_bit(conn->bundle_shift + i, &bundle->avail_chans);
686 		rxrpc_put_client_connection_id(bundle->local, conn);
687 		rxrpc_deactivate_bundle(bundle);
688 		rxrpc_put_connection(conn, rxrpc_conn_put_unbundle);
689 	}
690 }
691 
692 /*
693  * Drop the active count on a bundle.
694  */
695 void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle)
696 {
697 	struct rxrpc_local *local;
698 	bool need_put = false;
699 
700 	if (!bundle)
701 		return;
702 
703 	local = bundle->local;
704 	if (atomic_dec_and_lock(&bundle->active, &local->client_bundles_lock)) {
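		/* Exclusive bundles are never inserted into the local
		 * endpoint's tree (see rxrpc_look_up_bundle()), so they have no
		 * tree ref to drop here.
		 */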
705 		if (!bundle->exclusive) {
706 			_debug("erase bundle");
707 			rb_erase(&bundle->local_node, &local->client_bundles);
708 			need_put = true;
709 		}
710 
711 		spin_unlock(&local->client_bundles_lock);
712 		if (need_put)
713 			rxrpc_put_bundle(bundle, rxrpc_bundle_put_discard);
714 	}
715 }
716 
717 /*
718  * Clean up a dead client connection.
719  */
720 void rxrpc_kill_client_conn(struct rxrpc_connection *conn)
721 {
722 	struct rxrpc_local *local = conn->local;
723 	struct rxrpc_net *rxnet = local->rxnet;
724 
725 	_enter("C=%x", conn->debug_id);
726 
727 	trace_rxrpc_client(conn, -1, rxrpc_client_cleanup);
728 	atomic_dec(&rxnet->nr_client_conns);
729 
730 	rxrpc_put_client_connection_id(local, conn);
731 }
732 
733 /*
734  * Discard expired client connections from the idle list.  Each conn in the
735  * idle list has been exposed and holds an extra ref because of that.
736  *
737  * This may be called from conn setup or from a work item, so it cannot be
738  * assumed to be non-reentrant.
739  */
740 void rxrpc_discard_expired_client_conns(struct rxrpc_local *local)
741 {
742 	struct rxrpc_connection *conn;
743 	unsigned long expiry, conn_expires_at, now;
744 	unsigned int nr_conns;
745 
746 	_enter("");
747 
748 	/* We keep an estimate of what the number of conns ought to be after
749 	 * we've discarded some so that we don't overdo the discarding.
750 	 */
751 	nr_conns = atomic_read(&local->rxnet->nr_client_conns);
752 
753 next:
754 	conn = list_first_entry_or_null(&local->idle_client_conns,
755 					struct rxrpc_connection, cache_link);
756 	if (!conn)
757 		return;
758 
759 	if (!local->kill_all_client_conns) {
760 		/* If the number of connections is over the reap limit, we
761 		 * expedite discard by reducing the expiry timeout.  We must,
762 		 * however, have at least a short grace period to be able to do
763 		 * final-ACK or ABORT retransmission.
764 		 */
765 		expiry = rxrpc_conn_idle_client_expiry;
766 		if (nr_conns > rxrpc_reap_client_connections)
767 			expiry = rxrpc_conn_idle_client_fast_expiry;
768 		if (conn->local->service_closed)
769 			expiry = rxrpc_closed_conn_expiry * HZ;
770 
771 		conn_expires_at = conn->idle_timestamp + expiry;
772 
773 		now = READ_ONCE(jiffies);
774 		if (time_after(conn_expires_at, now))
775 			goto not_yet_expired;
776 	}
777 
778 	atomic_dec(&conn->active);
779 	trace_rxrpc_client(conn, -1, rxrpc_client_discard);
780 	list_del_init(&conn->cache_link);
781 
782 	rxrpc_unbundle_conn(conn);
783 	/* Drop the ->cache_link ref */
784 	rxrpc_put_connection(conn, rxrpc_conn_put_discard_idle);
785 
786 	nr_conns--;
787 	goto next;
788 
789 not_yet_expired:
790 	/* The connection at the front of the queue hasn't yet expired, so
791 	 * schedule the work item for that point if we discarded something.
792 	 *
793 	 * We don't worry if the work item is already scheduled - it can look
794 	 * after rescheduling itself at a later time.  We could cancel it, but
795 	 * then things get messier.
796 	 */
797 	_debug("not yet");
798 	if (!local->kill_all_client_conns)
799 		timer_reduce(&local->client_conn_reap_timer, conn_expires_at);
800 
801 	_leave("");
802 }
803 
804 /*
805  * Clean up the client connections on a local endpoint.
806  */
807 void rxrpc_clean_up_local_conns(struct rxrpc_local *local)
808 {
809 	struct rxrpc_connection *conn;
810 
811 	_enter("");
812 
813 	local->kill_all_client_conns = true;
814 
815 	del_timer_sync(&local->client_conn_reap_timer);
816 
817 	while ((conn = list_first_entry_or_null(&local->idle_client_conns,
818 						struct rxrpc_connection, cache_link))) {
819 		list_del_init(&conn->cache_link);
820 		atomic_dec(&conn->active);
821 		trace_rxrpc_client(conn, -1, rxrpc_client_discard);
822 		rxrpc_unbundle_conn(conn);
823 		rxrpc_put_connection(conn, rxrpc_conn_put_local_dead);
824 	}
825 
826 	_leave(" [culled]");
827 }
828