xref: /linux/net/rxrpc/rtt.c (revision 153f90a0)
// SPDX-License-Identifier: GPL-2.0
/* RTT/RTO calculation.
 *
 * Adapted from TCP for AF_RXRPC by David Howells (dhowells@redhat.com)
 *
 * https://tools.ietf.org/html/rfc6298
 * https://tools.ietf.org/html/rfc1122#section-4.2.3.1
 * http://ccr.sigcomm.org/archive/1995/jan95/ccr-9501-partridge87.pdf
 */

#include <linux/net.h>
#include "ar-internal.h"

#define RXRPC_RTO_MAX	(120 * USEC_PER_SEC)
#define RXRPC_TIMEOUT_INIT ((unsigned int)(1 * MSEC_PER_SEC)) /* RFC6298 2.1 initial RTO value */
#define rxrpc_jiffies32 ((u32)jiffies)		/* As tcp_jiffies32 */

static u32 rxrpc_rto_min_us(struct rxrpc_peer *peer)
{
	return 200;
}
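
/* Note: the 200 above is in microseconds and is only used (in this file) to
 * seed the floor for rttvar_us on the first sample; for comparison, TCP's
 * equivalent floor, TCP_RTO_MIN, defaults to 200ms.
 */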

static u32 __rxrpc_set_rto(const struct rxrpc_peer *peer)
{
	return (peer->srtt_us >> 3) + peer->rttvar_us;
}
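
/* Unit note: srtt_us holds 8x the smoothed RTT and rttvar_us roughly 4x the
 * mean deviation (see rxrpc_rtt_estimator() below), so the sum above works
 * out to SRTT + 4*RTTVAR, the RTO formula of RFC 6298.
 */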

static u32 rxrpc_bound_rto(u32 rto)
{
	return min(rto, RXRPC_RTO_MAX);
}

/*
 * Called to compute a smoothed rtt estimate. The data fed to this
 * routine either comes from timestamps, or from segments that were
 * known _not_ to have been retransmitted [see Karn/Partridge
 * Proceedings SIGCOMM 87]. The algorithm is from the SIGCOMM 88
 * piece by Van Jacobson.
 * NOTE: the next three routines used to be one big routine.
 * To save cycles in the RFC 1323 implementation it was better to break
 * it up into three procedures. -- erics
 */
static void rxrpc_rtt_estimator(struct rxrpc_peer *peer, long sample_rtt_us)
{
	long m = sample_rtt_us; /* RTT */
	u32 srtt = peer->srtt_us;

	/*	The following amusing code comes from Jacobson's
	 *	article in SIGCOMM '88.  Note that rtt and mdev
	 *	are scaled versions of rtt and mean deviation.
	 *	This is designed to be as fast as possible.
	 *	m stands for "measurement".
	 *
	 *	In a 1990 paper the rto value is changed to:
	 *	RTO = rtt + 4 * mdev
	 *
	 * Funny. This algorithm seems to be very broken.
	 * These formulae increase RTO when it should be decreased, increase it
	 * too slowly when it should be increased quickly, decrease it too
	 * quickly, etc.  I guess in BSD RTO takes ONE value, so it absolutely
	 * does not matter how to _calculate_ it.  It seems it was a trap
	 * that VJ failed to avoid. 8)
	 */
	if (srtt != 0) {
		m -= (srtt >> 3);	/* m is now error in rtt est */
		srtt += m;		/* rtt = 7/8 rtt + 1/8 new */
		if (m < 0) {
			m = -m;		/* m is now abs(error) */
			m -= (peer->mdev_us >> 2);   /* similar update on mdev */
			/* This is similar to one of the Eifel findings.
			 * Eifel blocks mdev updates when rtt decreases.
			 * This solution is a bit different: we use a finer
			 * gain for mdev in this case (alpha*beta).
			 * Like Eifel it also prevents growth of rto,
			 * but it also limits too-fast rto decreases,
			 * which happen in pure Eifel.
			 */
			if (m > 0)
				m >>= 3;
		} else {
			m -= (peer->mdev_us >> 2);   /* similar update on mdev */
		}

		peer->mdev_us += m;		/* mdev = 3/4 mdev + 1/4 new */
		if (peer->mdev_us > peer->mdev_max_us) {
			peer->mdev_max_us = peer->mdev_us;
			if (peer->mdev_max_us > peer->rttvar_us)
				peer->rttvar_us = peer->mdev_max_us;
		}
	} else {
		/* no previous measure. */
		srtt = m << 3;		/* take the measured time to be rtt */
		peer->mdev_us = m << 1;	/* make sure rto = 3*rtt */
		peer->rttvar_us = max(peer->mdev_us, rxrpc_rto_min_us(peer));
		peer->mdev_max_us = peer->rttvar_us;
	}

	peer->srtt_us = max(1U, srtt);
}
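
/* Illustrative example (numbers not from the source): a first sample of
 * m = 500us takes the srtt == 0 branch above, giving srtt_us = 4000
 * (500 << 3), mdev_us = 1000 (500 << 1), and rttvar_us = mdev_max_us =
 * max(1000, 200) = 1000.
 */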

/*
 * Calculate rto without backoff.  This is the second half of Van Jacobson's
 * routine referred to above.
 */
static void rxrpc_set_rto(struct rxrpc_peer *peer)
{
	u32 rto;

	/* 1. If rtt variance happened to be less than 50 msec, it is a
	 *    hallucination.  It cannot be less due to the utterly erratic
	 *    ACK generation made at least by Solaris and FreeBSD.
	 *    "Erratic ACKs" have _nothing_ to do with delayed acks, because
	 *    at cwnd > 2 the true delack timeout is invisible.  Actually,
	 *    Linux-2.4 also generates erratic ACKs in some circumstances.
	 */
	rto = __rxrpc_set_rto(peer);

	/* 2. Fixups made earlier cannot be right.
	 *    If we do not estimate RTO correctly without them,
	 *    the whole algorithm is pure shit and should be replaced
	 *    with a correct one, which is exactly what we pretend to do.
	 */

	/* NOTE: clamping at RXRPC_RTO_MIN is not required, the current
	 * algorithm guarantees that rto is higher.
	 */
	peer->rto_us = rxrpc_bound_rto(rto);
}
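
/* Continuing the illustrative example: with srtt_us = 4000 and
 * rttvar_us = 1000, rto = (4000 >> 3) + 1000 = 1500us, i.e. 3 * RTT,
 * matching RFC 6298's initial RTO = SRTT + 4*RTTVAR with RTTVAR = RTT/2.
 */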

static void rxrpc_ack_update_rtt(struct rxrpc_peer *peer, long rtt_us)
{
	if (rtt_us < 0)
		return;

	//rxrpc_update_rtt_min(peer, rtt_us);
	rxrpc_rtt_estimator(peer, rtt_us);
	rxrpc_set_rto(peer);

	/* RFC6298: only reset backoff on valid RTT measurement. */
	peer->backoff = 0;
}

/*
 * Add RTT information to cache.  This is called in softirq mode and has
 * exclusive access to the peer RTT data.
 */
void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
			int rtt_slot,
			rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial,
			ktime_t send_time, ktime_t resp_time)
{
	struct rxrpc_peer *peer = call->peer;
	s64 rtt_us;

	rtt_us = ktime_to_us(ktime_sub(resp_time, send_time));
	if (rtt_us < 0)
		return;

	spin_lock(&peer->rtt_input_lock);
	rxrpc_ack_update_rtt(peer, rtt_us);
	if (peer->rtt_count < 3)
		peer->rtt_count++;
	spin_unlock(&peer->rtt_input_lock);

	trace_rxrpc_rtt_rx(call, why, rtt_slot, send_serial, resp_serial,
			   peer->srtt_us >> 3, peer->rto_us);
}
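
/* Note that rtt_count saturates at 3; this appears to serve as a cheap
 * "do we have enough samples yet?" signal for callers deciding whether to
 * solicit further RTT probes.
 */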

/*
 * Get the retransmission timeout to set in nanoseconds, backing it off each
 * time we retransmit.
 */
ktime_t rxrpc_get_rto_backoff(struct rxrpc_peer *peer, bool retrans)
{
	u64 timo_us;
	u32 backoff = READ_ONCE(peer->backoff);

	timo_us = peer->rto_us;
	timo_us <<= backoff;
	if (retrans && timo_us * 2 <= RXRPC_RTO_MAX)
		WRITE_ONCE(peer->backoff, backoff + 1);

	if (timo_us < 1)
		timo_us = 1;

	return ns_to_ktime(timo_us * NSEC_PER_USEC);
}
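
/* Illustrative example: with rto_us = 1500, successive retransmissions see
 * timeouts of 1500us, 3000us, 6000us, ..., as backoff is bumped by one each
 * time; the increment stops once doubling would push the timeout past
 * RXRPC_RTO_MAX (120 seconds), capping the growth.
 */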

void rxrpc_peer_init_rtt(struct rxrpc_peer *peer)
{
	peer->rto_us	= RXRPC_TIMEOUT_INIT;
	peer->mdev_us	= RXRPC_TIMEOUT_INIT;
	peer->backoff	= 0;
	//minmax_reset(&peer->rtt_min, rxrpc_jiffies32, ~0U);
}
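
/* Until the first sample arrives, retransmission timing simply uses
 * RXRPC_TIMEOUT_INIT with backoff 0.  srtt_us is not set here and is
 * assumed to start out zero, so the first sample rebuilds srtt/mdev/rttvar
 * from scratch via the srtt == 0 path in rxrpc_rtt_estimator().
 */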