/*	$OpenBSD: tcp_timer.c,v 1.82 2025/01/16 11:59:20 bluhm Exp $	*/
/*	$NetBSD: tcp_timer.c,v 1.14 1996/02/13 23:44:09 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_timer.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/kernel.h>
#include <sys/pool.h>

#include <net/route.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_debug.h>
#include <netinet/ip_icmp.h>
#include <netinet/tcp_seq.h>

/*
 * Locks used to protect struct members in this file:
 *	T	tcp_timer_mtx		global tcp timer data structures
 */

int	tcp_always_keepalive;
int	tcp_keepinit = TCPTV_KEEPINIT;
int	tcp_keepidle = TCPTV_KEEPIDLE;
int	tcp_keepintvl = TCPTV_KEEPINTVL;
int	tcp_keepinit_sec = TCPTV_KEEPINIT / TCP_TIME(1);
int	tcp_keepidle_sec = TCPTV_KEEPIDLE / TCP_TIME(1);
int	tcp_keepintvl_sec = TCPTV_KEEPINTVL / TCP_TIME(1);
int	tcp_maxpersistidle = TCPTV_KEEPIDLE;	/* max idle time in persist */
int	tcp_delack_msecs = TCP_DELACK_MSECS;	/* time to delay the ACK */

void	tcp_timer_rexmt(void *);
void	tcp_timer_persist(void *);
void	tcp_timer_keep(void *);
void	tcp_timer_2msl(void *);
void	tcp_timer_delack(void *);

const tcp_timer_func_t tcp_timer_funcs[TCPT_NTIMERS] = {
	tcp_timer_rexmt,
	tcp_timer_persist,
	tcp_timer_keep,
	tcp_timer_2msl,
	tcp_timer_delack,
};

static inline int
tcp_timer_enter(struct inpcb *inp, struct socket **so, struct tcpcb **tp,
    u_int timer)
{
	KASSERT(timer < TCPT_NTIMERS);

	NET_LOCK_SHARED();
	*so = in_pcbsolock_ref(inp);
	if (*so == NULL) {
		*tp = NULL;
		return -1;
	}
	*tp = intotcpcb(inp);
	/* Ignore canceled timeouts or timeouts that have been rescheduled. */
	if (*tp == NULL || !ISSET((*tp)->t_flags, TF_TIMER << timer) ||
	    timeout_pending(&(*tp)->t_timer[timer]))
		return -1;
	CLR((*tp)->t_flags, TF_TIMER << timer);

	return 0;
}

static inline void
tcp_timer_leave(struct inpcb *inp, struct socket *so)
{
	in_pcbsounlock_rele(inp, so);
	NET_UNLOCK_SHARED();
	in_pcbunref(inp);
}
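
/*
 * Every timer callback below follows the same pattern; as a sketch,
 * with TCPT_FOO standing in for the particular timer:
 *
 *	if (tcp_timer_enter(inp, &so, &tp, TCPT_FOO))
 *		goto out;
 *	... timer-specific processing on tp ...
 *  out:
 *	tcp_timer_leave(inp, so);
 *
 * tcp_timer_enter() takes the shared net lock and a socket reference
 * and filters out canceled or rescheduled timeouts; tcp_timer_leave()
 * releases both and drops the inpcb reference held by the timeout.
 */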

/*
 * Callout to process delayed ACKs for a TCPCB.
 */
void
tcp_timer_delack(void *arg)
{
	struct inpcb *inp = arg;
	struct socket *so;
	struct tcpcb *otp = NULL, *tp;
	short ostate;

	/*
	 * If tcp_output() wasn't able to transmit the ACK
	 * for whatever reason, it will restart the delayed
	 * ACK callout.
	 */
	if (tcp_timer_enter(inp, &so, &tp, TCPT_DELACK))
		goto out;

	if (so->so_options & SO_DEBUG) {
		otp = tp;
		ostate = tp->t_state;
	}
	tp->t_flags |= TF_ACKNOW;
	(void) tcp_output(tp);
	if (otp)
		tcp_trace(TA_TIMER, ostate, tp, otp, NULL, TCPT_DELACK, 0);
 out:
	tcp_timer_leave(inp, so);
}

/*
 * TCP protocol timeout routine called every 500 ms.
 * The per-connection timers now run as individual timeouts, so all
 * that is left to do here is to advance the initial send sequence
 * number: with PR_SLOWHZ ticks per second, tcp_iss grows by
 * TCP_ISSINCR2 per second in equal steps.
 */
void
tcp_slowtimo(void)
{
	mtx_enter(&tcp_timer_mtx);
	tcp_iss += TCP_ISSINCR2/PR_SLOWHZ;		/* increment iss */
	mtx_leave(&tcp_timer_mtx);
}

/*
 * Cancel all timers for TCP tp.
 */
void
tcp_canceltimers(struct tcpcb *tp)
{
	int i;

	for (i = 0; i < TCPT_NTIMERS; i++)
		TCP_TIMER_DISARM(tp, i);
}

const int tcp_backoff[TCP_MAXRXTSHIFT + 1] =
    { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 };

const int tcp_totbackoff = 511;	/* sum of tcp_backoff[] */
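/* (1 + 2 + 4 + 8 + 16 + 32) + 7 * 64 = 63 + 448 = 511 */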

/*
 * TCP timer processing.
 */

void	tcp_timer_freesack(struct tcpcb *);

void
tcp_timer_freesack(struct tcpcb *tp)
{
	struct sackhole *p, *q;
	/*
	 * Free SACK holes for 2MSL and REXMT timers.
	 */
	q = tp->snd_holes;
	while (q != NULL) {
		p = q;
		q = q->next;
		pool_put(&sackhl_pool, p);
	}
	tp->snd_holes = 0;
}

void
tcp_timer_rexmt(void *arg)
{
	struct inpcb *inp = arg;
	struct socket *so;
	struct tcpcb *otp = NULL, *tp;
	short ostate;
	uint32_t rto;

	if (tcp_timer_enter(inp, &so, &tp, TCPT_REXMT))
		goto out;

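	/*
	 * Act on a deferred path-MTU update only if the TCP sequence
	 * number quoted in the ICMP "need fragmentation" message still
	 * falls within the first unacknowledged segment, i.e. within
	 * [snd_una, snd_una + t_maxseg).
	 */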
	if ((tp->t_flags & TF_PMTUD_PEND) &&
	    SEQ_GEQ(tp->t_pmtud_th_seq, tp->snd_una) &&
	    SEQ_LT(tp->t_pmtud_th_seq, (int)(tp->snd_una + tp->t_maxseg))) {
		struct sockaddr_in sin;
		struct icmp icmp;

		/* TF_PMTUD_PEND is set in tcp_ctlinput() which is IPv4 only */
		KASSERT(!ISSET(inp->inp_flags, INP_IPV6));
		tp->t_flags &= ~TF_PMTUD_PEND;

		/* XXX create fake icmp message with relevant entries */
		icmp.icmp_nextmtu = tp->t_pmtud_nextmtu;
		icmp.icmp_ip.ip_len = tp->t_pmtud_ip_len;
		icmp.icmp_ip.ip_hl = tp->t_pmtud_ip_hl;
		icmp.icmp_ip.ip_dst = inp->inp_faddr;
		icmp_mtudisc(&icmp, inp->inp_rtableid);

		/*
		 * Notify all connections to the same peer about
		 * new mss and trigger retransmit.
		 */
		bzero(&sin, sizeof(sin));
		sin.sin_len = sizeof(sin);
		sin.sin_family = AF_INET;
		sin.sin_addr = inp->inp_faddr;
		in_pcbnotifyall(&tcbtable, &sin, inp->inp_rtableid, EMSGSIZE,
		    tcp_mtudisc);
		goto out;
	}

	tcp_timer_freesack(tp);
	if (++tp->t_rxtshift > TCP_MAXRXTSHIFT) {
		tp->t_rxtshift = TCP_MAXRXTSHIFT;
		tcpstat_inc(tcps_timeoutdrop);
		tp = tcp_drop(tp, tp->t_softerror ?
		    tp->t_softerror : ETIMEDOUT);
		goto out;
	}
	if (so->so_options & SO_DEBUG) {
		otp = tp;
		ostate = tp->t_state;
	}
	tcpstat_inc(tcps_rexmttimeo);
	rto = TCP_REXMTVAL(tp);
	if (rto < tp->t_rttmin)
		rto = tp->t_rttmin;
	TCPT_RANGESET(tp->t_rxtcur,
	    rto * tcp_backoff[tp->t_rxtshift],
	    tp->t_rttmin, TCPTV_REXMTMAX);
	TCP_TIMER_ARM(tp, TCPT_REXMT, tp->t_rxtcur);
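
	/*
	 * Illustrative numbers: with rto 500 ms and t_rxtshift 3, the
	 * timer is rearmed with 500 * tcp_backoff[3] = 500 * 8 =
	 * 4000 ms, clamped by TCPT_RANGESET() to the range
	 * [t_rttmin, TCPTV_REXMTMAX].
	 */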

	/*
	 * If we are losing and we are trying path MTU discovery,
	 * try turning it off.  This will avoid black holes in
	 * the network which suppress or fail to send "packet
	 * too big" ICMP messages.  We should ideally do
	 * lots more sophisticated searching to find the right
	 * value here...
	 */
	if (ip_mtudisc &&
	    TCPS_HAVEESTABLISHED(tp->t_state) &&
	    tp->t_rxtshift > TCP_MAXRXTSHIFT / 6) {
		struct rtentry *rt = NULL;

		/* No data to send means path mtu is not a problem */
		if (!READ_ONCE(so->so_snd.sb_cc))
			goto leave;

		rt = in_pcbrtentry(inp);
		/* Check if path MTU discovery is disabled already */
		if (rt && (rt->rt_flags & RTF_HOST) &&
		    (rt->rt_locks & RTV_MTU))
			goto leave;

		rt = NULL;
		switch (tp->pf) {
#ifdef INET6
		case PF_INET6:
			/*
			 * We cannot turn off path MTU discovery for IPv6.
			 * Do nothing for now, maybe lower to
			 * minimum MTU.
			 */
			break;
#endif
		case PF_INET:
			rt = icmp_mtudisc_clone(inp->inp_faddr,
			    inp->inp_rtableid, 0);
			break;
		}
		if (rt != NULL) {
			/* Disable path MTU discovery */
			if ((rt->rt_locks & RTV_MTU) == 0) {
				rt->rt_locks |= RTV_MTU;
				in_rtchange(inp, 0);
			}

			rtfree(rt);
		}
	leave:
		;
	}

	/*
	 * If losing, let the lower level know and try for
	 * a better route.  Also, if we backed off this far,
	 * our srtt estimate is probably bogus.  Clobber it
	 * so we'll take the next rtt measurement as our srtt;
	 * move the current srtt into rttvar to keep the current
	 * retransmit times until then.
	 */
	if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
		in_losing(inp);
		tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT);
		tp->t_srtt = 0;
	}
	tp->snd_nxt = tp->snd_una;
	/*
	 * Note:  We overload snd_last to function also as the
	 * snd_last variable described in RFC 2582.
	 */
	tp->snd_last = tp->snd_max;
	/*
	 * If timing a segment in this window, stop the timer.
	 */
	tp->t_rtttime = 0;
#ifdef TCP_ECN
	/*
	 * If ECN is enabled, there might be a broken firewall which
	 * blocks ECN packets.  Fall back to non-ECN.
	 */
	if ((tp->t_state == TCPS_SYN_SENT || tp->t_state == TCPS_SYN_RECEIVED)
	    && atomic_load_int(&tcp_do_ecn) && !(tp->t_flags & TF_DISABLE_ECN))
		tp->t_flags |= TF_DISABLE_ECN;
#endif
	/*
	 * Close the congestion window down to one segment
	 * (we'll open it by one segment for each ack we get).
	 * Since we probably have a window's worth of unacked
	 * data accumulated, this "slow start" keeps us from
	 * dumping all that data as back-to-back packets (which
	 * might overwhelm an intermediate gateway).
	 *
	 * There are two phases to the opening: Initially we
	 * open by one mss on each ack.  This makes the window
	 * size increase exponentially with time.  If the
	 * window is larger than the path can handle, this
	 * exponential growth results in dropped packet(s)
	 * almost immediately.  To get more time between
	 * drops but still "push" the network to take advantage
	 * of improving conditions, we switch from exponential
	 * to linear window opening at some threshold size.
	 * For a threshold, we use half the current window
	 * size, truncated to a multiple of the mss.
	 *
	 * (The minimum cwnd that will give us exponential
	 * growth is 2 mss.  We don't allow the threshold
	 * to go below this.)
	 */
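	/*
	 * Worked example with illustrative numbers: for snd_wnd 32768,
	 * snd_cwnd 16384 and t_maxseg 1460, win = 16384 / 2 / 1460 = 5,
	 * so snd_ssthresh becomes 5 * 1460 = 7300 bytes while snd_cwnd
	 * restarts at a single 1460 byte segment.
	 */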
	{
		u_long win;

		win = ulmin(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_maxseg;
		if (win < 2)
			win = 2;
		tp->snd_cwnd = tp->t_maxseg;
		tp->snd_ssthresh = win * tp->t_maxseg;
		tp->t_dupacks = 0;
#ifdef TCP_ECN
		tp->snd_last = tp->snd_max;
		tp->t_flags |= TF_SEND_CWR;
#endif
#if 1 /* TCP_ECN */
		tcpstat_inc(tcps_cwr_timeout);
#endif
	}
	(void) tcp_output(tp);
	if (otp)
		tcp_trace(TA_TIMER, ostate, tp, otp, NULL, TCPT_REXMT, 0);
 out:
	tcp_timer_leave(inp, so);
}

void
tcp_timer_persist(void *arg)
{
	struct inpcb *inp = arg;
	struct socket *so;
	struct tcpcb *otp = NULL, *tp;
	short ostate;
	uint64_t now;
	uint32_t rto;

	if (tcp_timer_enter(inp, &so, &tp, TCPT_PERSIST))
		goto out;

	if (TCP_TIMER_ISARMED(tp, TCPT_REXMT))
		goto out;

	if (so->so_options & SO_DEBUG) {
		otp = tp;
		ostate = tp->t_state;
	}
	tcpstat_inc(tcps_persisttimeo);
	/*
	 * Hack: if the peer is dead/unreachable, we do not
	 * time out if the window is closed.  After a full
	 * backoff, drop the connection if the idle time
	 * (no responses to probes) reaches the maximum
	 * backoff that we would use if retransmitting.
	 */
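	/*
	 * Illustrative numbers: with rto 500 ms, a fully backed off
	 * connection is dropped once nothing has been heard for
	 * 500 * tcp_totbackoff = 255500 ms (about 4.25 minutes), or
	 * for tcp_maxpersistidle, whichever threshold is hit first.
	 */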
	rto = TCP_REXMTVAL(tp);
	if (rto < tp->t_rttmin)
		rto = tp->t_rttmin;
	now = tcp_now();
	if (tp->t_rxtshift == TCP_MAXRXTSHIFT &&
	    ((now - tp->t_rcvtime) >= tcp_maxpersistidle ||
	    (now - tp->t_rcvtime) >= rto * tcp_totbackoff)) {
		tcpstat_inc(tcps_persistdrop);
		tp = tcp_drop(tp, ETIMEDOUT);
		goto out;
	}
	tcp_setpersist(tp);
	tp->t_force = 1;
	(void) tcp_output(tp);
	tp->t_force = 0;
	if (otp)
		tcp_trace(TA_TIMER, ostate, tp, otp, NULL, TCPT_PERSIST, 0);
 out:
	tcp_timer_leave(inp, so);
}

void
tcp_timer_keep(void *arg)
{
	struct inpcb *inp = arg;
	struct socket *so;
	struct tcpcb *otp = NULL, *tp;
	short ostate;

	if (tcp_timer_enter(inp, &so, &tp, TCPT_KEEP))
		goto out;

	if (so->so_options & SO_DEBUG) {
		otp = tp;
		ostate = tp->t_state;
	}
	tcpstat_inc(tcps_keeptimeo);
	if (TCPS_HAVEESTABLISHED(tp->t_state) == 0) {
		tcpstat_inc(tcps_keepdrops);
		tp = tcp_drop(tp, ETIMEDOUT);
		goto out;
	}
	if ((atomic_load_int(&tcp_always_keepalive) ||
	    so->so_options & SO_KEEPALIVE) &&
	    tp->t_state <= TCPS_CLOSING) {
		int keepidle, maxidle;
		uint64_t now;

		keepidle = atomic_load_int(&tcp_keepidle);
		maxidle = TCPTV_KEEPCNT * keepidle;
		now = tcp_now();
		if ((maxidle > 0) &&
		    ((now - tp->t_rcvtime) >= keepidle + maxidle)) {
			tcpstat_inc(tcps_keepdrops);
			tp = tcp_drop(tp, ETIMEDOUT);
			goto out;
		}
		/*
		 * Send a packet designed to force a response
		 * if the peer is up and reachable:
		 * either an ACK if the connection is still alive,
		 * or an RST if the peer has closed the connection
		 * due to timeout or reboot.
		 * Using sequence number tp->snd_una-1
		 * causes the transmitted zero-length segment
		 * to lie outside the receive window;
		 * by the protocol spec, this requires the
		 * correspondent TCP to respond.
		 */
		tcpstat_inc(tcps_keepprobe);
		tcp_respond(tp, mtod(tp->t_template, caddr_t),
		    NULL, tp->rcv_nxt, tp->snd_una - 1, 0, 0, now);
		TCP_TIMER_ARM(tp, TCPT_KEEP, atomic_load_int(&tcp_keepintvl));
	} else
		TCP_TIMER_ARM(tp, TCPT_KEEP, atomic_load_int(&tcp_keepidle));
	if (otp)
		tcp_trace(TA_TIMER, ostate, tp, otp, NULL, TCPT_KEEP, 0);
 out:
	tcp_timer_leave(inp, so);
}
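
/*
 * Keep-alive timeline as implemented above: once the connection has
 * been idle for tcp_keepidle, a probe is sent and further probes
 * follow every tcp_keepintvl; the connection is dropped when the
 * idle time (no response to any probe) reaches
 * tcp_keepidle + TCPTV_KEEPCNT * tcp_keepidle.
 */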
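
/*
 * The 2MSL timer does double duty: in TIME_WAIT its expiry closes
 * the connection after the 2MSL wait, while in FIN_WAIT_2 it acts
 * as an idle timeout, rearming itself for tcp_keepintvl as long as
 * the connection has not been idle for more than maxidle.
 */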
void
tcp_timer_2msl(void *arg)
{
	struct inpcb *inp = arg;
	struct socket *so;
	struct tcpcb *otp = NULL, *tp;
	short ostate;
	uint64_t now;
	int maxidle;

	if (tcp_timer_enter(inp, &so, &tp, TCPT_2MSL))
		goto out;

	if (so->so_options & SO_DEBUG) {
		otp = tp;
		ostate = tp->t_state;
	}
	tcp_timer_freesack(tp);

	maxidle = TCPTV_KEEPCNT * atomic_load_int(&tcp_keepidle);
	now = tcp_now();
	if (tp->t_state != TCPS_TIME_WAIT &&
	    ((maxidle == 0) || ((now - tp->t_rcvtime) <= maxidle)))
		TCP_TIMER_ARM(tp, TCPT_2MSL, atomic_load_int(&tcp_keepintvl));
	else
		tp = tcp_close(tp);
	if (otp)
		tcp_trace(TA_TIMER, ostate, tp, otp, NULL, TCPT_2MSL, 0);
 out:
	tcp_timer_leave(inp, so);
}

void
tcp_timer_reaper(void *arg)
{
	struct tcpcb *tp = arg;

	/*
	 * This timer is necessary to delay the pool_put() after all timers
	 * have finished, even if they were sleeping to grab the net lock.
	 * Putting the pool_put() in a timer is sufficient as all timers run
	 * from the same timeout thread.  Note that neither softnet thread nor
	 * user process may access the tcpcb after arming the reaper timer.
	 * Freeing may run in parallel as it does not grab the net lock.
	 */
	pool_put(&tcpcb_pool, tp);
}
551