xref: /freebsd/sys/netinet/tcp_timewait.c (revision f05cddf9)
/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/random.h>

#include <vm/uma.h>

#include <net/route.h>
#include <net/if.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet6/scope6_var.h>
#include <netinet6/nd6.h>
#endif
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#ifdef INET6
#include <netinet6/ip6protosw.h>
#endif

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>

static VNET_DEFINE(uma_zone_t, tcptw_zone);
#define	V_tcptw_zone			VNET(tcptw_zone)
static int	maxtcptw;

/*
 * The timed wait queue contains references to each of the TCP sessions
 * currently in the TIME_WAIT state.  The queue pointers, including the
 * queue pointers in each tcptw structure, are protected using the global
 * tcbinfo lock, which must be held over queue iteration and modification.
 */
static VNET_DEFINE(TAILQ_HEAD(, tcptw), twq_2msl);
#define	V_twq_2msl			VNET(twq_2msl)

static void	tcp_tw_2msl_reset(struct tcptw *, int);
static void	tcp_tw_2msl_stop(struct tcptw *);

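/*
 * Compute the default cap on TIME_WAIT entries from the size of the
 * ephemeral port range and maxsockets.
 */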
static int
tcptw_auto_size(void)
{
	int halfrange;

	/*
	 * Max out at half the ephemeral port range so that TIME_WAIT
	 * sockets don't tie up too many ephemeral ports.
	 */
	if (V_ipport_lastauto > V_ipport_firstauto)
		halfrange = (V_ipport_lastauto - V_ipport_firstauto) / 2;
	else
		halfrange = (V_ipport_firstauto - V_ipport_lastauto) / 2;
	/* Protect against goofy port ranges smaller than 32. */
	return (imin(imax(halfrange, 32), maxsockets / 5));
}

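/*
 * Sysctl handler for net.inet.tcp.maxtcptw.  Reports the effective limit
 * (auto-sized when maxtcptw is 0) and accepts new values of 32 or greater,
 * applying them to the tcptw zone.
 */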
static int
sysctl_maxtcptw(SYSCTL_HANDLER_ARGS)
{
	int error, new;

	if (maxtcptw == 0)
		new = tcptw_auto_size();
	else
		new = maxtcptw;
	error = sysctl_handle_int(oidp, &new, 0, req);
	if (error == 0 && req->newptr)
		if (new >= 32) {
			maxtcptw = new;
			uma_zone_set_max(V_tcptw_zone, maxtcptw);
		}
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, maxtcptw, CTLTYPE_INT|CTLFLAG_RW,
    &maxtcptw, 0, sysctl_maxtcptw, "IU",
    "Maximum number of compressed TCP TIME_WAIT entries");

VNET_DEFINE(int, nolocaltimewait) = 0;
#define	V_nolocaltimewait	VNET(nolocaltimewait)
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, nolocaltimewait, CTLFLAG_RW,
    &VNET_NAME(nolocaltimewait), 0,
    "Do not create compressed TCP TIME_WAIT entries for local connections");

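/*
 * Re-apply the auto-sized zone limit; called when the inputs to
 * tcptw_auto_size() (such as maxsockets) may have changed.
 */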
void
tcp_tw_zone_change(void)
{

	if (maxtcptw == 0)
		uma_zone_set_max(V_tcptw_zone, tcptw_auto_size());
}

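/*
 * Initialize per-vnet TIME_WAIT state: the tcptw UMA zone, its size limit,
 * and the 2MSL expiry queue.
 */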
void
tcp_tw_init(void)
{

	V_tcptw_zone = uma_zcreate("tcptw", sizeof(struct tcptw),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	TUNABLE_INT_FETCH("net.inet.tcp.maxtcptw", &maxtcptw);
	if (maxtcptw == 0)
		uma_zone_set_max(V_tcptw_zone, tcptw_auto_size());
	else
		uma_zone_set_max(V_tcptw_zone, maxtcptw);
	TAILQ_INIT(&V_twq_2msl);
}

#ifdef VIMAGE
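/*
 * Tear down per-vnet TIME_WAIT state: close any remaining TIME_WAIT
 * connections and destroy the tcptw zone.
 */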
void
tcp_tw_destroy(void)
{
	struct tcptw *tw;

	INP_INFO_WLOCK(&V_tcbinfo);
	while ((tw = TAILQ_FIRST(&V_twq_2msl)) != NULL)
		tcp_twclose(tw, 0);
	INP_INFO_WUNLOCK(&V_tcbinfo);

	uma_zdestroy(V_tcptw_zone);
}
#endif

/*
 * Move a TCP connection into TIME_WAIT state.
 *    tcbinfo is locked.
 *    inp is locked, and is unlocked before returning.
 */
void
tcp_twstart(struct tcpcb *tp)
{
	struct tcptw *tw;
	struct inpcb *inp = tp->t_inpcb;
	int acknow;
	struct socket *so;
#ifdef INET6
	int isipv6 = inp->inp_inc.inc_flags & INC_ISIPV6;
#endif

	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);	/* tcp_tw_2msl_reset(). */
	INP_WLOCK_ASSERT(inp);

	if (V_nolocaltimewait) {
		int error = 0;
#ifdef INET6
		if (isipv6)
			error = in6_localaddr(&inp->in6p_faddr);
#endif
#if defined(INET6) && defined(INET)
		else
#endif
#ifdef INET
			error = in_localip(inp->inp_faddr);
#endif
		if (error) {
			tp = tcp_close(tp);
			if (tp != NULL)
				INP_WUNLOCK(inp);
			return;
		}
	}

	tw = uma_zalloc(V_tcptw_zone, M_NOWAIT);
	if (tw == NULL) {
		tw = tcp_tw_2msl_scan(1);
		if (tw == NULL) {
			tp = tcp_close(tp);
			if (tp != NULL)
				INP_WUNLOCK(inp);
			return;
		}
	}
	tw->tw_inpcb = inp;

	/*
	 * Recover last window size sent.
	 */
	if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt))
		tw->last_win = (tp->rcv_adv - tp->rcv_nxt) >> tp->rcv_scale;
	else
		tw->last_win = 0;

	/*
	 * Set t_recent if timestamps are used on the connection.
	 */
	if ((tp->t_flags & (TF_REQ_TSTMP|TF_RCVD_TSTMP|TF_NOOPT)) ==
	    (TF_REQ_TSTMP|TF_RCVD_TSTMP)) {
		tw->t_recent = tp->ts_recent;
		tw->ts_offset = tp->ts_offset;
	} else {
		tw->t_recent = 0;
		tw->ts_offset = 0;
	}

	tw->snd_nxt = tp->snd_nxt;
	tw->rcv_nxt = tp->rcv_nxt;
	tw->iss     = tp->iss;
	tw->irs     = tp->irs;
	tw->t_starttime = tp->t_starttime;
	tw->tw_time = 0;

	/*
	 * XXX: If this code is ever also used for the FIN_WAIT_2 state,
	 * we may need a ts_recent from the last segment.
	 */
	acknow = tp->t_flags & TF_ACKNOW;

	/*
	 * First, discard tcpcb state, which includes stopping its timers and
	 * freeing it.  tcp_discardcb() used to also release the inpcb, but
	 * that work is now done in the caller.
	 *
	 * Note: soisdisconnected() call used to be made in tcp_discardcb(),
	 * and might not be needed here any longer.
	 */
	tcp_discardcb(tp);
	so = inp->inp_socket;
	soisdisconnected(so);
	tw->tw_cred = crhold(so->so_cred);
	SOCK_LOCK(so);
	tw->tw_so_options = so->so_options;
	SOCK_UNLOCK(so);
	if (acknow)
		tcp_twrespond(tw, TH_ACK);
	inp->inp_ppcb = tw;
	inp->inp_flags |= INP_TIMEWAIT;
	tcp_tw_2msl_reset(tw, 0);

	/*
	 * If the inpcb owns the sole reference to the socket, then we can
	 * detach and free the socket as it is not needed in time wait.
	 */
	if (inp->inp_flags & INP_SOCKREF) {
		KASSERT(so->so_state & SS_PROTOREF,
		    ("tcp_twstart: !SS_PROTOREF"));
		inp->inp_flags &= ~INP_SOCKREF;
		INP_WUNLOCK(inp);
		ACCEPT_LOCK();
		SOCK_LOCK(so);
		so->so_state &= ~SS_PROTOREF;
		sofree(so);
	} else
		INP_WUNLOCK(inp);
}

#if 0
/*
 * The approximate rate of ISN increase of Microsoft TCP stacks;
 * the actual rate is slightly higher due to the addition of
 * random positive increments.
 *
 * Most other new OSes use semi-randomized ISN values, so we
 * do not need to worry about them.
 */
#define MS_ISN_BYTES_PER_SECOND		250000

/*
 * Determine if the ISN we will generate has advanced beyond the last
 * sequence number used by the previous connection.  If so, indicate
 * that it is safe to recycle this tw socket by returning 1.
 */
int
tcp_twrecycleable(struct tcptw *tw)
{
	tcp_seq new_iss = tw->iss;
	tcp_seq new_irs = tw->irs;

	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	new_iss += (ticks - tw->t_starttime) * (ISN_BYTES_PER_SECOND / hz);
	new_irs += (ticks - tw->t_starttime) * (MS_ISN_BYTES_PER_SECOND / hz);

	if (SEQ_GT(new_iss, tw->snd_nxt) && SEQ_GT(new_irs, tw->rcv_nxt))
		return (1);
	else
		return (0);
}
#endif

/*
 * Returns 1 if the TIME_WAIT state was killed and we should start over,
 * looking for a pcb in the listen state.  Returns 0 otherwise.
 */
int
tcp_twcheck(struct inpcb *inp, struct tcpopt *to, struct tcphdr *th,
    struct mbuf *m, int tlen)
{
	struct tcptw *tw;
	int thflags;
	tcp_seq seq;

	/* tcbinfo lock required for tcp_twclose(), tcp_tw_2msl_reset(). */
	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	INP_WLOCK_ASSERT(inp);

	/*
	 * XXXRW: Time wait state for the inpcb has been recycled, but the
	 * inpcb is still present.  This is undesirable, but temporarily
	 * necessary until we work out how to handle inpcbs whose timewait
	 * state has been removed.
	 */
	tw = intotw(inp);
	if (tw == NULL)
		goto drop;

	thflags = th->th_flags;

	/*
	 * NOTE: for FIN_WAIT_2 (to be added later),
	 * must validate sequence number before accepting RST
	 */

	/*
	 * If the segment contains RST:
	 *	Drop the segment - see Stevens, vol. 2, p. 964 and
	 *      RFC 1337.
	 */
	if (thflags & TH_RST)
		goto drop;

#if 0
/* PAWS not needed at the moment */
	/*
	 * RFC 1323 PAWS: If we have a timestamp reply on this segment
	 * and it's less than ts_recent, drop it.
	 */
	if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
	    TSTMP_LT(to.to_tsval, tp->ts_recent)) {
		if ((thflags & TH_ACK) == 0)
			goto drop;
		goto ack;
	}
	/*
	 * ts_recent is never updated because we never accept new segments.
	 */
#endif

	/*
	 * If a new connection request is received
	 * while in TIME_WAIT, drop the old connection
	 * and start over if the sequence numbers
	 * are above the previous ones.
	 */
	if ((thflags & TH_SYN) && SEQ_GT(th->th_seq, tw->rcv_nxt)) {
		tcp_twclose(tw, 0);
		return (1);
	}

	/*
	 * Drop the segment if it does not contain an ACK.
	 */
	if ((thflags & TH_ACK) == 0)
		goto drop;

	/*
	 * Reset the 2MSL timer if this is a duplicate FIN.
	 */
	if (thflags & TH_FIN) {
		seq = th->th_seq + tlen + (thflags & TH_SYN ? 1 : 0);
		if (seq + 1 == tw->rcv_nxt)
			tcp_tw_2msl_reset(tw, 1);
	}

	/*
	 * Acknowledge the segment if it has data or is not a duplicate ACK.
	 */
	if (thflags != TH_ACK || tlen != 0 ||
	    th->th_seq != tw->rcv_nxt || th->th_ack != tw->snd_nxt)
		tcp_twrespond(tw, TH_ACK);
drop:
	INP_WUNLOCK(inp);
	m_freem(m);
	return (0);
}

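/*
 * Tear down a connection in TIME_WAIT state: stop the 2MSL timer, detach
 * the tcptw from its inpcb, and release the socket reference if we own it.
 */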
void
tcp_twclose(struct tcptw *tw, int reuse)
{
	struct socket *so;
	struct inpcb *inp;

	/*
	 * At this point, we are in one of two situations:
	 *
	 * (1) We have no socket, just an inpcb<->tcptw pair.  We can free
	 *     all state.
	 *
	 * (2) We have a socket -- if we own a reference, release it and
	 *     notify the socket layer.
	 */
	inp = tw->tw_inpcb;
	KASSERT((inp->inp_flags & INP_TIMEWAIT), ("tcp_twclose: !timewait"));
	KASSERT(intotw(inp) == tw, ("tcp_twclose: inp_ppcb != tw"));
	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);	/* tcp_tw_2msl_stop(). */
	INP_WLOCK_ASSERT(inp);

	tw->tw_inpcb = NULL;
	tcp_tw_2msl_stop(tw);
	inp->inp_ppcb = NULL;
	in_pcbdrop(inp);

	so = inp->inp_socket;
	if (so != NULL) {
		/*
		 * If there's a socket, handle two cases: either we own a
		 * strong reference, which we will now release, or we don't,
		 * in which case another reference exists (XXXRW: think
		 * about this more) and we don't need to take action.
		 */
		if (inp->inp_flags & INP_SOCKREF) {
			inp->inp_flags &= ~INP_SOCKREF;
			INP_WUNLOCK(inp);
			ACCEPT_LOCK();
			SOCK_LOCK(so);
			KASSERT(so->so_state & SS_PROTOREF,
			    ("tcp_twclose: INP_SOCKREF && !SS_PROTOREF"));
			so->so_state &= ~SS_PROTOREF;
			sofree(so);
		} else {
			/*
			 * If we don't own the only reference, the socket and
			 * inpcb need to be left around to be handled by
			 * tcp_usr_detach() later.
			 */
			INP_WUNLOCK(inp);
		}
	} else
		in_pcbfree(inp);
	TCPSTAT_INC(tcps_closed);
	crfree(tw->tw_cred);
	tw->tw_cred = NULL;
	if (reuse)
		return;
	uma_zfree(V_tcptw_zone, tw);
}

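/*
 * Send a control segment (normally a bare ACK) on behalf of a connection
 * in TIME_WAIT state, built entirely from the compressed tcptw state.
 */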
int
tcp_twrespond(struct tcptw *tw, int flags)
{
	struct inpcb *inp = tw->tw_inpcb;
#if defined(INET6) || defined(INET)
	struct tcphdr *th = NULL;
#endif
	struct mbuf *m;
#ifdef INET
	struct ip *ip = NULL;
#endif
	u_int hdrlen, optlen;
	int error = 0;			/* Keep compiler happy */
	struct tcpopt to;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
	int isipv6 = inp->inp_inc.inc_flags & INC_ISIPV6;
#endif
	hdrlen = 0;                     /* Keep compiler happy */

	INP_WLOCK_ASSERT(inp);

	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	m->m_data += max_linkhdr;

#ifdef MAC
	mac_inpcb_create_mbuf(inp, m);
#endif

#ifdef INET6
	if (isipv6) {
		hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)(ip6 + 1);
		tcpip_fillheaders(inp, ip6, th);
	}
#endif
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	{
		hdrlen = sizeof(struct tcpiphdr);
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)(ip + 1);
		tcpip_fillheaders(inp, ip, th);
	}
#endif
	to.to_flags = 0;

	/*
	 * Send a timestamp and echo-reply if both our side and our peer
	 * have sent timestamps in our SYNs and this is not an RST.
	 */
	if (tw->t_recent && flags == TH_ACK) {
		to.to_flags |= TOF_TS;
		to.to_tsval = tcp_ts_getticks() + tw->ts_offset;
		to.to_tsecr = tw->t_recent;
	}
	optlen = tcp_addoptions(&to, (u_char *)(th + 1));

	m->m_len = hdrlen + optlen;
	m->m_pkthdr.len = m->m_len;

	KASSERT(max_linkhdr + m->m_len <= MHLEN, ("tcptw: mbuf too small"));

	th->th_seq = htonl(tw->snd_nxt);
	th->th_ack = htonl(tw->rcv_nxt);
	th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
	th->th_flags = flags;
	th->th_win = htons(tw->last_win);

	m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
#ifdef INET6
	if (isipv6) {
		m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
		th->th_sum = in6_cksum_pseudo(ip6,
		    sizeof(struct tcphdr) + optlen, IPPROTO_TCP, 0);
		ip6->ip6_hlim = in6_selecthlim(inp, NULL);
		error = ip6_output(m, inp->in6p_outputopts, NULL,
		    (tw->tw_so_options & SO_DONTROUTE), NULL, NULL, inp);
	}
#endif
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	{
		m->m_pkthdr.csum_flags = CSUM_TCP;
		th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons(sizeof(struct tcphdr) + optlen + IPPROTO_TCP));
		ip->ip_len = htons(m->m_pkthdr.len);
		if (V_path_mtu_discovery)
			ip->ip_off |= htons(IP_DF);
		error = ip_output(m, inp->inp_options, NULL,
		    ((tw->tw_so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0),
		    NULL, inp);
	}
#endif
	if (flags & TH_ACK)
		TCPSTAT_INC(tcps_sndacks);
	else
		TCPSTAT_INC(tcps_sndctrl);
	TCPSTAT_INC(tcps_sndtotal);
	return (error);
}

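/*
 * (Re)arm the 2MSL timeout for a tcptw and move it to the tail of the
 * expiry queue.  With 'rearm' set, the entry is first removed from the
 * queue before being reinserted.
 */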
static void
tcp_tw_2msl_reset(struct tcptw *tw, int rearm)
{

	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	INP_WLOCK_ASSERT(tw->tw_inpcb);
	if (rearm)
		TAILQ_REMOVE(&V_twq_2msl, tw, tw_2msl);
	tw->tw_time = ticks + 2 * tcp_msl;
	TAILQ_INSERT_TAIL(&V_twq_2msl, tw, tw_2msl);
}

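/*
 * Remove a tcptw from the 2MSL expiry queue.
 */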
static void
tcp_tw_2msl_stop(struct tcptw *tw)
{

	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	TAILQ_REMOVE(&V_twq_2msl, tw, tw_2msl);
}

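/*
 * Reap entries at the head of the 2MSL queue (the oldest) whose timeout
 * has expired.  With 'reuse' set, the oldest entry is recycled regardless
 * of its timeout and returned to the caller instead of being freed.
 */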
struct tcptw *
tcp_tw_2msl_scan(int reuse)
{
	struct tcptw *tw;

	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	for (;;) {
		tw = TAILQ_FIRST(&V_twq_2msl);
		if (tw == NULL || (!reuse && (tw->tw_time - ticks) > 0))
			break;
		INP_WLOCK(tw->tw_inpcb);
		tcp_twclose(tw, reuse);
		if (reuse)
			return (tw);
	}
	return (NULL);
}
650