1 /* $KAME: sctp_timer.c,v 1.30 2005/06/16 18:29:25 jinmei Exp $ */
2 /* $NetBSD: sctp_timer.c,v 1.6 2022/02/16 22:00:56 andvar Exp $ */
3
4 /*
5 * Copyright (C) 2002, 2003, 2004 Cisco Systems Inc,
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of the project nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 */
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: sctp_timer.c,v 1.6 2022/02/16 22:00:56 andvar Exp $");
34
35 #ifdef _KERNEL_OPT
36 #include "opt_inet.h"
37 #include "opt_sctp.h"
38 #include "opt_ipsec.h"
39 #endif /* _KERNEL_OPT */
40
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/malloc.h>
44 #include <sys/mbuf.h>
45 #include <sys/domain.h>
46 #include <sys/protosw.h>
47 #include <sys/socket.h>
48 #include <sys/socketvar.h>
49 #include <sys/proc.h>
50 #include <sys/kernel.h>
51 #include <sys/sysctl.h>
52 #ifdef INET6
53 #include <sys/domain.h>
54 #endif
55
56 #include <machine/limits.h>
57
58 #include <net/if.h>
59 #include <net/if_types.h>
60 #include <net/route.h>
61 #include <netinet/in.h>
62 #include <netinet/in_systm.h>
63 #define _IP_VHL
64 #include <netinet/ip.h>
65 #include <netinet/in_pcb.h>
66 #include <netinet/in_var.h>
67 #include <netinet/ip_var.h>
68
69 #ifdef INET6
70 #include <netinet/ip6.h>
71 #include <netinet6/ip6_var.h>
72 #endif /* INET6 */
73
74 #include <netinet/sctp_pcb.h>
75
76 #ifdef IPSEC
77 #include <netipsec/ipsec.h>
78 #include <netipsec/key.h>
79 #endif /* IPSEC */
80 #ifdef INET6
81 #include <netinet6/sctp6_var.h>
82 #endif
83 #include <netinet/sctp_var.h>
84 #include <netinet/sctp_timer.h>
85 #include <netinet/sctputil.h>
86 #include <netinet/sctp_output.h>
87 #include <netinet/sctp_hashdriver.h>
88 #include <netinet/sctp_header.h>
89 #include <netinet/sctp_indata.h>
90 #include <netinet/sctp_asconf.h>
91
92 #include <netinet/sctp.h>
93 #include <netinet/sctp_uio.h>
94
95 #ifdef SCTP_DEBUG
96 extern u_int32_t sctp_debug_on;
97 #endif /* SCTP_DEBUG */
98
99 void
sctp_audit_retranmission_queue(struct sctp_association * asoc)100 sctp_audit_retranmission_queue(struct sctp_association *asoc)
101 {
102 struct sctp_tmit_chunk *chk;
103
104 #ifdef SCTP_DEBUG
105 if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
106 printf("Audit invoked on send queue cnt:%d onqueue:%d\n",
107 asoc->sent_queue_retran_cnt,
108 asoc->sent_queue_cnt);
109 }
110 #endif /* SCTP_DEBUG */
111 asoc->sent_queue_retran_cnt = 0;
112 asoc->sent_queue_cnt = 0;
113 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
114 if (chk->sent == SCTP_DATAGRAM_RESEND) {
115 asoc->sent_queue_retran_cnt++;
116 }
117 asoc->sent_queue_cnt++;
118 }
119 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
120 if (chk->sent == SCTP_DATAGRAM_RESEND) {
121 asoc->sent_queue_retran_cnt++;
122 }
123 }
124 #ifdef SCTP_DEBUG
125 if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
126 printf("Audit completes retran:%d onqueue:%d\n",
127 asoc->sent_queue_retran_cnt,
128 asoc->sent_queue_cnt);
129 }
130 #endif /* SCTP_DEBUG */
131 }
132
/*
 * Record an error event against "net" (if given) and the association,
 * and decide whether the association has exceeded "threshold".
 *
 * Per-destination: bumps net->error_count and, when it reaches the
 * destination's failure_threshold, marks the address unreachable and
 * notifies the ULP via SCTP_NOTIFY_INTERFACE_DOWN.
 *
 * Per-association: bumps overall_error_count (skipped for unconfirmed
 * destinations) and, when it exceeds "threshold", aborts the
 * association with a protocol-violation error cause.
 *
 * Returns 1 when the association was aborted (stcb is gone), else 0.
 */
int
sctp_threshold_management(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint16_t threshold)
{
	if (net) {
		net->error_count++;
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
			printf("Error count for %p now %d thresh:%d\n",
			    net, net->error_count,
			    net->failure_threshold);
		}
#endif /* SCTP_DEBUG */
		if (net->error_count >= net->failure_threshold) {
			/* We had a threshold failure */
			if (net->dest_state & SCTP_ADDR_REACHABLE) {
				net->dest_state &= ~SCTP_ADDR_REACHABLE;
				net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
				/*
				 * NOTE(review): stcb is dereferenced here
				 * (and by sctp_ulp_notify below) although it
				 * is only NULL-checked further down.  Callers
				 * in this file appear to always pass a valid
				 * stcb together with a non-NULL net; confirm
				 * before relying on a NULL stcb here.
				 */
				if (net == stcb->asoc.primary_destination) {
					net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
				}
				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
				    stcb,
				    SCTP_FAILED_THRESHOLD,
				    (void *)net);
			}
		}
		/*********HOLD THIS COMMENT FOR PATCH OF ALTERNATE
		 *********ROUTING CODE
		 */
		/*********HOLD THIS COMMENT FOR END OF PATCH OF ALTERNATE
		 *********ROUTING CODE
		 */
	}
	if (stcb == NULL)
		return (0);

	/* Unconfirmed destinations do not count against the association. */
	if (net) {
		if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0) {
			stcb->asoc.overall_error_count++;
		}
	} else {
		stcb->asoc.overall_error_count++;
	}
#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
		printf("Overall error count for %p now %d thresh:%u state:%x\n",
		    &stcb->asoc,
		    stcb->asoc.overall_error_count,
		    (u_int)threshold,
		    ((net == NULL) ? (u_int)0 : (u_int)net->dest_state));
	}
#endif /* SCTP_DEBUG */
	/* We specifically do not do >= to give the assoc one more
	 * chance before we fail it.
	 */
	if (stcb->asoc.overall_error_count > threshold) {
		/* Abort notification sends a ULP notify */
		struct mbuf *oper;
		MGET(oper, M_DONTWAIT, MT_DATA);
		if (oper) {
			/* Build a protocol-violation error cause carrying a
			 * 32-bit marker identifying this abort site. */
			struct sctp_paramhdr *ph;
			u_int32_t *ippp;

			oper->m_len = sizeof(struct sctp_paramhdr) +
			    sizeof(*ippp);
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(oper->m_len);
			ippp = (u_int32_t *)(ph + 1);
			*ippp = htonl(0x40000001);
		}
		sctp_abort_an_association(inp, stcb, SCTP_FAILED_THRESHOLD, oper);
		return (1);
	}
	return (0);
}
210
/*
 * Find and return an alternate destination address for "net".
 *
 * First pass: starting after "net" in the association's net list
 * (wrapping once), look for a destination that is reachable, confirmed,
 * and has a valid cached route.  Second pass (dormant case, all
 * destinations down): accept any confirmed destination other than
 * "net" so we at least rotate destinations.  Falls back to "net"
 * itself when nothing else is found.
 */
struct sctp_nets *
sctp_find_alternate_net(struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	/* Find and return an alternate network if possible */
	struct sctp_nets *alt, *mnet;
	struct rtentry *rt;
	int once;

	if (stcb->asoc.numnets == 1) {
		/* No others but net */
		return (TAILQ_FIRST(&stcb->asoc.nets));
	}
	mnet = net;
	once = 0;

	if (mnet == NULL) {
		mnet = TAILQ_FIRST(&stcb->asoc.nets);
	}
	do {
		alt = TAILQ_NEXT(mnet, sctp_next);
		if (alt == NULL) {
			/* Wrap to the head of the list, but only once. */
			once++;
			if (once > 1) {
				break;
			}
			alt = TAILQ_FIRST(&stcb->asoc.nets);
		}
		rt = rtcache_validate(&alt->ro);
		if (rt == NULL) {
			/* no route: force source address re-selection later */
			alt->src_addr_selected = 0;
		}
		if (
		    ((alt->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE) &&
		    (rt != NULL) &&
		    (!(alt->dest_state & SCTP_ADDR_UNCONFIRMED))
		    ) {
			/* Found a reachable address */
			rtcache_unref(rt, &alt->ro);
			break;
		}
		/* release the route reference taken by rtcache_validate */
		rtcache_unref(rt, &alt->ro);
		mnet = alt;
	} while (alt != NULL);

	if (alt == NULL) {
		/* Case where NO insv network exists (dormant state) */
		/* we rotate destinations: take any confirmed address
		 * other than net, ignoring reachability/route state. */
		once = 0;
		mnet = net;
		do {
			alt = TAILQ_NEXT(mnet, sctp_next);
			if (alt == NULL) {
				once++;
				if (once > 1) {
					break;
				}
				alt = TAILQ_FIRST(&stcb->asoc.nets);
			}
			if ((!(alt->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
			    (alt != net)) {
				/* Found an alternate address */
				break;
			}
			mnet = alt;
		} while (alt != NULL);
	}
	if (alt == NULL) {
		/* nothing better: stay on the current destination */
		return (net);
	}
	return (alt);
}
283
284 static void
sctp_backoff_on_timeout(struct sctp_tcb * stcb,struct sctp_nets * net,int win_probe,int num_marked)285 sctp_backoff_on_timeout(struct sctp_tcb *stcb,
286 struct sctp_nets *net,
287 int win_probe,
288 int num_marked)
289 {
290 #ifdef SCTP_DEBUG
291 int oldRTO;
292
293 oldRTO = net->RTO;
294 #endif /* SCTP_DEBUG */
295 net->RTO <<= 1;
296 #ifdef SCTP_DEBUG
297 if (sctp_debug_on & SCTP_DEBUG_TIMER2) {
298 printf("Timer doubles from %d ms -to-> %d ms\n",
299 oldRTO, net->RTO);
300 }
301 #endif /* SCTP_DEBUG */
302
303 if (net->RTO > stcb->asoc.maxrto) {
304 net->RTO = stcb->asoc.maxrto;
305 #ifdef SCTP_DEBUG
306 if (sctp_debug_on & SCTP_DEBUG_TIMER2) {
307 printf("Growth capped by maxrto %d\n",
308 net->RTO);
309 }
310 #endif /* SCTP_DEBUG */
311 }
312
313
314 if ((win_probe == 0) && num_marked) {
315 /* We don't apply penalty to window probe scenarios */
316 #ifdef SCTP_CWND_LOGGING
317 int old_cwnd=net->cwnd;
318 #endif
319 net->ssthresh = net->cwnd >> 1;
320 if (net->ssthresh < (net->mtu << 1)) {
321 net->ssthresh = (net->mtu << 1);
322 }
323 net->cwnd = net->mtu;
324 /* floor of 1 mtu */
325 if (net->cwnd < net->mtu)
326 net->cwnd = net->mtu;
327 #ifdef SCTP_CWND_LOGGING
328 sctp_log_cwnd(net, net->cwnd-old_cwnd, SCTP_CWND_LOG_FROM_RTX);
329 #endif
330
331 net->partial_bytes_acked = 0;
332 #ifdef SCTP_DEBUG
333 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
334 printf("collapse cwnd to 1MTU ssthresh to %d\n",
335 net->ssthresh);
336 }
337 #endif
338
339 }
340 }
341
342
343 static int
sctp_mark_all_for_resend(struct sctp_tcb * stcb,struct sctp_nets * net,struct sctp_nets * alt,int * num_marked)344 sctp_mark_all_for_resend(struct sctp_tcb *stcb,
345 struct sctp_nets *net,
346 struct sctp_nets *alt,
347 int *num_marked)
348 {
349
350 /*
351 * Mark all chunks (well not all) that were sent to *net for retransmission.
352 * Move them to alt for there destination as well... We only
353 * mark chunks that have been outstanding long enough to have
354 * received feed-back.
355 */
356 struct sctp_tmit_chunk *chk, *tp2;
357 struct sctp_nets *lnets;
358 struct timeval now, min_wait, tv;
359 int cur_rto;
360 int win_probes, non_win_probes, orig_rwnd, audit_tf, num_mk, fir;
361 unsigned int cnt_mk;
362 u_int32_t orig_flight;
363 #ifdef SCTP_FR_LOGGING
364 u_int32_t tsnfirst, tsnlast;
365 #endif
366
367 /* none in flight now */
368 audit_tf = 0;
369 fir=0;
370 /* figure out how long a data chunk must be pending
371 * before we can mark it ..
372 */
373 SCTP_GETTIME_TIMEVAL(&now);
374 /* get cur rto in micro-seconds */
375 cur_rto = (((net->lastsa >> 2) + net->lastsv) >> 1);
376 #ifdef SCTP_FR_LOGGING
377 sctp_log_fr(cur_rto, 0, 0, SCTP_FR_T3_MARK_TIME);
378 #endif
379 cur_rto *= 1000;
380 #ifdef SCTP_FR_LOGGING
381 sctp_log_fr(cur_rto, 0, 0, SCTP_FR_T3_MARK_TIME);
382 #endif
383 tv.tv_sec = cur_rto / 1000000;
384 tv.tv_usec = cur_rto % 1000000;
385 #ifndef __FreeBSD__
386 timersub(&now, &tv, &min_wait);
387 #else
388 min_wait = now;
389 timevalsub(&min_wait, &tv);
390 #endif
391 if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) {
392 /*
393 * if we hit here, we don't
394 * have enough seconds on the clock to account
395 * for the RTO. We just let the lower seconds
396 * be the bounds and don't worry about it. This
397 * may mean we will mark a lot more than we should.
398 */
399 min_wait.tv_sec = min_wait.tv_usec = 0;
400 }
401 #ifdef SCTP_FR_LOGGING
402 sctp_log_fr(cur_rto, now.tv_sec, now.tv_usec, SCTP_FR_T3_MARK_TIME);
403 sctp_log_fr(0, min_wait.tv_sec, min_wait.tv_usec, SCTP_FR_T3_MARK_TIME);
404 #endif
405 if (stcb->asoc.total_flight >= net->flight_size) {
406 stcb->asoc.total_flight -= net->flight_size;
407 } else {
408 audit_tf = 1;
409 stcb->asoc.total_flight = 0;
410 }
411 /* Our rwnd will be incorrect here since we are not adding
412 * back the cnt * mbuf but we will fix that down below.
413 */
414 orig_rwnd = stcb->asoc.peers_rwnd;
415 orig_flight = net->flight_size;
416 stcb->asoc.peers_rwnd += net->flight_size;
417 net->flight_size = 0;
418 net->rto_pending = 0;
419 net->fast_retran_ip= 0;
420 win_probes = non_win_probes = 0;
421 #ifdef SCTP_DEBUG
422 if (sctp_debug_on & SCTP_DEBUG_TIMER2) {
423 printf("Marking ALL un-acked for retransmission at t3-timeout\n");
424 }
425 #endif /* SCTP_DEBUG */
426 /* Now on to each chunk */
427 num_mk = cnt_mk = 0;
428 #ifdef SCTP_FR_LOGGING
429 tsnlast = tsnfirst = 0;
430 #endif
431 chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
432 for (;chk != NULL; chk = tp2) {
433 tp2 = TAILQ_NEXT(chk, sctp_next);
434 if ((compare_with_wrap(stcb->asoc.last_acked_seq,
435 chk->rec.data.TSN_seq,
436 MAX_TSN)) ||
437 (stcb->asoc.last_acked_seq == chk->rec.data.TSN_seq)) {
438 /* Strange case our list got out of order? */
439 printf("Our list is out of order?\n");
440 TAILQ_REMOVE(&stcb->asoc.sent_queue, chk, sctp_next);
441 if (chk->data) {
442 sctp_release_pr_sctp_chunk(stcb, chk, 0xffff,
443 &stcb->asoc.sent_queue);
444 if (chk->flags & SCTP_PR_SCTP_BUFFER) {
445 stcb->asoc.sent_queue_cnt_removeable--;
446 }
447 }
448 stcb->asoc.sent_queue_cnt--;
449 sctp_free_remote_addr(chk->whoTo);
450 sctppcbinfo.ipi_count_chunk--;
451 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
452 panic("Chunk count is going negative");
453 }
454 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
455 sctppcbinfo.ipi_gencnt_chunk++;
456 continue;
457 }
458 if ((chk->whoTo == net) && (chk->sent < SCTP_DATAGRAM_ACKED)) {
459 /* found one to mark:
460 * If it is less than DATAGRAM_ACKED it MUST
461 * not be a skipped or marked TSN but instead
462 * one that is either already set for retransmission OR
463 * one that needs retransmission.
464 */
465
466 /* validate its been outstanding long enough */
467 #ifdef SCTP_FR_LOGGING
468 sctp_log_fr(chk->rec.data.TSN_seq,
469 chk->sent_rcv_time.tv_sec,
470 chk->sent_rcv_time.tv_usec,
471 SCTP_FR_T3_MARK_TIME);
472 #endif
473 if (chk->sent_rcv_time.tv_sec > min_wait.tv_sec) {
474 /* we have reached a chunk that was sent some
475 * seconds past our min.. forget it we will
476 * find no more to send.
477 */
478 #ifdef SCTP_FR_LOGGING
479 sctp_log_fr(0,
480 chk->sent_rcv_time.tv_sec,
481 chk->sent_rcv_time.tv_usec,
482 SCTP_FR_T3_STOPPED);
483 #endif
484 continue;
485 } else if (chk->sent_rcv_time.tv_sec == min_wait.tv_sec) {
486 /* we must look at the micro seconds to know.
487 */
488 if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) {
489 /* ok it was sent after our boundary time. */
490 #ifdef SCTP_FR_LOGGING
491 sctp_log_fr(0,
492 chk->sent_rcv_time.tv_sec,
493 chk->sent_rcv_time.tv_usec,
494 SCTP_FR_T3_STOPPED);
495 #endif
496 continue;
497 }
498 }
499 if (stcb->asoc.total_flight_count > 0) {
500 stcb->asoc.total_flight_count--;
501 }
502 if ((chk->flags & (SCTP_PR_SCTP_ENABLED|SCTP_PR_SCTP_BUFFER)) == SCTP_PR_SCTP_ENABLED) {
503 /* Is it expired? */
504 if ((now.tv_sec > chk->rec.data.timetodrop.tv_sec) ||
505 ((chk->rec.data.timetodrop.tv_sec == now.tv_sec) &&
506 (now.tv_usec > chk->rec.data.timetodrop.tv_usec))) {
507 /* Yes so drop it */
508 if (chk->data) {
509 sctp_release_pr_sctp_chunk(stcb,
510 chk,
511 (SCTP_RESPONSE_TO_USER_REQ|SCTP_NOTIFY_DATAGRAM_SENT),
512 &stcb->asoc.sent_queue);
513 }
514 }
515 continue;
516 }
517 if (chk->sent != SCTP_DATAGRAM_RESEND) {
518 stcb->asoc.sent_queue_retran_cnt++;
519 num_mk++;
520 if (fir == 0) {
521 fir = 1;
522 #ifdef SCTP_DEBUG
523 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
524 printf("First TSN marked was %x\n",
525 chk->rec.data.TSN_seq);
526 }
527 #endif
528 #ifdef SCTP_FR_LOGGING
529 tsnfirst = chk->rec.data.TSN_seq;
530 #endif
531 }
532 #ifdef SCTP_FR_LOGGING
533 tsnlast = chk->rec.data.TSN_seq;
534 sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count,
535 0, SCTP_FR_T3_MARKED);
536
537 #endif
538 }
539 chk->sent = SCTP_DATAGRAM_RESEND;
540 /* reset the TSN for striking and other FR stuff */
541 chk->rec.data.doing_fast_retransmit = 0;
542 #ifdef SCTP_DEBUG
543 if (sctp_debug_on & SCTP_DEBUG_TIMER3) {
544 printf("mark TSN:%x for retransmission\n", chk->rec.data.TSN_seq);
545 }
546 #endif /* SCTP_DEBUG */
547 /* Clear any time so NO RTT is being done */
548 chk->do_rtt = 0;
549 /* Bump up the count */
550 if (compare_with_wrap(chk->rec.data.TSN_seq,
551 stcb->asoc.t3timeout_highest_marked,
552 MAX_TSN)) {
553 /* TSN_seq > than t3timeout so update */
554 stcb->asoc.t3timeout_highest_marked = chk->rec.data.TSN_seq;
555 }
556 if (alt != net) {
557 sctp_free_remote_addr(chk->whoTo);
558 chk->whoTo = alt;
559 alt->ref_count++;
560 }
561 if ((chk->rec.data.state_flags & SCTP_WINDOW_PROBE) !=
562 SCTP_WINDOW_PROBE) {
563 non_win_probes++;
564 } else {
565 chk->rec.data.state_flags &= ~SCTP_WINDOW_PROBE;
566 win_probes++;
567 }
568 }
569 if (chk->sent == SCTP_DATAGRAM_RESEND) {
570 cnt_mk++;
571 }
572 }
573
574 #ifdef SCTP_FR_LOGGING
575 sctp_log_fr(tsnfirst, tsnlast, num_mk, SCTP_FR_T3_TIMEOUT);
576 #endif
577 /* compensate for the number we marked */
578 stcb->asoc.peers_rwnd += (num_mk /* * sizeof(struct mbuf)*/);
579
580 #ifdef SCTP_DEBUG
581 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
582 if (num_mk) {
583 #ifdef SCTP_FR_LOGGING
584 printf("LAST TSN marked was %x\n", tsnlast);
585 #endif
586 printf("Num marked for retransmission was %d peer-rwd:%ld\n",
587 num_mk, (u_long)stcb->asoc.peers_rwnd);
588 #ifdef SCTP_FR_LOGGING
589 printf("LAST TSN marked was %x\n", tsnlast);
590 #endif
591 printf("Num marked for retransmission was %d peer-rwd:%d\n",
592 num_mk,
593 (int)stcb->asoc.peers_rwnd
594 );
595 }
596 }
597 #endif
598 *num_marked = num_mk;
599 if (stcb->asoc.sent_queue_retran_cnt != cnt_mk) {
600 printf("Local Audit says there are %d for retran asoc cnt:%d\n",
601 cnt_mk, stcb->asoc.sent_queue_retran_cnt);
602 #ifndef SCTP_AUDITING_ENABLED
603 stcb->asoc.sent_queue_retran_cnt = cnt_mk;
604 #endif
605 }
606 #ifdef SCTP_DEBUG
607 if (sctp_debug_on & SCTP_DEBUG_TIMER3) {
608 printf("**************************\n");
609 }
610 #endif /* SCTP_DEBUG */
611
612 /* Now check for a ECN Echo that may be stranded */
613 TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
614 if ((chk->whoTo == net) &&
615 (chk->rec.chunk_id == SCTP_ECN_ECHO)) {
616 sctp_free_remote_addr(chk->whoTo);
617 chk->whoTo = alt;
618 if (chk->sent != SCTP_DATAGRAM_RESEND) {
619 chk->sent = SCTP_DATAGRAM_RESEND;
620 stcb->asoc.sent_queue_retran_cnt++;
621 }
622 alt->ref_count++;
623 }
624 }
625 if ((orig_rwnd == 0) && (stcb->asoc.total_flight == 0) &&
626 (orig_flight <= net->mtu)) {
627 /*
628 * If the LAST packet sent was not acked and our rwnd is 0
629 * then we are in a win-probe state.
630 */
631 win_probes = 1;
632 non_win_probes = 0;
633 #ifdef SCTP_DEBUG
634 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
635 printf("WIN_PROBE set via o_rwnd=0 tf=0 and all:%d fit in mtu:%d\n",
636 orig_flight, net->mtu);
637 }
638 #endif
639 }
640
641 if (audit_tf) {
642 #ifdef SCTP_DEBUG
643 if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
644 printf("Audit total flight due to negative value net:%p\n",
645 net);
646 }
647 #endif /* SCTP_DEBUG */
648 stcb->asoc.total_flight = 0;
649 stcb->asoc.total_flight_count = 0;
650 /* Clear all networks flight size */
651 TAILQ_FOREACH(lnets, &stcb->asoc.nets, sctp_next) {
652 lnets->flight_size = 0;
653 #ifdef SCTP_DEBUG
654 if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
655 printf("Net:%p c-f cwnd:%d ssthresh:%d\n",
656 lnets, lnets->cwnd, lnets->ssthresh);
657 }
658 #endif /* SCTP_DEBUG */
659 }
660 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
661 if (chk->sent < SCTP_DATAGRAM_RESEND) {
662 stcb->asoc.total_flight += chk->book_size;
663 chk->whoTo->flight_size += chk->book_size;
664 stcb->asoc.total_flight_count++;
665 }
666 }
667 }
668 /* Setup the ecn nonce re-sync point. We
669 * do this since retranmissions are NOT
670 * setup for ECN. This means that do to
671 * Karn's rule, we don't know the total
672 * of the peers ecn bits.
673 */
674 chk = TAILQ_FIRST(&stcb->asoc.send_queue);
675 if (chk == NULL) {
676 stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq;
677 } else {
678 stcb->asoc.nonce_resync_tsn = chk->rec.data.TSN_seq;
679 }
680 stcb->asoc.nonce_wait_for_ecne = 0;
681 stcb->asoc.nonce_sum_check = 0;
682 /* We return 1 if we only have a window probe outstanding */
683 if (win_probes && (non_win_probes == 0)) {
684 return (1);
685 }
686 return (0);
687 }
688
689 static void
sctp_move_all_chunks_to_alt(struct sctp_tcb * stcb,struct sctp_nets * net,struct sctp_nets * alt)690 sctp_move_all_chunks_to_alt(struct sctp_tcb *stcb,
691 struct sctp_nets *net,
692 struct sctp_nets *alt)
693 {
694 struct sctp_association *asoc;
695 struct sctp_stream_out *outs;
696 struct sctp_tmit_chunk *chk;
697
698 if (net == alt)
699 /* nothing to do */
700 return;
701
702 asoc = &stcb->asoc;
703
704 /*
705 * now through all the streams checking for chunks sent to our
706 * bad network.
707 */
708 TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
709 /* now clean up any chunks here */
710 TAILQ_FOREACH(chk, &outs->outqueue, sctp_next) {
711 if (chk->whoTo == net) {
712 sctp_free_remote_addr(chk->whoTo);
713 chk->whoTo = alt;
714 alt->ref_count++;
715 }
716 }
717 }
718 /* Now check the pending queue */
719 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
720 if (chk->whoTo == net) {
721 sctp_free_remote_addr(chk->whoTo);
722 chk->whoTo = alt;
723 alt->ref_count++;
724 }
725 }
726
727 }
728
/*
 * Handle expiry of the T3 retransmission timer for destination "net".
 *
 * Selects an alternate destination, marks timed-out chunks for
 * retransmission, applies RTO/cwnd backoff, runs threshold management
 * (against the net for normal timeouts, against only the association
 * for window probes), re-homes pending traffic if the address went
 * unreachable, and handles the COOKIE-ECHOED and PR-SCTP FWD-TSN
 * special cases.
 *
 * Returns 1 when threshold management destroyed the association,
 * 0 otherwise.
 */
int
sctp_t3rxt_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;
	int win_probe, num_mk;


#ifdef SCTP_FR_LOGGING
	sctp_log_fr(0, 0, 0, SCTP_FR_T3_TIMEOUT);
#endif
	/* Find an alternate and mark those for retransmission */
	alt = sctp_find_alternate_net(stcb, net);
	win_probe = sctp_mark_all_for_resend(stcb, net, alt, &num_mk);

	/* FR Loss recovery just ended with the T3. */
	stcb->asoc.fast_retran_loss_recovery = 0;

	/* setup the sat loss recovery that prevents
	 * satellite cwnd advance.
	 */
	stcb->asoc.sat_t3_loss_recovery = 1;
	stcb->asoc.sat_t3_recovery_tsn = stcb->asoc.sending_seq;

	/* Backoff the timer and cwnd */
	sctp_backoff_on_timeout(stcb, net, win_probe, num_mk);
	if (win_probe == 0) {
		/* We don't do normal threshold management on window probes */
		if (sctp_threshold_management(inp, stcb, net,
		    stcb->asoc.max_send_times)) {
			/* Association was destroyed */
			return (1);
		} else {
			if (net != stcb->asoc.primary_destination) {
				/* send a immediate HB if our RTO is stale */
				struct timeval now;
				unsigned int ms_goneby;
				SCTP_GETTIME_TIMEVAL(&now);
				if (net->last_sent_time.tv_sec) {
					/* ms since last send to this net */
					ms_goneby = (now.tv_sec - net->last_sent_time.tv_sec) * 1000;
				} else {
					ms_goneby = 0;
				}
				if ((ms_goneby > net->RTO) || (net->RTO == 0)) {
					/* no recent feed back in an RTO or more, request a RTT update */
					sctp_send_hb(stcb, 1, net);
				}
			}
		}
	} else {
		/*
		 * For a window probe we don't penalize the net's but only
		 * the association. This may fail it if SACKs are not coming
		 * back. If sack's are coming with rwnd locked at 0, we will
		 * continue to hold things waiting for rwnd to raise
		 */
		if (sctp_threshold_management(inp, stcb, NULL,
		    stcb->asoc.max_send_times)) {
			/* Association was destroyed */
			return (1);
		}
	}
	if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
		/* Move all pending over too */
		sctp_move_all_chunks_to_alt(stcb, net, alt);
		/* Was it our primary? */
		if ((stcb->asoc.primary_destination == net) && (alt != net)) {
			/*
			 * Yes, note it as such and find an alternate
			 * note: this means HB code must use this to resent
			 * the primary if it goes active AND if someone does
			 * a change-primary then this flag must be cleared
			 * from any net structures.
			 */
			if (sctp_set_primary_addr(stcb,
			    (struct sockaddr *)NULL,
			    alt) == 0) {
				net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
				net->src_addr_selected = 0;
			}
		}
	}
	/*
	 * Special case for cookie-echo'ed case, we don't do output
	 * but must await the COOKIE-ACK before retransmission
	 */
	if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
		/*
		 * Here we just reset the timer and start again since we
		 * have not established the asoc
		 */
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
			printf("Special cookie case return\n");
		}
#endif /* SCTP_DEBUG */
		sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
		return (0);
	}
	if (stcb->asoc.peer_supports_prsctp) {
		struct sctp_tmit_chunk *lchk;
		/* try to advance the PR-SCTP cumulative ack point */
		lchk = sctp_try_advance_peer_ack_point(stcb, &stcb->asoc);
		/* C3. See if we need to send a Fwd-TSN */
		if (compare_with_wrap(stcb->asoc.advanced_peer_ack_point,
		    stcb->asoc.last_acked_seq, MAX_TSN)) {
			/*
			 * ISSUE with ECN, see FWD-TSN processing for notes
			 * on issues that will occur when the ECN NONCE stuff
			 * is put into SCTP for cross checking.
			 */
#ifdef SCTP_DEBUG
			if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
				printf("Forward TSN time\n");
			}
#endif /* SCTP_DEBUG */
			send_forward_tsn(stcb, &stcb->asoc);
			if (lchk) {
				/* Assure a timer is up */
				sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, lchk->whoTo);
			}
		}
	}
	return (0);
}
854
855 int
sctp_t1init_timer(struct sctp_inpcb * inp,struct sctp_tcb * stcb,struct sctp_nets * net)856 sctp_t1init_timer(struct sctp_inpcb *inp,
857 struct sctp_tcb *stcb,
858 struct sctp_nets *net)
859 {
860 /* bump the thresholds */
861 if (stcb->asoc.delayed_connection) {
862 /* special hook for delayed connection. The
863 * library did NOT complete the rest of its
864 * sends.
865 */
866 stcb->asoc.delayed_connection = 0;
867 sctp_send_initiate(inp, stcb);
868 return (0);
869 }
870 if (sctp_threshold_management(inp, stcb, net,
871 stcb->asoc.max_init_times)) {
872 /* Association was destroyed */
873 return (1);
874 }
875 stcb->asoc.dropped_special_cnt = 0;
876 sctp_backoff_on_timeout(stcb, stcb->asoc.primary_destination, 1, 0);
877 if (stcb->asoc.initial_init_rto_max < net->RTO) {
878 net->RTO = stcb->asoc.initial_init_rto_max;
879 }
880 if (stcb->asoc.numnets > 1) {
881 /* If we have more than one addr use it */
882 struct sctp_nets *alt;
883 alt = sctp_find_alternate_net(stcb, stcb->asoc.primary_destination);
884 if ((alt != NULL) && (alt != stcb->asoc.primary_destination)) {
885 sctp_move_all_chunks_to_alt(stcb, stcb->asoc.primary_destination, alt);
886 stcb->asoc.primary_destination = alt;
887 }
888 }
889 /* Send out a new init */
890 sctp_send_initiate(inp, stcb);
891 return (0);
892 }
893
894 /*
895 * For cookie and asconf we actually need to find and mark for resend,
896 * then increment the resend counter (after all the threshold management
897 * stuff of course).
898 */
sctp_cookie_timer(struct sctp_inpcb * inp,struct sctp_tcb * stcb,struct sctp_nets * net)899 int sctp_cookie_timer(struct sctp_inpcb *inp,
900 struct sctp_tcb *stcb,
901 struct sctp_nets *net)
902 {
903 struct sctp_nets *alt;
904 struct sctp_tmit_chunk *cookie;
905 /* first before all else we must find the cookie */
906 TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue, sctp_next) {
907 if (cookie->rec.chunk_id == SCTP_COOKIE_ECHO) {
908 break;
909 }
910 }
911 if (cookie == NULL) {
912 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
913 /* FOOBAR! */
914 struct mbuf *oper;
915 MGET(oper, M_DONTWAIT, MT_DATA);
916 if (oper) {
917 struct sctp_paramhdr *ph;
918 u_int32_t *ippp;
919
920 oper->m_len = sizeof(struct sctp_paramhdr) +
921 sizeof(*ippp);
922 ph = mtod(oper, struct sctp_paramhdr *);
923 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
924 ph->param_length = htons(oper->m_len);
925 ippp = (u_int32_t *)(ph + 1);
926 *ippp = htonl(0x40000002);
927 }
928 sctp_abort_an_association(inp, stcb, SCTP_INTERNAL_ERROR,
929 oper);
930 }
931 return (1);
932 }
933 /* Ok we found the cookie, threshold management next */
934 if (sctp_threshold_management(inp, stcb, cookie->whoTo,
935 stcb->asoc.max_init_times)) {
936 /* Assoc is over */
937 return (1);
938 }
939 /*
940 * cleared theshold management now lets backoff the address &
941 * select an alternate
942 */
943 stcb->asoc.dropped_special_cnt = 0;
944 sctp_backoff_on_timeout(stcb, cookie->whoTo, 1, 0);
945 alt = sctp_find_alternate_net(stcb, cookie->whoTo);
946 if (alt != cookie->whoTo) {
947 sctp_free_remote_addr(cookie->whoTo);
948 cookie->whoTo = alt;
949 alt->ref_count++;
950 }
951 /* Now mark the retran info */
952 if (cookie->sent != SCTP_DATAGRAM_RESEND) {
953 stcb->asoc.sent_queue_retran_cnt++;
954 }
955 cookie->sent = SCTP_DATAGRAM_RESEND;
956 /*
957 * Now call the output routine to kick out the cookie again, Note we
958 * don't mark any chunks for retran so that FR will need to kick in
959 * to move these (or a send timer).
960 */
961 return (0);
962 }
963
/*
 * Stream-reset retransmission timer.
 *
 * Finds the outstanding STREAM-RESET request chunk on the control send
 * queue, runs threshold management (max_send_times), backs off its
 * destination, re-homes the chunk (and any stranded ECN-Echo for
 * "net") onto an alternate address, marks it for retransmission, and
 * restarts the stream-reset timer.  Returns 1 when the association was
 * destroyed, 0 otherwise (including the no-chunk-found case).
 */
int
sctp_strreset_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;
	struct sctp_tmit_chunk *strrst, *chk;
	struct sctp_stream_reset_req *strreq;
	/* find the existing STRRESET */
	TAILQ_FOREACH(strrst, &stcb->asoc.control_send_queue,
	    sctp_next) {
		if (strrst->rec.chunk_id == SCTP_STREAM_RESET) {
			/* is it what we want: an outgoing reset request? */
			strreq = mtod(strrst->data, struct sctp_stream_reset_req *);
			if (strreq->sr_req.ph.param_type == ntohs(SCTP_STR_RESET_REQUEST)) {
				break;
			}
		}
	}
	if (strrst == NULL) {
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
			printf("Strange, strreset timer fires, but I can't find an str-reset?\n");
		}
#endif /* SCTP_DEBUG */
		return (0);
	}
	/* do threshold management */
	if (sctp_threshold_management(inp, stcb, strrst->whoTo,
	    stcb->asoc.max_send_times)) {
		/* Assoc is over */
		return (1);
	}

	/*
	 * cleared theshold management
	 * now lets backoff the address & select an alternate
	 */
	sctp_backoff_on_timeout(stcb, strrst->whoTo, 1, 0);
	alt = sctp_find_alternate_net(stcb, strrst->whoTo);
	sctp_free_remote_addr(strrst->whoTo);
	strrst->whoTo = alt;
	alt->ref_count++;

	/* See if a ECN Echo is also stranded */
	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
		if ((chk->whoTo == net) &&
		    (chk->rec.chunk_id == SCTP_ECN_ECHO)) {
			/* drop the old destination's reference, mark for
			 * resend, then take a reference on the alternate */
			sctp_free_remote_addr(chk->whoTo);
			if (chk->sent != SCTP_DATAGRAM_RESEND) {
				chk->sent = SCTP_DATAGRAM_RESEND;
				stcb->asoc.sent_queue_retran_cnt++;
			}
			chk->whoTo = alt;
			alt->ref_count++;
		}
	}
	if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
		/*
		 * If the address went un-reachable, we need to move
		 * to alternates for ALL chk's in queue
		 */
		sctp_move_all_chunks_to_alt(stcb, net, alt);
	}
	/* mark the retran info */
	if (strrst->sent != SCTP_DATAGRAM_RESEND)
		stcb->asoc.sent_queue_retran_cnt++;
	strrst->sent = SCTP_DATAGRAM_RESEND;

	/* restart the timer */
	sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, inp, stcb, strrst->whoTo);
	return (0);
}
1035
/*
 * ASCONF timer expiry handler.  On the first expiry (nothing sent yet,
 * asconf_sent == 0) a new ASCONF chunk is composed and sent; otherwise
 * the queued ASCONF is retransmitted toward an alternate destination
 * after threshold management and back-off.  Returns 1 if threshold
 * management decided the association is gone, 0 otherwise.
 */
int sctp_asconf_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
	struct sctp_nets *net)
{
	struct sctp_nets *alt;
	struct sctp_tmit_chunk *asconf, *chk;

	/* is this the first send, or a retransmission? */
	if (stcb->asoc.asconf_sent == 0) {
		/* compose a new ASCONF chunk and send it */
		sctp_send_asconf(stcb, net);
	} else {
		/* Retransmission of the existing ASCONF needed... */

		/* find the existing ASCONF on the control send queue */
		TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
		    sctp_next) {
			if (asconf->rec.chunk_id == SCTP_ASCONF) {
				break;
			}
		}
		if (asconf == NULL) {
			/* nothing queued; nothing to retransmit */
#ifdef SCTP_DEBUG
			if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
				printf("Strange, asconf timer fires, but I can't find an asconf?\n");
			}
#endif /* SCTP_DEBUG */
			return (0);
		}
		/* do threshold management */
		if (sctp_threshold_management(inp, stcb, asconf->whoTo,
		    stcb->asoc.max_send_times)) {
			/* Assoc is over */
			return (1);
		}

		/* PETER? FIX? How will the following code ever run? If
		 * the max_send_times is hit, threshold management will
		 * blow away the association?
		 */
		if (asconf->snd_count > stcb->asoc.max_send_times) {
			/*
			 * Something is rotten, peer is not responding to
			 * ASCONFs but maybe is to data etc. e.g. it is not
			 * properly handling the chunk type upper bits
			 * Mark this peer as ASCONF incapable and cleanup
			 */
#ifdef SCTP_DEBUG
			if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
				printf("asconf_timer: Peer has not responded to our repeated ASCONFs\n");
			}
#endif /* SCTP_DEBUG */
			sctp_asconf_cleanup(stcb, net);
			return (0);
		}
		/*
		 * cleared threshold management;
		 * now back off the address & select an alternate
		 */
		sctp_backoff_on_timeout(stcb, asconf->whoTo, 1, 0);
		alt = sctp_find_alternate_net(stcb, asconf->whoTo);
		/* retarget the ASCONF: drop old net ref, take one on alt */
		sctp_free_remote_addr(asconf->whoTo);
		asconf->whoTo = alt;
		alt->ref_count++;

		/* See if an ECN Echo is also stranded on the failed net */
		TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
			if ((chk->whoTo == net) &&
			    (chk->rec.chunk_id == SCTP_ECN_ECHO)) {
				/* move it to the alternate and mark for resend */
				sctp_free_remote_addr(chk->whoTo);
				chk->whoTo = alt;
				if (chk->sent != SCTP_DATAGRAM_RESEND) {
					chk->sent = SCTP_DATAGRAM_RESEND;
					stcb->asoc.sent_queue_retran_cnt++;
				}
				alt->ref_count++;

			}
		}
		if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
			/*
			 * If the address went un-reachable, we need to move
			 * to alternates for ALL chk's in queue
			 */
			sctp_move_all_chunks_to_alt(stcb, net, alt);
		}
		/* mark the retran info */
		if (asconf->sent != SCTP_DATAGRAM_RESEND)
			stcb->asoc.sent_queue_retran_cnt++;
		asconf->sent = SCTP_DATAGRAM_RESEND;
	}
	return (0);
}
1128
1129 /*
1130 * For the shutdown and shutdown-ack, we do not keep one around on the
1131 * control queue. This means we must generate a new one and call the general
1132 * chunk output routine, AFTER having done threshold
1133 * management.
1134 */
1135 int
sctp_shutdown_timer(struct sctp_inpcb * inp,struct sctp_tcb * stcb,struct sctp_nets * net)1136 sctp_shutdown_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1137 struct sctp_nets *net)
1138 {
1139 struct sctp_nets *alt;
1140 /* first threshold management */
1141 if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
1142 /* Assoc is over */
1143 return (1);
1144 }
1145 /* second select an alternative */
1146 alt = sctp_find_alternate_net(stcb, net);
1147
1148 /* third generate a shutdown into the queue for out net */
1149 #ifdef SCTP_DEBUG
1150 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
1151 printf("%s:%d sends a shutdown\n",
1152 __FILE__,
1153 __LINE__
1154 );
1155 }
1156 #endif
1157 if (alt) {
1158 sctp_send_shutdown(stcb, alt);
1159 } else {
1160 /* if alt is NULL, there is no dest
1161 * to send to??
1162 */
1163 return (0);
1164 }
1165 /* fourth restart timer */
1166 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, alt);
1167 return (0);
1168 }
1169
sctp_shutdownack_timer(struct sctp_inpcb * inp,struct sctp_tcb * stcb,struct sctp_nets * net)1170 int sctp_shutdownack_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1171 struct sctp_nets *net)
1172 {
1173 struct sctp_nets *alt;
1174 /* first threshold management */
1175 if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
1176 /* Assoc is over */
1177 return (1);
1178 }
1179 /* second select an alternative */
1180 alt = sctp_find_alternate_net(stcb, net);
1181
1182 /* third generate a shutdown into the queue for out net */
1183 sctp_send_shutdown_ack(stcb, alt);
1184
1185 /* fourth restart timer */
1186 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, alt);
1187 return (0);
1188 }
1189
1190 static void
sctp_audit_stream_queues_for_size(struct sctp_inpcb * inp,struct sctp_tcb * stcb)1191 sctp_audit_stream_queues_for_size(struct sctp_inpcb *inp,
1192 struct sctp_tcb *stcb)
1193 {
1194 struct sctp_stream_out *outs;
1195 struct sctp_tmit_chunk *chk;
1196 unsigned int chks_in_queue=0;
1197
1198 if ((stcb == NULL) || (inp == NULL))
1199 return;
1200 if (TAILQ_EMPTY(&stcb->asoc.out_wheel)) {
1201 printf("Strange, out_wheel empty nothing on sent/send and tot=%lu?\n",
1202 (u_long)stcb->asoc.total_output_queue_size);
1203 stcb->asoc.total_output_queue_size = 0;
1204 return;
1205 }
1206 if (stcb->asoc.sent_queue_retran_cnt) {
1207 printf("Hmm, sent_queue_retran_cnt is non-zero %d\n",
1208 stcb->asoc.sent_queue_retran_cnt);
1209 stcb->asoc.sent_queue_retran_cnt = 0;
1210 }
1211 /* Check to see if some data queued, if so report it */
1212 TAILQ_FOREACH(outs, &stcb->asoc.out_wheel, next_spoke) {
1213 if (!TAILQ_EMPTY(&outs->outqueue)) {
1214 TAILQ_FOREACH(chk, &outs->outqueue, sctp_next) {
1215 chks_in_queue++;
1216 }
1217 }
1218 }
1219 if (chks_in_queue != stcb->asoc.stream_queue_cnt) {
1220 printf("Hmm, stream queue cnt at %d I counted %d in stream out wheel\n",
1221 stcb->asoc.stream_queue_cnt, chks_in_queue);
1222 }
1223 if (chks_in_queue) {
1224 /* call the output queue function */
1225 sctp_chunk_output(inp, stcb, 1);
1226 if ((TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
1227 (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
1228 /* Probably should go in and make it go back through and add fragments allowed */
1229 printf("Still nothing moved %d chunks are stuck\n", chks_in_queue);
1230 }
1231 } else {
1232 printf("Found no chunks on any queue tot:%lu\n",
1233 (u_long)stcb->asoc.total_output_queue_size);
1234 stcb->asoc.total_output_queue_size = 0;
1235 }
1236 }
1237
1238 int
sctp_heartbeat_timer(struct sctp_inpcb * inp,struct sctp_tcb * stcb,struct sctp_nets * net)1239 sctp_heartbeat_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1240 struct sctp_nets *net)
1241 {
1242 int cnt_of_unconf=0;
1243
1244 if (net) {
1245 if (net->hb_responded == 0) {
1246 sctp_backoff_on_timeout(stcb, net, 1, 0);
1247 }
1248 /* Zero PBA, if it needs it */
1249 if (net->partial_bytes_acked) {
1250 net->partial_bytes_acked = 0;
1251 }
1252 }
1253 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
1254 if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1255 (net->dest_state & SCTP_ADDR_REACHABLE)) {
1256 cnt_of_unconf++;
1257 }
1258 }
1259 if ((stcb->asoc.total_output_queue_size > 0) &&
1260 (TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
1261 (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
1262 sctp_audit_stream_queues_for_size(inp, stcb);
1263 }
1264 /* Send a new HB, this will do threshold management, pick a new dest */
1265 if (sctp_send_hb(stcb, 0, NULL) < 0) {
1266 return (1);
1267 }
1268 if (cnt_of_unconf > 1) {
1269 /*
1270 * this will send out extra hb's up to maxburst if
1271 * there are any unconfirmed addresses.
1272 */
1273 int cnt_sent = 1;
1274 while ((cnt_sent < stcb->asoc.max_burst) && (cnt_of_unconf > 1)) {
1275 if (sctp_send_hb(stcb, 0, NULL) == 0)
1276 break;
1277 cnt_of_unconf--;
1278 cnt_sent++;
1279 }
1280 }
1281 return (0);
1282 }
1283
/*
 * Table of common link/path MTU values, in ascending order; used by
 * sctp_getnext_mtu() to pick the next larger MTU to probe.
 * SCTP_NUMBER_OF_MTU_SIZES must match the number of entries.
 */
#define SCTP_NUMBER_OF_MTU_SIZES 18
static u_int32_t mtu_sizes[]={
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};
1305
1306
1307 static u_int32_t
sctp_getnext_mtu(struct sctp_inpcb * inp,u_int32_t cur_mtu)1308 sctp_getnext_mtu(struct sctp_inpcb *inp, u_int32_t cur_mtu)
1309 {
1310 /* select another MTU that is just bigger than this one */
1311 int i;
1312
1313 for (i = 0; i < SCTP_NUMBER_OF_MTU_SIZES; i++) {
1314 if (cur_mtu < mtu_sizes[i]) {
1315 /* no max_mtu is bigger than this one */
1316 return (mtu_sizes[i]);
1317 }
1318 }
1319 /* here return the highest allowable */
1320 return (cur_mtu);
1321 }
1322
1323
sctp_pathmtu_timer(struct sctp_inpcb * inp,struct sctp_tcb * stcb,struct sctp_nets * net)1324 void sctp_pathmtu_timer(struct sctp_inpcb *inp,
1325 struct sctp_tcb *stcb,
1326 struct sctp_nets *net)
1327 {
1328 u_int32_t next_mtu;
1329 struct rtentry *rt;
1330
1331 /* restart the timer in any case */
1332 next_mtu = sctp_getnext_mtu(inp, net->mtu);
1333 if (next_mtu <= net->mtu) {
1334 /* nothing to do */
1335 return;
1336 }
1337 rt = rtcache_validate(&net->ro);
1338 if (rt != NULL) {
1339 /* only if we have a route and interface do we
1340 * set anything. Note we always restart
1341 * the timer though just in case it is updated
1342 * (i.e. the ifp) or route/ifp is populated.
1343 */
1344 if (rt->rt_ifp != NULL) {
1345 if (rt->rt_ifp->if_mtu > next_mtu) {
1346 /* ok it will fit out the door */
1347 net->mtu = next_mtu;
1348 }
1349 }
1350 rtcache_unref(rt, &net->ro);
1351 }
1352 /* restart the timer */
1353 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
1354 }
1355
/*
 * Auto-close timer expiry.  If auto-close is enabled and the
 * association has been idle (neither sent nor received) for at least
 * sctp_autoclose_ticks, flush any pending output and, once the
 * send/sent queues are empty, initiate a graceful SHUTDOWN toward the
 * primary destination.  If not idle long enough, the timer is re-armed
 * for just the remaining time.
 */
void sctp_autoclose_timer(struct sctp_inpcb *inp,
	struct sctp_tcb *stcb,
	struct sctp_nets *net)
{
	struct timeval tn, *tim_touse;
	struct sctp_association *asoc;
	int ticks_gone_by;

	SCTP_GETTIME_TIMEVAL(&tn);
	if (stcb->asoc.sctp_autoclose_ticks &&
	    (inp->sctp_flags & SCTP_PCB_FLAGS_AUTOCLOSE)) {
		/* Auto close is on */
		asoc = &stcb->asoc;
		/* pick the time to use: the later of last-received and
		 * last-sent, so activity in either direction resets idle */
		if (asoc->time_last_rcvd.tv_sec >
		    asoc->time_last_sent.tv_sec) {
			tim_touse = &asoc->time_last_rcvd;
		} else {
			tim_touse = &asoc->time_last_sent;
		}
		/* Now has long enough transpired to autoclose? */
		ticks_gone_by = ((tn.tv_sec - tim_touse->tv_sec) * hz);
		if ((ticks_gone_by > 0) &&
		    (ticks_gone_by >= (int)asoc->sctp_autoclose_ticks)) {
			/*
			 * autoclose time has hit, call the output routine,
			 * which should do nothing just to be SURE we don't
			 * have hanging data. We can then safely check the
			 * queues and know that we are clear to send shutdown
			 */
			sctp_chunk_output(inp, stcb, 9);
			/* Are we clean? */
			if (TAILQ_EMPTY(&asoc->send_queue) &&
			    TAILQ_EMPTY(&asoc->sent_queue)) {
				/*
				 * there is nothing queued to send,
				 * so I'm done...
				 */
				if (SCTP_GET_STATE(asoc) !=
				    SCTP_STATE_SHUTDOWN_SENT) {
					/* only send SHUTDOWN 1st time thru */
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
						printf("%s:%d sends a shutdown\n",
						    __FILE__,
						    __LINE__
						    );
					}
#endif
					sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
					/* NOTE(review): direct assignment replaces the
					 * whole state word, clearing any substate flag
					 * bits — confirm that is intended here */
					asoc->state = SCTP_STATE_SHUTDOWN_SENT;
					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
					    stcb->sctp_ep, stcb,
					    asoc->primary_destination);
					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
					    stcb->sctp_ep, stcb,
					    asoc->primary_destination);
				}
			}
		} else {
			/*
			 * No auto close at this time, reset t-o to
			 * check later
			 */
			int tmp;
			/* fool the timer startup to use the time left */
			tmp = asoc->sctp_autoclose_ticks;
			asoc->sctp_autoclose_ticks -= ticks_gone_by;
			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
			    net);
			/* restore the real tick value */
			asoc->sctp_autoclose_ticks = tmp;
		}
	}
}
1431
/*
 * Iterator timer: walks the SCTP endpoints (inp's) and their
 * associations, applying it->function_toapply to each association whose
 * state matches it->asoc_state, processing at most
 * SCTP_MAX_ITERATOR_AT_ONCE associations per timer tick before
 * re-arming the timer to continue.  When the walk completes
 * (it->inp == NULL) the iterator is unlinked, its completion callback
 * invoked, and the iterator freed.
 */
void
sctp_iterator_timer(struct sctp_iterator *it)
{
	int cnt = 0;
	/* only one iterator can run at a
	 * time. This is the only way we
	 * can cleanly pull ep's from underneath
	 * all the running iterators when a
	 * ep is freed.
	 */
	SCTP_ITERATOR_LOCK();
	if (it->inp == NULL) {
		/* iterator is complete: unlink, run callback, free */
	done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_WLOCK();
		LIST_REMOVE(it, sctp_nxt_itr);
		/* stopping the callout is not needed, in theory,
		 * but I am paranoid.
		 */
		SCTP_INP_INFO_WUNLOCK();
		callout_stop(&it->tmr.timer);
		if (it->function_atend != NULL) {
			(*it->function_atend)(it->pointer, it->val);
		}
		callout_destroy(&it->tmr.timer);
		free(it, M_PCB);
		return;
	}
select_a_new_ep:
	SCTP_INP_WLOCK(it->inp);
	/* skip endpoints whose flags do not match the iterator's filter */
	while ((it->pcb_flags) && ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) {
		/* we do not like this ep */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_WUNLOCK(it->inp);
			goto done_with_iterator;
		}
		SCTP_INP_WUNLOCK(it->inp);
		it->inp = LIST_NEXT(it->inp, sctp_list);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_WLOCK(it->inp);
	}
	if ((it->inp->inp_starting_point_for_iterator != NULL) &&
	    (it->inp->inp_starting_point_for_iterator != it)) {
		/* another iterator owns this ep: retry via the timer */
		printf("Iterator collision, we must wait for other iterator at %p\n",
		    it->inp);
		SCTP_INP_WUNLOCK(it->inp);
		goto start_timer_return;
	}
	/* now we do the actual write to this guy */
	it->inp->inp_starting_point_for_iterator = it;
	SCTP_INP_WUNLOCK(it->inp);
	SCTP_INP_RLOCK(it->inp);
	/* if we reach here we found a inp acceptable, now through each
	 * one that has the association in the right state
	 */
	if (it->stcb == NULL) {
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	/* NOTE(review): it->stcb may be NULL here when the inp has no
	 * associations — this dereference looks unguarded; confirm */
	if (it->stcb->asoc.stcb_starting_point_for_iterator == it) {
		it->stcb->asoc.stcb_starting_point_for_iterator = NULL;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* association not in the wanted state: skip */
			SCTP_TCB_UNLOCK(it->stcb);
			it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
			continue;
		}
		cnt++;
		/* run function on this one */
		SCTP_INP_RUNLOCK(it->inp);
		(*it->function_toapply)(it->inp, it->stcb, it->pointer, it->val);
		sctp_chunk_output(it->inp, it->stcb, 1);
		SCTP_TCB_UNLOCK(it->stcb);
		/* see if we have limited out */
		if (cnt > SCTP_MAX_ITERATOR_AT_ONCE) {
			/* remember where to resume, then re-arm the timer */
			it->stcb->asoc.stcb_starting_point_for_iterator = it;
		start_timer_return:
			SCTP_ITERATOR_UNLOCK();
			sctp_timer_start(SCTP_TIMER_TYPE_ITERATOR, (struct sctp_inpcb *)it, NULL, NULL);
			return;
		}
		SCTP_INP_RLOCK(it->inp);
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
	}
	/* if we reach here, we ran out of stcb's in the inp we are looking at */
	SCTP_INP_RUNLOCK(it->inp);
	SCTP_INP_WLOCK(it->inp);
	it->inp->inp_starting_point_for_iterator = NULL;
	SCTP_INP_WUNLOCK(it->inp);
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		/* single-inp walk is finished */
		it->inp = NULL;
	} else {
		/* advance to the next endpoint in the global list */
		SCTP_INP_INFO_RLOCK();
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_INFO_RUNLOCK();
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1537