1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
5 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * a) Redistributions of source code must retain the above copyright notice,
12 * this list of conditions and the following disclaimer.
13 *
14 * b) Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the distribution.
17 *
18 * c) Neither the name of Cisco Systems, Inc. nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35 #if defined(__FreeBSD__) && !defined(__Userspace__)
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38 #endif
39
40 #define _IP_VHL
41 #include <netinet/sctp_os.h>
42 #include <netinet/sctp_pcb.h>
43 #ifdef INET6
44 #if defined(__FreeBSD__) && defined(__Userspace__)
45 #include <netinet6/sctp6_var.h>
46 #endif
47 #endif
48 #include <netinet/sctp_var.h>
49 #include <netinet/sctp_sysctl.h>
50 #include <netinet/sctp_timer.h>
51 #include <netinet/sctputil.h>
52 #include <netinet/sctp_output.h>
53 #include <netinet/sctp_header.h>
54 #include <netinet/sctp_indata.h>
55 #include <netinet/sctp_asconf.h>
56 #include <netinet/sctp_input.h>
57 #include <netinet/sctp.h>
58 #include <netinet/sctp_uio.h>
59 #if defined(INET) || defined(INET6)
60 #if !(defined(_WIN32) && defined(__Userspace__))
61 #include <netinet/udp.h>
62 #endif
63 #endif
64
65 void
sctp_audit_retranmission_queue(struct sctp_association * asoc)66 sctp_audit_retranmission_queue(struct sctp_association *asoc)
67 {
68 struct sctp_tmit_chunk *chk;
69
70 SCTPDBG(SCTP_DEBUG_TIMER4, "Audit invoked on send queue cnt:%d onqueue:%d\n",
71 asoc->sent_queue_retran_cnt,
72 asoc->sent_queue_cnt);
73 asoc->sent_queue_retran_cnt = 0;
74 asoc->sent_queue_cnt = 0;
75 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
76 if (chk->sent == SCTP_DATAGRAM_RESEND) {
77 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
78 }
79 asoc->sent_queue_cnt++;
80 }
81 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
82 if (chk->sent == SCTP_DATAGRAM_RESEND) {
83 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
84 }
85 }
86 TAILQ_FOREACH(chk, &asoc->asconf_send_queue, sctp_next) {
87 if (chk->sent == SCTP_DATAGRAM_RESEND) {
88 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
89 }
90 }
91 SCTPDBG(SCTP_DEBUG_TIMER4, "Audit completes retran:%d onqueue:%d\n",
92 asoc->sent_queue_retran_cnt,
93 asoc->sent_queue_cnt);
94 }
95
96 static int
sctp_threshold_management(struct sctp_inpcb * inp,struct sctp_tcb * stcb,struct sctp_nets * net,uint16_t threshold)97 sctp_threshold_management(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
98 struct sctp_nets *net, uint16_t threshold)
99 {
100 if (net) {
101 net->error_count++;
102 SCTPDBG(SCTP_DEBUG_TIMER4, "Error count for %p now %d thresh:%d\n",
103 (void *)net, net->error_count,
104 net->failure_threshold);
105 if (net->error_count > net->failure_threshold) {
106 /* We had a threshold failure */
107 if (net->dest_state & SCTP_ADDR_REACHABLE) {
108 net->dest_state &= ~SCTP_ADDR_REACHABLE;
109 net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
110 net->dest_state &= ~SCTP_ADDR_PF;
111 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
112 stcb, 0,
113 (void *)net, SCTP_SO_NOT_LOCKED);
114 }
115 } else if ((net->pf_threshold < net->failure_threshold) &&
116 (net->error_count > net->pf_threshold)) {
117 if (!(net->dest_state & SCTP_ADDR_PF)) {
118 net->dest_state |= SCTP_ADDR_PF;
119 net->last_active = sctp_get_tick_count();
120 sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
121 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
122 inp, stcb, net,
123 SCTP_FROM_SCTP_TIMER + SCTP_LOC_1);
124 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
125 }
126 }
127 }
128 if (stcb == NULL)
129 return (0);
130
131 if (net) {
132 if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0) {
133 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
134 sctp_misc_ints(SCTP_THRESHOLD_INCR,
135 stcb->asoc.overall_error_count,
136 (stcb->asoc.overall_error_count+1),
137 SCTP_FROM_SCTP_TIMER,
138 __LINE__);
139 }
140 stcb->asoc.overall_error_count++;
141 }
142 } else {
143 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
144 sctp_misc_ints(SCTP_THRESHOLD_INCR,
145 stcb->asoc.overall_error_count,
146 (stcb->asoc.overall_error_count+1),
147 SCTP_FROM_SCTP_TIMER,
148 __LINE__);
149 }
150 stcb->asoc.overall_error_count++;
151 }
152 SCTPDBG(SCTP_DEBUG_TIMER4, "Overall error count for %p now %d thresh:%u state:%x\n",
153 (void *)&stcb->asoc, stcb->asoc.overall_error_count,
154 (uint32_t)threshold,
155 ((net == NULL) ? (uint32_t) 0 : (uint32_t) net->dest_state));
156 /*
157 * We specifically do not do >= to give the assoc one more change
158 * before we fail it.
159 */
160 if (stcb->asoc.overall_error_count > threshold) {
161 /* Abort notification sends a ULP notify */
162 struct mbuf *op_err;
163
164 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
165 "Association error counter exceeded");
166 inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_2;
167 sctp_abort_an_association(inp, stcb, op_err, true, SCTP_SO_NOT_LOCKED);
168 return (1);
169 }
170 return (0);
171 }
172
173 /*
174 * sctp_find_alternate_net() returns a non-NULL pointer as long as there
175 * exists nets, which are not being deleted.
176 */
177 struct sctp_nets *
sctp_find_alternate_net(struct sctp_tcb * stcb,struct sctp_nets * net,int mode)178 sctp_find_alternate_net(struct sctp_tcb *stcb,
179 struct sctp_nets *net,
180 int mode)
181 {
182 /* Find and return an alternate network if possible */
183 struct sctp_nets *alt, *mnet, *min_errors_net = NULL , *max_cwnd_net = NULL;
184 bool looped;
185 /* JRS 5/14/07 - Initialize min_errors to an impossible value. */
186 int min_errors = -1;
187 uint32_t max_cwnd = 0;
188
189 if (stcb->asoc.numnets == 1) {
190 /* No selection can be made. */
191 return (TAILQ_FIRST(&stcb->asoc.nets));
192 }
193 /*
194 * JRS 5/14/07 - If mode is set to 2, use the CMT PF find alternate net algorithm.
195 * This algorithm chooses the active destination (not in PF state) with the largest
196 * cwnd value. If all destinations are in PF state, unreachable, or unconfirmed, choose
197 * the desination that is in PF state with the lowest error count. In case of a tie,
198 * choose the destination that was most recently active.
199 */
200 if (mode == 2) {
201 TAILQ_FOREACH(mnet, &stcb->asoc.nets, sctp_next) {
202 /* JRS 5/14/07 - If the destination is unreachable or unconfirmed, skip it. */
203 if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) ||
204 (mnet->dest_state & SCTP_ADDR_UNCONFIRMED)) {
205 continue;
206 }
207 /*
208 * JRS 5/14/07 - If the destination is reachable but in PF state, compare
209 * the error count of the destination to the minimum error count seen thus far.
210 * Store the destination with the lower error count. If the error counts are
211 * equal, store the destination that was most recently active.
212 */
213 if (mnet->dest_state & SCTP_ADDR_PF) {
214 /*
215 * JRS 5/14/07 - If the destination under consideration is the current
216 * destination, work as if the error count is one higher. The
217 * actual error count will not be incremented until later in the
218 * t3 handler.
219 */
220 if (mnet == net) {
221 if (min_errors == -1) {
222 min_errors = mnet->error_count + 1;
223 min_errors_net = mnet;
224 } else if (mnet->error_count + 1 < min_errors) {
225 min_errors = mnet->error_count + 1;
226 min_errors_net = mnet;
227 } else if (mnet->error_count + 1 == min_errors
228 && mnet->last_active > min_errors_net->last_active) {
229 min_errors_net = mnet;
230 min_errors = mnet->error_count + 1;
231 }
232 continue;
233 } else {
234 if (min_errors == -1) {
235 min_errors = mnet->error_count;
236 min_errors_net = mnet;
237 } else if (mnet->error_count < min_errors) {
238 min_errors = mnet->error_count;
239 min_errors_net = mnet;
240 } else if (mnet->error_count == min_errors
241 && mnet->last_active > min_errors_net->last_active) {
242 min_errors_net = mnet;
243 min_errors = mnet->error_count;
244 }
245 continue;
246 }
247 }
248 /*
249 * JRS 5/14/07 - If the destination is reachable and not in PF state, compare the
250 * cwnd of the destination to the highest cwnd seen thus far. Store the
251 * destination with the higher cwnd value. If the cwnd values are equal,
252 * randomly choose one of the two destinations.
253 */
254 if (max_cwnd < mnet->cwnd) {
255 max_cwnd_net = mnet;
256 max_cwnd = mnet->cwnd;
257 } else if (max_cwnd == mnet->cwnd) {
258 uint32_t rndval;
259 uint8_t this_random;
260
261 if (stcb->asoc.hb_random_idx > 3) {
262 rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
263 memcpy(stcb->asoc.hb_random_values, &rndval, sizeof(stcb->asoc.hb_random_values));
264 this_random = stcb->asoc.hb_random_values[0];
265 stcb->asoc.hb_random_idx++;
266 stcb->asoc.hb_ect_randombit = 0;
267 } else {
268 this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
269 stcb->asoc.hb_random_idx++;
270 stcb->asoc.hb_ect_randombit = 0;
271 }
272 if (this_random % 2 == 1) {
273 max_cwnd_net = mnet;
274 max_cwnd = mnet->cwnd; /* Useless? */
275 }
276 }
277 }
278 if (max_cwnd_net == NULL) {
279 if (min_errors_net == NULL) {
280 return (net);
281 }
282 return (min_errors_net);
283 } else {
284 return (max_cwnd_net);
285 }
286 } /* JRS 5/14/07 - If mode is set to 1, use the CMT policy for choosing an alternate net. */
287 else if (mode == 1) {
288 TAILQ_FOREACH(mnet, &stcb->asoc.nets, sctp_next) {
289 if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) ||
290 (mnet->dest_state & SCTP_ADDR_UNCONFIRMED)) {
291 /*
292 * will skip ones that are not-reachable or
293 * unconfirmed
294 */
295 continue;
296 }
297 if (max_cwnd < mnet->cwnd) {
298 max_cwnd_net = mnet;
299 max_cwnd = mnet->cwnd;
300 } else if (max_cwnd == mnet->cwnd) {
301 uint32_t rndval;
302 uint8_t this_random;
303
304 if (stcb->asoc.hb_random_idx > 3) {
305 rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
306 memcpy(stcb->asoc.hb_random_values, &rndval,
307 sizeof(stcb->asoc.hb_random_values));
308 this_random = stcb->asoc.hb_random_values[0];
309 stcb->asoc.hb_random_idx = 0;
310 stcb->asoc.hb_ect_randombit = 0;
311 } else {
312 this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
313 stcb->asoc.hb_random_idx++;
314 stcb->asoc.hb_ect_randombit = 0;
315 }
316 if (this_random % 2) {
317 max_cwnd_net = mnet;
318 max_cwnd = mnet->cwnd;
319 }
320 }
321 }
322 if (max_cwnd_net) {
323 return (max_cwnd_net);
324 }
325 }
326 /* Look for an alternate net, which is active. */
327 if ((net != NULL) && ((net->dest_state & SCTP_ADDR_BEING_DELETED) == 0)) {
328 alt = TAILQ_NEXT(net, sctp_next);
329 } else {
330 alt = TAILQ_FIRST(&stcb->asoc.nets);
331 }
332 looped = false;
333 for (;;) {
334 if (alt == NULL) {
335 if (!looped) {
336 alt = TAILQ_FIRST(&stcb->asoc.nets);
337 looped = true;
338 }
339 /* Definitely out of candidates. */
340 if (alt == NULL) {
341 break;
342 }
343 }
344 #if defined(__FreeBSD__) && !defined(__Userspace__)
345 if (alt->ro.ro_nh == NULL) {
346 #else
347 if (alt->ro.ro_rt == NULL) {
348 #endif
349 if (alt->ro._s_addr) {
350 sctp_free_ifa(alt->ro._s_addr);
351 alt->ro._s_addr = NULL;
352 }
353 alt->src_addr_selected = 0;
354 }
355 if (((alt->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE) &&
356 #if defined(__FreeBSD__) && !defined(__Userspace__)
357 (alt->ro.ro_nh != NULL) &&
358 #else
359 (alt->ro.ro_rt != NULL) &&
360 #endif
361 (!(alt->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
362 (alt != net)) {
363 /* Found an alternate net, which is reachable. */
364 break;
365 }
366 alt = TAILQ_NEXT(alt, sctp_next);
367 }
368
369 if (alt == NULL) {
370 /*
371 * In case no active alternate net has been found, look for
372 * an alternate net, which is confirmed.
373 */
374 if ((net != NULL) && ((net->dest_state & SCTP_ADDR_BEING_DELETED) == 0)) {
375 alt = TAILQ_NEXT(net, sctp_next);
376 } else {
377 alt = TAILQ_FIRST(&stcb->asoc.nets);
378 }
379 looped = false;
380 for (;;) {
381 if (alt == NULL) {
382 if (!looped) {
383 alt = TAILQ_FIRST(&stcb->asoc.nets);
384 looped = true;
385 }
386 /* Definitely out of candidates. */
387 if (alt == NULL) {
388 break;
389 }
390 }
391 if ((!(alt->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
392 (alt != net)) {
393 /* Found an alternate net, which is confirmed. */
394 break;
395 }
396 alt = TAILQ_NEXT(alt, sctp_next);
397 }
398 }
399 if (alt == NULL) {
400 /*
401 * In case no confirmed alternate net has been found, just
402 * return net, if it is not being deleted. In the other case
403 * just return the first net.
404 */
405 if ((net != NULL) && ((net->dest_state & SCTP_ADDR_BEING_DELETED) == 0)) {
406 alt = net;
407 }
408 if (alt == NULL) {
409 alt = TAILQ_FIRST(&stcb->asoc.nets);
410 }
411 }
412 return (alt);
413 }
414
415 static void
416 sctp_backoff_on_timeout(struct sctp_tcb *stcb,
417 struct sctp_nets *net,
418 int win_probe,
419 int num_marked, int num_abandoned)
420 {
421 if (net->RTO == 0) {
422 if (net->RTO_measured) {
423 net->RTO = stcb->asoc.minrto;
424 } else {
425 net->RTO = stcb->asoc.initial_rto;
426 }
427 }
428 net->RTO <<= 1;
429 if (net->RTO > stcb->asoc.maxrto) {
430 net->RTO = stcb->asoc.maxrto;
431 }
432 if ((win_probe == 0) && (num_marked || num_abandoned)) {
433 /* We don't apply penalty to window probe scenarios */
434 /* JRS - Use the congestion control given in the CC module */
435 stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout(stcb, net);
436 }
437 }
438
#ifndef INVARIANTS
/*
 * Repair an out-of-order sent queue: remove every chunk whose TSN is at
 * or below the cumulative ack point (asoc->last_acked_seq), releasing
 * its data and fixing per-stream and per-association bookkeeping.
 * Only compiled when INVARIANTS is off; with INVARIANTS the caller
 * panics instead of attempting recovery.
 */
static void
sctp_recover_sent_list(struct sctp_tcb *stcb)
{
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_association *asoc;

	asoc = &stcb->asoc;
	/* Safe traversal: chunks may be removed while iterating. */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		if (SCTP_TSN_GE(asoc->last_acked_seq, chk->rec.data.tsn)) {
			SCTP_PRINTF("Found chk:%p tsn:%x <= last_acked_seq:%x\n",
			            (void *)chk, chk->rec.data.tsn, asoc->last_acked_seq);
			if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
				/* NR-acked chunks were already removed from the stream count. */
				if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
					asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
				}
			}
			/* A pending stream reset may proceed once the stream is drained. */
			if ((asoc->strmout[chk->rec.data.sid].chunks_on_queues == 0) &&
			    (asoc->strmout[chk->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
			    TAILQ_EMPTY(&asoc->strmout[chk->rec.data.sid].outqueue)) {
				asoc->trigger_reset = 1;
			}
			TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
			if (PR_SCTP_ENABLED(chk->flags)) {
				if (asoc->pr_sctp_cnt != 0)
					asoc->pr_sctp_cnt--;
			}
			if (chk->data) {
				/*sa_ignore NO_NULL_CHK*/
				/* Return the buffer space and free the mbuf chain. */
				sctp_free_bufspace(stcb, asoc, chk, 1);
				sctp_m_freem(chk->data);
				chk->data = NULL;
				if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(chk->flags)) {
					asoc->sent_queue_cnt_removeable--;
				}
			}
			asoc->sent_queue_cnt--;
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		}
	}
	/* Dump the surviving queue order for post-mortem debugging. */
	SCTP_PRINTF("after recover order is as follows\n");
	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
		SCTP_PRINTF("chk:%p TSN:%x\n", (void *)chk, chk->rec.data.tsn);
	}
}
#endif
485
/*
 * Walk the sent queue and mark chunks destined to 'net' for
 * retransmission, re-homing them to 'alt' when the two differ.  Only
 * chunks outstanding for at least one RTO are marked (unless this is a
 * window probe).  Expired PR-SCTP chunks are abandoned instead.  The
 * counts of marked and abandoned chunks are returned through
 * *num_marked and *num_abandoned.
 *
 * NOTE(review): the trailing comment says "return 1 if we only have a
 * window probe outstanding", but the function always returns 0 and its
 * caller discards the value — confirm before relying on the return.
 */
static int
sctp_mark_all_for_resend(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    struct sctp_nets *alt,
    int window_probe,
    int *num_marked,
    int *num_abandoned)
{
	/*
	 * Mark all chunks (well not all) that were sent to *net for
	 * retransmission. Move them to alt for there destination as well...
	 * We only mark chunks that have been outstanding long enough to
	 * have received feed-back.
	 */
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_nets *lnets;
	struct timeval now, min_wait, tv;
	int cur_rto;
	int cnt_abandoned;
	int audit_tf, num_mk, fir;
	unsigned int cnt_mk;
	uint32_t orig_flight, orig_tf;
	uint32_t tsnlast, tsnfirst;
	int recovery_cnt = 0;

	/* none in flight now */
	audit_tf = 0;
	fir = 0;
	/*
	 * figure out how long a data chunk must be pending before we can
	 * mark it ..
	 */
	(void)SCTP_GETTIME_TIMEVAL(&now);
	/* get cur rto in micro-seconds */
	cur_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	cur_rto *= 1000;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
		sctp_log_fr(cur_rto,
		            stcb->asoc.peers_rwnd,
		            window_probe,
		            SCTP_FR_T3_MARK_TIME);
		sctp_log_fr(net->flight_size, 0, 0, SCTP_FR_CWND_REPORT);
		sctp_log_fr(net->flight_size, net->cwnd, stcb->asoc.total_flight, SCTP_FR_CWND_REPORT);
	}
	tv.tv_sec = cur_rto / 1000000;
	tv.tv_usec = cur_rto % 1000000;
	/* min_wait = now - RTO: chunks sent after this are too fresh to mark. */
#if !(defined(__FreeBSD__) && !defined(__Userspace__))
	timersub(&now, &tv, &min_wait);
#else
	min_wait = now;
	timevalsub(&min_wait, &tv);
#endif
	if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) {
		/*
		 * if we hit here, we don't have enough seconds on the clock
		 * to account for the RTO. We just let the lower seconds be
		 * the bounds and don't worry about it. This may mean we
		 * will mark a lot more than we should.
		 */
		min_wait.tv_sec = min_wait.tv_usec = 0;
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
		sctp_log_fr(cur_rto, (uint32_t)now.tv_sec, now.tv_usec, SCTP_FR_T3_MARK_TIME);
		sctp_log_fr(0, (uint32_t)min_wait.tv_sec, min_wait.tv_usec, SCTP_FR_T3_MARK_TIME);
	}
	/*
	 * Our rwnd will be incorrect here since we are not adding back the
	 * cnt * mbuf but we will fix that down below.
	 */
	orig_flight = net->flight_size;
	orig_tf = stcb->asoc.total_flight;

	net->fast_retran_ip = 0;
	/* Now on to each chunk */
	cnt_abandoned = 0;
	num_mk = cnt_mk = 0;
	tsnfirst = tsnlast = 0;
#ifndef INVARIANTS
start_again:
#endif
	TAILQ_FOREACH_SAFE(chk, &stcb->asoc.sent_queue, sctp_next, nchk) {
		if (SCTP_TSN_GE(stcb->asoc.last_acked_seq, chk->rec.data.tsn)) {
			/* Strange case our list got out of order? */
			SCTP_PRINTF("Our list is out of order? last_acked:%x chk:%x\n",
			            (unsigned int)stcb->asoc.last_acked_seq, (unsigned int)chk->rec.data.tsn);
			recovery_cnt++;
#ifdef INVARIANTS
			panic("last acked >= chk on sent-Q");
#else
			/* Try to repair the queue, restarting the scan at most 10 times. */
			SCTP_PRINTF("Recover attempts a restart cnt:%d\n", recovery_cnt);
			sctp_recover_sent_list(stcb);
			if (recovery_cnt < 10) {
				goto start_again;
			} else {
				SCTP_PRINTF("Recovery fails %d times??\n", recovery_cnt);
			}
#endif
		}
		if ((chk->whoTo == net) && (chk->sent < SCTP_DATAGRAM_ACKED)) {
			/*
			 * found one to mark: If it is less than
			 * DATAGRAM_ACKED it MUST not be a skipped or marked
			 * TSN but instead one that is either already set
			 * for retransmission OR one that needs
			 * retransmission.
			 */

			/* validate its been outstanding long enough */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
				sctp_log_fr(chk->rec.data.tsn,
				            (uint32_t)chk->sent_rcv_time.tv_sec,
				            chk->sent_rcv_time.tv_usec,
				            SCTP_FR_T3_MARK_TIME);
			}
			if ((chk->sent_rcv_time.tv_sec > min_wait.tv_sec) && (window_probe == 0)) {
				/*
				 * we have reached a chunk that was sent
				 * some seconds past our min.. forget it we
				 * will find no more to send.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
					sctp_log_fr(0,
					            (uint32_t)chk->sent_rcv_time.tv_sec,
					            chk->sent_rcv_time.tv_usec,
					            SCTP_FR_T3_STOPPED);
				}
				continue;
			} else if ((chk->sent_rcv_time.tv_sec == min_wait.tv_sec) &&
			           (window_probe == 0)) {
				/*
				 * we must look at the micro seconds to
				 * know.
				 */
				if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) {
					/*
					 * ok it was sent after our boundary
					 * time.
					 */
					continue;
				}
			}
			if (stcb->asoc.prsctp_supported && PR_SCTP_TTL_ENABLED(chk->flags)) {
				/* Is it expired? */
#if !(defined(__FreeBSD__) && !defined(__Userspace__))
				if (timercmp(&now, &chk->rec.data.timetodrop, >)) {
#else
				if (timevalcmp(&now, &chk->rec.data.timetodrop, >)) {
#endif
					/* Yes so drop it */
					if (chk->data) {
						(void)sctp_release_pr_sctp_chunk(stcb,
						                                 chk,
						                                 1,
						                                 SCTP_SO_NOT_LOCKED);
						cnt_abandoned++;
					}
					continue;
				}
			}
			if (stcb->asoc.prsctp_supported && PR_SCTP_RTX_ENABLED(chk->flags)) {
				/* Has it been retransmitted tv_sec times? */
				if (chk->snd_count > chk->rec.data.timetodrop.tv_sec) {
					if (chk->data) {
						(void)sctp_release_pr_sctp_chunk(stcb,
						                                 chk,
						                                 1,
						                                 SCTP_SO_NOT_LOCKED);
						cnt_abandoned++;
					}
					continue;
				}
			}
			if (chk->sent < SCTP_DATAGRAM_RESEND) {
				/* Newly marked: update flight/rwnd accounting. */
				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				num_mk++;
				if (fir == 0) {
					fir = 1;
					tsnfirst = chk->rec.data.tsn;
				}
				tsnlast = chk->rec.data.tsn;
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
					sctp_log_fr(chk->rec.data.tsn, chk->snd_count,
					            0, SCTP_FR_T3_MARKED);
				}

				if (chk->rec.data.chunk_was_revoked) {
					/* deflate the cwnd */
					chk->whoTo->cwnd -= chk->book_size;
					chk->rec.data.chunk_was_revoked = 0;
				}
				net->marked_retrans++;
				stcb->asoc.marked_retrans++;
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
					sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND_TO,
					               chk->whoTo->flight_size,
					               chk->book_size,
					               (uint32_t)(uintptr_t)chk->whoTo,
					               chk->rec.data.tsn);
				}
				sctp_flight_size_decrease(chk);
				sctp_total_flight_decrease(stcb, chk);
				stcb->asoc.peers_rwnd += chk->send_size;
				stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
			}
			chk->sent = SCTP_DATAGRAM_RESEND;
			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
			SCTP_STAT_INCR(sctps_markedretrans);

			/* reset the TSN for striking and other FR stuff */
			chk->rec.data.doing_fast_retransmit = 0;
			/* Clear any time so NO RTT is being done */

			if (chk->do_rtt) {
				if (chk->whoTo->rto_needed == 0) {
					chk->whoTo->rto_needed = 1;
				}
			}
			chk->do_rtt = 0;
			if (alt != net) {
				/* Re-home the chunk to the alternate destination. */
				sctp_free_remote_addr(chk->whoTo);
				chk->no_fr_allowed = 1;
				chk->whoTo = alt;
				atomic_add_int(&alt->ref_count, 1);
			} else {
				chk->no_fr_allowed = 0;
				if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
					chk->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
				} else {
					chk->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.tsn;
				}
			}
			/* CMT: Do not allow FRs on retransmitted TSNs.
			 */
			if (stcb->asoc.sctp_cmt_on_off > 0) {
				chk->no_fr_allowed = 1;
			}
#ifdef THIS_SHOULD_NOT_BE_DONE
		} else if (chk->sent == SCTP_DATAGRAM_ACKED) {
			/* remember highest acked one */
			/*
			 * NOTE(review): could_be_sent has no declaration in
			 * the visible code; presumably it is declared under
			 * the same ifdef elsewhere — confirm before enabling
			 * THIS_SHOULD_NOT_BE_DONE.
			 */
			could_be_sent = chk;
#endif
		}
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			cnt_mk++;
		}
	}
	if ((orig_flight - net->flight_size) != (orig_tf - stcb->asoc.total_flight)) {
		/* we did not subtract the same things? */
		audit_tf = 1;
	}

	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
		sctp_log_fr(tsnfirst, tsnlast, num_mk, SCTP_FR_T3_TIMEOUT);
	}
#ifdef SCTP_DEBUG
	if (num_mk) {
		SCTPDBG(SCTP_DEBUG_TIMER1, "LAST TSN marked was %x\n",
		        tsnlast);
		SCTPDBG(SCTP_DEBUG_TIMER1, "Num marked for retransmission was %d peer-rwd:%u\n",
		        num_mk,
		        stcb->asoc.peers_rwnd);
	}
#endif
	*num_marked = num_mk;
	*num_abandoned = cnt_abandoned;
	/* Now check for a ECN Echo that may be stranded And
	 * include the cnt_mk'd to have all resends in the
	 * control queue.
	 */
	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			cnt_mk++;
		}
		if ((chk->whoTo == net) &&
		    (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
			/* Move stranded ECN-Echo chunks to the alternate too. */
			sctp_free_remote_addr(chk->whoTo);
			chk->whoTo = alt;
			if (chk->sent != SCTP_DATAGRAM_RESEND) {
				chk->sent = SCTP_DATAGRAM_RESEND;
				chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				cnt_mk++;
			}
			atomic_add_int(&alt->ref_count, 1);
		}
	}
#ifdef THIS_SHOULD_NOT_BE_DONE
	if ((stcb->asoc.sent_queue_retran_cnt == 0) && (could_be_sent)) {
		/* fix it so we retransmit the highest acked anyway */
		sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
		cnt_mk++;
		could_be_sent->sent = SCTP_DATAGRAM_RESEND;
	}
#endif
	if (stcb->asoc.sent_queue_retran_cnt != cnt_mk) {
#ifdef INVARIANTS
		SCTP_PRINTF("Local Audit says there are %d for retran asoc cnt:%d we marked:%d this time\n",
		            cnt_mk, stcb->asoc.sent_queue_retran_cnt, num_mk);
#endif
#ifndef SCTP_AUDITING_ENABLED
		stcb->asoc.sent_queue_retran_cnt = cnt_mk;
#endif
	}
	if (audit_tf) {
		/* Flight accounting went inconsistent: rebuild it from scratch. */
		SCTPDBG(SCTP_DEBUG_TIMER4,
		        "Audit total flight due to negative value net:%p\n",
		        (void *)net);
		stcb->asoc.total_flight = 0;
		stcb->asoc.total_flight_count = 0;
		/* Clear all networks flight size */
		TAILQ_FOREACH(lnets, &stcb->asoc.nets, sctp_next) {
			lnets->flight_size = 0;
			SCTPDBG(SCTP_DEBUG_TIMER4,
			        "Net:%p c-f cwnd:%d ssthresh:%d\n",
			        (void *)lnets, lnets->cwnd, lnets->ssthresh);
		}
		TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
			if (chk->sent < SCTP_DATAGRAM_RESEND) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
					sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
					               chk->whoTo->flight_size,
					               chk->book_size,
					               (uint32_t)(uintptr_t)chk->whoTo,
					               chk->rec.data.tsn);
				}

				sctp_flight_size_increase(chk);
				sctp_total_flight_increase(stcb, chk);
			}
		}
	}
	/* We return 1 if we only have a window probe outstanding */
	return (0);
}
821
/*
 * Handle expiry of the T3-rxt retransmission timer for 'net': run
 * threshold management, select an alternate destination, mark
 * outstanding chunks for retransmission, and back off the timer and
 * cwnd.  Returns 1 if threshold management destroyed the association,
 * 0 otherwise.
 */
int
sctp_t3rxt_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;
	int win_probe, num_mk, num_abandoned;

	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
		sctp_log_fr(0, 0, 0, SCTP_FR_T3_TIMEOUT);
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
		struct sctp_nets *lnet;

		/* Log the cwnd of every net, flagging the one that timed out. */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
			if (net == lnet) {
				sctp_log_cwnd(stcb, lnet, 1, SCTP_CWND_LOG_FROM_T3);
			} else {
				sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_LOG_FROM_T3);
			}
		}
	}
	/* Find an alternate and mark those for retransmission */
	/* Zero peer rwnd with less than one MTU in flight means window probe. */
	if ((stcb->asoc.peers_rwnd == 0) &&
	    (stcb->asoc.total_flight < net->mtu)) {
		SCTP_STAT_INCR(sctps_timowindowprobe);
		win_probe = 1;
	} else {
		win_probe = 0;
	}

	if (win_probe == 0) {
		/* We don't do normal threshold management on window probes */
		if (sctp_threshold_management(inp, stcb, net,
		                              stcb->asoc.max_send_times)) {
			/* Association was destroyed */
			return (1);
		} else {
			if (net != stcb->asoc.primary_destination) {
				/* send a immediate HB if our RTO is stale */
				struct timeval now;
				uint32_t ms_goneby;

				(void)SCTP_GETTIME_TIMEVAL(&now);
				if (net->last_sent_time.tv_sec) {
					ms_goneby = (uint32_t)(now.tv_sec - net->last_sent_time.tv_sec) * 1000;
				} else {
					ms_goneby = 0;
				}
				if ((net->dest_state & SCTP_ADDR_PF) == 0) {
					if ((ms_goneby > net->RTO) || (net->RTO == 0)) {
						/*
						 * no recent feed back in an RTO or
						 * more, request a RTT update
						 */
						sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
					}
				}
			}
		}
	} else {
		/*
		 * For a window probe we don't penalize the net's but only
		 * the association. This may fail it if SACKs are not coming
		 * back. If sack's are coming with rwnd locked at 0, we will
		 * continue to hold things waiting for rwnd to raise
		 */
		if (sctp_threshold_management(inp, stcb, NULL,
		                              stcb->asoc.max_send_times)) {
			/* Association was destroyed */
			return (1);
		}
	}
	/* Pick the alternate destination per the configured CMT policy. */
	if (stcb->asoc.sctp_cmt_on_off > 0) {
		if (net->pf_threshold < net->failure_threshold) {
			alt = sctp_find_alternate_net(stcb, net, 2);
		} else {
			/*
			 * CMT: Using RTX_SSTHRESH policy for CMT.
			 * If CMT is being used, then pick dest with
			 * largest ssthresh for any retransmission.
			 */
			alt = sctp_find_alternate_net(stcb, net, 1);
			/*
			 * CUCv2: If a different dest is picked for
			 * the retransmission, then new
			 * (rtx-)pseudo_cumack needs to be tracked
			 * for orig dest. Let CUCv2 track new (rtx-)
			 * pseudo-cumack always.
			 */
			net->find_pseudo_cumack = 1;
			net->find_rtx_pseudo_cumack = 1;
		}
	} else {
		alt = sctp_find_alternate_net(stcb, net, 0);
	}

	num_mk = 0;
	num_abandoned = 0;
	(void)sctp_mark_all_for_resend(stcb, net, alt, win_probe,
	                               &num_mk, &num_abandoned);
	/* FR Loss recovery just ended with the T3. */
	stcb->asoc.fast_retran_loss_recovery = 0;

	/* CMT FR loss recovery ended with the T3 */
	net->fast_retran_loss_recovery = 0;
	if ((stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) &&
	    (net->flight_size == 0)) {
		(*stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins)(stcb, net);
	}

	/*
	 * setup the sat loss recovery that prevents satellite cwnd advance.
	 */
	stcb->asoc.sat_t3_loss_recovery = 1;
	stcb->asoc.sat_t3_recovery_tsn = stcb->asoc.sending_seq;

	/* Backoff the timer and cwnd */
	sctp_backoff_on_timeout(stcb, net, win_probe, num_mk, num_abandoned);
	if ((!(net->dest_state & SCTP_ADDR_REACHABLE)) ||
	    (net->dest_state & SCTP_ADDR_PF)) {
		/* Move all pending over too */
		sctp_move_chunks_from_net(stcb, net);

		/* Get the address that failed, to
		 * force a new src address selecton and
		 * a route allocation.
		 */
		if (net->ro._s_addr) {
			sctp_free_ifa(net->ro._s_addr);
			net->ro._s_addr = NULL;
		}
		net->src_addr_selected = 0;

		/* Force a route allocation too */
#if defined(__FreeBSD__) && !defined(__Userspace__)
		RO_NHFREE(&net->ro);
#else
		if (net->ro.ro_rt) {
			RTFREE(net->ro.ro_rt);
			net->ro.ro_rt = NULL;
		}
#endif

		/* Was it our primary? */
		if ((stcb->asoc.primary_destination == net) && (alt != net)) {
			/*
			 * Yes, note it as such and find an alternate note:
			 * this means HB code must use this to resent the
			 * primary if it goes active AND if someone does a
			 * change-primary then this flag must be cleared
			 * from any net structures.
			 */
			if (stcb->asoc.alternate) {
				sctp_free_remote_addr(stcb->asoc.alternate);
			}
			stcb->asoc.alternate = alt;
			atomic_add_int(&stcb->asoc.alternate->ref_count, 1);
		}
	}
	/*
	 * Special case for cookie-echo'ed case, we don't do output but must
	 * await the COOKIE-ACK before retransmission
	 */
	if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED) {
		/*
		 * Here we just reset the timer and start again since we
		 * have not established the asoc
		 */
		sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
		return (0);
	}
	if (stcb->asoc.prsctp_supported) {
		struct sctp_tmit_chunk *lchk;

		lchk = sctp_try_advance_peer_ack_point(stcb, &stcb->asoc);
		/* C3. See if we need to send a Fwd-TSN */
		if (SCTP_TSN_GT(stcb->asoc.advanced_peer_ack_point, stcb->asoc.last_acked_seq)) {
			send_forward_tsn(stcb, &stcb->asoc);
			/* Find the first remaining chunk that has a destination. */
			for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
				if (lchk->whoTo != NULL) {
					break;
				}
			}
			if (lchk != NULL) {
				/* Assure a timer is up */
				sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, lchk->whoTo);
			}
		}
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
		sctp_log_cwnd(stcb, net, net->cwnd, SCTP_CWND_LOG_FROM_RTX);
	}
	return (0);
}
1017
1018 int
1019 sctp_t1init_timer(struct sctp_inpcb *inp,
1020 struct sctp_tcb *stcb,
1021 struct sctp_nets *net)
1022 {
1023 /* bump the thresholds */
1024 if (stcb->asoc.delayed_connection) {
1025 /*
1026 * special hook for delayed connection. The library did NOT
1027 * complete the rest of its sends.
1028 */
1029 stcb->asoc.delayed_connection = 0;
1030 sctp_send_initiate(inp, stcb, SCTP_SO_NOT_LOCKED);
1031 return (0);
1032 }
1033 if (SCTP_GET_STATE(stcb) != SCTP_STATE_COOKIE_WAIT) {
1034 return (0);
1035 }
1036 if (sctp_threshold_management(inp, stcb, net,
1037 stcb->asoc.max_init_times)) {
1038 /* Association was destroyed */
1039 return (1);
1040 }
1041 stcb->asoc.dropped_special_cnt = 0;
1042 sctp_backoff_on_timeout(stcb, stcb->asoc.primary_destination, 1, 0, 0);
1043 if (stcb->asoc.initial_init_rto_max < net->RTO) {
1044 net->RTO = stcb->asoc.initial_init_rto_max;
1045 }
1046 if (stcb->asoc.numnets > 1) {
1047 /* If we have more than one addr use it */
1048 struct sctp_nets *alt;
1049
1050 alt = sctp_find_alternate_net(stcb, stcb->asoc.primary_destination, 0);
1051 if (alt != stcb->asoc.primary_destination) {
1052 sctp_move_chunks_from_net(stcb, stcb->asoc.primary_destination);
1053 stcb->asoc.primary_destination = alt;
1054 }
1055 }
1056 /* Send out a new init */
1057 sctp_send_initiate(inp, stcb, SCTP_SO_NOT_LOCKED);
1058 return (0);
1059 }
1060
1061 /*
1062 * For cookie and asconf we actually need to find and mark for resend, then
1063 * increment the resend counter (after all the threshold management stuff of
1064 * course).
1065 */
1066 int
1067 sctp_cookie_timer(struct sctp_inpcb *inp,
1068 struct sctp_tcb *stcb,
1069 struct sctp_nets *net SCTP_UNUSED)
1070 {
1071 struct sctp_nets *alt;
1072 struct sctp_tmit_chunk *cookie;
1073
1074 /* first before all else we must find the cookie */
1075 TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue, sctp_next) {
1076 if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
1077 break;
1078 }
1079 }
1080 if (cookie == NULL) {
1081 if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED) {
1082 /* FOOBAR! */
1083 struct mbuf *op_err;
1084
1085 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
1086 "Cookie timer expired, but no cookie");
1087 inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_3;
1088 sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
1089 } else {
1090 #ifdef INVARIANTS
1091 panic("Cookie timer expires in wrong state?");
1092 #else
1093 SCTP_PRINTF("Strange in state %d not cookie-echoed yet c-e timer expires?\n", SCTP_GET_STATE(stcb));
1094 return (0);
1095 #endif
1096 }
1097 return (0);
1098 }
1099 /* Ok we found the cookie, threshold management next */
1100 if (sctp_threshold_management(inp, stcb, cookie->whoTo,
1101 stcb->asoc.max_init_times)) {
1102 /* Assoc is over */
1103 return (1);
1104 }
1105 /*
1106 * Cleared threshold management, now lets backoff the address
1107 * and select an alternate
1108 */
1109 stcb->asoc.dropped_special_cnt = 0;
1110 sctp_backoff_on_timeout(stcb, cookie->whoTo, 1, 0, 0);
1111 alt = sctp_find_alternate_net(stcb, cookie->whoTo, 0);
1112 if (alt != cookie->whoTo) {
1113 sctp_free_remote_addr(cookie->whoTo);
1114 cookie->whoTo = alt;
1115 atomic_add_int(&alt->ref_count, 1);
1116 }
1117 /* Now mark the retran info */
1118 if (cookie->sent != SCTP_DATAGRAM_RESEND) {
1119 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
1120 }
1121 cookie->sent = SCTP_DATAGRAM_RESEND;
1122 cookie->flags |= CHUNK_FLAGS_FRAGMENT_OK;
1123 /*
1124 * Now call the output routine to kick out the cookie again, Note we
1125 * don't mark any chunks for retran so that FR will need to kick in
1126 * to move these (or a send timer).
1127 */
1128 return (0);
1129 }
1130
/*
 * Timer handler for an outstanding STREAM-RESET request: after
 * threshold management, back off the path, re-target the request (and
 * any ECN-ECHO chunks stranded on the same destination) at an
 * alternate, mark it for retransmission and restart the timer.
 * Returns 1 if the association was destroyed, 0 otherwise.
 */
int
sctp_strreset_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
{
	struct sctp_nets *alt, *net;
	struct sctp_tmit_chunk *strrst = NULL, *chk = NULL;

	if (stcb->asoc.stream_reset_outstanding == 0) {
		return (0);
	}
	/* find the existing STRRESET, we use the seq number we sent out on */
	(void)sctp_find_stream_reset(stcb, stcb->asoc.str_reset_seq_out, &strrst);
	if (strrst == NULL) {
		return (0);
	}
	net = strrst->whoTo;
	/* do threshold management */
	if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
		/* Assoc is over */
		return (1);
	}
	/*
	 * Cleared threshold management, now lets backoff the address
	 * and select an alternate
	 */
	sctp_backoff_on_timeout(stcb, net, 1, 0, 0);
	alt = sctp_find_alternate_net(stcb, net, 0);
	/*
	 * Re-target the request at the alternate and take a reference on
	 * it; the reference strrst held on the old destination (net) is
	 * released further down, once we are finished using net.
	 */
	strrst->whoTo = alt;
	atomic_add_int(&alt->ref_count, 1);

	/* See if a ECN Echo is also stranded */
	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
		if ((chk->whoTo == net) &&
		    (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
			/* Swap this chunk's destination reference to alt. */
			sctp_free_remote_addr(chk->whoTo);
			if (chk->sent != SCTP_DATAGRAM_RESEND) {
				chk->sent = SCTP_DATAGRAM_RESEND;
				chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
			}
			chk->whoTo = alt;
			atomic_add_int(&alt->ref_count, 1);
		}
	}
	if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
		/*
		 * If the address went un-reachable, we need to move to
		 * alternates for ALL chk's in queue
		 */
		sctp_move_chunks_from_net(stcb, net);
	}
	/* Release the reference strrst used to hold on the old net. */
	sctp_free_remote_addr(net);

	/* mark the retran info */
	if (strrst->sent != SCTP_DATAGRAM_RESEND)
		sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
	strrst->sent = SCTP_DATAGRAM_RESEND;
	strrst->flags |= CHUNK_FLAGS_FRAGMENT_OK;

	/* restart the timer */
	sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, inp, stcb, alt);
	return (0);
}
1193
1194 int
1195 sctp_asconf_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1196 struct sctp_nets *net)
1197 {
1198 struct sctp_nets *alt;
1199 struct sctp_tmit_chunk *asconf, *chk;
1200
1201 /* is this a first send, or a retransmission? */
1202 if (TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) {
1203 /* compose a new ASCONF chunk and send it */
1204 sctp_send_asconf(stcb, net, SCTP_ADDR_NOT_LOCKED);
1205 } else {
1206 /*
1207 * Retransmission of the existing ASCONF is needed
1208 */
1209
1210 /* find the existing ASCONF */
1211 asconf = TAILQ_FIRST(&stcb->asoc.asconf_send_queue);
1212 if (asconf == NULL) {
1213 return (0);
1214 }
1215 net = asconf->whoTo;
1216 /* do threshold management */
1217 if (sctp_threshold_management(inp, stcb, net,
1218 stcb->asoc.max_send_times)) {
1219 /* Assoc is over */
1220 return (1);
1221 }
1222 if (asconf->snd_count > stcb->asoc.max_send_times) {
1223 /*
1224 * Something is rotten: our peer is not responding to
1225 * ASCONFs but apparently is to other chunks. i.e. it
1226 * is not properly handling the chunk type upper bits.
1227 * Mark this peer as ASCONF incapable and cleanup.
1228 */
1229 SCTPDBG(SCTP_DEBUG_TIMER1, "asconf_timer: Peer has not responded to our repeated ASCONFs\n");
1230 sctp_asconf_cleanup(stcb);
1231 return (0);
1232 }
1233 /*
1234 * cleared threshold management, so now backoff the net and
1235 * select an alternate
1236 */
1237 sctp_backoff_on_timeout(stcb, net, 1, 0, 0);
1238 alt = sctp_find_alternate_net(stcb, net, 0);
1239 if (asconf->whoTo != alt) {
1240 asconf->whoTo = alt;
1241 atomic_add_int(&alt->ref_count, 1);
1242 }
1243
1244 /* See if an ECN Echo is also stranded */
1245 TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
1246 if ((chk->whoTo == net) &&
1247 (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
1248 sctp_free_remote_addr(chk->whoTo);
1249 chk->whoTo = alt;
1250 if (chk->sent != SCTP_DATAGRAM_RESEND) {
1251 chk->sent = SCTP_DATAGRAM_RESEND;
1252 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
1253 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
1254 }
1255 atomic_add_int(&alt->ref_count, 1);
1256 }
1257 }
1258 TAILQ_FOREACH(chk, &stcb->asoc.asconf_send_queue, sctp_next) {
1259 if (chk->whoTo != alt) {
1260 sctp_free_remote_addr(chk->whoTo);
1261 chk->whoTo = alt;
1262 atomic_add_int(&alt->ref_count, 1);
1263 }
1264 if (asconf->sent != SCTP_DATAGRAM_RESEND && chk->sent != SCTP_DATAGRAM_UNSENT)
1265 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
1266 chk->sent = SCTP_DATAGRAM_RESEND;
1267 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
1268 }
1269 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
1270 /*
1271 * If the address went un-reachable, we need to move
1272 * to the alternate for ALL chunks in queue
1273 */
1274 sctp_move_chunks_from_net(stcb, net);
1275 }
1276 sctp_free_remote_addr(net);
1277
1278 /* mark the retran info */
1279 if (asconf->sent != SCTP_DATAGRAM_RESEND)
1280 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
1281 asconf->sent = SCTP_DATAGRAM_RESEND;
1282 asconf->flags |= CHUNK_FLAGS_FRAGMENT_OK;
1283
1284 /* send another ASCONF if any and we can do */
1285 sctp_send_asconf(stcb, alt, SCTP_ADDR_NOT_LOCKED);
1286 }
1287 return (0);
1288 }
1289
1290 /* Mobility adaptation */
1291 void
1292 sctp_delete_prim_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
1293 {
1294 if (stcb->asoc.deleted_primary == NULL) {
1295 SCTPDBG(SCTP_DEBUG_ASCONF1, "delete_prim_timer: deleted_primary is not stored...\n");
1296 sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED);
1297 return;
1298 }
1299 SCTPDBG(SCTP_DEBUG_ASCONF1, "delete_prim_timer: finished to keep deleted primary ");
1300 SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, &stcb->asoc.deleted_primary->ro._l_addr.sa);
1301 sctp_free_remote_addr(stcb->asoc.deleted_primary);
1302 stcb->asoc.deleted_primary = NULL;
1303 sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED);
1304 return;
1305 }
1306
1307 /*
1308 * For the shutdown and shutdown-ack, we do not keep one around on the
1309 * control queue. This means we must generate a new one and call the general
1310 * chunk output routine, AFTER having done threshold management.
1311 * It is assumed that net is non-NULL.
1312 */
1313 int
1314 sctp_shutdown_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1315 struct sctp_nets *net)
1316 {
1317 struct sctp_nets *alt;
1318
1319 /* first threshold management */
1320 if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
1321 /* Assoc is over */
1322 return (1);
1323 }
1324 sctp_backoff_on_timeout(stcb, net, 1, 0, 0);
1325 /* second select an alternative */
1326 alt = sctp_find_alternate_net(stcb, net, 0);
1327
1328 /* third generate a shutdown into the queue for out net */
1329 sctp_send_shutdown(stcb, alt);
1330
1331 /* fourth restart timer */
1332 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, alt);
1333 return (0);
1334 }
1335
1336 int
1337 sctp_shutdownack_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1338 struct sctp_nets *net)
1339 {
1340 struct sctp_nets *alt;
1341
1342 /* first threshold management */
1343 if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
1344 /* Assoc is over */
1345 return (1);
1346 }
1347 sctp_backoff_on_timeout(stcb, net, 1, 0, 0);
1348 /* second select an alternative */
1349 alt = sctp_find_alternate_net(stcb, net, 0);
1350
1351 /* third generate a shutdown into the queue for out net */
1352 sctp_send_shutdown_ack(stcb, alt);
1353
1354 /* fourth restart timer */
1355 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, alt);
1356 return (0);
1357 }
1358
/*
 * Consistency audit used when total_output_queue_size is non-zero but
 * the send/sent queues are empty: repairs the retran counter, the
 * stream scheduler bookkeeping and the queue-size accounting, and
 * kicks the output path if data is actually still queued on streams.
 * Called with the TCB locked; takes the send lock internally.
 */
static void
sctp_audit_stream_queues_for_size(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
{
	struct sctp_stream_queue_pending *sp;
	unsigned int i, chks_in_queue = 0;
	int being_filled = 0;

	KASSERT(inp != NULL, ("inp is NULL"));
	KASSERT(stcb != NULL, ("stcb is NULL"));

	SCTP_TCB_SEND_LOCK(stcb);
	KASSERT(TAILQ_EMPTY(&stcb->asoc.send_queue), ("send_queue not empty"));
	KASSERT(TAILQ_EMPTY(&stcb->asoc.sent_queue), ("sent_queue not empty"));

	/* With both queues empty the retran count must be zero; repair it. */
	if (stcb->asoc.sent_queue_retran_cnt) {
		SCTP_PRINTF("Hmm, sent_queue_retran_cnt is non-zero %d\n",
		    stcb->asoc.sent_queue_retran_cnt);
		stcb->asoc.sent_queue_retran_cnt = 0;
	}
	if (stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, &stcb->asoc)) {
		/* No stream scheduler information, initialize scheduler */
		stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc);
		if (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, &stcb->asoc)) {
			/* yep, we lost a stream or two */
			SCTP_PRINTF("Found additional streams NOT managed by scheduler, corrected\n");
		} else {
			/* no streams lost */
			stcb->asoc.total_output_queue_size = 0;
		}
	}
	/* Check to see if some data queued, if so report it */
	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
		if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
			TAILQ_FOREACH(sp, &stcb->asoc.strmout[i].outqueue, next) {
				if (sp->msg_is_complete)
					being_filled++;
				chks_in_queue++;
			}
		}
	}
	/* Diagnostic: counted chunks should match the cached count. */
	if (chks_in_queue != stcb->asoc.stream_queue_cnt) {
		SCTP_PRINTF("Hmm, stream queue cnt at %d I counted %d in stream out wheel\n",
		    stcb->asoc.stream_queue_cnt, chks_in_queue);
	}
	SCTP_TCB_SEND_UNLOCK(stcb);
	if (chks_in_queue) {
		/* call the output queue function */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
		    (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
			/*
			 * Probably should go in and make it go back through
			 * and add fragments allowed
			 */
			if (being_filled == 0) {
				SCTP_PRINTF("Still nothing moved %d chunks are stuck\n",
				    chks_in_queue);
			}
		}
	} else {
		/* Nothing anywhere: the cached size was stale, zero it. */
		SCTP_PRINTF("Found no chunks on any queue tot:%lu\n",
		    (u_long)stcb->asoc.total_output_queue_size);
		stcb->asoc.total_output_queue_size = 0;
	}
}
1424
1425 int
1426 sctp_heartbeat_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1427 struct sctp_nets *net)
1428 {
1429 uint8_t net_was_pf;
1430
1431 if (net->dest_state & SCTP_ADDR_PF) {
1432 net_was_pf = 1;
1433 } else {
1434 net_was_pf = 0;
1435 }
1436 if (net->hb_responded == 0) {
1437 if (net->ro._s_addr) {
1438 /* Invalidate the src address if we did not get
1439 * a response last time.
1440 */
1441 sctp_free_ifa(net->ro._s_addr);
1442 net->ro._s_addr = NULL;
1443 net->src_addr_selected = 0;
1444 }
1445 sctp_backoff_on_timeout(stcb, net, 1, 0, 0);
1446 if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
1447 /* Assoc is over */
1448 return (1);
1449 }
1450 }
1451 /* Zero PBA, if it needs it */
1452 if (net->partial_bytes_acked) {
1453 net->partial_bytes_acked = 0;
1454 }
1455 if ((stcb->asoc.total_output_queue_size > 0) &&
1456 (TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
1457 (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
1458 sctp_audit_stream_queues_for_size(inp, stcb);
1459 }
1460 if (!(net->dest_state & SCTP_ADDR_NOHB) &&
1461 !((net_was_pf == 0) && (net->dest_state & SCTP_ADDR_PF))) {
1462 /* when move to PF during threshold mangement, a HB has been
1463 queued in that routine */
1464 uint32_t ms_gone_by;
1465
1466 if ((net->last_sent_time.tv_sec > 0) ||
1467 (net->last_sent_time.tv_usec > 0)) {
1468 #if defined(__FreeBSD__) && !defined(__Userspace__)
1469 struct timeval diff;
1470
1471 SCTP_GETTIME_TIMEVAL(&diff);
1472 timevalsub(&diff, &net->last_sent_time);
1473 #else
1474 struct timeval diff, now;
1475
1476 SCTP_GETTIME_TIMEVAL(&now);
1477 timersub(&now, &net->last_sent_time, &diff);
1478 #endif
1479 ms_gone_by = (uint32_t)(diff.tv_sec * 1000) +
1480 (uint32_t)(diff.tv_usec / 1000);
1481 } else {
1482 ms_gone_by = 0xffffffff;
1483 }
1484 if ((ms_gone_by >= net->heart_beat_delay) ||
1485 (net->dest_state & SCTP_ADDR_PF)) {
1486 sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
1487 }
1488 }
1489 return (0);
1490 }
1491
1492 void
1493 sctp_pathmtu_timer(struct sctp_inpcb *inp,
1494 struct sctp_tcb *stcb,
1495 struct sctp_nets *net)
1496 {
1497 uint32_t next_mtu, mtu;
1498
1499 next_mtu = sctp_get_next_mtu(net->mtu);
1500
1501 if ((next_mtu > net->mtu) && (net->port == 0)) {
1502 if ((net->src_addr_selected == 0) ||
1503 (net->ro._s_addr == NULL) ||
1504 (net->ro._s_addr->localifa_flags & SCTP_BEING_DELETED)) {
1505 if ((net->ro._s_addr != NULL) && (net->ro._s_addr->localifa_flags & SCTP_BEING_DELETED)) {
1506 sctp_free_ifa(net->ro._s_addr);
1507 net->ro._s_addr = NULL;
1508 net->src_addr_selected = 0;
1509 } else if (net->ro._s_addr == NULL) {
1510 #if defined(INET6) && defined(SCTP_EMBEDDED_V6_SCOPE)
1511 if (net->ro._l_addr.sa.sa_family == AF_INET6) {
1512 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
1513 /* KAME hack: embed scopeid */
1514 #if defined(__APPLE__) && !defined(__Userspace__)
1515 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
1516 (void)in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL);
1517 #else
1518 (void)in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL);
1519 #endif
1520 #elif defined(SCTP_KAME)
1521 (void)sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone));
1522 #else
1523 (void)in6_embedscope(&sin6->sin6_addr, sin6);
1524 #endif
1525 }
1526 #endif
1527
1528 net->ro._s_addr = sctp_source_address_selection(inp,
1529 stcb,
1530 (sctp_route_t *)&net->ro,
1531 net, 0, stcb->asoc.vrf_id);
1532 #if defined(INET6) && defined(SCTP_EMBEDDED_V6_SCOPE)
1533 if (net->ro._l_addr.sa.sa_family == AF_INET6) {
1534 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
1535 #ifdef SCTP_KAME
1536 (void)sa6_recoverscope(sin6);
1537 #else
1538 (void)in6_recoverscope(sin6, &sin6->sin6_addr, NULL);
1539 #endif /* SCTP_KAME */
1540 }
1541 #endif /* INET6 */
1542 }
1543 if (net->ro._s_addr)
1544 net->src_addr_selected = 1;
1545 }
1546 if (net->ro._s_addr) {
1547 #if defined(__FreeBSD__) && !defined(__Userspace__)
1548 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._s_addr.sa, net->ro.ro_nh);
1549 #else
1550 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._s_addr.sa, net->ro.ro_rt);
1551 #endif
1552 #if defined(INET) || defined(INET6)
1553 if (net->port) {
1554 mtu -= sizeof(struct udphdr);
1555 }
1556 #endif
1557 if (mtu > next_mtu) {
1558 net->mtu = next_mtu;
1559 } else {
1560 net->mtu = mtu;
1561 }
1562 }
1563 }
1564 /* restart the timer */
1565 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
1566 }
1567
/*
 * Timer handler for auto-close: if the association has been idle (no
 * send or receive activity) for at least sctp_autoclose_ticks, flush
 * any pending output and, if the queues are clean, start a graceful
 * SHUTDOWN.  Otherwise re-arm the timer for the remaining time.
 */
void
sctp_autoclose_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
{
	struct timeval tn, *tim_touse;
	struct sctp_association *asoc;
	uint32_t ticks_gone_by;

	(void)SCTP_GETTIME_TIMEVAL(&tn);
	if (stcb->asoc.sctp_autoclose_ticks > 0 &&
	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
		/* Auto close is on */
		asoc = &stcb->asoc;
		/* pick the time to use */
		/* Idle time is measured from the most recent activity,
		 * whether that was a receive or a send. */
		if (asoc->time_last_rcvd.tv_sec >
		    asoc->time_last_sent.tv_sec) {
			tim_touse = &asoc->time_last_rcvd;
		} else {
			tim_touse = &asoc->time_last_sent;
		}
		/* Now has long enough transpired to autoclose? */
		ticks_gone_by = sctp_secs_to_ticks((uint32_t)(tn.tv_sec - tim_touse->tv_sec));
		if (ticks_gone_by >= asoc->sctp_autoclose_ticks) {
			/*
			 * autoclose time has hit, call the output routine,
			 * which should do nothing just to be SURE we don't
			 * have hanging data. We can then safely check the
			 * queues and know that we are clear to send
			 * shutdown
			 */
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
			/* Are we clean? */
			if (TAILQ_EMPTY(&asoc->send_queue) &&
			    TAILQ_EMPTY(&asoc->sent_queue)) {
				/*
				 * there is nothing queued to send, so I'm
				 * done...
				 */
				if (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) {
					/* only send SHUTDOWN 1st time thru */
					struct sctp_nets *net;

					/* Leaving an established state: drop the gauge. */
					if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
					    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
						SCTP_STAT_DECR_GAUGE32(sctps_currestab);
					}
					SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
					sctp_stop_timers_for_shutdown(stcb);
					/* Prefer the alternate destination if one is set. */
					if (stcb->asoc.alternate) {
						net = stcb->asoc.alternate;
					} else {
						net = stcb->asoc.primary_destination;
					}
					sctp_send_shutdown(stcb, net);
					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
					    stcb->sctp_ep, stcb, net);
					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
					    stcb->sctp_ep, stcb, NULL);
				}
			}
		} else {
			/*
			 * No auto close at this time, reset t-o to check
			 * later
			 */
			uint32_t tmp;

			/* fool the timer startup to use the time left */
			/* Temporarily shrink the tick budget so the timer is
			 * armed for only the remaining idle time, then put
			 * the configured value back. */
			tmp = asoc->sctp_autoclose_ticks;
			asoc->sctp_autoclose_ticks -= ticks_gone_by;
			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL);
			/* restore the real tick value */
			asoc->sctp_autoclose_ticks = tmp;
		}
	}
}
1643
1644