1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
5  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * a) Redistributions of source code must retain the above copyright notice,
12  *    this list of conditions and the following disclaimer.
13  *
14  * b) Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the distribution.
17  *
18  * c) Neither the name of Cisco Systems, Inc. nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #if defined(__FreeBSD__) && !defined(__Userspace__)
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD: head/sys/netinet/sctp_output.c 362178 2020-06-14 16:05:08Z tuexen $");
38 #endif
39 
40 #include <netinet/sctp_os.h>
41 #if defined(__FreeBSD__) && !defined(__Userspace__)
42 #include <sys/proc.h>
43 #endif
44 #include <netinet/sctp_var.h>
45 #include <netinet/sctp_sysctl.h>
46 #include <netinet/sctp_header.h>
47 #include <netinet/sctp_pcb.h>
48 #include <netinet/sctputil.h>
49 #include <netinet/sctp_output.h>
50 #include <netinet/sctp_uio.h>
51 #include <netinet/sctputil.h>
52 #include <netinet/sctp_auth.h>
53 #include <netinet/sctp_timer.h>
54 #include <netinet/sctp_asconf.h>
55 #include <netinet/sctp_indata.h>
56 #include <netinet/sctp_bsd_addr.h>
57 #include <netinet/sctp_input.h>
58 #include <netinet/sctp_crc32.h>
59 #if defined(__FreeBSD__) && !defined(__Userspace__)
60 #include <netinet/sctp_kdtrace.h>
61 #endif
62 #if defined(__linux__)
63 #define __FAVOR_BSD    /* (on Ubuntu at least) enables UDP header field names like BSD in RFC 768 */
64 #endif
65 #if defined(INET) || defined(INET6)
66 #if !defined(_WIN32)
67 #include <netinet/udp.h>
68 #endif
69 #endif
70 #if !defined(__Userspace__)
71 #if defined(__APPLE__)
72 #include <netinet/in.h>
73 #endif
74 #if defined(__FreeBSD__) && !defined(__Userspace__)
75 #include <netinet/udp_var.h>
76 #include <machine/in_cksum.h>
77 #endif
78 #endif
79 #if defined(__Userspace__) && defined(INET6)
80 #include <netinet6/sctp6_var.h>
81 #endif
82 
83 #if defined(__APPLE__) && !defined(__Userspace__)
84 #if !(defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD))
85 #define SCTP_MAX_LINKHDR 16
86 #endif
87 #endif
88 
89 #define SCTP_MAX_GAPS_INARRAY 4
90 struct sack_track {
91 	uint8_t right_edge;	/* mergable on the right edge */
92 	uint8_t left_edge;	/* mergable on the left edge */
93 	uint8_t num_entries;
94 	uint8_t spare;
95 	struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY];
96 };
97 
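/*
 * Lookup table used when constructing the gap ack blocks of a SACK from the
 * SACK mapping array: each entry describes the runs of set bits in one
 * possible value of a mapping-array byte, where bit i set means the TSN at
 * offset i has been received.  num_entries is the number of runs, gaps[]
 * holds the (start, end) bit offsets of each run, and right_edge/left_edge
 * indicate that bit 0 / bit 7 is set, so the first/last run may be merged
 * with a run from the neighboring byte.  For example, 0x29 (00101001) has
 * runs at bits 0, 3 and 5 and is encoded below as
 * {1, 0, 3, 0, {{0, 0}, {3, 3}, {5, 5}, {0, 0}}}.
 */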
98 const struct sack_track sack_array[256] = {
99 	{0, 0, 0, 0,		/* 0x00 */
100 		{{0, 0},
101 		{0, 0},
102 		{0, 0},
103 		{0, 0}
104 		}
105 	},
106 	{1, 0, 1, 0,		/* 0x01 */
107 		{{0, 0},
108 		{0, 0},
109 		{0, 0},
110 		{0, 0}
111 		}
112 	},
113 	{0, 0, 1, 0,		/* 0x02 */
114 		{{1, 1},
115 		{0, 0},
116 		{0, 0},
117 		{0, 0}
118 		}
119 	},
120 	{1, 0, 1, 0,		/* 0x03 */
121 		{{0, 1},
122 		{0, 0},
123 		{0, 0},
124 		{0, 0}
125 		}
126 	},
127 	{0, 0, 1, 0,		/* 0x04 */
128 		{{2, 2},
129 		{0, 0},
130 		{0, 0},
131 		{0, 0}
132 		}
133 	},
134 	{1, 0, 2, 0,		/* 0x05 */
135 		{{0, 0},
136 		{2, 2},
137 		{0, 0},
138 		{0, 0}
139 		}
140 	},
141 	{0, 0, 1, 0,		/* 0x06 */
142 		{{1, 2},
143 		{0, 0},
144 		{0, 0},
145 		{0, 0}
146 		}
147 	},
148 	{1, 0, 1, 0,		/* 0x07 */
149 		{{0, 2},
150 		{0, 0},
151 		{0, 0},
152 		{0, 0}
153 		}
154 	},
155 	{0, 0, 1, 0,		/* 0x08 */
156 		{{3, 3},
157 		{0, 0},
158 		{0, 0},
159 		{0, 0}
160 		}
161 	},
162 	{1, 0, 2, 0,		/* 0x09 */
163 		{{0, 0},
164 		{3, 3},
165 		{0, 0},
166 		{0, 0}
167 		}
168 	},
169 	{0, 0, 2, 0,		/* 0x0a */
170 		{{1, 1},
171 		{3, 3},
172 		{0, 0},
173 		{0, 0}
174 		}
175 	},
176 	{1, 0, 2, 0,		/* 0x0b */
177 		{{0, 1},
178 		{3, 3},
179 		{0, 0},
180 		{0, 0}
181 		}
182 	},
183 	{0, 0, 1, 0,		/* 0x0c */
184 		{{2, 3},
185 		{0, 0},
186 		{0, 0},
187 		{0, 0}
188 		}
189 	},
190 	{1, 0, 2, 0,		/* 0x0d */
191 		{{0, 0},
192 		{2, 3},
193 		{0, 0},
194 		{0, 0}
195 		}
196 	},
197 	{0, 0, 1, 0,		/* 0x0e */
198 		{{1, 3},
199 		{0, 0},
200 		{0, 0},
201 		{0, 0}
202 		}
203 	},
204 	{1, 0, 1, 0,		/* 0x0f */
205 		{{0, 3},
206 		{0, 0},
207 		{0, 0},
208 		{0, 0}
209 		}
210 	},
211 	{0, 0, 1, 0,		/* 0x10 */
212 		{{4, 4},
213 		{0, 0},
214 		{0, 0},
215 		{0, 0}
216 		}
217 	},
218 	{1, 0, 2, 0,		/* 0x11 */
219 		{{0, 0},
220 		{4, 4},
221 		{0, 0},
222 		{0, 0}
223 		}
224 	},
225 	{0, 0, 2, 0,		/* 0x12 */
226 		{{1, 1},
227 		{4, 4},
228 		{0, 0},
229 		{0, 0}
230 		}
231 	},
232 	{1, 0, 2, 0,		/* 0x13 */
233 		{{0, 1},
234 		{4, 4},
235 		{0, 0},
236 		{0, 0}
237 		}
238 	},
239 	{0, 0, 2, 0,		/* 0x14 */
240 		{{2, 2},
241 		{4, 4},
242 		{0, 0},
243 		{0, 0}
244 		}
245 	},
246 	{1, 0, 3, 0,		/* 0x15 */
247 		{{0, 0},
248 		{2, 2},
249 		{4, 4},
250 		{0, 0}
251 		}
252 	},
253 	{0, 0, 2, 0,		/* 0x16 */
254 		{{1, 2},
255 		{4, 4},
256 		{0, 0},
257 		{0, 0}
258 		}
259 	},
260 	{1, 0, 2, 0,		/* 0x17 */
261 		{{0, 2},
262 		{4, 4},
263 		{0, 0},
264 		{0, 0}
265 		}
266 	},
267 	{0, 0, 1, 0,		/* 0x18 */
268 		{{3, 4},
269 		{0, 0},
270 		{0, 0},
271 		{0, 0}
272 		}
273 	},
274 	{1, 0, 2, 0,		/* 0x19 */
275 		{{0, 0},
276 		{3, 4},
277 		{0, 0},
278 		{0, 0}
279 		}
280 	},
281 	{0, 0, 2, 0,		/* 0x1a */
282 		{{1, 1},
283 		{3, 4},
284 		{0, 0},
285 		{0, 0}
286 		}
287 	},
288 	{1, 0, 2, 0,		/* 0x1b */
289 		{{0, 1},
290 		{3, 4},
291 		{0, 0},
292 		{0, 0}
293 		}
294 	},
295 	{0, 0, 1, 0,		/* 0x1c */
296 		{{2, 4},
297 		{0, 0},
298 		{0, 0},
299 		{0, 0}
300 		}
301 	},
302 	{1, 0, 2, 0,		/* 0x1d */
303 		{{0, 0},
304 		{2, 4},
305 		{0, 0},
306 		{0, 0}
307 		}
308 	},
309 	{0, 0, 1, 0,		/* 0x1e */
310 		{{1, 4},
311 		{0, 0},
312 		{0, 0},
313 		{0, 0}
314 		}
315 	},
316 	{1, 0, 1, 0,		/* 0x1f */
317 		{{0, 4},
318 		{0, 0},
319 		{0, 0},
320 		{0, 0}
321 		}
322 	},
323 	{0, 0, 1, 0,		/* 0x20 */
324 		{{5, 5},
325 		{0, 0},
326 		{0, 0},
327 		{0, 0}
328 		}
329 	},
330 	{1, 0, 2, 0,		/* 0x21 */
331 		{{0, 0},
332 		{5, 5},
333 		{0, 0},
334 		{0, 0}
335 		}
336 	},
337 	{0, 0, 2, 0,		/* 0x22 */
338 		{{1, 1},
339 		{5, 5},
340 		{0, 0},
341 		{0, 0}
342 		}
343 	},
344 	{1, 0, 2, 0,		/* 0x23 */
345 		{{0, 1},
346 		{5, 5},
347 		{0, 0},
348 		{0, 0}
349 		}
350 	},
351 	{0, 0, 2, 0,		/* 0x24 */
352 		{{2, 2},
353 		{5, 5},
354 		{0, 0},
355 		{0, 0}
356 		}
357 	},
358 	{1, 0, 3, 0,		/* 0x25 */
359 		{{0, 0},
360 		{2, 2},
361 		{5, 5},
362 		{0, 0}
363 		}
364 	},
365 	{0, 0, 2, 0,		/* 0x26 */
366 		{{1, 2},
367 		{5, 5},
368 		{0, 0},
369 		{0, 0}
370 		}
371 	},
372 	{1, 0, 2, 0,		/* 0x27 */
373 		{{0, 2},
374 		{5, 5},
375 		{0, 0},
376 		{0, 0}
377 		}
378 	},
379 	{0, 0, 2, 0,		/* 0x28 */
380 		{{3, 3},
381 		{5, 5},
382 		{0, 0},
383 		{0, 0}
384 		}
385 	},
386 	{1, 0, 3, 0,		/* 0x29 */
387 		{{0, 0},
388 		{3, 3},
389 		{5, 5},
390 		{0, 0}
391 		}
392 	},
393 	{0, 0, 3, 0,		/* 0x2a */
394 		{{1, 1},
395 		{3, 3},
396 		{5, 5},
397 		{0, 0}
398 		}
399 	},
400 	{1, 0, 3, 0,		/* 0x2b */
401 		{{0, 1},
402 		{3, 3},
403 		{5, 5},
404 		{0, 0}
405 		}
406 	},
407 	{0, 0, 2, 0,		/* 0x2c */
408 		{{2, 3},
409 		{5, 5},
410 		{0, 0},
411 		{0, 0}
412 		}
413 	},
414 	{1, 0, 3, 0,		/* 0x2d */
415 		{{0, 0},
416 		{2, 3},
417 		{5, 5},
418 		{0, 0}
419 		}
420 	},
421 	{0, 0, 2, 0,		/* 0x2e */
422 		{{1, 3},
423 		{5, 5},
424 		{0, 0},
425 		{0, 0}
426 		}
427 	},
428 	{1, 0, 2, 0,		/* 0x2f */
429 		{{0, 3},
430 		{5, 5},
431 		{0, 0},
432 		{0, 0}
433 		}
434 	},
435 	{0, 0, 1, 0,		/* 0x30 */
436 		{{4, 5},
437 		{0, 0},
438 		{0, 0},
439 		{0, 0}
440 		}
441 	},
442 	{1, 0, 2, 0,		/* 0x31 */
443 		{{0, 0},
444 		{4, 5},
445 		{0, 0},
446 		{0, 0}
447 		}
448 	},
449 	{0, 0, 2, 0,		/* 0x32 */
450 		{{1, 1},
451 		{4, 5},
452 		{0, 0},
453 		{0, 0}
454 		}
455 	},
456 	{1, 0, 2, 0,		/* 0x33 */
457 		{{0, 1},
458 		{4, 5},
459 		{0, 0},
460 		{0, 0}
461 		}
462 	},
463 	{0, 0, 2, 0,		/* 0x34 */
464 		{{2, 2},
465 		{4, 5},
466 		{0, 0},
467 		{0, 0}
468 		}
469 	},
470 	{1, 0, 3, 0,		/* 0x35 */
471 		{{0, 0},
472 		{2, 2},
473 		{4, 5},
474 		{0, 0}
475 		}
476 	},
477 	{0, 0, 2, 0,		/* 0x36 */
478 		{{1, 2},
479 		{4, 5},
480 		{0, 0},
481 		{0, 0}
482 		}
483 	},
484 	{1, 0, 2, 0,		/* 0x37 */
485 		{{0, 2},
486 		{4, 5},
487 		{0, 0},
488 		{0, 0}
489 		}
490 	},
491 	{0, 0, 1, 0,		/* 0x38 */
492 		{{3, 5},
493 		{0, 0},
494 		{0, 0},
495 		{0, 0}
496 		}
497 	},
498 	{1, 0, 2, 0,		/* 0x39 */
499 		{{0, 0},
500 		{3, 5},
501 		{0, 0},
502 		{0, 0}
503 		}
504 	},
505 	{0, 0, 2, 0,		/* 0x3a */
506 		{{1, 1},
507 		{3, 5},
508 		{0, 0},
509 		{0, 0}
510 		}
511 	},
512 	{1, 0, 2, 0,		/* 0x3b */
513 		{{0, 1},
514 		{3, 5},
515 		{0, 0},
516 		{0, 0}
517 		}
518 	},
519 	{0, 0, 1, 0,		/* 0x3c */
520 		{{2, 5},
521 		{0, 0},
522 		{0, 0},
523 		{0, 0}
524 		}
525 	},
526 	{1, 0, 2, 0,		/* 0x3d */
527 		{{0, 0},
528 		{2, 5},
529 		{0, 0},
530 		{0, 0}
531 		}
532 	},
533 	{0, 0, 1, 0,		/* 0x3e */
534 		{{1, 5},
535 		{0, 0},
536 		{0, 0},
537 		{0, 0}
538 		}
539 	},
540 	{1, 0, 1, 0,		/* 0x3f */
541 		{{0, 5},
542 		{0, 0},
543 		{0, 0},
544 		{0, 0}
545 		}
546 	},
547 	{0, 0, 1, 0,		/* 0x40 */
548 		{{6, 6},
549 		{0, 0},
550 		{0, 0},
551 		{0, 0}
552 		}
553 	},
554 	{1, 0, 2, 0,		/* 0x41 */
555 		{{0, 0},
556 		{6, 6},
557 		{0, 0},
558 		{0, 0}
559 		}
560 	},
561 	{0, 0, 2, 0,		/* 0x42 */
562 		{{1, 1},
563 		{6, 6},
564 		{0, 0},
565 		{0, 0}
566 		}
567 	},
568 	{1, 0, 2, 0,		/* 0x43 */
569 		{{0, 1},
570 		{6, 6},
571 		{0, 0},
572 		{0, 0}
573 		}
574 	},
575 	{0, 0, 2, 0,		/* 0x44 */
576 		{{2, 2},
577 		{6, 6},
578 		{0, 0},
579 		{0, 0}
580 		}
581 	},
582 	{1, 0, 3, 0,		/* 0x45 */
583 		{{0, 0},
584 		{2, 2},
585 		{6, 6},
586 		{0, 0}
587 		}
588 	},
589 	{0, 0, 2, 0,		/* 0x46 */
590 		{{1, 2},
591 		{6, 6},
592 		{0, 0},
593 		{0, 0}
594 		}
595 	},
596 	{1, 0, 2, 0,		/* 0x47 */
597 		{{0, 2},
598 		{6, 6},
599 		{0, 0},
600 		{0, 0}
601 		}
602 	},
603 	{0, 0, 2, 0,		/* 0x48 */
604 		{{3, 3},
605 		{6, 6},
606 		{0, 0},
607 		{0, 0}
608 		}
609 	},
610 	{1, 0, 3, 0,		/* 0x49 */
611 		{{0, 0},
612 		{3, 3},
613 		{6, 6},
614 		{0, 0}
615 		}
616 	},
617 	{0, 0, 3, 0,		/* 0x4a */
618 		{{1, 1},
619 		{3, 3},
620 		{6, 6},
621 		{0, 0}
622 		}
623 	},
624 	{1, 0, 3, 0,		/* 0x4b */
625 		{{0, 1},
626 		{3, 3},
627 		{6, 6},
628 		{0, 0}
629 		}
630 	},
631 	{0, 0, 2, 0,		/* 0x4c */
632 		{{2, 3},
633 		{6, 6},
634 		{0, 0},
635 		{0, 0}
636 		}
637 	},
638 	{1, 0, 3, 0,		/* 0x4d */
639 		{{0, 0},
640 		{2, 3},
641 		{6, 6},
642 		{0, 0}
643 		}
644 	},
645 	{0, 0, 2, 0,		/* 0x4e */
646 		{{1, 3},
647 		{6, 6},
648 		{0, 0},
649 		{0, 0}
650 		}
651 	},
652 	{1, 0, 2, 0,		/* 0x4f */
653 		{{0, 3},
654 		{6, 6},
655 		{0, 0},
656 		{0, 0}
657 		}
658 	},
659 	{0, 0, 2, 0,		/* 0x50 */
660 		{{4, 4},
661 		{6, 6},
662 		{0, 0},
663 		{0, 0}
664 		}
665 	},
666 	{1, 0, 3, 0,		/* 0x51 */
667 		{{0, 0},
668 		{4, 4},
669 		{6, 6},
670 		{0, 0}
671 		}
672 	},
673 	{0, 0, 3, 0,		/* 0x52 */
674 		{{1, 1},
675 		{4, 4},
676 		{6, 6},
677 		{0, 0}
678 		}
679 	},
680 	{1, 0, 3, 0,		/* 0x53 */
681 		{{0, 1},
682 		{4, 4},
683 		{6, 6},
684 		{0, 0}
685 		}
686 	},
687 	{0, 0, 3, 0,		/* 0x54 */
688 		{{2, 2},
689 		{4, 4},
690 		{6, 6},
691 		{0, 0}
692 		}
693 	},
694 	{1, 0, 4, 0,		/* 0x55 */
695 		{{0, 0},
696 		{2, 2},
697 		{4, 4},
698 		{6, 6}
699 		}
700 	},
701 	{0, 0, 3, 0,		/* 0x56 */
702 		{{1, 2},
703 		{4, 4},
704 		{6, 6},
705 		{0, 0}
706 		}
707 	},
708 	{1, 0, 3, 0,		/* 0x57 */
709 		{{0, 2},
710 		{4, 4},
711 		{6, 6},
712 		{0, 0}
713 		}
714 	},
715 	{0, 0, 2, 0,		/* 0x58 */
716 		{{3, 4},
717 		{6, 6},
718 		{0, 0},
719 		{0, 0}
720 		}
721 	},
722 	{1, 0, 3, 0,		/* 0x59 */
723 		{{0, 0},
724 		{3, 4},
725 		{6, 6},
726 		{0, 0}
727 		}
728 	},
729 	{0, 0, 3, 0,		/* 0x5a */
730 		{{1, 1},
731 		{3, 4},
732 		{6, 6},
733 		{0, 0}
734 		}
735 	},
736 	{1, 0, 3, 0,		/* 0x5b */
737 		{{0, 1},
738 		{3, 4},
739 		{6, 6},
740 		{0, 0}
741 		}
742 	},
743 	{0, 0, 2, 0,		/* 0x5c */
744 		{{2, 4},
745 		{6, 6},
746 		{0, 0},
747 		{0, 0}
748 		}
749 	},
750 	{1, 0, 3, 0,		/* 0x5d */
751 		{{0, 0},
752 		{2, 4},
753 		{6, 6},
754 		{0, 0}
755 		}
756 	},
757 	{0, 0, 2, 0,		/* 0x5e */
758 		{{1, 4},
759 		{6, 6},
760 		{0, 0},
761 		{0, 0}
762 		}
763 	},
764 	{1, 0, 2, 0,		/* 0x5f */
765 		{{0, 4},
766 		{6, 6},
767 		{0, 0},
768 		{0, 0}
769 		}
770 	},
771 	{0, 0, 1, 0,		/* 0x60 */
772 		{{5, 6},
773 		{0, 0},
774 		{0, 0},
775 		{0, 0}
776 		}
777 	},
778 	{1, 0, 2, 0,		/* 0x61 */
779 		{{0, 0},
780 		{5, 6},
781 		{0, 0},
782 		{0, 0}
783 		}
784 	},
785 	{0, 0, 2, 0,		/* 0x62 */
786 		{{1, 1},
787 		{5, 6},
788 		{0, 0},
789 		{0, 0}
790 		}
791 	},
792 	{1, 0, 2, 0,		/* 0x63 */
793 		{{0, 1},
794 		{5, 6},
795 		{0, 0},
796 		{0, 0}
797 		}
798 	},
799 	{0, 0, 2, 0,		/* 0x64 */
800 		{{2, 2},
801 		{5, 6},
802 		{0, 0},
803 		{0, 0}
804 		}
805 	},
806 	{1, 0, 3, 0,		/* 0x65 */
807 		{{0, 0},
808 		{2, 2},
809 		{5, 6},
810 		{0, 0}
811 		}
812 	},
813 	{0, 0, 2, 0,		/* 0x66 */
814 		{{1, 2},
815 		{5, 6},
816 		{0, 0},
817 		{0, 0}
818 		}
819 	},
820 	{1, 0, 2, 0,		/* 0x67 */
821 		{{0, 2},
822 		{5, 6},
823 		{0, 0},
824 		{0, 0}
825 		}
826 	},
827 	{0, 0, 2, 0,		/* 0x68 */
828 		{{3, 3},
829 		{5, 6},
830 		{0, 0},
831 		{0, 0}
832 		}
833 	},
834 	{1, 0, 3, 0,		/* 0x69 */
835 		{{0, 0},
836 		{3, 3},
837 		{5, 6},
838 		{0, 0}
839 		}
840 	},
841 	{0, 0, 3, 0,		/* 0x6a */
842 		{{1, 1},
843 		{3, 3},
844 		{5, 6},
845 		{0, 0}
846 		}
847 	},
848 	{1, 0, 3, 0,		/* 0x6b */
849 		{{0, 1},
850 		{3, 3},
851 		{5, 6},
852 		{0, 0}
853 		}
854 	},
855 	{0, 0, 2, 0,		/* 0x6c */
856 		{{2, 3},
857 		{5, 6},
858 		{0, 0},
859 		{0, 0}
860 		}
861 	},
862 	{1, 0, 3, 0,		/* 0x6d */
863 		{{0, 0},
864 		{2, 3},
865 		{5, 6},
866 		{0, 0}
867 		}
868 	},
869 	{0, 0, 2, 0,		/* 0x6e */
870 		{{1, 3},
871 		{5, 6},
872 		{0, 0},
873 		{0, 0}
874 		}
875 	},
876 	{1, 0, 2, 0,		/* 0x6f */
877 		{{0, 3},
878 		{5, 6},
879 		{0, 0},
880 		{0, 0}
881 		}
882 	},
883 	{0, 0, 1, 0,		/* 0x70 */
884 		{{4, 6},
885 		{0, 0},
886 		{0, 0},
887 		{0, 0}
888 		}
889 	},
890 	{1, 0, 2, 0,		/* 0x71 */
891 		{{0, 0},
892 		{4, 6},
893 		{0, 0},
894 		{0, 0}
895 		}
896 	},
897 	{0, 0, 2, 0,		/* 0x72 */
898 		{{1, 1},
899 		{4, 6},
900 		{0, 0},
901 		{0, 0}
902 		}
903 	},
904 	{1, 0, 2, 0,		/* 0x73 */
905 		{{0, 1},
906 		{4, 6},
907 		{0, 0},
908 		{0, 0}
909 		}
910 	},
911 	{0, 0, 2, 0,		/* 0x74 */
912 		{{2, 2},
913 		{4, 6},
914 		{0, 0},
915 		{0, 0}
916 		}
917 	},
918 	{1, 0, 3, 0,		/* 0x75 */
919 		{{0, 0},
920 		{2, 2},
921 		{4, 6},
922 		{0, 0}
923 		}
924 	},
925 	{0, 0, 2, 0,		/* 0x76 */
926 		{{1, 2},
927 		{4, 6},
928 		{0, 0},
929 		{0, 0}
930 		}
931 	},
932 	{1, 0, 2, 0,		/* 0x77 */
933 		{{0, 2},
934 		{4, 6},
935 		{0, 0},
936 		{0, 0}
937 		}
938 	},
939 	{0, 0, 1, 0,		/* 0x78 */
940 		{{3, 6},
941 		{0, 0},
942 		{0, 0},
943 		{0, 0}
944 		}
945 	},
946 	{1, 0, 2, 0,		/* 0x79 */
947 		{{0, 0},
948 		{3, 6},
949 		{0, 0},
950 		{0, 0}
951 		}
952 	},
953 	{0, 0, 2, 0,		/* 0x7a */
954 		{{1, 1},
955 		{3, 6},
956 		{0, 0},
957 		{0, 0}
958 		}
959 	},
960 	{1, 0, 2, 0,		/* 0x7b */
961 		{{0, 1},
962 		{3, 6},
963 		{0, 0},
964 		{0, 0}
965 		}
966 	},
967 	{0, 0, 1, 0,		/* 0x7c */
968 		{{2, 6},
969 		{0, 0},
970 		{0, 0},
971 		{0, 0}
972 		}
973 	},
974 	{1, 0, 2, 0,		/* 0x7d */
975 		{{0, 0},
976 		{2, 6},
977 		{0, 0},
978 		{0, 0}
979 		}
980 	},
981 	{0, 0, 1, 0,		/* 0x7e */
982 		{{1, 6},
983 		{0, 0},
984 		{0, 0},
985 		{0, 0}
986 		}
987 	},
988 	{1, 0, 1, 0,		/* 0x7f */
989 		{{0, 6},
990 		{0, 0},
991 		{0, 0},
992 		{0, 0}
993 		}
994 	},
995 	{0, 1, 1, 0,		/* 0x80 */
996 		{{7, 7},
997 		{0, 0},
998 		{0, 0},
999 		{0, 0}
1000 		}
1001 	},
1002 	{1, 1, 2, 0,		/* 0x81 */
1003 		{{0, 0},
1004 		{7, 7},
1005 		{0, 0},
1006 		{0, 0}
1007 		}
1008 	},
1009 	{0, 1, 2, 0,		/* 0x82 */
1010 		{{1, 1},
1011 		{7, 7},
1012 		{0, 0},
1013 		{0, 0}
1014 		}
1015 	},
1016 	{1, 1, 2, 0,		/* 0x83 */
1017 		{{0, 1},
1018 		{7, 7},
1019 		{0, 0},
1020 		{0, 0}
1021 		}
1022 	},
1023 	{0, 1, 2, 0,		/* 0x84 */
1024 		{{2, 2},
1025 		{7, 7},
1026 		{0, 0},
1027 		{0, 0}
1028 		}
1029 	},
1030 	{1, 1, 3, 0,		/* 0x85 */
1031 		{{0, 0},
1032 		{2, 2},
1033 		{7, 7},
1034 		{0, 0}
1035 		}
1036 	},
1037 	{0, 1, 2, 0,		/* 0x86 */
1038 		{{1, 2},
1039 		{7, 7},
1040 		{0, 0},
1041 		{0, 0}
1042 		}
1043 	},
1044 	{1, 1, 2, 0,		/* 0x87 */
1045 		{{0, 2},
1046 		{7, 7},
1047 		{0, 0},
1048 		{0, 0}
1049 		}
1050 	},
1051 	{0, 1, 2, 0,		/* 0x88 */
1052 		{{3, 3},
1053 		{7, 7},
1054 		{0, 0},
1055 		{0, 0}
1056 		}
1057 	},
1058 	{1, 1, 3, 0,		/* 0x89 */
1059 		{{0, 0},
1060 		{3, 3},
1061 		{7, 7},
1062 		{0, 0}
1063 		}
1064 	},
1065 	{0, 1, 3, 0,		/* 0x8a */
1066 		{{1, 1},
1067 		{3, 3},
1068 		{7, 7},
1069 		{0, 0}
1070 		}
1071 	},
1072 	{1, 1, 3, 0,		/* 0x8b */
1073 		{{0, 1},
1074 		{3, 3},
1075 		{7, 7},
1076 		{0, 0}
1077 		}
1078 	},
1079 	{0, 1, 2, 0,		/* 0x8c */
1080 		{{2, 3},
1081 		{7, 7},
1082 		{0, 0},
1083 		{0, 0}
1084 		}
1085 	},
1086 	{1, 1, 3, 0,		/* 0x8d */
1087 		{{0, 0},
1088 		{2, 3},
1089 		{7, 7},
1090 		{0, 0}
1091 		}
1092 	},
1093 	{0, 1, 2, 0,		/* 0x8e */
1094 		{{1, 3},
1095 		{7, 7},
1096 		{0, 0},
1097 		{0, 0}
1098 		}
1099 	},
1100 	{1, 1, 2, 0,		/* 0x8f */
1101 		{{0, 3},
1102 		{7, 7},
1103 		{0, 0},
1104 		{0, 0}
1105 		}
1106 	},
1107 	{0, 1, 2, 0,		/* 0x90 */
1108 		{{4, 4},
1109 		{7, 7},
1110 		{0, 0},
1111 		{0, 0}
1112 		}
1113 	},
1114 	{1, 1, 3, 0,		/* 0x91 */
1115 		{{0, 0},
1116 		{4, 4},
1117 		{7, 7},
1118 		{0, 0}
1119 		}
1120 	},
1121 	{0, 1, 3, 0,		/* 0x92 */
1122 		{{1, 1},
1123 		{4, 4},
1124 		{7, 7},
1125 		{0, 0}
1126 		}
1127 	},
1128 	{1, 1, 3, 0,		/* 0x93 */
1129 		{{0, 1},
1130 		{4, 4},
1131 		{7, 7},
1132 		{0, 0}
1133 		}
1134 	},
1135 	{0, 1, 3, 0,		/* 0x94 */
1136 		{{2, 2},
1137 		{4, 4},
1138 		{7, 7},
1139 		{0, 0}
1140 		}
1141 	},
1142 	{1, 1, 4, 0,		/* 0x95 */
1143 		{{0, 0},
1144 		{2, 2},
1145 		{4, 4},
1146 		{7, 7}
1147 		}
1148 	},
1149 	{0, 1, 3, 0,		/* 0x96 */
1150 		{{1, 2},
1151 		{4, 4},
1152 		{7, 7},
1153 		{0, 0}
1154 		}
1155 	},
1156 	{1, 1, 3, 0,		/* 0x97 */
1157 		{{0, 2},
1158 		{4, 4},
1159 		{7, 7},
1160 		{0, 0}
1161 		}
1162 	},
1163 	{0, 1, 2, 0,		/* 0x98 */
1164 		{{3, 4},
1165 		{7, 7},
1166 		{0, 0},
1167 		{0, 0}
1168 		}
1169 	},
1170 	{1, 1, 3, 0,		/* 0x99 */
1171 		{{0, 0},
1172 		{3, 4},
1173 		{7, 7},
1174 		{0, 0}
1175 		}
1176 	},
1177 	{0, 1, 3, 0,		/* 0x9a */
1178 		{{1, 1},
1179 		{3, 4},
1180 		{7, 7},
1181 		{0, 0}
1182 		}
1183 	},
1184 	{1, 1, 3, 0,		/* 0x9b */
1185 		{{0, 1},
1186 		{3, 4},
1187 		{7, 7},
1188 		{0, 0}
1189 		}
1190 	},
1191 	{0, 1, 2, 0,		/* 0x9c */
1192 		{{2, 4},
1193 		{7, 7},
1194 		{0, 0},
1195 		{0, 0}
1196 		}
1197 	},
1198 	{1, 1, 3, 0,		/* 0x9d */
1199 		{{0, 0},
1200 		{2, 4},
1201 		{7, 7},
1202 		{0, 0}
1203 		}
1204 	},
1205 	{0, 1, 2, 0,		/* 0x9e */
1206 		{{1, 4},
1207 		{7, 7},
1208 		{0, 0},
1209 		{0, 0}
1210 		}
1211 	},
1212 	{1, 1, 2, 0,		/* 0x9f */
1213 		{{0, 4},
1214 		{7, 7},
1215 		{0, 0},
1216 		{0, 0}
1217 		}
1218 	},
1219 	{0, 1, 2, 0,		/* 0xa0 */
1220 		{{5, 5},
1221 		{7, 7},
1222 		{0, 0},
1223 		{0, 0}
1224 		}
1225 	},
1226 	{1, 1, 3, 0,		/* 0xa1 */
1227 		{{0, 0},
1228 		{5, 5},
1229 		{7, 7},
1230 		{0, 0}
1231 		}
1232 	},
1233 	{0, 1, 3, 0,		/* 0xa2 */
1234 		{{1, 1},
1235 		{5, 5},
1236 		{7, 7},
1237 		{0, 0}
1238 		}
1239 	},
1240 	{1, 1, 3, 0,		/* 0xa3 */
1241 		{{0, 1},
1242 		{5, 5},
1243 		{7, 7},
1244 		{0, 0}
1245 		}
1246 	},
1247 	{0, 1, 3, 0,		/* 0xa4 */
1248 		{{2, 2},
1249 		{5, 5},
1250 		{7, 7},
1251 		{0, 0}
1252 		}
1253 	},
1254 	{1, 1, 4, 0,		/* 0xa5 */
1255 		{{0, 0},
1256 		{2, 2},
1257 		{5, 5},
1258 		{7, 7}
1259 		}
1260 	},
1261 	{0, 1, 3, 0,		/* 0xa6 */
1262 		{{1, 2},
1263 		{5, 5},
1264 		{7, 7},
1265 		{0, 0}
1266 		}
1267 	},
1268 	{1, 1, 3, 0,		/* 0xa7 */
1269 		{{0, 2},
1270 		{5, 5},
1271 		{7, 7},
1272 		{0, 0}
1273 		}
1274 	},
1275 	{0, 1, 3, 0,		/* 0xa8 */
1276 		{{3, 3},
1277 		{5, 5},
1278 		{7, 7},
1279 		{0, 0}
1280 		}
1281 	},
1282 	{1, 1, 4, 0,		/* 0xa9 */
1283 		{{0, 0},
1284 		{3, 3},
1285 		{5, 5},
1286 		{7, 7}
1287 		}
1288 	},
1289 	{0, 1, 4, 0,		/* 0xaa */
1290 		{{1, 1},
1291 		{3, 3},
1292 		{5, 5},
1293 		{7, 7}
1294 		}
1295 	},
1296 	{1, 1, 4, 0,		/* 0xab */
1297 		{{0, 1},
1298 		{3, 3},
1299 		{5, 5},
1300 		{7, 7}
1301 		}
1302 	},
1303 	{0, 1, 3, 0,		/* 0xac */
1304 		{{2, 3},
1305 		{5, 5},
1306 		{7, 7},
1307 		{0, 0}
1308 		}
1309 	},
1310 	{1, 1, 4, 0,		/* 0xad */
1311 		{{0, 0},
1312 		{2, 3},
1313 		{5, 5},
1314 		{7, 7}
1315 		}
1316 	},
1317 	{0, 1, 3, 0,		/* 0xae */
1318 		{{1, 3},
1319 		{5, 5},
1320 		{7, 7},
1321 		{0, 0}
1322 		}
1323 	},
1324 	{1, 1, 3, 0,		/* 0xaf */
1325 		{{0, 3},
1326 		{5, 5},
1327 		{7, 7},
1328 		{0, 0}
1329 		}
1330 	},
1331 	{0, 1, 2, 0,		/* 0xb0 */
1332 		{{4, 5},
1333 		{7, 7},
1334 		{0, 0},
1335 		{0, 0}
1336 		}
1337 	},
1338 	{1, 1, 3, 0,		/* 0xb1 */
1339 		{{0, 0},
1340 		{4, 5},
1341 		{7, 7},
1342 		{0, 0}
1343 		}
1344 	},
1345 	{0, 1, 3, 0,		/* 0xb2 */
1346 		{{1, 1},
1347 		{4, 5},
1348 		{7, 7},
1349 		{0, 0}
1350 		}
1351 	},
1352 	{1, 1, 3, 0,		/* 0xb3 */
1353 		{{0, 1},
1354 		{4, 5},
1355 		{7, 7},
1356 		{0, 0}
1357 		}
1358 	},
1359 	{0, 1, 3, 0,		/* 0xb4 */
1360 		{{2, 2},
1361 		{4, 5},
1362 		{7, 7},
1363 		{0, 0}
1364 		}
1365 	},
1366 	{1, 1, 4, 0,		/* 0xb5 */
1367 		{{0, 0},
1368 		{2, 2},
1369 		{4, 5},
1370 		{7, 7}
1371 		}
1372 	},
1373 	{0, 1, 3, 0,		/* 0xb6 */
1374 		{{1, 2},
1375 		{4, 5},
1376 		{7, 7},
1377 		{0, 0}
1378 		}
1379 	},
1380 	{1, 1, 3, 0,		/* 0xb7 */
1381 		{{0, 2},
1382 		{4, 5},
1383 		{7, 7},
1384 		{0, 0}
1385 		}
1386 	},
1387 	{0, 1, 2, 0,		/* 0xb8 */
1388 		{{3, 5},
1389 		{7, 7},
1390 		{0, 0},
1391 		{0, 0}
1392 		}
1393 	},
1394 	{1, 1, 3, 0,		/* 0xb9 */
1395 		{{0, 0},
1396 		{3, 5},
1397 		{7, 7},
1398 		{0, 0}
1399 		}
1400 	},
1401 	{0, 1, 3, 0,		/* 0xba */
1402 		{{1, 1},
1403 		{3, 5},
1404 		{7, 7},
1405 		{0, 0}
1406 		}
1407 	},
1408 	{1, 1, 3, 0,		/* 0xbb */
1409 		{{0, 1},
1410 		{3, 5},
1411 		{7, 7},
1412 		{0, 0}
1413 		}
1414 	},
1415 	{0, 1, 2, 0,		/* 0xbc */
1416 		{{2, 5},
1417 		{7, 7},
1418 		{0, 0},
1419 		{0, 0}
1420 		}
1421 	},
1422 	{1, 1, 3, 0,		/* 0xbd */
1423 		{{0, 0},
1424 		{2, 5},
1425 		{7, 7},
1426 		{0, 0}
1427 		}
1428 	},
1429 	{0, 1, 2, 0,		/* 0xbe */
1430 		{{1, 5},
1431 		{7, 7},
1432 		{0, 0},
1433 		{0, 0}
1434 		}
1435 	},
1436 	{1, 1, 2, 0,		/* 0xbf */
1437 		{{0, 5},
1438 		{7, 7},
1439 		{0, 0},
1440 		{0, 0}
1441 		}
1442 	},
1443 	{0, 1, 1, 0,		/* 0xc0 */
1444 		{{6, 7},
1445 		{0, 0},
1446 		{0, 0},
1447 		{0, 0}
1448 		}
1449 	},
1450 	{1, 1, 2, 0,		/* 0xc1 */
1451 		{{0, 0},
1452 		{6, 7},
1453 		{0, 0},
1454 		{0, 0}
1455 		}
1456 	},
1457 	{0, 1, 2, 0,		/* 0xc2 */
1458 		{{1, 1},
1459 		{6, 7},
1460 		{0, 0},
1461 		{0, 0}
1462 		}
1463 	},
1464 	{1, 1, 2, 0,		/* 0xc3 */
1465 		{{0, 1},
1466 		{6, 7},
1467 		{0, 0},
1468 		{0, 0}
1469 		}
1470 	},
1471 	{0, 1, 2, 0,		/* 0xc4 */
1472 		{{2, 2},
1473 		{6, 7},
1474 		{0, 0},
1475 		{0, 0}
1476 		}
1477 	},
1478 	{1, 1, 3, 0,		/* 0xc5 */
1479 		{{0, 0},
1480 		{2, 2},
1481 		{6, 7},
1482 		{0, 0}
1483 		}
1484 	},
1485 	{0, 1, 2, 0,		/* 0xc6 */
1486 		{{1, 2},
1487 		{6, 7},
1488 		{0, 0},
1489 		{0, 0}
1490 		}
1491 	},
1492 	{1, 1, 2, 0,		/* 0xc7 */
1493 		{{0, 2},
1494 		{6, 7},
1495 		{0, 0},
1496 		{0, 0}
1497 		}
1498 	},
1499 	{0, 1, 2, 0,		/* 0xc8 */
1500 		{{3, 3},
1501 		{6, 7},
1502 		{0, 0},
1503 		{0, 0}
1504 		}
1505 	},
1506 	{1, 1, 3, 0,		/* 0xc9 */
1507 		{{0, 0},
1508 		{3, 3},
1509 		{6, 7},
1510 		{0, 0}
1511 		}
1512 	},
1513 	{0, 1, 3, 0,		/* 0xca */
1514 		{{1, 1},
1515 		{3, 3},
1516 		{6, 7},
1517 		{0, 0}
1518 		}
1519 	},
1520 	{1, 1, 3, 0,		/* 0xcb */
1521 		{{0, 1},
1522 		{3, 3},
1523 		{6, 7},
1524 		{0, 0}
1525 		}
1526 	},
1527 	{0, 1, 2, 0,		/* 0xcc */
1528 		{{2, 3},
1529 		{6, 7},
1530 		{0, 0},
1531 		{0, 0}
1532 		}
1533 	},
1534 	{1, 1, 3, 0,		/* 0xcd */
1535 		{{0, 0},
1536 		{2, 3},
1537 		{6, 7},
1538 		{0, 0}
1539 		}
1540 	},
1541 	{0, 1, 2, 0,		/* 0xce */
1542 		{{1, 3},
1543 		{6, 7},
1544 		{0, 0},
1545 		{0, 0}
1546 		}
1547 	},
1548 	{1, 1, 2, 0,		/* 0xcf */
1549 		{{0, 3},
1550 		{6, 7},
1551 		{0, 0},
1552 		{0, 0}
1553 		}
1554 	},
1555 	{0, 1, 2, 0,		/* 0xd0 */
1556 		{{4, 4},
1557 		{6, 7},
1558 		{0, 0},
1559 		{0, 0}
1560 		}
1561 	},
1562 	{1, 1, 3, 0,		/* 0xd1 */
1563 		{{0, 0},
1564 		{4, 4},
1565 		{6, 7},
1566 		{0, 0}
1567 		}
1568 	},
1569 	{0, 1, 3, 0,		/* 0xd2 */
1570 		{{1, 1},
1571 		{4, 4},
1572 		{6, 7},
1573 		{0, 0}
1574 		}
1575 	},
1576 	{1, 1, 3, 0,		/* 0xd3 */
1577 		{{0, 1},
1578 		{4, 4},
1579 		{6, 7},
1580 		{0, 0}
1581 		}
1582 	},
1583 	{0, 1, 3, 0,		/* 0xd4 */
1584 		{{2, 2},
1585 		{4, 4},
1586 		{6, 7},
1587 		{0, 0}
1588 		}
1589 	},
1590 	{1, 1, 4, 0,		/* 0xd5 */
1591 		{{0, 0},
1592 		{2, 2},
1593 		{4, 4},
1594 		{6, 7}
1595 		}
1596 	},
1597 	{0, 1, 3, 0,		/* 0xd6 */
1598 		{{1, 2},
1599 		{4, 4},
1600 		{6, 7},
1601 		{0, 0}
1602 		}
1603 	},
1604 	{1, 1, 3, 0,		/* 0xd7 */
1605 		{{0, 2},
1606 		{4, 4},
1607 		{6, 7},
1608 		{0, 0}
1609 		}
1610 	},
1611 	{0, 1, 2, 0,		/* 0xd8 */
1612 		{{3, 4},
1613 		{6, 7},
1614 		{0, 0},
1615 		{0, 0}
1616 		}
1617 	},
1618 	{1, 1, 3, 0,		/* 0xd9 */
1619 		{{0, 0},
1620 		{3, 4},
1621 		{6, 7},
1622 		{0, 0}
1623 		}
1624 	},
1625 	{0, 1, 3, 0,		/* 0xda */
1626 		{{1, 1},
1627 		{3, 4},
1628 		{6, 7},
1629 		{0, 0}
1630 		}
1631 	},
1632 	{1, 1, 3, 0,		/* 0xdb */
1633 		{{0, 1},
1634 		{3, 4},
1635 		{6, 7},
1636 		{0, 0}
1637 		}
1638 	},
1639 	{0, 1, 2, 0,		/* 0xdc */
1640 		{{2, 4},
1641 		{6, 7},
1642 		{0, 0},
1643 		{0, 0}
1644 		}
1645 	},
1646 	{1, 1, 3, 0,		/* 0xdd */
1647 		{{0, 0},
1648 		{2, 4},
1649 		{6, 7},
1650 		{0, 0}
1651 		}
1652 	},
1653 	{0, 1, 2, 0,		/* 0xde */
1654 		{{1, 4},
1655 		{6, 7},
1656 		{0, 0},
1657 		{0, 0}
1658 		}
1659 	},
1660 	{1, 1, 2, 0,		/* 0xdf */
1661 		{{0, 4},
1662 		{6, 7},
1663 		{0, 0},
1664 		{0, 0}
1665 		}
1666 	},
1667 	{0, 1, 1, 0,		/* 0xe0 */
1668 		{{5, 7},
1669 		{0, 0},
1670 		{0, 0},
1671 		{0, 0}
1672 		}
1673 	},
1674 	{1, 1, 2, 0,		/* 0xe1 */
1675 		{{0, 0},
1676 		{5, 7},
1677 		{0, 0},
1678 		{0, 0}
1679 		}
1680 	},
1681 	{0, 1, 2, 0,		/* 0xe2 */
1682 		{{1, 1},
1683 		{5, 7},
1684 		{0, 0},
1685 		{0, 0}
1686 		}
1687 	},
1688 	{1, 1, 2, 0,		/* 0xe3 */
1689 		{{0, 1},
1690 		{5, 7},
1691 		{0, 0},
1692 		{0, 0}
1693 		}
1694 	},
1695 	{0, 1, 2, 0,		/* 0xe4 */
1696 		{{2, 2},
1697 		{5, 7},
1698 		{0, 0},
1699 		{0, 0}
1700 		}
1701 	},
1702 	{1, 1, 3, 0,		/* 0xe5 */
1703 		{{0, 0},
1704 		{2, 2},
1705 		{5, 7},
1706 		{0, 0}
1707 		}
1708 	},
1709 	{0, 1, 2, 0,		/* 0xe6 */
1710 		{{1, 2},
1711 		{5, 7},
1712 		{0, 0},
1713 		{0, 0}
1714 		}
1715 	},
1716 	{1, 1, 2, 0,		/* 0xe7 */
1717 		{{0, 2},
1718 		{5, 7},
1719 		{0, 0},
1720 		{0, 0}
1721 		}
1722 	},
1723 	{0, 1, 2, 0,		/* 0xe8 */
1724 		{{3, 3},
1725 		{5, 7},
1726 		{0, 0},
1727 		{0, 0}
1728 		}
1729 	},
1730 	{1, 1, 3, 0,		/* 0xe9 */
1731 		{{0, 0},
1732 		{3, 3},
1733 		{5, 7},
1734 		{0, 0}
1735 		}
1736 	},
1737 	{0, 1, 3, 0,		/* 0xea */
1738 		{{1, 1},
1739 		{3, 3},
1740 		{5, 7},
1741 		{0, 0}
1742 		}
1743 	},
1744 	{1, 1, 3, 0,		/* 0xeb */
1745 		{{0, 1},
1746 		{3, 3},
1747 		{5, 7},
1748 		{0, 0}
1749 		}
1750 	},
1751 	{0, 1, 2, 0,		/* 0xec */
1752 		{{2, 3},
1753 		{5, 7},
1754 		{0, 0},
1755 		{0, 0}
1756 		}
1757 	},
1758 	{1, 1, 3, 0,		/* 0xed */
1759 		{{0, 0},
1760 		{2, 3},
1761 		{5, 7},
1762 		{0, 0}
1763 		}
1764 	},
1765 	{0, 1, 2, 0,		/* 0xee */
1766 		{{1, 3},
1767 		{5, 7},
1768 		{0, 0},
1769 		{0, 0}
1770 		}
1771 	},
1772 	{1, 1, 2, 0,		/* 0xef */
1773 		{{0, 3},
1774 		{5, 7},
1775 		{0, 0},
1776 		{0, 0}
1777 		}
1778 	},
1779 	{0, 1, 1, 0,		/* 0xf0 */
1780 		{{4, 7},
1781 		{0, 0},
1782 		{0, 0},
1783 		{0, 0}
1784 		}
1785 	},
1786 	{1, 1, 2, 0,		/* 0xf1 */
1787 		{{0, 0},
1788 		{4, 7},
1789 		{0, 0},
1790 		{0, 0}
1791 		}
1792 	},
1793 	{0, 1, 2, 0,		/* 0xf2 */
1794 		{{1, 1},
1795 		{4, 7},
1796 		{0, 0},
1797 		{0, 0}
1798 		}
1799 	},
1800 	{1, 1, 2, 0,		/* 0xf3 */
1801 		{{0, 1},
1802 		{4, 7},
1803 		{0, 0},
1804 		{0, 0}
1805 		}
1806 	},
1807 	{0, 1, 2, 0,		/* 0xf4 */
1808 		{{2, 2},
1809 		{4, 7},
1810 		{0, 0},
1811 		{0, 0}
1812 		}
1813 	},
1814 	{1, 1, 3, 0,		/* 0xf5 */
1815 		{{0, 0},
1816 		{2, 2},
1817 		{4, 7},
1818 		{0, 0}
1819 		}
1820 	},
1821 	{0, 1, 2, 0,		/* 0xf6 */
1822 		{{1, 2},
1823 		{4, 7},
1824 		{0, 0},
1825 		{0, 0}
1826 		}
1827 	},
1828 	{1, 1, 2, 0,		/* 0xf7 */
1829 		{{0, 2},
1830 		{4, 7},
1831 		{0, 0},
1832 		{0, 0}
1833 		}
1834 	},
1835 	{0, 1, 1, 0,		/* 0xf8 */
1836 		{{3, 7},
1837 		{0, 0},
1838 		{0, 0},
1839 		{0, 0}
1840 		}
1841 	},
1842 	{1, 1, 2, 0,		/* 0xf9 */
1843 		{{0, 0},
1844 		{3, 7},
1845 		{0, 0},
1846 		{0, 0}
1847 		}
1848 	},
1849 	{0, 1, 2, 0,		/* 0xfa */
1850 		{{1, 1},
1851 		{3, 7},
1852 		{0, 0},
1853 		{0, 0}
1854 		}
1855 	},
1856 	{1, 1, 2, 0,		/* 0xfb */
1857 		{{0, 1},
1858 		{3, 7},
1859 		{0, 0},
1860 		{0, 0}
1861 		}
1862 	},
1863 	{0, 1, 1, 0,		/* 0xfc */
1864 		{{2, 7},
1865 		{0, 0},
1866 		{0, 0},
1867 		{0, 0}
1868 		}
1869 	},
1870 	{1, 1, 2, 0,		/* 0xfd */
1871 		{{0, 0},
1872 		{2, 7},
1873 		{0, 0},
1874 		{0, 0}
1875 		}
1876 	},
1877 	{0, 1, 1, 0,		/* 0xfe */
1878 		{{1, 7},
1879 		{0, 0},
1880 		{0, 0},
1881 		{0, 0}
1882 		}
1883 	},
1884 	{1, 1, 1, 0,		/* 0xff */
1885 		{{0, 7},
1886 		{0, 0},
1887 		{0, 0},
1888 		{0, 0}
1889 		}
1890 	}
1891 };
1892 
1893 
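/*
 * Return 1 if the address in ifa is usable under the supplied scoping rules
 * (loopback, private IPv4, IPv6 link/site local, AF_CONN), 0 otherwise.
 * When do_update is set, the IPv6 interface flags are refreshed before
 * being checked.
 */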
1894 int
1895 sctp_is_address_in_scope(struct sctp_ifa *ifa,
1896                          struct sctp_scoping *scope,
1897                          int do_update)
1898 {
1899 	if ((scope->loopback_scope == 0) &&
1900 	    (ifa->ifn_p) && SCTP_IFN_IS_IFT_LOOP(ifa->ifn_p)) {
1901 		/*
1902 		 * skip loopback if not in scope
1903 		 */
1904 		return (0);
1905 	}
1906 	switch (ifa->address.sa.sa_family) {
1907 #ifdef INET
1908 	case AF_INET:
1909 		if (scope->ipv4_addr_legal) {
1910 			struct sockaddr_in *sin;
1911 
1912 			sin = &ifa->address.sin;
1913 			if (sin->sin_addr.s_addr == 0) {
1914 				/* not in scope, unspecified */
1915 				return (0);
1916 			}
1917 			if ((scope->ipv4_local_scope == 0) &&
1918 			    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
1919 				/* private address not in scope */
1920 				return (0);
1921 			}
1922 		} else {
1923 			return (0);
1924 		}
1925 		break;
1926 #endif
1927 #ifdef INET6
1928 	case AF_INET6:
1929 		if (scope->ipv6_addr_legal) {
1930 			struct sockaddr_in6 *sin6;
1931 
1932 			/* Must update the flags, which
1933 			 * means any IFA locks must now be applied here.
1934 			 */
1935 			if (do_update) {
1936 				sctp_gather_internal_ifa_flags(ifa);
1937 			}
1938 			if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
1939 				return (0);
1940 			}
1941 			/* ok to use deprecated addresses? */
1942 			sin6 = &ifa->address.sin6;
1943 			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1944 				/* skip unspecified addresses */
1945 				return (0);
1946 			}
1947 			if (		/* (local_scope == 0) && */
1948 			    (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) {
1949 				return (0);
1950 			}
1951 			if ((scope->site_scope == 0) &&
1952 			    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
1953 				return (0);
1954 			}
1955 		} else {
1956 			return (0);
1957 		}
1958 		break;
1959 #endif
1960 #if defined(__Userspace__)
1961 	case AF_CONN:
1962 		if (!scope->conn_addr_legal) {
1963 			return (0);
1964 		}
1965 		break;
1966 #endif
1967 	default:
1968 		return (0);
1969 	}
1970 	return (1);
1971 }
1972 
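/*
 * Append an IPv4 or IPv6 address parameter (TLV) for ifa to the end of the
 * mbuf chain starting at m, allocating a new mbuf when there is not enough
 * trailing space.  On success the parameter length is added to *len (if
 * non-NULL) and the mbuf holding the parameter is returned; on allocation
 * failure, or for unsupported address families, m is returned unchanged.
 */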
1973 static struct mbuf *
1974 sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa, uint16_t *len)
1975 {
1976 #if defined(INET) || defined(INET6)
1977 	struct sctp_paramhdr *paramh;
1978 	struct mbuf *mret;
1979 	uint16_t plen;
1980 #endif
1981 
1982 	switch (ifa->address.sa.sa_family) {
1983 #ifdef INET
1984 	case AF_INET:
1985 		plen = (uint16_t)sizeof(struct sctp_ipv4addr_param);
1986 		break;
1987 #endif
1988 #ifdef INET6
1989 	case AF_INET6:
1990 		plen = (uint16_t)sizeof(struct sctp_ipv6addr_param);
1991 		break;
1992 #endif
1993 	default:
1994 		return (m);
1995 	}
1996 #if defined(INET) || defined(INET6)
1997 	if (M_TRAILINGSPACE(m) >= plen) {
1998 		/* easy side we just drop it on the end */
1999 		paramh = (struct sctp_paramhdr *)(SCTP_BUF_AT(m, SCTP_BUF_LEN(m)));
2000 		mret = m;
2001 	} else {
2002 		/* Need more space */
2003 		mret = m;
2004 		while (SCTP_BUF_NEXT(mret) != NULL) {
2005 			mret = SCTP_BUF_NEXT(mret);
2006 		}
2007 		SCTP_BUF_NEXT(mret) = sctp_get_mbuf_for_msg(plen, 0, M_NOWAIT, 1, MT_DATA);
2008 		if (SCTP_BUF_NEXT(mret) == NULL) {
2009 			/* We are hosed, can't add more addresses */
2010 			return (m);
2011 		}
2012 		mret = SCTP_BUF_NEXT(mret);
2013 		paramh = mtod(mret, struct sctp_paramhdr *);
2014 	}
2015 	/* now add the parameter */
2016 	switch (ifa->address.sa.sa_family) {
2017 #ifdef INET
2018 	case AF_INET:
2019 	{
2020 		struct sctp_ipv4addr_param *ipv4p;
2021 		struct sockaddr_in *sin;
2022 
2023 		sin = &ifa->address.sin;
2024 		ipv4p = (struct sctp_ipv4addr_param *)paramh;
2025 		paramh->param_type = htons(SCTP_IPV4_ADDRESS);
2026 		paramh->param_length = htons(plen);
2027 		ipv4p->addr = sin->sin_addr.s_addr;
2028 		SCTP_BUF_LEN(mret) += plen;
2029 		break;
2030 	}
2031 #endif
2032 #ifdef INET6
2033 	case AF_INET6:
2034 	{
2035 		struct sctp_ipv6addr_param *ipv6p;
2036 		struct sockaddr_in6 *sin6;
2037 
2038 		sin6 = &ifa->address.sin6;
2039 		ipv6p = (struct sctp_ipv6addr_param *)paramh;
2040 		paramh->param_type = htons(SCTP_IPV6_ADDRESS);
2041 		paramh->param_length = htons(plen);
2042 		memcpy(ipv6p->addr, &sin6->sin6_addr,
2043 		    sizeof(ipv6p->addr));
2044 #if defined(SCTP_EMBEDDED_V6_SCOPE)
2045 		/* clear embedded scope in the address */
2046 		in6_clearscope((struct in6_addr *)ipv6p->addr);
2047 #endif
2048 		SCTP_BUF_LEN(mret) += plen;
2049 		break;
2050 	}
2051 #endif
2052 	default:
2053 		return (m);
2054 	}
2055 	if (len != NULL) {
2056 		*len += plen;
2057 	}
2058 	return (mret);
2059 #endif
2060 }
2061 
2062 
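/*
 * Add the local address parameters to the INIT or INIT-ACK chunk held in
 * m_at.  For a bound-all endpoint the VRF's interface lists are walked
 * (limiting the number of addresses once there are more than
 * SCTP_COUNT_LIMIT of them); otherwise the endpoint's bound address list is
 * used.  In either case addresses are only listed when more than one would
 * qualify (see the NAT comment below), and any pending chunk padding is
 * written out before a parameter is appended.
 */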
2063 struct mbuf *
2064 sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2065                            struct sctp_scoping *scope,
2066 			   struct mbuf *m_at, int cnt_inits_to,
2067 			   uint16_t *padding_len, uint16_t *chunk_len)
2068 {
2069 	struct sctp_vrf *vrf = NULL;
2070 	int cnt, limit_out = 0, total_count;
2071 	uint32_t vrf_id;
2072 
2073 	vrf_id = inp->def_vrf_id;
2074 	SCTP_IPI_ADDR_RLOCK();
2075 	vrf = sctp_find_vrf(vrf_id);
2076 	if (vrf == NULL) {
2077 		SCTP_IPI_ADDR_RUNLOCK();
2078 		return (m_at);
2079 	}
2080 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
2081 		struct sctp_ifa *sctp_ifap;
2082 		struct sctp_ifn *sctp_ifnp;
2083 
2084 		cnt = cnt_inits_to;
2085 		if (vrf->total_ifa_count > SCTP_COUNT_LIMIT) {
2086 			limit_out = 1;
2087 			cnt = SCTP_ADDRESS_LIMIT;
2088 			goto skip_count;
2089 		}
2090 		LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
2091 			if ((scope->loopback_scope == 0) &&
2092 			    SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
2093 				/*
2094 				 * Skip loopback devices if loopback_scope
2095 				 * not set
2096 				 */
2097 				continue;
2098 			}
2099 			LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
2100 #if defined(__FreeBSD__) && !defined(__Userspace__)
2101 #ifdef INET
2102 				if ((sctp_ifap->address.sa.sa_family == AF_INET) &&
2103 				    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2104 				                      &sctp_ifap->address.sin.sin_addr) != 0)) {
2105 					continue;
2106 				}
2107 #endif
2108 #ifdef INET6
2109 				if ((sctp_ifap->address.sa.sa_family == AF_INET6) &&
2110 				    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2111 				                      &sctp_ifap->address.sin6.sin6_addr) != 0)) {
2112 					continue;
2113 				}
2114 #endif
2115 #endif
2116 				if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
2117 					continue;
2118 				}
2119 #if defined(__Userspace__)
2120 				if (sctp_ifap->address.sa.sa_family == AF_CONN) {
2121 					continue;
2122 				}
2123 #endif
2124 				if (sctp_is_address_in_scope(sctp_ifap, scope, 1) == 0) {
2125 					continue;
2126 				}
2127 				cnt++;
2128 				if (cnt > SCTP_ADDRESS_LIMIT) {
2129 					break;
2130 				}
2131 			}
2132 			if (cnt > SCTP_ADDRESS_LIMIT) {
2133 				break;
2134 			}
2135 		}
2136 	skip_count:
2137 		if (cnt > 1) {
2138 			total_count = 0;
2139 			LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
2140 				cnt = 0;
2141 				if ((scope->loopback_scope == 0) &&
2142 				    SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
2143 					/*
2144 					 * Skip loopback devices if
2145 					 * loopback_scope not set
2146 					 */
2147 					continue;
2148 				}
2149 				LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
2150 #if defined(__FreeBSD__) && !defined(__Userspace__)
2151 #ifdef INET
2152 					if ((sctp_ifap->address.sa.sa_family == AF_INET) &&
2153 					    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2154 					                      &sctp_ifap->address.sin.sin_addr) != 0)) {
2155 						continue;
2156 					}
2157 #endif
2158 #ifdef INET6
2159 					if ((sctp_ifap->address.sa.sa_family == AF_INET6) &&
2160 					    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2161 					                      &sctp_ifap->address.sin6.sin6_addr) != 0)) {
2162 						continue;
2163 					}
2164 #endif
2165 #endif
2166 					if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
2167 						continue;
2168 					}
2169 #if defined(__Userspace__)
2170 					if (sctp_ifap->address.sa.sa_family == AF_CONN) {
2171 						continue;
2172 					}
2173 #endif
2174 					if (sctp_is_address_in_scope(sctp_ifap,
2175 								     scope, 0) == 0) {
2176 						continue;
2177 					}
2178 					if ((chunk_len != NULL) &&
2179 					    (padding_len != NULL) &&
2180 					    (*padding_len > 0)) {
2181 						memset(mtod(m_at, caddr_t) + *chunk_len, 0, *padding_len);
2182 						SCTP_BUF_LEN(m_at) += *padding_len;
2183 						*chunk_len += *padding_len;
2184 						*padding_len = 0;
2185 					}
2186 					m_at = sctp_add_addr_to_mbuf(m_at, sctp_ifap, chunk_len);
2187 					if (limit_out) {
2188 						cnt++;
2189 						total_count++;
2190 						if (cnt >= 2) {
2191 							/* two from each interface */
2192 							break;
2193 						}
2194 						if (total_count > SCTP_ADDRESS_LIMIT) {
2195 							/* No more addresses */
2196 							break;
2197 						}
2198 					}
2199 				}
2200 			}
2201 		}
2202 	} else {
2203 		struct sctp_laddr *laddr;
2204 
2205 		cnt = cnt_inits_to;
2206 		/* First, how many? */
2207 		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2208 			if (laddr->ifa == NULL) {
2209 				continue;
2210 			}
2211 			if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
2212 				/* Address being deleted by the system, don't
2213 				 * list.
2214 				 */
2215 				continue;
2216 			if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2217 				/* Address being deleted on this ep,
2218 				 * don't list.
2219 				 */
2220 				continue;
2221 			}
2222 #if defined(__Userspace__)
2223 			if (laddr->ifa->address.sa.sa_family == AF_CONN) {
2224 				continue;
2225 			}
2226 #endif
2227 			if (sctp_is_address_in_scope(laddr->ifa,
2228 						     scope, 1) == 0) {
2229 				continue;
2230 			}
2231 			cnt++;
2232 		}
2233 		/*
2234 		 * To get through a NAT we only list addresses if we have
2235 		 * more than one. That way if you just bind a single address
2236 		 * we let the source of the init dictate our address.
2237 		 */
2238 		if (cnt > 1) {
2239 			cnt = cnt_inits_to;
2240 			LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2241 				if (laddr->ifa == NULL) {
2242 					continue;
2243 				}
2244 				if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
2245 					continue;
2246 				}
2247 #if defined(__Userspace__)
2248 				if (laddr->ifa->address.sa.sa_family == AF_CONN) {
2249 					continue;
2250 				}
2251 #endif
2252 				if (sctp_is_address_in_scope(laddr->ifa,
2253 							     scope, 0) == 0) {
2254 					continue;
2255 				}
2256 				if ((chunk_len != NULL) &&
2257 				    (padding_len != NULL) &&
2258 				    (*padding_len > 0)) {
2259 					memset(mtod(m_at, caddr_t) + *chunk_len, 0, *padding_len);
2260 					SCTP_BUF_LEN(m_at) += *padding_len;
2261 					*chunk_len += *padding_len;
2262 					*padding_len = 0;
2263 				}
2264 				m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa, chunk_len);
2265 				cnt++;
2266 				if (cnt >= SCTP_ADDRESS_LIMIT) {
2267 					break;
2268 				}
2269 			}
2270 		}
2271 	}
2272 	SCTP_IPI_ADDR_RUNLOCK();
2273 	return (m_at);
2274 }
2275 
2276 static struct sctp_ifa *
2277 sctp_is_ifa_addr_preferred(struct sctp_ifa *ifa,
2278 			   uint8_t dest_is_loop,
2279 			   uint8_t dest_is_priv,
2280 			   sa_family_t fam)
2281 {
2282 	uint8_t dest_is_global = 0;
2283 	/* dest_is_priv is true if destination is a private address */
2284 	/* dest_is_loop is true if destination is a loopback address */
2285 
2286 	/**
2287 	 * Here we determine if it's a preferred address. A preferred address
2288 	 * means it is the same scope or a higher scope than the destination.
2289 	 * L = loopback, P = private, G = global
2290 	 * -----------------------------------------
2291 	 *    src    |  dest | result
2292 	 *  ----------------------------------------
2293 	 *     L     |    L  |    yes
2294 	 *  -----------------------------------------
2295 	 *     P     |    L  |    yes-v4 no-v6
2296 	 *  -----------------------------------------
2297 	 *     G     |    L  |    yes-v4 no-v6
2298 	 *  -----------------------------------------
2299 	 *     L     |    P  |    no
2300 	 *  -----------------------------------------
2301 	 *     P     |    P  |    yes
2302 	 *  -----------------------------------------
2303 	 *     G     |    P  |    no
2304 	 *   -----------------------------------------
2305 	 *     L     |    G  |    no
2306 	 *   -----------------------------------------
2307 	 *     P     |    G  |    no
2308 	 *    -----------------------------------------
2309 	 *     G     |    G  |    yes
2310 	 *    -----------------------------------------
2311 	 */
2312 
2313 	if (ifa->address.sa.sa_family != fam) {
2314 		/* forget mis-matched family */
2315 		return (NULL);
2316 	}
2317 	if ((dest_is_priv == 0) && (dest_is_loop == 0)) {
2318 		dest_is_global = 1;
2319 	}
2320 	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Is destination preferred:");
2321 	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ifa->address.sa);
2322 	/* Ok the address may be ok */
2323 #ifdef INET6
2324 	if (fam == AF_INET6) {
2325 		/* ok to use deprecated addresses? No, let's not! */
2326 		if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
2327 			SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:1\n");
2328 			return (NULL);
2329 		}
2330 		if (ifa->src_is_priv && !ifa->src_is_loop) {
2331 			if (dest_is_loop) {
2332 				SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:2\n");
2333 				return (NULL);
2334 			}
2335 		}
2336 		if (ifa->src_is_glob) {
2337 			if (dest_is_loop) {
2338 				SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:3\n");
2339 				return (NULL);
2340 			}
2341 		}
2342 	}
2343 #endif
2344 	/* Now that we know what is what, implement our table.
2345 	 * This could in theory be done slicker (it used to be), but this
2346 	 * is straightforward and easier to validate :-)
2347 	 */
2348 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "src_loop:%d src_priv:%d src_glob:%d\n",
2349 		ifa->src_is_loop, ifa->src_is_priv, ifa->src_is_glob);
2350 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "dest_loop:%d dest_priv:%d dest_glob:%d\n",
2351 		dest_is_loop, dest_is_priv, dest_is_global);
2352 
2353 	if ((ifa->src_is_loop) && (dest_is_priv)) {
2354 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:4\n");
2355 		return (NULL);
2356 	}
2357 	if ((ifa->src_is_glob) && (dest_is_priv)) {
2358 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:5\n");
2359 		return (NULL);
2360 	}
2361 	if ((ifa->src_is_loop) && (dest_is_global)) {
2362 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:6\n");
2363 		return (NULL);
2364 	}
2365 	if ((ifa->src_is_priv) && (dest_is_global)) {
2366 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:7\n");
2367 		return (NULL);
2368 	}
2369 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "YES\n");
2370 	/* its a preferred address */
2371 	return (ifa);
2372 }
2373 
2374 static struct sctp_ifa *
2375 sctp_is_ifa_addr_acceptable(struct sctp_ifa *ifa,
2376 			    uint8_t dest_is_loop,
2377 			    uint8_t dest_is_priv,
2378 			    sa_family_t fam)
2379 {
2380 	uint8_t dest_is_global = 0;
2381 
2382 	/**
2383 	 * Here we determine if it's an acceptable address. An acceptable
2384 	 * address means it is the same scope or a higher scope, but we can
2385 	 * allow for NAT, which means it's ok to have a global dest and a
2386 	 * private src.
2387 	 *
2388 	 * L = loopback, P = private, G = global
2389 	 * -----------------------------------------
2390 	 *  src    |  dest | result
2391 	 * -----------------------------------------
2392 	 *   L     |   L   |    yes
2393 	 *  -----------------------------------------
2394 	 *   P     |   L   |    yes-v4 no-v6
2395 	 *  -----------------------------------------
2396 	 *   G     |   L   |    yes
2397 	 * -----------------------------------------
2398 	 *   L     |   P   |    no
2399 	 * -----------------------------------------
2400 	 *   P     |   P   |    yes
2401 	 * -----------------------------------------
2402 	 *   G     |   P   |    yes - May not work
2403 	 * -----------------------------------------
2404 	 *   L     |   G   |    no
2405 	 * -----------------------------------------
2406 	 *   P     |   G   |    yes - May not work
2407 	 * -----------------------------------------
2408 	 *   G     |   G   |    yes
2409 	 * -----------------------------------------
2410 	 */
2411 
2412 	if (ifa->address.sa.sa_family != fam) {
2413 		/* forget non matching family */
2414 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa_fam:%d fam:%d\n",
2415 			ifa->address.sa.sa_family, fam);
2416 		return (NULL);
2417 	}
2418 	/* Ok the address may be ok */
2419 	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, &ifa->address.sa);
2420 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst_is_loop:%d dest_is_priv:%d\n",
2421 		dest_is_loop, dest_is_priv);
2422 	if ((dest_is_loop == 0) && (dest_is_priv == 0)) {
2423 		dest_is_global = 1;
2424 	}
2425 #ifdef INET6
2426 	if (fam == AF_INET6) {
2427 		/* ok to use deprecated addresses? */
2428 		if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
2429 			return (NULL);
2430 		}
2431 		if (ifa->src_is_priv) {
2432 			/* Special case, linklocal to loop */
2433 			if (dest_is_loop)
2434 				return (NULL);
2435 		}
2436 	}
2437 #endif
2438 	/*
2439 	 * Now that we know what is what, implement our table.
2440 	 * This could in theory be done slicker (it used to be), but this
2441 	 * is straightforward and easier to validate :-)
2442 	 */
2443 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_priv:%d\n",
2444 		ifa->src_is_loop,
2445 		dest_is_priv);
2446 	if ((ifa->src_is_loop == 1) && (dest_is_priv)) {
2447 		return (NULL);
2448 	}
2449 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_glob:%d\n",
2450 		ifa->src_is_loop,
2451 		dest_is_global);
2452 	if ((ifa->src_is_loop == 1) && (dest_is_global)) {
2453 		return (NULL);
2454 	}
2455 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "address is acceptable\n");
2456 	/* its an acceptable address */
2457 	return (ifa);
2458 }
2459 
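/*
 * Return 1 if ifa is on the association's restricted address list,
 * 0 otherwise.  A NULL stcb means nothing is restricted.
 */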
2460 int
2461 sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
2462 {
2463 	struct sctp_laddr *laddr;
2464 
2465 	if (stcb == NULL) {
2466 		/* There are no restrictions, no TCB :-) */
2467 		return (0);
2468 	}
2469 	LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) {
2470 		if (laddr->ifa == NULL) {
2471 			SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
2472 				__func__);
2473 			continue;
2474 		}
2475 		if (laddr->ifa == ifa) {
2476 			/* Yes it is on the list */
2477 			return (1);
2478 		}
2479 	}
2480 	return (0);
2481 }
2482 
2483 
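/*
 * Return 1 if ifa is currently bound to the endpoint (present on the
 * endpoint's address list with no pending action), 0 otherwise.
 */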
2484 int
2485 sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa)
2486 {
2487 	struct sctp_laddr *laddr;
2488 
2489 	if (ifa == NULL)
2490 		return (0);
2491 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2492 		if (laddr->ifa == NULL) {
2493 			SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
2494 				__func__);
2495 			continue;
2496 		}
2497 		if ((laddr->ifa == ifa) && laddr->action == 0)
2498 			/* same pointer */
2499 			return (1);
2500 	}
2501 	return (0);
2502 }
2503 
2504 
2505 
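/*
 * Source address selection for a bound-specific endpoint: prefer an address
 * that is both bound to the endpoint and preferred on the interface the
 * route emits on, then any preferred bound address, then any acceptable
 * bound address, rotating through the list via inp->next_addr_touse.  The
 * returned ifa has its reference count bumped; NULL means no usable source
 * address was found.
 */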
2506 static struct sctp_ifa *
2507 sctp_choose_boundspecific_inp(struct sctp_inpcb *inp,
2508 			      sctp_route_t *ro,
2509 			      uint32_t vrf_id,
2510 			      int non_asoc_addr_ok,
2511 			      uint8_t dest_is_priv,
2512 			      uint8_t dest_is_loop,
2513 			      sa_family_t fam)
2514 {
2515 	struct sctp_laddr *laddr, *starting_point;
2516 	void *ifn;
2517 	int resettotop = 0;
2518 	struct sctp_ifn *sctp_ifn;
2519 	struct sctp_ifa *sctp_ifa, *sifa;
2520 	struct sctp_vrf *vrf;
2521 	uint32_t ifn_index;
2522 
2523 	vrf = sctp_find_vrf(vrf_id);
2524 	if (vrf == NULL)
2525 		return (NULL);
2526 
2527 	ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2528 	ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2529 	sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2530 	/*
2531 	 * first question, is the ifn we will emit on in our list? If so, we
2532 	 * want such an address. Note that we first look for a
2533 	 * preferred address.
2534 	 */
2535 	if (sctp_ifn) {
2536 		/* is a preferred one on the interface we route out? */
2537 		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2538 #if defined(__FreeBSD__) && !defined(__Userspace__)
2539 #ifdef INET
2540 			if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
2541 			    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2542 			                      &sctp_ifa->address.sin.sin_addr) != 0)) {
2543 				continue;
2544 			}
2545 #endif
2546 #ifdef INET6
2547 			if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
2548 			    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2549 			                      &sctp_ifa->address.sin6.sin6_addr) != 0)) {
2550 				continue;
2551 			}
2552 #endif
2553 #endif
2554 			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2555 			    (non_asoc_addr_ok == 0))
2556 				continue;
2557 			sifa = sctp_is_ifa_addr_preferred(sctp_ifa,
2558 							  dest_is_loop,
2559 							  dest_is_priv, fam);
2560 			if (sifa == NULL)
2561 				continue;
2562 			if (sctp_is_addr_in_ep(inp, sifa)) {
2563 				atomic_add_int(&sifa->refcount, 1);
2564 				return (sifa);
2565 			}
2566 		}
2567 	}
2568 	/*
2569 	 * ok, now we need to find one on the list of addresses.
2570 	 * We can't get one on the emitting interface, so let's first find
2571 	 * a preferred one. If not that, an acceptable one; otherwise...
2572 	 * we return NULL.
2573 	 */
2574 	starting_point = inp->next_addr_touse;
2575  once_again:
2576 	if (inp->next_addr_touse == NULL) {
2577 		inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
2578 		resettotop = 1;
2579 	}
2580 	for (laddr = inp->next_addr_touse; laddr;
2581 	     laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2582 		if (laddr->ifa == NULL) {
2583 			/* address has been removed */
2584 			continue;
2585 		}
2586 		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2587 			/* address is being deleted */
2588 			continue;
2589 		}
2590 		sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop,
2591 						  dest_is_priv, fam);
2592 		if (sifa == NULL)
2593 			continue;
2594 		atomic_add_int(&sifa->refcount, 1);
2595 		return (sifa);
2596 	}
2597 	if (resettotop == 0) {
2598 		inp->next_addr_touse = NULL;
2599 		goto once_again;
2600 	}
2601 
2602 	inp->next_addr_touse = starting_point;
2603 	resettotop = 0;
2604  once_again_too:
2605 	if (inp->next_addr_touse == NULL) {
2606 		inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
2607 		resettotop = 1;
2608 	}
2609 
2610 	/* ok, what about an acceptable address in the inp */
2611 	for (laddr = inp->next_addr_touse; laddr;
2612 	     laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2613 		if (laddr->ifa == NULL) {
2614 			/* address has been removed */
2615 			continue;
2616 		}
2617 		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2618 			/* address is being deleted */
2619 			continue;
2620 		}
2621 		sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
2622 						   dest_is_priv, fam);
2623 		if (sifa == NULL)
2624 			continue;
2625 		atomic_add_int(&sifa->refcount, 1);
2626 		return (sifa);
2627 	}
2628 	if (resettotop == 0) {
2629 		inp->next_addr_touse = NULL;
2630 		goto once_again_too;
2631 	}
2632 
2633 	/*
2634 	 * no bound address can be a source for the destination; we are in
2635 	 * trouble
2636 	 */
2637 	return (NULL);
2638 }
2639 
2640 
2641 
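/*
 * Source address selection for a bound-specific endpoint when an
 * association (stcb) exists: like the inp variant above, but addresses on
 * the association's restricted list are skipped (a restricted address is
 * only used when non_asoc_addr_ok is set and the address is still pending),
 * and the walk through the bound address list resumes from
 * asoc.last_used_address.
 */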
2642 static struct sctp_ifa *
2643 sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp,
2644 			       struct sctp_tcb *stcb,
2645 			       sctp_route_t *ro,
2646 			       uint32_t vrf_id,
2647 			       uint8_t dest_is_priv,
2648 			       uint8_t dest_is_loop,
2649 			       int non_asoc_addr_ok,
2650 			       sa_family_t fam)
2651 {
2652 	struct sctp_laddr *laddr, *starting_point;
2653 	void *ifn;
2654 	struct sctp_ifn *sctp_ifn;
2655 	struct sctp_ifa *sctp_ifa, *sifa;
2656 	uint8_t start_at_beginning = 0;
2657 	struct sctp_vrf *vrf;
2658 	uint32_t ifn_index;
2659 
2660 	/*
2661 	 * first question, is the ifn we will emit on in our list, if so, we
2662 	 * want that one.
2663 	 */
2664 	vrf = sctp_find_vrf(vrf_id);
2665 	if (vrf == NULL)
2666 		return (NULL);
2667 
2668 	ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2669 	ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2670 	sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2671 
2672 	/*
2673 	 * first question, is the ifn we will emit on in our list?  If so,
2674 	 * we want that one. First we look for a preferred. Second, we go
2675 	 * for an acceptable.
2676 	 */
2677 	if (sctp_ifn) {
2678 		/* first try for a preferred address on the ep */
2679 		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2680 #if defined(__FreeBSD__) && !defined(__Userspace__)
2681 #ifdef INET
2682 			if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
2683 			    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2684 			                      &sctp_ifa->address.sin.sin_addr) != 0)) {
2685 				continue;
2686 			}
2687 #endif
2688 #ifdef INET6
2689 			if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
2690 			    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2691 			                      &sctp_ifa->address.sin6.sin6_addr) != 0)) {
2692 				continue;
2693 			}
2694 #endif
2695 #endif
2696 			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
2697 				continue;
2698 			if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
2699 				sifa = sctp_is_ifa_addr_preferred(sctp_ifa, dest_is_loop, dest_is_priv, fam);
2700 				if (sifa == NULL)
2701 					continue;
2702 				if (((non_asoc_addr_ok == 0) &&
2703 				     (sctp_is_addr_restricted(stcb, sifa))) ||
2704 				    (non_asoc_addr_ok &&
2705 				     (sctp_is_addr_restricted(stcb, sifa)) &&
2706 				     (!sctp_is_addr_pending(stcb, sifa)))) {
2707 					/* on the no-no list */
2708 					continue;
2709 				}
2710 				atomic_add_int(&sifa->refcount, 1);
2711 				return (sifa);
2712 			}
2713 		}
2714 		/* next try for an acceptable address on the ep */
2715 		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2716 #if defined(__FreeBSD__) && !defined(__Userspace__)
2717 #ifdef INET
2718 			if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
2719 			    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2720 			                      &sctp_ifa->address.sin.sin_addr) != 0)) {
2721 				continue;
2722 			}
2723 #endif
2724 #ifdef INET6
2725 			if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
2726 			    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2727 			                      &sctp_ifa->address.sin6.sin6_addr) != 0)) {
2728 				continue;
2729 			}
2730 #endif
2731 #endif
2732 			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
2733 				continue;
2734 			if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
2735 				sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv, fam);
2736 				if (sifa == NULL)
2737 					continue;
2738 				if (((non_asoc_addr_ok == 0) &&
2739 				     (sctp_is_addr_restricted(stcb, sifa))) ||
2740 				    (non_asoc_addr_ok &&
2741 				     (sctp_is_addr_restricted(stcb, sifa)) &&
2742 				     (!sctp_is_addr_pending(stcb, sifa)))) {
2743 					/* on the no-no list */
2744 					continue;
2745 				}
2746 				atomic_add_int(&sifa->refcount, 1);
2747 				return (sifa);
2748 			}
2749 		}
2750 
2751 	}
2752 	/*
2753 	 * If we can't find one like that, then we must look at all
2754 	 * bound addresses and pick one: first a preferred one, then
2755 	 * an acceptable one.
2756 	 */
2757 	starting_point = stcb->asoc.last_used_address;
2758  sctp_from_the_top:
2759 	if (stcb->asoc.last_used_address == NULL) {
2760 		start_at_beginning = 1;
2761 		stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
2762 	}
2763 	/* search beginning with the last used address */
2764 	for (laddr = stcb->asoc.last_used_address; laddr;
2765 	     laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2766 		if (laddr->ifa == NULL) {
2767 			/* address has been removed */
2768 			continue;
2769 		}
2770 		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2771 			/* address is being deleted */
2772 			continue;
2773 		}
2774 		sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop, dest_is_priv, fam);
2775 		if (sifa == NULL)
2776 			continue;
2777 		if (((non_asoc_addr_ok == 0) &&
2778 		     (sctp_is_addr_restricted(stcb, sifa))) ||
2779 		    (non_asoc_addr_ok &&
2780 		     (sctp_is_addr_restricted(stcb, sifa)) &&
2781 		     (!sctp_is_addr_pending(stcb, sifa)))) {
2782 			/* on the no-no list */
2783 			continue;
2784 		}
2785 		stcb->asoc.last_used_address = laddr;
2786 		atomic_add_int(&sifa->refcount, 1);
2787 		return (sifa);
2788 	}
2789 	if (start_at_beginning == 0) {
2790 		stcb->asoc.last_used_address = NULL;
2791 		goto sctp_from_the_top;
2792 	}
2793 	/* now try for any higher scope than the destination */
2794 	stcb->asoc.last_used_address = starting_point;
2795 	start_at_beginning = 0;
2796  sctp_from_the_top2:
2797 	if (stcb->asoc.last_used_address == NULL) {
2798 		start_at_beginning = 1;
2799 		stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
2800 	}
2801 	/* search beginning with the last used address */
2802 	for (laddr = stcb->asoc.last_used_address; laddr;
2803 	     laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2804 		if (laddr->ifa == NULL) {
2805 			/* address has been removed */
2806 			continue;
2807 		}
2808 		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2809 			/* address is being deleted */
2810 			continue;
2811 		}
2812 		sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
2813 						   dest_is_priv, fam);
2814 		if (sifa == NULL)
2815 			continue;
2816 		if (((non_asoc_addr_ok == 0) &&
2817 		     (sctp_is_addr_restricted(stcb, sifa))) ||
2818 		    (non_asoc_addr_ok &&
2819 		     (sctp_is_addr_restricted(stcb, sifa)) &&
2820 		     (!sctp_is_addr_pending(stcb, sifa)))) {
2821 			/* on the no-no list */
2822 			continue;
2823 		}
2824 		stcb->asoc.last_used_address = laddr;
2825 		atomic_add_int(&sifa->refcount, 1);
2826 		return (sifa);
2827 	}
2828 	if (start_at_beginning == 0) {
2829 		stcb->asoc.last_used_address = NULL;
2830 		goto sctp_from_the_top2;
2831 	}
2832 	return (NULL);
2833 }
2834 
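/*
 * Return the addr_wanted-th (zero-based) preferred address on the given
 * ifn that passes the jail, scope, mobility, and restriction checks for
 * this destination, or NULL if fewer eligible addresses exist.
 */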
2835 static struct sctp_ifa *
2836 sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn,
2837 #if defined(__FreeBSD__) && !defined(__Userspace__)
2838                                                  struct sctp_inpcb *inp,
2839 #else
2840                                                  struct sctp_inpcb *inp SCTP_UNUSED,
2841 #endif
2842 						 struct sctp_tcb *stcb,
2843 						 int non_asoc_addr_ok,
2844 						 uint8_t dest_is_loop,
2845 						 uint8_t dest_is_priv,
2846 						 int addr_wanted,
2847 						 sa_family_t fam,
2848 						 sctp_route_t *ro
2849 						 )
2850 {
2851 	struct sctp_ifa *ifa, *sifa;
2852 	int num_eligible_addr = 0;
2853 #ifdef INET6
2854 #ifdef SCTP_EMBEDDED_V6_SCOPE
2855 	struct sockaddr_in6 sin6, lsa6;
2856 
2857 	if (fam == AF_INET6) {
2858 		memcpy(&sin6, &ro->ro_dst, sizeof(struct sockaddr_in6));
2859 #ifdef SCTP_KAME
2860 		(void)sa6_recoverscope(&sin6);
2861 #else
2862 		(void)in6_recoverscope(&sin6, &sin6.sin6_addr, NULL);
2863 #endif  /* SCTP_KAME */
2864 	}
2865 #endif  /* SCTP_EMBEDDED_V6_SCOPE */
2866 #endif	/* INET6 */
2867 	LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
2868 #if defined(__FreeBSD__) && !defined(__Userspace__)
2869 #ifdef INET
2870 		if ((ifa->address.sa.sa_family == AF_INET) &&
2871 		    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2872 		                      &ifa->address.sin.sin_addr) != 0)) {
2873 			continue;
2874 		}
2875 #endif
2876 #ifdef INET6
2877 		if ((ifa->address.sa.sa_family == AF_INET6) &&
2878 		    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2879 		                      &ifa->address.sin6.sin6_addr) != 0)) {
2880 			continue;
2881 		}
2882 #endif
2883 #endif
2884 		if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2885 		    (non_asoc_addr_ok == 0))
2886 			continue;
2887 		sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
2888 						  dest_is_priv, fam);
2889 		if (sifa == NULL)
2890 			continue;
2891 #ifdef INET6
2892 		if (fam == AF_INET6 &&
2893 		    dest_is_loop &&
2894 		    sifa->src_is_loop && sifa->src_is_priv) {
2895 			/* Don't allow fe80::1 to be a source on loopback ::1; we don't
2896 			 * list it to the peer, so we would get an abort.
2897 			 */
2898 			continue;
2899 		}
2900 #ifdef SCTP_EMBEDDED_V6_SCOPE
2901 		if (fam == AF_INET6 &&
2902 		    IN6_IS_ADDR_LINKLOCAL(&sifa->address.sin6.sin6_addr) &&
2903 		    IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) {
2904 			/* link-local <-> link-local must belong to the same scope. */
2905 			memcpy(&lsa6, &sifa->address.sin6, sizeof(struct sockaddr_in6));
2906 #ifdef SCTP_KAME
2907 			(void)sa6_recoverscope(&lsa6);
2908 #else
2909 			(void)in6_recoverscope(&lsa6, &lsa6.sin6_addr, NULL);
2910 #endif  /* SCTP_KAME */
2911 			if (sin6.sin6_scope_id != lsa6.sin6_scope_id) {
2912 				continue;
2913 			}
2914 		}
2915 #endif  /* SCTP_EMBEDDED_V6_SCOPE */
2916 #endif	/* INET6 */
2917 
2918 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Userspace__)
2919 		/* Check if the IPv6 address matches the next-hop.
2920 		   In the mobile case, an old IPv6 address may not have been
2921 		   deleted from the interface. Then the interface has both the
2922 		   previous and the new addresses. We should use the one
2923 		   corresponding to the next-hop.  (by micchie)
2924 		 */
2925 #ifdef INET6
2926 		if (stcb && fam == AF_INET6 &&
2927 		    sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
2928 			if (sctp_v6src_match_nexthop(&sifa->address.sin6, ro)
2929 			    == 0) {
2930 				continue;
2931 			}
2932 		}
2933 #endif
2934 #ifdef INET
2935 		/* Avoid topologically incorrect IPv4 address */
2936 		if (stcb && fam == AF_INET &&
2937 		    sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
2938 			if (sctp_v4src_match_nexthop(sifa, ro) == 0) {
2939 				continue;
2940 			}
2941 		}
2942 #endif
2943 #endif
2944 		if (stcb) {
2945 			if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
2946 				continue;
2947 			}
2948 			if (((non_asoc_addr_ok == 0) &&
2949 			     (sctp_is_addr_restricted(stcb, sifa))) ||
2950 			    (non_asoc_addr_ok &&
2951 			     (sctp_is_addr_restricted(stcb, sifa)) &&
2952 			     (!sctp_is_addr_pending(stcb, sifa)))) {
2953 				/*
2954 				 * It is restricted for some reason..
2955 				 * probably not yet added.
2956 				 */
2957 				continue;
2958 			}
2959 		}
2960 		if (num_eligible_addr >= addr_wanted) {
2961 			return (sifa);
2962 		}
2963 		num_eligible_addr++;
2964 	}
2965 	return (NULL);
2966 }
2967 
2968 
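/*
 * Count how many addresses on the given ifn would qualify as preferred
 * source addresses for this destination; the caller uses the count to
 * bound its rotation index.
 */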
2969 static int
2970 sctp_count_num_preferred_boundall(struct sctp_ifn *ifn,
2971 #if defined(__FreeBSD__) && !defined(__Userspace__)
2972                                   struct sctp_inpcb *inp,
2973 #else
2974                                   struct sctp_inpcb *inp SCTP_UNUSED,
2975 #endif
2976 				  struct sctp_tcb *stcb,
2977 				  int non_asoc_addr_ok,
2978 				  uint8_t dest_is_loop,
2979 				  uint8_t dest_is_priv,
2980 				  sa_family_t fam)
2981 {
2982 	struct sctp_ifa *ifa, *sifa;
2983 	int num_eligible_addr = 0;
2984 
2985 	LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
2986 #if defined(__FreeBSD__) && !defined(__Userspace__)
2987 #ifdef INET
2988 		if ((ifa->address.sa.sa_family == AF_INET) &&
2989 		    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2990 		                      &ifa->address.sin.sin_addr) != 0)) {
2991 			continue;
2992 		}
2993 #endif
2994 #ifdef INET6
2995 		if ((ifa->address.sa.sa_family == AF_INET6) &&
2996 		    (stcb != NULL) &&
2997 		    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2998 		                      &ifa->address.sin6.sin6_addr) != 0)) {
2999 			continue;
3000 		}
3001 #endif
3002 #endif
3003 		if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3004 		    (non_asoc_addr_ok == 0)) {
3005 			continue;
3006 		}
3007 		sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
3008 						  dest_is_priv, fam);
3009 		if (sifa == NULL) {
3010 			continue;
3011 		}
3012 		if (stcb) {
3013 			if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
3014 				continue;
3015 			}
3016 			if (((non_asoc_addr_ok == 0) &&
3017 			     (sctp_is_addr_restricted(stcb, sifa))) ||
3018 			    (non_asoc_addr_ok &&
3019 			     (sctp_is_addr_restricted(stcb, sifa)) &&
3020 			     (!sctp_is_addr_pending(stcb, sifa)))) {
3021 				/*
3022 				 * It is restricted for some reason..
3023 				 * probably not yet added.
3024 				 */
3025 				continue;
3026 			}
3027 		}
3028 		num_eligible_addr++;
3029 	}
3030 	return (num_eligible_addr);
3031 }
3032 
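/*
 * Bound-all source address selection. Plan A: rotate through the
 * preferred addresses on the interface the route emits on. Plan B: any
 * preferred address on any other interface. Plan C: an acceptable
 * address on the emit interface. Plan D: an acceptable address on any
 * interface, retried once with IPv4 private addresses allowed if the
 * scope initially excluded them.
 */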
3033 static struct sctp_ifa *
3034 sctp_choose_boundall(struct sctp_inpcb *inp,
3035                      struct sctp_tcb *stcb,
3036 		     struct sctp_nets *net,
3037 		     sctp_route_t *ro,
3038 		     uint32_t vrf_id,
3039 		     uint8_t dest_is_priv,
3040 		     uint8_t dest_is_loop,
3041 		     int non_asoc_addr_ok,
3042 		     sa_family_t fam)
3043 {
3044 	int cur_addr_num = 0, num_preferred = 0;
3045 	void *ifn;
3046 	struct sctp_ifn *sctp_ifn, *looked_at = NULL, *emit_ifn;
3047 	struct sctp_ifa *sctp_ifa, *sifa;
3048 	uint32_t ifn_index;
3049 	struct sctp_vrf *vrf;
3050 #ifdef INET
3051 	int retried = 0;
3052 #endif
3053 
3054 	/*-
3055 	 * For boundall we can use any address in the association.
3056 	 * If non_asoc_addr_ok is set we can use any address (at least in
3057 	 * theory). So we look for preferred addresses first. If we find one,
3058 	 * we use it. Otherwise we next try to get an address on the
3059 	 * interface, which we should be able to do (unless non_asoc_addr_ok
3060 	 * is false and we are routed out that way). In these cases where we
3061 	 * can't use the address of the interface we go through all the
3062 	 * ifn's looking for an address we can use and fill that in. Punting
3063 	 * means we send back address 0, which will probably cause problems
3064 	 * actually since then IP will fill in the address of the route ifn,
3065 	 * which means we probably already rejected it.. i.e. here comes an
3066 	 * abort :-<.
3067 	 */
3068 	vrf = sctp_find_vrf(vrf_id);
3069 	if (vrf == NULL)
3070 		return (NULL);
3071 
3072 	ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
3073 	ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
3074 	SCTPDBG(SCTP_DEBUG_OUTPUT2,"ifn from route:%p ifn_index:%d\n", ifn, ifn_index);
3075 	emit_ifn = looked_at = sctp_ifn = sctp_find_ifn(ifn, ifn_index);
3076 	if (sctp_ifn == NULL) {
3077 		/* ?? We don't have this guy ?? */
3078 		SCTPDBG(SCTP_DEBUG_OUTPUT2,"No ifn emit interface?\n");
3079 		goto bound_all_plan_b;
3080 	}
3081 	SCTPDBG(SCTP_DEBUG_OUTPUT2,"ifn_index:%d name:%s is emit interface\n",
3082 		ifn_index, sctp_ifn->ifn_name);
3083 
3084 	if (net) {
3085 		cur_addr_num = net->indx_of_eligible_next_to_use;
3086 	}
3087 	num_preferred = sctp_count_num_preferred_boundall(sctp_ifn,
3088 							  inp, stcb,
3089 							  non_asoc_addr_ok,
3090 							  dest_is_loop,
3091 							  dest_is_priv, fam);
3092 	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Found %d preferred source addresses for intf:%s\n",
3093 		num_preferred, sctp_ifn->ifn_name);
3094 	if (num_preferred == 0) {
3095 		/*
3096 		 * no eligible addresses, we must use some other interface
3097 		 * address if we can find one.
3098 		 */
3099 		goto bound_all_plan_b;
3100 	}
3101 	/*
3102 	 * Ok, we have num_preferred set to how many we can use; this
3103 	 * may vary from call to call due to addresses being deprecated,
3104 	 * etc.
3105 	 */
3106 	if (cur_addr_num >= num_preferred) {
3107 		cur_addr_num = 0;
3108 	}
3109 	/*
3110 	 * select the nth address from the list (where cur_addr_num is the
3111 	 * nth) and 0 is the first one, 1 is the second one etc...
3112 	 */
3113 	SCTPDBG(SCTP_DEBUG_OUTPUT2, "cur_addr_num:%d\n", cur_addr_num);
3114 
3115 	sctp_ifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok, dest_is_loop,
3116                                                                     dest_is_priv, cur_addr_num, fam, ro);
3117 
3118 	/* If sctp_ifa is NULL, something changed; fall back to plan B. */
3119 	if (sctp_ifa) {
3120 		atomic_add_int(&sctp_ifa->refcount, 1);
3121 		if (net) {
3122 			/* save off where the next one we will want */
3123 			net->indx_of_eligible_next_to_use = cur_addr_num + 1;
3124 		}
3125 		return (sctp_ifa);
3126 	}
3127 	/*
3128 	 * plan_b: Look at all interfaces and find a preferred address. If
3129 	 * no preferred address is found, fall through to plan_c.
3130 	 */
3131  bound_all_plan_b:
3132 	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan B\n");
3133 	LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3134 		SCTPDBG(SCTP_DEBUG_OUTPUT2, "Examine interface %s\n",
3135 			sctp_ifn->ifn_name);
3136 		if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3137 			/* wrong base scope */
3138 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "skip\n");
3139 			continue;
3140 		}
3141 		if ((sctp_ifn == looked_at) && looked_at) {
3142 			/* already looked at this guy */
3143 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "already seen\n");
3144 			continue;
3145 		}
3146 		num_preferred = sctp_count_num_preferred_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok,
3147                                                                   dest_is_loop, dest_is_priv, fam);
3148 		SCTPDBG(SCTP_DEBUG_OUTPUT2,
3149 			"Found ifn:%p %d preferred source addresses\n",
3150 			ifn, num_preferred);
3151 		if (num_preferred == 0) {
3152 			/* None on this interface. */
3153 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "No preferred -- skipping to next\n");
3154 			continue;
3155 		}
3156 		SCTPDBG(SCTP_DEBUG_OUTPUT2,
3157 			"num preferred:%d on interface:%p cur_addr_num:%d\n",
3158 			num_preferred, (void *)sctp_ifn, cur_addr_num);
3159 
3160 		/*
3161 		 * Ok, we have num_preferred set to how many we can
3162 		 * use; this may vary from call to call due to addresses
3163 		 * being deprecated, etc.
3164 		 */
3165 		if (cur_addr_num >= num_preferred) {
3166 			cur_addr_num = 0;
3167 		}
3168 		sifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok, dest_is_loop,
3169                                                                         dest_is_priv, cur_addr_num, fam, ro);
3170 		if (sifa == NULL)
3171 			continue;
3172 		if (net) {
3173 			net->indx_of_eligible_next_to_use = cur_addr_num + 1;
3174 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "we selected %d\n",
3175 				cur_addr_num);
3176 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "Source:");
3177 			SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
3178 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "Dest:");
3179 			SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &net->ro._l_addr.sa);
3180 		}
3181 		atomic_add_int(&sifa->refcount, 1);
3182 		return (sifa);
3183 	}
3184 #ifdef INET
3185 again_with_private_addresses_allowed:
3186 #endif
3187 	/* plan_c: do we have an acceptable address on the emit interface */
3188 	sifa = NULL;
3189 	SCTPDBG(SCTP_DEBUG_OUTPUT2,"Trying Plan C: find acceptable on interface\n");
3190 	if (emit_ifn == NULL) {
3191 		SCTPDBG(SCTP_DEBUG_OUTPUT2,"Jump to Plan D - no emit_ifn\n");
3192 		goto plan_d;
3193 	}
3194 	LIST_FOREACH(sctp_ifa, &emit_ifn->ifalist, next_ifa) {
3195 		SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifa:%p\n", (void *)sctp_ifa);
3196 #if defined(__FreeBSD__) && !defined(__Userspace__)
3197 #ifdef INET
3198 		if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3199 		    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3200 		                      &sctp_ifa->address.sin.sin_addr) != 0)) {
3201 			SCTPDBG(SCTP_DEBUG_OUTPUT2,"Jailed\n");
3202 			continue;
3203 		}
3204 #endif
3205 #ifdef INET6
3206 		if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3207 		    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3208 		                      &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3209 			SCTPDBG(SCTP_DEBUG_OUTPUT2,"Jailed\n");
3210 			continue;
3211 		}
3212 #endif
3213 #endif
3214 		if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3215 		    (non_asoc_addr_ok == 0)) {
3216 			SCTPDBG(SCTP_DEBUG_OUTPUT2,"Defer\n");
3217 			continue;
3218 		}
3219 		sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop,
3220 						   dest_is_priv, fam);
3221 		if (sifa == NULL) {
3222 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "IFA not acceptable\n");
3223 			continue;
3224 		}
3225 		if (stcb) {
3226 			if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) {
3227 				SCTPDBG(SCTP_DEBUG_OUTPUT2, "NOT in scope\n");
3228 				sifa = NULL;
3229 				continue;
3230 			}
3231 			if (((non_asoc_addr_ok == 0) &&
3232 			     (sctp_is_addr_restricted(stcb, sifa))) ||
3233 			    (non_asoc_addr_ok &&
3234 			     (sctp_is_addr_restricted(stcb, sifa)) &&
3235 			     (!sctp_is_addr_pending(stcb, sifa)))) {
3236 				/*
3237 				 * It is restricted for some
3238 				 * reason.. probably not yet added.
3239 				 */
3240 				SCTPDBG(SCTP_DEBUG_OUTPUT2, "Its restricted\n");
3241 				sifa = NULL;
3242 				continue;
3243 			}
3244 		}
3245 		atomic_add_int(&sifa->refcount, 1);
3246 		goto out;
3247 	}
3248  plan_d:
3249 	/*
3250 	 * plan_d: We are in trouble. No preferred address on the emit
3251 	 * interface, and not even a preferred address on any interface.
3252 	 * Go out and see if we can find an acceptable address somewhere
3253 	 * amongst all interfaces.
3254 	 */
3255 	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan D looked_at is %p\n", (void *)looked_at);
3256 	LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3257 		if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3258 			/* wrong base scope */
3259 			continue;
3260 		}
3261 		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
3262 #if defined(__FreeBSD__) && !defined(__Userspace__)
3263 #ifdef INET
3264 			if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3265 			    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3266 			                      &sctp_ifa->address.sin.sin_addr) != 0)) {
3267 				continue;
3268 			}
3269 #endif
3270 #ifdef INET6
3271 			if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3272 			    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3273 			                      &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3274 				continue;
3275 			}
3276 #endif
3277 #endif
3278 			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3279 			    (non_asoc_addr_ok == 0))
3280 				continue;
3281 			sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
3282 							   dest_is_loop,
3283 							   dest_is_priv, fam);
3284 			if (sifa == NULL)
3285 				continue;
3286 			if (stcb) {
3287 				if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) {
3288 					sifa = NULL;
3289 					continue;
3290 				}
3291 				if (((non_asoc_addr_ok == 0) &&
3292 				     (sctp_is_addr_restricted(stcb, sifa))) ||
3293 				    (non_asoc_addr_ok &&
3294 				     (sctp_is_addr_restricted(stcb, sifa)) &&
3295 				     (!sctp_is_addr_pending(stcb, sifa)))) {
3296 					/*
3297 					 * It is restricted for some
3298 					 * reason.. probably not yet added.
3299 					 */
3300 					sifa = NULL;
3301 					continue;
3302 				}
3303 			}
3304 			goto out;
3305 		}
3306 	}
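	/*
	 * Nothing acceptable was found. If IPv4 private addresses were
	 * excluded by the association's scope, enable ipv4_local_scope
	 * once and retry the acceptable-address search above.
	 */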
3307 #ifdef INET
3308 	if (stcb) {
3309 		if ((retried == 0) && (stcb->asoc.scope.ipv4_local_scope == 0)) {
3310 			stcb->asoc.scope.ipv4_local_scope = 1;
3311 			retried = 1;
3312 			goto again_with_private_addresses_allowed;
3313 		} else if (retried == 1) {
3314 			stcb->asoc.scope.ipv4_local_scope = 0;
3315 		}
3316 	}
3317 #endif
3318 out:
3319 #ifdef INET
3320 	if (sifa) {
3321 		if (retried == 1) {
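			/*
			 * The IPv4 local scope was widened to find this source
			 * address; record any other acceptable private addresses
			 * as restricted for this association.
			 */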
3322 			LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3323 				if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3324 					/* wrong base scope */
3325 					continue;
3326 				}
3327 				LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
3328 					struct sctp_ifa *tmp_sifa;
3329 
3330 #if defined(__FreeBSD__) && !defined(__Userspace__)
3331 #ifdef INET
3332 					if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3333 					    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3334 					                      &sctp_ifa->address.sin.sin_addr) != 0)) {
3335 						continue;
3336 					}
3337 #endif
3338 #ifdef INET6
3339 					if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3340 					    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3341 					                      &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3342 						continue;
3343 					}
3344 #endif
3345 #endif
3346 					if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3347 					    (non_asoc_addr_ok == 0))
3348 						continue;
3349 					tmp_sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
3350 					                                       dest_is_loop,
3351 					                                       dest_is_priv, fam);
3352 					if (tmp_sifa == NULL) {
3353 						continue;
3354 					}
3355 					if (tmp_sifa == sifa) {
3356 						continue;
3357 					}
3358 					if (stcb) {
3359 						if (sctp_is_address_in_scope(tmp_sifa,
3360 						                             &stcb->asoc.scope, 0) == 0) {
3361 							continue;
3362 						}
3363 						if (((non_asoc_addr_ok == 0) &&
3364 						     (sctp_is_addr_restricted(stcb, tmp_sifa))) ||
3365 						    (non_asoc_addr_ok &&
3366 						     (sctp_is_addr_restricted(stcb, tmp_sifa)) &&
3367 						     (!sctp_is_addr_pending(stcb, tmp_sifa)))) {
3368 							/*
3369 							 * It is restricted for some
3370 							 * reason.. probably not yet added.
3371 							 */
3372 							continue;
3373 						}
3374 					}
3375 					if ((tmp_sifa->address.sin.sin_family == AF_INET) &&
3376 					    (IN4_ISPRIVATE_ADDRESS(&(tmp_sifa->address.sin.sin_addr)))) {
3377 						sctp_add_local_addr_restricted(stcb, tmp_sifa);
3378 					}
3379 				}
3380 			}
3381 		}
3382 		atomic_add_int(&sifa->refcount, 1);
3383 	}
3384 #endif
3385 	return (sifa);
3386 }
3387 
3388 
3389 
3390 /* stcb may be NULL */
3391 struct sctp_ifa *
3392 sctp_source_address_selection(struct sctp_inpcb *inp,
3393 			      struct sctp_tcb *stcb,
3394 			      sctp_route_t *ro,
3395 			      struct sctp_nets *net,
3396 			      int non_asoc_addr_ok, uint32_t vrf_id)
3397 {
3398 	struct sctp_ifa *answer;
3399 	uint8_t dest_is_priv, dest_is_loop;
3400 	sa_family_t fam;
3401 #ifdef INET
3402 	struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst;
3403 #endif
3404 #ifdef INET6
3405 	struct sockaddr_in6 *to6 = (struct sockaddr_in6 *)&ro->ro_dst;
3406 #endif
3407 
3408 	/**
3409 	 * Rules:
3410 	 * - Find the route if needed, cache if I can.
3411 	 * - Look at interface address in route, Is it in the bound list. If so we
3412 	 *   have the best source.
3413 	 * - If not we must rotate amongst the addresses.
3414 	 *
3415 	 * Caveats and issues
3416 	 *
3417 	 * Do we need to pay attention to scope? We can have a private address
3418 	 * or a global address we are sourcing or sending to. So if we draw
3419 	 * it out
3420 	 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3421 	 * For V4
3422 	 * ------------------------------------------
3423 	 *      source     *      dest  *  result
3424 	 * -----------------------------------------
3425 	 * <a>  Private    *    Global  *  NAT
3426 	 * -----------------------------------------
3427 	 * <b>  Private    *    Private *  No problem
3428 	 * -----------------------------------------
3429 	 * <c>  Global     *    Private *  Huh, How will this work?
3430 	 * -----------------------------------------
3431 	 * <d>  Global     *    Global  *  No Problem
3432 	 *------------------------------------------
3433 	 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3434 	 * For V6
3435 	 *------------------------------------------
3436 	 *      source     *      dest  *  result
3437 	 * -----------------------------------------
3438 	 * <a>  Linklocal  *    Global  *
3439 	 * -----------------------------------------
3440 	 * <b>  Linklocal  * Linklocal  *  No problem
3441 	 * -----------------------------------------
3442 	 * <c>  Global     * Linklocal  *  Huh, How will this work?
3443 	 * -----------------------------------------
3444 	 * <d>  Global     *    Global  *  No Problem
3445 	 *------------------------------------------
3446 	 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3447 	 *
3448 	 * And then we add to that what happens if there are multiple addresses
3449 	 * assigned to an interface. Remember the ifa on an ifn is a linked
3450 	 * list of addresses. So one interface can have more than one IP
3451 	 * address. What happens if we have both a private and a global
3452 	 * address? Do we then use the context of the destination to sort out
3453 	 * which one is best? And what about NATs? Sending P->G may get you a
3454 	 * NAT translation, or should you prefer the G that's on the
3455 	 * interface?
3456 	 *
3457 	 * Decisions:
3458 	 *
3459 	 * - count the number of addresses on the interface.
3460 	 * - if it is one, no problem except case <c>.
3461 	 *   For <a> we will assume a NAT out there.
3462 	 * - if there are more than one, then we need to worry about scope P
3463 	 *   or G. We should prefer G -> G and P -> P if possible.
3464 	 *   Then, as a secondary choice, fall back to mixed types, with G->P
3465 	 *   being a last-ditch option.
3466 	 * - The above all works for bound-all, but for bound-specific we
3467 	 *   apply the same concept and only consider the bound
3468 	 *   addresses. If the bound set is NOT assigned to the interface then
3469 	 *   we must rotate amongst the bound addresses.
3470 	 */
3471 #if defined(__FreeBSD__) && !defined(__Userspace__)
3472 	if (ro->ro_nh == NULL) {
3473 #else
3474 	if (ro->ro_rt == NULL) {
3475 #endif
3476 		/*
3477 		 * Need a route to cache.
3478 		 */
3479 		SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
3480 	}
3481 #if defined(__FreeBSD__) && !defined(__Userspace__)
3482 	if (ro->ro_nh == NULL) {
3483 #else
3484 	if (ro->ro_rt == NULL) {
3485 #endif
3486 		return (NULL);
3487 	}
3488 #if defined(_WIN32)
3489 	/* On Windows the sa_family is U_SHORT or ADDRESS_FAMILY */
3490 	fam = (sa_family_t)ro->ro_dst.sa_family;
3491 #else
3492 	fam = ro->ro_dst.sa_family;
3493 #endif
3494 	dest_is_priv = dest_is_loop = 0;
3495 	/* Setup our scopes for the destination */
3496 	switch (fam) {
3497 #ifdef INET
3498 	case AF_INET:
3499 		/* Scope based on outbound address */
3500 		if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
3501 			dest_is_loop = 1;
3502 			if (net != NULL) {
3503 				/* mark it as local */
3504 				net->addr_is_local = 1;
3505 			}
3506 		} else if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) {
3507 			dest_is_priv = 1;
3508 		}
3509 		break;
3510 #endif
3511 #ifdef INET6
3512 	case AF_INET6:
3513 		/* Scope based on outbound address */
3514 #if defined(_WIN32)
3515 		if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr)) {
3516 #else
3517 		if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr) ||
3518 		    SCTP_ROUTE_IS_REAL_LOOP(ro)) {
3519 #endif
3520 			/*
3521 			 * If the address is a loopback address, i.e. either
3522 			 * "::1" or "fe80::1%lo0", we are in loopback scope.
3523 			 * But we don't set dest_is_priv (link-local
3524 			 * addresses).
3525 			 */
3526 			dest_is_loop = 1;
3527 			if (net != NULL) {
3528 				/* mark it as local */
3529 				net->addr_is_local = 1;
3530 			}
3531 		} else if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
3532 			dest_is_priv = 1;
3533 		}
3534 		break;
3535 #endif
3536 	}
3537 	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Select source addr for:");
3538 	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&ro->ro_dst);
3539 	SCTP_IPI_ADDR_RLOCK();
3540 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3541 		/*
3542 		 * Bound all case
3543 		 */
3544 		answer = sctp_choose_boundall(inp, stcb, net, ro, vrf_id,
3545 					      dest_is_priv, dest_is_loop,
3546 					      non_asoc_addr_ok, fam);
3547 		SCTP_IPI_ADDR_RUNLOCK();
3548 		return (answer);
3549 	}
3550 	/*
3551 	 * Subset bound case
3552 	 */
3553 	if (stcb) {
3554 		answer = sctp_choose_boundspecific_stcb(inp, stcb, ro,
3555 							vrf_id,	dest_is_priv,
3556 							dest_is_loop,
3557 							non_asoc_addr_ok, fam);
3558 	} else {
3559 		answer = sctp_choose_boundspecific_inp(inp, ro, vrf_id,
3560 						       non_asoc_addr_ok,
3561 						       dest_is_priv,
3562 						       dest_is_loop, fam);
3563 	}
3564 	SCTP_IPI_ADDR_RUNLOCK();
3565 	return (answer);
3566 }
3567 
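/*
 * Scan the control mbuf for a cmsg of type c_type at level IPPROTO_SCTP
 * and copy its data into *data. For SCTP_SNDRCV, a struct sctp_sndrcvinfo
 * is synthesized from SCTP_SNDINFO, SCTP_PRINFO, and SCTP_AUTHINFO cmsgs.
 * Returns 1 if something was found, 0 otherwise.
 */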
3568 static int
3569 sctp_find_cmsg(int c_type, void *data, struct mbuf *control, size_t cpsize)
3570 {
3571 #if defined(_WIN32)
3572 	WSACMSGHDR cmh;
3573 #else
3574 	struct cmsghdr cmh;
3575 #endif
3576 	struct sctp_sndinfo sndinfo;
3577 	struct sctp_prinfo prinfo;
3578 	struct sctp_authinfo authinfo;
3579 	int tot_len, rem_len, cmsg_data_len, cmsg_data_off, off;
3580 	int found;
3581 
3582 	/*
3583 	 * Independent of how many mbufs, find the c_type inside the control
3584 	 * structure and copy out the data.
3585 	 */
3586 	found = 0;
3587 	tot_len = SCTP_BUF_LEN(control);
3588 	for (off = 0; off < tot_len; off += CMSG_ALIGN(cmh.cmsg_len)) {
3589 		rem_len = tot_len - off;
3590 		if (rem_len < (int)CMSG_ALIGN(sizeof(cmh))) {
3591 			/* There is not enough room for one more. */
3592 			return (found);
3593 		}
3594 		m_copydata(control, off, sizeof(cmh), (caddr_t)&cmh);
3595 		if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3596 			/* We don't have a complete CMSG header. */
3597 			return (found);
3598 		}
3599 		if ((cmh.cmsg_len > INT_MAX) || ((int)cmh.cmsg_len > rem_len)) {
3600 			/* We don't have the complete CMSG. */
3601 			return (found);
3602 		}
3603 		cmsg_data_len = (int)cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh));
3604 		cmsg_data_off = off + CMSG_ALIGN(sizeof(cmh));
3605 		if ((cmh.cmsg_level == IPPROTO_SCTP) &&
3606 		    ((c_type == cmh.cmsg_type) ||
3607 		     ((c_type == SCTP_SNDRCV) &&
3608 		      ((cmh.cmsg_type == SCTP_SNDINFO) ||
3609 		       (cmh.cmsg_type == SCTP_PRINFO) ||
3610 		       (cmh.cmsg_type == SCTP_AUTHINFO))))) {
3611 			if (c_type == cmh.cmsg_type) {
3612 				if (cpsize > INT_MAX) {
3613 					return (found);
3614 				}
3615 				if (cmsg_data_len < (int)cpsize) {
3616 					return (found);
3617 				}
3618 				/* It is exactly what we want. Copy it out. */
3619 				m_copydata(control, cmsg_data_off, (int)cpsize, (caddr_t)data);
3620 				return (1);
3621 			} else {
3622 				struct sctp_sndrcvinfo *sndrcvinfo;
3623 
3624 				sndrcvinfo = (struct sctp_sndrcvinfo *)data;
3625 				if (found == 0) {
3626 					if (cpsize < sizeof(struct sctp_sndrcvinfo)) {
3627 						return (found);
3628 					}
3629 					memset(sndrcvinfo, 0, sizeof(struct sctp_sndrcvinfo));
3630 				}
3631 				switch (cmh.cmsg_type) {
3632 				case SCTP_SNDINFO:
3633 					if (cmsg_data_len < (int)sizeof(struct sctp_sndinfo)) {
3634 						return (found);
3635 					}
3636 					m_copydata(control, cmsg_data_off, sizeof(struct sctp_sndinfo), (caddr_t)&sndinfo);
3637 					sndrcvinfo->sinfo_stream = sndinfo.snd_sid;
3638 					sndrcvinfo->sinfo_flags = sndinfo.snd_flags;
3639 					sndrcvinfo->sinfo_ppid = sndinfo.snd_ppid;
3640 					sndrcvinfo->sinfo_context = sndinfo.snd_context;
3641 					sndrcvinfo->sinfo_assoc_id = sndinfo.snd_assoc_id;
3642 					break;
3643 				case SCTP_PRINFO:
3644 					if (cmsg_data_len < (int)sizeof(struct sctp_prinfo)) {
3645 						return (found);
3646 					}
3647 					m_copydata(control, cmsg_data_off, sizeof(struct sctp_prinfo), (caddr_t)&prinfo);
3648 					if (prinfo.pr_policy != SCTP_PR_SCTP_NONE) {
3649 						sndrcvinfo->sinfo_timetolive = prinfo.pr_value;
3650 					} else {
3651 						sndrcvinfo->sinfo_timetolive = 0;
3652 					}
3653 					sndrcvinfo->sinfo_flags |= prinfo.pr_policy;
3654 					break;
3655 				case SCTP_AUTHINFO:
3656 					if (cmsg_data_len < (int)sizeof(struct sctp_authinfo)) {
3657 						return (found);
3658 					}
3659 					m_copydata(control, cmsg_data_off, sizeof(struct sctp_authinfo), (caddr_t)&authinfo);
3660 					sndrcvinfo->sinfo_keynumber_valid = 1;
3661 					sndrcvinfo->sinfo_keynumber = authinfo.auth_keynumber;
3662 					break;
3663 				default:
3664 					return (found);
3665 				}
3666 				found = 1;
3667 			}
3668 		}
3669 	}
3670 	return (found);
3671 }
3672 
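/*
 * Apply SCTP_INIT, SCTP_DSTADDRV4, and SCTP_DSTADDRV6 cmsgs from the
 * control mbuf to a newly created association: adjust the stream counts
 * and INIT limits and add the supplied destination addresses. Returns 1
 * and sets *error on failure, 0 on success.
 */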
3673 static int
3674 sctp_process_cmsgs_for_init(struct sctp_tcb *stcb, struct mbuf *control, int *error)
3675 {
3676 #if defined(_WIN32)
3677 	WSACMSGHDR cmh;
3678 #else
3679 	struct cmsghdr cmh;
3680 #endif
3681 	struct sctp_initmsg initmsg;
3682 #ifdef INET
3683 	struct sockaddr_in sin;
3684 #endif
3685 #ifdef INET6
3686 	struct sockaddr_in6 sin6;
3687 #endif
3688 	int tot_len, rem_len, cmsg_data_len, cmsg_data_off, off;
3689 
3690 	tot_len = SCTP_BUF_LEN(control);
3691 	for (off = 0; off < tot_len; off += CMSG_ALIGN(cmh.cmsg_len)) {
3692 		rem_len = tot_len - off;
3693 		if (rem_len < (int)CMSG_ALIGN(sizeof(cmh))) {
3694 			/* There is not enough room for one more. */
3695 			*error = EINVAL;
3696 			return (1);
3697 		}
3698 		m_copydata(control, off, sizeof(cmh), (caddr_t)&cmh);
3699 		if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3700 			/* We don't have a complete CMSG header. */
3701 			*error = EINVAL;
3702 			return (1);
3703 		}
3704 		if ((cmh.cmsg_len > INT_MAX) || ((int)cmh.cmsg_len > rem_len)) {
3705 			/* We don't have the complete CMSG. */
3706 			*error = EINVAL;
3707 			return (1);
3708 		}
3709 		cmsg_data_len = (int)cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh));
3710 		cmsg_data_off = off + CMSG_ALIGN(sizeof(cmh));
3711 		if (cmh.cmsg_level == IPPROTO_SCTP) {
3712 			switch (cmh.cmsg_type) {
3713 			case SCTP_INIT:
3714 				if (cmsg_data_len < (int)sizeof(struct sctp_initmsg)) {
3715 					*error = EINVAL;
3716 					return (1);
3717 				}
3718 				m_copydata(control, cmsg_data_off, sizeof(struct sctp_initmsg), (caddr_t)&initmsg);
3719 				if (initmsg.sinit_max_attempts)
3720 					stcb->asoc.max_init_times = initmsg.sinit_max_attempts;
3721 				if (initmsg.sinit_num_ostreams)
3722 					stcb->asoc.pre_open_streams = initmsg.sinit_num_ostreams;
3723 				if (initmsg.sinit_max_instreams)
3724 					stcb->asoc.max_inbound_streams = initmsg.sinit_max_instreams;
3725 				if (initmsg.sinit_max_init_timeo)
3726 					stcb->asoc.initial_init_rto_max = initmsg.sinit_max_init_timeo;
3727 				if (stcb->asoc.streamoutcnt < stcb->asoc.pre_open_streams) {
3728 					struct sctp_stream_out *tmp_str;
3729 					unsigned int i;
3730 #if defined(SCTP_DETAILED_STR_STATS)
3731 					int j;
3732 #endif
3733 
3734 					/* Default is NOT correct */
3735 					SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, default:%d pre_open:%d\n",
3736 						stcb->asoc.streamoutcnt, stcb->asoc.pre_open_streams);
3737 					SCTP_TCB_UNLOCK(stcb);
3738 					SCTP_MALLOC(tmp_str,
3739 					            struct sctp_stream_out *,
3740 					            (stcb->asoc.pre_open_streams * sizeof(struct sctp_stream_out)),
3741 					            SCTP_M_STRMO);
3742 					SCTP_TCB_LOCK(stcb);
3743 					if (tmp_str != NULL) {
3744 						SCTP_FREE(stcb->asoc.strmout, SCTP_M_STRMO);
3745 						stcb->asoc.strmout = tmp_str;
3746 						stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt = stcb->asoc.pre_open_streams;
3747 					} else {
3748 						stcb->asoc.pre_open_streams = stcb->asoc.streamoutcnt;
3749 					}
3750 					for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3751 						TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
3752 						stcb->asoc.strmout[i].chunks_on_queues = 0;
3753 						stcb->asoc.strmout[i].next_mid_ordered = 0;
3754 						stcb->asoc.strmout[i].next_mid_unordered = 0;
3755 #if defined(SCTP_DETAILED_STR_STATS)
3756 						for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
3757 							stcb->asoc.strmout[i].abandoned_sent[j] = 0;
3758 							stcb->asoc.strmout[i].abandoned_unsent[j] = 0;
3759 						}
3760 #else
3761 						stcb->asoc.strmout[i].abandoned_sent[0] = 0;
3762 						stcb->asoc.strmout[i].abandoned_unsent[0] = 0;
3763 #endif
3764 						stcb->asoc.strmout[i].sid = i;
3765 						stcb->asoc.strmout[i].last_msg_incomplete = 0;
3766 						stcb->asoc.strmout[i].state = SCTP_STREAM_OPENING;
3767 						stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], NULL);
3768 					}
3769 				}
3770 				break;
3771 #ifdef INET
3772 			case SCTP_DSTADDRV4:
3773 				if (cmsg_data_len < (int)sizeof(struct in_addr)) {
3774 					*error = EINVAL;
3775 					return (1);
3776 				}
3777 				memset(&sin, 0, sizeof(struct sockaddr_in));
3778 				sin.sin_family = AF_INET;
3779 #ifdef HAVE_SIN_LEN
3780 				sin.sin_len = sizeof(struct sockaddr_in);
3781 #endif
3782 				sin.sin_port = stcb->rport;
3783 				m_copydata(control, cmsg_data_off, sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
3784 				if ((sin.sin_addr.s_addr == INADDR_ANY) ||
3785 				    (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
3786 				    IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
3787 					*error = EINVAL;
3788 					return (1);
3789 				}
3790 				if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL, stcb->asoc.port,
3791 				                         SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3792 					*error = ENOBUFS;
3793 					return (1);
3794 				}
3795 				break;
3796 #endif
3797 #ifdef INET6
3798 			case SCTP_DSTADDRV6:
3799 				if (cmsg_data_len < (int)sizeof(struct in6_addr)) {
3800 					*error = EINVAL;
3801 					return (1);
3802 				}
3803 				memset(&sin6, 0, sizeof(struct sockaddr_in6));
3804 				sin6.sin6_family = AF_INET6;
3805 #ifdef HAVE_SIN6_LEN
3806 				sin6.sin6_len = sizeof(struct sockaddr_in6);
3807 #endif
3808 				sin6.sin6_port = stcb->rport;
3809 				m_copydata(control, cmsg_data_off, sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
3810 				if (IN6_IS_ADDR_UNSPECIFIED(&sin6.sin6_addr) ||
3811 				    IN6_IS_ADDR_MULTICAST(&sin6.sin6_addr)) {
3812 					*error = EINVAL;
3813 					return (1);
3814 				}
3815 #ifdef INET
3816 				if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
3817 					in6_sin6_2_sin(&sin, &sin6);
3818 					if ((sin.sin_addr.s_addr == INADDR_ANY) ||
3819 					    (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
3820 					    IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
3821 						*error = EINVAL;
3822 						return (1);
3823 					}
3824 					if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL, stcb->asoc.port,
3825 					                         SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3826 						*error = ENOBUFS;
3827 						return (1);
3828 					}
3829 				} else
3830 #endif
3831 					if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin6, NULL, stcb->asoc.port,
3832 					                         SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3833 						*error = ENOBUFS;
3834 						return (1);
3835 					}
3836 				break;
3837 #endif
3838 			default:
3839 				break;
3840 			}
3841 		}
3842 	}
3843 	return (0);
3844 }
3845 
3846 #if defined(INET) || defined(INET6)
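/*
 * Look up an existing association using any destination address supplied
 * via SCTP_DSTADDRV4 or SCTP_DSTADDRV6 cmsgs in the control mbuf; return
 * the first match, or NULL (setting *error for malformed cmsgs).
 */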
3847 static struct sctp_tcb *
3848 sctp_findassociation_cmsgs(struct sctp_inpcb **inp_p,
3849                            uint16_t port,
3850                            struct mbuf *control,
3851                            struct sctp_nets **net_p,
3852                            int *error)
3853 {
3854 #if defined(_WIN32)
3855 	WSACMSGHDR cmh;
3856 #else
3857 	struct cmsghdr cmh;
3858 #endif
3859 	struct sctp_tcb *stcb;
3860 	struct sockaddr *addr;
3861 #ifdef INET
3862 	struct sockaddr_in sin;
3863 #endif
3864 #ifdef INET6
3865 	struct sockaddr_in6 sin6;
3866 #endif
3867 	int tot_len, rem_len, cmsg_data_len, cmsg_data_off, off;
3868 
3869 	tot_len = SCTP_BUF_LEN(control);
3870 	for (off = 0; off < tot_len; off += CMSG_ALIGN(cmh.cmsg_len)) {
3871 		rem_len = tot_len - off;
3872 		if (rem_len < (int)CMSG_ALIGN(sizeof(cmh))) {
3873 			/* There is not enough room for one more. */
3874 			*error = EINVAL;
3875 			return (NULL);
3876 		}
3877 		m_copydata(control, off, sizeof(cmh), (caddr_t)&cmh);
3878 		if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3879 			/* We don't have a complete CMSG header. */
3880 			*error = EINVAL;
3881 			return (NULL);
3882 		}
3883 		if ((cmh.cmsg_len > INT_MAX) || ((int)cmh.cmsg_len > rem_len)) {
3884 			/* We don't have the complete CMSG. */
3885 			*error = EINVAL;
3886 			return (NULL);
3887 		}
3888 		cmsg_data_len = (int)cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh));
3889 		cmsg_data_off = off + CMSG_ALIGN(sizeof(cmh));
3890 		if (cmh.cmsg_level == IPPROTO_SCTP) {
3891 			switch (cmh.cmsg_type) {
3892 #ifdef INET
3893 			case SCTP_DSTADDRV4:
3894 				if (cmsg_data_len < (int)sizeof(struct in_addr)) {
3895 					*error = EINVAL;
3896 					return (NULL);
3897 				}
3898 				memset(&sin, 0, sizeof(struct sockaddr_in));
3899 				sin.sin_family = AF_INET;
3900 #ifdef HAVE_SIN_LEN
3901 				sin.sin_len = sizeof(struct sockaddr_in);
3902 #endif
3903 				sin.sin_port = port;
3904 				m_copydata(control, cmsg_data_off, sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
3905 				addr = (struct sockaddr *)&sin;
3906 				break;
3907 #endif
3908 #ifdef INET6
3909 			case SCTP_DSTADDRV6:
3910 				if (cmsg_data_len < (int)sizeof(struct in6_addr)) {
3911 					*error = EINVAL;
3912 					return (NULL);
3913 				}
3914 				memset(&sin6, 0, sizeof(struct sockaddr_in6));
3915 				sin6.sin6_family = AF_INET6;
3916 #ifdef HAVE_SIN6_LEN
3917 				sin6.sin6_len = sizeof(struct sockaddr_in6);
3918 #endif
3919 				sin6.sin6_port = port;
3920 				m_copydata(control, cmsg_data_off, sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
3921 #ifdef INET
3922 				if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
3923 					in6_sin6_2_sin(&sin, &sin6);
3924 					addr = (struct sockaddr *)&sin;
3925 				} else
3926 #endif
3927 					addr = (struct sockaddr *)&sin6;
3928 				break;
3929 #endif
3930 			default:
3931 				addr = NULL;
3932 				break;
3933 			}
3934 			if (addr) {
3935 				stcb = sctp_findassociation_ep_addr(inp_p, addr, net_p, NULL, NULL);
3936 				if (stcb != NULL) {
3937 					return (stcb);
3938 				}
3939 			}
3940 		}
3941 	}
3942 	return (NULL);
3943 }
3944 #endif
3945 
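/*
 * Build the STATE-COOKIE parameter as an mbuf chain: parameter header and
 * state cookie, followed by copies of the received INIT and the INIT-ACK
 * being sent, and finally a zeroed signature mbuf whose location is
 * returned via *signature for the caller to fill in.
 */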
3946 static struct mbuf *
3947 sctp_add_cookie(struct mbuf *init, int init_offset,
3948     struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in, uint8_t **signature)
3949 {
3950 	struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret;
3951 	struct sctp_state_cookie *stc;
3952 	struct sctp_paramhdr *ph;
3953 	uint16_t cookie_sz;
3954 
3955 	mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) +
3956 				      sizeof(struct sctp_paramhdr)), 0,
3957 				     M_NOWAIT, 1, MT_DATA);
3958 	if (mret == NULL) {
3959 		return (NULL);
3960 	}
3961 	copy_init = SCTP_M_COPYM(init, init_offset, M_COPYALL, M_NOWAIT);
3962 	if (copy_init == NULL) {
3963 		sctp_m_freem(mret);
3964 		return (NULL);
3965 	}
3966 #ifdef SCTP_MBUF_LOGGING
3967 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
3968 		sctp_log_mbc(copy_init, SCTP_MBUF_ICOPY);
3969 	}
3970 #endif
3971 	copy_initack = SCTP_M_COPYM(initack, initack_offset, M_COPYALL,
3972 	    M_NOWAIT);
3973 	if (copy_initack == NULL) {
3974 		sctp_m_freem(mret);
3975 		sctp_m_freem(copy_init);
3976 		return (NULL);
3977 	}
3978 #ifdef SCTP_MBUF_LOGGING
3979 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
3980 		sctp_log_mbc(copy_initack, SCTP_MBUF_ICOPY);
3981 	}
3982 #endif
3983 	/* Easy side: we just drop it on the end. */
3984 	ph = mtod(mret, struct sctp_paramhdr *);
3985 	SCTP_BUF_LEN(mret) = sizeof(struct sctp_state_cookie) +
3986 	    sizeof(struct sctp_paramhdr);
3987 	stc = (struct sctp_state_cookie *)((caddr_t)ph +
3988 	    sizeof(struct sctp_paramhdr));
3989 	ph->param_type = htons(SCTP_STATE_COOKIE);
3990 	ph->param_length = 0;	/* fill in at the end */
3991 	/* Fill in the stc cookie data */
3992 	memcpy(stc, stc_in, sizeof(struct sctp_state_cookie));
3993 
3994 	/* tack the INIT and then the INIT-ACK onto the chain */
3995 	cookie_sz = 0;
3996 	for (m_at = mret; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3997 		cookie_sz += SCTP_BUF_LEN(m_at);
3998 		if (SCTP_BUF_NEXT(m_at) == NULL) {
3999 			SCTP_BUF_NEXT(m_at) = copy_init;
4000 			break;
4001 		}
4002 	}
4003 	for (m_at = copy_init; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
4004 		cookie_sz += SCTP_BUF_LEN(m_at);
4005 		if (SCTP_BUF_NEXT(m_at) == NULL) {
4006 			SCTP_BUF_NEXT(m_at) = copy_initack;
4007 			break;
4008 		}
4009 	}
4010 	for (m_at = copy_initack; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
4011 		cookie_sz += SCTP_BUF_LEN(m_at);
4012 		if (SCTP_BUF_NEXT(m_at) == NULL) {
4013 			break;
4014 		}
4015 	}
4016 	sig = sctp_get_mbuf_for_msg(SCTP_SIGNATURE_SIZE, 0, M_NOWAIT, 1, MT_DATA);
4017 	if (sig == NULL) {
4018 		/* no space, so free the entire chain */
4019 		sctp_m_freem(mret);
4020 		return (NULL);
4021 	}
4022 	SCTP_BUF_NEXT(m_at) = sig;
4023 	SCTP_BUF_LEN(sig) = SCTP_SIGNATURE_SIZE;
4024 	cookie_sz += SCTP_SIGNATURE_SIZE;
4025 	ph->param_length = htons(cookie_sz);
4026 	*signature = (uint8_t *)mtod(sig, caddr_t);
4027 	memset(*signature, 0, SCTP_SIGNATURE_SIZE);
4028 	return (mret);
4029 }
4030 
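/* Return the ECT(0) codepoint if ECN is supported on this association, 0 otherwise. */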
4031 static uint8_t
4032 sctp_get_ect(struct sctp_tcb *stcb)
4033 {
4034 	if ((stcb != NULL) && (stcb->asoc.ecn_supported == 1)) {
4035 		return (SCTP_ECT0_BIT);
4036 	} else {
4037 		return (0);
4038 	}
4039 }
4040 
4041 #if defined(INET) || defined(INET6)
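/*
 * No usable source address or route exists for net: notify the ULP that
 * the path is down, clear its reachable/PF state, and if it was the
 * primary destination, select an alternate net to use.
 */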
4042 static void
4043 sctp_handle_no_route(struct sctp_tcb *stcb,
4044                      struct sctp_nets *net,
4045                      int so_locked)
4046 {
4047 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "dropped packet - no valid source addr\n");
4048 
4049 	if (net) {
4050 		SCTPDBG(SCTP_DEBUG_OUTPUT1, "Destination was ");
4051 		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT1, &net->ro._l_addr.sa);
4052 		if (net->dest_state & SCTP_ADDR_CONFIRMED) {
4053 			if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb) {
4054 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "no route takes interface %p down\n", (void *)net);
4055 				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
4056 			                        stcb, 0,
4057 			                        (void *)net,
4058 			                        so_locked);
4059 				net->dest_state &= ~SCTP_ADDR_REACHABLE;
4060 				net->dest_state &= ~SCTP_ADDR_PF;
4061 			}
4062 		}
4063 		if (stcb) {
4064 			if (net == stcb->asoc.primary_destination) {
4065 				/* need a new primary */
4066 				struct sctp_nets *alt;
4067 
4068 				alt = sctp_find_alternate_net(stcb, net, 0);
4069 				if (alt != net) {
4070 					if (stcb->asoc.alternate) {
4071 						sctp_free_remote_addr(stcb->asoc.alternate);
4072 					}
4073 					stcb->asoc.alternate = alt;
4074 					atomic_add_int(&stcb->asoc.alternate->ref_count, 1);
4075 					if (net->ro._s_addr) {
4076 						sctp_free_ifa(net->ro._s_addr);
4077 						net->ro._s_addr = NULL;
4078 					}
4079 					net->src_addr_selected = 0;
4080 				}
4081 			}
4082 		}
4083 	}
4084 }
4085 #endif
4086 
4087 static int
4088 sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
4089     struct sctp_tcb *stcb,	/* may be NULL */
4090     struct sctp_nets *net,
4091     struct sockaddr *to,
4092     struct mbuf *m,
4093     uint32_t auth_offset,
4094     struct sctp_auth_chunk *auth,
4095     uint16_t auth_keyid,
4096     int nofragment_flag,
4097     int ecn_ok,
4098     int out_of_asoc_ok,
4099     uint16_t src_port,
4100     uint16_t dest_port,
4101     uint32_t v_tag,
4102     uint16_t port,
4103     union sctp_sockstore *over_addr,
4104 #if defined(__FreeBSD__) && !defined(__Userspace__)
4105     uint8_t mflowtype, uint32_t mflowid,
4106 #endif
4107 int so_locked)
4108 /* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
4109 {
4110 	/**
4111 	 * Given a mbuf chain (via SCTP_BUF_NEXT()) that holds a packet header
4112 	 * WITH an SCTPHDR but no IP header, endpoint inp and sa structure:
4113 	 * - fill in the HMAC digest of any AUTH chunk in the packet.
4114 	 * - calculate and fill in the SCTP checksum.
4115 	 * - prepend an IP header.
4116 	 * - if boundall use INADDR_ANY.
4117 	 * - if boundspecific do source address selection.
4118 	 * - set the fragmentation option for IPv4.
4119 	 * - On return from IP output, check/adjust mtu size of output
4120 	 *   interface and smallest_mtu size as well.
4121 	 */
4122 	/* Will need ifdefs around this */
4123 	struct mbuf *newm;
4124 	struct sctphdr *sctphdr;
4125 	int packet_length;
4126 	int ret;
4127 #if defined(INET) || defined(INET6)
4128 	uint32_t vrf_id;
4129 #endif
4130 #if defined(INET) || defined(INET6)
4131 	struct mbuf *o_pak;
4132 	sctp_route_t *ro = NULL;
4133 	struct udphdr *udp = NULL;
4134 #endif
4135 	uint8_t tos_value;
4136 #if defined(__APPLE__) && !defined(__Userspace__)
4137 	struct socket *so = NULL;
4138 #endif
4139 
4140 #if defined(__APPLE__) && !defined(__Userspace__)
4141 	if (so_locked) {
4142 		sctp_lock_assert(SCTP_INP_SO(inp));
4143 		SCTP_TCB_LOCK_ASSERT(stcb);
4144 	} else {
4145 		sctp_unlock_assert(SCTP_INP_SO(inp));
4146 	}
4147 #endif
4148 	if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) {
4149 		SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
4150 		sctp_m_freem(m);
4151 		return (EFAULT);
4152 	}
4153 #if defined(INET) || defined(INET6)
4154 	if (stcb) {
4155 		vrf_id = stcb->asoc.vrf_id;
4156 	} else {
4157 		vrf_id = inp->def_vrf_id;
4158 	}
4159 #endif
4160 	/* fill in the HMAC digest for any AUTH chunk in the packet */
4161 	if ((auth != NULL) && (stcb != NULL)) {
4162 		sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb, auth_keyid);
4163 	}
4164 
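	/* DSCP selection: prefer the per-destination value, then the association default, then the endpoint default. */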
4165 	if (net) {
4166 		tos_value = net->dscp;
4167 	} else if (stcb) {
4168 		tos_value = stcb->asoc.default_dscp;
4169 	} else {
4170 		tos_value = inp->sctp_ep.default_dscp;
4171 	}
4172 
4173 	switch (to->sa_family) {
4174 #ifdef INET
4175 	case AF_INET:
4176 	{
4177 		struct ip *ip = NULL;
4178 		sctp_route_t iproute;
4179 		int len;
4180 
4181 		len = SCTP_MIN_V4_OVERHEAD;
4182 		if (port) {
4183 			len += sizeof(struct udphdr);
4184 		}
4185 		newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
4186 		if (newm == NULL) {
4187 			sctp_m_freem(m);
4188 			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4189 			return (ENOMEM);
4190 		}
4191 		SCTP_ALIGN_TO_END(newm, len);
4192 		SCTP_BUF_LEN(newm) = len;
4193 		SCTP_BUF_NEXT(newm) = m;
4194 		m = newm;
4195 #if defined(__FreeBSD__) && !defined(__Userspace__)
4196 		if (net != NULL) {
4197 			m->m_pkthdr.flowid = net->flowid;
4198 			M_HASHTYPE_SET(m, net->flowtype);
4199 		} else {
4200 			m->m_pkthdr.flowid = mflowid;
4201 			M_HASHTYPE_SET(m, mflowtype);
4202 		}
4203 #endif
4204 		packet_length = sctp_calculate_len(m);
4205 		ip = mtod(m, struct ip *);
4206 		ip->ip_v = IPVERSION;
4207 		ip->ip_hl = (sizeof(struct ip) >> 2);
4208 		if (tos_value == 0) {
4209 			/*
4210 			 * This means, in particular, that it is not set at
4211 			 * the SCTP layer. So use the value from the IP layer.
4212 			 */
4213 			tos_value = inp->ip_inp.inp.inp_ip_tos;
4214 		}
4215 		tos_value &= 0xfc;
4216 		if (ecn_ok) {
4217 			tos_value |= sctp_get_ect(stcb);
4218 		}
4219 		if ((nofragment_flag) && (port == 0)) {
4220 #if defined(__FreeBSD__) && !defined(__Userspace__)
4221 			ip->ip_off = htons(IP_DF);
4222 #elif defined(WITH_CONVERT_IP_OFF) || defined(__APPLE__)
4223 			ip->ip_off = IP_DF;
4224 #else
4225 			ip->ip_off = htons(IP_DF);
4226 #endif
4227 		} else {
4228 #if defined(__FreeBSD__) && !defined(__Userspace__)
4229 			ip->ip_off = htons(0);
4230 #else
4231 			ip->ip_off = 0;
4232 #endif
4233 		}
4234 #if defined(__Userspace__)
4235 		ip->ip_id = htons(SCTP_IP_ID(inp)++);
4236 #elif defined(__FreeBSD__)
4237 		/* FreeBSD has a function for ip_id's */
4238 		ip_fillid(ip);
4239 #elif defined(__APPLE__)
4240 #if RANDOM_IP_ID
4241 		ip->ip_id = ip_randomid();
4242 #else
4243 		ip->ip_id = htons(ip_id++);
4244 #endif
4245 #else
4246 		ip->ip_id = SCTP_IP_ID(inp)++;
4247 #endif
4248 
4249 		ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl;
4250 #if defined(__FreeBSD__) && !defined(__Userspace__)
4251 		ip->ip_len = htons(packet_length);
4252 #else
4253 		ip->ip_len = packet_length;
4254 #endif
4255 		ip->ip_tos = tos_value;
4256 		if (port) {
4257 			ip->ip_p = IPPROTO_UDP;
4258 		} else {
4259 			ip->ip_p = IPPROTO_SCTP;
4260 		}
4261 		ip->ip_sum = 0;
4262 		if (net == NULL) {
4263 			ro = &iproute;
4264 			memset(&iproute, 0, sizeof(iproute));
4265 #ifdef HAVE_SA_LEN
4266 			memcpy(&ro->ro_dst, to, to->sa_len);
4267 #else
4268 			memcpy(&ro->ro_dst, to, sizeof(struct sockaddr_in));
4269 #endif
4270 		} else {
4271 			ro = (sctp_route_t *)&net->ro;
4272 		}
4273 		/* Now the address selection part */
4274 		ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr;
4275 
4276 		/* call the routine to select the src address */
4277 		if (net && out_of_asoc_ok == 0) {
4278 			if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED|SCTP_ADDR_IFA_UNUSEABLE))) {
4279 				sctp_free_ifa(net->ro._s_addr);
4280 				net->ro._s_addr = NULL;
4281 				net->src_addr_selected = 0;
4282 #if defined(__FreeBSD__) && !defined(__Userspace__)
4283 				RO_NHFREE(ro);
4284 #else
4285 				if (ro->ro_rt) {
4286 					RTFREE(ro->ro_rt);
4287 					ro->ro_rt = NULL;
4288 				}
4289 #endif
4290 			}
4291 			if (net->src_addr_selected == 0) {
4292 				/* Cache the source address */
4293 				net->ro._s_addr = sctp_source_address_selection(inp,stcb,
4294 										ro, net, 0,
4295 										vrf_id);
4296 				net->src_addr_selected = 1;
4297 			}
4298 			if (net->ro._s_addr == NULL) {
4299 				/* No route to host */
4300 				net->src_addr_selected = 0;
4301 				sctp_handle_no_route(stcb, net, so_locked);
4302 				SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4303 				sctp_m_freem(m);
4304 				return (EHOSTUNREACH);
4305 			}
4306 			ip->ip_src = net->ro._s_addr->address.sin.sin_addr;
4307 		} else {
4308 			if (over_addr == NULL) {
4309 				struct sctp_ifa *_lsrc;
4310 
4311 				_lsrc = sctp_source_address_selection(inp, stcb, ro,
4312 				                                      net,
4313 				                                      out_of_asoc_ok,
4314 				                                      vrf_id);
4315 				if (_lsrc == NULL) {
4316 					sctp_handle_no_route(stcb, net, so_locked);
4317 					SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4318 					sctp_m_freem(m);
4319 					return (EHOSTUNREACH);
4320 				}
4321 				ip->ip_src = _lsrc->address.sin.sin_addr;
4322 				sctp_free_ifa(_lsrc);
4323 			} else {
4324 				ip->ip_src = over_addr->sin.sin_addr;
4325 				SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
4326 			}
4327 		}
4328 		if (port) {
4329 			if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
4330 				sctp_handle_no_route(stcb, net, so_locked);
4331 				SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4332 				sctp_m_freem(m);
4333 				return (EHOSTUNREACH);
4334 			}
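			/*
			 * UDP encapsulation (RFC 6951): insert a UDP header
			 * between the IP header and the SCTP common header.
			 * The source port is our tunneling port, the
			 * destination port is the peer's encapsulation port.
			 */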
4335 			udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
4336 			udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4337 			udp->uh_dport = port;
4338 			udp->uh_ulen = htons((uint16_t)(packet_length - sizeof(struct ip)));
4339 #if !defined(__Userspace__)
4340 #if defined(__FreeBSD__)
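			/*
			 * If UDP checksums are enabled, seed uh_sum with the
			 * pseudo-header checksum; the checksum over the
			 * payload is handled below via SCTP_ENABLE_UDP_CSUM()
			 * once the chain has been attached.
			 */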
4341 			if (V_udp_cksum) {
4342 				udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
4343 			} else {
4344 				udp->uh_sum = 0;
4345 			}
4346 #else
4347 			udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
4348 #endif
4349 #else
4350 			udp->uh_sum = 0;
4351 #endif
4352 			sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4353 		} else {
4354 			sctphdr = (struct sctphdr *)((caddr_t)ip + sizeof(struct ip));
4355 		}
4356 
4357 		sctphdr->src_port = src_port;
4358 		sctphdr->dest_port = dest_port;
4359 		sctphdr->v_tag = v_tag;
4360 		sctphdr->checksum = 0;
4361 
4362 		/*
4363 		 * If source address selection fails and we find no route
4364 		 * then the ip_output should fail as well with a
4365 		 * NO_ROUTE_TO_HOST type error. We probably should catch
4366 		 * that somewhere and abort the association right away
4367 		 * (assuming this is an INIT being sent).
4368 		 */
4369 #if defined(__FreeBSD__) && !defined(__Userspace__)
4370 		if (ro->ro_nh == NULL) {
4371 #else
4372 		if (ro->ro_rt == NULL) {
4373 #endif
4374 			/*
4375 			 * src addr selection failed to find a route (or
4376 			 * valid source addr), so we can't get there from
4377 			 * here (yet)!
4378 			 */
4379 			sctp_handle_no_route(stcb, net, so_locked);
4380 			SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4381 			sctp_m_freem(m);
4382 			return (EHOSTUNREACH);
4383 		}
4384 		if (ro != &iproute) {
4385 			memcpy(&iproute, ro, sizeof(*ro));
4386 		}
4387 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv4 output routine from low level src addr:%x\n",
4388 			(uint32_t) (ntohl(ip->ip_src.s_addr)));
4389 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "Destination is %x\n",
4390 			(uint32_t)(ntohl(ip->ip_dst.s_addr)));
4391 #if defined(__FreeBSD__) && !defined(__Userspace__)
4392 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n",
4393 			(void *)ro->ro_nh);
4394 #else
4395 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n",
4396 			(void *)ro->ro_rt);
4397 #endif
4398 
4399 		if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4400 			/* failed to prepend data, give up */
4401 			SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4402 			sctp_m_freem(m);
4403 			return (ENOMEM);
4404 		}
4405 		SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
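		/*
		 * CRC32c handling: for UDP encapsulated packets the checksum
		 * is always computed in software here.  Otherwise FreeBSD
		 * requests CRC32c offload via CSUM_SCTP, while other
		 * platforms compute it in software unless
		 * sctp_no_csum_on_loopback is set and the association is
		 * loopback scoped.
		 */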
4406 		if (port) {
4407 			sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip) + sizeof(struct udphdr));
4408 			SCTP_STAT_INCR(sctps_sendswcrc);
4409 #if !defined(__Userspace__)
4410 #if defined(__FreeBSD__)
4411 			if (V_udp_cksum) {
4412 				SCTP_ENABLE_UDP_CSUM(o_pak);
4413 			}
4414 #else
4415 			SCTP_ENABLE_UDP_CSUM(o_pak);
4416 #endif
4417 #endif
4418 		} else {
4419 #if defined(__FreeBSD__) && !defined(__Userspace__)
4420 			m->m_pkthdr.csum_flags = CSUM_SCTP;
4421 			m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
4422 			SCTP_STAT_INCR(sctps_sendhwcrc);
4423 #else
4424 			if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
4425 			      (stcb) && (stcb->asoc.scope.loopback_scope))) {
4426 				sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip));
4427 				SCTP_STAT_INCR(sctps_sendswcrc);
4428 			} else {
4429 				SCTP_STAT_INCR(sctps_sendhwcrc);
4430 			}
4431 #endif
4432 		}
4433 #ifdef SCTP_PACKET_LOGGING
4434 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4435 			sctp_packet_log(o_pak);
4436 #endif
		/* Send it out; the table id is taken from the stcb. */
4438 #if defined(__APPLE__) && !defined(__Userspace__)
4439 		if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4440 			so = SCTP_INP_SO(inp);
4441 			SCTP_SOCKET_UNLOCK(so, 0);
4442 		}
4443 #endif
4444 #if defined(__FreeBSD__) && !defined(__Userspace__)
4445 		SCTP_PROBE5(send, NULL, stcb, ip, stcb, sctphdr);
4446 #endif
4447 		SCTP_IP_OUTPUT(ret, o_pak, ro, stcb, vrf_id);
4448 #if defined(__APPLE__) && !defined(__Userspace__)
4449 		if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4450 			atomic_add_int(&stcb->asoc.refcnt, 1);
4451 			SCTP_TCB_UNLOCK(stcb);
4452 			SCTP_SOCKET_LOCK(so, 0);
4453 			SCTP_TCB_LOCK(stcb);
4454 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4455 		}
4456 #endif
4457 #if defined(__FreeBSD__) && !defined(__Userspace__)
4458 		if (port) {
4459 			UDPSTAT_INC(udps_opackets);
4460 		}
4461 #endif
4462 		SCTP_STAT_INCR(sctps_sendpackets);
4463 		SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4464 		if (ret)
4465 			SCTP_STAT_INCR(sctps_senderrors);
4466 
4467 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "IP output returns %d\n", ret);
4468 		if (net == NULL) {
			/* Free the temporary route. */
4470 #if defined(__FreeBSD__) && !defined(__Userspace__)
4471 			RO_NHFREE(ro);
4472 #else
4473 			if (ro->ro_rt) {
4474 				RTFREE(ro->ro_rt);
4475 				ro->ro_rt = NULL;
4476 			}
4477 #endif
4478 		} else {
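			/*
			 * Refresh the path MTU from the route (minus the UDP
			 * header when encapsulating) and shrink net->mtu and
			 * the association's smallest MTU if necessary.
			 */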
4479 #if defined(__FreeBSD__) && !defined(__Userspace__)
4480 			if ((ro->ro_nh != NULL) && (net->ro._s_addr) &&
4481 #else
4482 			if ((ro->ro_rt != NULL) && (net->ro._s_addr) &&
4483 #endif
4484 			    ((net->dest_state & SCTP_ADDR_NO_PMTUD) == 0)) {
4485 				uint32_t mtu;
4486 
4487 #if defined(__FreeBSD__) && !defined(__Userspace__)
4488 				mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_nh);
4489 #else
4490 				mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4491 #endif
4492 				if (mtu > 0) {
4493 					if (net->port) {
4494 						mtu -= sizeof(struct udphdr);
4495 					}
4496 					if (mtu < net->mtu) {
4497 						if ((stcb != NULL) && (stcb->asoc.smallest_mtu > mtu)) {
4498 							sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4499 						}
4500 						net->mtu = mtu;
4501 					}
4502 				}
4503 #if defined(__FreeBSD__) && !defined(__Userspace__)
4504 			} else if (ro->ro_nh == NULL) {
4505 #else
4506 			} else if (ro->ro_rt == NULL) {
4507 #endif
4508 				/* route was freed */
4509 				if (net->ro._s_addr &&
4510 				    net->src_addr_selected) {
4511 					sctp_free_ifa(net->ro._s_addr);
4512 					net->ro._s_addr = NULL;
4513 				}
4514 				net->src_addr_selected = 0;
4515 			}
4516 		}
4517 		return (ret);
4518 	}
4519 #endif
4520 #ifdef INET6
4521 	case AF_INET6:
4522 	{
4523 		uint32_t flowlabel, flowinfo;
4524 		struct ip6_hdr *ip6h;
4525 		struct route_in6 ip6route;
4526 #if !defined(__Userspace__)
4527 		struct ifnet *ifp;
4528 #endif
4529 		struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp;
4530 		int prev_scope = 0;
4531 #ifdef SCTP_EMBEDDED_V6_SCOPE
4532 		struct sockaddr_in6 lsa6_storage;
4533 		int error;
4534 #endif
4535 		u_short prev_port = 0;
4536 		int len;
4537 
4538 		if (net) {
4539 			flowlabel = net->flowlabel;
4540 		} else if (stcb) {
4541 			flowlabel = stcb->asoc.default_flowlabel;
4542 		} else {
4543 			flowlabel = inp->sctp_ep.default_flowlabel;
4544 		}
4545 		if (flowlabel == 0) {
			/*
			 * This means, in particular, that it was not set at
			 * the SCTP layer, so use the value from the IP layer.
			 */
4550 #if defined(__APPLE__)  && !defined(__Userspace__) && (!defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION))
4551 			flowlabel = ntohl(inp->ip_inp.inp.inp_flow);
4552 #else
4553 			flowlabel = ntohl(((struct inpcb *)inp)->inp_flow);
4554 #endif
4555 		}
4556 		flowlabel &= 0x000fffff;
4557 		len = SCTP_MIN_OVERHEAD;
4558 		if (port) {
4559 			len += sizeof(struct udphdr);
4560 		}
4561 		newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
4562 		if (newm == NULL) {
4563 			sctp_m_freem(m);
4564 			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4565 			return (ENOMEM);
4566 		}
4567 		SCTP_ALIGN_TO_END(newm, len);
4568 		SCTP_BUF_LEN(newm) = len;
4569 		SCTP_BUF_NEXT(newm) = m;
4570 		m = newm;
4571 #if defined(__FreeBSD__) && !defined(__Userspace__)
4572 		if (net != NULL) {
4573 			m->m_pkthdr.flowid = net->flowid;
4574 			M_HASHTYPE_SET(m, net->flowtype);
4575 		} else {
4576 			m->m_pkthdr.flowid = mflowid;
4577 			M_HASHTYPE_SET(m, mflowtype);
		}
4579 #endif
4580 		packet_length = sctp_calculate_len(m);
4581 
4582 		ip6h = mtod(m, struct ip6_hdr *);
4583 		/* protect *sin6 from overwrite */
4584 		sin6 = (struct sockaddr_in6 *)to;
4585 		tmp = *sin6;
4586 		sin6 = &tmp;
4587 
4588 #ifdef SCTP_EMBEDDED_V6_SCOPE
4589 		/* KAME hack: embed scopeid */
4590 #if defined(__APPLE__) && !defined(__Userspace__)
4591 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
4592 		if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0)
4593 #else
4594 		if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0)
4595 #endif
4596 #elif defined(SCTP_KAME)
4597 		if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0)
4598 #else
4599 		if (in6_embedscope(&sin6->sin6_addr, sin6) != 0)
4600 #endif
4601 		{
4602 			SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4603 			sctp_m_freem(m);
4604 			return (EINVAL);
4605 		}
4606 #endif /* SCTP_EMBEDDED_V6_SCOPE */
4607 		if (net == NULL) {
4608 			memset(&ip6route, 0, sizeof(ip6route));
4609 			ro = (sctp_route_t *)&ip6route;
4610 #ifdef HAVE_SIN6_LEN
4611 			memcpy(&ro->ro_dst, sin6, sin6->sin6_len);
4612 #else
4613 			memcpy(&ro->ro_dst, sin6, sizeof(struct sockaddr_in6));
4614 #endif
4615 		} else {
4616 			ro = (sctp_route_t *)&net->ro;
4617 		}
4618 		/*
4619 		 * We assume here that inp_flow is in host byte order within
4620 		 * the TCB!
4621 		 */
4622 		if (tos_value == 0) {
			/*
			 * This means, in particular, that it was not set at
			 * the SCTP layer, so use the value from the IP layer.
			 */
4627 #if defined(__APPLE__)  && !defined(__Userspace__) && (!defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION))
4628 			tos_value = (ntohl(inp->ip_inp.inp.inp_flow) >> 20) & 0xff;
4629 #else
4630 			tos_value = (ntohl(((struct inpcb *)inp)->inp_flow) >> 20) & 0xff;
4631 #endif
4632 		}
4633 		tos_value &= 0xfc;
4634 		if (ecn_ok) {
4635 			tos_value |= sctp_get_ect(stcb);
4636 		}
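		/*
		 * Build the IPv6 flow word: version (4 bits), traffic class
		 * (8 bits), flow label (20 bits).  For example, tos_value
		 * 0x02 and flowlabel 0x12345 yield 0x60212345 before htonl().
		 */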
4637 		flowinfo = 0x06;
4638 		flowinfo <<= 8;
4639 		flowinfo |= tos_value;
4640 		flowinfo <<= 20;
4641 		flowinfo |= flowlabel;
4642 		ip6h->ip6_flow = htonl(flowinfo);
4643 		if (port) {
4644 			ip6h->ip6_nxt = IPPROTO_UDP;
4645 		} else {
4646 			ip6h->ip6_nxt = IPPROTO_SCTP;
4647 		}
4648 		ip6h->ip6_plen = htons((uint16_t)(packet_length - sizeof(struct ip6_hdr)));
4649 		ip6h->ip6_dst = sin6->sin6_addr;
4650 
		/*
		 * Add source address selection here: we can only reuse the
		 * KAME source address selection to a limited degree, since we
		 * can try its selection, but the chosen address may not be
		 * one we are bound to.
		 */
4656 		memset(&lsa6_tmp, 0, sizeof(lsa6_tmp));
4657 		lsa6_tmp.sin6_family = AF_INET6;
4658 #ifdef HAVE_SIN6_LEN
4659 		lsa6_tmp.sin6_len = sizeof(lsa6_tmp);
4660 #endif
4661 		lsa6 = &lsa6_tmp;
4662 		if (net && out_of_asoc_ok == 0) {
4663 			if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED|SCTP_ADDR_IFA_UNUSEABLE))) {
4664 				sctp_free_ifa(net->ro._s_addr);
4665 				net->ro._s_addr = NULL;
4666 				net->src_addr_selected = 0;
4667 #if defined(__FreeBSD__) && !defined(__Userspace__)
4668 				RO_NHFREE(ro);
4669 #else
4670 				if (ro->ro_rt) {
4671 					RTFREE(ro->ro_rt);
4672 					ro->ro_rt = NULL;
4673 				}
4674 #endif
4675 			}
4676 			if (net->src_addr_selected == 0) {
4677 #ifdef SCTP_EMBEDDED_V6_SCOPE
4678 				sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4679 				/* KAME hack: embed scopeid */
4680 #if defined(__APPLE__) && !defined(__Userspace__)
4681 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
4682 				if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0)
4683 #else
4684 				if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0)
4685 #endif
4686 #elif defined(SCTP_KAME)
4687 				if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0)
4688 #else
4689 				if (in6_embedscope(&sin6->sin6_addr, sin6) != 0)
4690 #endif
4691 				{
4692 					SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4693 					sctp_m_freem(m);
4694 					return (EINVAL);
4695 				}
4696 #endif /* SCTP_EMBEDDED_V6_SCOPE */
4697 				/* Cache the source address */
4698 				net->ro._s_addr = sctp_source_address_selection(inp,
4699 										stcb,
4700 										ro,
4701 										net,
4702 										0,
4703 										vrf_id);
4704 #ifdef SCTP_EMBEDDED_V6_SCOPE
4705 #ifdef SCTP_KAME
4706 				(void)sa6_recoverscope(sin6);
4707 #else
4708 				(void)in6_recoverscope(sin6, &sin6->sin6_addr, NULL);
4709 #endif	/* SCTP_KAME */
4710 #endif	/* SCTP_EMBEDDED_V6_SCOPE */
4711 				net->src_addr_selected = 1;
4712 			}
4713 			if (net->ro._s_addr == NULL) {
4714 				SCTPDBG(SCTP_DEBUG_OUTPUT3, "V6:No route to host\n");
4715 				net->src_addr_selected = 0;
4716 				sctp_handle_no_route(stcb, net, so_locked);
4717 				SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4718 				sctp_m_freem(m);
4719 				return (EHOSTUNREACH);
4720 			}
4721 			lsa6->sin6_addr = net->ro._s_addr->address.sin6.sin6_addr;
4722 		} else {
4723 #ifdef SCTP_EMBEDDED_V6_SCOPE
4724 			sin6 = (struct sockaddr_in6 *)&ro->ro_dst;
4725 			/* KAME hack: embed scopeid */
4726 #if defined(__APPLE__) && !defined(__Userspace__)
4727 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
4728 			if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0)
4729 #else
4730 			if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0)
4731 #endif
4732 #elif defined(SCTP_KAME)
4733 			if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0)
4734 #else
4735 			if (in6_embedscope(&sin6->sin6_addr, sin6) != 0)
4736 #endif
4737 			  {
4738 				SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4739 				sctp_m_freem(m);
4740 				return (EINVAL);
4741 			  }
4742 #endif /* SCTP_EMBEDDED_V6_SCOPE */
4743 			if (over_addr == NULL) {
4744 				struct sctp_ifa *_lsrc;
4745 
4746 				_lsrc = sctp_source_address_selection(inp, stcb, ro,
4747 				                                      net,
4748 				                                      out_of_asoc_ok,
4749 				                                      vrf_id);
4750 				if (_lsrc == NULL) {
4751 					sctp_handle_no_route(stcb, net, so_locked);
4752 					SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4753 					sctp_m_freem(m);
4754 					return (EHOSTUNREACH);
4755 				}
4756 				lsa6->sin6_addr = _lsrc->address.sin6.sin6_addr;
4757 				sctp_free_ifa(_lsrc);
4758 			} else {
4759 				lsa6->sin6_addr = over_addr->sin6.sin6_addr;
4760 				SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
4761 			}
4762 #ifdef SCTP_EMBEDDED_V6_SCOPE
4763 #ifdef SCTP_KAME
4764 			(void)sa6_recoverscope(sin6);
4765 #else
4766 			(void)in6_recoverscope(sin6, &sin6->sin6_addr, NULL);
4767 #endif	/* SCTP_KAME */
4768 #endif	/* SCTP_EMBEDDED_V6_SCOPE */
4769 		}
4770 		lsa6->sin6_port = inp->sctp_lport;
4771 
4772 #if defined(__FreeBSD__) && !defined(__Userspace__)
4773 		if (ro->ro_nh == NULL) {
4774 #else
4775 		if (ro->ro_rt == NULL) {
4776 #endif
4777 			/*
4778 			 * src addr selection failed to find a route (or
4779 			 * valid source addr), so we can't get there from
4780 			 * here!
4781 			 */
4782 			sctp_handle_no_route(stcb, net, so_locked);
4783 			SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4784 			sctp_m_freem(m);
4785 			return (EHOSTUNREACH);
4786 		}
4787 #ifndef SCOPEDROUTING
4788 #ifdef SCTP_EMBEDDED_V6_SCOPE
4789 		/*
4790 		 * XXX: sa6 may not have a valid sin6_scope_id in the
4791 		 * non-SCOPEDROUTING case.
4792 		 */
4793 		memset(&lsa6_storage, 0, sizeof(lsa6_storage));
4794 		lsa6_storage.sin6_family = AF_INET6;
4795 #ifdef HAVE_SIN6_LEN
4796 		lsa6_storage.sin6_len = sizeof(lsa6_storage);
4797 #endif
4798 #ifdef SCTP_KAME
4799 		lsa6_storage.sin6_addr = lsa6->sin6_addr;
4800 		if ((error = sa6_recoverscope(&lsa6_storage)) != 0) {
4801 #else
4802 		if ((error = in6_recoverscope(&lsa6_storage, &lsa6->sin6_addr,
4803 		    NULL)) != 0) {
4804 #endif				/* SCTP_KAME */
4805 			SCTPDBG(SCTP_DEBUG_OUTPUT3, "recover scope fails error %d\n", error);
4806 			sctp_m_freem(m);
4807 			return (error);
4808 		}
4809 		/* XXX */
4810 		lsa6_storage.sin6_addr = lsa6->sin6_addr;
4811 		lsa6_storage.sin6_port = inp->sctp_lport;
4812 		lsa6 = &lsa6_storage;
4813 #endif /* SCTP_EMBEDDED_V6_SCOPE */
4814 #endif /* SCOPEDROUTING */
4815 		ip6h->ip6_src = lsa6->sin6_addr;
4816 
4817 		if (port) {
4818 			if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
4819 				sctp_handle_no_route(stcb, net, so_locked);
4820 				SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4821 				sctp_m_freem(m);
4822 				return (EHOSTUNREACH);
4823 			}
4824 			udp = (struct udphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4825 			udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4826 			udp->uh_dport = port;
4827 			udp->uh_ulen = htons((uint16_t)(packet_length - sizeof(struct ip6_hdr)));
4828 			udp->uh_sum = 0;
4829 			sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4830 		} else {
4831 			sctphdr = (struct sctphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4832 		}
4833 
4834 		sctphdr->src_port = src_port;
4835 		sctphdr->dest_port = dest_port;
4836 		sctphdr->v_tag = v_tag;
4837 		sctphdr->checksum = 0;
4838 
4839 		/*
4840 		 * We set the hop limit now since there is a good chance
4841 		 * that our ro pointer is now filled
4842 		 */
4843 		ip6h->ip6_hlim = SCTP_GET_HLIM(inp, ro);
4844 #if !defined(__Userspace__)
4845 		ifp = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
4846 #endif
4847 
4848 #ifdef SCTP_DEBUG
		/* Copy back so the debug output below reflects the actual header. */
4850 		sin6->sin6_addr = ip6h->ip6_dst;
4851 		lsa6->sin6_addr = ip6h->ip6_src;
4852 #endif
4853 
4854 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv6 output routine from low level\n");
4855 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "src: ");
4856 		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)lsa6);
4857 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst: ");
4858 		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)sin6);
4859 		if (net) {
4860 			sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4861 			/* preserve the port and scope for link local send */
4862 			prev_scope = sin6->sin6_scope_id;
4863 			prev_port = sin6->sin6_port;
4864 		}
4865 
4866 		if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4867 			/* failed to prepend data, give up */
4868 			sctp_m_freem(m);
4869 			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4870 			return (ENOMEM);
4871 		}
4872 		SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
4873 		if (port) {
4874 			sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
4875 			SCTP_STAT_INCR(sctps_sendswcrc);
4876 #if !defined(__Userspace__)
4877 #if defined(_WIN32)
4878 			udp->uh_sum = 0;
4879 #else
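			/*
			 * For IPv6 the UDP checksum is mandatory; a computed
			 * value of zero is transmitted as 0xffff.
			 */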
4880 			if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), packet_length - sizeof(struct ip6_hdr))) == 0) {
4881 				udp->uh_sum = 0xffff;
4882 			}
4883 #endif
4884 #endif
4885 		} else {
4886 #if defined(__FreeBSD__) && !defined(__Userspace__)
4887 			m->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
4888 			m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
4889 			SCTP_STAT_INCR(sctps_sendhwcrc);
4890 #else
4891 			if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
4892 			      (stcb) && (stcb->asoc.scope.loopback_scope))) {
4893 				sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr));
4894 				SCTP_STAT_INCR(sctps_sendswcrc);
4895 			} else {
4896 				SCTP_STAT_INCR(sctps_sendhwcrc);
4897 			}
4898 #endif
4899 		}
		/* Send it out; the table id is taken from the stcb. */
4901 #if defined(__APPLE__) && !defined(__Userspace__)
4902 		if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4903 			so = SCTP_INP_SO(inp);
4904 			SCTP_SOCKET_UNLOCK(so, 0);
4905 		}
4906 #endif
4907 #ifdef SCTP_PACKET_LOGGING
4908 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4909 			sctp_packet_log(o_pak);
4910 #endif
4911 #if !defined(__Userspace__)
4912 #if defined(__FreeBSD__)
4913 		SCTP_PROBE5(send, NULL, stcb, ip6h, stcb, sctphdr);
4914 #endif
4915 		SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, &ifp, stcb, vrf_id);
4916 #else
4917 		SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, NULL, stcb, vrf_id);
4918 #endif
4919 #if defined(__APPLE__) && !defined(__Userspace__)
4920 		if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4921 			atomic_add_int(&stcb->asoc.refcnt, 1);
4922 			SCTP_TCB_UNLOCK(stcb);
4923 			SCTP_SOCKET_LOCK(so, 0);
4924 			SCTP_TCB_LOCK(stcb);
4925 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4926 		}
4927 #endif
4928 		if (net) {
4929 			/* for link local this must be done */
4930 			sin6->sin6_scope_id = prev_scope;
4931 			sin6->sin6_port = prev_port;
4932 		}
4933 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
4934 #if defined(__FreeBSD__) && !defined(__Userspace__)
4935 		if (port) {
4936 			UDPSTAT_INC(udps_opackets);
4937 		}
4938 #endif
4939 		SCTP_STAT_INCR(sctps_sendpackets);
4940 		SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4941 		if (ret) {
4942 			SCTP_STAT_INCR(sctps_senderrors);
4943 		}
4944 		if (net == NULL) {
			/* If we used a temporary route, free it now. */
4946 #if defined(__FreeBSD__) && !defined(__Userspace__)
4947 			RO_NHFREE(ro);
4948 #else
4949 			if (ro->ro_rt) {
4950 				RTFREE(ro->ro_rt);
4951 				ro->ro_rt = NULL;
4952 			}
4953 #endif
4954 		} else {
4955 			/* PMTU check versus smallest asoc MTU goes here */
4956 #if defined(__FreeBSD__) && !defined(__Userspace__)
4957 			if (ro->ro_nh == NULL) {
4958 #else
4959 			if (ro->ro_rt == NULL) {
4960 #endif
4961 				/* Route was freed */
4962 				if (net->ro._s_addr &&
4963 				    net->src_addr_selected) {
4964 					sctp_free_ifa(net->ro._s_addr);
4965 					net->ro._s_addr = NULL;
4966 				}
4967 				net->src_addr_selected = 0;
4968 			}
4969 #if defined(__FreeBSD__) && !defined(__Userspace__)
4970 			if ((ro->ro_nh != NULL) && (net->ro._s_addr) &&
4971 #else
4972 			if ((ro->ro_rt != NULL) && (net->ro._s_addr) &&
4973 #endif
4974 			    ((net->dest_state & SCTP_ADDR_NO_PMTUD) == 0)) {
4975 				uint32_t mtu;
4976 
4977 #if defined(__FreeBSD__) && !defined(__Userspace__)
4978 				mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_nh);
4979 #else
4980 				mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4981 #endif
4982 				if (mtu > 0) {
4983 					if (net->port) {
4984 						mtu -= sizeof(struct udphdr);
4985 					}
4986 					if (mtu < net->mtu) {
4987 						if ((stcb != NULL) && (stcb->asoc.smallest_mtu > mtu)) {
4988 							sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4989 						}
4990 						net->mtu = mtu;
4991 					}
4992 				}
4993 			}
4994 #if !defined(__Userspace__)
4995 			else if (ifp) {
4996 #if defined(_WIN32)
4997 #define ND_IFINFO(ifp)	(ifp)
4998 #define linkmtu		if_mtu
4999 #endif
5000 				if (ND_IFINFO(ifp)->linkmtu &&
5001 				    (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) {
5002 					sctp_mtu_size_reset(inp,
5003 					    &stcb->asoc,
5004 					    ND_IFINFO(ifp)->linkmtu);
5005 				}
5006 			}
5007 #endif
5008 		}
5009 		return (ret);
5010 	}
5011 #endif
5012 #if defined(__Userspace__)
5013 	case AF_CONN:
5014 	{
5015 		char *buffer;
5016 		struct sockaddr_conn *sconn;
5017 		int len;
5018 
5019 		sconn = (struct sockaddr_conn *)to;
5020 		len = sizeof(struct sctphdr);
5021 		newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
5022 		if (newm == NULL) {
5023 			sctp_m_freem(m);
5024 			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
5025 			return (ENOMEM);
5026 		}
5027 		SCTP_ALIGN_TO_END(newm, len);
5028 		SCTP_BUF_LEN(newm) = len;
5029 		SCTP_BUF_NEXT(newm) = m;
5030 		m = newm;
5031 		packet_length = sctp_calculate_len(m);
5032 		sctphdr = mtod(m, struct sctphdr *);
5033 		sctphdr->src_port = src_port;
5034 		sctphdr->dest_port = dest_port;
5035 		sctphdr->v_tag = v_tag;
5036 		sctphdr->checksum = 0;
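		/*
		 * Skip the software CRC32c when crc32c_offloaded indicates
		 * that checksum handling is done elsewhere (e.g. by the
		 * application using the AF_CONN transport).
		 */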
5037 		if (SCTP_BASE_VAR(crc32c_offloaded) == 0) {
5038 			sctphdr->checksum = sctp_calculate_cksum(m, 0);
5039 			SCTP_STAT_INCR(sctps_sendswcrc);
5040 		} else {
5041 			SCTP_STAT_INCR(sctps_sendhwcrc);
5042 		}
5043 		if (tos_value == 0) {
5044 			tos_value = inp->ip_inp.inp.inp_ip_tos;
5045 		}
5046 		tos_value &= 0xfc;
5047 		if (ecn_ok) {
5048 			tos_value |= sctp_get_ect(stcb);
5049 		}
		/* XXX: avoid allocating and freeing a buffer for each packet. */
5051 		if ((buffer = malloc(packet_length)) != NULL) {
5052 			m_copydata(m, 0, packet_length, buffer);
5053 			ret = SCTP_BASE_VAR(conn_output)(sconn->sconn_addr, buffer, packet_length, tos_value, nofragment_flag);
5054 			free(buffer);
5055 		} else {
5056 			ret = ENOMEM;
5057 		}
5058 		sctp_m_freem(m);
5059 		return (ret);
5060 	}
5061 #endif
5062 	default:
5063 		SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
5064 		        ((struct sockaddr *)to)->sa_family);
5065 		sctp_m_freem(m);
5066 		SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
5067 		return (EFAULT);
5068 	}
5069 }
5070 
5071 
5072 void
5073 sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked)
5074 {
5075 	struct mbuf *m, *m_last;
5076 	struct sctp_nets *net;
5077 	struct sctp_init_chunk *init;
5078 	struct sctp_supported_addr_param *sup_addr;
5079 	struct sctp_adaptation_layer_indication *ali;
5080 	struct sctp_supported_chunk_types_param *pr_supported;
5081 	struct sctp_paramhdr *ph;
5082 	int cnt_inits_to = 0;
5083 	int error;
5084 	uint16_t num_ext, chunk_len, padding_len, parameter_len;
5085 
5086 #if defined(__APPLE__) && !defined(__Userspace__)
5087 	if (so_locked) {
5088 		sctp_lock_assert(SCTP_INP_SO(inp));
5089 	} else {
5090 		sctp_unlock_assert(SCTP_INP_SO(inp));
5091 	}
5092 #endif
	/* INITs always go to the primary (and usually the only) address. */
5094 	net = stcb->asoc.primary_destination;
5095 	if (net == NULL) {
5096 		net = TAILQ_FIRST(&stcb->asoc.nets);
5097 		if (net == NULL) {
5098 			/* TSNH */
5099 			return;
5100 		}
5101 		/* we confirm any address we send an INIT to */
5102 		net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
5103 		(void)sctp_set_primary_addr(stcb, NULL, net);
5104 	} else {
5105 		/* we confirm any address we send an INIT to */
5106 		net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
5107 	}
5108 	SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT\n");
5109 #ifdef INET6
5110 	if (net->ro._l_addr.sa.sa_family == AF_INET6) {
		/*
		 * Special hook: if we are sending to a link-local address, it
		 * will not show up in our private address count.
		 */
5115 		if (IN6_IS_ADDR_LINKLOCAL(&net->ro._l_addr.sin6.sin6_addr))
5116 			cnt_inits_to = 1;
5117 	}
5118 #endif
5119 	if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5120 		/* This case should not happen */
5121 		SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - failed timer?\n");
5122 		return;
5123 	}
5124 	/* start the INIT timer */
5125 	sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net);
5126 
5127 	m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_NOWAIT, 1, MT_DATA);
5128 	if (m == NULL) {
5129 		/* No memory, INIT timer will re-attempt. */
5130 		SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - mbuf?\n");
5131 		return;
5132 	}
5133 	chunk_len = (uint16_t)sizeof(struct sctp_init_chunk);
5134 	padding_len = 0;
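	/*
	 * chunk_len tracks how many bytes have been written into the mbuf so
	 * far; padding_len is the padding still owed for the previous
	 * parameter and is flushed before the next parameter is appended.
	 */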
	/* Now let's put the chunk header in place. */
5136 	init = mtod(m, struct sctp_init_chunk *);
5137 	/* now the chunk header */
5138 	init->ch.chunk_type = SCTP_INITIATION;
5139 	init->ch.chunk_flags = 0;
5140 	/* fill in later from mbuf we build */
5141 	init->ch.chunk_length = 0;
5142 	/* place in my tag */
5143 	init->init.initiate_tag = htonl(stcb->asoc.my_vtag);
	/* Set up the advertised receive window (a_rwnd). */
5145 	init->init.a_rwnd = htonl(max(inp->sctp_socket?SCTP_SB_LIMIT_RCV(inp->sctp_socket):0,
5146 	                              SCTP_MINIMAL_RWND));
5147 	init->init.num_outbound_streams = htons(stcb->asoc.pre_open_streams);
5148 	init->init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams);
5149 	init->init.initial_tsn = htonl(stcb->asoc.init_seq_number);
5150 
5151 	/* Adaptation layer indication parameter */
5152 	if (inp->sctp_ep.adaptation_layer_indicator_provided) {
5153 		parameter_len = (uint16_t)sizeof(struct sctp_adaptation_layer_indication);
5154 		ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t) + chunk_len);
5155 		ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
5156 		ali->ph.param_length = htons(parameter_len);
5157 		ali->indication = htonl(inp->sctp_ep.adaptation_layer_indicator);
5158 		chunk_len += parameter_len;
5159 	}
5160 
5161 	/* ECN parameter */
5162 	if (stcb->asoc.ecn_supported == 1) {
5163 		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5164 		ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
5165 		ph->param_type = htons(SCTP_ECN_CAPABLE);
5166 		ph->param_length = htons(parameter_len);
5167 		chunk_len += parameter_len;
5168 	}
5169 
5170 	/* PR-SCTP supported parameter */
5171 	if (stcb->asoc.prsctp_supported == 1) {
5172 		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5173 		ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
5174 		ph->param_type = htons(SCTP_PRSCTP_SUPPORTED);
5175 		ph->param_length = htons(parameter_len);
5176 		chunk_len += parameter_len;
5177 	}
5178 
5179 	/* Add NAT friendly parameter. */
5180 	if (SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly)) {
5181 		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5182 		ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
5183 		ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
5184 		ph->param_length = htons(parameter_len);
5185 		chunk_len += parameter_len;
5186 	}
5187 
5188 	/* And now tell the peer which extensions we support */
5189 	num_ext = 0;
5190 	pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t) + chunk_len);
5191 	if (stcb->asoc.prsctp_supported == 1) {
5192 		pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
5193 		if (stcb->asoc.idata_supported) {
5194 			pr_supported->chunk_types[num_ext++] = SCTP_IFORWARD_CUM_TSN;
5195 		}
5196 	}
5197 	if (stcb->asoc.auth_supported == 1) {
5198 		pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
5199 	}
5200 	if (stcb->asoc.asconf_supported == 1) {
5201 		pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
5202 		pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
5203 	}
5204 	if (stcb->asoc.reconfig_supported == 1) {
5205 		pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
5206 	}
5207 	if (stcb->asoc.idata_supported) {
5208 		pr_supported->chunk_types[num_ext++] = SCTP_IDATA;
5209 	}
5210 	if (stcb->asoc.nrsack_supported == 1) {
5211 		pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
5212 	}
5213 	if (stcb->asoc.pktdrop_supported == 1) {
5214 		pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
5215 	}
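	/*
	 * Only emit the Supported Extensions parameter if at least one
	 * extension was listed; its length is the fixed header plus one byte
	 * per chunk type, rounded up to a 4-byte boundary by padding.
	 */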
5216 	if (num_ext > 0) {
5217 		parameter_len = (uint16_t)sizeof(struct sctp_supported_chunk_types_param) + num_ext;
5218 		pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
5219 		pr_supported->ph.param_length = htons(parameter_len);
5220 		padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
5221 		chunk_len += parameter_len;
5222 	}
5223 	/* add authentication parameters */
5224 	if (stcb->asoc.auth_supported) {
5225 		/* attach RANDOM parameter, if available */
5226 		if (stcb->asoc.authinfo.random != NULL) {
5227 			struct sctp_auth_random *randp;
5228 
5229 			if (padding_len > 0) {
5230 				memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5231 				chunk_len += padding_len;
5232 				padding_len = 0;
5233 			}
5234 			randp = (struct sctp_auth_random *)(mtod(m, caddr_t) + chunk_len);
5235 			parameter_len = (uint16_t)sizeof(struct sctp_auth_random) + stcb->asoc.authinfo.random_len;
5236 			/* random key already contains the header */
5237 			memcpy(randp, stcb->asoc.authinfo.random->key, parameter_len);
5238 			padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
5239 			chunk_len += parameter_len;
5240 		}
5241 		/* add HMAC_ALGO parameter */
5242 		if (stcb->asoc.local_hmacs != NULL) {
5243 			struct sctp_auth_hmac_algo *hmacs;
5244 
5245 			if (padding_len > 0) {
5246 				memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5247 				chunk_len += padding_len;
5248 				padding_len = 0;
5249 			}
5250 			hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t) + chunk_len);
5251 			parameter_len = (uint16_t)(sizeof(struct sctp_auth_hmac_algo) +
5252 			                           stcb->asoc.local_hmacs->num_algo * sizeof(uint16_t));
5253 			hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
5254 			hmacs->ph.param_length = htons(parameter_len);
5255 			sctp_serialize_hmaclist(stcb->asoc.local_hmacs, (uint8_t *)hmacs->hmac_ids);
5256 			padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
5257 			chunk_len += parameter_len;
5258 		}
5259 		/* add CHUNKS parameter */
5260 		if (stcb->asoc.local_auth_chunks != NULL) {
5261 			struct sctp_auth_chunk_list *chunks;
5262 
5263 			if (padding_len > 0) {
5264 				memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5265 				chunk_len += padding_len;
5266 				padding_len = 0;
5267 			}
5268 			chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t) + chunk_len);
5269 			parameter_len = (uint16_t)(sizeof(struct sctp_auth_chunk_list) +
5270 			                           sctp_auth_get_chklist_size(stcb->asoc.local_auth_chunks));
5271 			chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
5272 			chunks->ph.param_length = htons(parameter_len);
5273 			sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks, chunks->chunk_types);
5274 			padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
5275 			chunk_len += parameter_len;
5276 		}
5277 	}
5278 
	/* Now any Cookie Preservative (cookie lifetime extension) request. */
5280 	if (stcb->asoc.cookie_preserve_req) {
5281 		struct sctp_cookie_perserve_param *cookie_preserve;
5282 
5283 		if (padding_len > 0) {
5284 			memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5285 			chunk_len += padding_len;
5286 			padding_len = 0;
5287 		}
5288 		parameter_len = (uint16_t)sizeof(struct sctp_cookie_perserve_param);
5289 		cookie_preserve = (struct sctp_cookie_perserve_param *)(mtod(m, caddr_t) + chunk_len);
5290 		cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE);
5291 		cookie_preserve->ph.param_length = htons(parameter_len);
5292 		cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req);
5293 		stcb->asoc.cookie_preserve_req = 0;
5294 		chunk_len += parameter_len;
5295 	}
5296 
5297 	if (stcb->asoc.scope.ipv4_addr_legal || stcb->asoc.scope.ipv6_addr_legal) {
5298 		uint8_t i;
5299 
5300 		if (padding_len > 0) {
5301 			memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5302 			chunk_len += padding_len;
5303 			padding_len = 0;
5304 		}
5305 		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5306 		if (stcb->asoc.scope.ipv4_addr_legal) {
5307 			parameter_len += (uint16_t)sizeof(uint16_t);
5308 		}
5309 		if (stcb->asoc.scope.ipv6_addr_legal) {
5310 			parameter_len += (uint16_t)sizeof(uint16_t);
5311 		}
5312 		sup_addr = (struct sctp_supported_addr_param *)(mtod(m, caddr_t) + chunk_len);
5313 		sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE);
5314 		sup_addr->ph.param_length = htons(parameter_len);
5315 		i = 0;
5316 		if (stcb->asoc.scope.ipv4_addr_legal) {
5317 			sup_addr->addr_type[i++] = htons(SCTP_IPV4_ADDRESS);
5318 		}
5319 		if (stcb->asoc.scope.ipv6_addr_legal) {
5320 			sup_addr->addr_type[i++] = htons(SCTP_IPV6_ADDRESS);
5321 		}
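		/*
		 * The parameter body holds i 16-bit address types after the
		 * 4-byte parameter header, so 2 bytes of padding are needed
		 * when one address type is listed and none when both are.
		 */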
5322 		padding_len = 4 - 2 * i;
5323 		chunk_len += parameter_len;
5324 	}
5325 
5326 	SCTP_BUF_LEN(m) = chunk_len;
5327 	/* now the addresses */
	/*
	 * To optimize this we could put the scoping information into a
	 * structure and remove the individual uint8_t fields from the
	 * association structure; then we could simply fill in the addresses
	 * within the stcb.  For now this is a quick hack to keep the address
	 * handling teased apart.
	 */
5334 	m_last = sctp_add_addresses_to_i_ia(inp, stcb, &stcb->asoc.scope,
5335 	                                    m, cnt_inits_to,
5336 	                                    &padding_len, &chunk_len);
5337 
5338 	init->ch.chunk_length = htons(chunk_len);
5339 	if (padding_len > 0) {
5340 		if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
5341 			sctp_m_freem(m);
5342 			return;
5343 		}
5344 	}
5345 	SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - calls lowlevel_output\n");
5346 	if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
5347 	                                        (struct sockaddr *)&net->ro._l_addr,
5348 	                                        m, 0, NULL, 0, 0, 0, 0,
5349 	                                        inp->sctp_lport, stcb->rport, htonl(0),
5350 	                                        net->port, NULL,
5351 #if defined(__FreeBSD__) && !defined(__Userspace__)
5352 	                                        0, 0,
5353 #endif
5354 	                                        so_locked))) {
5355 		SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak send error %d\n", error);
5356 		if (error == ENOBUFS) {
5357 			stcb->asoc.ifp_had_enobuf = 1;
5358 			SCTP_STAT_INCR(sctps_lowlevelerr);
5359 		}
5360 	} else {
5361 		stcb->asoc.ifp_had_enobuf = 0;
5362 	}
5363 	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
5364 	(void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
5365 }
5366 
5367 struct mbuf *
5368 sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt,
5369                                       int param_offset, int *abort_processing,
5370                                       struct sctp_chunkhdr *cp,
5371                                       int *nat_friendly,
5372                                       int *cookie_found)
5373 {
	/*
	 * Given an mbuf containing an INIT or INIT-ACK, with param_offset
	 * pointing at the beginning of the parameters (i.e. iphlen +
	 * sizeof(struct sctp_init_msg)), parse through the parameters to the
	 * end of the mbuf, verifying that all parameters are known.
	 *
	 * For unknown parameters build and return an mbuf with
	 * UNRECOGNIZED_PARAMETER errors.  If the flags indicate to stop
	 * processing this chunk, stop and set *abort_processing to 1.
	 *
	 * By having param_offset pre-set to where the parameters begin, it is
	 * hoped that this routine may be reused in the future by new
	 * features.
	 */
5388 	struct sctp_paramhdr *phdr, params;
5389 
5390 	struct mbuf *mat, *m_tmp, *op_err, *op_err_last;
5391 	int at, limit, pad_needed;
5392 	uint16_t ptype, plen, padded_size;
5393 
5394 	*abort_processing = 0;
5395 	if (cookie_found != NULL) {
5396 		*cookie_found = 0;
5397 	}
5398 	mat = in_initpkt;
5399 	limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk);
5400 	at = param_offset;
5401 	op_err = NULL;
5402 	op_err_last = NULL;
5403 	pad_needed = 0;
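	/*
	 * 'limit' is the number of parameter bytes remaining in the chunk;
	 * 'at' is the current offset into the packet mbuf chain.
	 */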
5404 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "Check for unrecognized param's\n");
5405 	phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
5406 	while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) {
5407 		ptype = ntohs(phdr->param_type);
5408 		plen = ntohs(phdr->param_length);
5409 		if ((plen > limit) || (plen < sizeof(struct sctp_paramhdr))) {
			/* malformed parameter length */
5411 			SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error %d\n", plen);
5412 			goto invalid_size;
5413 		}
5414 		limit -= SCTP_SIZE32(plen);
		/*-
		 * All parameters for all chunks that we know/understand are
		 * listed here.  We process them in other places and take the
		 * appropriate stop action based on the upper bits.  However,
		 * this is the generic routine processors can call to get back
		 * an operational error, either to incorporate (INIT-ACK) or
		 * to send.
		 */
5422 		padded_size = SCTP_SIZE32(plen);
5423 		switch (ptype) {
			/* Params with variable size */
5425 		case SCTP_HEARTBEAT_INFO:
5426 		case SCTP_UNRECOG_PARAM:
5427 		case SCTP_ERROR_CAUSE_IND:
5428 			/* ok skip fwd */
5429 			at += padded_size;
5430 			break;
5431 		case SCTP_STATE_COOKIE:
5432 			if (cookie_found != NULL) {
5433 				*cookie_found = 1;
5434 			}
5435 			at += padded_size;
5436 			break;
			/* Params with variable size within a range */
5438 		case SCTP_CHUNK_LIST:
5439 		case SCTP_SUPPORTED_CHUNK_EXT:
5440 			if (padded_size > (sizeof(struct sctp_supported_chunk_types_param) + (sizeof(uint8_t) * SCTP_MAX_SUPPORTED_EXT))) {
5441 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error chklist %d\n", plen);
5442 				goto invalid_size;
5443 			}
5444 			at += padded_size;
5445 			break;
5446 		case SCTP_SUPPORTED_ADDRTYPE:
5447 			if (padded_size > SCTP_MAX_ADDR_PARAMS_SIZE) {
5448 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error supaddrtype %d\n", plen);
5449 				goto invalid_size;
5450 			}
5451 			at += padded_size;
5452 			break;
5453 		case SCTP_RANDOM:
5454 			if (padded_size > (sizeof(struct sctp_auth_random) + SCTP_RANDOM_MAX_SIZE)) {
5455 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error random %d\n", plen);
5456 				goto invalid_size;
5457 			}
5458 			at += padded_size;
5459 			break;
5460 		case SCTP_SET_PRIM_ADDR:
5461 		case SCTP_DEL_IP_ADDRESS:
5462 		case SCTP_ADD_IP_ADDRESS:
5463 			if ((padded_size != sizeof(struct sctp_asconf_addrv4_param)) &&
5464 			    (padded_size != sizeof(struct sctp_asconf_addr_param))) {
5465 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error setprim %d\n", plen);
5466 				goto invalid_size;
5467 			}
5468 			at += padded_size;
5469 			break;
			/* Params with a fixed size */
5471 		case SCTP_IPV4_ADDRESS:
5472 			if (padded_size != sizeof(struct sctp_ipv4addr_param)) {
5473 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv4 addr %d\n", plen);
5474 				goto invalid_size;
5475 			}
5476 			at += padded_size;
5477 			break;
5478 		case SCTP_IPV6_ADDRESS:
5479 			if (padded_size != sizeof(struct sctp_ipv6addr_param)) {
5480 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv6 addr %d\n", plen);
5481 				goto invalid_size;
5482 			}
5483 			at += padded_size;
5484 			break;
5485 		case SCTP_COOKIE_PRESERVE:
5486 			if (padded_size != sizeof(struct sctp_cookie_perserve_param)) {
5487 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error cookie-preserve %d\n", plen);
5488 				goto invalid_size;
5489 			}
5490 			at += padded_size;
5491 			break;
5492 		case SCTP_HAS_NAT_SUPPORT:
5493 			*nat_friendly = 1;
5494 			/* fall through */
5495 		case SCTP_PRSCTP_SUPPORTED:
5496 			if (padded_size != sizeof(struct sctp_paramhdr)) {
5497 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error prsctp/nat support %d\n", plen);
5498 				goto invalid_size;
5499 			}
5500 			at += padded_size;
5501 			break;
5502 		case SCTP_ECN_CAPABLE:
5503 			if (padded_size != sizeof(struct sctp_paramhdr)) {
5504 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecn %d\n", plen);
5505 				goto invalid_size;
5506 			}
5507 			at += padded_size;
5508 			break;
5509 		case SCTP_ULP_ADAPTATION:
5510 			if (padded_size != sizeof(struct sctp_adaptation_layer_indication)) {
				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error adaptation %d\n", plen);
5512 				goto invalid_size;
5513 			}
5514 			at += padded_size;
5515 			break;
5516 		case SCTP_SUCCESS_REPORT:
5517 			if (padded_size != sizeof(struct sctp_asconf_paramhdr)) {
5518 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error success %d\n", plen);
5519 				goto invalid_size;
5520 			}
5521 			at += padded_size;
5522 			break;
5523 		case SCTP_HOSTNAME_ADDRESS:
5524 		{
5525 			/* Hostname parameters are deprecated. */
5526 			struct sctp_gen_error_cause *cause;
5527 			int l_len;
5528 
5529 			SCTPDBG(SCTP_DEBUG_OUTPUT1, "Can't handle hostname addresses.. abort processing\n");
5530 			*abort_processing = 1;
5531 			sctp_m_freem(op_err);
5532 			op_err = NULL;
5533 			op_err_last = NULL;
5534 #ifdef INET6
5535 			l_len = SCTP_MIN_OVERHEAD;
5536 #else
5537 			l_len = SCTP_MIN_V4_OVERHEAD;
5538 #endif
5539 			l_len += sizeof(struct sctp_chunkhdr);
5540 			l_len += sizeof(struct sctp_gen_error_cause);
5541 			op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5542 			if (op_err != NULL) {
5543 				/*
5544 				 * Pre-reserve space for IP, SCTP, and
5545 				 * chunk header.
5546 				 */
5547 #ifdef INET6
5548 				SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5549 #else
5550 				SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5551 #endif
5552 				SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5553 				SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5554 				SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
5555 				cause = mtod(op_err, struct sctp_gen_error_cause *);
5556 				cause->code = htons(SCTP_CAUSE_UNRESOLVABLE_ADDR);
5557 				cause->length = htons((uint16_t)(sizeof(struct sctp_gen_error_cause) + plen));
5558 				SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(mat, at, plen, M_NOWAIT);
5559 				if (SCTP_BUF_NEXT(op_err) == NULL) {
5560 					sctp_m_freem(op_err);
5561 					op_err = NULL;
5562 					op_err_last = NULL;
5563 				}
5564 			}
5565 			return (op_err);
5566 		}
5567 		default:
			/*
			 * We do not recognize the parameter; figure out what
			 * to do with it.
			 */
5572 			SCTPDBG(SCTP_DEBUG_OUTPUT1, "Hit default param %x\n", ptype);
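			/*
			 * The two high-order bits of the parameter type say
			 * how to treat an unrecognized parameter (RFC 4960,
			 * section 3.2.1): 0x4000 set means report it in an
			 * Unrecognized Parameter cause, 0x8000 set means skip
			 * it and continue, clear means stop processing.
			 */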
5573 			if ((ptype & 0x4000) == 0x4000) {
5574 				/* Report bit is set?? */
5575 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "report op err\n");
5576 				if (op_err == NULL) {
5577 					int l_len;
5578 					/* Ok need to try to get an mbuf */
5579 #ifdef INET6
5580 					l_len = SCTP_MIN_OVERHEAD;
5581 #else
5582 					l_len = SCTP_MIN_V4_OVERHEAD;
5583 #endif
5584 					l_len += sizeof(struct sctp_chunkhdr);
5585 					l_len += sizeof(struct sctp_paramhdr);
5586 					op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5587 					if (op_err) {
5588 						SCTP_BUF_LEN(op_err) = 0;
5589 #ifdef INET6
5590 						SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5591 #else
5592 						SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5593 #endif
5594 						SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5595 						SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5596 						op_err_last = op_err;
5597 					}
5598 				}
5599 				if (op_err != NULL) {
5600 					/* If we have space */
5601 					struct sctp_paramhdr *param;
5602 
5603 					if (pad_needed > 0) {
5604 						op_err_last = sctp_add_pad_tombuf(op_err_last, pad_needed);
5605 					}
5606 					if (op_err_last == NULL) {
5607 						sctp_m_freem(op_err);
5608 						op_err = NULL;
5609 						op_err_last = NULL;
5610 						goto more_processing;
5611 					}
5612 					if (M_TRAILINGSPACE(op_err_last) < (int)sizeof(struct sctp_paramhdr)) {
5613 						m_tmp = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_NOWAIT, 1, MT_DATA);
5614 						if (m_tmp == NULL) {
5615 							sctp_m_freem(op_err);
5616 							op_err = NULL;
5617 							op_err_last = NULL;
5618 							goto more_processing;
5619 						}
5620 						SCTP_BUF_LEN(m_tmp) = 0;
5621 						SCTP_BUF_NEXT(m_tmp) = NULL;
5622 						SCTP_BUF_NEXT(op_err_last) = m_tmp;
5623 						op_err_last = m_tmp;
5624 					}
5625 					param = (struct sctp_paramhdr *)(mtod(op_err_last, caddr_t) + SCTP_BUF_LEN(op_err_last));
5626 					param->param_type = htons(SCTP_UNRECOG_PARAM);
5627 					param->param_length = htons((uint16_t)sizeof(struct sctp_paramhdr) + plen);
5628 					SCTP_BUF_LEN(op_err_last) += sizeof(struct sctp_paramhdr);
5629 					SCTP_BUF_NEXT(op_err_last) = SCTP_M_COPYM(mat, at, plen, M_NOWAIT);
5630 					if (SCTP_BUF_NEXT(op_err_last) == NULL) {
5631 						sctp_m_freem(op_err);
5632 						op_err = NULL;
5633 						op_err_last = NULL;
5634 						goto more_processing;
5635 					} else {
5636 						while (SCTP_BUF_NEXT(op_err_last) != NULL) {
5637 							op_err_last = SCTP_BUF_NEXT(op_err_last);
5638 						}
5639 					}
5640 					if (plen % 4 != 0) {
5641 						pad_needed = 4 - (plen % 4);
5642 					} else {
5643 						pad_needed = 0;
5644 					}
5645 				}
5646 			}
5647 		more_processing:
5648 			if ((ptype & 0x8000) == 0x0000) {
5649 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "stop proc\n");
5650 				return (op_err);
5651 			} else {
				/* skip this parameter and continue processing */
5653 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "move on\n");
5654 				at += SCTP_SIZE32(plen);
5655 			}
5656 			break;
5657 
5658 		}
5659 		phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
5660 	}
5661 	return (op_err);
5662  invalid_size:
5663 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "abort flag set\n");
5664 	*abort_processing = 1;
5665 	sctp_m_freem(op_err);
5666 	op_err = NULL;
5667 	op_err_last = NULL;
5668 	if (phdr != NULL) {
5669 		struct sctp_paramhdr *param;
5670 		int l_len;
5671 #ifdef INET6
5672 		l_len = SCTP_MIN_OVERHEAD;
5673 #else
5674 		l_len = SCTP_MIN_V4_OVERHEAD;
5675 #endif
5676 		l_len += sizeof(struct sctp_chunkhdr);
5677 		l_len += (2 * sizeof(struct sctp_paramhdr));
5678 		op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5679 		if (op_err) {
5680 			SCTP_BUF_LEN(op_err) = 0;
5681 #ifdef INET6
5682 			SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5683 #else
5684 			SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5685 #endif
5686 			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5687 			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5688 			SCTP_BUF_LEN(op_err) = 2 * sizeof(struct sctp_paramhdr);
5689 			param = mtod(op_err, struct sctp_paramhdr *);
5690 			param->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5691 			param->param_length = htons(2 * sizeof(struct sctp_paramhdr));
5692 			param++;
5693 			param->param_type = htons(ptype);
5694 			param->param_length = htons(plen);
5695 		}
5696 	}
5697 	return (op_err);
5698 }
5699 
/*
 * Given an INIT chunk, look through the parameters to verify that there
 * are no new addresses.
 * Return true if there is a new address or there is a problem parsing
 * the parameters.  Provide an optional error cause used when sending an
 * ABORT.
 * Return false if there are no new addresses and there is no problem in
 * parameter processing.
 */
5708 static bool
5709 sctp_are_there_new_addresses(struct sctp_association *asoc,
5710     struct mbuf *in_initpkt, int offset, int limit, struct sockaddr *src,
5711     struct mbuf **op_err)
5712 {
5713 	struct sockaddr *sa_touse;
5714 	struct sockaddr *sa;
5715 	struct sctp_paramhdr *phdr, params;
5716 	struct sctp_nets *net;
5717 #ifdef INET
5718 	struct sockaddr_in sin4, *sa4;
5719 #endif
5720 #ifdef INET6
5721 	struct sockaddr_in6 sin6, *sa6;
5722 #endif
5723 #if defined(__Userspace__)
5724 	struct sockaddr_conn *sac;
5725 #endif
5726 	uint16_t ptype, plen;
5727 	bool fnd, check_src;
5728 
5729 	*op_err = NULL;
5730 
5731 #ifdef INET
5732 	memset(&sin4, 0, sizeof(sin4));
5733 	sin4.sin_family = AF_INET;
5734 #ifdef HAVE_SIN_LEN
5735 	sin4.sin_len = sizeof(sin4);
5736 #endif
5737 #endif
5738 #ifdef INET6
5739 	memset(&sin6, 0, sizeof(sin6));
5740 	sin6.sin6_family = AF_INET6;
5741 #ifdef HAVE_SIN6_LEN
5742 	sin6.sin6_len = sizeof(sin6);
5743 #endif
5744 #endif
	/* First, what about the source address of the packet? */
5746 	check_src = false;
5747 	switch (src->sa_family) {
5748 #ifdef INET
5749 	case AF_INET:
5750 		if (asoc->scope.ipv4_addr_legal) {
5751 			check_src = true;
5752 		}
5753 		break;
5754 #endif
5755 #ifdef INET6
5756 	case AF_INET6:
5757 		if (asoc->scope.ipv6_addr_legal) {
5758 			check_src = true;
5759 		}
5760 		break;
5761 #endif
5762 #if defined(__Userspace__)
5763 	case AF_CONN:
5764 		if (asoc->scope.conn_addr_legal) {
5765 			check_src = true;
5766 		}
5767 		break;
5768 #endif
5769 	default:
5770 		/* TSNH */
5771 		break;
5772 	}
5773 	if (check_src) {
5774 		fnd = false;
5775 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5776 			sa = (struct sockaddr *)&net->ro._l_addr;
5777 			if (sa->sa_family == src->sa_family) {
5778 #ifdef INET
5779 				if (sa->sa_family == AF_INET) {
5780 					struct sockaddr_in *src4;
5781 
5782 					sa4 = (struct sockaddr_in *)sa;
5783 					src4 = (struct sockaddr_in *)src;
5784 					if (sa4->sin_addr.s_addr == src4->sin_addr.s_addr) {
5785 						fnd = true;
5786 						break;
5787 					}
5788 				}
5789 #endif
5790 #ifdef INET6
5791 				if (sa->sa_family == AF_INET6) {
5792 					struct sockaddr_in6 *src6;
5793 
5794 					sa6 = (struct sockaddr_in6 *)sa;
5795 					src6 = (struct sockaddr_in6 *)src;
5796 					if (SCTP6_ARE_ADDR_EQUAL(sa6, src6)) {
5797 						fnd = true;
5798 						break;
5799 					}
5800 				}
5801 #endif
5802 #if defined(__Userspace__)
5803 				if (sa->sa_family == AF_CONN) {
5804 					struct sockaddr_conn *srcc;
5805 
5806 					sac = (struct sockaddr_conn *)sa;
5807 					srcc = (struct sockaddr_conn *)src;
5808 					if (sac->sconn_addr == srcc->sconn_addr) {
5809 						fnd = true;
5810 						break;
5811 					}
5812 				}
5813 #endif
5814 			}
5815 		}
5816 		if (!fnd) {
5817 			/*
5818 			 * If sending an ABORT in case of an additional address,
5819 			 * don't use the new address error cause.
5820 			 * This looks no different than if no listener was
5821 			 * present.
5822 			 */
5823 			*op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), "Address added");
5824 			return (true);
5825 		}
5826 	}
	/* OK so far; let's work through the rest of the packet. */
5828 	offset += sizeof(struct sctp_init_chunk);
5829 	phdr = sctp_get_next_param(in_initpkt, offset, &params, sizeof(params));
5830 	while (phdr) {
5831 		sa_touse = NULL;
5832 		ptype = ntohs(phdr->param_type);
5833 		plen = ntohs(phdr->param_length);
5834 		if (offset + plen > limit) {
5835 			*op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, "Partial parameter");
5836 			return (true);
5837 		}
5838 		if (plen < sizeof(struct sctp_paramhdr)) {
5839 			*op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, "Parameter length too small");
5840 			return (true);
5841 		}
5842 		switch (ptype) {
5843 #ifdef INET
5844 		case SCTP_IPV4_ADDRESS:
5845 		{
5846 			struct sctp_ipv4addr_param *p4, p4_buf;
5847 
5848 			if (plen != sizeof(struct sctp_ipv4addr_param)) {
5849 				*op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, "Parameter length illegal");
5850 				return (true);
5851 			}
5852 			phdr = sctp_get_next_param(in_initpkt, offset,
5853 			    (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
5854 			if (phdr == NULL) {
5855 				*op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, "");
5856 				return (true);
5857 			}
5858 			if (asoc->scope.ipv4_addr_legal) {
5859 				p4 = (struct sctp_ipv4addr_param *)phdr;
5860 				sin4.sin_addr.s_addr = p4->addr;
5861 				sa_touse = (struct sockaddr *)&sin4;
5862 			}
5863 			break;
5864 		}
5865 #endif
5866 #ifdef INET6
5867 		case SCTP_IPV6_ADDRESS:
5868 		{
5869 			struct sctp_ipv6addr_param *p6, p6_buf;
5870 
5871 			if (plen != sizeof(struct sctp_ipv6addr_param)) {
5872 				*op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, "Parameter length illegal");
5873 				return (true);
5874 			}
5875 			phdr = sctp_get_next_param(in_initpkt, offset,
5876 			    (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
5877 			if (phdr == NULL) {
5878 				*op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, "");
5879 				return (true);
5880 			}
5881 			if (asoc->scope.ipv6_addr_legal) {
5882 				p6 = (struct sctp_ipv6addr_param *)phdr;
5883 				memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
5884 				       sizeof(p6->addr));
5885 				sa_touse = (struct sockaddr *)&sin6;
5886 			}
5887 			break;
5888 		}
5889 #endif
5890 		default:
5891 			sa_touse = NULL;
5892 			break;
5893 		}
5894 		if (sa_touse) {
5895 			/* ok, sa_touse points to one to check */
5896 			fnd = false;
5897 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5898 				sa = (struct sockaddr *)&net->ro._l_addr;
5899 				if (sa->sa_family != sa_touse->sa_family) {
5900 					continue;
5901 				}
5902 #ifdef INET
5903 				if (sa->sa_family == AF_INET) {
5904 					sa4 = (struct sockaddr_in *)sa;
5905 					if (sa4->sin_addr.s_addr ==
5906 					    sin4.sin_addr.s_addr) {
5907 						fnd = true;
5908 						break;
5909 					}
5910 				}
5911 #endif
5912 #ifdef INET6
5913 				if (sa->sa_family == AF_INET6) {
5914 					sa6 = (struct sockaddr_in6 *)sa;
5915 					if (SCTP6_ARE_ADDR_EQUAL(
5916 					    sa6, &sin6)) {
5917 						fnd = true;
5918 						break;
5919 					}
5920 				}
5921 #endif
5922 			}
5923 			if (!fnd) {
5924 				/*
5925 				 * If sending an ABORT in case of an additional
5926 				 * address, don't use the new address error
5927 				 * cause.
5928 				 * This looks no different than if no listener
5929 				 * was present.
5930 				 */
5931 				*op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), "Address added");
5932 				return (true);
5933 			}
5934 		}
5935 		offset += SCTP_SIZE32(plen);
5936 		if (offset >= limit) {
5937 			break;
5938 		}
5939 		phdr = sctp_get_next_param(in_initpkt, offset, &params, sizeof(params));
5940 	}
5941 	return (false);
5942 }
5943 
5944 /*
5945  * Given an mbuf chain that was sent to us containing an INIT, build an
5946  * INIT-ACK with a COOKIE and send it back. We assume that init_pkt has
5947  * been pulled up to include the IPv6/IPv4 header, the SCTP header, and the
5948  * initial part of the INIT message (i.e. the struct sctp_init_msg).
5949  */
5950 void
5951 sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
5952                        struct sctp_nets *src_net, struct mbuf *init_pkt,
5953                        int iphlen, int offset,
5954                        struct sockaddr *src, struct sockaddr *dst,
5955                        struct sctphdr *sh, struct sctp_init_chunk *init_chk,
5956 #if defined(__FreeBSD__) && !defined(__Userspace__)
5957 		       uint8_t mflowtype, uint32_t mflowid,
5958 #endif
5959                        uint32_t vrf_id, uint16_t port)
5960 {
5961 	struct sctp_association *asoc;
5962 	struct mbuf *m, *m_tmp, *m_last, *m_cookie, *op_err;
5963 	struct sctp_init_ack_chunk *initack;
5964 	struct sctp_adaptation_layer_indication *ali;
5965 	struct sctp_supported_chunk_types_param *pr_supported;
5966 	struct sctp_paramhdr *ph;
5967 	union sctp_sockstore *over_addr;
5968 	struct sctp_scoping scp;
5969 	struct timeval now;
5970 #ifdef INET
5971 	struct sockaddr_in *dst4 = (struct sockaddr_in *)dst;
5972 	struct sockaddr_in *src4 = (struct sockaddr_in *)src;
5973 	struct sockaddr_in *sin;
5974 #endif
5975 #ifdef INET6
5976 	struct sockaddr_in6 *dst6 = (struct sockaddr_in6 *)dst;
5977 	struct sockaddr_in6 *src6 = (struct sockaddr_in6 *)src;
5978 	struct sockaddr_in6 *sin6;
5979 #endif
5980 #if defined(__Userspace__)
5981 	struct sockaddr_conn *dstconn = (struct sockaddr_conn *)dst;
5982 	struct sockaddr_conn *srcconn = (struct sockaddr_conn *)src;
5983 	struct sockaddr_conn *sconn;
5984 #endif
5985 	struct sockaddr *to;
5986 	struct sctp_state_cookie stc;
5987 	struct sctp_nets *net = NULL;
5988 	uint8_t *signature = NULL;
5989 	int cnt_inits_to = 0;
5990 	uint16_t his_limit, i_want;
5991 	int abort_flag;
5992 	int nat_friendly = 0;
5993 	int error;
5994 	struct socket *so;
5995 	uint16_t num_ext, chunk_len, padding_len, parameter_len;
5996 
5997 	if (stcb) {
5998 		asoc = &stcb->asoc;
5999 	} else {
6000 		asoc = NULL;
6001 	}
6002 	if ((asoc != NULL) &&
6003 	    (SCTP_GET_STATE(stcb) != SCTP_STATE_COOKIE_WAIT)) {
6004 		if (sctp_are_there_new_addresses(asoc, init_pkt, offset, offset + ntohs(init_chk->ch.chunk_length), src, &op_err)) {
6005 			/*
6006 			 * new addresses, out of here in non-cookie-wait states
6007 			 */
6008 			sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, op_err,
6009 #if defined(__FreeBSD__) && !defined(__Userspace__)
6010 			                mflowtype, mflowid, inp->fibnum,
6011 #endif
6012 			                vrf_id, port);
6013 			return;
6014 		}
6015 		if (src_net != NULL && (src_net->port != port)) {
6016 			/*
6017 			 * change of remote encapsulation port, out of here in
6018 			 * non-cookie-wait states
6019 			 *
6020 			 * Send an ABORT, without a specific error cause.
6021 			 * This looks no different than if no listener
6022 			 * was present.
6023 			 */
6024 			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
6025 			                             "Remote encapsulation port changed");
6026 			sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, op_err,
6027 #if defined(__FreeBSD__) && !defined(__Userspace__)
6028 			                mflowtype, mflowid, inp->fibnum,
6029 #endif
6030 			                vrf_id, port);
6031 			return;
6032 		}
6033 	}
6034 	abort_flag = 0;
6035 	op_err = sctp_arethere_unrecognized_parameters(init_pkt,
6036 	                                               (offset + sizeof(struct sctp_init_chunk)),
6037 	                                               &abort_flag,
6038 	                                               (struct sctp_chunkhdr *)init_chk,
6039 	                                               &nat_friendly, NULL);
6040 	if (abort_flag) {
6041 	do_a_abort:
6042 		if (op_err == NULL) {
6043 			char msg[SCTP_DIAG_INFO_LEN];
6044 
6045 			SCTP_SNPRINTF(msg, sizeof(msg), "%s:%d at %s", __FILE__, __LINE__, __func__);
6046 			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
6047 			                             msg);
6048 		}
6049 		sctp_send_abort(init_pkt, iphlen, src, dst, sh,
6050 				init_chk->init.initiate_tag, op_err,
6051 #if defined(__FreeBSD__) && !defined(__Userspace__)
6052 		                mflowtype, mflowid, inp->fibnum,
6053 #endif
6054 		                vrf_id, port);
6055 		return;
6056 	}
6057 	m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
6058 	if (m == NULL) {
6059 		/* No memory, INIT timer will re-attempt. */
6060 		sctp_m_freem(op_err);
6061 		return;
6062 	}
6063 	chunk_len = (uint16_t)sizeof(struct sctp_init_ack_chunk);
6064 	padding_len = 0;
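	/*
	 * From here on chunk_len tracks how many bytes of the INIT-ACK chunk
	 * have been written into the mbuf, and padding_len carries any 32-bit
	 * alignment padding still owed before the next parameter is appended.
	 */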
6065 
6066 	/*
6067 	 * We might not overwrite the identification[] completely and on
6068 	 * some platforms time_entered will contain some padding.
6069 	 * Therefore zero out the cookie to avoid putting
6070 	 * uninitialized memory on the wire.
6071 	 */
6072 	memset(&stc, 0, sizeof(struct sctp_state_cookie));
6073 
6074 	/* the time I built cookie */
6075 	(void)SCTP_GETTIME_TIMEVAL(&now);
6076 	stc.time_entered.tv_sec = now.tv_sec;
6077 	stc.time_entered.tv_usec = now.tv_usec;
6078 
6079 	/* populate any tie tags */
6080 	if (asoc != NULL) {
6081 		/* unlock before tag selections */
6082 		stc.tie_tag_my_vtag = asoc->my_vtag_nonce;
6083 		stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce;
6084 		stc.cookie_life = asoc->cookie_life;
6085 		net = asoc->primary_destination;
6086 	} else {
6087 		stc.tie_tag_my_vtag = 0;
6088 		stc.tie_tag_peer_vtag = 0;
6089 		/* life I will award this cookie */
6090 		stc.cookie_life = inp->sctp_ep.def_cookie_life;
6091 	}
6092 
6093 	/* copy in the ports for later check */
6094 	stc.myport = sh->dest_port;
6095 	stc.peerport = sh->src_port;
6096 
6097 	/*
6098 	 * If we wanted to honor cookie life extensions, we would add to
6099 	 * stc.cookie_life. For now we should NOT honor any extension
6100 	 */
6101 	stc.site_scope = stc.local_scope = stc.loopback_scope = 0;
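	/*
	 * Record in the cookie which address families are legal for this
	 * association, based on how the endpoint is bound: a v6 socket may
	 * also allow v4 unless it is v6-only; otherwise v4 (or AF_CONN for
	 * the userland stack) applies.
	 */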
6102 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
6103 		stc.ipv6_addr_legal = 1;
6104 		if (SCTP_IPV6_V6ONLY(inp)) {
6105 			stc.ipv4_addr_legal = 0;
6106 		} else {
6107 			stc.ipv4_addr_legal = 1;
6108 		}
6109 #if defined(__Userspace__)
6110 		stc.conn_addr_legal = 0;
6111 #endif
6112 	} else {
6113 		stc.ipv6_addr_legal = 0;
6114 #if defined(__Userspace__)
6115 		if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) {
6116 			stc.conn_addr_legal = 1;
6117 			stc.ipv4_addr_legal = 0;
6118 		} else {
6119 			stc.conn_addr_legal = 0;
6120 			stc.ipv4_addr_legal = 1;
6121 		}
6122 #else
6123 		stc.ipv4_addr_legal = 1;
6124 #endif
6125 	}
6126 	stc.ipv4_scope = 0;
6127 	if (net == NULL) {
6128 		to = src;
6129 		switch (dst->sa_family) {
6130 #ifdef INET
6131 		case AF_INET:
6132 		{
6133 			/* lookup address */
6134 			stc.address[0] = src4->sin_addr.s_addr;
6135 			stc.address[1] = 0;
6136 			stc.address[2] = 0;
6137 			stc.address[3] = 0;
6138 			stc.addr_type = SCTP_IPV4_ADDRESS;
6139 			/* local from address */
6140 			stc.laddress[0] = dst4->sin_addr.s_addr;
6141 			stc.laddress[1] = 0;
6142 			stc.laddress[2] = 0;
6143 			stc.laddress[3] = 0;
6144 			stc.laddr_type = SCTP_IPV4_ADDRESS;
6145 			/* scope_id is only for v6 */
6146 			stc.scope_id = 0;
6147 			if ((IN4_ISPRIVATE_ADDRESS(&src4->sin_addr)) ||
6148 			    (IN4_ISPRIVATE_ADDRESS(&dst4->sin_addr))){
6149 				stc.ipv4_scope = 1;
6150 			}
6151 			/* Must use the address in this case */
6152 			if (sctp_is_address_on_local_host(src, vrf_id)) {
6153 				stc.loopback_scope = 1;
6154 				stc.ipv4_scope = 1;
6155 				stc.site_scope = 1;
6156 				stc.local_scope = 0;
6157 			}
6158 			break;
6159 		}
6160 #endif
6161 #ifdef INET6
6162 		case AF_INET6:
6163 		{
6164 			stc.addr_type = SCTP_IPV6_ADDRESS;
6165 			memcpy(&stc.address, &src6->sin6_addr, sizeof(struct in6_addr));
6166 #if defined(__FreeBSD__) && !defined(__Userspace__)
6167 			stc.scope_id = ntohs(in6_getscope(&src6->sin6_addr));
6168 #else
6169 			stc.scope_id = 0;
6170 #endif
6171 			if (sctp_is_address_on_local_host(src, vrf_id)) {
6172 				stc.loopback_scope = 1;
6173 				stc.local_scope = 0;
6174 				stc.site_scope = 1;
6175 				stc.ipv4_scope = 1;
6176 			} else if (IN6_IS_ADDR_LINKLOCAL(&src6->sin6_addr) ||
6177 			           IN6_IS_ADDR_LINKLOCAL(&dst6->sin6_addr)) {
6178 				/*
6179 				 * If the new destination or source is
6180 				 * LINK_LOCAL, we must have both site and local
6181 				 * scope in common. Don't set local scope,
6182 				 * though, since we must depend on the source
6183 				 * being added implicitly. We cannot assume
6184 				 * that just because we share one link, all
6185 				 * links are common.
6186 				 */
6187 #if defined(__APPLE__) && !defined(__Userspace__)
6188 				/* Mac OS X currently doesn't have in6_getscope() */
6189 				stc.scope_id = src6->sin6_addr.s6_addr16[1];
6190 #endif
6191 				stc.local_scope = 0;
6192 				stc.site_scope = 1;
6193 				stc.ipv4_scope = 1;
6194 				/*
6195 				 * We start counting for the private address
6196 				 * stuff at 1, since the link-local address we
6197 				 * source from won't show up in our scoped
6198 				 * count.
6199 				 */
6200 				cnt_inits_to = 1;
6201 				/* pull out the scope_id from incoming pkt */
6202 			} else if (IN6_IS_ADDR_SITELOCAL(&src6->sin6_addr) ||
6203 			           IN6_IS_ADDR_SITELOCAL(&dst6->sin6_addr)) {
6204 				/*
6205 				 * If the new destination or source is
6206 				 * SITE_LOCAL then we must have site scope in
6207 				 * common.
6208 				 */
6209 				stc.site_scope = 1;
6210 			}
6211 			memcpy(&stc.laddress, &dst6->sin6_addr, sizeof(struct in6_addr));
6212 			stc.laddr_type = SCTP_IPV6_ADDRESS;
6213 			break;
6214 		}
6215 #endif
6216 #if defined(__Userspace__)
6217 		case AF_CONN:
6218 		{
6219 			/* lookup address */
6220 			stc.address[0] = 0;
6221 			stc.address[1] = 0;
6222 			stc.address[2] = 0;
6223 			stc.address[3] = 0;
6224 			memcpy(&stc.address, &srcconn->sconn_addr, sizeof(void *));
6225 			stc.addr_type = SCTP_CONN_ADDRESS;
6226 			/* local from address */
6227 			stc.laddress[0] = 0;
6228 			stc.laddress[1] = 0;
6229 			stc.laddress[2] = 0;
6230 			stc.laddress[3] = 0;
6231 			memcpy(&stc.laddress, &dstconn->sconn_addr, sizeof(void *));
6232 			stc.laddr_type = SCTP_CONN_ADDRESS;
6233 			/* scope_id is only for v6 */
6234 			stc.scope_id = 0;
6235 			break;
6236 		}
6237 #endif
6238 		default:
6239 			/* TSNH */
6240 			goto do_a_abort;
6241 			break;
6242 		}
6243 	} else {
6244 		/* set the scope per the existing tcb */
6245 
6246 #ifdef INET6
6247 		struct sctp_nets *lnet;
6248 #endif
6249 
6250 		stc.loopback_scope = asoc->scope.loopback_scope;
6251 		stc.ipv4_scope = asoc->scope.ipv4_local_scope;
6252 		stc.site_scope = asoc->scope.site_scope;
6253 		stc.local_scope = asoc->scope.local_scope;
6254 #ifdef INET6
6255 		/* Why do we not consider IPv4 LL addresses? */
6256 		TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
6257 			if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) {
6258 				if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) {
6259 					/*
6260 					 * if we have a LL address, start
6261 					 * counting at 1.
6262 					 */
6263 					cnt_inits_to = 1;
6264 				}
6265 			}
6266 		}
6267 #endif
6268 		/* use the net pointer */
6269 		to = (struct sockaddr *)&net->ro._l_addr;
6270 		switch (to->sa_family) {
6271 #ifdef INET
6272 		case AF_INET:
6273 			sin = (struct sockaddr_in *)to;
6274 			stc.address[0] = sin->sin_addr.s_addr;
6275 			stc.address[1] = 0;
6276 			stc.address[2] = 0;
6277 			stc.address[3] = 0;
6278 			stc.addr_type = SCTP_IPV4_ADDRESS;
6279 			if (net->src_addr_selected == 0) {
6280 				/*
6281 				 * strange case here, the INIT should have
6282 				 * done the selection.
6283 				 */
6284 				net->ro._s_addr = sctp_source_address_selection(inp,
6285 										stcb, (sctp_route_t *)&net->ro,
6286 										net, 0, vrf_id);
6287 				if (net->ro._s_addr == NULL) {
6288 					sctp_m_freem(op_err);
6289 					sctp_m_freem(m);
6290 					return;
6291 				}
6292 
6293 				net->src_addr_selected = 1;
6294 
6295 			}
6296 			stc.laddress[0] = net->ro._s_addr->address.sin.sin_addr.s_addr;
6297 			stc.laddress[1] = 0;
6298 			stc.laddress[2] = 0;
6299 			stc.laddress[3] = 0;
6300 			stc.laddr_type = SCTP_IPV4_ADDRESS;
6301 			/* scope_id is only for v6 */
6302 			stc.scope_id = 0;
6303 			break;
6304 #endif
6305 #ifdef INET6
6306 		case AF_INET6:
6307 			sin6 = (struct sockaddr_in6 *)to;
6308 			memcpy(&stc.address, &sin6->sin6_addr,
6309 			       sizeof(struct in6_addr));
6310 			stc.addr_type = SCTP_IPV6_ADDRESS;
6311 			stc.scope_id = sin6->sin6_scope_id;
6312 			if (net->src_addr_selected == 0) {
6313 				/*
6314 				 * strange case here, the INIT should have
6315 				 * done the selection.
6316 				 */
6317 				net->ro._s_addr = sctp_source_address_selection(inp,
6318 										stcb, (sctp_route_t *)&net->ro,
6319 										net, 0, vrf_id);
6320 				if (net->ro._s_addr == NULL) {
6321 					sctp_m_freem(op_err);
6322 					sctp_m_freem(m);
6323 					return;
6324 				}
6325 
6326 				net->src_addr_selected = 1;
6327 			}
6328 			memcpy(&stc.laddress, &net->ro._s_addr->address.sin6.sin6_addr,
6329 			       sizeof(struct in6_addr));
6330 			stc.laddr_type = SCTP_IPV6_ADDRESS;
6331 			break;
6332 #endif
6333 #if defined(__Userspace__)
6334 		case AF_CONN:
6335 			sconn = (struct sockaddr_conn *)to;
6336 			stc.address[0] = 0;
6337 			stc.address[1] = 0;
6338 			stc.address[2] = 0;
6339 			stc.address[3] = 0;
6340 			memcpy(&stc.address, &sconn->sconn_addr, sizeof(void *));
6341 			stc.addr_type = SCTP_CONN_ADDRESS;
6342 			stc.laddress[0] = 0;
6343 			stc.laddress[1] = 0;
6344 			stc.laddress[2] = 0;
6345 			stc.laddress[3] = 0;
6346 			memcpy(&stc.laddress, &sconn->sconn_addr, sizeof(void *));
6347 			stc.laddr_type = SCTP_CONN_ADDRESS;
6348 			stc.scope_id = 0;
6349 			break;
6350 #endif
6351 		}
6352 	}
6353 	/* Now lets put the SCTP header in place */
6354 	initack = mtod(m, struct sctp_init_ack_chunk *);
6355 	/* Save it off for quick ref */
6356 	stc.peers_vtag = ntohl(init_chk->init.initiate_tag);
6357 	/* who are we */
6358 	memcpy(stc.identification, SCTP_VERSION_STRING,
6359 	       min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification)));
6360 	memset(stc.reserved, 0, SCTP_RESERVE_SPACE);
6361 	/* now the chunk header */
6362 	initack->ch.chunk_type = SCTP_INITIATION_ACK;
6363 	initack->ch.chunk_flags = 0;
6364 	/* fill in later from mbuf we build */
6365 	initack->ch.chunk_length = 0;
6366 	/* place in my tag */
6367 	if ((asoc != NULL) &&
6368 	    ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
6369 	     (SCTP_GET_STATE(stcb) == SCTP_STATE_INUSE) ||
6370 	     (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED))) {
6371 		/* re-use the v-tags and init-seq here */
6372 		initack->init.initiate_tag = htonl(asoc->my_vtag);
6373 		initack->init.initial_tsn = htonl(asoc->init_seq_number);
6374 	} else {
6375 		uint32_t vtag, itsn;
6376 
6377 		if (asoc) {
6378 			atomic_add_int(&asoc->refcnt, 1);
6379 			SCTP_TCB_UNLOCK(stcb);
6380 		new_tag:
6381 			vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
6382 			if ((asoc->peer_supports_nat)  && (vtag == asoc->my_vtag)) {
6383 				/* Got a duplicate vtag for a peer behind a NAT;
6384 				 * make sure we don't use it.
6385 				 */
6386 				goto new_tag;
6387 			}
6388 			initack->init.initiate_tag = htonl(vtag);
6389 			/* get a TSN to use too */
6390 			itsn = sctp_select_initial_TSN(&inp->sctp_ep);
6391 			initack->init.initial_tsn = htonl(itsn);
6392 			SCTP_TCB_LOCK(stcb);
6393 			atomic_add_int(&asoc->refcnt, -1);
6394 		} else {
6395 			SCTP_INP_INCR_REF(inp);
6396 			SCTP_INP_RUNLOCK(inp);
6397 			vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
6398 			initack->init.initiate_tag = htonl(vtag);
6399 			/* get a TSN to use too */
6400 			initack->init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep));
6401 			SCTP_INP_RLOCK(inp);
6402 			SCTP_INP_DECR_REF(inp);
6403 		}
6404 	}
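	/*
	 * Note: when an association exists, the TCB lock is dropped around
	 * the tag selection above (with the refcnt held so the association
	 * cannot disappear), presumably to avoid holding it across the vtag
	 * selection work.
	 */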
6405 	/* save away my tag to the state cookie */
6406 	stc.my_vtag = initack->init.initiate_tag;
6407 
6408 	/* set up some of the credits. */
6409 	so = inp->sctp_socket;
6410 	if (so == NULL) {
6411 		/* memory problem */
6412 		sctp_m_freem(op_err);
6413 		sctp_m_freem(m);
6414 		return;
6415 	} else {
6416 		initack->init.a_rwnd = htonl(max(SCTP_SB_LIMIT_RCV(so), SCTP_MINIMAL_RWND));
6417 	}
6418 	/* set what I want */
6419 	his_limit = ntohs(init_chk->init.num_inbound_streams);
6420 	/* choose what I want */
6421 	if (asoc != NULL) {
6422 		if (asoc->streamoutcnt > asoc->pre_open_streams) {
6423 			i_want = asoc->streamoutcnt;
6424 		} else {
6425 			i_want = asoc->pre_open_streams;
6426 		}
6427 	} else {
6428 		i_want = inp->sctp_ep.pre_open_stream_count;
6429 	}
6430 	if (his_limit < i_want) {
6431 		/* I Want more :< */
6432 		initack->init.num_outbound_streams = init_chk->init.num_inbound_streams;
6433 	} else {
6434 		/* I can have what I want :> */
6435 		initack->init.num_outbound_streams = htons(i_want);
6436 	}
6437 	/* tell him his limit. */
6438 	initack->init.num_inbound_streams =
6439 		htons(inp->sctp_ep.max_open_streams_intome);
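	/*
	 * This follows the stream negotiation in RFC 4960: the number of
	 * outbound streams we announce must not exceed the peer's advertised
	 * number of inbound streams, while our own inbound limit is
	 * advertised independently.
	 */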
6440 
6441 	/* adaptation layer indication parameter */
6442 	if (inp->sctp_ep.adaptation_layer_indicator_provided) {
6443 		parameter_len = (uint16_t)sizeof(struct sctp_adaptation_layer_indication);
6444 		ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t) + chunk_len);
6445 		ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
6446 		ali->ph.param_length = htons(parameter_len);
6447 		ali->indication = htonl(inp->sctp_ep.adaptation_layer_indicator);
6448 		chunk_len += parameter_len;
6449 	}
6450 
6451 	/* ECN parameter */
6452 	if (((asoc != NULL) && (asoc->ecn_supported == 1)) ||
6453 	    ((asoc == NULL) && (inp->ecn_supported == 1))) {
6454 		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
6455 		ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
6456 		ph->param_type = htons(SCTP_ECN_CAPABLE);
6457 		ph->param_length = htons(parameter_len);
6458 		chunk_len += parameter_len;
6459 	}
6460 
6461 	/* PR-SCTP supported parameter */
6462 	if (((asoc != NULL) && (asoc->prsctp_supported == 1)) ||
6463 	    ((asoc == NULL) && (inp->prsctp_supported == 1))) {
6464 		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
6465 		ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
6466 		ph->param_type = htons(SCTP_PRSCTP_SUPPORTED);
6467 		ph->param_length = htons(parameter_len);
6468 		chunk_len += parameter_len;
6469 	}
6470 
6471 	/* Add NAT friendly parameter */
6472 	if (nat_friendly) {
6473 		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
6474 		ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
6475 		ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
6476 		ph->param_length = htons(parameter_len);
6477 		chunk_len += parameter_len;
6478 	}
6479 
6480 	/* And now tell the peer which extensions we support */
6481 	num_ext = 0;
6482 	pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t) + chunk_len);
6483 	if (((asoc != NULL) && (asoc->prsctp_supported == 1)) ||
6484 	    ((asoc == NULL) && (inp->prsctp_supported == 1))) {
6485 		pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
6486 		if (((asoc != NULL) && (asoc->idata_supported == 1)) ||
6487 		    ((asoc == NULL) && (inp->idata_supported == 1))) {
6488 			pr_supported->chunk_types[num_ext++] = SCTP_IFORWARD_CUM_TSN;
6489 		}
6490 	}
6491 	if (((asoc != NULL) && (asoc->auth_supported == 1)) ||
6492 	    ((asoc == NULL) && (inp->auth_supported == 1))) {
6493 		pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
6494 	}
6495 	if (((asoc != NULL) && (asoc->asconf_supported == 1)) ||
6496 	    ((asoc == NULL) && (inp->asconf_supported == 1))) {
6497 		pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
6498 		pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
6499 	}
6500 	if (((asoc != NULL) && (asoc->reconfig_supported == 1)) ||
6501 	    ((asoc == NULL) && (inp->reconfig_supported == 1))) {
6502 		pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
6503 	}
6504 	if (((asoc != NULL) && (asoc->idata_supported == 1)) ||
6505 	    ((asoc == NULL) && (inp->idata_supported == 1))) {
6506 		pr_supported->chunk_types[num_ext++] = SCTP_IDATA;
6507 	}
6508 	if (((asoc != NULL) && (asoc->nrsack_supported == 1)) ||
6509 	    ((asoc == NULL) && (inp->nrsack_supported == 1))) {
6510 		pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
6511 	}
6512 	if (((asoc != NULL) && (asoc->pktdrop_supported == 1)) ||
6513 	    ((asoc == NULL) && (inp->pktdrop_supported == 1))) {
6514 		pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
6515 	}
6516 	if (num_ext > 0) {
6517 		parameter_len = (uint16_t)sizeof(struct sctp_supported_chunk_types_param) + num_ext;
6518 		pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
6519 		pr_supported->ph.param_length = htons(parameter_len);
6520 		padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6521 		chunk_len += parameter_len;
6522 	}
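	/*
	 * The supported-extensions parameter carries one byte per chunk type,
	 * so its length is usually not a multiple of four; the required
	 * padding is kept in padding_len and written out before the next
	 * parameter is appended.
	 */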
6523 
6524 	/* add authentication parameters */
6525 	if (((asoc != NULL) && (asoc->auth_supported == 1)) ||
6526 	    ((asoc == NULL) && (inp->auth_supported == 1))) {
6527 		struct sctp_auth_random *randp;
6528 		struct sctp_auth_hmac_algo *hmacs;
6529 		struct sctp_auth_chunk_list *chunks;
6530 
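		/*
		 * For SCTP AUTH (RFC 4895) we advertise a RANDOM parameter,
		 * the list of supported HMAC algorithms, and the list of chunk
		 * types that must be authenticated; each parameter is padded
		 * to a 32-bit boundary before the next one is appended.
		 */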
6531 		if (padding_len > 0) {
6532 			memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
6533 			chunk_len += padding_len;
6534 			padding_len = 0;
6535 		}
6536 		/* generate and add RANDOM parameter */
6537 		randp = (struct sctp_auth_random *)(mtod(m, caddr_t) + chunk_len);
6538 		parameter_len = (uint16_t)sizeof(struct sctp_auth_random) +
6539 		                SCTP_AUTH_RANDOM_SIZE_DEFAULT;
6540 		randp->ph.param_type = htons(SCTP_RANDOM);
6541 		randp->ph.param_length = htons(parameter_len);
6542 		SCTP_READ_RANDOM(randp->random_data, SCTP_AUTH_RANDOM_SIZE_DEFAULT);
6543 		padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6544 		chunk_len += parameter_len;
6545 
6546 		if (padding_len > 0) {
6547 			memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
6548 			chunk_len += padding_len;
6549 			padding_len = 0;
6550 		}
6551 		/* add HMAC_ALGO parameter */
6552 		hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t) + chunk_len);
6553 		parameter_len = (uint16_t)sizeof(struct sctp_auth_hmac_algo) +
6554 		                sctp_serialize_hmaclist(inp->sctp_ep.local_hmacs,
6555 		                                        (uint8_t *)hmacs->hmac_ids);
6556 		hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
6557 		hmacs->ph.param_length = htons(parameter_len);
6558 		padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6559 		chunk_len += parameter_len;
6560 
6561 		if (padding_len > 0) {
6562 			memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
6563 			chunk_len += padding_len;
6564 			padding_len = 0;
6565 		}
6566 		/* add CHUNKS parameter */
6567 		chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t) + chunk_len);
6568 		parameter_len = (uint16_t)sizeof(struct sctp_auth_chunk_list) +
6569 		                sctp_serialize_auth_chunks(inp->sctp_ep.local_auth_chunks,
6570 		                                           chunks->chunk_types);
6571 		chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
6572 		chunks->ph.param_length = htons(parameter_len);
6573 		padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6574 		chunk_len += parameter_len;
6575 	}
6576 	SCTP_BUF_LEN(m) = chunk_len;
6577 	m_last = m;
6578 	/* now the addresses */
6579 	/* To optimize this we could put the scoping stuff
6580 	 * into a structure and remove the individual uint8's from
6581 	 * the stc structure. Then we could just copy the
6582 	 * address straight into the stc, but for now this is a quick
6583 	 * hack to get the address stuff teased apart.
6584 	 */
6585 	scp.ipv4_addr_legal = stc.ipv4_addr_legal;
6586 	scp.ipv6_addr_legal = stc.ipv6_addr_legal;
6587 #if defined(__Userspace__)
6588 	scp.conn_addr_legal = stc.conn_addr_legal;
6589 #endif
6590 	scp.loopback_scope = stc.loopback_scope;
6591 	scp.ipv4_local_scope = stc.ipv4_scope;
6592 	scp.local_scope = stc.local_scope;
6593 	scp.site_scope = stc.site_scope;
6594 	m_last = sctp_add_addresses_to_i_ia(inp, stcb, &scp, m_last,
6595 	                                    cnt_inits_to,
6596 	                                    &padding_len, &chunk_len);
6597 	/* padding_len can only be positive if no addresses have been added */
6598 	if (padding_len > 0) {
6599 		memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
6600 		chunk_len += padding_len;
6601 		SCTP_BUF_LEN(m) += padding_len;
6602 		padding_len = 0;
6603 	}
6604 
6605 	/* tack on the operational error if present */
6606 	if (op_err) {
6607 		parameter_len = 0;
6608 		for (m_tmp = op_err; m_tmp != NULL; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
6609 			parameter_len += SCTP_BUF_LEN(m_tmp);
6610 		}
6611 		padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6612 		SCTP_BUF_NEXT(m_last) = op_err;
6613 		while (SCTP_BUF_NEXT(m_last) != NULL) {
6614 			m_last = SCTP_BUF_NEXT(m_last);
6615 		}
6616 		chunk_len += parameter_len;
6617 	}
6618 	if (padding_len > 0) {
6619 		m_last = sctp_add_pad_tombuf(m_last, padding_len);
6620 		if (m_last == NULL) {
6621 			/* Houston we have a problem, no space */
6622 			sctp_m_freem(m);
6623 			return;
6624 		}
6625 		chunk_len += padding_len;
6626 		padding_len = 0;
6627 	}
6628 	/* Now we must build a cookie */
6629 	m_cookie = sctp_add_cookie(init_pkt, offset, m, 0, &stc, &signature);
6630 	if (m_cookie == NULL) {
6631 		/* memory problem */
6632 		sctp_m_freem(m);
6633 		return;
6634 	}
6635 	/* Now append the cookie to the end and update the space/size */
6636 	SCTP_BUF_NEXT(m_last) = m_cookie;
6637 	parameter_len = 0;
6638 	for (m_tmp = m_cookie; m_tmp != NULL; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
6639 		parameter_len += SCTP_BUF_LEN(m_tmp);
6640 		if (SCTP_BUF_NEXT(m_tmp) == NULL) {
6641 			m_last = m_tmp;
6642 		}
6643 	}
6644 	padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6645 	chunk_len += parameter_len;
6646 
6647 	/* Place in the size, but we don't include
6648 	 * the last pad (if any) in the INIT-ACK.
6649 	 */
6650 	initack->ch.chunk_length = htons(chunk_len);
6651 
6652 	/* Time to sign the cookie. We don't sign over the cookie signature
6653 	 * itself, so we pass its size as the trailer to exclude it.
6654 	 */
6655 	(void)sctp_hmac_m(SCTP_HMAC,
6656 			  (uint8_t *)inp->sctp_ep.secret_key[(int)(inp->sctp_ep.current_secret_number)],
6657 			  SCTP_SECRET_SIZE, m_cookie, sizeof(struct sctp_paramhdr),
6658 			  (uint8_t *)signature, SCTP_SIGNATURE_SIZE);
6659 #if defined(__Userspace__)
6660 	/*
6661 	 * Don't put AF_CONN addresses on the wire, in case this is critical
6662 	 * for the application. However, they are protected by the HMAC and
6663 	 * need to be reconstructed before checking the HMAC.
6664 	 * Clearing is only done in the mbuf chain, since the local stc is
6665 	 * not used anymore.
6666 	 */
6667 	if (stc.addr_type == SCTP_CONN_ADDRESS) {
6668 		const void *p = NULL;
6669 
6670 		m_copyback(m_cookie, sizeof(struct sctp_paramhdr) + offsetof(struct sctp_state_cookie, address),
6671 		           (int)sizeof(void *), (caddr_t)&p);
6672 	}
6673 	if (stc.laddr_type == SCTP_CONN_ADDRESS) {
6674 		const void *p = NULL;
6675 
6676 		m_copyback(m_cookie, sizeof(struct sctp_paramhdr) + offsetof(struct sctp_state_cookie, laddress),
6677 		           (int)sizeof(void *), (caddr_t)&p);
6678 	}
6679 #endif
6680 	/*
6681 	 * We pass 0 here to NOT set IP_DF if it's IPv4; we ignore the return
6682 	 * value here since the timer will drive a retransmission.
6683 	 */
6684 	if (padding_len > 0) {
6685 		if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
6686 			sctp_m_freem(m);
6687 			return;
6688 		}
6689 	}
6690 	if (stc.loopback_scope) {
6691 		over_addr = (union sctp_sockstore *)dst;
6692 	} else {
6693 		over_addr = NULL;
6694 	}
6695 
6696 	if ((error = sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0,
6697 	                                        0, 0,
6698 	                                        inp->sctp_lport, sh->src_port, init_chk->init.initiate_tag,
6699 	                                        port, over_addr,
6700 #if defined(__FreeBSD__) && !defined(__Userspace__)
6701 	                                        mflowtype, mflowid,
6702 #endif
6703 	                                        SCTP_SO_NOT_LOCKED))) {
6704 		SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak send error %d\n", error);
6705 		if (error == ENOBUFS) {
6706 			if (asoc != NULL) {
6707 				asoc->ifp_had_enobuf = 1;
6708 			}
6709 			SCTP_STAT_INCR(sctps_lowlevelerr);
6710 		}
6711 	} else {
6712 		if (asoc != NULL) {
6713 			asoc->ifp_had_enobuf = 0;
6714 		}
6715 	}
6716 	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
6717 }
6718 
6719 
6720 static void
6721 sctp_prune_prsctp(struct sctp_tcb *stcb,
6722     struct sctp_association *asoc,
6723     struct sctp_sndrcvinfo *srcv,
6724     int dataout)
6725 {
6726 	int freed_spc = 0;
6727 	struct sctp_tmit_chunk *chk, *nchk;
6728 
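	/*
	 * Two passes: first over the sent queue (chunks already transmitted),
	 * then over the send queue (not yet transmitted). In both passes only
	 * PR-SCTP buffer-limited chunks of lower priority than the new data
	 * are released, and we stop as soon as enough space (dataout bytes)
	 * has been freed.
	 */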
6729 	SCTP_TCB_LOCK_ASSERT(stcb);
6730 	if ((asoc->prsctp_supported) &&
6731 	    (asoc->sent_queue_cnt_removeable > 0)) {
6732 		TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
6733 			/*
6734 			 * Look for chunks marked with the PR_SCTP flag AND
6735 			 * the buffer space flag. If the one being sent is of
6736 			 * equal or greater priority, then purge the old one
6737 			 * and free some space.
6738 			 */
6739 			if (PR_SCTP_BUF_ENABLED(chk->flags)) {
6740 				/*
6741 				 * This one is PR-SCTP AND buffer space
6742 				 * limited type
6743 				 */
6744 				if (chk->rec.data.timetodrop.tv_sec > (long)srcv->sinfo_timetolive) {
6745 					/*
6746 					 * Lower numbers equate to higher
6747 					 * priority. So if the one we are
6748 					 * looking at has a larger value
6749 					 * (lower priority), we want to drop
6750 					 * the data and NOT retransmit it.
6751 					 */
6752 					if (chk->data) {
6753 						/*
6754 						 * We release the book_size
6755 						 * if the mbuf is here
6756 						 */
6757 						int ret_spc;
6758 						uint8_t sent;
6759 
6760 						if (chk->sent > SCTP_DATAGRAM_UNSENT)
6761 							sent = 1;
6762 						else
6763 							sent = 0;
6764 						ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6765 						    sent,
6766 						    SCTP_SO_LOCKED);
6767 						freed_spc += ret_spc;
6768 						if (freed_spc >= dataout) {
6769 							return;
6770 						}
6771 					}	/* if chunk was present */
6772 				}	/* if of sufficient priority */
6773 			}	/* if chunk has enabled */
6774 		}		/* tailqforeach */
6775 
6776 		TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
6777 			/* Here we must move to the sent queue and mark */
6778 			if (PR_SCTP_BUF_ENABLED(chk->flags)) {
6779 				if (chk->rec.data.timetodrop.tv_sec > (long)srcv->sinfo_timetolive) {
6780 					if (chk->data) {
6781 						/*
6782 						 * We release the book_size
6783 						 * if the mbuf is here
6784 						 */
6785 						int ret_spc;
6786 
6787 						ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6788 						    0, SCTP_SO_LOCKED);
6789 
6790 						freed_spc += ret_spc;
6791 						if (freed_spc >= dataout) {
6792 							return;
6793 						}
6794 					}	/* end if chk->data */
6795 				}	/* end if right class */
6796 			}	/* end if chk pr-sctp */
6797 		}		/* tailqforeachsafe (chk) */
6798 	}			/* if enabled in asoc */
6799 }
6800 
6801 int
6802 sctp_get_frag_point(struct sctp_tcb *stcb,
6803     struct sctp_association *asoc)
6804 {
6805 	int siz, ovh;
6806 
6807 	/*
6808 	 * For endpoints that have both v6 and v4 addresses we must reserve
6809 	 * room for the IPv6 header; for those that are only dealing with v4
6810 	 * we can use a larger frag point.
6811 	 */
6812 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
6813 		ovh = SCTP_MIN_OVERHEAD;
6814 	} else {
6815 #if defined(__Userspace__)
6816 		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) {
6817 			ovh = sizeof(struct sctphdr);
6818 		} else {
6819 			ovh = SCTP_MIN_V4_OVERHEAD;
6820 		}
6821 #else
6822 		ovh = SCTP_MIN_V4_OVERHEAD;
6823 #endif
6824 	}
6825 	ovh += SCTP_DATA_CHUNK_OVERHEAD(stcb);
6826 	if (stcb->asoc.sctp_frag_point > asoc->smallest_mtu)
6827 		siz = asoc->smallest_mtu - ovh;
6828 	else
6829 		siz = (stcb->asoc.sctp_frag_point - ovh);
6830 	/*
6831 	 * if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) {
6832 	 */
6833 	/* A data chunk MUST fit in a cluster */
6834 	/* siz = (MCLBYTES - sizeof(struct sctp_data_chunk)); */
6835 	/* } */
6836 
6837 	/* adjust for an AUTH chunk if DATA requires auth */
6838 	if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks))
6839 		siz -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
6840 
6841 	if (siz % 4) {
6842 		/* make it an even word boundary please */
6843 		siz -= (siz % 4);
6844 	}
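	/*
	 * Illustrative example (values are assumed, not exact): with a
	 * smallest path MTU of 1500 bytes and a configured frag point at
	 * least that large, siz is 1500 minus the IP/SCTP and DATA chunk
	 * overhead, reduced by the AUTH chunk length if DATA must be
	 * authenticated, and rounded down to a multiple of 4.
	 */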
6845 	return (siz);
6846 }
6847 
6848 static void
6849 sctp_set_prsctp_policy(struct sctp_stream_queue_pending *sp)
6850 {
6851 	/*
6852 	 * We assume that the user wants PR_SCTP_TTL if the user
6853 	 * provides a positive lifetime but does not specify any
6854 	 * PR_SCTP policy.
6855 	 */
6856 	if (PR_SCTP_ENABLED(sp->sinfo_flags)) {
6857 		sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
6858 	} else if (sp->timetolive > 0) {
6859 		sp->sinfo_flags |= SCTP_PR_SCTP_TTL;
6860 		sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
6861 	} else {
6862 		return;
6863 	}
6864 	switch (PR_SCTP_POLICY(sp->sinfo_flags)) {
6865 	case CHUNK_FLAGS_PR_SCTP_BUF:
6866 		/*
6867 		 * Time to live is a priority stored in tv_sec when
6868 		 * doing the buffer drop thing.
6869 		 */
6870 		sp->ts.tv_sec = sp->timetolive;
6871 		sp->ts.tv_usec = 0;
6872 		break;
6873 	case CHUNK_FLAGS_PR_SCTP_TTL:
6874 	{
6875 		struct timeval tv;
6876 		(void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6877 		tv.tv_sec = sp->timetolive / 1000;
6878 		tv.tv_usec = (sp->timetolive * 1000) % 1000000;
6879 		/* TODO sctp_constants.h needs alternative time macros when
6880 		 *  _KERNEL is undefined.
6881 		 */
6882 #if !(defined(__FreeBSD__) && !defined(__Userspace__))
6883 		timeradd(&sp->ts, &tv, &sp->ts);
6884 #else
6885 		timevaladd(&sp->ts, &tv);
6886 #endif
6887 	}
6888 		break;
6889 	case CHUNK_FLAGS_PR_SCTP_RTX:
6890 		/*
6891 		 * Time to live is the number of retransmissions
6892 		 * stored in tv_sec.
6893 		 */
6894 		sp->ts.tv_sec = sp->timetolive;
6895 		sp->ts.tv_usec = 0;
6896 		break;
6897 	default:
6898 		SCTPDBG(SCTP_DEBUG_USRREQ1,
6899 			"Unknown PR_SCTP policy %u.\n",
6900 			PR_SCTP_POLICY(sp->sinfo_flags));
6901 		break;
6902 	}
6903 }
6904 
6905 static int
6906 sctp_msg_append(struct sctp_tcb *stcb,
6907 		struct sctp_nets *net,
6908 		struct mbuf *m,
6909 		struct sctp_sndrcvinfo *srcv, int hold_stcb_lock)
6910 {
6911 	int error = 0;
6912 	struct mbuf *at;
6913 	struct sctp_stream_queue_pending *sp = NULL;
6914 	struct sctp_stream_out *strm;
6915 
6916 	/* Given an mbuf chain, put it
6917 	 * into the association send queue and
6918 	 * place it on the wheel
6919 	 */
6920 	if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) {
6921 		/* Invalid stream number */
6922 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
6923 		error = EINVAL;
6924 		goto out_now;
6925 	}
6926 	if ((stcb->asoc.stream_locked) &&
6927 	    (stcb->asoc.stream_locked_on != srcv->sinfo_stream)) {
6928 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
6929 		error = EINVAL;
6930 		goto out_now;
6931 	}
6932 	strm = &stcb->asoc.strmout[srcv->sinfo_stream];
6933 	/* Now can we send this? */
6934 	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) ||
6935 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
6936 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
6937 	    (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)) {
6938 		/* got data while shutting down */
6939 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
6940 		error = ECONNRESET;
6941 		goto out_now;
6942 	}
6943 	sctp_alloc_a_strmoq(stcb, sp);
6944 	if (sp == NULL) {
6945 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6946 		error = ENOMEM;
6947 		goto out_now;
6948 	}
6949 	sp->sinfo_flags = srcv->sinfo_flags;
6950 	sp->timetolive = srcv->sinfo_timetolive;
6951 	sp->ppid = srcv->sinfo_ppid;
6952 	sp->context = srcv->sinfo_context;
6953 	sp->fsn = 0;
6954 	if (sp->sinfo_flags & SCTP_ADDR_OVER) {
6955 		sp->net = net;
6956 		atomic_add_int(&sp->net->ref_count, 1);
6957 	} else {
6958 		sp->net = NULL;
6959 	}
6960 	(void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6961 	sp->sid = srcv->sinfo_stream;
6962 	sp->msg_is_complete = 1;
6963 	sp->sender_all_done = 1;
6964 	sp->some_taken = 0;
6965 	sp->data = m;
6966 	sp->tail_mbuf = NULL;
6967 	sctp_set_prsctp_policy(sp);
6968 	/* We could in theory (for sendall) pass the length
6969 	 * in, but we would still have to hunt through the
6970 	 * chain since we need to set up the tail_mbuf.
6971 	 */
6972 	sp->length = 0;
6973 	for (at = m; at; at = SCTP_BUF_NEXT(at)) {
6974 		if (SCTP_BUF_NEXT(at) == NULL)
6975 			sp->tail_mbuf = at;
6976 		sp->length += SCTP_BUF_LEN(at);
6977 	}
6978 	if (srcv->sinfo_keynumber_valid) {
6979 		sp->auth_keyid = srcv->sinfo_keynumber;
6980 	} else {
6981 		sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
6982 	}
6983 	if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
6984 		sctp_auth_key_acquire(stcb, sp->auth_keyid);
6985 		sp->holds_key_ref = 1;
6986 	}
6987 	if (hold_stcb_lock == 0) {
6988 		SCTP_TCB_SEND_LOCK(stcb);
6989 	}
6990 	sctp_snd_sb_alloc(stcb, sp->length);
6991 	atomic_add_int(&stcb->asoc.stream_queue_cnt, 1);
6992 	TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
6993 	stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, &stcb->asoc, strm, sp, 1);
6994 	m = NULL;
6995 	if (hold_stcb_lock == 0) {
6996 		SCTP_TCB_SEND_UNLOCK(stcb);
6997 	}
6998 out_now:
6999 	if (m) {
7000 		sctp_m_freem(m);
7001 	}
7002 	return (error);
7003 }
7004 
7005 
7006 static struct mbuf *
7007 sctp_copy_mbufchain(struct mbuf *clonechain,
7008 		    struct mbuf *outchain,
7009 		    struct mbuf **endofchain,
7010 		    int can_take_mbuf,
7011 		    int sizeofcpy,
7012 		    uint8_t copy_by_ref)
7013 {
7014 	struct mbuf *m;
7015 	struct mbuf *appendchain;
7016 	caddr_t cp;
7017 	int len;
7018 
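	/*
	 * Three strategies: if we may take the mbufs, use clonechain as is;
	 * for small payloads below the mbuf threshold (and not copy-by-ref),
	 * copy the data into the trailing space of the existing chain,
	 * allocating clusters as needed; otherwise duplicate the chain with
	 * SCTP_M_COPYM.
	 */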
7019 	if (endofchain == NULL) {
7020 		/* error */
7021 	error_out:
7022 		if (outchain)
7023 			sctp_m_freem(outchain);
7024 		return (NULL);
7025 	}
7026 	if (can_take_mbuf) {
7027 		appendchain = clonechain;
7028 	} else {
7029 		if (!copy_by_ref &&
7030 		    (sizeofcpy <= (int)((((SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count) - 1) * MLEN) + MHLEN)))) {
7031 			/* It's not in a cluster */
7032 			if (*endofchain == NULL) {
7033 				/* lets get a mbuf cluster */
7034 				if (outchain == NULL) {
7035 					/* This is the general case */
7036 				new_mbuf:
7037 					outchain = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
7038 					if (outchain == NULL) {
7039 						goto error_out;
7040 					}
7041 					SCTP_BUF_LEN(outchain) = 0;
7042 					*endofchain = outchain;
7043 					/* get the prepend space */
7044 					SCTP_BUF_RESV_UF(outchain, (SCTP_FIRST_MBUF_RESV+4));
7045 				} else {
7046 					/* We really should not get a NULL in endofchain */
7047 					/* find end */
7048 					m = outchain;
7049 					while (m) {
7050 						if (SCTP_BUF_NEXT(m) == NULL) {
7051 							*endofchain = m;
7052 							break;
7053 						}
7054 						m = SCTP_BUF_NEXT(m);
7055 					}
7056 					/* sanity */
7057 					if (*endofchain == NULL) {
7058 						/* huh, TSNH XXX maybe we should panic */
7059 						sctp_m_freem(outchain);
7060 						goto new_mbuf;
7061 					}
7062 				}
7063 				/* get the new end of length */
7064 				len = (int)M_TRAILINGSPACE(*endofchain);
7065 			} else {
7066 				/* how much is left at the end? */
7067 				len = (int)M_TRAILINGSPACE(*endofchain);
7068 			}
7069 			/* Find the end of the data, for appending */
7070 			cp = (mtod((*endofchain), caddr_t) + SCTP_BUF_LEN((*endofchain)));
7071 
7072 			/* Now lets copy it out */
7073 			if (len >= sizeofcpy) {
7074 				/* It all fits, copy it in */
7075 				m_copydata(clonechain, 0, sizeofcpy, cp);
7076 				SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
7077 			} else {
7078 				/* fill up the end of the chain */
7079 				if (len > 0) {
7080 					m_copydata(clonechain, 0, len, cp);
7081 					SCTP_BUF_LEN((*endofchain)) += len;
7082 					/* now we need another one */
7083 					sizeofcpy -= len;
7084 				}
7085 				m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
7086 				if (m == NULL) {
7087 					/* We failed */
7088 					goto error_out;
7089 				}
7090 				SCTP_BUF_NEXT((*endofchain)) = m;
7091 				*endofchain = m;
7092 				cp = mtod((*endofchain), caddr_t);
7093 				m_copydata(clonechain, len, sizeofcpy, cp);
7094 				SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
7095 			}
7096 			return (outchain);
7097 		} else {
7098 			/* copy the old fashion way */
7099 			appendchain = SCTP_M_COPYM(clonechain, 0, M_COPYALL, M_NOWAIT);
7100 #ifdef SCTP_MBUF_LOGGING
7101 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
7102 				sctp_log_mbc(appendchain, SCTP_MBUF_ICOPY);
7103 			}
7104 #endif
7105 		}
7106 	}
7107 	if (appendchain == NULL) {
7108 		/* error */
7109 		if (outchain)
7110 			sctp_m_freem(outchain);
7111 		return (NULL);
7112 	}
7113 	if (outchain) {
7114 		/* tack on to the end */
7115 		if (*endofchain != NULL) {
7116 			SCTP_BUF_NEXT(((*endofchain))) = appendchain;
7117 		} else {
7118 			m = outchain;
7119 			while (m) {
7120 				if (SCTP_BUF_NEXT(m) == NULL) {
7121 					SCTP_BUF_NEXT(m) = appendchain;
7122 					break;
7123 				}
7124 				m = SCTP_BUF_NEXT(m);
7125 			}
7126 		}
7127 		/*
7128 		 * save off the end and update the end-chain
7129 		 * position
7130 		 */
7131 		m = appendchain;
7132 		while (m) {
7133 			if (SCTP_BUF_NEXT(m) == NULL) {
7134 				*endofchain = m;
7135 				break;
7136 			}
7137 			m = SCTP_BUF_NEXT(m);
7138 		}
7139 		return (outchain);
7140 	} else {
7141 		/* save off the end and update the end-chain position */
7142 		m = appendchain;
7143 		while (m) {
7144 			if (SCTP_BUF_NEXT(m) == NULL) {
7145 				*endofchain = m;
7146 				break;
7147 			}
7148 			m = SCTP_BUF_NEXT(m);
7149 		}
7150 		return (appendchain);
7151 	}
7152 }
7153 
7154 static int
7155 sctp_med_chunk_output(struct sctp_inpcb *inp,
7156 		      struct sctp_tcb *stcb,
7157 		      struct sctp_association *asoc,
7158 		      int *num_out,
7159 		      int *reason_code,
7160 		      int control_only, int from_where,
7161 		      struct timeval *now, int *now_filled, int frag_point, int so_locked);
7162 
7163 static void
7164 sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
7165     uint32_t val SCTP_UNUSED)
7166 {
7167 	struct sctp_copy_all *ca;
7168 	struct mbuf *m;
7169 	int ret = 0;
7170 	int added_control = 0;
7171 	int un_sent, do_chunk_output = 1;
7172 	struct sctp_association *asoc;
7173 	struct sctp_nets *net;
7174 
7175 	ca = (struct sctp_copy_all *)ptr;
7176 	if (ca->m == NULL) {
7177 		return;
7178 	}
7179 	if (ca->inp != inp) {
7180 		/* TSNH */
7181 		return;
7182 	}
7183 	if (ca->sndlen > 0) {
7184 		m = SCTP_M_COPYM(ca->m, 0, M_COPYALL, M_NOWAIT);
7185 		if (m == NULL) {
7186 			/* can't copy so we are done */
7187 			ca->cnt_failed++;
7188 			return;
7189 		}
7190 #ifdef SCTP_MBUF_LOGGING
7191 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
7192 			sctp_log_mbc(m, SCTP_MBUF_ICOPY);
7193 		}
7194 #endif
7195 	} else {
7196 		m = NULL;
7197 	}
7198 	SCTP_TCB_LOCK_ASSERT(stcb);
7199 	if (stcb->asoc.alternate) {
7200 		net = stcb->asoc.alternate;
7201 	} else {
7202 		net = stcb->asoc.primary_destination;
7203 	}
7204 	if (ca->sndrcv.sinfo_flags & SCTP_ABORT) {
7205 		/* Abort this assoc with m as the user defined reason */
7206 		if (m != NULL) {
7207 			SCTP_BUF_PREPEND(m, sizeof(struct sctp_paramhdr), M_NOWAIT);
7208 		} else {
7209 			m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
7210 			                          0, M_NOWAIT, 1, MT_DATA);
7211 			SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
7212 		}
7213 		if (m != NULL) {
7214 			struct sctp_paramhdr *ph;
7215 
7216 			ph = mtod(m, struct sctp_paramhdr *);
7217 			ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
7218 			ph->param_length = htons((uint16_t)(sizeof(struct sctp_paramhdr) + ca->sndlen));
7219 		}
7220 		/* We add one here to keep the assoc from
7221 		 * disappearing on us.
7222 		 */
7223 		atomic_add_int(&stcb->asoc.refcnt, 1);
7224 		sctp_abort_an_association(inp, stcb, m, SCTP_SO_NOT_LOCKED);
7225 		/* sctp_abort_an_association calls sctp_free_asoc(),
7226 		 * but free_asoc will NOT free the association since we
7227 		 * incremented the refcnt. We do this to prevent it
7228 		 * from being freed and things getting tricky, since
7229 		 * we could end up (from free_asoc) calling inpcb_free,
7230 		 * which would take a recursive lock call on the
7231 		 * iterator lock. As a consequence of that, the
7232 		 * stcb comes back to us unlocked: since free_asoc
7233 		 * returns with either no TCB or the TCB unlocked, we
7234 		 * must relock so the iterator timer can unlock it. :-0
7235 		 */
7236 		SCTP_TCB_LOCK(stcb);
7237 		atomic_add_int(&stcb->asoc.refcnt, -1);
7238 		goto no_chunk_output;
7239 	} else {
7240 		if (m) {
7241 			ret = sctp_msg_append(stcb, net, m,
7242 					      &ca->sndrcv, 1);
7243 		}
7244 		asoc = &stcb->asoc;
7245 		if (ca->sndrcv.sinfo_flags & SCTP_EOF) {
7246 			/* shutdown this assoc */
7247 			if (TAILQ_EMPTY(&asoc->send_queue) &&
7248 			    TAILQ_EMPTY(&asoc->sent_queue) &&
7249 			    sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED) == 0) {
7250 				if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) {
7251 					goto abort_anyway;
7252 				}
7253 				/* there is nothing queued to send, so I'm done... */
7254 				if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
7255 				    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
7256 				    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
7257 					/* only send SHUTDOWN the first time through */
7258 					if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
7259 						SCTP_STAT_DECR_GAUGE32(sctps_currestab);
7260 					}
7261 					SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
7262 					sctp_stop_timers_for_shutdown(stcb);
7263 					sctp_send_shutdown(stcb, net);
7264 					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
7265 							 net);
7266 					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
7267 					                 NULL);
7268 					added_control = 1;
7269 					do_chunk_output = 0;
7270 				}
7271 			} else {
7272 				/*
7273 				 * we still got (or just got) data to send, so set
7274 				 * SHUTDOWN_PENDING
7275 				 */
7276 				/*
7277 				 * XXX sockets draft says that SCTP_EOF should be
7278 				 * sent with no data. Currently, we will allow user
7279 				 * data to be sent first and move to
7280 				 * SHUTDOWN-PENDING.
7281 				 */
7282 				if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
7283 				    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
7284 				    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
7285 					if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) {
7286 						SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
7287 					}
7288 					SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
7289 					if (TAILQ_EMPTY(&asoc->send_queue) &&
7290 					    TAILQ_EMPTY(&asoc->sent_queue) &&
7291 					    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
7292 						struct mbuf *op_err;
7293 						char msg[SCTP_DIAG_INFO_LEN];
7294 
7295 					abort_anyway:
7296 						SCTP_SNPRINTF(msg, sizeof(msg),
7297 						              "%s:%d at %s", __FILE__, __LINE__, __func__);
7298 						op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
7299 						                             msg);
7300 						atomic_add_int(&stcb->asoc.refcnt, 1);
7301 						sctp_abort_an_association(stcb->sctp_ep, stcb,
7302 									  op_err, SCTP_SO_NOT_LOCKED);
7303 						atomic_add_int(&stcb->asoc.refcnt, -1);
7304 						goto no_chunk_output;
7305 					}
7306 					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
7307 					                 NULL);
7308 				}
7309 			}
7310 
7311 		}
7312 	}
7313 	un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
7314 		   (stcb->asoc.stream_queue_cnt * SCTP_DATA_CHUNK_OVERHEAD(stcb)));
7315 
7316 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
7317 	    (stcb->asoc.total_flight > 0) &&
7318 	    (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
7319 		do_chunk_output = 0;
7320 	}
7321 	if (do_chunk_output)
7322 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_NOT_LOCKED);
7323 	else if (added_control) {
7324 		int num_out, reason, now_filled = 0;
7325 		struct timeval now;
7326 		int frag_point;
7327 
7328 		frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
7329 		(void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
7330 				      &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_NOT_LOCKED);
7331 	}
7332  no_chunk_output:
7333 	if (ret) {
7334 		ca->cnt_failed++;
7335 	} else {
7336 		ca->cnt_sent++;
7337 	}
7338 }
7339 
7340 static void
7341 sctp_sendall_completes(void *ptr, uint32_t val SCTP_UNUSED)
7342 {
7343 	struct sctp_copy_all *ca;
7344 
7345 	ca = (struct sctp_copy_all *)ptr;
7346 	/*
7347 	 * Do a notify here? Kacheong suggests that the notify be done at
7348 	 * send time, so you would push up a notification if any send
7349 	 * failed. Don't know if this is feasible, since the only failures we
7350 	 * have are "memory" related, and if you cannot get an mbuf to send
7351 	 * the data you surely can't get an mbuf to send up a notification
7352 	 * to the user that you can't send the data :->
7353 	 */
7354 
7355 	/* now free everything */
7356 	if (ca->inp) {
7357 		/* Lets clear the flag to allow others to run. */
7358 		ca->inp->sctp_flags &= ~SCTP_PCB_FLAGS_SND_ITERATOR_UP;
7359 	}
7360 	sctp_m_freem(ca->m);
7361 	SCTP_FREE(ca, SCTP_M_COPYAL);
7362 }
7363 
7364 static struct mbuf *
7365 sctp_copy_out_all(struct uio *uio, ssize_t len)
7366 {
7367 	struct mbuf *ret, *at;
7368 	ssize_t left, willcpy, cancpy, error;
7369 
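	/*
	 * Copy len bytes from the user's uio into a chain of mbuf clusters,
	 * filling the trailing space of each mbuf before allocating the next
	 * one; NULL is returned if a copy fails.
	 */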
7370 	ret = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_WAITOK, 1, MT_DATA);
7371 	if (ret == NULL) {
7372 		/* TSNH */
7373 		return (NULL);
7374 	}
7375 	left = len;
7376 	SCTP_BUF_LEN(ret) = 0;
7377 	/* save space for the data chunk header */
7378 	cancpy = (int)M_TRAILINGSPACE(ret);
7379 	willcpy = min(cancpy, left);
7380 	at = ret;
7381 	while (left > 0) {
7382 		/* Align data to the end */
7383 		error = uiomove(mtod(at, caddr_t), (int)willcpy, uio);
7384 		if (error) {
7385 	err_out_now:
7386 			sctp_m_freem(at);
7387 			return (NULL);
7388 		}
7389 		SCTP_BUF_LEN(at) = (int)willcpy;
7390 		SCTP_BUF_NEXT_PKT(at) = SCTP_BUF_NEXT(at) = 0;
7391 		left -= willcpy;
7392 		if (left > 0) {
7393 			SCTP_BUF_NEXT(at) = sctp_get_mbuf_for_msg((unsigned int)left, 0, M_WAITOK, 1, MT_DATA);
7394 			if (SCTP_BUF_NEXT(at) == NULL) {
7395 				goto err_out_now;
7396 			}
7397 			at = SCTP_BUF_NEXT(at);
7398 			SCTP_BUF_LEN(at) = 0;
7399 			cancpy = (int)M_TRAILINGSPACE(at);
7400 			willcpy = min(cancpy, left);
7401 		}
7402 	}
7403 	return (ret);
7404 }
7405 
7406 static int
7407 sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m,
7408     struct sctp_sndrcvinfo *srcv)
7409 {
7410 	int ret;
7411 	struct sctp_copy_all *ca;
7412 
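	/*
	 * Only one send-all operation may be in progress per endpoint; the
	 * SND_ITERATOR_UP flag enforces this and is cleared again in
	 * sctp_sendall_completes() once the iterator has finished.
	 */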
7413 	if (inp->sctp_flags & SCTP_PCB_FLAGS_SND_ITERATOR_UP) {
7414 		/* There is another. */
7415 		return (EBUSY);
7416 	}
7417 #if defined(__APPLE__) && !defined(__Userspace__)
7418 #if defined(APPLE_LEOPARD)
7419 	if (uio->uio_resid > SCTP_BASE_SYSCTL(sctp_sendall_limit)) {
7420 #else
7421 	if (uio_resid(uio) > SCTP_BASE_SYSCTL(sctp_sendall_limit)) {
7422 #endif
7423 #else
7424 	if (uio->uio_resid > (ssize_t)SCTP_BASE_SYSCTL(sctp_sendall_limit)) {
7425 #endif
7426 		/* You must not be larger than the limit! */
7427 		return (EMSGSIZE);
7428 	}
7429 	SCTP_MALLOC(ca, struct sctp_copy_all *, sizeof(struct sctp_copy_all),
7430 		    SCTP_M_COPYAL);
7431 	if (ca == NULL) {
7432 		sctp_m_freem(m);
7433 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
7434 		return (ENOMEM);
7435 	}
7436 	memset(ca, 0, sizeof(struct sctp_copy_all));
7437 
7438 	ca->inp = inp;
7439 	if (srcv) {
7440 		memcpy(&ca->sndrcv, srcv, sizeof(struct sctp_nonpad_sndrcvinfo));
7441 	}
7442 	/*
7443 	 * take off the sendall flag, it would be bad if we failed to do
7444 	 * this :-0
7445 	 */
7446 	ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL;
7447 	/* get length and mbuf chain */
7448 	if (uio) {
7449 #if defined(__APPLE__) && !defined(__Userspace__)
7450 #if defined(APPLE_LEOPARD)
7451 		ca->sndlen = uio->uio_resid;
7452 #else
7453 		ca->sndlen = uio_resid(uio);
7454 #endif
7455 #else
7456 		ca->sndlen = uio->uio_resid;
7457 #endif
7458 #if defined(__APPLE__) && !defined(__Userspace__)
7459 		SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 0);
7460 #endif
7461 		ca->m = sctp_copy_out_all(uio, ca->sndlen);
7462 #if defined(__APPLE__) && !defined(__Userspace__)
7463 		SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 0);
7464 #endif
7465 		if (ca->m == NULL) {
7466 			SCTP_FREE(ca, SCTP_M_COPYAL);
7467 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
7468 			return (ENOMEM);
7469 		}
7470 	} else {
7471 		/* Gather the length of the send */
7472 		struct mbuf *mat;
7473 
7474 		ca->sndlen = 0;
7475 		for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
7476 			ca->sndlen += SCTP_BUF_LEN(mat);
7477 		}
7478 	}
7479 	inp->sctp_flags |= SCTP_PCB_FLAGS_SND_ITERATOR_UP;
7480 	ret = sctp_initiate_iterator(NULL, sctp_sendall_iterator, NULL,
7481 				     SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES,
7482 				     SCTP_ASOC_ANY_STATE,
7483 				     (void *)ca, 0,
7484 				     sctp_sendall_completes, inp, 1);
7485 	if (ret) {
7486 		inp->sctp_flags &= ~SCTP_PCB_FLAGS_SND_ITERATOR_UP;
7487 		SCTP_FREE(ca, SCTP_M_COPYAL);
7488 		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
7489 		return (EFAULT);
7490 	}
7491 	return (0);
7492 }
7493 
7494 
7495 void
7496 sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc)
7497 {
7498 	struct sctp_tmit_chunk *chk, *nchk;
7499 
7500 	TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
7501 		if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
7502 			TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
7503 			asoc->ctrl_queue_cnt--;
7504 			if (chk->data) {
7505 				sctp_m_freem(chk->data);
7506 				chk->data = NULL;
7507 			}
7508 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
7509 		}
7510 	}
7511 }
7512 
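/*
 * Remove and free ASCONF chunks on the asconf send queue whose serial
 * numbers have already been acknowledged; stop at the first chunk that
 * is still unacknowledged.
 */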
7513 void
7514 sctp_toss_old_asconf(struct sctp_tcb *stcb)
7515 {
7516 	struct sctp_association *asoc;
7517 	struct sctp_tmit_chunk *chk, *nchk;
7518 	struct sctp_asconf_chunk *acp;
7519 
7520 	asoc = &stcb->asoc;
7521 	TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
7522 		/* find SCTP_ASCONF chunk in queue */
7523 		if (chk->rec.chunk_id.id == SCTP_ASCONF) {
7524 			if (chk->data) {
7525 				acp = mtod(chk->data, struct sctp_asconf_chunk *);
7526 				if (SCTP_TSN_GT(ntohl(acp->serial_number), asoc->asconf_seq_out_acked)) {
7527 					/* Not Acked yet */
7528 					break;
7529 				}
7530 			}
7531 			TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next);
7532 			asoc->ctrl_queue_cnt--;
7533 			if (chk->data) {
7534 				sctp_m_freem(chk->data);
7535 				chk->data = NULL;
7536 			}
7537 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
7538 		}
7539 	}
7540 }
7541 
7542 
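/*
 * After a packet has been built, move the bundled DATA chunks from the
 * send queue onto the sent queue (kept in TSN order), bind un-routed
 * chunks to the chosen net, update the flight size and charge the
 * peer's rwnd.
 */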
7543 static void
7544 sctp_clean_up_datalist(struct sctp_tcb *stcb,
7545     struct sctp_association *asoc,
7546     struct sctp_tmit_chunk **data_list,
7547     int bundle_at,
7548     struct sctp_nets *net)
7549 {
7550 	int i;
7551 	struct sctp_tmit_chunk *tp1;
7552 
7553 	for (i = 0; i < bundle_at; i++) {
7554 		/* off of the send queue */
7555 		TAILQ_REMOVE(&asoc->send_queue, data_list[i], sctp_next);
7556 		asoc->send_queue_cnt--;
7557 		if (i > 0) {
7558 			/*
7559 			 * For any chunk other than the first, clear the RTT
7560 			 * timing flag; chunk 0 has do_rtt cleared or set
7561 			 * depending on whether an RTO measurement is needed.
7562 			 */
7563 			data_list[i]->do_rtt = 0;
7564 		}
7565 		/* record time */
7566 		data_list[i]->sent_rcv_time = net->last_sent_time;
7567 		data_list[i]->rec.data.cwnd_at_send = net->cwnd;
7568 		data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.tsn;
7569 		if (data_list[i]->whoTo == NULL) {
7570 			data_list[i]->whoTo = net;
7571 			atomic_add_int(&net->ref_count, 1);
7572 		}
7573 		/* on to the sent queue */
7574 		tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead);
7575 		if ((tp1) && SCTP_TSN_GT(tp1->rec.data.tsn, data_list[i]->rec.data.tsn)) {
7576 			struct sctp_tmit_chunk *tpp;
7577 
7578 			/* need to move back */
7579 		back_up_more:
7580 			tpp = TAILQ_PREV(tp1, sctpchunk_listhead, sctp_next);
7581 			if (tpp == NULL) {
7582 				TAILQ_INSERT_BEFORE(tp1, data_list[i], sctp_next);
7583 				goto all_done;
7584 			}
7585 			tp1 = tpp;
7586 			if (SCTP_TSN_GT(tp1->rec.data.tsn, data_list[i]->rec.data.tsn)) {
7587 				goto back_up_more;
7588 			}
7589 			TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next);
7590 		} else {
7591 			TAILQ_INSERT_TAIL(&asoc->sent_queue,
7592 					  data_list[i],
7593 					  sctp_next);
7594 		}
7595 	all_done:
7596 		/* This does not lower until the cum-ack passes it */
7597 		asoc->sent_queue_cnt++;
7598 		if ((asoc->peers_rwnd <= 0) &&
7599 		    (asoc->total_flight == 0) &&
7600 		    (bundle_at == 1)) {
7601 			/* Mark the chunk as being a window probe */
7602 			SCTP_STAT_INCR(sctps_windowprobed);
7603 		}
7604 #ifdef SCTP_AUDITING_ENABLED
7605 		sctp_audit_log(0xC2, 3);
7606 #endif
7607 		data_list[i]->sent = SCTP_DATAGRAM_SENT;
7608 		data_list[i]->snd_count = 1;
7609 		data_list[i]->rec.data.chunk_was_revoked = 0;
7610 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
7611 			sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
7612 				       data_list[i]->whoTo->flight_size,
7613 				       data_list[i]->book_size,
7614 				       (uint32_t)(uintptr_t)data_list[i]->whoTo,
7615 				       data_list[i]->rec.data.tsn);
7616 		}
7617 		sctp_flight_size_increase(data_list[i]);
7618 		sctp_total_flight_increase(stcb, data_list[i]);
7619 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
7620 			sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
7621 			      asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
7622 		}
7623 		asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
7624 						    (uint32_t) (data_list[i]->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
7625 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
7626 			/* SWS sender side engages */
7627 			asoc->peers_rwnd = 0;
7628 		}
7629 	}
7630 	if (asoc->cc_functions.sctp_cwnd_update_packet_transmitted) {
7631 		(*asoc->cc_functions.sctp_cwnd_update_packet_transmitted)(stcb, net);
7632 	}
7633 }
7634 
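/*
 * Remove stray control chunks (SACKs, heartbeats, shutdown related
 * chunks, etc.) from the control send queue. A stream reset request is
 * kept only if it is the one currently outstanding.
 */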
7635 static void
7636 sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc, int so_locked)
7637 {
7638 	struct sctp_tmit_chunk *chk, *nchk;
7639 
7640 	TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
7641 		if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
7642 		    (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) ||	/* EY */
7643 		    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
7644 		    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
7645 		    (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) ||
7646 		    (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
7647 		    (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
7648 		    (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
7649 		    (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
7650 		    (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
7651 		    (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
7652 		    (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
7653 			/* Stray chunks must be cleaned up */
7654 	clean_up_anyway:
7655 			TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
7656 			asoc->ctrl_queue_cnt--;
7657 			if (chk->data) {
7658 				sctp_m_freem(chk->data);
7659 				chk->data = NULL;
7660 			}
7661 			if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
7662 				asoc->fwd_tsn_cnt--;
7663 			}
7664 			sctp_free_a_chunk(stcb, chk, so_locked);
7665 		} else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
7666 			/* special handling, we must look into the param */
7667 			if (chk != asoc->str_reset) {
7668 				goto clean_up_anyway;
7669 			}
7670 		}
7671 	}
7672 }
7673 
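/*
 * Given an incomplete message of the given length, return how many
 * bytes may be moved into the current packet; 0 means wait for more
 * data before sending anything from this message.
 */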
7674 static uint32_t
7675 sctp_can_we_split_this(struct sctp_tcb *stcb, uint32_t length,
7676                        uint32_t space_left, uint32_t frag_point, int eeor_on)
7677 {
7678 	/* Decide whether I should split a
7679 	 * msg into multiple parts. This is only asked for
7680 	 * incomplete messages.
7681 	 */
7682 	if (eeor_on) {
7683 		/* If we are doing EEOR we always need to send
7684 		 * it if it's the entire message, since it might
7685 		 * be all the application is going to queue.
7686 		 */
7687 		if (space_left >= length) {
7688 			/*-
7689 			 * If we have data outstanding,
7690 			 * we get another chance when the sack
7691 			 * arrives to transmit - wait for more data
7692 			 */
7693 			if (stcb->asoc.total_flight == 0) {
7694 				/* Nothing is in flight, so send the
7695 				 * entire message now.
7696 				 */
7697 				return (length);
7698 			}
7699 			return (0);
7700 
7701 		} else {
7702 			/* You can fill the rest */
7703 			return (space_left);
7704 		}
7705 	}
7706 	/*-
7707 	 * For those strange folk that make the send buffer
7708 	 * smaller than our fragmentation point, we can't
7709 	 * get a full msg in so we have to allow splitting.
7710 	 */
7711 	if (SCTP_SB_LIMIT_SND(stcb->sctp_socket) < frag_point) {
7712 		return (length);
7713 	}
7714 	if ((length <= space_left) ||
7715 	    ((length - space_left) < SCTP_BASE_SYSCTL(sctp_min_residual))) {
7716 		/* Sub-optimal residual; don't split in non-EEOR mode. */
7717 		return (0);
7718 	}
7719 	/* If we reach here, length is larger
7720 	 * than space_left. Do we wish to split
7721 	 * it to help fill out this packet?
7722 	 */
7723 	if (space_left >= min(SCTP_BASE_SYSCTL(sctp_min_split_point), frag_point)) {
7724 		/* It's OK to split it */
7725 		return (min(space_left, frag_point));
7726 	}
7727 	/* Nope, can't split */
7728 	return (0);
7729 }
7730 
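/*
 * Move up to space_left bytes of the first message on this stream's
 * outqueue into a newly built DATA or I-DATA chunk, assign it a TSN and
 * append it to the association's send queue. Returns the number of user
 * data bytes moved (0 if nothing could be taken).
 */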
7731 static uint32_t
7732 sctp_move_to_outqueue(struct sctp_tcb *stcb,
7733                       struct sctp_stream_out *strq,
7734                       uint32_t space_left,
7735                       uint32_t frag_point,
7736                       int *giveup,
7737                       int eeor_mode,
7738                       int *bail,
7739                       int so_locked)
7740 {
7741 	/* Move from the stream to the send_queue keeping track of the total */
7742 	struct sctp_association *asoc;
7743 	struct sctp_stream_queue_pending *sp;
7744 	struct sctp_tmit_chunk *chk;
7745 	struct sctp_data_chunk *dchkh = NULL;
7746 	struct sctp_idata_chunk *ndchkh = NULL;
7747 	uint32_t to_move, length;
7748 	int leading;
7749 	uint8_t rcv_flags = 0;
7750 	uint8_t some_taken;
7751 	uint8_t send_lock_up = 0;
7752 
7753 	SCTP_TCB_LOCK_ASSERT(stcb);
7754 	asoc = &stcb->asoc;
7755 one_more_time:
7756 	/*sa_ignore FREED_MEMORY*/
7757 	sp = TAILQ_FIRST(&strq->outqueue);
7758 	if (sp == NULL) {
7759 		if (send_lock_up == 0) {
7760 			SCTP_TCB_SEND_LOCK(stcb);
7761 			send_lock_up = 1;
7762 		}
7763 		sp = TAILQ_FIRST(&strq->outqueue);
7764 		if (sp) {
7765 			goto one_more_time;
7766 		}
7767 		if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_EXPLICIT_EOR) == 0) &&
7768 		    (stcb->asoc.idata_supported == 0) &&
7769 		    (strq->last_msg_incomplete)) {
7770 			SCTP_PRINTF("Huh? Stream:%d lm_in_c=%d but queue is NULL\n",
7771 			            strq->sid,
7772 			            strq->last_msg_incomplete);
7773 			strq->last_msg_incomplete = 0;
7774 		}
7775 		to_move = 0;
7776 		if (send_lock_up) {
7777 			SCTP_TCB_SEND_UNLOCK(stcb);
7778 			send_lock_up = 0;
7779 		}
7780 		goto out_of;
7781 	}
7782 	if ((sp->msg_is_complete) && (sp->length == 0)) {
7783 		if (sp->sender_all_done) {
7784 			/* We are doing deferred cleanup. Last
7785 			 * time through when we took all the data
7786 			 * the sender_all_done was not set.
7787 			 */
7788 			if ((sp->put_last_out == 0) && (sp->discard_rest == 0)) {
7789 				SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
7790 				SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
7791 				            sp->sender_all_done,
7792 				            sp->length,
7793 				            sp->msg_is_complete,
7794 				            sp->put_last_out,
7795 				            send_lock_up);
7796 			}
7797 			if ((TAILQ_NEXT(sp, next) == NULL) && (send_lock_up  == 0)) {
7798 				SCTP_TCB_SEND_LOCK(stcb);
7799 				send_lock_up = 1;
7800 			}
7801 			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
7802 			TAILQ_REMOVE(&strq->outqueue, sp, next);
7803 			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
7804 			if ((strq->state == SCTP_STREAM_RESET_PENDING) &&
7805 			    (strq->chunks_on_queues == 0) &&
7806 			    TAILQ_EMPTY(&strq->outqueue)) {
7807 				stcb->asoc.trigger_reset = 1;
7808 			}
7809 			if (sp->net) {
7810 				sctp_free_remote_addr(sp->net);
7811 				sp->net = NULL;
7812 			}
7813 			if (sp->data) {
7814 				sctp_m_freem(sp->data);
7815 				sp->data = NULL;
7816 			}
7817 			sctp_free_a_strmoq(stcb, sp, so_locked);
7818 			/* we can't be locked to it */
7819 			if (send_lock_up) {
7820 				SCTP_TCB_SEND_UNLOCK(stcb);
7821 				send_lock_up = 0;
7822 			}
7823 			/* back to get the next msg */
7824 			goto one_more_time;
7825 		} else {
7826 			/* sender just finished this but
7827 			 * still holds a reference
7828 			 */
7829 			*giveup = 1;
7830 			to_move = 0;
7831 			goto out_of;
7832 		}
7833 	} else {
7834 		/* is there some to get */
7835 		if (sp->length == 0) {
7836 			/* no */
7837 			*giveup = 1;
7838 			to_move = 0;
7839 			goto out_of;
7840 		} else if (sp->discard_rest) {
7841 			if (send_lock_up == 0) {
7842 				SCTP_TCB_SEND_LOCK(stcb);
7843 				send_lock_up = 1;
7844 			}
7845 			/* Whack down the size */
7846 			atomic_subtract_int(&stcb->asoc.total_output_queue_size, sp->length);
7847 			if ((stcb->sctp_socket != NULL) &&
7848 			    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
7849 			     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
7850 				atomic_subtract_int(&stcb->sctp_socket->so_snd.sb_cc, sp->length);
7851 			}
7852 			if (sp->data) {
7853 				sctp_m_freem(sp->data);
7854 				sp->data = NULL;
7855 				sp->tail_mbuf = NULL;
7856 			}
7857 			sp->length = 0;
7858 			sp->some_taken = 1;
7859 			*giveup = 1;
7860 			to_move = 0;
7861 			goto out_of;
7862 		}
7863 	}
7864 	some_taken = sp->some_taken;
7865 re_look:
7866 	length = sp->length;
7867 	if (sp->msg_is_complete) {
7868 		/* The message is complete */
7869 		to_move = min(length, frag_point);
7870 		if (to_move == length) {
7871 			/* All of it fits in the MTU */
7872 			if (sp->some_taken) {
7873 				rcv_flags |= SCTP_DATA_LAST_FRAG;
7874 			} else {
7875 				rcv_flags |= SCTP_DATA_NOT_FRAG;
7876 			}
7877 			sp->put_last_out = 1;
7878 			if (sp->sinfo_flags & SCTP_SACK_IMMEDIATELY) {
7879 				rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
7880 			}
7881 		} else {
7882 			/* Not all of it fits, we fragment */
7883 			if (sp->some_taken == 0) {
7884 				rcv_flags |= SCTP_DATA_FIRST_FRAG;
7885 			}
7886 			sp->some_taken = 1;
7887 		}
7888 	} else {
7889 		to_move = sctp_can_we_split_this(stcb, length, space_left, frag_point, eeor_mode);
7890 		if (to_move) {
7891 			/*-
7892 			 * We use a snapshot of length in case it
7893 			 * is expanding during the compare.
7894 			 */
7895 			uint32_t llen;
7896 
7897 			llen = length;
7898 			if (to_move >= llen) {
7899 				to_move = llen;
7900 				if (send_lock_up == 0) {
7901 					/*-
7902 					 * We are taking all of an incomplete msg
7903 					 * thus we need a send lock.
7904 					 */
7905 					SCTP_TCB_SEND_LOCK(stcb);
7906 					send_lock_up = 1;
7907 					if (sp->msg_is_complete) {
7908 						/* the sender finished the msg */
7909 						goto re_look;
7910 					}
7911 				}
7912 			}
7913 			if (sp->some_taken == 0) {
7914 				rcv_flags |= SCTP_DATA_FIRST_FRAG;
7915 				sp->some_taken = 1;
7916 			}
7917 		} else {
7918 			/* Nothing to take. */
7919 			*giveup = 1;
7920 			to_move = 0;
7921 			goto out_of;
7922 		}
7923 	}
7924 
7925 	/* If we reach here, we can copy out a chunk */
7926 	sctp_alloc_a_chunk(stcb, chk);
7927 	if (chk == NULL) {
7928 		/* No chunk memory */
7929 		*giveup = 1;
7930 		to_move = 0;
7931 		goto out_of;
7932 	}
7933 	/* Set up for unordered delivery if needed, based
7934 	 * on the sinfo flags supplied by the user.
7935 	 */
7936 	if (sp->sinfo_flags & SCTP_UNORDERED) {
7937 		rcv_flags |= SCTP_DATA_UNORDERED;
7938 	}
7939 	if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) &&
7940 	    (sp->sinfo_flags & SCTP_EOF) == SCTP_EOF) {
7941 		rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
7942 	}
7943 	/* clear out the chunk before setting up */
7944 	memset(chk, 0, sizeof(*chk));
7945 	chk->rec.data.rcv_flags = rcv_flags;
7946 
7947 	if (to_move >= length) {
7948 		/* we think we can steal the whole thing */
7949 		if ((sp->sender_all_done == 0) && (send_lock_up == 0)) {
7950 			SCTP_TCB_SEND_LOCK(stcb);
7951 			send_lock_up = 1;
7952 		}
7953 		if (to_move < sp->length) {
7954 			/* bail, it changed */
7955 			goto dont_do_it;
7956 		}
7957 		chk->data = sp->data;
7958 		chk->last_mbuf = sp->tail_mbuf;
7959 		/* register the stealing */
7960 		sp->data = sp->tail_mbuf = NULL;
7961 	} else {
7962 		struct mbuf *m;
7963 	dont_do_it:
7964 		chk->data = SCTP_M_COPYM(sp->data, 0, to_move, M_NOWAIT);
7965 		chk->last_mbuf = NULL;
7966 		if (chk->data == NULL) {
7967 			sp->some_taken = some_taken;
7968 			sctp_free_a_chunk(stcb, chk, so_locked);
7969 			*bail = 1;
7970 			to_move = 0;
7971 			goto out_of;
7972 		}
7973 #ifdef SCTP_MBUF_LOGGING
7974 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
7975 			sctp_log_mbc(chk->data, SCTP_MBUF_ICOPY);
7976 		}
7977 #endif
7978 		/* Pull off the data */
7979 		m_adj(sp->data, to_move);
7980 		/* Now lets work our way down and compact it */
7981 		m = sp->data;
7982 		while (m && (SCTP_BUF_LEN(m) == 0)) {
7983 			sp->data  = SCTP_BUF_NEXT(m);
7984 			SCTP_BUF_NEXT(m) = NULL;
7985 			if (sp->tail_mbuf == m) {
7986 				/*-
7987 				 * Freeing tail? TSNH since
7988 				 * we supposedly were taking less
7989 				 * than the sp->length.
7990 				 */
7991 #ifdef INVARIANTS
7992 				panic("Huh, freeing tail? - TSNH");
7993 #else
7994 				SCTP_PRINTF("Huh, freeing tail? - TSNH\n");
7995 				sp->tail_mbuf = sp->data = NULL;
7996 				sp->length = 0;
7997 #endif
7998 
7999 			}
8000 			sctp_m_free(m);
8001 			m = sp->data;
8002 		}
8003 	}
8004 	if (SCTP_BUF_IS_EXTENDED(chk->data)) {
8005 		chk->copy_by_ref = 1;
8006 	} else {
8007 		chk->copy_by_ref = 0;
8008 	}
8009 	/* Get last_mbuf by walking the chain if needed.
8010 	 * This is ugly but hopefully it's only one mbuf.
8011 	 */
8012 	if (chk->last_mbuf == NULL) {
8013 		chk->last_mbuf = chk->data;
8014 		while (SCTP_BUF_NEXT(chk->last_mbuf) != NULL) {
8015 			chk->last_mbuf = SCTP_BUF_NEXT(chk->last_mbuf);
8016 		}
8017 	}
8018 
8019 	if (to_move > length) {
8020 		/*- This should not happen either,
8021 		 * since we always lower to_move to the size
8022 		 * of sp->length if it is larger.
8023 		 */
8024 #ifdef INVARIANTS
8025 		panic("Huh, how can to_move be larger?");
8026 #else
8027 		SCTP_PRINTF("Huh, how can to_move be larger?\n");
8028 		sp->length = 0;
8029 #endif
8030 	} else {
8031 		atomic_subtract_int(&sp->length, to_move);
8032 	}
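	/*
	 * Make sure there is room in front of the data for the DATA or
	 * I-DATA chunk header; if not, prepend a fresh mbuf for it.
	 */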
8033 	leading = SCTP_DATA_CHUNK_OVERHEAD(stcb);
8034 	if (M_LEADINGSPACE(chk->data) < leading) {
8035 		/* Not enough room for a chunk header, get some */
8036 		struct mbuf *m;
8037 
8038 		m = sctp_get_mbuf_for_msg(1, 0, M_NOWAIT, 1, MT_DATA);
8039 		if (m == NULL) {
8040 			/*
8041 			 * we're in trouble here. _PREPEND below will free
8042 			 * all the data if there is no leading space, so we
8043 			 * must put the data back and restore.
8044 			 */
8045 			if (send_lock_up == 0) {
8046 				SCTP_TCB_SEND_LOCK(stcb);
8047 				send_lock_up = 1;
8048 			}
8049 			if (sp->data == NULL) {
8050 				/* unsteal the data */
8051 				sp->data = chk->data;
8052 				sp->tail_mbuf = chk->last_mbuf;
8053 			} else {
8054 				struct mbuf *m_tmp;
8055 				/* reassemble the data */
8056 				m_tmp = sp->data;
8057 				sp->data = chk->data;
8058 				SCTP_BUF_NEXT(chk->last_mbuf) = m_tmp;
8059 			}
8060 			sp->some_taken = some_taken;
8061 			atomic_add_int(&sp->length, to_move);
8062 			chk->data = NULL;
8063 			*bail = 1;
8064 			sctp_free_a_chunk(stcb, chk, so_locked);
8065 			to_move = 0;
8066 			goto out_of;
8067 		} else {
8068 			SCTP_BUF_LEN(m) = 0;
8069 			SCTP_BUF_NEXT(m) = chk->data;
8070 			chk->data = m;
8071 			M_ALIGN(chk->data, 4);
8072 		}
8073 	}
8074 	SCTP_BUF_PREPEND(chk->data, SCTP_DATA_CHUNK_OVERHEAD(stcb), M_NOWAIT);
8075 	if (chk->data == NULL) {
8076 		/* HELP, TSNH since we ensured above that this could not happen? */
8077 #ifdef INVARIANTS
8078 		panic("prepend fails HELP?");
8079 #else
8080 		SCTP_PRINTF("prepend fails HELP?\n");
8081 		sctp_free_a_chunk(stcb, chk, so_locked);
8082 #endif
8083 		*bail = 1;
8084 		to_move = 0;
8085 		goto out_of;
8086 	}
8087 	sctp_snd_sb_alloc(stcb, SCTP_DATA_CHUNK_OVERHEAD(stcb));
8088 	chk->book_size = chk->send_size = (uint16_t)(to_move + SCTP_DATA_CHUNK_OVERHEAD(stcb));
8089 	chk->book_size_scale = 0;
8090 	chk->sent = SCTP_DATAGRAM_UNSENT;
8091 
8092 	chk->flags = 0;
8093 	chk->asoc = &stcb->asoc;
8094 	chk->pad_inplace = 0;
8095 	chk->no_fr_allowed = 0;
8096 	if (stcb->asoc.idata_supported == 0) {
8097 		if (rcv_flags & SCTP_DATA_UNORDERED) {
8098 			/* Just use 0. The receiver ignores the values. */
8099 			chk->rec.data.mid = 0;
8100 		} else {
8101 			chk->rec.data.mid = strq->next_mid_ordered;
8102 			if (rcv_flags & SCTP_DATA_LAST_FRAG) {
8103 				strq->next_mid_ordered++;
8104 			}
8105 		}
8106 	} else {
8107 		if (rcv_flags & SCTP_DATA_UNORDERED) {
8108 			chk->rec.data.mid = strq->next_mid_unordered;
8109 			if (rcv_flags & SCTP_DATA_LAST_FRAG) {
8110 				strq->next_mid_unordered++;
8111 			}
8112 		} else {
8113 			chk->rec.data.mid = strq->next_mid_ordered;
8114 			if (rcv_flags & SCTP_DATA_LAST_FRAG) {
8115 				strq->next_mid_ordered++;
8116 			}
8117 		}
8118 	}
8119 	chk->rec.data.sid = sp->sid;
8120 	chk->rec.data.ppid = sp->ppid;
8121 	chk->rec.data.context = sp->context;
8122 	chk->rec.data.doing_fast_retransmit = 0;
8123 
8124 	chk->rec.data.timetodrop = sp->ts;
8125 	chk->flags = sp->act_flags;
8126 
8127 	if (sp->net) {
8128 		chk->whoTo = sp->net;
8129 		atomic_add_int(&chk->whoTo->ref_count, 1);
8130 	} else
8131 		chk->whoTo = NULL;
8132 
8133 	if (sp->holds_key_ref) {
8134 		chk->auth_keyid = sp->auth_keyid;
8135 		sctp_auth_key_acquire(stcb, chk->auth_keyid);
8136 		chk->holds_key_ref = 1;
8137 	}
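	/* Assign the next outgoing TSN (atomically on FreeBSD). */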
8138 #if defined(__FreeBSD__) && !defined(__Userspace__)
8139 	chk->rec.data.tsn = atomic_fetchadd_int(&asoc->sending_seq, 1);
8140 #else
8141 	chk->rec.data.tsn = asoc->sending_seq++;
8142 #endif
8143 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_OUTQ) {
8144 		sctp_misc_ints(SCTP_STRMOUT_LOG_SEND,
8145 		               (uint32_t)(uintptr_t)stcb, sp->length,
8146 		               (uint32_t)((chk->rec.data.sid << 16) | (0x0000ffff & chk->rec.data.mid)),
8147 		               chk->rec.data.tsn);
8148 	}
8149 	if (stcb->asoc.idata_supported == 0) {
8150 		dchkh = mtod(chk->data, struct sctp_data_chunk *);
8151 	} else {
8152 		ndchkh = mtod(chk->data, struct sctp_idata_chunk *);
8153 	}
8154 	/*
8155 	 * Put the rest of the things in place now. Size was done
8156 	 * earlier in previous loop prior to padding.
8157 	 */
8158 
8159 #ifdef SCTP_ASOCLOG_OF_TSNS
8160 	SCTP_TCB_LOCK_ASSERT(stcb);
8161 	if (asoc->tsn_out_at >= SCTP_TSN_LOG_SIZE) {
8162 		asoc->tsn_out_at = 0;
8163 		asoc->tsn_out_wrapped = 1;
8164 	}
8165 	asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.tsn;
8166 	asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.sid;
8167 	asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.mid;
8168 	asoc->out_tsnlog[asoc->tsn_out_at].sz = chk->send_size;
8169 	asoc->out_tsnlog[asoc->tsn_out_at].flgs = chk->rec.data.rcv_flags;
8170 	asoc->out_tsnlog[asoc->tsn_out_at].stcb = (void *)stcb;
8171 	asoc->out_tsnlog[asoc->tsn_out_at].in_pos = asoc->tsn_out_at;
8172 	asoc->out_tsnlog[asoc->tsn_out_at].in_out = 2;
8173 	asoc->tsn_out_at++;
8174 #endif
8175 	if (stcb->asoc.idata_supported == 0) {
8176 		dchkh->ch.chunk_type = SCTP_DATA;
8177 		dchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
8178 		dchkh->dp.tsn = htonl(chk->rec.data.tsn);
8179 		dchkh->dp.sid = htons(strq->sid);
8180 		dchkh->dp.ssn = htons((uint16_t)chk->rec.data.mid);
8181 		dchkh->dp.ppid = chk->rec.data.ppid;
8182 		dchkh->ch.chunk_length = htons(chk->send_size);
8183 	} else {
8184 		ndchkh->ch.chunk_type = SCTP_IDATA;
8185 		ndchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
8186 		ndchkh->dp.tsn = htonl(chk->rec.data.tsn);
8187 		ndchkh->dp.sid = htons(strq->sid);
8188 		ndchkh->dp.reserved = htons(0);
8189 		ndchkh->dp.mid = htonl(chk->rec.data.mid);
8190 		if (sp->fsn == 0)
8191 			ndchkh->dp.ppid_fsn.ppid = chk->rec.data.ppid;
8192 		else
8193 			ndchkh->dp.ppid_fsn.fsn = htonl(sp->fsn);
8194 		sp->fsn++;
8195 		ndchkh->ch.chunk_length = htons(chk->send_size);
8196 	}
8197 	/* Now advance the chk->send_size by the actual pad needed. */
8198 	if (chk->send_size < SCTP_SIZE32(chk->book_size)) {
8199 		/* need a pad */
8200 		struct mbuf *lm;
8201 		int pads;
8202 
8203 		pads = SCTP_SIZE32(chk->book_size) - chk->send_size;
8204 		lm = sctp_pad_lastmbuf(chk->data, pads, chk->last_mbuf);
8205 		if (lm != NULL) {
8206 			chk->last_mbuf = lm;
8207 			chk->pad_inplace = 1;
8208 		}
8209 		chk->send_size += pads;
8210 	}
8211 	if (PR_SCTP_ENABLED(chk->flags)) {
8212 		asoc->pr_sctp_cnt++;
8213 	}
8214 	if (sp->msg_is_complete && (sp->length == 0) && (sp->sender_all_done)) {
8215 		/* All done; pull and kill the message */
8216 		if (sp->put_last_out == 0) {
8217 			SCTP_PRINTF("Gak, put out entire msg with NO end!-2\n");
8218 			SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
8219 			            sp->sender_all_done,
8220 			            sp->length,
8221 			            sp->msg_is_complete,
8222 			            sp->put_last_out,
8223 			            send_lock_up);
8224 		}
8225 		if ((send_lock_up == 0) && (TAILQ_NEXT(sp, next) == NULL)) {
8226 			SCTP_TCB_SEND_LOCK(stcb);
8227 			send_lock_up = 1;
8228 		}
8229 		atomic_subtract_int(&asoc->stream_queue_cnt, 1);
8230 		TAILQ_REMOVE(&strq->outqueue, sp, next);
8231 		stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
8232 		if ((strq->state == SCTP_STREAM_RESET_PENDING) &&
8233 		    (strq->chunks_on_queues == 0) &&
8234 		    TAILQ_EMPTY(&strq->outqueue)) {
8235 			stcb->asoc.trigger_reset = 1;
8236 		}
8237 		if (sp->net) {
8238 			sctp_free_remote_addr(sp->net);
8239 			sp->net = NULL;
8240 		}
8241 		if (sp->data) {
8242 			sctp_m_freem(sp->data);
8243 			sp->data = NULL;
8244 		}
8245 		sctp_free_a_strmoq(stcb, sp, so_locked);
8246 	}
8247 	asoc->chunks_on_out_queue++;
8248 	strq->chunks_on_queues++;
8249 	TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next);
8250 	asoc->send_queue_cnt++;
8251 out_of:
8252 	if (send_lock_up) {
8253 		SCTP_TCB_SEND_UNLOCK(stcb);
8254 	}
8255 	return (to_move);
8256 }
8257 
8258 
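/*
 * Fill the association's send queue with data destined for this net:
 * let the stream scheduler pick streams and move data from them until
 * roughly one MTU worth of space is consumed, a stream gives up, or a
 * failure forces a bail-out.
 */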
8259 static void
8260 sctp_fill_outqueue(struct sctp_tcb *stcb,
8261     struct sctp_nets *net, int frag_point, int eeor_mode, int *quit_now, int so_locked)
8262 {
8263 	struct sctp_association *asoc;
8264 	struct sctp_stream_out *strq;
8265 	uint32_t space_left, moved, total_moved;
8266 	int bail, giveup;
8267 
8268 	SCTP_TCB_LOCK_ASSERT(stcb);
8269 	asoc = &stcb->asoc;
8270 	total_moved = 0;
8271 	switch (net->ro._l_addr.sa.sa_family) {
8272 #ifdef INET
8273 		case AF_INET:
8274 			space_left = net->mtu - SCTP_MIN_V4_OVERHEAD;
8275 			break;
8276 #endif
8277 #ifdef INET6
8278 		case AF_INET6:
8279 			space_left = net->mtu - SCTP_MIN_OVERHEAD;
8280 			break;
8281 #endif
8282 #if defined(__Userspace__)
8283 		case AF_CONN:
8284 			space_left = net->mtu - sizeof(struct sctphdr);
8285 			break;
8286 #endif
8287 		default:
8288 			/* TSNH */
8289 			space_left = net->mtu;
8290 			break;
8291 	}
8292 	/* Need an allowance for the data chunk header too */
8293 	space_left -= SCTP_DATA_CHUNK_OVERHEAD(stcb);
8294 
8295 	/* must fall on an even word (4-byte) boundary */
8296 	space_left &= 0xfffffffc;
8297 	strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
8298 	giveup = 0;
8299 	bail = 0;
8300 	while ((space_left > 0) && (strq != NULL)) {
8301 		moved = sctp_move_to_outqueue(stcb, strq, space_left, frag_point,
8302 		                              &giveup, eeor_mode, &bail, so_locked);
8303 		stcb->asoc.ss_functions.sctp_ss_scheduled(stcb, net, asoc, strq, moved);
8304 		if ((giveup != 0) || (bail != 0)) {
8305 			break;
8306 		}
8307 		strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
8308 		total_moved += moved;
8309 		if (space_left >= moved) {
8310 			space_left -= moved;
8311 		} else {
8312 			space_left = 0;
8313 		}
8314 		if (space_left >= SCTP_DATA_CHUNK_OVERHEAD(stcb)) {
8315 			space_left -= SCTP_DATA_CHUNK_OVERHEAD(stcb);
8316 		} else {
8317 			space_left = 0;
8318 		}
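		/* keep the remaining space 4-byte aligned */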
8319 		space_left &= 0xfffffffc;
8320 	}
8321 	if (bail != 0)
8322 		*quit_now = 1;
8323 
8324 	stcb->asoc.ss_functions.sctp_ss_packet_done(stcb, net, asoc);
8325 
8326 	if (total_moved == 0) {
8327 		if ((stcb->asoc.sctp_cmt_on_off == 0) &&
8328 		    (net == stcb->asoc.primary_destination)) {
8329 			/* ran dry for the primary network */
8330 			SCTP_STAT_INCR(sctps_primary_randry);
8331 		} else if (stcb->asoc.sctp_cmt_on_off > 0) {
8332 			/* ran dry with CMT on */
8333 			SCTP_STAT_INCR(sctps_cmt_randry);
8334 		}
8335 	}
8336 }
8337 
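/*
 * Mark every ECN-ECHO chunk on the control send queue as unsent so it
 * will be transmitted again.
 */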
8338 void
8339 sctp_fix_ecn_echo(struct sctp_association *asoc)
8340 {
8341 	struct sctp_tmit_chunk *chk;
8342 
8343 	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
8344 		if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
8345 			chk->sent = SCTP_DATAGRAM_UNSENT;
8346 		}
8347 	}
8348 }
8349 
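/*
 * Detach all pending stream data and send-queue chunks from the given
 * net so that they can be sent over a different destination.
 */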
8350 void
8351 sctp_move_chunks_from_net(struct sctp_tcb *stcb, struct sctp_nets *net)
8352 {
8353 	struct sctp_association *asoc;
8354 	struct sctp_tmit_chunk *chk;
8355 	struct sctp_stream_queue_pending *sp;
8356 	unsigned int i;
8357 
8358 	if (net == NULL) {
8359 		return;
8360 	}
8361 	asoc = &stcb->asoc;
8362 	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
8363 		TAILQ_FOREACH(sp, &stcb->asoc.strmout[i].outqueue, next) {
8364 			if (sp->net == net) {
8365 				sctp_free_remote_addr(sp->net);
8366 				sp->net = NULL;
8367 			}
8368 		}
8369 	}
8370 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
8371 		if (chk->whoTo == net) {
8372 			sctp_free_remote_addr(chk->whoTo);
8373 			chk->whoTo = NULL;
8374 		}
8375 	}
8376 }
8377 
8378 int
8379 sctp_med_chunk_output(struct sctp_inpcb *inp,
8380 		      struct sctp_tcb *stcb,
8381 		      struct sctp_association *asoc,
8382 		      int *num_out,
8383 		      int *reason_code,
8384 		      int control_only, int from_where,
8385 		      struct timeval *now, int *now_filled, int frag_point, int so_locked)
8386 {
8387 	/**
8388 	 * Ok this is the generic chunk service queue. We must do the
8389 	 * following:
8390 	 * - Service the stream queue that is next, moving any
8391 	 *   message (note I must get a complete message i.e. FIRST/MIDDLE and
8392 	 *   LAST to the out queue in one pass) and assigning TSN's. This
8393 	 *   only applies though if the peer does not support NDATA. For NDATA
8394 	 *   chunks it's ok to not send the entire message ;-)
8395 	 * - Check to see if the cwnd/rwnd allows any output; if so, go ahead
8396 	 *   and formulate and send the low level chunks, making sure to
8397 	 *   combine any control in the control chunk queue also.
8398 	 */
8399 	struct sctp_nets *net, *start_at, *sack_goes_to = NULL, *old_start_at = NULL;
8400 	struct mbuf *outchain, *endoutchain;
8401 	struct sctp_tmit_chunk *chk, *nchk;
8402 
8403 	/* temp arrays for unlinking */
8404 	struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
8405 	int no_fragmentflg, error;
8406 	unsigned int max_rwnd_per_dest, max_send_per_dest;
8407 	int one_chunk, hbflag, skip_data_for_this_net;
8408 	int asconf, cookie, no_out_cnt;
8409 	int bundle_at, ctl_cnt, no_data_chunks, eeor_mode;
8410 	unsigned int mtu, r_mtu, omtu, mx_mtu, to_out;
8411 	int tsns_sent = 0;
8412 	uint32_t auth_offset;
8413 	struct sctp_auth_chunk *auth;
8414 	uint16_t auth_keyid;
8415 	int override_ok = 1;
8416 	int skip_fill_up = 0;
8417 	int data_auth_reqd = 0;
8418 	/* JRS 5/14/07 - Add flag for whether a heartbeat is sent to
8419 	   the destination. */
8420 	int quit_now = 0;
8421 
8422 #if defined(__APPLE__) && !defined(__Userspace__)
8423 	if (so_locked) {
8424 		sctp_lock_assert(SCTP_INP_SO(inp));
8425 	} else {
8426 		sctp_unlock_assert(SCTP_INP_SO(inp));
8427 	}
8428 #endif
8429 	*num_out = 0;
8430 	*reason_code = 0;
8431 	auth_keyid = stcb->asoc.authinfo.active_keyid;
8432 	if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
8433 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
8434 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
8435 		eeor_mode = 1;
8436 	} else {
8437 		eeor_mode = 0;
8438 	}
8439 	ctl_cnt = no_out_cnt = asconf = cookie = 0;
8440 	/*
8441 	 * First lets prime the pump. For each destination, if there is room
8442 	 * in the flight size, attempt to pull an MTU's worth out of the
8443 	 * stream queues into the general send_queue
8444 	 */
8445 #ifdef SCTP_AUDITING_ENABLED
8446 	sctp_audit_log(0xC2, 2);
8447 #endif
8448 	SCTP_TCB_LOCK_ASSERT(stcb);
8449 	hbflag = 0;
8450 	if (control_only)
8451 		no_data_chunks = 1;
8452 	else
8453 		no_data_chunks = 0;
8454 
8455 	/* Nothing possible to send? */
8456 	if ((TAILQ_EMPTY(&asoc->control_send_queue) ||
8457 	     (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) &&
8458 	    TAILQ_EMPTY(&asoc->asconf_send_queue) &&
8459 	    TAILQ_EMPTY(&asoc->send_queue) &&
8460 	    sctp_is_there_unsent_data(stcb, so_locked) == 0) {
8461 	nothing_to_send:
8462 		*reason_code = 9;
8463 		return (0);
8464 	}
8465 	if (asoc->peers_rwnd == 0) {
8466 		/* No room in the peer's rwnd */
8467 		*reason_code = 1;
8468 		if (asoc->total_flight > 0) {
8469 			/* we are allowed one chunk in flight */
8470 			no_data_chunks = 1;
8471 		}
8472 	}
8473 	if (stcb->asoc.ecn_echo_cnt_onq) {
8474 		/* Record where a sack goes, if any */
8475 		if (no_data_chunks &&
8476 		    (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) {
8477 			/* Nothing but ECNe to send - we don't do that */
8478 			goto nothing_to_send;
8479 		}
8480 		TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
8481 			if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8482 			    (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
8483 				sack_goes_to = chk->whoTo;
8484 				break;
8485 			}
8486 		}
8487 	}
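	/*
	 * Split the peer's rwnd (plus what is already in flight) and the
	 * socket send buffer limit evenly across all destinations.
	 */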
8488 	max_rwnd_per_dest = ((asoc->peers_rwnd + asoc->total_flight) / asoc->numnets);
8489 	if (stcb->sctp_socket)
8490 		max_send_per_dest = SCTP_SB_LIMIT_SND(stcb->sctp_socket) / asoc->numnets;
8491 	else
8492 		max_send_per_dest = 0;
8493 	if (no_data_chunks == 0) {
8494 		/* How many non-directed chunks are there? */
8495 		TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
8496 			if (chk->whoTo == NULL) {
8497 				/* We already have non-directed
8498 				 * chunks on the queue, no need
8499 				 * to do a fill-up.
8500 				 */
8501 				skip_fill_up = 1;
8502 				break;
8503 			}
8504 		}
8505 
8506 	}
8507 	if ((no_data_chunks == 0) &&
8508 	    (skip_fill_up == 0) &&
8509 	    (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc))) {
8510 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
8511 			/*
8512 			 * This loop takes each net in turn; if it has
8513 			 * space in its cwnd and is a valid data destination
8514 			 * (when CMT is off), it calls sctp_fill_outqueue
8515 			 * for the net. This gets data onto the send queue
8516 			 * for that network.
8517 			 *
8518 			 * In sctp_fill_outqueue TSN's are assigned and
8519 			 * data is copied out of the stream buffers. Note
8520 			 * that this is mostly copy by reference (we hope).
8521 			 */
8522 			net->window_probe = 0;
8523 			if ((net != stcb->asoc.alternate) &&
8524 			    ((net->dest_state & SCTP_ADDR_PF) ||
8525 			     (!(net->dest_state & SCTP_ADDR_REACHABLE)) ||
8526 			     (net->dest_state & SCTP_ADDR_UNCONFIRMED))) {
8527 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8528 					sctp_log_cwnd(stcb, net, 1,
8529 						      SCTP_CWND_LOG_FILL_OUTQ_CALLED);
8530 				}
8531 				continue;
8532 			}
8533 			if ((stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) &&
8534 			    (net->flight_size == 0)) {
8535 				(*stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins)(stcb, net);
8536 			}
8537 			if (net->flight_size >= net->cwnd) {
8538 				/* skip this network, no room - can't fill */
8539 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8540 					sctp_log_cwnd(stcb, net, 3,
8541 						      SCTP_CWND_LOG_FILL_OUTQ_CALLED);
8542 				}
8543 				continue;
8544 			}
8545 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8546 				sctp_log_cwnd(stcb, net, 4, SCTP_CWND_LOG_FILL_OUTQ_CALLED);
8547 			}
8548 			sctp_fill_outqueue(stcb, net, frag_point, eeor_mode, &quit_now, so_locked);
8549 			if (quit_now) {
8550 				/* memory alloc failure */
8551 				no_data_chunks = 1;
8552 				break;
8553 			}
8554 		}
8555 	}
8556 	/* now service each destination and send out what we can for it */
8557 	/* Nothing to send? */
8558 	if (TAILQ_EMPTY(&asoc->control_send_queue) &&
8559 	    TAILQ_EMPTY(&asoc->asconf_send_queue) &&
8560 	    TAILQ_EMPTY(&asoc->send_queue)) {
8561 		*reason_code = 8;
8562 		return (0);
8563 	}
8564 
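	/*
	 * With CMT enabled, rotate which destination we start with so that
	 * successive calls spread the sends across all nets.
	 */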
8565 	if (asoc->sctp_cmt_on_off > 0) {
8566 		/* get the last start point */
8567 		start_at = asoc->last_net_cmt_send_started;
8568 		if (start_at == NULL) {
8569 			/* NULL, so start at the beginning */
8570 			start_at = TAILQ_FIRST(&asoc->nets);
8571 		} else {
8572 			start_at = TAILQ_NEXT(asoc->last_net_cmt_send_started, sctp_next);
8573 			if (start_at == NULL) {
8574 				start_at = TAILQ_FIRST(&asoc->nets);
8575 			}
8576 		}
8577 		asoc->last_net_cmt_send_started = start_at;
8578 	} else {
8579 		start_at = TAILQ_FIRST(&asoc->nets);
8580 	}
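	/*
	 * Direct any control chunk without a destination to the alternate
	 * (or primary) address.
	 */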
8581 	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
8582 		if (chk->whoTo == NULL) {
8583 			if (asoc->alternate) {
8584 				chk->whoTo = asoc->alternate;
8585 			} else {
8586 				chk->whoTo = asoc->primary_destination;
8587 			}
8588 			atomic_add_int(&chk->whoTo->ref_count, 1);
8589 		}
8590 	}
8591 	old_start_at = NULL;
8592 again_one_more_time:
8593 	for (net = start_at ; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
8594 		/* how much can we send? */
8595 		/* SCTPDBG("Examine for sending net:%x\n", (uint32_t)net); */
8596 		if (old_start_at && (old_start_at == net)) {
8597 			/* we have been through the list completely */
8598 			break;
8599 		}
8600 		tsns_sent = 0xa;
8601 		if (TAILQ_EMPTY(&asoc->control_send_queue) &&
8602 		    TAILQ_EMPTY(&asoc->asconf_send_queue) &&
8603 		    (net->flight_size >= net->cwnd)) {
8604 			/* Nothing on control or asconf and flight is full, we can skip
8605 			 * even in the CMT case.
8606 			 */
8607 			continue;
8608 		}
8609 		bundle_at = 0;
8610 		endoutchain = outchain = NULL;
8611 		auth = NULL;
8612 		auth_offset = 0;
8613 		no_fragmentflg = 1;
8614 		one_chunk = 0;
8615 		if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
8616 			skip_data_for_this_net = 1;
8617 		} else {
8618 			skip_data_for_this_net = 0;
8619 		}
8620 		switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
8621 #ifdef INET
8622 		case AF_INET:
8623 			mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8624 			break;
8625 #endif
8626 #ifdef INET6
8627 		case AF_INET6:
8628 			mtu = net->mtu - SCTP_MIN_OVERHEAD;
8629 			break;
8630 #endif
8631 #if defined(__Userspace__)
8632 		case AF_CONN:
8633 			mtu = net->mtu - sizeof(struct sctphdr);
8634 			break;
8635 #endif
8636 		default:
8637 			/* TSNH */
8638 			mtu = net->mtu;
8639 			break;
8640 		}
8641 		mx_mtu = mtu;
8642 		to_out = 0;
8643 		if (mtu > asoc->peers_rwnd) {
8644 			if (asoc->total_flight > 0) {
8645 				/* We have a packet in flight somewhere */
8646 				r_mtu = asoc->peers_rwnd;
8647 			} else {
8648 				/* We are always allowed to send one MTU out */
8649 				one_chunk = 1;
8650 				r_mtu = mtu;
8651 			}
8652 		} else {
8653 			r_mtu = mtu;
8654 		}
8655 		error = 0;
8656 		/************************/
8657 		/* ASCONF transmission */
8658 		/************************/
8659 		/* Now first lets go through the asconf queue */
8660 		TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
8661 			if (chk->rec.chunk_id.id != SCTP_ASCONF) {
8662 				continue;
8663 			}
8664 			if (chk->whoTo == NULL) {
8665 				if (asoc->alternate == NULL) {
8666 					if (asoc->primary_destination != net) {
8667 						break;
8668 					}
8669 				} else {
8670 					if (asoc->alternate != net) {
8671 						break;
8672 					}
8673 				}
8674 			} else {
8675 				if (chk->whoTo != net) {
8676 					break;
8677 				}
8678 			}
8679 			if (chk->data == NULL) {
8680 				break;
8681 			}
8682 			if (chk->sent != SCTP_DATAGRAM_UNSENT &&
8683 			    chk->sent != SCTP_DATAGRAM_RESEND) {
8684 				break;
8685 			}
8686 			/*
8687 			 * if no AUTH is yet included and this chunk
8688 			 * requires it, make sure to account for it.  We
8689 			 * don't apply the size until the AUTH chunk is
8690 			 * actually added below in case there is no room for
8691 			 * this chunk. NOTE: we overload the use of "omtu"
8692 			 * here
8693 			 */
8694 			if ((auth == NULL) &&
8695 			    sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8696 							stcb->asoc.peer_auth_chunks)) {
8697 				omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8698 			} else
8699 				omtu = 0;
8700 			/* Here we do NOT factor the r_mtu */
8701 			if ((chk->send_size < (int)(mtu - omtu)) ||
8702 			    (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
8703 				/*
8704 				 * We probably should glom the mbuf chain
8705 				 * from the chk->data for control but the
8706 				 * problem is it becomes yet one more level
8707 				 * of tracking to do if for some reason
8708 				 * output fails. Then I have got to
8709 				 * reconstruct the merged control chain.. el
8710 				 * yucko.. for now we take the easy way and
8711 				 * do the copy
8712 				 */
8713 				/*
8714 				 * Add an AUTH chunk, if chunk requires it
8715 				 * save the offset into the chain for AUTH
8716 				 */
8717 				if ((auth == NULL) &&
8718 				    (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8719 								 stcb->asoc.peer_auth_chunks))) {
8720 					outchain = sctp_add_auth_chunk(outchain,
8721 								       &endoutchain,
8722 								       &auth,
8723 								       &auth_offset,
8724 								       stcb,
8725 								       chk->rec.chunk_id.id);
8726 					SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8727 				}
8728 				outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
8729 							       (int)chk->rec.chunk_id.can_take_data,
8730 							       chk->send_size, chk->copy_by_ref);
8731 				if (outchain == NULL) {
8732 					*reason_code = 8;
8733 					SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8734 					return (ENOMEM);
8735 				}
8736 				SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8737 				/* update our MTU size */
8738 				if (mtu > (chk->send_size + omtu))
8739 					mtu -= (chk->send_size + omtu);
8740 				else
8741 					mtu = 0;
8742 				to_out += (chk->send_size + omtu);
8743 				/* Do clear IP_DF ? */
8744 				if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8745 					no_fragmentflg = 0;
8746 				}
8747 				if (chk->rec.chunk_id.can_take_data)
8748 					chk->data = NULL;
8749 				/*
8750 				 * set hb flag since we can
8751 				 * use these for RTO
8752 				 */
8753 				hbflag = 1;
8754 				asconf = 1;
8755 				/*
8756 				 * should sysctl this: don't
8757 				 * bundle data with ASCONF
8758 				 * since it requires AUTH
8759 				 */
8760 				no_data_chunks = 1;
8761 				chk->sent = SCTP_DATAGRAM_SENT;
8762 				if (chk->whoTo == NULL) {
8763 					chk->whoTo = net;
8764 					atomic_add_int(&net->ref_count, 1);
8765 				}
8766 				chk->snd_count++;
8767 				if (mtu == 0) {
8768 					/*
8769 					 * Ok we are out of room but we can
8770 					 * output without affecting the
8771 					 * flight size since this little guy
8772 					 * is a control only packet.
8773 					 */
8774 					sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
8775 					/*
8776 					 * do NOT clear the asconf
8777 					 * flag as it is used to do
8778 					 * appropriate source address
8779 					 * selection.
8780 					 */
8781 					if (*now_filled == 0) {
8782 						(void)SCTP_GETTIME_TIMEVAL(now);
8783 						*now_filled = 1;
8784 					}
8785 					net->last_sent_time = *now;
8786 					hbflag = 0;
8787 					if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
8788 					                                        (struct sockaddr *)&net->ro._l_addr,
8789 					                                        outchain, auth_offset, auth,
8790 					                                        stcb->asoc.authinfo.active_keyid,
8791 					                                        no_fragmentflg, 0, asconf,
8792 					                                        inp->sctp_lport, stcb->rport,
8793 					                                        htonl(stcb->asoc.peer_vtag),
8794 					                                        net->port, NULL,
8795 #if defined(__FreeBSD__) && !defined(__Userspace__)
8796 					                                        0, 0,
8797 #endif
8798 					                                        so_locked))) {
8799 						/* error, we could not output */
8800 						SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
8801 						if (from_where == 0) {
8802 							SCTP_STAT_INCR(sctps_lowlevelerrusr);
8803 						}
8804 						if (error == ENOBUFS) {
8805 							asoc->ifp_had_enobuf = 1;
8806 							SCTP_STAT_INCR(sctps_lowlevelerr);
8807 						}
8808 						/* error, could not output */
8809 						if (error == EHOSTUNREACH) {
8810 							/*
8811 							 * Destination went
8812 							 * unreachable
8813 							 * during this send
8814 							 */
8815 							sctp_move_chunks_from_net(stcb, net);
8816 						}
8817 						*reason_code = 7;
8818 						break;
8819 					} else {
8820 						asoc->ifp_had_enobuf = 0;
8821 					}
8822 					/*
8823 					 * increase the number we sent, if a
8824 					 * cookie is sent we don't tell them
8825 					 * any was sent out.
8826 					 */
8827 					outchain = endoutchain = NULL;
8828 					auth = NULL;
8829 					auth_offset = 0;
8830 					if (!no_out_cnt)
8831 						*num_out += ctl_cnt;
8832 					/* recalc a clean slate and setup */
8833 					switch (net->ro._l_addr.sa.sa_family) {
8834 #ifdef INET
8835 						case AF_INET:
8836 							mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8837 							break;
8838 #endif
8839 #ifdef INET6
8840 						case AF_INET6:
8841 							mtu = net->mtu - SCTP_MIN_OVERHEAD;
8842 							break;
8843 #endif
8844 #if defined(__Userspace__)
8845 						case AF_CONN:
8846 							mtu = net->mtu - sizeof(struct sctphdr);
8847 							break;
8848 #endif
8849 						default:
8850 							/* TSNH */
8851 							mtu = net->mtu;
8852 							break;
8853 					}
8854 					to_out = 0;
8855 					no_fragmentflg = 1;
8856 				}
8857 			}
8858 		}
8859 		if (error != 0) {
8860 			/* try next net */
8861 			continue;
8862 		}
8863 		/************************/
8864 		/* Control transmission */
8865 		/************************/
8866 		/* Now first lets go through the control queue */
8867 		TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
8868 			if ((sack_goes_to) &&
8869 			    (chk->rec.chunk_id.id == SCTP_ECN_ECHO) &&
8870 			    (chk->whoTo != sack_goes_to)) {
8871 				/*
8872 				 * if we have a sack in queue, and we are looking at an
8873 				 * ecn echo that is NOT queued to where the sack is going..
8874 				 */
8875 				if (chk->whoTo == net) {
8876 					/* Don't transmit it to where its going (current net) */
8877 					continue;
8878 				} else if (sack_goes_to == net) {
8879 					/* But do transmit it to this address */
8880 					goto skip_net_check;
8881 				}
8882 			}
8883 			if (chk->whoTo == NULL) {
8884 				if (asoc->alternate == NULL) {
8885 					if (asoc->primary_destination != net) {
8886 						continue;
8887 					}
8888 				} else {
8889 					if (asoc->alternate != net) {
8890 						continue;
8891 					}
8892 				}
8893 			} else {
8894 				if (chk->whoTo != net) {
8895 					continue;
8896 				}
8897 			}
8898 		skip_net_check:
8899 			if (chk->data == NULL) {
8900 				continue;
8901 			}
8902 			if (chk->sent != SCTP_DATAGRAM_UNSENT) {
8903 				/*
8904 				 * It must be unsent. Cookies and ASCONF's
8905 				 * hang around, but their timers will force
8906 				 * them out when they are marked for resend.
8907 				 */
8908 				continue;
8909 			}
8910 			/*
8911 			 * if no AUTH is yet included and this chunk
8912 			 * requires it, make sure to account for it.  We
8913 			 * don't apply the size until the AUTH chunk is
8914 			 * actually added below in case there is no room for
8915 			 * this chunk. NOTE: we overload the use of "omtu"
8916 			 * here
8917 			 */
8918 			if ((auth == NULL) &&
8919 			    sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8920 							stcb->asoc.peer_auth_chunks)) {
8921 				omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8922 			} else
8923 				omtu = 0;
8924 			/* Here we do NOT factor the r_mtu */
8925 			if ((chk->send_size <= (int)(mtu - omtu)) ||
8926 			    (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
8927 				/*
8928 				 * We probably should glom the mbuf chain
8929 				 * from the chk->data for control but the
8930 				 * problem is it becomes yet one more level
8931 				 * of tracking to do if for some reason
8932 				 * output fails. Then I have got to
8933 				 * reconstruct the merged control chain.. el
8934 				 * yucko.. for now we take the easy way and
8935 				 * do the copy
8936 				 */
8937 				/*
8938 				 * Add an AUTH chunk, if chunk requires it
8939 				 * save the offset into the chain for AUTH
8940 				 */
8941 				if ((auth == NULL) &&
8942 				    (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8943 								 stcb->asoc.peer_auth_chunks))) {
8944 					outchain = sctp_add_auth_chunk(outchain,
8945 								       &endoutchain,
8946 								       &auth,
8947 								       &auth_offset,
8948 								       stcb,
8949 								       chk->rec.chunk_id.id);
8950 					SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8951 				}
8952 				outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
8953 							       (int)chk->rec.chunk_id.can_take_data,
8954 							       chk->send_size, chk->copy_by_ref);
8955 				if (outchain == NULL) {
8956 					*reason_code = 8;
8957 					SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8958 					return (ENOMEM);
8959 				}
8960 				SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8961 				/* update our MTU size */
8962 				if (mtu > (chk->send_size + omtu))
8963 					mtu -= (chk->send_size + omtu);
8964 				else
8965 					mtu = 0;
8966 				to_out += (chk->send_size + omtu);
8967 				/* Do clear IP_DF ? */
8968 				if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8969 					no_fragmentflg = 0;
8970 				}
8971 				if (chk->rec.chunk_id.can_take_data)
8972 					chk->data = NULL;
8973 				/* Mark things to be removed, if needed */
8974 				if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8975 				    (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
8976 				    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
8977 				    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
8978 				    (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
8979 				    (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
8980 				    (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
8981 				    (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
8982 				    (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
8983 				    (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
8984 				    (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
8985 					if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) {
8986 						hbflag = 1;
8987 					}
8988 					/* remove these chunks at the end */
8989 					if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8990 					    (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
8991 						/* turn off the timer */
8992 						if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
8993 							sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
8994 							                inp, stcb, NULL,
8995 							                SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_1);
8996 						}
8997 					}
8998 					ctl_cnt++;
8999 				} else {
9000 					/*
9001 					 * Other chunks, since they have
9002 					 * timers running (i.e. COOKIE)
9003 					 * we just "trust" that it
9004 					 * gets sent or retransmitted.
9005 					 */
9006 					ctl_cnt++;
9007 					if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
9008 						cookie = 1;
9009 						no_out_cnt = 1;
9010 					} else if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
9011 						/*
9012 						 * Increment the ECNE send count here;
9013 						 * this means we may be over-zealous in
9014 						 * our counting if the send fails, but it's
9015 						 * the best place to do it (we used to do
9016 						 * it when queueing the chunk, but that did
9017 						 * not tell how many times it was sent).
9018 						 */
9019 						SCTP_STAT_INCR(sctps_sendecne);
9020 					}
9021 					chk->sent = SCTP_DATAGRAM_SENT;
9022 					if (chk->whoTo == NULL) {
9023 						chk->whoTo = net;
9024 						atomic_add_int(&net->ref_count, 1);
9025 					}
9026 					chk->snd_count++;
9027 				}
9028 				if (mtu == 0) {
9029 					/*
9030 					 * Ok we are out of room but we can
9031 					 * output without affecting the
9032 					 * flight size since this little guy
9033 					 * is a control only packet.
9034 					 */
9035 					if (asconf) {
9036 						sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
9037 						/*
9038 						 * do NOT clear the asconf
9039 						 * flag as it is used to do
9040 						 * appropriate source address
9041 						 * selection.
9042 						 */
9043 					}
9044 					if (cookie) {
9045 						sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
9046 						cookie = 0;
9047 					}
9048 					/* Only HB or ASCONF advances time */
9049 					if (hbflag) {
9050 						if (*now_filled == 0) {
9051 							(void)SCTP_GETTIME_TIMEVAL(now);
9052 							*now_filled = 1;
9053 						}
9054 						net->last_sent_time = *now;
9055 						hbflag = 0;
9056 					}
9057 					if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
9058 					                                        (struct sockaddr *)&net->ro._l_addr,
9059 					                                        outchain,
9060 					                                        auth_offset, auth,
9061 					                                        stcb->asoc.authinfo.active_keyid,
9062 					                                        no_fragmentflg, 0, asconf,
9063 					                                        inp->sctp_lport, stcb->rport,
9064 					                                        htonl(stcb->asoc.peer_vtag),
9065 					                                        net->port, NULL,
9066 #if defined(__FreeBSD__) && !defined(__Userspace__)
9067 					                                        0, 0,
9068 #endif
9069 					                                        so_locked))) {
9070 						/* error, we could not output */
9071 						SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
9072 						if (from_where == 0) {
9073 							SCTP_STAT_INCR(sctps_lowlevelerrusr);
9074 						}
9075 						if (error == ENOBUFS) {
9076 							asoc->ifp_had_enobuf = 1;
9077 							SCTP_STAT_INCR(sctps_lowlevelerr);
9078 						}
9079 						if (error == EHOSTUNREACH) {
9080 							/*
9081 							 * Destination went
9082 							 * unreachable
9083 							 * during this send
9084 							 */
9085 							sctp_move_chunks_from_net(stcb, net);
9086 						}
9087 						*reason_code = 7;
9088 						break;
9089 					} else {
9090 						asoc->ifp_had_enobuf = 0;
9091 					}
9092 					/*
9093 					 * increase the number we sent, if a
9094 					 * cookie is sent we don't tell them
9095 					 * any was sent out.
9096 					 */
9097 					outchain = endoutchain = NULL;
9098 					auth = NULL;
9099 					auth_offset = 0;
9100 					if (!no_out_cnt)
9101 						*num_out += ctl_cnt;
9102 					/* recalc a clean slate and setup */
9103 					switch (net->ro._l_addr.sa.sa_family) {
9104 #ifdef INET
9105 						case AF_INET:
9106 							mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
9107 							break;
9108 #endif
9109 #ifdef INET6
9110 						case AF_INET6:
9111 							mtu = net->mtu - SCTP_MIN_OVERHEAD;
9112 							break;
9113 #endif
9114 #if defined(__Userspace__)
9115 						case AF_CONN:
9116 							mtu = net->mtu - sizeof(struct sctphdr);
9117 							break;
9118 #endif
9119 						default:
9120 							/* TSNH */
9121 							mtu = net->mtu;
9122 							break;
9123 					}
9124 					to_out = 0;
9125 					no_fragmentflg = 1;
9126 				}
9127 			}
9128 		}
9129 		if (error != 0) {
9130 			/* try next net */
9131 			continue;
9132 		}
9133 		/* JRI: if dest is in PF state, do not send data to it */
9134 		if ((asoc->sctp_cmt_on_off > 0) &&
9135 		    (net != stcb->asoc.alternate) &&
9136 		    (net->dest_state & SCTP_ADDR_PF)) {
9137 			goto no_data_fill;
9138 		}
9139 		if (net->flight_size >= net->cwnd) {
9140 			goto no_data_fill;
9141 		}
9142 		if ((asoc->sctp_cmt_on_off > 0) &&
9143 		    (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_RECV_BUFFER_SPLITTING) &&
9144 		    (net->flight_size > max_rwnd_per_dest)) {
9145 			goto no_data_fill;
9146 		}
9147 		/*
9148 		 * We need a specific accounting for the usage of the
9149 		 * send buffer. We also need to check the number of messages
9150 		 * per net. For now, this is better than nothing and it
9151 		 * is disabled by default...
9152 		 */
9153 		if ((asoc->sctp_cmt_on_off > 0) &&
9154 		    (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_SEND_BUFFER_SPLITTING) &&
9155 		    (max_send_per_dest > 0) &&
9156 		    (net->flight_size > max_send_per_dest)) {
9157 			goto no_data_fill;
9158 		}
9159 		/*********************/
9160 		/* Data transmission */
9161 		/*********************/
9162 		/*
9163 		 * if AUTH for DATA is required and no AUTH has been added
9164 		 * yet, account for this in the mtu now... if no data can be
9165 		 * bundled, this adjustment won't matter anyways since the
9166 		 * packet will be going out...
9167 		 */
9168 		data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA,
9169 							     stcb->asoc.peer_auth_chunks);
9170 		if (data_auth_reqd && (auth == NULL)) {
9171 			mtu -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
9172 		}
9173 		/* now let's add any data within the MTU constraints */
9174 		switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
9175 #ifdef INET
9176 		case AF_INET:
9177 			if (net->mtu > SCTP_MIN_V4_OVERHEAD)
9178 				omtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
9179 			else
9180 				omtu = 0;
9181 			break;
9182 #endif
9183 #ifdef INET6
9184 		case AF_INET6:
9185 			if (net->mtu > SCTP_MIN_OVERHEAD)
9186 				omtu = net->mtu - SCTP_MIN_OVERHEAD;
9187 			else
9188 				omtu = 0;
9189 			break;
9190 #endif
9191 #if defined(__Userspace__)
9192 		case AF_CONN:
9193 			if (net->mtu > sizeof(struct sctphdr)) {
9194 				omtu = net->mtu - sizeof(struct sctphdr);
9195 			} else {
9196 				omtu = 0;
9197 			}
9198 			break;
9199 #endif
9200 		default:
9201 			/* TSNH */
9202 			omtu = 0;
9203 			break;
9204 		}
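		/*
		 * Only consider bundling DATA chunks when the association is
		 * OPEN or SHUTDOWN_RECEIVED and data is not being skipped for
		 * this net, or when a COOKIE-ECHO is being sent.
		 */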
9205 		if ((((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
9206 		      (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
9207 		     (skip_data_for_this_net == 0)) ||
9208 		    (cookie)) {
9209 			TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
9210 				if (no_data_chunks) {
9211 					/* let only control go out */
9212 					*reason_code = 1;
9213 					break;
9214 				}
9215 				if (net->flight_size >= net->cwnd) {
9216 					/* skip this net, no room for data */
9217 					*reason_code = 2;
9218 					break;
9219 				}
9220 				if ((chk->whoTo != NULL) &&
9221 				    (chk->whoTo != net)) {
9222 					/* Don't send the chunk on this net */
9223 					continue;
9224 				}
9225 
9226 				if (asoc->sctp_cmt_on_off == 0) {
9227 					if ((asoc->alternate) &&
9228 					    (asoc->alternate != net) &&
9229 					    (chk->whoTo == NULL)) {
9230 						continue;
9231 					} else if ((net != asoc->primary_destination) &&
9232 						   (asoc->alternate == NULL) &&
9233 						   (chk->whoTo == NULL)) {
9234 						continue;
9235 					}
9236 				}
9237 				if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) {
9238 					/*-
9239 					 * Strange: we have a chunk that is
9240 					 * too big for its destination and
9241 					 * yet no fragment-ok flag.
9242 					 * Something went wrong when the
9243 					 * PMTU changed... we did not mark
9244 					 * this chunk for some reason? Fix
9245 					 * it here by letting IP fragment
9246 					 * it for now and print a warning.
9247 					 * This really should not
9248 					 * happen ...
9249 					 */
9250 					SCTP_PRINTF("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n",
9251 						    chk->send_size, mtu);
9252 					chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
9253 				}
9254 				if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) &&
9255 				    (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
9256 					struct sctp_data_chunk *dchkh;
9257 
9258 					dchkh = mtod(chk->data, struct sctp_data_chunk *);
9259 					dchkh->ch.chunk_flags |= SCTP_DATA_SACK_IMMEDIATELY;
9260 				}
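				/*
				 * Bundle this chunk if it fits within both
				 * remaining size budgets (mtu and r_mtu), or
				 * if it is allowed to be fragmented and fits
				 * in the peer's rwnd.
				 */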
9261 				if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) ||
9262 				    ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) {
9263 					/* ok we will add this one */
9264 
9265 					/*
9266 					 * Add an AUTH chunk if the chunk
9267 					 * requires it; save the offset into
9268 					 * the chain for AUTH.
9269 					 */
9270 					if (data_auth_reqd) {
9271 						if (auth == NULL) {
9272 							outchain = sctp_add_auth_chunk(outchain,
9273 										       &endoutchain,
9274 										       &auth,
9275 										       &auth_offset,
9276 										       stcb,
9277 										       SCTP_DATA);
9278 							auth_keyid = chk->auth_keyid;
9279 							override_ok = 0;
9280 							SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9281 						} else if (override_ok) {
9282 							/* use this data's keyid */
9283 							auth_keyid = chk->auth_keyid;
9284 							override_ok = 0;
9285 						} else if (auth_keyid != chk->auth_keyid) {
9286 							/* different keyid, so done bundling */
9287 							break;
9288 						}
9289 					}
9290 					outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0,
9291 								       chk->send_size, chk->copy_by_ref);
9292 					if (outchain == NULL) {
9293 						SCTPDBG(SCTP_DEBUG_OUTPUT3, "No memory?\n");
9294 						if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
9295 							sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9296 						}
9297 						*reason_code = 3;
9298 						SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
9299 						return (ENOMEM);
9300 					}
9301 					/* update our MTU size */
9302 					/* Do clear IP_DF ? */
9303 					if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
9304 						no_fragmentflg = 0;
9305 					}
9306 					/* unsigned subtraction of mtu */
9307 					if (mtu > chk->send_size)
9308 						mtu -= chk->send_size;
9309 					else
9310 						mtu = 0;
9311 					/* unsigned subtraction of r_mtu */
9312 					if (r_mtu > chk->send_size)
9313 						r_mtu -= chk->send_size;
9314 					else
9315 						r_mtu = 0;
9316 
9317 					to_out += chk->send_size;
9318 					if ((to_out > mx_mtu) && no_fragmentflg) {
9319 #ifdef INVARIANTS
9320 						panic("Exceeding mtu of %d out size is %d", mx_mtu, to_out);
9321 #else
9322 						SCTP_PRINTF("Exceeding mtu of %d out size is %d\n",
9323 							    mx_mtu, to_out);
9324 #endif
9325 					}
9326 					chk->window_probe = 0;
9327 					data_list[bundle_at++] = chk;
9328 					if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
9329 						break;
9330 					}
9331 					if (chk->sent == SCTP_DATAGRAM_UNSENT) {
9332 						if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
9333 							SCTP_STAT_INCR_COUNTER64(sctps_outorderchunks);
9334 						} else {
9335 							SCTP_STAT_INCR_COUNTER64(sctps_outunorderchunks);
9336 						}
9337 						if (((chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) == SCTP_DATA_LAST_FRAG) &&
9338 						    ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0))
9339 							/* Count the number of user messages that were
9340 							 * fragmented; we do this by counting only when
9341 							 * we see a LAST fragment.
9342 							 */
9343 							SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs);
9344 					}
9345 					if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) {
9346 						if ((one_chunk) && (stcb->asoc.total_flight == 0)) {
9347 							data_list[0]->window_probe = 1;
9348 							net->window_probe = 1;
9349 						}
9350 						break;
9351 					}
9352 				} else {
9353 					/*
9354 					 * Must be sent in order of the
9355 					 * TSNs (on a network)
9356 					 */
9357 					break;
9358 				}
9359 			}	/* for (chunk gather loop for this net) */
9360 		}		/* if asoc.state OPEN */
9361 	no_data_fill:
9362 		/* Is there something to send for this destination? */
9363 		if (outchain) {
9364 			/* We may need to start a control timer or two */
9365 			if (asconf) {
9366 				sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp,
9367 						 stcb, net);
9368 				/*
9369 				 * do NOT clear the asconf flag as it is used
9370 				 * to do appropriate source address selection.
9371 				 */
9372 			}
9373 			if (cookie) {
9374 				sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
9375 				cookie = 0;
9376 			}
9377 			/* must start a send timer if data is being sent */
9378 			if (bundle_at && (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
9379 				/*
9380 				 * no timer running on this destination;
9381 				 * restart it.
9382 				 */
9383 				sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9384 			}
9385 			if (bundle_at || hbflag) {
9386 				/* For data/asconf and hb set time */
9387 				if (*now_filled == 0) {
9388 					(void)SCTP_GETTIME_TIMEVAL(now);
9389 					*now_filled = 1;
9390 				}
9391 				net->last_sent_time = *now;
9392 			}
9393 			/* Now send it, if there is anything to send :> */
9394 			if ((error = sctp_lowlevel_chunk_output(inp,
9395 			                                        stcb,
9396 			                                        net,
9397 			                                        (struct sockaddr *)&net->ro._l_addr,
9398 			                                        outchain,
9399 			                                        auth_offset,
9400 			                                        auth,
9401 			                                        auth_keyid,
9402 			                                        no_fragmentflg,
9403 			                                        bundle_at,
9404 			                                        asconf,
9405 			                                        inp->sctp_lport, stcb->rport,
9406 			                                        htonl(stcb->asoc.peer_vtag),
9407 			                                        net->port, NULL,
9408 #if defined(__FreeBSD__) && !defined(__Userspace__)
9409 			                                        0, 0,
9410 #endif
9411 			                                        so_locked))) {
9412 				/* error, we could not output */
9413 				SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
9414 				if (from_where == 0) {
9415 					SCTP_STAT_INCR(sctps_lowlevelerrusr);
9416 				}
9417 				if (error == ENOBUFS) {
9418 					asoc->ifp_had_enobuf = 1;
9419 					SCTP_STAT_INCR(sctps_lowlevelerr);
9420 				}
9421 				if (error == EHOSTUNREACH) {
9422 					/*
9423 					 * Destination went unreachable
9424 					 * during this send
9425 					 */
9426 					sctp_move_chunks_from_net(stcb, net);
9427 				}
9428 				*reason_code = 6;
9429 				/*-
9430 				 * This is just paranoia. As far as I can
9431 				 * tell the continue takes us back to the
9432 				 * top of the for loop, but just to make
9433 				 * sure, reset these again here.
9434 				 */
9435 				ctl_cnt = bundle_at = 0;
9436 				continue; /* This takes us back to the for() for the nets. */
9437 			} else {
9438 				asoc->ifp_had_enobuf = 0;
9439 			}
9440 			endoutchain = NULL;
9441 			auth = NULL;
9442 			auth_offset = 0;
9443 			if (!no_out_cnt) {
9444 				*num_out += (ctl_cnt + bundle_at);
9445 			}
9446 			if (bundle_at) {
9447 				/* setup for a RTO measurement */
9448 				tsns_sent = data_list[0]->rec.data.tsn;
9449 				/* fill time if not already filled */
9450 				if (*now_filled == 0) {
9451 					(void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
9452 					*now_filled = 1;
9453 					*now = asoc->time_last_sent;
9454 				} else {
9455 					asoc->time_last_sent = *now;
9456 				}
9457 				if (net->rto_needed) {
9458 					data_list[0]->do_rtt = 1;
9459 					net->rto_needed = 0;
9460 				}
9461 				SCTP_STAT_INCR_BY(sctps_senddata, bundle_at);
9462 				sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net);
9463 			}
9464 			if (one_chunk) {
9465 				break;
9466 			}
9467 		}
9468 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9469 			sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND);
9470 		}
9471 	}
9472 	if (old_start_at == NULL) {
9473 		old_start_at = start_at;
9474 		start_at = TAILQ_FIRST(&asoc->nets);
9475 		if (old_start_at)
9476 			goto again_one_more_time;
9477 	}
9478 
9479 	/*
9480 	 * At the end there should be no NON timed chunks hanging on this
9481 	 * queue.
9482 	 */
9483 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9484 		sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND);
9485 	}
9486 	if ((*num_out == 0) && (*reason_code == 0)) {
9487 		*reason_code = 4;
9488 	} else {
9489 		*reason_code = 5;
9490 	}
9491 	sctp_clean_up_ctl(stcb, asoc, so_locked);
9492 	return (0);
9493 }
9494 
9495 void
9496 sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
9497 {
9498 	/*-
9499 	 * Prepend an OPERATION-ERROR chunk header and put it on the end of
9500 	 * the control chunk queue.
9501 	 */
9502 	struct sctp_chunkhdr *hdr;
9503 	struct sctp_tmit_chunk *chk;
9504 	struct mbuf *mat, *last_mbuf;
9505 	uint32_t chunk_length;
9506 	uint16_t padding_length;
9507 
9508 	SCTP_TCB_LOCK_ASSERT(stcb);
9509 	SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_NOWAIT);
9510 	if (op_err == NULL) {
9511 		return;
9512 	}
9513 	last_mbuf = NULL;
9514 	chunk_length = 0;
9515 	for (mat = op_err; mat != NULL; mat = SCTP_BUF_NEXT(mat)) {
9516 		chunk_length += SCTP_BUF_LEN(mat);
9517 		if (SCTP_BUF_NEXT(mat) == NULL) {
9518 			last_mbuf = mat;
9519 		}
9520 	}
9521 	if (chunk_length > SCTP_MAX_CHUNK_LENGTH) {
9522 		sctp_m_freem(op_err);
9523 		return;
9524 	}
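	/*
	 * Error chunks must be padded out to a multiple of 4 bytes; compute
	 * how much padding the last mbuf needs.
	 */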
9525 	padding_length = chunk_length % 4;
9526 	if (padding_length != 0) {
9527 		padding_length = 4 - padding_length;
9528 	}
9529 	if (padding_length != 0) {
9530 		if (sctp_add_pad_tombuf(last_mbuf, padding_length) == NULL) {
9531 			sctp_m_freem(op_err);
9532 			return;
9533 		}
9534 	}
9535 	sctp_alloc_a_chunk(stcb, chk);
9536 	if (chk == NULL) {
9537 		/* no memory */
9538 		sctp_m_freem(op_err);
9539 		return;
9540 	}
9541 	chk->copy_by_ref = 0;
9542 	chk->rec.chunk_id.id = SCTP_OPERATION_ERROR;
9543 	chk->rec.chunk_id.can_take_data = 0;
9544 	chk->flags = 0;
9545 	chk->send_size = (uint16_t)chunk_length;
9546 	chk->sent = SCTP_DATAGRAM_UNSENT;
9547 	chk->snd_count = 0;
9548 	chk->asoc = &stcb->asoc;
9549 	chk->data = op_err;
9550 	chk->whoTo = NULL;
9551 	hdr = mtod(op_err, struct sctp_chunkhdr *);
9552 	hdr->chunk_type = SCTP_OPERATION_ERROR;
9553 	hdr->chunk_flags = 0;
9554 	hdr->chunk_length = htons(chk->send_size);
9555 	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9556 	chk->asoc->ctrl_queue_cnt++;
9557 }
9558 
9559 int
9560 sctp_send_cookie_echo(struct mbuf *m,
9561     int offset, int limit,
9562     struct sctp_tcb *stcb,
9563     struct sctp_nets *net)
9564 {
9565 	/*-
9566 	 * pull out the cookie and put it at the front of the control chunk
9567 	 * queue.
9568 	 */
9569 	int at;
9570 	struct mbuf *cookie;
9571 	struct sctp_paramhdr param, *phdr;
9572 	struct sctp_chunkhdr *hdr;
9573 	struct sctp_tmit_chunk *chk;
9574 	uint16_t ptype, plen;
9575 
9576 	SCTP_TCB_LOCK_ASSERT(stcb);
9577 	/* First find the cookie in the param area */
9578 	cookie = NULL;
9579 	at = offset + sizeof(struct sctp_init_chunk);
9580 	for (;;) {
9581 		phdr = sctp_get_next_param(m, at, &param, sizeof(param));
9582 		if (phdr == NULL) {
9583 			return (-3);
9584 		}
9585 		ptype = ntohs(phdr->param_type);
9586 		plen = ntohs(phdr->param_length);
9587 		if (plen < sizeof(struct sctp_paramhdr)) {
9588 			return (-6);
9589 		}
9590 		if (ptype == SCTP_STATE_COOKIE) {
9591 			int pad;
9592 
9593 			/* found the cookie */
9594 			if (at + plen > limit) {
9595 				return (-7);
9596 			}
9597 			cookie = SCTP_M_COPYM(m, at, plen, M_NOWAIT);
9598 			if (cookie == NULL) {
9599 				/* No memory */
9600 				return (-2);
9601 			}
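			/*
			 * The cookie parameter must be padded out to a
			 * multiple of 4 bytes before it can be reused as a
			 * COOKIE-ECHO chunk.
			 */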
9602 			if ((pad = (plen % 4)) > 0) {
9603 				pad = 4 - pad;
9604 			}
9605 			if (pad > 0) {
9606 				if (sctp_pad_lastmbuf(cookie, pad, NULL) == NULL) {
9607 					return (-8);
9608 				}
9609 			}
9610 #ifdef SCTP_MBUF_LOGGING
9611 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9612 				sctp_log_mbc(cookie, SCTP_MBUF_ICOPY);
9613 			}
9614 #endif
9615 			break;
9616 		}
9617 		at += SCTP_SIZE32(plen);
9618 	}
9619 	/* ok, we got the cookie; let's change it into a COOKIE-ECHO chunk */
9620 	/* first the change from param to cookie */
9621 	hdr = mtod(cookie, struct sctp_chunkhdr *);
9622 	hdr->chunk_type = SCTP_COOKIE_ECHO;
9623 	hdr->chunk_flags = 0;
9624 	/* get the chunk stuff now and place it in the FRONT of the queue */
9625 	sctp_alloc_a_chunk(stcb, chk);
9626 	if (chk == NULL) {
9627 		/* no memory */
9628 		sctp_m_freem(cookie);
9629 		return (-5);
9630 	}
9631 	chk->copy_by_ref = 0;
9632 	chk->rec.chunk_id.id = SCTP_COOKIE_ECHO;
9633 	chk->rec.chunk_id.can_take_data = 0;
9634 	chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9635 	chk->send_size = SCTP_SIZE32(plen);
9636 	chk->sent = SCTP_DATAGRAM_UNSENT;
9637 	chk->snd_count = 0;
9638 	chk->asoc = &stcb->asoc;
9639 	chk->data = cookie;
9640 	chk->whoTo = net;
9641 	atomic_add_int(&chk->whoTo->ref_count, 1);
9642 	TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next);
9643 	chk->asoc->ctrl_queue_cnt++;
9644 	return (0);
9645 }
9646 
9647 void
9648 sctp_send_heartbeat_ack(struct sctp_tcb *stcb,
9649     struct mbuf *m,
9650     int offset,
9651     int chk_length,
9652     struct sctp_nets *net)
9653 {
9654 	/*
9655 	 * take an HB request, turn it into an HB-ACK, and queue it.
9656 	 */
9657 	struct mbuf *outchain;
9658 	struct sctp_chunkhdr *chdr;
9659 	struct sctp_tmit_chunk *chk;
9660 
9661 	if (net == NULL)
9662 		/* must have a net pointer */
9663 		return;
9664 
9665 	outchain = SCTP_M_COPYM(m, offset, chk_length, M_NOWAIT);
9666 	if (outchain == NULL) {
9667 		/* gak out of memory */
9668 		return;
9669 	}
9670 #ifdef SCTP_MBUF_LOGGING
9671 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9672 		sctp_log_mbc(outchain, SCTP_MBUF_ICOPY);
9673 	}
9674 #endif
9675 	chdr = mtod(outchain, struct sctp_chunkhdr *);
9676 	chdr->chunk_type = SCTP_HEARTBEAT_ACK;
9677 	chdr->chunk_flags = 0;
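	/*
	 * Pad the copied heartbeat information out to a multiple of 4 bytes.
	 */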
9678 	if (chk_length % 4 != 0) {
9679 		sctp_pad_lastmbuf(outchain, 4 - (chk_length % 4), NULL);
9680 	}
9681 	sctp_alloc_a_chunk(stcb, chk);
9682 	if (chk == NULL) {
9683 		/* no memory */
9684 		sctp_m_freem(outchain);
9685 		return;
9686 	}
9687 	chk->copy_by_ref = 0;
9688 	chk->rec.chunk_id.id = SCTP_HEARTBEAT_ACK;
9689 	chk->rec.chunk_id.can_take_data = 1;
9690 	chk->flags = 0;
9691 	chk->send_size = chk_length;
9692 	chk->sent = SCTP_DATAGRAM_UNSENT;
9693 	chk->snd_count = 0;
9694 	chk->asoc = &stcb->asoc;
9695 	chk->data = outchain;
9696 	chk->whoTo = net;
9697 	atomic_add_int(&chk->whoTo->ref_count, 1);
9698 	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9699 	chk->asoc->ctrl_queue_cnt++;
9700 }
9701 
9702 void
9703 sctp_send_cookie_ack(struct sctp_tcb *stcb)
9704 {
9705 	/* formulate and queue a cookie-ack back to sender */
9706 	struct mbuf *cookie_ack;
9707 	struct sctp_chunkhdr *hdr;
9708 	struct sctp_tmit_chunk *chk;
9709 
9710 	SCTP_TCB_LOCK_ASSERT(stcb);
9711 
9712 	cookie_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER);
9713 	if (cookie_ack == NULL) {
9714 		/* no mbuf's */
9715 		return;
9716 	}
9717 	SCTP_BUF_RESV_UF(cookie_ack, SCTP_MIN_OVERHEAD);
9718 	sctp_alloc_a_chunk(stcb, chk);
9719 	if (chk == NULL) {
9720 		/* no memory */
9721 		sctp_m_freem(cookie_ack);
9722 		return;
9723 	}
9724 	chk->copy_by_ref = 0;
9725 	chk->rec.chunk_id.id = SCTP_COOKIE_ACK;
9726 	chk->rec.chunk_id.can_take_data = 1;
9727 	chk->flags = 0;
9728 	chk->send_size = sizeof(struct sctp_chunkhdr);
9729 	chk->sent = SCTP_DATAGRAM_UNSENT;
9730 	chk->snd_count = 0;
9731 	chk->asoc = &stcb->asoc;
9732 	chk->data = cookie_ack;
9733 	if (chk->asoc->last_control_chunk_from != NULL) {
9734 		chk->whoTo = chk->asoc->last_control_chunk_from;
9735 		atomic_add_int(&chk->whoTo->ref_count, 1);
9736 	} else {
9737 		chk->whoTo = NULL;
9738 	}
9739 	hdr = mtod(cookie_ack, struct sctp_chunkhdr *);
9740 	hdr->chunk_type = SCTP_COOKIE_ACK;
9741 	hdr->chunk_flags = 0;
9742 	hdr->chunk_length = htons(chk->send_size);
9743 	SCTP_BUF_LEN(cookie_ack) = chk->send_size;
9744 	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9745 	chk->asoc->ctrl_queue_cnt++;
9746 	return;
9747 }
9748 
9749 
9750 void
9751 sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net)
9752 {
9753 	/* formulate and queue a SHUTDOWN-ACK back to the sender */
9754 	struct mbuf *m_shutdown_ack;
9755 	struct sctp_shutdown_ack_chunk *ack_cp;
9756 	struct sctp_tmit_chunk *chk;
9757 
9758 	m_shutdown_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_ack_chunk), 0, M_NOWAIT, 1, MT_HEADER);
9759 	if (m_shutdown_ack == NULL) {
9760 		/* no mbuf's */
9761 		return;
9762 	}
9763 	SCTP_BUF_RESV_UF(m_shutdown_ack, SCTP_MIN_OVERHEAD);
9764 	sctp_alloc_a_chunk(stcb, chk);
9765 	if (chk == NULL) {
9766 		/* no memory */
9767 		sctp_m_freem(m_shutdown_ack);
9768 		return;
9769 	}
9770 	chk->copy_by_ref = 0;
9771 	chk->rec.chunk_id.id = SCTP_SHUTDOWN_ACK;
9772 	chk->rec.chunk_id.can_take_data = 1;
9773 	chk->flags = 0;
9774 	chk->send_size = sizeof(struct sctp_chunkhdr);
9775 	chk->sent = SCTP_DATAGRAM_UNSENT;
9776 	chk->snd_count = 0;
9777 	chk->asoc = &stcb->asoc;
9778 	chk->data = m_shutdown_ack;
9779 	chk->whoTo = net;
9780 	if (chk->whoTo) {
9781 		atomic_add_int(&chk->whoTo->ref_count, 1);
9782 	}
9783 	ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *);
9784 	ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK;
9785 	ack_cp->ch.chunk_flags = 0;
9786 	ack_cp->ch.chunk_length = htons(chk->send_size);
9787 	SCTP_BUF_LEN(m_shutdown_ack) = chk->send_size;
9788 	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9789 	chk->asoc->ctrl_queue_cnt++;
9790 	return;
9791 }
9792 
9793 void
9794 sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net)
9795 {
9796 	/* formulate and queue a SHUTDOWN to the sender */
9797 	struct mbuf *m_shutdown;
9798 	struct sctp_shutdown_chunk *shutdown_cp;
9799 	struct sctp_tmit_chunk *chk;
9800 
9801 	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
9802 		if (chk->rec.chunk_id.id == SCTP_SHUTDOWN) {
9803 			/* We already have a SHUTDOWN queued. Reuse it. */
9804 			if (chk->whoTo) {
9805 				sctp_free_remote_addr(chk->whoTo);
9806 				chk->whoTo = NULL;
9807 			}
9808 			break;
9809 		}
9810 	}
9811 	if (chk == NULL) {
9812 		m_shutdown = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_chunk), 0, M_NOWAIT, 1, MT_HEADER);
9813 		if (m_shutdown == NULL) {
9814 			/* no mbuf's */
9815 			return;
9816 		}
9817 		SCTP_BUF_RESV_UF(m_shutdown, SCTP_MIN_OVERHEAD);
9818 		sctp_alloc_a_chunk(stcb, chk);
9819 		if (chk == NULL) {
9820 			/* no memory */
9821 			sctp_m_freem(m_shutdown);
9822 			return;
9823 		}
9824 		chk->copy_by_ref = 0;
9825 		chk->rec.chunk_id.id = SCTP_SHUTDOWN;
9826 		chk->rec.chunk_id.can_take_data = 1;
9827 		chk->flags = 0;
9828 		chk->send_size = sizeof(struct sctp_shutdown_chunk);
9829 		chk->sent = SCTP_DATAGRAM_UNSENT;
9830 		chk->snd_count = 0;
9831 		chk->asoc = &stcb->asoc;
9832 		chk->data = m_shutdown;
9833 		chk->whoTo = net;
9834 		if (chk->whoTo) {
9835 			atomic_add_int(&chk->whoTo->ref_count, 1);
9836 		}
9837 		shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *);
9838 		shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN;
9839 		shutdown_cp->ch.chunk_flags = 0;
9840 		shutdown_cp->ch.chunk_length = htons(chk->send_size);
9841 		shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
9842 		SCTP_BUF_LEN(m_shutdown) = chk->send_size;
9843 		TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9844 		chk->asoc->ctrl_queue_cnt++;
9845 	} else {
9846 		TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk, sctp_next);
9847 		chk->whoTo = net;
9848 		if (chk->whoTo) {
9849 			atomic_add_int(&chk->whoTo->ref_count, 1);
9850 		}
9851 		shutdown_cp = mtod(chk->data, struct sctp_shutdown_chunk *);
9852 		shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
9853 		TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
9854 	}
9855 	return;
9856 }
9857 
9858 void
9859 sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net, int addr_locked)
9860 {
9861 	/*
9862 	 * formulate and queue an ASCONF to the peer.
9863 	 * ASCONF parameters should be queued on the assoc queue.
9864 	 */
9865 	struct sctp_tmit_chunk *chk;
9866 	struct mbuf *m_asconf;
9867 	int len;
9868 
9869 	SCTP_TCB_LOCK_ASSERT(stcb);
9870 
9871 	if ((!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) &&
9872 	    (!sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS))) {
9873 		/* can't send a new one if there is one in flight already */
9874 		return;
9875 	}
9876 
9877 	/* compose an ASCONF chunk, maximum length is PMTU */
9878 	m_asconf = sctp_compose_asconf(stcb, &len, addr_locked);
9879 	if (m_asconf == NULL) {
9880 		return;
9881 	}
9882 
9883 	sctp_alloc_a_chunk(stcb, chk);
9884 	if (chk == NULL) {
9885 		/* no memory */
9886 		sctp_m_freem(m_asconf);
9887 		return;
9888 	}
9889 
9890 	chk->copy_by_ref = 0;
9891 	chk->rec.chunk_id.id = SCTP_ASCONF;
9892 	chk->rec.chunk_id.can_take_data = 0;
9893 	chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9894 	chk->data = m_asconf;
9895 	chk->send_size = len;
9896 	chk->sent = SCTP_DATAGRAM_UNSENT;
9897 	chk->snd_count = 0;
9898 	chk->asoc = &stcb->asoc;
9899 	chk->whoTo = net;
9900 	if (chk->whoTo) {
9901 		atomic_add_int(&chk->whoTo->ref_count, 1);
9902 	}
9903 	TAILQ_INSERT_TAIL(&chk->asoc->asconf_send_queue, chk, sctp_next);
9904 	chk->asoc->ctrl_queue_cnt++;
9905 	return;
9906 }
9907 
9908 void
9909 sctp_send_asconf_ack(struct sctp_tcb *stcb)
9910 {
9911 	/*
9912 	 * formulate and queue an ASCONF-ACK back to the sender.
9913 	 * The ASCONF-ACK must be stored in the tcb.
9914 	 */
9915 	struct sctp_tmit_chunk *chk;
9916 	struct sctp_asconf_ack *ack, *latest_ack;
9917 	struct mbuf *m_ack;
9918 	struct sctp_nets *net = NULL;
9919 
9920 	SCTP_TCB_LOCK_ASSERT(stcb);
9921 	/* Get the latest ASCONF-ACK */
9922 	latest_ack = TAILQ_LAST(&stcb->asoc.asconf_ack_sent, sctp_asconf_ackhead);
9923 	if (latest_ack == NULL) {
9924 		return;
9925 	}
9926 	if (latest_ack->last_sent_to != NULL &&
9927 	    latest_ack->last_sent_to == stcb->asoc.last_control_chunk_from) {
9928 		/* we're doing a retransmission */
9929 		net = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0);
9930 		if (net == NULL) {
9931 			/* no alternate */
9932 			if (stcb->asoc.last_control_chunk_from == NULL) {
9933 				if (stcb->asoc.alternate) {
9934 					net = stcb->asoc.alternate;
9935 				} else {
9936 					net = stcb->asoc.primary_destination;
9937 				}
9938 			} else {
9939 				net = stcb->asoc.last_control_chunk_from;
9940 			}
9941 		}
9942 	} else {
9943 		/* normal case */
9944 		if (stcb->asoc.last_control_chunk_from == NULL) {
9945 			if (stcb->asoc.alternate) {
9946 				net = stcb->asoc.alternate;
9947 			} else {
9948 				net = stcb->asoc.primary_destination;
9949 			}
9950 		} else {
9951 			net = stcb->asoc.last_control_chunk_from;
9952 		}
9953 	}
9954 	latest_ack->last_sent_to = net;
9955 
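	/*
	 * Queue a copy of every cached ASCONF-ACK that still has data,
	 * addressed to the net selected above.
	 */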
9956 	TAILQ_FOREACH(ack, &stcb->asoc.asconf_ack_sent, next) {
9957 		if (ack->data == NULL) {
9958 			continue;
9959 		}
9960 
9961 		/* copy the asconf_ack */
9962 		m_ack = SCTP_M_COPYM(ack->data, 0, M_COPYALL, M_NOWAIT);
9963 		if (m_ack == NULL) {
9964 			/* couldn't copy it */
9965 			return;
9966 		}
9967 #ifdef SCTP_MBUF_LOGGING
9968 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9969 			sctp_log_mbc(m_ack, SCTP_MBUF_ICOPY);
9970 		}
9971 #endif
9972 
9973 		sctp_alloc_a_chunk(stcb, chk);
9974 		if (chk == NULL) {
9975 			/* no memory */
9976 			if (m_ack)
9977 				sctp_m_freem(m_ack);
9978 			return;
9979 		}
9980 		chk->copy_by_ref = 0;
9981 		chk->rec.chunk_id.id = SCTP_ASCONF_ACK;
9982 		chk->rec.chunk_id.can_take_data = 1;
9983 		chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9984 		chk->whoTo = net;
9985 		if (chk->whoTo) {
9986 			atomic_add_int(&chk->whoTo->ref_count, 1);
9987 		}
9988 		chk->data = m_ack;
9989 		chk->send_size = ack->len;
9990 		chk->sent = SCTP_DATAGRAM_UNSENT;
9991 		chk->snd_count = 0;
9992 		chk->asoc = &stcb->asoc;
9993 
9994 		TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9995 		chk->asoc->ctrl_queue_cnt++;
9996 	}
9997 	return;
9998 }
9999 
10000 
10001 static int
10002 sctp_chunk_retransmission(struct sctp_inpcb *inp,
10003     struct sctp_tcb *stcb,
10004     struct sctp_association *asoc,
10005     int *cnt_out, struct timeval *now, int *now_filled, int *fr_done, int so_locked)
10006 {
10007 	/*-
10008 	 * Send out one MTU of retransmission. If fast_retransmit is
10009 	 * happening we ignore the cwnd. Otherwise we obey the cwnd and
10010 	 * rwnd. A COOKIE or ASCONF in the control chunk queue is
10011 	 * retransmitted by itself.
10012 	 *
10013 	 * For data chunks we will pick out the lowest TSNs in the sent_queue
10014 	 * marked for resend and bundle them all together (up to an MTU of
10015 	 * the destination). The address to send to should have been
10016 	 * selected/changed where the retransmission was marked (i.e. in the FR
10017 	 * or t3-timeout routines).
10018 	 */
10019 	struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
10020 	struct sctp_tmit_chunk *chk, *fwd;
10021 	struct mbuf *m, *endofchain;
10022 	struct sctp_nets *net = NULL;
10023 	uint32_t tsns_sent = 0;
10024 	int no_fragmentflg, bundle_at, cnt_thru;
10025 	unsigned int mtu;
10026 	int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started;
10027 	struct sctp_auth_chunk *auth = NULL;
10028 	uint32_t auth_offset = 0;
10029 	uint16_t auth_keyid;
10030 	int override_ok = 1;
10031 	int data_auth_reqd = 0;
10032 	uint32_t dmtu = 0;
10033 
10034 #if defined(__APPLE__) && !defined(__Userspace__)
10035 	if (so_locked) {
10036 		sctp_lock_assert(SCTP_INP_SO(inp));
10037 	} else {
10038 		sctp_unlock_assert(SCTP_INP_SO(inp));
10039 	}
10040 #endif
10041 	SCTP_TCB_LOCK_ASSERT(stcb);
10042 	tmr_started = ctl_cnt = bundle_at = error = 0;
10043 	no_fragmentflg = 1;
10044 	fwd_tsn = 0;
10045 	*cnt_out = 0;
10046 	fwd = NULL;
10047 	endofchain = m = NULL;
10048 	auth_keyid = stcb->asoc.authinfo.active_keyid;
10049 #ifdef SCTP_AUDITING_ENABLED
10050 	sctp_audit_log(0xC3, 1);
10051 #endif
10052 	if ((TAILQ_EMPTY(&asoc->sent_queue)) &&
10053 	    (TAILQ_EMPTY(&asoc->control_send_queue))) {
10054 		SCTPDBG(SCTP_DEBUG_OUTPUT1,"SCTP hits empty queue with cnt set to %d?\n",
10055 			asoc->sent_queue_retran_cnt);
10056 		asoc->sent_queue_cnt = 0;
10057 		asoc->sent_queue_cnt_removeable = 0;
10058 		/* send back 0/0 so we enter normal transmission */
10059 		*cnt_out = 0;
10060 		return (0);
10061 	}
10062 	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10063 		if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) ||
10064 		    (chk->rec.chunk_id.id == SCTP_STREAM_RESET) ||
10065 		    (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) {
10066 			if (chk->sent != SCTP_DATAGRAM_RESEND) {
10067 				continue;
10068 			}
10069 			if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
10070 				if (chk != asoc->str_reset) {
10071 					/*
10072 					 * not eligible for retransmission
10073 					 * if it's not ours
10074 					 */
10075 					continue;
10076 				}
10077 			}
10078 			ctl_cnt++;
10079 			if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
10080 				fwd_tsn = 1;
10081 			}
10082 			/*
10083 			 * Add an AUTH chunk if the chunk requires it; save
10084 			 * the offset into the chain for AUTH.
10085 			 */
10086 			if ((auth == NULL) &&
10087 			    (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
10088 							 stcb->asoc.peer_auth_chunks))) {
10089 				m = sctp_add_auth_chunk(m, &endofchain,
10090 							&auth, &auth_offset,
10091 							stcb,
10092 							chk->rec.chunk_id.id);
10093 				SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10094 			}
10095 			m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
10096 			break;
10097 		}
10098 	}
10099 	one_chunk = 0;
10100 	cnt_thru = 0;
10101 	/* do we have control chunks to retransmit? */
10102 	if (m != NULL) {
10103 		/* Start a timer no matter whether we succeed or fail */
10104 		if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
10105 			sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo);
10106 		} else if (chk->rec.chunk_id.id == SCTP_ASCONF)
10107 			sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo);
10108 		chk->snd_count++;	/* update our count */
10109 		if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo,
10110 		                                        (struct sockaddr *)&chk->whoTo->ro._l_addr, m,
10111 		                                        auth_offset, auth, stcb->asoc.authinfo.active_keyid,
10112 		                                        no_fragmentflg, 0, 0,
10113 		                                        inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
10114 		                                        chk->whoTo->port, NULL,
10115 #if defined(__FreeBSD__) && !defined(__Userspace__)
10116 		                                        0, 0,
10117 #endif
10118 		                                        so_locked))) {
10119 			SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
10120 			if (error == ENOBUFS) {
10121 				asoc->ifp_had_enobuf = 1;
10122 				SCTP_STAT_INCR(sctps_lowlevelerr);
10123 			}
10124 			return (error);
10125 		} else {
10126 			asoc->ifp_had_enobuf = 0;
10127 		}
10128 		endofchain = NULL;
10129 		auth = NULL;
10130 		auth_offset = 0;
10131 		/*
10132 		 * We don't want to mark the net->sent time here since we
10133 		 * use it for HBs, and retransmissions cannot measure RTT.
10134 		 */
10135 		/* (void)SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time); */
10136 		*cnt_out += 1;
10137 		chk->sent = SCTP_DATAGRAM_SENT;
10138 		sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
10139 		if (fwd_tsn == 0) {
10140 			return (0);
10141 		} else {
10142 			/* Clean up the fwd-tsn list */
10143 			sctp_clean_up_ctl(stcb, asoc, so_locked);
10144 			return (0);
10145 		}
10146 	}
10147 	/*
10148 	 * Ok, it is just data retransmission we need to do, or that plus
10149 	 * a FWD-TSN along with it.
10150 	 */
10151 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
10152 		return (SCTP_RETRAN_DONE);
10153 	}
10154 	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED) ||
10155 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT)) {
10156 		/* not yet open, resend the cookie and that is it */
10157 		return (1);
10158 	}
10159 #ifdef SCTP_AUDITING_ENABLED
10160 	sctp_auditing(20, inp, stcb, NULL);
10161 #endif
10162 	data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks);
10163 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
10164 		if (chk->sent != SCTP_DATAGRAM_RESEND) {
10165 			/* No, not sent to this net or not ready for rtx */
10166 			continue;
10167 		}
10168 		if (chk->data == NULL) {
10169 			SCTP_PRINTF("TSN:%x chk->snd_count:%d chk->sent:%d can't retran - no data\n",
10170 			            chk->rec.data.tsn, chk->snd_count, chk->sent);
10171 			continue;
10172 		}
10173 		if ((SCTP_BASE_SYSCTL(sctp_max_retran_chunk)) &&
10174 		    (chk->snd_count >= SCTP_BASE_SYSCTL(sctp_max_retran_chunk))) {
10175 			struct mbuf *op_err;
10176 			char msg[SCTP_DIAG_INFO_LEN];
10177 
10178 			SCTP_SNPRINTF(msg, sizeof(msg), "TSN %8.8x retransmitted %d times, giving up",
10179 			              chk->rec.data.tsn, chk->snd_count);
10180 			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
10181 			                             msg);
10182 			atomic_add_int(&stcb->asoc.refcnt, 1);
10183 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err,
10184 			                          so_locked);
10185 			SCTP_TCB_LOCK(stcb);
10186 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
10187 			return (SCTP_RETRAN_EXIT);
10188 		}
10189 		/* pick up the net */
10190 		net = chk->whoTo;
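		/*
		 * Compute the usable payload space for this path based on
		 * the per-address-family header overhead.
		 */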
10191 		switch (net->ro._l_addr.sa.sa_family) {
10192 #ifdef INET
10193 			case AF_INET:
10194 				mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
10195 				break;
10196 #endif
10197 #ifdef INET6
10198 			case AF_INET6:
10199 				mtu = net->mtu - SCTP_MIN_OVERHEAD;
10200 				break;
10201 #endif
10202 #if defined(__Userspace__)
10203 			case AF_CONN:
10204 				mtu = net->mtu - sizeof(struct sctphdr);
10205 				break;
10206 #endif
10207 			default:
10208 				/* TSNH */
10209 				mtu = net->mtu;
10210 				break;
10211 		}
10212 
10213 		if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) {
10214 			/* No room in peers rwnd */
10215 			uint32_t tsn;
10216 
10217 			tsn = asoc->last_acked_seq + 1;
10218 			if (tsn == chk->rec.data.tsn) {
10219 				/*
10220 				 * we make a special exception for this
10221 				 * case: the peer has no rwnd but is missing
10222 				 * the lowest chunk, which is probably what
10223 				 * is holding up the rwnd.
10224 				 */
10225 				goto one_chunk_around;
10226 			}
10227 			return (1);
10228 		}
10229 	one_chunk_around:
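		/*
		 * If the peer's rwnd is smaller than the MTU, send at most
		 * one chunk; with a zero rwnd and nothing in flight, mark
		 * this chunk as a window probe.
		 */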
10230 		if (asoc->peers_rwnd < mtu) {
10231 			one_chunk = 1;
10232 			if ((asoc->peers_rwnd == 0) &&
10233 			    (asoc->total_flight == 0)) {
10234 				chk->window_probe = 1;
10235 				chk->whoTo->window_probe = 1;
10236 			}
10237 		}
10238 #ifdef SCTP_AUDITING_ENABLED
10239 		sctp_audit_log(0xC3, 2);
10240 #endif
10241 		bundle_at = 0;
10242 		m = NULL;
10243 		net->fast_retran_ip = 0;
10244 		if (chk->rec.data.doing_fast_retransmit == 0) {
10245 			/*
10246 			 * if no FR is in progress, skip destinations that
10247 			 * have flight_size >= cwnd.
10248 			 */
10249 			if (net->flight_size >= net->cwnd) {
10250 				continue;
10251 			}
10252 		} else {
10253 			/*
10254 			 * Mark the destination net to have FR recovery
10255 			 * limits put on it.
10256 			 */
10257 			*fr_done = 1;
10258 			net->fast_retran_ip = 1;
10259 		}
10260 
10261 		/*
10262 		 * if no AUTH is yet included and this chunk requires it,
10263 		 * make sure to account for it.  We don't apply the size
10264 		 * until the AUTH chunk is actually added below in case
10265 		 * there is no room for this chunk.
10266 		 */
10267 		if (data_auth_reqd && (auth == NULL)) {
10268 			dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
10269 		} else
10270 			dmtu = 0;
10271 
10272 		if ((chk->send_size <= (mtu - dmtu)) ||
10273 		    (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
10274 			/* ok we will add this one */
10275 			if (data_auth_reqd) {
10276 				if (auth == NULL) {
10277 					m = sctp_add_auth_chunk(m,
10278 								&endofchain,
10279 								&auth,
10280 								&auth_offset,
10281 								stcb,
10282 								SCTP_DATA);
10283 					auth_keyid = chk->auth_keyid;
10284 					override_ok = 0;
10285 					SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10286 				} else if (override_ok) {
10287 					auth_keyid = chk->auth_keyid;
10288 					override_ok = 0;
10289 				} else if (chk->auth_keyid != auth_keyid) {
10290 					/* different keyid, so done bundling */
10291 					break;
10292 				}
10293 			}
10294 			m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
10295 			if (m == NULL) {
10296 				SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
10297 				return (ENOMEM);
10298 			}
10299 			/* Do clear IP_DF ? */
10300 			if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
10301 				no_fragmentflg = 0;
10302 			}
10303 			/* update our MTU size */
10304 			if (mtu > (chk->send_size + dmtu))
10305 				mtu -= (chk->send_size + dmtu);
10306 			else
10307 				mtu = 0;
10308 			data_list[bundle_at++] = chk;
10309 			if (one_chunk && (asoc->total_flight <= 0)) {
10310 				SCTP_STAT_INCR(sctps_windowprobed);
10311 			}
10312 		}
10313 		if (one_chunk == 0) {
10314 			/*
10315 			 * now, are there any more chunks forward of chk to
10316 			 * pick up?
10317 			 */
10318 			for (fwd = TAILQ_NEXT(chk, sctp_next); fwd != NULL; fwd = TAILQ_NEXT(fwd, sctp_next)) {
10319 				if (fwd->sent != SCTP_DATAGRAM_RESEND) {
10320 					/* Nope, not for retran */
10321 					continue;
10322 				}
10323 				if (fwd->whoTo != net) {
10324 					/* Nope, not the net in question */
10325 					continue;
10326 				}
10327 				if (data_auth_reqd && (auth == NULL)) {
10328 					dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
10329 				} else
10330 					dmtu = 0;
10331 				if (fwd->send_size <= (mtu - dmtu)) {
10332 					if (data_auth_reqd) {
10333 						if (auth == NULL) {
10334 							m = sctp_add_auth_chunk(m,
10335 										&endofchain,
10336 										&auth,
10337 										&auth_offset,
10338 										stcb,
10339 										SCTP_DATA);
10340 							auth_keyid = fwd->auth_keyid;
10341 							override_ok = 0;
10342 							SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10343 						} else if (override_ok) {
10344 							auth_keyid = fwd->auth_keyid;
10345 							override_ok = 0;
10346 						} else if (fwd->auth_keyid != auth_keyid) {
10347 							/* different keyid, so done bundling */
10348 							break;
10349 						}
10350 					}
10351 					m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref);
10352 					if (m == NULL) {
10353 						SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
10354 						return (ENOMEM);
10355 					}
10356 					/* Do clear IP_DF ? */
10357 					if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) {
10358 						no_fragmentflg = 0;
10359 					}
10360 					/* update our MTU size */
10361 					if (mtu > (fwd->send_size + dmtu))
10362 						mtu -= (fwd->send_size + dmtu);
10363 					else
10364 						mtu = 0;
10365 					data_list[bundle_at++] = fwd;
10366 					if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
10367 						break;
10368 					}
10369 				} else {
10370 					/* can't fit so we are done */
10371 					break;
10372 				}
10373 			}
10374 		}
10375 		/* Is there something to send for this destination? */
10376 		if (m) {
10377 			/*
10378 			 * No matter whether we fail or succeed, we should
10379 			 * start a timer. A failure is like a lost IP packet :-)
10380 			 */
10381 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
10382 				/*
10383 				 * no timer running on this destination;
10384 				 * restart it.
10385 				 */
10386 				sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
10387 				tmr_started = 1;
10388 			}
10389 			/* Now let's send it, if there is anything to send :> */
10390 			if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
10391 			                                        (struct sockaddr *)&net->ro._l_addr, m,
10392 			                                        auth_offset, auth, auth_keyid,
10393 			                                        no_fragmentflg, 0, 0,
10394 			                                        inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
10395 			                                        net->port, NULL,
10396 #if defined(__FreeBSD__) && !defined(__Userspace__)
10397 			                                        0, 0,
10398 #endif
10399 			                                        so_locked))) {
10400 				/* error, we could not output */
10401 				SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
10402 				if (error == ENOBUFS) {
10403 					asoc->ifp_had_enobuf = 1;
10404 					SCTP_STAT_INCR(sctps_lowlevelerr);
10405 				}
10406 				return (error);
10407 			} else {
10408 				asoc->ifp_had_enobuf = 0;
10409 			}
10410 			endofchain = NULL;
10411 			auth = NULL;
10412 			auth_offset = 0;
10413 			/* For HB's */
10414 			/*
10415 			 * We don't want to mark the net->sent time here
10416 			 * since we use it for HBs, and retransmissions
10417 			 * cannot measure RTT.
10418 			 */
10419 			/* (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); */
10420 
10421 			/* For auto-close */
10422 			cnt_thru++;
10423 			if (*now_filled == 0) {
10424 				(void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
10425 				*now = asoc->time_last_sent;
10426 				*now_filled = 1;
10427 			} else {
10428 				asoc->time_last_sent = *now;
10429 			}
10430 			*cnt_out += bundle_at;
10431 #ifdef SCTP_AUDITING_ENABLED
10432 			sctp_audit_log(0xC4, bundle_at);
10433 #endif
10434 			if (bundle_at) {
10435 				tsns_sent = data_list[0]->rec.data.tsn;
10436 			}
10437 			for (i = 0; i < bundle_at; i++) {
10438 				SCTP_STAT_INCR(sctps_sendretransdata);
10439 				data_list[i]->sent = SCTP_DATAGRAM_SENT;
10440 				/*
10441 				 * When we retransmit a revoked chunk, we
10442 				 * clear the revoked flag, since this flag
10443 				 * dictates whether it was subtracted from
10444 				 * the flight size.
10445 				 */
10446 				if (data_list[i]->rec.data.chunk_was_revoked) {
10447 					/* Deflate the cwnd */
10448 					data_list[i]->whoTo->cwnd -= data_list[i]->book_size;
10449 					data_list[i]->rec.data.chunk_was_revoked = 0;
10450 				}
10451 				data_list[i]->snd_count++;
10452 				sctp_ucount_decr(asoc->sent_queue_retran_cnt);
10453 				/* record the time */
10454 				data_list[i]->sent_rcv_time = asoc->time_last_sent;
10455 				if (data_list[i]->book_size_scale) {
10456 					/*
10457 					 * need to double the book size on
10458 					 * this one
10459 					 */
10460 					data_list[i]->book_size_scale = 0;
10461 					/* Since we double the booksize, we must
10462 					 * also double the output queue size, since it
10463 					 * gets shrunk by this amount when we free.
10464 					 */
10465 					atomic_add_int(&((asoc)->total_output_queue_size),data_list[i]->book_size);
10466 					data_list[i]->book_size *= 2;
10467 
10469 				} else {
10470 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
10471 						sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
10472 						      asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
10473 					}
10474 					asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
10475 									    (uint32_t) (data_list[i]->send_size +
10476 											SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
10477 				}
10478 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
10479 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_RSND,
10480 						       data_list[i]->whoTo->flight_size,
10481 						       data_list[i]->book_size,
10482 						       (uint32_t)(uintptr_t)data_list[i]->whoTo,
10483 						       data_list[i]->rec.data.tsn);
10484 				}
10485 				sctp_flight_size_increase(data_list[i]);
10486 				sctp_total_flight_increase(stcb, data_list[i]);
10487 				if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
10488 					/* SWS sender side engages */
10489 					asoc->peers_rwnd = 0;
10490 				}
10491 				if ((i == 0) &&
10492 				    (data_list[i]->rec.data.doing_fast_retransmit)) {
10493 					SCTP_STAT_INCR(sctps_sendfastretrans);
10494 					if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) &&
10495 					    (tmr_started == 0)) {
10496 						/*-
10497 						 * ok we just fast-retrans'd
10498 						 * the lowest TSN, i.e. the
10499 						 * first on the list. In
10500 						 * this case we want to give
10501 						 * some more time to get a
10502 						 * SACK back without the
10503 						 * t3-timer expiring.
10504 						 */
10505 						sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
10506 						                SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_2);
10507 						sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
10508 					}
10509 				}
10510 			}
10511 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10512 				sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND);
10513 			}
10514 #ifdef SCTP_AUDITING_ENABLED
10515 			sctp_auditing(21, inp, stcb, NULL);
10516 #endif
10517 		} else {
10518 			/* None will fit */
10519 			return (1);
10520 		}
10521 		if (asoc->sent_queue_retran_cnt <= 0) {
10522 			/* all done we have no more to retran */
10523 			asoc->sent_queue_retran_cnt = 0;
10524 			break;
10525 		}
10526 		if (one_chunk) {
10527 			/* No more room in rwnd */
10528 			return (1);
10529 		}
10530 		/* stop the for loop here. we sent out a packet */
10531 		break;
10532 	}
10533 	return (0);
10534 }
10535 
10536 static void
10537 sctp_timer_validation(struct sctp_inpcb *inp,
10538     struct sctp_tcb *stcb,
10539     struct sctp_association *asoc)
10540 {
10541 	struct sctp_nets *net;
10542 
10543 	/* Validate that a timer is running somewhere */
10544 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
10545 		if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
10546 			/* Here is a timer */
10547 			return;
10548 		}
10549 	}
10550 	SCTP_TCB_LOCK_ASSERT(stcb);
10551 	/* Gak, we did not have a timer somewhere */
10552 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "Deadlock avoided starting timer on a dest at retran\n");
10553 	if (asoc->alternate) {
10554 		sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->alternate);
10555 	} else {
10556 		sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination);
10557 	}
10558 	return;
10559 }
10560 
10561 void
10562 sctp_chunk_output(struct sctp_inpcb *inp,
10563     struct sctp_tcb *stcb,
10564     int from_where,
10565     int so_locked)
10566 {
10567 	/*-
10568 	 * Ok, this is the generic chunk service queue. We must do the
10569 	 * following:
10570 	 * - See if there are retransmits pending; if so, we must
10571 	 *   do these first.
10572 	 * - Service the stream queue that is next, moving any
10573 	 *   message (note we must get a complete message, i.e.
10574 	 *   FIRST/MIDDLE and LAST, to the out queue in one pass) and
10575 	 *   assigning TSNs.
10576 	 * - Check to see if the cwnd/rwnd allows any output; if so,
10577 	 *   go ahead and formulate and send the low level chunks, making
10578 	 *   sure to combine any control in the control chunk queue also.
10579 	 */
10580 	struct sctp_association *asoc;
10581 	struct sctp_nets *net;
10582 	int error = 0, num_out, tot_out = 0, ret = 0, reason_code;
10583 	unsigned int burst_cnt = 0;
10584 	struct timeval now;
10585 	int now_filled = 0;
10586 	int nagle_on;
10587 	int frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
10588 	int un_sent = 0;
10589 	int fr_done;
10590 	unsigned int tot_frs = 0;
10591 
10592 #if defined(__APPLE__) && !defined(__Userspace__)
10593 	if (so_locked) {
10594 		sctp_lock_assert(SCTP_INP_SO(inp));
10595 	} else {
10596 		sctp_unlock_assert(SCTP_INP_SO(inp));
10597 	}
10598 #endif
10599 	asoc = &stcb->asoc;
10600 do_it_again:
10601 	/* The Nagle algorithm is only applied when handling a send call. */
10602 	if (from_where == SCTP_OUTPUT_FROM_USR_SEND) {
10603 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY)) {
10604 			nagle_on = 0;
10605 		} else {
10606 			nagle_on = 1;
10607 		}
10608 	} else {
10609 		nagle_on = 0;
10610 	}
10611 	SCTP_TCB_LOCK_ASSERT(stcb);
10612 
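	/* Bytes queued for the association that are not yet in flight. */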
10613 	un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
10614 
10615 	if ((un_sent <= 0) &&
10616 	    (TAILQ_EMPTY(&asoc->control_send_queue)) &&
10617 	    (TAILQ_EMPTY(&asoc->asconf_send_queue)) &&
10618 	    (asoc->sent_queue_retran_cnt == 0) &&
10619 	    (asoc->trigger_reset == 0)) {
10620 		/* Nothing to do unless there is something left to be sent */
10621 		return;
10622 	}
10623 	/* If we have something to send (data or control) AND
10624 	 * a SACK timer is running, piggy-back the SACK.
10625 	 */
10626 	if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
10627 		sctp_send_sack(stcb, so_locked);
10628 		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
10629 		                SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_3);
10630 	}
10631 	while (asoc->sent_queue_retran_cnt) {
10632 		/*-
10633 		 * Ok, it is retransmission time only, we send out only ONE
10634 		 * packet with a single call off to the retran code.
10635 		 */
10636 		if (from_where == SCTP_OUTPUT_FROM_COOKIE_ACK) {
10637 			/*-
10638 			 * Special hook for handling cookies that carried
10639 			 * data and were discarded by the peer. Send the
10640 			 * cookie-ack only; the next call will get the retrans.
10641 			 */
10642 			(void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
10643 						    from_where,
10644 						    &now, &now_filled, frag_point, so_locked);
10645 			return;
10646 		} else if (from_where != SCTP_OUTPUT_FROM_HB_TMR) {
10647 			/* if it's not from an HB then do it */
10648 			fr_done = 0;
10649 			ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done, so_locked);
10650 			if (fr_done) {
10651 				tot_frs++;
10652 			}
10653 		} else {
10654 			/*
10655 			 * it's from any other place; we don't allow retran
10656 			 * output (only control)
10657 			 */
10658 			ret = 1;
10659 		}
10660 		if (ret > 0) {
10661 			/* Can't send anymore */
10662 			/*-
10663 			 * now let's push out control by calling med-level
10664 			 * output once. This assures that we WILL send HBs
10665 			 * if queued, too.
10666 			 */
10667 			(void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
10668 						    from_where,
10669 						    &now, &now_filled, frag_point, so_locked);
10670 #ifdef SCTP_AUDITING_ENABLED
10671 			sctp_auditing(8, inp, stcb, NULL);
10672 #endif
10673 			sctp_timer_validation(inp, stcb, asoc);
10674 			return;
10675 		}
10676 		if (ret < 0) {
10677 			/*-
10678 			 * The count was off; retransmission is not happening,
10679 			 * so fall through to normal transmission.
10680 			 */
10681 #ifdef SCTP_AUDITING_ENABLED
10682 			sctp_auditing(9, inp, stcb, NULL);
10683 #endif
10684 			if (ret == SCTP_RETRAN_EXIT) {
10685 				return;
10686 			}
10687 			break;
10688 		}
10689 		if (from_where == SCTP_OUTPUT_FROM_T3) {
10690 			/* Only one transmission allowed out of a timeout */
10691 #ifdef SCTP_AUDITING_ENABLED
10692 			sctp_auditing(10, inp, stcb, NULL);
10693 #endif
10694 			/* Push out any control */
10695 			(void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, from_where,
10696 						    &now, &now_filled, frag_point, so_locked);
10697 			return;
10698 		}
10699 		if ((asoc->fr_max_burst > 0) && (tot_frs >= asoc->fr_max_burst)) {
10700 			/* Hit FR burst limit */
10701 			return;
10702 		}
10703 		if ((num_out == 0) && (ret == 0)) {
10704 			/* No more retrans to send */
10705 			break;
10706 		}
10707 	}
10708 #ifdef SCTP_AUDITING_ENABLED
10709 	sctp_auditing(12, inp, stcb, NULL);
10710 #endif
10711 	/* Check for bad destinations, if they exist move chunks around. */
10712 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
10713 		if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
10714 			/*-
10715 			 * If possible, move things off of this address. We
10716 			 * may still send below due to the dormant state, but
10717 			 * we try to find an alternate address to send to,
10718 			 * and if we have one, we move all queued data on the
10719 			 * out wheel to this alternate address.
10720 			 */
10721 			if (net->ref_count > 1)
10722 				sctp_move_chunks_from_net(stcb, net);
10723 		} else {
10724 			/*-
10725 			 * if ((asoc->sat_network) || (net->addr_is_local))
10726 			 * { burst_limit = asoc->max_burst *
10727 			 * SCTP_SAT_NETWORK_BURST_INCR; }
10728 			 */
10729 			if (asoc->max_burst > 0) {
10730 				if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst)) {
10731 					if ((net->flight_size + (asoc->max_burst * net->mtu)) < net->cwnd) {
10732 						/* JRS - Use the congestion control given in the congestion control module */
10733 						asoc->cc_functions.sctp_cwnd_update_after_output(stcb, net, asoc->max_burst);
10734 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10735 							sctp_log_maxburst(stcb, net, 0, asoc->max_burst, SCTP_MAX_BURST_APPLIED);
10736 						}
10737 						SCTP_STAT_INCR(sctps_maxburstqueued);
10738 					}
10739 					net->fast_retran_ip = 0;
10740 				} else {
10741 					if (net->flight_size == 0) {
10742 						/* Should be decaying the cwnd here */
10743 						;
10744 					}
10745 				}
10746 			}
10747 		}
10748 
10749 	}
10750 	burst_cnt = 0;
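	/*
	 * Main output loop: keep calling the med-level output routine
	 * until nothing more goes out, nothing is left to send, or the
	 * burst limit is hit (unless cwnd-based max burst is in use).
	 */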
10751 	do {
10752 		error = sctp_med_chunk_output(inp, stcb, asoc, &num_out,
10753 					      &reason_code, 0, from_where,
10754 					      &now, &now_filled, frag_point, so_locked);
10755 		if (error) {
10756 			SCTPDBG(SCTP_DEBUG_OUTPUT1, "Error %d was returned from med-c-op\n", error);
10757 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10758 				sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP);
10759 			}
10760 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10761 				sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES);
10762 				sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES);
10763 			}
10764 			break;
10765 		}
10766 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "m-c-o put out %d\n", num_out);
10767 
10768 		tot_out += num_out;
10769 		burst_cnt++;
10770 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10771 			sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES);
10772 			if (num_out == 0) {
10773 				sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES);
10774 			}
10775 		}
10776 		if (nagle_on) {
10777 			/*
10778 			 * When the Nagle algorithm is used, look at how much
10779 			 * is unsent; if it's smaller than an MTU and we
10780 			 * have data in flight, we stop, except if we are
10781 			 * handling a fragmented user message.
10782 			 */
10783 			un_sent = stcb->asoc.total_output_queue_size - stcb->asoc.total_flight;
10784 			if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) &&
10785 			    (stcb->asoc.total_flight > 0)) {
10786 /*	&&		     sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {*/
10787 				break;
10788 			}
10789 		}
10790 		if (TAILQ_EMPTY(&asoc->control_send_queue) &&
10791 		    TAILQ_EMPTY(&asoc->send_queue) &&
10792 		    sctp_is_there_unsent_data(stcb, so_locked) == 0) {
10793 			/* Nothing left to send */
10794 			break;
10795 		}
10796 		if ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) <= 0) {
10797 			/* Nothing left to send */
10798 			break;
10799 		}
10800 	} while (num_out &&
10801 	         ((asoc->max_burst == 0) ||
10802 		  SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) ||
10803 		  (burst_cnt < asoc->max_burst)));
10804 
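	/*
	 * If we are not using cwnd-based max burst, note whether the burst
	 * limit was what stopped us, for statistics and logging.
	 */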
10805 	if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) == 0) {
10806 		if ((asoc->max_burst > 0) && (burst_cnt >= asoc->max_burst)) {
10807 			SCTP_STAT_INCR(sctps_maxburstqueued);
10808 			asoc->burst_limit_applied = 1;
10809 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10810 				sctp_log_maxburst(stcb, asoc->primary_destination, 0, burst_cnt, SCTP_MAX_BURST_APPLIED);
10811 			}
10812 		} else {
10813 			asoc->burst_limit_applied = 0;
10814 		}
10815 	}
10816 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10817 		sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES);
10818 	}
10819 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, we have put out %d chunks\n",
10820 		tot_out);
10821 
	/*-
	 * Now we need to clean up the control chunk chain if an ECNE is on
	 * it. It must be marked as UNSENT again so the next call will continue
	 * to send it until we get a CWR, which removes it.
	 */
10827 	if (stcb->asoc.ecn_echo_cnt_onq)
10828 		sctp_fix_ecn_echo(asoc);
10829 
10830 	if (stcb->asoc.trigger_reset) {
10831 		if (sctp_send_stream_reset_out_if_possible(stcb, so_locked) == 0)  {
10832 			goto do_it_again;
10833 		}
10834 	}
10835 	return;
10836 }
10837 
10838 
10839 int
10840 sctp_output(
10841 	struct sctp_inpcb *inp,
10842 	struct mbuf *m,
10843 	struct sockaddr *addr,
10844 	struct mbuf *control,
10845 #if defined(__FreeBSD__) && !defined(__Userspace__)
10846 	struct thread *p,
10847 #elif defined(_WIN32) && !defined(__Userspace__)
10848 	PKTHREAD p,
10849 #else
10850 #if defined(__APPLE__) && !defined(__Userspace__)
10851 	struct proc *p SCTP_UNUSED,
10852 #else
10853 	struct proc *p,
10854 #endif
10855 #endif
10856 	int flags)
10857 {
10858 	if (inp == NULL) {
10859 		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
10860 		return (EINVAL);
10861 	}
10862 
10863 	if (inp->sctp_socket == NULL) {
10864 		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
10865 		return (EINVAL);
10866 	}
10867 	return (sctp_sosend(inp->sctp_socket,
10868 			    addr,
10869 			    (struct uio *)NULL,
10870 			    m,
10871 			    control,
10872 #if defined(__APPLE__) && !defined(__Userspace__)
10873 			    flags
10874 #else
10875 			    flags, p
10876 #endif
10877 			));
10878 }
10879 
10880 void
10881 send_forward_tsn(struct sctp_tcb *stcb,
10882 		 struct sctp_association *asoc)
10883 {
10884 	struct sctp_tmit_chunk *chk, *at, *tp1, *last;
10885 	struct sctp_forward_tsn_chunk *fwdtsn;
10886 	struct sctp_strseq *strseq;
10887 	struct sctp_strseq_mid *strseq_m;
10888 	uint32_t advance_peer_ack_point;
10889 	unsigned int cnt_of_space, i, ovh;
10890 	unsigned int space_needed;
10891 	unsigned int cnt_of_skipped = 0;
10892 
10893 	SCTP_TCB_LOCK_ASSERT(stcb);
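	/*
	 * If a FORWARD-TSN chunk is already on the control queue, reuse it
	 * instead of allocating a new one.
	 */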
10894 	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10895 		if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
			/* mark it as unsent */
10897 			chk->sent = SCTP_DATAGRAM_UNSENT;
10898 			chk->snd_count = 0;
10899 			/* Do we correct its output location? */
10900 			if (chk->whoTo) {
10901 				sctp_free_remote_addr(chk->whoTo);
10902 				chk->whoTo = NULL;
10903 			}
10904 			goto sctp_fill_in_rest;
10905 		}
10906 	}
	/* OK, if we reach here we must build one. */
10908 	sctp_alloc_a_chunk(stcb, chk);
10909 	if (chk == NULL) {
10910 		return;
10911 	}
10912 	asoc->fwd_tsn_cnt++;
10913 	chk->copy_by_ref = 0;
	/*
	 * We don't distinguish IFORWARD from FORWARD here, since
	 * this id is not used on the wire but only to tell the
	 * stack during output that we are sending a FWD-TSN.
	 * Whether it is an IFORWARD or a FORWARD, it is a FWD-TSN.
	 */
10921 	chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN;
10922 	chk->rec.chunk_id.can_take_data = 0;
10923 	chk->flags = 0;
10924 	chk->asoc = asoc;
10925 	chk->whoTo = NULL;
10926 	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
10927 	if (chk->data == NULL) {
10928 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
10929 		return;
10930 	}
10931 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
10932 	chk->sent = SCTP_DATAGRAM_UNSENT;
10933 	chk->snd_count = 0;
10934 	TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next);
10935 	asoc->ctrl_queue_cnt++;
10936 sctp_fill_in_rest:
	/*-
	 * Here we go through and fill out the part that deals with the
	 * stream/sequence information of the ones we skip.
	 */
10941 	SCTP_BUF_LEN(chk->data) = 0;
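	/*
	 * Count how many entries at the head of the sent queue can be
	 * skipped over (i.e., are marked for forward-TSN or already acked).
	 */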
10942 	TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
10943 		if ((at->sent != SCTP_FORWARD_TSN_SKIP) &&
10944 		    (at->sent != SCTP_DATAGRAM_NR_ACKED)) {
10945 			/* no more to look at */
10946 			break;
10947 		}
10948 		if (!asoc->idata_supported && (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
10949 			/* We don't report these */
10950 			continue;
10951 		}
10952 		cnt_of_skipped++;
10953 	}
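	/*
	 * The space needed depends on whether I-DATA is supported, since the
	 * per-stream entries are larger (strseq_mid vs. strseq).
	 */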
10954 	if (asoc->idata_supported) {
10955 		space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
10956 		                (cnt_of_skipped * sizeof(struct sctp_strseq_mid)));
10957 	} else {
10958 		space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
10959 		                (cnt_of_skipped * sizeof(struct sctp_strseq)));
10960 	}
10961 	cnt_of_space = (unsigned int)M_TRAILINGSPACE(chk->data);
10962 
10963 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
10964 		ovh = SCTP_MIN_OVERHEAD;
10965 	} else {
10966 		ovh = SCTP_MIN_V4_OVERHEAD;
10967 	}
10968 	if (cnt_of_space > (asoc->smallest_mtu - ovh)) {
		/* trim to an MTU size */
10970 		cnt_of_space = asoc->smallest_mtu - ovh;
10971 	}
10972 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10973 		sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10974 			       0xff, 0, cnt_of_skipped,
10975 			       asoc->advanced_peer_ack_point);
10976 	}
10977 	advance_peer_ack_point = asoc->advanced_peer_ack_point;
10978 	if (cnt_of_space < space_needed) {
		/*-
		 * OK, we must trim down the chunk by lowering the
		 * advanced peer ack point.
		 */
10983 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10984 			sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10985 				       0xff, 0xff, cnt_of_space,
10986 				       space_needed);
10987 		}
10988 		cnt_of_skipped = cnt_of_space - sizeof(struct sctp_forward_tsn_chunk);
10989 		if (asoc->idata_supported) {
10990 			cnt_of_skipped /= sizeof(struct sctp_strseq_mid);
10991 		} else {
10992 			cnt_of_skipped /= sizeof(struct sctp_strseq);
10993 		}
10994 		/*-
10995 		 * Go through and find the TSN that will be the one
10996 		 * we report.
10997 		 */
10998 		at = TAILQ_FIRST(&asoc->sent_queue);
10999 		if (at != NULL) {
11000 			for (i = 0; i < cnt_of_skipped; i++) {
11001 				tp1 = TAILQ_NEXT(at, sctp_next);
11002 				if (tp1 == NULL) {
11003 					break;
11004 				}
11005 				at = tp1;
11006 			}
11007 		}
		if ((at != NULL) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE)) {
11009 			sctp_misc_ints(SCTP_FWD_TSN_CHECK,
11010 				       0xff, cnt_of_skipped, at->rec.data.tsn,
11011 				       asoc->advanced_peer_ack_point);
11012 		}
11013 		last = at;
		/*-
		 * last now points to the last one we can report; update
		 * the peer ack point.
		 */
11018 		if (last) {
11019 			advance_peer_ack_point = last->rec.data.tsn;
11020 		}
11021 		if (asoc->idata_supported) {
11022 			space_needed = sizeof(struct sctp_forward_tsn_chunk) +
11023 			               cnt_of_skipped * sizeof(struct sctp_strseq_mid);
11024 		} else {
11025 			space_needed = sizeof(struct sctp_forward_tsn_chunk) +
11026 			               cnt_of_skipped * sizeof(struct sctp_strseq);
11027 		}
11028 	}
11029 	chk->send_size = space_needed;
11030 	/* Setup the chunk */
11031 	fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *);
11032 	fwdtsn->ch.chunk_length = htons(chk->send_size);
11033 	fwdtsn->ch.chunk_flags = 0;
11034 	if (asoc->idata_supported) {
11035 		fwdtsn->ch.chunk_type = SCTP_IFORWARD_CUM_TSN;
11036 	} else {
11037 		fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN;
11038 	}
11039 	fwdtsn->new_cumulative_tsn = htonl(advance_peer_ack_point);
11040 	SCTP_BUF_LEN(chk->data) = chk->send_size;
11041 	fwdtsn++;
11042 	/*-
11043 	 * Move pointer to after the fwdtsn and transfer to the
11044 	 * strseq pointer.
11045 	 */
11046 	if (asoc->idata_supported) {
11047 		strseq_m = (struct sctp_strseq_mid *)fwdtsn;
11048 		strseq = NULL;
11049 	} else {
11050 		strseq = (struct sctp_strseq *)fwdtsn;
11051 		strseq_m = NULL;
11052 	}
	/*-
	 * Now populate the strseq list. This is done blindly,
	 * without pulling out duplicate stream info. This is
	 * inefficient but won't harm the process, since the peer will
	 * look at these in sequence and will thus release anything.
	 * It could mean we exceed the PMTU and chop off some entries
	 * that we could have included, but this is unlikely (1432/4
	 * would mean 300+ stream/sequence pairs would have to be
	 * reported in one FWD-TSN). With a bit of work we could later
	 * optimize this and pull out duplicates, but that adds more
	 * overhead, so for now we don't.
	 */
11065 	i = 0;
11066 	TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
11067 		if (i >= cnt_of_skipped) {
11068 			break;
11069 		}
11070 		if (!asoc->idata_supported && (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
11071 			/* We don't report these */
11072 			continue;
11073 		}
11074 		if (at->rec.data.tsn == advance_peer_ack_point) {
11075 			at->rec.data.fwd_tsn_cnt = 0;
11076 		}
11077 		if (asoc->idata_supported) {
11078 			strseq_m->sid = htons(at->rec.data.sid);
11079 			if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
11080 				strseq_m->flags = htons(PR_SCTP_UNORDERED_FLAG);
11081 			} else {
11082 				strseq_m->flags = 0;
11083 			}
11084 			strseq_m->mid = htonl(at->rec.data.mid);
11085 			strseq_m++;
11086 		} else {
11087 			strseq->sid = htons(at->rec.data.sid);
11088 			strseq->ssn = htons((uint16_t)at->rec.data.mid);
11089 			strseq++;
11090 		}
11091 		i++;
11092 	}
11093 	return;
11094 }
11095 
11096 void
11097 sctp_send_sack(struct sctp_tcb *stcb, int so_locked)
11098 {
	/*-
	 * Queue up a SACK or NR-SACK in the control queue.
	 * We must first check to see if a SACK or NR-SACK is
	 * already somewhere on the control queue.
	 * If so, we remove the old one and reuse the chunk.
	 */
11105 	struct sctp_association *asoc;
11106 	struct sctp_tmit_chunk *chk, *a_chk;
11107 	struct sctp_sack_chunk *sack;
11108 	struct sctp_nr_sack_chunk *nr_sack;
11109 	struct sctp_gap_ack_block *gap_descriptor;
11110 	const struct sack_track *selector;
11111 	int mergeable = 0;
11112 	int offset;
11113 	caddr_t limit;
11114 	uint32_t *dup;
11115 	int limit_reached = 0;
11116 	unsigned int i, siz, j;
11117 	unsigned int num_gap_blocks = 0, num_nr_gap_blocks = 0, space;
11118 	int num_dups = 0;
11119 	int space_req;
11120 	uint32_t highest_tsn;
11121 	uint8_t flags;
11122 	uint8_t type;
11123 	uint8_t tsn_map;
11124 
11125 	if (stcb->asoc.nrsack_supported == 1) {
11126 		type = SCTP_NR_SELECTIVE_ACK;
11127 	} else {
11128 		type = SCTP_SELECTIVE_ACK;
11129 	}
11130 	a_chk = NULL;
11131 	asoc = &stcb->asoc;
11132 	SCTP_TCB_LOCK_ASSERT(stcb);
11133 	if (asoc->last_data_chunk_from == NULL) {
11134 		/* Hmm we never received anything */
11135 		return;
11136 	}
11137 	sctp_slide_mapping_arrays(stcb);
11138 	sctp_set_rwnd(stcb, asoc);
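	/*
	 * If a SACK or NR-SACK is already queued, pull it off so that it can
	 * be rebuilt with the current state.
	 */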
11139 	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
11140 		if (chk->rec.chunk_id.id == type) {
			/* Found a SACK already on the queue; remove it. */
11142 			TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
11143 			asoc->ctrl_queue_cnt--;
11144 			a_chk = chk;
11145 			if (a_chk->data) {
11146 				sctp_m_freem(a_chk->data);
11147 				a_chk->data = NULL;
11148 			}
11149 			if (a_chk->whoTo) {
11150 				sctp_free_remote_addr(a_chk->whoTo);
11151 				a_chk->whoTo = NULL;
11152 			}
11153 			break;
11154 		}
11155 	}
11156 	if (a_chk == NULL) {
11157 		sctp_alloc_a_chunk(stcb, a_chk);
11158 		if (a_chk == NULL) {
11159 			/* No memory so we drop the idea, and set a timer */
11160 			if (stcb->asoc.delayed_ack) {
11161 				sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
11162 				                stcb->sctp_ep, stcb, NULL,
11163 				                SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_4);
11164 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
11165 				    stcb->sctp_ep, stcb, NULL);
11166 			} else {
11167 				stcb->asoc.send_sack = 1;
11168 			}
11169 			return;
11170 		}
11171 		a_chk->copy_by_ref = 0;
11172 		a_chk->rec.chunk_id.id = type;
11173 		a_chk->rec.chunk_id.can_take_data = 1;
11174 	}
11175 	/* Clear our pkt counts */
11176 	asoc->data_pkts_seen = 0;
11177 
11178 	a_chk->flags = 0;
11179 	a_chk->asoc = asoc;
11180 	a_chk->snd_count = 0;
11181 	a_chk->send_size = 0;	/* fill in later */
11182 	a_chk->sent = SCTP_DATAGRAM_UNSENT;
11183 	a_chk->whoTo = NULL;
11184 
11185 	if (!(asoc->last_data_chunk_from->dest_state & SCTP_ADDR_REACHABLE)) {
		/*-
		 * OK, the destination for the SACK is unreachable; let's see if
		 * we can select an alternate to asoc->last_data_chunk_from.
		 */
11190 		a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0);
11191 		if (a_chk->whoTo == NULL) {
11192 			/* Nope, no alternate */
11193 			a_chk->whoTo = asoc->last_data_chunk_from;
11194 		}
11195 	} else {
11196 		a_chk->whoTo = asoc->last_data_chunk_from;
11197 	}
11198 	if (a_chk->whoTo) {
11199 		atomic_add_int(&a_chk->whoTo->ref_count, 1);
11200 	}
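	/*
	 * Determine the highest TSN across both mapping arrays to see
	 * whether any gaps need to be reported.
	 */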
11201 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map)) {
11202 		highest_tsn = asoc->highest_tsn_inside_map;
11203 	} else {
11204 		highest_tsn = asoc->highest_tsn_inside_nr_map;
11205 	}
11206 	if (highest_tsn == asoc->cumulative_tsn) {
11207 		/* no gaps */
11208 		if (type == SCTP_SELECTIVE_ACK) {
11209 			space_req = sizeof(struct sctp_sack_chunk);
11210 		} else {
11211 			space_req = sizeof(struct sctp_nr_sack_chunk);
11212 		}
11213 	} else {
11214 		/* gaps get a cluster */
11215 		space_req = MCLBYTES;
11216 	}
	/* OK, now let's formulate an mbuf with our SACK. */
11218 	a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_NOWAIT, 1, MT_DATA);
11219 	if ((a_chk->data == NULL) ||
11220 	    (a_chk->whoTo == NULL)) {
11221 		/* rats, no mbuf memory */
11222 		if (a_chk->data) {
11223 			/* was a problem with the destination */
11224 			sctp_m_freem(a_chk->data);
11225 			a_chk->data = NULL;
11226 		}
11227 		sctp_free_a_chunk(stcb, a_chk, so_locked);
11228 		/* sa_ignore NO_NULL_CHK */
11229 		if (stcb->asoc.delayed_ack) {
11230 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
11231 			                stcb->sctp_ep, stcb, NULL,
11232 			                SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5);
11233 			sctp_timer_start(SCTP_TIMER_TYPE_RECV,
11234 			    stcb->sctp_ep, stcb, NULL);
11235 		} else {
11236 			stcb->asoc.send_sack = 1;
11237 		}
11238 		return;
11239 	}
11240 	/* ok, lets go through and fill it in */
11241 	SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD);
11242 	space = (unsigned int)M_TRAILINGSPACE(a_chk->data);
11243 	if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) {
11244 		space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD);
11245 	}
11246 	limit = mtod(a_chk->data, caddr_t);
11247 	limit += space;
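	/*
	 * limit marks the end of the usable space in the mbuf for gap ack
	 * blocks and duplicate TSN reports.
	 */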
11248 
11249 	flags = 0;
11250 
11251 	if ((asoc->sctp_cmt_on_off > 0) &&
11252 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
		/*-
		 * CMT DAC algorithm: if 2 (i.e., binary 10) packets have been
		 * received, the shift below sets the high bit to 1, else 0.
		 * Then reset pkts_rcvd.
		 */
11258 		flags |= (asoc->cmt_dac_pkts_rcvd << 6);
11259 		asoc->cmt_dac_pkts_rcvd = 0;
11260 	}
11261 #ifdef SCTP_ASOCLOG_OF_TSNS
11262 	stcb->asoc.cumack_logsnt[stcb->asoc.cumack_log_atsnt] = asoc->cumulative_tsn;
11263 	stcb->asoc.cumack_log_atsnt++;
11264 	if (stcb->asoc.cumack_log_atsnt >= SCTP_TSN_LOG_SIZE) {
11265 		stcb->asoc.cumack_log_atsnt = 0;
11266 	}
11267 #endif
	/* reset the reader's interpretation */
11269 	stcb->freed_by_sorcv_sincelast = 0;
11270 
11271 	if (type == SCTP_SELECTIVE_ACK) {
11272 		sack = mtod(a_chk->data, struct sctp_sack_chunk *);
11273 		nr_sack = NULL;
11274 		gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk));
11275 		if (highest_tsn > asoc->mapping_array_base_tsn) {
11276 			siz = (((highest_tsn - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
11277 		} else {
11278 			siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + highest_tsn + 7) / 8;
11279 		}
11280 	} else {
11281 		sack = NULL;
11282 		nr_sack = mtod(a_chk->data, struct sctp_nr_sack_chunk *);
11283 		gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)nr_sack + sizeof(struct sctp_nr_sack_chunk));
11284 		if (asoc->highest_tsn_inside_map > asoc->mapping_array_base_tsn) {
11285 			siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
11286 		} else {
11287 			siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_map + 7) / 8;
11288 		}
11289 	}
11290 
11291 	if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
11292 		offset = 1;
11293 	} else {
11294 		offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
11295 	}
11296 	if (((type == SCTP_SELECTIVE_ACK) &&
11297 	     SCTP_TSN_GT(highest_tsn, asoc->cumulative_tsn)) ||
11298 	    ((type == SCTP_NR_SELECTIVE_ACK) &&
11299 	     SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->cumulative_tsn))) {
11300 		/* we have a gap .. maybe */
11301 		for (i = 0; i < siz; i++) {
11302 			tsn_map = asoc->mapping_array[i];
11303 			if (type == SCTP_SELECTIVE_ACK) {
11304 				tsn_map |= asoc->nr_mapping_array[i];
11305 			}
11306 			if (i == 0) {
11307 				/*
11308 				 * Clear all bits corresponding to TSNs
11309 				 * smaller or equal to the cumulative TSN.
11310 				 */
11311 				tsn_map &= (~0U << (1 - offset));
11312 			}
11313 			selector = &sack_array[tsn_map];
11314 			if (mergeable && selector->right_edge) {
				/*
				 * Back up; the left and right edges were OK to
				 * merge.
				 */
11319 				num_gap_blocks--;
11320 				gap_descriptor--;
11321 			}
11322 			if (selector->num_entries == 0)
11323 				mergeable = 0;
11324 			else {
11325 				for (j = 0; j < selector->num_entries; j++) {
11326 					if (mergeable && selector->right_edge) {
11327 						/*
11328 						 * do a merge by NOT setting
11329 						 * the left side
11330 						 */
11331 						mergeable = 0;
11332 					} else {
11333 						/*
11334 						 * no merge, set the left
11335 						 * side
11336 						 */
11337 						mergeable = 0;
11338 						gap_descriptor->start = htons((selector->gaps[j].start + offset));
11339 					}
11340 					gap_descriptor->end = htons((selector->gaps[j].end + offset));
11341 					num_gap_blocks++;
11342 					gap_descriptor++;
11343 					if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
11344 						/* no more room */
11345 						limit_reached = 1;
11346 						break;
11347 					}
11348 				}
11349 				if (selector->left_edge) {
11350 					mergeable = 1;
11351 				}
11352 			}
11353 			if (limit_reached) {
			/* Reached the limit; stop. */
11355 				break;
11356 			}
11357 			offset += 8;
11358 		}
11359 	}
11360 	if ((type == SCTP_NR_SELECTIVE_ACK) &&
11361 	    (limit_reached == 0)) {
11362 
11363 		mergeable = 0;
11364 
11365 		if (asoc->highest_tsn_inside_nr_map > asoc->mapping_array_base_tsn) {
11366 			siz = (((asoc->highest_tsn_inside_nr_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
11367 		} else {
11368 			siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_nr_map + 7) / 8;
11369 		}
11370 
11371 		if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
11372 			offset = 1;
11373 		} else {
11374 			offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
11375 		}
11376 		if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->cumulative_tsn)) {
11377 			/* we have a gap .. maybe */
11378 			for (i = 0; i < siz; i++) {
11379 				tsn_map = asoc->nr_mapping_array[i];
11380 				if (i == 0) {
11381 					/*
11382 					 * Clear all bits corresponding to TSNs
11383 					 * smaller or equal to the cumulative TSN.
11384 					 */
11385 					tsn_map &= (~0U << (1 - offset));
11386 				}
11387 				selector = &sack_array[tsn_map];
11388 				if (mergeable && selector->right_edge) {
					/*
					 * Back up; the left and right edges were OK to
					 * merge.
					 */
11393 					num_nr_gap_blocks--;
11394 					gap_descriptor--;
11395 				}
11396 				if (selector->num_entries == 0)
11397 					mergeable = 0;
11398 				else {
11399 					for (j = 0; j < selector->num_entries; j++) {
11400 						if (mergeable && selector->right_edge) {
							/*
							 * do a merge by NOT setting
							 * the left side
							 */
11405 							mergeable = 0;
11406 						} else {
							/*
							 * no merge, set the left
							 * side
							 */
11411 							mergeable = 0;
11412 							gap_descriptor->start = htons((selector->gaps[j].start + offset));
11413 						}
11414 						gap_descriptor->end = htons((selector->gaps[j].end + offset));
11415 						num_nr_gap_blocks++;
11416 						gap_descriptor++;
11417 						if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
11418 							/* no more room */
11419 							limit_reached = 1;
11420 							break;
11421 						}
11422 					}
11423 					if (selector->left_edge) {
11424 						mergeable = 1;
11425 					}
11426 				}
11427 				if (limit_reached) {
				/* Reached the limit; stop. */
11429 					break;
11430 				}
11431 				offset += 8;
11432 			}
11433 		}
11434 	}
11435 	/* now we must add any dups we are going to report. */
11436 	if ((limit_reached == 0) && (asoc->numduptsns)) {
11437 		dup = (uint32_t *) gap_descriptor;
11438 		for (i = 0; i < asoc->numduptsns; i++) {
11439 			*dup = htonl(asoc->dup_tsns[i]);
11440 			dup++;
11441 			num_dups++;
11442 			if (((caddr_t)dup + sizeof(uint32_t)) > limit) {
11443 				/* no more room */
11444 				break;
11445 			}
11446 		}
11447 		asoc->numduptsns = 0;
11448 	}
	/*
	 * Now that the chunk is prepared, queue it to the control chunk
	 * queue.
	 */
11453 	if (type == SCTP_SELECTIVE_ACK) {
11454 		a_chk->send_size = (uint16_t)(sizeof(struct sctp_sack_chunk) +
11455 		                              (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
11456 		                              num_dups * sizeof(int32_t));
11457 		SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
11458 		sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
11459 		sack->sack.a_rwnd = htonl(asoc->my_rwnd);
11460 		sack->sack.num_gap_ack_blks = htons(num_gap_blocks);
11461 		sack->sack.num_dup_tsns = htons(num_dups);
11462 		sack->ch.chunk_type = type;
11463 		sack->ch.chunk_flags = flags;
11464 		sack->ch.chunk_length = htons(a_chk->send_size);
11465 	} else {
11466 		a_chk->send_size = (uint16_t)(sizeof(struct sctp_nr_sack_chunk) +
11467 		                              (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
11468 		                              num_dups * sizeof(int32_t));
11469 		SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
11470 		nr_sack->nr_sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
11471 		nr_sack->nr_sack.a_rwnd = htonl(asoc->my_rwnd);
11472 		nr_sack->nr_sack.num_gap_ack_blks = htons(num_gap_blocks);
11473 		nr_sack->nr_sack.num_nr_gap_ack_blks = htons(num_nr_gap_blocks);
11474 		nr_sack->nr_sack.num_dup_tsns = htons(num_dups);
11475 		nr_sack->nr_sack.reserved = 0;
11476 		nr_sack->ch.chunk_type = type;
11477 		nr_sack->ch.chunk_flags = flags;
11478 		nr_sack->ch.chunk_length = htons(a_chk->send_size);
11479 	}
11480 	TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
11481 	asoc->my_last_reported_rwnd = asoc->my_rwnd;
11482 	asoc->ctrl_queue_cnt++;
11483 	asoc->send_sack = 0;
11484 	SCTP_STAT_INCR(sctps_sendsacks);
11485 	return;
11486 }
11487 
11488 void
11489 sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked)
11490 {
11491 	struct mbuf *m_abort, *m, *m_last;
11492 	struct mbuf *m_out, *m_end = NULL;
11493 	struct sctp_abort_chunk *abort;
11494 	struct sctp_auth_chunk *auth = NULL;
11495 	struct sctp_nets *net;
11496 	uint32_t vtag;
11497 	uint32_t auth_offset = 0;
11498 	int error;
11499 	uint16_t cause_len, chunk_len, padding_len;
11500 
11501 #if defined(__APPLE__) && !defined(__Userspace__)
11502 	if (so_locked) {
11503 		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
11504 	} else {
11505 		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
11506 	}
11507 #endif
11508 	SCTP_TCB_LOCK_ASSERT(stcb);
	/*-
	 * Add an AUTH chunk, if this chunk requires it, and save the offset
	 * into the chain for AUTH.
	 */
11513 	if (sctp_auth_is_required_chunk(SCTP_ABORT_ASSOCIATION,
11514 	                                stcb->asoc.peer_auth_chunks)) {
11515 		m_out = sctp_add_auth_chunk(NULL, &m_end, &auth, &auth_offset,
11516 					    stcb, SCTP_ABORT_ASSOCIATION);
11517 		SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
11518 	} else {
11519 		m_out = NULL;
11520 	}
11521 	m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_NOWAIT, 1, MT_HEADER);
11522 	if (m_abort == NULL) {
11523 		if (m_out) {
11524 			sctp_m_freem(m_out);
11525 		}
11526 		if (operr) {
11527 			sctp_m_freem(operr);
11528 		}
11529 		return;
11530 	}
11531 	/* link in any error */
11532 	SCTP_BUF_NEXT(m_abort) = operr;
11533 	cause_len = 0;
11534 	m_last = NULL;
11535 	for (m = operr; m; m = SCTP_BUF_NEXT(m)) {
11536 		cause_len += (uint16_t)SCTP_BUF_LEN(m);
11537 		if (SCTP_BUF_NEXT(m) == NULL) {
11538 			m_last = m;
11539 		}
11540 	}
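	/*
	 * The chunk length covers the ABORT chunk header plus all error
	 * causes; the chunk is padded to a 4-byte boundary.
	 */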
11541 	SCTP_BUF_LEN(m_abort) = sizeof(struct sctp_abort_chunk);
11542 	chunk_len = (uint16_t)sizeof(struct sctp_abort_chunk) + cause_len;
11543 	padding_len = SCTP_SIZE32(chunk_len) - chunk_len;
11544 	if (m_out == NULL) {
11545 		/* NO Auth chunk prepended, so reserve space in front */
11546 		SCTP_BUF_RESV_UF(m_abort, SCTP_MIN_OVERHEAD);
11547 		m_out = m_abort;
11548 	} else {
11549 		/* Put AUTH chunk at the front of the chain */
11550 		SCTP_BUF_NEXT(m_end) = m_abort;
11551 	}
11552 	if (stcb->asoc.alternate) {
11553 		net = stcb->asoc.alternate;
11554 	} else {
11555 		net = stcb->asoc.primary_destination;
11556 	}
11557 	/* Fill in the ABORT chunk header. */
11558 	abort = mtod(m_abort, struct sctp_abort_chunk *);
11559 	abort->ch.chunk_type = SCTP_ABORT_ASSOCIATION;
11560 	if (stcb->asoc.peer_vtag == 0) {
11561 		/* This happens iff the assoc is in COOKIE-WAIT state. */
11562 		vtag = stcb->asoc.my_vtag;
11563 		abort->ch.chunk_flags = SCTP_HAD_NO_TCB;
11564 	} else {
11565 		vtag = stcb->asoc.peer_vtag;
11566 		abort->ch.chunk_flags = 0;
11567 	}
11568 	abort->ch.chunk_length = htons(chunk_len);
11569 	/* Add padding, if necessary. */
11570 	if (padding_len > 0) {
11571 		if ((m_last == NULL) ||
11572 		    (sctp_add_pad_tombuf(m_last, padding_len) == NULL)) {
11573 			sctp_m_freem(m_out);
11574 			return;
11575 		}
11576 	}
11577 	if ((error = sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
11578 	                                        (struct sockaddr *)&net->ro._l_addr,
11579 	                                        m_out, auth_offset, auth, stcb->asoc.authinfo.active_keyid, 1, 0, 0,
11580 	                                        stcb->sctp_ep->sctp_lport, stcb->rport, htonl(vtag),
11581 	                                        stcb->asoc.primary_destination->port, NULL,
11582 #if defined(__FreeBSD__) && !defined(__Userspace__)
11583 	                                        0, 0,
11584 #endif
11585 	                                        so_locked))) {
11586 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
11587 		if (error == ENOBUFS) {
11588 			stcb->asoc.ifp_had_enobuf = 1;
11589 			SCTP_STAT_INCR(sctps_lowlevelerr);
11590 		}
11591 	} else {
11592 		stcb->asoc.ifp_had_enobuf = 0;
11593 	}
11594 	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
11595 }
11596 
11597 void
11598 sctp_send_shutdown_complete(struct sctp_tcb *stcb,
11599                             struct sctp_nets *net,
11600                             int reflect_vtag)
11601 {
	/* Formulate and send a SHUTDOWN-COMPLETE. */
11603 	struct mbuf *m_shutdown_comp;
11604 	struct sctp_shutdown_complete_chunk *shutdown_complete;
11605 	uint32_t vtag;
11606 	int error;
11607 	uint8_t flags;
11608 
11609 	m_shutdown_comp = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER);
11610 	if (m_shutdown_comp == NULL) {
		/* no mbufs */
11612 		return;
11613 	}
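	/*
	 * When reflecting the verification tag, use our own vtag and mark
	 * the chunk as sent without a TCB (T bit).
	 */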
11614 	if (reflect_vtag) {
11615 		flags = SCTP_HAD_NO_TCB;
11616 		vtag = stcb->asoc.my_vtag;
11617 	} else {
11618 		flags = 0;
11619 		vtag = stcb->asoc.peer_vtag;
11620 	}
11621 	shutdown_complete = mtod(m_shutdown_comp, struct sctp_shutdown_complete_chunk *);
11622 	shutdown_complete->ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
11623 	shutdown_complete->ch.chunk_flags = flags;
11624 	shutdown_complete->ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
11625 	SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_chunk);
11626 	if ((error = sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
11627 	                                        (struct sockaddr *)&net->ro._l_addr,
11628 	                                        m_shutdown_comp, 0, NULL, 0, 1, 0, 0,
11629 	                                        stcb->sctp_ep->sctp_lport, stcb->rport,
11630 	                                        htonl(vtag),
11631 	                                        net->port, NULL,
11632 #if defined(__FreeBSD__) && !defined(__Userspace__)
11633 	                                        0, 0,
11634 #endif
11635 	                                        SCTP_SO_NOT_LOCKED))) {
11636 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
11637 		if (error == ENOBUFS) {
11638 			stcb->asoc.ifp_had_enobuf = 1;
11639 			SCTP_STAT_INCR(sctps_lowlevelerr);
11640 		}
11641 	} else {
11642 		stcb->asoc.ifp_had_enobuf = 0;
11643 	}
11644 	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
11645 	return;
11646 }
11647 
11648 #if defined(__FreeBSD__) && !defined(__Userspace__)
11649 static void
11650 sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst,
11651                    struct sctphdr *sh, uint32_t vtag,
11652                    uint8_t type, struct mbuf *cause,
11653                    uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
11654                    uint32_t vrf_id, uint16_t port)
11655 #else
11656 static void
11657 sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst,
11658                    struct sctphdr *sh, uint32_t vtag,
11659                    uint8_t type, struct mbuf *cause,
11660                    uint32_t vrf_id SCTP_UNUSED, uint16_t port)
11661 #endif
11662 {
11663 	struct mbuf *o_pak;
11664 	struct mbuf *mout;
11665 	struct sctphdr *shout;
11666 	struct sctp_chunkhdr *ch;
11667 #if defined(INET) || defined(INET6)
11668 	struct udphdr *udp;
11669 #endif
11670 	int ret, len, cause_len, padding_len;
11671 #ifdef INET
11672 #if defined(__APPLE__) && !defined(__Userspace__)
11673 	sctp_route_t ro;
11674 #endif
11675 	struct sockaddr_in *src_sin, *dst_sin;
11676 	struct ip *ip;
11677 #endif
11678 #ifdef INET6
11679 	struct sockaddr_in6 *src_sin6, *dst_sin6;
11680 	struct ip6_hdr *ip6;
11681 #endif
11682 
11683 	/* Compute the length of the cause and add final padding. */
11684 	cause_len = 0;
11685 	if (cause != NULL) {
11686 		struct mbuf *m_at, *m_last = NULL;
11687 
11688 		for (m_at = cause; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
11689 			if (SCTP_BUF_NEXT(m_at) == NULL)
11690 				m_last = m_at;
11691 			cause_len += SCTP_BUF_LEN(m_at);
11692 		}
11693 		padding_len = cause_len % 4;
11694 		if (padding_len != 0) {
11695 			padding_len = 4 - padding_len;
11696 		}
11697 		if (padding_len != 0) {
11698 			if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
11699 				sctp_m_freem(cause);
11700 				return;
11701 			}
11702 		}
11703 	} else {
11704 		padding_len = 0;
11705 	}
11706 	/* Get an mbuf for the header. */
11707 	len = sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
11708 	switch (dst->sa_family) {
11709 #ifdef INET
11710 	case AF_INET:
11711 		len += sizeof(struct ip);
11712 		break;
11713 #endif
11714 #ifdef INET6
11715 	case AF_INET6:
11716 		len += sizeof(struct ip6_hdr);
11717 		break;
11718 #endif
11719 	default:
11720 		break;
11721 	}
11722 #if defined(INET) || defined(INET6)
11723 	if (port) {
11724 		len += sizeof(struct udphdr);
11725 	}
11726 #endif
11727 #if defined(__APPLE__) && !defined(__Userspace__)
11728 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
11729 	mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_NOWAIT, 1, MT_DATA);
11730 #else
11731 	mout = sctp_get_mbuf_for_msg(len + SCTP_MAX_LINKHDR, 1, M_NOWAIT, 1, MT_DATA);
11732 #endif
11733 #else
11734 	mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_NOWAIT, 1, MT_DATA);
11735 #endif
11736 	if (mout == NULL) {
11737 		if (cause) {
11738 			sctp_m_freem(cause);
11739 		}
11740 		return;
11741 	}
11742 #if defined(__APPLE__) && !defined(__Userspace__)
11743 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
11744 	SCTP_BUF_RESV_UF(mout, max_linkhdr);
11745 #else
11746 	SCTP_BUF_RESV_UF(mout, SCTP_MAX_LINKHDR);
11747 #endif
11748 #else
11749 	SCTP_BUF_RESV_UF(mout, max_linkhdr);
11750 #endif
11751 	SCTP_BUF_LEN(mout) = len;
11752 	SCTP_BUF_NEXT(mout) = cause;
11753 #if defined(__FreeBSD__) && !defined(__Userspace__)
11754 	M_SETFIB(mout, fibnum);
11755 	mout->m_pkthdr.flowid = mflowid;
11756 	M_HASHTYPE_SET(mout, mflowtype);
11757 #endif
11758 #ifdef INET
11759 	ip = NULL;
11760 #endif
11761 #ifdef INET6
11762 	ip6 = NULL;
11763 #endif
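	/*
	 * Build the IP or IPv6 header. Source and destination are swapped,
	 * since this packet is a response to the received one.
	 */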
11764 	switch (dst->sa_family) {
11765 #ifdef INET
11766 	case AF_INET:
11767 		src_sin = (struct sockaddr_in *)src;
11768 		dst_sin = (struct sockaddr_in *)dst;
11769 		ip = mtod(mout, struct ip *);
11770 		ip->ip_v = IPVERSION;
11771 		ip->ip_hl = (sizeof(struct ip) >> 2);
11772 		ip->ip_tos = 0;
11773 #if defined(__FreeBSD__) && !defined(__Userspace__)
11774 		ip->ip_off = htons(IP_DF);
11775 #elif defined(WITH_CONVERT_IP_OFF) || defined(__APPLE__)
11776 		ip->ip_off = IP_DF;
11777 #else
11778 		ip->ip_off = htons(IP_DF);
11779 #endif
11780 #if defined(__Userspace__)
11781 		ip->ip_id = htons(ip_id++);
11782 #elif defined(__FreeBSD__)
11783 		ip_fillid(ip);
11784 #elif defined(__APPLE__)
11785 #if RANDOM_IP_ID
11786 		ip->ip_id = ip_randomid();
11787 #else
11788 		ip->ip_id = htons(ip_id++);
11789 #endif
11790 #else
11791 		ip->ip_id = ip_id++;
11792 #endif
11793 		ip->ip_ttl = MODULE_GLOBAL(ip_defttl);
11794 		if (port) {
11795 			ip->ip_p = IPPROTO_UDP;
11796 		} else {
11797 			ip->ip_p = IPPROTO_SCTP;
11798 		}
11799 		ip->ip_src.s_addr = dst_sin->sin_addr.s_addr;
11800 		ip->ip_dst.s_addr = src_sin->sin_addr.s_addr;
11801 		ip->ip_sum = 0;
11802 		len = sizeof(struct ip);
11803 		shout = (struct sctphdr *)((caddr_t)ip + len);
11804 		break;
11805 #endif
11806 #ifdef INET6
11807 	case AF_INET6:
11808 		src_sin6 = (struct sockaddr_in6 *)src;
11809 		dst_sin6 = (struct sockaddr_in6 *)dst;
11810 		ip6 = mtod(mout, struct ip6_hdr *);
11811 		ip6->ip6_flow = htonl(0x60000000);
11812 #if defined(__FreeBSD__) && !defined(__Userspace__)
11813 		if (V_ip6_auto_flowlabel) {
11814 			ip6->ip6_flow |= (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
11815 		}
11816 #endif
11817 #if defined(__Userspace__)
11818 		ip6->ip6_hlim = IPv6_HOP_LIMIT;
11819 #else
11820 		ip6->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
11821 #endif
11822 		if (port) {
11823 			ip6->ip6_nxt = IPPROTO_UDP;
11824 		} else {
11825 			ip6->ip6_nxt = IPPROTO_SCTP;
11826 		}
11827 		ip6->ip6_src = dst_sin6->sin6_addr;
11828 		ip6->ip6_dst = src_sin6->sin6_addr;
11829 		len = sizeof(struct ip6_hdr);
11830 		shout = (struct sctphdr *)((caddr_t)ip6 + len);
11831 		break;
11832 #endif
11833 	default:
11834 		len = 0;
11835 		shout = mtod(mout, struct sctphdr *);
11836 		break;
11837 	}
11838 #if defined(INET) || defined(INET6)
11839 	if (port) {
11840 		if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
11841 			sctp_m_freem(mout);
11842 			return;
11843 		}
11844 		udp = (struct udphdr *)shout;
11845 		udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
11846 		udp->uh_dport = port;
11847 		udp->uh_sum = 0;
11848 		udp->uh_ulen = htons((uint16_t)(sizeof(struct udphdr) +
11849 		                                sizeof(struct sctphdr) +
11850 		                                sizeof(struct sctp_chunkhdr) +
11851 		                                cause_len + padding_len));
11852 		len += sizeof(struct udphdr);
11853 		shout = (struct sctphdr *)((caddr_t)shout + sizeof(struct udphdr));
11854 	} else {
11855 		udp = NULL;
11856 	}
11857 #endif
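	/*
	 * Fill in the common SCTP header; the ports are swapped relative to
	 * the incoming packet.
	 */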
11858 	shout->src_port = sh->dest_port;
11859 	shout->dest_port = sh->src_port;
11860 	shout->checksum = 0;
11861 	if (vtag) {
11862 		shout->v_tag = htonl(vtag);
11863 	} else {
11864 		shout->v_tag = sh->v_tag;
11865 	}
11866 	len += sizeof(struct sctphdr);
11867 	ch = (struct sctp_chunkhdr *)((caddr_t)shout + sizeof(struct sctphdr));
11868 	ch->chunk_type = type;
11869 	if (vtag) {
11870 		ch->chunk_flags = 0;
11871 	} else {
11872 		ch->chunk_flags = SCTP_HAD_NO_TCB;
11873 	}
11874 	ch->chunk_length = htons((uint16_t)(sizeof(struct sctp_chunkhdr) + cause_len));
11875 	len += sizeof(struct sctp_chunkhdr);
11876 	len += cause_len + padding_len;
11877 
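	/*
	 * Attach the mbuf chain to a packet header, compute the checksums,
	 * and hand the packet to the appropriate output path.
	 */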
11878 	if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
11879 		sctp_m_freem(mout);
11880 		return;
11881 	}
11882 	SCTP_ATTACH_CHAIN(o_pak, mout, len);
11883 	switch (dst->sa_family) {
11884 #ifdef INET
11885 	case AF_INET:
11886 #if defined(__APPLE__) && !defined(__Userspace__)
		/* zero out the on-stack route */
11888 		memset(&ro, 0, sizeof(sctp_route_t));
11889 #endif
11890 		if (port) {
11891 #if !defined(_WIN32) && !defined(__Userspace__)
11892 #if defined(__FreeBSD__)
11893 			if (V_udp_cksum) {
11894 				udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
11895 			} else {
11896 				udp->uh_sum = 0;
11897 			}
11898 #else
11899 			udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
11900 #endif
11901 #else
11902 			udp->uh_sum = 0;
11903 #endif
11904 		}
11905 #if defined(__FreeBSD__) && !defined(__Userspace__)
11906 		ip->ip_len = htons(len);
11907 #elif defined(__APPLE__) || defined(__Userspace__)
11908 		ip->ip_len = len;
11909 #else
11910 		ip->ip_len = htons(len);
11911 #endif
11912 		if (port) {
11913 			shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip) + sizeof(struct udphdr));
11914 			SCTP_STAT_INCR(sctps_sendswcrc);
11915 #if !defined(_WIN32) && !defined(__Userspace__)
11916 #if defined(__FreeBSD__)
11917 			if (V_udp_cksum) {
11918 				SCTP_ENABLE_UDP_CSUM(o_pak);
11919 			}
11920 #else
11921 			SCTP_ENABLE_UDP_CSUM(o_pak);
11922 #endif
11923 #endif
11924 		} else {
11925 #if defined(__FreeBSD__) && !defined(__Userspace__)
11926 			mout->m_pkthdr.csum_flags = CSUM_SCTP;
11927 			mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
11928 			SCTP_STAT_INCR(sctps_sendhwcrc);
11929 #else
11930 			shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip));
11931 			SCTP_STAT_INCR(sctps_sendswcrc);
11932 #endif
11933 		}
11934 #ifdef SCTP_PACKET_LOGGING
11935 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
11936 			sctp_packet_log(o_pak);
11937 		}
11938 #endif
11939 #if defined(__APPLE__) && !defined(__Userspace__)
11940 		SCTP_IP_OUTPUT(ret, o_pak, &ro, NULL, vrf_id);
11941 		/* Free the route if we got one back */
11942 		if (ro.ro_rt) {
11943 			RTFREE(ro.ro_rt);
11944 			ro.ro_rt = NULL;
11945 		}
11946 #else
11947 #if defined(__FreeBSD__) && !defined(__Userspace__)
11948 		SCTP_PROBE5(send, NULL, NULL, ip, NULL, shout);
11949 #endif
11950 		SCTP_IP_OUTPUT(ret, o_pak, NULL, NULL, vrf_id);
11951 #endif
11952 		break;
11953 #endif
11954 #ifdef INET6
11955 	case AF_INET6:
11956 		ip6->ip6_plen = htons((uint16_t)(len - sizeof(struct ip6_hdr)));
11957 		if (port) {
11958 			shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
11959 			SCTP_STAT_INCR(sctps_sendswcrc);
11960 #if !defined(__Userspace__)
11961 #if defined(_WIN32)
11962 			udp->uh_sum = 0;
11963 #else
11964 			if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
11965 				udp->uh_sum = 0xffff;
11966 			}
11967 #endif
11968 #endif
11969 		} else {
11970 #if defined(__FreeBSD__) && !defined(__Userspace__)
11971 			mout->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
11972 			mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
11973 			SCTP_STAT_INCR(sctps_sendhwcrc);
11974 #else
11975 			shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr));
11976 			SCTP_STAT_INCR(sctps_sendswcrc);
11977 #endif
11978 		}
11979 #ifdef SCTP_PACKET_LOGGING
11980 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
11981 			sctp_packet_log(o_pak);
11982 		}
11983 #endif
11984 #if defined(__FreeBSD__) && !defined(__Userspace__)
11985 		SCTP_PROBE5(send, NULL, NULL, ip6, NULL, shout);
11986 #endif
11987 		SCTP_IP6_OUTPUT(ret, o_pak, NULL, NULL, NULL, vrf_id);
11988 		break;
11989 #endif
11990 #if defined(__Userspace__)
11991 	case AF_CONN:
11992 	{
11993 		char *buffer;
11994 		struct sockaddr_conn *sconn;
11995 
11996 		sconn = (struct sockaddr_conn *)src;
11997 		if (SCTP_BASE_VAR(crc32c_offloaded) == 0) {
11998 			shout->checksum = sctp_calculate_cksum(mout, 0);
11999 			SCTP_STAT_INCR(sctps_sendswcrc);
12000 		} else {
12001 			SCTP_STAT_INCR(sctps_sendhwcrc);
12002 		}
12003 #ifdef SCTP_PACKET_LOGGING
12004 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
12005 			sctp_packet_log(mout);
12006 		}
12007 #endif
		/* Note: ideally we would not allocate/free a buffer for each packet. */
12009 		if ((buffer = malloc(len)) != NULL) {
12010 			m_copydata(mout, 0, len, buffer);
12011 			ret = SCTP_BASE_VAR(conn_output)(sconn->sconn_addr, buffer, len, 0, 0);
12012 			free(buffer);
12013 		} else {
12014 			ret = ENOMEM;
12015 		}
12016 		sctp_m_freem(mout);
12017 		break;
12018 	}
12019 #endif
12020 	default:
12021 		SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
12022 		        dst->sa_family);
12023 		sctp_m_freem(mout);
12024 		SCTP_LTRACE_ERR_RET_PKT(mout, NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
12025 		return;
12026 	}
12027 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
12028 #if defined(__FreeBSD__) && !defined(__Userspace__)
12029 	if (port) {
12030 		UDPSTAT_INC(udps_opackets);
12031 	}
12032 #endif
12033 	SCTP_STAT_INCR(sctps_sendpackets);
12034 	SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
12035 	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
12036 	if (ret) {
12037 		SCTP_STAT_INCR(sctps_senderrors);
12038 	}
12039 	return;
12040 }
12041 
12042 void
12043 sctp_send_shutdown_complete2(struct sockaddr *src, struct sockaddr *dst,
12044                              struct sctphdr *sh,
12045 #if defined(__FreeBSD__) && !defined(__Userspace__)
12046                              uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
12047 #endif
12048                              uint32_t vrf_id, uint16_t port)
12049 {
12050 	sctp_send_resp_msg(src, dst, sh, 0, SCTP_SHUTDOWN_COMPLETE, NULL,
12051 #if defined(__FreeBSD__) && !defined(__Userspace__)
12052 	                   mflowtype, mflowid, fibnum,
12053 #endif
12054 	                   vrf_id, port);
12055 }
12056 
12057 void
sctp_send_hb(struct sctp_tcb *stcb, struct sctp_nets *net, int so_locked)
12059 {
12060 	struct sctp_tmit_chunk *chk;
12061 	struct sctp_heartbeat_chunk *hb;
12062 	struct timeval now;
12063 
12064 	SCTP_TCB_LOCK_ASSERT(stcb);
12065 	if (net == NULL) {
12066 		return;
12067 	}
12068 	(void)SCTP_GETTIME_TIMEVAL(&now);
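	/* Only send heartbeats for address families we support. */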
12069 	switch (net->ro._l_addr.sa.sa_family) {
12070 #ifdef INET
12071 	case AF_INET:
12072 		break;
12073 #endif
12074 #ifdef INET6
12075 	case AF_INET6:
12076 		break;
12077 #endif
12078 #if defined(__Userspace__)
12079 	case AF_CONN:
12080 		break;
12081 #endif
12082 	default:
12083 		return;
12084 	}
12085 	sctp_alloc_a_chunk(stcb, chk);
12086 	if (chk == NULL) {
12087 		SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak, can't get a chunk for hb\n");
12088 		return;
12089 	}
12090 
12091 	chk->copy_by_ref = 0;
12092 	chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST;
12093 	chk->rec.chunk_id.can_take_data = 1;
12094 	chk->flags = 0;
12095 	chk->asoc = &stcb->asoc;
12096 	chk->send_size = sizeof(struct sctp_heartbeat_chunk);
12097 
12098 	chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
12099 	if (chk->data == NULL) {
12100 		sctp_free_a_chunk(stcb, chk, so_locked);
12101 		return;
12102 	}
12103 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12104 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12105 	chk->sent = SCTP_DATAGRAM_UNSENT;
12106 	chk->snd_count = 0;
12107 	chk->whoTo = net;
12108 	atomic_add_int(&chk->whoTo->ref_count, 1);
12109 	/* Now we have a mbuf that we can fill in with the details */
12110 	hb = mtod(chk->data, struct sctp_heartbeat_chunk *);
12111 	memset(hb, 0, sizeof(struct sctp_heartbeat_chunk));
12112 	/* fill out chunk header */
12113 	hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST;
12114 	hb->ch.chunk_flags = 0;
12115 	hb->ch.chunk_length = htons(chk->send_size);
12116 	/* Fill out hb parameter */
12117 	hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO);
12118 	hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param));
12119 	hb->heartbeat.hb_info.time_value_1 = now.tv_sec;
12120 	hb->heartbeat.hb_info.time_value_2 = now.tv_usec;
	/* Fill in the address family and length of the destination. */
12122 	hb->heartbeat.hb_info.addr_family = (uint8_t)net->ro._l_addr.sa.sa_family;
12123 #ifdef HAVE_SA_LEN
12124 	hb->heartbeat.hb_info.addr_len = net->ro._l_addr.sa.sa_len;
12125 #else
12126 	switch (net->ro._l_addr.sa.sa_family) {
12127 #ifdef INET
12128 	case AF_INET:
12129 		hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_in);
12130 		break;
12131 #endif
12132 #ifdef INET6
12133 	case AF_INET6:
12134 		hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_in6);
12135 		break;
12136 #endif
12137 #if defined(__Userspace__)
12138 	case AF_CONN:
12139 		hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_conn);
12140 		break;
12141 #endif
12142 	default:
12143 		hb->heartbeat.hb_info.addr_len = 0;
12144 		break;
12145 	}
12146 #endif
12147 	if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
12148 		/*
12149 		 * we only take from the entropy pool if the address is not
12150 		 * confirmed.
12151 		 */
12152 		net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
12153 		net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
12154 	} else {
12155 		net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0;
12156 		net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0;
12157 	}
12158 	switch (net->ro._l_addr.sa.sa_family) {
12159 #ifdef INET
12160 	case AF_INET:
12161 		memcpy(hb->heartbeat.hb_info.address,
12162 		       &net->ro._l_addr.sin.sin_addr,
12163 		       sizeof(net->ro._l_addr.sin.sin_addr));
12164 		break;
12165 #endif
12166 #ifdef INET6
12167 	case AF_INET6:
12168 		memcpy(hb->heartbeat.hb_info.address,
12169 		       &net->ro._l_addr.sin6.sin6_addr,
12170 		       sizeof(net->ro._l_addr.sin6.sin6_addr));
12171 		break;
12172 #endif
12173 #if defined(__Userspace__)
12174 	case AF_CONN:
12175 		memcpy(hb->heartbeat.hb_info.address,
12176 		       &net->ro._l_addr.sconn.sconn_addr,
12177 		       sizeof(net->ro._l_addr.sconn.sconn_addr));
12178 		break;
12179 #endif
12180 	default:
12181 		if (chk->data) {
12182 			sctp_m_freem(chk->data);
12183 			chk->data = NULL;
12184 		}
12185 		sctp_free_a_chunk(stcb, chk, so_locked);
		return;
12188 	}
12189 	net->hb_responded = 0;
12190 	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
12191 	stcb->asoc.ctrl_queue_cnt++;
12192 	SCTP_STAT_INCR(sctps_sendheartbeat);
12193 	return;
12194 }
12195 
12196 void
12197 sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
12198 		   uint32_t high_tsn)
12199 {
12200 	struct sctp_association *asoc;
12201 	struct sctp_ecne_chunk *ecne;
12202 	struct sctp_tmit_chunk *chk;
12203 
12204 	if (net == NULL) {
12205 		return;
12206 	}
12207 	asoc = &stcb->asoc;
12208 	SCTP_TCB_LOCK_ASSERT(stcb);
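	/*
	 * If an ECN-ECHO for this destination is already queued, just update
	 * its TSN and packet count instead of queueing another one.
	 */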
12209 	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
12210 		if ((chk->rec.chunk_id.id == SCTP_ECN_ECHO) && (net == chk->whoTo)) {
			/* found a previous ECN-ECHO; update it if needed */
12212 			uint32_t cnt, ctsn;
12213 			ecne = mtod(chk->data, struct sctp_ecne_chunk *);
12214 			ctsn = ntohl(ecne->tsn);
12215 			if (SCTP_TSN_GT(high_tsn, ctsn)) {
12216 				ecne->tsn = htonl(high_tsn);
12217 				SCTP_STAT_INCR(sctps_queue_upd_ecne);
12218 			}
12219 			cnt = ntohl(ecne->num_pkts_since_cwr);
12220 			cnt++;
12221 			ecne->num_pkts_since_cwr = htonl(cnt);
12222 			return;
12223 		}
12224 	}
12225 	/* nope could not find one to update so we must build one */
12226 	sctp_alloc_a_chunk(stcb, chk);
12227 	if (chk == NULL) {
12228 		return;
12229 	}
12230 	SCTP_STAT_INCR(sctps_queue_upd_ecne);
12231 	chk->copy_by_ref = 0;
12232 	chk->rec.chunk_id.id = SCTP_ECN_ECHO;
12233 	chk->rec.chunk_id.can_take_data = 0;
12234 	chk->flags = 0;
12235 	chk->asoc = &stcb->asoc;
12236 	chk->send_size = sizeof(struct sctp_ecne_chunk);
12237 	chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
12238 	if (chk->data == NULL) {
12239 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
12240 		return;
12241 	}
12242 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12243 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12244 	chk->sent = SCTP_DATAGRAM_UNSENT;
12245 	chk->snd_count = 0;
12246 	chk->whoTo = net;
12247 	atomic_add_int(&chk->whoTo->ref_count, 1);
12248 
12249 	stcb->asoc.ecn_echo_cnt_onq++;
12250 	ecne = mtod(chk->data, struct sctp_ecne_chunk *);
12251 	ecne->ch.chunk_type = SCTP_ECN_ECHO;
12252 	ecne->ch.chunk_flags = 0;
12253 	ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk));
12254 	ecne->tsn = htonl(high_tsn);
12255 	ecne->num_pkts_since_cwr = htonl(1);
12256 	TAILQ_INSERT_HEAD(&stcb->asoc.control_send_queue, chk, sctp_next);
12257 	asoc->ctrl_queue_cnt++;
12258 }
12259 
12260 void
12261 sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
12262     struct mbuf *m, int len, int iphlen, int bad_crc)
12263 {
12264 	struct sctp_association *asoc;
12265 	struct sctp_pktdrop_chunk *drp;
12266 	struct sctp_tmit_chunk *chk;
12267 	uint8_t *datap;
12268 	int was_trunc = 0;
12269 	int fullsz = 0;
12270 	long spc;
12271 	int offset;
12272 	struct sctp_chunkhdr *ch, chunk_buf;
12273 	unsigned int chk_length;
12274 
	if (stcb == NULL) {
		return;
	}
12278 	asoc = &stcb->asoc;
12279 	SCTP_TCB_LOCK_ASSERT(stcb);
12280 	if (asoc->pktdrop_supported == 0) {
		/*-
		 * The peer must declare support before we send one.
		 */
12284 		return;
12285 	}
12286 	if (stcb->sctp_socket == NULL) {
12287 		return;
12288 	}
12289 	sctp_alloc_a_chunk(stcb, chk);
12290 	if (chk == NULL) {
12291 		return;
12292 	}
12293 	chk->copy_by_ref = 0;
12294 	chk->rec.chunk_id.id = SCTP_PACKET_DROPPED;
12295 	chk->rec.chunk_id.can_take_data = 1;
12296 	chk->flags = 0;
12297 	len -= iphlen;
12298 	chk->send_size = len;
12299 	/* Validate that we do not have an ABORT in here. */
12300 	offset = iphlen + sizeof(struct sctphdr);
12301 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
						   sizeof(*ch), (uint8_t *)&chunk_buf);
12303 	while (ch != NULL) {
12304 		chk_length = ntohs(ch->chunk_length);
12305 		if (chk_length < sizeof(*ch)) {
			/* malformed chunk length; stop scanning */
12307 			break;
12308 		}
12309 		switch (ch->chunk_type) {
12310 		case SCTP_PACKET_DROPPED:
12311 		case SCTP_ABORT_ASSOCIATION:
12312 		case SCTP_INITIATION_ACK:
			/*-
			 * We don't respond with a PKT-DROP to an ABORT
			 * or a PKT-DROP. We also do not respond to an
			 * INIT-ACK, because we can't know if the initiation
			 * tag is correct or not.
			 */
12319 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
12320 			return;
12321 		default:
12322 			break;
12323 		}
12324 		offset += SCTP_SIZE32(chk_length);
12325 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *)&chunk_buf);
12327 	}
12328 
12329 	if ((len + SCTP_MAX_OVERHEAD + sizeof(struct sctp_pktdrop_chunk)) >
12330 	    min(stcb->asoc.smallest_mtu, MCLBYTES)) {
		/*
		 * Only send one MTU's worth; trim off the
		 * excess at the end.
		 */
12334 		fullsz = len;
12335 		len = min(stcb->asoc.smallest_mtu, MCLBYTES) - SCTP_MAX_OVERHEAD;
12336 		was_trunc = 1;
12337 	}
12338 	chk->asoc = &stcb->asoc;
12339 	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
12340 	if (chk->data == NULL) {
12341 jump_out:
12342 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
12343 		return;
12344 	}
12345 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12346 	drp = mtod(chk->data, struct sctp_pktdrop_chunk *);
12347 	if (drp == NULL) {
12348 		sctp_m_freem(chk->data);
12349 		chk->data = NULL;
12350 		goto jump_out;
12351 	}
12352 	chk->book_size = SCTP_SIZE32((chk->send_size + sizeof(struct sctp_pktdrop_chunk) +
12353 	    sizeof(struct sctphdr) + SCTP_MED_OVERHEAD));
12354 	chk->book_size_scale = 0;
12355 	if (was_trunc) {
12356 		drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED;
12357 		drp->trunc_len = htons(fullsz);
		/*
		 * len was already adjusted above to the size minus overhead;
		 * now take out the pktdrop chunk header itself.
		 */
12361 		chk->send_size = (uint16_t)(len - sizeof(struct sctp_pktdrop_chunk));
12362 		len = chk->send_size;
12363 	} else {
12364 		/* no truncation needed */
12365 		drp->ch.chunk_flags = 0;
12366 		drp->trunc_len = htons(0);
12367 	}
12368 	if (bad_crc) {
12369 		drp->ch.chunk_flags |= SCTP_BADCRC;
12370 	}
12371 	chk->send_size += sizeof(struct sctp_pktdrop_chunk);
12372 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12373 	chk->sent = SCTP_DATAGRAM_UNSENT;
12374 	chk->snd_count = 0;
12375 	if (net) {
		/* this is the normal case */
12377 		chk->whoTo = net;
12378 		atomic_add_int(&chk->whoTo->ref_count, 1);
12379 	} else {
12380 		chk->whoTo = NULL;
12381 	}
12382 	drp->ch.chunk_type = SCTP_PACKET_DROPPED;
12383 	drp->ch.chunk_length = htons(chk->send_size);
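	/*
	 * Report our receive buffer limit (bottle_bw) and the amount of data
	 * currently queued (current_onq) to the peer.
	 */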
12384 	spc = SCTP_SB_LIMIT_RCV(stcb->sctp_socket);
12385 	if (spc < 0) {
12386 		spc = 0;
12387 	}
12388 	drp->bottle_bw = htonl(spc);
12389 	if (asoc->my_rwnd) {
12390 		drp->current_onq = htonl(asoc->size_on_reasm_queue +
12391 		    asoc->size_on_all_streams +
12392 		    asoc->my_rwnd_control_len +
12393 		    stcb->sctp_socket->so_rcv.sb_cc);
12394 	} else {
		/*-
		 * If my rwnd is 0, possibly from mbuf depletion as well as
		 * space used, tell the peer there is no space, i.e.,
		 * current_onq == bottle_bw.
		 */
12399 		drp->current_onq = htonl(spc);
12400 	}
12401 	drp->reserved = 0;
12402 	datap = drp->data;
12403 	m_copydata(m, iphlen, len, (caddr_t)datap);
12404 	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
12405 	asoc->ctrl_queue_cnt++;
12406 }
12407 
12408 void
12409 sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn, uint8_t override)
12410 {
12411 	struct sctp_association *asoc;
12412 	struct sctp_cwr_chunk *cwr;
12413 	struct sctp_tmit_chunk *chk;
12414 
12415 	SCTP_TCB_LOCK_ASSERT(stcb);
12416 	if (net == NULL) {
12417 		return;
12418 	}
12419 	asoc = &stcb->asoc;
12420 	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
12421 		if ((chk->rec.chunk_id.id == SCTP_ECN_CWR) && (net == chk->whoTo)) {
			/* found a previous CWR queued to the same destination; update it if needed */
12423 			uint32_t ctsn;
12424 			cwr = mtod(chk->data, struct sctp_cwr_chunk *);
12425 			ctsn = ntohl(cwr->tsn);
12426 			if (SCTP_TSN_GT(high_tsn, ctsn)) {
12427 				cwr->tsn = htonl(high_tsn);
12428 			}
12429 			if (override & SCTP_CWR_REDUCE_OVERRIDE) {
12430 				/* Make sure override is carried */
12431 				cwr->ch.chunk_flags |= SCTP_CWR_REDUCE_OVERRIDE;
12432 			}
12433 			return;
12434 		}
12435 	}
12436 	sctp_alloc_a_chunk(stcb, chk);
12437 	if (chk == NULL) {
12438 		return;
12439 	}
12440 	chk->copy_by_ref = 0;
12441 	chk->rec.chunk_id.id = SCTP_ECN_CWR;
12442 	chk->rec.chunk_id.can_take_data = 1;
12443 	chk->flags = 0;
12444 	chk->asoc = &stcb->asoc;
12445 	chk->send_size = sizeof(struct sctp_cwr_chunk);
12446 	chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
12447 	if (chk->data == NULL) {
12448 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
12449 		return;
12450 	}
12451 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12452 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12453 	chk->sent = SCTP_DATAGRAM_UNSENT;
12454 	chk->snd_count = 0;
12455 	chk->whoTo = net;
12456 	atomic_add_int(&chk->whoTo->ref_count, 1);
12457 	cwr = mtod(chk->data, struct sctp_cwr_chunk *);
12458 	cwr->ch.chunk_type = SCTP_ECN_CWR;
12459 	cwr->ch.chunk_flags = override;
12460 	cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk));
12461 	cwr->tsn = htonl(high_tsn);
12462 	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
12463 	asoc->ctrl_queue_cnt++;
12464 }
12465 
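/*
 * Append an Outgoing SSN Reset Request parameter (RFC 6525) to the
 * stream reset chunk in chk.  Only streams that are RESET_PENDING with
 * nothing left on their queues are listed (an empty list means "all
 * streams"); the listed streams are moved to RESET_IN_FLIGHT.  Returns
 * 1 if a request was added, 0 if no stream currently qualifies.
 */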
12466 static int
12467 sctp_add_stream_reset_out(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
12468                           uint32_t seq, uint32_t resp_seq, uint32_t last_sent)
12469 {
12470 	uint16_t len, old_len, i;
12471 	struct sctp_stream_reset_out_request *req_out;
12472 	struct sctp_chunkhdr *ch;
12473 	int at;
	int number_entries = 0;
12475 
12476 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12477 	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12478 	/* get to new offset for the param. */
12479 	req_out = (struct sctp_stream_reset_out_request *)((caddr_t)ch + len);
12480 	/* now how long will this param be? */
12481 	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
12482 		if ((stcb->asoc.strmout[i].state == SCTP_STREAM_RESET_PENDING) &&
12483 		    (stcb->asoc.strmout[i].chunks_on_queues == 0) &&
12484 		    TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
12485 			number_entries++;
12486 		}
12487 	}
12488 	if (number_entries == 0) {
12489 		return (0);
12490 	}
12491 	if (number_entries == stcb->asoc.streamoutcnt) {
12492 		number_entries = 0;
12493 	}
12494 	if (number_entries > SCTP_MAX_STREAMS_AT_ONCE_RESET) {
12495 		number_entries = SCTP_MAX_STREAMS_AT_ONCE_RESET;
12496 	}
12497 	len = (uint16_t)(sizeof(struct sctp_stream_reset_out_request) + (sizeof(uint16_t) * number_entries));
12498 	req_out->ph.param_type = htons(SCTP_STR_RESET_OUT_REQUEST);
12499 	req_out->ph.param_length = htons(len);
12500 	req_out->request_seq = htonl(seq);
12501 	req_out->response_seq = htonl(resp_seq);
12502 	req_out->send_reset_at_tsn = htonl(last_sent);
12503 	at = 0;
12504 	if (number_entries) {
12505 		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
12506 			if ((stcb->asoc.strmout[i].state == SCTP_STREAM_RESET_PENDING) &&
12507 			    (stcb->asoc.strmout[i].chunks_on_queues == 0) &&
12508 			    TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
12509 				req_out->list_of_streams[at] = htons(i);
12510 				at++;
12511 				stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_IN_FLIGHT;
12512 				if (at >= number_entries) {
12513 					break;
12514 				}
12515 			}
12516 		}
12517 	} else {
12518 		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
12519 			stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_IN_FLIGHT;
12520 		}
12521 	}
12522 	if (SCTP_SIZE32(len) > len) {
12523 		/*-
12524 		 * Need to worry about the pad we may end up adding to the
12525 		 * end. This is easy since the struct is either aligned to 4
12526 		 * bytes or 2 bytes off.
12527 		 */
12528 		req_out->list_of_streams[number_entries] = 0;
12529 	}
12530 	/* now fix the chunk length */
12531 	ch->chunk_length = htons(len + old_len);
12532 	chk->book_size = len + old_len;
12533 	chk->book_size_scale = 0;
12534 	chk->send_size = SCTP_SIZE32(chk->book_size);
12535 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12536 	return (1);
12537 }
12538 
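/*
 * Append an Incoming SSN Reset Request parameter to the stream reset
 * chunk in chk, listing the given streams (an empty list asks the peer
 * to reset all of them), and fix up the chunk length.
 */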
12539 static void
12540 sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk,
12541                          int number_entries, uint16_t *list,
12542                          uint32_t seq)
12543 {
12544 	uint16_t len, old_len, i;
12545 	struct sctp_stream_reset_in_request *req_in;
12546 	struct sctp_chunkhdr *ch;
12547 
12548 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12549 	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12550 
12551 	/* get to new offset for the param. */
12552 	req_in = (struct sctp_stream_reset_in_request *)((caddr_t)ch + len);
12553 	/* now how long will this param be? */
12554 	len = (uint16_t)(sizeof(struct sctp_stream_reset_in_request) + (sizeof(uint16_t) * number_entries));
12555 	req_in->ph.param_type = htons(SCTP_STR_RESET_IN_REQUEST);
12556 	req_in->ph.param_length = htons(len);
12557 	req_in->request_seq = htonl(seq);
12558 	if (number_entries) {
12559 		for (i = 0; i < number_entries; i++) {
12560 			req_in->list_of_streams[i] = htons(list[i]);
12561 		}
12562 	}
12563 	if (SCTP_SIZE32(len) > len) {
12564 		/*-
12565 		 * Need to worry about the pad we may end up adding to the
12566 		 * end. This is easy since the struct is either aligned to 4
12567 		 * bytes or 2 bytes off.
12568 		 */
12569 		req_in->list_of_streams[number_entries] = 0;
12570 	}
12571 	/* now fix the chunk length */
12572 	ch->chunk_length = htons(len + old_len);
12573 	chk->book_size = len + old_len;
12574 	chk->book_size_scale = 0;
12575 	chk->send_size = SCTP_SIZE32(chk->book_size);
12576 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12577 	return;
12578 }
12579 
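/*
 * Append an SSN/TSN Reset Request parameter carrying the given request
 * sequence number to the stream reset chunk in chk.
 */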
12580 static void
12581 sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk,
12582                           uint32_t seq)
12583 {
12584 	uint16_t len, old_len;
12585 	struct sctp_stream_reset_tsn_request *req_tsn;
12586 	struct sctp_chunkhdr *ch;
12587 
12588 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12589 	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12590 
12591 	/* get to new offset for the param. */
12592 	req_tsn = (struct sctp_stream_reset_tsn_request *)((caddr_t)ch + len);
12593 	/* now how long will this param be? */
12594 	len = sizeof(struct sctp_stream_reset_tsn_request);
12595 	req_tsn->ph.param_type = htons(SCTP_STR_RESET_TSN_REQUEST);
12596 	req_tsn->ph.param_length = htons(len);
12597 	req_tsn->request_seq = htonl(seq);
12598 
12599 	/* now fix the chunk length */
12600 	ch->chunk_length = htons(len + old_len);
12601 	chk->send_size = len + old_len;
12602 	chk->book_size = SCTP_SIZE32(chk->send_size);
12603 	chk->book_size_scale = 0;
12604 	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
12605 	return;
12606 }
12607 
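/*
 * Append a Re-configuration Response parameter for resp_seq with the
 * given result code to the stream reset chunk in chk.
 */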
12608 void
12609 sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk,
12610                              uint32_t resp_seq, uint32_t result)
12611 {
12612 	uint16_t len, old_len;
12613 	struct sctp_stream_reset_response *resp;
12614 	struct sctp_chunkhdr *ch;
12615 
12616 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12617 	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12618 
12619 	/* get to new offset for the param. */
12620 	resp = (struct sctp_stream_reset_response *)((caddr_t)ch + len);
12621 	/* now how long will this param be? */
12622 	len = sizeof(struct sctp_stream_reset_response);
12623 	resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
12624 	resp->ph.param_length = htons(len);
12625 	resp->response_seq = htonl(resp_seq);
	resp->result = htonl(result);
12627 
12628 	/* now fix the chunk length */
12629 	ch->chunk_length = htons(len + old_len);
12630 	chk->book_size = len + old_len;
12631 	chk->book_size_scale = 0;
12632 	chk->send_size = SCTP_SIZE32(chk->book_size);
12633 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12634 	return;
12635 }
12636 
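/*
 * Send a previously deferred stream reset response: build a fresh
 * stream reset chunk carrying only a response parameter for the given
 * request and queue it on the control send queue.  Nothing is queued
 * while a request of our own is still outstanding.
 */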
12637 void
12638 sctp_send_deferred_reset_response(struct sctp_tcb *stcb,
12639 				 struct sctp_stream_reset_list *ent,
12640 				 int response)
12641 {
12642 	struct sctp_association *asoc;
12643 	struct sctp_tmit_chunk *chk;
12644 	struct sctp_chunkhdr *ch;
12645 
12646 	asoc = &stcb->asoc;
12647 
	/*
	 * Reset our last reset action to the new response (most likely
	 * PERFORMED).  This assures that if we fail to send, a
	 * retransmission from the peer will get the new response.
	 */
12653 	asoc->last_reset_action[0] = response;
12654 	if (asoc->stream_reset_outstanding) {
12655 		return;
12656 	}
12657 	sctp_alloc_a_chunk(stcb, chk);
12658 	if (chk == NULL) {
12659 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12660 		return;
12661 	}
12662 	chk->copy_by_ref = 0;
12663 	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
12664 	chk->rec.chunk_id.can_take_data = 0;
12665 	chk->flags = 0;
12666 	chk->asoc = &stcb->asoc;
12667 	chk->book_size = sizeof(struct sctp_chunkhdr);
12668 	chk->send_size = SCTP_SIZE32(chk->book_size);
12669 	chk->book_size_scale = 0;
12670 	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
12671 	if (chk->data == NULL) {
12672 		sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED);
12673 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12674 		return;
12675 	}
12676 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12677 	/* setup chunk parameters */
12678 	chk->sent = SCTP_DATAGRAM_UNSENT;
12679 	chk->snd_count = 0;
12680 	if (stcb->asoc.alternate) {
12681 		chk->whoTo = stcb->asoc.alternate;
12682 	} else {
12683 		chk->whoTo = stcb->asoc.primary_destination;
12684 	}
12685 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12686 	ch->chunk_type = SCTP_STREAM_RESET;
12687 	ch->chunk_flags = 0;
12688 	ch->chunk_length = htons(chk->book_size);
12689 	atomic_add_int(&chk->whoTo->ref_count, 1);
12690 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12691 	sctp_add_stream_reset_result(chk, ent->seq, response);
12692 	/* insert the chunk for sending */
12693 	TAILQ_INSERT_TAIL(&asoc->control_send_queue,
12694 			  chk,
12695 			  sctp_next);
12696 	asoc->ctrl_queue_cnt++;
12697 }
12698 
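/*
 * Like sctp_add_stream_reset_result(), but for answering an SSN/TSN
 * reset request: the response parameter also carries the sender's and
 * receiver's next TSN.
 */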
12699 void
12700 sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk,
12701                                  uint32_t resp_seq, uint32_t result,
12702                                  uint32_t send_una, uint32_t recv_next)
12703 {
12704 	uint16_t len, old_len;
12705 	struct sctp_stream_reset_response_tsn *resp;
12706 	struct sctp_chunkhdr *ch;
12707 
12708 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12709 	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12710 
12711 	/* get to new offset for the param. */
12712 	resp = (struct sctp_stream_reset_response_tsn *)((caddr_t)ch + len);
12713 	/* now how long will this param be? */
12714 	len = sizeof(struct sctp_stream_reset_response_tsn);
12715 	resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
12716 	resp->ph.param_length = htons(len);
12717 	resp->response_seq = htonl(resp_seq);
12718 	resp->result = htonl(result);
12719 	resp->senders_next_tsn = htonl(send_una);
12720 	resp->receivers_next_tsn = htonl(recv_next);
12721 
12722 	/* now fix the chunk length */
12723 	ch->chunk_length = htons(len + old_len);
12724 	chk->book_size = len + old_len;
12725 	chk->send_size = SCTP_SIZE32(chk->book_size);
12726 	chk->book_size_scale = 0;
12727 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12728 	return;
12729 }
12730 
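/*
 * Append an Add Outgoing Streams Request parameter asking the peer to
 * accept 'adding' additional outgoing streams.
 */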
12731 static void
12732 sctp_add_an_out_stream(struct sctp_tmit_chunk *chk,
12733 		       uint32_t seq,
12734 		       uint16_t adding)
12735 {
12736 	uint16_t len, old_len;
12737 	struct sctp_chunkhdr *ch;
12738 	struct sctp_stream_reset_add_strm *addstr;
12739 
12740 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12741 	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12742 
12743 	/* get to new offset for the param. */
12744 	addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
12745 	/* now how long will this param be? */
12746 	len = sizeof(struct sctp_stream_reset_add_strm);
12747 
12748 	/* Fill it out. */
12749 	addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_OUT_STREAMS);
12750 	addstr->ph.param_length = htons(len);
12751 	addstr->request_seq = htonl(seq);
12752 	addstr->number_of_streams = htons(adding);
12753 	addstr->reserved = 0;
12754 
12755 	/* now fix the chunk length */
12756 	ch->chunk_length = htons(len + old_len);
12757 	chk->send_size = len + old_len;
12758 	chk->book_size = SCTP_SIZE32(chk->send_size);
12759 	chk->book_size_scale = 0;
12760 	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
12761 	return;
12762 }
12763 
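/*
 * Append an Add Incoming Streams Request parameter asking the peer to
 * open 'adding' additional streams towards us.
 */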
12764 static void
12765 sctp_add_an_in_stream(struct sctp_tmit_chunk *chk,
12766                       uint32_t seq,
12767                       uint16_t adding)
12768 {
12769 	uint16_t len, old_len;
12770 	struct sctp_chunkhdr *ch;
12771 	struct sctp_stream_reset_add_strm *addstr;
12772 
12773 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12774 	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12775 
12776 	/* get to new offset for the param. */
12777 	addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
12778 	/* now how long will this param be? */
12779 	len = sizeof(struct sctp_stream_reset_add_strm);
12780 	/* Fill it out. */
12781 	addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_IN_STREAMS);
12782 	addstr->ph.param_length = htons(len);
12783 	addstr->request_seq = htonl(seq);
12784 	addstr->number_of_streams = htons(adding);
12785 	addstr->reserved = 0;
12786 
12787 	/* now fix the chunk length */
12788 	ch->chunk_length = htons(len + old_len);
12789 	chk->send_size = len + old_len;
12790 	chk->book_size = SCTP_SIZE32(chk->send_size);
12791 	chk->book_size_scale = 0;
12792 	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
12793 	return;
12794 }
12795 
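/*
 * Used when a deferred outgoing stream reset can finally be sent:
 * build a stream reset chunk, add an outgoing reset request for the
 * streams that are now idle, queue the chunk and start the stream
 * reset timer.  Returns 0 on success, EALREADY if a request is still
 * outstanding, ENOENT if no stream qualified, or ENOMEM.
 */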
12796 int
12797 sctp_send_stream_reset_out_if_possible(struct sctp_tcb *stcb, int so_locked)
12798 {
12799 	struct sctp_association *asoc;
12800 	struct sctp_tmit_chunk *chk;
12801 	struct sctp_chunkhdr *ch;
12802 	uint32_t seq;
12803 
12804 	asoc = &stcb->asoc;
12805 	asoc->trigger_reset = 0;
12806 	if (asoc->stream_reset_outstanding) {
12807 		return (EALREADY);
12808 	}
12809 	sctp_alloc_a_chunk(stcb, chk);
12810 	if (chk == NULL) {
12811 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12812 		return (ENOMEM);
12813 	}
12814 	chk->copy_by_ref = 0;
12815 	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
12816 	chk->rec.chunk_id.can_take_data = 0;
12817 	chk->flags = 0;
12818 	chk->asoc = &stcb->asoc;
12819 	chk->book_size = sizeof(struct sctp_chunkhdr);
12820 	chk->send_size = SCTP_SIZE32(chk->book_size);
12821 	chk->book_size_scale = 0;
12822 	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
12823 	if (chk->data == NULL) {
12824 		sctp_free_a_chunk(stcb, chk, so_locked);
12825 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12826 		return (ENOMEM);
12827 	}
12828 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12829 
12830 	/* setup chunk parameters */
12831 	chk->sent = SCTP_DATAGRAM_UNSENT;
12832 	chk->snd_count = 0;
12833 	if (stcb->asoc.alternate) {
12834 		chk->whoTo = stcb->asoc.alternate;
12835 	} else {
12836 		chk->whoTo = stcb->asoc.primary_destination;
12837 	}
12838 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12839 	ch->chunk_type = SCTP_STREAM_RESET;
12840 	ch->chunk_flags = 0;
12841 	ch->chunk_length = htons(chk->book_size);
12842 	atomic_add_int(&chk->whoTo->ref_count, 1);
12843 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12844 	seq = stcb->asoc.str_reset_seq_out;
12845 	if (sctp_add_stream_reset_out(stcb, chk, seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1))) {
12846 		seq++;
12847 		asoc->stream_reset_outstanding++;
12848 	} else {
		sctp_m_freem(chk->data);
12850 		chk->data = NULL;
12851 		sctp_free_a_chunk(stcb, chk, so_locked);
12852 		return (ENOENT);
12853 	}
12854 	asoc->str_reset = chk;
12855 	/* insert the chunk for sending */
12856 	TAILQ_INSERT_TAIL(&asoc->control_send_queue,
12857 			  chk,
12858 			  sctp_next);
12859 	asoc->ctrl_queue_cnt++;
12860 
12861 	if (stcb->asoc.send_sack) {
12862 		sctp_send_sack(stcb, so_locked);
12863 	}
12864 	sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
12865 	return (0);
12866 }
12867 
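/*
 * Build and queue a stream reset chunk according to the caller's
 * wishes: an incoming SSN reset request (with a matching outgoing
 * request piggybacked for streams already marked pending), an SSN/TSN
 * reset request, and/or requests to add outgoing or incoming streams.
 * The strmout array is grown first if outgoing streams are being
 * added.  Only one such request may be outstanding at a time (EBUSY
 * otherwise).
 */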
12868 int
12869 sctp_send_str_reset_req(struct sctp_tcb *stcb,
12870                         uint16_t number_entries, uint16_t *list,
12871                         uint8_t send_in_req,
12872                         uint8_t send_tsn_req,
12873                         uint8_t add_stream,
12874                         uint16_t adding_o,
12875                         uint16_t adding_i, uint8_t peer_asked)
12876 {
12877 	struct sctp_association *asoc;
12878 	struct sctp_tmit_chunk *chk;
12879 	struct sctp_chunkhdr *ch;
	int can_send_out_req = 0;
12881 	uint32_t seq;
12882 
12883 	asoc = &stcb->asoc;
12884 	if (asoc->stream_reset_outstanding) {
12885 		/*-
12886 		 * Already one pending, must get ACK back to clear the flag.
12887 		 */
12888 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EBUSY);
12889 		return (EBUSY);
12890 	}
12891 	if ((send_in_req == 0) && (send_tsn_req == 0) &&
12892 	    (add_stream == 0)) {
12893 		/* nothing to do */
12894 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12895 		return (EINVAL);
12896 	}
12897 	if (send_tsn_req && send_in_req) {
12898 		/* error, can't do that */
12899 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12900 		return (EINVAL);
12901 	} else if (send_in_req) {
12902 		can_send_out_req = 1;
12903 	}
12904 	if (number_entries > (MCLBYTES -
12905 	                      SCTP_MIN_OVERHEAD -
12906 	                      sizeof(struct sctp_chunkhdr) -
12907 	                      sizeof(struct sctp_stream_reset_out_request)) /
12908 	                     sizeof(uint16_t)) {
12909 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12910 		return (ENOMEM);
12911 	}
12912 	sctp_alloc_a_chunk(stcb, chk);
12913 	if (chk == NULL) {
12914 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12915 		return (ENOMEM);
12916 	}
12917 	chk->copy_by_ref = 0;
12918 	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
12919 	chk->rec.chunk_id.can_take_data = 0;
12920 	chk->flags = 0;
12921 	chk->asoc = &stcb->asoc;
12922 	chk->book_size = sizeof(struct sctp_chunkhdr);
12923 	chk->send_size = SCTP_SIZE32(chk->book_size);
12924 	chk->book_size_scale = 0;
12925 	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
12926 	if (chk->data == NULL) {
12927 		sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED);
12928 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12929 		return (ENOMEM);
12930 	}
12931 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12932 
12933 	/* setup chunk parameters */
12934 	chk->sent = SCTP_DATAGRAM_UNSENT;
12935 	chk->snd_count = 0;
12936 	if (stcb->asoc.alternate) {
12937 		chk->whoTo = stcb->asoc.alternate;
12938 	} else {
12939 		chk->whoTo = stcb->asoc.primary_destination;
12940 	}
12941 	atomic_add_int(&chk->whoTo->ref_count, 1);
12942 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12943 	ch->chunk_type = SCTP_STREAM_RESET;
12944 	ch->chunk_flags = 0;
12945 	ch->chunk_length = htons(chk->book_size);
12946 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12947 
12948 	seq = stcb->asoc.str_reset_seq_out;
12949 	if (can_send_out_req) {
12950 		int ret;
		ret = sctp_add_stream_reset_out(stcb, chk, seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1));
12952 		if (ret) {
12953 			seq++;
12954 			asoc->stream_reset_outstanding++;
12955 		}
12956 	}
12957 	if ((add_stream & 1) &&
12958 	    ((stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt) < adding_o)) {
12959 		/* Need to allocate more */
12960 		struct sctp_stream_out *oldstream;
12961 		struct sctp_stream_queue_pending *sp, *nsp;
12962 		int i;
12963 #if defined(SCTP_DETAILED_STR_STATS)
12964 		int j;
12965 #endif
12966 
12967 		oldstream = stcb->asoc.strmout;
12968 		/* get some more */
12969 		SCTP_MALLOC(stcb->asoc.strmout, struct sctp_stream_out *,
12970 			    (stcb->asoc.streamoutcnt + adding_o) * sizeof(struct sctp_stream_out),
12971 			    SCTP_M_STRMO);
12972 		if (stcb->asoc.strmout == NULL) {
12973 			uint8_t x;
12974 			stcb->asoc.strmout = oldstream;
12975 			/* Turn off the bit */
12976 			x = add_stream & 0xfe;
12977 			add_stream = x;
12978 			goto skip_stuff;
12979 		}
		/* OK, now we proceed with copying the old outgoing streams
		 * and initializing the new ones.
		 */
12983 		SCTP_TCB_SEND_LOCK(stcb);
12984 		stcb->asoc.ss_functions.sctp_ss_clear(stcb, &stcb->asoc, 0, 1);
12985 		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
12986 			TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
12987 			stcb->asoc.strmout[i].chunks_on_queues = oldstream[i].chunks_on_queues;
12988 			stcb->asoc.strmout[i].next_mid_ordered = oldstream[i].next_mid_ordered;
12989 			stcb->asoc.strmout[i].next_mid_unordered = oldstream[i].next_mid_unordered;
12990 			stcb->asoc.strmout[i].last_msg_incomplete = oldstream[i].last_msg_incomplete;
12991 			stcb->asoc.strmout[i].sid = i;
12992 			stcb->asoc.strmout[i].state = oldstream[i].state;
12993 			/* FIX ME FIX ME */
12994 			/* This should be a SS_COPY operation FIX ME STREAM SCHEDULER EXPERT */
12995 			stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], &oldstream[i]);
12996 			/* now anything on those queues? */
12997 			TAILQ_FOREACH_SAFE(sp, &oldstream[i].outqueue, next, nsp) {
12998 				TAILQ_REMOVE(&oldstream[i].outqueue, sp, next);
12999 				TAILQ_INSERT_TAIL(&stcb->asoc.strmout[i].outqueue, sp, next);
13000 			}
13001 
13002 		}
13003 		/* now the new streams */
13004 		stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 1);
13005 		for (i = stcb->asoc.streamoutcnt; i < (stcb->asoc.streamoutcnt + adding_o); i++) {
13006 			TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
13007 			stcb->asoc.strmout[i].chunks_on_queues = 0;
13008 #if defined(SCTP_DETAILED_STR_STATS)
13009 			for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
13010 				stcb->asoc.strmout[i].abandoned_sent[j] = 0;
13011 				stcb->asoc.strmout[i].abandoned_unsent[j] = 0;
13012 			}
13013 #else
13014 			stcb->asoc.strmout[i].abandoned_sent[0] = 0;
13015 			stcb->asoc.strmout[i].abandoned_unsent[0] = 0;
13016 #endif
13017 			stcb->asoc.strmout[i].next_mid_ordered = 0;
13018 			stcb->asoc.strmout[i].next_mid_unordered = 0;
13019 			stcb->asoc.strmout[i].sid = i;
13020 			stcb->asoc.strmout[i].last_msg_incomplete = 0;
13021 			stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], NULL);
13022 			stcb->asoc.strmout[i].state = SCTP_STREAM_CLOSED;
13023 		}
13024 		stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt + adding_o;
13025 		SCTP_FREE(oldstream, SCTP_M_STRMO);
13026 		SCTP_TCB_SEND_UNLOCK(stcb);
13027 	}
13028 skip_stuff:
13029 	if ((add_stream & 1) && (adding_o > 0)) {
13030 		asoc->strm_pending_add_size = adding_o;
13031 		asoc->peer_req_out = peer_asked;
13032 		sctp_add_an_out_stream(chk, seq, adding_o);
13033 		seq++;
13034 		asoc->stream_reset_outstanding++;
13035 	}
13036 	if ((add_stream & 2) && (adding_i > 0)) {
13037 		sctp_add_an_in_stream(chk, seq, adding_i);
13038 		seq++;
13039 		asoc->stream_reset_outstanding++;
13040 	}
13041 	if (send_in_req) {
13042 		sctp_add_stream_reset_in(chk, number_entries, list, seq);
13043 		seq++;
13044 		asoc->stream_reset_outstanding++;
13045 	}
13046 	if (send_tsn_req) {
13047 		sctp_add_stream_reset_tsn(chk, seq);
13048 		asoc->stream_reset_outstanding++;
13049 	}
13050 	asoc->str_reset = chk;
13051 	/* insert the chunk for sending */
13052 	TAILQ_INSERT_TAIL(&asoc->control_send_queue,
13053 			  chk,
13054 			  sctp_next);
13055 	asoc->ctrl_queue_cnt++;
13056 	if (stcb->asoc.send_sack) {
13057 		sctp_send_sack(stcb, SCTP_SO_LOCKED);
13058 	}
13059 	sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
13060 	return (0);
13061 }
13062 
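/*
 * Send an ABORT in response to a packet, typically one for which no
 * association exists, unless that packet itself contains an ABORT
 * (never answer an ABORT with an ABORT).  The optional cause chain is
 * either freed here or handed to sctp_send_resp_msg().
 */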
13063 void
13064 sctp_send_abort(struct mbuf *m, int iphlen, struct sockaddr *src, struct sockaddr *dst,
13065                 struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
13066 #if defined(__FreeBSD__) && !defined(__Userspace__)
13067                 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
13068 #endif
13069                 uint32_t vrf_id, uint16_t port)
13070 {
13071 	/* Don't respond to an ABORT with an ABORT. */
13072 	if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) {
13073 		if (cause)
13074 			sctp_m_freem(cause);
13075 		return;
13076 	}
13077 	sctp_send_resp_msg(src, dst, sh, vtag, SCTP_ABORT_ASSOCIATION, cause,
13078 #if defined(__FreeBSD__) && !defined(__Userspace__)
13079 	                   mflowtype, mflowid, fibnum,
13080 #endif
13081 	                   vrf_id, port);
13082 	return;
13083 }
13084 
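/*
 * Send an Operation Error packet back to the given address carrying
 * the supplied cause chain; a thin wrapper around sctp_send_resp_msg().
 */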
13085 void
13086 sctp_send_operr_to(struct sockaddr *src, struct sockaddr *dst,
13087                    struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
13088 #if defined(__FreeBSD__) && !defined(__Userspace__)
13089                    uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
13090 #endif
13091                    uint32_t vrf_id, uint16_t port)
13092 {
13093 	sctp_send_resp_msg(src, dst, sh, vtag, SCTP_OPERATION_ERROR, cause,
13094 #if defined(__FreeBSD__) && !defined(__Userspace__)
13095 	                   mflowtype, mflowid, fibnum,
13096 #endif
13097 	                   vrf_id, port);
13098 	return;
13099 }
13100 
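/*
 * Copy up to max_send_len bytes of user data from uio into a new mbuf
 * chain.  On FreeBSD and in userspace builds this is a thin wrapper
 * around m_uiotombuf(); the fallback path builds the chain by hand
 * with uiomove().  *sndout reports the number of bytes copied and
 * *new_tail points at the last mbuf of the chain.
 */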
13101 static struct mbuf *
13102 sctp_copy_resume(struct uio *uio,
13103 		 int max_send_len,
13104 #if defined(__FreeBSD__) || defined(__Userspace__)
13105 		 int user_marks_eor,
13106 #endif
13107 		 int *error,
13108 		 uint32_t *sndout,
13109 		 struct mbuf **new_tail)
13110 {
13111 #if defined(__FreeBSD__) || defined(__Userspace__)
13112 	struct mbuf *m;
13113 
13114 	m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0,
13115 		(M_PKTHDR | (user_marks_eor ? M_EOR : 0)));
13116 	if (m == NULL) {
13117 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
13118 		*error = ENOBUFS;
13119 	} else {
13120 		*sndout = m_length(m, NULL);
13121 		*new_tail = m_last(m);
13122 	}
13123 	return (m);
13124 #else
13125 	int left, cancpy, willcpy;
13126 	struct mbuf *m, *head;
13127 
13128 #if defined(__APPLE__) && !defined(__Userspace__)
13129 #if defined(APPLE_LEOPARD)
13130 	left = (int)min(uio->uio_resid, max_send_len);
13131 #else
13132 	left = (int)min(uio_resid(uio), max_send_len);
13133 #endif
13134 #else
13135 	left = (int)min(uio->uio_resid, max_send_len);
13136 #endif
13137 	/* Always get a header just in case */
13138 	head = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA);
13139 	if (head == NULL) {
13140 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
13141 		*error = ENOBUFS;
13142 		return (NULL);
13143 	}
13144 	cancpy = (int)M_TRAILINGSPACE(head);
13145 	willcpy = min(cancpy, left);
13146 	*error = uiomove(mtod(head, caddr_t), willcpy, uio);
13147 	if (*error) {
13148 		sctp_m_freem(head);
13149 		return (NULL);
13150 	}
13151 	*sndout += willcpy;
13152 	left -= willcpy;
13153 	SCTP_BUF_LEN(head) = willcpy;
13154 	m = head;
13155 	*new_tail = head;
13156 	while (left > 0) {
13157 		/* move in user data */
13158 		SCTP_BUF_NEXT(m) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA);
13159 		if (SCTP_BUF_NEXT(m) == NULL) {
13160 			sctp_m_freem(head);
13161 			*new_tail = NULL;
13162 			SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
13163 			*error = ENOBUFS;
13164 			return (NULL);
13165 		}
13166 		m = SCTP_BUF_NEXT(m);
13167 		cancpy = (int)M_TRAILINGSPACE(m);
13168 		willcpy = min(cancpy, left);
13169 		*error = uiomove(mtod(m, caddr_t), willcpy, uio);
13170 		if (*error) {
13171 			sctp_m_freem(head);
13172 			*new_tail = NULL;
13173 			SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
13174 			*error = EFAULT;
13175 			return (NULL);
13176 		}
13177 		SCTP_BUF_LEN(m) = willcpy;
13178 		left -= willcpy;
13179 		*sndout += willcpy;
13180 		*new_tail = m;
13181 		if (left == 0) {
13182 			SCTP_BUF_NEXT(m) = NULL;
13183 		}
13184 	}
13185 	return (head);
13186 #endif
13187 }
13188 
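/*
 * Copy sp->length bytes of user data from uio into sp->data, reserving
 * resv_upfront bytes of leading space in the first mbuf for the chunk
 * header; sp->tail_mbuf is set to the last mbuf.  Returns 0 or an
 * errno.
 */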
13189 static int
13190 sctp_copy_one(struct sctp_stream_queue_pending *sp,
13191               struct uio *uio,
13192               int resv_upfront)
13193 {
13194 #if defined(__FreeBSD__) || defined(__Userspace__)
13195 	sp->data = m_uiotombuf(uio, M_WAITOK, sp->length,
13196 	                       resv_upfront, 0);
13197 	if (sp->data == NULL) {
13198 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
13199 		return (ENOBUFS);
13200 	}
13201 
13202 	sp->tail_mbuf = m_last(sp->data);
13203 	return (0);
13204 #else
13205 	int left;
13206 	int cancpy, willcpy, error;
13207 	struct mbuf *m, *head;
13208 	int cpsz = 0;
13209 
13210 	/* First one gets a header */
13211 	left = sp->length;
13212 	head = m = sctp_get_mbuf_for_msg((left + resv_upfront), 0, M_WAITOK, 0, MT_DATA);
13213 	if (m == NULL) {
13214 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
13215 		return (ENOBUFS);
13216 	}
	/*-
	 * Apply the upfront reservation to m now; that way, if a later
	 * allocation fails, we won't have a bad count.
	 */
13221 	SCTP_BUF_RESV_UF(m, resv_upfront);
13222 	cancpy = (int)M_TRAILINGSPACE(m);
13223 	willcpy = min(cancpy, left);
13224 	while (left > 0) {
13225 		/* move in user data */
13226 		error = uiomove(mtod(m, caddr_t), willcpy, uio);
13227 		if (error) {
13228 			sctp_m_freem(head);
13229 			return (error);
13230 		}
13231 		SCTP_BUF_LEN(m) = willcpy;
13232 		left -= willcpy;
13233 		cpsz += willcpy;
13234 		if (left > 0) {
13235 			SCTP_BUF_NEXT(m) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA);
13236 			if (SCTP_BUF_NEXT(m) == NULL) {
				/*
				 * allocation failed; free the whole
				 * chain and report the error
				 */
13241 				sctp_m_freem(head);
13242 				SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
13243 				return (ENOBUFS);
13244 			}
13245 			m = SCTP_BUF_NEXT(m);
13246 			cancpy = (int)M_TRAILINGSPACE(m);
13247 			willcpy = min(cancpy, left);
13248 		} else {
13249 			sp->tail_mbuf = m;
13250 			SCTP_BUF_NEXT(m) = NULL;
13251 		}
13252 	}
13253 	sp->data = head;
13254 	sp->length = cpsz;
13255 	return (0);
13256 #endif
13257 }
13260 
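/*
 * Allocate and fill a stream queue pending entry for the next piece of
 * a user message: record the send parameters from srcv, copy up to
 * max_send_len bytes from uio and note whether this captured the whole
 * message.  Returns the entry, or NULL with *error set.
 */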
13261 static struct sctp_stream_queue_pending *
13262 sctp_copy_it_in(struct sctp_tcb *stcb,
13263     struct sctp_association *asoc,
13264     struct sctp_sndrcvinfo *srcv,
13265     struct uio *uio,
13266     struct sctp_nets *net,
13267     ssize_t max_send_len,
13268     int user_marks_eor,
13269     int *error)
13270 
13271 {
	/*-
	 * This routine must be very careful in its work.  Protocol
	 * processing is up and running, so care must be taken when doing
	 * anything that may affect the stcb/asoc.  The sb is locked,
	 * however.  While the data is being copied, protocol processing
	 * should stay enabled, since this is a slower operation...
	 */
13279 	struct sctp_stream_queue_pending *sp = NULL;
13280 	int resv_in_first;
13281 
13282 	*error = 0;
13283 	/* Now can we send this? */
13284 	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) ||
13285 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
13286 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
13287 	    (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
13288 		/* got data while shutting down */
13289 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
13290 		*error = ECONNRESET;
13291 		goto out_now;
13292 	}
13293 	sctp_alloc_a_strmoq(stcb, sp);
13294 	if (sp == NULL) {
13295 		SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
13296 		*error = ENOMEM;
13297 		goto out_now;
13298 	}
13299 	sp->act_flags = 0;
13300 	sp->sender_all_done = 0;
13301 	sp->sinfo_flags = srcv->sinfo_flags;
13302 	sp->timetolive = srcv->sinfo_timetolive;
13303 	sp->ppid = srcv->sinfo_ppid;
13304 	sp->context = srcv->sinfo_context;
13305 	sp->fsn = 0;
13306 	(void)SCTP_GETTIME_TIMEVAL(&sp->ts);
13307 
13308 	sp->sid = srcv->sinfo_stream;
13309 #if defined(__APPLE__) && !defined(__Userspace__)
13310 #if defined(APPLE_LEOPARD)
13311 	sp->length = (uint32_t)min(uio->uio_resid, max_send_len);
13312 #else
13313 	sp->length = (uint32_t)min(uio_resid(uio), max_send_len);
13314 #endif
13315 #else
13316 	sp->length = (uint32_t)min(uio->uio_resid, max_send_len);
13317 #endif
13318 #if defined(__APPLE__) && !defined(__Userspace__)
13319 #if defined(APPLE_LEOPARD)
13320 	if ((sp->length == (uint32_t)uio->uio_resid) &&
13321 #else
13322 	if ((sp->length == (uint32_t)uio_resid(uio)) &&
13323 #endif
13324 #else
13325 	if ((sp->length == (uint32_t)uio->uio_resid) &&
13326 #endif
13327 	    ((user_marks_eor == 0) ||
13328 	     (srcv->sinfo_flags & SCTP_EOF) ||
13329 	     (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
13330 		sp->msg_is_complete = 1;
13331 	} else {
13332 		sp->msg_is_complete = 0;
13333 	}
13334 	sp->sender_all_done = 0;
13335 	sp->some_taken = 0;
13336 	sp->put_last_out = 0;
13337 	resv_in_first = SCTP_DATA_CHUNK_OVERHEAD(stcb);
13338 	sp->data = sp->tail_mbuf = NULL;
13339 	if (sp->length == 0) {
13340 		goto skip_copy;
13341 	}
13342 	if (srcv->sinfo_keynumber_valid) {
13343 		sp->auth_keyid = srcv->sinfo_keynumber;
13344 	} else {
13345 		sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
13346 	}
13347 	if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
13348 		sctp_auth_key_acquire(stcb, sp->auth_keyid);
13349 		sp->holds_key_ref = 1;
13350 	}
13351 #if defined(__APPLE__) && !defined(__Userspace__)
13352 	SCTP_SOCKET_UNLOCK(SCTP_INP_SO(stcb->sctp_ep), 0);
13353 #endif
13354 	*error = sctp_copy_one(sp, uio, resv_in_first);
13355 #if defined(__APPLE__) && !defined(__Userspace__)
13356 	SCTP_SOCKET_LOCK(SCTP_INP_SO(stcb->sctp_ep), 0);
13357 #endif
13358  skip_copy:
13359 	if (*error) {
13360 #if defined(__Userspace__)
13361 		SCTP_TCB_LOCK(stcb);
13362 #endif
13363 		sctp_free_a_strmoq(stcb, sp, SCTP_SO_LOCKED);
13364 #if defined(__Userspace__)
13365 		SCTP_TCB_UNLOCK(stcb);
13366 #endif
13367 		sp = NULL;
13368 	} else {
13369 		if (sp->sinfo_flags & SCTP_ADDR_OVER) {
13370 			sp->net = net;
13371 			atomic_add_int(&sp->net->ref_count, 1);
13372 		} else {
13373 			sp->net = NULL;
13374 		}
13375 		sctp_set_prsctp_policy(sp);
13376 	}
13377 out_now:
13378 	return (sp);
13379 }
13381 
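/*
 * Socket layer entry point for sending on an SCTP socket: pull any
 * SCTP_SNDRCV cmsg out of the control chain, map a V4-mapped IPv6
 * address back to plain IPv4 when both protocols are available, and
 * hand everything to sctp_lower_sosend().
 */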
13382 int
13383 sctp_sosend(struct socket *so,
13384             struct sockaddr *addr,
13385             struct uio *uio,
13386             struct mbuf *top,
13387             struct mbuf *control,
13388 #if defined(__APPLE__) && !defined(__Userspace__)
13389             int flags
13390 #else
13391             int flags,
13392 #if defined(__FreeBSD__) && !defined(__Userspace__)
13393             struct thread *p
13394 #elif defined(_WIN32) && !defined(__Userspace__)
13395             PKTHREAD p
13396 #else
13397 #if defined(__Userspace__)
13398             /*
13399 	     * proc is a dummy in __Userspace__ and will not be passed
13400 	     * to sctp_lower_sosend
13401 	     */
13402 #endif
13403             struct proc *p
13404 #endif
13405 #endif
13406 )
13407 {
13408 #if defined(__APPLE__) && !defined(__Userspace__)
13409 	struct proc *p = current_proc();
13410 #endif
13411 	int error, use_sndinfo = 0;
13412 	struct sctp_sndrcvinfo sndrcvninfo;
13413 	struct sockaddr *addr_to_use;
13414 #if defined(INET) && defined(INET6)
13415 	struct sockaddr_in sin;
13416 #endif
13417 
13418 #if defined(__APPLE__) && !defined(__Userspace__)
13419 	SCTP_SOCKET_LOCK(so, 1);
13420 #endif
13421 	if (control) {
		/* process cmsg snd/rcv info (maybe an assoc-id) */
13423 		if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&sndrcvninfo, control,
13424 		    sizeof(sndrcvninfo))) {
13425 			/* got one */
13426 			use_sndinfo = 1;
13427 		}
13428 	}
13429 	addr_to_use = addr;
13430 #if defined(INET) && defined(INET6)
13431 	if ((addr) && (addr->sa_family == AF_INET6)) {
13432 		struct sockaddr_in6 *sin6;
13433 
13434 		sin6 = (struct sockaddr_in6 *)addr;
13435 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
13436 			in6_sin6_2_sin(&sin, sin6);
13437 			addr_to_use = (struct sockaddr *)&sin;
13438 		}
13439 	}
13440 #endif
13441 	error = sctp_lower_sosend(so, addr_to_use, uio, top,
13442 				  control,
13443 				  flags,
13444 				  use_sndinfo ? &sndrcvninfo: NULL
13445 #if !defined(__Userspace__)
13446 				  , p
13447 #endif
13448 		);
13449 #if defined(__APPLE__) && !defined(__Userspace__)
13450 	SCTP_SOCKET_UNLOCK(so, 1);
13451 #endif
13452 	return (error);
13453 }
13455 
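/*
 * The workhorse behind sctp_sosend() and the other send paths: locate
 * (or implicitly set up) the association, validate the destination
 * address and send flags, enforce non-blocking semantics and socket
 * buffer limits, and finally queue the user data (or a user-initiated
 * ABORT) for transmission.
 */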
13456 int
13457 sctp_lower_sosend(struct socket *so,
13458                   struct sockaddr *addr,
13459                   struct uio *uio,
13460                   struct mbuf *i_pak,
13461                   struct mbuf *control,
13462                   int flags,
13463                   struct sctp_sndrcvinfo *srcv
13464 #if !defined(__Userspace__)
13465                   ,
13466 #if defined(__FreeBSD__)
13467                   struct thread *p
13468 #elif defined(_WIN32)
13469                   PKTHREAD p
13470 #else
13471                   struct proc *p
13472 #endif
13473 #endif
13474 	)
13475 {
13476 #if defined(__FreeBSD__) && !defined(__Userspace__)
13477 	struct epoch_tracker et;
13478 #endif
13479 	ssize_t sndlen = 0, max_len, local_add_more;
13480 	int error, len;
13481 	struct mbuf *top = NULL;
13482 	int queue_only = 0, queue_only_for_init = 0;
13483 	int free_cnt_applied = 0;
13484 	int un_sent;
13485 	int now_filled = 0;
13486 	unsigned int inqueue_bytes = 0;
13487 	struct sctp_block_entry be;
13488 	struct sctp_inpcb *inp;
13489 	struct sctp_tcb *stcb = NULL;
13490 	struct timeval now;
13491 	struct sctp_nets *net;
13492 	struct sctp_association *asoc;
13493 	struct sctp_inpcb *t_inp;
13494 	int user_marks_eor;
13495 	int create_lock_applied = 0;
13496 	int nagle_applies = 0;
13497 	int some_on_control = 0;
13498 	int got_all_of_the_send = 0;
13499 	int hold_tcblock = 0;
13500 	int non_blocking = 0;
13501 	ssize_t local_soresv = 0;
13502 	uint16_t port;
13503 	uint16_t sinfo_flags;
13504 	sctp_assoc_t sinfo_assoc_id;
13505 
13506 	error = 0;
13507 	net = NULL;
13508 	stcb = NULL;
13509 	asoc = NULL;
13510 
13511 #if defined(__APPLE__) && !defined(__Userspace__)
13512 	sctp_lock_assert(so);
13513 #endif
13514 	t_inp = inp = (struct sctp_inpcb *)so->so_pcb;
13515 	if (inp == NULL) {
13516 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13517 		error = EINVAL;
13518 		if (i_pak) {
13519 			SCTP_RELEASE_PKT(i_pak);
13520 		}
13521 		return (error);
13522 	}
13523 	if ((uio == NULL) && (i_pak == NULL)) {
13524 		SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13525 		return (EINVAL);
13526 	}
13527 	user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
13528 	atomic_add_int(&inp->total_sends, 1);
13529 	if (uio) {
13530 #if defined(__APPLE__) && !defined(__Userspace__)
13531 #if defined(APPLE_LEOPARD)
13532 		if (uio->uio_resid < 0) {
13533 #else
13534 		if (uio_resid(uio) < 0) {
13535 #endif
13536 #else
13537 		if (uio->uio_resid < 0) {
13538 #endif
13539 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13540 			return (EINVAL);
13541 		}
13542 #if defined(__APPLE__) && !defined(__Userspace__)
13543 #if defined(APPLE_LEOPARD)
13544 		sndlen = uio->uio_resid;
13545 #else
13546 		sndlen = uio_resid(uio);
13547 #endif
13548 #else
13549 		sndlen = uio->uio_resid;
13550 #endif
13551 	} else {
13552 		top = SCTP_HEADER_TO_CHAIN(i_pak);
13553 		sndlen = SCTP_HEADER_LEN(i_pak);
13554 	}
13555 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "Send called addr:%p send length %zd\n",
13556 	        (void *)addr,
13557 	        sndlen);
13558 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
13559 	    SCTP_IS_LISTENING(inp)) {
13560 		/* The listener can NOT send */
13561 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
13562 		error = ENOTCONN;
13563 		goto out_unlocked;
13564 	}
	/*-
	 * Pre-screen the address: if one is given, its length field
	 * must be set correctly!
	 */
13569 	if (addr) {
13570 		union sctp_sockstore *raddr = (union sctp_sockstore *)addr;
13571 		switch (raddr->sa.sa_family) {
13572 #ifdef INET
13573 		case AF_INET:
13574 #ifdef HAVE_SIN_LEN
13575 			if (raddr->sin.sin_len != sizeof(struct sockaddr_in)) {
13576 				SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13577 				error = EINVAL;
13578 				goto out_unlocked;
13579 			}
13580 #endif
13581 			port = raddr->sin.sin_port;
13582 			break;
13583 #endif
13584 #ifdef INET6
13585 		case AF_INET6:
13586 #ifdef HAVE_SIN6_LEN
13587 			if (raddr->sin6.sin6_len != sizeof(struct sockaddr_in6)) {
13588 				SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13589 				error = EINVAL;
13590 				goto out_unlocked;
13591 			}
13592 #endif
13593 			port = raddr->sin6.sin6_port;
13594 			break;
13595 #endif
13596 #if defined(__Userspace__)
13597 		case AF_CONN:
13598 #ifdef HAVE_SCONN_LEN
13599 			if (raddr->sconn.sconn_len != sizeof(struct sockaddr_conn)) {
13600 				SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13601 				error = EINVAL;
13602 				goto out_unlocked;
13603 			}
13604 #endif
13605 			port = raddr->sconn.sconn_port;
13606 			break;
13607 #endif
13608 		default:
13609 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAFNOSUPPORT);
13610 			error = EAFNOSUPPORT;
13611 			goto out_unlocked;
13612 		}
13613 	} else
13614 		port = 0;
13615 
13616 	if (srcv) {
13617 		sinfo_flags = srcv->sinfo_flags;
13618 		sinfo_assoc_id = srcv->sinfo_assoc_id;
13619 		if (INVALID_SINFO_FLAG(sinfo_flags) ||
13620 		    PR_SCTP_INVALID_POLICY(sinfo_flags)) {
13621 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13622 			error = EINVAL;
13623 			goto out_unlocked;
13624 		}
13625 		if (srcv->sinfo_flags)
13626 			SCTP_STAT_INCR(sctps_sends_with_flags);
13627 	} else {
13628 		sinfo_flags = inp->def_send.sinfo_flags;
13629 		sinfo_assoc_id = inp->def_send.sinfo_assoc_id;
13630 	}
13631 #if defined(__FreeBSD__) && !defined(__Userspace__)
13632 	if (flags & MSG_EOR) {
13633 		sinfo_flags |= SCTP_EOR;
13634 	}
13635 	if (flags & MSG_EOF) {
13636 		sinfo_flags |= SCTP_EOF;
13637 	}
13638 #endif
13639 	if (sinfo_flags & SCTP_SENDALL) {
		/* it's a sendall */
13641 		error = sctp_sendall(inp, uio, top, srcv);
13642 		top = NULL;
13643 		goto out_unlocked;
13644 	}
13645 	if ((sinfo_flags & SCTP_ADDR_OVER) && (addr == NULL)) {
13646 		SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13647 		error = EINVAL;
13648 		goto out_unlocked;
13649 	}
13650 	/* now we must find the assoc */
13651 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
13652 	    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
13653 		SCTP_INP_RLOCK(inp);
13654 		stcb = LIST_FIRST(&inp->sctp_asoc_list);
13655 		if (stcb) {
13656 			SCTP_TCB_LOCK(stcb);
13657 			hold_tcblock = 1;
13658 		}
13659 		SCTP_INP_RUNLOCK(inp);
13660 	} else if (sinfo_assoc_id) {
13661 		stcb = sctp_findassociation_ep_asocid(inp, sinfo_assoc_id, 1);
13662 		if (stcb != NULL) {
13663 			hold_tcblock = 1;
13664 		}
13665 	} else if (addr) {
		/*-
		 * Since we did not use findep, we must increment the
		 * inp ref count ourselves, and decrement it again if
		 * we don't find a tcb.
		 */
13671 		SCTP_INP_WLOCK(inp);
13672 		SCTP_INP_INCR_REF(inp);
13673 		SCTP_INP_WUNLOCK(inp);
13674 		stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
13675 		if (stcb == NULL) {
13676 			SCTP_INP_WLOCK(inp);
13677 			SCTP_INP_DECR_REF(inp);
13678 			SCTP_INP_WUNLOCK(inp);
13679 		} else {
13680 			hold_tcblock = 1;
13681 		}
13682 	}
13683 	if ((stcb == NULL) && (addr)) {
13684 		/* Possible implicit send? */
13685 		SCTP_ASOC_CREATE_LOCK(inp);
13686 		create_lock_applied = 1;
13687 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
13688 		    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
13689 			/* Should I really unlock ? */
13690 			SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13691 			error = EINVAL;
13692 			goto out_unlocked;
13693 
13694 		}
13695 		if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
13696 		    (addr->sa_family == AF_INET6)) {
13697 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13698 			error = EINVAL;
13699 			goto out_unlocked;
13700 		}
13701 		SCTP_INP_WLOCK(inp);
13702 		SCTP_INP_INCR_REF(inp);
13703 		SCTP_INP_WUNLOCK(inp);
13704 		/* With the lock applied look again */
13705 		stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
13706 #if defined(INET) || defined(INET6)
13707 		if ((stcb == NULL) && (control != NULL) && (port > 0)) {
13708 			stcb = sctp_findassociation_cmsgs(&t_inp, port, control, &net, &error);
13709 		}
13710 #endif
13711 		if (stcb == NULL) {
13712 			SCTP_INP_WLOCK(inp);
13713 			SCTP_INP_DECR_REF(inp);
13714 			SCTP_INP_WUNLOCK(inp);
13715 		} else {
13716 			hold_tcblock = 1;
13717 		}
13718 		if (error) {
13719 			goto out_unlocked;
13720 		}
13721 		if (t_inp != inp) {
13722 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
13723 			error = ENOTCONN;
13724 			goto out_unlocked;
13725 		}
13726 	}
13727 	if (stcb == NULL) {
13728 		if (addr == NULL) {
13729 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
13730 			error = ENOENT;
13731 			goto out_unlocked;
13732 		} else {
13733 			/* We must go ahead and start the INIT process */
13734 			uint32_t vrf_id;
13735 
13736 			if ((sinfo_flags & SCTP_ABORT) ||
13737 			    ((sinfo_flags & SCTP_EOF) && (sndlen == 0))) {
				/*-
				 * User asks to abort a non-existent assoc,
				 * or EOF a non-existent assoc with no data
				 */
13742 				SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
13743 				error = ENOENT;
13744 				goto out_unlocked;
13745 			}
13746 			/* get an asoc/stcb struct */
13747 			vrf_id = inp->def_vrf_id;
13748 #ifdef INVARIANTS
13749 			if (create_lock_applied == 0) {
13750 				panic("Error, should hold create lock and I don't?");
13751 			}
13752 #endif
13753 			stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id,
13754 			                       inp->sctp_ep.pre_open_stream_count,
13755 			                       inp->sctp_ep.port,
13756 #if !defined(__Userspace__)
13757 			                       p,
13758 #else
13759 			                       (struct proc *)NULL,
13760 #endif
13761 			                       SCTP_INITIALIZE_AUTH_PARAMS);
13762 			if (stcb == NULL) {
				/* The error is set up for us in the call */
13764 				goto out_unlocked;
13765 			}
13766 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
13767 				stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
13768 				/* Set the connected flag so we can queue data */
13769 				soisconnecting(so);
13770 			}
13771 			hold_tcblock = 1;
13772 			if (create_lock_applied) {
13773 				SCTP_ASOC_CREATE_UNLOCK(inp);
13774 				create_lock_applied = 0;
13775 			} else {
13776 				SCTP_PRINTF("Huh-3? create lock should have been on??\n");
13777 			}
13778 			/* Turn on queue only flag to prevent data from being sent */
13779 			queue_only = 1;
13780 			asoc = &stcb->asoc;
13781 			SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
13782 			(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
13783 
13784 			if (control) {
13785 				if (sctp_process_cmsgs_for_init(stcb, control, &error)) {
13786 					sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE,
13787 					                SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_6);
13788 					hold_tcblock = 0;
13789 					stcb = NULL;
13790 					goto out_unlocked;
13791 				}
13792 			}
13793 			/* out with the INIT */
13794 			queue_only_for_init = 1;
13795 			/*-
13796 			 * we may want to dig in after this call and adjust the MTU
13797 			 * value. It defaulted to 1500 (constant) but the ro
13798 			 * structure may now have an update and thus we may need to
13799 			 * change it BEFORE we append the message.
13800 			 */
13801 		}
13802 	} else
13803 		asoc = &stcb->asoc;
13804 	if (srcv == NULL) {
13805 		srcv = (struct sctp_sndrcvinfo *)&asoc->def_send;
13806 		sinfo_flags = srcv->sinfo_flags;
13807 #if defined(__FreeBSD__) && !defined(__Userspace__)
13808 		if (flags & MSG_EOR) {
13809 			sinfo_flags |= SCTP_EOR;
13810 		}
13811 		if (flags & MSG_EOF) {
13812 			sinfo_flags |= SCTP_EOF;
13813 		}
13814 #endif
13815 	}
13816 	if (sinfo_flags & SCTP_ADDR_OVER) {
13817 		if (addr)
13818 			net = sctp_findnet(stcb, addr);
13819 		else
13820 			net = NULL;
13821 		if ((net == NULL) ||
13822 		    ((port != 0) && (port != stcb->rport))) {
13823 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13824 			error = EINVAL;
13825 			goto out_unlocked;
13826 		}
13827 	} else {
13828 		if (stcb->asoc.alternate) {
13829 			net = stcb->asoc.alternate;
13830 		} else {
13831 			net = stcb->asoc.primary_destination;
13832 		}
13833 	}
13834 	atomic_add_int(&stcb->total_sends, 1);
13835 	/* Keep the stcb from being freed under our feet */
13836 	atomic_add_int(&asoc->refcnt, 1);
13837 	free_cnt_applied = 1;
13838 
13839 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT)) {
13840 		if (sndlen > (ssize_t)asoc->smallest_mtu) {
13841 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
13842 			error = EMSGSIZE;
13843 			goto out_unlocked;
13844 		}
13845 	}
13846 #if defined(__Userspace__)
13847 	if (inp->recv_callback) {
13848 		non_blocking = 1;
13849 	}
13850 #endif
13851 	if (SCTP_SO_IS_NBIO(so)
13852 #if defined(__FreeBSD__) && !defined(__Userspace__)
13853 	     || (flags & (MSG_NBIO | MSG_DONTWAIT)) != 0
13854 #endif
13855 	    ) {
13856 		non_blocking = 1;
13857 	}
13858 	/* would we block? */
13859 	if (non_blocking) {
13860 		ssize_t amount;
13861 
13862 		if (hold_tcblock == 0) {
13863 			SCTP_TCB_LOCK(stcb);
13864 			hold_tcblock = 1;
13865 		}
13866 		inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
13867 		if (user_marks_eor == 0) {
13868 			amount = sndlen;
13869 		} else {
13870 			amount = 1;
13871 		}
13872 		if ((SCTP_SB_LIMIT_SND(so) <  (amount + inqueue_bytes + stcb->asoc.sb_send_resv)) ||
13873 		    (stcb->asoc.chunks_on_out_queue >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
13874 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EWOULDBLOCK);
13875 			if (sndlen > (ssize_t)SCTP_SB_LIMIT_SND(so))
13876 				error = EMSGSIZE;
13877 			else
13878 				error = EWOULDBLOCK;
13879 			goto out_unlocked;
13880 		}
13881 		stcb->asoc.sb_send_resv += (uint32_t)sndlen;
13882 		SCTP_TCB_UNLOCK(stcb);
13883 		hold_tcblock = 0;
13884 	} else {
13885 		atomic_add_int(&stcb->asoc.sb_send_resv, sndlen);
13886 	}
13887 	local_soresv = sndlen;
13888 	if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13889 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
13890 		error = ECONNRESET;
13891 		goto out_unlocked;
13892 	}
13893 	if (create_lock_applied) {
13894 		SCTP_ASOC_CREATE_UNLOCK(inp);
13895 		create_lock_applied = 0;
13896 	}
13897 	/* Is the stream no. valid? */
13898 	if (srcv->sinfo_stream >= asoc->streamoutcnt) {
13899 		/* Invalid stream number */
13900 		SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13901 		error = EINVAL;
13902 		goto out_unlocked;
13903 	}
13904 	if ((asoc->strmout[srcv->sinfo_stream].state != SCTP_STREAM_OPEN) &&
13905 	    (asoc->strmout[srcv->sinfo_stream].state != SCTP_STREAM_OPENING)) {
13906 		/*
13907 		 * Can't queue any data while stream reset is underway.
13908 		 */
13909 		if (asoc->strmout[srcv->sinfo_stream].state > SCTP_STREAM_OPEN) {
13910 			error = EAGAIN;
13911 		} else {
13912 			error = EINVAL;
13913 		}
13914 		SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, error);
13915 		goto out_unlocked;
13916 	}
13917 	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
13918 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
13919 		queue_only = 1;
13920 	}
13921 	/* we are now done with all control */
13922 	if (control) {
13923 		sctp_m_freem(control);
13924 		control = NULL;
13925 	}
13926 	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) ||
13927 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
13928 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
13929 	    (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
13930 		if (sinfo_flags & SCTP_ABORT) {
13931 			;
13932 		} else {
13933 			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
13934 			error = ECONNRESET;
13935 			goto out_unlocked;
13936 		}
13937 	}
13938 	/* Ok, we will attempt a msgsnd :> */
13939 #if !(defined(_WIN32) || defined(__Userspace__))
13940 	if (p) {
13941 #if defined(__FreeBSD__)
13942 		p->td_ru.ru_msgsnd++;
13943 #else
13944 		p->p_stats->p_ru.ru_msgsnd++;
13945 #endif
13946 	}
13947 #endif
13948 	/* Are we aborting? */
13949 	if (sinfo_flags & SCTP_ABORT) {
13950 		struct mbuf *mm;
13951 		ssize_t tot_demand, tot_out = 0, max_out;
13952 
13953 		SCTP_STAT_INCR(sctps_sends_with_abort);
13954 		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
13955 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
13956 			/* It has to be up before we abort */
13957 			/* how big is the user initiated abort? */
13958 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13959 			error = EINVAL;
13960 			goto out;
13961 		}
13962 		if (hold_tcblock) {
13963 			SCTP_TCB_UNLOCK(stcb);
13964 			hold_tcblock = 0;
13965 		}
13966 		if (top) {
13967 			struct mbuf *cntm = NULL;
13968 
13969 			mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_WAITOK, 1, MT_DATA);
13970 			if (sndlen != 0) {
13971 				for (cntm = top; cntm; cntm = SCTP_BUF_NEXT(cntm)) {
13972 					tot_out += SCTP_BUF_LEN(cntm);
13973 				}
13974 			}
13975 		} else {
			/* Must fit in an MTU */
13977 			tot_out = sndlen;
13978 			tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
13979 			if (tot_demand > SCTP_DEFAULT_ADD_MORE) {
				/* Too big */
13981 				SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
13982 				error = EMSGSIZE;
13983 				goto out;
13984 			}
13985 			mm = sctp_get_mbuf_for_msg((unsigned int)tot_demand, 0, M_WAITOK, 1, MT_DATA);
13986 		}
13987 		if (mm == NULL) {
13988 			SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
13989 			error = ENOMEM;
13990 			goto out;
13991 		}
13992 		max_out = asoc->smallest_mtu - sizeof(struct sctp_paramhdr);
13993 		max_out -= sizeof(struct sctp_abort_msg);
13994 		if (tot_out > max_out) {
13995 			tot_out = max_out;
13996 		}
13997 		if (mm) {
13998 			struct sctp_paramhdr *ph;
13999 
14000 			/* now move forward the data pointer */
14001 			ph = mtod(mm, struct sctp_paramhdr *);
14002 			ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
14003 			ph->param_length = htons((uint16_t)(sizeof(struct sctp_paramhdr) + tot_out));
14004 			ph++;
14005 			SCTP_BUF_LEN(mm) = (int)(tot_out + sizeof(struct sctp_paramhdr));
14006 			if (top == NULL) {
14007 #if defined(__APPLE__) && !defined(__Userspace__)
14008 				SCTP_SOCKET_UNLOCK(so, 0);
14009 #endif
14010 				error = uiomove((caddr_t)ph, (int)tot_out, uio);
14011 #if defined(__APPLE__) && !defined(__Userspace__)
14012 				SCTP_SOCKET_LOCK(so, 0);
14013 #endif
14014 				if (error) {
					/*-
					 * If we can't copy the user's data
					 * we still abort; we just don't get
					 * to send the user's note :-0
					 */
14020 					sctp_m_freem(mm);
14021 					mm = NULL;
14022 				}
14023 			} else {
14024 				if (sndlen != 0) {
14025 					SCTP_BUF_NEXT(mm) = top;
14026 				}
14027 			}
14028 		}
14029 		if (hold_tcblock == 0) {
14030 			SCTP_TCB_LOCK(stcb);
14031 		}
14032 		atomic_add_int(&stcb->asoc.refcnt, -1);
14033 		free_cnt_applied = 0;
14034 		/* release this lock, otherwise we hang on ourselves */
14035 #if defined(__FreeBSD__) && !defined(__Userspace__)
14036 		NET_EPOCH_ENTER(et);
14037 #endif
14038 		sctp_abort_an_association(stcb->sctp_ep, stcb, mm, SCTP_SO_LOCKED);
14039 #if defined(__FreeBSD__) && !defined(__Userspace__)
14040 		NET_EPOCH_EXIT(et);
14041 #endif
		/* the stcb is gone after the abort; clear our references */
14043 		hold_tcblock = 0;
14044 		stcb = NULL;
		/*-
		 * top is already chained to mm, so clear it to avoid a
		 * double free: we free top below if it is non-NULL, and
		 * the driver frees it after sending the packet out.
		 */
14050 		if (sndlen != 0) {
14051 			top = NULL;
14052 		}
14053 		goto out_unlocked;
14054 	}
14055 	/* Calculate the maximum we can send */
14056 	inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
14057 	if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
14058 		max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
14059 	} else {
14060 		max_len = 0;
14061 	}
14062 	if (hold_tcblock) {
14063 		SCTP_TCB_UNLOCK(stcb);
14064 		hold_tcblock = 0;
14065 	}
14066 	if (asoc->strmout == NULL) {
14067 		/* huh? software error */
14068 		SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
14069 		error = EFAULT;
14070 		goto out_unlocked;
14071 	}
14072 
	/* Unless explicit EOR mode is on, the send must fit in one call. */
14074 	if ((user_marks_eor == 0) &&
14075 	    (sndlen > (ssize_t)SCTP_SB_LIMIT_SND(stcb->sctp_socket))) {
14076 		/* It will NEVER fit */
14077 		SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
14078 		error = EMSGSIZE;
14079 		goto out_unlocked;
14080 	}
14081 	if ((uio == NULL) && user_marks_eor) {
14082 		/*-
14083 		 * We do not support eeor mode for
14084 		 * sending with mbuf chains (like sendfile).
14085 		 */
14086 		SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
14087 		error = EINVAL;
14088 		goto out_unlocked;
14089 	}
14090 
14091 	if (user_marks_eor) {
14092 		local_add_more = (ssize_t)min(SCTP_SB_LIMIT_SND(so), SCTP_BASE_SYSCTL(sctp_add_more_threshold));
14093 	} else {
14094 		/*-
14095 		 * For non-eeor the whole message must fit in
14096 		 * the socket send buffer.
14097 		 */
14098 		local_add_more = sndlen;
14099 	}
14100 	len = 0;
14101 	if (non_blocking) {
14102 		goto skip_preblock;
14103 	}
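	/*
	 * Pre-block: if the send socket buffer cannot take local_add_more
	 * more bytes, or too many chunks are already queued, wait here
	 * until enough space frees up.
	 */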
14104 	if (((max_len <= local_add_more) &&
14105 	     ((ssize_t)SCTP_SB_LIMIT_SND(so) >= local_add_more)) ||
14106 	    (max_len == 0) ||
14107 	    ((stcb->asoc.chunks_on_out_queue+stcb->asoc.stream_queue_cnt) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
		/* No room right now! */
14109 		SOCKBUF_LOCK(&so->so_snd);
14110 		inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
14111 		while ((SCTP_SB_LIMIT_SND(so) < (inqueue_bytes + local_add_more)) ||
14112 		       ((stcb->asoc.stream_queue_cnt + stcb->asoc.chunks_on_out_queue) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
14113 			SCTPDBG(SCTP_DEBUG_OUTPUT1,"pre_block limit:%u <(inq:%d + %zd) || (%d+%d > %d)\n",
14114 			        (unsigned int)SCTP_SB_LIMIT_SND(so),
14115 			        inqueue_bytes,
14116 			        local_add_more,
14117 			        stcb->asoc.stream_queue_cnt,
14118 			        stcb->asoc.chunks_on_out_queue,
14119 			        SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue));
14120 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
14121 				sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA, asoc, sndlen);
14122 			}
14123 			be.error = 0;
14124 #if !(defined(_WIN32) && !defined(__Userspace__))
14125 			stcb->block_entry = &be;
14126 #endif
14127 			error = sbwait(&so->so_snd);
14128 			stcb->block_entry = NULL;
14129 			if (error || so->so_error || be.error) {
14130 				if (error == 0) {
14131 					if (so->so_error)
14132 						error = so->so_error;
14133 					if (be.error) {
14134 						error = be.error;
14135 					}
14136 				}
14137 				SOCKBUF_UNLOCK(&so->so_snd);
14138 				goto out_unlocked;
14139 			}
14140 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
14141 				sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
14142 				               asoc, stcb->asoc.total_output_queue_size);
14143 			}
14144 			if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
14145 				SOCKBUF_UNLOCK(&so->so_snd);
14146 				goto out_unlocked;
14147 			}
14148 			inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
14149 		}
14150 		if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
			max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
14152 		} else {
14153 			max_len = 0;
14154 		}
14155 		SOCKBUF_UNLOCK(&so->so_snd);
14156 	}
14157 
14158 skip_preblock:
14159 	if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
14160 		goto out_unlocked;
14161 	}
14162 #if defined(__APPLE__) && !defined(__Userspace__)
14163 	error = sblock(&so->so_snd, SBLOCKWAIT(flags));
14164 #endif
	/*-
	 * sndlen covers the mbuf case, uio_resid covers the non-mbuf case.
	 * NOTE: uio will be NULL when top (an mbuf chain) is passed.
	 */
14169 	if (sndlen == 0) {
14170 		if (sinfo_flags & SCTP_EOF) {
14171 			got_all_of_the_send = 1;
14172 			goto dataless_eof;
14173 		} else {
14174 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
14175 			error = EINVAL;
14176 			goto out;
14177 		}
14178 	}
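	/*
	 * top == NULL means the data has to be copied in from the user's
	 * uio; otherwise the caller handed us a ready-made mbuf chain.
	 */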
14179 	if (top == NULL) {
14180 		struct sctp_stream_queue_pending *sp;
14181 		struct sctp_stream_out *strm;
14182 		uint32_t sndout;
14183 
14184 		SCTP_TCB_SEND_LOCK(stcb);
14185 		if ((asoc->stream_locked) &&
14186 		    (asoc->stream_locked_on  != srcv->sinfo_stream)) {
14187 			SCTP_TCB_SEND_UNLOCK(stcb);
14188 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
14189 			error = EINVAL;
14190 			goto out;
14191 		}
14192 		SCTP_TCB_SEND_UNLOCK(stcb);
14193 
14194 		strm = &stcb->asoc.strmout[srcv->sinfo_stream];
14195 		if (strm->last_msg_incomplete == 0) {
14196 		do_a_copy_in:
14197 			sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error);
14198 			if (error) {
14199 				goto out;
14200 			}
14201 			SCTP_TCB_SEND_LOCK(stcb);
14202 			if (sp->msg_is_complete) {
14203 				strm->last_msg_incomplete = 0;
14204 				asoc->stream_locked = 0;
14205 			} else {
				/*
				 * The stream just got locked to this
				 * message in case the copy was
				 * interrupted.
				 */
14209 				strm->last_msg_incomplete = 1;
14210 				if (stcb->asoc.idata_supported == 0) {
14211 					asoc->stream_locked = 1;
14212 					asoc->stream_locked_on  = srcv->sinfo_stream;
14213 				}
14214 				sp->sender_all_done = 0;
14215 			}
14216 			sctp_snd_sb_alloc(stcb, sp->length);
14217 			atomic_add_int(&asoc->stream_queue_cnt, 1);
14218 			if (sinfo_flags & SCTP_UNORDERED) {
14219 				SCTP_STAT_INCR(sctps_sends_with_unord);
14220 			}
14221 			TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
14222 			stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, asoc, strm, sp, 1);
14223 			SCTP_TCB_SEND_UNLOCK(stcb);
14224 		} else {
14225 			SCTP_TCB_SEND_LOCK(stcb);
14226 			sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead);
14227 			SCTP_TCB_SEND_UNLOCK(stcb);
14228 			if (sp == NULL) {
				/* The last message was marked incomplete, yet the queue is empty. */
14230 #ifdef INVARIANTS
14231 				panic("Warning: Last msg marked incomplete, yet nothing left?");
14232 #else
14233 				SCTP_PRINTF("Warning: Last msg marked incomplete, yet nothing left?\n");
14234 				strm->last_msg_incomplete = 0;
14235 #endif
14236 				goto do_a_copy_in;
14237 
14238 			}
14239 		}
14240 #if defined(__APPLE__) && !defined(__Userspace__)
14241 #if defined(APPLE_LEOPARD)
14242 		while (uio->uio_resid > 0) {
14243 #else
14244 		while (uio_resid(uio) > 0) {
14245 #endif
14246 #else
14247 		while (uio->uio_resid > 0) {
14248 #endif
14249 			/* How much room do we have? */
14250 			struct mbuf *new_tail, *mm;
14251 
14252 			inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
14253 			if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes)
14254 				max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
14255 			else
14256 				max_len = 0;
14257 
14258 			if ((max_len > (ssize_t)SCTP_BASE_SYSCTL(sctp_add_more_threshold)) ||
14259 			    (max_len && (SCTP_SB_LIMIT_SND(so) < SCTP_BASE_SYSCTL(sctp_add_more_threshold))) ||
14260 #if defined(__APPLE__) && !defined(__Userspace__)
14261 #if defined(APPLE_LEOPARD)
14262 			    (uio->uio_resid && (uio->uio_resid <= max_len))) {
14263 #else
14264 			    (uio_resid(uio) && (uio_resid(uio) <= max_len))) {
14265 #endif
14266 #else
14267 			    (uio->uio_resid && (uio->uio_resid <= max_len))) {
14268 #endif
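				/*
				 * There is enough room (or the remainder
				 * fits): copy the next piece of the user's
				 * data into a new mbuf chain and append it
				 * to the pending message.
				 */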
14269 				sndout = 0;
14270 				new_tail = NULL;
14271 				if (hold_tcblock) {
14272 					SCTP_TCB_UNLOCK(stcb);
14273 					hold_tcblock = 0;
14274 				}
14275 #if defined(__APPLE__) && !defined(__Userspace__)
14276 				SCTP_SOCKET_UNLOCK(so, 0);
14277 #endif
14278 #if defined(__FreeBSD__) || defined(__Userspace__)
14279 				mm = sctp_copy_resume(uio, (int)max_len, user_marks_eor, &error, &sndout, &new_tail);
14280 #else
14281 				mm = sctp_copy_resume(uio, (int)max_len, &error, &sndout, &new_tail);
14282 #endif
14283 #if defined(__APPLE__) && !defined(__Userspace__)
14284 				SCTP_SOCKET_LOCK(so, 0);
14285 #endif
14286 				if ((mm == NULL) || error) {
14287 					if (mm) {
14288 						sctp_m_freem(mm);
14289 					}
14290 					goto out;
14291 				}
14292 				/* Update the mbuf and count */
14293 				SCTP_TCB_SEND_LOCK(stcb);
14294 				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
14295 					/* we need to get out.
14296 					 * Peer probably aborted.
14297 					 */
14298 					sctp_m_freem(mm);
					if (stcb->asoc.state & SCTP_STATE_WAS_ABORTED) {
14300 						SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
14301 						error = ECONNRESET;
14302 					}
14303 					SCTP_TCB_SEND_UNLOCK(stcb);
14304 					goto out;
14305 				}
14306 				if (sp->tail_mbuf) {
14307 					/* tack it to the end */
14308 					SCTP_BUF_NEXT(sp->tail_mbuf) = mm;
14309 					sp->tail_mbuf = new_tail;
14310 				} else {
14311 					/* A stolen mbuf */
14312 					sp->data = mm;
14313 					sp->tail_mbuf = new_tail;
14314 				}
14315 				sctp_snd_sb_alloc(stcb, sndout);
14316 				atomic_add_int(&sp->length, sndout);
14317 				len += sndout;
14318 				if (sinfo_flags & SCTP_SACK_IMMEDIATELY) {
14319 					sp->sinfo_flags |= SCTP_SACK_IMMEDIATELY;
14320 				}
14321 
14322 				/* Did we reach EOR? */
14323 #if defined(__APPLE__) && !defined(__Userspace__)
14324 #if defined(APPLE_LEOPARD)
14325 				if ((uio->uio_resid == 0) &&
14326 #else
14327 				if ((uio_resid(uio) == 0) &&
14328 #endif
14329 #else
14330 				if ((uio->uio_resid == 0) &&
14331 #endif
14332 				    ((user_marks_eor == 0) ||
14333 				     (sinfo_flags & SCTP_EOF) ||
14334 				     (user_marks_eor && (sinfo_flags & SCTP_EOR)))) {
14335 					sp->msg_is_complete = 1;
14336 				} else {
14337 					sp->msg_is_complete = 0;
14338 				}
14339 				SCTP_TCB_SEND_UNLOCK(stcb);
14340 			}
14341 #if defined(__APPLE__) && !defined(__Userspace__)
14342 #if defined(APPLE_LEOPARD)
14343 			if (uio->uio_resid == 0) {
14344 #else
14345 			if (uio_resid(uio) == 0) {
14346 #endif
14347 #else
14348 			if (uio->uio_resid == 0) {
14349 #endif
14350 				/* got it all? */
14351 				continue;
14352 			}
			/*
			 * Not everything fit: if PR-SCTP is in use, try to
			 * prune removable chunks to make room before we
			 * block.
			 */
14354 			if ((asoc->prsctp_supported) && (asoc->sent_queue_cnt_removeable > 0)) {
14355 				/* This is ugly but we must assure locking order */
14356 				if (hold_tcblock == 0) {
14357 					SCTP_TCB_LOCK(stcb);
14358 					hold_tcblock = 1;
14359 				}
14360 				sctp_prune_prsctp(stcb, asoc, srcv, (int)sndlen);
14361 				inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
14362 				if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes)
14363 					max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
14364 				else
14365 					max_len = 0;
14366 				if (max_len > 0) {
14367 					continue;
14368 				}
14369 				SCTP_TCB_UNLOCK(stcb);
14370 				hold_tcblock = 0;
14371 			}
14372 			/* wait for space now */
14373 			if (non_blocking) {
				/* Non-blocking I/O: don't wait for space, finish up below. */
14375 				goto skip_out_eof;
14376 			}
			/* If the INIT has not been sent yet, send it now. */
14378 			if (queue_only_for_init) {
14379 				if (hold_tcblock == 0) {
14380 					SCTP_TCB_LOCK(stcb);
14381 					hold_tcblock = 1;
14382 				}
14383 				if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
14384 					/* a collision took us forward? */
14385 					queue_only = 0;
14386 				} else {
14387 #if defined(__FreeBSD__) && !defined(__Userspace__)
14388 					NET_EPOCH_ENTER(et);
14389 #endif
14390 					sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
14391 #if defined(__FreeBSD__) && !defined(__Userspace__)
14392 					NET_EPOCH_EXIT(et);
14393 #endif
14394 					SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
14395 					queue_only = 1;
14396 				}
14397 			}
14398 			if ((net->flight_size > net->cwnd) &&
14399 			    (asoc->sctp_cmt_on_off == 0)) {
14400 				SCTP_STAT_INCR(sctps_send_cwnd_avoid);
14401 				queue_only = 1;
14402 			} else if (asoc->ifp_had_enobuf) {
14403 				SCTP_STAT_INCR(sctps_ifnomemqueued);
14404 				if (net->flight_size > (2 * net->mtu)) {
14405 					queue_only = 1;
14406 				}
14407 				asoc->ifp_had_enobuf = 0;
14408 			}
14409 			un_sent = stcb->asoc.total_output_queue_size - stcb->asoc.total_flight;
14410 			if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
14411 			    (stcb->asoc.total_flight > 0) &&
14412 			    (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
14413 			    (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
14414 
14415 				/*-
14416 				 * Ok, Nagle is set on and we have data outstanding.
14417 				 * Don't send anything and let SACKs drive out the
14418 				 * data unless we have a "full" segment to send.
14419 				 */
14420 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
14421 					sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
14422 				}
14423 				SCTP_STAT_INCR(sctps_naglequeued);
14424 				nagle_applies = 1;
14425 			} else {
14426 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
14427 					if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
14428 						sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
14429 				}
14430 				SCTP_STAT_INCR(sctps_naglesent);
14431 				nagle_applies = 0;
14432 			}
14433 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
14434 
14435 				sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
14436 					       nagle_applies, un_sent);
14437 				sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
14438 					       stcb->asoc.total_flight,
14439 					       stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
14440 			}
14441 			if (queue_only_for_init)
14442 				queue_only_for_init = 0;
14443 			if ((queue_only == 0) && (nagle_applies == 0)) {
				/*-
				 * We need to start chunk output before
				 * blocking. Note that if the TCB lock is
				 * already held, input processing from the
				 * network is in progress and will start
				 * output for us.
				 */
14451 #if defined(__FreeBSD__) && !defined(__Userspace__)
14452 				NET_EPOCH_ENTER(et);
14453 #endif
14454 				if (hold_tcblock == 0) {
14455 					if (SCTP_TCB_TRYLOCK(stcb)) {
14456 						hold_tcblock = 1;
14457 						sctp_chunk_output(inp,
14458 								  stcb,
14459 								  SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
14460 					}
14461 				} else {
14462 					sctp_chunk_output(inp,
14463 							  stcb,
14464 							  SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
14465 				}
14466 #if defined(__FreeBSD__) && !defined(__Userspace__)
14467 				NET_EPOCH_EXIT(et);
14468 #endif
14469 			}
14470 			if (hold_tcblock == 1) {
14471 				SCTP_TCB_UNLOCK(stcb);
14472 				hold_tcblock = 0;
14473 			}
14474 			SOCKBUF_LOCK(&so->so_snd);
			/*-
			 * This is a bit subtle, but it works: the
			 * total_output_queue_size is protected by the
			 * TCB_LOCK, which we just released. There is a race
			 * between releasing it above and taking the socket
			 * lock here, in which SACKs can come in before we
			 * have put SB_WAIT on the so_snd buffer to request
			 * the wakeup. Since SACK processing also has to take
			 * the so_snd lock to do the actual sowwakeup(),
			 * rechecking the size once we hold the socket buffer
			 * lock guarantees we can sleep safely with the
			 * wakeup flag in place.
			 */
14489 			inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
14490 			if (SCTP_SB_LIMIT_SND(so) <= (inqueue_bytes +
14491 						      min(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTP_SB_LIMIT_SND(so)))) {
14492 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
14493 #if defined(__APPLE__) && !defined(__Userspace__)
14494 #if defined(APPLE_LEOPARD)
14495 					sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
14496 						       asoc, uio->uio_resid);
14497 #else
14498 					sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
14499 						       asoc, uio_resid(uio));
14500 #endif
14501 #else
14502 					sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
14503 						       asoc, uio->uio_resid);
14504 #endif
14505 				}
14506 				be.error = 0;
14507 #if !(defined(_WIN32) && !defined(__Userspace__))
14508 				stcb->block_entry = &be;
14509 #endif
14510 #if defined(__APPLE__) && !defined(__Userspace__)
14511 				sbunlock(&so->so_snd, 1);
14512 #endif
14513 				error = sbwait(&so->so_snd);
14514 				stcb->block_entry = NULL;
14515 
14516 				if (error || so->so_error || be.error) {
14517 					if (error == 0) {
14518 						if (so->so_error)
14519 							error = so->so_error;
14520 						if (be.error) {
14521 							error = be.error;
14522 						}
14523 					}
14524 					SOCKBUF_UNLOCK(&so->so_snd);
14525 					goto out_unlocked;
14526 				}
14527 
14528 #if defined(__APPLE__) && !defined(__Userspace__)
14529 				error = sblock(&so->so_snd, SBLOCKWAIT(flags));
14530 #endif
14531 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
14532 					sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
14533 						       asoc, stcb->asoc.total_output_queue_size);
14534 				}
14535 			}
14536 			SOCKBUF_UNLOCK(&so->so_snd);
14537 			if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
14538 				goto out_unlocked;
14539 			}
14540 		}
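		/*
		 * Copy loop finished: record whether the message is
		 * complete and whether the stream stays locked to it.
		 */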
14541 		SCTP_TCB_SEND_LOCK(stcb);
14542 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
14543 			SCTP_TCB_SEND_UNLOCK(stcb);
14544 			goto out_unlocked;
14545 		}
14546 		if (sp) {
14547 			if (sp->msg_is_complete == 0) {
14548 				strm->last_msg_incomplete = 1;
14549 				if (stcb->asoc.idata_supported == 0) {
14550 					asoc->stream_locked = 1;
14551 					asoc->stream_locked_on  = srcv->sinfo_stream;
14552 				}
14553 			} else {
14554 				sp->sender_all_done = 1;
14555 				strm->last_msg_incomplete = 0;
14556 				asoc->stream_locked = 0;
14557 			}
14558 		} else {
14559 			SCTP_PRINTF("Huh no sp TSNH?\n");
14560 			strm->last_msg_incomplete = 0;
14561 			asoc->stream_locked = 0;
14562 		}
14563 		SCTP_TCB_SEND_UNLOCK(stcb);
14564 #if defined(__APPLE__) && !defined(__Userspace__)
14565 #if defined(APPLE_LEOPARD)
14566 		if (uio->uio_resid == 0) {
14567 #else
14568 		if (uio_resid(uio) == 0) {
14569 #endif
14570 #else
14571 		if (uio->uio_resid == 0) {
14572 #endif
14573 			got_all_of_the_send = 1;
14574 		}
14575 	} else {
		/* Pass 0 for the hold-lock flag, since we do NOT hold any locks. */
14577 		error = sctp_msg_append(stcb, net, top, srcv, 0);
14578 		top = NULL;
14579 		if (sinfo_flags & SCTP_EOF) {
14580 			got_all_of_the_send = 1;
14581 		}
14582 	}
14583 	if (error) {
14584 		goto out;
14585 	}
14586 dataless_eof:
	/* SCTP_EOF: start a graceful shutdown once everything has been queued. */
14588 	if ((sinfo_flags & SCTP_EOF) &&
14589 	    (got_all_of_the_send == 1)) {
14590 		SCTP_STAT_INCR(sctps_sends_with_eof);
14591 		error = 0;
14592 		if (hold_tcblock == 0) {
14593 			SCTP_TCB_LOCK(stcb);
14594 			hold_tcblock = 1;
14595 		}
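		/*
		 * If every queued byte has been sent and acked, send a
		 * SHUTDOWN now; otherwise mark the association
		 * SHUTDOWN_PENDING so it happens once the queues drain.
		 */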
14596 		if (TAILQ_EMPTY(&asoc->send_queue) &&
14597 		    TAILQ_EMPTY(&asoc->sent_queue) &&
14598 		    sctp_is_there_unsent_data(stcb, SCTP_SO_LOCKED) == 0) {
14599 			if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) {
14600 				goto abort_anyway;
14601 			}
14602 			/* there is nothing queued to send, so I'm done... */
14603 			if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
14604 			    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
14605 			    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
14606 				struct sctp_nets *netp;
14607 
14608 				/* only send SHUTDOWN the first time through */
14609 				if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
14610 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
14611 				}
14612 				SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
14613 				sctp_stop_timers_for_shutdown(stcb);
14614 				if (stcb->asoc.alternate) {
14615 					netp = stcb->asoc.alternate;
14616 				} else {
14617 					netp = stcb->asoc.primary_destination;
14618 				}
14619 				sctp_send_shutdown(stcb, netp);
14620 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
14621 				                 netp);
14622 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
14623 				                 NULL);
14624 			}
14625 		} else {
			/*-
			 * We still have (or just queued) data to send, so
			 * set SHUTDOWN_PENDING.
			 */
14630 			/*-
14631 			 * XXX sockets draft says that SCTP_EOF should be
14632 			 * sent with no data.  currently, we will allow user
14633 			 * data to be sent first and move to
14634 			 * SHUTDOWN-PENDING
14635 			 */
14636 			if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
14637 			    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
14638 			    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
14639 				if (hold_tcblock == 0) {
14640 					SCTP_TCB_LOCK(stcb);
14641 					hold_tcblock = 1;
14642 				}
14643 				if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) {
14644 					SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
14645 				}
14646 				SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
14647 				if (TAILQ_EMPTY(&asoc->send_queue) &&
14648 				    TAILQ_EMPTY(&asoc->sent_queue) &&
14649 				    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
14650 					struct mbuf *op_err;
14651 					char msg[SCTP_DIAG_INFO_LEN];
14652 
14653 				abort_anyway:
14654 					if (free_cnt_applied) {
14655 						atomic_add_int(&stcb->asoc.refcnt, -1);
14656 						free_cnt_applied = 0;
14657 					}
14658 					SCTP_SNPRINTF(msg, sizeof(msg),
14659 					              "%s:%d at %s", __FILE__, __LINE__, __func__);
14660 					op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
14661 					                             msg);
14662 #if defined(__FreeBSD__) && !defined(__Userspace__)
14663 					NET_EPOCH_ENTER(et);
14664 #endif
14665 					sctp_abort_an_association(stcb->sctp_ep, stcb,
14666 					                          op_err, SCTP_SO_LOCKED);
14667 #if defined(__FreeBSD__) && !defined(__Userspace__)
14668 					NET_EPOCH_EXIT(et);
14669 #endif
					/* The stcb is gone now; don't touch or unlock it below. */
14671 					hold_tcblock = 0;
14672 					stcb = NULL;
14673 					goto out;
14674 				}
14675 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
14676 				                 NULL);
14677 				sctp_feature_off(inp, SCTP_PCB_FLAGS_NODELAY);
14678 			}
14679 		}
14680 	}
14681 skip_out_eof:
14682 	if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
14683 		some_on_control = 1;
14684 	}
14685 	if (queue_only_for_init) {
14686 		if (hold_tcblock == 0) {
14687 			SCTP_TCB_LOCK(stcb);
14688 			hold_tcblock = 1;
14689 		}
14690 		if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
14691 			/* a collision took us forward? */
14692 			queue_only = 0;
14693 		} else {
14694 #if defined(__FreeBSD__) && !defined(__Userspace__)
14695 			NET_EPOCH_ENTER(et);
14696 #endif
14697 			sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
14698 #if defined(__FreeBSD__) && !defined(__Userspace__)
14699 			NET_EPOCH_EXIT(et);
14700 #endif
14701 			SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
14702 			queue_only = 1;
14703 		}
14704 	}
14705 	if ((net->flight_size > net->cwnd) &&
14706 	    (stcb->asoc.sctp_cmt_on_off == 0)) {
14707 		SCTP_STAT_INCR(sctps_send_cwnd_avoid);
14708 		queue_only = 1;
14709 	} else if (asoc->ifp_had_enobuf) {
14710 		SCTP_STAT_INCR(sctps_ifnomemqueued);
14711 		if (net->flight_size > (2 * net->mtu)) {
14712 			queue_only = 1;
14713 		}
14714 		asoc->ifp_had_enobuf = 0;
14715 	}
14716 	un_sent = stcb->asoc.total_output_queue_size - stcb->asoc.total_flight;
14717 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
14718 	    (stcb->asoc.total_flight > 0) &&
14719 	    (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
14720 	    (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
		/*-
		 * Ok, Nagle is set on and we have data outstanding.
		 * Don't send anything and let SACKs drive out the
		 * data unless we have a "full" segment to send.
		 */
14726 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
14727 			sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
14728 		}
14729 		SCTP_STAT_INCR(sctps_naglequeued);
14730 		nagle_applies = 1;
14731 	} else {
14732 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
14733 			if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
14734 				sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
14735 		}
14736 		SCTP_STAT_INCR(sctps_naglesent);
14737 		nagle_applies = 0;
14738 	}
14739 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
14740 		sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
14741 		               nagle_applies, un_sent);
14742 		sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
14743 		               stcb->asoc.total_flight,
14744 		               stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
14745 	}
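	/*
	 * Decide how to kick off output: send data if nothing holds us
	 * back, send a window probe if the peer's rwnd is zero and nothing
	 * is in flight, or push out pending control chunks only.
	 */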
14746 #if defined(__FreeBSD__) && !defined(__Userspace__)
14747 	NET_EPOCH_ENTER(et);
14748 #endif
14749 	if ((queue_only == 0) && (nagle_applies == 0) && (stcb->asoc.peers_rwnd && un_sent)) {
14750 		/* we can attempt to send too. */
14751 		if (hold_tcblock == 0) {
			/* If SACK processing holds the lock, it will drive output; no need to send here. */
14753 			if (SCTP_TCB_TRYLOCK(stcb)) {
14754 				sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
14755 				hold_tcblock = 1;
14756 			}
14757 		} else {
14758 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
14759 		}
14760 	} else if ((queue_only == 0) &&
14761 	           (stcb->asoc.peers_rwnd == 0) &&
14762 	           (stcb->asoc.total_flight == 0)) {
		/* Zero rwnd and nothing in flight: we get to send a window probe. */
14764 		if (hold_tcblock == 0) {
14765 			hold_tcblock = 1;
14766 			SCTP_TCB_LOCK(stcb);
14767 		}
14768 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
14769 	} else if (some_on_control) {
14770 		int num_out, reason, frag_point;
14771 
14772 		/* Here we do control only */
14773 		if (hold_tcblock == 0) {
14774 			hold_tcblock = 1;
14775 			SCTP_TCB_LOCK(stcb);
14776 		}
14777 		frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
14778 		(void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
14779 		                            &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_LOCKED);
14780 	}
14781 #if defined(__FreeBSD__) && !defined(__Userspace__)
14782 	NET_EPOCH_EXIT(et);
14783 #endif
14784 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d err:%d\n",
14785 	        queue_only, stcb->asoc.peers_rwnd, un_sent,
14786 		stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue,
14787 	        stcb->asoc.total_output_queue_size, error);
14788 
14789 out:
14790 #if defined(__APPLE__) && !defined(__Userspace__)
14791 	sbunlock(&so->so_snd, 1);
14792 #endif
14793 out_unlocked:
14794 
14795 	if (local_soresv && stcb) {
14796 		atomic_subtract_int(&stcb->asoc.sb_send_resv, sndlen);
14797 	}
14798 	if (create_lock_applied) {
14799 		SCTP_ASOC_CREATE_UNLOCK(inp);
14800 	}
14801 	if ((stcb) && hold_tcblock) {
14802 		SCTP_TCB_UNLOCK(stcb);
14803 	}
14804 	if (stcb && free_cnt_applied) {
14805 		atomic_add_int(&stcb->asoc.refcnt, -1);
14806 	}
14807 #ifdef INVARIANTS
14808 #if defined(__FreeBSD__) && !defined(__Userspace__)
14809 	if (stcb) {
14810 		if (mtx_owned(&stcb->tcb_mtx)) {
14811 			panic("Leaving with tcb mtx owned?");
14812 		}
14813 		if (mtx_owned(&stcb->tcb_send_mtx)) {
14814 			panic("Leaving with tcb send mtx owned?");
14815 		}
14816 	}
14817 #endif
14818 #endif
14819 	if (top) {
14820 		sctp_m_freem(top);
14821 	}
14822 	if (control) {
14823 		sctp_m_freem(control);
14824 	}
14825 	return (error);
14826 }
14827 
14829 /*
14830  * generate an AUTHentication chunk, if required
14831  */
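/*
 * Typical usage (a sketch; variable names are illustrative): when building
 * an outgoing packet, the chunk-output path inserts the AUTH chunk in front
 * of any chunk type the peer requires to be authenticated, e.g.
 *
 *	m = sctp_add_auth_chunk(m, &endofchain, &auth, &auth_offset,
 *	                        stcb, SCTP_DATA);
 *
 * The key id and HMAC digest are filled in later, just before the packet
 * is sent (see sctp_fill_hmac_digest_m()).
 */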
14832 struct mbuf *
14833 sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
    struct sctp_auth_chunk **auth_ret, uint32_t *offset,
14835     struct sctp_tcb *stcb, uint8_t chunk)
14836 {
14837 	struct mbuf *m_auth;
14838 	struct sctp_auth_chunk *auth;
14839 	int chunk_len;
14840 	struct mbuf *cn;
14841 
14842 	if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) ||
14843 	    (stcb == NULL))
14844 		return (m);
14845 
14846 	if (stcb->asoc.auth_supported == 0) {
14847 		return (m);
14848 	}
14849 	/* does the requested chunk require auth? */
14850 	if (!sctp_auth_is_required_chunk(chunk, stcb->asoc.peer_auth_chunks)) {
14851 		return (m);
14852 	}
14853 	m_auth = sctp_get_mbuf_for_msg(sizeof(*auth), 0, M_NOWAIT, 1, MT_HEADER);
14854 	if (m_auth == NULL) {
		/* no mbufs */
14856 		return (m);
14857 	}
14858 	/* reserve some space if this will be the first mbuf */
14859 	if (m == NULL)
14860 		SCTP_BUF_RESV_UF(m_auth, SCTP_MIN_OVERHEAD);
14861 	/* fill in the AUTH chunk details */
14862 	auth = mtod(m_auth, struct sctp_auth_chunk *);
14863 	memset(auth, 0, sizeof(*auth));
14864 	auth->ch.chunk_type = SCTP_AUTHENTICATION;
14865 	auth->ch.chunk_flags = 0;
14866 	chunk_len = sizeof(*auth) +
14867 	    sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
14868 	auth->ch.chunk_length = htons(chunk_len);
14869 	auth->hmac_id = htons(stcb->asoc.peer_hmac_id);
14870 	/* key id and hmac digest will be computed and filled in upon send */
14871 
14872 	/* save the offset where the auth was inserted into the chain */
14873 	*offset = 0;
14874 	for (cn = m; cn; cn = SCTP_BUF_NEXT(cn)) {
14875 		*offset += SCTP_BUF_LEN(cn);
14876 	}
14877 
14878 	/* update length and return pointer to the auth chunk */
14879 	SCTP_BUF_LEN(m_auth) = chunk_len;
14880 	m = sctp_copy_mbufchain(m_auth, m, m_end, 1, chunk_len, 0);
14881 	if (auth_ret != NULL)
14882 		*auth_ret = auth;
14883 
14884 	return (m);
14885 }
14886 
14887 #if (defined(__FreeBSD__) || defined(__APPLE__)) && !defined(__Userspace__)
14888 #ifdef INET6
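/*
 * Check whether the IPv6 source address src6 is reachable via the next hop
 * of the given route, i.e. whether the route's gateway is one of the
 * advertising routers for the prefix covering src6.
 * Returns 1 on a match, 0 otherwise.
 */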
14889 int
14890 sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t *ro)
14891 {
14892 	struct nd_prefix *pfx = NULL;
14893 	struct nd_pfxrouter *pfxrtr = NULL;
14894 	struct sockaddr_in6 gw6;
14895 
14896 #if defined(__FreeBSD__)
14897 	if (ro == NULL || ro->ro_nh == NULL || src6->sin6_family != AF_INET6)
14898 #else
14899 	if (ro == NULL || ro->ro_rt == NULL || src6->sin6_family != AF_INET6)
14900 #endif
14901 		return (0);
14902 
14903 	/* get prefix entry of address */
14904 #if defined(__FreeBSD__)
14905 	ND6_RLOCK();
14906 #endif
14907 	LIST_FOREACH(pfx, &MODULE_GLOBAL(nd_prefix), ndpr_entry) {
14908 		if (pfx->ndpr_stateflags & NDPRF_DETACHED)
14909 			continue;
14910 		if (IN6_ARE_MASKED_ADDR_EQUAL(&pfx->ndpr_prefix.sin6_addr,
14911 		    &src6->sin6_addr, &pfx->ndpr_mask))
14912 			break;
14913 	}
14914 	/* no prefix entry in the prefix list */
14915 	if (pfx == NULL) {
14916 #if defined(__FreeBSD__)
14917 		ND6_RUNLOCK();
14918 #endif
14919 		SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefix entry for ");
14920 		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
14921 		return (0);
14922 	}
14923 
14924 	SCTPDBG(SCTP_DEBUG_OUTPUT2, "v6src_match_nexthop(), Prefix entry is ");
14925 	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
14926 
14927 	/* search installed gateway from prefix entry */
14928 	LIST_FOREACH(pfxrtr, &pfx->ndpr_advrtrs, pfr_entry) {
14929 		memset(&gw6, 0, sizeof(struct sockaddr_in6));
14930 		gw6.sin6_family = AF_INET6;
14931 #ifdef HAVE_SIN6_LEN
14932 		gw6.sin6_len = sizeof(struct sockaddr_in6);
14933 #endif
14934 		memcpy(&gw6.sin6_addr, &pfxrtr->router->rtaddr,
14935 		    sizeof(struct in6_addr));
14936 		SCTPDBG(SCTP_DEBUG_OUTPUT2, "prefix router is ");
14937 		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&gw6);
14938 		SCTPDBG(SCTP_DEBUG_OUTPUT2, "installed router is ");
14939 #if defined(__FreeBSD__)
14940 		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ro->ro_nh->gw_sa);
14941 #else
14942 		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
14943 #endif
14944 #if defined(__FreeBSD__)
14945 		if (sctp_cmpaddr((struct sockaddr *)&gw6, &ro->ro_nh->gw_sa)) {
14946 			ND6_RUNLOCK();
14947 #else
14948 		if (sctp_cmpaddr((struct sockaddr *)&gw6, ro->ro_rt->rt_gateway)) {
14949 #endif
14950 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is installed\n");
14951 			return (1);
14952 		}
14953 	}
14954 #if defined(__FreeBSD__)
14955 	ND6_RUNLOCK();
14956 #endif
14957 	SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is not installed\n");
14958 	return (0);
14959 }
14960 #endif
14961 
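/*
 * Check whether the IPv4 source address of sifa and the route's gateway
 * are on the same subnet, using the interface netmask.
 * Returns 1 on a match, 0 otherwise.
 */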
14962 int
14963 sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t *ro)
14964 {
14965 #ifdef INET
14966 	struct sockaddr_in *sin, *mask;
14967 	struct ifaddr *ifa;
14968 	struct in_addr srcnetaddr, gwnetaddr;
14969 
14970 #if defined(__FreeBSD__)
14971 	if (ro == NULL || ro->ro_nh == NULL ||
14972 #else
14973 	if (ro == NULL || ro->ro_rt == NULL ||
14974 #endif
14975 	    sifa->address.sa.sa_family != AF_INET) {
14976 		return (0);
14977 	}
14978 	ifa = (struct ifaddr *)sifa->ifa;
14979 	mask = (struct sockaddr_in *)(ifa->ifa_netmask);
14980 	sin = &sifa->address.sin;
14981 	srcnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
14982 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: src address is ");
14983 	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
14984 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", srcnetaddr.s_addr);
14985 
14986 #if defined(__FreeBSD__)
14987 	sin = &ro->ro_nh->gw4_sa;
14988 #else
14989 	sin = (struct sockaddr_in *)ro->ro_rt->rt_gateway;
14990 #endif
14991 	gwnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
14992 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: nexthop is ");
14993 #if defined(__FreeBSD__)
14994 	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ro->ro_nh->gw_sa);
14995 #else
14996 	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
14997 #endif
14998 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", gwnetaddr.s_addr);
14999 	if (srcnetaddr.s_addr == gwnetaddr.s_addr) {
15000 		return (1);
15001 	}
15002 #endif
15003 	return (0);
15004 }
15005 #elif defined(__Userspace__)
15006 /* TODO __Userspace__ versions of sctp_vXsrc_match_nexthop(). */
15007 int
15008 sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t *ro)
15009 {
15010     return (0);
15011 }
15012 int
15013 sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t *ro)
15014 {
15015     return (0);
15016 }
15017 
15018 #endif
15019