1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
5  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * a) Redistributions of source code must retain the above copyright notice,
12  *    this list of conditions and the following disclaimer.
13  *
14  * b) Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the distribution.
17  *
18  * c) Neither the name of Cisco Systems, Inc. nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #if defined(__FreeBSD__) && !defined(__Userspace__)
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38 #endif
39 
40 #include <netinet/sctp_os.h>
41 #if defined(__FreeBSD__) && !defined(__Userspace__)
42 #include <sys/proc.h>
43 #endif
44 #include <netinet/sctp_var.h>
45 #include <netinet/sctp_sysctl.h>
46 #include <netinet/sctp_header.h>
47 #include <netinet/sctp_pcb.h>
48 #include <netinet/sctputil.h>
49 #include <netinet/sctp_output.h>
50 #include <netinet/sctp_uio.h>
51 #include <netinet/sctputil.h>
52 #include <netinet/sctp_auth.h>
53 #include <netinet/sctp_timer.h>
54 #include <netinet/sctp_asconf.h>
55 #include <netinet/sctp_indata.h>
56 #include <netinet/sctp_bsd_addr.h>
57 #include <netinet/sctp_input.h>
58 #include <netinet/sctp_crc32.h>
59 #if defined(__FreeBSD__) && !defined(__Userspace__)
60 #include <netinet/sctp_kdtrace.h>
61 #endif
62 #if defined(__linux__)
#define __FAVOR_BSD    /* (on Ubuntu at least) enables BSD-style UDP header field names, as in RFC 768 */
64 #endif
65 #if defined(INET) || defined(INET6)
66 #if !defined(_WIN32)
67 #include <netinet/udp.h>
68 #endif
69 #endif
70 #if !defined(__Userspace__)
71 #if defined(__APPLE__)
72 #include <netinet/in.h>
73 #endif
74 #if defined(__FreeBSD__) && !defined(__Userspace__)
75 #include <netinet/udp_var.h>
76 #include <machine/in_cksum.h>
77 #endif
78 #endif
79 #if defined(__Userspace__) && defined(INET6)
80 #include <netinet6/sctp6_var.h>
81 #endif
82 #if defined(__APPLE__) && !defined(__Userspace__)
83 #if !(defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD))
84 #define SCTP_MAX_LINKHDR 16
85 #endif
86 #endif
87 
88 #define SCTP_MAX_GAPS_INARRAY 4
89 struct sack_track {
90 	uint8_t right_edge;	/* mergable on the right edge */
91 	uint8_t left_edge;	/* mergable on the left edge */
92 	uint8_t num_entries;
93 	uint8_t spare;
94 	struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY];
95 };
96 
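/*
 * sack_array[] is a 256-entry lookup table indexed by one byte of the
 * receive mapping array.  For each possible byte value, gaps[] lists the
 * runs of set bits as {start, end} bit offsets (0..7) and num_entries
 * gives the number of runs; the right_edge/left_edge flags record whether
 * a run touches the byte's boundary, so runs in adjacent bytes can be
 * coalesced when a SACK is built.  This lets a single table lookup replace
 * a bit-by-bit scan of each mapping-array byte.
 */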
97 const struct sack_track sack_array[256] = {
98 	{0, 0, 0, 0,		/* 0x00 */
99 		{{0, 0},
100 		{0, 0},
101 		{0, 0},
102 		{0, 0}
103 		}
104 	},
105 	{1, 0, 1, 0,		/* 0x01 */
106 		{{0, 0},
107 		{0, 0},
108 		{0, 0},
109 		{0, 0}
110 		}
111 	},
112 	{0, 0, 1, 0,		/* 0x02 */
113 		{{1, 1},
114 		{0, 0},
115 		{0, 0},
116 		{0, 0}
117 		}
118 	},
119 	{1, 0, 1, 0,		/* 0x03 */
120 		{{0, 1},
121 		{0, 0},
122 		{0, 0},
123 		{0, 0}
124 		}
125 	},
126 	{0, 0, 1, 0,		/* 0x04 */
127 		{{2, 2},
128 		{0, 0},
129 		{0, 0},
130 		{0, 0}
131 		}
132 	},
133 	{1, 0, 2, 0,		/* 0x05 */
134 		{{0, 0},
135 		{2, 2},
136 		{0, 0},
137 		{0, 0}
138 		}
139 	},
140 	{0, 0, 1, 0,		/* 0x06 */
141 		{{1, 2},
142 		{0, 0},
143 		{0, 0},
144 		{0, 0}
145 		}
146 	},
147 	{1, 0, 1, 0,		/* 0x07 */
148 		{{0, 2},
149 		{0, 0},
150 		{0, 0},
151 		{0, 0}
152 		}
153 	},
154 	{0, 0, 1, 0,		/* 0x08 */
155 		{{3, 3},
156 		{0, 0},
157 		{0, 0},
158 		{0, 0}
159 		}
160 	},
161 	{1, 0, 2, 0,		/* 0x09 */
162 		{{0, 0},
163 		{3, 3},
164 		{0, 0},
165 		{0, 0}
166 		}
167 	},
168 	{0, 0, 2, 0,		/* 0x0a */
169 		{{1, 1},
170 		{3, 3},
171 		{0, 0},
172 		{0, 0}
173 		}
174 	},
175 	{1, 0, 2, 0,		/* 0x0b */
176 		{{0, 1},
177 		{3, 3},
178 		{0, 0},
179 		{0, 0}
180 		}
181 	},
182 	{0, 0, 1, 0,		/* 0x0c */
183 		{{2, 3},
184 		{0, 0},
185 		{0, 0},
186 		{0, 0}
187 		}
188 	},
189 	{1, 0, 2, 0,		/* 0x0d */
190 		{{0, 0},
191 		{2, 3},
192 		{0, 0},
193 		{0, 0}
194 		}
195 	},
196 	{0, 0, 1, 0,		/* 0x0e */
197 		{{1, 3},
198 		{0, 0},
199 		{0, 0},
200 		{0, 0}
201 		}
202 	},
203 	{1, 0, 1, 0,		/* 0x0f */
204 		{{0, 3},
205 		{0, 0},
206 		{0, 0},
207 		{0, 0}
208 		}
209 	},
210 	{0, 0, 1, 0,		/* 0x10 */
211 		{{4, 4},
212 		{0, 0},
213 		{0, 0},
214 		{0, 0}
215 		}
216 	},
217 	{1, 0, 2, 0,		/* 0x11 */
218 		{{0, 0},
219 		{4, 4},
220 		{0, 0},
221 		{0, 0}
222 		}
223 	},
224 	{0, 0, 2, 0,		/* 0x12 */
225 		{{1, 1},
226 		{4, 4},
227 		{0, 0},
228 		{0, 0}
229 		}
230 	},
231 	{1, 0, 2, 0,		/* 0x13 */
232 		{{0, 1},
233 		{4, 4},
234 		{0, 0},
235 		{0, 0}
236 		}
237 	},
238 	{0, 0, 2, 0,		/* 0x14 */
239 		{{2, 2},
240 		{4, 4},
241 		{0, 0},
242 		{0, 0}
243 		}
244 	},
245 	{1, 0, 3, 0,		/* 0x15 */
246 		{{0, 0},
247 		{2, 2},
248 		{4, 4},
249 		{0, 0}
250 		}
251 	},
252 	{0, 0, 2, 0,		/* 0x16 */
253 		{{1, 2},
254 		{4, 4},
255 		{0, 0},
256 		{0, 0}
257 		}
258 	},
259 	{1, 0, 2, 0,		/* 0x17 */
260 		{{0, 2},
261 		{4, 4},
262 		{0, 0},
263 		{0, 0}
264 		}
265 	},
266 	{0, 0, 1, 0,		/* 0x18 */
267 		{{3, 4},
268 		{0, 0},
269 		{0, 0},
270 		{0, 0}
271 		}
272 	},
273 	{1, 0, 2, 0,		/* 0x19 */
274 		{{0, 0},
275 		{3, 4},
276 		{0, 0},
277 		{0, 0}
278 		}
279 	},
280 	{0, 0, 2, 0,		/* 0x1a */
281 		{{1, 1},
282 		{3, 4},
283 		{0, 0},
284 		{0, 0}
285 		}
286 	},
287 	{1, 0, 2, 0,		/* 0x1b */
288 		{{0, 1},
289 		{3, 4},
290 		{0, 0},
291 		{0, 0}
292 		}
293 	},
294 	{0, 0, 1, 0,		/* 0x1c */
295 		{{2, 4},
296 		{0, 0},
297 		{0, 0},
298 		{0, 0}
299 		}
300 	},
301 	{1, 0, 2, 0,		/* 0x1d */
302 		{{0, 0},
303 		{2, 4},
304 		{0, 0},
305 		{0, 0}
306 		}
307 	},
308 	{0, 0, 1, 0,		/* 0x1e */
309 		{{1, 4},
310 		{0, 0},
311 		{0, 0},
312 		{0, 0}
313 		}
314 	},
315 	{1, 0, 1, 0,		/* 0x1f */
316 		{{0, 4},
317 		{0, 0},
318 		{0, 0},
319 		{0, 0}
320 		}
321 	},
322 	{0, 0, 1, 0,		/* 0x20 */
323 		{{5, 5},
324 		{0, 0},
325 		{0, 0},
326 		{0, 0}
327 		}
328 	},
329 	{1, 0, 2, 0,		/* 0x21 */
330 		{{0, 0},
331 		{5, 5},
332 		{0, 0},
333 		{0, 0}
334 		}
335 	},
336 	{0, 0, 2, 0,		/* 0x22 */
337 		{{1, 1},
338 		{5, 5},
339 		{0, 0},
340 		{0, 0}
341 		}
342 	},
343 	{1, 0, 2, 0,		/* 0x23 */
344 		{{0, 1},
345 		{5, 5},
346 		{0, 0},
347 		{0, 0}
348 		}
349 	},
350 	{0, 0, 2, 0,		/* 0x24 */
351 		{{2, 2},
352 		{5, 5},
353 		{0, 0},
354 		{0, 0}
355 		}
356 	},
357 	{1, 0, 3, 0,		/* 0x25 */
358 		{{0, 0},
359 		{2, 2},
360 		{5, 5},
361 		{0, 0}
362 		}
363 	},
364 	{0, 0, 2, 0,		/* 0x26 */
365 		{{1, 2},
366 		{5, 5},
367 		{0, 0},
368 		{0, 0}
369 		}
370 	},
371 	{1, 0, 2, 0,		/* 0x27 */
372 		{{0, 2},
373 		{5, 5},
374 		{0, 0},
375 		{0, 0}
376 		}
377 	},
378 	{0, 0, 2, 0,		/* 0x28 */
379 		{{3, 3},
380 		{5, 5},
381 		{0, 0},
382 		{0, 0}
383 		}
384 	},
385 	{1, 0, 3, 0,		/* 0x29 */
386 		{{0, 0},
387 		{3, 3},
388 		{5, 5},
389 		{0, 0}
390 		}
391 	},
392 	{0, 0, 3, 0,		/* 0x2a */
393 		{{1, 1},
394 		{3, 3},
395 		{5, 5},
396 		{0, 0}
397 		}
398 	},
399 	{1, 0, 3, 0,		/* 0x2b */
400 		{{0, 1},
401 		{3, 3},
402 		{5, 5},
403 		{0, 0}
404 		}
405 	},
406 	{0, 0, 2, 0,		/* 0x2c */
407 		{{2, 3},
408 		{5, 5},
409 		{0, 0},
410 		{0, 0}
411 		}
412 	},
413 	{1, 0, 3, 0,		/* 0x2d */
414 		{{0, 0},
415 		{2, 3},
416 		{5, 5},
417 		{0, 0}
418 		}
419 	},
420 	{0, 0, 2, 0,		/* 0x2e */
421 		{{1, 3},
422 		{5, 5},
423 		{0, 0},
424 		{0, 0}
425 		}
426 	},
427 	{1, 0, 2, 0,		/* 0x2f */
428 		{{0, 3},
429 		{5, 5},
430 		{0, 0},
431 		{0, 0}
432 		}
433 	},
434 	{0, 0, 1, 0,		/* 0x30 */
435 		{{4, 5},
436 		{0, 0},
437 		{0, 0},
438 		{0, 0}
439 		}
440 	},
441 	{1, 0, 2, 0,		/* 0x31 */
442 		{{0, 0},
443 		{4, 5},
444 		{0, 0},
445 		{0, 0}
446 		}
447 	},
448 	{0, 0, 2, 0,		/* 0x32 */
449 		{{1, 1},
450 		{4, 5},
451 		{0, 0},
452 		{0, 0}
453 		}
454 	},
455 	{1, 0, 2, 0,		/* 0x33 */
456 		{{0, 1},
457 		{4, 5},
458 		{0, 0},
459 		{0, 0}
460 		}
461 	},
462 	{0, 0, 2, 0,		/* 0x34 */
463 		{{2, 2},
464 		{4, 5},
465 		{0, 0},
466 		{0, 0}
467 		}
468 	},
469 	{1, 0, 3, 0,		/* 0x35 */
470 		{{0, 0},
471 		{2, 2},
472 		{4, 5},
473 		{0, 0}
474 		}
475 	},
476 	{0, 0, 2, 0,		/* 0x36 */
477 		{{1, 2},
478 		{4, 5},
479 		{0, 0},
480 		{0, 0}
481 		}
482 	},
483 	{1, 0, 2, 0,		/* 0x37 */
484 		{{0, 2},
485 		{4, 5},
486 		{0, 0},
487 		{0, 0}
488 		}
489 	},
490 	{0, 0, 1, 0,		/* 0x38 */
491 		{{3, 5},
492 		{0, 0},
493 		{0, 0},
494 		{0, 0}
495 		}
496 	},
497 	{1, 0, 2, 0,		/* 0x39 */
498 		{{0, 0},
499 		{3, 5},
500 		{0, 0},
501 		{0, 0}
502 		}
503 	},
504 	{0, 0, 2, 0,		/* 0x3a */
505 		{{1, 1},
506 		{3, 5},
507 		{0, 0},
508 		{0, 0}
509 		}
510 	},
511 	{1, 0, 2, 0,		/* 0x3b */
512 		{{0, 1},
513 		{3, 5},
514 		{0, 0},
515 		{0, 0}
516 		}
517 	},
518 	{0, 0, 1, 0,		/* 0x3c */
519 		{{2, 5},
520 		{0, 0},
521 		{0, 0},
522 		{0, 0}
523 		}
524 	},
525 	{1, 0, 2, 0,		/* 0x3d */
526 		{{0, 0},
527 		{2, 5},
528 		{0, 0},
529 		{0, 0}
530 		}
531 	},
532 	{0, 0, 1, 0,		/* 0x3e */
533 		{{1, 5},
534 		{0, 0},
535 		{0, 0},
536 		{0, 0}
537 		}
538 	},
539 	{1, 0, 1, 0,		/* 0x3f */
540 		{{0, 5},
541 		{0, 0},
542 		{0, 0},
543 		{0, 0}
544 		}
545 	},
546 	{0, 0, 1, 0,		/* 0x40 */
547 		{{6, 6},
548 		{0, 0},
549 		{0, 0},
550 		{0, 0}
551 		}
552 	},
553 	{1, 0, 2, 0,		/* 0x41 */
554 		{{0, 0},
555 		{6, 6},
556 		{0, 0},
557 		{0, 0}
558 		}
559 	},
560 	{0, 0, 2, 0,		/* 0x42 */
561 		{{1, 1},
562 		{6, 6},
563 		{0, 0},
564 		{0, 0}
565 		}
566 	},
567 	{1, 0, 2, 0,		/* 0x43 */
568 		{{0, 1},
569 		{6, 6},
570 		{0, 0},
571 		{0, 0}
572 		}
573 	},
574 	{0, 0, 2, 0,		/* 0x44 */
575 		{{2, 2},
576 		{6, 6},
577 		{0, 0},
578 		{0, 0}
579 		}
580 	},
581 	{1, 0, 3, 0,		/* 0x45 */
582 		{{0, 0},
583 		{2, 2},
584 		{6, 6},
585 		{0, 0}
586 		}
587 	},
588 	{0, 0, 2, 0,		/* 0x46 */
589 		{{1, 2},
590 		{6, 6},
591 		{0, 0},
592 		{0, 0}
593 		}
594 	},
595 	{1, 0, 2, 0,		/* 0x47 */
596 		{{0, 2},
597 		{6, 6},
598 		{0, 0},
599 		{0, 0}
600 		}
601 	},
602 	{0, 0, 2, 0,		/* 0x48 */
603 		{{3, 3},
604 		{6, 6},
605 		{0, 0},
606 		{0, 0}
607 		}
608 	},
609 	{1, 0, 3, 0,		/* 0x49 */
610 		{{0, 0},
611 		{3, 3},
612 		{6, 6},
613 		{0, 0}
614 		}
615 	},
616 	{0, 0, 3, 0,		/* 0x4a */
617 		{{1, 1},
618 		{3, 3},
619 		{6, 6},
620 		{0, 0}
621 		}
622 	},
623 	{1, 0, 3, 0,		/* 0x4b */
624 		{{0, 1},
625 		{3, 3},
626 		{6, 6},
627 		{0, 0}
628 		}
629 	},
630 	{0, 0, 2, 0,		/* 0x4c */
631 		{{2, 3},
632 		{6, 6},
633 		{0, 0},
634 		{0, 0}
635 		}
636 	},
637 	{1, 0, 3, 0,		/* 0x4d */
638 		{{0, 0},
639 		{2, 3},
640 		{6, 6},
641 		{0, 0}
642 		}
643 	},
644 	{0, 0, 2, 0,		/* 0x4e */
645 		{{1, 3},
646 		{6, 6},
647 		{0, 0},
648 		{0, 0}
649 		}
650 	},
651 	{1, 0, 2, 0,		/* 0x4f */
652 		{{0, 3},
653 		{6, 6},
654 		{0, 0},
655 		{0, 0}
656 		}
657 	},
658 	{0, 0, 2, 0,		/* 0x50 */
659 		{{4, 4},
660 		{6, 6},
661 		{0, 0},
662 		{0, 0}
663 		}
664 	},
665 	{1, 0, 3, 0,		/* 0x51 */
666 		{{0, 0},
667 		{4, 4},
668 		{6, 6},
669 		{0, 0}
670 		}
671 	},
672 	{0, 0, 3, 0,		/* 0x52 */
673 		{{1, 1},
674 		{4, 4},
675 		{6, 6},
676 		{0, 0}
677 		}
678 	},
679 	{1, 0, 3, 0,		/* 0x53 */
680 		{{0, 1},
681 		{4, 4},
682 		{6, 6},
683 		{0, 0}
684 		}
685 	},
686 	{0, 0, 3, 0,		/* 0x54 */
687 		{{2, 2},
688 		{4, 4},
689 		{6, 6},
690 		{0, 0}
691 		}
692 	},
693 	{1, 0, 4, 0,		/* 0x55 */
694 		{{0, 0},
695 		{2, 2},
696 		{4, 4},
697 		{6, 6}
698 		}
699 	},
700 	{0, 0, 3, 0,		/* 0x56 */
701 		{{1, 2},
702 		{4, 4},
703 		{6, 6},
704 		{0, 0}
705 		}
706 	},
707 	{1, 0, 3, 0,		/* 0x57 */
708 		{{0, 2},
709 		{4, 4},
710 		{6, 6},
711 		{0, 0}
712 		}
713 	},
714 	{0, 0, 2, 0,		/* 0x58 */
715 		{{3, 4},
716 		{6, 6},
717 		{0, 0},
718 		{0, 0}
719 		}
720 	},
721 	{1, 0, 3, 0,		/* 0x59 */
722 		{{0, 0},
723 		{3, 4},
724 		{6, 6},
725 		{0, 0}
726 		}
727 	},
728 	{0, 0, 3, 0,		/* 0x5a */
729 		{{1, 1},
730 		{3, 4},
731 		{6, 6},
732 		{0, 0}
733 		}
734 	},
735 	{1, 0, 3, 0,		/* 0x5b */
736 		{{0, 1},
737 		{3, 4},
738 		{6, 6},
739 		{0, 0}
740 		}
741 	},
742 	{0, 0, 2, 0,		/* 0x5c */
743 		{{2, 4},
744 		{6, 6},
745 		{0, 0},
746 		{0, 0}
747 		}
748 	},
749 	{1, 0, 3, 0,		/* 0x5d */
750 		{{0, 0},
751 		{2, 4},
752 		{6, 6},
753 		{0, 0}
754 		}
755 	},
756 	{0, 0, 2, 0,		/* 0x5e */
757 		{{1, 4},
758 		{6, 6},
759 		{0, 0},
760 		{0, 0}
761 		}
762 	},
763 	{1, 0, 2, 0,		/* 0x5f */
764 		{{0, 4},
765 		{6, 6},
766 		{0, 0},
767 		{0, 0}
768 		}
769 	},
770 	{0, 0, 1, 0,		/* 0x60 */
771 		{{5, 6},
772 		{0, 0},
773 		{0, 0},
774 		{0, 0}
775 		}
776 	},
777 	{1, 0, 2, 0,		/* 0x61 */
778 		{{0, 0},
779 		{5, 6},
780 		{0, 0},
781 		{0, 0}
782 		}
783 	},
784 	{0, 0, 2, 0,		/* 0x62 */
785 		{{1, 1},
786 		{5, 6},
787 		{0, 0},
788 		{0, 0}
789 		}
790 	},
791 	{1, 0, 2, 0,		/* 0x63 */
792 		{{0, 1},
793 		{5, 6},
794 		{0, 0},
795 		{0, 0}
796 		}
797 	},
798 	{0, 0, 2, 0,		/* 0x64 */
799 		{{2, 2},
800 		{5, 6},
801 		{0, 0},
802 		{0, 0}
803 		}
804 	},
805 	{1, 0, 3, 0,		/* 0x65 */
806 		{{0, 0},
807 		{2, 2},
808 		{5, 6},
809 		{0, 0}
810 		}
811 	},
812 	{0, 0, 2, 0,		/* 0x66 */
813 		{{1, 2},
814 		{5, 6},
815 		{0, 0},
816 		{0, 0}
817 		}
818 	},
819 	{1, 0, 2, 0,		/* 0x67 */
820 		{{0, 2},
821 		{5, 6},
822 		{0, 0},
823 		{0, 0}
824 		}
825 	},
826 	{0, 0, 2, 0,		/* 0x68 */
827 		{{3, 3},
828 		{5, 6},
829 		{0, 0},
830 		{0, 0}
831 		}
832 	},
833 	{1, 0, 3, 0,		/* 0x69 */
834 		{{0, 0},
835 		{3, 3},
836 		{5, 6},
837 		{0, 0}
838 		}
839 	},
840 	{0, 0, 3, 0,		/* 0x6a */
841 		{{1, 1},
842 		{3, 3},
843 		{5, 6},
844 		{0, 0}
845 		}
846 	},
847 	{1, 0, 3, 0,		/* 0x6b */
848 		{{0, 1},
849 		{3, 3},
850 		{5, 6},
851 		{0, 0}
852 		}
853 	},
854 	{0, 0, 2, 0,		/* 0x6c */
855 		{{2, 3},
856 		{5, 6},
857 		{0, 0},
858 		{0, 0}
859 		}
860 	},
861 	{1, 0, 3, 0,		/* 0x6d */
862 		{{0, 0},
863 		{2, 3},
864 		{5, 6},
865 		{0, 0}
866 		}
867 	},
868 	{0, 0, 2, 0,		/* 0x6e */
869 		{{1, 3},
870 		{5, 6},
871 		{0, 0},
872 		{0, 0}
873 		}
874 	},
875 	{1, 0, 2, 0,		/* 0x6f */
876 		{{0, 3},
877 		{5, 6},
878 		{0, 0},
879 		{0, 0}
880 		}
881 	},
882 	{0, 0, 1, 0,		/* 0x70 */
883 		{{4, 6},
884 		{0, 0},
885 		{0, 0},
886 		{0, 0}
887 		}
888 	},
889 	{1, 0, 2, 0,		/* 0x71 */
890 		{{0, 0},
891 		{4, 6},
892 		{0, 0},
893 		{0, 0}
894 		}
895 	},
896 	{0, 0, 2, 0,		/* 0x72 */
897 		{{1, 1},
898 		{4, 6},
899 		{0, 0},
900 		{0, 0}
901 		}
902 	},
903 	{1, 0, 2, 0,		/* 0x73 */
904 		{{0, 1},
905 		{4, 6},
906 		{0, 0},
907 		{0, 0}
908 		}
909 	},
910 	{0, 0, 2, 0,		/* 0x74 */
911 		{{2, 2},
912 		{4, 6},
913 		{0, 0},
914 		{0, 0}
915 		}
916 	},
917 	{1, 0, 3, 0,		/* 0x75 */
918 		{{0, 0},
919 		{2, 2},
920 		{4, 6},
921 		{0, 0}
922 		}
923 	},
924 	{0, 0, 2, 0,		/* 0x76 */
925 		{{1, 2},
926 		{4, 6},
927 		{0, 0},
928 		{0, 0}
929 		}
930 	},
931 	{1, 0, 2, 0,		/* 0x77 */
932 		{{0, 2},
933 		{4, 6},
934 		{0, 0},
935 		{0, 0}
936 		}
937 	},
938 	{0, 0, 1, 0,		/* 0x78 */
939 		{{3, 6},
940 		{0, 0},
941 		{0, 0},
942 		{0, 0}
943 		}
944 	},
945 	{1, 0, 2, 0,		/* 0x79 */
946 		{{0, 0},
947 		{3, 6},
948 		{0, 0},
949 		{0, 0}
950 		}
951 	},
952 	{0, 0, 2, 0,		/* 0x7a */
953 		{{1, 1},
954 		{3, 6},
955 		{0, 0},
956 		{0, 0}
957 		}
958 	},
959 	{1, 0, 2, 0,		/* 0x7b */
960 		{{0, 1},
961 		{3, 6},
962 		{0, 0},
963 		{0, 0}
964 		}
965 	},
966 	{0, 0, 1, 0,		/* 0x7c */
967 		{{2, 6},
968 		{0, 0},
969 		{0, 0},
970 		{0, 0}
971 		}
972 	},
973 	{1, 0, 2, 0,		/* 0x7d */
974 		{{0, 0},
975 		{2, 6},
976 		{0, 0},
977 		{0, 0}
978 		}
979 	},
980 	{0, 0, 1, 0,		/* 0x7e */
981 		{{1, 6},
982 		{0, 0},
983 		{0, 0},
984 		{0, 0}
985 		}
986 	},
987 	{1, 0, 1, 0,		/* 0x7f */
988 		{{0, 6},
989 		{0, 0},
990 		{0, 0},
991 		{0, 0}
992 		}
993 	},
994 	{0, 1, 1, 0,		/* 0x80 */
995 		{{7, 7},
996 		{0, 0},
997 		{0, 0},
998 		{0, 0}
999 		}
1000 	},
1001 	{1, 1, 2, 0,		/* 0x81 */
1002 		{{0, 0},
1003 		{7, 7},
1004 		{0, 0},
1005 		{0, 0}
1006 		}
1007 	},
1008 	{0, 1, 2, 0,		/* 0x82 */
1009 		{{1, 1},
1010 		{7, 7},
1011 		{0, 0},
1012 		{0, 0}
1013 		}
1014 	},
1015 	{1, 1, 2, 0,		/* 0x83 */
1016 		{{0, 1},
1017 		{7, 7},
1018 		{0, 0},
1019 		{0, 0}
1020 		}
1021 	},
1022 	{0, 1, 2, 0,		/* 0x84 */
1023 		{{2, 2},
1024 		{7, 7},
1025 		{0, 0},
1026 		{0, 0}
1027 		}
1028 	},
1029 	{1, 1, 3, 0,		/* 0x85 */
1030 		{{0, 0},
1031 		{2, 2},
1032 		{7, 7},
1033 		{0, 0}
1034 		}
1035 	},
1036 	{0, 1, 2, 0,		/* 0x86 */
1037 		{{1, 2},
1038 		{7, 7},
1039 		{0, 0},
1040 		{0, 0}
1041 		}
1042 	},
1043 	{1, 1, 2, 0,		/* 0x87 */
1044 		{{0, 2},
1045 		{7, 7},
1046 		{0, 0},
1047 		{0, 0}
1048 		}
1049 	},
1050 	{0, 1, 2, 0,		/* 0x88 */
1051 		{{3, 3},
1052 		{7, 7},
1053 		{0, 0},
1054 		{0, 0}
1055 		}
1056 	},
1057 	{1, 1, 3, 0,		/* 0x89 */
1058 		{{0, 0},
1059 		{3, 3},
1060 		{7, 7},
1061 		{0, 0}
1062 		}
1063 	},
1064 	{0, 1, 3, 0,		/* 0x8a */
1065 		{{1, 1},
1066 		{3, 3},
1067 		{7, 7},
1068 		{0, 0}
1069 		}
1070 	},
1071 	{1, 1, 3, 0,		/* 0x8b */
1072 		{{0, 1},
1073 		{3, 3},
1074 		{7, 7},
1075 		{0, 0}
1076 		}
1077 	},
1078 	{0, 1, 2, 0,		/* 0x8c */
1079 		{{2, 3},
1080 		{7, 7},
1081 		{0, 0},
1082 		{0, 0}
1083 		}
1084 	},
1085 	{1, 1, 3, 0,		/* 0x8d */
1086 		{{0, 0},
1087 		{2, 3},
1088 		{7, 7},
1089 		{0, 0}
1090 		}
1091 	},
1092 	{0, 1, 2, 0,		/* 0x8e */
1093 		{{1, 3},
1094 		{7, 7},
1095 		{0, 0},
1096 		{0, 0}
1097 		}
1098 	},
1099 	{1, 1, 2, 0,		/* 0x8f */
1100 		{{0, 3},
1101 		{7, 7},
1102 		{0, 0},
1103 		{0, 0}
1104 		}
1105 	},
1106 	{0, 1, 2, 0,		/* 0x90 */
1107 		{{4, 4},
1108 		{7, 7},
1109 		{0, 0},
1110 		{0, 0}
1111 		}
1112 	},
1113 	{1, 1, 3, 0,		/* 0x91 */
1114 		{{0, 0},
1115 		{4, 4},
1116 		{7, 7},
1117 		{0, 0}
1118 		}
1119 	},
1120 	{0, 1, 3, 0,		/* 0x92 */
1121 		{{1, 1},
1122 		{4, 4},
1123 		{7, 7},
1124 		{0, 0}
1125 		}
1126 	},
1127 	{1, 1, 3, 0,		/* 0x93 */
1128 		{{0, 1},
1129 		{4, 4},
1130 		{7, 7},
1131 		{0, 0}
1132 		}
1133 	},
1134 	{0, 1, 3, 0,		/* 0x94 */
1135 		{{2, 2},
1136 		{4, 4},
1137 		{7, 7},
1138 		{0, 0}
1139 		}
1140 	},
1141 	{1, 1, 4, 0,		/* 0x95 */
1142 		{{0, 0},
1143 		{2, 2},
1144 		{4, 4},
1145 		{7, 7}
1146 		}
1147 	},
1148 	{0, 1, 3, 0,		/* 0x96 */
1149 		{{1, 2},
1150 		{4, 4},
1151 		{7, 7},
1152 		{0, 0}
1153 		}
1154 	},
1155 	{1, 1, 3, 0,		/* 0x97 */
1156 		{{0, 2},
1157 		{4, 4},
1158 		{7, 7},
1159 		{0, 0}
1160 		}
1161 	},
1162 	{0, 1, 2, 0,		/* 0x98 */
1163 		{{3, 4},
1164 		{7, 7},
1165 		{0, 0},
1166 		{0, 0}
1167 		}
1168 	},
1169 	{1, 1, 3, 0,		/* 0x99 */
1170 		{{0, 0},
1171 		{3, 4},
1172 		{7, 7},
1173 		{0, 0}
1174 		}
1175 	},
1176 	{0, 1, 3, 0,		/* 0x9a */
1177 		{{1, 1},
1178 		{3, 4},
1179 		{7, 7},
1180 		{0, 0}
1181 		}
1182 	},
1183 	{1, 1, 3, 0,		/* 0x9b */
1184 		{{0, 1},
1185 		{3, 4},
1186 		{7, 7},
1187 		{0, 0}
1188 		}
1189 	},
1190 	{0, 1, 2, 0,		/* 0x9c */
1191 		{{2, 4},
1192 		{7, 7},
1193 		{0, 0},
1194 		{0, 0}
1195 		}
1196 	},
1197 	{1, 1, 3, 0,		/* 0x9d */
1198 		{{0, 0},
1199 		{2, 4},
1200 		{7, 7},
1201 		{0, 0}
1202 		}
1203 	},
1204 	{0, 1, 2, 0,		/* 0x9e */
1205 		{{1, 4},
1206 		{7, 7},
1207 		{0, 0},
1208 		{0, 0}
1209 		}
1210 	},
1211 	{1, 1, 2, 0,		/* 0x9f */
1212 		{{0, 4},
1213 		{7, 7},
1214 		{0, 0},
1215 		{0, 0}
1216 		}
1217 	},
1218 	{0, 1, 2, 0,		/* 0xa0 */
1219 		{{5, 5},
1220 		{7, 7},
1221 		{0, 0},
1222 		{0, 0}
1223 		}
1224 	},
1225 	{1, 1, 3, 0,		/* 0xa1 */
1226 		{{0, 0},
1227 		{5, 5},
1228 		{7, 7},
1229 		{0, 0}
1230 		}
1231 	},
1232 	{0, 1, 3, 0,		/* 0xa2 */
1233 		{{1, 1},
1234 		{5, 5},
1235 		{7, 7},
1236 		{0, 0}
1237 		}
1238 	},
1239 	{1, 1, 3, 0,		/* 0xa3 */
1240 		{{0, 1},
1241 		{5, 5},
1242 		{7, 7},
1243 		{0, 0}
1244 		}
1245 	},
1246 	{0, 1, 3, 0,		/* 0xa4 */
1247 		{{2, 2},
1248 		{5, 5},
1249 		{7, 7},
1250 		{0, 0}
1251 		}
1252 	},
1253 	{1, 1, 4, 0,		/* 0xa5 */
1254 		{{0, 0},
1255 		{2, 2},
1256 		{5, 5},
1257 		{7, 7}
1258 		}
1259 	},
1260 	{0, 1, 3, 0,		/* 0xa6 */
1261 		{{1, 2},
1262 		{5, 5},
1263 		{7, 7},
1264 		{0, 0}
1265 		}
1266 	},
1267 	{1, 1, 3, 0,		/* 0xa7 */
1268 		{{0, 2},
1269 		{5, 5},
1270 		{7, 7},
1271 		{0, 0}
1272 		}
1273 	},
1274 	{0, 1, 3, 0,		/* 0xa8 */
1275 		{{3, 3},
1276 		{5, 5},
1277 		{7, 7},
1278 		{0, 0}
1279 		}
1280 	},
1281 	{1, 1, 4, 0,		/* 0xa9 */
1282 		{{0, 0},
1283 		{3, 3},
1284 		{5, 5},
1285 		{7, 7}
1286 		}
1287 	},
1288 	{0, 1, 4, 0,		/* 0xaa */
1289 		{{1, 1},
1290 		{3, 3},
1291 		{5, 5},
1292 		{7, 7}
1293 		}
1294 	},
1295 	{1, 1, 4, 0,		/* 0xab */
1296 		{{0, 1},
1297 		{3, 3},
1298 		{5, 5},
1299 		{7, 7}
1300 		}
1301 	},
1302 	{0, 1, 3, 0,		/* 0xac */
1303 		{{2, 3},
1304 		{5, 5},
1305 		{7, 7},
1306 		{0, 0}
1307 		}
1308 	},
1309 	{1, 1, 4, 0,		/* 0xad */
1310 		{{0, 0},
1311 		{2, 3},
1312 		{5, 5},
1313 		{7, 7}
1314 		}
1315 	},
1316 	{0, 1, 3, 0,		/* 0xae */
1317 		{{1, 3},
1318 		{5, 5},
1319 		{7, 7},
1320 		{0, 0}
1321 		}
1322 	},
1323 	{1, 1, 3, 0,		/* 0xaf */
1324 		{{0, 3},
1325 		{5, 5},
1326 		{7, 7},
1327 		{0, 0}
1328 		}
1329 	},
1330 	{0, 1, 2, 0,		/* 0xb0 */
1331 		{{4, 5},
1332 		{7, 7},
1333 		{0, 0},
1334 		{0, 0}
1335 		}
1336 	},
1337 	{1, 1, 3, 0,		/* 0xb1 */
1338 		{{0, 0},
1339 		{4, 5},
1340 		{7, 7},
1341 		{0, 0}
1342 		}
1343 	},
1344 	{0, 1, 3, 0,		/* 0xb2 */
1345 		{{1, 1},
1346 		{4, 5},
1347 		{7, 7},
1348 		{0, 0}
1349 		}
1350 	},
1351 	{1, 1, 3, 0,		/* 0xb3 */
1352 		{{0, 1},
1353 		{4, 5},
1354 		{7, 7},
1355 		{0, 0}
1356 		}
1357 	},
1358 	{0, 1, 3, 0,		/* 0xb4 */
1359 		{{2, 2},
1360 		{4, 5},
1361 		{7, 7},
1362 		{0, 0}
1363 		}
1364 	},
1365 	{1, 1, 4, 0,		/* 0xb5 */
1366 		{{0, 0},
1367 		{2, 2},
1368 		{4, 5},
1369 		{7, 7}
1370 		}
1371 	},
1372 	{0, 1, 3, 0,		/* 0xb6 */
1373 		{{1, 2},
1374 		{4, 5},
1375 		{7, 7},
1376 		{0, 0}
1377 		}
1378 	},
1379 	{1, 1, 3, 0,		/* 0xb7 */
1380 		{{0, 2},
1381 		{4, 5},
1382 		{7, 7},
1383 		{0, 0}
1384 		}
1385 	},
1386 	{0, 1, 2, 0,		/* 0xb8 */
1387 		{{3, 5},
1388 		{7, 7},
1389 		{0, 0},
1390 		{0, 0}
1391 		}
1392 	},
1393 	{1, 1, 3, 0,		/* 0xb9 */
1394 		{{0, 0},
1395 		{3, 5},
1396 		{7, 7},
1397 		{0, 0}
1398 		}
1399 	},
1400 	{0, 1, 3, 0,		/* 0xba */
1401 		{{1, 1},
1402 		{3, 5},
1403 		{7, 7},
1404 		{0, 0}
1405 		}
1406 	},
1407 	{1, 1, 3, 0,		/* 0xbb */
1408 		{{0, 1},
1409 		{3, 5},
1410 		{7, 7},
1411 		{0, 0}
1412 		}
1413 	},
1414 	{0, 1, 2, 0,		/* 0xbc */
1415 		{{2, 5},
1416 		{7, 7},
1417 		{0, 0},
1418 		{0, 0}
1419 		}
1420 	},
1421 	{1, 1, 3, 0,		/* 0xbd */
1422 		{{0, 0},
1423 		{2, 5},
1424 		{7, 7},
1425 		{0, 0}
1426 		}
1427 	},
1428 	{0, 1, 2, 0,		/* 0xbe */
1429 		{{1, 5},
1430 		{7, 7},
1431 		{0, 0},
1432 		{0, 0}
1433 		}
1434 	},
1435 	{1, 1, 2, 0,		/* 0xbf */
1436 		{{0, 5},
1437 		{7, 7},
1438 		{0, 0},
1439 		{0, 0}
1440 		}
1441 	},
1442 	{0, 1, 1, 0,		/* 0xc0 */
1443 		{{6, 7},
1444 		{0, 0},
1445 		{0, 0},
1446 		{0, 0}
1447 		}
1448 	},
1449 	{1, 1, 2, 0,		/* 0xc1 */
1450 		{{0, 0},
1451 		{6, 7},
1452 		{0, 0},
1453 		{0, 0}
1454 		}
1455 	},
1456 	{0, 1, 2, 0,		/* 0xc2 */
1457 		{{1, 1},
1458 		{6, 7},
1459 		{0, 0},
1460 		{0, 0}
1461 		}
1462 	},
1463 	{1, 1, 2, 0,		/* 0xc3 */
1464 		{{0, 1},
1465 		{6, 7},
1466 		{0, 0},
1467 		{0, 0}
1468 		}
1469 	},
1470 	{0, 1, 2, 0,		/* 0xc4 */
1471 		{{2, 2},
1472 		{6, 7},
1473 		{0, 0},
1474 		{0, 0}
1475 		}
1476 	},
1477 	{1, 1, 3, 0,		/* 0xc5 */
1478 		{{0, 0},
1479 		{2, 2},
1480 		{6, 7},
1481 		{0, 0}
1482 		}
1483 	},
1484 	{0, 1, 2, 0,		/* 0xc6 */
1485 		{{1, 2},
1486 		{6, 7},
1487 		{0, 0},
1488 		{0, 0}
1489 		}
1490 	},
1491 	{1, 1, 2, 0,		/* 0xc7 */
1492 		{{0, 2},
1493 		{6, 7},
1494 		{0, 0},
1495 		{0, 0}
1496 		}
1497 	},
1498 	{0, 1, 2, 0,		/* 0xc8 */
1499 		{{3, 3},
1500 		{6, 7},
1501 		{0, 0},
1502 		{0, 0}
1503 		}
1504 	},
1505 	{1, 1, 3, 0,		/* 0xc9 */
1506 		{{0, 0},
1507 		{3, 3},
1508 		{6, 7},
1509 		{0, 0}
1510 		}
1511 	},
1512 	{0, 1, 3, 0,		/* 0xca */
1513 		{{1, 1},
1514 		{3, 3},
1515 		{6, 7},
1516 		{0, 0}
1517 		}
1518 	},
1519 	{1, 1, 3, 0,		/* 0xcb */
1520 		{{0, 1},
1521 		{3, 3},
1522 		{6, 7},
1523 		{0, 0}
1524 		}
1525 	},
1526 	{0, 1, 2, 0,		/* 0xcc */
1527 		{{2, 3},
1528 		{6, 7},
1529 		{0, 0},
1530 		{0, 0}
1531 		}
1532 	},
1533 	{1, 1, 3, 0,		/* 0xcd */
1534 		{{0, 0},
1535 		{2, 3},
1536 		{6, 7},
1537 		{0, 0}
1538 		}
1539 	},
1540 	{0, 1, 2, 0,		/* 0xce */
1541 		{{1, 3},
1542 		{6, 7},
1543 		{0, 0},
1544 		{0, 0}
1545 		}
1546 	},
1547 	{1, 1, 2, 0,		/* 0xcf */
1548 		{{0, 3},
1549 		{6, 7},
1550 		{0, 0},
1551 		{0, 0}
1552 		}
1553 	},
1554 	{0, 1, 2, 0,		/* 0xd0 */
1555 		{{4, 4},
1556 		{6, 7},
1557 		{0, 0},
1558 		{0, 0}
1559 		}
1560 	},
1561 	{1, 1, 3, 0,		/* 0xd1 */
1562 		{{0, 0},
1563 		{4, 4},
1564 		{6, 7},
1565 		{0, 0}
1566 		}
1567 	},
1568 	{0, 1, 3, 0,		/* 0xd2 */
1569 		{{1, 1},
1570 		{4, 4},
1571 		{6, 7},
1572 		{0, 0}
1573 		}
1574 	},
1575 	{1, 1, 3, 0,		/* 0xd3 */
1576 		{{0, 1},
1577 		{4, 4},
1578 		{6, 7},
1579 		{0, 0}
1580 		}
1581 	},
1582 	{0, 1, 3, 0,		/* 0xd4 */
1583 		{{2, 2},
1584 		{4, 4},
1585 		{6, 7},
1586 		{0, 0}
1587 		}
1588 	},
1589 	{1, 1, 4, 0,		/* 0xd5 */
1590 		{{0, 0},
1591 		{2, 2},
1592 		{4, 4},
1593 		{6, 7}
1594 		}
1595 	},
1596 	{0, 1, 3, 0,		/* 0xd6 */
1597 		{{1, 2},
1598 		{4, 4},
1599 		{6, 7},
1600 		{0, 0}
1601 		}
1602 	},
1603 	{1, 1, 3, 0,		/* 0xd7 */
1604 		{{0, 2},
1605 		{4, 4},
1606 		{6, 7},
1607 		{0, 0}
1608 		}
1609 	},
1610 	{0, 1, 2, 0,		/* 0xd8 */
1611 		{{3, 4},
1612 		{6, 7},
1613 		{0, 0},
1614 		{0, 0}
1615 		}
1616 	},
1617 	{1, 1, 3, 0,		/* 0xd9 */
1618 		{{0, 0},
1619 		{3, 4},
1620 		{6, 7},
1621 		{0, 0}
1622 		}
1623 	},
1624 	{0, 1, 3, 0,		/* 0xda */
1625 		{{1, 1},
1626 		{3, 4},
1627 		{6, 7},
1628 		{0, 0}
1629 		}
1630 	},
1631 	{1, 1, 3, 0,		/* 0xdb */
1632 		{{0, 1},
1633 		{3, 4},
1634 		{6, 7},
1635 		{0, 0}
1636 		}
1637 	},
1638 	{0, 1, 2, 0,		/* 0xdc */
1639 		{{2, 4},
1640 		{6, 7},
1641 		{0, 0},
1642 		{0, 0}
1643 		}
1644 	},
1645 	{1, 1, 3, 0,		/* 0xdd */
1646 		{{0, 0},
1647 		{2, 4},
1648 		{6, 7},
1649 		{0, 0}
1650 		}
1651 	},
1652 	{0, 1, 2, 0,		/* 0xde */
1653 		{{1, 4},
1654 		{6, 7},
1655 		{0, 0},
1656 		{0, 0}
1657 		}
1658 	},
1659 	{1, 1, 2, 0,		/* 0xdf */
1660 		{{0, 4},
1661 		{6, 7},
1662 		{0, 0},
1663 		{0, 0}
1664 		}
1665 	},
1666 	{0, 1, 1, 0,		/* 0xe0 */
1667 		{{5, 7},
1668 		{0, 0},
1669 		{0, 0},
1670 		{0, 0}
1671 		}
1672 	},
1673 	{1, 1, 2, 0,		/* 0xe1 */
1674 		{{0, 0},
1675 		{5, 7},
1676 		{0, 0},
1677 		{0, 0}
1678 		}
1679 	},
1680 	{0, 1, 2, 0,		/* 0xe2 */
1681 		{{1, 1},
1682 		{5, 7},
1683 		{0, 0},
1684 		{0, 0}
1685 		}
1686 	},
1687 	{1, 1, 2, 0,		/* 0xe3 */
1688 		{{0, 1},
1689 		{5, 7},
1690 		{0, 0},
1691 		{0, 0}
1692 		}
1693 	},
1694 	{0, 1, 2, 0,		/* 0xe4 */
1695 		{{2, 2},
1696 		{5, 7},
1697 		{0, 0},
1698 		{0, 0}
1699 		}
1700 	},
1701 	{1, 1, 3, 0,		/* 0xe5 */
1702 		{{0, 0},
1703 		{2, 2},
1704 		{5, 7},
1705 		{0, 0}
1706 		}
1707 	},
1708 	{0, 1, 2, 0,		/* 0xe6 */
1709 		{{1, 2},
1710 		{5, 7},
1711 		{0, 0},
1712 		{0, 0}
1713 		}
1714 	},
1715 	{1, 1, 2, 0,		/* 0xe7 */
1716 		{{0, 2},
1717 		{5, 7},
1718 		{0, 0},
1719 		{0, 0}
1720 		}
1721 	},
1722 	{0, 1, 2, 0,		/* 0xe8 */
1723 		{{3, 3},
1724 		{5, 7},
1725 		{0, 0},
1726 		{0, 0}
1727 		}
1728 	},
1729 	{1, 1, 3, 0,		/* 0xe9 */
1730 		{{0, 0},
1731 		{3, 3},
1732 		{5, 7},
1733 		{0, 0}
1734 		}
1735 	},
1736 	{0, 1, 3, 0,		/* 0xea */
1737 		{{1, 1},
1738 		{3, 3},
1739 		{5, 7},
1740 		{0, 0}
1741 		}
1742 	},
1743 	{1, 1, 3, 0,		/* 0xeb */
1744 		{{0, 1},
1745 		{3, 3},
1746 		{5, 7},
1747 		{0, 0}
1748 		}
1749 	},
1750 	{0, 1, 2, 0,		/* 0xec */
1751 		{{2, 3},
1752 		{5, 7},
1753 		{0, 0},
1754 		{0, 0}
1755 		}
1756 	},
1757 	{1, 1, 3, 0,		/* 0xed */
1758 		{{0, 0},
1759 		{2, 3},
1760 		{5, 7},
1761 		{0, 0}
1762 		}
1763 	},
1764 	{0, 1, 2, 0,		/* 0xee */
1765 		{{1, 3},
1766 		{5, 7},
1767 		{0, 0},
1768 		{0, 0}
1769 		}
1770 	},
1771 	{1, 1, 2, 0,		/* 0xef */
1772 		{{0, 3},
1773 		{5, 7},
1774 		{0, 0},
1775 		{0, 0}
1776 		}
1777 	},
1778 	{0, 1, 1, 0,		/* 0xf0 */
1779 		{{4, 7},
1780 		{0, 0},
1781 		{0, 0},
1782 		{0, 0}
1783 		}
1784 	},
1785 	{1, 1, 2, 0,		/* 0xf1 */
1786 		{{0, 0},
1787 		{4, 7},
1788 		{0, 0},
1789 		{0, 0}
1790 		}
1791 	},
1792 	{0, 1, 2, 0,		/* 0xf2 */
1793 		{{1, 1},
1794 		{4, 7},
1795 		{0, 0},
1796 		{0, 0}
1797 		}
1798 	},
1799 	{1, 1, 2, 0,		/* 0xf3 */
1800 		{{0, 1},
1801 		{4, 7},
1802 		{0, 0},
1803 		{0, 0}
1804 		}
1805 	},
1806 	{0, 1, 2, 0,		/* 0xf4 */
1807 		{{2, 2},
1808 		{4, 7},
1809 		{0, 0},
1810 		{0, 0}
1811 		}
1812 	},
1813 	{1, 1, 3, 0,		/* 0xf5 */
1814 		{{0, 0},
1815 		{2, 2},
1816 		{4, 7},
1817 		{0, 0}
1818 		}
1819 	},
1820 	{0, 1, 2, 0,		/* 0xf6 */
1821 		{{1, 2},
1822 		{4, 7},
1823 		{0, 0},
1824 		{0, 0}
1825 		}
1826 	},
1827 	{1, 1, 2, 0,		/* 0xf7 */
1828 		{{0, 2},
1829 		{4, 7},
1830 		{0, 0},
1831 		{0, 0}
1832 		}
1833 	},
1834 	{0, 1, 1, 0,		/* 0xf8 */
1835 		{{3, 7},
1836 		{0, 0},
1837 		{0, 0},
1838 		{0, 0}
1839 		}
1840 	},
1841 	{1, 1, 2, 0,		/* 0xf9 */
1842 		{{0, 0},
1843 		{3, 7},
1844 		{0, 0},
1845 		{0, 0}
1846 		}
1847 	},
1848 	{0, 1, 2, 0,		/* 0xfa */
1849 		{{1, 1},
1850 		{3, 7},
1851 		{0, 0},
1852 		{0, 0}
1853 		}
1854 	},
1855 	{1, 1, 2, 0,		/* 0xfb */
1856 		{{0, 1},
1857 		{3, 7},
1858 		{0, 0},
1859 		{0, 0}
1860 		}
1861 	},
1862 	{0, 1, 1, 0,		/* 0xfc */
1863 		{{2, 7},
1864 		{0, 0},
1865 		{0, 0},
1866 		{0, 0}
1867 		}
1868 	},
1869 	{1, 1, 2, 0,		/* 0xfd */
1870 		{{0, 0},
1871 		{2, 7},
1872 		{0, 0},
1873 		{0, 0}
1874 		}
1875 	},
1876 	{0, 1, 1, 0,		/* 0xfe */
1877 		{{1, 7},
1878 		{0, 0},
1879 		{0, 0},
1880 		{0, 0}
1881 		}
1882 	},
1883 	{1, 1, 1, 0,		/* 0xff */
1884 		{{0, 7},
1885 		{0, 0},
1886 		{0, 0},
1887 		{0, 0}
1888 		}
1889 	}
1890 };
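/*
 * Worked example (illustrative only): a mapping-array byte of 0x2a is
 * binary 00101010, so bits 1, 3 and 5 are set.  sack_array[0x2a] is
 * {0, 0, 3, 0, {{1, 1}, {3, 3}, {5, 5}, {0, 0}}}: three runs, none of them
 * touching a byte boundary, so neither edge flag is set.  A SACK builder
 * can therefore translate one byte of the mapping array with a single
 * lookup, roughly (sketch only, with made-up variable names):
 *
 *	const struct sack_track *st = &sack_array[mapping_byte];
 *	for (i = 0; i < st->num_entries; i++) {
 *		gap_start = 8 * byte_offset + st->gaps[i].start;
 *		gap_end   = 8 * byte_offset + st->gaps[i].end;
 *		... emit or merge a gap-ack block covering [gap_start, gap_end] ...
 *	}
 *
 * The actual SACK construction later in this file also merges runs across
 * byte boundaries using the right_edge/left_edge flags.
 */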
1891 
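/*
 * Return 1 if ifa is usable under the given scoping rules, 0 otherwise:
 * loopback addresses require loopback_scope, the address family must be
 * marked legal in the scope, unspecified addresses are rejected, private
 * IPv4 addresses require ipv4_local_scope, IPv6 link-local addresses are
 * rejected, and IPv6 site-local addresses require site_scope.  If
 * do_update is set, the cached IPv6 interface-address flags are refreshed
 * first.
 */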
1892 int
sctp_is_address_in_scope(struct sctp_ifa *ifa,
1894                          struct sctp_scoping *scope,
1895                          int do_update)
1896 {
1897 	if ((scope->loopback_scope == 0) &&
1898 	    (ifa->ifn_p) && SCTP_IFN_IS_IFT_LOOP(ifa->ifn_p)) {
		/* skip loopback if not in scope */
1902 		return (0);
1903 	}
1904 	switch (ifa->address.sa.sa_family) {
1905 #ifdef INET
1906 	case AF_INET:
1907 		if (scope->ipv4_addr_legal) {
1908 			struct sockaddr_in *sin;
1909 
1910 			sin = &ifa->address.sin;
1911 			if (sin->sin_addr.s_addr == 0) {
				/* not in scope, unspecified */
1913 				return (0);
1914 			}
1915 			if ((scope->ipv4_local_scope == 0) &&
1916 			    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
1917 				/* private address not in scope */
1918 				return (0);
1919 			}
1920 		} else {
1921 			return (0);
1922 		}
1923 		break;
1924 #endif
1925 #ifdef INET6
1926 	case AF_INET6:
1927 		if (scope->ipv6_addr_legal) {
1928 			struct sockaddr_in6 *sin6;
1929 
			/*
			 * Must update the flags, which means any IFA locks
			 * must now be applied here.
			 */
1933 			if (do_update) {
1934 				sctp_gather_internal_ifa_flags(ifa);
1935 			}
1936 			if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
1937 				return (0);
1938 			}
1939 			/* ok to use deprecated addresses? */
1940 			sin6 = &ifa->address.sin6;
1941 			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
				/* skip unspecified addresses */
1943 				return (0);
1944 			}
1945 			if (		/* (local_scope == 0) && */
1946 			    (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) {
1947 				return (0);
1948 			}
1949 			if ((scope->site_scope == 0) &&
1950 			    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
1951 				return (0);
1952 			}
1953 		} else {
1954 			return (0);
1955 		}
1956 		break;
1957 #endif
1958 #if defined(__Userspace__)
1959 	case AF_CONN:
1960 		if (!scope->conn_addr_legal) {
1961 			return (0);
1962 		}
1963 		break;
1964 #endif
1965 	default:
1966 		return (0);
1967 	}
1968 	return (1);
1969 }
1970 
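/*
 * Append an IPv4 or IPv6 address parameter (TLV) describing ifa to the
 * mbuf chain starting at m, allocating a new mbuf at the tail if there is
 * not enough trailing space.  On success the parameter length is added to
 * *len (if len is non-NULL) and the mbuf holding the parameter is
 * returned; for unsupported address families the chain is returned
 * unchanged.
 */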
1971 static struct mbuf *
sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa, uint16_t *len)
1973 {
1974 #if defined(INET) || defined(INET6)
1975 	struct sctp_paramhdr *paramh;
1976 	struct mbuf *mret;
1977 	uint16_t plen;
1978 #endif
1979 
1980 	switch (ifa->address.sa.sa_family) {
1981 #ifdef INET
1982 	case AF_INET:
1983 		plen = (uint16_t)sizeof(struct sctp_ipv4addr_param);
1984 		break;
1985 #endif
1986 #ifdef INET6
1987 	case AF_INET6:
1988 		plen = (uint16_t)sizeof(struct sctp_ipv6addr_param);
1989 		break;
1990 #endif
1991 	default:
1992 		return (m);
1993 	}
1994 #if defined(INET) || defined(INET6)
1995 	if (M_TRAILINGSPACE(m) >= plen) {
1996 		/* easy side we just drop it on the end */
1997 		paramh = (struct sctp_paramhdr *)(SCTP_BUF_AT(m, SCTP_BUF_LEN(m)));
1998 		mret = m;
1999 	} else {
2000 		/* Need more space */
2001 		mret = m;
2002 		while (SCTP_BUF_NEXT(mret) != NULL) {
2003 			mret = SCTP_BUF_NEXT(mret);
2004 		}
2005 		SCTP_BUF_NEXT(mret) = sctp_get_mbuf_for_msg(plen, 0, M_NOWAIT, 1, MT_DATA);
2006 		if (SCTP_BUF_NEXT(mret) == NULL) {
2007 			/* We are hosed, can't add more addresses */
2008 			return (m);
2009 		}
2010 		mret = SCTP_BUF_NEXT(mret);
2011 		paramh = mtod(mret, struct sctp_paramhdr *);
2012 	}
2013 	/* now add the parameter */
2014 	switch (ifa->address.sa.sa_family) {
2015 #ifdef INET
2016 	case AF_INET:
2017 	{
2018 		struct sctp_ipv4addr_param *ipv4p;
2019 		struct sockaddr_in *sin;
2020 
2021 		sin = &ifa->address.sin;
2022 		ipv4p = (struct sctp_ipv4addr_param *)paramh;
2023 		paramh->param_type = htons(SCTP_IPV4_ADDRESS);
2024 		paramh->param_length = htons(plen);
2025 		ipv4p->addr = sin->sin_addr.s_addr;
2026 		SCTP_BUF_LEN(mret) += plen;
2027 		break;
2028 	}
2029 #endif
2030 #ifdef INET6
2031 	case AF_INET6:
2032 	{
2033 		struct sctp_ipv6addr_param *ipv6p;
2034 		struct sockaddr_in6 *sin6;
2035 
2036 		sin6 = &ifa->address.sin6;
2037 		ipv6p = (struct sctp_ipv6addr_param *)paramh;
2038 		paramh->param_type = htons(SCTP_IPV6_ADDRESS);
2039 		paramh->param_length = htons(plen);
2040 		memcpy(ipv6p->addr, &sin6->sin6_addr,
2041 		    sizeof(ipv6p->addr));
2042 #if defined(SCTP_EMBEDDED_V6_SCOPE)
2043 		/* clear embedded scope in the address */
2044 		in6_clearscope((struct in6_addr *)ipv6p->addr);
2045 #endif
2046 		SCTP_BUF_LEN(mret) += plen;
2047 		break;
2048 	}
2049 #endif
2050 	default:
2051 		return (m);
2052 	}
2053 	if (len != NULL) {
2054 		*len += plen;
2055 	}
2056 	return (mret);
2057 #endif
2058 }
2059 
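/*
 * Add the usable local addresses as parameters to the INIT or INIT-ACK
 * chunk being built in m_at.  For a bound-all endpoint the interfaces of
 * the endpoint's VRF are walked; for a bound-specific endpoint its own
 * address list is used.  Addresses are only listed when more than one
 * would qualify (a single-homed endpoint lets the peer take the address
 * from the packet, which helps with NAT), and the count is capped at
 * SCTP_ADDRESS_LIMIT.
 */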
2060 struct mbuf *
sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2062                            struct sctp_scoping *scope,
2063 			   struct mbuf *m_at, int cnt_inits_to,
2064 			   uint16_t *padding_len, uint16_t *chunk_len)
2065 {
2066 	struct sctp_vrf *vrf = NULL;
2067 	int cnt, limit_out = 0, total_count;
2068 	uint32_t vrf_id;
2069 
2070 	vrf_id = inp->def_vrf_id;
2071 	SCTP_IPI_ADDR_RLOCK();
2072 	vrf = sctp_find_vrf(vrf_id);
2073 	if (vrf == NULL) {
2074 		SCTP_IPI_ADDR_RUNLOCK();
2075 		return (m_at);
2076 	}
2077 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
2078 		struct sctp_ifa *sctp_ifap;
2079 		struct sctp_ifn *sctp_ifnp;
2080 
2081 		cnt = cnt_inits_to;
2082 		if (vrf->total_ifa_count > SCTP_COUNT_LIMIT) {
2083 			limit_out = 1;
2084 			cnt = SCTP_ADDRESS_LIMIT;
2085 			goto skip_count;
2086 		}
2087 		LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
2088 			if ((scope->loopback_scope == 0) &&
2089 			    SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
2090 				/*
2091 				 * Skip loopback devices if loopback_scope
2092 				 * not set
2093 				 */
2094 				continue;
2095 			}
2096 			LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
2097 #if defined(__FreeBSD__) && !defined(__Userspace__)
2098 #ifdef INET
2099 				if ((sctp_ifap->address.sa.sa_family == AF_INET) &&
2100 				    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2101 				                      &sctp_ifap->address.sin.sin_addr) != 0)) {
2102 					continue;
2103 				}
2104 #endif
2105 #ifdef INET6
2106 				if ((sctp_ifap->address.sa.sa_family == AF_INET6) &&
2107 				    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2108 				                      &sctp_ifap->address.sin6.sin6_addr) != 0)) {
2109 					continue;
2110 				}
2111 #endif
2112 #endif
2113 				if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
2114 					continue;
2115 				}
2116 #if defined(__Userspace__)
2117 				if (sctp_ifap->address.sa.sa_family == AF_CONN) {
2118 					continue;
2119 				}
2120 #endif
2121 				if (sctp_is_address_in_scope(sctp_ifap, scope, 1) == 0) {
2122 					continue;
2123 				}
2124 				cnt++;
2125 				if (cnt > SCTP_ADDRESS_LIMIT) {
2126 					break;
2127 				}
2128 			}
2129 			if (cnt > SCTP_ADDRESS_LIMIT) {
2130 				break;
2131 			}
2132 		}
2133 	skip_count:
2134 		if (cnt > 1) {
2135 			total_count = 0;
2136 			LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
2137 				cnt = 0;
2138 				if ((scope->loopback_scope == 0) &&
2139 				    SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
2140 					/*
2141 					 * Skip loopback devices if
2142 					 * loopback_scope not set
2143 					 */
2144 					continue;
2145 				}
2146 				LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
2147 #if defined(__FreeBSD__) && !defined(__Userspace__)
2148 #ifdef INET
2149 					if ((sctp_ifap->address.sa.sa_family == AF_INET) &&
2150 					    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2151 					                      &sctp_ifap->address.sin.sin_addr) != 0)) {
2152 						continue;
2153 					}
2154 #endif
2155 #ifdef INET6
2156 					if ((sctp_ifap->address.sa.sa_family == AF_INET6) &&
2157 					    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2158 					                      &sctp_ifap->address.sin6.sin6_addr) != 0)) {
2159 						continue;
2160 					}
2161 #endif
2162 #endif
2163 					if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
2164 						continue;
2165 					}
2166 #if defined(__Userspace__)
2167 					if (sctp_ifap->address.sa.sa_family == AF_CONN) {
2168 						continue;
2169 					}
2170 #endif
2171 					if (sctp_is_address_in_scope(sctp_ifap,
2172 								     scope, 0) == 0) {
2173 						continue;
2174 					}
2175 					if ((chunk_len != NULL) &&
2176 					    (padding_len != NULL) &&
2177 					    (*padding_len > 0)) {
2178 						memset(mtod(m_at, caddr_t) + *chunk_len, 0, *padding_len);
2179 						SCTP_BUF_LEN(m_at) += *padding_len;
2180 						*chunk_len += *padding_len;
2181 						*padding_len = 0;
2182 					}
2183 					m_at = sctp_add_addr_to_mbuf(m_at, sctp_ifap, chunk_len);
2184 					if (limit_out) {
2185 						cnt++;
2186 						total_count++;
2187 						if (cnt >= 2) {
							/* two from each interface */
2189 							break;
2190 						}
2191 						if (total_count > SCTP_ADDRESS_LIMIT) {
2192 							/* No more addresses */
2193 							break;
2194 						}
2195 					}
2196 				}
2197 			}
2198 		}
2199 	} else {
2200 		struct sctp_laddr *laddr;
2201 
2202 		cnt = cnt_inits_to;
2203 		/* First, how many ? */
2204 		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2205 			if (laddr->ifa == NULL) {
2206 				continue;
2207 			}
2208 			if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
				/*
				 * Address being deleted by the system; don't
				 * list it.
				 */
2212 				continue;
2213 			if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2214 				/* Address being deleted on this ep
2215 				 * don't list.
2216 				 */
2217 				continue;
2218 			}
2219 #if defined(__Userspace__)
2220 			if (laddr->ifa->address.sa.sa_family == AF_CONN) {
2221 				continue;
2222 			}
2223 #endif
2224 			if (sctp_is_address_in_scope(laddr->ifa,
2225 						     scope, 1) == 0) {
2226 				continue;
2227 			}
2228 			cnt++;
2229 		}
2230 		/*
2231 		 * To get through a NAT we only list addresses if we have
2232 		 * more than one. That way if you just bind a single address
2233 		 * we let the source of the init dictate our address.
2234 		 */
2235 		if (cnt > 1) {
2236 			cnt = cnt_inits_to;
2237 			LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2238 				if (laddr->ifa == NULL) {
2239 					continue;
2240 				}
2241 				if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
2242 					continue;
2243 				}
2244 #if defined(__Userspace__)
2245 				if (laddr->ifa->address.sa.sa_family == AF_CONN) {
2246 					continue;
2247 				}
2248 #endif
2249 				if (sctp_is_address_in_scope(laddr->ifa,
2250 							     scope, 0) == 0) {
2251 					continue;
2252 				}
2253 				if ((chunk_len != NULL) &&
2254 				    (padding_len != NULL) &&
2255 				    (*padding_len > 0)) {
2256 					memset(mtod(m_at, caddr_t) + *chunk_len, 0, *padding_len);
2257 					SCTP_BUF_LEN(m_at) += *padding_len;
2258 					*chunk_len += *padding_len;
2259 					*padding_len = 0;
2260 				}
2261 				m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa, chunk_len);
2262 				cnt++;
2263 				if (cnt >= SCTP_ADDRESS_LIMIT) {
2264 					break;
2265 				}
2266 			}
2267 		}
2268 	}
2269 	SCTP_IPI_ADDR_RUNLOCK();
2270 	return (m_at);
2271 }
2272 
2273 static struct sctp_ifa *
sctp_is_ifa_addr_preferred(struct sctp_ifa *ifa,
2275 			   uint8_t dest_is_loop,
2276 			   uint8_t dest_is_priv,
2277 			   sa_family_t fam)
2278 {
2279 	uint8_t dest_is_global = 0;
2280 	/* dest_is_priv is true if destination is a private address */
2281 	/* dest_is_loop is true if destination is a loopback addresses */
2282 
2283 	/**
	 * Here we determine if it is a preferred address. A preferred address
	 * means it is the same scope or a higher scope than the destination.
2286 	 * L = loopback, P = private, G = global
2287 	 * -----------------------------------------
2288 	 *    src    |  dest | result
2289 	 *  ----------------------------------------
2290 	 *     L     |    L  |    yes
2291 	 *  -----------------------------------------
2292 	 *     P     |    L  |    yes-v4 no-v6
2293 	 *  -----------------------------------------
2294 	 *     G     |    L  |    yes-v4 no-v6
2295 	 *  -----------------------------------------
2296 	 *     L     |    P  |    no
2297 	 *  -----------------------------------------
2298 	 *     P     |    P  |    yes
2299 	 *  -----------------------------------------
2300 	 *     G     |    P  |    no
2301 	 *   -----------------------------------------
2302 	 *     L     |    G  |    no
2303 	 *   -----------------------------------------
2304 	 *     P     |    G  |    no
2305 	 *    -----------------------------------------
2306 	 *     G     |    G  |    yes
2307 	 *    -----------------------------------------
2308 	 */
2309 
2310 	if (ifa->address.sa.sa_family != fam) {
2311 		/* forget mis-matched family */
2312 		return (NULL);
2313 	}
2314 	if ((dest_is_priv == 0) && (dest_is_loop == 0)) {
2315 		dest_is_global = 1;
2316 	}
2317 	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Is destination preferred:");
2318 	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ifa->address.sa);
2319 	/* Ok the address may be ok */
2320 #ifdef INET6
2321 	if (fam == AF_INET6) {
		/* ok to use deprecated addresses? No, let's not! */
2323 		if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
2324 			SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:1\n");
2325 			return (NULL);
2326 		}
2327 		if (ifa->src_is_priv && !ifa->src_is_loop) {
2328 			if (dest_is_loop) {
2329 				SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:2\n");
2330 				return (NULL);
2331 			}
2332 		}
2333 		if (ifa->src_is_glob) {
2334 			if (dest_is_loop) {
2335 				SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:3\n");
2336 				return (NULL);
2337 			}
2338 		}
2339 	}
2340 #endif
	/*
	 * Now that we know what is what, implement our table.
	 * This could in theory be done slicker (it used to be), but this
	 * is straightforward and easier to validate :-)
	 */
2345 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "src_loop:%d src_priv:%d src_glob:%d\n",
2346 		ifa->src_is_loop, ifa->src_is_priv, ifa->src_is_glob);
2347 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "dest_loop:%d dest_priv:%d dest_glob:%d\n",
2348 		dest_is_loop, dest_is_priv, dest_is_global);
2349 
2350 	if ((ifa->src_is_loop) && (dest_is_priv)) {
2351 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:4\n");
2352 		return (NULL);
2353 	}
2354 	if ((ifa->src_is_glob) && (dest_is_priv)) {
2355 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:5\n");
2356 		return (NULL);
2357 	}
2358 	if ((ifa->src_is_loop) && (dest_is_global)) {
2359 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:6\n");
2360 		return (NULL);
2361 	}
2362 	if ((ifa->src_is_priv) && (dest_is_global)) {
2363 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:7\n");
2364 		return (NULL);
2365 	}
2366 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "YES\n");
2367 	/* its a preferred address */
2368 	return (ifa);
2369 }
2370 
2371 static struct sctp_ifa *
sctp_is_ifa_addr_acceptable(struct sctp_ifa *ifa,
2373 			    uint8_t dest_is_loop,
2374 			    uint8_t dest_is_priv,
2375 			    sa_family_t fam)
2376 {
2377 	uint8_t dest_is_global = 0;
2378 
2379 	/**
	 * Here we determine if it is an acceptable address. An acceptable
	 * address means it is the same scope or a higher scope, but we can
	 * allow for NAT, which means it is ok to have a global dest and a
	 * private src.
2384 	 *
2385 	 * L = loopback, P = private, G = global
2386 	 * -----------------------------------------
2387 	 *  src    |  dest | result
2388 	 * -----------------------------------------
2389 	 *   L     |   L   |    yes
2390 	 *  -----------------------------------------
2391 	 *   P     |   L   |    yes-v4 no-v6
2392 	 *  -----------------------------------------
2393 	 *   G     |   L   |    yes
2394 	 * -----------------------------------------
2395 	 *   L     |   P   |    no
2396 	 * -----------------------------------------
2397 	 *   P     |   P   |    yes
2398 	 * -----------------------------------------
2399 	 *   G     |   P   |    yes - May not work
2400 	 * -----------------------------------------
2401 	 *   L     |   G   |    no
2402 	 * -----------------------------------------
2403 	 *   P     |   G   |    yes - May not work
2404 	 * -----------------------------------------
2405 	 *   G     |   G   |    yes
2406 	 * -----------------------------------------
2407 	 */
2408 
2409 	if (ifa->address.sa.sa_family != fam) {
2410 		/* forget non matching family */
2411 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa_fam:%d fam:%d\n",
2412 			ifa->address.sa.sa_family, fam);
2413 		return (NULL);
2414 	}
2415 	/* Ok the address may be ok */
2416 	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, &ifa->address.sa);
2417 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst_is_loop:%d dest_is_priv:%d\n",
2418 		dest_is_loop, dest_is_priv);
2419 	if ((dest_is_loop == 0) && (dest_is_priv == 0)) {
2420 		dest_is_global = 1;
2421 	}
2422 #ifdef INET6
2423 	if (fam == AF_INET6) {
2424 		/* ok to use deprecated addresses? */
2425 		if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
2426 			return (NULL);
2427 		}
2428 		if (ifa->src_is_priv) {
2429 			/* Special case, linklocal to loop */
2430 			if (dest_is_loop)
2431 				return (NULL);
2432 		}
2433 	}
2434 #endif
2435 	/*
2436 	 * Now that we know what is what, implement our table.
2437 	 * This could in theory be done slicker (it used to be), but this
2438 	 * is straightforward and easier to validate :-)
2439 	 */
2440 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_priv:%d\n",
2441 		ifa->src_is_loop,
2442 		dest_is_priv);
2443 	if ((ifa->src_is_loop == 1) && (dest_is_priv)) {
2444 		return (NULL);
2445 	}
2446 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_glob:%d\n",
2447 		ifa->src_is_loop,
2448 		dest_is_global);
2449 	if ((ifa->src_is_loop == 1) && (dest_is_global)) {
2450 		return (NULL);
2451 	}
2452 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "address is acceptable\n");
2453 	/* its an acceptable address */
2454 	return (ifa);
2455 }
2456 
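/*
 * Return 1 if ifa is on the association's restricted-address list,
 * 0 otherwise (or if there is no association).
 */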
2457 int
sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
2459 {
2460 	struct sctp_laddr *laddr;
2461 
2462 	if (stcb == NULL) {
2463 		/* There are no restrictions, no TCB :-) */
2464 		return (0);
2465 	}
2466 	LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) {
2467 		if (laddr->ifa == NULL) {
2468 			SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
2469 				__func__);
2470 			continue;
2471 		}
2472 		if (laddr->ifa == ifa) {
2473 			/* Yes it is on the list */
2474 			return (1);
2475 		}
2476 	}
2477 	return (0);
2478 }
2479 
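/*
 * Return 1 if ifa is bound to the endpoint, i.e. it is on the endpoint's
 * address list with no pending action, 0 otherwise.
 */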
2480 int
sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa)
2482 {
2483 	struct sctp_laddr *laddr;
2484 
2485 	if (ifa == NULL)
2486 		return (0);
2487 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2488 		if (laddr->ifa == NULL) {
2489 			SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
2490 				__func__);
2491 			continue;
2492 		}
2493 		if ((laddr->ifa == ifa) && laddr->action == 0)
2494 			/* same pointer */
2495 			return (1);
2496 	}
2497 	return (0);
2498 }
2499 
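/*
 * Source address selection for a bound-specific endpoint without an
 * association: first try for a preferred address, bound to the endpoint,
 * on the interface the route leaves through, then rotate through the
 * endpoint's address list (starting at next_addr_touse) looking for a
 * preferred address, and finally for a merely acceptable one.  Returns
 * NULL if no bound address can serve as a source.
 */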
2500 static struct sctp_ifa *
sctp_choose_boundspecific_inp(struct sctp_inpcb *inp,
2502 			      sctp_route_t *ro,
2503 			      uint32_t vrf_id,
2504 			      int non_asoc_addr_ok,
2505 			      uint8_t dest_is_priv,
2506 			      uint8_t dest_is_loop,
2507 			      sa_family_t fam)
2508 {
2509 	struct sctp_laddr *laddr, *starting_point;
2510 	void *ifn;
2511 	int resettotop = 0;
2512 	struct sctp_ifn *sctp_ifn;
2513 	struct sctp_ifa *sctp_ifa, *sifa;
2514 	struct sctp_vrf *vrf;
2515 	uint32_t ifn_index;
2516 
2517 	vrf = sctp_find_vrf(vrf_id);
2518 	if (vrf == NULL)
2519 		return (NULL);
2520 
2521 	ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2522 	ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2523 	sctp_ifn = sctp_find_ifn(ifn, ifn_index);
	/*
	 * First question: is the ifn we will emit on in our list?  If so,
	 * we want such an address.  Note that we look for a preferred
	 * address first.
	 */
2529 	if (sctp_ifn) {
2530 		/* is a preferred one on the interface we route out? */
2531 		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2532 #if defined(__FreeBSD__) && !defined(__Userspace__)
2533 #ifdef INET
2534 			if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
2535 			    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2536 			                      &sctp_ifa->address.sin.sin_addr) != 0)) {
2537 				continue;
2538 			}
2539 #endif
2540 #ifdef INET6
2541 			if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
2542 			    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2543 			                      &sctp_ifa->address.sin6.sin6_addr) != 0)) {
2544 				continue;
2545 			}
2546 #endif
2547 #endif
2548 			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2549 			    (non_asoc_addr_ok == 0))
2550 				continue;
2551 			sifa = sctp_is_ifa_addr_preferred(sctp_ifa,
2552 							  dest_is_loop,
2553 							  dest_is_priv, fam);
2554 			if (sifa == NULL)
2555 				continue;
2556 			if (sctp_is_addr_in_ep(inp, sifa)) {
2557 				atomic_add_int(&sifa->refcount, 1);
2558 				return (sifa);
2559 			}
2560 		}
2561 	}
	/*
	 * OK, now we need to find one on the list of addresses.  We can't
	 * get one on the emitting interface, so let's first look for a
	 * preferred one; failing that, an acceptable one; otherwise we
	 * return NULL.
	 */
2568 	starting_point = inp->next_addr_touse;
2569  once_again:
2570 	if (inp->next_addr_touse == NULL) {
2571 		inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
2572 		resettotop = 1;
2573 	}
2574 	for (laddr = inp->next_addr_touse; laddr;
2575 	     laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2576 		if (laddr->ifa == NULL) {
2577 			/* address has been removed */
2578 			continue;
2579 		}
2580 		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2581 			/* address is being deleted */
2582 			continue;
2583 		}
2584 		sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop,
2585 						  dest_is_priv, fam);
2586 		if (sifa == NULL)
2587 			continue;
2588 		atomic_add_int(&sifa->refcount, 1);
2589 		return (sifa);
2590 	}
2591 	if (resettotop == 0) {
2592 		inp->next_addr_touse = NULL;
2593 		goto once_again;
2594 	}
2595 
2596 	inp->next_addr_touse = starting_point;
2597 	resettotop = 0;
2598  once_again_too:
2599 	if (inp->next_addr_touse == NULL) {
2600 		inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
2601 		resettotop = 1;
2602 	}
2603 
2604 	/* ok, what about an acceptable address in the inp */
2605 	for (laddr = inp->next_addr_touse; laddr;
2606 	     laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2607 		if (laddr->ifa == NULL) {
2608 			/* address has been removed */
2609 			continue;
2610 		}
2611 		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2612 			/* address is being deleted */
2613 			continue;
2614 		}
2615 		sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
2616 						   dest_is_priv, fam);
2617 		if (sifa == NULL)
2618 			continue;
2619 		atomic_add_int(&sifa->refcount, 1);
2620 		return (sifa);
2621 	}
2622 	if (resettotop == 0) {
2623 		inp->next_addr_touse = NULL;
2624 		goto once_again_too;
2625 	}
2626 
	/*
	 * No address bound can be a source for the destination; we are in
	 * trouble.
	 */
2631 	return (NULL);
2632 }
2633 
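/*
 * Source address selection for a bound-specific endpoint with an existing
 * association: like sctp_choose_boundspecific_inp(), but addresses on the
 * association's restricted list are skipped (unless non_asoc_addr_ok is
 * set and the address is only pending), and the walk through the
 * endpoint's address list rotates from asoc.last_used_address.
 */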
2634 static struct sctp_ifa *
sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp,
2636 			       struct sctp_tcb *stcb,
2637 			       sctp_route_t *ro,
2638 			       uint32_t vrf_id,
2639 			       uint8_t dest_is_priv,
2640 			       uint8_t dest_is_loop,
2641 			       int non_asoc_addr_ok,
2642 			       sa_family_t fam)
2643 {
2644 	struct sctp_laddr *laddr, *starting_point;
2645 	void *ifn;
2646 	struct sctp_ifn *sctp_ifn;
2647 	struct sctp_ifa *sctp_ifa, *sifa;
2648 	uint8_t start_at_beginning = 0;
2649 	struct sctp_vrf *vrf;
2650 	uint32_t ifn_index;
2651 
2652 	/*
2653 	 * first question, is the ifn we will emit on in our list, if so, we
2654 	 * want that one.
2655 	 */
2656 	vrf = sctp_find_vrf(vrf_id);
2657 	if (vrf == NULL)
2658 		return (NULL);
2659 
2660 	ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2661 	ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2662 	sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2663 
2664 	/*
2665 	 * first question, is the ifn we will emit on in our list?  If so,
2666 	 * we want that one. First we look for a preferred. Second, we go
2667 	 * for an acceptable.
2668 	 */
2669 	if (sctp_ifn) {
2670 		/* first try for a preferred address on the ep */
2671 		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2672 #if defined(__FreeBSD__) && !defined(__Userspace__)
2673 #ifdef INET
2674 			if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
2675 			    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2676 			                      &sctp_ifa->address.sin.sin_addr) != 0)) {
2677 				continue;
2678 			}
2679 #endif
2680 #ifdef INET6
2681 			if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
2682 			    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2683 			                      &sctp_ifa->address.sin6.sin6_addr) != 0)) {
2684 				continue;
2685 			}
2686 #endif
2687 #endif
2688 			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
2689 				continue;
2690 			if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
2691 				sifa = sctp_is_ifa_addr_preferred(sctp_ifa, dest_is_loop, dest_is_priv, fam);
2692 				if (sifa == NULL)
2693 					continue;
2694 				if (((non_asoc_addr_ok == 0) &&
2695 				     (sctp_is_addr_restricted(stcb, sifa))) ||
2696 				    (non_asoc_addr_ok &&
2697 				     (sctp_is_addr_restricted(stcb, sifa)) &&
2698 				     (!sctp_is_addr_pending(stcb, sifa)))) {
2699 					/* on the no-no list */
2700 					continue;
2701 				}
2702 				atomic_add_int(&sifa->refcount, 1);
2703 				return (sifa);
2704 			}
2705 		}
2706 		/* next try for an acceptable address on the ep */
2707 		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2708 #if defined(__FreeBSD__) && !defined(__Userspace__)
2709 #ifdef INET
2710 			if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
2711 			    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2712 			                      &sctp_ifa->address.sin.sin_addr) != 0)) {
2713 				continue;
2714 			}
2715 #endif
2716 #ifdef INET6
2717 			if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
2718 			    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2719 			                      &sctp_ifa->address.sin6.sin6_addr) != 0)) {
2720 				continue;
2721 			}
2722 #endif
2723 #endif
2724 			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
2725 				continue;
2726 			if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
2727 				sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv, fam);
2728 				if (sifa == NULL)
2729 					continue;
2730 				if (((non_asoc_addr_ok == 0) &&
2731 				     (sctp_is_addr_restricted(stcb, sifa))) ||
2732 				    (non_asoc_addr_ok &&
2733 				     (sctp_is_addr_restricted(stcb, sifa)) &&
2734 				     (!sctp_is_addr_pending(stcb, sifa)))) {
2735 					/* on the no-no list */
2736 					continue;
2737 				}
2738 				atomic_add_int(&sifa->refcount, 1);
2739 				return (sifa);
2740 			}
2741 		}
2742 	}
2743 	/*
2744 	 * If we can't find one like that, then we must look at all the
2745 	 * bound addresses and pick one: first a preferred one and then,
2746 	 * failing that, an acceptable one.
2747 	 */
2748 	starting_point = stcb->asoc.last_used_address;
2749  sctp_from_the_top:
2750 	if (stcb->asoc.last_used_address == NULL) {
2751 		start_at_beginning = 1;
2752 		stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
2753 	}
2754 	/* search beginning with the last used address */
2755 	for (laddr = stcb->asoc.last_used_address; laddr;
2756 	     laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2757 		if (laddr->ifa == NULL) {
2758 			/* address has been removed */
2759 			continue;
2760 		}
2761 		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2762 			/* address is being deleted */
2763 			continue;
2764 		}
2765 		sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop, dest_is_priv, fam);
2766 		if (sifa == NULL)
2767 			continue;
2768 		if (((non_asoc_addr_ok == 0) &&
2769 		     (sctp_is_addr_restricted(stcb, sifa))) ||
2770 		    (non_asoc_addr_ok &&
2771 		     (sctp_is_addr_restricted(stcb, sifa)) &&
2772 		     (!sctp_is_addr_pending(stcb, sifa)))) {
2773 			/* on the no-no list */
2774 			continue;
2775 		}
2776 		stcb->asoc.last_used_address = laddr;
2777 		atomic_add_int(&sifa->refcount, 1);
2778 		return (sifa);
2779 	}
2780 	if (start_at_beginning == 0) {
2781 		stcb->asoc.last_used_address = NULL;
2782 		goto sctp_from_the_top;
2783 	}
2784 	/* now try for any higher scope than the destination */
2785 	stcb->asoc.last_used_address = starting_point;
2786 	start_at_beginning = 0;
2787  sctp_from_the_top2:
2788 	if (stcb->asoc.last_used_address == NULL) {
2789 		start_at_beginning = 1;
2790 		stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
2791 	}
2792 	/* search beginning with the last used address */
2793 	for (laddr = stcb->asoc.last_used_address; laddr;
2794 	     laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2795 		if (laddr->ifa == NULL) {
2796 			/* address has been removed */
2797 			continue;
2798 		}
2799 		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2800 			/* address is being deleted */
2801 			continue;
2802 		}
2803 		sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
2804 						   dest_is_priv, fam);
2805 		if (sifa == NULL)
2806 			continue;
2807 		if (((non_asoc_addr_ok == 0) &&
2808 		     (sctp_is_addr_restricted(stcb, sifa))) ||
2809 		    (non_asoc_addr_ok &&
2810 		     (sctp_is_addr_restricted(stcb, sifa)) &&
2811 		     (!sctp_is_addr_pending(stcb, sifa)))) {
2812 			/* on the no-no list */
2813 			continue;
2814 		}
2815 		stcb->asoc.last_used_address = laddr;
2816 		atomic_add_int(&sifa->refcount, 1);
2817 		return (sifa);
2818 	}
2819 	if (start_at_beginning == 0) {
2820 		stcb->asoc.last_used_address = NULL;
2821 		goto sctp_from_the_top2;
2822 	}
2823 	return (NULL);
2824 }
2825 
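/*
 * Illustrative sketch, not compiled: the restricted/pending test repeated in
 * the selection routines above and below reduces to one predicate.  The
 * helper name is hypothetical; the real code keeps the expression inline.
 *
 *   non_asoc_addr_ok  restricted  pending  -> usable?
 *         -               0          -     ->   yes
 *         0               1          -     ->   no
 *         1               1          1     ->   yes  (restricted only because an ASCONF add is pending)
 *         1               1          0     ->   no
 */
#if 0
static int
addr_usable_for_asoc(int non_asoc_addr_ok, int restricted, int pending)
{
	if (!restricted)
		return (1);			/* not on the restricted list at all */
	if (non_asoc_addr_ok && pending)
		return (1);			/* caller allows it and it is merely pending */
	return (0);
}
#endif
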
2826 static struct sctp_ifa *
2827 sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn,
2828 #if defined(__FreeBSD__) && !defined(__Userspace__)
2829                                                  struct sctp_inpcb *inp,
2830 #else
2831                                                  struct sctp_inpcb *inp SCTP_UNUSED,
2832 #endif
2833                                                  struct sctp_tcb *stcb,
2834                                                  int non_asoc_addr_ok,
2835                                                  uint8_t dest_is_loop,
2836                                                  uint8_t dest_is_priv,
2837                                                  int addr_wanted,
2838                                                  sa_family_t fam,
2839                                                  sctp_route_t *ro)
2840 {
2841 	struct sctp_ifa *ifa, *sifa;
2842 	int num_eligible_addr = 0;
2843 #ifdef INET6
2844 #ifdef SCTP_EMBEDDED_V6_SCOPE
2845 	struct sockaddr_in6 sin6, lsa6;
2846 
2847 	if (fam == AF_INET6) {
2848 		memcpy(&sin6, &ro->ro_dst, sizeof(struct sockaddr_in6));
2849 #ifdef SCTP_KAME
2850 		(void)sa6_recoverscope(&sin6);
2851 #else
2852 		(void)in6_recoverscope(&sin6, &sin6.sin6_addr, NULL);
2853 #endif  /* SCTP_KAME */
2854 	}
2855 #endif  /* SCTP_EMBEDDED_V6_SCOPE */
2856 #endif	/* INET6 */
2857 	LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
2858 #if defined(__FreeBSD__) && !defined(__Userspace__)
2859 #ifdef INET
2860 		if ((ifa->address.sa.sa_family == AF_INET) &&
2861 		    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2862 		                      &ifa->address.sin.sin_addr) != 0)) {
2863 			continue;
2864 		}
2865 #endif
2866 #ifdef INET6
2867 		if ((ifa->address.sa.sa_family == AF_INET6) &&
2868 		    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2869 		                      &ifa->address.sin6.sin6_addr) != 0)) {
2870 			continue;
2871 		}
2872 #endif
2873 #endif
2874 		if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2875 		    (non_asoc_addr_ok == 0))
2876 			continue;
2877 		sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
2878 						  dest_is_priv, fam);
2879 		if (sifa == NULL)
2880 			continue;
2881 #ifdef INET6
2882 		if (fam == AF_INET6 &&
2883 		    dest_is_loop &&
2884 		    sifa->src_is_loop && sifa->src_is_priv) {
2885 			/* don't allow fe80::1 to be a source on loopback ::1; we don't
2886 			 * list it to the peer, so using it would get us an abort.
2887 			 */
2888 			continue;
2889 		}
2890 #ifdef SCTP_EMBEDDED_V6_SCOPE
2891 		if (fam == AF_INET6 &&
2892 		    IN6_IS_ADDR_LINKLOCAL(&sifa->address.sin6.sin6_addr) &&
2893 		    IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) {
2894 			/* link-local <-> link-local must belong to the same scope. */
2895 			memcpy(&lsa6, &sifa->address.sin6, sizeof(struct sockaddr_in6));
2896 #ifdef SCTP_KAME
2897 			(void)sa6_recoverscope(&lsa6);
2898 #else
2899 			(void)in6_recoverscope(&lsa6, &lsa6.sin6_addr, NULL);
2900 #endif  /* SCTP_KAME */
2901 			if (sin6.sin6_scope_id != lsa6.sin6_scope_id) {
2902 				continue;
2903 			}
2904 		}
2905 #endif  /* SCTP_EMBEDDED_V6_SCOPE */
2906 #endif	/* INET6 */
2907 
2908 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Userspace__)
2909 		/* Check if the IPv6 address matches the next-hop.
2910 		   In the mobility case, an old IPv6 address may not have been
2911 		   deleted from the interface, so the interface carries both the
2912 		   previous and the new addresses.  We should use the one
2913 		   corresponding to the next-hop.  (by micchie)
2914 		 */
2915 #ifdef INET6
2916 		if (stcb && fam == AF_INET6 &&
2917 		    sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
2918 			if (sctp_v6src_match_nexthop(&sifa->address.sin6, ro)
2919 			    == 0) {
2920 				continue;
2921 			}
2922 		}
2923 #endif
2924 #ifdef INET
2925 		/* Avoid topologically incorrect IPv4 address */
2926 		if (stcb && fam == AF_INET &&
2927 		    sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
2928 			if (sctp_v4src_match_nexthop(sifa, ro) == 0) {
2929 				continue;
2930 			}
2931 		}
2932 #endif
2933 #endif
2934 		if (stcb) {
2935 			if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
2936 				continue;
2937 			}
2938 			if (((non_asoc_addr_ok == 0) &&
2939 			     (sctp_is_addr_restricted(stcb, sifa))) ||
2940 			    (non_asoc_addr_ok &&
2941 			     (sctp_is_addr_restricted(stcb, sifa)) &&
2942 			     (!sctp_is_addr_pending(stcb, sifa)))) {
2943 				/*
2944 				 * It is restricted for some reason,
2945 				 * probably not yet added.
2946 				 */
2947 				continue;
2948 			}
2949 		}
2950 		if (num_eligible_addr >= addr_wanted) {
2951 			return (sifa);
2952 		}
2953 		num_eligible_addr++;
2954 	}
2955 	return (NULL);
2956 }
2957 
2958 static int
2959 sctp_count_num_preferred_boundall(struct sctp_ifn *ifn,
2960 #if defined(__FreeBSD__) && !defined(__Userspace__)
2961                                   struct sctp_inpcb *inp,
2962 #else
2963                                   struct sctp_inpcb *inp SCTP_UNUSED,
2964 #endif
2965 				  struct sctp_tcb *stcb,
2966 				  int non_asoc_addr_ok,
2967 				  uint8_t dest_is_loop,
2968 				  uint8_t dest_is_priv,
2969 				  sa_family_t fam)
2970 {
2971 	struct sctp_ifa *ifa, *sifa;
2972 	int num_eligible_addr = 0;
2973 
2974 	LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
2975 #if defined(__FreeBSD__) && !defined(__Userspace__)
2976 #ifdef INET
2977 		if ((ifa->address.sa.sa_family == AF_INET) &&
2978 		    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2979 		                      &ifa->address.sin.sin_addr) != 0)) {
2980 			continue;
2981 		}
2982 #endif
2983 #ifdef INET6
2984 		if ((ifa->address.sa.sa_family == AF_INET6) &&
2985 		    (stcb != NULL) &&
2986 		    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2987 		                      &ifa->address.sin6.sin6_addr) != 0)) {
2988 			continue;
2989 		}
2990 #endif
2991 #endif
2992 		if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2993 		    (non_asoc_addr_ok == 0)) {
2994 			continue;
2995 		}
2996 		sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
2997 						  dest_is_priv, fam);
2998 		if (sifa == NULL) {
2999 			continue;
3000 		}
3001 		if (stcb) {
3002 			if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
3003 				continue;
3004 			}
3005 			if (((non_asoc_addr_ok == 0) &&
3006 			     (sctp_is_addr_restricted(stcb, sifa))) ||
3007 			    (non_asoc_addr_ok &&
3008 			     (sctp_is_addr_restricted(stcb, sifa)) &&
3009 			     (!sctp_is_addr_pending(stcb, sifa)))) {
3010 				/*
3011 				 * It is restricted for some reason,
3012 				 * probably not yet added.
3013 				 */
3014 				continue;
3015 			}
3016 		}
3017 		num_eligible_addr++;
3018 	}
3019 	return (num_eligible_addr);
3020 }
3021 
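/*
 * Illustrative sketch, not compiled: the caller pairs the two routines above.
 * It counts the preferred addresses on an interface, wraps the
 * per-destination rotation index into that range, and then asks for the n-th
 * eligible entry.  Names are hypothetical; only the wrap/select pattern is
 * shown.
 */
#if 0
static int
next_round_robin_index(int *rotation, int num_preferred)
{
	int idx;

	if (num_preferred <= 0)
		return (-1);			/* nothing eligible on this ifn */
	if (*rotation >= num_preferred)
		*rotation = 0;			/* list shrank since last time: wrap */
	idx = *rotation;
	*rotation = idx + 1;			/* remember where to continue next call */
	return (idx);
}
#endif
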
3022 static struct sctp_ifa *
3023 sctp_choose_boundall(struct sctp_inpcb *inp,
3024                      struct sctp_tcb *stcb,
3025 		     struct sctp_nets *net,
3026 		     sctp_route_t *ro,
3027 		     uint32_t vrf_id,
3028 		     uint8_t dest_is_priv,
3029 		     uint8_t dest_is_loop,
3030 		     int non_asoc_addr_ok,
3031 		     sa_family_t fam)
3032 {
3033 	int cur_addr_num = 0, num_preferred = 0;
3034 	void *ifn;
3035 	struct sctp_ifn *sctp_ifn, *looked_at = NULL, *emit_ifn;
3036 	struct sctp_ifa *sctp_ifa, *sifa;
3037 	uint32_t ifn_index;
3038 	struct sctp_vrf *vrf;
3039 #ifdef INET
3040 	int retried = 0;
3041 #endif
3042 
3043 	/*-
3044 	 * For boundall we can use any address in the association.
3045 	 * If non_asoc_addr_ok is set we can use any address (at least in
3046 	 * theory). So we look for preferred addresses first. If we find one,
3047 	 * we use it. Otherwise we next try to get an address on the
3048 	 * interface, which we should be able to do (unless non_asoc_addr_ok
3049 	 * is false and we are routed out that way). In these cases where we
3050 	 * can't use the address of the interface we go through all the
3051 	 * ifn's looking for an address we can use and fill that in. Punting
3052 	 * means we send back address 0, which will probably cause problems
3053 	 * actually since then IP will fill in the address of the route ifn,
3054 	 * which means we probably already rejected it, i.e. here comes an
3055 	 * abort :-<.
3056 	 */
3057 	vrf = sctp_find_vrf(vrf_id);
3058 	if (vrf == NULL)
3059 		return (NULL);
3060 
3061 	ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
3062 	ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
3063 	SCTPDBG(SCTP_DEBUG_OUTPUT2,"ifn from route:%p ifn_index:%d\n", ifn, ifn_index);
3064 	emit_ifn = looked_at = sctp_ifn = sctp_find_ifn(ifn, ifn_index);
3065 	if (sctp_ifn == NULL) {
3066 		/* ?? We don't have this guy ?? */
3067 		SCTPDBG(SCTP_DEBUG_OUTPUT2,"No ifn emit interface?\n");
3068 		goto bound_all_plan_b;
3069 	}
3070 	SCTPDBG(SCTP_DEBUG_OUTPUT2,"ifn_index:%d name:%s is emit interface\n",
3071 		ifn_index, sctp_ifn->ifn_name);
3072 
3073 	if (net) {
3074 		cur_addr_num = net->indx_of_eligible_next_to_use;
3075 	}
3076 	num_preferred = sctp_count_num_preferred_boundall(sctp_ifn,
3077 							  inp, stcb,
3078 							  non_asoc_addr_ok,
3079 							  dest_is_loop,
3080 							  dest_is_priv, fam);
3081 	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Found %d preferred source addresses for intf:%s\n",
3082 		num_preferred, sctp_ifn->ifn_name);
3083 	if (num_preferred == 0) {
3084 		/*
3085 		 * no eligible addresses, we must use some other interface
3086 		 * address if we can find one.
3087 		 */
3088 		goto bound_all_plan_b;
3089 	}
3090 	/*
3091 	 * Ok, we have num_preferred set with how many addresses we can use;
3092 	 * this may vary from call to call due to addresses being deprecated,
3093 	 * etc.
3094 	 */
3095 	if (cur_addr_num >= num_preferred) {
3096 		cur_addr_num = 0;
3097 	}
3098 	/*
3099 	 * select the nth address from the list (where cur_addr_num is the
3100 	 * nth) and 0 is the first one, 1 is the second one etc...
3101 	 */
3102 	SCTPDBG(SCTP_DEBUG_OUTPUT2, "cur_addr_num:%d\n", cur_addr_num);
3103 
3104 	sctp_ifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok, dest_is_loop,
3105                                                                     dest_is_priv, cur_addr_num, fam, ro);
3106 
3107 	/* if sctp_ifa is NULL, something changed; fall back to plan B. */
3108 	if (sctp_ifa) {
3109 		atomic_add_int(&sctp_ifa->refcount, 1);
3110 		if (net) {
3111 			/* save off the index of the next one we will want */
3112 			net->indx_of_eligible_next_to_use = cur_addr_num + 1;
3113 		}
3114 		return (sctp_ifa);
3115 	}
3116 	/*
3117 	 * plan_b: Look at all interfaces and find a preferred address. If
3118 	 * no preferred fall through to plan_c.
3119 	 */
3120  bound_all_plan_b:
3121 	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan B\n");
3122 	LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3123 		SCTPDBG(SCTP_DEBUG_OUTPUT2, "Examine interface %s\n",
3124 			sctp_ifn->ifn_name);
3125 		if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3126 			/* wrong base scope */
3127 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "skip\n");
3128 			continue;
3129 		}
3130 		if ((sctp_ifn == looked_at) && looked_at) {
3131 			/* already looked at this guy */
3132 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "already seen\n");
3133 			continue;
3134 		}
3135 		num_preferred = sctp_count_num_preferred_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok,
3136                                                                   dest_is_loop, dest_is_priv, fam);
3137 		SCTPDBG(SCTP_DEBUG_OUTPUT2,
3138 			"Found ifn:%p %d preferred source addresses\n",
3139 			ifn, num_preferred);
3140 		if (num_preferred == 0) {
3141 			/* None on this interface. */
3142 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "No preferred -- skipping to next\n");
3143 			continue;
3144 		}
3145 		SCTPDBG(SCTP_DEBUG_OUTPUT2,
3146 			"num preferred:%d on interface:%p cur_addr_num:%d\n",
3147 			num_preferred, (void *)sctp_ifn, cur_addr_num);
3148 
3149 		/*
3150 		 * Ok, we have num_preferred set with how many addresses
3151 		 * we can use; this may vary from call to call due to
3152 		 * addresses being deprecated, etc.
3153 		 */
3154 		if (cur_addr_num >= num_preferred) {
3155 			cur_addr_num = 0;
3156 		}
3157 		sifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok, dest_is_loop,
3158                                                                         dest_is_priv, cur_addr_num, fam, ro);
3159 		if (sifa == NULL)
3160 			continue;
3161 		if (net) {
3162 			net->indx_of_eligible_next_to_use = cur_addr_num + 1;
3163 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "we selected %d\n",
3164 				cur_addr_num);
3165 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "Source:");
3166 			SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
3167 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "Dest:");
3168 			SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &net->ro._l_addr.sa);
3169 		}
3170 		atomic_add_int(&sifa->refcount, 1);
3171 		return (sifa);
3172 	}
3173 #ifdef INET
3174 again_with_private_addresses_allowed:
3175 #endif
3176 	/* plan_c: do we have an acceptable address on the emit interface */
3177 	sifa = NULL;
3178 	SCTPDBG(SCTP_DEBUG_OUTPUT2,"Trying Plan C: find acceptable on interface\n");
3179 	if (emit_ifn == NULL) {
3180 		SCTPDBG(SCTP_DEBUG_OUTPUT2,"Jump to Plan D - no emit_ifn\n");
3181 		goto plan_d;
3182 	}
3183 	LIST_FOREACH(sctp_ifa, &emit_ifn->ifalist, next_ifa) {
3184 		SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifa:%p\n", (void *)sctp_ifa);
3185 #if defined(__FreeBSD__) && !defined(__Userspace__)
3186 #ifdef INET
3187 		if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3188 		    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3189 		                      &sctp_ifa->address.sin.sin_addr) != 0)) {
3190 			SCTPDBG(SCTP_DEBUG_OUTPUT2,"Jailed\n");
3191 			continue;
3192 		}
3193 #endif
3194 #ifdef INET6
3195 		if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3196 		    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3197 		                      &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3198 			SCTPDBG(SCTP_DEBUG_OUTPUT2,"Jailed\n");
3199 			continue;
3200 		}
3201 #endif
3202 #endif
3203 		if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3204 		    (non_asoc_addr_ok == 0)) {
3205 			SCTPDBG(SCTP_DEBUG_OUTPUT2,"Defer\n");
3206 			continue;
3207 		}
3208 		sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop,
3209 						   dest_is_priv, fam);
3210 		if (sifa == NULL) {
3211 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "IFA not acceptable\n");
3212 			continue;
3213 		}
3214 		if (stcb) {
3215 			if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) {
3216 				SCTPDBG(SCTP_DEBUG_OUTPUT2, "NOT in scope\n");
3217 				sifa = NULL;
3218 				continue;
3219 			}
3220 			if (((non_asoc_addr_ok == 0) &&
3221 			     (sctp_is_addr_restricted(stcb, sifa))) ||
3222 			    (non_asoc_addr_ok &&
3223 			     (sctp_is_addr_restricted(stcb, sifa)) &&
3224 			     (!sctp_is_addr_pending(stcb, sifa)))) {
3225 				/*
3226 				 * It is restricted for some
3227 				 * reason, probably not yet added.
3228 				 */
3229 				SCTPDBG(SCTP_DEBUG_OUTPUT2, "Its restricted\n");
3230 				sifa = NULL;
3231 				continue;
3232 			}
3233 		}
3234 		atomic_add_int(&sifa->refcount, 1);
3235 		goto out;
3236 	}
3237  plan_d:
3238 	/*
3239 	 * plan_d: We are in trouble. No acceptable address on the emit
3240 	 * interface, and no preferred address on any interface.
3241 	 * Go out and see if we can find an acceptable address somewhere
3242 	 * amongst all interfaces.
3243 	 */
3244 	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan D looked_at is %p\n", (void *)looked_at);
3245 	LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3246 		if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3247 			/* wrong base scope */
3248 			continue;
3249 		}
3250 		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
3251 #if defined(__FreeBSD__) && !defined(__Userspace__)
3252 #ifdef INET
3253 			if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3254 			    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3255 			                      &sctp_ifa->address.sin.sin_addr) != 0)) {
3256 				continue;
3257 			}
3258 #endif
3259 #ifdef INET6
3260 			if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3261 			    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3262 			                      &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3263 				continue;
3264 			}
3265 #endif
3266 #endif
3267 			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3268 			    (non_asoc_addr_ok == 0))
3269 				continue;
3270 			sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
3271 							   dest_is_loop,
3272 							   dest_is_priv, fam);
3273 			if (sifa == NULL)
3274 				continue;
3275 			if (stcb) {
3276 				if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) {
3277 					sifa = NULL;
3278 					continue;
3279 				}
3280 				if (((non_asoc_addr_ok == 0) &&
3281 				     (sctp_is_addr_restricted(stcb, sifa))) ||
3282 				    (non_asoc_addr_ok &&
3283 				     (sctp_is_addr_restricted(stcb, sifa)) &&
3284 				     (!sctp_is_addr_pending(stcb, sifa)))) {
3285 					/*
3286 					 * It is restricted for some
3287 					 * reason, probably not yet added.
3288 					 */
3289 					sifa = NULL;
3290 					continue;
3291 				}
3292 			}
3293 			goto out;
3294 		}
3295 	}
3296 #ifdef INET
3297 	if (stcb) {
3298 		if ((retried == 0) && (stcb->asoc.scope.ipv4_local_scope == 0)) {
3299 			stcb->asoc.scope.ipv4_local_scope = 1;
3300 			retried = 1;
3301 			goto again_with_private_addresses_allowed;
3302 		} else if (retried == 1) {
3303 			stcb->asoc.scope.ipv4_local_scope = 0;
3304 		}
3305 	}
3306 #endif
3307 out:
3308 #ifdef INET
3309 	if (sifa) {
3310 		if (retried == 1) {
3311 			LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3312 				if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3313 					/* wrong base scope */
3314 					continue;
3315 				}
3316 				LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
3317 					struct sctp_ifa *tmp_sifa;
3318 
3319 #if defined(__FreeBSD__) && !defined(__Userspace__)
3320 #ifdef INET
3321 					if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3322 					    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3323 					                      &sctp_ifa->address.sin.sin_addr) != 0)) {
3324 						continue;
3325 					}
3326 #endif
3327 #ifdef INET6
3328 					if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3329 					    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3330 					                      &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3331 						continue;
3332 					}
3333 #endif
3334 #endif
3335 					if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3336 					    (non_asoc_addr_ok == 0))
3337 						continue;
3338 					tmp_sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
3339 					                                       dest_is_loop,
3340 					                                       dest_is_priv, fam);
3341 					if (tmp_sifa == NULL) {
3342 						continue;
3343 					}
3344 					if (tmp_sifa == sifa) {
3345 						continue;
3346 					}
3347 					if (stcb) {
3348 						if (sctp_is_address_in_scope(tmp_sifa,
3349 						                             &stcb->asoc.scope, 0) == 0) {
3350 							continue;
3351 						}
3352 						if (((non_asoc_addr_ok == 0) &&
3353 						     (sctp_is_addr_restricted(stcb, tmp_sifa))) ||
3354 						    (non_asoc_addr_ok &&
3355 						     (sctp_is_addr_restricted(stcb, tmp_sifa)) &&
3356 						     (!sctp_is_addr_pending(stcb, tmp_sifa)))) {
3357 							/*
3358 							 * It is restricted for some
3359 							 * reason, probably not yet added.
3360 							 */
3361 							continue;
3362 						}
3363 					}
3364 					if ((tmp_sifa->address.sin.sin_family == AF_INET) &&
3365 					    (IN4_ISPRIVATE_ADDRESS(&(tmp_sifa->address.sin.sin_addr)))) {
3366 						sctp_add_local_addr_restricted(stcb, tmp_sifa);
3367 					}
3368 				}
3369 			}
3370 		}
3371 		atomic_add_int(&sifa->refcount, 1);
3372 	}
3373 #endif
3374 	return (sifa);
3375 }
3376 
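/*
 * Illustrative sketch, not compiled: the fallback order implemented by
 * sctp_choose_boundall() above.  The stage functions are hypothetical stubs;
 * only the cascade structure is shown.
 */
#if 0
#include <stddef.h>

static const char *plan_a_preferred_on_emit_ifn(void)  { return (NULL); }	/* stub */
static const char *plan_b_preferred_on_any_ifn(void)   { return (NULL); }	/* stub */
static const char *plan_c_acceptable_on_emit_ifn(void) { return (NULL); }	/* stub */
static const char *plan_d_acceptable_on_any_ifn(void)  { return (NULL); }	/* stub */

static const char *
boundall_cascade(void)
{
	const char *addr;

	if ((addr = plan_a_preferred_on_emit_ifn()) != NULL)
		return (addr);			/* plan A: preferred on the emit interface */
	if ((addr = plan_b_preferred_on_any_ifn()) != NULL)
		return (addr);			/* plan B: preferred on any interface */
	if ((addr = plan_c_acceptable_on_emit_ifn()) != NULL)
		return (addr);			/* plan C: acceptable on the emit interface */
	return (plan_d_acceptable_on_any_ifn());	/* plan D: acceptable anywhere; may be
							 * retried once with IPv4 private scope
							 * enabled */
}
#endif
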
3377 /* tcb may be NULL */
3378 struct sctp_ifa *
3379 sctp_source_address_selection(struct sctp_inpcb *inp,
3380 			      struct sctp_tcb *stcb,
3381 			      sctp_route_t *ro,
3382 			      struct sctp_nets *net,
3383 			      int non_asoc_addr_ok, uint32_t vrf_id)
3384 {
3385 	struct sctp_ifa *answer;
3386 	uint8_t dest_is_priv, dest_is_loop;
3387 	sa_family_t fam;
3388 #ifdef INET
3389 	struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst;
3390 #endif
3391 #ifdef INET6
3392 	struct sockaddr_in6 *to6 = (struct sockaddr_in6 *)&ro->ro_dst;
3393 #endif
3394 
3395 	/**
3396 	 * Rules:
3397 	 * - Find the route if needed, cache if I can.
3398 	 * - Look at interface address in route, Is it in the bound list. If so we
3399 	 *   have the best source.
3400 	 * - If not we must rotate amongst the addresses.
3401 	 *
3402 	 * Caveats and issues
3403 	 *
3404 	 * Do we need to pay attention to scope? We can have a private address
3405 	 * or a global address that we are sourcing from or sending to. So if
3406 	 * we draw it out:
3407 	 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3408 	 * For V4
3409 	 * ------------------------------------------
3410 	 *      source     *      dest  *  result
3411 	 * -----------------------------------------
3412 	 * <a>  Private    *    Global  *  NAT
3413 	 * -----------------------------------------
3414 	 * <b>  Private    *    Private *  No problem
3415 	 * -----------------------------------------
3416 	 * <c>  Global     *    Private *  Huh, How will this work?
3417 	 * -----------------------------------------
3418 	 * <d>  Global     *    Global  *  No Problem
3419 	 *------------------------------------------
3420 	 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3421 	 * For V6
3422 	 *------------------------------------------
3423 	 *      source     *      dest  *  result
3424 	 * -----------------------------------------
3425 	 * <a>  Linklocal  *    Global  *
3426 	 * -----------------------------------------
3427 	 * <b>  Linklocal  * Linklocal  *  No problem
3428 	 * -----------------------------------------
3429 	 * <c>  Global     * Linklocal  *  Huh, How will this work?
3430 	 * -----------------------------------------
3431 	 * <d>  Global     *    Global  *  No Problem
3432 	 *------------------------------------------
3433 	 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3434 	 *
3435 	 * And then we add to that what happens if there are multiple addresses
3436 	 * assigned to an interface. Remember the ifa on a ifn is a linked
3437 	 * list of addresses. So one interface can have more than one IP
3438 	 * address. What happens if we have both a private and a global
3439 	 * address? Do we then use the context of the destination to sort out
3440 	 * which one is best? And what about NATs? Sending P->G may get you a
3441 	 * NAT translation, or should you select the G that's on the interface
3442 	 * in preference?
3443 	 *
3444 	 * Decisions:
3445 	 *
3446 	 * - count the number of addresses on the interface.
3447 	 * - if it is one, no problem except case <c>.
3448 	 *   For <a> we will assume a NAT out there.
3449 	 * - if there are more than one, then we need to worry about scope P
3450 	 *   or G. We should prefer G -> G and P -> P if possible.
3451 	 *   Then as a secondary fall back to mixed types G->P being a last
3452 	 *   ditch one.
3453 	 * - The above all works for bound all, but bound specific we need to
3454 	 *   use the same concept but instead only consider the bound
3455 	 *   addresses. If the bound set is NOT assigned to the interface then
3456 	 *   we must use rotation amongst the bound addresses..
3457 	 */
3458 #if defined(__FreeBSD__) && !defined(__Userspace__)
3459 	if (ro->ro_nh == NULL) {
3460 #else
3461 	if (ro->ro_rt == NULL) {
3462 #endif
3463 		/*
3464 		 * Need a route to cache.
3465 		 */
3466 		SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
3467 	}
3468 #if defined(__FreeBSD__) && !defined(__Userspace__)
3469 	if (ro->ro_nh == NULL) {
3470 #else
3471 	if (ro->ro_rt == NULL) {
3472 #endif
3473 		return (NULL);
3474 	}
3475 #if defined(_WIN32)
3476 	/* On Windows the sa_family is U_SHORT or ADDRESS_FAMILY */
3477 	fam = (sa_family_t)ro->ro_dst.sa_family;
3478 #else
3479 	fam = ro->ro_dst.sa_family;
3480 #endif
3481 	dest_is_priv = dest_is_loop = 0;
3482 	/* Setup our scopes for the destination */
3483 	switch (fam) {
3484 #ifdef INET
3485 	case AF_INET:
3486 		/* Scope based on outbound address */
3487 		if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
3488 			dest_is_loop = 1;
3489 			if (net != NULL) {
3490 				/* mark it as local */
3491 				net->addr_is_local = 1;
3492 			}
3493 		} else if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) {
3494 			dest_is_priv = 1;
3495 		}
3496 		break;
3497 #endif
3498 #ifdef INET6
3499 	case AF_INET6:
3500 		/* Scope based on outbound address */
3501 #if defined(_WIN32)
3502 		if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr)) {
3503 #else
3504 		if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr) ||
3505 		    SCTP_ROUTE_IS_REAL_LOOP(ro)) {
3506 #endif
3507 			/*
3508 			 * If the address is a loopback address, i.e.
3509 			 * "::1" or "fe80::1%lo0", we are in loopback
3510 			 * scope. But we don't set dest_is_priv (link-local
3511 			 * addresses).
3512 			 */
3513 			dest_is_loop = 1;
3514 			if (net != NULL) {
3515 				/* mark it as local */
3516 				net->addr_is_local = 1;
3517 			}
3518 		} else if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
3519 			dest_is_priv = 1;
3520 		}
3521 		break;
3522 #endif
3523 	}
3524 	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Select source addr for:");
3525 	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&ro->ro_dst);
3526 	SCTP_IPI_ADDR_RLOCK();
3527 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3528 		/*
3529 		 * Bound all case
3530 		 */
3531 		answer = sctp_choose_boundall(inp, stcb, net, ro, vrf_id,
3532 					      dest_is_priv, dest_is_loop,
3533 					      non_asoc_addr_ok, fam);
3534 		SCTP_IPI_ADDR_RUNLOCK();
3535 		return (answer);
3536 	}
3537 	/*
3538 	 * Subset bound case
3539 	 */
3540 	if (stcb) {
3541 		answer = sctp_choose_boundspecific_stcb(inp, stcb, ro,
3542 							vrf_id,	dest_is_priv,
3543 							dest_is_loop,
3544 							non_asoc_addr_ok, fam);
3545 	} else {
3546 		answer = sctp_choose_boundspecific_inp(inp, ro, vrf_id,
3547 						       non_asoc_addr_ok,
3548 						       dest_is_priv,
3549 						       dest_is_loop, fam);
3550 	}
3551 	SCTP_IPI_ADDR_RUNLOCK();
3552 	return (answer);
3553 }
3554 
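/*
 * Illustrative sketch, not compiled: how the destination-scope flags used by
 * sctp_source_address_selection() can be derived from a sockaddr.  This is a
 * plain userland rendering with hypothetical names; the real code uses the
 * IN4_ISLOOPBACK_ADDRESS()/IN4_ISPRIVATE_ADDRESS() macros and also consults
 * the route for the IPv6 loopback case.
 */
#if 0
#include <stdint.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

struct dest_scope {
	int is_loop;
	int is_priv;
};

static struct dest_scope
classify_dest(const struct sockaddr *sa)
{
	struct dest_scope sc = { 0, 0 };

	if (sa->sa_family == AF_INET) {
		const struct sockaddr_in *sin = (const struct sockaddr_in *)sa;
		uint32_t a = ntohl(sin->sin_addr.s_addr);

		if ((a >> 24) == 127)				/* 127.0.0.0/8 */
			sc.is_loop = 1;
		else if ((a >> 24) == 10 ||			/* 10/8 */
		    (a >> 20) == 0xac1 ||			/* 172.16/12 */
		    (a >> 16) == 0xc0a8)			/* 192.168/16 */
			sc.is_priv = 1;
	} else if (sa->sa_family == AF_INET6) {
		const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)sa;

		if (IN6_IS_ADDR_LOOPBACK(&sin6->sin6_addr))
			sc.is_loop = 1;
		else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))
			sc.is_priv = 1;
	}
	return (sc);
}
#endif
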
3555 static int
3556 sctp_find_cmsg(int c_type, void *data, struct mbuf *control, size_t cpsize)
3557 {
3558 #if defined(_WIN32)
3559 	WSACMSGHDR cmh;
3560 #else
3561 	struct cmsghdr cmh;
3562 #endif
3563 	struct sctp_sndinfo sndinfo;
3564 	struct sctp_prinfo prinfo;
3565 	struct sctp_authinfo authinfo;
3566 	int tot_len, rem_len, cmsg_data_len, cmsg_data_off, off;
3567 	int found;
3568 
3569 	/*
3570 	 * Independent of how many mbufs, find the c_type inside the control
3571 	 * structure and copy out the data.
3572 	 */
3573 	found = 0;
3574 	tot_len = SCTP_BUF_LEN(control);
3575 	for (off = 0; off < tot_len; off += CMSG_ALIGN(cmh.cmsg_len)) {
3576 		rem_len = tot_len - off;
3577 		if (rem_len < (int)CMSG_ALIGN(sizeof(cmh))) {
3578 			/* There is not enough room for one more. */
3579 			return (found);
3580 		}
3581 		m_copydata(control, off, sizeof(cmh), (caddr_t)&cmh);
3582 		if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3583 			/* We don't have a complete CMSG header. */
3584 			return (found);
3585 		}
3586 		if ((cmh.cmsg_len > INT_MAX) || ((int)cmh.cmsg_len > rem_len)) {
3587 			/* We don't have the complete CMSG. */
3588 			return (found);
3589 		}
3590 		cmsg_data_len = (int)cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh));
3591 		cmsg_data_off = off + CMSG_ALIGN(sizeof(cmh));
3592 		if ((cmh.cmsg_level == IPPROTO_SCTP) &&
3593 		    ((c_type == cmh.cmsg_type) ||
3594 		     ((c_type == SCTP_SNDRCV) &&
3595 		      ((cmh.cmsg_type == SCTP_SNDINFO) ||
3596 		       (cmh.cmsg_type == SCTP_PRINFO) ||
3597 		       (cmh.cmsg_type == SCTP_AUTHINFO))))) {
3598 			if (c_type == cmh.cmsg_type) {
3599 				if (cpsize > INT_MAX) {
3600 					return (found);
3601 				}
3602 				if (cmsg_data_len < (int)cpsize) {
3603 					return (found);
3604 				}
3605 				/* It is exactly what we want. Copy it out. */
3606 				m_copydata(control, cmsg_data_off, (int)cpsize, (caddr_t)data);
3607 				return (1);
3608 			} else {
3609 				struct sctp_sndrcvinfo *sndrcvinfo;
3610 
3611 				sndrcvinfo = (struct sctp_sndrcvinfo *)data;
3612 				if (found == 0) {
3613 					if (cpsize < sizeof(struct sctp_sndrcvinfo)) {
3614 						return (found);
3615 					}
3616 					memset(sndrcvinfo, 0, sizeof(struct sctp_sndrcvinfo));
3617 				}
3618 				switch (cmh.cmsg_type) {
3619 				case SCTP_SNDINFO:
3620 					if (cmsg_data_len < (int)sizeof(struct sctp_sndinfo)) {
3621 						return (found);
3622 					}
3623 					m_copydata(control, cmsg_data_off, sizeof(struct sctp_sndinfo), (caddr_t)&sndinfo);
3624 					sndrcvinfo->sinfo_stream = sndinfo.snd_sid;
3625 					sndrcvinfo->sinfo_flags = sndinfo.snd_flags;
3626 					sndrcvinfo->sinfo_ppid = sndinfo.snd_ppid;
3627 					sndrcvinfo->sinfo_context = sndinfo.snd_context;
3628 					sndrcvinfo->sinfo_assoc_id = sndinfo.snd_assoc_id;
3629 					break;
3630 				case SCTP_PRINFO:
3631 					if (cmsg_data_len < (int)sizeof(struct sctp_prinfo)) {
3632 						return (found);
3633 					}
3634 					m_copydata(control, cmsg_data_off, sizeof(struct sctp_prinfo), (caddr_t)&prinfo);
3635 					if (prinfo.pr_policy != SCTP_PR_SCTP_NONE) {
3636 						sndrcvinfo->sinfo_timetolive = prinfo.pr_value;
3637 					} else {
3638 						sndrcvinfo->sinfo_timetolive = 0;
3639 					}
3640 					sndrcvinfo->sinfo_flags |= prinfo.pr_policy;
3641 					break;
3642 				case SCTP_AUTHINFO:
3643 					if (cmsg_data_len < (int)sizeof(struct sctp_authinfo)) {
3644 						return (found);
3645 					}
3646 					m_copydata(control, cmsg_data_off, sizeof(struct sctp_authinfo), (caddr_t)&authinfo);
3647 					sndrcvinfo->sinfo_keynumber_valid = 1;
3648 					sndrcvinfo->sinfo_keynumber = authinfo.auth_keynumber;
3649 					break;
3650 				default:
3651 					return (found);
3652 				}
3653 				found = 1;
3654 			}
3655 		}
3656 	}
3657 	return (found);
3658 }
3659 
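/*
 * Illustrative sketch, not compiled: the manual CMSG walk in sctp_find_cmsg()
 * above corresponds to the standard CMSG_* macros in userland.  It assumes an
 * RFC 6458 <netinet/sctp.h> providing SCTP_SNDINFO and struct sctp_sndinfo;
 * availability depends on the platform.
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

/* Returns 1 and fills *out if an SCTP_SNDINFO cmsg is present in msg. */
static int
find_sndinfo(struct msghdr *msg, struct sctp_sndinfo *out)
{
	struct cmsghdr *cmsg;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL;
	     cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (cmsg->cmsg_level == IPPROTO_SCTP &&
		    cmsg->cmsg_type == SCTP_SNDINFO &&
		    cmsg->cmsg_len >= CMSG_LEN(sizeof(struct sctp_sndinfo))) {
			memcpy(out, CMSG_DATA(cmsg), sizeof(struct sctp_sndinfo));
			return (1);
		}
	}
	return (0);
}
#endif
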
3660 static int
3661 sctp_process_cmsgs_for_init(struct sctp_tcb *stcb, struct mbuf *control, int *error)
3662 {
3663 #if defined(_WIN32)
3664 	WSACMSGHDR cmh;
3665 #else
3666 	struct cmsghdr cmh;
3667 #endif
3668 	struct sctp_initmsg initmsg;
3669 #ifdef INET
3670 	struct sockaddr_in sin;
3671 #endif
3672 #ifdef INET6
3673 	struct sockaddr_in6 sin6;
3674 #endif
3675 	int tot_len, rem_len, cmsg_data_len, cmsg_data_off, off;
3676 
3677 	tot_len = SCTP_BUF_LEN(control);
3678 	for (off = 0; off < tot_len; off += CMSG_ALIGN(cmh.cmsg_len)) {
3679 		rem_len = tot_len - off;
3680 		if (rem_len < (int)CMSG_ALIGN(sizeof(cmh))) {
3681 			/* There is not enough room for one more. */
3682 			*error = EINVAL;
3683 			return (1);
3684 		}
3685 		m_copydata(control, off, sizeof(cmh), (caddr_t)&cmh);
3686 		if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3687 			/* We don't have a complete CMSG header. */
3688 			*error = EINVAL;
3689 			return (1);
3690 		}
3691 		if ((cmh.cmsg_len > INT_MAX) || ((int)cmh.cmsg_len > rem_len)) {
3692 			/* We don't have the complete CMSG. */
3693 			*error = EINVAL;
3694 			return (1);
3695 		}
3696 		cmsg_data_len = (int)cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh));
3697 		cmsg_data_off = off + CMSG_ALIGN(sizeof(cmh));
3698 		if (cmh.cmsg_level == IPPROTO_SCTP) {
3699 			switch (cmh.cmsg_type) {
3700 			case SCTP_INIT:
3701 				if (cmsg_data_len < (int)sizeof(struct sctp_initmsg)) {
3702 					*error = EINVAL;
3703 					return (1);
3704 				}
3705 				m_copydata(control, cmsg_data_off, sizeof(struct sctp_initmsg), (caddr_t)&initmsg);
3706 				if (initmsg.sinit_max_attempts)
3707 					stcb->asoc.max_init_times = initmsg.sinit_max_attempts;
3708 				if (initmsg.sinit_num_ostreams)
3709 					stcb->asoc.pre_open_streams = initmsg.sinit_num_ostreams;
3710 				if (initmsg.sinit_max_instreams)
3711 					stcb->asoc.max_inbound_streams = initmsg.sinit_max_instreams;
3712 				if (initmsg.sinit_max_init_timeo)
3713 					stcb->asoc.initial_init_rto_max = initmsg.sinit_max_init_timeo;
3714 				if (stcb->asoc.streamoutcnt < stcb->asoc.pre_open_streams) {
3715 					struct sctp_stream_out *tmp_str;
3716 					unsigned int i;
3717 #if defined(SCTP_DETAILED_STR_STATS)
3718 					int j;
3719 #endif
3720 
3721 					/* Default is NOT correct */
3722 					SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, default:%d pre_open:%d\n",
3723 						stcb->asoc.streamoutcnt, stcb->asoc.pre_open_streams);
3724 					SCTP_TCB_UNLOCK(stcb);
3725 					SCTP_MALLOC(tmp_str,
3726 					            struct sctp_stream_out *,
3727 					            (stcb->asoc.pre_open_streams * sizeof(struct sctp_stream_out)),
3728 					            SCTP_M_STRMO);
3729 					SCTP_TCB_LOCK(stcb);
3730 					if (tmp_str != NULL) {
3731 						SCTP_FREE(stcb->asoc.strmout, SCTP_M_STRMO);
3732 						stcb->asoc.strmout = tmp_str;
3733 						stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt = stcb->asoc.pre_open_streams;
3734 					} else {
3735 						stcb->asoc.pre_open_streams = stcb->asoc.streamoutcnt;
3736 					}
3737 					for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3738 						TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
3739 						stcb->asoc.strmout[i].chunks_on_queues = 0;
3740 						stcb->asoc.strmout[i].next_mid_ordered = 0;
3741 						stcb->asoc.strmout[i].next_mid_unordered = 0;
3742 #if defined(SCTP_DETAILED_STR_STATS)
3743 						for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
3744 							stcb->asoc.strmout[i].abandoned_sent[j] = 0;
3745 							stcb->asoc.strmout[i].abandoned_unsent[j] = 0;
3746 						}
3747 #else
3748 						stcb->asoc.strmout[i].abandoned_sent[0] = 0;
3749 						stcb->asoc.strmout[i].abandoned_unsent[0] = 0;
3750 #endif
3751 						stcb->asoc.strmout[i].sid = i;
3752 						stcb->asoc.strmout[i].last_msg_incomplete = 0;
3753 						stcb->asoc.strmout[i].state = SCTP_STREAM_OPENING;
3754 						stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], NULL);
3755 					}
3756 				}
3757 				break;
3758 #ifdef INET
3759 			case SCTP_DSTADDRV4:
3760 				if (cmsg_data_len < (int)sizeof(struct in_addr)) {
3761 					*error = EINVAL;
3762 					return (1);
3763 				}
3764 				memset(&sin, 0, sizeof(struct sockaddr_in));
3765 				sin.sin_family = AF_INET;
3766 #ifdef HAVE_SIN_LEN
3767 				sin.sin_len = sizeof(struct sockaddr_in);
3768 #endif
3769 				sin.sin_port = stcb->rport;
3770 				m_copydata(control, cmsg_data_off, sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
3771 				if ((sin.sin_addr.s_addr == INADDR_ANY) ||
3772 				    (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
3773 				    IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
3774 					*error = EINVAL;
3775 					return (1);
3776 				}
3777 				if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL, stcb->asoc.port,
3778 				                         SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3779 					*error = ENOBUFS;
3780 					return (1);
3781 				}
3782 				break;
3783 #endif
3784 #ifdef INET6
3785 			case SCTP_DSTADDRV6:
3786 				if (cmsg_data_len < (int)sizeof(struct in6_addr)) {
3787 					*error = EINVAL;
3788 					return (1);
3789 				}
3790 				memset(&sin6, 0, sizeof(struct sockaddr_in6));
3791 				sin6.sin6_family = AF_INET6;
3792 #ifdef HAVE_SIN6_LEN
3793 				sin6.sin6_len = sizeof(struct sockaddr_in6);
3794 #endif
3795 				sin6.sin6_port = stcb->rport;
3796 				m_copydata(control, cmsg_data_off, sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
3797 				if (IN6_IS_ADDR_UNSPECIFIED(&sin6.sin6_addr) ||
3798 				    IN6_IS_ADDR_MULTICAST(&sin6.sin6_addr)) {
3799 					*error = EINVAL;
3800 					return (1);
3801 				}
3802 #ifdef INET
3803 				if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
3804 					in6_sin6_2_sin(&sin, &sin6);
3805 					if ((sin.sin_addr.s_addr == INADDR_ANY) ||
3806 					    (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
3807 					    IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
3808 						*error = EINVAL;
3809 						return (1);
3810 					}
3811 					if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL, stcb->asoc.port,
3812 					                         SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3813 						*error = ENOBUFS;
3814 						return (1);
3815 					}
3816 				} else
3817 #endif
3818 					if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin6, NULL, stcb->asoc.port,
3819 					                         SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3820 						*error = ENOBUFS;
3821 						return (1);
3822 					}
3823 				break;
3824 #endif
3825 			default:
3826 				break;
3827 			}
3828 		}
3829 	}
3830 	return (0);
3831 }
3832 
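/*
 * Illustrative sketch, not compiled: how an application would hand the
 * SCTP_INIT parameters consumed by sctp_process_cmsgs_for_init() above to
 * sendmsg() as ancillary data (RFC 6458).  The values and helper name are
 * illustrative only and error handling is omitted.
 */
#if 0
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

static ssize_t
send_with_init_params(int sd, struct sockaddr *to, socklen_t tolen,
    void *data, size_t len)
{
	union {
		char buf[CMSG_SPACE(sizeof(struct sctp_initmsg))];
		struct cmsghdr align;		/* force cmsghdr alignment */
	} cbuf;
	struct sctp_initmsg init;
	struct msghdr msg;
	struct iovec iov;
	struct cmsghdr *cmsg;

	memset(&init, 0, sizeof(init));
	init.sinit_num_ostreams = 10;		/* example values */
	init.sinit_max_instreams = 10;
	init.sinit_max_attempts = 4;

	iov.iov_base = data;
	iov.iov_len = len;
	memset(&msg, 0, sizeof(msg));
	msg.msg_name = to;
	msg.msg_namelen = tolen;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	memset(cbuf.buf, 0, sizeof(cbuf.buf));
	msg.msg_control = cbuf.buf;
	msg.msg_controllen = sizeof(cbuf.buf);

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = IPPROTO_SCTP;
	cmsg->cmsg_type = SCTP_INIT;
	cmsg->cmsg_len = CMSG_LEN(sizeof(init));
	memcpy(CMSG_DATA(cmsg), &init, sizeof(init));

	return (sendmsg(sd, &msg, 0));
}
#endif
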
3833 #if defined(INET) || defined(INET6)
3834 static struct sctp_tcb *
3835 sctp_findassociation_cmsgs(struct sctp_inpcb **inp_p,
3836                            uint16_t port,
3837                            struct mbuf *control,
3838                            struct sctp_nets **net_p,
3839                            int *error)
3840 {
3841 #if defined(_WIN32)
3842 	WSACMSGHDR cmh;
3843 #else
3844 	struct cmsghdr cmh;
3845 #endif
3846 	struct sctp_tcb *stcb;
3847 	struct sockaddr *addr;
3848 #ifdef INET
3849 	struct sockaddr_in sin;
3850 #endif
3851 #ifdef INET6
3852 	struct sockaddr_in6 sin6;
3853 #endif
3854 	int tot_len, rem_len, cmsg_data_len, cmsg_data_off, off;
3855 
3856 	tot_len = SCTP_BUF_LEN(control);
3857 	for (off = 0; off < tot_len; off += CMSG_ALIGN(cmh.cmsg_len)) {
3858 		rem_len = tot_len - off;
3859 		if (rem_len < (int)CMSG_ALIGN(sizeof(cmh))) {
3860 			/* There is not enough room for one more. */
3861 			*error = EINVAL;
3862 			return (NULL);
3863 		}
3864 		m_copydata(control, off, sizeof(cmh), (caddr_t)&cmh);
3865 		if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3866 			/* We don't have a complete CMSG header. */
3867 			*error = EINVAL;
3868 			return (NULL);
3869 		}
3870 		if ((cmh.cmsg_len > INT_MAX) || ((int)cmh.cmsg_len > rem_len)) {
3871 			/* We don't have the complete CMSG. */
3872 			*error = EINVAL;
3873 			return (NULL);
3874 		}
3875 		cmsg_data_len = (int)cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh));
3876 		cmsg_data_off = off + CMSG_ALIGN(sizeof(cmh));
3877 		if (cmh.cmsg_level == IPPROTO_SCTP) {
3878 			switch (cmh.cmsg_type) {
3879 #ifdef INET
3880 			case SCTP_DSTADDRV4:
3881 				if (cmsg_data_len < (int)sizeof(struct in_addr)) {
3882 					*error = EINVAL;
3883 					return (NULL);
3884 				}
3885 				memset(&sin, 0, sizeof(struct sockaddr_in));
3886 				sin.sin_family = AF_INET;
3887 #ifdef HAVE_SIN_LEN
3888 				sin.sin_len = sizeof(struct sockaddr_in);
3889 #endif
3890 				sin.sin_port = port;
3891 				m_copydata(control, cmsg_data_off, sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
3892 				addr = (struct sockaddr *)&sin;
3893 				break;
3894 #endif
3895 #ifdef INET6
3896 			case SCTP_DSTADDRV6:
3897 				if (cmsg_data_len < (int)sizeof(struct in6_addr)) {
3898 					*error = EINVAL;
3899 					return (NULL);
3900 				}
3901 				memset(&sin6, 0, sizeof(struct sockaddr_in6));
3902 				sin6.sin6_family = AF_INET6;
3903 #ifdef HAVE_SIN6_LEN
3904 				sin6.sin6_len = sizeof(struct sockaddr_in6);
3905 #endif
3906 				sin6.sin6_port = port;
3907 				m_copydata(control, cmsg_data_off, sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
3908 #ifdef INET
3909 				if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
3910 					in6_sin6_2_sin(&sin, &sin6);
3911 					addr = (struct sockaddr *)&sin;
3912 				} else
3913 #endif
3914 					addr = (struct sockaddr *)&sin6;
3915 				break;
3916 #endif
3917 			default:
3918 				addr = NULL;
3919 				break;
3920 			}
3921 			if (addr) {
3922 				stcb = sctp_findassociation_ep_addr(inp_p, addr, net_p, NULL, NULL);
3923 				if (stcb != NULL) {
3924 					return (stcb);
3925 				}
3926 			}
3927 		}
3928 	}
3929 	return (NULL);
3930 }
3931 #endif
3932 
3933 static struct mbuf *
3934 sctp_add_cookie(struct mbuf *init, int init_offset,
3935     struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in, uint8_t **signature)
3936 {
3937 	struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret;
3938 	struct sctp_state_cookie *stc;
3939 	struct sctp_paramhdr *ph;
3940 	uint16_t cookie_sz;
3941 
3942 	mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) +
3943 				      sizeof(struct sctp_paramhdr)), 0,
3944 				     M_NOWAIT, 1, MT_DATA);
3945 	if (mret == NULL) {
3946 		return (NULL);
3947 	}
3948 	copy_init = SCTP_M_COPYM(init, init_offset, M_COPYALL, M_NOWAIT);
3949 	if (copy_init == NULL) {
3950 		sctp_m_freem(mret);
3951 		return (NULL);
3952 	}
3953 #ifdef SCTP_MBUF_LOGGING
3954 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
3955 		sctp_log_mbc(copy_init, SCTP_MBUF_ICOPY);
3956 	}
3957 #endif
3958 	copy_initack = SCTP_M_COPYM(initack, initack_offset, M_COPYALL,
3959 	    M_NOWAIT);
3960 	if (copy_initack == NULL) {
3961 		sctp_m_freem(mret);
3962 		sctp_m_freem(copy_init);
3963 		return (NULL);
3964 	}
3965 #ifdef SCTP_MBUF_LOGGING
3966 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
3967 		sctp_log_mbc(copy_initack, SCTP_MBUF_ICOPY);
3968 	}
3969 #endif
3970 	/* easy side we just drop it on the end */
3971 	ph = mtod(mret, struct sctp_paramhdr *);
3972 	SCTP_BUF_LEN(mret) = sizeof(struct sctp_state_cookie) +
3973 	    sizeof(struct sctp_paramhdr);
3974 	stc = (struct sctp_state_cookie *)((caddr_t)ph +
3975 	    sizeof(struct sctp_paramhdr));
3976 	ph->param_type = htons(SCTP_STATE_COOKIE);
3977 	ph->param_length = 0;	/* fill in at the end */
3978 	/* Fill in the stc cookie data */
3979 	memcpy(stc, stc_in, sizeof(struct sctp_state_cookie));
3980 
3981 	/* tack the INIT and then the INIT-ACK onto the chain */
3982 	cookie_sz = 0;
3983 	for (m_at = mret; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3984 		cookie_sz += SCTP_BUF_LEN(m_at);
3985 		if (SCTP_BUF_NEXT(m_at) == NULL) {
3986 			SCTP_BUF_NEXT(m_at) = copy_init;
3987 			break;
3988 		}
3989 	}
3990 	for (m_at = copy_init; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3991 		cookie_sz += SCTP_BUF_LEN(m_at);
3992 		if (SCTP_BUF_NEXT(m_at) == NULL) {
3993 			SCTP_BUF_NEXT(m_at) = copy_initack;
3994 			break;
3995 		}
3996 	}
3997 	for (m_at = copy_initack; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3998 		cookie_sz += SCTP_BUF_LEN(m_at);
3999 		if (SCTP_BUF_NEXT(m_at) == NULL) {
4000 			break;
4001 		}
4002 	}
4003 	sig = sctp_get_mbuf_for_msg(SCTP_SIGNATURE_SIZE, 0, M_NOWAIT, 1, MT_DATA);
4004 	if (sig == NULL) {
4005 		/* no space, so free the entire chain */
4006 		sctp_m_freem(mret);
4007 		return (NULL);
4008 	}
4009 	SCTP_BUF_NEXT(m_at) = sig;
4010 	SCTP_BUF_LEN(sig) = SCTP_SIGNATURE_SIZE;
4011 	cookie_sz += SCTP_SIGNATURE_SIZE;
4012 	ph->param_length = htons(cookie_sz);
4013 	*signature = (uint8_t *)mtod(sig, caddr_t);
4014 	memset(*signature, 0, SCTP_SIGNATURE_SIZE);
4015 	return (mret);
4016 }
4017 
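/*
 * Illustrative sketch, not compiled: the cookie parameter built by
 * sctp_add_cookie() above is laid out as
 *   paramhdr | state cookie | copy of INIT | copy of INIT-ACK | signature
 * and ph->param_length covers the whole span.  The buffer-chain type below is
 * a hypothetical stand-in for the mbuf chain; only the length accounting is
 * shown.
 */
#if 0
#include <stddef.h>
#include <stdint.h>

struct seg {			/* stand-in for one mbuf in the chain */
	struct seg *next;
	uint16_t len;
};

#define SIG_SIZE 20		/* hypothetical; the real code uses SCTP_SIGNATURE_SIZE */

static uint16_t
cookie_param_length(const struct seg *chain)
{
	uint32_t total = 0;
	const struct seg *s;

	for (s = chain; s != NULL; s = s->next)
		total += s->len;	/* paramhdr + stc + INIT + INIT-ACK */
	total += SIG_SIZE;		/* trailing HMAC, zeroed until signed */
	return ((uint16_t)total);	/* stored in ph->param_length (htons'd) */
}
#endif
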
4018 static uint8_t
4019 sctp_get_ect(struct sctp_tcb *stcb)
4020 {
4021 	if ((stcb != NULL) && (stcb->asoc.ecn_supported == 1)) {
4022 		return (SCTP_ECT0_BIT);
4023 	} else {
4024 		return (0);
4025 	}
4026 }
4027 
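/*
 * Illustrative sketch, not compiled: how the ECT bit returned by
 * sctp_get_ect() above is folded into the IPv4 TOS / IPv6 traffic-class byte
 * by the output path below: the two ECN bits of the configured DSCP byte are
 * cleared, then ECT(0) is OR'ed in when ECN is allowed.
 */
#if 0
#include <stdint.h>

static uint8_t
pack_tos(uint8_t dscp_byte, int ecn_ok, uint8_t ect_bit)
{
	uint8_t tos;

	tos = dscp_byte & 0xfc;		/* keep DSCP, clear ECN bits */
	if (ecn_ok)
		tos |= ect_bit;		/* SCTP_ECT0_BIT when ECN is in use */
	return (tos);
}
#endif
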
4028 #if defined(INET) || defined(INET6)
4029 static void
4030 sctp_handle_no_route(struct sctp_tcb *stcb,
4031                      struct sctp_nets *net,
4032                      int so_locked)
4033 {
4034 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "dropped packet - no valid source addr\n");
4035 
4036 	if (net) {
4037 		SCTPDBG(SCTP_DEBUG_OUTPUT1, "Destination was ");
4038 		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT1, &net->ro._l_addr.sa);
4039 		if (net->dest_state & SCTP_ADDR_CONFIRMED) {
4040 			if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb) {
4041 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "no route takes interface %p down\n", (void *)net);
4042 				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
4043 			                        stcb, 0,
4044 			                        (void *)net,
4045 			                        so_locked);
4046 				net->dest_state &= ~SCTP_ADDR_REACHABLE;
4047 				net->dest_state &= ~SCTP_ADDR_PF;
4048 			}
4049 		}
4050 		if (stcb) {
4051 			if (net == stcb->asoc.primary_destination) {
4052 				/* need a new primary */
4053 				struct sctp_nets *alt;
4054 
4055 				alt = sctp_find_alternate_net(stcb, net, 0);
4056 				if (alt != net) {
4057 					if (stcb->asoc.alternate) {
4058 						sctp_free_remote_addr(stcb->asoc.alternate);
4059 					}
4060 					stcb->asoc.alternate = alt;
4061 					atomic_add_int(&stcb->asoc.alternate->ref_count, 1);
4062 					if (net->ro._s_addr) {
4063 						sctp_free_ifa(net->ro._s_addr);
4064 						net->ro._s_addr = NULL;
4065 					}
4066 					net->src_addr_selected = 0;
4067 				}
4068 			}
4069 		}
4070 	}
4071 }
4072 #endif
4073 
4074 static int
4075 sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
4076     struct sctp_tcb *stcb,	/* may be NULL */
4077     struct sctp_nets *net,
4078     struct sockaddr *to,
4079     struct mbuf *m,
4080     uint32_t auth_offset,
4081     struct sctp_auth_chunk *auth,
4082     uint16_t auth_keyid,
4083     int nofragment_flag,
4084     int ecn_ok,
4085     int out_of_asoc_ok,
4086     uint16_t src_port,
4087     uint16_t dest_port,
4088     uint32_t v_tag,
4089     uint16_t port,
4090     union sctp_sockstore *over_addr,
4091 #if defined(__FreeBSD__) && !defined(__Userspace__)
4092     uint8_t mflowtype, uint32_t mflowid,
4093 #endif
4094 int so_locked)
4095 /* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
4096 {
4097 	/**
4098 	 * Given an mbuf chain (via SCTP_BUF_NEXT()) that holds a packet header
4099 	 * WITH an SCTPHDR but no IP header, endpoint inp and sa structure:
4100 	 * - fill in the HMAC digest of any AUTH chunk in the packet.
4101 	 * - calculate and fill in the SCTP checksum.
4102 	 * - prepend an IP address header.
4103 	 * - if boundall use INADDR_ANY.
4104 	 * - if boundspecific do source address selection.
4105 	 * - set the fragmentation option for IPv4.
4106 	 * - On return from IP output, check/adjust mtu size of output
4107 	 *   interface and smallest_mtu size as well.
4108 	 */
4109 	/* Will need ifdefs around this */
4110 	struct mbuf *newm;
4111 	struct sctphdr *sctphdr;
4112 	int packet_length;
4113 	int ret;
4114 #if defined(INET) || defined(INET6)
4115 	uint32_t vrf_id;
4116 #endif
4117 #if defined(INET) || defined(INET6)
4118 	struct mbuf *o_pak;
4119 	sctp_route_t *ro = NULL;
4120 	struct udphdr *udp = NULL;
4121 #endif
4122 	uint8_t tos_value;
4123 #if defined(__APPLE__) && !defined(__Userspace__)
4124 	struct socket *so = NULL;
4125 #endif
4126 
4127 #if defined(__APPLE__) && !defined(__Userspace__)
4128 	if (so_locked) {
4129 		sctp_lock_assert(SCTP_INP_SO(inp));
4130 		SCTP_TCB_LOCK_ASSERT(stcb);
4131 	} else {
4132 		sctp_unlock_assert(SCTP_INP_SO(inp));
4133 	}
4134 #endif
4135 	if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) {
4136 		SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
4137 		sctp_m_freem(m);
4138 		return (EFAULT);
4139 	}
4140 #if defined(INET) || defined(INET6)
4141 	if (stcb) {
4142 		vrf_id = stcb->asoc.vrf_id;
4143 	} else {
4144 		vrf_id = inp->def_vrf_id;
4145 	}
4146 #endif
4147 	/* fill in the HMAC digest for any AUTH chunk in the packet */
4148 	if ((auth != NULL) && (stcb != NULL)) {
4149 		sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb, auth_keyid);
4150 	}
4151 
4152 	if (net) {
4153 		tos_value = net->dscp;
4154 	} else if (stcb) {
4155 		tos_value = stcb->asoc.default_dscp;
4156 	} else {
4157 		tos_value = inp->sctp_ep.default_dscp;
4158 	}
4159 
4160 	switch (to->sa_family) {
4161 #ifdef INET
4162 	case AF_INET:
4163 	{
4164 		struct ip *ip = NULL;
4165 		sctp_route_t iproute;
4166 		int len;
4167 
4168 		len = SCTP_MIN_V4_OVERHEAD;
4169 		if (port) {
4170 			len += sizeof(struct udphdr);
4171 		}
4172 		newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
4173 		if (newm == NULL) {
4174 			sctp_m_freem(m);
4175 			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4176 			return (ENOMEM);
4177 		}
4178 		SCTP_ALIGN_TO_END(newm, len);
4179 		SCTP_BUF_LEN(newm) = len;
4180 		SCTP_BUF_NEXT(newm) = m;
4181 		m = newm;
4182 #if defined(__FreeBSD__) && !defined(__Userspace__)
4183 		if (net != NULL) {
4184 			m->m_pkthdr.flowid = net->flowid;
4185 			M_HASHTYPE_SET(m, net->flowtype);
4186 		} else {
4187 			m->m_pkthdr.flowid = mflowid;
4188 			M_HASHTYPE_SET(m, mflowtype);
4189 		}
4190 #endif
4191 		packet_length = sctp_calculate_len(m);
4192 		ip = mtod(m, struct ip *);
4193 		ip->ip_v = IPVERSION;
4194 		ip->ip_hl = (sizeof(struct ip) >> 2);
4195 		if (tos_value == 0) {
4196 			/*
4197 			 * This means especially, that it is not set at the
4198 			 * SCTP layer. So use the value from the IP layer.
4199 			 */
4200 			tos_value = inp->ip_inp.inp.inp_ip_tos;
4201 		}
4202 		tos_value &= 0xfc;
4203 		if (ecn_ok) {
4204 			tos_value |= sctp_get_ect(stcb);
4205 		}
4206 		if ((nofragment_flag) && (port == 0)) {
4207 #if defined(__FreeBSD__) && !defined(__Userspace__)
4208 			ip->ip_off = htons(IP_DF);
4209 #elif defined(WITH_CONVERT_IP_OFF) || defined(__APPLE__)
4210 			ip->ip_off = IP_DF;
4211 #else
4212 			ip->ip_off = htons(IP_DF);
4213 #endif
4214 		} else {
4215 #if defined(__FreeBSD__) && !defined(__Userspace__)
4216 			ip->ip_off = htons(0);
4217 #else
4218 			ip->ip_off = 0;
4219 #endif
4220 		}
4221 #if defined(__Userspace__)
4222 		ip->ip_id = htons(SCTP_IP_ID(inp)++);
4223 #elif defined(__FreeBSD__)
4224 		/* FreeBSD has a function for ip_id's */
4225 		ip_fillid(ip);
4226 #elif defined(__APPLE__)
4227 #if RANDOM_IP_ID
4228 		ip->ip_id = ip_randomid();
4229 #else
4230 		ip->ip_id = htons(ip_id++);
4231 #endif
4232 #else
4233 		ip->ip_id = SCTP_IP_ID(inp)++;
4234 #endif
4235 
4236 		ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl;
4237 #if defined(__FreeBSD__) && !defined(__Userspace__)
4238 		ip->ip_len = htons(packet_length);
4239 #else
4240 		ip->ip_len = packet_length;
4241 #endif
4242 		ip->ip_tos = tos_value;
4243 		if (port) {
4244 			ip->ip_p = IPPROTO_UDP;
4245 		} else {
4246 			ip->ip_p = IPPROTO_SCTP;
4247 		}
4248 		ip->ip_sum = 0;
4249 		if (net == NULL) {
4250 			ro = &iproute;
4251 			memset(&iproute, 0, sizeof(iproute));
4252 #ifdef HAVE_SA_LEN
4253 			memcpy(&ro->ro_dst, to, to->sa_len);
4254 #else
4255 			memcpy(&ro->ro_dst, to, sizeof(struct sockaddr_in));
4256 #endif
4257 		} else {
4258 			ro = (sctp_route_t *)&net->ro;
4259 		}
4260 		/* Now the address selection part */
4261 		ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr;
4262 
4263 		/* call the routine to select the src address */
4264 		if (net && out_of_asoc_ok == 0) {
4265 			if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED|SCTP_ADDR_IFA_UNUSEABLE))) {
4266 				sctp_free_ifa(net->ro._s_addr);
4267 				net->ro._s_addr = NULL;
4268 				net->src_addr_selected = 0;
4269 #if defined(__FreeBSD__) && !defined(__Userspace__)
4270 				RO_NHFREE(ro);
4271 #else
4272 				if (ro->ro_rt) {
4273 					RTFREE(ro->ro_rt);
4274 					ro->ro_rt = NULL;
4275 				}
4276 #endif
4277 			}
4278 			if (net->src_addr_selected == 0) {
4279 				/* Cache the source address */
4280 				net->ro._s_addr = sctp_source_address_selection(inp,stcb,
4281 										ro, net, 0,
4282 										vrf_id);
4283 				net->src_addr_selected = 1;
4284 			}
4285 			if (net->ro._s_addr == NULL) {
4286 				/* No route to host */
4287 				net->src_addr_selected = 0;
4288 				sctp_handle_no_route(stcb, net, so_locked);
4289 				SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4290 				sctp_m_freem(m);
4291 				return (EHOSTUNREACH);
4292 			}
4293 			ip->ip_src = net->ro._s_addr->address.sin.sin_addr;
4294 		} else {
4295 			if (over_addr == NULL) {
4296 				struct sctp_ifa *_lsrc;
4297 
4298 				_lsrc = sctp_source_address_selection(inp, stcb, ro,
4299 				                                      net,
4300 				                                      out_of_asoc_ok,
4301 				                                      vrf_id);
4302 				if (_lsrc == NULL) {
4303 					sctp_handle_no_route(stcb, net, so_locked);
4304 					SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4305 					sctp_m_freem(m);
4306 					return (EHOSTUNREACH);
4307 				}
4308 				ip->ip_src = _lsrc->address.sin.sin_addr;
4309 				sctp_free_ifa(_lsrc);
4310 			} else {
4311 				ip->ip_src = over_addr->sin.sin_addr;
4312 				SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
4313 			}
4314 		}
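		/*
		 * A non-zero port means the packet is UDP encapsulated
		 * (RFC 6951): place a UDP header between the IP header and
		 * the SCTP common header.
		 */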
4315 		if (port) {
4316 			if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
4317 				sctp_handle_no_route(stcb, net, so_locked);
4318 				SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4319 				sctp_m_freem(m);
4320 				return (EHOSTUNREACH);
4321 			}
4322 			udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
4323 			udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4324 			udp->uh_dport = port;
4325 			udp->uh_ulen = htons((uint16_t)(packet_length - sizeof(struct ip)));
4326 #if !defined(__Userspace__)
4327 #if defined(__FreeBSD__)
4328 			if (V_udp_cksum) {
4329 				udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
4330 			} else {
4331 				udp->uh_sum = 0;
4332 			}
4333 #else
4334 			udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
4335 #endif
4336 #else
4337 			udp->uh_sum = 0;
4338 #endif
4339 			sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4340 		} else {
4341 			sctphdr = (struct sctphdr *)((caddr_t)ip + sizeof(struct ip));
4342 		}
4343 
4344 		sctphdr->src_port = src_port;
4345 		sctphdr->dest_port = dest_port;
4346 		sctphdr->v_tag = v_tag;
4347 		sctphdr->checksum = 0;
4348 
4349 		/*
4350 		 * If source address selection fails and we find no route
4351 		 * then the ip_output should fail as well with a
4352 		 * NO_ROUTE_TO_HOST type error. We probably should catch
4353 		 * that somewhere and abort the association right away
4354 		 * (assuming this is an INIT being sent).
4355 		 */
4356 #if defined(__FreeBSD__) && !defined(__Userspace__)
4357 		if (ro->ro_nh == NULL) {
4358 #else
4359 		if (ro->ro_rt == NULL) {
4360 #endif
4361 			/*
4362 			 * src addr selection failed to find a route (or
4363 			 * valid source addr), so we can't get there from
4364 			 * here (yet)!
4365 			 */
4366 			sctp_handle_no_route(stcb, net, so_locked);
4367 			SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4368 			sctp_m_freem(m);
4369 			return (EHOSTUNREACH);
4370 		}
4371 		if (ro != &iproute) {
4372 			memcpy(&iproute, ro, sizeof(*ro));
4373 		}
4374 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv4 output routine from low level src addr:%x\n",
4375 			(uint32_t) (ntohl(ip->ip_src.s_addr)));
4376 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "Destination is %x\n",
4377 			(uint32_t)(ntohl(ip->ip_dst.s_addr)));
4378 #if defined(__FreeBSD__) && !defined(__Userspace__)
4379 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n",
4380 			(void *)ro->ro_nh);
4381 #else
4382 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n",
4383 			(void *)ro->ro_rt);
4384 #endif
4385 
4386 		if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4387 			/* failed to prepend data, give up */
4388 			SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4389 			sctp_m_freem(m);
4390 			return (ENOMEM);
4391 		}
4392 		SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
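		/*
		 * CRC32c handling: for UDP encapsulated packets compute the
		 * SCTP checksum in software; otherwise, on FreeBSD, request
		 * hardware offload via CSUM_SCTP, and on other stacks fall
		 * back to software unless checksums are disabled for
		 * loopback traffic.
		 */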
4393 		if (port) {
4394 			sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip) + sizeof(struct udphdr));
4395 			SCTP_STAT_INCR(sctps_sendswcrc);
4396 #if !defined(__Userspace__)
4397 #if defined(__FreeBSD__)
4398 			if (V_udp_cksum) {
4399 				SCTP_ENABLE_UDP_CSUM(o_pak);
4400 			}
4401 #else
4402 			SCTP_ENABLE_UDP_CSUM(o_pak);
4403 #endif
4404 #endif
4405 		} else {
4406 #if defined(__FreeBSD__) && !defined(__Userspace__)
4407 			m->m_pkthdr.csum_flags = CSUM_SCTP;
4408 			m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
4409 			SCTP_STAT_INCR(sctps_sendhwcrc);
4410 #else
4411 			if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
4412 			      (stcb) && (stcb->asoc.scope.loopback_scope))) {
4413 				sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip));
4414 				SCTP_STAT_INCR(sctps_sendswcrc);
4415 			} else {
4416 				SCTP_STAT_INCR(sctps_sendhwcrc);
4417 			}
4418 #endif
4419 		}
4420 #ifdef SCTP_PACKET_LOGGING
4421 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4422 			sctp_packet_log(o_pak);
4423 #endif
4424 		/* send it out.  table id is taken from stcb */
4425 #if defined(__APPLE__) && !defined(__Userspace__)
4426 		if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4427 			so = SCTP_INP_SO(inp);
4428 			SCTP_SOCKET_UNLOCK(so, 0);
4429 		}
4430 #endif
4431 #if defined(__FreeBSD__) && !defined(__Userspace__)
4432 		SCTP_PROBE5(send, NULL, stcb, ip, stcb, sctphdr);
4433 #endif
4434 		SCTP_IP_OUTPUT(ret, o_pak, ro, stcb, vrf_id);
4435 #if defined(__APPLE__) && !defined(__Userspace__)
4436 		if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4437 			atomic_add_int(&stcb->asoc.refcnt, 1);
4438 			SCTP_TCB_UNLOCK(stcb);
4439 			SCTP_SOCKET_LOCK(so, 0);
4440 			SCTP_TCB_LOCK(stcb);
4441 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4442 		}
4443 #endif
4444 #if defined(__FreeBSD__) && !defined(__Userspace__)
4445 		if (port) {
4446 			UDPSTAT_INC(udps_opackets);
4447 		}
4448 #endif
4449 		SCTP_STAT_INCR(sctps_sendpackets);
4450 		SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4451 		if (ret)
4452 			SCTP_STAT_INCR(sctps_senderrors);
4453 
4454 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "IP output returns %d\n", ret);
4455 		if (net == NULL) {
4456 			/* free temporary routes */
4457 #if defined(__FreeBSD__) && !defined(__Userspace__)
4458 			RO_NHFREE(ro);
4459 #else
4460 			if (ro->ro_rt) {
4461 				RTFREE(ro->ro_rt);
4462 				ro->ro_rt = NULL;
4463 			}
4464 #endif
4465 		} else {
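			/*
			 * The route is still valid: refresh the path MTU from
			 * the route/nexthop and lower net->mtu (and possibly
			 * the association's smallest MTU) if the path MTU
			 * shrank; account for the UDP encapsulation overhead.
			 */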
4466 #if defined(__FreeBSD__) && !defined(__Userspace__)
4467 			if ((ro->ro_nh != NULL) && (net->ro._s_addr) &&
4468 #else
4469 			if ((ro->ro_rt != NULL) && (net->ro._s_addr) &&
4470 #endif
4471 			    ((net->dest_state & SCTP_ADDR_NO_PMTUD) == 0)) {
4472 				uint32_t mtu;
4473 
4474 #if defined(__FreeBSD__) && !defined(__Userspace__)
4475 				mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_nh);
4476 #else
4477 				mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4478 #endif
4479 				if (mtu > 0) {
4480 					if (net->port) {
4481 						mtu -= sizeof(struct udphdr);
4482 					}
4483 					if (mtu < net->mtu) {
4484 						if ((stcb != NULL) && (stcb->asoc.smallest_mtu > mtu)) {
4485 							sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4486 						}
4487 						net->mtu = mtu;
4488 					}
4489 				}
4490 #if defined(__FreeBSD__) && !defined(__Userspace__)
4491 			} else if (ro->ro_nh == NULL) {
4492 #else
4493 			} else if (ro->ro_rt == NULL) {
4494 #endif
4495 				/* route was freed */
4496 				if (net->ro._s_addr &&
4497 				    net->src_addr_selected) {
4498 					sctp_free_ifa(net->ro._s_addr);
4499 					net->ro._s_addr = NULL;
4500 				}
4501 				net->src_addr_selected = 0;
4502 			}
4503 		}
4504 		return (ret);
4505 	}
4506 #endif
4507 #ifdef INET6
4508 	case AF_INET6:
4509 	{
4510 		uint32_t flowlabel, flowinfo;
4511 		struct ip6_hdr *ip6h;
4512 		struct route_in6 ip6route;
4513 #if !defined(__Userspace__)
4514 		struct ifnet *ifp;
4515 #endif
4516 		struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp;
4517 		int prev_scope = 0;
4518 #ifdef SCTP_EMBEDDED_V6_SCOPE
4519 		struct sockaddr_in6 lsa6_storage;
4520 		int error;
4521 #endif
4522 		u_short prev_port = 0;
4523 		int len;
4524 
4525 		if (net) {
4526 			flowlabel = net->flowlabel;
4527 		} else if (stcb) {
4528 			flowlabel = stcb->asoc.default_flowlabel;
4529 		} else {
4530 			flowlabel = inp->sctp_ep.default_flowlabel;
4531 		}
4532 		if (flowlabel == 0) {
4533 			/*
4534 			 * The flow label is not set at the SCTP layer, so
4535 			 * use the value from the IP layer.
4536 			 */
4537 #if defined(__APPLE__)  && !defined(__Userspace__) && (!defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION))
4538 			flowlabel = ntohl(inp->ip_inp.inp.inp_flow);
4539 #else
4540 			flowlabel = ntohl(((struct inpcb *)inp)->inp_flow);
4541 #endif
4542 		}
4543 		flowlabel &= 0x000fffff;
4544 		len = SCTP_MIN_OVERHEAD;
4545 		if (port) {
4546 			len += sizeof(struct udphdr);
4547 		}
4548 		newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
4549 		if (newm == NULL) {
4550 			sctp_m_freem(m);
4551 			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4552 			return (ENOMEM);
4553 		}
4554 		SCTP_ALIGN_TO_END(newm, len);
4555 		SCTP_BUF_LEN(newm) = len;
4556 		SCTP_BUF_NEXT(newm) = m;
4557 		m = newm;
4558 #if defined(__FreeBSD__) && !defined(__Userspace__)
4559 		if (net != NULL) {
4560 			m->m_pkthdr.flowid = net->flowid;
4561 			M_HASHTYPE_SET(m, net->flowtype);
4562 		} else {
4563 			m->m_pkthdr.flowid = mflowid;
4564 			M_HASHTYPE_SET(m, mflowtype);
4565 		}
4566 #endif
4567 		packet_length = sctp_calculate_len(m);
4568 
4569 		ip6h = mtod(m, struct ip6_hdr *);
4570 		/* protect *sin6 from overwrite */
4571 		sin6 = (struct sockaddr_in6 *)to;
4572 		tmp = *sin6;
4573 		sin6 = &tmp;
4574 
4575 #ifdef SCTP_EMBEDDED_V6_SCOPE
4576 		/* KAME hack: embed scopeid */
4577 #if defined(__APPLE__) && !defined(__Userspace__)
4578 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
4579 		if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0)
4580 #else
4581 		if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0)
4582 #endif
4583 #elif defined(SCTP_KAME)
4584 		if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0)
4585 #else
4586 		if (in6_embedscope(&sin6->sin6_addr, sin6) != 0)
4587 #endif
4588 		{
4589 			SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4590 			sctp_m_freem(m);
4591 			return (EINVAL);
4592 		}
4593 #endif /* SCTP_EMBEDDED_V6_SCOPE */
4594 		if (net == NULL) {
4595 			memset(&ip6route, 0, sizeof(ip6route));
4596 			ro = (sctp_route_t *)&ip6route;
4597 #ifdef HAVE_SIN6_LEN
4598 			memcpy(&ro->ro_dst, sin6, sin6->sin6_len);
4599 #else
4600 			memcpy(&ro->ro_dst, sin6, sizeof(struct sockaddr_in6));
4601 #endif
4602 		} else {
4603 			ro = (sctp_route_t *)&net->ro;
4604 		}
4605 		/*
4606 		 * We assume here that inp_flow is in host byte order within
4607 		 * the TCB!
4608 		 */
4609 		if (tos_value == 0) {
4610 			/*
4611 			 * The TOS is not set at the SCTP layer, so use the
4612 			 * value from the IP layer.
4613 			 */
4614 #if defined(__APPLE__)  && !defined(__Userspace__) && (!defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION))
4615 			tos_value = (ntohl(inp->ip_inp.inp.inp_flow) >> 20) & 0xff;
4616 #else
4617 			tos_value = (ntohl(((struct inpcb *)inp)->inp_flow) >> 20) & 0xff;
4618 #endif
4619 		}
4620 		tos_value &= 0xfc;
4621 		if (ecn_ok) {
4622 			tos_value |= sctp_get_ect(stcb);
4623 		}
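		/*
		 * Build the IPv6 flow word: 4-bit version, 8-bit traffic
		 * class (DSCP/ECN), and 20-bit flow label.
		 */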
4624 		flowinfo = 0x06;
4625 		flowinfo <<= 8;
4626 		flowinfo |= tos_value;
4627 		flowinfo <<= 20;
4628 		flowinfo |= flowlabel;
4629 		ip6h->ip6_flow = htonl(flowinfo);
4630 		if (port) {
4631 			ip6h->ip6_nxt = IPPROTO_UDP;
4632 		} else {
4633 			ip6h->ip6_nxt = IPPROTO_SCTP;
4634 		}
4635 		ip6h->ip6_plen = htons((uint16_t)(packet_length - sizeof(struct ip6_hdr)));
4636 		ip6h->ip6_dst = sin6->sin6_addr;
4637 
4638 		/*
4639 		 * Add SRC address selection here: we can only reuse to a
4640 		 * limited degree the kame src-addr-sel, since we can try
4641 		 * their selection but it may not be bound.
4642 		 */
4643 		memset(&lsa6_tmp, 0, sizeof(lsa6_tmp));
4644 		lsa6_tmp.sin6_family = AF_INET6;
4645 #ifdef HAVE_SIN6_LEN
4646 		lsa6_tmp.sin6_len = sizeof(lsa6_tmp);
4647 #endif
4648 		lsa6 = &lsa6_tmp;
4649 		if (net && out_of_asoc_ok == 0) {
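			/* Drop a cached source address that is being deleted or is unusable. */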
4650 			if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED|SCTP_ADDR_IFA_UNUSEABLE))) {
4651 				sctp_free_ifa(net->ro._s_addr);
4652 				net->ro._s_addr = NULL;
4653 				net->src_addr_selected = 0;
4654 #if defined(__FreeBSD__) && !defined(__Userspace__)
4655 				RO_NHFREE(ro);
4656 #else
4657 				if (ro->ro_rt) {
4658 					RTFREE(ro->ro_rt);
4659 					ro->ro_rt = NULL;
4660 				}
4661 #endif
4662 			}
4663 			if (net->src_addr_selected == 0) {
4664 #ifdef SCTP_EMBEDDED_V6_SCOPE
4665 				sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4666 				/* KAME hack: embed scopeid */
4667 #if defined(__APPLE__) && !defined(__Userspace__)
4668 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
4669 				if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0)
4670 #else
4671 				if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0)
4672 #endif
4673 #elif defined(SCTP_KAME)
4674 				if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0)
4675 #else
4676 				if (in6_embedscope(&sin6->sin6_addr, sin6) != 0)
4677 #endif
4678 				{
4679 					SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4680 					sctp_m_freem(m);
4681 					return (EINVAL);
4682 				}
4683 #endif /* SCTP_EMBEDDED_V6_SCOPE */
4684 				/* Cache the source address */
4685 				net->ro._s_addr = sctp_source_address_selection(inp,
4686 										stcb,
4687 										ro,
4688 										net,
4689 										0,
4690 										vrf_id);
4691 #ifdef SCTP_EMBEDDED_V6_SCOPE
4692 #ifdef SCTP_KAME
4693 				(void)sa6_recoverscope(sin6);
4694 #else
4695 				(void)in6_recoverscope(sin6, &sin6->sin6_addr, NULL);
4696 #endif	/* SCTP_KAME */
4697 #endif	/* SCTP_EMBEDDED_V6_SCOPE */
4698 				net->src_addr_selected = 1;
4699 			}
4700 			if (net->ro._s_addr == NULL) {
4701 				SCTPDBG(SCTP_DEBUG_OUTPUT3, "V6:No route to host\n");
4702 				net->src_addr_selected = 0;
4703 				sctp_handle_no_route(stcb, net, so_locked);
4704 				SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4705 				sctp_m_freem(m);
4706 				return (EHOSTUNREACH);
4707 			}
4708 			lsa6->sin6_addr = net->ro._s_addr->address.sin6.sin6_addr;
4709 		} else {
4710 #ifdef SCTP_EMBEDDED_V6_SCOPE
4711 			sin6 = (struct sockaddr_in6 *)&ro->ro_dst;
4712 			/* KAME hack: embed scopeid */
4713 #if defined(__APPLE__) && !defined(__Userspace__)
4714 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
4715 			if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0)
4716 #else
4717 			if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0)
4718 #endif
4719 #elif defined(SCTP_KAME)
4720 			if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0)
4721 #else
4722 			if (in6_embedscope(&sin6->sin6_addr, sin6) != 0)
4723 #endif
4724 			{
4725 				SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4726 				sctp_m_freem(m);
4727 				return (EINVAL);
4728 			}
4729 #endif /* SCTP_EMBEDDED_V6_SCOPE */
4730 			if (over_addr == NULL) {
4731 				struct sctp_ifa *_lsrc;
4732 
4733 				_lsrc = sctp_source_address_selection(inp, stcb, ro,
4734 				                                      net,
4735 				                                      out_of_asoc_ok,
4736 				                                      vrf_id);
4737 				if (_lsrc == NULL) {
4738 					sctp_handle_no_route(stcb, net, so_locked);
4739 					SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4740 					sctp_m_freem(m);
4741 					return (EHOSTUNREACH);
4742 				}
4743 				lsa6->sin6_addr = _lsrc->address.sin6.sin6_addr;
4744 				sctp_free_ifa(_lsrc);
4745 			} else {
4746 				lsa6->sin6_addr = over_addr->sin6.sin6_addr;
4747 				SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
4748 			}
4749 #ifdef SCTP_EMBEDDED_V6_SCOPE
4750 #ifdef SCTP_KAME
4751 			(void)sa6_recoverscope(sin6);
4752 #else
4753 			(void)in6_recoverscope(sin6, &sin6->sin6_addr, NULL);
4754 #endif	/* SCTP_KAME */
4755 #endif	/* SCTP_EMBEDDED_V6_SCOPE */
4756 		}
4757 		lsa6->sin6_port = inp->sctp_lport;
4758 
4759 #if defined(__FreeBSD__) && !defined(__Userspace__)
4760 		if (ro->ro_nh == NULL) {
4761 #else
4762 		if (ro->ro_rt == NULL) {
4763 #endif
4764 			/*
4765 			 * src addr selection failed to find a route (or
4766 			 * valid source addr), so we can't get there from
4767 			 * here!
4768 			 */
4769 			sctp_handle_no_route(stcb, net, so_locked);
4770 			SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4771 			sctp_m_freem(m);
4772 			return (EHOSTUNREACH);
4773 		}
4774 #ifndef SCOPEDROUTING
4775 #ifdef SCTP_EMBEDDED_V6_SCOPE
4776 		/*
4777 		 * XXX: sa6 may not have a valid sin6_scope_id in the
4778 		 * non-SCOPEDROUTING case.
4779 		 */
4780 		memset(&lsa6_storage, 0, sizeof(lsa6_storage));
4781 		lsa6_storage.sin6_family = AF_INET6;
4782 #ifdef HAVE_SIN6_LEN
4783 		lsa6_storage.sin6_len = sizeof(lsa6_storage);
4784 #endif
4785 #ifdef SCTP_KAME
4786 		lsa6_storage.sin6_addr = lsa6->sin6_addr;
4787 		if ((error = sa6_recoverscope(&lsa6_storage)) != 0) {
4788 #else
4789 		if ((error = in6_recoverscope(&lsa6_storage, &lsa6->sin6_addr,
4790 		    NULL)) != 0) {
4791 #endif				/* SCTP_KAME */
4792 			SCTPDBG(SCTP_DEBUG_OUTPUT3, "recover scope fails error %d\n", error);
4793 			sctp_m_freem(m);
4794 			return (error);
4795 		}
4796 		/* XXX */
4797 		lsa6_storage.sin6_addr = lsa6->sin6_addr;
4798 		lsa6_storage.sin6_port = inp->sctp_lport;
4799 		lsa6 = &lsa6_storage;
4800 #endif /* SCTP_EMBEDDED_V6_SCOPE */
4801 #endif /* SCOPEDROUTING */
4802 		ip6h->ip6_src = lsa6->sin6_addr;
4803 
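		/* A non-zero port selects UDP encapsulation (RFC 6951). */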
4804 		if (port) {
4805 			if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
4806 				sctp_handle_no_route(stcb, net, so_locked);
4807 				SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4808 				sctp_m_freem(m);
4809 				return (EHOSTUNREACH);
4810 			}
4811 			udp = (struct udphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4812 			udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4813 			udp->uh_dport = port;
4814 			udp->uh_ulen = htons((uint16_t)(packet_length - sizeof(struct ip6_hdr)));
4815 			udp->uh_sum = 0;
4816 			sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4817 		} else {
4818 			sctphdr = (struct sctphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4819 		}
4820 
4821 		sctphdr->src_port = src_port;
4822 		sctphdr->dest_port = dest_port;
4823 		sctphdr->v_tag = v_tag;
4824 		sctphdr->checksum = 0;
4825 
4826 		/*
4827 		 * We set the hop limit now since there is a good chance
4828 		 * that our ro pointer is now filled
4829 		 */
4830 		ip6h->ip6_hlim = SCTP_GET_HLIM(inp, ro);
4831 #if !defined(__Userspace__)
4832 		ifp = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
4833 #endif
4834 
4835 #ifdef SCTP_DEBUG
4836 		/* Copy to be sure something bad is not happening */
4837 		sin6->sin6_addr = ip6h->ip6_dst;
4838 		lsa6->sin6_addr = ip6h->ip6_src;
4839 #endif
4840 
4841 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv6 output routine from low level\n");
4842 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "src: ");
4843 		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)lsa6);
4844 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst: ");
4845 		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)sin6);
4846 		if (net) {
4847 			sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4848 			/* preserve the port and scope for link local send */
4849 			prev_scope = sin6->sin6_scope_id;
4850 			prev_port = sin6->sin6_port;
4851 		}
4852 
4853 		if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4854 			/* failed to prepend data, give up */
4855 			sctp_m_freem(m);
4856 			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4857 			return (ENOMEM);
4858 		}
4859 		SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
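		/*
		 * As in the IPv4 case: software CRC32c for UDP encapsulated
		 * packets (plus the UDP checksum, which is mandatory over
		 * IPv6), hardware offload via CSUM_SCTP_IPV6 on FreeBSD, and
		 * a software fallback elsewhere unless sending over loopback
		 * with checksums disabled.
		 */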
4860 		if (port) {
4861 			sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
4862 			SCTP_STAT_INCR(sctps_sendswcrc);
4863 #if !defined(__Userspace__)
4864 #if defined(_WIN32)
4865 			udp->uh_sum = 0;
4866 #else
4867 			if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), packet_length - sizeof(struct ip6_hdr))) == 0) {
4868 				udp->uh_sum = 0xffff;
4869 			}
4870 #endif
4871 #endif
4872 		} else {
4873 #if defined(__FreeBSD__) && !defined(__Userspace__)
4874 			m->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
4875 			m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
4876 			SCTP_STAT_INCR(sctps_sendhwcrc);
4877 #else
4878 			if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
4879 			      (stcb) && (stcb->asoc.scope.loopback_scope))) {
4880 				sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr));
4881 				SCTP_STAT_INCR(sctps_sendswcrc);
4882 			} else {
4883 				SCTP_STAT_INCR(sctps_sendhwcrc);
4884 			}
4885 #endif
4886 		}
4887 		/* send it out. table id is taken from stcb */
4888 #if defined(__APPLE__) && !defined(__Userspace__)
4889 		if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4890 			so = SCTP_INP_SO(inp);
4891 			SCTP_SOCKET_UNLOCK(so, 0);
4892 		}
4893 #endif
4894 #ifdef SCTP_PACKET_LOGGING
4895 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4896 			sctp_packet_log(o_pak);
4897 #endif
4898 #if !defined(__Userspace__)
4899 #if defined(__FreeBSD__)
4900 		SCTP_PROBE5(send, NULL, stcb, ip6h, stcb, sctphdr);
4901 #endif
4902 		SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, &ifp, stcb, vrf_id);
4903 #else
4904 		SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, NULL, stcb, vrf_id);
4905 #endif
4906 #if defined(__APPLE__) && !defined(__Userspace__)
4907 		if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4908 			atomic_add_int(&stcb->asoc.refcnt, 1);
4909 			SCTP_TCB_UNLOCK(stcb);
4910 			SCTP_SOCKET_LOCK(so, 0);
4911 			SCTP_TCB_LOCK(stcb);
4912 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4913 		}
4914 #endif
4915 		if (net) {
4916 			/* for link local this must be done */
4917 			sin6->sin6_scope_id = prev_scope;
4918 			sin6->sin6_port = prev_port;
4919 		}
4920 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
4921 #if defined(__FreeBSD__) && !defined(__Userspace__)
4922 		if (port) {
4923 			UDPSTAT_INC(udps_opackets);
4924 		}
4925 #endif
4926 		SCTP_STAT_INCR(sctps_sendpackets);
4927 		SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4928 		if (ret) {
4929 			SCTP_STAT_INCR(sctps_senderrors);
4930 		}
4931 		if (net == NULL) {
4932 			/* Now, if we had a temp route, free it */
4933 #if defined(__FreeBSD__) && !defined(__Userspace__)
4934 			RO_NHFREE(ro);
4935 #else
4936 			if (ro->ro_rt) {
4937 				RTFREE(ro->ro_rt);
4938 				ro->ro_rt = NULL;
4939 			}
4940 #endif
4941 		} else {
4942 			/* PMTU check versus smallest asoc MTU goes here */
4943 #if defined(__FreeBSD__) && !defined(__Userspace__)
4944 			if (ro->ro_nh == NULL) {
4945 #else
4946 			if (ro->ro_rt == NULL) {
4947 #endif
4948 				/* Route was freed */
4949 				if (net->ro._s_addr &&
4950 				    net->src_addr_selected) {
4951 					sctp_free_ifa(net->ro._s_addr);
4952 					net->ro._s_addr = NULL;
4953 				}
4954 				net->src_addr_selected = 0;
4955 			}
4956 #if defined(__FreeBSD__) && !defined(__Userspace__)
4957 			if ((ro->ro_nh != NULL) && (net->ro._s_addr) &&
4958 #else
4959 			if ((ro->ro_rt != NULL) && (net->ro._s_addr) &&
4960 #endif
4961 			    ((net->dest_state & SCTP_ADDR_NO_PMTUD) == 0)) {
4962 				uint32_t mtu;
4963 
4964 #if defined(__FreeBSD__) && !defined(__Userspace__)
4965 				mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_nh);
4966 #else
4967 				mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4968 #endif
4969 				if (mtu > 0) {
4970 					if (net->port) {
4971 						mtu -= sizeof(struct udphdr);
4972 					}
4973 					if (mtu < net->mtu) {
4974 						if ((stcb != NULL) && (stcb->asoc.smallest_mtu > mtu)) {
4975 							sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4976 						}
4977 						net->mtu = mtu;
4978 					}
4979 				}
4980 			}
4981 #if !defined(__Userspace__)
4982 			else if (ifp) {
4983 #if defined(_WIN32)
4984 #define ND_IFINFO(ifp)	(ifp)
4985 #define linkmtu		if_mtu
4986 #endif
4987 				if (ND_IFINFO(ifp)->linkmtu &&
4988 				    (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) {
4989 					sctp_mtu_size_reset(inp,
4990 					    &stcb->asoc,
4991 					    ND_IFINFO(ifp)->linkmtu);
4992 				}
4993 			}
4994 #endif
4995 		}
4996 		return (ret);
4997 	}
4998 #endif
4999 #if defined(__Userspace__)
5000 	case AF_CONN:
5001 	{
5002 		char *buffer;
5003 		struct sockaddr_conn *sconn;
5004 		int len;
5005 
5006 		sconn = (struct sockaddr_conn *)to;
5007 		len = sizeof(struct sctphdr);
5008 		newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
5009 		if (newm == NULL) {
5010 			sctp_m_freem(m);
5011 			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
5012 			return (ENOMEM);
5013 		}
5014 		SCTP_ALIGN_TO_END(newm, len);
5015 		SCTP_BUF_LEN(newm) = len;
5016 		SCTP_BUF_NEXT(newm) = m;
5017 		m = newm;
5018 		packet_length = sctp_calculate_len(m);
5019 		sctphdr = mtod(m, struct sctphdr *);
5020 		sctphdr->src_port = src_port;
5021 		sctphdr->dest_port = dest_port;
5022 		sctphdr->v_tag = v_tag;
5023 		sctphdr->checksum = 0;
5024 		if (SCTP_BASE_VAR(crc32c_offloaded) == 0) {
5025 			sctphdr->checksum = sctp_calculate_cksum(m, 0);
5026 			SCTP_STAT_INCR(sctps_sendswcrc);
5027 		} else {
5028 			SCTP_STAT_INCR(sctps_sendhwcrc);
5029 		}
5030 		if (tos_value == 0) {
5031 			tos_value = inp->ip_inp.inp.inp_ip_tos;
5032 		}
5033 		tos_value &= 0xfc;
5034 		if (ecn_ok) {
5035 			tos_value |= sctp_get_ect(stcb);
5036 		}
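		/*
		 * Linearize the mbuf chain into a contiguous buffer and hand
		 * it to the application-supplied conn_output callback.
		 */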
5037 		/* Don't alloc/free for each packet */
5038 		if ((buffer = malloc(packet_length)) != NULL) {
5039 			m_copydata(m, 0, packet_length, buffer);
5040 			ret = SCTP_BASE_VAR(conn_output)(sconn->sconn_addr, buffer, packet_length, tos_value, nofragment_flag);
5041 			free(buffer);
5042 		} else {
5043 			ret = ENOMEM;
5044 		}
5045 		sctp_m_freem(m);
5046 		return (ret);
5047 	}
5048 #endif
5049 	default:
5050 		SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
5051 		        ((struct sockaddr *)to)->sa_family);
5052 		sctp_m_freem(m);
5053 		SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
5054 		return (EFAULT);
5055 	}
5056 }
5057 
5058 void
5059 sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked)
5060 {
5061 	struct mbuf *m, *m_last;
5062 	struct sctp_nets *net;
5063 	struct sctp_init_chunk *init;
5064 	struct sctp_supported_addr_param *sup_addr;
5065 	struct sctp_adaptation_layer_indication *ali;
5066 	struct sctp_supported_chunk_types_param *pr_supported;
5067 	struct sctp_paramhdr *ph;
5068 	int cnt_inits_to = 0;
5069 	int error;
5070 	uint16_t num_ext, chunk_len, padding_len, parameter_len;
5071 
5072 #if defined(__APPLE__) && !defined(__Userspace__)
5073 	if (so_locked) {
5074 		sctp_lock_assert(SCTP_INP_SO(inp));
5075 	} else {
5076 		sctp_unlock_assert(SCTP_INP_SO(inp));
5077 	}
5078 #endif
5079 	/* INITs always go to the primary (and usually only) address */
5080 	net = stcb->asoc.primary_destination;
5081 	if (net == NULL) {
5082 		net = TAILQ_FIRST(&stcb->asoc.nets);
5083 		if (net == NULL) {
5084 			/* TSNH */
5085 			return;
5086 		}
5087 		/* we confirm any address we send an INIT to */
5088 		net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
5089 		(void)sctp_set_primary_addr(stcb, NULL, net);
5090 	} else {
5091 		/* we confirm any address we send an INIT to */
5092 		net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
5093 	}
5094 	SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT\n");
5095 #ifdef INET6
5096 	if (net->ro._l_addr.sa.sa_family == AF_INET6) {
5097 		/*
5098 		 * Special hook: if we are sending to a link-local address,
5099 		 * it will not show up in our private address count.
5100 		 */
5101 		if (IN6_IS_ADDR_LINKLOCAL(&net->ro._l_addr.sin6.sin6_addr))
5102 			cnt_inits_to = 1;
5103 	}
5104 #endif
5105 	if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5106 		/* This case should not happen */
5107 		SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - failed timer?\n");
5108 		return;
5109 	}
5110 	/* start the INIT timer */
5111 	sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net);
5112 
5113 	m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_NOWAIT, 1, MT_DATA);
5114 	if (m == NULL) {
5115 		/* No memory, INIT timer will re-attempt. */
5116 		SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - mbuf?\n");
5117 		return;
5118 	}
5119 	chunk_len = (uint16_t)sizeof(struct sctp_init_chunk);
5120 	padding_len = 0;
5121 	/* Now let's put the chunk header in place */
5122 	init = mtod(m, struct sctp_init_chunk *);
5123 	/* now the chunk header */
5124 	init->ch.chunk_type = SCTP_INITIATION;
5125 	init->ch.chunk_flags = 0;
5126 	/* fill in later from mbuf we build */
5127 	init->ch.chunk_length = 0;
5128 	/* place in my tag */
5129 	init->init.initiate_tag = htonl(stcb->asoc.my_vtag);
5130 	/* set up some of the credits. */
5131 	init->init.a_rwnd = htonl(max(inp->sctp_socket?SCTP_SB_LIMIT_RCV(inp->sctp_socket):0,
5132 	                              SCTP_MINIMAL_RWND));
5133 	init->init.num_outbound_streams = htons(stcb->asoc.pre_open_streams);
5134 	init->init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams);
5135 	init->init.initial_tsn = htonl(stcb->asoc.init_seq_number);
5136 
5137 	/* Adaptation layer indication parameter */
5138 	if (inp->sctp_ep.adaptation_layer_indicator_provided) {
5139 		parameter_len = (uint16_t)sizeof(struct sctp_adaptation_layer_indication);
5140 		ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t) + chunk_len);
5141 		ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
5142 		ali->ph.param_length = htons(parameter_len);
5143 		ali->indication = htonl(inp->sctp_ep.adaptation_layer_indicator);
5144 		chunk_len += parameter_len;
5145 	}
5146 
5147 	/* ECN parameter */
5148 	if (stcb->asoc.ecn_supported == 1) {
5149 		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5150 		ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
5151 		ph->param_type = htons(SCTP_ECN_CAPABLE);
5152 		ph->param_length = htons(parameter_len);
5153 		chunk_len += parameter_len;
5154 	}
5155 
5156 	/* PR-SCTP supported parameter */
5157 	if (stcb->asoc.prsctp_supported == 1) {
5158 		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5159 		ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
5160 		ph->param_type = htons(SCTP_PRSCTP_SUPPORTED);
5161 		ph->param_length = htons(parameter_len);
5162 		chunk_len += parameter_len;
5163 	}
5164 
5165 	/* Add NAT friendly parameter. */
5166 	if (SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly)) {
5167 		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5168 		ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
5169 		ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
5170 		ph->param_length = htons(parameter_len);
5171 		chunk_len += parameter_len;
5172 	}
5173 
5174 	/* And now tell the peer which extensions we support */
5175 	num_ext = 0;
5176 	pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t) + chunk_len);
5177 	if (stcb->asoc.prsctp_supported == 1) {
5178 		pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
5179 		if (stcb->asoc.idata_supported) {
5180 			pr_supported->chunk_types[num_ext++] = SCTP_IFORWARD_CUM_TSN;
5181 		}
5182 	}
5183 	if (stcb->asoc.auth_supported == 1) {
5184 		pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
5185 	}
5186 	if (stcb->asoc.asconf_supported == 1) {
5187 		pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
5188 		pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
5189 	}
5190 	if (stcb->asoc.reconfig_supported == 1) {
5191 		pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
5192 	}
5193 	if (stcb->asoc.idata_supported) {
5194 		pr_supported->chunk_types[num_ext++] = SCTP_IDATA;
5195 	}
5196 	if (stcb->asoc.nrsack_supported == 1) {
5197 		pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
5198 	}
5199 	if (stcb->asoc.pktdrop_supported == 1) {
5200 		pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
5201 	}
5202 	if (num_ext > 0) {
5203 		parameter_len = (uint16_t)sizeof(struct sctp_supported_chunk_types_param) + num_ext;
5204 		pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
5205 		pr_supported->ph.param_length = htons(parameter_len);
5206 		padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
5207 		chunk_len += parameter_len;
5208 	}
5209 	/* add authentication parameters */
5210 	if (stcb->asoc.auth_supported) {
5211 		/* attach RANDOM parameter, if available */
5212 		if (stcb->asoc.authinfo.random != NULL) {
5213 			struct sctp_auth_random *randp;
5214 
5215 			if (padding_len > 0) {
5216 				memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5217 				chunk_len += padding_len;
5218 				padding_len = 0;
5219 			}
5220 			randp = (struct sctp_auth_random *)(mtod(m, caddr_t) + chunk_len);
5221 			parameter_len = (uint16_t)sizeof(struct sctp_auth_random) + stcb->asoc.authinfo.random_len;
5222 			/* random key already contains the header */
5223 			memcpy(randp, stcb->asoc.authinfo.random->key, parameter_len);
5224 			padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
5225 			chunk_len += parameter_len;
5226 		}
5227 		/* add HMAC_ALGO parameter */
5228 		if (stcb->asoc.local_hmacs != NULL) {
5229 			struct sctp_auth_hmac_algo *hmacs;
5230 
5231 			if (padding_len > 0) {
5232 				memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5233 				chunk_len += padding_len;
5234 				padding_len = 0;
5235 			}
5236 			hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t) + chunk_len);
5237 			parameter_len = (uint16_t)(sizeof(struct sctp_auth_hmac_algo) +
5238 			                           stcb->asoc.local_hmacs->num_algo * sizeof(uint16_t));
5239 			hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
5240 			hmacs->ph.param_length = htons(parameter_len);
5241 			sctp_serialize_hmaclist(stcb->asoc.local_hmacs, (uint8_t *)hmacs->hmac_ids);
5242 			padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
5243 			chunk_len += parameter_len;
5244 		}
5245 		/* add CHUNKS parameter */
5246 		if (stcb->asoc.local_auth_chunks != NULL) {
5247 			struct sctp_auth_chunk_list *chunks;
5248 
5249 			if (padding_len > 0) {
5250 				memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5251 				chunk_len += padding_len;
5252 				padding_len = 0;
5253 			}
5254 			chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t) + chunk_len);
5255 			parameter_len = (uint16_t)(sizeof(struct sctp_auth_chunk_list) +
5256 			                           sctp_auth_get_chklist_size(stcb->asoc.local_auth_chunks));
5257 			chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
5258 			chunks->ph.param_length = htons(parameter_len);
5259 			sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks, chunks->chunk_types);
5260 			padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
5261 			chunk_len += parameter_len;
5262 		}
5263 	}
5264 
5265 	/* now any cookie time extensions */
5266 	if (stcb->asoc.cookie_preserve_req > 0) {
5267 		struct sctp_cookie_perserve_param *cookie_preserve;
5268 
5269 		if (padding_len > 0) {
5270 			memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5271 			chunk_len += padding_len;
5272 			padding_len = 0;
5273 		}
5274 		parameter_len = (uint16_t)sizeof(struct sctp_cookie_perserve_param);
5275 		cookie_preserve = (struct sctp_cookie_perserve_param *)(mtod(m, caddr_t) + chunk_len);
5276 		cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE);
5277 		cookie_preserve->ph.param_length = htons(parameter_len);
5278 		cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req);
5279 		stcb->asoc.cookie_preserve_req = 0;
5280 		chunk_len += parameter_len;
5281 	}
5282 
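	/*
	 * Supported address types parameter: advertise IPv4 and/or IPv6 as
	 * permitted by the association's address scope.
	 */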
5283 	if (stcb->asoc.scope.ipv4_addr_legal || stcb->asoc.scope.ipv6_addr_legal) {
5284 		uint8_t i;
5285 
5286 		if (padding_len > 0) {
5287 			memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5288 			chunk_len += padding_len;
5289 			padding_len = 0;
5290 		}
5291 		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5292 		if (stcb->asoc.scope.ipv4_addr_legal) {
5293 			parameter_len += (uint16_t)sizeof(uint16_t);
5294 		}
5295 		if (stcb->asoc.scope.ipv6_addr_legal) {
5296 			parameter_len += (uint16_t)sizeof(uint16_t);
5297 		}
5298 		sup_addr = (struct sctp_supported_addr_param *)(mtod(m, caddr_t) + chunk_len);
5299 		sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE);
5300 		sup_addr->ph.param_length = htons(parameter_len);
5301 		i = 0;
5302 		if (stcb->asoc.scope.ipv4_addr_legal) {
5303 			sup_addr->addr_type[i++] = htons(SCTP_IPV4_ADDRESS);
5304 		}
5305 		if (stcb->asoc.scope.ipv6_addr_legal) {
5306 			sup_addr->addr_type[i++] = htons(SCTP_IPV6_ADDRESS);
5307 		}
5308 		padding_len = 4 - 2 * i;
5309 		chunk_len += parameter_len;
5310 	}
5311 
5312 	SCTP_BUF_LEN(m) = chunk_len;
5313 	/* now the addresses */
5314 	/* To optimize this we could put the scoping stuff
5315 	 * into a structure and remove the individual uint8's from
5316 	 * the assoc structure. Then we could just sifa in the
5317 	 * address within the stcb. But for now this is a quick
5318 	 * hack to get the address stuff teased apart.
5319 	 */
5320 	m_last = sctp_add_addresses_to_i_ia(inp, stcb, &stcb->asoc.scope,
5321 	                                    m, cnt_inits_to,
5322 	                                    &padding_len, &chunk_len);
5323 
5324 	init->ch.chunk_length = htons(chunk_len);
5325 	if (padding_len > 0) {
5326 		if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
5327 			sctp_m_freem(m);
5328 			return;
5329 		}
5330 	}
5331 	SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - calls lowlevel_output\n");
5332 	if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
5333 	                                        (struct sockaddr *)&net->ro._l_addr,
5334 	                                        m, 0, NULL, 0, 0, 0, 0,
5335 	                                        inp->sctp_lport, stcb->rport, htonl(0),
5336 	                                        net->port, NULL,
5337 #if defined(__FreeBSD__) && !defined(__Userspace__)
5338 	                                        0, 0,
5339 #endif
5340 	                                        so_locked))) {
5341 		SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak send error %d\n", error);
5342 		if (error == ENOBUFS) {
5343 			stcb->asoc.ifp_had_enobuf = 1;
5344 			SCTP_STAT_INCR(sctps_lowlevelerr);
5345 		}
5346 	} else {
5347 		stcb->asoc.ifp_had_enobuf = 0;
5348 	}
5349 	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
5350 	(void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
5351 }
5352 
5353 struct mbuf *
5354 sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt,
5355                                       int param_offset, int *abort_processing,
5356                                       struct sctp_chunkhdr *cp,
5357                                       int *nat_friendly,
5358                                       int *cookie_found)
5359 {
5360 	/*
5361 	 * Given an mbuf containing an INIT or INIT-ACK, with param_offset
5362 	 * pointing at the beginning of the parameters (i.e. iphlen +
5363 	 * sizeof(struct sctp_init_msg)), parse through the parameters to the
5364 	 * end of the mbuf, verifying that all parameters are known.
5365 	 *
5366 	 * For unknown parameters, build and return an mbuf with
5367 	 * UNRECOGNIZED_PARAMETER errors. If the flags indicate to stop
5368 	 * processing this chunk, stop and set *abort_processing to 1.
5369 	 *
5370 	 * By having param_offset pre-set to where the parameters begin, it is
5371 	 * hoped that this routine may be reused in the future by new
5372 	 * features.
5373 	 */
5374 	struct sctp_paramhdr *phdr, params;
5375 
5376 	struct mbuf *mat, *m_tmp, *op_err, *op_err_last;
5377 	int at, limit, pad_needed;
5378 	uint16_t ptype, plen, padded_size;
5379 
5380 	*abort_processing = 0;
5381 	if (cookie_found != NULL) {
5382 		*cookie_found = 0;
5383 	}
5384 	mat = in_initpkt;
5385 	limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk);
5386 	at = param_offset;
5387 	op_err = NULL;
5388 	op_err_last = NULL;
5389 	pad_needed = 0;
5390 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "Check for unrecognized param's\n");
5391 	phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
5392 	while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) {
5393 		ptype = ntohs(phdr->param_type);
5394 		plen = ntohs(phdr->param_length);
5395 		if ((plen > limit) || (plen < sizeof(struct sctp_paramhdr))) {
5396 			/* malformed parameter */
5397 			SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error %d\n", plen);
5398 			goto invalid_size;
5399 		}
5400 		limit -= SCTP_SIZE32(plen);
5401 		/*-
5402 		 * All parameters for all chunks that we know/understand are
5403 		 * listed here. We process them in other places and take the
5404 		 * appropriate stop action per the upper bits. However, this
5405 		 * is the generic routine that processors can call to get back
5406 		 * an operr to either incorporate (init-ack) or send.
5407 		 */
5408 		padded_size = SCTP_SIZE32(plen);
5409 		switch (ptype) {
5410 			/* Params with variable size */
5411 		case SCTP_HEARTBEAT_INFO:
5412 		case SCTP_UNRECOG_PARAM:
5413 		case SCTP_ERROR_CAUSE_IND:
5414 			/* ok skip fwd */
5415 			at += padded_size;
5416 			break;
5417 		case SCTP_STATE_COOKIE:
5418 			if (cookie_found != NULL) {
5419 				*cookie_found = 1;
5420 			}
5421 			at += padded_size;
5422 			break;
5423 			/* Params with variable size within a range */
5424 		case SCTP_CHUNK_LIST:
5425 		case SCTP_SUPPORTED_CHUNK_EXT:
5426 			if (padded_size > (sizeof(struct sctp_supported_chunk_types_param) + (sizeof(uint8_t) * SCTP_MAX_SUPPORTED_EXT))) {
5427 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error chklist %d\n", plen);
5428 				goto invalid_size;
5429 			}
5430 			at += padded_size;
5431 			break;
5432 		case SCTP_SUPPORTED_ADDRTYPE:
5433 			if (padded_size > SCTP_MAX_ADDR_PARAMS_SIZE) {
5434 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error supaddrtype %d\n", plen);
5435 				goto invalid_size;
5436 			}
5437 			at += padded_size;
5438 			break;
5439 		case SCTP_RANDOM:
5440 			if (padded_size > (sizeof(struct sctp_auth_random) + SCTP_RANDOM_MAX_SIZE)) {
5441 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error random %d\n", plen);
5442 				goto invalid_size;
5443 			}
5444 			at += padded_size;
5445 			break;
5446 		case SCTP_SET_PRIM_ADDR:
5447 		case SCTP_DEL_IP_ADDRESS:
5448 		case SCTP_ADD_IP_ADDRESS:
5449 			if ((padded_size != sizeof(struct sctp_asconf_addrv4_param)) &&
5450 			    (padded_size != sizeof(struct sctp_asconf_addr_param))) {
5451 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error setprim %d\n", plen);
5452 				goto invalid_size;
5453 			}
5454 			at += padded_size;
5455 			break;
5456 			/* Params with a fixed size */
5457 		case SCTP_IPV4_ADDRESS:
5458 			if (padded_size != sizeof(struct sctp_ipv4addr_param)) {
5459 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv4 addr %d\n", plen);
5460 				goto invalid_size;
5461 			}
5462 			at += padded_size;
5463 			break;
5464 		case SCTP_IPV6_ADDRESS:
5465 			if (padded_size != sizeof(struct sctp_ipv6addr_param)) {
5466 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv6 addr %d\n", plen);
5467 				goto invalid_size;
5468 			}
5469 			at += padded_size;
5470 			break;
5471 		case SCTP_COOKIE_PRESERVE:
5472 			if (padded_size != sizeof(struct sctp_cookie_perserve_param)) {
5473 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error cookie-preserve %d\n", plen);
5474 				goto invalid_size;
5475 			}
5476 			at += padded_size;
5477 			break;
5478 		case SCTP_HAS_NAT_SUPPORT:
5479 			*nat_friendly = 1;
5480 			/* fall through */
5481 		case SCTP_PRSCTP_SUPPORTED:
5482 			if (padded_size != sizeof(struct sctp_paramhdr)) {
5483 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error prsctp/nat support %d\n", plen);
5484 				goto invalid_size;
5485 			}
5486 			at += padded_size;
5487 			break;
5488 		case SCTP_ECN_CAPABLE:
5489 			if (padded_size != sizeof(struct sctp_paramhdr)) {
5490 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecn %d\n", plen);
5491 				goto invalid_size;
5492 			}
5493 			at += padded_size;
5494 			break;
5495 		case SCTP_ULP_ADAPTATION:
5496 			if (padded_size != sizeof(struct sctp_adaptation_layer_indication)) {
5497 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error adaptation %d\n", plen);
5498 				goto invalid_size;
5499 			}
5500 			at += padded_size;
5501 			break;
5502 		case SCTP_SUCCESS_REPORT:
5503 			if (padded_size != sizeof(struct sctp_asconf_paramhdr)) {
5504 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error success %d\n", plen);
5505 				goto invalid_size;
5506 			}
5507 			at += padded_size;
5508 			break;
5509 		case SCTP_HOSTNAME_ADDRESS:
5510 		{
5511 			/* Hostname parameters are deprecated. */
5512 			struct sctp_gen_error_cause *cause;
5513 			int l_len;
5514 
5515 			SCTPDBG(SCTP_DEBUG_OUTPUT1, "Can't handle hostname addresses.. abort processing\n");
5516 			*abort_processing = 1;
5517 			sctp_m_freem(op_err);
5518 			op_err = NULL;
5519 			op_err_last = NULL;
5520 #ifdef INET6
5521 			l_len = SCTP_MIN_OVERHEAD;
5522 #else
5523 			l_len = SCTP_MIN_V4_OVERHEAD;
5524 #endif
5525 			l_len += sizeof(struct sctp_chunkhdr);
5526 			l_len += sizeof(struct sctp_gen_error_cause);
5527 			op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5528 			if (op_err != NULL) {
5529 				/*
5530 				 * Pre-reserve space for IP, SCTP, and
5531 				 * chunk header.
5532 				 */
5533 #ifdef INET6
5534 				SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5535 #else
5536 				SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5537 #endif
5538 				SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5539 				SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5540 				SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
5541 				cause = mtod(op_err, struct sctp_gen_error_cause *);
5542 				cause->code = htons(SCTP_CAUSE_UNRESOLVABLE_ADDR);
5543 				cause->length = htons((uint16_t)(sizeof(struct sctp_gen_error_cause) + plen));
5544 				SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(mat, at, plen, M_NOWAIT);
5545 				if (SCTP_BUF_NEXT(op_err) == NULL) {
5546 					sctp_m_freem(op_err);
5547 					op_err = NULL;
5548 					op_err_last = NULL;
5549 				}
5550 			}
5551 			return (op_err);
5552 		}
5553 		default:
5554 			/*
5555 			 * We do not recognize the parameter; figure out what
5556 			 * to do with it.
5557 			 */
5558 			SCTPDBG(SCTP_DEBUG_OUTPUT1, "Hit default param %x\n", ptype);
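			/*
			 * The upper two bits of the parameter type tell us
			 * what to do with an unrecognized parameter (RFC 4960,
			 * Section 3.2.1): 0x4000 set means report it in an
			 * UNRECOGNIZED_PARAMETER error cause, 0x8000 set means
			 * keep processing the remaining parameters.
			 */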
5559 			if ((ptype & 0x4000) == 0x4000) {
5560 				/* The report bit is set. */
5561 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "report op err\n");
5562 				if (op_err == NULL) {
5563 					int l_len;
5564 					/* Ok need to try to get an mbuf */
5565 #ifdef INET6
5566 					l_len = SCTP_MIN_OVERHEAD;
5567 #else
5568 					l_len = SCTP_MIN_V4_OVERHEAD;
5569 #endif
5570 					l_len += sizeof(struct sctp_chunkhdr);
5571 					l_len += sizeof(struct sctp_paramhdr);
5572 					op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5573 					if (op_err) {
5574 						SCTP_BUF_LEN(op_err) = 0;
5575 #ifdef INET6
5576 						SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5577 #else
5578 						SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5579 #endif
5580 						SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5581 						SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5582 						op_err_last = op_err;
5583 					}
5584 				}
5585 				if (op_err != NULL) {
5586 					/* If we have space */
5587 					struct sctp_paramhdr *param;
5588 
5589 					if (pad_needed > 0) {
5590 						op_err_last = sctp_add_pad_tombuf(op_err_last, pad_needed);
5591 					}
5592 					if (op_err_last == NULL) {
5593 						sctp_m_freem(op_err);
5594 						op_err = NULL;
5595 						op_err_last = NULL;
5596 						goto more_processing;
5597 					}
5598 					if (M_TRAILINGSPACE(op_err_last) < (int)sizeof(struct sctp_paramhdr)) {
5599 						m_tmp = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_NOWAIT, 1, MT_DATA);
5600 						if (m_tmp == NULL) {
5601 							sctp_m_freem(op_err);
5602 							op_err = NULL;
5603 							op_err_last = NULL;
5604 							goto more_processing;
5605 						}
5606 						SCTP_BUF_LEN(m_tmp) = 0;
5607 						SCTP_BUF_NEXT(m_tmp) = NULL;
5608 						SCTP_BUF_NEXT(op_err_last) = m_tmp;
5609 						op_err_last = m_tmp;
5610 					}
5611 					param = (struct sctp_paramhdr *)(mtod(op_err_last, caddr_t) + SCTP_BUF_LEN(op_err_last));
5612 					param->param_type = htons(SCTP_UNRECOG_PARAM);
5613 					param->param_length = htons((uint16_t)sizeof(struct sctp_paramhdr) + plen);
5614 					SCTP_BUF_LEN(op_err_last) += sizeof(struct sctp_paramhdr);
5615 					SCTP_BUF_NEXT(op_err_last) = SCTP_M_COPYM(mat, at, plen, M_NOWAIT);
5616 					if (SCTP_BUF_NEXT(op_err_last) == NULL) {
5617 						sctp_m_freem(op_err);
5618 						op_err = NULL;
5619 						op_err_last = NULL;
5620 						goto more_processing;
5621 					} else {
5622 						while (SCTP_BUF_NEXT(op_err_last) != NULL) {
5623 							op_err_last = SCTP_BUF_NEXT(op_err_last);
5624 						}
5625 					}
5626 					if (plen % 4 != 0) {
5627 						pad_needed = 4 - (plen % 4);
5628 					} else {
5629 						pad_needed = 0;
5630 					}
5631 				}
5632 			}
5633 		more_processing:
5634 			if ((ptype & 0x8000) == 0x0000) {
5635 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "stop proc\n");
5636 				return (op_err);
5637 			} else {
5638 				/* skip this chunk and continue processing */
5639 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "move on\n");
5640 				at += SCTP_SIZE32(plen);
5641 			}
5642 			break;
5643 		}
5644 		phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
5645 	}
5646 	return (op_err);
5647  invalid_size:
5648 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "abort flag set\n");
5649 	*abort_processing = 1;
5650 	sctp_m_freem(op_err);
5651 	op_err = NULL;
5652 	op_err_last = NULL;
5653 	if (phdr != NULL) {
5654 		struct sctp_paramhdr *param;
5655 		int l_len;
5656 #ifdef INET6
5657 		l_len = SCTP_MIN_OVERHEAD;
5658 #else
5659 		l_len = SCTP_MIN_V4_OVERHEAD;
5660 #endif
5661 		l_len += sizeof(struct sctp_chunkhdr);
5662 		l_len += (2 * sizeof(struct sctp_paramhdr));
5663 		op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5664 		if (op_err) {
5665 			SCTP_BUF_LEN(op_err) = 0;
5666 #ifdef INET6
5667 			SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5668 #else
5669 			SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5670 #endif
5671 			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5672 			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5673 			SCTP_BUF_LEN(op_err) = 2 * sizeof(struct sctp_paramhdr);
5674 			param = mtod(op_err, struct sctp_paramhdr *);
5675 			param->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5676 			param->param_length = htons(2 * sizeof(struct sctp_paramhdr));
5677 			param++;
5678 			param->param_type = htons(ptype);
5679 			param->param_length = htons(plen);
5680 		}
5681 	}
5682 	return (op_err);
5683 }
5684 
5685 static int
5686 sctp_are_there_new_addresses(struct sctp_association *asoc,
5687     struct mbuf *in_initpkt, int offset, struct sockaddr *src)
5688 {
5689 	/*
5690 	 * Given an INIT packet, look through the packet to verify that there
5691 	 * are NO new addresses. As we go through the parameters, add reports
5692 	 * of any unrecognized parameters that require an error. Also, we
5693 	 * must return (1) to drop the packet if we see an unrecognized
5694 	 * parameter that tells us to drop the chunk.
5695 	 */
5696 	struct sockaddr *sa_touse;
5697 	struct sockaddr *sa;
5698 	struct sctp_paramhdr *phdr, params;
5699 	uint16_t ptype, plen;
5700 	uint8_t fnd;
5701 	struct sctp_nets *net;
5702 	int check_src;
5703 #ifdef INET
5704 	struct sockaddr_in sin4, *sa4;
5705 #endif
5706 #ifdef INET6
5707 	struct sockaddr_in6 sin6, *sa6;
5708 #endif
5709 #if defined(__Userspace__)
5710 	struct sockaddr_conn *sac;
5711 #endif
5712 
5713 #ifdef INET
5714 	memset(&sin4, 0, sizeof(sin4));
5715 	sin4.sin_family = AF_INET;
5716 #ifdef HAVE_SIN_LEN
5717 	sin4.sin_len = sizeof(sin4);
5718 #endif
5719 #endif
5720 #ifdef INET6
5721 	memset(&sin6, 0, sizeof(sin6));
5722 	sin6.sin6_family = AF_INET6;
5723 #ifdef HAVE_SIN6_LEN
5724 	sin6.sin6_len = sizeof(sin6);
5725 #endif
5726 #endif
5727 	/* First, what about the src address of the packet? */
5728 	check_src = 0;
5729 	switch (src->sa_family) {
5730 #ifdef INET
5731 	case AF_INET:
5732 		if (asoc->scope.ipv4_addr_legal) {
5733 			check_src = 1;
5734 		}
5735 		break;
5736 #endif
5737 #ifdef INET6
5738 	case AF_INET6:
5739 		if (asoc->scope.ipv6_addr_legal) {
5740 			check_src = 1;
5741 		}
5742 		break;
5743 #endif
5744 #if defined(__Userspace__)
5745 	case AF_CONN:
5746 		if (asoc->scope.conn_addr_legal) {
5747 			check_src = 1;
5748 		}
5749 		break;
5750 #endif
5751 	default:
5752 		/* TSNH */
5753 		break;
5754 	}
5755 	if (check_src) {
5756 		fnd = 0;
5757 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5758 			sa = (struct sockaddr *)&net->ro._l_addr;
5759 			if (sa->sa_family == src->sa_family) {
5760 #ifdef INET
5761 				if (sa->sa_family == AF_INET) {
5762 					struct sockaddr_in *src4;
5763 
5764 					sa4 = (struct sockaddr_in *)sa;
5765 					src4 = (struct sockaddr_in *)src;
5766 					if (sa4->sin_addr.s_addr == src4->sin_addr.s_addr) {
5767 						fnd = 1;
5768 						break;
5769 					}
5770 				}
5771 #endif
5772 #ifdef INET6
5773 				if (sa->sa_family == AF_INET6) {
5774 					struct sockaddr_in6 *src6;
5775 
5776 					sa6 = (struct sockaddr_in6 *)sa;
5777 					src6 = (struct sockaddr_in6 *)src;
5778 					if (SCTP6_ARE_ADDR_EQUAL(sa6, src6)) {
5779 						fnd = 1;
5780 						break;
5781 					}
5782 				}
5783 #endif
5784 #if defined(__Userspace__)
5785 				if (sa->sa_family == AF_CONN) {
5786 					struct sockaddr_conn *srcc;
5787 
5788 					sac = (struct sockaddr_conn *)sa;
5789 					srcc = (struct sockaddr_conn *)src;
5790 					if (sac->sconn_addr == srcc->sconn_addr) {
5791 						fnd = 1;
5792 						break;
5793 					}
5794 				}
5795 #endif
5796 			}
5797 		}
5798 		if (fnd == 0) {
5799 			/* New address added! no need to look further. */
5800 			return (1);
5801 		}
5802 	}
5803 	/* Ok so far, let's munge through the rest of the packet */
5804 	offset += sizeof(struct sctp_init_chunk);
5805 	phdr = sctp_get_next_param(in_initpkt, offset, &params, sizeof(params));
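	/*
	 * Walk the IPv4/IPv6 address parameters in the INIT; any address
	 * that is not already part of the association counts as new.
	 */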
5806 	while (phdr) {
5807 		sa_touse = NULL;
5808 		ptype = ntohs(phdr->param_type);
5809 		plen = ntohs(phdr->param_length);
5810 		switch (ptype) {
5811 #ifdef INET
5812 		case SCTP_IPV4_ADDRESS:
5813 		{
5814 			struct sctp_ipv4addr_param *p4, p4_buf;
5815 
5816 			if (plen != sizeof(struct sctp_ipv4addr_param)) {
5817 				return (1);
5818 			}
5819 			phdr = sctp_get_next_param(in_initpkt, offset,
5820 			    (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
5821 			if (phdr == NULL) {
5822 				return (1);
5823 			}
5824 			if (asoc->scope.ipv4_addr_legal) {
5825 				p4 = (struct sctp_ipv4addr_param *)phdr;
5826 				sin4.sin_addr.s_addr = p4->addr;
5827 				sa_touse = (struct sockaddr *)&sin4;
5828 			}
5829 			break;
5830 		}
5831 #endif
5832 #ifdef INET6
5833 		case SCTP_IPV6_ADDRESS:
5834 		{
5835 			struct sctp_ipv6addr_param *p6, p6_buf;
5836 
5837 			if (plen != sizeof(struct sctp_ipv6addr_param)) {
5838 				return (1);
5839 			}
5840 			phdr = sctp_get_next_param(in_initpkt, offset,
5841 			    (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
5842 			if (phdr == NULL) {
5843 				return (1);
5844 			}
5845 			if (asoc->scope.ipv6_addr_legal) {
5846 				p6 = (struct sctp_ipv6addr_param *)phdr;
5847 				memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
5848 				       sizeof(p6->addr));
5849 				sa_touse = (struct sockaddr *)&sin6;
5850 			}
5851 			break;
5852 		}
5853 #endif
5854 		default:
5855 			sa_touse = NULL;
5856 			break;
5857 		}
5858 		if (sa_touse) {
5859 			/* ok, sa_touse points to one to check */
5860 			fnd = 0;
5861 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5862 				sa = (struct sockaddr *)&net->ro._l_addr;
5863 				if (sa->sa_family != sa_touse->sa_family) {
5864 					continue;
5865 				}
5866 #ifdef INET
5867 				if (sa->sa_family == AF_INET) {
5868 					sa4 = (struct sockaddr_in *)sa;
5869 					if (sa4->sin_addr.s_addr ==
5870 					    sin4.sin_addr.s_addr) {
5871 						fnd = 1;
5872 						break;
5873 					}
5874 				}
5875 #endif
5876 #ifdef INET6
5877 				if (sa->sa_family == AF_INET6) {
5878 					sa6 = (struct sockaddr_in6 *)sa;
5879 					if (SCTP6_ARE_ADDR_EQUAL(
5880 					    sa6, &sin6)) {
5881 						fnd = 1;
5882 						break;
5883 					}
5884 				}
5885 #endif
5886 			}
5887 			if (!fnd) {
5888 				/* New addr added! no need to look further */
5889 				return (1);
5890 			}
5891 		}
5892 		offset += SCTP_SIZE32(plen);
5893 		phdr = sctp_get_next_param(in_initpkt, offset, &params, sizeof(params));
5894 	}
5895 	return (0);
5896 }
5897 
/*
 * Given an mbuf chain that was sent to us containing an INIT, build an
 * INIT-ACK with a COOKIE and send it back. We assume that init_pkt has
 * been pulled up to include the IPv6/IPv4 header, the SCTP header, and the
 * initial part of the INIT message (i.e. the struct sctp_init_msg).
 */
5904 void
5905 sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
5906                        struct sctp_nets *src_net, struct mbuf *init_pkt,
5907                        int iphlen, int offset,
5908                        struct sockaddr *src, struct sockaddr *dst,
5909                        struct sctphdr *sh, struct sctp_init_chunk *init_chk,
5910 #if defined(__FreeBSD__) && !defined(__Userspace__)
5911 		       uint8_t mflowtype, uint32_t mflowid,
5912 #endif
5913                        uint32_t vrf_id, uint16_t port)
5914 {
5915 	struct sctp_association *asoc;
5916 	struct mbuf *m, *m_tmp, *m_last, *m_cookie, *op_err;
5917 	struct sctp_init_ack_chunk *initack;
5918 	struct sctp_adaptation_layer_indication *ali;
5919 	struct sctp_supported_chunk_types_param *pr_supported;
5920 	struct sctp_paramhdr *ph;
5921 	union sctp_sockstore *over_addr;
5922 	struct sctp_scoping scp;
5923 	struct timeval now;
5924 #ifdef INET
5925 	struct sockaddr_in *dst4 = (struct sockaddr_in *)dst;
5926 	struct sockaddr_in *src4 = (struct sockaddr_in *)src;
5927 	struct sockaddr_in *sin;
5928 #endif
5929 #ifdef INET6
5930 	struct sockaddr_in6 *dst6 = (struct sockaddr_in6 *)dst;
5931 	struct sockaddr_in6 *src6 = (struct sockaddr_in6 *)src;
5932 	struct sockaddr_in6 *sin6;
5933 #endif
5934 #if defined(__Userspace__)
5935 	struct sockaddr_conn *dstconn = (struct sockaddr_conn *)dst;
5936 	struct sockaddr_conn *srcconn = (struct sockaddr_conn *)src;
5937 	struct sockaddr_conn *sconn;
5938 #endif
5939 	struct sockaddr *to;
5940 	struct sctp_state_cookie stc;
5941 	struct sctp_nets *net = NULL;
5942 	uint8_t *signature = NULL;
5943 	int cnt_inits_to = 0;
5944 	uint16_t his_limit, i_want;
5945 	int abort_flag;
5946 	int nat_friendly = 0;
5947 	int error;
5948 	struct socket *so;
5949 	uint16_t num_ext, chunk_len, padding_len, parameter_len;
5950 
5951 	if (stcb) {
5952 		asoc = &stcb->asoc;
5953 	} else {
5954 		asoc = NULL;
5955 	}
5956 	if ((asoc != NULL) &&
5957 	    (SCTP_GET_STATE(stcb) != SCTP_STATE_COOKIE_WAIT)) {
5958 		if (sctp_are_there_new_addresses(asoc, init_pkt, offset, src)) {
5959 			/*
5960 			 * new addresses, out of here in non-cookie-wait states
5961 			 *
5962 			 * Send an ABORT, without the new address error cause.
5963 			 * This looks no different than if no listener
5964 			 * was present.
5965 			 */
5966 			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5967 			                             "Address added");
5968 			sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, op_err,
5969 #if defined(__FreeBSD__) && !defined(__Userspace__)
5970 			                mflowtype, mflowid, inp->fibnum,
5971 #endif
5972 			                vrf_id, port);
5973 			return;
5974 		}
5975 		if (src_net != NULL && (src_net->port != port)) {
5976 			/*
5977 			 * change of remote encapsulation port, out of here in
5978 			 * non-cookie-wait states
5979 			 *
			 * Send an ABORT, without a specific error cause.
5981 			 * This looks no different than if no listener
5982 			 * was present.
5983 			 */
5984 			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5985 			                             "Remote encapsulation port changed");
5986 			sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, op_err,
5987 #if defined(__FreeBSD__) && !defined(__Userspace__)
5988 			                mflowtype, mflowid, inp->fibnum,
5989 #endif
5990 			                vrf_id, port);
5991 			return;
5992 		}
5993 	}
5994 	abort_flag = 0;
5995 	op_err = sctp_arethere_unrecognized_parameters(init_pkt,
5996 	                                               (offset + sizeof(struct sctp_init_chunk)),
5997 	                                               &abort_flag,
5998 	                                               (struct sctp_chunkhdr *)init_chk,
5999 	                                               &nat_friendly, NULL);
6000 	if (abort_flag) {
6001 	do_a_abort:
6002 		if (op_err == NULL) {
6003 			char msg[SCTP_DIAG_INFO_LEN];
6004 
6005 			SCTP_SNPRINTF(msg, sizeof(msg), "%s:%d at %s", __FILE__, __LINE__, __func__);
6006 			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
6007 			                             msg);
6008 		}
6009 		sctp_send_abort(init_pkt, iphlen, src, dst, sh,
6010 				init_chk->init.initiate_tag, op_err,
6011 #if defined(__FreeBSD__) && !defined(__Userspace__)
6012 		                mflowtype, mflowid, inp->fibnum,
6013 #endif
6014 		                vrf_id, port);
6015 		return;
6016 	}
6017 	m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
6018 	if (m == NULL) {
6019 		/* No memory, INIT timer will re-attempt. */
6020 		sctp_m_freem(op_err);
6021 		return;
6022 	}
6023 	chunk_len = (uint16_t)sizeof(struct sctp_init_ack_chunk);
6024 	padding_len = 0;
6025 
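	/*
	 * Note on the bookkeeping used below: every parameter is appended at
	 * mtod(m, caddr_t) + chunk_len and padded out to a 32-bit boundary
	 * before the next one is written, using
	 *
	 *     padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
	 *     chunk_len  += parameter_len;
	 *
	 * For example, a 7 byte parameter yields SCTP_SIZE32(7) == 8, so one
	 * zeroed pad byte is written before the next parameter (or the
	 * cookie) is appended.
	 */
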
6026 	/*
6027 	 * We might not overwrite the identification[] completely and on
6028 	 * some platforms time_entered will contain some padding.
6029 	 * Therefore zero out the cookie to avoid putting
6030 	 * uninitialized memory on the wire.
6031 	 */
6032 	memset(&stc, 0, sizeof(struct sctp_state_cookie));
6033 
	/* the time I built the cookie */
6035 	(void)SCTP_GETTIME_TIMEVAL(&now);
6036 	stc.time_entered.tv_sec = now.tv_sec;
6037 	stc.time_entered.tv_usec = now.tv_usec;
6038 
6039 	/* populate any tie tags */
6040 	if (asoc != NULL) {
6041 		/* unlock before tag selections */
6042 		stc.tie_tag_my_vtag = asoc->my_vtag_nonce;
6043 		stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce;
6044 		stc.cookie_life = asoc->cookie_life;
6045 		net = asoc->primary_destination;
6046 	} else {
6047 		stc.tie_tag_my_vtag = 0;
6048 		stc.tie_tag_peer_vtag = 0;
6049 		/* life I will award this cookie */
6050 		stc.cookie_life = inp->sctp_ep.def_cookie_life;
6051 	}
6052 
6053 	/* copy in the ports for later check */
6054 	stc.myport = sh->dest_port;
6055 	stc.peerport = sh->src_port;
6056 
6057 	/*
6058 	 * If we wanted to honor cookie life extensions, we would add to
6059 	 * stc.cookie_life. For now we should NOT honor any extension
6060 	 */
6061 	stc.site_scope = stc.local_scope = stc.loopback_scope = 0;
6062 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
6063 		stc.ipv6_addr_legal = 1;
6064 		if (SCTP_IPV6_V6ONLY(inp)) {
6065 			stc.ipv4_addr_legal = 0;
6066 		} else {
6067 			stc.ipv4_addr_legal = 1;
6068 		}
6069 #if defined(__Userspace__)
6070 		stc.conn_addr_legal = 0;
6071 #endif
6072 	} else {
6073 		stc.ipv6_addr_legal = 0;
6074 #if defined(__Userspace__)
6075 		if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) {
6076 			stc.conn_addr_legal = 1;
6077 			stc.ipv4_addr_legal = 0;
6078 		} else {
6079 			stc.conn_addr_legal = 0;
6080 			stc.ipv4_addr_legal = 1;
6081 		}
6082 #else
6083 		stc.ipv4_addr_legal = 1;
6084 #endif
6085 	}
6086 	stc.ipv4_scope = 0;
6087 	if (net == NULL) {
6088 		to = src;
6089 		switch (dst->sa_family) {
6090 #ifdef INET
6091 		case AF_INET:
6092 		{
6093 			/* lookup address */
6094 			stc.address[0] = src4->sin_addr.s_addr;
6095 			stc.address[1] = 0;
6096 			stc.address[2] = 0;
6097 			stc.address[3] = 0;
6098 			stc.addr_type = SCTP_IPV4_ADDRESS;
6099 			/* local from address */
6100 			stc.laddress[0] = dst4->sin_addr.s_addr;
6101 			stc.laddress[1] = 0;
6102 			stc.laddress[2] = 0;
6103 			stc.laddress[3] = 0;
6104 			stc.laddr_type = SCTP_IPV4_ADDRESS;
6105 			/* scope_id is only for v6 */
6106 			stc.scope_id = 0;
6107 			if ((IN4_ISPRIVATE_ADDRESS(&src4->sin_addr)) ||
6108 			    (IN4_ISPRIVATE_ADDRESS(&dst4->sin_addr))){
6109 				stc.ipv4_scope = 1;
6110 			}
6111 			/* Must use the address in this case */
6112 			if (sctp_is_address_on_local_host(src, vrf_id)) {
6113 				stc.loopback_scope = 1;
6114 				stc.ipv4_scope = 1;
6115 				stc.site_scope = 1;
6116 				stc.local_scope = 0;
6117 			}
6118 			break;
6119 		}
6120 #endif
6121 #ifdef INET6
6122 		case AF_INET6:
6123 		{
6124 			stc.addr_type = SCTP_IPV6_ADDRESS;
6125 			memcpy(&stc.address, &src6->sin6_addr, sizeof(struct in6_addr));
6126 #if defined(__FreeBSD__) && !defined(__Userspace__)
6127 			stc.scope_id = ntohs(in6_getscope(&src6->sin6_addr));
6128 #else
6129 			stc.scope_id = 0;
6130 #endif
6131 			if (sctp_is_address_on_local_host(src, vrf_id)) {
6132 				stc.loopback_scope = 1;
6133 				stc.local_scope = 0;
6134 				stc.site_scope = 1;
6135 				stc.ipv4_scope = 1;
6136 			} else if (IN6_IS_ADDR_LINKLOCAL(&src6->sin6_addr) ||
6137 			           IN6_IS_ADDR_LINKLOCAL(&dst6->sin6_addr)) {
				/*
				 * If the new destination or source is
				 * LINK_LOCAL, we must have both site and
				 * local scope in common. Don't set local
				 * scope though, since we must depend on the
				 * source being added implicitly. We cannot
				 * assume that all links are common just
				 * because we share one link.
				 */
6147 #if defined(__APPLE__) && !defined(__Userspace__)
6148 				/* Mac OS X currently doesn't have in6_getscope() */
6149 				stc.scope_id = src6->sin6_addr.s6_addr16[1];
6150 #endif
6151 				stc.local_scope = 0;
6152 				stc.site_scope = 1;
6153 				stc.ipv4_scope = 1;
				/*
				 * We start counting for the private address
				 * stuff at 1, since the link-local address we
				 * source from won't show up in our scoped
				 * count.
				 */
6160 				cnt_inits_to = 1;
6161 				/* pull out the scope_id from incoming pkt */
6162 			} else if (IN6_IS_ADDR_SITELOCAL(&src6->sin6_addr) ||
6163 			           IN6_IS_ADDR_SITELOCAL(&dst6->sin6_addr)) {
6164 				/*
6165 				 * If the new destination or source is
6166 				 * SITE_LOCAL then we must have site scope in
6167 				 * common.
6168 				 */
6169 				stc.site_scope = 1;
6170 			}
6171 			memcpy(&stc.laddress, &dst6->sin6_addr, sizeof(struct in6_addr));
6172 			stc.laddr_type = SCTP_IPV6_ADDRESS;
6173 			break;
6174 		}
6175 #endif
6176 #if defined(__Userspace__)
6177 		case AF_CONN:
6178 		{
6179 			/* lookup address */
6180 			stc.address[0] = 0;
6181 			stc.address[1] = 0;
6182 			stc.address[2] = 0;
6183 			stc.address[3] = 0;
6184 			memcpy(&stc.address, &srcconn->sconn_addr, sizeof(void *));
6185 			stc.addr_type = SCTP_CONN_ADDRESS;
6186 			/* local from address */
6187 			stc.laddress[0] = 0;
6188 			stc.laddress[1] = 0;
6189 			stc.laddress[2] = 0;
6190 			stc.laddress[3] = 0;
6191 			memcpy(&stc.laddress, &dstconn->sconn_addr, sizeof(void *));
6192 			stc.laddr_type = SCTP_CONN_ADDRESS;
6193 			/* scope_id is only for v6 */
6194 			stc.scope_id = 0;
6195 			break;
6196 		}
6197 #endif
6198 		default:
6199 			/* TSNH */
6200 			goto do_a_abort;
6201 			break;
6202 		}
6203 	} else {
6204 		/* set the scope per the existing tcb */
6205 
6206 #ifdef INET6
6207 		struct sctp_nets *lnet;
6208 #endif
6209 
6210 		stc.loopback_scope = asoc->scope.loopback_scope;
6211 		stc.ipv4_scope = asoc->scope.ipv4_local_scope;
6212 		stc.site_scope = asoc->scope.site_scope;
6213 		stc.local_scope = asoc->scope.local_scope;
6214 #ifdef INET6
6215 		/* Why do we not consider IPv4 LL addresses? */
6216 		TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
6217 			if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) {
6218 				if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) {
6219 					/*
6220 					 * if we have a LL address, start
6221 					 * counting at 1.
6222 					 */
6223 					cnt_inits_to = 1;
6224 				}
6225 			}
6226 		}
6227 #endif
6228 		/* use the net pointer */
6229 		to = (struct sockaddr *)&net->ro._l_addr;
6230 		switch (to->sa_family) {
6231 #ifdef INET
6232 		case AF_INET:
6233 			sin = (struct sockaddr_in *)to;
6234 			stc.address[0] = sin->sin_addr.s_addr;
6235 			stc.address[1] = 0;
6236 			stc.address[2] = 0;
6237 			stc.address[3] = 0;
6238 			stc.addr_type = SCTP_IPV4_ADDRESS;
6239 			if (net->src_addr_selected == 0) {
				/*
				 * strange case here, the INIT should have
				 * done the selection.
				 */
6244 				net->ro._s_addr = sctp_source_address_selection(inp,
6245 										stcb, (sctp_route_t *)&net->ro,
6246 										net, 0, vrf_id);
6247 				if (net->ro._s_addr == NULL) {
6248 					sctp_m_freem(op_err);
6249 					sctp_m_freem(m);
6250 					return;
6251 				}
6252 
6253 				net->src_addr_selected = 1;
6254 			}
6255 			stc.laddress[0] = net->ro._s_addr->address.sin.sin_addr.s_addr;
6256 			stc.laddress[1] = 0;
6257 			stc.laddress[2] = 0;
6258 			stc.laddress[3] = 0;
6259 			stc.laddr_type = SCTP_IPV4_ADDRESS;
6260 			/* scope_id is only for v6 */
6261 			stc.scope_id = 0;
6262 			break;
6263 #endif
6264 #ifdef INET6
6265 		case AF_INET6:
6266 			sin6 = (struct sockaddr_in6 *)to;
6267 			memcpy(&stc.address, &sin6->sin6_addr,
6268 			       sizeof(struct in6_addr));
6269 			stc.addr_type = SCTP_IPV6_ADDRESS;
6270 			stc.scope_id = sin6->sin6_scope_id;
6271 			if (net->src_addr_selected == 0) {
6272 				/*
6273 				 * strange case here, the INIT should have
6274 				 * done the selection.
6275 				 */
6276 				net->ro._s_addr = sctp_source_address_selection(inp,
6277 										stcb, (sctp_route_t *)&net->ro,
6278 										net, 0, vrf_id);
6279 				if (net->ro._s_addr == NULL) {
6280 					sctp_m_freem(op_err);
6281 					sctp_m_freem(m);
6282 					return;
6283 				}
6284 
6285 				net->src_addr_selected = 1;
6286 			}
6287 			memcpy(&stc.laddress, &net->ro._s_addr->address.sin6.sin6_addr,
6288 			       sizeof(struct in6_addr));
6289 			stc.laddr_type = SCTP_IPV6_ADDRESS;
6290 			break;
6291 #endif
6292 #if defined(__Userspace__)
6293 		case AF_CONN:
6294 			sconn = (struct sockaddr_conn *)to;
6295 			stc.address[0] = 0;
6296 			stc.address[1] = 0;
6297 			stc.address[2] = 0;
6298 			stc.address[3] = 0;
6299 			memcpy(&stc.address, &sconn->sconn_addr, sizeof(void *));
6300 			stc.addr_type = SCTP_CONN_ADDRESS;
6301 			stc.laddress[0] = 0;
6302 			stc.laddress[1] = 0;
6303 			stc.laddress[2] = 0;
6304 			stc.laddress[3] = 0;
6305 			memcpy(&stc.laddress, &sconn->sconn_addr, sizeof(void *));
6306 			stc.laddr_type = SCTP_CONN_ADDRESS;
6307 			stc.scope_id = 0;
6308 			break;
6309 #endif
6310 		}
6311 	}
6312 	/* Now lets put the SCTP header in place */
6313 	initack = mtod(m, struct sctp_init_ack_chunk *);
6314 	/* Save it off for quick ref */
6315 	stc.peers_vtag = ntohl(init_chk->init.initiate_tag);
6316 	/* who are we */
6317 	memcpy(stc.identification, SCTP_VERSION_STRING,
6318 	       min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification)));
6319 	memset(stc.reserved, 0, SCTP_RESERVE_SPACE);
6320 	/* now the chunk header */
6321 	initack->ch.chunk_type = SCTP_INITIATION_ACK;
6322 	initack->ch.chunk_flags = 0;
6323 	/* fill in later from mbuf we build */
6324 	initack->ch.chunk_length = 0;
6325 	/* place in my tag */
6326 	if ((asoc != NULL) &&
6327 	    ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
6328 	     (SCTP_GET_STATE(stcb) == SCTP_STATE_INUSE) ||
6329 	     (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED))) {
6330 		/* re-use the v-tags and init-seq here */
6331 		initack->init.initiate_tag = htonl(asoc->my_vtag);
6332 		initack->init.initial_tsn = htonl(asoc->init_seq_number);
6333 	} else {
6334 		uint32_t vtag, itsn;
6335 
6336 		if (asoc) {
6337 			atomic_add_int(&asoc->refcnt, 1);
6338 			SCTP_TCB_UNLOCK(stcb);
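			/*
			 * The reference taken above keeps the association
			 * from being freed while the TCB lock is dropped for
			 * tag selection; the lock is re-acquired and the
			 * reference released once the tag and initial TSN
			 * have been chosen.
			 */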
6339 		new_tag:
6340 			vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
6341 			if ((asoc->peer_supports_nat)  && (vtag == asoc->my_vtag)) {
				/* Got a duplicate vtag from a peer behind a
				 * NAT; make sure we don't use it.
				 */
6345 				goto new_tag;
6346 			}
6347 			initack->init.initiate_tag = htonl(vtag);
6348 			/* get a TSN to use too */
6349 			itsn = sctp_select_initial_TSN(&inp->sctp_ep);
6350 			initack->init.initial_tsn = htonl(itsn);
6351 			SCTP_TCB_LOCK(stcb);
6352 			atomic_add_int(&asoc->refcnt, -1);
6353 		} else {
6354 			SCTP_INP_INCR_REF(inp);
6355 			SCTP_INP_RUNLOCK(inp);
6356 			vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
6357 			initack->init.initiate_tag = htonl(vtag);
6358 			/* get a TSN to use too */
6359 			initack->init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep));
6360 			SCTP_INP_RLOCK(inp);
6361 			SCTP_INP_DECR_REF(inp);
6362 		}
6363 	}
	/* save away my tag too */
6365 	stc.my_vtag = initack->init.initiate_tag;
6366 
6367 	/* set up some of the credits. */
6368 	so = inp->sctp_socket;
6369 	if (so == NULL) {
6370 		/* memory problem */
6371 		sctp_m_freem(op_err);
6372 		sctp_m_freem(m);
6373 		return;
6374 	} else {
6375 		initack->init.a_rwnd = htonl(max(SCTP_SB_LIMIT_RCV(so), SCTP_MINIMAL_RWND));
6376 	}
6377 	/* set what I want */
6378 	his_limit = ntohs(init_chk->init.num_inbound_streams);
6379 	/* choose what I want */
6380 	if (asoc != NULL) {
6381 		if (asoc->streamoutcnt > asoc->pre_open_streams) {
6382 			i_want = asoc->streamoutcnt;
6383 		} else {
6384 			i_want = asoc->pre_open_streams;
6385 		}
6386 	} else {
6387 		i_want = inp->sctp_ep.pre_open_stream_count;
6388 	}
6389 	if (his_limit < i_want) {
6390 		/* I Want more :< */
6391 		initack->init.num_outbound_streams = init_chk->init.num_inbound_streams;
6392 	} else {
6393 		/* I can have what I want :> */
6394 		initack->init.num_outbound_streams = htons(i_want);
6395 	}
6396 	/* tell him his limit. */
6397 	initack->init.num_inbound_streams =
6398 		htons(inp->sctp_ep.max_open_streams_intome);
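
	/*
	 * In effect the INIT-ACK advertises min(his_limit, i_want) outbound
	 * streams and our own max_open_streams_intome as the inbound limit.
	 * For example, if the peer's INIT offered 5 inbound streams and we
	 * want 10, we announce 5 outbound streams here.
	 */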
6399 
6400 	/* adaptation layer indication parameter */
6401 	if (inp->sctp_ep.adaptation_layer_indicator_provided) {
6402 		parameter_len = (uint16_t)sizeof(struct sctp_adaptation_layer_indication);
6403 		ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t) + chunk_len);
6404 		ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
6405 		ali->ph.param_length = htons(parameter_len);
6406 		ali->indication = htonl(inp->sctp_ep.adaptation_layer_indicator);
6407 		chunk_len += parameter_len;
6408 	}
6409 
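	/*
	 * The next three parameters (ECN, PR-SCTP and the NAT friendly
	 * indication) are "flag" parameters: they consist of nothing but the
	 * 4 byte parameter header, so parameter_len is simply
	 * sizeof(struct sctp_paramhdr) and no padding is needed for them.
	 */
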
6410 	/* ECN parameter */
6411 	if (((asoc != NULL) && (asoc->ecn_supported == 1)) ||
6412 	    ((asoc == NULL) && (inp->ecn_supported == 1))) {
6413 		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
6414 		ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
6415 		ph->param_type = htons(SCTP_ECN_CAPABLE);
6416 		ph->param_length = htons(parameter_len);
6417 		chunk_len += parameter_len;
6418 	}
6419 
6420 	/* PR-SCTP supported parameter */
6421 	if (((asoc != NULL) && (asoc->prsctp_supported == 1)) ||
6422 	    ((asoc == NULL) && (inp->prsctp_supported == 1))) {
6423 		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
6424 		ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
6425 		ph->param_type = htons(SCTP_PRSCTP_SUPPORTED);
6426 		ph->param_length = htons(parameter_len);
6427 		chunk_len += parameter_len;
6428 	}
6429 
6430 	/* Add NAT friendly parameter */
6431 	if (nat_friendly) {
6432 		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
6433 		ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
6434 		ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
6435 		ph->param_length = htons(parameter_len);
6436 		chunk_len += parameter_len;
6437 	}
6438 
6439 	/* And now tell the peer which extensions we support */
6440 	num_ext = 0;
6441 	pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t) + chunk_len);
6442 	if (((asoc != NULL) && (asoc->prsctp_supported == 1)) ||
6443 	    ((asoc == NULL) && (inp->prsctp_supported == 1))) {
6444 		pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
6445 		if (((asoc != NULL) && (asoc->idata_supported == 1)) ||
6446 		    ((asoc == NULL) && (inp->idata_supported == 1))) {
6447 			pr_supported->chunk_types[num_ext++] = SCTP_IFORWARD_CUM_TSN;
6448 		}
6449 	}
6450 	if (((asoc != NULL) && (asoc->auth_supported == 1)) ||
6451 	    ((asoc == NULL) && (inp->auth_supported == 1))) {
6452 		pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
6453 	}
6454 	if (((asoc != NULL) && (asoc->asconf_supported == 1)) ||
6455 	    ((asoc == NULL) && (inp->asconf_supported == 1))) {
6456 		pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
6457 		pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
6458 	}
6459 	if (((asoc != NULL) && (asoc->reconfig_supported == 1)) ||
6460 	    ((asoc == NULL) && (inp->reconfig_supported == 1))) {
6461 		pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
6462 	}
6463 	if (((asoc != NULL) && (asoc->idata_supported == 1)) ||
6464 	    ((asoc == NULL) && (inp->idata_supported == 1))) {
6465 		pr_supported->chunk_types[num_ext++] = SCTP_IDATA;
6466 	}
6467 	if (((asoc != NULL) && (asoc->nrsack_supported == 1)) ||
6468 	    ((asoc == NULL) && (inp->nrsack_supported == 1))) {
6469 		pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
6470 	}
6471 	if (((asoc != NULL) && (asoc->pktdrop_supported == 1)) ||
6472 	    ((asoc == NULL) && (inp->pktdrop_supported == 1))) {
6473 		pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
6474 	}
6475 	if (num_ext > 0) {
6476 		parameter_len = (uint16_t)sizeof(struct sctp_supported_chunk_types_param) + num_ext;
6477 		pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
6478 		pr_supported->ph.param_length = htons(parameter_len);
6479 		padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6480 		chunk_len += parameter_len;
6481 	}
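
	/*
	 * The supported extensions parameter carries one byte per listed
	 * chunk type on top of the parameter header, so (assuming the
	 * structure is just the header plus a flexible chunk_types[] array)
	 * three extensions would give parameter_len = 4 + 3 = 7, which
	 * SCTP_SIZE32() rounds up to 8 on the wire with a single pad byte.
	 */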
6482 
6483 	/* add authentication parameters */
6484 	if (((asoc != NULL) && (asoc->auth_supported == 1)) ||
6485 	    ((asoc == NULL) && (inp->auth_supported == 1))) {
6486 		struct sctp_auth_random *randp;
6487 		struct sctp_auth_hmac_algo *hmacs;
6488 		struct sctp_auth_chunk_list *chunks;
6489 
6490 		if (padding_len > 0) {
6491 			memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
6492 			chunk_len += padding_len;
6493 			padding_len = 0;
6494 		}
6495 		/* generate and add RANDOM parameter */
6496 		randp = (struct sctp_auth_random *)(mtod(m, caddr_t) + chunk_len);
6497 		parameter_len = (uint16_t)sizeof(struct sctp_auth_random) +
6498 		                SCTP_AUTH_RANDOM_SIZE_DEFAULT;
6499 		randp->ph.param_type = htons(SCTP_RANDOM);
6500 		randp->ph.param_length = htons(parameter_len);
6501 		SCTP_READ_RANDOM(randp->random_data, SCTP_AUTH_RANDOM_SIZE_DEFAULT);
6502 		padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6503 		chunk_len += parameter_len;
6504 
6505 		if (padding_len > 0) {
6506 			memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
6507 			chunk_len += padding_len;
6508 			padding_len = 0;
6509 		}
6510 		/* add HMAC_ALGO parameter */
6511 		hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t) + chunk_len);
6512 		parameter_len = (uint16_t)sizeof(struct sctp_auth_hmac_algo) +
6513 		                sctp_serialize_hmaclist(inp->sctp_ep.local_hmacs,
6514 		                                        (uint8_t *)hmacs->hmac_ids);
6515 		hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
6516 		hmacs->ph.param_length = htons(parameter_len);
6517 		padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6518 		chunk_len += parameter_len;
6519 
6520 		if (padding_len > 0) {
6521 			memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
6522 			chunk_len += padding_len;
6523 			padding_len = 0;
6524 		}
6525 		/* add CHUNKS parameter */
6526 		chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t) + chunk_len);
6527 		parameter_len = (uint16_t)sizeof(struct sctp_auth_chunk_list) +
6528 		                sctp_serialize_auth_chunks(inp->sctp_ep.local_auth_chunks,
6529 		                                           chunks->chunk_types);
6530 		chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
6531 		chunks->ph.param_length = htons(parameter_len);
6532 		padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6533 		chunk_len += parameter_len;
6534 	}
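	/*
	 * At this point the INIT-ACK chunk holds the fixed header followed
	 * by the optional adaptation layer, ECN, PR-SCTP, NAT, supported
	 * extensions, RANDOM, HMAC-ALGO and CHUNKS parameters, each padded
	 * before its successor; any padding still owed for the last
	 * parameter remains in padding_len and is written further below.
	 */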
6535 	SCTP_BUF_LEN(m) = chunk_len;
6536 	m_last = m;
6537 	/* now the addresses */
	/* To optimize this we could put the scoping stuff
	 * into a structure and remove the individual uint8's from
	 * the stc structure. Then we could just copy the
	 * address straight into the stc.. but for now this is a
	 * quick hack to get the address stuff teased apart.
	 */
6544 	scp.ipv4_addr_legal = stc.ipv4_addr_legal;
6545 	scp.ipv6_addr_legal = stc.ipv6_addr_legal;
6546 #if defined(__Userspace__)
6547 	scp.conn_addr_legal = stc.conn_addr_legal;
6548 #endif
6549 	scp.loopback_scope = stc.loopback_scope;
6550 	scp.ipv4_local_scope = stc.ipv4_scope;
6551 	scp.local_scope = stc.local_scope;
6552 	scp.site_scope = stc.site_scope;
6553 	m_last = sctp_add_addresses_to_i_ia(inp, stcb, &scp, m_last,
6554 	                                    cnt_inits_to,
6555 	                                    &padding_len, &chunk_len);
	/* padding_len can only be positive if no addresses have been added */
6557 	if (padding_len > 0) {
6558 		memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
6559 		chunk_len += padding_len;
6560 		SCTP_BUF_LEN(m) += padding_len;
6561 		padding_len = 0;
6562 	}
6563 
6564 	/* tack on the operational error if present */
6565 	if (op_err) {
6566 		parameter_len = 0;
6567 		for (m_tmp = op_err; m_tmp != NULL; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
6568 			parameter_len += SCTP_BUF_LEN(m_tmp);
6569 		}
6570 		padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6571 		SCTP_BUF_NEXT(m_last) = op_err;
6572 		while (SCTP_BUF_NEXT(m_last) != NULL) {
6573 			m_last = SCTP_BUF_NEXT(m_last);
6574 		}
6575 		chunk_len += parameter_len;
6576 	}
6577 	if (padding_len > 0) {
6578 		m_last = sctp_add_pad_tombuf(m_last, padding_len);
6579 		if (m_last == NULL) {
6580 			/* Houston we have a problem, no space */
6581 			sctp_m_freem(m);
6582 			return;
6583 		}
6584 		chunk_len += padding_len;
6585 		padding_len = 0;
6586 	}
6587 	/* Now we must build a cookie */
6588 	m_cookie = sctp_add_cookie(init_pkt, offset, m, 0, &stc, &signature);
6589 	if (m_cookie == NULL) {
6590 		/* memory problem */
6591 		sctp_m_freem(m);
6592 		return;
6593 	}
6594 	/* Now append the cookie to the end and update the space/size */
6595 	SCTP_BUF_NEXT(m_last) = m_cookie;
6596 	parameter_len = 0;
6597 	for (m_tmp = m_cookie; m_tmp != NULL; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
6598 		parameter_len += SCTP_BUF_LEN(m_tmp);
6599 		if (SCTP_BUF_NEXT(m_tmp) == NULL) {
6600 			m_last = m_tmp;
6601 		}
6602 	}
6603 	padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6604 	chunk_len += parameter_len;
6605 
6606 	/* Place in the size, but we don't include
6607 	 * the last pad (if any) in the INIT-ACK.
6608 	 */
6609 	initack->ch.chunk_length = htons(chunk_len);
6610 
	/* Time to sign the cookie. We don't sign over the cookie
	 * signature itself, thus we set the trailer.
	 */
6614 	(void)sctp_hmac_m(SCTP_HMAC,
6615 			  (uint8_t *)inp->sctp_ep.secret_key[(int)(inp->sctp_ep.current_secret_number)],
6616 			  SCTP_SECRET_SIZE, m_cookie, sizeof(struct sctp_paramhdr),
6617 			  (uint8_t *)signature, SCTP_SIGNATURE_SIZE);
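	/*
	 * The digest is computed over the cookie contents, skipping the
	 * parameter header at the front and the SCTP_SIGNATURE_SIZE trailer
	 * that holds the signature itself; the result is written through the
	 * signature pointer obtained from sctp_add_cookie() above.
	 */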
6618 #if defined(__Userspace__)
6619 	/*
6620 	 * Don't put AF_CONN addresses on the wire, in case this is critical
6621 	 * for the application. However, they are protected by the HMAC and
6622 	 * need to be reconstructed before checking the HMAC.
6623 	 * Clearing is only done in the mbuf chain, since the local stc is
6624 	 * not used anymore.
6625 	 */
6626 	if (stc.addr_type == SCTP_CONN_ADDRESS) {
6627 		const void *p = NULL;
6628 
6629 		m_copyback(m_cookie, sizeof(struct sctp_paramhdr) + offsetof(struct sctp_state_cookie, address),
6630 		           (int)sizeof(void *), (caddr_t)&p);
6631 	}
6632 	if (stc.laddr_type == SCTP_CONN_ADDRESS) {
6633 		const void *p = NULL;
6634 
6635 		m_copyback(m_cookie, sizeof(struct sctp_paramhdr) + offsetof(struct sctp_state_cookie, laddress),
6636 		           (int)sizeof(void *), (caddr_t)&p);
6637 	}
6638 #endif
	/*
	 * We pass 0 here to NOT set IP_DF if it's IPv4; we ignore the return
	 * value here since the timer will drive a retransmission.
	 */
6643 	if (padding_len > 0) {
6644 		if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
6645 			sctp_m_freem(m);
6646 			return;
6647 		}
6648 	}
6649 	if (stc.loopback_scope) {
6650 		over_addr = (union sctp_sockstore *)dst;
6651 	} else {
6652 		over_addr = NULL;
6653 	}
6654 
6655 	if ((error = sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0,
6656 	                                        0, 0,
6657 	                                        inp->sctp_lport, sh->src_port, init_chk->init.initiate_tag,
6658 	                                        port, over_addr,
6659 #if defined(__FreeBSD__) && !defined(__Userspace__)
6660 	                                        mflowtype, mflowid,
6661 #endif
6662 	                                        SCTP_SO_NOT_LOCKED))) {
6663 		SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak send error %d\n", error);
6664 		if (error == ENOBUFS) {
6665 			if (asoc != NULL) {
6666 				asoc->ifp_had_enobuf = 1;
6667 			}
6668 			SCTP_STAT_INCR(sctps_lowlevelerr);
6669 		}
6670 	} else {
6671 		if (asoc != NULL) {
6672 			asoc->ifp_had_enobuf = 0;
6673 		}
6674 	}
6675 	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
6676 }
6677 
6678 static void
6679 sctp_prune_prsctp(struct sctp_tcb *stcb,
6680     struct sctp_association *asoc,
6681     struct sctp_sndrcvinfo *srcv,
6682     int dataout)
6683 {
6684 	int freed_spc = 0;
6685 	struct sctp_tmit_chunk *chk, *nchk;
6686 
6687 	SCTP_TCB_LOCK_ASSERT(stcb);
6688 	if ((asoc->prsctp_supported) &&
6689 	    (asoc->sent_queue_cnt_removeable > 0)) {
6690 		TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
			/*
			 * Look for chunks marked with the PR_SCTP flag AND
			 * the buffer space flag. If the one being sent is of
			 * equal or greater priority, then purge the old one
			 * and free some space.
			 */
6697 			if (PR_SCTP_BUF_ENABLED(chk->flags)) {
6698 				/*
6699 				 * This one is PR-SCTP AND buffer space
6700 				 * limited type
6701 				 */
6702 				if (chk->rec.data.timetodrop.tv_sec > (long)srcv->sinfo_timetolive) {
					/*
					 * Lower numbers equate to higher
					 * priority. So if the one we are
					 * looking at has a larger (i.e.
					 * lower priority) value, we want to
					 * drop the data and NOT retransmit
					 * it.
					 */
6710 					if (chk->data) {
6711 						/*
6712 						 * We release the book_size
6713 						 * if the mbuf is here
6714 						 */
6715 						int ret_spc;
6716 						uint8_t sent;
6717 
6718 						if (chk->sent > SCTP_DATAGRAM_UNSENT)
6719 							sent = 1;
6720 						else
6721 							sent = 0;
6722 						ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6723 						    sent,
6724 						    SCTP_SO_LOCKED);
6725 						freed_spc += ret_spc;
6726 						if (freed_spc >= dataout) {
6727 							return;
6728 						}
6729 					}	/* if chunk was present */
6730 				}	/* if of sufficient priority */
6731 			}	/* if chunk has enabled */
6732 		}		/* tailqforeach */
6733 
6734 		TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
6735 			/* Here we must move to the sent queue and mark */
6736 			if (PR_SCTP_BUF_ENABLED(chk->flags)) {
6737 				if (chk->rec.data.timetodrop.tv_sec > (long)srcv->sinfo_timetolive) {
6738 					if (chk->data) {
6739 						/*
6740 						 * We release the book_size
6741 						 * if the mbuf is here
6742 						 */
6743 						int ret_spc;
6744 
6745 						ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6746 						    0, SCTP_SO_LOCKED);
6747 
6748 						freed_spc += ret_spc;
6749 						if (freed_spc >= dataout) {
6750 							return;
6751 						}
6752 					}	/* end if chk->data */
6753 				}	/* end if right class */
6754 			}	/* end if chk pr-sctp */
6755 		}		/* tailqforeachsafe (chk) */
6756 	}			/* if enabled in asoc */
6757 }
6758 
6759 int
6760 sctp_get_frag_point(struct sctp_tcb *stcb,
6761     struct sctp_association *asoc)
6762 {
6763 	int siz, ovh;
6764 
	/*
	 * For endpoints that have both v6 and v4 addresses we must reserve
	 * room for the IPv6 header; for those that are only dealing with v4
	 * we can use a larger frag point.
	 */
6770 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
6771 		ovh = SCTP_MIN_OVERHEAD;
6772 	} else {
6773 #if defined(__Userspace__)
6774 		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) {
6775 			ovh = sizeof(struct sctphdr);
6776 		} else {
6777 			ovh = SCTP_MIN_V4_OVERHEAD;
6778 		}
6779 #else
6780 		ovh = SCTP_MIN_V4_OVERHEAD;
6781 #endif
6782 	}
6783 	ovh += SCTP_DATA_CHUNK_OVERHEAD(stcb);
6784 	if (stcb->asoc.sctp_frag_point > asoc->smallest_mtu)
6785 		siz = asoc->smallest_mtu - ovh;
6786 	else
6787 		siz = (stcb->asoc.sctp_frag_point - ovh);
6788 	/*
6789 	 * if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) {
6790 	 */
6791 	/* A data chunk MUST fit in a cluster */
6792 	/* siz = (MCLBYTES - sizeof(struct sctp_data_chunk)); */
6793 	/* } */
6794 
6795 	/* adjust for an AUTH chunk if DATA requires auth */
6796 	if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks))
6797 		siz -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
6798 
6799 	if (siz % 4) {
6800 		/* make it an even word boundary please */
6801 		siz -= (siz % 4);
6802 	}
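	/*
	 * Rough worked example (the exact constants live in sctp_constants.h;
	 * the numbers here are only illustrative): for a v4-only endpoint
	 * with a smallest MTU of 1500 and no AUTH requirement on DATA, ovh is
	 * SCTP_MIN_V4_OVERHEAD plus the DATA chunk header, leaving on the
	 * order of 1500 - 32 - 16 = 1452 usable payload bytes per fragment,
	 * which is already a multiple of 4.
	 */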
6803 	return (siz);
6804 }
6805 
6806 static void
6807 sctp_set_prsctp_policy(struct sctp_stream_queue_pending *sp)
6808 {
6809 	/*
6810 	 * We assume that the user wants PR_SCTP_TTL if the user
6811 	 * provides a positive lifetime but does not specify any
6812 	 * PR_SCTP policy.
6813 	 */
6814 	if (PR_SCTP_ENABLED(sp->sinfo_flags)) {
6815 		sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
6816 	} else if (sp->timetolive > 0) {
6817 		sp->sinfo_flags |= SCTP_PR_SCTP_TTL;
6818 		sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
6819 	} else {
6820 		return;
6821 	}
6822 	switch (PR_SCTP_POLICY(sp->sinfo_flags)) {
6823 	case CHUNK_FLAGS_PR_SCTP_BUF:
6824 		/*
6825 		 * Time to live is a priority stored in tv_sec when
6826 		 * doing the buffer drop thing.
6827 		 */
6828 		sp->ts.tv_sec = sp->timetolive;
6829 		sp->ts.tv_usec = 0;
6830 		break;
6831 	case CHUNK_FLAGS_PR_SCTP_TTL:
6832 	{
6833 		struct timeval tv;
6834 		(void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6835 		tv.tv_sec = sp->timetolive / 1000;
6836 		tv.tv_usec = (sp->timetolive * 1000) % 1000000;
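		/*
		 * timetolive is given in milliseconds; e.g. 2500 ms becomes
		 * tv_sec = 2 and tv_usec = 500000, which is then added to
		 * the timestamp taken just above.
		 */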
6837 		/* TODO sctp_constants.h needs alternative time macros when
6838 		 *  _KERNEL is undefined.
6839 		 */
6840 #if !(defined(__FreeBSD__) && !defined(__Userspace__))
6841 		timeradd(&sp->ts, &tv, &sp->ts);
6842 #else
6843 		timevaladd(&sp->ts, &tv);
6844 #endif
6845 	}
6846 		break;
6847 	case CHUNK_FLAGS_PR_SCTP_RTX:
		/*
		 * Time to live is the number of retransmissions,
		 * stored in tv_sec.
		 */
6852 		sp->ts.tv_sec = sp->timetolive;
6853 		sp->ts.tv_usec = 0;
6854 		break;
6855 	default:
6856 		SCTPDBG(SCTP_DEBUG_USRREQ1,
6857 			"Unknown PR_SCTP policy %u.\n",
6858 			PR_SCTP_POLICY(sp->sinfo_flags));
6859 		break;
6860 	}
6861 }
6862 
6863 static int
6864 sctp_msg_append(struct sctp_tcb *stcb,
6865 		struct sctp_nets *net,
6866 		struct mbuf *m,
6867 		struct sctp_sndrcvinfo *srcv, int hold_stcb_lock)
6868 {
6869 	int error = 0;
6870 	struct mbuf *at;
6871 	struct sctp_stream_queue_pending *sp = NULL;
6872 	struct sctp_stream_out *strm;
6873 
6874 	/* Given an mbuf chain, put it
6875 	 * into the association send queue and
6876 	 * place it on the wheel
6877 	 */
6878 	if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) {
6879 		/* Invalid stream number */
6880 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
6881 		error = EINVAL;
6882 		goto out_now;
6883 	}
6884 	if ((stcb->asoc.stream_locked) &&
6885 	    (stcb->asoc.stream_locked_on != srcv->sinfo_stream)) {
6886 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
6887 		error = EINVAL;
6888 		goto out_now;
6889 	}
6890 	strm = &stcb->asoc.strmout[srcv->sinfo_stream];
6891 	/* Now can we send this? */
6892 	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) ||
6893 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
6894 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
6895 	    (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)) {
6896 		/* got data while shutting down */
6897 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
6898 		error = ECONNRESET;
6899 		goto out_now;
6900 	}
6901 	sctp_alloc_a_strmoq(stcb, sp);
6902 	if (sp == NULL) {
6903 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6904 		error = ENOMEM;
6905 		goto out_now;
6906 	}
6907 	sp->sinfo_flags = srcv->sinfo_flags;
6908 	sp->timetolive = srcv->sinfo_timetolive;
6909 	sp->ppid = srcv->sinfo_ppid;
6910 	sp->context = srcv->sinfo_context;
6911 	sp->fsn = 0;
6912 	if (sp->sinfo_flags & SCTP_ADDR_OVER) {
6913 		sp->net = net;
6914 		atomic_add_int(&sp->net->ref_count, 1);
6915 	} else {
6916 		sp->net = NULL;
6917 	}
6918 	(void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6919 	sp->sid = srcv->sinfo_stream;
6920 	sp->msg_is_complete = 1;
6921 	sp->sender_all_done = 1;
6922 	sp->some_taken = 0;
6923 	sp->data = m;
6924 	sp->tail_mbuf = NULL;
6925 	sctp_set_prsctp_policy(sp);
	/* We could in theory (for sendall) pass the length
	 * in, but we would still have to hunt through the
	 * chain since we need to set up the tail_mbuf.
	 */
6930 	sp->length = 0;
6931 	for (at = m; at; at = SCTP_BUF_NEXT(at)) {
6932 		if (SCTP_BUF_NEXT(at) == NULL)
6933 			sp->tail_mbuf = at;
6934 		sp->length += SCTP_BUF_LEN(at);
6935 	}
6936 	if (srcv->sinfo_keynumber_valid) {
6937 		sp->auth_keyid = srcv->sinfo_keynumber;
6938 	} else {
6939 		sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
6940 	}
6941 	if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
6942 		sctp_auth_key_acquire(stcb, sp->auth_keyid);
6943 		sp->holds_key_ref = 1;
6944 	}
6945 	if (hold_stcb_lock == 0) {
6946 		SCTP_TCB_SEND_LOCK(stcb);
6947 	}
6948 	sctp_snd_sb_alloc(stcb, sp->length);
6949 	atomic_add_int(&stcb->asoc.stream_queue_cnt, 1);
6950 	TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
6951 	stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, &stcb->asoc, strm, sp, 1);
6952 	m = NULL;
6953 	if (hold_stcb_lock == 0) {
6954 		SCTP_TCB_SEND_UNLOCK(stcb);
6955 	}
6956 out_now:
6957 	if (m) {
6958 		sctp_m_freem(m);
6959 	}
6960 	return (error);
6961 }
6962 
6963 static struct mbuf *
6964 sctp_copy_mbufchain(struct mbuf *clonechain,
6965 		    struct mbuf *outchain,
6966 		    struct mbuf **endofchain,
6967 		    int can_take_mbuf,
6968 		    int sizeofcpy,
6969 		    uint8_t copy_by_ref)
6970 {
6971 	struct mbuf *m;
6972 	struct mbuf *appendchain;
6973 	caddr_t cp;
6974 	int len;
6975 
6976 	if (endofchain == NULL) {
6977 		/* error */
6978 	error_out:
6979 		if (outchain)
6980 			sctp_m_freem(outchain);
6981 		return (NULL);
6982 	}
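	/*
	 * Three strategies are used below: take the clonechain as-is when
	 * the caller allows it, copy small payloads by value into the
	 * trailing space of the existing chain (avoiding extra mbufs), or
	 * fall back to SCTP_M_COPYM() for large or copy-by-reference data.
	 */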
6983 	if (can_take_mbuf) {
6984 		appendchain = clonechain;
6985 	} else {
6986 		if (!copy_by_ref &&
6987 		    (sizeofcpy <= (int)((((SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count) - 1) * MLEN) + MHLEN)))) {
			/* It's not in a cluster */
6989 			if (*endofchain == NULL) {
6990 				/* lets get a mbuf cluster */
6991 				if (outchain == NULL) {
6992 					/* This is the general case */
6993 				new_mbuf:
6994 					outchain = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
6995 					if (outchain == NULL) {
6996 						goto error_out;
6997 					}
6998 					SCTP_BUF_LEN(outchain) = 0;
6999 					*endofchain = outchain;
7000 					/* get the prepend space */
7001 					SCTP_BUF_RESV_UF(outchain, (SCTP_FIRST_MBUF_RESV+4));
7002 				} else {
7003 					/* We really should not get a NULL in endofchain */
7004 					/* find end */
7005 					m = outchain;
7006 					while (m) {
7007 						if (SCTP_BUF_NEXT(m) == NULL) {
7008 							*endofchain = m;
7009 							break;
7010 						}
7011 						m = SCTP_BUF_NEXT(m);
7012 					}
7013 					/* sanity */
7014 					if (*endofchain == NULL) {
7015 						/* huh, TSNH XXX maybe we should panic */
7016 						sctp_m_freem(outchain);
7017 						goto new_mbuf;
7018 					}
7019 				}
7020 				/* get the new end of length */
7021 				len = (int)M_TRAILINGSPACE(*endofchain);
7022 			} else {
7023 				/* how much is left at the end? */
7024 				len = (int)M_TRAILINGSPACE(*endofchain);
7025 			}
7026 			/* Find the end of the data, for appending */
7027 			cp = (mtod((*endofchain), caddr_t) + SCTP_BUF_LEN((*endofchain)));
7028 
7029 			/* Now lets copy it out */
7030 			if (len >= sizeofcpy) {
7031 				/* It all fits, copy it in */
7032 				m_copydata(clonechain, 0, sizeofcpy, cp);
7033 				SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
7034 			} else {
7035 				/* fill up the end of the chain */
7036 				if (len > 0) {
7037 					m_copydata(clonechain, 0, len, cp);
7038 					SCTP_BUF_LEN((*endofchain)) += len;
7039 					/* now we need another one */
7040 					sizeofcpy -= len;
7041 				}
7042 				m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
7043 				if (m == NULL) {
7044 					/* We failed */
7045 					goto error_out;
7046 				}
7047 				SCTP_BUF_NEXT((*endofchain)) = m;
7048 				*endofchain = m;
7049 				cp = mtod((*endofchain), caddr_t);
7050 				m_copydata(clonechain, len, sizeofcpy, cp);
7051 				SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
7052 			}
7053 			return (outchain);
7054 		} else {
7055 			/* copy the old fashion way */
7056 			appendchain = SCTP_M_COPYM(clonechain, 0, M_COPYALL, M_NOWAIT);
7057 #ifdef SCTP_MBUF_LOGGING
7058 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
7059 				sctp_log_mbc(appendchain, SCTP_MBUF_ICOPY);
7060 			}
7061 #endif
7062 		}
7063 	}
7064 	if (appendchain == NULL) {
7065 		/* error */
7066 		if (outchain)
7067 			sctp_m_freem(outchain);
7068 		return (NULL);
7069 	}
7070 	if (outchain) {
7071 		/* tack on to the end */
7072 		if (*endofchain != NULL) {
7073 			SCTP_BUF_NEXT(((*endofchain))) = appendchain;
7074 		} else {
7075 			m = outchain;
7076 			while (m) {
7077 				if (SCTP_BUF_NEXT(m) == NULL) {
7078 					SCTP_BUF_NEXT(m) = appendchain;
7079 					break;
7080 				}
7081 				m = SCTP_BUF_NEXT(m);
7082 			}
7083 		}
7084 		/*
7085 		 * save off the end and update the end-chain
7086 		 * position
7087 		 */
7088 		m = appendchain;
7089 		while (m) {
7090 			if (SCTP_BUF_NEXT(m) == NULL) {
7091 				*endofchain = m;
7092 				break;
7093 			}
7094 			m = SCTP_BUF_NEXT(m);
7095 		}
7096 		return (outchain);
7097 	} else {
7098 		/* save off the end and update the end-chain position */
7099 		m = appendchain;
7100 		while (m) {
7101 			if (SCTP_BUF_NEXT(m) == NULL) {
7102 				*endofchain = m;
7103 				break;
7104 			}
7105 			m = SCTP_BUF_NEXT(m);
7106 		}
7107 		return (appendchain);
7108 	}
7109 }
7110 
7111 static int
7112 sctp_med_chunk_output(struct sctp_inpcb *inp,
7113 		      struct sctp_tcb *stcb,
7114 		      struct sctp_association *asoc,
7115 		      int *num_out,
7116 		      int *reason_code,
7117 		      int control_only, int from_where,
7118 		      struct timeval *now, int *now_filled, int frag_point, int so_locked);
7119 
7120 static void
7121 sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
7122     uint32_t val SCTP_UNUSED)
7123 {
7124 	struct sctp_copy_all *ca;
7125 	struct mbuf *m;
7126 	int ret = 0;
7127 	int added_control = 0;
7128 	int un_sent, do_chunk_output = 1;
7129 	struct sctp_association *asoc;
7130 	struct sctp_nets *net;
7131 
7132 	ca = (struct sctp_copy_all *)ptr;
7133 	if (ca->m == NULL) {
7134 		return;
7135 	}
7136 	if (ca->inp != inp) {
7137 		/* TSNH */
7138 		return;
7139 	}
7140 	if (ca->sndlen > 0) {
7141 		m = SCTP_M_COPYM(ca->m, 0, M_COPYALL, M_NOWAIT);
7142 		if (m == NULL) {
7143 			/* can't copy so we are done */
7144 			ca->cnt_failed++;
7145 			return;
7146 		}
7147 #ifdef SCTP_MBUF_LOGGING
7148 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
7149 			sctp_log_mbc(m, SCTP_MBUF_ICOPY);
7150 		}
7151 #endif
7152 	} else {
7153 		m = NULL;
7154 	}
7155 	SCTP_TCB_LOCK_ASSERT(stcb);
7156 	if (stcb->asoc.alternate) {
7157 		net = stcb->asoc.alternate;
7158 	} else {
7159 		net = stcb->asoc.primary_destination;
7160 	}
7161 	if (ca->sndrcv.sinfo_flags & SCTP_ABORT) {
7162 		/* Abort this assoc with m as the user defined reason */
7163 		if (m != NULL) {
7164 			SCTP_BUF_PREPEND(m, sizeof(struct sctp_paramhdr), M_NOWAIT);
7165 		} else {
			m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
			                          0, M_NOWAIT, 1, MT_DATA);
			if (m != NULL) {
				/* only touch the mbuf if the allocation succeeded */
				SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
			}
7169 		}
7170 		if (m != NULL) {
7171 			struct sctp_paramhdr *ph;
7172 
7173 			ph = mtod(m, struct sctp_paramhdr *);
7174 			ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
7175 			ph->param_length = htons((uint16_t)(sizeof(struct sctp_paramhdr) + ca->sndlen));
7176 		}
		/* We add one here to keep the assoc from
		 * disappearing on us.
		 */
7180 		atomic_add_int(&stcb->asoc.refcnt, 1);
7181 		sctp_abort_an_association(inp, stcb, m, SCTP_SO_NOT_LOCKED);
		/* sctp_abort_an_association calls sctp_free_asoc(), but
		 * free_asoc will NOT free the association since we
		 * incremented the refcnt. We do this to prevent it from
		 * being freed and things getting tricky, since we could
		 * otherwise end up (from free_asoc) calling inpcb_free,
		 * which would take a recursive lock call on the iterator
		 * lock. As a consequence the stcb comes back to us
		 * unlocked: free_asoc returns with either no TCB or the
		 * TCB unlocked, so we must relock it here (it is unlocked
		 * again in the iterator timer).
		 */
7193 		SCTP_TCB_LOCK(stcb);
7194 		atomic_add_int(&stcb->asoc.refcnt, -1);
7195 		goto no_chunk_output;
7196 	} else {
7197 		if (m) {
7198 			ret = sctp_msg_append(stcb, net, m,
7199 					      &ca->sndrcv, 1);
7200 		}
7201 		asoc = &stcb->asoc;
7202 		if (ca->sndrcv.sinfo_flags & SCTP_EOF) {
7203 			/* shutdown this assoc */
7204 			if (TAILQ_EMPTY(&asoc->send_queue) &&
7205 			    TAILQ_EMPTY(&asoc->sent_queue) &&
7206 			    sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED) == 0) {
7207 				if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) {
7208 					goto abort_anyway;
7209 				}
7210 				/* there is nothing queued to send, so I'm done... */
7211 				if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
7212 				    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
7213 				    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
7214 					/* only send SHUTDOWN the first time through */
7215 					if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
7216 						SCTP_STAT_DECR_GAUGE32(sctps_currestab);
7217 					}
7218 					SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
7219 					sctp_stop_timers_for_shutdown(stcb);
7220 					sctp_send_shutdown(stcb, net);
7221 					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
7222 							 net);
7223 					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
7224 					                 NULL);
7225 					added_control = 1;
7226 					do_chunk_output = 0;
7227 				}
7228 			} else {
7229 				/*
7230 				 * we still got (or just got) data to send, so set
7231 				 * SHUTDOWN_PENDING
7232 				 */
				/*
				 * XXX The sockets draft says that SCTP_EOF
				 * should be sent with no data. Currently, we
				 * will allow user data to be sent first and
				 * then move to SHUTDOWN-PENDING.
				 */
7239 				if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
7240 				    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
7241 				    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
7242 					if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) {
7243 						SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
7244 					}
7245 					SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
7246 					if (TAILQ_EMPTY(&asoc->send_queue) &&
7247 					    TAILQ_EMPTY(&asoc->sent_queue) &&
7248 					    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
7249 						struct mbuf *op_err;
7250 						char msg[SCTP_DIAG_INFO_LEN];
7251 
7252 					abort_anyway:
7253 						SCTP_SNPRINTF(msg, sizeof(msg),
7254 						              "%s:%d at %s", __FILE__, __LINE__, __func__);
7255 						op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
7256 						                             msg);
7257 						atomic_add_int(&stcb->asoc.refcnt, 1);
7258 						sctp_abort_an_association(stcb->sctp_ep, stcb,
7259 									  op_err, SCTP_SO_NOT_LOCKED);
7260 						atomic_add_int(&stcb->asoc.refcnt, -1);
7261 						goto no_chunk_output;
7262 					}
7263 					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
7264 					                 NULL);
7265 				}
7266 			}
7267 		}
7268 	}
7269 	un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
7270 		   (stcb->asoc.stream_queue_cnt * SCTP_DATA_CHUNK_OVERHEAD(stcb)));
7271 
7272 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
7273 	    (stcb->asoc.total_flight > 0) &&
7274 	    (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
7275 		do_chunk_output = 0;
7276 	}
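	/*
	 * The check above is the Nagle-style heuristic: with SCTP_NODELAY
	 * off, data already in flight, and less than a full MTU's worth left
	 * unsent, skip the immediate output and let it be bundled later.
	 */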
7277 	if (do_chunk_output)
7278 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_NOT_LOCKED);
7279 	else if (added_control) {
7280 		int num_out, reason, now_filled = 0;
7281 		struct timeval now;
7282 		int frag_point;
7283 
7284 		frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
7285 		(void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
7286 				      &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_NOT_LOCKED);
7287 	}
7288  no_chunk_output:
7289 	if (ret) {
7290 		ca->cnt_failed++;
7291 	} else {
7292 		ca->cnt_sent++;
7293 	}
7294 }
7295 
7296 static void
7297 sctp_sendall_completes(void *ptr, uint32_t val SCTP_UNUSED)
7298 {
7299 	struct sctp_copy_all *ca;
7300 
7301 	ca = (struct sctp_copy_all *)ptr;
	/*
	 * Do a notify here? Kacheong suggests that the notify be done at
	 * send time, so you would push up a notification if any send
	 * failed. It is not clear that this is feasible, since the only
	 * failures we have are "memory" related, and if you cannot get an
	 * mbuf to send the data you surely can't get an mbuf to send up a
	 * notification that you can't send the data :->
	 */
7310 
7311 	/* now free everything */
7312 	if (ca->inp) {
7313 		/* Lets clear the flag to allow others to run. */
7314 		ca->inp->sctp_flags &= ~SCTP_PCB_FLAGS_SND_ITERATOR_UP;
7315 	}
7316 	sctp_m_freem(ca->m);
7317 	SCTP_FREE(ca, SCTP_M_COPYAL);
7318 }
7319 
7320 static struct mbuf *
7321 sctp_copy_out_all(struct uio *uio, ssize_t len)
7322 {
7323 	struct mbuf *ret, *at;
7324 	ssize_t left, willcpy, cancpy, error;
7325 
7326 	ret = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_WAITOK, 1, MT_DATA);
7327 	if (ret == NULL) {
7328 		/* TSNH */
7329 		return (NULL);
7330 	}
7331 	left = len;
7332 	SCTP_BUF_LEN(ret) = 0;
7333 	/* save space for the data chunk header */
7334 	cancpy = (int)M_TRAILINGSPACE(ret);
7335 	willcpy = min(cancpy, left);
7336 	at = ret;
7337 	while (left > 0) {
7338 		/* Align data to the end */
7339 		error = uiomove(mtod(at, caddr_t), (int)willcpy, uio);
7340 		if (error) {
	err_out_now:
			/* free the whole chain we built, not just the current mbuf */
			sctp_m_freem(ret);
			return (NULL);
7344 		}
7345 		SCTP_BUF_LEN(at) = (int)willcpy;
7346 		SCTP_BUF_NEXT_PKT(at) = SCTP_BUF_NEXT(at) = 0;
7347 		left -= willcpy;
7348 		if (left > 0) {
7349 			SCTP_BUF_NEXT(at) = sctp_get_mbuf_for_msg((unsigned int)left, 0, M_WAITOK, 1, MT_DATA);
7350 			if (SCTP_BUF_NEXT(at) == NULL) {
7351 				goto err_out_now;
7352 			}
7353 			at = SCTP_BUF_NEXT(at);
7354 			SCTP_BUF_LEN(at) = 0;
7355 			cancpy = (int)M_TRAILINGSPACE(at);
7356 			willcpy = min(cancpy, left);
7357 		}
7358 	}
7359 	return (ret);
7360 }
7361 
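/*
 * sctp_sendall() copies the user data once into a struct sctp_copy_all and
 * hands it to the association iterator: sctp_sendall_iterator() runs against
 * every matching association (appending a per-association copy of the data,
 * or aborting / shutting down when SCTP_ABORT / SCTP_EOF is set), and
 * sctp_sendall_completes() runs last to clear SCTP_PCB_FLAGS_SND_ITERATOR_UP
 * and free the copy.  That flag is also what makes a second concurrent
 * sendall fail with EBUSY.
 */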
7362 static int
7363 sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m,
7364     struct sctp_sndrcvinfo *srcv)
7365 {
7366 	int ret;
7367 	struct sctp_copy_all *ca;
7368 
7369 	if (inp->sctp_flags & SCTP_PCB_FLAGS_SND_ITERATOR_UP) {
		/* There is another sendall iterator already running. */
7371 		return (EBUSY);
7372 	}
7373 #if defined(__APPLE__) && !defined(__Userspace__)
7374 #if defined(APPLE_LEOPARD)
7375 	if (uio->uio_resid > SCTP_BASE_SYSCTL(sctp_sendall_limit)) {
7376 #else
7377 	if (uio_resid(uio) > SCTP_BASE_SYSCTL(sctp_sendall_limit)) {
7378 #endif
7379 #else
7380 	if (uio->uio_resid > (ssize_t)SCTP_BASE_SYSCTL(sctp_sendall_limit)) {
7381 #endif
7382 		/* You must not be larger than the limit! */
7383 		return (EMSGSIZE);
7384 	}
7385 	SCTP_MALLOC(ca, struct sctp_copy_all *, sizeof(struct sctp_copy_all),
7386 		    SCTP_M_COPYAL);
7387 	if (ca == NULL) {
7388 		sctp_m_freem(m);
7389 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
7390 		return (ENOMEM);
7391 	}
7392 	memset(ca, 0, sizeof(struct sctp_copy_all));
7393 
7394 	ca->inp = inp;
7395 	if (srcv) {
7396 		memcpy(&ca->sndrcv, srcv, sizeof(struct sctp_nonpad_sndrcvinfo));
7397 	}
7398 	/*
7399 	 * take off the sendall flag, it would be bad if we failed to do
7400 	 * this :-0
7401 	 */
7402 	ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL;
7403 	/* get length and mbuf chain */
7404 	if (uio) {
7405 #if defined(__APPLE__) && !defined(__Userspace__)
7406 #if defined(APPLE_LEOPARD)
7407 		ca->sndlen = uio->uio_resid;
7408 #else
7409 		ca->sndlen = uio_resid(uio);
7410 #endif
7411 #else
7412 		ca->sndlen = uio->uio_resid;
7413 #endif
7414 #if defined(__APPLE__) && !defined(__Userspace__)
7415 		SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 0);
7416 #endif
7417 		ca->m = sctp_copy_out_all(uio, ca->sndlen);
7418 #if defined(__APPLE__) && !defined(__Userspace__)
7419 		SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 0);
7420 #endif
7421 		if (ca->m == NULL) {
7422 			SCTP_FREE(ca, SCTP_M_COPYAL);
7423 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
7424 			return (ENOMEM);
7425 		}
7426 	} else {
7427 		/* Gather the length of the send */
7428 		struct mbuf *mat;
7429 
7430 		ca->sndlen = 0;
7431 		for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
7432 			ca->sndlen += SCTP_BUF_LEN(mat);
7433 		}
7434 	}
7435 	inp->sctp_flags |= SCTP_PCB_FLAGS_SND_ITERATOR_UP;
7436 	ret = sctp_initiate_iterator(NULL, sctp_sendall_iterator, NULL,
7437 				     SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES,
7438 				     SCTP_ASOC_ANY_STATE,
7439 				     (void *)ca, 0,
7440 				     sctp_sendall_completes, inp, 1);
7441 	if (ret) {
7442 		inp->sctp_flags &= ~SCTP_PCB_FLAGS_SND_ITERATOR_UP;
7443 		SCTP_FREE(ca, SCTP_M_COPYAL);
7444 		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
7445 		return (EFAULT);
7446 	}
7447 	return (0);
7448 }
7449 
7450 void
7451 sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc)
7452 {
7453 	struct sctp_tmit_chunk *chk, *nchk;
7454 
7455 	TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
7456 		if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
7457 			TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
7458 			asoc->ctrl_queue_cnt--;
7459 			if (chk->data) {
7460 				sctp_m_freem(chk->data);
7461 				chk->data = NULL;
7462 			}
7463 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
7464 		}
7465 	}
7466 }
7467 
7468 void
7469 sctp_toss_old_asconf(struct sctp_tcb *stcb)
7470 {
7471 	struct sctp_association *asoc;
7472 	struct sctp_tmit_chunk *chk, *nchk;
7473 	struct sctp_asconf_chunk *acp;
7474 
7475 	asoc = &stcb->asoc;
7476 	TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
7477 		/* find SCTP_ASCONF chunk in queue */
7478 		if (chk->rec.chunk_id.id == SCTP_ASCONF) {
7479 			if (chk->data) {
7480 				acp = mtod(chk->data, struct sctp_asconf_chunk *);
7481 				if (SCTP_TSN_GT(ntohl(acp->serial_number), asoc->asconf_seq_out_acked)) {
7482 					/* Not Acked yet */
7483 					break;
7484 				}
7485 			}
7486 			TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next);
7487 			asoc->ctrl_queue_cnt--;
7488 			if (chk->data) {
7489 				sctp_m_freem(chk->data);
7490 				chk->data = NULL;
7491 			}
7492 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
7493 		}
7494 	}
7495 }
7496 
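/*
 * Move the chunks just bundled into a packet from the send queue onto the
 * sent queue (kept in TSN order), mark them as sent, update the flight
 * size and the peer's advertised rwnd, and inform the congestion control
 * module that a packet was transmitted.
 */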
7497 static void
7498 sctp_clean_up_datalist(struct sctp_tcb *stcb,
7499     struct sctp_association *asoc,
7500     struct sctp_tmit_chunk **data_list,
7501     int bundle_at,
7502     struct sctp_nets *net)
7503 {
7504 	int i;
7505 	struct sctp_tmit_chunk *tp1;
7506 
7507 	for (i = 0; i < bundle_at; i++) {
7508 		/* off of the send queue */
7509 		TAILQ_REMOVE(&asoc->send_queue, data_list[i], sctp_next);
7510 		asoc->send_queue_cnt--;
7511 		if (i > 0) {
7512 			/*
7513 			 * For any chunk other than the first, clear the
7514 			 * RTT timing flag; chunk 0 is cleared or set based
7515 			 * on whether an RTO measurement is needed.
7516 			 */
7517 			data_list[i]->do_rtt = 0;
7518 		}
7519 		/* record time */
7520 		data_list[i]->sent_rcv_time = net->last_sent_time;
7521 		data_list[i]->rec.data.cwnd_at_send = net->cwnd;
7522 		data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.tsn;
7523 		if (data_list[i]->whoTo == NULL) {
7524 			data_list[i]->whoTo = net;
7525 			atomic_add_int(&net->ref_count, 1);
7526 		}
7527 		/* on to the sent queue */
7528 		tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead);
7529 		if ((tp1) && SCTP_TSN_GT(tp1->rec.data.tsn, data_list[i]->rec.data.tsn)) {
7530 			struct sctp_tmit_chunk *tpp;
7531 
7532 			/* need to move back */
7533 		back_up_more:
7534 			tpp = TAILQ_PREV(tp1, sctpchunk_listhead, sctp_next);
7535 			if (tpp == NULL) {
7536 				TAILQ_INSERT_BEFORE(tp1, data_list[i], sctp_next);
7537 				goto all_done;
7538 			}
7539 			tp1 = tpp;
7540 			if (SCTP_TSN_GT(tp1->rec.data.tsn, data_list[i]->rec.data.tsn)) {
7541 				goto back_up_more;
7542 			}
7543 			TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next);
7544 		} else {
7545 			TAILQ_INSERT_TAIL(&asoc->sent_queue,
7546 					  data_list[i],
7547 					  sctp_next);
7548 		}
7549 	all_done:
7550 		/* This does not lower until the cum-ack passes it */
7551 		asoc->sent_queue_cnt++;
7552 		if ((asoc->peers_rwnd <= 0) &&
7553 		    (asoc->total_flight == 0) &&
7554 		    (bundle_at == 1)) {
7555 			/* Mark the chunk as being a window probe */
7556 			SCTP_STAT_INCR(sctps_windowprobed);
7557 		}
7558 #ifdef SCTP_AUDITING_ENABLED
7559 		sctp_audit_log(0xC2, 3);
7560 #endif
7561 		data_list[i]->sent = SCTP_DATAGRAM_SENT;
7562 		data_list[i]->snd_count = 1;
7563 		data_list[i]->rec.data.chunk_was_revoked = 0;
7564 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
7565 			sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
7566 				       data_list[i]->whoTo->flight_size,
7567 				       data_list[i]->book_size,
7568 				       (uint32_t)(uintptr_t)data_list[i]->whoTo,
7569 				       data_list[i]->rec.data.tsn);
7570 		}
7571 		sctp_flight_size_increase(data_list[i]);
7572 		sctp_total_flight_increase(stcb, data_list[i]);
7573 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
7574 			sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
7575 			      asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
7576 		}
7577 		asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
7578 						    (uint32_t) (data_list[i]->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
7579 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
7580 			/* SWS sender side engages */
7581 			asoc->peers_rwnd = 0;
7582 		}
7583 	}
7584 	if (asoc->cc_functions.sctp_cwnd_update_packet_transmitted) {
7585 		(*asoc->cc_functions.sctp_cwnd_update_packet_transmitted)(stcb, net);
7586 	}
7587 }
7588 
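/*
 * Sweep the control send queue and free stray control chunks (SACKs,
 * HEARTBEATs, SHUTDOWNs, etc.) that should not linger there. A stream
 * reset request is only kept if it is the one currently referenced by
 * asoc->str_reset.
 */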
7589 static void
7590 sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc, int so_locked)
7591 {
7592 	struct sctp_tmit_chunk *chk, *nchk;
7593 
7594 	TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
7595 		if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
7596 		    (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) ||	/* EY */
7597 		    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
7598 		    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
7599 		    (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) ||
7600 		    (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
7601 		    (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
7602 		    (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
7603 		    (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
7604 		    (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
7605 		    (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
7606 		    (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
7607 			/* Stray chunks must be cleaned up */
7608 	clean_up_anyway:
7609 			TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
7610 			asoc->ctrl_queue_cnt--;
7611 			if (chk->data) {
7612 				sctp_m_freem(chk->data);
7613 				chk->data = NULL;
7614 			}
7615 			if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
7616 				asoc->fwd_tsn_cnt--;
7617 			}
7618 			sctp_free_a_chunk(stcb, chk, so_locked);
7619 		} else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
7620 			/* special handling, we must look into the param */
7621 			if (chk != asoc->str_reset) {
7622 				goto clean_up_anyway;
7623 			}
7624 		}
7625 	}
7626 }
7627 
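/*
 * Decide how many bytes of an incomplete message may be moved to the send
 * queue now: returns 0 to wait for more data, or the number of bytes
 * (bounded by space_left and frag_point) that may be split off.
 */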
7628 static uint32_t
7629 sctp_can_we_split_this(struct sctp_tcb *stcb, uint32_t length,
7630                        uint32_t space_left, uint32_t frag_point, int eeor_on)
7631 {
7632 	/* Make a decision on whether I should split a
7633 	 * msg into multiple parts. This is only asked of
7634 	 * incomplete messages.
7635 	 */
7636 	if (eeor_on) {
7637 		/* If we are doing EEOR we need to always send
7638 		 * it if it's the entire thing, since it might
7639 		 * be all the guy is putting in the hopper.
7640 		 */
7641 		if (space_left >= length) {
7642 			/*-
7643 			 * If we have data outstanding,
7644 			 * we get another chance when the sack
7645 			 * arrives to transmit - wait for more data
7646 			 */
7647 			if (stcb->asoc.total_flight == 0) {
7648 				/* Nothing is in flight, so it is safe
7649 				 * to take the whole message now.
7650 				 */
7651 				return (length);
7652 			}
7653 			return (0);
7654 
7655 		} else {
7656 			/* You can fill the rest */
7657 			return (space_left);
7658 		}
7659 	}
7660 	/*-
7661 	 * For those strange folk that make the send buffer
7662 	 * smaller than our fragmentation point, we can't
7663 	 * get a full msg in so we have to allow splitting.
7664 	 */
7665 	if (SCTP_SB_LIMIT_SND(stcb->sctp_socket) < frag_point) {
7666 		return (length);
7667 	}
7668 	if ((length <= space_left) ||
7669 	    ((length - space_left) < SCTP_BASE_SYSCTL(sctp_min_residual))) {
7670 		/* Sub-optimal residual; don't split in non-EEOR mode. */
7671 		return (0);
7672 	}
7673 	/* If we reach here, length is larger
7674 	 * than the space_left. Do we wish to split
7675 	 * it for the sake of packing the packet more fully?
7676 	 */
7677 	if (space_left >= min(SCTP_BASE_SYSCTL(sctp_min_split_point), frag_point)) {
7678 		/* It's ok to split it */
7679 		return (min(space_left, frag_point));
7680 	}
7681 	/* Nope, can't split */
7682 	return (0);
7683 }
7684 
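/*
 * Move up to space_left bytes of the first message queued on strq from the
 * stream queue to the association's send queue, building a DATA or I-DATA
 * chunk, assigning a TSN and the fragmentation flags. Returns the number
 * of bytes moved; *giveup and *bail tell the caller why nothing (more)
 * could be moved.
 */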
7685 static uint32_t
7686 sctp_move_to_outqueue(struct sctp_tcb *stcb,
7687                       struct sctp_stream_out *strq,
7688                       uint32_t space_left,
7689                       uint32_t frag_point,
7690                       int *giveup,
7691                       int eeor_mode,
7692                       int *bail,
7693                       int so_locked)
7694 {
7695 	/* Move from the stream to the send_queue keeping track of the total */
7696 	struct sctp_association *asoc;
7697 	struct sctp_stream_queue_pending *sp;
7698 	struct sctp_tmit_chunk *chk;
7699 	struct sctp_data_chunk *dchkh=NULL;
7700 	struct sctp_idata_chunk *ndchkh=NULL;
7701 	uint32_t to_move, length;
7702 	int leading;
7703 	uint8_t rcv_flags = 0;
7704 	uint8_t some_taken;
7705 	uint8_t send_lock_up = 0;
7706 
7707 	SCTP_TCB_LOCK_ASSERT(stcb);
7708 	asoc = &stcb->asoc;
7709 one_more_time:
7710 	/*sa_ignore FREED_MEMORY*/
7711 	sp = TAILQ_FIRST(&strq->outqueue);
7712 	if (sp == NULL) {
7713 		if (send_lock_up == 0) {
7714 			SCTP_TCB_SEND_LOCK(stcb);
7715 			send_lock_up = 1;
7716 		}
7717 		sp = TAILQ_FIRST(&strq->outqueue);
7718 		if (sp) {
7719 			goto one_more_time;
7720 		}
7721 		if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_EXPLICIT_EOR) == 0) &&
7722 		    (stcb->asoc.idata_supported == 0) &&
7723 		    (strq->last_msg_incomplete)) {
7724 			SCTP_PRINTF("Huh? Stream:%d lm_in_c=%d but queue is NULL\n",
7725 			            strq->sid,
7726 			            strq->last_msg_incomplete);
7727 			strq->last_msg_incomplete = 0;
7728 		}
7729 		to_move = 0;
7730 		if (send_lock_up) {
7731 			SCTP_TCB_SEND_UNLOCK(stcb);
7732 			send_lock_up = 0;
7733 		}
7734 		goto out_of;
7735 	}
7736 	if ((sp->msg_is_complete) && (sp->length == 0)) {
7737 		if (sp->sender_all_done) {
7738 			/* We are doing deferred cleanup. Last
7739 			 * time through when we took all the data
7740 			 * the sender_all_done was not set.
7741 			 */
7742 			if ((sp->put_last_out == 0) && (sp->discard_rest == 0)) {
7743 				SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
7744 				SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
7745 				            sp->sender_all_done,
7746 				            sp->length,
7747 				            sp->msg_is_complete,
7748 				            sp->put_last_out,
7749 				            send_lock_up);
7750 			}
7751 			if ((TAILQ_NEXT(sp, next) == NULL) && (send_lock_up  == 0)) {
7752 				SCTP_TCB_SEND_LOCK(stcb);
7753 				send_lock_up = 1;
7754 			}
7755 			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
7756 			TAILQ_REMOVE(&strq->outqueue, sp, next);
7757 			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
7758 			if ((strq->state == SCTP_STREAM_RESET_PENDING) &&
7759 			    (strq->chunks_on_queues == 0) &&
7760 			    TAILQ_EMPTY(&strq->outqueue)) {
7761 				stcb->asoc.trigger_reset = 1;
7762 			}
7763 			if (sp->net) {
7764 				sctp_free_remote_addr(sp->net);
7765 				sp->net = NULL;
7766 			}
7767 			if (sp->data) {
7768 				sctp_m_freem(sp->data);
7769 				sp->data = NULL;
7770 			}
7771 			sctp_free_a_strmoq(stcb, sp, so_locked);
7772 			/* we can't be locked to it */
7773 			if (send_lock_up) {
7774 				SCTP_TCB_SEND_UNLOCK(stcb);
7775 				send_lock_up = 0;
7776 			}
7777 			/* back to get the next msg */
7778 			goto one_more_time;
7779 		} else {
7780 			/* sender just finished this but
7781 			 * still holds a reference
7782 			 */
7783 			*giveup = 1;
7784 			to_move = 0;
7785 			goto out_of;
7786 		}
7787 	} else {
7788 		/* is there some to get */
7789 		if (sp->length == 0) {
7790 			/* no */
7791 			*giveup = 1;
7792 			to_move = 0;
7793 			goto out_of;
7794 		} else if (sp->discard_rest) {
7795 			if (send_lock_up == 0) {
7796 				SCTP_TCB_SEND_LOCK(stcb);
7797 				send_lock_up = 1;
7798 			}
7799 			/* Whack down the size */
7800 			atomic_subtract_int(&stcb->asoc.total_output_queue_size, sp->length);
7801 			if ((stcb->sctp_socket != NULL) &&
7802 			    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
7803 			     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
7804 				atomic_subtract_int(&stcb->sctp_socket->so_snd.sb_cc, sp->length);
7805 			}
7806 			if (sp->data) {
7807 				sctp_m_freem(sp->data);
7808 				sp->data = NULL;
7809 				sp->tail_mbuf = NULL;
7810 			}
7811 			sp->length = 0;
7812 			sp->some_taken = 1;
7813 			*giveup = 1;
7814 			to_move = 0;
7815 			goto out_of;
7816 		}
7817 	}
7818 	some_taken = sp->some_taken;
7819 re_look:
7820 	length = sp->length;
7821 	if (sp->msg_is_complete) {
7822 		/* The message is complete */
7823 		to_move = min(length, frag_point);
7824 		if (to_move == length) {
7825 			/* All of it fits in the MTU */
7826 			if (sp->some_taken) {
7827 				rcv_flags |= SCTP_DATA_LAST_FRAG;
7828 			} else {
7829 				rcv_flags |= SCTP_DATA_NOT_FRAG;
7830 			}
7831 			sp->put_last_out = 1;
7832 			if (sp->sinfo_flags & SCTP_SACK_IMMEDIATELY) {
7833 				rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
7834 			}
7835 		} else {
7836 			/* Not all of it fits, we fragment */
7837 			if (sp->some_taken == 0) {
7838 				rcv_flags |= SCTP_DATA_FIRST_FRAG;
7839 			}
7840 			sp->some_taken = 1;
7841 		}
7842 	} else {
7843 		to_move = sctp_can_we_split_this(stcb, length, space_left, frag_point, eeor_mode);
7844 		if (to_move) {
7845 			/*-
7846 			 * We use a snapshot of length in case it
7847 			 * is expanding during the compare.
7848 			 */
7849 			uint32_t llen;
7850 
7851 			llen = length;
7852 			if (to_move >= llen) {
7853 				to_move = llen;
7854 				if (send_lock_up == 0) {
7855 					/*-
7856 					 * We are taking all of an incomplete msg
7857 					 * thus we need a send lock.
7858 					 */
7859 					SCTP_TCB_SEND_LOCK(stcb);
7860 					send_lock_up = 1;
7861 					if (sp->msg_is_complete) {
7862 						/* the sender finished the msg */
7863 						goto re_look;
7864 					}
7865 				}
7866 			}
7867 			if (sp->some_taken == 0) {
7868 				rcv_flags |= SCTP_DATA_FIRST_FRAG;
7869 				sp->some_taken = 1;
7870 			}
7871 		} else {
7872 			/* Nothing to take. */
7873 			*giveup = 1;
7874 			to_move = 0;
7875 			goto out_of;
7876 		}
7877 	}
7878 
7879 	/* If we reach here, we can copy out a chunk */
7880 	sctp_alloc_a_chunk(stcb, chk);
7881 	if (chk == NULL) {
7882 		/* No chunk memory */
7883 		*giveup = 1;
7884 		to_move = 0;
7885 		goto out_of;
7886 	}
7887 	/* Set up for unordered delivery if needed by looking
7888 	 * at the user-supplied sinfo flags.
7889 	 */
7890 	if (sp->sinfo_flags & SCTP_UNORDERED) {
7891 		rcv_flags |= SCTP_DATA_UNORDERED;
7892 	}
7893 	if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) &&
7894 	    (sp->sinfo_flags & SCTP_EOF) == SCTP_EOF) {
7895 		rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
7896 	}
7897 	/* clear out the chunk before setting up */
7898 	memset(chk, 0, sizeof(*chk));
7899 	chk->rec.data.rcv_flags = rcv_flags;
7900 
7901 	if (to_move >= length) {
7902 		/* we think we can steal the whole thing */
7903 		if ((sp->sender_all_done == 0) && (send_lock_up == 0)) {
7904 			SCTP_TCB_SEND_LOCK(stcb);
7905 			send_lock_up = 1;
7906 		}
7907 		if (to_move < sp->length) {
7908 			/* bail, it changed */
7909 			goto dont_do_it;
7910 		}
7911 		chk->data = sp->data;
7912 		chk->last_mbuf = sp->tail_mbuf;
7913 		/* register the stealing */
7914 		sp->data = sp->tail_mbuf = NULL;
7915 	} else {
7916 		struct mbuf *m;
7917 	dont_do_it:
7918 		chk->data = SCTP_M_COPYM(sp->data, 0, to_move, M_NOWAIT);
7919 		chk->last_mbuf = NULL;
7920 		if (chk->data == NULL) {
7921 			sp->some_taken = some_taken;
7922 			sctp_free_a_chunk(stcb, chk, so_locked);
7923 			*bail = 1;
7924 			to_move = 0;
7925 			goto out_of;
7926 		}
7927 #ifdef SCTP_MBUF_LOGGING
7928 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
7929 			sctp_log_mbc(chk->data, SCTP_MBUF_ICOPY);
7930 		}
7931 #endif
7932 		/* Pull off the data */
7933 		m_adj(sp->data, to_move);
7934 		/* Now lets work our way down and compact it */
7935 		m = sp->data;
7936 		while (m && (SCTP_BUF_LEN(m) == 0)) {
7937 			sp->data  = SCTP_BUF_NEXT(m);
7938 			SCTP_BUF_NEXT(m) = NULL;
7939 			if (sp->tail_mbuf == m) {
7940 				/*-
7941 				 * Freeing tail? TSNH since
7942 				 * we supposedly were taking less
7943 				 * than the sp->length.
7944 				 */
7945 #ifdef INVARIANTS
7946 				panic("Huh, freeing tail? - TSNH");
7947 #else
7948 				SCTP_PRINTF("Huh, freeing tail? - TSNH\n");
7949 				sp->tail_mbuf = sp->data = NULL;
7950 				sp->length = 0;
7951 #endif
7952 			}
7953 			sctp_m_free(m);
7954 			m = sp->data;
7955 		}
7956 	}
7957 	if (SCTP_BUF_IS_EXTENDED(chk->data)) {
7958 		chk->copy_by_ref = 1;
7959 	} else {
7960 		chk->copy_by_ref = 0;
7961 	}
7962 	/* Get last_mbuf and counts of mbuf usage.
7963 	 * This is ugly, but hopefully it's only one mbuf.
7964 	 */
7965 	if (chk->last_mbuf == NULL) {
7966 		chk->last_mbuf = chk->data;
7967 		while (SCTP_BUF_NEXT(chk->last_mbuf) != NULL) {
7968 			chk->last_mbuf = SCTP_BUF_NEXT(chk->last_mbuf);
7969 		}
7970 	}
7971 
7972 	if (to_move > length) {
7973 		/*- This should not happen either
7974 		 * since we always lower to_move to the size
7975 		 * of sp->length if it is larger.
7976 		 */
7977 #ifdef INVARIANTS
7978 		panic("Huh, how can to_move be larger?");
7979 #else
7980 		SCTP_PRINTF("Huh, how can to_move be larger?\n");
7981 		sp->length = 0;
7982 #endif
7983 	} else {
7984 		atomic_subtract_int(&sp->length, to_move);
7985 	}
7986 	leading = SCTP_DATA_CHUNK_OVERHEAD(stcb);
7987 	if (M_LEADINGSPACE(chk->data) < leading) {
7988 		/* Not enough room for a chunk header, get some */
7989 		struct mbuf *m;
7990 
7991 		m = sctp_get_mbuf_for_msg(1, 0, M_NOWAIT, 1, MT_DATA);
7992 		if (m == NULL) {
7993 			/*
7994 			 * we're in trouble here. _PREPEND below will free
7995 			 * all the data if there is no leading space, so we
7996 			 * must put the data back and restore.
7997 			 */
7998 			if (send_lock_up == 0) {
7999 				SCTP_TCB_SEND_LOCK(stcb);
8000 				send_lock_up = 1;
8001 			}
8002 			if (sp->data == NULL) {
8003 				/* unsteal the data */
8004 				sp->data = chk->data;
8005 				sp->tail_mbuf = chk->last_mbuf;
8006 			} else {
8007 				struct mbuf *m_tmp;
8008 				/* reassemble the data */
8009 				m_tmp = sp->data;
8010 				sp->data = chk->data;
8011 				SCTP_BUF_NEXT(chk->last_mbuf) = m_tmp;
8012 			}
8013 			sp->some_taken = some_taken;
8014 			atomic_add_int(&sp->length, to_move);
8015 			chk->data = NULL;
8016 			*bail = 1;
8017 			sctp_free_a_chunk(stcb, chk, so_locked);
8018 			to_move = 0;
8019 			goto out_of;
8020 		} else {
8021 			SCTP_BUF_LEN(m) = 0;
8022 			SCTP_BUF_NEXT(m) = chk->data;
8023 			chk->data = m;
8024 			M_ALIGN(chk->data, 4);
8025 		}
8026 	}
8027 	SCTP_BUF_PREPEND(chk->data, SCTP_DATA_CHUNK_OVERHEAD(stcb), M_NOWAIT);
8028 	if (chk->data == NULL) {
8029 		/* HELP, TSNH since we assured it would not above? */
8030 #ifdef INVARIANTS
8031 		panic("prepend fails HELP?");
8032 #else
8033 		SCTP_PRINTF("prepend fails HELP?\n");
8034 		sctp_free_a_chunk(stcb, chk, so_locked);
8035 #endif
8036 		*bail = 1;
8037 		to_move = 0;
8038 		goto out_of;
8039 	}
8040 	sctp_snd_sb_alloc(stcb, SCTP_DATA_CHUNK_OVERHEAD(stcb));
8041 	chk->book_size = chk->send_size = (uint16_t)(to_move + SCTP_DATA_CHUNK_OVERHEAD(stcb));
8042 	chk->book_size_scale = 0;
8043 	chk->sent = SCTP_DATAGRAM_UNSENT;
8044 
8045 	chk->flags = 0;
8046 	chk->asoc = &stcb->asoc;
8047 	chk->pad_inplace = 0;
8048 	chk->no_fr_allowed = 0;
8049 	if (stcb->asoc.idata_supported == 0) {
8050 		if (rcv_flags & SCTP_DATA_UNORDERED) {
8051 			/* Just use 0. The receiver ignores the values. */
8052 			chk->rec.data.mid = 0;
8053 		} else {
8054 			chk->rec.data.mid = strq->next_mid_ordered;
8055 			if (rcv_flags & SCTP_DATA_LAST_FRAG) {
8056 				strq->next_mid_ordered++;
8057 			}
8058 		}
8059 	} else {
8060 		if (rcv_flags & SCTP_DATA_UNORDERED) {
8061 			chk->rec.data.mid = strq->next_mid_unordered;
8062 			if (rcv_flags & SCTP_DATA_LAST_FRAG) {
8063 				strq->next_mid_unordered++;
8064 			}
8065 		} else {
8066 			chk->rec.data.mid = strq->next_mid_ordered;
8067 			if (rcv_flags & SCTP_DATA_LAST_FRAG) {
8068 				strq->next_mid_ordered++;
8069 			}
8070 		}
8071 	}
8072 	chk->rec.data.sid = sp->sid;
8073 	chk->rec.data.ppid = sp->ppid;
8074 	chk->rec.data.context = sp->context;
8075 	chk->rec.data.doing_fast_retransmit = 0;
8076 
8077 	chk->rec.data.timetodrop = sp->ts;
8078 	chk->flags = sp->act_flags;
8079 
8080 	if (sp->net) {
8081 		chk->whoTo = sp->net;
8082 		atomic_add_int(&chk->whoTo->ref_count, 1);
8083 	} else
8084 		chk->whoTo = NULL;
8085 
8086 	if (sp->holds_key_ref) {
8087 		chk->auth_keyid = sp->auth_keyid;
8088 		sctp_auth_key_acquire(stcb, chk->auth_keyid);
8089 		chk->holds_key_ref = 1;
8090 	}
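	/*
	 * Assign the next TSN for this chunk: atomically in the FreeBSD
	 * kernel build, a plain increment elsewhere.
	 */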
8091 #if defined(__FreeBSD__) && !defined(__Userspace__)
8092 	chk->rec.data.tsn = atomic_fetchadd_int(&asoc->sending_seq, 1);
8093 #else
8094 	chk->rec.data.tsn = asoc->sending_seq++;
8095 #endif
8096 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_OUTQ) {
8097 		sctp_misc_ints(SCTP_STRMOUT_LOG_SEND,
8098 		               (uint32_t)(uintptr_t)stcb, sp->length,
8099 		               (uint32_t)((chk->rec.data.sid << 16) | (0x0000ffff & chk->rec.data.mid)),
8100 		               chk->rec.data.tsn);
8101 	}
8102 	if (stcb->asoc.idata_supported == 0) {
8103 		dchkh = mtod(chk->data, struct sctp_data_chunk *);
8104 	} else {
8105 		ndchkh = mtod(chk->data, struct sctp_idata_chunk *);
8106 	}
8107 	/*
8108 	 * Put the rest of the things in place now. Size was done
8109 	 * earlier in a previous loop, prior to padding.
8110 	 */
8111 
8112 #ifdef SCTP_ASOCLOG_OF_TSNS
8113 	SCTP_TCB_LOCK_ASSERT(stcb);
8114 	if (asoc->tsn_out_at >= SCTP_TSN_LOG_SIZE) {
8115 		asoc->tsn_out_at = 0;
8116 		asoc->tsn_out_wrapped = 1;
8117 	}
8118 	asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.tsn;
8119 	asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.sid;
8120 	asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.mid;
8121 	asoc->out_tsnlog[asoc->tsn_out_at].sz = chk->send_size;
8122 	asoc->out_tsnlog[asoc->tsn_out_at].flgs = chk->rec.data.rcv_flags;
8123 	asoc->out_tsnlog[asoc->tsn_out_at].stcb = (void *)stcb;
8124 	asoc->out_tsnlog[asoc->tsn_out_at].in_pos = asoc->tsn_out_at;
8125 	asoc->out_tsnlog[asoc->tsn_out_at].in_out = 2;
8126 	asoc->tsn_out_at++;
8127 #endif
8128 	if (stcb->asoc.idata_supported == 0) {
8129 		dchkh->ch.chunk_type = SCTP_DATA;
8130 		dchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
8131 		dchkh->dp.tsn = htonl(chk->rec.data.tsn);
8132 		dchkh->dp.sid = htons(strq->sid);
8133 		dchkh->dp.ssn = htons((uint16_t)chk->rec.data.mid);
8134 		dchkh->dp.ppid = chk->rec.data.ppid;
8135 		dchkh->ch.chunk_length = htons(chk->send_size);
8136 	} else {
8137 		ndchkh->ch.chunk_type = SCTP_IDATA;
8138 		ndchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
8139 		ndchkh->dp.tsn = htonl(chk->rec.data.tsn);
8140 		ndchkh->dp.sid = htons(strq->sid);
8141 		ndchkh->dp.reserved = htons(0);
8142 		ndchkh->dp.mid = htonl(chk->rec.data.mid);
8143 		if (sp->fsn == 0)
8144 			ndchkh->dp.ppid_fsn.ppid = chk->rec.data.ppid;
8145 		else
8146 			ndchkh->dp.ppid_fsn.fsn = htonl(sp->fsn);
8147 		sp->fsn++;
8148 		ndchkh->ch.chunk_length = htons(chk->send_size);
8149 	}
8150 	/* Now advance the chk->send_size by the actual pad needed. */
8151 	if (chk->send_size < SCTP_SIZE32(chk->book_size)) {
8152 		/* need a pad */
8153 		struct mbuf *lm;
8154 		int pads;
8155 
8156 		pads = SCTP_SIZE32(chk->book_size) - chk->send_size;
8157 		lm = sctp_pad_lastmbuf(chk->data, pads, chk->last_mbuf);
8158 		if (lm != NULL) {
8159 			chk->last_mbuf = lm;
8160 			chk->pad_inplace = 1;
8161 		}
8162 		chk->send_size += pads;
8163 	}
8164 	if (PR_SCTP_ENABLED(chk->flags)) {
8165 		asoc->pr_sctp_cnt++;
8166 	}
8167 	if (sp->msg_is_complete && (sp->length == 0) && (sp->sender_all_done)) {
8168 		/* All done pull and kill the message */
8169 		if (sp->put_last_out == 0) {
8170 			SCTP_PRINTF("Gak, put out entire msg with NO end!-2\n");
8171 			SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
8172 			            sp->sender_all_done,
8173 			            sp->length,
8174 			            sp->msg_is_complete,
8175 			            sp->put_last_out,
8176 			            send_lock_up);
8177 		}
8178 		if ((send_lock_up == 0) && (TAILQ_NEXT(sp, next) == NULL)) {
8179 			SCTP_TCB_SEND_LOCK(stcb);
8180 			send_lock_up = 1;
8181 		}
8182 		atomic_subtract_int(&asoc->stream_queue_cnt, 1);
8183 		TAILQ_REMOVE(&strq->outqueue, sp, next);
8184 		stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
8185 		if ((strq->state == SCTP_STREAM_RESET_PENDING) &&
8186 		    (strq->chunks_on_queues == 0) &&
8187 		    TAILQ_EMPTY(&strq->outqueue)) {
8188 			stcb->asoc.trigger_reset = 1;
8189 		}
8190 		if (sp->net) {
8191 			sctp_free_remote_addr(sp->net);
8192 			sp->net = NULL;
8193 		}
8194 		if (sp->data) {
8195 			sctp_m_freem(sp->data);
8196 			sp->data = NULL;
8197 		}
8198 		sctp_free_a_strmoq(stcb, sp, so_locked);
8199 	}
8200 	asoc->chunks_on_out_queue++;
8201 	strq->chunks_on_queues++;
8202 	TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next);
8203 	asoc->send_queue_cnt++;
8204 out_of:
8205 	if (send_lock_up) {
8206 		SCTP_TCB_SEND_UNLOCK(stcb);
8207 	}
8208 	return (to_move);
8209 }
8210 
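/*
 * Fill the association's send queue with data destined for this net,
 * pulling from the stream scheduler until roughly an MTU's worth has been
 * moved or the scheduler runs dry. *quit_now is set if a move bailed out
 * (e.g. on a memory allocation failure).
 */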
8211 static void
8212 sctp_fill_outqueue(struct sctp_tcb *stcb,
8213     struct sctp_nets *net, int frag_point, int eeor_mode, int *quit_now, int so_locked)
8214 {
8215 	struct sctp_association *asoc;
8216 	struct sctp_stream_out *strq;
8217 	uint32_t space_left, moved, total_moved;
8218 	int bail, giveup;
8219 
8220 	SCTP_TCB_LOCK_ASSERT(stcb);
8221 	asoc = &stcb->asoc;
8222 	total_moved = 0;
8223 	switch (net->ro._l_addr.sa.sa_family) {
8224 #ifdef INET
8225 		case AF_INET:
8226 			space_left = net->mtu - SCTP_MIN_V4_OVERHEAD;
8227 			break;
8228 #endif
8229 #ifdef INET6
8230 		case AF_INET6:
8231 			space_left = net->mtu - SCTP_MIN_OVERHEAD;
8232 			break;
8233 #endif
8234 #if defined(__Userspace__)
8235 		case AF_CONN:
8236 			space_left = net->mtu - sizeof(struct sctphdr);
8237 			break;
8238 #endif
8239 		default:
8240 			/* TSNH */
8241 			space_left = net->mtu;
8242 			break;
8243 	}
8244 	/* Need an allowance for the data chunk header too */
8245 	space_left -= SCTP_DATA_CHUNK_OVERHEAD(stcb);
8246 
8247 	/* must make even word boundary */
8248 	space_left &= 0xfffffffc;
8249 	strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
8250 	giveup = 0;
8251 	bail = 0;
8252 	while ((space_left > 0) && (strq != NULL)) {
8253 		moved = sctp_move_to_outqueue(stcb, strq, space_left, frag_point,
8254 		                              &giveup, eeor_mode, &bail, so_locked);
8255 		stcb->asoc.ss_functions.sctp_ss_scheduled(stcb, net, asoc, strq, moved);
8256 		if ((giveup != 0) || (bail != 0)) {
8257 			break;
8258 		}
8259 		strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
8260 		total_moved += moved;
8261 		if (space_left >= moved) {
8262 			space_left -= moved;
8263 		} else {
8264 			space_left = 0;
8265 		}
8266 		if (space_left >= SCTP_DATA_CHUNK_OVERHEAD(stcb)) {
8267 			space_left -= SCTP_DATA_CHUNK_OVERHEAD(stcb);
8268 		} else {
8269 			space_left = 0;
8270 		}
8271 		space_left &= 0xfffffffc;
8272 	}
8273 	if (bail != 0)
8274 		*quit_now = 1;
8275 
8276 	stcb->asoc.ss_functions.sctp_ss_packet_done(stcb, net, asoc);
8277 
8278 	if (total_moved == 0) {
8279 		if ((stcb->asoc.sctp_cmt_on_off == 0) &&
8280 		    (net == stcb->asoc.primary_destination)) {
8281 			/* ran dry for the primary network */
8282 			SCTP_STAT_INCR(sctps_primary_randry);
8283 		} else if (stcb->asoc.sctp_cmt_on_off > 0) {
8284 			/* ran dry with CMT on */
8285 			SCTP_STAT_INCR(sctps_cmt_randry);
8286 		}
8287 	}
8288 }
8289 
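/*
 * Mark every ECN-ECHO chunk on the control send queue as unsent so that it
 * gets (re)transmitted on the next output pass.
 */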
8290 void
8291 sctp_fix_ecn_echo(struct sctp_association *asoc)
8292 {
8293 	struct sctp_tmit_chunk *chk;
8294 
8295 	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
8296 		if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
8297 			chk->sent = SCTP_DATAGRAM_UNSENT;
8298 		}
8299 	}
8300 }
8301 
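/*
 * Detach all queued stream data and send-queue chunks from the given net
 * so they can be reassigned to another destination, e.g. when this one
 * becomes unreachable.
 */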
8302 void
8303 sctp_move_chunks_from_net(struct sctp_tcb *stcb, struct sctp_nets *net)
8304 {
8305 	struct sctp_association *asoc;
8306 	struct sctp_tmit_chunk *chk;
8307 	struct sctp_stream_queue_pending *sp;
8308 	unsigned int i;
8309 
8310 	if (net == NULL) {
8311 		return;
8312 	}
8313 	asoc = &stcb->asoc;
8314 	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
8315 		TAILQ_FOREACH(sp, &stcb->asoc.strmout[i].outqueue, next) {
8316 			if (sp->net == net) {
8317 				sctp_free_remote_addr(sp->net);
8318 				sp->net = NULL;
8319 			}
8320 		}
8321 	}
8322 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
8323 		if (chk->whoTo == net) {
8324 			sctp_free_remote_addr(chk->whoTo);
8325 			chk->whoTo = NULL;
8326 		}
8327 	}
8328 }
8329 
8330 int
8331 sctp_med_chunk_output(struct sctp_inpcb *inp,
8332 		      struct sctp_tcb *stcb,
8333 		      struct sctp_association *asoc,
8334 		      int *num_out,
8335 		      int *reason_code,
8336 		      int control_only, int from_where,
8337 		      struct timeval *now, int *now_filled, int frag_point, int so_locked)
8338 {
8339 	/**
8340 	 * Ok this is the generic chunk service queue. we must do the
8341 	 * following:
8342 	 * - Service the stream queue that is next, moving any
8343 	 *   message (note I must get a complete message i.e. FIRST/MIDDLE and
8344 	 *   LAST to the out queue in one pass) and assigning TSN's. This
8345 	 *   only applies, though, if the peer does not support NDATA. For NDATA
8346 	 *   chunks it's ok to not send the entire message ;-)
8347 	 * - Check to see if the cwnd/rwnd allows any output; if so we go ahead and
8348 	 *   formulate and send the low level chunks. Making sure to combine
8349 	 *   any control in the control chunk queue also.
8350 	 */
8351 	struct sctp_nets *net, *start_at, *sack_goes_to = NULL, *old_start_at = NULL;
8352 	struct mbuf *outchain, *endoutchain;
8353 	struct sctp_tmit_chunk *chk, *nchk;
8354 
8355 	/* temp arrays for unlinking */
8356 	struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
8357 	int no_fragmentflg, error;
8358 	unsigned int max_rwnd_per_dest, max_send_per_dest;
8359 	int one_chunk, hbflag, skip_data_for_this_net;
8360 	int asconf, cookie, no_out_cnt;
8361 	int bundle_at, ctl_cnt, no_data_chunks, eeor_mode;
8362 	unsigned int mtu, r_mtu, omtu, mx_mtu, to_out;
8363 	int tsns_sent = 0;
8364 	uint32_t auth_offset;
8365 	struct sctp_auth_chunk *auth;
8366 	uint16_t auth_keyid;
8367 	int override_ok = 1;
8368 	int skip_fill_up = 0;
8369 	int data_auth_reqd = 0;
8370 	/* JRS 5/14/07 - Add flag for whether a heartbeat is sent to
8371 	   the destination. */
8372 	int quit_now = 0;
8373 
8374 #if defined(__APPLE__) && !defined(__Userspace__)
8375 	if (so_locked) {
8376 		sctp_lock_assert(SCTP_INP_SO(inp));
8377 	} else {
8378 		sctp_unlock_assert(SCTP_INP_SO(inp));
8379 	}
8380 #endif
8381 	*num_out = 0;
8382 	*reason_code = 0;
8383 	auth_keyid = stcb->asoc.authinfo.active_keyid;
8384 	if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
8385 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
8386 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
8387 		eeor_mode = 1;
8388 	} else {
8389 		eeor_mode = 0;
8390 	}
8391 	ctl_cnt = no_out_cnt = asconf = cookie = 0;
8392 	/*
8393 	 * First lets prime the pump. For each destination, if there is room
8394 	 * in the flight size, attempt to pull an MTU's worth out of the
8395 	 * stream queues into the general send_queue
8396 	 */
8397 #ifdef SCTP_AUDITING_ENABLED
8398 	sctp_audit_log(0xC2, 2);
8399 #endif
8400 	SCTP_TCB_LOCK_ASSERT(stcb);
8401 	hbflag = 0;
8402 	if (control_only)
8403 		no_data_chunks = 1;
8404 	else
8405 		no_data_chunks = 0;
8406 
8407 	/* Nothing possible to send? */
8408 	if ((TAILQ_EMPTY(&asoc->control_send_queue) ||
8409 	     (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) &&
8410 	    TAILQ_EMPTY(&asoc->asconf_send_queue) &&
8411 	    TAILQ_EMPTY(&asoc->send_queue) &&
8412 	    sctp_is_there_unsent_data(stcb, so_locked) == 0) {
8413 	nothing_to_send:
8414 		*reason_code = 9;
8415 		return (0);
8416 	}
8417 	if (asoc->peers_rwnd == 0) {
8418 		/* No room in peers rwnd */
8419 		*reason_code = 1;
8420 		if (asoc->total_flight > 0) {
8421 			/* we are allowed one chunk in flight */
8422 			no_data_chunks = 1;
8423 		}
8424 	}
8425 	if (stcb->asoc.ecn_echo_cnt_onq) {
8426 		/* Record where a sack goes, if any */
8427 		if (no_data_chunks &&
8428 		    (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) {
8429 			/* Nothing but ECNe to send - we don't do that */
8430 			goto nothing_to_send;
8431 		}
8432 		TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
8433 			if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8434 			    (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
8435 				sack_goes_to = chk->whoTo;
8436 				break;
8437 			}
8438 		}
8439 	}
8440 	max_rwnd_per_dest = ((asoc->peers_rwnd + asoc->total_flight) / asoc->numnets);
8441 	if (stcb->sctp_socket)
8442 		max_send_per_dest = SCTP_SB_LIMIT_SND(stcb->sctp_socket) / asoc->numnets;
8443 	else
8444 		max_send_per_dest = 0;
8445 	if (no_data_chunks == 0) {
8446 		/* How many non-directed chunks are there? */
8447 		TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
8448 			if (chk->whoTo == NULL) {
8449 				/* We already have non-directed
8450 				 * chunks on the queue, no need
8451 				 * to do a fill-up.
8452 				 */
8453 				skip_fill_up = 1;
8454 				break;
8455 			}
8456 		}
8457 	}
8458 	if ((no_data_chunks == 0) &&
8459 	    (skip_fill_up == 0) &&
8460 	    (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc))) {
8461 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
8462 			/*
8463 			 * This for loop we are in takes in
8464 			 * each net; if it's got space in cwnd and
8465 			 * has data sent to it (when CMT is off) then it
8466 			 * calls sctp_fill_outqueue for the net. This gets
8467 			 * data on the send queue for that network.
8468 			 *
8469 			 * In sctp_fill_outqueue TSN's are assigned and
8470 			 * data is copied out of the stream buffers. Note
8471 			 * mostly copy by reference (we hope).
8472 			 */
8473 			net->window_probe = 0;
8474 			if ((net != stcb->asoc.alternate) &&
8475 			    ((net->dest_state & SCTP_ADDR_PF) ||
8476 			     (!(net->dest_state & SCTP_ADDR_REACHABLE)) ||
8477 			     (net->dest_state & SCTP_ADDR_UNCONFIRMED))) {
8478 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8479 					sctp_log_cwnd(stcb, net, 1,
8480 						      SCTP_CWND_LOG_FILL_OUTQ_CALLED);
8481 				}
8482 				continue;
8483 			}
8484 			if ((stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) &&
8485 			    (net->flight_size == 0)) {
8486 				(*stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins)(stcb, net);
8487 			}
8488 			if (net->flight_size >= net->cwnd) {
8489 				/* skip this network, no room - can't fill */
8490 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8491 					sctp_log_cwnd(stcb, net, 3,
8492 						      SCTP_CWND_LOG_FILL_OUTQ_CALLED);
8493 				}
8494 				continue;
8495 			}
8496 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8497 				sctp_log_cwnd(stcb, net, 4, SCTP_CWND_LOG_FILL_OUTQ_CALLED);
8498 			}
8499 			sctp_fill_outqueue(stcb, net, frag_point, eeor_mode, &quit_now, so_locked);
8500 			if (quit_now) {
8501 				/* memory alloc failure */
8502 				no_data_chunks = 1;
8503 				break;
8504 			}
8505 		}
8506 	}
8507 	/* now service each destination and send out what we can for it */
8508 	/* Nothing to send? */
8509 	if (TAILQ_EMPTY(&asoc->control_send_queue) &&
8510 	    TAILQ_EMPTY(&asoc->asconf_send_queue) &&
8511 	    TAILQ_EMPTY(&asoc->send_queue)) {
8512 		*reason_code = 8;
8513 		return (0);
8514 	}
8515 
8516 	if (asoc->sctp_cmt_on_off > 0) {
8517 		/* get the last start point */
8518 		start_at = asoc->last_net_cmt_send_started;
8519 		if (start_at == NULL) {
8520 			/* null so to beginning */
8521 			start_at = TAILQ_FIRST(&asoc->nets);
8522 		} else {
8523 			start_at = TAILQ_NEXT(asoc->last_net_cmt_send_started, sctp_next);
8524 			if (start_at == NULL) {
8525 				start_at = TAILQ_FIRST(&asoc->nets);
8526 			}
8527 		}
8528 		asoc->last_net_cmt_send_started = start_at;
8529 	} else {
8530 		start_at = TAILQ_FIRST(&asoc->nets);
8531 	}
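	/*
	 * Give any undirected control chunks a destination: the alternate
	 * net if one is set, otherwise the primary destination.
	 */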
8532 	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
8533 		if (chk->whoTo == NULL) {
8534 			if (asoc->alternate) {
8535 				chk->whoTo = asoc->alternate;
8536 			} else {
8537 				chk->whoTo = asoc->primary_destination;
8538 			}
8539 			atomic_add_int(&chk->whoTo->ref_count, 1);
8540 		}
8541 	}
8542 	old_start_at = NULL;
8543 again_one_more_time:
8544 	for (net = start_at; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
8545 		/* how much can we send? */
8546 		/* SCTPDBG("Examine for sending net:%x\n", (uint32_t)net); */
8547 		if (old_start_at && (old_start_at == net)) {
8548 			/* went through the list completely. */
8549 			break;
8550 		}
8551 		tsns_sent = 0xa;
8552 		if (TAILQ_EMPTY(&asoc->control_send_queue) &&
8553 		    TAILQ_EMPTY(&asoc->asconf_send_queue) &&
8554 		    (net->flight_size >= net->cwnd)) {
8555 			/* Nothing on control or asconf and flight is full, we can skip
8556 			 * even in the CMT case.
8557 			 */
8558 			continue;
8559 		}
8560 		bundle_at = 0;
8561 		endoutchain = outchain = NULL;
8562 		auth = NULL;
8563 		auth_offset = 0;
8564 		no_fragmentflg = 1;
8565 		one_chunk = 0;
8566 		if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
8567 			skip_data_for_this_net = 1;
8568 		} else {
8569 			skip_data_for_this_net = 0;
8570 		}
8571 		switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
8572 #ifdef INET
8573 		case AF_INET:
8574 			mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8575 			break;
8576 #endif
8577 #ifdef INET6
8578 		case AF_INET6:
8579 			mtu = net->mtu - SCTP_MIN_OVERHEAD;
8580 			break;
8581 #endif
8582 #if defined(__Userspace__)
8583 		case AF_CONN:
8584 			mtu = net->mtu - sizeof(struct sctphdr);
8585 			break;
8586 #endif
8587 		default:
8588 			/* TSNH */
8589 			mtu = net->mtu;
8590 			break;
8591 		}
8592 		mx_mtu = mtu;
8593 		to_out = 0;
8594 		if (mtu > asoc->peers_rwnd) {
8595 			if (asoc->total_flight > 0) {
8596 				/* We have a packet in flight somewhere */
8597 				r_mtu = asoc->peers_rwnd;
8598 			} else {
8599 				/* We are always allowed to send one MTU out */
8600 				one_chunk = 1;
8601 				r_mtu = mtu;
8602 			}
8603 		} else {
8604 			r_mtu = mtu;
8605 		}
8606 		error = 0;
8607 		/************************/
8608 		/* ASCONF transmission */
8609 		/************************/
8610 		/* Now first lets go through the asconf queue */
8611 		TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
8612 			if (chk->rec.chunk_id.id != SCTP_ASCONF) {
8613 				continue;
8614 			}
8615 			if (chk->whoTo == NULL) {
8616 				if (asoc->alternate == NULL) {
8617 					if (asoc->primary_destination != net) {
8618 						break;
8619 					}
8620 				} else {
8621 					if (asoc->alternate != net) {
8622 						break;
8623 					}
8624 				}
8625 			} else {
8626 				if (chk->whoTo != net) {
8627 					break;
8628 				}
8629 			}
8630 			if (chk->data == NULL) {
8631 				break;
8632 			}
8633 			if (chk->sent != SCTP_DATAGRAM_UNSENT &&
8634 			    chk->sent != SCTP_DATAGRAM_RESEND) {
8635 				break;
8636 			}
8637 			/*
8638 			 * if no AUTH is yet included and this chunk
8639 			 * requires it, make sure to account for it.  We
8640 			 * don't apply the size until the AUTH chunk is
8641 			 * actually added below in case there is no room for
8642 			 * this chunk. NOTE: we overload the use of "omtu"
8643 			 * here
8644 			 */
8645 			if ((auth == NULL) &&
8646 			    sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8647 							stcb->asoc.peer_auth_chunks)) {
8648 				omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8649 			} else
8650 				omtu = 0;
8651 			/* Here we do NOT factor the r_mtu */
8652 			if ((chk->send_size < (int)(mtu - omtu)) ||
8653 			    (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
8654 				/*
8655 				 * We probably should glom the mbuf chain
8656 				 * from the chk->data for control but the
8657 				 * problem is it becomes yet one more level
8658 				 * of tracking to do if for some reason
8659 				 * output fails. Then I have got to
8660 				 * reconstruct the merged control chain.. el
8661 				 * yucko.. for now we take the easy way and
8662 				 * do the copy
8663 				 */
8664 				/*
8665 				 * Add an AUTH chunk, if chunk requires it
8666 				 * save the offset into the chain for AUTH
8667 				 */
8668 				if ((auth == NULL) &&
8669 				    (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8670 								 stcb->asoc.peer_auth_chunks))) {
8671 					outchain = sctp_add_auth_chunk(outchain,
8672 								       &endoutchain,
8673 								       &auth,
8674 								       &auth_offset,
8675 								       stcb,
8676 								       chk->rec.chunk_id.id);
8677 					SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8678 				}
8679 				outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
8680 							       (int)chk->rec.chunk_id.can_take_data,
8681 							       chk->send_size, chk->copy_by_ref);
8682 				if (outchain == NULL) {
8683 					*reason_code = 8;
8684 					SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8685 					return (ENOMEM);
8686 				}
8687 				SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8688 				/* update our MTU size */
8689 				if (mtu > (chk->send_size + omtu))
8690 					mtu -= (chk->send_size + omtu);
8691 				else
8692 					mtu = 0;
8693 				to_out += (chk->send_size + omtu);
8694 				/* Do clear IP_DF ? */
8695 				if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8696 					no_fragmentflg = 0;
8697 				}
8698 				if (chk->rec.chunk_id.can_take_data)
8699 					chk->data = NULL;
8700 				/*
8701 				 * set hb flag since we can
8702 				 * use these for RTO
8703 				 */
8704 				hbflag = 1;
8705 				asconf = 1;
8706 				/*
8707 				 * should sysctl this: don't
8708 				 * bundle data with ASCONF
8709 				 * since it requires AUTH
8710 				 */
8711 				no_data_chunks = 1;
8712 				chk->sent = SCTP_DATAGRAM_SENT;
8713 				if (chk->whoTo == NULL) {
8714 					chk->whoTo = net;
8715 					atomic_add_int(&net->ref_count, 1);
8716 				}
8717 				chk->snd_count++;
8718 				if (mtu == 0) {
8719 					/*
8720 					 * Ok we are out of room but we can
8721 					 * output without affecting the
8722 					 * flight size since this little guy
8723 					 * is a control only packet.
8724 					 */
8725 					sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
8726 					/*
8727 					 * do NOT clear the asconf
8728 					 * flag as it is used to do
8729 					 * appropriate source address
8730 					 * selection.
8731 					 */
8732 					if (*now_filled == 0) {
8733 						(void)SCTP_GETTIME_TIMEVAL(now);
8734 						*now_filled = 1;
8735 					}
8736 					net->last_sent_time = *now;
8737 					hbflag = 0;
8738 					if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
8739 					                                        (struct sockaddr *)&net->ro._l_addr,
8740 					                                        outchain, auth_offset, auth,
8741 					                                        stcb->asoc.authinfo.active_keyid,
8742 					                                        no_fragmentflg, 0, asconf,
8743 					                                        inp->sctp_lport, stcb->rport,
8744 					                                        htonl(stcb->asoc.peer_vtag),
8745 					                                        net->port, NULL,
8746 #if defined(__FreeBSD__) && !defined(__Userspace__)
8747 					                                        0, 0,
8748 #endif
8749 					                                        so_locked))) {
8750 						/* error, we could not output */
8751 						SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
8752 						if (from_where == 0) {
8753 							SCTP_STAT_INCR(sctps_lowlevelerrusr);
8754 						}
8755 						if (error == ENOBUFS) {
8756 							asoc->ifp_had_enobuf = 1;
8757 							SCTP_STAT_INCR(sctps_lowlevelerr);
8758 						}
8759 						/* error, could not output */
8760 						if (error == EHOSTUNREACH) {
8761 							/*
8762 							 * Destination went
8763 							 * unreachable
8764 							 * during this send
8765 							 */
8766 							sctp_move_chunks_from_net(stcb, net);
8767 						}
8768 						*reason_code = 7;
8769 						break;
8770 					} else {
8771 						asoc->ifp_had_enobuf = 0;
8772 					}
8773 					/*
8774 					 * increase the number we sent, if a
8775 					 * cookie is sent we don't tell them
8776 					 * any was sent out.
8777 					 */
8778 					outchain = endoutchain = NULL;
8779 					auth = NULL;
8780 					auth_offset = 0;
8781 					if (!no_out_cnt)
8782 						*num_out += ctl_cnt;
8783 					/* recalc a clean slate and setup */
8784 					switch (net->ro._l_addr.sa.sa_family) {
8785 #ifdef INET
8786 						case AF_INET:
8787 							mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8788 							break;
8789 #endif
8790 #ifdef INET6
8791 						case AF_INET6:
8792 							mtu = net->mtu - SCTP_MIN_OVERHEAD;
8793 							break;
8794 #endif
8795 #if defined(__Userspace__)
8796 						case AF_CONN:
8797 							mtu = net->mtu - sizeof(struct sctphdr);
8798 							break;
8799 #endif
8800 						default:
8801 							/* TSNH */
8802 							mtu = net->mtu;
8803 							break;
8804 					}
8805 					to_out = 0;
8806 					no_fragmentflg = 1;
8807 				}
8808 			}
8809 		}
8810 		if (error != 0) {
8811 			/* try next net */
8812 			continue;
8813 		}
8814 		/************************/
8815 		/* Control transmission */
8816 		/************************/
8817 		/* Now first lets go through the control queue */
8818 		TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
8819 			if ((sack_goes_to) &&
8820 			    (chk->rec.chunk_id.id == SCTP_ECN_ECHO) &&
8821 			    (chk->whoTo != sack_goes_to)) {
8822 				/*
8823 				 * if we have a sack in queue, and we are looking at an
8824 				 * ecn echo that is NOT queued to where the sack is going..
8825 				 */
8826 				if (chk->whoTo == net) {
8827 					/* Don't transmit it to where it's going (current net) */
8828 					continue;
8829 				} else if (sack_goes_to == net) {
8830 					/* But do transmit it to this address */
8831 					goto skip_net_check;
8832 				}
8833 			}
8834 			if (chk->whoTo == NULL) {
8835 				if (asoc->alternate == NULL) {
8836 					if (asoc->primary_destination != net) {
8837 						continue;
8838 					}
8839 				} else {
8840 					if (asoc->alternate != net) {
8841 						continue;
8842 					}
8843 				}
8844 			} else {
8845 				if (chk->whoTo != net) {
8846 					continue;
8847 				}
8848 			}
8849 		skip_net_check:
8850 			if (chk->data == NULL) {
8851 				continue;
8852 			}
8853 			if (chk->sent != SCTP_DATAGRAM_UNSENT) {
8854 				/*
8855 				 * It must be unsent. Cookies and ASCONF's
8856 				 * hang around, but their timers will force
8857 				 * a send when marked for resend.
8858 				 */
8859 				continue;
8860 			}
8861 			/*
8862 			 * if no AUTH is yet included and this chunk
8863 			 * requires it, make sure to account for it.  We
8864 			 * don't apply the size until the AUTH chunk is
8865 			 * actually added below in case there is no room for
8866 			 * this chunk. NOTE: we overload the use of "omtu"
8867 			 * here
8868 			 */
8869 			if ((auth == NULL) &&
8870 			    sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8871 							stcb->asoc.peer_auth_chunks)) {
8872 				omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8873 			} else
8874 				omtu = 0;
8875 			/* Here we do NOT factor the r_mtu */
8876 			if ((chk->send_size <= (int)(mtu - omtu)) ||
8877 			    (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
8878 				/*
8879 				 * We probably should glom the mbuf chain
8880 				 * from the chk->data for control but the
8881 				 * problem is it becomes yet one more level
8882 				 * of tracking to do if for some reason
8883 				 * output fails. Then I have got to
8884 				 * reconstruct the merged control chain.. el
8885 				 * yucko.. for now we take the easy way and
8886 				 * do the copy
8887 				 */
8888 				/*
8889 				 * Add an AUTH chunk, if chunk requires it
8890 				 * save the offset into the chain for AUTH
8891 				 */
8892 				if ((auth == NULL) &&
8893 				    (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8894 								 stcb->asoc.peer_auth_chunks))) {
8895 					outchain = sctp_add_auth_chunk(outchain,
8896 								       &endoutchain,
8897 								       &auth,
8898 								       &auth_offset,
8899 								       stcb,
8900 								       chk->rec.chunk_id.id);
8901 					SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8902 				}
8903 				outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
8904 							       (int)chk->rec.chunk_id.can_take_data,
8905 							       chk->send_size, chk->copy_by_ref);
8906 				if (outchain == NULL) {
8907 					*reason_code = 8;
8908 					SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8909 					return (ENOMEM);
8910 				}
8911 				SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8912 				/* update our MTU size */
8913 				if (mtu > (chk->send_size + omtu))
8914 					mtu -= (chk->send_size + omtu);
8915 				else
8916 					mtu = 0;
8917 				to_out += (chk->send_size + omtu);
8918 				/* Do clear IP_DF ? */
8919 				if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8920 					no_fragmentflg = 0;
8921 				}
8922 				if (chk->rec.chunk_id.can_take_data)
8923 					chk->data = NULL;
8924 				/* Mark things to be removed, if needed */
8925 				if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8926 				    (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
8927 				    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
8928 				    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
8929 				    (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
8930 				    (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
8931 				    (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
8932 				    (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
8933 				    (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
8934 				    (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
8935 				    (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
8936 					if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) {
8937 						hbflag = 1;
8938 					}
8939 					/* remove these chunks at the end */
8940 					if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8941 					    (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
8942 						/* turn off the timer */
8943 						if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
8944 							sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
8945 							                inp, stcb, NULL,
8946 							                SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_1);
8947 						}
8948 					}
8949 					ctl_cnt++;
8950 				} else {
8951 					/*
8952 					 * For other chunks, which have
8953 					 * timers running (e.g. COOKIE),
8954 					 * we just "trust" that they
8955 					 * get sent or retransmitted.
8956 					 */
8957 					ctl_cnt++;
8958 					if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
8959 						cookie = 1;
8960 						no_out_cnt = 1;
8961 					} else if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
8962 						/*
8963 						 * Increment the ECNE send count here.
8964 						 * This means we may be over-zealous in
8965 						 * our counting if the send fails, but it's
8966 						 * the best place to do it (we used to do
8967 						 * it in the queue of the chunk, but that did
8968 						 * not tell how many times it was sent).
8969 						 */
8970 						SCTP_STAT_INCR(sctps_sendecne);
8971 					}
8972 					chk->sent = SCTP_DATAGRAM_SENT;
8973 					if (chk->whoTo == NULL) {
8974 						chk->whoTo = net;
8975 						atomic_add_int(&net->ref_count, 1);
8976 					}
8977 					chk->snd_count++;
8978 				}
8979 				if (mtu == 0) {
8980 					/*
8981 					 * Ok we are out of room but we can
8982 					 * output without affecting the
8983 					 * flight size since this little guy
8984 					 * is a control only packet.
8985 					 */
8986 					if (asconf) {
8987 						sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
8988 						/*
8989 						 * do NOT clear the asconf
8990 						 * flag as it is used to do
8991 						 * appropriate source address
8992 						 * selection.
8993 						 */
8994 					}
8995 					if (cookie) {
8996 						sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
8997 						cookie = 0;
8998 					}
8999 					/* Only HB or ASCONF advances time */
9000 					if (hbflag) {
9001 						if (*now_filled == 0) {
9002 							(void)SCTP_GETTIME_TIMEVAL(now);
9003 							*now_filled = 1;
9004 						}
9005 						net->last_sent_time = *now;
9006 						hbflag = 0;
9007 					}
9008 					if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
9009 					                                        (struct sockaddr *)&net->ro._l_addr,
9010 					                                        outchain,
9011 					                                        auth_offset, auth,
9012 					                                        stcb->asoc.authinfo.active_keyid,
9013 					                                        no_fragmentflg, 0, asconf,
9014 					                                        inp->sctp_lport, stcb->rport,
9015 					                                        htonl(stcb->asoc.peer_vtag),
9016 					                                        net->port, NULL,
9017 #if defined(__FreeBSD__) && !defined(__Userspace__)
9018 					                                        0, 0,
9019 #endif
9020 					                                        so_locked))) {
9021 						/* error, we could not output */
9022 						SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
9023 						if (from_where == 0) {
9024 							SCTP_STAT_INCR(sctps_lowlevelerrusr);
9025 						}
9026 						if (error == ENOBUFS) {
9027 							asoc->ifp_had_enobuf = 1;
9028 							SCTP_STAT_INCR(sctps_lowlevelerr);
9029 						}
9030 						if (error == EHOSTUNREACH) {
9031 							/*
9032 							 * Destination went
9033 							 * unreachable
9034 							 * during this send
9035 							 */
9036 							sctp_move_chunks_from_net(stcb, net);
9037 						}
9038 						*reason_code = 7;
9039 						break;
9040 					} else {
9041 						asoc->ifp_had_enobuf = 0;
9042 					}
9043 					/*
9044 					 * increase the number we sent, if a
9045 					 * cookie is sent we don't tell them
9046 					 * any was sent out.
9047 					 */
9048 					outchain = endoutchain = NULL;
9049 					auth = NULL;
9050 					auth_offset = 0;
9051 					if (!no_out_cnt)
9052 						*num_out += ctl_cnt;
9053 					/* recalc a clean slate and setup */
9054 					switch (net->ro._l_addr.sa.sa_family) {
9055 #ifdef INET
9056 						case AF_INET:
9057 							mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
9058 							break;
9059 #endif
9060 #ifdef INET6
9061 						case AF_INET6:
9062 							mtu = net->mtu - SCTP_MIN_OVERHEAD;
9063 							break;
9064 #endif
9065 #if defined(__Userspace__)
9066 						case AF_CONN:
9067 							mtu = net->mtu - sizeof(struct sctphdr);
9068 							break;
9069 #endif
9070 						default:
9071 							/* TSNH */
9072 							mtu = net->mtu;
9073 							break;
9074 					}
9075 					to_out = 0;
9076 					no_fragmentflg = 1;
9077 				}
9078 			}
9079 		}
9080 		if (error != 0) {
9081 			/* try next net */
9082 			continue;
9083 		}
9084 		/* JRI: if dest is in PF state, do not send data to it */
9085 		if ((asoc->sctp_cmt_on_off > 0) &&
9086 		    (net != stcb->asoc.alternate) &&
9087 		    (net->dest_state & SCTP_ADDR_PF)) {
9088 			goto no_data_fill;
9089 		}
9090 		if (net->flight_size >= net->cwnd) {
9091 			goto no_data_fill;
9092 		}
9093 		if ((asoc->sctp_cmt_on_off > 0) &&
9094 		    (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_RECV_BUFFER_SPLITTING) &&
9095 		    (net->flight_size > max_rwnd_per_dest)) {
9096 			goto no_data_fill;
9097 		}
9098 		/*
9099 		 * We need a specific accounting for the usage of the
9100 		 * send buffer. We also need to check the number of messages
9101 		 * per net. For now, this is better than nothing and it
9102 		 * disabled by default...
9103 		 * is disabled by default...
9104 		if ((asoc->sctp_cmt_on_off > 0) &&
9105 		    (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_SEND_BUFFER_SPLITTING) &&
9106 		    (max_send_per_dest > 0) &&
9107 		    (net->flight_size > max_send_per_dest)) {
9108 			goto no_data_fill;
9109 		}
9110 		/*********************/
9111 		/* Data transmission */
9112 		/*********************/
9113 		/*
9114 		 * if AUTH for DATA is required and no AUTH has been added
9115 		 * yet, account for this in the mtu now... if no data can be
9116 		 * bundled, this adjustment won't matter anyways since the
9117 		 * packet will be going out...
9118 		 */
9119 		data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA,
9120 							     stcb->asoc.peer_auth_chunks);
9121 		if (data_auth_reqd && (auth == NULL)) {
9122 			mtu -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
9123 		}
9124 		/* now lets add any data within the MTU constraints */
9125 		switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
9126 #ifdef INET
9127 		case AF_INET:
9128 			if (net->mtu > SCTP_MIN_V4_OVERHEAD)
9129 				omtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
9130 			else
9131 				omtu = 0;
9132 			break;
9133 #endif
9134 #ifdef INET6
9135 		case AF_INET6:
9136 			if (net->mtu > SCTP_MIN_OVERHEAD)
9137 				omtu = net->mtu - SCTP_MIN_OVERHEAD;
9138 			else
9139 				omtu = 0;
9140 			break;
9141 #endif
9142 #if defined(__Userspace__)
9143 		case AF_CONN:
9144 			if (net->mtu > sizeof(struct sctphdr)) {
9145 				omtu = net->mtu - sizeof(struct sctphdr);
9146 			} else {
9147 				omtu = 0;
9148 			}
9149 			break;
9150 #endif
9151 		default:
9152 			/* TSNH */
9153 			omtu = 0;
9154 			break;
9155 		}
9156 		if ((((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
9157 		      (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
9158 		     (skip_data_for_this_net == 0)) ||
9159 		    (cookie)) {
9160 			TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
9161 				if (no_data_chunks) {
9162 					/* let only control go out */
9163 					*reason_code = 1;
9164 					break;
9165 				}
9166 				if (net->flight_size >= net->cwnd) {
9167 					/* skip this net, no room for data */
9168 					*reason_code = 2;
9169 					break;
9170 				}
9171 				if ((chk->whoTo != NULL) &&
9172 				    (chk->whoTo != net)) {
9173 					/* Don't send the chunk on this net */
9174 					continue;
9175 				}
9176 
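				/*
				 * Without CMT, new data (whoTo == NULL) is
				 * only sent to the primary destination, or
				 * to the alternate if one has been selected.
				 */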
9177 				if (asoc->sctp_cmt_on_off == 0) {
9178 					if ((asoc->alternate) &&
9179 					    (asoc->alternate != net) &&
9180 					    (chk->whoTo == NULL)) {
9181 						continue;
9182 					} else if ((net != asoc->primary_destination) &&
9183 						   (asoc->alternate == NULL) &&
9184 						   (chk->whoTo == NULL)) {
9185 						continue;
9186 					}
9187 				}
9188 				if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) {
					/*-
					 * Strange: we have a chunk that is
					 * too big for its destination and
					 * yet the fragment-ok flag is not
					 * set. Something went wrong when the
					 * PMTU changed and we did not mark
					 * this chunk for some reason. Fix it
					 * here by letting IP fragment it for
					 * now and printing a warning. This
					 * really should not happen...
					 */
9201 					SCTP_PRINTF("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n",
9202 						    chk->send_size, mtu);
9203 					chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
9204 				}
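				/*
				 * While a SHUTDOWN is pending, set the
				 * SACK-IMMEDIATELY bit (the I bit, RFC 7053)
				 * on outgoing DATA so the peer acknowledges
				 * right away and the send queue can drain
				 * faster.
				 */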
9205 				if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) &&
9206 				    (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
9207 					struct sctp_data_chunk *dchkh;
9208 
9209 					dchkh = mtod(chk->data, struct sctp_data_chunk *);
9210 					dchkh->ch.chunk_flags |= SCTP_DATA_SACK_IMMEDIATELY;
9211 				}
9212 				if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) ||
9213 				    ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) {
9214 					/* ok we will add this one */
9215 
9216 					/*
9217 					 * Add an AUTH chunk, if chunk
9218 					 * requires it, save the offset into
9219 					 * the chain for AUTH
9220 					 */
9221 					if (data_auth_reqd) {
9222 						if (auth == NULL) {
9223 							outchain = sctp_add_auth_chunk(outchain,
9224 										       &endoutchain,
9225 										       &auth,
9226 										       &auth_offset,
9227 										       stcb,
9228 										       SCTP_DATA);
9229 							auth_keyid = chk->auth_keyid;
9230 							override_ok = 0;
9231 							SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9232 						} else if (override_ok) {
9233 							/* use this data's keyid */
9234 							auth_keyid = chk->auth_keyid;
9235 							override_ok = 0;
9236 						} else if (auth_keyid != chk->auth_keyid) {
9237 							/* different keyid, so done bundling */
9238 							break;
9239 						}
9240 					}
9241 					outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0,
9242 								       chk->send_size, chk->copy_by_ref);
9243 					if (outchain == NULL) {
9244 						SCTPDBG(SCTP_DEBUG_OUTPUT3, "No memory?\n");
9245 						if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
9246 							sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9247 						}
9248 						*reason_code = 3;
9249 						SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
9250 						return (ENOMEM);
9251 					}
					/* update our MTU size */
9253 					/* Do clear IP_DF ? */
9254 					if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
9255 						no_fragmentflg = 0;
9256 					}
9257 					/* unsigned subtraction of mtu */
9258 					if (mtu > chk->send_size)
9259 						mtu -= chk->send_size;
9260 					else
9261 						mtu = 0;
9262 					/* unsigned subtraction of r_mtu */
9263 					if (r_mtu > chk->send_size)
9264 						r_mtu -= chk->send_size;
9265 					else
9266 						r_mtu = 0;
9267 
9268 					to_out += chk->send_size;
9269 					if ((to_out > mx_mtu) && no_fragmentflg) {
9270 #ifdef INVARIANTS
9271 						panic("Exceeding mtu of %d out size is %d", mx_mtu, to_out);
9272 #else
9273 						SCTP_PRINTF("Exceeding mtu of %d out size is %d\n",
9274 							    mx_mtu, to_out);
9275 #endif
9276 					}
9277 					chk->window_probe = 0;
9278 					data_list[bundle_at++] = chk;
9279 					if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
9280 						break;
9281 					}
9282 					if (chk->sent == SCTP_DATAGRAM_UNSENT) {
9283 						if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
9284 							SCTP_STAT_INCR_COUNTER64(sctps_outorderchunks);
9285 						} else {
9286 							SCTP_STAT_INCR_COUNTER64(sctps_outunorderchunks);
9287 						}
9288 						if (((chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) == SCTP_DATA_LAST_FRAG) &&
9289 						    ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0))
							/* Count the number of user messages that were
							 * fragmented. We do this by counting only when
							 * we see a LAST fragment (that is not also a
							 * FIRST fragment).
							 */
9294 							SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs);
9295 					}
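					/*
					 * Stop bundling once mtu or r_mtu
					 * is exhausted, or if we are limited
					 * to a single chunk; a lone chunk
					 * sent with nothing in flight is
					 * flagged as a window probe.
					 */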
9296 					if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) {
9297 						if ((one_chunk) && (stcb->asoc.total_flight == 0)) {
9298 							data_list[0]->window_probe = 1;
9299 							net->window_probe = 1;
9300 						}
9301 						break;
9302 					}
9303 				} else {
9304 					/*
9305 					 * Must be sent in order of the
9306 					 * TSN's (on a network)
9307 					 */
9308 					break;
9309 				}
9310 			}	/* for (chunk gather loop for this net) */
9311 		}		/* if asoc.state OPEN */
9312 	no_data_fill:
9313 		/* Is there something to send for this destination? */
9314 		if (outchain) {
9315 			/* We may need to start a control timer or two */
9316 			if (asconf) {
9317 				sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp,
9318 						 stcb, net);
9319 				/*
9320 				 * do NOT clear the asconf flag as it is used
9321 				 * to do appropriate source address selection.
9322 				 */
9323 			}
9324 			if (cookie) {
9325 				sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
9326 				cookie = 0;
9327 			}
9328 			/* must start a send timer if data is being sent */
9329 			if (bundle_at && (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
				/*
				 * no timer is running on this destination,
				 * so restart it.
				 */
9334 				sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9335 			}
9336 			if (bundle_at || hbflag) {
9337 				/* For data/asconf and hb set time */
9338 				if (*now_filled == 0) {
9339 					(void)SCTP_GETTIME_TIMEVAL(now);
9340 					*now_filled = 1;
9341 				}
9342 				net->last_sent_time = *now;
9343 			}
9344 			/* Now send it, if there is anything to send :> */
9345 			if ((error = sctp_lowlevel_chunk_output(inp,
9346 			                                        stcb,
9347 			                                        net,
9348 			                                        (struct sockaddr *)&net->ro._l_addr,
9349 			                                        outchain,
9350 			                                        auth_offset,
9351 			                                        auth,
9352 			                                        auth_keyid,
9353 			                                        no_fragmentflg,
9354 			                                        bundle_at,
9355 			                                        asconf,
9356 			                                        inp->sctp_lport, stcb->rport,
9357 			                                        htonl(stcb->asoc.peer_vtag),
9358 			                                        net->port, NULL,
9359 #if defined(__FreeBSD__) && !defined(__Userspace__)
9360 			                                        0, 0,
9361 #endif
9362 			                                        so_locked))) {
9363 				/* error, we could not output */
9364 				SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
9365 				if (from_where == 0) {
9366 					SCTP_STAT_INCR(sctps_lowlevelerrusr);
9367 				}
9368 				if (error == ENOBUFS) {
9369 					asoc->ifp_had_enobuf = 1;
9370 					SCTP_STAT_INCR(sctps_lowlevelerr);
9371 				}
9372 				if (error == EHOSTUNREACH) {
9373 					/*
9374 					 * Destination went unreachable
9375 					 * during this send
9376 					 */
9377 					sctp_move_chunks_from_net(stcb, net);
9378 				}
9379 				*reason_code = 6;
				/*-
				 * Reset this to be paranoid. As far as we
				 * can tell, the continue takes us back to
				 * the top of the for loop, but just to make
				 * sure, reset it again here.
				 */
9386 				ctl_cnt = 0;
9387 				continue; /* This takes us back to the for() for the nets. */
9388 			} else {
9389 				asoc->ifp_had_enobuf = 0;
9390 			}
9391 			endoutchain = NULL;
9392 			auth = NULL;
9393 			auth_offset = 0;
9394 			if (!no_out_cnt) {
9395 				*num_out += (ctl_cnt + bundle_at);
9396 			}
9397 			if (bundle_at) {
9398 				/* setup for a RTO measurement */
9399 				tsns_sent = data_list[0]->rec.data.tsn;
9400 				/* fill time if not already filled */
9401 				if (*now_filled == 0) {
9402 					(void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
9403 					*now_filled = 1;
9404 					*now = asoc->time_last_sent;
9405 				} else {
9406 					asoc->time_last_sent = *now;
9407 				}
9408 				if (net->rto_needed) {
9409 					data_list[0]->do_rtt = 1;
9410 					net->rto_needed = 0;
9411 				}
9412 				SCTP_STAT_INCR_BY(sctps_senddata, bundle_at);
9413 				sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net);
9414 			}
9415 			if (one_chunk) {
9416 				break;
9417 			}
9418 		}
9419 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9420 			sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND);
9421 		}
9422 	}
9423 	if (old_start_at == NULL) {
9424 		old_start_at = start_at;
9425 		start_at = TAILQ_FIRST(&asoc->nets);
9426 		if (old_start_at)
9427 			goto again_one_more_time;
9428 	}
9429 
9430 	/*
9431 	 * At the end there should be no NON timed chunks hanging on this
9432 	 * queue.
9433 	 */
9434 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9435 		sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND);
9436 	}
9437 	if ((*num_out == 0) && (*reason_code == 0)) {
9438 		*reason_code = 4;
9439 	} else {
9440 		*reason_code = 5;
9441 	}
9442 	sctp_clean_up_ctl(stcb, asoc, so_locked);
9443 	return (0);
9444 }
9445 
9446 void
9447 sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
9448 {
	/*-
	 * Prepend an OPERATION ERROR chunk header and put the chunk on the
	 * end of the control chunk queue.
	 */
9453 	struct sctp_chunkhdr *hdr;
9454 	struct sctp_tmit_chunk *chk;
9455 	struct mbuf *mat, *last_mbuf;
9456 	uint32_t chunk_length;
9457 	uint16_t padding_length;
9458 
9459 	SCTP_TCB_LOCK_ASSERT(stcb);
9460 	SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_NOWAIT);
9461 	if (op_err == NULL) {
9462 		return;
9463 	}
9464 	last_mbuf = NULL;
9465 	chunk_length = 0;
9466 	for (mat = op_err; mat != NULL; mat = SCTP_BUF_NEXT(mat)) {
9467 		chunk_length += SCTP_BUF_LEN(mat);
9468 		if (SCTP_BUF_NEXT(mat) == NULL) {
9469 			last_mbuf = mat;
9470 		}
9471 	}
9472 	if (chunk_length > SCTP_MAX_CHUNK_LENGTH) {
9473 		sctp_m_freem(op_err);
9474 		return;
9475 	}
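	/*
	 * Chunks must end on a 4-byte boundary (RFC 4960, Section 3.2), so
	 * compute how many pad bytes are needed and append them to the last
	 * mbuf; e.g. a 6-byte chunk gets 2 bytes of padding.
	 */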
9476 	padding_length = chunk_length % 4;
9477 	if (padding_length != 0) {
9478 		padding_length = 4 - padding_length;
9479 	}
9480 	if (padding_length != 0) {
9481 		if (sctp_add_pad_tombuf(last_mbuf, padding_length) == NULL) {
9482 			sctp_m_freem(op_err);
9483 			return;
9484 		}
9485 	}
9486 	sctp_alloc_a_chunk(stcb, chk);
9487 	if (chk == NULL) {
9488 		/* no memory */
9489 		sctp_m_freem(op_err);
9490 		return;
9491 	}
9492 	chk->copy_by_ref = 0;
9493 	chk->rec.chunk_id.id = SCTP_OPERATION_ERROR;
9494 	chk->rec.chunk_id.can_take_data = 0;
9495 	chk->flags = 0;
9496 	chk->send_size = (uint16_t)chunk_length;
9497 	chk->sent = SCTP_DATAGRAM_UNSENT;
9498 	chk->snd_count = 0;
9499 	chk->asoc = &stcb->asoc;
9500 	chk->data = op_err;
9501 	chk->whoTo = NULL;
9502 	hdr = mtod(op_err, struct sctp_chunkhdr *);
9503 	hdr->chunk_type = SCTP_OPERATION_ERROR;
9504 	hdr->chunk_flags = 0;
9505 	hdr->chunk_length = htons(chk->send_size);
9506 	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9507 	chk->asoc->ctrl_queue_cnt++;
9508 }
9509 
9510 int
9511 sctp_send_cookie_echo(struct mbuf *m,
9512     int offset, int limit,
9513     struct sctp_tcb *stcb,
9514     struct sctp_nets *net)
9515 {
9516 	/*-
9517 	 * pull out the cookie and put it at the front of the control chunk
9518 	 * queue.
9519 	 */
9520 	int at;
9521 	struct mbuf *cookie;
9522 	struct sctp_paramhdr param, *phdr;
9523 	struct sctp_chunkhdr *hdr;
9524 	struct sctp_tmit_chunk *chk;
9525 	uint16_t ptype, plen;
9526 
9527 	SCTP_TCB_LOCK_ASSERT(stcb);
9528 	/* First find the cookie in the param area */
9529 	cookie = NULL;
9530 	at = offset + sizeof(struct sctp_init_chunk);
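	/*
	 * Walk the parameters of the received INIT-ACK, starting right
	 * after its fixed chunk header, until the State Cookie parameter is
	 * found.
	 */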
9531 	for (;;) {
9532 		phdr = sctp_get_next_param(m, at, &param, sizeof(param));
9533 		if (phdr == NULL) {
9534 			return (-3);
9535 		}
9536 		ptype = ntohs(phdr->param_type);
9537 		plen = ntohs(phdr->param_length);
9538 		if (plen < sizeof(struct sctp_paramhdr)) {
9539 			return (-6);
9540 		}
9541 		if (ptype == SCTP_STATE_COOKIE) {
9542 			int pad;
9543 
9544 			/* found the cookie */
9545 			if (at + plen > limit) {
9546 				return (-7);
9547 			}
9548 			cookie = SCTP_M_COPYM(m, at, plen, M_NOWAIT);
9549 			if (cookie == NULL) {
9550 				/* No memory */
9551 				return (-2);
9552 			}
9553 			if ((pad = (plen % 4)) > 0) {
9554 				pad = 4 - pad;
9555 			}
9556 			if (pad > 0) {
9557 				if (sctp_pad_lastmbuf(cookie, pad, NULL) == NULL) {
9558 					return (-8);
9559 				}
9560 			}
9561 #ifdef SCTP_MBUF_LOGGING
9562 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9563 				sctp_log_mbc(cookie, SCTP_MBUF_ICOPY);
9564 			}
9565 #endif
9566 			break;
9567 		}
9568 		at += SCTP_SIZE32(plen);
9569 	}
	/* ok, we got the cookie, let's change it into a COOKIE-ECHO chunk */
	/* first the change from parameter header to chunk header */
9572 	hdr = mtod(cookie, struct sctp_chunkhdr *);
9573 	hdr->chunk_type = SCTP_COOKIE_ECHO;
9574 	hdr->chunk_flags = 0;
9575 	/* get the chunk stuff now and place it in the FRONT of the queue */
9576 	sctp_alloc_a_chunk(stcb, chk);
9577 	if (chk == NULL) {
9578 		/* no memory */
9579 		sctp_m_freem(cookie);
9580 		return (-5);
9581 	}
9582 	chk->copy_by_ref = 0;
9583 	chk->rec.chunk_id.id = SCTP_COOKIE_ECHO;
9584 	chk->rec.chunk_id.can_take_data = 0;
9585 	chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9586 	chk->send_size = SCTP_SIZE32(plen);
9587 	chk->sent = SCTP_DATAGRAM_UNSENT;
9588 	chk->snd_count = 0;
9589 	chk->asoc = &stcb->asoc;
9590 	chk->data = cookie;
9591 	chk->whoTo = net;
9592 	atomic_add_int(&chk->whoTo->ref_count, 1);
9593 	TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next);
9594 	chk->asoc->ctrl_queue_cnt++;
9595 	return (0);
9596 }
9597 
9598 void
9599 sctp_send_heartbeat_ack(struct sctp_tcb *stcb,
9600     struct mbuf *m,
9601     int offset,
9602     int chk_length,
9603     struct sctp_nets *net)
9604 {
9605 	/*
9606 	 * take a HB request and make it into a HB ack and send it.
9607 	 */
9608 	struct mbuf *outchain;
9609 	struct sctp_chunkhdr *chdr;
9610 	struct sctp_tmit_chunk *chk;
9611 
9612 	if (net == NULL)
9613 		/* must have a net pointer */
9614 		return;
9615 
9616 	outchain = SCTP_M_COPYM(m, offset, chk_length, M_NOWAIT);
9617 	if (outchain == NULL) {
9618 		/* gak out of memory */
9619 		return;
9620 	}
9621 #ifdef SCTP_MBUF_LOGGING
9622 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9623 		sctp_log_mbc(outchain, SCTP_MBUF_ICOPY);
9624 	}
9625 #endif
9626 	chdr = mtod(outchain, struct sctp_chunkhdr *);
9627 	chdr->chunk_type = SCTP_HEARTBEAT_ACK;
9628 	chdr->chunk_flags = 0;
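	/*
	 * The copied Heartbeat Info parameter is echoed back unchanged;
	 * only the chunk type switches from HEARTBEAT to HEARTBEAT-ACK.
	 * Pad the chunk out to a 4-byte boundary if needed.
	 */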
9629 	if (chk_length % 4 != 0) {
9630 		sctp_pad_lastmbuf(outchain, 4 - (chk_length % 4), NULL);
9631 	}
9632 	sctp_alloc_a_chunk(stcb, chk);
9633 	if (chk == NULL) {
9634 		/* no memory */
9635 		sctp_m_freem(outchain);
9636 		return;
9637 	}
9638 	chk->copy_by_ref = 0;
9639 	chk->rec.chunk_id.id = SCTP_HEARTBEAT_ACK;
9640 	chk->rec.chunk_id.can_take_data = 1;
9641 	chk->flags = 0;
9642 	chk->send_size = chk_length;
9643 	chk->sent = SCTP_DATAGRAM_UNSENT;
9644 	chk->snd_count = 0;
9645 	chk->asoc = &stcb->asoc;
9646 	chk->data = outchain;
9647 	chk->whoTo = net;
9648 	atomic_add_int(&chk->whoTo->ref_count, 1);
9649 	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9650 	chk->asoc->ctrl_queue_cnt++;
9651 }
9652 
9653 void
9654 sctp_send_cookie_ack(struct sctp_tcb *stcb)
9655 {
9656 	/* formulate and queue a cookie-ack back to sender */
9657 	struct mbuf *cookie_ack;
9658 	struct sctp_chunkhdr *hdr;
9659 	struct sctp_tmit_chunk *chk;
9660 
9661 	SCTP_TCB_LOCK_ASSERT(stcb);
9662 
9663 	cookie_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER);
9664 	if (cookie_ack == NULL) {
9665 		/* no mbuf's */
9666 		return;
9667 	}
9668 	SCTP_BUF_RESV_UF(cookie_ack, SCTP_MIN_OVERHEAD);
9669 	sctp_alloc_a_chunk(stcb, chk);
9670 	if (chk == NULL) {
9671 		/* no memory */
9672 		sctp_m_freem(cookie_ack);
9673 		return;
9674 	}
9675 	chk->copy_by_ref = 0;
9676 	chk->rec.chunk_id.id = SCTP_COOKIE_ACK;
9677 	chk->rec.chunk_id.can_take_data = 1;
9678 	chk->flags = 0;
9679 	chk->send_size = sizeof(struct sctp_chunkhdr);
9680 	chk->sent = SCTP_DATAGRAM_UNSENT;
9681 	chk->snd_count = 0;
9682 	chk->asoc = &stcb->asoc;
9683 	chk->data = cookie_ack;
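	/*
	 * Prefer to send the COOKIE-ACK back to the address the last
	 * control chunk (normally the COOKIE-ECHO) came from; otherwise
	 * leave whoTo unset and let the output path pick a destination.
	 */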
9684 	if (chk->asoc->last_control_chunk_from != NULL) {
9685 		chk->whoTo = chk->asoc->last_control_chunk_from;
9686 		atomic_add_int(&chk->whoTo->ref_count, 1);
9687 	} else {
9688 		chk->whoTo = NULL;
9689 	}
9690 	hdr = mtod(cookie_ack, struct sctp_chunkhdr *);
9691 	hdr->chunk_type = SCTP_COOKIE_ACK;
9692 	hdr->chunk_flags = 0;
9693 	hdr->chunk_length = htons(chk->send_size);
9694 	SCTP_BUF_LEN(cookie_ack) = chk->send_size;
9695 	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9696 	chk->asoc->ctrl_queue_cnt++;
9697 	return;
9698 }
9699 
9700 void
9701 sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net)
9702 {
9703 	/* formulate and queue a SHUTDOWN-ACK back to the sender */
9704 	struct mbuf *m_shutdown_ack;
9705 	struct sctp_shutdown_ack_chunk *ack_cp;
9706 	struct sctp_tmit_chunk *chk;
9707 
9708 	m_shutdown_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_ack_chunk), 0, M_NOWAIT, 1, MT_HEADER);
9709 	if (m_shutdown_ack == NULL) {
9710 		/* no mbuf's */
9711 		return;
9712 	}
9713 	SCTP_BUF_RESV_UF(m_shutdown_ack, SCTP_MIN_OVERHEAD);
9714 	sctp_alloc_a_chunk(stcb, chk);
9715 	if (chk == NULL) {
9716 		/* no memory */
9717 		sctp_m_freem(m_shutdown_ack);
9718 		return;
9719 	}
9720 	chk->copy_by_ref = 0;
9721 	chk->rec.chunk_id.id = SCTP_SHUTDOWN_ACK;
9722 	chk->rec.chunk_id.can_take_data = 1;
9723 	chk->flags = 0;
9724 	chk->send_size = sizeof(struct sctp_chunkhdr);
9725 	chk->sent = SCTP_DATAGRAM_UNSENT;
9726 	chk->snd_count = 0;
9727 	chk->asoc = &stcb->asoc;
9728 	chk->data = m_shutdown_ack;
9729 	chk->whoTo = net;
9730 	if (chk->whoTo) {
9731 		atomic_add_int(&chk->whoTo->ref_count, 1);
9732 	}
9733 	ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *);
9734 	ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK;
9735 	ack_cp->ch.chunk_flags = 0;
9736 	ack_cp->ch.chunk_length = htons(chk->send_size);
9737 	SCTP_BUF_LEN(m_shutdown_ack) = chk->send_size;
9738 	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9739 	chk->asoc->ctrl_queue_cnt++;
9740 	return;
9741 }
9742 
9743 void
9744 sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net)
9745 {
9746 	/* formulate and queue a SHUTDOWN to the sender */
9747 	struct mbuf *m_shutdown;
9748 	struct sctp_shutdown_chunk *shutdown_cp;
9749 	struct sctp_tmit_chunk *chk;
9750 
9751 	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
9752 		if (chk->rec.chunk_id.id == SCTP_SHUTDOWN) {
9753 			/* We already have a SHUTDOWN queued. Reuse it. */
9754 			if (chk->whoTo) {
9755 				sctp_free_remote_addr(chk->whoTo);
9756 				chk->whoTo = NULL;
9757 			}
9758 			break;
9759 		}
9760 	}
9761 	if (chk == NULL) {
9762 		m_shutdown = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_chunk), 0, M_NOWAIT, 1, MT_HEADER);
9763 		if (m_shutdown == NULL) {
9764 			/* no mbuf's */
9765 			return;
9766 		}
9767 		SCTP_BUF_RESV_UF(m_shutdown, SCTP_MIN_OVERHEAD);
9768 		sctp_alloc_a_chunk(stcb, chk);
9769 		if (chk == NULL) {
9770 			/* no memory */
9771 			sctp_m_freem(m_shutdown);
9772 			return;
9773 		}
9774 		chk->copy_by_ref = 0;
9775 		chk->rec.chunk_id.id = SCTP_SHUTDOWN;
9776 		chk->rec.chunk_id.can_take_data = 1;
9777 		chk->flags = 0;
9778 		chk->send_size = sizeof(struct sctp_shutdown_chunk);
9779 		chk->sent = SCTP_DATAGRAM_UNSENT;
9780 		chk->snd_count = 0;
9781 		chk->asoc = &stcb->asoc;
9782 		chk->data = m_shutdown;
9783 		chk->whoTo = net;
9784 		if (chk->whoTo) {
9785 			atomic_add_int(&chk->whoTo->ref_count, 1);
9786 		}
9787 		shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *);
9788 		shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN;
9789 		shutdown_cp->ch.chunk_flags = 0;
9790 		shutdown_cp->ch.chunk_length = htons(chk->send_size);
9791 		shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
9792 		SCTP_BUF_LEN(m_shutdown) = chk->send_size;
9793 		TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9794 		chk->asoc->ctrl_queue_cnt++;
9795 	} else {
9796 		TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk, sctp_next);
9797 		chk->whoTo = net;
9798 		if (chk->whoTo) {
9799 			atomic_add_int(&chk->whoTo->ref_count, 1);
9800 		}
9801 		shutdown_cp = mtod(chk->data, struct sctp_shutdown_chunk *);
9802 		shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
9803 		TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
9804 	}
9805 	return;
9806 }
9807 
9808 void
9809 sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net, int addr_locked)
9810 {
9811 	/*
9812 	 * formulate and queue an ASCONF to the peer.
9813 	 * ASCONF parameters should be queued on the assoc queue.
9814 	 */
9815 	struct sctp_tmit_chunk *chk;
9816 	struct mbuf *m_asconf;
9817 	int len;
9818 
9819 	SCTP_TCB_LOCK_ASSERT(stcb);
9820 
9821 	if ((!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) &&
9822 	    (!sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS))) {
9823 		/* can't send a new one if there is one in flight already */
9824 		return;
9825 	}
9826 
9827 	/* compose an ASCONF chunk, maximum length is PMTU */
9828 	m_asconf = sctp_compose_asconf(stcb, &len, addr_locked);
9829 	if (m_asconf == NULL) {
9830 		return;
9831 	}
9832 
9833 	sctp_alloc_a_chunk(stcb, chk);
9834 	if (chk == NULL) {
9835 		/* no memory */
9836 		sctp_m_freem(m_asconf);
9837 		return;
9838 	}
9839 
9840 	chk->copy_by_ref = 0;
9841 	chk->rec.chunk_id.id = SCTP_ASCONF;
9842 	chk->rec.chunk_id.can_take_data = 0;
9843 	chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9844 	chk->data = m_asconf;
9845 	chk->send_size = len;
9846 	chk->sent = SCTP_DATAGRAM_UNSENT;
9847 	chk->snd_count = 0;
9848 	chk->asoc = &stcb->asoc;
9849 	chk->whoTo = net;
9850 	if (chk->whoTo) {
9851 		atomic_add_int(&chk->whoTo->ref_count, 1);
9852 	}
9853 	TAILQ_INSERT_TAIL(&chk->asoc->asconf_send_queue, chk, sctp_next);
9854 	chk->asoc->ctrl_queue_cnt++;
9855 	return;
9856 }
9857 
9858 void
9859 sctp_send_asconf_ack(struct sctp_tcb *stcb)
9860 {
	/*
	 * formulate and queue an ASCONF-ACK back to the sender.
	 * The ASCONF-ACK must be stored in the tcb.
	 */
9865 	struct sctp_tmit_chunk *chk;
9866 	struct sctp_asconf_ack *ack, *latest_ack;
9867 	struct mbuf *m_ack;
9868 	struct sctp_nets *net = NULL;
9869 
9870 	SCTP_TCB_LOCK_ASSERT(stcb);
9871 	/* Get the latest ASCONF-ACK */
9872 	latest_ack = TAILQ_LAST(&stcb->asoc.asconf_ack_sent, sctp_asconf_ackhead);
9873 	if (latest_ack == NULL) {
9874 		return;
9875 	}
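	/*
	 * If the previous ASCONF-ACK went to the same address this ASCONF
	 * came from, the peer probably missed it, so try to retransmit via
	 * an alternate address when one is available.
	 */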
9876 	if (latest_ack->last_sent_to != NULL &&
9877 	    latest_ack->last_sent_to == stcb->asoc.last_control_chunk_from) {
9878 		/* we're doing a retransmission */
9879 		net = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0);
9880 		if (net == NULL) {
9881 			/* no alternate */
9882 			if (stcb->asoc.last_control_chunk_from == NULL) {
9883 				if (stcb->asoc.alternate) {
9884 					net = stcb->asoc.alternate;
9885 				} else {
9886 					net = stcb->asoc.primary_destination;
9887 				}
9888 			} else {
9889 				net = stcb->asoc.last_control_chunk_from;
9890 			}
9891 		}
9892 	} else {
9893 		/* normal case */
9894 		if (stcb->asoc.last_control_chunk_from == NULL) {
9895 			if (stcb->asoc.alternate) {
9896 				net = stcb->asoc.alternate;
9897 			} else {
9898 				net = stcb->asoc.primary_destination;
9899 			}
9900 		} else {
9901 			net = stcb->asoc.last_control_chunk_from;
9902 		}
9903 	}
9904 	latest_ack->last_sent_to = net;
9905 
9906 	TAILQ_FOREACH(ack, &stcb->asoc.asconf_ack_sent, next) {
9907 		if (ack->data == NULL) {
9908 			continue;
9909 		}
9910 
9911 		/* copy the asconf_ack */
9912 		m_ack = SCTP_M_COPYM(ack->data, 0, M_COPYALL, M_NOWAIT);
9913 		if (m_ack == NULL) {
9914 			/* couldn't copy it */
9915 			return;
9916 		}
9917 #ifdef SCTP_MBUF_LOGGING
9918 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9919 			sctp_log_mbc(m_ack, SCTP_MBUF_ICOPY);
9920 		}
9921 #endif
9922 
9923 		sctp_alloc_a_chunk(stcb, chk);
9924 		if (chk == NULL) {
9925 			/* no memory */
9926 			if (m_ack)
9927 				sctp_m_freem(m_ack);
9928 			return;
9929 		}
9930 		chk->copy_by_ref = 0;
9931 		chk->rec.chunk_id.id = SCTP_ASCONF_ACK;
9932 		chk->rec.chunk_id.can_take_data = 1;
9933 		chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9934 		chk->whoTo = net;
9935 		if (chk->whoTo) {
9936 			atomic_add_int(&chk->whoTo->ref_count, 1);
9937 		}
9938 		chk->data = m_ack;
9939 		chk->send_size = ack->len;
9940 		chk->sent = SCTP_DATAGRAM_UNSENT;
9941 		chk->snd_count = 0;
9942 		chk->asoc = &stcb->asoc;
9943 
9944 		TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9945 		chk->asoc->ctrl_queue_cnt++;
9946 	}
9947 	return;
9948 }
9949 
9950 static int
9951 sctp_chunk_retransmission(struct sctp_inpcb *inp,
9952     struct sctp_tcb *stcb,
9953     struct sctp_association *asoc,
9954     int *cnt_out, struct timeval *now, int *now_filled, int *fr_done, int so_locked)
9955 {
	/*-
	 * send out one MTU of retransmission. If fast_retransmit is
	 * happening we ignore the cwnd. Otherwise we obey the cwnd and
	 * rwnd. For a Cookie or Asconf in the control chunk queue we
	 * retransmit them by themselves.
	 *
	 * For data chunks we will pick out the lowest TSNs in the sent_queue
	 * marked for resend and bundle them all together (up to an MTU of
	 * the destination). The address to send to should have been
	 * selected/changed where the retransmission was marked (i.e. in FR
	 * or t3-timeout routines).
	 */
9968 	struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
9969 	struct sctp_tmit_chunk *chk, *fwd;
9970 	struct mbuf *m, *endofchain;
9971 	struct sctp_nets *net = NULL;
9972 	uint32_t tsns_sent = 0;
9973 	int no_fragmentflg, bundle_at, cnt_thru;
9974 	unsigned int mtu;
9975 	int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started;
9976 	struct sctp_auth_chunk *auth = NULL;
9977 	uint32_t auth_offset = 0;
9978 	uint16_t auth_keyid;
9979 	int override_ok = 1;
9980 	int data_auth_reqd = 0;
9981 	uint32_t dmtu = 0;
9982 
9983 #if defined(__APPLE__) && !defined(__Userspace__)
9984 	if (so_locked) {
9985 		sctp_lock_assert(SCTP_INP_SO(inp));
9986 	} else {
9987 		sctp_unlock_assert(SCTP_INP_SO(inp));
9988 	}
9989 #endif
9990 	SCTP_TCB_LOCK_ASSERT(stcb);
9991 	tmr_started = ctl_cnt = 0;
9992 	no_fragmentflg = 1;
9993 	fwd_tsn = 0;
9994 	*cnt_out = 0;
9995 	fwd = NULL;
9996 	endofchain = m = NULL;
9997 	auth_keyid = stcb->asoc.authinfo.active_keyid;
9998 #ifdef SCTP_AUDITING_ENABLED
9999 	sctp_audit_log(0xC3, 1);
10000 #endif
10001 	if ((TAILQ_EMPTY(&asoc->sent_queue)) &&
10002 	    (TAILQ_EMPTY(&asoc->control_send_queue))) {
10003 		SCTPDBG(SCTP_DEBUG_OUTPUT1,"SCTP hits empty queue with cnt set to %d?\n",
10004 			asoc->sent_queue_retran_cnt);
10005 		asoc->sent_queue_cnt = 0;
10006 		asoc->sent_queue_cnt_removeable = 0;
10007 		/* send back 0/0 so we enter normal transmission */
10008 		*cnt_out = 0;
10009 		return (0);
10010 	}
10011 	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10012 		if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) ||
10013 		    (chk->rec.chunk_id.id == SCTP_STREAM_RESET) ||
10014 		    (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) {
10015 			if (chk->sent != SCTP_DATAGRAM_RESEND) {
10016 				continue;
10017 			}
10018 			if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
10019 				if (chk != asoc->str_reset) {
					/*
					 * not eligible for retransmission
					 * if it's not ours
					 */
10024 					continue;
10025 				}
10026 			}
10027 			ctl_cnt++;
10028 			if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
10029 				fwd_tsn = 1;
10030 			}
			/*
			 * Add an AUTH chunk if the chunk requires it, and
			 * save the offset into the chain for AUTH
			 */
10035 			if ((auth == NULL) &&
10036 			    (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
10037 							 stcb->asoc.peer_auth_chunks))) {
10038 				m = sctp_add_auth_chunk(m, &endofchain,
10039 							&auth, &auth_offset,
10040 							stcb,
10041 							chk->rec.chunk_id.id);
10042 				SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10043 			}
10044 			m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
10045 			break;
10046 		}
10047 	}
10048 	one_chunk = 0;
10049 	cnt_thru = 0;
10050 	/* do we have control chunks to retransmit? */
10051 	if (m != NULL) {
10052 		/* Start a timer no matter if we succeed or fail */
10053 		if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
10054 			sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo);
10055 		} else if (chk->rec.chunk_id.id == SCTP_ASCONF)
10056 			sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo);
10057 		chk->snd_count++;	/* update our count */
10058 		if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo,
10059 		                                        (struct sockaddr *)&chk->whoTo->ro._l_addr, m,
10060 		                                        auth_offset, auth, stcb->asoc.authinfo.active_keyid,
10061 		                                        no_fragmentflg, 0, 0,
10062 		                                        inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
10063 		                                        chk->whoTo->port, NULL,
10064 #if defined(__FreeBSD__) && !defined(__Userspace__)
10065 		                                        0, 0,
10066 #endif
10067 		                                        so_locked))) {
10068 			SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
10069 			if (error == ENOBUFS) {
10070 				asoc->ifp_had_enobuf = 1;
10071 				SCTP_STAT_INCR(sctps_lowlevelerr);
10072 			}
10073 			return (error);
10074 		} else {
10075 			asoc->ifp_had_enobuf = 0;
10076 		}
10077 		endofchain = NULL;
10078 		auth = NULL;
10079 		auth_offset = 0;
		/*
		 * We don't want to mark the net->sent time here, since we
		 * use this for HB and retransmissions cannot measure RTT
		 */
10084 		/* (void)SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time); */
10085 		*cnt_out += 1;
10086 		chk->sent = SCTP_DATAGRAM_SENT;
10087 		sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
10088 		if (fwd_tsn == 0) {
10089 			return (0);
10090 		} else {
10091 			/* Clean up the fwd-tsn list */
10092 			sctp_clean_up_ctl(stcb, asoc, so_locked);
10093 			return (0);
10094 		}
10095 	}
	/*
	 * Ok, it is just data retransmission we need to do, possibly
	 * together with a fwd-tsn.
	 */
10100 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
10101 		return (SCTP_RETRAN_DONE);
10102 	}
10103 	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED) ||
10104 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT)) {
10105 		/* not yet open, resend the cookie and that is it */
10106 		return (1);
10107 	}
10108 #ifdef SCTP_AUDITING_ENABLED
10109 	sctp_auditing(20, inp, stcb, NULL);
10110 #endif
10111 	data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks);
10112 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
10113 		if (chk->sent != SCTP_DATAGRAM_RESEND) {
10114 			/* No, not sent to this net or not ready for rtx */
10115 			continue;
10116 		}
10117 		if (chk->data == NULL) {
10118 			SCTP_PRINTF("TSN:%x chk->snd_count:%d chk->sent:%d can't retran - no data\n",
10119 			            chk->rec.data.tsn, chk->snd_count, chk->sent);
10120 			continue;
10121 		}
10122 		if ((SCTP_BASE_SYSCTL(sctp_max_retran_chunk)) &&
10123 		    (chk->snd_count >= SCTP_BASE_SYSCTL(sctp_max_retran_chunk))) {
10124 			struct mbuf *op_err;
10125 			char msg[SCTP_DIAG_INFO_LEN];
10126 
10127 			SCTP_SNPRINTF(msg, sizeof(msg), "TSN %8.8x retransmitted %d times, giving up",
10128 			              chk->rec.data.tsn, chk->snd_count);
10129 			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
10130 			                             msg);
10131 			atomic_add_int(&stcb->asoc.refcnt, 1);
10132 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err,
10133 			                          so_locked);
10134 			SCTP_TCB_LOCK(stcb);
10135 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
10136 			return (SCTP_RETRAN_EXIT);
10137 		}
10138 		/* pick up the net */
10139 		net = chk->whoTo;
10140 		switch (net->ro._l_addr.sa.sa_family) {
10141 #ifdef INET
10142 			case AF_INET:
10143 				mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
10144 				break;
10145 #endif
10146 #ifdef INET6
10147 			case AF_INET6:
10148 				mtu = net->mtu - SCTP_MIN_OVERHEAD;
10149 				break;
10150 #endif
10151 #if defined(__Userspace__)
10152 			case AF_CONN:
10153 				mtu = net->mtu - sizeof(struct sctphdr);
10154 				break;
10155 #endif
10156 			default:
10157 				/* TSNH */
10158 				mtu = net->mtu;
10159 				break;
10160 		}
10161 
10162 		if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) {
10163 			/* No room in peers rwnd */
10164 			uint32_t tsn;
10165 
10166 			tsn = asoc->last_acked_seq + 1;
10167 			if (tsn == chk->rec.data.tsn) {
				/*
				 * we make a special exception for this
				 * case. The peer has no rwnd but is missing
				 * the lowest chunk, which is probably what
				 * is holding up the rwnd.
				 */
10174 				goto one_chunk_around;
10175 			}
10176 			return (1);
10177 		}
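		/*
		 * With little or no room in the peer's rwnd we restrict
		 * ourselves to a single chunk; if the window is fully closed
		 * and nothing is in flight, that chunk doubles as a window
		 * probe.
		 */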
10178 	one_chunk_around:
10179 		if (asoc->peers_rwnd < mtu) {
10180 			one_chunk = 1;
10181 			if ((asoc->peers_rwnd == 0) &&
10182 			    (asoc->total_flight == 0)) {
10183 				chk->window_probe = 1;
10184 				chk->whoTo->window_probe = 1;
10185 			}
10186 		}
10187 #ifdef SCTP_AUDITING_ENABLED
10188 		sctp_audit_log(0xC3, 2);
10189 #endif
10190 		bundle_at = 0;
10191 		m = NULL;
10192 		net->fast_retran_ip = 0;
10193 		if (chk->rec.data.doing_fast_retransmit == 0) {
			/*
			 * if no FR is in progress, skip destinations that
			 * have flight_size >= cwnd.
			 */
10198 			if (net->flight_size >= net->cwnd) {
10199 				continue;
10200 			}
10201 		} else {
10202 			/*
10203 			 * Mark the destination net to have FR recovery
10204 			 * limits put on it.
10205 			 */
10206 			*fr_done = 1;
10207 			net->fast_retran_ip = 1;
10208 		}
10209 
10210 		/*
10211 		 * if no AUTH is yet included and this chunk requires it,
10212 		 * make sure to account for it.  We don't apply the size
10213 		 * until the AUTH chunk is actually added below in case
10214 		 * there is no room for this chunk.
10215 		 */
10216 		if (data_auth_reqd && (auth == NULL)) {
10217 			dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
10218 		} else
10219 			dmtu = 0;
10220 
10221 		if ((chk->send_size <= (mtu - dmtu)) ||
10222 		    (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
10223 			/* ok we will add this one */
10224 			if (data_auth_reqd) {
10225 				if (auth == NULL) {
10226 					m = sctp_add_auth_chunk(m,
10227 								&endofchain,
10228 								&auth,
10229 								&auth_offset,
10230 								stcb,
10231 								SCTP_DATA);
10232 					auth_keyid = chk->auth_keyid;
10233 					override_ok = 0;
10234 					SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10235 				} else if (override_ok) {
10236 					auth_keyid = chk->auth_keyid;
10237 					override_ok = 0;
10238 				} else if (chk->auth_keyid != auth_keyid) {
10239 					/* different keyid, so done bundling */
10240 					break;
10241 				}
10242 			}
10243 			m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
10244 			if (m == NULL) {
10245 				SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
10246 				return (ENOMEM);
10247 			}
10248 			/* Do clear IP_DF ? */
10249 			if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
10250 				no_fragmentflg = 0;
10251 			}
			/* update our MTU size */
10253 			if (mtu > (chk->send_size + dmtu))
10254 				mtu -= (chk->send_size + dmtu);
10255 			else
10256 				mtu = 0;
10257 			data_list[bundle_at++] = chk;
10258 			if (one_chunk && (asoc->total_flight <= 0)) {
10259 				SCTP_STAT_INCR(sctps_windowprobed);
10260 			}
10261 		}
10262 		if (one_chunk == 0) {
			/*
			 * now, are there any more chunks forward of chk to
			 * pick up?
			 */
10267 			for (fwd = TAILQ_NEXT(chk, sctp_next); fwd != NULL; fwd = TAILQ_NEXT(fwd, sctp_next)) {
10268 				if (fwd->sent != SCTP_DATAGRAM_RESEND) {
10269 					/* Nope, not for retran */
10270 					continue;
10271 				}
10272 				if (fwd->whoTo != net) {
10273 					/* Nope, not the net in question */
10274 					continue;
10275 				}
10276 				if (data_auth_reqd && (auth == NULL)) {
10277 					dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
10278 				} else
10279 					dmtu = 0;
10280 				if (fwd->send_size <= (mtu - dmtu)) {
10281 					if (data_auth_reqd) {
10282 						if (auth == NULL) {
10283 							m = sctp_add_auth_chunk(m,
10284 										&endofchain,
10285 										&auth,
10286 										&auth_offset,
10287 										stcb,
10288 										SCTP_DATA);
10289 							auth_keyid = fwd->auth_keyid;
10290 							override_ok = 0;
10291 							SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10292 						} else if (override_ok) {
10293 							auth_keyid = fwd->auth_keyid;
10294 							override_ok = 0;
10295 						} else if (fwd->auth_keyid != auth_keyid) {
10296 							/* different keyid, so done bundling */
10297 							break;
10298 						}
10299 					}
10300 					m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref);
10301 					if (m == NULL) {
10302 						SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
10303 						return (ENOMEM);
10304 					}
10305 					/* Do clear IP_DF ? */
10306 					if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) {
10307 						no_fragmentflg = 0;
10308 					}
					/* update our MTU size */
10310 					if (mtu > (fwd->send_size + dmtu))
10311 						mtu -= (fwd->send_size + dmtu);
10312 					else
10313 						mtu = 0;
10314 					data_list[bundle_at++] = fwd;
10315 					if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
10316 						break;
10317 					}
10318 				} else {
10319 					/* can't fit so we are done */
10320 					break;
10321 				}
10322 			}
10323 		}
10324 		/* Is there something to send for this destination? */
10325 		if (m) {
			/*
			 * No matter if we fail or succeed we should start a
			 * timer. A failure is like a lost IP packet :-)
			 */
10330 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
				/*
				 * no timer is running on this destination,
				 * so restart it.
				 */
10335 				sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
10336 				tmr_started = 1;
10337 			}
			/* Now let's send it, if there is anything to send :> */
10339 			if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
10340 			                                        (struct sockaddr *)&net->ro._l_addr, m,
10341 			                                        auth_offset, auth, auth_keyid,
10342 			                                        no_fragmentflg, 0, 0,
10343 			                                        inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
10344 			                                        net->port, NULL,
10345 #if defined(__FreeBSD__) && !defined(__Userspace__)
10346 			                                        0, 0,
10347 #endif
10348 			                                        so_locked))) {
10349 				/* error, we could not output */
10350 				SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
10351 				if (error == ENOBUFS) {
10352 					asoc->ifp_had_enobuf = 1;
10353 					SCTP_STAT_INCR(sctps_lowlevelerr);
10354 				}
10355 				return (error);
10356 			} else {
10357 				asoc->ifp_had_enobuf = 0;
10358 			}
10359 			endofchain = NULL;
10360 			auth = NULL;
10361 			auth_offset = 0;
10362 			/* For HB's */
			/*
			 * We don't want to mark the net->sent time here,
			 * since we use this for HB and retransmissions
			 * cannot measure RTT
			 */
10368 			/* (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); */
10369 
10370 			/* For auto-close */
10371 			cnt_thru++;
10372 			if (*now_filled == 0) {
10373 				(void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
10374 				*now = asoc->time_last_sent;
10375 				*now_filled = 1;
10376 			} else {
10377 				asoc->time_last_sent = *now;
10378 			}
10379 			*cnt_out += bundle_at;
10380 #ifdef SCTP_AUDITING_ENABLED
10381 			sctp_audit_log(0xC4, bundle_at);
10382 #endif
10383 			if (bundle_at) {
10384 				tsns_sent = data_list[0]->rec.data.tsn;
10385 			}
10386 			for (i = 0; i < bundle_at; i++) {
10387 				SCTP_STAT_INCR(sctps_sendretransdata);
10388 				data_list[i]->sent = SCTP_DATAGRAM_SENT;
				/*
				 * When we have revoked data and we
				 * retransmit it, we clear the revoked
				 * flag, since this flag dictates whether we
				 * subtracted it from the flight size
				 */
10395 				if (data_list[i]->rec.data.chunk_was_revoked) {
10396 					/* Deflate the cwnd */
10397 					data_list[i]->whoTo->cwnd -= data_list[i]->book_size;
10398 					data_list[i]->rec.data.chunk_was_revoked = 0;
10399 				}
10400 				data_list[i]->snd_count++;
10401 				sctp_ucount_decr(asoc->sent_queue_retran_cnt);
10402 				/* record the time */
10403 				data_list[i]->sent_rcv_time = asoc->time_last_sent;
10404 				if (data_list[i]->book_size_scale) {
10405 					/*
10406 					 * need to double the book size on
10407 					 * this one
10408 					 */
10409 					data_list[i]->book_size_scale = 0;
					/* Since we double the book size, we must
					 * also increase the output queue size by the
					 * same amount, since it gets shrunk by the
					 * doubled book size when we free the chunk.
					 */
10414 					atomic_add_int(&((asoc)->total_output_queue_size),data_list[i]->book_size);
10415 					data_list[i]->book_size *= 2;
10416 				} else {
10417 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
10418 						sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
10419 						      asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
10420 					}
10421 					asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
10422 									    (uint32_t) (data_list[i]->send_size +
10423 											SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
10424 				}
10425 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
10426 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_RSND,
10427 						       data_list[i]->whoTo->flight_size,
10428 						       data_list[i]->book_size,
10429 						       (uint32_t)(uintptr_t)data_list[i]->whoTo,
10430 						       data_list[i]->rec.data.tsn);
10431 				}
10432 				sctp_flight_size_increase(data_list[i]);
10433 				sctp_total_flight_increase(stcb, data_list[i]);
10434 				if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
10435 					/* SWS sender side engages */
10436 					asoc->peers_rwnd = 0;
10437 				}
10438 				if ((i == 0) &&
10439 				    (data_list[i]->rec.data.doing_fast_retransmit)) {
10440 					SCTP_STAT_INCR(sctps_sendfastretrans);
10441 					if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) &&
10442 					    (tmr_started == 0)) {
						/*-
						 * ok we just fast-retrans'd
						 * the lowest TSN, i.e. the
						 * first on the list. In
						 * this case we want to give
						 * some more time to get a
						 * SACK back without the t3
						 * timer expiring.
						 */
10452 						sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
10453 						                SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_2);
10454 						sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
10455 					}
10456 				}
10457 			}
10458 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10459 				sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND);
10460 			}
10461 #ifdef SCTP_AUDITING_ENABLED
10462 			sctp_auditing(21, inp, stcb, NULL);
10463 #endif
10464 		} else {
10465 			/* None will fit */
10466 			return (1);
10467 		}
10468 		if (asoc->sent_queue_retran_cnt <= 0) {
10469 			/* all done we have no more to retran */
10470 			asoc->sent_queue_retran_cnt = 0;
10471 			break;
10472 		}
10473 		if (one_chunk) {
10474 			/* No more room in rwnd */
10475 			return (1);
10476 		}
10477 		/* stop the for loop here. we sent out a packet */
10478 		break;
10479 	}
10480 	return (0);
10481 }
10482 
10483 static void
10484 sctp_timer_validation(struct sctp_inpcb *inp,
10485     struct sctp_tcb *stcb,
10486     struct sctp_association *asoc)
10487 {
10488 	struct sctp_nets *net;
10489 
10490 	/* Validate that a timer is running somewhere */
10491 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
10492 		if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
10493 			/* Here is a timer */
10494 			return;
10495 		}
10496 	}
10497 	SCTP_TCB_LOCK_ASSERT(stcb);
10498 	/* Gak, we did not have a timer somewhere */
10499 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "Deadlock avoided starting timer on a dest at retran\n");
10500 	if (asoc->alternate) {
10501 		sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->alternate);
10502 	} else {
10503 		sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination);
10504 	}
10505 	return;
10506 }
10507 
10508 void
10509 sctp_chunk_output(struct sctp_inpcb *inp,
10510     struct sctp_tcb *stcb,
10511     int from_where,
10512     int so_locked)
10513 {
	/*-
	 * Ok, this is the generic chunk service queue. We must do the
	 * following:
	 * - See if there are retransmits pending; if so we must
	 *   do these first.
	 * - Service the stream queue that is next, moving any
	 *   message (note we must get a complete message, i.e.
	 *   FIRST/MIDDLE and LAST, to the out queue in one pass) and
	 *   assigning TSNs.
	 * - Check to see if the cwnd/rwnd allows any output; if so,
	 *   go ahead and formulate and send the low level chunks, making
	 *   sure to also bundle in any control chunks from the control
	 *   chunk queue.
	 */
10527 	struct sctp_association *asoc;
10528 	struct sctp_nets *net;
10529 	int error = 0, num_out, tot_out = 0, ret = 0, reason_code;
10530 	unsigned int burst_cnt = 0;
10531 	struct timeval now;
10532 	int now_filled = 0;
10533 	int nagle_on;
10534 	int frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
10535 	int un_sent = 0;
10536 	int fr_done;
10537 	unsigned int tot_frs = 0;
10538 
10539 #if defined(__APPLE__) && !defined(__Userspace__)
10540 	if (so_locked) {
10541 		sctp_lock_assert(SCTP_INP_SO(inp));
10542 	} else {
10543 		sctp_unlock_assert(SCTP_INP_SO(inp));
10544 	}
10545 #endif
10546 	asoc = &stcb->asoc;
10547 do_it_again:
10548 	/* The Nagle algorithm is only applied when handling a send call. */
10549 	if (from_where == SCTP_OUTPUT_FROM_USR_SEND) {
10550 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY)) {
10551 			nagle_on = 0;
10552 		} else {
10553 			nagle_on = 1;
10554 		}
10555 	} else {
10556 		nagle_on = 0;
10557 	}
10558 	SCTP_TCB_LOCK_ASSERT(stcb);
10559 
10560 	un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
10561 
10562 	if ((un_sent <= 0) &&
10563 	    (TAILQ_EMPTY(&asoc->control_send_queue)) &&
10564 	    (TAILQ_EMPTY(&asoc->asconf_send_queue)) &&
10565 	    (asoc->sent_queue_retran_cnt == 0) &&
10566 	    (asoc->trigger_reset == 0)) {
10567 		/* Nothing to do unless there is something to be sent left */
10568 		return;
10569 	}
	/* Do we have something to send, data or control, AND
	 * a SACK timer running? If so, piggy-back the SACK.
	 */
10573 	if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
10574 		sctp_send_sack(stcb, so_locked);
10575 		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
10576 		                SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_3);
10577 	}
10578 	while (asoc->sent_queue_retran_cnt) {
10579 		/*-
10580 		 * Ok, it is retransmission time only, we send out only ONE
10581 		 * packet with a single call off to the retran code.
10582 		 */
10583 		if (from_where == SCTP_OUTPUT_FROM_COOKIE_ACK) {
			/*-
			 * Special hook for handling cookies discarded
			 * by the peer that carried data. Send the cookie-ack
			 * only and then the next call will get the
			 * retransmissions.
			 */
10589 			(void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
10590 						    from_where,
10591 						    &now, &now_filled, frag_point, so_locked);
10592 			return;
10593 		} else if (from_where != SCTP_OUTPUT_FROM_HB_TMR) {
10594 			/* if its not from a HB then do it */
10595 			fr_done = 0;
10596 			ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done, so_locked);
10597 			if (fr_done) {
10598 				tot_frs++;
10599 			}
10600 		} else {
			/*
			 * it's from any other place, so we don't allow
			 * retransmission output (only control)
			 */
10605 			ret = 1;
10606 		}
10607 		if (ret > 0) {
10608 			/* Can't send anymore */
			/*-
			 * now let's push out control by calling med-level
			 * output once. This ensures that we WILL send HBs
			 * if queued too.
			 */
10614 			(void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
10615 						    from_where,
10616 						    &now, &now_filled, frag_point, so_locked);
10617 #ifdef SCTP_AUDITING_ENABLED
10618 			sctp_auditing(8, inp, stcb, NULL);
10619 #endif
10620 			sctp_timer_validation(inp, stcb, asoc);
10621 			return;
10622 		}
10623 		if (ret < 0) {
			/*-
			 * The count was off; retransmission is not
			 * happening, so fall through and do the normal
			 * transmission.
			 */
10628 #ifdef SCTP_AUDITING_ENABLED
10629 			sctp_auditing(9, inp, stcb, NULL);
10630 #endif
10631 			if (ret == SCTP_RETRAN_EXIT) {
10632 				return;
10633 			}
10634 			break;
10635 		}
10636 		if (from_where == SCTP_OUTPUT_FROM_T3) {
10637 			/* Only one transmission allowed out of a timeout */
10638 #ifdef SCTP_AUDITING_ENABLED
10639 			sctp_auditing(10, inp, stcb, NULL);
10640 #endif
10641 			/* Push out any control */
10642 			(void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, from_where,
10643 						    &now, &now_filled, frag_point, so_locked);
10644 			return;
10645 		}
10646 		if ((asoc->fr_max_burst > 0) && (tot_frs >= asoc->fr_max_burst)) {
10647 			/* Hit FR burst limit */
10648 			return;
10649 		}
10650 		if ((num_out == 0) && (ret == 0)) {
10651 			/* No more retrans to send */
10652 			break;
10653 		}
10654 	}
10655 #ifdef SCTP_AUDITING_ENABLED
10656 	sctp_auditing(12, inp, stcb, NULL);
10657 #endif
10658 	/* Check for bad destinations, if they exist move chunks around. */
10659 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
10660 		if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
			/*-
			 * If possible, move things off of this address. We
			 * still may send below due to the dormant state, but
			 * we try to find an alternate address to send to,
			 * and if we have one we move all queued data on the
			 * out wheel to this alternate address.
			 */
10668 			if (net->ref_count > 1)
10669 				sctp_move_chunks_from_net(stcb, net);
10670 		} else {
10671 			/*-
10672 			 * if ((asoc->sat_network) || (net->addr_is_local))
10673 			 * { burst_limit = asoc->max_burst *
10674 			 * SCTP_SAT_NETWORK_BURST_INCR; }
10675 			 */
10676 			if (asoc->max_burst > 0) {
10677 				if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst)) {
10678 					if ((net->flight_size + (asoc->max_burst * net->mtu)) < net->cwnd) {
10679 						/* JRS - Use the congestion control given in the congestion control module */
10680 						asoc->cc_functions.sctp_cwnd_update_after_output(stcb, net, asoc->max_burst);
10681 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10682 							sctp_log_maxburst(stcb, net, 0, asoc->max_burst, SCTP_MAX_BURST_APPLIED);
10683 						}
10684 						SCTP_STAT_INCR(sctps_maxburstqueued);
10685 					}
10686 					net->fast_retran_ip = 0;
10687 				} else {
10688 					if (net->flight_size == 0) {
10689 						/* Should be decaying the cwnd here */
10690 						;
10691 					}
10692 				}
10693 			}
10694 		}
10695 	}
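	/*
	 * Main send loop: keep calling the medium-level output routine
	 * until it stops producing packets, there is nothing left to send,
	 * or the configured burst limit is reached.
	 */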
10696 	burst_cnt = 0;
10697 	do {
10698 		error = sctp_med_chunk_output(inp, stcb, asoc, &num_out,
10699 					      &reason_code, 0, from_where,
10700 					      &now, &now_filled, frag_point, so_locked);
10701 		if (error) {
10702 			SCTPDBG(SCTP_DEBUG_OUTPUT1, "Error %d was returned from med-c-op\n", error);
10703 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10704 				sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP);
10705 			}
10706 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10707 				sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES);
10708 				sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES);
10709 			}
10710 			break;
10711 		}
10712 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "m-c-o put out %d\n", num_out);
10713 
10714 		tot_out += num_out;
10715 		burst_cnt++;
10716 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10717 			sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES);
10718 			if (num_out == 0) {
10719 				sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES);
10720 			}
10721 		}
10722 		if (nagle_on) {
			/*
			 * When the Nagle algorithm is used, look at how much
			 * is unsent, then if it is smaller than an MTU and we
			 * have data in flight we stop, except if we are
			 * handling a fragmented user message.
			 */
10729 			un_sent = stcb->asoc.total_output_queue_size - stcb->asoc.total_flight;
10730 			if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) &&
10731 			    (stcb->asoc.total_flight > 0)) {
10732 /*	&&		     sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {*/
10733 				break;
10734 			}
10735 		}
10736 		if (TAILQ_EMPTY(&asoc->control_send_queue) &&
10737 		    TAILQ_EMPTY(&asoc->send_queue) &&
10738 		    sctp_is_there_unsent_data(stcb, so_locked) == 0) {
10739 			/* Nothing left to send */
10740 			break;
10741 		}
10742 		if ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) <= 0) {
10743 			/* Nothing left to send */
10744 			break;
10745 		}
10746 	} while (num_out &&
10747 	         ((asoc->max_burst == 0) ||
10748 		  SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) ||
10749 		  (burst_cnt < asoc->max_burst)));
10750 
10751 	if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) == 0) {
10752 		if ((asoc->max_burst > 0) && (burst_cnt >= asoc->max_burst)) {
10753 			SCTP_STAT_INCR(sctps_maxburstqueued);
10754 			asoc->burst_limit_applied = 1;
10755 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10756 				sctp_log_maxburst(stcb, asoc->primary_destination, 0, burst_cnt, SCTP_MAX_BURST_APPLIED);
10757 			}
10758 		} else {
10759 			asoc->burst_limit_applied = 0;
10760 		}
10761 	}
10762 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10763 		sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES);
10764 	}
10765 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, we have put out %d chunks\n",
10766 		tot_out);
10767 
	/*-
	 * Now we need to clean up the control chunk chain if an ECNE is on
	 * it. It must be marked as UNSENT again so the next call will
	 * continue to send it until we get a CWR to remove it.
	 */
10773 	if (stcb->asoc.ecn_echo_cnt_onq)
10774 		sctp_fix_ecn_echo(asoc);
10775 
10776 	if (stcb->asoc.trigger_reset) {
10777 		if (sctp_send_stream_reset_out_if_possible(stcb, so_locked) == 0)  {
10778 			goto do_it_again;
10779 		}
10780 	}
10781 	return;
10782 }
10783 
10784 int
10785 sctp_output(
10786 	struct sctp_inpcb *inp,
10787 	struct mbuf *m,
10788 	struct sockaddr *addr,
10789 	struct mbuf *control,
10790 #if defined(__FreeBSD__) && !defined(__Userspace__)
10791 	struct thread *p,
10792 #elif defined(_WIN32) && !defined(__Userspace__)
10793 	PKTHREAD p,
10794 #else
10795 #if defined(__APPLE__) && !defined(__Userspace__)
10796 	struct proc *p SCTP_UNUSED,
10797 #else
10798 	struct proc *p,
10799 #endif
10800 #endif
10801 	int flags)
10802 {
10803 	if (inp == NULL) {
10804 		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
10805 		return (EINVAL);
10806 	}
10807 
10808 	if (inp->sctp_socket == NULL) {
10809 		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
10810 		return (EINVAL);
10811 	}
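	/* Hand the message off to the generic send path on the endpoint's socket. */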
10812 	return (sctp_sosend(inp->sctp_socket,
10813 			    addr,
10814 			    (struct uio *)NULL,
10815 			    m,
10816 			    control,
10817 #if defined(__APPLE__) && !defined(__Userspace__)
10818 			    flags
10819 #else
10820 			    flags, p
10821 #endif
10822 			));
10823 }
10824 
10825 void
10826 send_forward_tsn(struct sctp_tcb *stcb,
10827 		 struct sctp_association *asoc)
10828 {
10829 	struct sctp_tmit_chunk *chk, *at, *tp1, *last;
10830 	struct sctp_forward_tsn_chunk *fwdtsn;
10831 	struct sctp_strseq *strseq;
10832 	struct sctp_strseq_mid *strseq_m;
10833 	uint32_t advance_peer_ack_point;
10834 	unsigned int cnt_of_space, i, ovh;
10835 	unsigned int space_needed;
10836 	unsigned int cnt_of_skipped = 0;
10837 
10838 	SCTP_TCB_LOCK_ASSERT(stcb);
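	/*
	 * If a FORWARD-TSN chunk is already queued, reuse it instead of
	 * allocating a new one.
	 */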
10839 	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10840 		if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
10841 			/* mark it as unsent */
10842 			chk->sent = SCTP_DATAGRAM_UNSENT;
10843 			chk->snd_count = 0;
10844 			/* Do we correct its output location? */
10845 			if (chk->whoTo) {
10846 				sctp_free_remote_addr(chk->whoTo);
10847 				chk->whoTo = NULL;
10848 			}
10849 			goto sctp_fill_in_rest;
10850 		}
10851 	}
10852 	/* Ok if we reach here we must build one */
10853 	sctp_alloc_a_chunk(stcb, chk);
10854 	if (chk == NULL) {
10855 		return;
10856 	}
10857 	asoc->fwd_tsn_cnt++;
10858 	chk->copy_by_ref = 0;
10859 	/*
10860 	 * We don't do the old thing here, since this id is
10861 	 * not used on the wire but only to tell if the
10862 	 * stack is sending a fwd-tsn during output. And
10863 	 * whether it is an IFORWARD or a FORWARD, it is
10864 	 * a fwd-tsn.
10865 	 */
10866 	chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN;
10867 	chk->rec.chunk_id.can_take_data = 0;
10868 	chk->flags = 0;
10869 	chk->asoc = asoc;
10870 	chk->whoTo = NULL;
10871 	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
10872 	if (chk->data == NULL) {
10873 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
10874 		return;
10875 	}
10876 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
10877 	chk->sent = SCTP_DATAGRAM_UNSENT;
10878 	chk->snd_count = 0;
10879 	TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next);
10880 	asoc->ctrl_queue_cnt++;
10881 sctp_fill_in_rest:
10882 	/*-
10883 	 * Here we go through and fill out the part that deals with
10884 	 * stream/seq of the ones we skip.
10885 	 */
10886 	SCTP_BUF_LEN(chk->data) = 0;
10887 	TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
10888 		if ((at->sent != SCTP_FORWARD_TSN_SKIP) &&
10889 		    (at->sent != SCTP_DATAGRAM_NR_ACKED)) {
10890 			/* no more to look at */
10891 			break;
10892 		}
10893 		if (!asoc->idata_supported && (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
10894 			/* We don't report these */
10895 			continue;
10896 		}
10897 		cnt_of_skipped++;
10898 	}
10899 	if (asoc->idata_supported) {
10900 		space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
10901 		                (cnt_of_skipped * sizeof(struct sctp_strseq_mid)));
10902 	} else {
10903 		space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
10904 		                (cnt_of_skipped * sizeof(struct sctp_strseq)));
10905 	}
10906 	cnt_of_space = (unsigned int)M_TRAILINGSPACE(chk->data);
10907 
10908 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
10909 		ovh = SCTP_MIN_OVERHEAD;
10910 	} else {
10911 		ovh = SCTP_MIN_V4_OVERHEAD;
10912 	}
10913 	if (cnt_of_space > (asoc->smallest_mtu - ovh)) {
10914 		/* trim to an MTU size */
10915 		cnt_of_space = asoc->smallest_mtu - ovh;
10916 	}
10917 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10918 		sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10919 			       0xff, 0, cnt_of_skipped,
10920 			       asoc->advanced_peer_ack_point);
10921 	}
10922 	advance_peer_ack_point = asoc->advanced_peer_ack_point;
10923 	if (cnt_of_space < space_needed) {
10924 		/*-
10925 		 * Ok, we must trim down the chunk by lowering the
10926 		 * advanced peer ack point.
10927 		 */
10928 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10929 			sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10930 				       0xff, 0xff, cnt_of_space,
10931 				       space_needed);
10932 		}
10933 		cnt_of_skipped = cnt_of_space - sizeof(struct sctp_forward_tsn_chunk);
10934 		if (asoc->idata_supported) {
10935 			cnt_of_skipped /= sizeof(struct sctp_strseq_mid);
10936 		} else {
10937 			cnt_of_skipped /= sizeof(struct sctp_strseq);
10938 		}
10939 		/*-
10940 		 * Go through and find the TSN that will be the one
10941 		 * we report.
10942 		 */
10943 		at = TAILQ_FIRST(&asoc->sent_queue);
10944 		if (at != NULL) {
10945 			for (i = 0; i < cnt_of_skipped; i++) {
10946 				tp1 = TAILQ_NEXT(at, sctp_next);
10947 				if (tp1 == NULL) {
10948 					break;
10949 				}
10950 				at = tp1;
10951 			}
10952 		}
10953 		if (at && SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10954 			sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10955 				       0xff, cnt_of_skipped, at->rec.data.tsn,
10956 				       asoc->advanced_peer_ack_point);
10957 		}
10958 		last = at;
10959 		/*-
10960 		 * last now points to the last one we can report; update
10961 		 * the peer ack point.
10962 		 */
10963 		if (last) {
10964 			advance_peer_ack_point = last->rec.data.tsn;
10965 		}
10966 		if (asoc->idata_supported) {
10967 			space_needed = sizeof(struct sctp_forward_tsn_chunk) +
10968 			               cnt_of_skipped * sizeof(struct sctp_strseq_mid);
10969 		} else {
10970 			space_needed = sizeof(struct sctp_forward_tsn_chunk) +
10971 			               cnt_of_skipped * sizeof(struct sctp_strseq);
10972 		}
10973 	}
10974 	chk->send_size = space_needed;
10975 	/* Setup the chunk */
10976 	fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *);
10977 	fwdtsn->ch.chunk_length = htons(chk->send_size);
10978 	fwdtsn->ch.chunk_flags = 0;
10979 	if (asoc->idata_supported) {
10980 		fwdtsn->ch.chunk_type = SCTP_IFORWARD_CUM_TSN;
10981 	} else {
10982 		fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN;
10983 	}
10984 	fwdtsn->new_cumulative_tsn = htonl(advance_peer_ack_point);
10985 	SCTP_BUF_LEN(chk->data) = chk->send_size;
10986 	fwdtsn++;
10987 	/*-
10988 	 * Move pointer to after the fwdtsn and transfer to the
10989 	 * strseq pointer.
10990 	 */
10991 	if (asoc->idata_supported) {
10992 		strseq_m = (struct sctp_strseq_mid *)fwdtsn;
10993 		strseq = NULL;
10994 	} else {
10995 		strseq = (struct sctp_strseq *)fwdtsn;
10996 		strseq_m = NULL;
10997 	}
10998 	/*-
10999 	 * Now populate the strseq list. This is done blindly
11000 	 * without pulling out duplicate stream info. This is
11001 	 * inefficient but won't harm the process, since the peer will
11002 	 * look at these in sequence and will thus release anything.
11003 	 * It could mean we exceed the PMTU and chop off some entries
11004 	 * that we could have included, but this is unlikely (1432/4
11005 	 * would mean 300+ stream sequence numbers would have to be
11006 	 * reported in one FWD-TSN). With a bit of work we could later
11007 	 * optimize this and pull out duplicates, but it would add more
11008 	 * overhead. So for now... not!
11009 	 */
11010 	i = 0;
11011 	TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
11012 		if (i >= cnt_of_skipped) {
11013 			break;
11014 		}
11015 		if (!asoc->idata_supported && (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
11016 			/* We don't report these */
11017 			continue;
11018 		}
11019 		if (at->rec.data.tsn == advance_peer_ack_point) {
11020 			at->rec.data.fwd_tsn_cnt = 0;
11021 		}
11022 		if (asoc->idata_supported) {
11023 			strseq_m->sid = htons(at->rec.data.sid);
11024 			if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
11025 				strseq_m->flags = htons(PR_SCTP_UNORDERED_FLAG);
11026 			} else {
11027 				strseq_m->flags = 0;
11028 			}
11029 			strseq_m->mid = htonl(at->rec.data.mid);
11030 			strseq_m++;
11031 		} else {
11032 			strseq->sid = htons(at->rec.data.sid);
11033 			strseq->ssn = htons((uint16_t)at->rec.data.mid);
11034 			strseq++;
11035 		}
11036 		i++;
11037 	}
11038 	return;
11039 }
11040 
11041 void
11042 sctp_send_sack(struct sctp_tcb *stcb, int so_locked)
11043 {
11044 	/*-
11045 	 * Queue up a SACK or NR-SACK in the control queue.
11046 	 * We must first check to see if a SACK or NR-SACK is
11047 	 * somehow on the control queue.
11048 	 * If so, we will take and remove the old one.
11049 	 */
11050 	struct sctp_association *asoc;
11051 	struct sctp_tmit_chunk *chk, *a_chk;
11052 	struct sctp_sack_chunk *sack;
11053 	struct sctp_nr_sack_chunk *nr_sack;
11054 	struct sctp_gap_ack_block *gap_descriptor;
11055 	const struct sack_track *selector;
11056 	int mergeable = 0;
11057 	int offset;
11058 	caddr_t limit;
11059 	uint32_t *dup;
11060 	int limit_reached = 0;
11061 	unsigned int i, siz, j;
11062 	unsigned int num_gap_blocks = 0, num_nr_gap_blocks = 0, space;
11063 	int num_dups = 0;
11064 	int space_req;
11065 	uint32_t highest_tsn;
11066 	uint8_t flags;
11067 	uint8_t type;
11068 	uint8_t tsn_map;
11069 
11070 	if (stcb->asoc.nrsack_supported == 1) {
11071 		type = SCTP_NR_SELECTIVE_ACK;
11072 	} else {
11073 		type = SCTP_SELECTIVE_ACK;
11074 	}
11075 	a_chk = NULL;
11076 	asoc = &stcb->asoc;
11077 	SCTP_TCB_LOCK_ASSERT(stcb);
11078 	if (asoc->last_data_chunk_from == NULL) {
11079 		/* Hmm we never received anything */
11080 		return;
11081 	}
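	/*
	 * Bring the mapping arrays and our advertised receiver window up to
	 * date before building the SACK.
	 */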
11082 	sctp_slide_mapping_arrays(stcb);
11083 	sctp_set_rwnd(stcb, asoc);
11084 	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
11085 		if (chk->rec.chunk_id.id == type) {
11086 			/* Hmm, found a sack already on queue, remove it */
11087 			TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
11088 			asoc->ctrl_queue_cnt--;
11089 			a_chk = chk;
11090 			if (a_chk->data) {
11091 				sctp_m_freem(a_chk->data);
11092 				a_chk->data = NULL;
11093 			}
11094 			if (a_chk->whoTo) {
11095 				sctp_free_remote_addr(a_chk->whoTo);
11096 				a_chk->whoTo = NULL;
11097 			}
11098 			break;
11099 		}
11100 	}
11101 	if (a_chk == NULL) {
11102 		sctp_alloc_a_chunk(stcb, a_chk);
11103 		if (a_chk == NULL) {
11104 			/* No memory so we drop the idea, and set a timer */
11105 			if (stcb->asoc.delayed_ack) {
11106 				sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
11107 				                stcb->sctp_ep, stcb, NULL,
11108 				                SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_4);
11109 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
11110 				    stcb->sctp_ep, stcb, NULL);
11111 			} else {
11112 				stcb->asoc.send_sack = 1;
11113 			}
11114 			return;
11115 		}
11116 		a_chk->copy_by_ref = 0;
11117 		a_chk->rec.chunk_id.id = type;
11118 		a_chk->rec.chunk_id.can_take_data = 1;
11119 	}
11120 	/* Clear our pkt counts */
11121 	asoc->data_pkts_seen = 0;
11122 
11123 	a_chk->flags = 0;
11124 	a_chk->asoc = asoc;
11125 	a_chk->snd_count = 0;
11126 	a_chk->send_size = 0;	/* fill in later */
11127 	a_chk->sent = SCTP_DATAGRAM_UNSENT;
11128 	a_chk->whoTo = NULL;
11129 
11130 	if (!(asoc->last_data_chunk_from->dest_state & SCTP_ADDR_REACHABLE)) {
11131 		/*-
11132 		 * Ok, the destination for the SACK is unreachable; let's see if
11133 		 * we can select an alternate to asoc->last_data_chunk_from.
11134 		 */
11135 		a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0);
11136 		if (a_chk->whoTo == NULL) {
11137 			/* Nope, no alternate */
11138 			a_chk->whoTo = asoc->last_data_chunk_from;
11139 		}
11140 	} else {
11141 		a_chk->whoTo = asoc->last_data_chunk_from;
11142 	}
11143 	if (a_chk->whoTo) {
11144 		atomic_add_int(&a_chk->whoTo->ref_count, 1);
11145 	}
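	/*
	 * The highest TSN across both mapping arrays determines whether any
	 * gap blocks need to be reported at all.
	 */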
11146 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map)) {
11147 		highest_tsn = asoc->highest_tsn_inside_map;
11148 	} else {
11149 		highest_tsn = asoc->highest_tsn_inside_nr_map;
11150 	}
11151 	if (highest_tsn == asoc->cumulative_tsn) {
11152 		/* no gaps */
11153 		if (type == SCTP_SELECTIVE_ACK) {
11154 			space_req = sizeof(struct sctp_sack_chunk);
11155 		} else {
11156 			space_req = sizeof(struct sctp_nr_sack_chunk);
11157 		}
11158 	} else {
11159 		/* gaps get a cluster */
11160 		space_req = MCLBYTES;
11161 	}
11162 	/* Ok, now let's formulate an mbuf with our sack */
11163 	a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_NOWAIT, 1, MT_DATA);
11164 	if ((a_chk->data == NULL) ||
11165 	    (a_chk->whoTo == NULL)) {
11166 		/* rats, no mbuf memory */
11167 		if (a_chk->data) {
11168 			/* there was a problem with the destination */
11169 			sctp_m_freem(a_chk->data);
11170 			a_chk->data = NULL;
11171 		}
11172 		sctp_free_a_chunk(stcb, a_chk, so_locked);
11173 		/* sa_ignore NO_NULL_CHK */
11174 		if (stcb->asoc.delayed_ack) {
11175 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
11176 			                stcb->sctp_ep, stcb, NULL,
11177 			                SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5);
11178 			sctp_timer_start(SCTP_TIMER_TYPE_RECV,
11179 			    stcb->sctp_ep, stcb, NULL);
11180 		} else {
11181 			stcb->asoc.send_sack = 1;
11182 		}
11183 		return;
11184 	}
11185 	/* ok, let's go through and fill it in */
11186 	SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD);
11187 	space = (unsigned int)M_TRAILINGSPACE(a_chk->data);
11188 	if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) {
11189 		space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD);
11190 	}
11191 	limit = mtod(a_chk->data, caddr_t);
11192 	limit += space;
11193 
11194 	flags = 0;
11195 
11196 	if ((asoc->sctp_cmt_on_off > 0) &&
11197 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
11198 		/*-
11199 		 * CMT DAC algorithm: If 2 (i.e., binary 10) packets have been
11200 		 * received, then set the high bit to 1, else 0. Reset
11201 		 * pkts_rcvd.
11202 		 */
11203 		flags |= (asoc->cmt_dac_pkts_rcvd << 6);
11204 		asoc->cmt_dac_pkts_rcvd = 0;
11205 	}
11206 #ifdef SCTP_ASOCLOG_OF_TSNS
11207 	stcb->asoc.cumack_logsnt[stcb->asoc.cumack_log_atsnt] = asoc->cumulative_tsn;
11208 	stcb->asoc.cumack_log_atsnt++;
11209 	if (stcb->asoc.cumack_log_atsnt >= SCTP_TSN_LOG_SIZE) {
11210 		stcb->asoc.cumack_log_atsnt = 0;
11211 	}
11212 #endif
11213 	/* reset the reader's interpretation */
11214 	stcb->freed_by_sorcv_sincelast = 0;
11215 
11216 	if (type == SCTP_SELECTIVE_ACK) {
11217 		sack = mtod(a_chk->data, struct sctp_sack_chunk *);
11218 		nr_sack = NULL;
11219 		gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk));
11220 		if (highest_tsn > asoc->mapping_array_base_tsn) {
11221 			siz = (((highest_tsn - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
11222 		} else {
11223 			siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + highest_tsn + 7) / 8;
11224 		}
11225 	} else {
11226 		sack = NULL;
11227 		nr_sack = mtod(a_chk->data, struct sctp_nr_sack_chunk *);
11228 		gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)nr_sack + sizeof(struct sctp_nr_sack_chunk));
11229 		if (asoc->highest_tsn_inside_map > asoc->mapping_array_base_tsn) {
11230 			siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
11231 		} else {
11232 			siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_map + 7) / 8;
11233 		}
11234 	}
11235 
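	/*
	 * Gap block boundaries are reported relative to the cumulative TSN,
	 * so compute the offset of the first mapping array bit from it.
	 */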
11236 	if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
11237 		offset = 1;
11238 	} else {
11239 		offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
11240 	}
11241 	if (((type == SCTP_SELECTIVE_ACK) &&
11242 	     SCTP_TSN_GT(highest_tsn, asoc->cumulative_tsn)) ||
11243 	    ((type == SCTP_NR_SELECTIVE_ACK) &&
11244 	     SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->cumulative_tsn))) {
11245 		/* we have a gap .. maybe */
11246 		for (i = 0; i < siz; i++) {
11247 			tsn_map = asoc->mapping_array[i];
11248 			if (type == SCTP_SELECTIVE_ACK) {
11249 				tsn_map |= asoc->nr_mapping_array[i];
11250 			}
11251 			if (i == 0) {
11252 				/*
11253 				 * Clear all bits corresponding to TSNs
11254 				 * smaller than or equal to the cumulative TSN.
11255 				 */
11256 				tsn_map &= (~0U << (1 - offset));
11257 			}
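			/*
			 * sack_array maps each possible 8-bit slice of the
			 * mapping array to its precomputed gap blocks and
			 * left/right edge merge information.
			 */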
11258 			selector = &sack_array[tsn_map];
11259 			if (mergeable && selector->right_edge) {
11260 				/*
11261 				 * Backup, left and right edges were ok to
11262 				 * merge.
11263 				 */
11264 				num_gap_blocks--;
11265 				gap_descriptor--;
11266 			}
11267 			if (selector->num_entries == 0)
11268 				mergeable = 0;
11269 			else {
11270 				for (j = 0; j < selector->num_entries; j++) {
11271 					if (mergeable && selector->right_edge) {
11272 						/*
11273 						 * do a merge by NOT setting
11274 						 * the left side
11275 						 */
11276 						mergeable = 0;
11277 					} else {
11278 						/*
11279 						 * no merge, set the left
11280 						 * side
11281 						 */
11282 						mergeable = 0;
11283 						gap_descriptor->start = htons((selector->gaps[j].start + offset));
11284 					}
11285 					gap_descriptor->end = htons((selector->gaps[j].end + offset));
11286 					num_gap_blocks++;
11287 					gap_descriptor++;
11288 					if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
11289 						/* no more room */
11290 						limit_reached = 1;
11291 						break;
11292 					}
11293 				}
11294 				if (selector->left_edge) {
11295 					mergeable = 1;
11296 				}
11297 			}
11298 			if (limit_reached) {
11299 			/* Reached the limit, stop */
11300 				break;
11301 			}
11302 			offset += 8;
11303 		}
11304 	}
11305 	if ((type == SCTP_NR_SELECTIVE_ACK) &&
11306 	    (limit_reached == 0)) {
11307 		mergeable = 0;
11308 
11309 		if (asoc->highest_tsn_inside_nr_map > asoc->mapping_array_base_tsn) {
11310 			siz = (((asoc->highest_tsn_inside_nr_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
11311 		} else {
11312 			siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_nr_map + 7) / 8;
11313 		}
11314 
11315 		if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
11316 			offset = 1;
11317 		} else {
11318 			offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
11319 		}
11320 		if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->cumulative_tsn)) {
11321 			/* we have a gap .. maybe */
11322 			for (i = 0; i < siz; i++) {
11323 				tsn_map = asoc->nr_mapping_array[i];
11324 				if (i == 0) {
11325 					/*
11326 					 * Clear all bits corresponding to TSNs
11327 					 * smaller than or equal to the cumulative TSN.
11328 					 */
11329 					tsn_map &= (~0U << (1 - offset));
11330 				}
11331 				selector = &sack_array[tsn_map];
11332 				if (mergeable && selector->right_edge) {
11333 					/*
11334 					 * Backup, left and right edges were ok to
11335 					 * merge.
11336 					 */
11337 					num_nr_gap_blocks--;
11338 					gap_descriptor--;
11339 				}
11340 				if (selector->num_entries == 0)
11341 					mergeable = 0;
11342 				else {
11343 					for (j = 0; j < selector->num_entries; j++) {
11344 						if (mergeable && selector->right_edge) {
11345 							/*
11346 							 * do a merge by NOT setting
11347 							 * the left side
11348 							 */
11349 							mergeable = 0;
11350 						} else {
11351 							/*
11352 							 * no merge, set the left
11353 							 * side
11354 							 */
11355 							mergeable = 0;
11356 							gap_descriptor->start = htons((selector->gaps[j].start + offset));
11357 						}
11358 						gap_descriptor->end = htons((selector->gaps[j].end + offset));
11359 						num_nr_gap_blocks++;
11360 						gap_descriptor++;
11361 						if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
11362 							/* no more room */
11363 							limit_reached = 1;
11364 							break;
11365 						}
11366 					}
11367 					if (selector->left_edge) {
11368 						mergeable = 1;
11369 					}
11370 				}
11371 				if (limit_reached) {
11372 				/* Reached the limit, stop */
11373 					break;
11374 				}
11375 				offset += 8;
11376 			}
11377 		}
11378 	}
11379 	/* now we must add any dups we are going to report. */
11380 	if ((limit_reached == 0) && (asoc->numduptsns)) {
11381 		dup = (uint32_t *) gap_descriptor;
11382 		for (i = 0; i < asoc->numduptsns; i++) {
11383 			*dup = htonl(asoc->dup_tsns[i]);
11384 			dup++;
11385 			num_dups++;
11386 			if (((caddr_t)dup + sizeof(uint32_t)) > limit) {
11387 				/* no more room */
11388 				break;
11389 			}
11390 		}
11391 		asoc->numduptsns = 0;
11392 	}
11393 	/*
11394 	 * now that the chunk is prepared queue it to the control chunk
11395 	 * queue.
11396 	 */
11397 	if (type == SCTP_SELECTIVE_ACK) {
11398 		a_chk->send_size = (uint16_t)(sizeof(struct sctp_sack_chunk) +
11399 		                              (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
11400 		                              num_dups * sizeof(int32_t));
11401 		SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
11402 		sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
11403 		sack->sack.a_rwnd = htonl(asoc->my_rwnd);
11404 		sack->sack.num_gap_ack_blks = htons(num_gap_blocks);
11405 		sack->sack.num_dup_tsns = htons(num_dups);
11406 		sack->ch.chunk_type = type;
11407 		sack->ch.chunk_flags = flags;
11408 		sack->ch.chunk_length = htons(a_chk->send_size);
11409 	} else {
11410 		a_chk->send_size = (uint16_t)(sizeof(struct sctp_nr_sack_chunk) +
11411 		                              (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
11412 		                              num_dups * sizeof(int32_t));
11413 		SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
11414 		nr_sack->nr_sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
11415 		nr_sack->nr_sack.a_rwnd = htonl(asoc->my_rwnd);
11416 		nr_sack->nr_sack.num_gap_ack_blks = htons(num_gap_blocks);
11417 		nr_sack->nr_sack.num_nr_gap_ack_blks = htons(num_nr_gap_blocks);
11418 		nr_sack->nr_sack.num_dup_tsns = htons(num_dups);
11419 		nr_sack->nr_sack.reserved = 0;
11420 		nr_sack->ch.chunk_type = type;
11421 		nr_sack->ch.chunk_flags = flags;
11422 		nr_sack->ch.chunk_length = htons(a_chk->send_size);
11423 	}
11424 	TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
11425 	asoc->my_last_reported_rwnd = asoc->my_rwnd;
11426 	asoc->ctrl_queue_cnt++;
11427 	asoc->send_sack = 0;
11428 	SCTP_STAT_INCR(sctps_sendsacks);
11429 	return;
11430 }
11431 
11432 void
11433 sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked)
11434 {
11435 	struct mbuf *m_abort, *m, *m_last;
11436 	struct mbuf *m_out, *m_end = NULL;
11437 	struct sctp_abort_chunk *abort;
11438 	struct sctp_auth_chunk *auth = NULL;
11439 	struct sctp_nets *net;
11440 	uint32_t vtag;
11441 	uint32_t auth_offset = 0;
11442 	int error;
11443 	uint16_t cause_len, chunk_len, padding_len;
11444 
11445 #if defined(__APPLE__) && !defined(__Userspace__)
11446 	if (so_locked) {
11447 		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
11448 	} else {
11449 		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
11450 	}
11451 #endif
11452 	SCTP_TCB_LOCK_ASSERT(stcb);
11453 	/*-
11454 	 * Add an AUTH chunk, if chunk requires it and save the offset into
11455 	 * the chain for AUTH
11456 	 */
11457 	if (sctp_auth_is_required_chunk(SCTP_ABORT_ASSOCIATION,
11458 	                                stcb->asoc.peer_auth_chunks)) {
11459 		m_out = sctp_add_auth_chunk(NULL, &m_end, &auth, &auth_offset,
11460 					    stcb, SCTP_ABORT_ASSOCIATION);
11461 		SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
11462 	} else {
11463 		m_out = NULL;
11464 	}
11465 	m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_NOWAIT, 1, MT_HEADER);
11466 	if (m_abort == NULL) {
11467 		if (m_out) {
11468 			sctp_m_freem(m_out);
11469 		}
11470 		if (operr) {
11471 			sctp_m_freem(operr);
11472 		}
11473 		return;
11474 	}
11475 	/* link in any error */
11476 	SCTP_BUF_NEXT(m_abort) = operr;
11477 	cause_len = 0;
11478 	m_last = NULL;
11479 	for (m = operr; m; m = SCTP_BUF_NEXT(m)) {
11480 		cause_len += (uint16_t)SCTP_BUF_LEN(m);
11481 		if (SCTP_BUF_NEXT(m) == NULL) {
11482 			m_last = m;
11483 		}
11484 	}
11485 	SCTP_BUF_LEN(m_abort) = sizeof(struct sctp_abort_chunk);
11486 	chunk_len = (uint16_t)sizeof(struct sctp_abort_chunk) + cause_len;
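	/* SCTP chunks are padded out to a multiple of 4 bytes. */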
11487 	padding_len = SCTP_SIZE32(chunk_len) - chunk_len;
11488 	if (m_out == NULL) {
11489 		/* NO Auth chunk prepended, so reserve space in front */
11490 		SCTP_BUF_RESV_UF(m_abort, SCTP_MIN_OVERHEAD);
11491 		m_out = m_abort;
11492 	} else {
11493 		/* Put AUTH chunk at the front of the chain */
11494 		SCTP_BUF_NEXT(m_end) = m_abort;
11495 	}
11496 	if (stcb->asoc.alternate) {
11497 		net = stcb->asoc.alternate;
11498 	} else {
11499 		net = stcb->asoc.primary_destination;
11500 	}
11501 	/* Fill in the ABORT chunk header. */
11502 	abort = mtod(m_abort, struct sctp_abort_chunk *);
11503 	abort->ch.chunk_type = SCTP_ABORT_ASSOCIATION;
11504 	if (stcb->asoc.peer_vtag == 0) {
11505 		/* This happens iff the assoc is in COOKIE-WAIT state. */
11506 		vtag = stcb->asoc.my_vtag;
11507 		abort->ch.chunk_flags = SCTP_HAD_NO_TCB;
11508 	} else {
11509 		vtag = stcb->asoc.peer_vtag;
11510 		abort->ch.chunk_flags = 0;
11511 	}
11512 	abort->ch.chunk_length = htons(chunk_len);
11513 	/* Add padding, if necessary. */
11514 	if (padding_len > 0) {
11515 		if ((m_last == NULL) ||
11516 		    (sctp_add_pad_tombuf(m_last, padding_len) == NULL)) {
11517 			sctp_m_freem(m_out);
11518 			return;
11519 		}
11520 	}
11521 	if ((error = sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
11522 	                                        (struct sockaddr *)&net->ro._l_addr,
11523 	                                        m_out, auth_offset, auth, stcb->asoc.authinfo.active_keyid, 1, 0, 0,
11524 	                                        stcb->sctp_ep->sctp_lport, stcb->rport, htonl(vtag),
11525 	                                        stcb->asoc.primary_destination->port, NULL,
11526 #if defined(__FreeBSD__) && !defined(__Userspace__)
11527 	                                        0, 0,
11528 #endif
11529 	                                        so_locked))) {
11530 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
11531 		if (error == ENOBUFS) {
11532 			stcb->asoc.ifp_had_enobuf = 1;
11533 			SCTP_STAT_INCR(sctps_lowlevelerr);
11534 		}
11535 	} else {
11536 		stcb->asoc.ifp_had_enobuf = 0;
11537 	}
11538 	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
11539 }
11540 
11541 void
11542 sctp_send_shutdown_complete(struct sctp_tcb *stcb,
11543                             struct sctp_nets *net,
11544                             int reflect_vtag)
11545 {
11546 	/* formulate and send a SHUTDOWN-COMPLETE */
11547 	struct mbuf *m_shutdown_comp;
11548 	struct sctp_shutdown_complete_chunk *shutdown_complete;
11549 	uint32_t vtag;
11550 	int error;
11551 	uint8_t flags;
11552 
11553 	m_shutdown_comp = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER);
11554 	if (m_shutdown_comp == NULL) {
11555 		/* no mbufs */
11556 		return;
11557 	}
11558 	if (reflect_vtag) {
11559 		flags = SCTP_HAD_NO_TCB;
11560 		vtag = stcb->asoc.my_vtag;
11561 	} else {
11562 		flags = 0;
11563 		vtag = stcb->asoc.peer_vtag;
11564 	}
11565 	shutdown_complete = mtod(m_shutdown_comp, struct sctp_shutdown_complete_chunk *);
11566 	shutdown_complete->ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
11567 	shutdown_complete->ch.chunk_flags = flags;
11568 	shutdown_complete->ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
11569 	SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_chunk);
11570 	if ((error = sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
11571 	                                        (struct sockaddr *)&net->ro._l_addr,
11572 	                                        m_shutdown_comp, 0, NULL, 0, 1, 0, 0,
11573 	                                        stcb->sctp_ep->sctp_lport, stcb->rport,
11574 	                                        htonl(vtag),
11575 	                                        net->port, NULL,
11576 #if defined(__FreeBSD__) && !defined(__Userspace__)
11577 	                                        0, 0,
11578 #endif
11579 	                                        SCTP_SO_NOT_LOCKED))) {
11580 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
11581 		if (error == ENOBUFS) {
11582 			stcb->asoc.ifp_had_enobuf = 1;
11583 			SCTP_STAT_INCR(sctps_lowlevelerr);
11584 		}
11585 	} else {
11586 		stcb->asoc.ifp_had_enobuf = 0;
11587 	}
11588 	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
11589 	return;
11590 }
11591 
11592 #if defined(__FreeBSD__) && !defined(__Userspace__)
11593 static void
11594 sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst,
11595                    struct sctphdr *sh, uint32_t vtag,
11596                    uint8_t type, struct mbuf *cause,
11597                    uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
11598                    uint32_t vrf_id, uint16_t port)
11599 #else
11600 static void
11601 sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst,
11602                    struct sctphdr *sh, uint32_t vtag,
11603                    uint8_t type, struct mbuf *cause,
11604                    uint32_t vrf_id SCTP_UNUSED, uint16_t port)
11605 #endif
11606 {
11607 	struct mbuf *o_pak;
11608 	struct mbuf *mout;
11609 	struct sctphdr *shout;
11610 	struct sctp_chunkhdr *ch;
11611 #if defined(INET) || defined(INET6)
11612 	struct udphdr *udp;
11613 #endif
11614 	int ret, len, cause_len, padding_len;
11615 #ifdef INET
11616 #if defined(__APPLE__) && !defined(__Userspace__)
11617 	sctp_route_t ro;
11618 #endif
11619 	struct sockaddr_in *src_sin, *dst_sin;
11620 	struct ip *ip;
11621 #endif
11622 #ifdef INET6
11623 	struct sockaddr_in6 *src_sin6, *dst_sin6;
11624 	struct ip6_hdr *ip6;
11625 #endif
11626 
11627 	/* Compute the length of the cause and add final padding. */
11628 	cause_len = 0;
11629 	if (cause != NULL) {
11630 		struct mbuf *m_at, *m_last = NULL;
11631 
11632 		for (m_at = cause; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
11633 			if (SCTP_BUF_NEXT(m_at) == NULL)
11634 				m_last = m_at;
11635 			cause_len += SCTP_BUF_LEN(m_at);
11636 		}
11637 		padding_len = cause_len % 4;
11638 		if (padding_len != 0) {
11639 			padding_len = 4 - padding_len;
11640 		}
11641 		if (padding_len != 0) {
11642 			if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
11643 				sctp_m_freem(cause);
11644 				return;
11645 			}
11646 		}
11647 	} else {
11648 		padding_len = 0;
11649 	}
11650 	/* Get an mbuf for the header. */
11651 	len = sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
11652 	switch (dst->sa_family) {
11653 #ifdef INET
11654 	case AF_INET:
11655 		len += sizeof(struct ip);
11656 		break;
11657 #endif
11658 #ifdef INET6
11659 	case AF_INET6:
11660 		len += sizeof(struct ip6_hdr);
11661 		break;
11662 #endif
11663 	default:
11664 		break;
11665 	}
11666 #if defined(INET) || defined(INET6)
11667 	if (port) {
11668 		len += sizeof(struct udphdr);
11669 	}
11670 #endif
11671 #if defined(__APPLE__) && !defined(__Userspace__)
11672 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
11673 	mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_NOWAIT, 1, MT_DATA);
11674 #else
11675 	mout = sctp_get_mbuf_for_msg(len + SCTP_MAX_LINKHDR, 1, M_NOWAIT, 1, MT_DATA);
11676 #endif
11677 #else
11678 	mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_NOWAIT, 1, MT_DATA);
11679 #endif
11680 	if (mout == NULL) {
11681 		if (cause) {
11682 			sctp_m_freem(cause);
11683 		}
11684 		return;
11685 	}
11686 #if defined(__APPLE__) && !defined(__Userspace__)
11687 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
11688 	SCTP_BUF_RESV_UF(mout, max_linkhdr);
11689 #else
11690 	SCTP_BUF_RESV_UF(mout, SCTP_MAX_LINKHDR);
11691 #endif
11692 #else
11693 	SCTP_BUF_RESV_UF(mout, max_linkhdr);
11694 #endif
11695 	SCTP_BUF_LEN(mout) = len;
11696 	SCTP_BUF_NEXT(mout) = cause;
11697 #if defined(__FreeBSD__) && !defined(__Userspace__)
11698 	M_SETFIB(mout, fibnum);
11699 	mout->m_pkthdr.flowid = mflowid;
11700 	M_HASHTYPE_SET(mout, mflowtype);
11701 #endif
11702 #ifdef INET
11703 	ip = NULL;
11704 #endif
11705 #ifdef INET6
11706 	ip6 = NULL;
11707 #endif
11708 	switch (dst->sa_family) {
11709 #ifdef INET
11710 	case AF_INET:
11711 		src_sin = (struct sockaddr_in *)src;
11712 		dst_sin = (struct sockaddr_in *)dst;
11713 		ip = mtod(mout, struct ip *);
11714 		ip->ip_v = IPVERSION;
11715 		ip->ip_hl = (sizeof(struct ip) >> 2);
11716 		ip->ip_tos = 0;
11717 #if defined(__FreeBSD__) && !defined(__Userspace__)
11718 		ip->ip_off = htons(IP_DF);
11719 #elif defined(WITH_CONVERT_IP_OFF) || defined(__APPLE__)
11720 		ip->ip_off = IP_DF;
11721 #else
11722 		ip->ip_off = htons(IP_DF);
11723 #endif
11724 #if defined(__Userspace__)
11725 		ip->ip_id = htons(ip_id++);
11726 #elif defined(__FreeBSD__)
11727 		ip_fillid(ip);
11728 #elif defined(__APPLE__)
11729 #if RANDOM_IP_ID
11730 		ip->ip_id = ip_randomid();
11731 #else
11732 		ip->ip_id = htons(ip_id++);
11733 #endif
11734 #else
11735 		ip->ip_id = ip_id++;
11736 #endif
11737 		ip->ip_ttl = MODULE_GLOBAL(ip_defttl);
11738 		if (port) {
11739 			ip->ip_p = IPPROTO_UDP;
11740 		} else {
11741 			ip->ip_p = IPPROTO_SCTP;
11742 		}
11743 		ip->ip_src.s_addr = dst_sin->sin_addr.s_addr;
11744 		ip->ip_dst.s_addr = src_sin->sin_addr.s_addr;
11745 		ip->ip_sum = 0;
11746 		len = sizeof(struct ip);
11747 		shout = (struct sctphdr *)((caddr_t)ip + len);
11748 		break;
11749 #endif
11750 #ifdef INET6
11751 	case AF_INET6:
11752 		src_sin6 = (struct sockaddr_in6 *)src;
11753 		dst_sin6 = (struct sockaddr_in6 *)dst;
11754 		ip6 = mtod(mout, struct ip6_hdr *);
11755 		ip6->ip6_flow = htonl(0x60000000);
11756 #if defined(__FreeBSD__) && !defined(__Userspace__)
11757 		if (V_ip6_auto_flowlabel) {
11758 			ip6->ip6_flow |= (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
11759 		}
11760 #endif
11761 #if defined(__Userspace__)
11762 		ip6->ip6_hlim = IPv6_HOP_LIMIT;
11763 #else
11764 		ip6->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
11765 #endif
11766 		if (port) {
11767 			ip6->ip6_nxt = IPPROTO_UDP;
11768 		} else {
11769 			ip6->ip6_nxt = IPPROTO_SCTP;
11770 		}
11771 		ip6->ip6_src = dst_sin6->sin6_addr;
11772 		ip6->ip6_dst = src_sin6->sin6_addr;
11773 		len = sizeof(struct ip6_hdr);
11774 		shout = (struct sctphdr *)((caddr_t)ip6 + len);
11775 		break;
11776 #endif
11777 	default:
11778 		len = 0;
11779 		shout = mtod(mout, struct sctphdr *);
11780 		break;
11781 	}
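	/*
	 * When UDP encapsulation is used (port != 0), a UDP header sits
	 * between the IP header and the SCTP common header.
	 */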
11782 #if defined(INET) || defined(INET6)
11783 	if (port) {
11784 		if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
11785 			sctp_m_freem(mout);
11786 			return;
11787 		}
11788 		udp = (struct udphdr *)shout;
11789 		udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
11790 		udp->uh_dport = port;
11791 		udp->uh_sum = 0;
11792 		udp->uh_ulen = htons((uint16_t)(sizeof(struct udphdr) +
11793 		                                sizeof(struct sctphdr) +
11794 		                                sizeof(struct sctp_chunkhdr) +
11795 		                                cause_len + padding_len));
11796 		len += sizeof(struct udphdr);
11797 		shout = (struct sctphdr *)((caddr_t)shout + sizeof(struct udphdr));
11798 	} else {
11799 		udp = NULL;
11800 	}
11801 #endif
11802 	shout->src_port = sh->dest_port;
11803 	shout->dest_port = sh->src_port;
11804 	shout->checksum = 0;
11805 	if (vtag) {
11806 		shout->v_tag = htonl(vtag);
11807 	} else {
11808 		shout->v_tag = sh->v_tag;
11809 	}
11810 	len += sizeof(struct sctphdr);
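	/*
	 * Build the lone chunk header; if we do not know the peer's
	 * verification tag, set SCTP_HAD_NO_TCB in the chunk flags.
	 */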
11811 	ch = (struct sctp_chunkhdr *)((caddr_t)shout + sizeof(struct sctphdr));
11812 	ch->chunk_type = type;
11813 	if (vtag) {
11814 		ch->chunk_flags = 0;
11815 	} else {
11816 		ch->chunk_flags = SCTP_HAD_NO_TCB;
11817 	}
11818 	ch->chunk_length = htons((uint16_t)(sizeof(struct sctp_chunkhdr) + cause_len));
11819 	len += sizeof(struct sctp_chunkhdr);
11820 	len += cause_len + padding_len;
11821 
11822 	if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
11823 		sctp_m_freem(mout);
11824 		return;
11825 	}
11826 	SCTP_ATTACH_CHAIN(o_pak, mout, len);
11827 	switch (dst->sa_family) {
11828 #ifdef INET
11829 	case AF_INET:
11830 #if defined(__APPLE__) && !defined(__Userspace__)
11831 		/* zero out the route structure on the stack */
11832 		memset(&ro, 0, sizeof(sctp_route_t));
11833 #endif
11834 		if (port) {
11835 #if !defined(_WIN32) && !defined(__Userspace__)
11836 #if defined(__FreeBSD__)
11837 			if (V_udp_cksum) {
11838 				udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
11839 			} else {
11840 				udp->uh_sum = 0;
11841 			}
11842 #else
11843 			udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
11844 #endif
11845 #else
11846 			udp->uh_sum = 0;
11847 #endif
11848 		}
11849 #if defined(__FreeBSD__) && !defined(__Userspace__)
11850 		ip->ip_len = htons(len);
11851 #elif defined(__APPLE__) || defined(__Userspace__)
11852 		ip->ip_len = len;
11853 #else
11854 		ip->ip_len = htons(len);
11855 #endif
11856 		if (port) {
11857 			shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip) + sizeof(struct udphdr));
11858 			SCTP_STAT_INCR(sctps_sendswcrc);
11859 #if !defined(_WIN32) && !defined(__Userspace__)
11860 #if defined(__FreeBSD__)
11861 			if (V_udp_cksum) {
11862 				SCTP_ENABLE_UDP_CSUM(o_pak);
11863 			}
11864 #else
11865 			SCTP_ENABLE_UDP_CSUM(o_pak);
11866 #endif
11867 #endif
11868 		} else {
11869 #if defined(__FreeBSD__) && !defined(__Userspace__)
11870 			mout->m_pkthdr.csum_flags = CSUM_SCTP;
11871 			mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
11872 			SCTP_STAT_INCR(sctps_sendhwcrc);
11873 #else
11874 			shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip));
11875 			SCTP_STAT_INCR(sctps_sendswcrc);
11876 #endif
11877 		}
11878 #ifdef SCTP_PACKET_LOGGING
11879 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
11880 			sctp_packet_log(o_pak);
11881 		}
11882 #endif
11883 #if defined(__APPLE__) && !defined(__Userspace__)
11884 		SCTP_IP_OUTPUT(ret, o_pak, &ro, NULL, vrf_id);
11885 		/* Free the route if we got one back */
11886 		if (ro.ro_rt) {
11887 			RTFREE(ro.ro_rt);
11888 			ro.ro_rt = NULL;
11889 		}
11890 #else
11891 #if defined(__FreeBSD__) && !defined(__Userspace__)
11892 		SCTP_PROBE5(send, NULL, NULL, ip, NULL, shout);
11893 #endif
11894 		SCTP_IP_OUTPUT(ret, o_pak, NULL, NULL, vrf_id);
11895 #endif
11896 		break;
11897 #endif
11898 #ifdef INET6
11899 	case AF_INET6:
11900 		ip6->ip6_plen = htons((uint16_t)(len - sizeof(struct ip6_hdr)));
11901 		if (port) {
11902 			shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
11903 			SCTP_STAT_INCR(sctps_sendswcrc);
11904 #if !defined(__Userspace__)
11905 #if defined(_WIN32)
11906 			udp->uh_sum = 0;
11907 #else
11908 			if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
11909 				udp->uh_sum = 0xffff;
11910 			}
11911 #endif
11912 #endif
11913 		} else {
11914 #if defined(__FreeBSD__) && !defined(__Userspace__)
11915 			mout->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
11916 			mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
11917 			SCTP_STAT_INCR(sctps_sendhwcrc);
11918 #else
11919 			shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr));
11920 			SCTP_STAT_INCR(sctps_sendswcrc);
11921 #endif
11922 		}
11923 #ifdef SCTP_PACKET_LOGGING
11924 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
11925 			sctp_packet_log(o_pak);
11926 		}
11927 #endif
11928 #if defined(__FreeBSD__) && !defined(__Userspace__)
11929 		SCTP_PROBE5(send, NULL, NULL, ip6, NULL, shout);
11930 #endif
11931 		SCTP_IP6_OUTPUT(ret, o_pak, NULL, NULL, NULL, vrf_id);
11932 		break;
11933 #endif
11934 #if defined(__Userspace__)
11935 	case AF_CONN:
11936 	{
11937 		char *buffer;
11938 		struct sockaddr_conn *sconn;
11939 
11940 		sconn = (struct sockaddr_conn *)src;
11941 		if (SCTP_BASE_VAR(crc32c_offloaded) == 0) {
11942 			shout->checksum = sctp_calculate_cksum(mout, 0);
11943 			SCTP_STAT_INCR(sctps_sendswcrc);
11944 		} else {
11945 			SCTP_STAT_INCR(sctps_sendhwcrc);
11946 		}
11947 #ifdef SCTP_PACKET_LOGGING
11948 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
11949 			sctp_packet_log(mout);
11950 		}
11951 #endif
11952 		/* Don't alloc/free for each packet */
11953 		if ((buffer = malloc(len)) != NULL) {
11954 			m_copydata(mout, 0, len, buffer);
11955 			ret = SCTP_BASE_VAR(conn_output)(sconn->sconn_addr, buffer, len, 0, 0);
11956 			free(buffer);
11957 		} else {
11958 			ret = ENOMEM;
11959 		}
11960 		sctp_m_freem(mout);
11961 		break;
11962 	}
11963 #endif
11964 	default:
11965 		SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
11966 		        dst->sa_family);
11967 		sctp_m_freem(mout);
11968 		SCTP_LTRACE_ERR_RET_PKT(mout, NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
11969 		return;
11970 	}
11971 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
11972 #if defined(__FreeBSD__) && !defined(__Userspace__)
11973 	if (port) {
11974 		UDPSTAT_INC(udps_opackets);
11975 	}
11976 #endif
11977 	SCTP_STAT_INCR(sctps_sendpackets);
11978 	SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
11979 	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
11980 	if (ret) {
11981 		SCTP_STAT_INCR(sctps_senderrors);
11982 	}
11983 	return;
11984 }
11985 
11986 void
11987 sctp_send_shutdown_complete2(struct sockaddr *src, struct sockaddr *dst,
11988                              struct sctphdr *sh,
11989 #if defined(__FreeBSD__) && !defined(__Userspace__)
11990                              uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
11991 #endif
11992                              uint32_t vrf_id, uint16_t port)
11993 {
11994 	sctp_send_resp_msg(src, dst, sh, 0, SCTP_SHUTDOWN_COMPLETE, NULL,
11995 #if defined(__FreeBSD__) && !defined(__Userspace__)
11996 	                   mflowtype, mflowid, fibnum,
11997 #endif
11998 	                   vrf_id, port);
11999 }
12000 
12001 void
12002 sctp_send_hb(struct sctp_tcb *stcb, struct sctp_nets *net, int so_locked)
12003 {
12004 	struct sctp_tmit_chunk *chk;
12005 	struct sctp_heartbeat_chunk *hb;
12006 	struct timeval now;
12007 
12008 	SCTP_TCB_LOCK_ASSERT(stcb);
12009 	if (net == NULL) {
12010 		return;
12011 	}
12012 	(void)SCTP_GETTIME_TIMEVAL(&now);
12013 	switch (net->ro._l_addr.sa.sa_family) {
12014 #ifdef INET
12015 	case AF_INET:
12016 		break;
12017 #endif
12018 #ifdef INET6
12019 	case AF_INET6:
12020 		break;
12021 #endif
12022 #if defined(__Userspace__)
12023 	case AF_CONN:
12024 		break;
12025 #endif
12026 	default:
12027 		return;
12028 	}
12029 	sctp_alloc_a_chunk(stcb, chk);
12030 	if (chk == NULL) {
12031 		SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak, can't get a chunk for hb\n");
12032 		return;
12033 	}
12034 
12035 	chk->copy_by_ref = 0;
12036 	chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST;
12037 	chk->rec.chunk_id.can_take_data = 1;
12038 	chk->flags = 0;
12039 	chk->asoc = &stcb->asoc;
12040 	chk->send_size = sizeof(struct sctp_heartbeat_chunk);
12041 
12042 	chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
12043 	if (chk->data == NULL) {
12044 		sctp_free_a_chunk(stcb, chk, so_locked);
12045 		return;
12046 	}
12047 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12048 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12049 	chk->sent = SCTP_DATAGRAM_UNSENT;
12050 	chk->snd_count = 0;
12051 	chk->whoTo = net;
12052 	atomic_add_int(&chk->whoTo->ref_count, 1);
12053 	/* Now we have a mbuf that we can fill in with the details */
12054 	hb = mtod(chk->data, struct sctp_heartbeat_chunk *);
12055 	memset(hb, 0, sizeof(struct sctp_heartbeat_chunk));
12056 	/* fill out chunk header */
12057 	hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST;
12058 	hb->ch.chunk_flags = 0;
12059 	hb->ch.chunk_length = htons(chk->send_size);
12060 	/* Fill out hb parameter */
12061 	hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO);
12062 	hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param));
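	/*
	 * Record the send time; it is echoed back in the HEARTBEAT-ACK and
	 * used to measure the RTT to this destination.
	 */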
12063 	hb->heartbeat.hb_info.time_value_1 = now.tv_sec;
12064 	hb->heartbeat.hb_info.time_value_2 = now.tv_usec;
12065 	/* Did our user request this one? Put it in. */
12066 	hb->heartbeat.hb_info.addr_family = (uint8_t)net->ro._l_addr.sa.sa_family;
12067 #ifdef HAVE_SA_LEN
12068 	hb->heartbeat.hb_info.addr_len = net->ro._l_addr.sa.sa_len;
12069 #else
12070 	switch (net->ro._l_addr.sa.sa_family) {
12071 #ifdef INET
12072 	case AF_INET:
12073 		hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_in);
12074 		break;
12075 #endif
12076 #ifdef INET6
12077 	case AF_INET6:
12078 		hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_in6);
12079 		break;
12080 #endif
12081 #if defined(__Userspace__)
12082 	case AF_CONN:
12083 		hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_conn);
12084 		break;
12085 #endif
12086 	default:
12087 		hb->heartbeat.hb_info.addr_len = 0;
12088 		break;
12089 	}
12090 #endif
12091 	if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
12092 		/*
12093 		 * we only take from the entropy pool if the address is not
12094 		 * confirmed.
12095 		 */
12096 		net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
12097 		net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
12098 	} else {
12099 		net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0;
12100 		net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0;
12101 	}
12102 	switch (net->ro._l_addr.sa.sa_family) {
12103 #ifdef INET
12104 	case AF_INET:
12105 		memcpy(hb->heartbeat.hb_info.address,
12106 		       &net->ro._l_addr.sin.sin_addr,
12107 		       sizeof(net->ro._l_addr.sin.sin_addr));
12108 		break;
12109 #endif
12110 #ifdef INET6
12111 	case AF_INET6:
12112 		memcpy(hb->heartbeat.hb_info.address,
12113 		       &net->ro._l_addr.sin6.sin6_addr,
12114 		       sizeof(net->ro._l_addr.sin6.sin6_addr));
12115 		break;
12116 #endif
12117 #if defined(__Userspace__)
12118 	case AF_CONN:
12119 		memcpy(hb->heartbeat.hb_info.address,
12120 		       &net->ro._l_addr.sconn.sconn_addr,
12121 		       sizeof(net->ro._l_addr.sconn.sconn_addr));
12122 		break;
12123 #endif
12124 	default:
12125 		if (chk->data) {
12126 			sctp_m_freem(chk->data);
12127 			chk->data = NULL;
12128 		}
12129 		sctp_free_a_chunk(stcb, chk, so_locked);
12130 		return;
12131 		break;
12132 	}
12133 	net->hb_responded = 0;
12134 	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
12135 	stcb->asoc.ctrl_queue_cnt++;
12136 	SCTP_STAT_INCR(sctps_sendheartbeat);
12137 	return;
12138 }
12139 
12140 void
12141 sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
12142 		   uint32_t high_tsn)
12143 {
12144 	struct sctp_association *asoc;
12145 	struct sctp_ecne_chunk *ecne;
12146 	struct sctp_tmit_chunk *chk;
12147 
12148 	if (net == NULL) {
12149 		return;
12150 	}
12151 	asoc = &stcb->asoc;
12152 	SCTP_TCB_LOCK_ASSERT(stcb);
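	/*
	 * If an ECN-ECHO for this destination is already queued, update its
	 * TSN and packet count instead of queueing a second one.
	 */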
12153 	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
12154 		if ((chk->rec.chunk_id.id == SCTP_ECN_ECHO) && (net == chk->whoTo)) {
12155 			/* found a previous ECN_ECHO; update it if needed */
12156 			uint32_t cnt, ctsn;
12157 			ecne = mtod(chk->data, struct sctp_ecne_chunk *);
12158 			ctsn = ntohl(ecne->tsn);
12159 			if (SCTP_TSN_GT(high_tsn, ctsn)) {
12160 				ecne->tsn = htonl(high_tsn);
12161 				SCTP_STAT_INCR(sctps_queue_upd_ecne);
12162 			}
12163 			cnt = ntohl(ecne->num_pkts_since_cwr);
12164 			cnt++;
12165 			ecne->num_pkts_since_cwr = htonl(cnt);
12166 			return;
12167 		}
12168 	}
12169 	/* nope could not find one to update so we must build one */
12170 	sctp_alloc_a_chunk(stcb, chk);
12171 	if (chk == NULL) {
12172 		return;
12173 	}
12174 	SCTP_STAT_INCR(sctps_queue_upd_ecne);
12175 	chk->copy_by_ref = 0;
12176 	chk->rec.chunk_id.id = SCTP_ECN_ECHO;
12177 	chk->rec.chunk_id.can_take_data = 0;
12178 	chk->flags = 0;
12179 	chk->asoc = &stcb->asoc;
12180 	chk->send_size = sizeof(struct sctp_ecne_chunk);
12181 	chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
12182 	if (chk->data == NULL) {
12183 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
12184 		return;
12185 	}
12186 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12187 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12188 	chk->sent = SCTP_DATAGRAM_UNSENT;
12189 	chk->snd_count = 0;
12190 	chk->whoTo = net;
12191 	atomic_add_int(&chk->whoTo->ref_count, 1);
12192 
12193 	stcb->asoc.ecn_echo_cnt_onq++;
12194 	ecne = mtod(chk->data, struct sctp_ecne_chunk *);
12195 	ecne->ch.chunk_type = SCTP_ECN_ECHO;
12196 	ecne->ch.chunk_flags = 0;
12197 	ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk));
12198 	ecne->tsn = htonl(high_tsn);
12199 	ecne->num_pkts_since_cwr = htonl(1);
12200 	TAILQ_INSERT_HEAD(&stcb->asoc.control_send_queue, chk, sctp_next);
12201 	asoc->ctrl_queue_cnt++;
12202 }
12203 
12204 void
12205 sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
12206     struct mbuf *m, int len, int iphlen, int bad_crc)
12207 {
12208 	struct sctp_association *asoc;
12209 	struct sctp_pktdrop_chunk *drp;
12210 	struct sctp_tmit_chunk *chk;
12211 	uint8_t *datap;
12212 	int was_trunc = 0;
12213 	int fullsz = 0;
12214 	long spc;
12215 	int offset;
12216 	struct sctp_chunkhdr *ch, chunk_buf;
12217 	unsigned int chk_length;
12218 
12219 	if (stcb == NULL) {
12220 		return;
12221 	}
12222 	asoc = &stcb->asoc;
12223 	SCTP_TCB_LOCK_ASSERT(stcb);
12224 	if (asoc->pktdrop_supported == 0) {
12225 		/*-
12226 		 * The peer must declare support before we send one.
12227 		 */
12228 		return;
12229 	}
12230 	if (stcb->sctp_socket == NULL) {
12231 		return;
12232 	}
12233 	sctp_alloc_a_chunk(stcb, chk);
12234 	if (chk == NULL) {
12235 		return;
12236 	}
12237 	chk->copy_by_ref = 0;
12238 	chk->rec.chunk_id.id = SCTP_PACKET_DROPPED;
12239 	chk->rec.chunk_id.can_take_data = 1;
12240 	chk->flags = 0;
12241 	len -= iphlen;
12242 	chk->send_size = len;
12243 	/* Validate that we do not have an ABORT in here. */
12244 	offset = iphlen + sizeof(struct sctphdr);
12245 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
12246 						   sizeof(*ch), (uint8_t *)&chunk_buf);
12247 	while (ch != NULL) {
12248 		chk_length = ntohs(ch->chunk_length);
12249 		if (chk_length < sizeof(*ch)) {
12250 			/* break to abort land */
12251 			break;
12252 		}
12253 		switch (ch->chunk_type) {
12254 		case SCTP_PACKET_DROPPED:
12255 		case SCTP_ABORT_ASSOCIATION:
12256 		case SCTP_INITIATION_ACK:
12257 			/*-
12258 			 * We don't respond with a PKT-DROP to an ABORT
12259 			 * or a PKT-DROP. We also do not respond to an
12260 			 * INIT-ACK, because we can't know if the initiation
12261 			 * tag is correct or not.
12262 			 */
12263 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
12264 			return;
12265 		default:
12266 			break;
12267 		}
12268 		offset += SCTP_SIZE32(chk_length);
12269 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
12270 		    sizeof(*ch), (uint8_t *)&chunk_buf);
12271 	}
12272 
12273 	if ((len + SCTP_MAX_OVERHEAD + sizeof(struct sctp_pktdrop_chunk)) >
12274 	    min(stcb->asoc.smallest_mtu, MCLBYTES)) {
12275 		/* only send one MTU's worth; trim off the
12276 		 * excess on the end.
12277 		 */
12278 		fullsz = len;
12279 		len = min(stcb->asoc.smallest_mtu, MCLBYTES) - SCTP_MAX_OVERHEAD;
12280 		was_trunc = 1;
12281 	}
12282 	chk->asoc = &stcb->asoc;
12283 	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
12284 	if (chk->data == NULL) {
12285 jump_out:
12286 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
12287 		return;
12288 	}
12289 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12290 	drp = mtod(chk->data, struct sctp_pktdrop_chunk *);
12291 	if (drp == NULL) {
12292 		sctp_m_freem(chk->data);
12293 		chk->data = NULL;
12294 		goto jump_out;
12295 	}
12296 	chk->book_size = SCTP_SIZE32((chk->send_size + sizeof(struct sctp_pktdrop_chunk) +
12297 	    sizeof(struct sctphdr) + SCTP_MED_OVERHEAD));
12298 	chk->book_size_scale = 0;
12299 	if (was_trunc) {
12300 		drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED;
12301 		drp->trunc_len = htons(fullsz);
12302 		/* Len is already adjusted to the size minus overhead above;
12303 		 * take out the pkt_drop chunk itself from it.
12304 		 */
12305 		chk->send_size = (uint16_t)(len - sizeof(struct sctp_pktdrop_chunk));
12306 		len = chk->send_size;
12307 	} else {
12308 		/* no truncation needed */
12309 		drp->ch.chunk_flags = 0;
12310 		drp->trunc_len = htons(0);
12311 	}
12312 	if (bad_crc) {
12313 		drp->ch.chunk_flags |= SCTP_BADCRC;
12314 	}
12315 	chk->send_size += sizeof(struct sctp_pktdrop_chunk);
12316 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12317 	chk->sent = SCTP_DATAGRAM_UNSENT;
12318 	chk->snd_count = 0;
12319 	if (net) {
12320 		/* we should normally take this path */
12321 		chk->whoTo = net;
12322 		atomic_add_int(&chk->whoTo->ref_count, 1);
12323 	} else {
12324 		chk->whoTo = NULL;
12325 	}
12326 	drp->ch.chunk_type = SCTP_PACKET_DROPPED;
12327 	drp->ch.chunk_length = htons(chk->send_size);
12328 	spc = SCTP_SB_LIMIT_RCV(stcb->sctp_socket);
12329 	if (spc < 0) {
12330 		spc = 0;
12331 	}
12332 	drp->bottle_bw = htonl(spc);
12333 	if (asoc->my_rwnd) {
12334 		drp->current_onq = htonl(asoc->size_on_reasm_queue +
12335 		    asoc->size_on_all_streams +
12336 		    asoc->my_rwnd_control_len +
12337 		    stcb->sctp_socket->so_rcv.sb_cc);
12338 	} else {
12339 		/*-
12340 		 * If my rwnd is 0, possibly from mbuf depletion as well as
12341 		 * space used, tell the peer there is NO space aka onq == bw
12342 		 */
12343 		drp->current_onq = htonl(spc);
12344 	}
12345 	drp->reserved = 0;
12346 	datap = drp->data;
12347 	m_copydata(m, iphlen, len, (caddr_t)datap);
12348 	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
12349 	asoc->ctrl_queue_cnt++;
12350 }
12351 
12352 void
12353 sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn, uint8_t override)
12354 {
12355 	struct sctp_association *asoc;
12356 	struct sctp_cwr_chunk *cwr;
12357 	struct sctp_tmit_chunk *chk;
12358 
12359 	SCTP_TCB_LOCK_ASSERT(stcb);
12360 	if (net == NULL) {
12361 		return;
12362 	}
12363 	asoc = &stcb->asoc;
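	/*
	 * At most one CWR per destination needs to be queued; update an
	 * existing one when possible.
	 */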
12364 	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
12365 		if ((chk->rec.chunk_id.id == SCTP_ECN_CWR) && (net == chk->whoTo)) {
12366 			/* found a previous CWR queued to the same destination; update it if needed */
12367 			uint32_t ctsn;
12368 			cwr = mtod(chk->data, struct sctp_cwr_chunk *);
12369 			ctsn = ntohl(cwr->tsn);
12370 			if (SCTP_TSN_GT(high_tsn, ctsn)) {
12371 				cwr->tsn = htonl(high_tsn);
12372 			}
12373 			if (override & SCTP_CWR_REDUCE_OVERRIDE) {
12374 				/* Make sure override is carried */
12375 				cwr->ch.chunk_flags |= SCTP_CWR_REDUCE_OVERRIDE;
12376 			}
12377 			return;
12378 		}
12379 	}
12380 	sctp_alloc_a_chunk(stcb, chk);
12381 	if (chk == NULL) {
12382 		return;
12383 	}
12384 	chk->copy_by_ref = 0;
12385 	chk->rec.chunk_id.id = SCTP_ECN_CWR;
12386 	chk->rec.chunk_id.can_take_data = 1;
12387 	chk->flags = 0;
12388 	chk->asoc = &stcb->asoc;
12389 	chk->send_size = sizeof(struct sctp_cwr_chunk);
12390 	chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
12391 	if (chk->data == NULL) {
12392 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
12393 		return;
12394 	}
12395 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12396 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12397 	chk->sent = SCTP_DATAGRAM_UNSENT;
12398 	chk->snd_count = 0;
12399 	chk->whoTo = net;
12400 	atomic_add_int(&chk->whoTo->ref_count, 1);
12401 	cwr = mtod(chk->data, struct sctp_cwr_chunk *);
12402 	cwr->ch.chunk_type = SCTP_ECN_CWR;
12403 	cwr->ch.chunk_flags = override;
12404 	cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk));
12405 	cwr->tsn = htonl(high_tsn);
12406 	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
12407 	asoc->ctrl_queue_cnt++;
12408 }
12409 
12410 static int
12411 sctp_add_stream_reset_out(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
12412                           uint32_t seq, uint32_t resp_seq, uint32_t last_sent)
12413 {
12414 	uint16_t len, old_len, i;
12415 	struct sctp_stream_reset_out_request *req_out;
12416 	struct sctp_chunkhdr *ch;
12417 	int at;
	int number_entries = 0;
12419 
12420 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12421 	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12422 	/* get to new offset for the param. */
12423 	req_out = (struct sctp_stream_reset_out_request *)((caddr_t)ch + len);
12424 	/* now how long will this param be? */
12425 	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
12426 		if ((stcb->asoc.strmout[i].state == SCTP_STREAM_RESET_PENDING) &&
12427 		    (stcb->asoc.strmout[i].chunks_on_queues == 0) &&
12428 		    TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
12429 			number_entries++;
12430 		}
12431 	}
12432 	if (number_entries == 0) {
12433 		return (0);
12434 	}
12435 	if (number_entries == stcb->asoc.streamoutcnt) {
12436 		number_entries = 0;
12437 	}
12438 	if (number_entries > SCTP_MAX_STREAMS_AT_ONCE_RESET) {
12439 		number_entries = SCTP_MAX_STREAMS_AT_ONCE_RESET;
12440 	}
12441 	len = (uint16_t)(sizeof(struct sctp_stream_reset_out_request) + (sizeof(uint16_t) * number_entries));
12442 	req_out->ph.param_type = htons(SCTP_STR_RESET_OUT_REQUEST);
12443 	req_out->ph.param_length = htons(len);
12444 	req_out->request_seq = htonl(seq);
12445 	req_out->response_seq = htonl(resp_seq);
12446 	req_out->send_reset_at_tsn = htonl(last_sent);
12447 	at = 0;
12448 	if (number_entries) {
12449 		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
12450 			if ((stcb->asoc.strmout[i].state == SCTP_STREAM_RESET_PENDING) &&
12451 			    (stcb->asoc.strmout[i].chunks_on_queues == 0) &&
12452 			    TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
12453 				req_out->list_of_streams[at] = htons(i);
12454 				at++;
12455 				stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_IN_FLIGHT;
12456 				if (at >= number_entries) {
12457 					break;
12458 				}
12459 			}
12460 		}
12461 	} else {
12462 		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
12463 			stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_IN_FLIGHT;
12464 		}
12465 	}
12466 	if (SCTP_SIZE32(len) > len) {
12467 		/*-
12468 		 * Need to worry about the pad we may end up adding to the
12469 		 * end. This is easy since the struct is either aligned to 4
12470 		 * bytes or 2 bytes off.
12471 		 */
12472 		req_out->list_of_streams[number_entries] = 0;
12473 	}
12474 	/* now fix the chunk length */
12475 	ch->chunk_length = htons(len + old_len);
12476 	chk->book_size = len + old_len;
12477 	chk->book_size_scale = 0;
12478 	chk->send_size = SCTP_SIZE32(chk->book_size);
12479 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12480 	return (1);
12481 }
12482 
12483 static void
12484 sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk,
12485                          int number_entries, uint16_t *list,
12486                          uint32_t seq)
12487 {
12488 	uint16_t len, old_len, i;
12489 	struct sctp_stream_reset_in_request *req_in;
12490 	struct sctp_chunkhdr *ch;
12491 
12492 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12493 	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12494 
12495 	/* get to new offset for the param. */
12496 	req_in = (struct sctp_stream_reset_in_request *)((caddr_t)ch + len);
12497 	/* now how long will this param be? */
12498 	len = (uint16_t)(sizeof(struct sctp_stream_reset_in_request) + (sizeof(uint16_t) * number_entries));
12499 	req_in->ph.param_type = htons(SCTP_STR_RESET_IN_REQUEST);
12500 	req_in->ph.param_length = htons(len);
12501 	req_in->request_seq = htonl(seq);
12502 	if (number_entries) {
12503 		for (i = 0; i < number_entries; i++) {
12504 			req_in->list_of_streams[i] = htons(list[i]);
12505 		}
12506 	}
12507 	if (SCTP_SIZE32(len) > len) {
12508 		/*-
12509 		 * Need to worry about the pad we may end up adding to the
12510 		 * end. This is easy since the struct is either aligned to 4
12511 		 * bytes or 2 bytes off.
12512 		 */
12513 		req_in->list_of_streams[number_entries] = 0;
12514 	}
12515 	/* now fix the chunk length */
12516 	ch->chunk_length = htons(len + old_len);
12517 	chk->book_size = len + old_len;
12518 	chk->book_size_scale = 0;
12519 	chk->send_size = SCTP_SIZE32(chk->book_size);
12520 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12521 	return;
12522 }
12523 
12524 static void
12525 sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk,
12526                           uint32_t seq)
12527 {
12528 	uint16_t len, old_len;
12529 	struct sctp_stream_reset_tsn_request *req_tsn;
12530 	struct sctp_chunkhdr *ch;
12531 
12532 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12533 	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12534 
12535 	/* get to new offset for the param. */
12536 	req_tsn = (struct sctp_stream_reset_tsn_request *)((caddr_t)ch + len);
12537 	/* now how long will this param be? */
12538 	len = sizeof(struct sctp_stream_reset_tsn_request);
12539 	req_tsn->ph.param_type = htons(SCTP_STR_RESET_TSN_REQUEST);
12540 	req_tsn->ph.param_length = htons(len);
12541 	req_tsn->request_seq = htonl(seq);
12542 
12543 	/* now fix the chunk length */
12544 	ch->chunk_length = htons(len + old_len);
12545 	chk->send_size = len + old_len;
12546 	chk->book_size = SCTP_SIZE32(chk->send_size);
12547 	chk->book_size_scale = 0;
12548 	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
12549 	return;
12550 }
12551 
12552 void
12553 sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk,
12554                              uint32_t resp_seq, uint32_t result)
12555 {
12556 	uint16_t len, old_len;
12557 	struct sctp_stream_reset_response *resp;
12558 	struct sctp_chunkhdr *ch;
12559 
12560 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12561 	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12562 
12563 	/* get to new offset for the param. */
12564 	resp = (struct sctp_stream_reset_response *)((caddr_t)ch + len);
12565 	/* now how long will this param be? */
12566 	len = sizeof(struct sctp_stream_reset_response);
12567 	resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
12568 	resp->ph.param_length = htons(len);
12569 	resp->response_seq = htonl(resp_seq);
	resp->result = htonl(result);
12571 
12572 	/* now fix the chunk length */
12573 	ch->chunk_length = htons(len + old_len);
12574 	chk->book_size = len + old_len;
12575 	chk->book_size_scale = 0;
12576 	chk->send_size = SCTP_SIZE32(chk->book_size);
12577 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12578 	return;
12579 }
12580 
12581 void
12582 sctp_send_deferred_reset_response(struct sctp_tcb *stcb,
12583 				 struct sctp_stream_reset_list *ent,
12584 				 int response)
12585 {
12586 	struct sctp_association *asoc;
12587 	struct sctp_tmit_chunk *chk;
12588 	struct sctp_chunkhdr *ch;
12589 
12590 	asoc = &stcb->asoc;
12591 
12592 	/*
12593 	 * Reset our last reset action to the new one IP -> response
12594 	 * (PERFORMED probably). This assures that if we fail to send, a
12595 	 * retran from the peer will get the new response.
12596 	 */
12597 	asoc->last_reset_action[0] = response;
12598 	if (asoc->stream_reset_outstanding) {
12599 		return;
12600 	}
12601 	sctp_alloc_a_chunk(stcb, chk);
12602 	if (chk == NULL) {
12603 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12604 		return;
12605 	}
12606 	chk->copy_by_ref = 0;
12607 	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
12608 	chk->rec.chunk_id.can_take_data = 0;
12609 	chk->flags = 0;
12610 	chk->asoc = &stcb->asoc;
12611 	chk->book_size = sizeof(struct sctp_chunkhdr);
12612 	chk->send_size = SCTP_SIZE32(chk->book_size);
12613 	chk->book_size_scale = 0;
12614 	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
12615 	if (chk->data == NULL) {
12616 		sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED);
12617 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12618 		return;
12619 	}
12620 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12621 	/* setup chunk parameters */
12622 	chk->sent = SCTP_DATAGRAM_UNSENT;
12623 	chk->snd_count = 0;
12624 	if (stcb->asoc.alternate) {
12625 		chk->whoTo = stcb->asoc.alternate;
12626 	} else {
12627 		chk->whoTo = stcb->asoc.primary_destination;
12628 	}
12629 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12630 	ch->chunk_type = SCTP_STREAM_RESET;
12631 	ch->chunk_flags = 0;
12632 	ch->chunk_length = htons(chk->book_size);
12633 	atomic_add_int(&chk->whoTo->ref_count, 1);
12634 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12635 	sctp_add_stream_reset_result(chk, ent->seq, response);
12636 	/* insert the chunk for sending */
12637 	TAILQ_INSERT_TAIL(&asoc->control_send_queue,
12638 			  chk,
12639 			  sctp_next);
12640 	asoc->ctrl_queue_cnt++;
12641 }
12642 
12643 void
12644 sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk,
12645                                  uint32_t resp_seq, uint32_t result,
12646                                  uint32_t send_una, uint32_t recv_next)
12647 {
12648 	uint16_t len, old_len;
12649 	struct sctp_stream_reset_response_tsn *resp;
12650 	struct sctp_chunkhdr *ch;
12651 
12652 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12653 	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12654 
12655 	/* get to new offset for the param. */
12656 	resp = (struct sctp_stream_reset_response_tsn *)((caddr_t)ch + len);
12657 	/* now how long will this param be? */
12658 	len = sizeof(struct sctp_stream_reset_response_tsn);
12659 	resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
12660 	resp->ph.param_length = htons(len);
12661 	resp->response_seq = htonl(resp_seq);
12662 	resp->result = htonl(result);
12663 	resp->senders_next_tsn = htonl(send_una);
12664 	resp->receivers_next_tsn = htonl(recv_next);
12665 
12666 	/* now fix the chunk length */
12667 	ch->chunk_length = htons(len + old_len);
12668 	chk->book_size = len + old_len;
12669 	chk->send_size = SCTP_SIZE32(chk->book_size);
12670 	chk->book_size_scale = 0;
12671 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12672 	return;
12673 }
12674 
12675 static void
12676 sctp_add_an_out_stream(struct sctp_tmit_chunk *chk,
12677 		       uint32_t seq,
12678 		       uint16_t adding)
12679 {
12680 	uint16_t len, old_len;
12681 	struct sctp_chunkhdr *ch;
12682 	struct sctp_stream_reset_add_strm *addstr;
12683 
12684 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12685 	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12686 
12687 	/* get to new offset for the param. */
12688 	addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
12689 	/* now how long will this param be? */
12690 	len = sizeof(struct sctp_stream_reset_add_strm);
12691 
12692 	/* Fill it out. */
12693 	addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_OUT_STREAMS);
12694 	addstr->ph.param_length = htons(len);
12695 	addstr->request_seq = htonl(seq);
12696 	addstr->number_of_streams = htons(adding);
12697 	addstr->reserved = 0;
12698 
12699 	/* now fix the chunk length */
12700 	ch->chunk_length = htons(len + old_len);
12701 	chk->send_size = len + old_len;
12702 	chk->book_size = SCTP_SIZE32(chk->send_size);
12703 	chk->book_size_scale = 0;
12704 	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
12705 	return;
12706 }
12707 
12708 static void
12709 sctp_add_an_in_stream(struct sctp_tmit_chunk *chk,
12710                       uint32_t seq,
12711                       uint16_t adding)
12712 {
12713 	uint16_t len, old_len;
12714 	struct sctp_chunkhdr *ch;
12715 	struct sctp_stream_reset_add_strm *addstr;
12716 
12717 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12718 	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12719 
12720 	/* get to new offset for the param. */
12721 	addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
12722 	/* now how long will this param be? */
12723 	len = sizeof(struct sctp_stream_reset_add_strm);
12724 	/* Fill it out. */
12725 	addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_IN_STREAMS);
12726 	addstr->ph.param_length = htons(len);
12727 	addstr->request_seq = htonl(seq);
12728 	addstr->number_of_streams = htons(adding);
12729 	addstr->reserved = 0;
12730 
12731 	/* now fix the chunk length */
12732 	ch->chunk_length = htons(len + old_len);
12733 	chk->send_size = len + old_len;
12734 	chk->book_size = SCTP_SIZE32(chk->send_size);
12735 	chk->book_size_scale = 0;
12736 	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
12737 	return;
12738 }
12739 
12740 int
12741 sctp_send_stream_reset_out_if_possible(struct sctp_tcb *stcb, int so_locked)
12742 {
12743 	struct sctp_association *asoc;
12744 	struct sctp_tmit_chunk *chk;
12745 	struct sctp_chunkhdr *ch;
12746 	uint32_t seq;
12747 
12748 	asoc = &stcb->asoc;
12749 	asoc->trigger_reset = 0;
12750 	if (asoc->stream_reset_outstanding) {
12751 		return (EALREADY);
12752 	}
12753 	sctp_alloc_a_chunk(stcb, chk);
12754 	if (chk == NULL) {
12755 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12756 		return (ENOMEM);
12757 	}
12758 	chk->copy_by_ref = 0;
12759 	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
12760 	chk->rec.chunk_id.can_take_data = 0;
12761 	chk->flags = 0;
12762 	chk->asoc = &stcb->asoc;
12763 	chk->book_size = sizeof(struct sctp_chunkhdr);
12764 	chk->send_size = SCTP_SIZE32(chk->book_size);
12765 	chk->book_size_scale = 0;
12766 	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
12767 	if (chk->data == NULL) {
12768 		sctp_free_a_chunk(stcb, chk, so_locked);
12769 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12770 		return (ENOMEM);
12771 	}
12772 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12773 
12774 	/* setup chunk parameters */
12775 	chk->sent = SCTP_DATAGRAM_UNSENT;
12776 	chk->snd_count = 0;
12777 	if (stcb->asoc.alternate) {
12778 		chk->whoTo = stcb->asoc.alternate;
12779 	} else {
12780 		chk->whoTo = stcb->asoc.primary_destination;
12781 	}
12782 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12783 	ch->chunk_type = SCTP_STREAM_RESET;
12784 	ch->chunk_flags = 0;
12785 	ch->chunk_length = htons(chk->book_size);
12786 	atomic_add_int(&chk->whoTo->ref_count, 1);
12787 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12788 	seq = stcb->asoc.str_reset_seq_out;
12789 	if (sctp_add_stream_reset_out(stcb, chk, seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1))) {
12790 		seq++;
12791 		asoc->stream_reset_outstanding++;
12792 	} else {
12793 		m_freem(chk->data);
12794 		chk->data = NULL;
12795 		sctp_free_a_chunk(stcb, chk, so_locked);
12796 		return (ENOENT);
12797 	}
12798 	asoc->str_reset = chk;
12799 	/* insert the chunk for sending */
12800 	TAILQ_INSERT_TAIL(&asoc->control_send_queue,
12801 			  chk,
12802 			  sctp_next);
12803 	asoc->ctrl_queue_cnt++;
12804 
12805 	if (stcb->asoc.send_sack) {
12806 		sctp_send_sack(stcb, so_locked);
12807 	}
12808 	sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
12809 	return (0);
12810 }
12811 
12812 int
12813 sctp_send_str_reset_req(struct sctp_tcb *stcb,
12814                         uint16_t number_entries, uint16_t *list,
12815                         uint8_t send_in_req,
12816                         uint8_t send_tsn_req,
12817                         uint8_t add_stream,
12818                         uint16_t adding_o,
12819                         uint16_t adding_i, uint8_t peer_asked)
12820 {
12821 	struct sctp_association *asoc;
12822 	struct sctp_tmit_chunk *chk;
12823 	struct sctp_chunkhdr *ch;
	int can_send_out_req = 0;
12825 	uint32_t seq;
12826 
12827 	asoc = &stcb->asoc;
12828 	if (asoc->stream_reset_outstanding) {
12829 		/*-
12830 		 * Already one pending, must get ACK back to clear the flag.
12831 		 */
12832 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EBUSY);
12833 		return (EBUSY);
12834 	}
12835 	if ((send_in_req == 0) && (send_tsn_req == 0) &&
12836 	    (add_stream == 0)) {
12837 		/* nothing to do */
12838 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12839 		return (EINVAL);
12840 	}
12841 	if (send_tsn_req && send_in_req) {
12842 		/* error, can't do that */
12843 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12844 		return (EINVAL);
12845 	} else if (send_in_req) {
12846 		can_send_out_req = 1;
12847 	}
12848 	if (number_entries > (MCLBYTES -
12849 	                      SCTP_MIN_OVERHEAD -
12850 	                      sizeof(struct sctp_chunkhdr) -
12851 	                      sizeof(struct sctp_stream_reset_out_request)) /
12852 	                     sizeof(uint16_t)) {
12853 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12854 		return (ENOMEM);
12855 	}
12856 	sctp_alloc_a_chunk(stcb, chk);
12857 	if (chk == NULL) {
12858 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12859 		return (ENOMEM);
12860 	}
12861 	chk->copy_by_ref = 0;
12862 	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
12863 	chk->rec.chunk_id.can_take_data = 0;
12864 	chk->flags = 0;
12865 	chk->asoc = &stcb->asoc;
12866 	chk->book_size = sizeof(struct sctp_chunkhdr);
12867 	chk->send_size = SCTP_SIZE32(chk->book_size);
12868 	chk->book_size_scale = 0;
12869 	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
12870 	if (chk->data == NULL) {
12871 		sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED);
12872 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12873 		return (ENOMEM);
12874 	}
12875 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12876 
12877 	/* setup chunk parameters */
12878 	chk->sent = SCTP_DATAGRAM_UNSENT;
12879 	chk->snd_count = 0;
12880 	if (stcb->asoc.alternate) {
12881 		chk->whoTo = stcb->asoc.alternate;
12882 	} else {
12883 		chk->whoTo = stcb->asoc.primary_destination;
12884 	}
12885 	atomic_add_int(&chk->whoTo->ref_count, 1);
12886 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12887 	ch->chunk_type = SCTP_STREAM_RESET;
12888 	ch->chunk_flags = 0;
12889 	ch->chunk_length = htons(chk->book_size);
12890 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12891 
12892 	seq = stcb->asoc.str_reset_seq_out;
12893 	if (can_send_out_req) {
		int ret;

		ret = sctp_add_stream_reset_out(stcb, chk, seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1));
12896 		if (ret) {
12897 			seq++;
12898 			asoc->stream_reset_outstanding++;
12899 		}
12900 	}
12901 	if ((add_stream & 1) &&
12902 	    ((stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt) < adding_o)) {
12903 		/* Need to allocate more */
12904 		struct sctp_stream_out *oldstream;
12905 		struct sctp_stream_queue_pending *sp, *nsp;
12906 		int i;
12907 #if defined(SCTP_DETAILED_STR_STATS)
12908 		int j;
12909 #endif
12910 
12911 		oldstream = stcb->asoc.strmout;
12912 		/* get some more */
12913 		SCTP_MALLOC(stcb->asoc.strmout, struct sctp_stream_out *,
12914 			    (stcb->asoc.streamoutcnt + adding_o) * sizeof(struct sctp_stream_out),
12915 			    SCTP_M_STRMO);
12916 		if (stcb->asoc.strmout == NULL) {
12917 			uint8_t x;
12918 			stcb->asoc.strmout = oldstream;
12919 			/* Turn off the bit */
12920 			x = add_stream & 0xfe;
12921 			add_stream = x;
12922 			goto skip_stuff;
12923 		}
		/*
		 * Okay, now we proceed with copying the existing outbound
		 * stream state into the new array and initializing the
		 * newly added streams.
		 */
12927 		SCTP_TCB_SEND_LOCK(stcb);
12928 		stcb->asoc.ss_functions.sctp_ss_clear(stcb, &stcb->asoc, 0, 1);
12929 		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
12930 			TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
12931 			stcb->asoc.strmout[i].chunks_on_queues = oldstream[i].chunks_on_queues;
12932 			stcb->asoc.strmout[i].next_mid_ordered = oldstream[i].next_mid_ordered;
12933 			stcb->asoc.strmout[i].next_mid_unordered = oldstream[i].next_mid_unordered;
12934 			stcb->asoc.strmout[i].last_msg_incomplete = oldstream[i].last_msg_incomplete;
12935 			stcb->asoc.strmout[i].sid = i;
12936 			stcb->asoc.strmout[i].state = oldstream[i].state;
12937 			/* FIX ME FIX ME */
12938 			/* This should be a SS_COPY operation FIX ME STREAM SCHEDULER EXPERT */
12939 			stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], &oldstream[i]);
12940 			/* now anything on those queues? */
12941 			TAILQ_FOREACH_SAFE(sp, &oldstream[i].outqueue, next, nsp) {
12942 				TAILQ_REMOVE(&oldstream[i].outqueue, sp, next);
12943 				TAILQ_INSERT_TAIL(&stcb->asoc.strmout[i].outqueue, sp, next);
12944 			}
12945 		}
12946 		/* now the new streams */
12947 		stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 1);
12948 		for (i = stcb->asoc.streamoutcnt; i < (stcb->asoc.streamoutcnt + adding_o); i++) {
12949 			TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
12950 			stcb->asoc.strmout[i].chunks_on_queues = 0;
12951 #if defined(SCTP_DETAILED_STR_STATS)
12952 			for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
12953 				stcb->asoc.strmout[i].abandoned_sent[j] = 0;
12954 				stcb->asoc.strmout[i].abandoned_unsent[j] = 0;
12955 			}
12956 #else
12957 			stcb->asoc.strmout[i].abandoned_sent[0] = 0;
12958 			stcb->asoc.strmout[i].abandoned_unsent[0] = 0;
12959 #endif
12960 			stcb->asoc.strmout[i].next_mid_ordered = 0;
12961 			stcb->asoc.strmout[i].next_mid_unordered = 0;
12962 			stcb->asoc.strmout[i].sid = i;
12963 			stcb->asoc.strmout[i].last_msg_incomplete = 0;
12964 			stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], NULL);
12965 			stcb->asoc.strmout[i].state = SCTP_STREAM_CLOSED;
12966 		}
12967 		stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt + adding_o;
12968 		SCTP_FREE(oldstream, SCTP_M_STRMO);
12969 		SCTP_TCB_SEND_UNLOCK(stcb);
12970 	}
12971 skip_stuff:
12972 	if ((add_stream & 1) && (adding_o > 0)) {
12973 		asoc->strm_pending_add_size = adding_o;
12974 		asoc->peer_req_out = peer_asked;
12975 		sctp_add_an_out_stream(chk, seq, adding_o);
12976 		seq++;
12977 		asoc->stream_reset_outstanding++;
12978 	}
12979 	if ((add_stream & 2) && (adding_i > 0)) {
12980 		sctp_add_an_in_stream(chk, seq, adding_i);
12981 		seq++;
12982 		asoc->stream_reset_outstanding++;
12983 	}
12984 	if (send_in_req) {
12985 		sctp_add_stream_reset_in(chk, number_entries, list, seq);
12986 		seq++;
12987 		asoc->stream_reset_outstanding++;
12988 	}
12989 	if (send_tsn_req) {
12990 		sctp_add_stream_reset_tsn(chk, seq);
12991 		asoc->stream_reset_outstanding++;
12992 	}
12993 	asoc->str_reset = chk;
12994 	/* insert the chunk for sending */
12995 	TAILQ_INSERT_TAIL(&asoc->control_send_queue,
12996 			  chk,
12997 			  sctp_next);
12998 	asoc->ctrl_queue_cnt++;
12999 	if (stcb->asoc.send_sack) {
13000 		sctp_send_sack(stcb, SCTP_SO_LOCKED);
13001 	}
13002 	sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
13003 	return (0);
13004 }
13005 
13006 void
13007 sctp_send_abort(struct mbuf *m, int iphlen, struct sockaddr *src, struct sockaddr *dst,
13008                 struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
13009 #if defined(__FreeBSD__) && !defined(__Userspace__)
13010                 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
13011 #endif
13012                 uint32_t vrf_id, uint16_t port)
13013 {
13014 	/* Don't respond to an ABORT with an ABORT. */
13015 	if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) {
13016 		if (cause)
13017 			sctp_m_freem(cause);
13018 		return;
13019 	}
13020 	sctp_send_resp_msg(src, dst, sh, vtag, SCTP_ABORT_ASSOCIATION, cause,
13021 #if defined(__FreeBSD__) && !defined(__Userspace__)
13022 	                   mflowtype, mflowid, fibnum,
13023 #endif
13024 	                   vrf_id, port);
13025 	return;
13026 }
13027 
13028 void
13029 sctp_send_operr_to(struct sockaddr *src, struct sockaddr *dst,
13030                    struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
13031 #if defined(__FreeBSD__) && !defined(__Userspace__)
13032                    uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
13033 #endif
13034                    uint32_t vrf_id, uint16_t port)
13035 {
13036 	sctp_send_resp_msg(src, dst, sh, vtag, SCTP_OPERATION_ERROR, cause,
13037 #if defined(__FreeBSD__) && !defined(__Userspace__)
13038 	                   mflowtype, mflowid, fibnum,
13039 #endif
13040 	                   vrf_id, port);
13041 	return;
13042 }
13043 
13044 static struct mbuf *
13045 sctp_copy_resume(struct uio *uio,
13046 		 int max_send_len,
13047 #if defined(__FreeBSD__) || defined(__Userspace__)
13048 		 int user_marks_eor,
13049 #endif
13050 		 int *error,
13051 		 uint32_t *sndout,
13052 		 struct mbuf **new_tail)
13053 {
13054 #if defined(__FreeBSD__) || defined(__Userspace__)
13055 	struct mbuf *m;
13056 
13057 	m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0,
13058 		(M_PKTHDR | (user_marks_eor ? M_EOR : 0)));
13059 	if (m == NULL) {
13060 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
13061 		*error = ENOBUFS;
13062 	} else {
13063 		*sndout = m_length(m, NULL);
13064 		*new_tail = m_last(m);
13065 	}
13066 	return (m);
13067 #else
13068 	int left, cancpy, willcpy;
13069 	struct mbuf *m, *head;
13070 
13071 #if defined(__APPLE__) && !defined(__Userspace__)
13072 #if defined(APPLE_LEOPARD)
13073 	left = (int)min(uio->uio_resid, max_send_len);
13074 #else
13075 	left = (int)min(uio_resid(uio), max_send_len);
13076 #endif
13077 #else
13078 	left = (int)min(uio->uio_resid, max_send_len);
13079 #endif
13080 	/* Always get a header just in case */
13081 	head = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA);
13082 	if (head == NULL) {
13083 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
13084 		*error = ENOBUFS;
13085 		return (NULL);
13086 	}
13087 	cancpy = (int)M_TRAILINGSPACE(head);
13088 	willcpy = min(cancpy, left);
13089 	*error = uiomove(mtod(head, caddr_t), willcpy, uio);
13090 	if (*error) {
13091 		sctp_m_freem(head);
13092 		return (NULL);
13093 	}
13094 	*sndout += willcpy;
13095 	left -= willcpy;
13096 	SCTP_BUF_LEN(head) = willcpy;
13097 	m = head;
13098 	*new_tail = head;
13099 	while (left > 0) {
13100 		/* move in user data */
13101 		SCTP_BUF_NEXT(m) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA);
13102 		if (SCTP_BUF_NEXT(m) == NULL) {
13103 			sctp_m_freem(head);
13104 			*new_tail = NULL;
13105 			SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
13106 			*error = ENOBUFS;
13107 			return (NULL);
13108 		}
13109 		m = SCTP_BUF_NEXT(m);
13110 		cancpy = (int)M_TRAILINGSPACE(m);
13111 		willcpy = min(cancpy, left);
13112 		*error = uiomove(mtod(m, caddr_t), willcpy, uio);
13113 		if (*error) {
13114 			sctp_m_freem(head);
13115 			*new_tail = NULL;
13116 			SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
13117 			*error = EFAULT;
13118 			return (NULL);
13119 		}
13120 		SCTP_BUF_LEN(m) = willcpy;
13121 		left -= willcpy;
13122 		*sndout += willcpy;
13123 		*new_tail = m;
13124 		if (left == 0) {
13125 			SCTP_BUF_NEXT(m) = NULL;
13126 		}
13127 	}
13128 	return (head);
13129 #endif
13130 }
13131 
13132 static int
13133 sctp_copy_one(struct sctp_stream_queue_pending *sp,
13134               struct uio *uio,
13135               int resv_upfront)
13136 {
13137 #if defined(__FreeBSD__) || defined(__Userspace__)
13138 	sp->data = m_uiotombuf(uio, M_WAITOK, sp->length,
13139 	                       resv_upfront, 0);
13140 	if (sp->data == NULL) {
13141 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
13142 		return (ENOBUFS);
13143 	}
13144 
13145 	sp->tail_mbuf = m_last(sp->data);
13146 	return (0);
13147 #else
13148 	int left;
13149 	int cancpy, willcpy, error;
13150 	struct mbuf *m, *head;
13151 	int cpsz = 0;
13152 
13153 	/* First one gets a header */
13154 	left = sp->length;
13155 	head = m = sctp_get_mbuf_for_msg((left + resv_upfront), 0, M_WAITOK, 0, MT_DATA);
13156 	if (m == NULL) {
13157 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
13158 		return (ENOBUFS);
13159 	}
13160 	/*-
13161 	 * Add this one for m in now, that way if the alloc fails we won't
13162 	 * have a bad cnt.
13163 	 */
13164 	SCTP_BUF_RESV_UF(m, resv_upfront);
13165 	cancpy = (int)M_TRAILINGSPACE(m);
13166 	willcpy = min(cancpy, left);
13167 	while (left > 0) {
13168 		/* move in user data */
13169 		error = uiomove(mtod(m, caddr_t), willcpy, uio);
13170 		if (error) {
13171 			sctp_m_freem(head);
13172 			return (error);
13173 		}
13174 		SCTP_BUF_LEN(m) = willcpy;
13175 		left -= willcpy;
13176 		cpsz += willcpy;
13177 		if (left > 0) {
13178 			SCTP_BUF_NEXT(m) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA);
13179 			if (SCTP_BUF_NEXT(m) == NULL) {
13180 				/*
13181 				 * the head goes back to caller, he can free
13182 				 * the rest
13183 				 */
13184 				sctp_m_freem(head);
13185 				SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
13186 				return (ENOBUFS);
13187 			}
13188 			m = SCTP_BUF_NEXT(m);
13189 			cancpy = (int)M_TRAILINGSPACE(m);
13190 			willcpy = min(cancpy, left);
13191 		} else {
13192 			sp->tail_mbuf = m;
13193 			SCTP_BUF_NEXT(m) = NULL;
13194 		}
13195 	}
13196 	sp->data = head;
13197 	sp->length = cpsz;
13198 	return (0);
13199 #endif
13200 }
13201 
13202 static struct sctp_stream_queue_pending *
13203 sctp_copy_it_in(struct sctp_tcb *stcb,
13204     struct sctp_association *asoc,
13205     struct sctp_sndrcvinfo *srcv,
13206     struct uio *uio,
13207     struct sctp_nets *net,
13208     ssize_t max_send_len,
13209     int user_marks_eor,
13210     int *error)
13211 
13212 {
13213 	/*-
13214 	 * This routine must be very careful in its work. Protocol
13215 	 * processing is up and running so care must be taken to spl...()
13216 	 * when you need to do something that may effect the stcb/asoc. The
13217 	 * sb is locked however. When data is copied the protocol processing
13218 	 * should be enabled since this is a slower operation...
13219 	 */
13220 	struct sctp_stream_queue_pending *sp = NULL;
13221 	int resv_in_first;
13222 
13223 	*error = 0;
13224 	/* Now can we send this? */
13225 	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) ||
13226 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
13227 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
13228 	    (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
13229 		/* got data while shutting down */
13230 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
13231 		*error = ECONNRESET;
13232 		goto out_now;
13233 	}
13234 	sctp_alloc_a_strmoq(stcb, sp);
13235 	if (sp == NULL) {
13236 		SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
13237 		*error = ENOMEM;
13238 		goto out_now;
13239 	}
13240 	sp->act_flags = 0;
13241 	sp->sender_all_done = 0;
13242 	sp->sinfo_flags = srcv->sinfo_flags;
13243 	sp->timetolive = srcv->sinfo_timetolive;
13244 	sp->ppid = srcv->sinfo_ppid;
13245 	sp->context = srcv->sinfo_context;
13246 	sp->fsn = 0;
13247 	(void)SCTP_GETTIME_TIMEVAL(&sp->ts);
13248 
13249 	sp->sid = srcv->sinfo_stream;
13250 #if defined(__APPLE__) && !defined(__Userspace__)
13251 #if defined(APPLE_LEOPARD)
13252 	sp->length = (uint32_t)min(uio->uio_resid, max_send_len);
13253 #else
13254 	sp->length = (uint32_t)min(uio_resid(uio), max_send_len);
13255 #endif
13256 #else
13257 	sp->length = (uint32_t)min(uio->uio_resid, max_send_len);
13258 #endif
13259 #if defined(__APPLE__) && !defined(__Userspace__)
13260 #if defined(APPLE_LEOPARD)
13261 	if ((sp->length == (uint32_t)uio->uio_resid) &&
13262 #else
13263 	if ((sp->length == (uint32_t)uio_resid(uio)) &&
13264 #endif
13265 #else
13266 	if ((sp->length == (uint32_t)uio->uio_resid) &&
13267 #endif
13268 	    ((user_marks_eor == 0) ||
13269 	     (srcv->sinfo_flags & SCTP_EOF) ||
13270 	     (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
13271 		sp->msg_is_complete = 1;
13272 	} else {
13273 		sp->msg_is_complete = 0;
13274 	}
13275 	sp->sender_all_done = 0;
13276 	sp->some_taken = 0;
13277 	sp->put_last_out = 0;
13278 	resv_in_first = SCTP_DATA_CHUNK_OVERHEAD(stcb);
13279 	sp->data = sp->tail_mbuf = NULL;
13280 	if (sp->length == 0) {
13281 		goto skip_copy;
13282 	}
13283 	if (srcv->sinfo_keynumber_valid) {
13284 		sp->auth_keyid = srcv->sinfo_keynumber;
13285 	} else {
13286 		sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
13287 	}
13288 	if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
13289 		sctp_auth_key_acquire(stcb, sp->auth_keyid);
13290 		sp->holds_key_ref = 1;
13291 	}
13292 #if defined(__APPLE__) && !defined(__Userspace__)
13293 	SCTP_SOCKET_UNLOCK(SCTP_INP_SO(stcb->sctp_ep), 0);
13294 #endif
13295 	*error = sctp_copy_one(sp, uio, resv_in_first);
13296 #if defined(__APPLE__) && !defined(__Userspace__)
13297 	SCTP_SOCKET_LOCK(SCTP_INP_SO(stcb->sctp_ep), 0);
13298 #endif
13299  skip_copy:
13300 	if (*error) {
13301 #if defined(__Userspace__)
13302 		SCTP_TCB_LOCK(stcb);
13303 #endif
13304 		sctp_free_a_strmoq(stcb, sp, SCTP_SO_LOCKED);
13305 #if defined(__Userspace__)
13306 		SCTP_TCB_UNLOCK(stcb);
13307 #endif
13308 		sp = NULL;
13309 	} else {
13310 		if (sp->sinfo_flags & SCTP_ADDR_OVER) {
13311 			sp->net = net;
13312 			atomic_add_int(&sp->net->ref_count, 1);
13313 		} else {
13314 			sp->net = NULL;
13315 		}
13316 		sctp_set_prsctp_policy(sp);
13317 	}
13318 out_now:
13319 	return (sp);
13320 }
13321 
13322 int
13323 sctp_sosend(struct socket *so,
13324             struct sockaddr *addr,
13325             struct uio *uio,
13326             struct mbuf *top,
13327             struct mbuf *control,
13328 #if defined(__APPLE__) && !defined(__Userspace__)
13329             int flags
13330 #else
13331             int flags,
13332 #if defined(__FreeBSD__) && !defined(__Userspace__)
13333             struct thread *p
13334 #elif defined(_WIN32) && !defined(__Userspace__)
13335             PKTHREAD p
13336 #else
13337 #if defined(__Userspace__)
13338             /*
13339 	     * proc is a dummy in __Userspace__ and will not be passed
13340 	     * to sctp_lower_sosend
13341 	     */
13342 #endif
13343             struct proc *p
13344 #endif
13345 #endif
13346 )
13347 {
13348 #if defined(__APPLE__) && !defined(__Userspace__)
13349 	struct proc *p = current_proc();
13350 #endif
13351 	int error, use_sndinfo = 0;
13352 	struct sctp_sndrcvinfo sndrcvninfo;
13353 	struct sockaddr *addr_to_use;
13354 #if defined(INET) && defined(INET6)
13355 	struct sockaddr_in sin;
13356 #endif
13357 
13358 #if defined(__APPLE__) && !defined(__Userspace__)
13359 	SCTP_SOCKET_LOCK(so, 1);
13360 #endif
13361 	if (control) {
		/* process cmsg snd/rcv info (maybe an assoc-id) */
13363 		if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&sndrcvninfo, control,
13364 		    sizeof(sndrcvninfo))) {
13365 			/* got one */
13366 			use_sndinfo = 1;
13367 		}
13368 	}
13369 	addr_to_use = addr;
13370 #if defined(INET) && defined(INET6)
13371 	if ((addr) && (addr->sa_family == AF_INET6)) {
13372 		struct sockaddr_in6 *sin6;
13373 
13374 		sin6 = (struct sockaddr_in6 *)addr;
13375 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
13376 			in6_sin6_2_sin(&sin, sin6);
13377 			addr_to_use = (struct sockaddr *)&sin;
13378 		}
13379 	}
13380 #endif
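	/*
	 * Illustrative example (not from the original source): an
	 * IPv4-mapped IPv6 destination such as ::ffff:192.0.2.1 supplied on
	 * an AF_INET6 socket is converted above into the plain AF_INET
	 * address 192.0.2.1, so sctp_lower_sosend() only ever sees the
	 * native form.
	 */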
13381 	error = sctp_lower_sosend(so, addr_to_use, uio, top,
13382 				  control,
13383 				  flags,
				  use_sndinfo ? &sndrcvninfo : NULL
13385 #if !defined(__Userspace__)
13386 				  , p
13387 #endif
13388 		);
13389 #if defined(__APPLE__) && !defined(__Userspace__)
13390 	SCTP_SOCKET_UNLOCK(so, 1);
13391 #endif
13392 	return (error);
13393 }
13394 
13395 int
13396 sctp_lower_sosend(struct socket *so,
13397                   struct sockaddr *addr,
13398                   struct uio *uio,
13399                   struct mbuf *i_pak,
13400                   struct mbuf *control,
13401                   int flags,
13402                   struct sctp_sndrcvinfo *srcv
13403 #if !defined(__Userspace__)
13404                   ,
13405 #if defined(__FreeBSD__)
13406                   struct thread *p
13407 #elif defined(_WIN32)
13408                   PKTHREAD p
13409 #else
13410                   struct proc *p
13411 #endif
13412 #endif
13413 	)
13414 {
13415 #if defined(__FreeBSD__) && !defined(__Userspace__)
13416 	struct epoch_tracker et;
13417 #endif
13418 	ssize_t sndlen = 0, max_len, local_add_more;
13419 	int error, len;
13420 	struct mbuf *top = NULL;
13421 	int queue_only = 0, queue_only_for_init = 0;
13422 	int free_cnt_applied = 0;
13423 	int un_sent;
13424 	int now_filled = 0;
13425 	unsigned int inqueue_bytes = 0;
13426 	struct sctp_block_entry be;
13427 	struct sctp_inpcb *inp;
13428 	struct sctp_tcb *stcb = NULL;
13429 	struct timeval now;
13430 	struct sctp_nets *net;
13431 	struct sctp_association *asoc;
13432 	struct sctp_inpcb *t_inp;
13433 	int user_marks_eor;
13434 	int create_lock_applied = 0;
13435 	int nagle_applies = 0;
13436 	int some_on_control = 0;
13437 	int got_all_of_the_send = 0;
13438 	int hold_tcblock = 0;
13439 	int non_blocking = 0;
13440 	ssize_t local_soresv = 0;
13441 	uint16_t port;
13442 	uint16_t sinfo_flags;
13443 	sctp_assoc_t sinfo_assoc_id;
13444 
13445 	error = 0;
13446 	net = NULL;
13447 	stcb = NULL;
13448 	asoc = NULL;
13449 
13450 #if defined(__APPLE__) && !defined(__Userspace__)
13451 	sctp_lock_assert(so);
13452 #endif
13453 	t_inp = inp = (struct sctp_inpcb *)so->so_pcb;
13454 	if (inp == NULL) {
13455 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13456 		error = EINVAL;
13457 		if (i_pak) {
13458 			SCTP_RELEASE_PKT(i_pak);
13459 		}
13460 		return (error);
13461 	}
13462 	if ((uio == NULL) && (i_pak == NULL)) {
13463 		SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13464 		return (EINVAL);
13465 	}
13466 	user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
13467 	atomic_add_int(&inp->total_sends, 1);
13468 	if (uio) {
13469 #if defined(__APPLE__) && !defined(__Userspace__)
13470 #if defined(APPLE_LEOPARD)
13471 		if (uio->uio_resid < 0) {
13472 #else
13473 		if (uio_resid(uio) < 0) {
13474 #endif
13475 #else
13476 		if (uio->uio_resid < 0) {
13477 #endif
13478 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13479 			return (EINVAL);
13480 		}
13481 #if defined(__APPLE__) && !defined(__Userspace__)
13482 #if defined(APPLE_LEOPARD)
13483 		sndlen = uio->uio_resid;
13484 #else
13485 		sndlen = uio_resid(uio);
13486 #endif
13487 #else
13488 		sndlen = uio->uio_resid;
13489 #endif
13490 	} else {
13491 		top = SCTP_HEADER_TO_CHAIN(i_pak);
13492 		sndlen = SCTP_HEADER_LEN(i_pak);
13493 	}
13494 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "Send called addr:%p send length %zd\n",
13495 	        (void *)addr,
13496 	        sndlen);
13497 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
13498 	    SCTP_IS_LISTENING(inp)) {
13499 		/* The listener can NOT send */
13500 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
13501 		error = ENOTCONN;
13502 		goto out_unlocked;
13503 	}
13504 	/**
13505 	 * Pre-screen address, if one is given the sin-len
13506 	 * must be set correctly!
13507 	 */
13508 	if (addr) {
13509 		union sctp_sockstore *raddr = (union sctp_sockstore *)addr;
13510 		switch (raddr->sa.sa_family) {
13511 #ifdef INET
13512 		case AF_INET:
13513 #ifdef HAVE_SIN_LEN
13514 			if (raddr->sin.sin_len != sizeof(struct sockaddr_in)) {
13515 				SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13516 				error = EINVAL;
13517 				goto out_unlocked;
13518 			}
13519 #endif
13520 			port = raddr->sin.sin_port;
13521 			break;
13522 #endif
13523 #ifdef INET6
13524 		case AF_INET6:
13525 #ifdef HAVE_SIN6_LEN
13526 			if (raddr->sin6.sin6_len != sizeof(struct sockaddr_in6)) {
13527 				SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13528 				error = EINVAL;
13529 				goto out_unlocked;
13530 			}
13531 #endif
13532 			port = raddr->sin6.sin6_port;
13533 			break;
13534 #endif
13535 #if defined(__Userspace__)
13536 		case AF_CONN:
13537 #ifdef HAVE_SCONN_LEN
13538 			if (raddr->sconn.sconn_len != sizeof(struct sockaddr_conn)) {
13539 				SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13540 				error = EINVAL;
13541 				goto out_unlocked;
13542 			}
13543 #endif
13544 			port = raddr->sconn.sconn_port;
13545 			break;
13546 #endif
13547 		default:
13548 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAFNOSUPPORT);
13549 			error = EAFNOSUPPORT;
13550 			goto out_unlocked;
13551 		}
13552 	} else
13553 		port = 0;
13554 
13555 	if (srcv) {
13556 		sinfo_flags = srcv->sinfo_flags;
13557 		sinfo_assoc_id = srcv->sinfo_assoc_id;
13558 		if (INVALID_SINFO_FLAG(sinfo_flags) ||
13559 		    PR_SCTP_INVALID_POLICY(sinfo_flags)) {
13560 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13561 			error = EINVAL;
13562 			goto out_unlocked;
13563 		}
13564 		if (srcv->sinfo_flags)
13565 			SCTP_STAT_INCR(sctps_sends_with_flags);
13566 	} else {
13567 		sinfo_flags = inp->def_send.sinfo_flags;
13568 		sinfo_assoc_id = inp->def_send.sinfo_assoc_id;
13569 	}
13570 #if defined(__FreeBSD__) && !defined(__Userspace__)
13571 	if (flags & MSG_EOR) {
13572 		sinfo_flags |= SCTP_EOR;
13573 	}
13574 	if (flags & MSG_EOF) {
13575 		sinfo_flags |= SCTP_EOF;
13576 	}
13577 #endif
13578 	if (sinfo_flags & SCTP_SENDALL) {
		/* it's a sendall */
13580 		error = sctp_sendall(inp, uio, top, srcv);
13581 		top = NULL;
13582 		goto out_unlocked;
13583 	}
13584 	if ((sinfo_flags & SCTP_ADDR_OVER) && (addr == NULL)) {
13585 		SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13586 		error = EINVAL;
13587 		goto out_unlocked;
13588 	}
13589 	/* now we must find the assoc */
13590 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
13591 	    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
13592 		SCTP_INP_RLOCK(inp);
13593 		stcb = LIST_FIRST(&inp->sctp_asoc_list);
13594 		if (stcb) {
13595 			SCTP_TCB_LOCK(stcb);
13596 			hold_tcblock = 1;
13597 		}
13598 		SCTP_INP_RUNLOCK(inp);
13599 	} else if (sinfo_assoc_id) {
13600 		stcb = sctp_findassociation_ep_asocid(inp, sinfo_assoc_id, 1);
13601 		if (stcb != NULL) {
13602 			hold_tcblock = 1;
13603 		}
13604 	} else if (addr) {
13605 		/*-
13606 		 * Since we did not use findep we must
13607 		 * increment it, and if we don't find a tcb
13608 		 * decrement it.
13609 		 */
13610 		SCTP_INP_WLOCK(inp);
13611 		SCTP_INP_INCR_REF(inp);
13612 		SCTP_INP_WUNLOCK(inp);
13613 		stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
13614 		if (stcb == NULL) {
13615 			SCTP_INP_WLOCK(inp);
13616 			SCTP_INP_DECR_REF(inp);
13617 			SCTP_INP_WUNLOCK(inp);
13618 		} else {
13619 			hold_tcblock = 1;
13620 		}
13621 	}
13622 	if ((stcb == NULL) && (addr)) {
13623 		/* Possible implicit send? */
13624 		SCTP_ASOC_CREATE_LOCK(inp);
13625 		create_lock_applied = 1;
13626 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
13627 		    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
			/* Should I really unlock? */
13629 			SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13630 			error = EINVAL;
13631 			goto out_unlocked;
13632 		}
13633 		if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
13634 		    (addr->sa_family == AF_INET6)) {
13635 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13636 			error = EINVAL;
13637 			goto out_unlocked;
13638 		}
13639 		SCTP_INP_WLOCK(inp);
13640 		SCTP_INP_INCR_REF(inp);
13641 		SCTP_INP_WUNLOCK(inp);
13642 		/* With the lock applied look again */
13643 		stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
13644 #if defined(INET) || defined(INET6)
13645 		if ((stcb == NULL) && (control != NULL) && (port > 0)) {
13646 			stcb = sctp_findassociation_cmsgs(&t_inp, port, control, &net, &error);
13647 		}
13648 #endif
13649 		if (stcb == NULL) {
13650 			SCTP_INP_WLOCK(inp);
13651 			SCTP_INP_DECR_REF(inp);
13652 			SCTP_INP_WUNLOCK(inp);
13653 		} else {
13654 			hold_tcblock = 1;
13655 		}
13656 		if (error) {
13657 			goto out_unlocked;
13658 		}
13659 		if (t_inp != inp) {
13660 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
13661 			error = ENOTCONN;
13662 			goto out_unlocked;
13663 		}
13664 	}
13665 	if (stcb == NULL) {
13666 		if (addr == NULL) {
13667 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
13668 			error = ENOENT;
13669 			goto out_unlocked;
13670 		} else {
13671 			/* We must go ahead and start the INIT process */
13672 			uint32_t vrf_id;
13673 
13674 			if ((sinfo_flags & SCTP_ABORT) ||
13675 			    ((sinfo_flags & SCTP_EOF) && (sndlen == 0))) {
13676 				/*-
13677 				 * User asks to abort a non-existant assoc,
13678 				 * or EOF a non-existant assoc with no data
13679 				 */
13680 				SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
13681 				error = ENOENT;
13682 				goto out_unlocked;
13683 			}
13684 			/* get an asoc/stcb struct */
13685 			vrf_id = inp->def_vrf_id;
13686 #ifdef INVARIANTS
13687 			if (create_lock_applied == 0) {
13688 				panic("Error, should hold create lock and I don't?");
13689 			}
13690 #endif
13691 			stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id,
13692 			                       inp->sctp_ep.pre_open_stream_count,
13693 			                       inp->sctp_ep.port,
13694 #if !defined(__Userspace__)
13695 			                       p,
13696 #else
13697 			                       (struct proc *)NULL,
13698 #endif
13699 			                       SCTP_INITIALIZE_AUTH_PARAMS);
13700 			if (stcb == NULL) {
				/* Error is set up for us in the call */
13702 				goto out_unlocked;
13703 			}
13704 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
13705 				stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
13706 				/* Set the connected flag so we can queue data */
13707 				soisconnecting(so);
13708 			}
13709 			hold_tcblock = 1;
13710 			if (create_lock_applied) {
13711 				SCTP_ASOC_CREATE_UNLOCK(inp);
13712 				create_lock_applied = 0;
13713 			} else {
13714 				SCTP_PRINTF("Huh-3? create lock should have been on??\n");
13715 			}
13716 			/* Turn on queue only flag to prevent data from being sent */
13717 			queue_only = 1;
13718 			asoc = &stcb->asoc;
13719 			SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
13720 			(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
13721 
13722 			if (control) {
13723 				if (sctp_process_cmsgs_for_init(stcb, control, &error)) {
13724 					sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE,
13725 					                SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_6);
13726 					hold_tcblock = 0;
13727 					stcb = NULL;
13728 					goto out_unlocked;
13729 				}
13730 			}
13731 			/* out with the INIT */
13732 			queue_only_for_init = 1;
13733 			/*-
13734 			 * we may want to dig in after this call and adjust the MTU
13735 			 * value. It defaulted to 1500 (constant) but the ro
13736 			 * structure may now have an update and thus we may need to
13737 			 * change it BEFORE we append the message.
13738 			 */
13739 		}
13740 	} else
13741 		asoc = &stcb->asoc;
13742 	if (srcv == NULL) {
13743 		srcv = (struct sctp_sndrcvinfo *)&asoc->def_send;
13744 		sinfo_flags = srcv->sinfo_flags;
13745 #if defined(__FreeBSD__) && !defined(__Userspace__)
13746 		if (flags & MSG_EOR) {
13747 			sinfo_flags |= SCTP_EOR;
13748 		}
13749 		if (flags & MSG_EOF) {
13750 			sinfo_flags |= SCTP_EOF;
13751 		}
13752 #endif
13753 	}
13754 	if (sinfo_flags & SCTP_ADDR_OVER) {
13755 		if (addr)
13756 			net = sctp_findnet(stcb, addr);
13757 		else
13758 			net = NULL;
13759 		if ((net == NULL) ||
13760 		    ((port != 0) && (port != stcb->rport))) {
13761 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13762 			error = EINVAL;
13763 			goto out_unlocked;
13764 		}
13765 	} else {
13766 		if (stcb->asoc.alternate) {
13767 			net = stcb->asoc.alternate;
13768 		} else {
13769 			net = stcb->asoc.primary_destination;
13770 		}
13771 	}
13772 	atomic_add_int(&stcb->total_sends, 1);
13773 	/* Keep the stcb from being freed under our feet */
13774 	atomic_add_int(&asoc->refcnt, 1);
13775 	free_cnt_applied = 1;
13776 
13777 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT)) {
13778 		if (sndlen > (ssize_t)asoc->smallest_mtu) {
13779 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
13780 			error = EMSGSIZE;
13781 			goto out_unlocked;
13782 		}
13783 	}
13784 #if defined(__Userspace__)
13785 	if (inp->recv_callback) {
13786 		non_blocking = 1;
13787 	}
13788 #endif
13789 	if (SCTP_SO_IS_NBIO(so)
13790 #if defined(__FreeBSD__) && !defined(__Userspace__)
13791 	     || (flags & (MSG_NBIO | MSG_DONTWAIT)) != 0
13792 #endif
13793 	    ) {
13794 		non_blocking = 1;
13795 	}
13796 	/* would we block? */
13797 	if (non_blocking) {
13798 		ssize_t amount;
13799 
13800 		if (hold_tcblock == 0) {
13801 			SCTP_TCB_LOCK(stcb);
13802 			hold_tcblock = 1;
13803 		}
13804 		inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
13805 		if (user_marks_eor == 0) {
13806 			amount = sndlen;
13807 		} else {
13808 			amount = 1;
13809 		}
		if ((SCTP_SB_LIMIT_SND(so) < (amount + inqueue_bytes + stcb->asoc.sb_send_resv)) ||
13811 		    (stcb->asoc.chunks_on_out_queue >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
13812 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EWOULDBLOCK);
13813 			if (sndlen > (ssize_t)SCTP_SB_LIMIT_SND(so))
13814 				error = EMSGSIZE;
13815 			else
13816 				error = EWOULDBLOCK;
13817 			goto out_unlocked;
13818 		}
13819 		stcb->asoc.sb_send_resv += (uint32_t)sndlen;
13820 		SCTP_TCB_UNLOCK(stcb);
13821 		hold_tcblock = 0;
13822 	} else {
13823 		atomic_add_int(&stcb->asoc.sb_send_resv, sndlen);
13824 	}
13825 	local_soresv = sndlen;
13826 	if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13827 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
13828 		error = ECONNRESET;
13829 		goto out_unlocked;
13830 	}
13831 	if (create_lock_applied) {
13832 		SCTP_ASOC_CREATE_UNLOCK(inp);
13833 		create_lock_applied = 0;
13834 	}
	/* Is the stream number valid? */
13836 	if (srcv->sinfo_stream >= asoc->streamoutcnt) {
13837 		/* Invalid stream number */
13838 		SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13839 		error = EINVAL;
13840 		goto out_unlocked;
13841 	}
13842 	if ((asoc->strmout[srcv->sinfo_stream].state != SCTP_STREAM_OPEN) &&
13843 	    (asoc->strmout[srcv->sinfo_stream].state != SCTP_STREAM_OPENING)) {
13844 		/*
13845 		 * Can't queue any data while stream reset is underway.
13846 		 */
13847 		if (asoc->strmout[srcv->sinfo_stream].state > SCTP_STREAM_OPEN) {
13848 			error = EAGAIN;
13849 		} else {
13850 			error = EINVAL;
13851 		}
13852 		SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, error);
13853 		goto out_unlocked;
13854 	}
13855 	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
13856 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
13857 		queue_only = 1;
13858 	}
13859 	/* we are now done with all control */
13860 	if (control) {
13861 		sctp_m_freem(control);
13862 		control = NULL;
13863 	}
13864 	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) ||
13865 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
13866 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
13867 	    (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
13868 		if (sinfo_flags & SCTP_ABORT) {
13869 			;
13870 		} else {
13871 			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
13872 			error = ECONNRESET;
13873 			goto out_unlocked;
13874 		}
13875 	}
13876 	/* Ok, we will attempt a msgsnd :> */
13877 #if !(defined(_WIN32) || defined(__Userspace__))
13878 	if (p) {
13879 #if defined(__FreeBSD__)
13880 		p->td_ru.ru_msgsnd++;
13881 #else
13882 		p->p_stats->p_ru.ru_msgsnd++;
13883 #endif
13884 	}
13885 #endif
13886 	/* Are we aborting? */
13887 	if (sinfo_flags & SCTP_ABORT) {
13888 		struct mbuf *mm;
13889 		ssize_t tot_demand, tot_out = 0, max_out;
13890 
13891 		SCTP_STAT_INCR(sctps_sends_with_abort);
13892 		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
13893 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
13894 			/* It has to be up before we abort */
13895 			/* how big is the user initiated abort? */
13896 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13897 			error = EINVAL;
13898 			goto out;
13899 		}
13900 		if (hold_tcblock) {
13901 			SCTP_TCB_UNLOCK(stcb);
13902 			hold_tcblock = 0;
13903 		}
13904 		if (top) {
13905 			struct mbuf *cntm = NULL;
13906 
13907 			mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_WAITOK, 1, MT_DATA);
13908 			if (sndlen != 0) {
13909 				for (cntm = top; cntm; cntm = SCTP_BUF_NEXT(cntm)) {
13910 					tot_out += SCTP_BUF_LEN(cntm);
13911 				}
13912 			}
13913 		} else {
			/* Must fit in an MTU */
13915 			tot_out = sndlen;
13916 			tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
13917 			if (tot_demand > SCTP_DEFAULT_ADD_MORE) {
				/* Too big */
13919 				SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
13920 				error = EMSGSIZE;
13921 				goto out;
13922 			}
13923 			mm = sctp_get_mbuf_for_msg((unsigned int)tot_demand, 0, M_WAITOK, 1, MT_DATA);
13924 		}
13925 		if (mm == NULL) {
13926 			SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
13927 			error = ENOMEM;
13928 			goto out;
13929 		}
13930 		max_out = asoc->smallest_mtu - sizeof(struct sctp_paramhdr);
13931 		max_out -= sizeof(struct sctp_abort_msg);
13932 		if (tot_out > max_out) {
13933 			tot_out = max_out;
13934 		}
13935 		if (mm) {
13936 			struct sctp_paramhdr *ph;
13937 
13938 			/* now move forward the data pointer */
13939 			ph = mtod(mm, struct sctp_paramhdr *);
13940 			ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
13941 			ph->param_length = htons((uint16_t)(sizeof(struct sctp_paramhdr) + tot_out));
13942 			ph++;
13943 			SCTP_BUF_LEN(mm) = (int)(tot_out + sizeof(struct sctp_paramhdr));
13944 			if (top == NULL) {
13945 #if defined(__APPLE__) && !defined(__Userspace__)
13946 				SCTP_SOCKET_UNLOCK(so, 0);
13947 #endif
13948 				error = uiomove((caddr_t)ph, (int)tot_out, uio);
13949 #if defined(__APPLE__) && !defined(__Userspace__)
13950 				SCTP_SOCKET_LOCK(so, 0);
13951 #endif
13952 				if (error) {
13953 					/*-
13954 					 * Here if we can't get his data we
13955 					 * still abort we just don't get to
13956 					 * send the users note :-0
13957 					 */
13958 					sctp_m_freem(mm);
13959 					mm = NULL;
13960 				}
13961 			} else {
13962 				if (sndlen != 0) {
13963 					SCTP_BUF_NEXT(mm) = top;
13964 				}
13965 			}
13966 		}
13967 		if (hold_tcblock == 0) {
13968 			SCTP_TCB_LOCK(stcb);
13969 		}
13970 		atomic_add_int(&stcb->asoc.refcnt, -1);
13971 		free_cnt_applied = 0;
13972 		/* release this lock, otherwise we hang on ourselves */
13973 #if defined(__FreeBSD__) && !defined(__Userspace__)
13974 		NET_EPOCH_ENTER(et);
13975 #endif
13976 		sctp_abort_an_association(stcb->sctp_ep, stcb, mm, SCTP_SO_LOCKED);
13977 #if defined(__FreeBSD__) && !defined(__Userspace__)
13978 		NET_EPOCH_EXIT(et);
13979 #endif
13980 		/* now relock the stcb so everything is sane */
13981 		hold_tcblock = 0;
13982 		stcb = NULL;
		/*
		 * In this case top is already chained to mm, so clear it to
		 * avoid a double free: we free top below if top != NULL, and
		 * the driver would free it after sending the packet out.
		 */
13988 		if (sndlen != 0) {
13989 			top = NULL;
13990 		}
13991 		goto out_unlocked;
13992 	}
13993 	/* Calculate the maximum we can send */
13994 	inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
13995 	if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
13996 		max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
13997 	} else {
13998 		max_len = 0;
13999 	}
14000 	if (hold_tcblock) {
14001 		SCTP_TCB_UNLOCK(stcb);
14002 		hold_tcblock = 0;
14003 	}
14004 	if (asoc->strmout == NULL) {
14005 		/* huh? software error */
14006 		SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
14007 		error = EFAULT;
14008 		goto out_unlocked;
14009 	}
14010 
14011 	/* Unless E_EOR mode is on, we must make a send FIT in one call. */
14012 	if ((user_marks_eor == 0) &&
14013 	    (sndlen > (ssize_t)SCTP_SB_LIMIT_SND(stcb->sctp_socket))) {
14014 		/* It will NEVER fit */
14015 		SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
14016 		error = EMSGSIZE;
14017 		goto out_unlocked;
14018 	}
14019 	if ((uio == NULL) && user_marks_eor) {
14020 		/*-
14021 		 * We do not support eeor mode for
14022 		 * sending with mbuf chains (like sendfile).
14023 		 */
14024 		SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
14025 		error = EINVAL;
14026 		goto out_unlocked;
14027 	}
14028 
14029 	if (user_marks_eor) {
14030 		local_add_more = (ssize_t)min(SCTP_SB_LIMIT_SND(so), SCTP_BASE_SYSCTL(sctp_add_more_threshold));
14031 	} else {
14032 		/*-
14033 		 * For non-eeor the whole message must fit in
14034 		 * the socket send buffer.
14035 		 */
14036 		local_add_more = sndlen;
14037 	}
14038 	len = 0;
14039 	if (non_blocking) {
14040 		goto skip_preblock;
14041 	}
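	/* Pre-block: if the message (or, for explicit EOR, the add-more
	 * threshold) does not currently fit in the send buffer, or the
	 * queued chunk limit has been reached, sleep on the socket send
	 * buffer until space frees up.
	 */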
14042 	if (((max_len <= local_add_more) &&
14043 	     ((ssize_t)SCTP_SB_LIMIT_SND(so) >= local_add_more)) ||
14044 	    (max_len == 0) ||
14045 	    ((stcb->asoc.chunks_on_out_queue+stcb->asoc.stream_queue_cnt) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
		/* No room right now! */
14047 		SOCKBUF_LOCK(&so->so_snd);
14048 		inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
14049 		while ((SCTP_SB_LIMIT_SND(so) < (inqueue_bytes + local_add_more)) ||
14050 		       ((stcb->asoc.stream_queue_cnt + stcb->asoc.chunks_on_out_queue) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
14051 			SCTPDBG(SCTP_DEBUG_OUTPUT1,"pre_block limit:%u <(inq:%d + %zd) || (%d+%d > %d)\n",
14052 			        (unsigned int)SCTP_SB_LIMIT_SND(so),
14053 			        inqueue_bytes,
14054 			        local_add_more,
14055 			        stcb->asoc.stream_queue_cnt,
14056 			        stcb->asoc.chunks_on_out_queue,
14057 			        SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue));
14058 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
14059 				sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA, asoc, sndlen);
14060 			}
14061 			be.error = 0;
14062 #if !(defined(_WIN32) && !defined(__Userspace__))
14063 			stcb->block_entry = &be;
14064 #endif
14065 			error = sbwait(&so->so_snd);
14066 			stcb->block_entry = NULL;
14067 			if (error || so->so_error || be.error) {
14068 				if (error == 0) {
14069 					if (so->so_error)
14070 						error = so->so_error;
14071 					if (be.error) {
14072 						error = be.error;
14073 					}
14074 				}
14075 				SOCKBUF_UNLOCK(&so->so_snd);
14076 				goto out_unlocked;
14077 			}
14078 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
14079 				sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
14080 				               asoc, stcb->asoc.total_output_queue_size);
14081 			}
14082 			if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
14083 				SOCKBUF_UNLOCK(&so->so_snd);
14084 				goto out_unlocked;
14085 			}
14086 			inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
14087 		}
14088 		if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
			max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
14090 		} else {
14091 			max_len = 0;
14092 		}
14093 		SOCKBUF_UNLOCK(&so->so_snd);
14094 	}
14095 
14096 skip_preblock:
14097 	if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
14098 		goto out_unlocked;
14099 	}
14100 #if defined(__APPLE__) && !defined(__Userspace__)
14101 	error = sblock(&so->so_snd, SBLOCKWAIT(flags));
14102 #endif
	/* sndlen covers the mbuf case,
	 * uio_resid covers the non-mbuf case.
	 * NOTE: uio will be NULL when top (an mbuf chain) is passed.
	 */
14107 	if (sndlen == 0) {
14108 		if (sinfo_flags & SCTP_EOF) {
14109 			got_all_of_the_send = 1;
14110 			goto dataless_eof;
14111 		} else {
14112 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
14113 			error = EINVAL;
14114 			goto out;
14115 		}
14116 	}
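	/* top == NULL: copy the data in from the user's uio below.
	 * Otherwise an mbuf chain was handed in and is appended directly
	 * via sctp_msg_append() in the else branch.
	 */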
14117 	if (top == NULL) {
14118 		struct sctp_stream_queue_pending *sp;
14119 		struct sctp_stream_out *strm;
14120 		uint32_t sndout;
14121 
14122 		SCTP_TCB_SEND_LOCK(stcb);
14123 		if ((asoc->stream_locked) &&
14124 		    (asoc->stream_locked_on  != srcv->sinfo_stream)) {
14125 			SCTP_TCB_SEND_UNLOCK(stcb);
14126 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
14127 			error = EINVAL;
14128 			goto out;
14129 		}
14130 		strm = &stcb->asoc.strmout[srcv->sinfo_stream];
14131 		if (strm->last_msg_incomplete == 0) {
14132 		do_a_copy_in:
14133 			SCTP_TCB_SEND_UNLOCK(stcb);
14134 			sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error);
14135 			if (error) {
14136 				goto out;
14137 			}
14138 			SCTP_TCB_SEND_LOCK(stcb);
14139 			if (sp->msg_is_complete) {
14140 				strm->last_msg_incomplete = 0;
14141 				asoc->stream_locked = 0;
14142 			} else {
				/* The sender is now locked to this stream in
				 * case the copy-in was interrupted.
				 */
14146 				strm->last_msg_incomplete = 1;
14147 				if (stcb->asoc.idata_supported == 0) {
14148 					asoc->stream_locked = 1;
14149 					asoc->stream_locked_on  = srcv->sinfo_stream;
14150 				}
14151 				sp->sender_all_done = 0;
14152 			}
14153 			sctp_snd_sb_alloc(stcb, sp->length);
14154 			atomic_add_int(&asoc->stream_queue_cnt, 1);
14155 			if (sinfo_flags & SCTP_UNORDERED) {
14156 				SCTP_STAT_INCR(sctps_sends_with_unord);
14157 			}
14158 			sp->processing = 1;
14159 			TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
14160 			stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, asoc, strm, sp, 1);
14161 		} else {
14162 			sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead);
14163 			if (sp == NULL) {
14164 				/* ???? Huh ??? last msg is gone */
14165 #ifdef INVARIANTS
14166 				panic("Warning: Last msg marked incomplete, yet nothing left?");
14167 #else
14168 				SCTP_PRINTF("Warning: Last msg marked incomplete, yet nothing left?\n");
14169 				strm->last_msg_incomplete = 0;
14170 #endif
14171 				goto do_a_copy_in;
14172 			}
14173 			if (sp->processing) {
14174 				SCTP_TCB_SEND_UNLOCK(stcb);
14175 				SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
14176 				error = EINVAL;
14177 				goto out;
14178 			} else {
14179 				sp->processing = 1;
14180 			}
14181 		}
14182 		SCTP_TCB_SEND_UNLOCK(stcb);
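		/* Copy the user data in as send-buffer space allows, appending
		 * each piece to the stream queue pending entry (sp) until the
		 * uio has been drained.
		 */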
14183 #if defined(__APPLE__) && !defined(__Userspace__)
14184 #if defined(APPLE_LEOPARD)
14185 		while (uio->uio_resid > 0) {
14186 #else
14187 		while (uio_resid(uio) > 0) {
14188 #endif
14189 #else
14190 		while (uio->uio_resid > 0) {
14191 #endif
14192 			/* How much room do we have? */
14193 			struct mbuf *new_tail, *mm;
14194 
14195 			inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
14196 			if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes)
14197 				max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
14198 			else
14199 				max_len = 0;
14200 
14201 			if ((max_len > (ssize_t)SCTP_BASE_SYSCTL(sctp_add_more_threshold)) ||
14202 			    (max_len && (SCTP_SB_LIMIT_SND(so) < SCTP_BASE_SYSCTL(sctp_add_more_threshold))) ||
14203 #if defined(__APPLE__) && !defined(__Userspace__)
14204 #if defined(APPLE_LEOPARD)
14205 			    (uio->uio_resid && (uio->uio_resid <= max_len))) {
14206 #else
14207 			    (uio_resid(uio) && (uio_resid(uio) <= max_len))) {
14208 #endif
14209 #else
14210 			    (uio->uio_resid && (uio->uio_resid <= max_len))) {
14211 #endif
14212 				sndout = 0;
14213 				new_tail = NULL;
14214 				if (hold_tcblock) {
14215 					SCTP_TCB_UNLOCK(stcb);
14216 					hold_tcblock = 0;
14217 				}
14218 #if defined(__APPLE__) && !defined(__Userspace__)
14219 				SCTP_SOCKET_UNLOCK(so, 0);
14220 #endif
14221 #if defined(__FreeBSD__) || defined(__Userspace__)
14222 				mm = sctp_copy_resume(uio, (int)max_len, user_marks_eor, &error, &sndout, &new_tail);
14223 #else
14224 				mm = sctp_copy_resume(uio, (int)max_len, &error, &sndout, &new_tail);
14225 #endif
14226 #if defined(__APPLE__) && !defined(__Userspace__)
14227 				SCTP_SOCKET_LOCK(so, 0);
14228 #endif
14229 				if ((mm == NULL) || error) {
14230 					if (mm) {
14231 						sctp_m_freem(mm);
14232 					}
14233 					SCTP_TCB_SEND_LOCK(stcb);
14234 					if (((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0) &&
14235 					    ((stcb->asoc.state & SCTP_STATE_WAS_ABORTED) == 0) &&
14236 					    (sp != NULL)) {
14237 						sp->processing = 0;
14238 					}
14239 					SCTP_TCB_SEND_UNLOCK(stcb);
14240 					goto out;
14241 				}
14242 				/* Update the mbuf and count */
14243 				SCTP_TCB_SEND_LOCK(stcb);
14244 				if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
14245 				    (stcb->asoc.state & SCTP_STATE_WAS_ABORTED)) {
14246 					/* we need to get out.
14247 					 * Peer probably aborted.
14248 					 */
14249 					sctp_m_freem(mm);
14250 					if (stcb->asoc.state & SCTP_STATE_WAS_ABORTED) {
14251 						SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
14252 						error = ECONNRESET;
14253 					}
14254 					SCTP_TCB_SEND_UNLOCK(stcb);
14255 					goto out;
14256 				}
14257 				if (sp->tail_mbuf) {
14258 					/* tack it to the end */
14259 					SCTP_BUF_NEXT(sp->tail_mbuf) = mm;
14260 					sp->tail_mbuf = new_tail;
14261 				} else {
14262 					/* A stolen mbuf */
14263 					sp->data = mm;
14264 					sp->tail_mbuf = new_tail;
14265 				}
14266 				sctp_snd_sb_alloc(stcb, sndout);
14267 				atomic_add_int(&sp->length, sndout);
14268 				len += sndout;
14269 				if (sinfo_flags & SCTP_SACK_IMMEDIATELY) {
14270 					sp->sinfo_flags |= SCTP_SACK_IMMEDIATELY;
14271 				}
14272 
14273 				/* Did we reach EOR? */
14274 #if defined(__APPLE__) && !defined(__Userspace__)
14275 #if defined(APPLE_LEOPARD)
14276 				if ((uio->uio_resid == 0) &&
14277 #else
14278 				if ((uio_resid(uio) == 0) &&
14279 #endif
14280 #else
14281 				if ((uio->uio_resid == 0) &&
14282 #endif
14283 				    ((user_marks_eor == 0) ||
14284 				     (sinfo_flags & SCTP_EOF) ||
14285 				     (user_marks_eor && (sinfo_flags & SCTP_EOR)))) {
14286 					sp->msg_is_complete = 1;
14287 				} else {
14288 					sp->msg_is_complete = 0;
14289 				}
14290 				SCTP_TCB_SEND_UNLOCK(stcb);
14291 			}
14292 #if defined(__APPLE__) && !defined(__Userspace__)
14293 #if defined(APPLE_LEOPARD)
14294 			if (uio->uio_resid == 0) {
14295 #else
14296 			if (uio_resid(uio) == 0) {
14297 #endif
14298 #else
14299 			if (uio->uio_resid == 0) {
14300 #endif
14301 				/* got it all? */
14302 				continue;
14303 			}
14304 			/* PR-SCTP? */
14305 			if ((asoc->prsctp_supported) && (asoc->sent_queue_cnt_removeable > 0)) {
				/* This is ugly, but we must preserve the locking order */
14307 				if (hold_tcblock == 0) {
14308 					SCTP_TCB_LOCK(stcb);
14309 					hold_tcblock = 1;
14310 				}
14311 				sctp_prune_prsctp(stcb, asoc, srcv, (int)sndlen);
14312 				inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
14313 				if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes)
14314 					max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
14315 				else
14316 					max_len = 0;
14317 				if (max_len > 0) {
14318 					continue;
14319 				}
14320 				SCTP_TCB_UNLOCK(stcb);
14321 				hold_tcblock = 0;
14322 			}
14323 			/* wait for space now */
14324 			if (non_blocking) {
				/* Non-blocking I/O: we cannot block for space, so skip out */
14326 				SCTP_TCB_SEND_LOCK(stcb);
14327 				if (sp != NULL) {
14328 					sp->processing = 0;
14329 				}
14330 				SCTP_TCB_SEND_UNLOCK(stcb);
14331 				goto skip_out_eof;
14332 			}
			/* What about the INIT? Send it if needed. */
14334 			if (queue_only_for_init) {
14335 				if (hold_tcblock == 0) {
14336 					SCTP_TCB_LOCK(stcb);
14337 					hold_tcblock = 1;
14338 				}
14339 				if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
14340 					/* a collision took us forward? */
14341 					queue_only = 0;
14342 				} else {
14343 #if defined(__FreeBSD__) && !defined(__Userspace__)
14344 					NET_EPOCH_ENTER(et);
14345 #endif
14346 					sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
14347 #if defined(__FreeBSD__) && !defined(__Userspace__)
14348 					NET_EPOCH_EXIT(et);
14349 #endif
14350 					SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
14351 					queue_only = 1;
14352 				}
14353 			}
14354 			if ((net->flight_size > net->cwnd) &&
14355 			    (asoc->sctp_cmt_on_off == 0)) {
14356 				SCTP_STAT_INCR(sctps_send_cwnd_avoid);
14357 				queue_only = 1;
14358 			} else if (asoc->ifp_had_enobuf) {
14359 				SCTP_STAT_INCR(sctps_ifnomemqueued);
14360 				if (net->flight_size > (2 * net->mtu)) {
14361 					queue_only = 1;
14362 				}
14363 				asoc->ifp_had_enobuf = 0;
14364 			}
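			/* un_sent is the amount queued but not yet in flight.
			 * With Nagle enabled (NODELAY off), data is held back
			 * only while data is already in flight and less than
			 * a full segment is pending.
			 */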
14365 			un_sent = stcb->asoc.total_output_queue_size - stcb->asoc.total_flight;
14366 			if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
14367 			    (stcb->asoc.total_flight > 0) &&
14368 			    (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
14369 			    (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
14370 				/*-
14371 				 * Ok, Nagle is set on and we have data outstanding.
14372 				 * Don't send anything and let SACKs drive out the
14373 				 * data unless we have a "full" segment to send.
14374 				 */
14375 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
14376 					sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
14377 				}
14378 				SCTP_STAT_INCR(sctps_naglequeued);
14379 				nagle_applies = 1;
14380 			} else {
14381 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
14382 					if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
14383 						sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
14384 				}
14385 				SCTP_STAT_INCR(sctps_naglesent);
14386 				nagle_applies = 0;
14387 			}
14388 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
14389 				sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
14390 					       nagle_applies, un_sent);
14391 				sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
14392 					       stcb->asoc.total_flight,
14393 					       stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
14394 			}
14395 			if (queue_only_for_init)
14396 				queue_only_for_init = 0;
14397 			if ((queue_only == 0) && (nagle_applies == 0)) {
				/*-
				 * We need to start chunk output before
				 * blocking. Note that if a lock is already
				 * applied, input processing from the network
				 * is in progress and will start output, so
				 * we don't need to.
				 */
14405 #if defined(__FreeBSD__) && !defined(__Userspace__)
14406 				NET_EPOCH_ENTER(et);
14407 #endif
14408 				if (hold_tcblock == 0) {
14409 					if (SCTP_TCB_TRYLOCK(stcb)) {
14410 						hold_tcblock = 1;
14411 						sctp_chunk_output(inp,
14412 								  stcb,
14413 								  SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
14414 					}
14415 				} else {
14416 					sctp_chunk_output(inp,
14417 							  stcb,
14418 							  SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
14419 				}
14420 #if defined(__FreeBSD__) && !defined(__Userspace__)
14421 				NET_EPOCH_EXIT(et);
14422 #endif
14423 			}
14424 			if (hold_tcblock == 1) {
14425 				SCTP_TCB_UNLOCK(stcb);
14426 				hold_tcblock = 0;
14427 			}
14428 			SOCKBUF_LOCK(&so->so_snd);
			/*-
			 * This is a bit strange, but should work. The
			 * total_output_queue_size is protected by the
			 * TCB_LOCK, which we just released. There is a race
			 * between releasing it above and getting the socket
			 * lock here, where SACKs can come in before we have
			 * set SB_WAIT on the so_snd buffer to get the wakeup.
			 * After the socket buffer lock is applied, SACK
			 * processing must also lock so_snd to do the actual
			 * sowwakeup(), so once we hold the socket buffer lock
			 * and recheck the size, we know we can sleep safely
			 * with the wakeup flag in place.
			 */
14443 			inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
14444 			if (SCTP_SB_LIMIT_SND(so) <= (inqueue_bytes +
14445 						      min(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTP_SB_LIMIT_SND(so)))) {
14446 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
14447 #if defined(__APPLE__) && !defined(__Userspace__)
14448 #if defined(APPLE_LEOPARD)
14449 					sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
14450 						       asoc, uio->uio_resid);
14451 #else
14452 					sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
14453 						       asoc, uio_resid(uio));
14454 #endif
14455 #else
14456 					sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
14457 						       asoc, uio->uio_resid);
14458 #endif
14459 				}
14460 				be.error = 0;
14461 #if !(defined(_WIN32) && !defined(__Userspace__))
14462 				stcb->block_entry = &be;
14463 #endif
14464 #if defined(__APPLE__) && !defined(__Userspace__)
14465 				sbunlock(&so->so_snd, 1);
14466 #endif
14467 				error = sbwait(&so->so_snd);
14468 				stcb->block_entry = NULL;
14469 
14470 				if (error || so->so_error || be.error) {
14471 					if (error == 0) {
14472 						if (so->so_error)
14473 							error = so->so_error;
14474 						if (be.error) {
14475 							error = be.error;
14476 						}
14477 					}
14478 					SOCKBUF_UNLOCK(&so->so_snd);
14479 					SCTP_TCB_SEND_LOCK(stcb);
14480 					if (((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0) &&
14481 					    ((stcb->asoc.state & SCTP_STATE_WAS_ABORTED) == 0) &&
14482 					    (sp != NULL)) {
14483 						sp->processing = 0;
14484 					}
14485 					SCTP_TCB_SEND_UNLOCK(stcb);
14486 					goto out_unlocked;
14487 				}
14488 
14489 #if defined(__APPLE__) && !defined(__Userspace__)
14490 				error = sblock(&so->so_snd, SBLOCKWAIT(flags));
14491 #endif
14492 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
14493 					sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
14494 						       asoc, stcb->asoc.total_output_queue_size);
14495 				}
14496 			}
14497 			SOCKBUF_UNLOCK(&so->so_snd);
14498 			SCTP_TCB_SEND_LOCK(stcb);
14499 			if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
14500 			    (stcb->asoc.state & SCTP_STATE_WAS_ABORTED)) {
14501 				SCTP_TCB_SEND_UNLOCK(stcb);
14502 				goto out_unlocked;
14503 			}
14504 			SCTP_TCB_SEND_UNLOCK(stcb);
14505 		}
14506 		SCTP_TCB_SEND_LOCK(stcb);
14507 		if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
14508 		    (stcb->asoc.state & SCTP_STATE_WAS_ABORTED)) {
14509 			SCTP_TCB_SEND_UNLOCK(stcb);
14510 			goto out_unlocked;
14511 		}
14512 		if (sp) {
14513 			if (sp->msg_is_complete == 0) {
14514 				strm->last_msg_incomplete = 1;
14515 				if (stcb->asoc.idata_supported == 0) {
14516 					asoc->stream_locked = 1;
14517 					asoc->stream_locked_on  = srcv->sinfo_stream;
14518 				}
14519 			} else {
14520 				sp->sender_all_done = 1;
14521 				strm->last_msg_incomplete = 0;
14522 				asoc->stream_locked = 0;
14523 			}
14524 			sp->processing = 0;
14525 		} else {
14526 			SCTP_PRINTF("Huh no sp TSNH?\n");
14527 			strm->last_msg_incomplete = 0;
14528 			asoc->stream_locked = 0;
14529 		}
14530 		SCTP_TCB_SEND_UNLOCK(stcb);
14531 #if defined(__APPLE__) && !defined(__Userspace__)
14532 #if defined(APPLE_LEOPARD)
14533 		if (uio->uio_resid == 0) {
14534 #else
14535 		if (uio_resid(uio) == 0) {
14536 #endif
14537 #else
14538 		if (uio->uio_resid == 0) {
14539 #endif
14540 			got_all_of_the_send = 1;
14541 		}
14542 	} else {
14543 		/* We send in a 0, since we do NOT have any locks */
14544 		error = sctp_msg_append(stcb, net, top, srcv, 0);
14545 		top = NULL;
14546 		if (sinfo_flags & SCTP_EOF) {
14547 			got_all_of_the_send = 1;
14548 		}
14549 	}
14550 	if (error) {
14551 		goto out;
14552 	}
14553 dataless_eof:
14554 	/* EOF thing ? */
14555 	if ((sinfo_flags & SCTP_EOF) &&
14556 	    (got_all_of_the_send == 1)) {
14557 		SCTP_STAT_INCR(sctps_sends_with_eof);
14558 		error = 0;
14559 		if (hold_tcblock == 0) {
14560 			SCTP_TCB_LOCK(stcb);
14561 			hold_tcblock = 1;
14562 		}
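		/* If nothing is queued or unsent any more, move to
		 * SHUTDOWN-SENT and send a SHUTDOWN now; otherwise mark the
		 * association SHUTDOWN-PENDING below.
		 */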
14563 		if (TAILQ_EMPTY(&asoc->send_queue) &&
14564 		    TAILQ_EMPTY(&asoc->sent_queue) &&
14565 		    sctp_is_there_unsent_data(stcb, SCTP_SO_LOCKED) == 0) {
14566 			if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) {
14567 				goto abort_anyway;
14568 			}
14569 			/* there is nothing queued to send, so I'm done... */
14570 			if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
14571 			    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
14572 			    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
14573 				struct sctp_nets *netp;
14574 
14575 				/* only send SHUTDOWN the first time through */
14576 				if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
14577 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
14578 				}
14579 				SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
14580 				sctp_stop_timers_for_shutdown(stcb);
14581 				if (stcb->asoc.alternate) {
14582 					netp = stcb->asoc.alternate;
14583 				} else {
14584 					netp = stcb->asoc.primary_destination;
14585 				}
14586 				sctp_send_shutdown(stcb, netp);
14587 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
14588 				                 netp);
14589 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
14590 				                 NULL);
14591 			}
14592 		} else {
			/*-
			 * We still have (or just queued) data to send, so
			 * set SHUTDOWN_PENDING.
			 */
			/*-
			 * XXX The sockets draft says that SCTP_EOF should be
			 * sent with no data. Currently, we allow user data to
			 * be sent first and then move to SHUTDOWN-PENDING.
			 */
14603 			if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
14604 			    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
14605 			    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
14606 				if (hold_tcblock == 0) {
14607 					SCTP_TCB_LOCK(stcb);
14608 					hold_tcblock = 1;
14609 				}
14610 				if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) {
14611 					SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
14612 				}
14613 				SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
14614 				if (TAILQ_EMPTY(&asoc->send_queue) &&
14615 				    TAILQ_EMPTY(&asoc->sent_queue) &&
14616 				    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
14617 					struct mbuf *op_err;
14618 					char msg[SCTP_DIAG_INFO_LEN];
14619 
14620 				abort_anyway:
14621 					if (free_cnt_applied) {
14622 						atomic_add_int(&stcb->asoc.refcnt, -1);
14623 						free_cnt_applied = 0;
14624 					}
14625 					SCTP_SNPRINTF(msg, sizeof(msg),
14626 					              "%s:%d at %s", __FILE__, __LINE__, __func__);
14627 					op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
14628 					                             msg);
14629 #if defined(__FreeBSD__) && !defined(__Userspace__)
14630 					NET_EPOCH_ENTER(et);
14631 #endif
14632 					sctp_abort_an_association(stcb->sctp_ep, stcb,
14633 					                          op_err, SCTP_SO_LOCKED);
14634 #if defined(__FreeBSD__) && !defined(__Userspace__)
14635 					NET_EPOCH_EXIT(et);
14636 #endif
					/* The association has been aborted
					 * and freed; do not touch stcb again.
					 */
					hold_tcblock = 0;
					stcb = NULL;
14640 					goto out;
14641 				}
14642 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
14643 				                 NULL);
14644 				sctp_feature_off(inp, SCTP_PCB_FLAGS_NODELAY);
14645 			}
14646 		}
14647 	}
14648 skip_out_eof:
14649 	if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
14650 		some_on_control = 1;
14651 	}
14652 	if (queue_only_for_init) {
14653 		if (hold_tcblock == 0) {
14654 			SCTP_TCB_LOCK(stcb);
14655 			hold_tcblock = 1;
14656 		}
14657 		if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
14658 			/* a collision took us forward? */
14659 			queue_only = 0;
14660 		} else {
14661 #if defined(__FreeBSD__) && !defined(__Userspace__)
14662 			NET_EPOCH_ENTER(et);
14663 #endif
14664 			sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
14665 #if defined(__FreeBSD__) && !defined(__Userspace__)
14666 			NET_EPOCH_EXIT(et);
14667 #endif
14668 			SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
14669 			queue_only = 1;
14670 		}
14671 	}
14672 	if ((net->flight_size > net->cwnd) &&
14673 	    (stcb->asoc.sctp_cmt_on_off == 0)) {
14674 		SCTP_STAT_INCR(sctps_send_cwnd_avoid);
14675 		queue_only = 1;
14676 	} else if (asoc->ifp_had_enobuf) {
14677 		SCTP_STAT_INCR(sctps_ifnomemqueued);
14678 		if (net->flight_size > (2 * net->mtu)) {
14679 			queue_only = 1;
14680 		}
14681 		asoc->ifp_had_enobuf = 0;
14682 	}
14683 	un_sent = stcb->asoc.total_output_queue_size - stcb->asoc.total_flight;
14684 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
14685 	    (stcb->asoc.total_flight > 0) &&
14686 	    (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
14687 	    (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
		/*-
		 * Ok, Nagle is set on and we have data outstanding.
		 * Don't send anything and let SACKs drive out the
		 * data unless we have a "full" segment to send.
		 */
14693 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
14694 			sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
14695 		}
14696 		SCTP_STAT_INCR(sctps_naglequeued);
14697 		nagle_applies = 1;
14698 	} else {
14699 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
14700 			if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
14701 				sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
14702 		}
14703 		SCTP_STAT_INCR(sctps_naglesent);
14704 		nagle_applies = 0;
14705 	}
14706 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
14707 		sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
14708 		               nagle_applies, un_sent);
14709 		sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
14710 		               stcb->asoc.total_flight,
14711 		               stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
14712 	}
14713 #if defined(__FreeBSD__) && !defined(__Userspace__)
14714 	NET_EPOCH_ENTER(et);
14715 #endif
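	/* Finally try to push data out: normal output when the window and
	 * Nagle allow it, a probe when the peer's rwnd is closed and nothing
	 * is in flight, or control-only output when just control chunks are
	 * queued.
	 */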
14716 	if ((queue_only == 0) && (nagle_applies == 0) && (stcb->asoc.peers_rwnd && un_sent)) {
14717 		/* we can attempt to send too. */
14718 		if (hold_tcblock == 0) {
			/* If there is activity receiving SACKs, there is no need to start output */
14720 			if (SCTP_TCB_TRYLOCK(stcb)) {
14721 				sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
14722 				hold_tcblock = 1;
14723 			}
14724 		} else {
14725 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
14726 		}
14727 	} else if ((queue_only == 0) &&
14728 	           (stcb->asoc.peers_rwnd == 0) &&
14729 	           (stcb->asoc.total_flight == 0)) {
14730 		/* We get to have a probe outstanding */
14731 		if (hold_tcblock == 0) {
14732 			hold_tcblock = 1;
14733 			SCTP_TCB_LOCK(stcb);
14734 		}
14735 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
14736 	} else if (some_on_control) {
14737 		int num_out, reason, frag_point;
14738 
14739 		/* Here we do control only */
14740 		if (hold_tcblock == 0) {
14741 			hold_tcblock = 1;
14742 			SCTP_TCB_LOCK(stcb);
14743 		}
14744 		frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
14745 		(void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
14746 		                            &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_LOCKED);
14747 	}
14748 #if defined(__FreeBSD__) && !defined(__Userspace__)
14749 	NET_EPOCH_EXIT(et);
14750 #endif
14751 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d err:%d\n",
14752 	        queue_only, stcb->asoc.peers_rwnd, un_sent,
14753 		stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue,
14754 	        stcb->asoc.total_output_queue_size, error);
14755 
14756 out:
14757 #if defined(__APPLE__) && !defined(__Userspace__)
14758 	sbunlock(&so->so_snd, 1);
14759 #endif
14760 out_unlocked:
14761 
14762 	if (local_soresv && stcb) {
14763 		atomic_subtract_int(&stcb->asoc.sb_send_resv, sndlen);
14764 	}
14765 	if (create_lock_applied) {
14766 		SCTP_ASOC_CREATE_UNLOCK(inp);
14767 	}
14768 	if ((stcb) && hold_tcblock) {
14769 		SCTP_TCB_UNLOCK(stcb);
14770 	}
14771 	if (stcb && free_cnt_applied) {
14772 		atomic_add_int(&stcb->asoc.refcnt, -1);
14773 	}
14774 #ifdef INVARIANTS
14775 #if defined(__FreeBSD__) && !defined(__Userspace__)
14776 	if (stcb) {
14777 		if (mtx_owned(&stcb->tcb_mtx)) {
14778 			panic("Leaving with tcb mtx owned?");
14779 		}
14780 		if (mtx_owned(&stcb->tcb_send_mtx)) {
14781 			panic("Leaving with tcb send mtx owned?");
14782 		}
14783 	}
14784 #endif
14785 #endif
14786 	if (top) {
14787 		sctp_m_freem(top);
14788 	}
14789 	if (control) {
14790 		sctp_m_freem(control);
14791 	}
14792 	return (error);
14793 }
14794 
14795 /*
14796  * generate an AUTHentication chunk, if required
14797  */
14798 struct mbuf *
14799 sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
    struct sctp_auth_chunk **auth_ret, uint32_t *offset,
14801     struct sctp_tcb *stcb, uint8_t chunk)
14802 {
14803 	struct mbuf *m_auth;
14804 	struct sctp_auth_chunk *auth;
14805 	int chunk_len;
14806 	struct mbuf *cn;
14807 
14808 	if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) ||
14809 	    (stcb == NULL))
14810 		return (m);
14811 
14812 	if (stcb->asoc.auth_supported == 0) {
14813 		return (m);
14814 	}
14815 	/* does the requested chunk require auth? */
14816 	if (!sctp_auth_is_required_chunk(chunk, stcb->asoc.peer_auth_chunks)) {
14817 		return (m);
14818 	}
14819 	m_auth = sctp_get_mbuf_for_msg(sizeof(*auth), 0, M_NOWAIT, 1, MT_HEADER);
14820 	if (m_auth == NULL) {
		/* no mbufs */
14822 		return (m);
14823 	}
14824 	/* reserve some space if this will be the first mbuf */
14825 	if (m == NULL)
14826 		SCTP_BUF_RESV_UF(m_auth, SCTP_MIN_OVERHEAD);
14827 	/* fill in the AUTH chunk details */
14828 	auth = mtod(m_auth, struct sctp_auth_chunk *);
14829 	memset(auth, 0, sizeof(*auth));
14830 	auth->ch.chunk_type = SCTP_AUTHENTICATION;
14831 	auth->ch.chunk_flags = 0;
14832 	chunk_len = sizeof(*auth) +
14833 	    sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
14834 	auth->ch.chunk_length = htons(chunk_len);
14835 	auth->hmac_id = htons(stcb->asoc.peer_hmac_id);
14836 	/* key id and hmac digest will be computed and filled in upon send */
14837 
14838 	/* save the offset where the auth was inserted into the chain */
14839 	*offset = 0;
14840 	for (cn = m; cn; cn = SCTP_BUF_NEXT(cn)) {
14841 		*offset += SCTP_BUF_LEN(cn);
14842 	}
14843 
14844 	/* update length and return pointer to the auth chunk */
14845 	SCTP_BUF_LEN(m_auth) = chunk_len;
14846 	m = sctp_copy_mbufchain(m_auth, m, m_end, 1, chunk_len, 0);
14847 	if (auth_ret != NULL)
14848 		*auth_ret = auth;
14849 
14850 	return (m);
14851 }
14852 
14853 #if (defined(__FreeBSD__) || defined(__APPLE__)) && !defined(__Userspace__)
14854 #ifdef INET6
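/*
 * Check whether the next hop of the route is one of the advertising routers
 * for the prefix matching the given IPv6 source address.
 * Returns 1 on a match and 0 otherwise.
 */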
14855 int
14856 sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t *ro)
14857 {
14858 	struct nd_prefix *pfx = NULL;
14859 	struct nd_pfxrouter *pfxrtr = NULL;
14860 	struct sockaddr_in6 gw6;
14861 
14862 #if defined(__FreeBSD__)
14863 	if (ro == NULL || ro->ro_nh == NULL || src6->sin6_family != AF_INET6)
14864 #else
14865 	if (ro == NULL || ro->ro_rt == NULL || src6->sin6_family != AF_INET6)
14866 #endif
14867 		return (0);
14868 
14869 	/* get prefix entry of address */
14870 #if defined(__FreeBSD__)
14871 	ND6_RLOCK();
14872 #endif
14873 	LIST_FOREACH(pfx, &MODULE_GLOBAL(nd_prefix), ndpr_entry) {
14874 		if (pfx->ndpr_stateflags & NDPRF_DETACHED)
14875 			continue;
14876 		if (IN6_ARE_MASKED_ADDR_EQUAL(&pfx->ndpr_prefix.sin6_addr,
14877 		    &src6->sin6_addr, &pfx->ndpr_mask))
14878 			break;
14879 	}
14880 	/* no prefix entry in the prefix list */
14881 	if (pfx == NULL) {
14882 #if defined(__FreeBSD__)
14883 		ND6_RUNLOCK();
14884 #endif
14885 		SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefix entry for ");
14886 		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
14887 		return (0);
14888 	}
14889 
14890 	SCTPDBG(SCTP_DEBUG_OUTPUT2, "v6src_match_nexthop(), Prefix entry is ");
14891 	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
14892 
14893 	/* search installed gateway from prefix entry */
14894 	LIST_FOREACH(pfxrtr, &pfx->ndpr_advrtrs, pfr_entry) {
14895 		memset(&gw6, 0, sizeof(struct sockaddr_in6));
14896 		gw6.sin6_family = AF_INET6;
14897 #ifdef HAVE_SIN6_LEN
14898 		gw6.sin6_len = sizeof(struct sockaddr_in6);
14899 #endif
14900 		memcpy(&gw6.sin6_addr, &pfxrtr->router->rtaddr,
14901 		    sizeof(struct in6_addr));
14902 		SCTPDBG(SCTP_DEBUG_OUTPUT2, "prefix router is ");
14903 		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&gw6);
14904 		SCTPDBG(SCTP_DEBUG_OUTPUT2, "installed router is ");
14905 #if defined(__FreeBSD__)
14906 		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ro->ro_nh->gw_sa);
14907 #else
14908 		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
14909 #endif
14910 #if defined(__FreeBSD__)
14911 		if (sctp_cmpaddr((struct sockaddr *)&gw6, &ro->ro_nh->gw_sa)) {
14912 			ND6_RUNLOCK();
14913 #else
14914 		if (sctp_cmpaddr((struct sockaddr *)&gw6, ro->ro_rt->rt_gateway)) {
14915 #endif
14916 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is installed\n");
14917 			return (1);
14918 		}
14919 	}
14920 #if defined(__FreeBSD__)
14921 	ND6_RUNLOCK();
14922 #endif
14923 	SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is not installed\n");
14924 	return (0);
14925 }
14926 #endif
14927 
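/*
 * Check whether the given IPv4 source address and the route's gateway lie on
 * the same subnet, using the netmask of the source's interface address.
 * Returns 1 on a match and 0 otherwise.
 */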
14928 int
14929 sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t *ro)
14930 {
14931 #ifdef INET
14932 	struct sockaddr_in *sin, *mask;
14933 	struct ifaddr *ifa;
14934 	struct in_addr srcnetaddr, gwnetaddr;
14935 
14936 #if defined(__FreeBSD__)
14937 	if (ro == NULL || ro->ro_nh == NULL ||
14938 #else
14939 	if (ro == NULL || ro->ro_rt == NULL ||
14940 #endif
14941 	    sifa->address.sa.sa_family != AF_INET) {
14942 		return (0);
14943 	}
14944 	ifa = (struct ifaddr *)sifa->ifa;
14945 	mask = (struct sockaddr_in *)(ifa->ifa_netmask);
14946 	sin = &sifa->address.sin;
14947 	srcnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
14948 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: src address is ");
14949 	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
14950 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", srcnetaddr.s_addr);
14951 
14952 #if defined(__FreeBSD__)
14953 	sin = &ro->ro_nh->gw4_sa;
14954 #else
14955 	sin = (struct sockaddr_in *)ro->ro_rt->rt_gateway;
14956 #endif
14957 	gwnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
14958 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: nexthop is ");
14959 #if defined(__FreeBSD__)
14960 	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ro->ro_nh->gw_sa);
14961 #else
14962 	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
14963 #endif
14964 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", gwnetaddr.s_addr);
14965 	if (srcnetaddr.s_addr == gwnetaddr.s_addr) {
14966 		return (1);
14967 	}
14968 #endif
14969 	return (0);
14970 }
14971 #elif defined(__Userspace__)
14972 /* TODO __Userspace__ versions of sctp_vXsrc_match_nexthop(). */
int
sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t *ro)
{
	return (0);
}

int
sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t *ro)
{
	return (0);
}
14983 
14984 #endif
14985