1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #ifdef __FreeBSD__
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD: head/sys/netinet/sctp_output.c 280371 2015-03-23 15:12:02Z tuexen $");
36 #endif
37 
38 #include <netinet/sctp_os.h>
39 #ifdef __FreeBSD__
40 #include <sys/proc.h>
41 #endif
42 #include <netinet/sctp_var.h>
43 #include <netinet/sctp_sysctl.h>
44 #include <netinet/sctp_header.h>
45 #include <netinet/sctp_pcb.h>
46 #include <netinet/sctputil.h>
47 #include <netinet/sctp_output.h>
48 #include <netinet/sctp_uio.h>
49 #include <netinet/sctputil.h>
50 #include <netinet/sctp_auth.h>
51 #include <netinet/sctp_timer.h>
52 #include <netinet/sctp_asconf.h>
53 #include <netinet/sctp_indata.h>
54 #include <netinet/sctp_bsd_addr.h>
55 #include <netinet/sctp_input.h>
56 #include <netinet/sctp_crc32.h>
57 #if defined(__Userspace_os_Linux)
#define __FAVOR_BSD    /* on Linux (Ubuntu at least) this enables BSD-style UDP header field names, as in RFC 768 */
59 #endif
60 #if defined(INET) || defined(INET6)
61 #if !defined(__Userspace_os_Windows)
62 #include <netinet/udp.h>
63 #endif
64 #endif
65 #if defined(__APPLE__)
66 #include <netinet/in.h>
67 #endif
68 #if defined(__FreeBSD__)
69 #if defined(__FreeBSD__) && __FreeBSD_version >= 800000
70 #include <netinet/udp_var.h>
71 #endif
72 #include <machine/in_cksum.h>
73 #endif
74 #if defined(__Userspace__) && defined(INET6)
75 #include <netinet6/sctp6_var.h>
76 #endif
77 
78 #if defined(__APPLE__)
79 #define APPLE_FILE_NO 3
80 #endif
81 
82 #if defined(__APPLE__)
83 #if !(defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD))
84 #define SCTP_MAX_LINKHDR 16
85 #endif
86 #endif
87 
88 #define SCTP_MAX_GAPS_INARRAY 4
89 struct sack_track {
	uint8_t right_edge;	/* mergeable on the right edge */
	uint8_t left_edge;	/* mergeable on the left edge */
92 	uint8_t num_entries;
93 	uint8_t spare;
94 	struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY];
95 };
96 
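/*
 * Lookup table used when building SACK gap ack blocks from the association's
 * mapping array. Each entry describes one possible byte value: gaps[] holds
 * the start/end bit offsets (0..7) of every run of set bits in that byte,
 * num_entries is the number of such runs, and right_edge/left_edge record
 * whether bit 0 or bit 7 is set, so runs can be merged with the runs of the
 * adjacent bytes. For example, 0x55 (bits 0, 2, 4 and 6 set) yields the four
 * one-bit runs {0,0} {2,2} {4,4} {6,6} with right_edge set.
 */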
97 struct sack_track sack_array[256] = {
98 	{0, 0, 0, 0,		/* 0x00 */
99 		{{0, 0},
100 		{0, 0},
101 		{0, 0},
102 		{0, 0}
103 		}
104 	},
105 	{1, 0, 1, 0,		/* 0x01 */
106 		{{0, 0},
107 		{0, 0},
108 		{0, 0},
109 		{0, 0}
110 		}
111 	},
112 	{0, 0, 1, 0,		/* 0x02 */
113 		{{1, 1},
114 		{0, 0},
115 		{0, 0},
116 		{0, 0}
117 		}
118 	},
119 	{1, 0, 1, 0,		/* 0x03 */
120 		{{0, 1},
121 		{0, 0},
122 		{0, 0},
123 		{0, 0}
124 		}
125 	},
126 	{0, 0, 1, 0,		/* 0x04 */
127 		{{2, 2},
128 		{0, 0},
129 		{0, 0},
130 		{0, 0}
131 		}
132 	},
133 	{1, 0, 2, 0,		/* 0x05 */
134 		{{0, 0},
135 		{2, 2},
136 		{0, 0},
137 		{0, 0}
138 		}
139 	},
140 	{0, 0, 1, 0,		/* 0x06 */
141 		{{1, 2},
142 		{0, 0},
143 		{0, 0},
144 		{0, 0}
145 		}
146 	},
147 	{1, 0, 1, 0,		/* 0x07 */
148 		{{0, 2},
149 		{0, 0},
150 		{0, 0},
151 		{0, 0}
152 		}
153 	},
154 	{0, 0, 1, 0,		/* 0x08 */
155 		{{3, 3},
156 		{0, 0},
157 		{0, 0},
158 		{0, 0}
159 		}
160 	},
161 	{1, 0, 2, 0,		/* 0x09 */
162 		{{0, 0},
163 		{3, 3},
164 		{0, 0},
165 		{0, 0}
166 		}
167 	},
168 	{0, 0, 2, 0,		/* 0x0a */
169 		{{1, 1},
170 		{3, 3},
171 		{0, 0},
172 		{0, 0}
173 		}
174 	},
175 	{1, 0, 2, 0,		/* 0x0b */
176 		{{0, 1},
177 		{3, 3},
178 		{0, 0},
179 		{0, 0}
180 		}
181 	},
182 	{0, 0, 1, 0,		/* 0x0c */
183 		{{2, 3},
184 		{0, 0},
185 		{0, 0},
186 		{0, 0}
187 		}
188 	},
189 	{1, 0, 2, 0,		/* 0x0d */
190 		{{0, 0},
191 		{2, 3},
192 		{0, 0},
193 		{0, 0}
194 		}
195 	},
196 	{0, 0, 1, 0,		/* 0x0e */
197 		{{1, 3},
198 		{0, 0},
199 		{0, 0},
200 		{0, 0}
201 		}
202 	},
203 	{1, 0, 1, 0,		/* 0x0f */
204 		{{0, 3},
205 		{0, 0},
206 		{0, 0},
207 		{0, 0}
208 		}
209 	},
210 	{0, 0, 1, 0,		/* 0x10 */
211 		{{4, 4},
212 		{0, 0},
213 		{0, 0},
214 		{0, 0}
215 		}
216 	},
217 	{1, 0, 2, 0,		/* 0x11 */
218 		{{0, 0},
219 		{4, 4},
220 		{0, 0},
221 		{0, 0}
222 		}
223 	},
224 	{0, 0, 2, 0,		/* 0x12 */
225 		{{1, 1},
226 		{4, 4},
227 		{0, 0},
228 		{0, 0}
229 		}
230 	},
231 	{1, 0, 2, 0,		/* 0x13 */
232 		{{0, 1},
233 		{4, 4},
234 		{0, 0},
235 		{0, 0}
236 		}
237 	},
238 	{0, 0, 2, 0,		/* 0x14 */
239 		{{2, 2},
240 		{4, 4},
241 		{0, 0},
242 		{0, 0}
243 		}
244 	},
245 	{1, 0, 3, 0,		/* 0x15 */
246 		{{0, 0},
247 		{2, 2},
248 		{4, 4},
249 		{0, 0}
250 		}
251 	},
252 	{0, 0, 2, 0,		/* 0x16 */
253 		{{1, 2},
254 		{4, 4},
255 		{0, 0},
256 		{0, 0}
257 		}
258 	},
259 	{1, 0, 2, 0,		/* 0x17 */
260 		{{0, 2},
261 		{4, 4},
262 		{0, 0},
263 		{0, 0}
264 		}
265 	},
266 	{0, 0, 1, 0,		/* 0x18 */
267 		{{3, 4},
268 		{0, 0},
269 		{0, 0},
270 		{0, 0}
271 		}
272 	},
273 	{1, 0, 2, 0,		/* 0x19 */
274 		{{0, 0},
275 		{3, 4},
276 		{0, 0},
277 		{0, 0}
278 		}
279 	},
280 	{0, 0, 2, 0,		/* 0x1a */
281 		{{1, 1},
282 		{3, 4},
283 		{0, 0},
284 		{0, 0}
285 		}
286 	},
287 	{1, 0, 2, 0,		/* 0x1b */
288 		{{0, 1},
289 		{3, 4},
290 		{0, 0},
291 		{0, 0}
292 		}
293 	},
294 	{0, 0, 1, 0,		/* 0x1c */
295 		{{2, 4},
296 		{0, 0},
297 		{0, 0},
298 		{0, 0}
299 		}
300 	},
301 	{1, 0, 2, 0,		/* 0x1d */
302 		{{0, 0},
303 		{2, 4},
304 		{0, 0},
305 		{0, 0}
306 		}
307 	},
308 	{0, 0, 1, 0,		/* 0x1e */
309 		{{1, 4},
310 		{0, 0},
311 		{0, 0},
312 		{0, 0}
313 		}
314 	},
315 	{1, 0, 1, 0,		/* 0x1f */
316 		{{0, 4},
317 		{0, 0},
318 		{0, 0},
319 		{0, 0}
320 		}
321 	},
322 	{0, 0, 1, 0,		/* 0x20 */
323 		{{5, 5},
324 		{0, 0},
325 		{0, 0},
326 		{0, 0}
327 		}
328 	},
329 	{1, 0, 2, 0,		/* 0x21 */
330 		{{0, 0},
331 		{5, 5},
332 		{0, 0},
333 		{0, 0}
334 		}
335 	},
336 	{0, 0, 2, 0,		/* 0x22 */
337 		{{1, 1},
338 		{5, 5},
339 		{0, 0},
340 		{0, 0}
341 		}
342 	},
343 	{1, 0, 2, 0,		/* 0x23 */
344 		{{0, 1},
345 		{5, 5},
346 		{0, 0},
347 		{0, 0}
348 		}
349 	},
350 	{0, 0, 2, 0,		/* 0x24 */
351 		{{2, 2},
352 		{5, 5},
353 		{0, 0},
354 		{0, 0}
355 		}
356 	},
357 	{1, 0, 3, 0,		/* 0x25 */
358 		{{0, 0},
359 		{2, 2},
360 		{5, 5},
361 		{0, 0}
362 		}
363 	},
364 	{0, 0, 2, 0,		/* 0x26 */
365 		{{1, 2},
366 		{5, 5},
367 		{0, 0},
368 		{0, 0}
369 		}
370 	},
371 	{1, 0, 2, 0,		/* 0x27 */
372 		{{0, 2},
373 		{5, 5},
374 		{0, 0},
375 		{0, 0}
376 		}
377 	},
378 	{0, 0, 2, 0,		/* 0x28 */
379 		{{3, 3},
380 		{5, 5},
381 		{0, 0},
382 		{0, 0}
383 		}
384 	},
385 	{1, 0, 3, 0,		/* 0x29 */
386 		{{0, 0},
387 		{3, 3},
388 		{5, 5},
389 		{0, 0}
390 		}
391 	},
392 	{0, 0, 3, 0,		/* 0x2a */
393 		{{1, 1},
394 		{3, 3},
395 		{5, 5},
396 		{0, 0}
397 		}
398 	},
399 	{1, 0, 3, 0,		/* 0x2b */
400 		{{0, 1},
401 		{3, 3},
402 		{5, 5},
403 		{0, 0}
404 		}
405 	},
406 	{0, 0, 2, 0,		/* 0x2c */
407 		{{2, 3},
408 		{5, 5},
409 		{0, 0},
410 		{0, 0}
411 		}
412 	},
413 	{1, 0, 3, 0,		/* 0x2d */
414 		{{0, 0},
415 		{2, 3},
416 		{5, 5},
417 		{0, 0}
418 		}
419 	},
420 	{0, 0, 2, 0,		/* 0x2e */
421 		{{1, 3},
422 		{5, 5},
423 		{0, 0},
424 		{0, 0}
425 		}
426 	},
427 	{1, 0, 2, 0,		/* 0x2f */
428 		{{0, 3},
429 		{5, 5},
430 		{0, 0},
431 		{0, 0}
432 		}
433 	},
434 	{0, 0, 1, 0,		/* 0x30 */
435 		{{4, 5},
436 		{0, 0},
437 		{0, 0},
438 		{0, 0}
439 		}
440 	},
441 	{1, 0, 2, 0,		/* 0x31 */
442 		{{0, 0},
443 		{4, 5},
444 		{0, 0},
445 		{0, 0}
446 		}
447 	},
448 	{0, 0, 2, 0,		/* 0x32 */
449 		{{1, 1},
450 		{4, 5},
451 		{0, 0},
452 		{0, 0}
453 		}
454 	},
455 	{1, 0, 2, 0,		/* 0x33 */
456 		{{0, 1},
457 		{4, 5},
458 		{0, 0},
459 		{0, 0}
460 		}
461 	},
462 	{0, 0, 2, 0,		/* 0x34 */
463 		{{2, 2},
464 		{4, 5},
465 		{0, 0},
466 		{0, 0}
467 		}
468 	},
469 	{1, 0, 3, 0,		/* 0x35 */
470 		{{0, 0},
471 		{2, 2},
472 		{4, 5},
473 		{0, 0}
474 		}
475 	},
476 	{0, 0, 2, 0,		/* 0x36 */
477 		{{1, 2},
478 		{4, 5},
479 		{0, 0},
480 		{0, 0}
481 		}
482 	},
483 	{1, 0, 2, 0,		/* 0x37 */
484 		{{0, 2},
485 		{4, 5},
486 		{0, 0},
487 		{0, 0}
488 		}
489 	},
490 	{0, 0, 1, 0,		/* 0x38 */
491 		{{3, 5},
492 		{0, 0},
493 		{0, 0},
494 		{0, 0}
495 		}
496 	},
497 	{1, 0, 2, 0,		/* 0x39 */
498 		{{0, 0},
499 		{3, 5},
500 		{0, 0},
501 		{0, 0}
502 		}
503 	},
504 	{0, 0, 2, 0,		/* 0x3a */
505 		{{1, 1},
506 		{3, 5},
507 		{0, 0},
508 		{0, 0}
509 		}
510 	},
511 	{1, 0, 2, 0,		/* 0x3b */
512 		{{0, 1},
513 		{3, 5},
514 		{0, 0},
515 		{0, 0}
516 		}
517 	},
518 	{0, 0, 1, 0,		/* 0x3c */
519 		{{2, 5},
520 		{0, 0},
521 		{0, 0},
522 		{0, 0}
523 		}
524 	},
525 	{1, 0, 2, 0,		/* 0x3d */
526 		{{0, 0},
527 		{2, 5},
528 		{0, 0},
529 		{0, 0}
530 		}
531 	},
532 	{0, 0, 1, 0,		/* 0x3e */
533 		{{1, 5},
534 		{0, 0},
535 		{0, 0},
536 		{0, 0}
537 		}
538 	},
539 	{1, 0, 1, 0,		/* 0x3f */
540 		{{0, 5},
541 		{0, 0},
542 		{0, 0},
543 		{0, 0}
544 		}
545 	},
546 	{0, 0, 1, 0,		/* 0x40 */
547 		{{6, 6},
548 		{0, 0},
549 		{0, 0},
550 		{0, 0}
551 		}
552 	},
553 	{1, 0, 2, 0,		/* 0x41 */
554 		{{0, 0},
555 		{6, 6},
556 		{0, 0},
557 		{0, 0}
558 		}
559 	},
560 	{0, 0, 2, 0,		/* 0x42 */
561 		{{1, 1},
562 		{6, 6},
563 		{0, 0},
564 		{0, 0}
565 		}
566 	},
567 	{1, 0, 2, 0,		/* 0x43 */
568 		{{0, 1},
569 		{6, 6},
570 		{0, 0},
571 		{0, 0}
572 		}
573 	},
574 	{0, 0, 2, 0,		/* 0x44 */
575 		{{2, 2},
576 		{6, 6},
577 		{0, 0},
578 		{0, 0}
579 		}
580 	},
581 	{1, 0, 3, 0,		/* 0x45 */
582 		{{0, 0},
583 		{2, 2},
584 		{6, 6},
585 		{0, 0}
586 		}
587 	},
588 	{0, 0, 2, 0,		/* 0x46 */
589 		{{1, 2},
590 		{6, 6},
591 		{0, 0},
592 		{0, 0}
593 		}
594 	},
595 	{1, 0, 2, 0,		/* 0x47 */
596 		{{0, 2},
597 		{6, 6},
598 		{0, 0},
599 		{0, 0}
600 		}
601 	},
602 	{0, 0, 2, 0,		/* 0x48 */
603 		{{3, 3},
604 		{6, 6},
605 		{0, 0},
606 		{0, 0}
607 		}
608 	},
609 	{1, 0, 3, 0,		/* 0x49 */
610 		{{0, 0},
611 		{3, 3},
612 		{6, 6},
613 		{0, 0}
614 		}
615 	},
616 	{0, 0, 3, 0,		/* 0x4a */
617 		{{1, 1},
618 		{3, 3},
619 		{6, 6},
620 		{0, 0}
621 		}
622 	},
623 	{1, 0, 3, 0,		/* 0x4b */
624 		{{0, 1},
625 		{3, 3},
626 		{6, 6},
627 		{0, 0}
628 		}
629 	},
630 	{0, 0, 2, 0,		/* 0x4c */
631 		{{2, 3},
632 		{6, 6},
633 		{0, 0},
634 		{0, 0}
635 		}
636 	},
637 	{1, 0, 3, 0,		/* 0x4d */
638 		{{0, 0},
639 		{2, 3},
640 		{6, 6},
641 		{0, 0}
642 		}
643 	},
644 	{0, 0, 2, 0,		/* 0x4e */
645 		{{1, 3},
646 		{6, 6},
647 		{0, 0},
648 		{0, 0}
649 		}
650 	},
651 	{1, 0, 2, 0,		/* 0x4f */
652 		{{0, 3},
653 		{6, 6},
654 		{0, 0},
655 		{0, 0}
656 		}
657 	},
658 	{0, 0, 2, 0,		/* 0x50 */
659 		{{4, 4},
660 		{6, 6},
661 		{0, 0},
662 		{0, 0}
663 		}
664 	},
665 	{1, 0, 3, 0,		/* 0x51 */
666 		{{0, 0},
667 		{4, 4},
668 		{6, 6},
669 		{0, 0}
670 		}
671 	},
672 	{0, 0, 3, 0,		/* 0x52 */
673 		{{1, 1},
674 		{4, 4},
675 		{6, 6},
676 		{0, 0}
677 		}
678 	},
679 	{1, 0, 3, 0,		/* 0x53 */
680 		{{0, 1},
681 		{4, 4},
682 		{6, 6},
683 		{0, 0}
684 		}
685 	},
686 	{0, 0, 3, 0,		/* 0x54 */
687 		{{2, 2},
688 		{4, 4},
689 		{6, 6},
690 		{0, 0}
691 		}
692 	},
693 	{1, 0, 4, 0,		/* 0x55 */
694 		{{0, 0},
695 		{2, 2},
696 		{4, 4},
697 		{6, 6}
698 		}
699 	},
700 	{0, 0, 3, 0,		/* 0x56 */
701 		{{1, 2},
702 		{4, 4},
703 		{6, 6},
704 		{0, 0}
705 		}
706 	},
707 	{1, 0, 3, 0,		/* 0x57 */
708 		{{0, 2},
709 		{4, 4},
710 		{6, 6},
711 		{0, 0}
712 		}
713 	},
714 	{0, 0, 2, 0,		/* 0x58 */
715 		{{3, 4},
716 		{6, 6},
717 		{0, 0},
718 		{0, 0}
719 		}
720 	},
721 	{1, 0, 3, 0,		/* 0x59 */
722 		{{0, 0},
723 		{3, 4},
724 		{6, 6},
725 		{0, 0}
726 		}
727 	},
728 	{0, 0, 3, 0,		/* 0x5a */
729 		{{1, 1},
730 		{3, 4},
731 		{6, 6},
732 		{0, 0}
733 		}
734 	},
735 	{1, 0, 3, 0,		/* 0x5b */
736 		{{0, 1},
737 		{3, 4},
738 		{6, 6},
739 		{0, 0}
740 		}
741 	},
742 	{0, 0, 2, 0,		/* 0x5c */
743 		{{2, 4},
744 		{6, 6},
745 		{0, 0},
746 		{0, 0}
747 		}
748 	},
749 	{1, 0, 3, 0,		/* 0x5d */
750 		{{0, 0},
751 		{2, 4},
752 		{6, 6},
753 		{0, 0}
754 		}
755 	},
756 	{0, 0, 2, 0,		/* 0x5e */
757 		{{1, 4},
758 		{6, 6},
759 		{0, 0},
760 		{0, 0}
761 		}
762 	},
763 	{1, 0, 2, 0,		/* 0x5f */
764 		{{0, 4},
765 		{6, 6},
766 		{0, 0},
767 		{0, 0}
768 		}
769 	},
770 	{0, 0, 1, 0,		/* 0x60 */
771 		{{5, 6},
772 		{0, 0},
773 		{0, 0},
774 		{0, 0}
775 		}
776 	},
777 	{1, 0, 2, 0,		/* 0x61 */
778 		{{0, 0},
779 		{5, 6},
780 		{0, 0},
781 		{0, 0}
782 		}
783 	},
784 	{0, 0, 2, 0,		/* 0x62 */
785 		{{1, 1},
786 		{5, 6},
787 		{0, 0},
788 		{0, 0}
789 		}
790 	},
791 	{1, 0, 2, 0,		/* 0x63 */
792 		{{0, 1},
793 		{5, 6},
794 		{0, 0},
795 		{0, 0}
796 		}
797 	},
798 	{0, 0, 2, 0,		/* 0x64 */
799 		{{2, 2},
800 		{5, 6},
801 		{0, 0},
802 		{0, 0}
803 		}
804 	},
805 	{1, 0, 3, 0,		/* 0x65 */
806 		{{0, 0},
807 		{2, 2},
808 		{5, 6},
809 		{0, 0}
810 		}
811 	},
812 	{0, 0, 2, 0,		/* 0x66 */
813 		{{1, 2},
814 		{5, 6},
815 		{0, 0},
816 		{0, 0}
817 		}
818 	},
819 	{1, 0, 2, 0,		/* 0x67 */
820 		{{0, 2},
821 		{5, 6},
822 		{0, 0},
823 		{0, 0}
824 		}
825 	},
826 	{0, 0, 2, 0,		/* 0x68 */
827 		{{3, 3},
828 		{5, 6},
829 		{0, 0},
830 		{0, 0}
831 		}
832 	},
833 	{1, 0, 3, 0,		/* 0x69 */
834 		{{0, 0},
835 		{3, 3},
836 		{5, 6},
837 		{0, 0}
838 		}
839 	},
840 	{0, 0, 3, 0,		/* 0x6a */
841 		{{1, 1},
842 		{3, 3},
843 		{5, 6},
844 		{0, 0}
845 		}
846 	},
847 	{1, 0, 3, 0,		/* 0x6b */
848 		{{0, 1},
849 		{3, 3},
850 		{5, 6},
851 		{0, 0}
852 		}
853 	},
854 	{0, 0, 2, 0,		/* 0x6c */
855 		{{2, 3},
856 		{5, 6},
857 		{0, 0},
858 		{0, 0}
859 		}
860 	},
861 	{1, 0, 3, 0,		/* 0x6d */
862 		{{0, 0},
863 		{2, 3},
864 		{5, 6},
865 		{0, 0}
866 		}
867 	},
868 	{0, 0, 2, 0,		/* 0x6e */
869 		{{1, 3},
870 		{5, 6},
871 		{0, 0},
872 		{0, 0}
873 		}
874 	},
875 	{1, 0, 2, 0,		/* 0x6f */
876 		{{0, 3},
877 		{5, 6},
878 		{0, 0},
879 		{0, 0}
880 		}
881 	},
882 	{0, 0, 1, 0,		/* 0x70 */
883 		{{4, 6},
884 		{0, 0},
885 		{0, 0},
886 		{0, 0}
887 		}
888 	},
889 	{1, 0, 2, 0,		/* 0x71 */
890 		{{0, 0},
891 		{4, 6},
892 		{0, 0},
893 		{0, 0}
894 		}
895 	},
896 	{0, 0, 2, 0,		/* 0x72 */
897 		{{1, 1},
898 		{4, 6},
899 		{0, 0},
900 		{0, 0}
901 		}
902 	},
903 	{1, 0, 2, 0,		/* 0x73 */
904 		{{0, 1},
905 		{4, 6},
906 		{0, 0},
907 		{0, 0}
908 		}
909 	},
910 	{0, 0, 2, 0,		/* 0x74 */
911 		{{2, 2},
912 		{4, 6},
913 		{0, 0},
914 		{0, 0}
915 		}
916 	},
917 	{1, 0, 3, 0,		/* 0x75 */
918 		{{0, 0},
919 		{2, 2},
920 		{4, 6},
921 		{0, 0}
922 		}
923 	},
924 	{0, 0, 2, 0,		/* 0x76 */
925 		{{1, 2},
926 		{4, 6},
927 		{0, 0},
928 		{0, 0}
929 		}
930 	},
931 	{1, 0, 2, 0,		/* 0x77 */
932 		{{0, 2},
933 		{4, 6},
934 		{0, 0},
935 		{0, 0}
936 		}
937 	},
938 	{0, 0, 1, 0,		/* 0x78 */
939 		{{3, 6},
940 		{0, 0},
941 		{0, 0},
942 		{0, 0}
943 		}
944 	},
945 	{1, 0, 2, 0,		/* 0x79 */
946 		{{0, 0},
947 		{3, 6},
948 		{0, 0},
949 		{0, 0}
950 		}
951 	},
952 	{0, 0, 2, 0,		/* 0x7a */
953 		{{1, 1},
954 		{3, 6},
955 		{0, 0},
956 		{0, 0}
957 		}
958 	},
959 	{1, 0, 2, 0,		/* 0x7b */
960 		{{0, 1},
961 		{3, 6},
962 		{0, 0},
963 		{0, 0}
964 		}
965 	},
966 	{0, 0, 1, 0,		/* 0x7c */
967 		{{2, 6},
968 		{0, 0},
969 		{0, 0},
970 		{0, 0}
971 		}
972 	},
973 	{1, 0, 2, 0,		/* 0x7d */
974 		{{0, 0},
975 		{2, 6},
976 		{0, 0},
977 		{0, 0}
978 		}
979 	},
980 	{0, 0, 1, 0,		/* 0x7e */
981 		{{1, 6},
982 		{0, 0},
983 		{0, 0},
984 		{0, 0}
985 		}
986 	},
987 	{1, 0, 1, 0,		/* 0x7f */
988 		{{0, 6},
989 		{0, 0},
990 		{0, 0},
991 		{0, 0}
992 		}
993 	},
994 	{0, 1, 1, 0,		/* 0x80 */
995 		{{7, 7},
996 		{0, 0},
997 		{0, 0},
998 		{0, 0}
999 		}
1000 	},
1001 	{1, 1, 2, 0,		/* 0x81 */
1002 		{{0, 0},
1003 		{7, 7},
1004 		{0, 0},
1005 		{0, 0}
1006 		}
1007 	},
1008 	{0, 1, 2, 0,		/* 0x82 */
1009 		{{1, 1},
1010 		{7, 7},
1011 		{0, 0},
1012 		{0, 0}
1013 		}
1014 	},
1015 	{1, 1, 2, 0,		/* 0x83 */
1016 		{{0, 1},
1017 		{7, 7},
1018 		{0, 0},
1019 		{0, 0}
1020 		}
1021 	},
1022 	{0, 1, 2, 0,		/* 0x84 */
1023 		{{2, 2},
1024 		{7, 7},
1025 		{0, 0},
1026 		{0, 0}
1027 		}
1028 	},
1029 	{1, 1, 3, 0,		/* 0x85 */
1030 		{{0, 0},
1031 		{2, 2},
1032 		{7, 7},
1033 		{0, 0}
1034 		}
1035 	},
1036 	{0, 1, 2, 0,		/* 0x86 */
1037 		{{1, 2},
1038 		{7, 7},
1039 		{0, 0},
1040 		{0, 0}
1041 		}
1042 	},
1043 	{1, 1, 2, 0,		/* 0x87 */
1044 		{{0, 2},
1045 		{7, 7},
1046 		{0, 0},
1047 		{0, 0}
1048 		}
1049 	},
1050 	{0, 1, 2, 0,		/* 0x88 */
1051 		{{3, 3},
1052 		{7, 7},
1053 		{0, 0},
1054 		{0, 0}
1055 		}
1056 	},
1057 	{1, 1, 3, 0,		/* 0x89 */
1058 		{{0, 0},
1059 		{3, 3},
1060 		{7, 7},
1061 		{0, 0}
1062 		}
1063 	},
1064 	{0, 1, 3, 0,		/* 0x8a */
1065 		{{1, 1},
1066 		{3, 3},
1067 		{7, 7},
1068 		{0, 0}
1069 		}
1070 	},
1071 	{1, 1, 3, 0,		/* 0x8b */
1072 		{{0, 1},
1073 		{3, 3},
1074 		{7, 7},
1075 		{0, 0}
1076 		}
1077 	},
1078 	{0, 1, 2, 0,		/* 0x8c */
1079 		{{2, 3},
1080 		{7, 7},
1081 		{0, 0},
1082 		{0, 0}
1083 		}
1084 	},
1085 	{1, 1, 3, 0,		/* 0x8d */
1086 		{{0, 0},
1087 		{2, 3},
1088 		{7, 7},
1089 		{0, 0}
1090 		}
1091 	},
1092 	{0, 1, 2, 0,		/* 0x8e */
1093 		{{1, 3},
1094 		{7, 7},
1095 		{0, 0},
1096 		{0, 0}
1097 		}
1098 	},
1099 	{1, 1, 2, 0,		/* 0x8f */
1100 		{{0, 3},
1101 		{7, 7},
1102 		{0, 0},
1103 		{0, 0}
1104 		}
1105 	},
1106 	{0, 1, 2, 0,		/* 0x90 */
1107 		{{4, 4},
1108 		{7, 7},
1109 		{0, 0},
1110 		{0, 0}
1111 		}
1112 	},
1113 	{1, 1, 3, 0,		/* 0x91 */
1114 		{{0, 0},
1115 		{4, 4},
1116 		{7, 7},
1117 		{0, 0}
1118 		}
1119 	},
1120 	{0, 1, 3, 0,		/* 0x92 */
1121 		{{1, 1},
1122 		{4, 4},
1123 		{7, 7},
1124 		{0, 0}
1125 		}
1126 	},
1127 	{1, 1, 3, 0,		/* 0x93 */
1128 		{{0, 1},
1129 		{4, 4},
1130 		{7, 7},
1131 		{0, 0}
1132 		}
1133 	},
1134 	{0, 1, 3, 0,		/* 0x94 */
1135 		{{2, 2},
1136 		{4, 4},
1137 		{7, 7},
1138 		{0, 0}
1139 		}
1140 	},
1141 	{1, 1, 4, 0,		/* 0x95 */
1142 		{{0, 0},
1143 		{2, 2},
1144 		{4, 4},
1145 		{7, 7}
1146 		}
1147 	},
1148 	{0, 1, 3, 0,		/* 0x96 */
1149 		{{1, 2},
1150 		{4, 4},
1151 		{7, 7},
1152 		{0, 0}
1153 		}
1154 	},
1155 	{1, 1, 3, 0,		/* 0x97 */
1156 		{{0, 2},
1157 		{4, 4},
1158 		{7, 7},
1159 		{0, 0}
1160 		}
1161 	},
1162 	{0, 1, 2, 0,		/* 0x98 */
1163 		{{3, 4},
1164 		{7, 7},
1165 		{0, 0},
1166 		{0, 0}
1167 		}
1168 	},
1169 	{1, 1, 3, 0,		/* 0x99 */
1170 		{{0, 0},
1171 		{3, 4},
1172 		{7, 7},
1173 		{0, 0}
1174 		}
1175 	},
1176 	{0, 1, 3, 0,		/* 0x9a */
1177 		{{1, 1},
1178 		{3, 4},
1179 		{7, 7},
1180 		{0, 0}
1181 		}
1182 	},
1183 	{1, 1, 3, 0,		/* 0x9b */
1184 		{{0, 1},
1185 		{3, 4},
1186 		{7, 7},
1187 		{0, 0}
1188 		}
1189 	},
1190 	{0, 1, 2, 0,		/* 0x9c */
1191 		{{2, 4},
1192 		{7, 7},
1193 		{0, 0},
1194 		{0, 0}
1195 		}
1196 	},
1197 	{1, 1, 3, 0,		/* 0x9d */
1198 		{{0, 0},
1199 		{2, 4},
1200 		{7, 7},
1201 		{0, 0}
1202 		}
1203 	},
1204 	{0, 1, 2, 0,		/* 0x9e */
1205 		{{1, 4},
1206 		{7, 7},
1207 		{0, 0},
1208 		{0, 0}
1209 		}
1210 	},
1211 	{1, 1, 2, 0,		/* 0x9f */
1212 		{{0, 4},
1213 		{7, 7},
1214 		{0, 0},
1215 		{0, 0}
1216 		}
1217 	},
1218 	{0, 1, 2, 0,		/* 0xa0 */
1219 		{{5, 5},
1220 		{7, 7},
1221 		{0, 0},
1222 		{0, 0}
1223 		}
1224 	},
1225 	{1, 1, 3, 0,		/* 0xa1 */
1226 		{{0, 0},
1227 		{5, 5},
1228 		{7, 7},
1229 		{0, 0}
1230 		}
1231 	},
1232 	{0, 1, 3, 0,		/* 0xa2 */
1233 		{{1, 1},
1234 		{5, 5},
1235 		{7, 7},
1236 		{0, 0}
1237 		}
1238 	},
1239 	{1, 1, 3, 0,		/* 0xa3 */
1240 		{{0, 1},
1241 		{5, 5},
1242 		{7, 7},
1243 		{0, 0}
1244 		}
1245 	},
1246 	{0, 1, 3, 0,		/* 0xa4 */
1247 		{{2, 2},
1248 		{5, 5},
1249 		{7, 7},
1250 		{0, 0}
1251 		}
1252 	},
1253 	{1, 1, 4, 0,		/* 0xa5 */
1254 		{{0, 0},
1255 		{2, 2},
1256 		{5, 5},
1257 		{7, 7}
1258 		}
1259 	},
1260 	{0, 1, 3, 0,		/* 0xa6 */
1261 		{{1, 2},
1262 		{5, 5},
1263 		{7, 7},
1264 		{0, 0}
1265 		}
1266 	},
1267 	{1, 1, 3, 0,		/* 0xa7 */
1268 		{{0, 2},
1269 		{5, 5},
1270 		{7, 7},
1271 		{0, 0}
1272 		}
1273 	},
1274 	{0, 1, 3, 0,		/* 0xa8 */
1275 		{{3, 3},
1276 		{5, 5},
1277 		{7, 7},
1278 		{0, 0}
1279 		}
1280 	},
1281 	{1, 1, 4, 0,		/* 0xa9 */
1282 		{{0, 0},
1283 		{3, 3},
1284 		{5, 5},
1285 		{7, 7}
1286 		}
1287 	},
1288 	{0, 1, 4, 0,		/* 0xaa */
1289 		{{1, 1},
1290 		{3, 3},
1291 		{5, 5},
1292 		{7, 7}
1293 		}
1294 	},
1295 	{1, 1, 4, 0,		/* 0xab */
1296 		{{0, 1},
1297 		{3, 3},
1298 		{5, 5},
1299 		{7, 7}
1300 		}
1301 	},
1302 	{0, 1, 3, 0,		/* 0xac */
1303 		{{2, 3},
1304 		{5, 5},
1305 		{7, 7},
1306 		{0, 0}
1307 		}
1308 	},
1309 	{1, 1, 4, 0,		/* 0xad */
1310 		{{0, 0},
1311 		{2, 3},
1312 		{5, 5},
1313 		{7, 7}
1314 		}
1315 	},
1316 	{0, 1, 3, 0,		/* 0xae */
1317 		{{1, 3},
1318 		{5, 5},
1319 		{7, 7},
1320 		{0, 0}
1321 		}
1322 	},
1323 	{1, 1, 3, 0,		/* 0xaf */
1324 		{{0, 3},
1325 		{5, 5},
1326 		{7, 7},
1327 		{0, 0}
1328 		}
1329 	},
1330 	{0, 1, 2, 0,		/* 0xb0 */
1331 		{{4, 5},
1332 		{7, 7},
1333 		{0, 0},
1334 		{0, 0}
1335 		}
1336 	},
1337 	{1, 1, 3, 0,		/* 0xb1 */
1338 		{{0, 0},
1339 		{4, 5},
1340 		{7, 7},
1341 		{0, 0}
1342 		}
1343 	},
1344 	{0, 1, 3, 0,		/* 0xb2 */
1345 		{{1, 1},
1346 		{4, 5},
1347 		{7, 7},
1348 		{0, 0}
1349 		}
1350 	},
1351 	{1, 1, 3, 0,		/* 0xb3 */
1352 		{{0, 1},
1353 		{4, 5},
1354 		{7, 7},
1355 		{0, 0}
1356 		}
1357 	},
1358 	{0, 1, 3, 0,		/* 0xb4 */
1359 		{{2, 2},
1360 		{4, 5},
1361 		{7, 7},
1362 		{0, 0}
1363 		}
1364 	},
1365 	{1, 1, 4, 0,		/* 0xb5 */
1366 		{{0, 0},
1367 		{2, 2},
1368 		{4, 5},
1369 		{7, 7}
1370 		}
1371 	},
1372 	{0, 1, 3, 0,		/* 0xb6 */
1373 		{{1, 2},
1374 		{4, 5},
1375 		{7, 7},
1376 		{0, 0}
1377 		}
1378 	},
1379 	{1, 1, 3, 0,		/* 0xb7 */
1380 		{{0, 2},
1381 		{4, 5},
1382 		{7, 7},
1383 		{0, 0}
1384 		}
1385 	},
1386 	{0, 1, 2, 0,		/* 0xb8 */
1387 		{{3, 5},
1388 		{7, 7},
1389 		{0, 0},
1390 		{0, 0}
1391 		}
1392 	},
1393 	{1, 1, 3, 0,		/* 0xb9 */
1394 		{{0, 0},
1395 		{3, 5},
1396 		{7, 7},
1397 		{0, 0}
1398 		}
1399 	},
1400 	{0, 1, 3, 0,		/* 0xba */
1401 		{{1, 1},
1402 		{3, 5},
1403 		{7, 7},
1404 		{0, 0}
1405 		}
1406 	},
1407 	{1, 1, 3, 0,		/* 0xbb */
1408 		{{0, 1},
1409 		{3, 5},
1410 		{7, 7},
1411 		{0, 0}
1412 		}
1413 	},
1414 	{0, 1, 2, 0,		/* 0xbc */
1415 		{{2, 5},
1416 		{7, 7},
1417 		{0, 0},
1418 		{0, 0}
1419 		}
1420 	},
1421 	{1, 1, 3, 0,		/* 0xbd */
1422 		{{0, 0},
1423 		{2, 5},
1424 		{7, 7},
1425 		{0, 0}
1426 		}
1427 	},
1428 	{0, 1, 2, 0,		/* 0xbe */
1429 		{{1, 5},
1430 		{7, 7},
1431 		{0, 0},
1432 		{0, 0}
1433 		}
1434 	},
1435 	{1, 1, 2, 0,		/* 0xbf */
1436 		{{0, 5},
1437 		{7, 7},
1438 		{0, 0},
1439 		{0, 0}
1440 		}
1441 	},
1442 	{0, 1, 1, 0,		/* 0xc0 */
1443 		{{6, 7},
1444 		{0, 0},
1445 		{0, 0},
1446 		{0, 0}
1447 		}
1448 	},
1449 	{1, 1, 2, 0,		/* 0xc1 */
1450 		{{0, 0},
1451 		{6, 7},
1452 		{0, 0},
1453 		{0, 0}
1454 		}
1455 	},
1456 	{0, 1, 2, 0,		/* 0xc2 */
1457 		{{1, 1},
1458 		{6, 7},
1459 		{0, 0},
1460 		{0, 0}
1461 		}
1462 	},
1463 	{1, 1, 2, 0,		/* 0xc3 */
1464 		{{0, 1},
1465 		{6, 7},
1466 		{0, 0},
1467 		{0, 0}
1468 		}
1469 	},
1470 	{0, 1, 2, 0,		/* 0xc4 */
1471 		{{2, 2},
1472 		{6, 7},
1473 		{0, 0},
1474 		{0, 0}
1475 		}
1476 	},
1477 	{1, 1, 3, 0,		/* 0xc5 */
1478 		{{0, 0},
1479 		{2, 2},
1480 		{6, 7},
1481 		{0, 0}
1482 		}
1483 	},
1484 	{0, 1, 2, 0,		/* 0xc6 */
1485 		{{1, 2},
1486 		{6, 7},
1487 		{0, 0},
1488 		{0, 0}
1489 		}
1490 	},
1491 	{1, 1, 2, 0,		/* 0xc7 */
1492 		{{0, 2},
1493 		{6, 7},
1494 		{0, 0},
1495 		{0, 0}
1496 		}
1497 	},
1498 	{0, 1, 2, 0,		/* 0xc8 */
1499 		{{3, 3},
1500 		{6, 7},
1501 		{0, 0},
1502 		{0, 0}
1503 		}
1504 	},
1505 	{1, 1, 3, 0,		/* 0xc9 */
1506 		{{0, 0},
1507 		{3, 3},
1508 		{6, 7},
1509 		{0, 0}
1510 		}
1511 	},
1512 	{0, 1, 3, 0,		/* 0xca */
1513 		{{1, 1},
1514 		{3, 3},
1515 		{6, 7},
1516 		{0, 0}
1517 		}
1518 	},
1519 	{1, 1, 3, 0,		/* 0xcb */
1520 		{{0, 1},
1521 		{3, 3},
1522 		{6, 7},
1523 		{0, 0}
1524 		}
1525 	},
1526 	{0, 1, 2, 0,		/* 0xcc */
1527 		{{2, 3},
1528 		{6, 7},
1529 		{0, 0},
1530 		{0, 0}
1531 		}
1532 	},
1533 	{1, 1, 3, 0,		/* 0xcd */
1534 		{{0, 0},
1535 		{2, 3},
1536 		{6, 7},
1537 		{0, 0}
1538 		}
1539 	},
1540 	{0, 1, 2, 0,		/* 0xce */
1541 		{{1, 3},
1542 		{6, 7},
1543 		{0, 0},
1544 		{0, 0}
1545 		}
1546 	},
1547 	{1, 1, 2, 0,		/* 0xcf */
1548 		{{0, 3},
1549 		{6, 7},
1550 		{0, 0},
1551 		{0, 0}
1552 		}
1553 	},
1554 	{0, 1, 2, 0,		/* 0xd0 */
1555 		{{4, 4},
1556 		{6, 7},
1557 		{0, 0},
1558 		{0, 0}
1559 		}
1560 	},
1561 	{1, 1, 3, 0,		/* 0xd1 */
1562 		{{0, 0},
1563 		{4, 4},
1564 		{6, 7},
1565 		{0, 0}
1566 		}
1567 	},
1568 	{0, 1, 3, 0,		/* 0xd2 */
1569 		{{1, 1},
1570 		{4, 4},
1571 		{6, 7},
1572 		{0, 0}
1573 		}
1574 	},
1575 	{1, 1, 3, 0,		/* 0xd3 */
1576 		{{0, 1},
1577 		{4, 4},
1578 		{6, 7},
1579 		{0, 0}
1580 		}
1581 	},
1582 	{0, 1, 3, 0,		/* 0xd4 */
1583 		{{2, 2},
1584 		{4, 4},
1585 		{6, 7},
1586 		{0, 0}
1587 		}
1588 	},
1589 	{1, 1, 4, 0,		/* 0xd5 */
1590 		{{0, 0},
1591 		{2, 2},
1592 		{4, 4},
1593 		{6, 7}
1594 		}
1595 	},
1596 	{0, 1, 3, 0,		/* 0xd6 */
1597 		{{1, 2},
1598 		{4, 4},
1599 		{6, 7},
1600 		{0, 0}
1601 		}
1602 	},
1603 	{1, 1, 3, 0,		/* 0xd7 */
1604 		{{0, 2},
1605 		{4, 4},
1606 		{6, 7},
1607 		{0, 0}
1608 		}
1609 	},
1610 	{0, 1, 2, 0,		/* 0xd8 */
1611 		{{3, 4},
1612 		{6, 7},
1613 		{0, 0},
1614 		{0, 0}
1615 		}
1616 	},
1617 	{1, 1, 3, 0,		/* 0xd9 */
1618 		{{0, 0},
1619 		{3, 4},
1620 		{6, 7},
1621 		{0, 0}
1622 		}
1623 	},
1624 	{0, 1, 3, 0,		/* 0xda */
1625 		{{1, 1},
1626 		{3, 4},
1627 		{6, 7},
1628 		{0, 0}
1629 		}
1630 	},
1631 	{1, 1, 3, 0,		/* 0xdb */
1632 		{{0, 1},
1633 		{3, 4},
1634 		{6, 7},
1635 		{0, 0}
1636 		}
1637 	},
1638 	{0, 1, 2, 0,		/* 0xdc */
1639 		{{2, 4},
1640 		{6, 7},
1641 		{0, 0},
1642 		{0, 0}
1643 		}
1644 	},
1645 	{1, 1, 3, 0,		/* 0xdd */
1646 		{{0, 0},
1647 		{2, 4},
1648 		{6, 7},
1649 		{0, 0}
1650 		}
1651 	},
1652 	{0, 1, 2, 0,		/* 0xde */
1653 		{{1, 4},
1654 		{6, 7},
1655 		{0, 0},
1656 		{0, 0}
1657 		}
1658 	},
1659 	{1, 1, 2, 0,		/* 0xdf */
1660 		{{0, 4},
1661 		{6, 7},
1662 		{0, 0},
1663 		{0, 0}
1664 		}
1665 	},
1666 	{0, 1, 1, 0,		/* 0xe0 */
1667 		{{5, 7},
1668 		{0, 0},
1669 		{0, 0},
1670 		{0, 0}
1671 		}
1672 	},
1673 	{1, 1, 2, 0,		/* 0xe1 */
1674 		{{0, 0},
1675 		{5, 7},
1676 		{0, 0},
1677 		{0, 0}
1678 		}
1679 	},
1680 	{0, 1, 2, 0,		/* 0xe2 */
1681 		{{1, 1},
1682 		{5, 7},
1683 		{0, 0},
1684 		{0, 0}
1685 		}
1686 	},
1687 	{1, 1, 2, 0,		/* 0xe3 */
1688 		{{0, 1},
1689 		{5, 7},
1690 		{0, 0},
1691 		{0, 0}
1692 		}
1693 	},
1694 	{0, 1, 2, 0,		/* 0xe4 */
1695 		{{2, 2},
1696 		{5, 7},
1697 		{0, 0},
1698 		{0, 0}
1699 		}
1700 	},
1701 	{1, 1, 3, 0,		/* 0xe5 */
1702 		{{0, 0},
1703 		{2, 2},
1704 		{5, 7},
1705 		{0, 0}
1706 		}
1707 	},
1708 	{0, 1, 2, 0,		/* 0xe6 */
1709 		{{1, 2},
1710 		{5, 7},
1711 		{0, 0},
1712 		{0, 0}
1713 		}
1714 	},
1715 	{1, 1, 2, 0,		/* 0xe7 */
1716 		{{0, 2},
1717 		{5, 7},
1718 		{0, 0},
1719 		{0, 0}
1720 		}
1721 	},
1722 	{0, 1, 2, 0,		/* 0xe8 */
1723 		{{3, 3},
1724 		{5, 7},
1725 		{0, 0},
1726 		{0, 0}
1727 		}
1728 	},
1729 	{1, 1, 3, 0,		/* 0xe9 */
1730 		{{0, 0},
1731 		{3, 3},
1732 		{5, 7},
1733 		{0, 0}
1734 		}
1735 	},
1736 	{0, 1, 3, 0,		/* 0xea */
1737 		{{1, 1},
1738 		{3, 3},
1739 		{5, 7},
1740 		{0, 0}
1741 		}
1742 	},
1743 	{1, 1, 3, 0,		/* 0xeb */
1744 		{{0, 1},
1745 		{3, 3},
1746 		{5, 7},
1747 		{0, 0}
1748 		}
1749 	},
1750 	{0, 1, 2, 0,		/* 0xec */
1751 		{{2, 3},
1752 		{5, 7},
1753 		{0, 0},
1754 		{0, 0}
1755 		}
1756 	},
1757 	{1, 1, 3, 0,		/* 0xed */
1758 		{{0, 0},
1759 		{2, 3},
1760 		{5, 7},
1761 		{0, 0}
1762 		}
1763 	},
1764 	{0, 1, 2, 0,		/* 0xee */
1765 		{{1, 3},
1766 		{5, 7},
1767 		{0, 0},
1768 		{0, 0}
1769 		}
1770 	},
1771 	{1, 1, 2, 0,		/* 0xef */
1772 		{{0, 3},
1773 		{5, 7},
1774 		{0, 0},
1775 		{0, 0}
1776 		}
1777 	},
1778 	{0, 1, 1, 0,		/* 0xf0 */
1779 		{{4, 7},
1780 		{0, 0},
1781 		{0, 0},
1782 		{0, 0}
1783 		}
1784 	},
1785 	{1, 1, 2, 0,		/* 0xf1 */
1786 		{{0, 0},
1787 		{4, 7},
1788 		{0, 0},
1789 		{0, 0}
1790 		}
1791 	},
1792 	{0, 1, 2, 0,		/* 0xf2 */
1793 		{{1, 1},
1794 		{4, 7},
1795 		{0, 0},
1796 		{0, 0}
1797 		}
1798 	},
1799 	{1, 1, 2, 0,		/* 0xf3 */
1800 		{{0, 1},
1801 		{4, 7},
1802 		{0, 0},
1803 		{0, 0}
1804 		}
1805 	},
1806 	{0, 1, 2, 0,		/* 0xf4 */
1807 		{{2, 2},
1808 		{4, 7},
1809 		{0, 0},
1810 		{0, 0}
1811 		}
1812 	},
1813 	{1, 1, 3, 0,		/* 0xf5 */
1814 		{{0, 0},
1815 		{2, 2},
1816 		{4, 7},
1817 		{0, 0}
1818 		}
1819 	},
1820 	{0, 1, 2, 0,		/* 0xf6 */
1821 		{{1, 2},
1822 		{4, 7},
1823 		{0, 0},
1824 		{0, 0}
1825 		}
1826 	},
1827 	{1, 1, 2, 0,		/* 0xf7 */
1828 		{{0, 2},
1829 		{4, 7},
1830 		{0, 0},
1831 		{0, 0}
1832 		}
1833 	},
1834 	{0, 1, 1, 0,		/* 0xf8 */
1835 		{{3, 7},
1836 		{0, 0},
1837 		{0, 0},
1838 		{0, 0}
1839 		}
1840 	},
1841 	{1, 1, 2, 0,		/* 0xf9 */
1842 		{{0, 0},
1843 		{3, 7},
1844 		{0, 0},
1845 		{0, 0}
1846 		}
1847 	},
1848 	{0, 1, 2, 0,		/* 0xfa */
1849 		{{1, 1},
1850 		{3, 7},
1851 		{0, 0},
1852 		{0, 0}
1853 		}
1854 	},
1855 	{1, 1, 2, 0,		/* 0xfb */
1856 		{{0, 1},
1857 		{3, 7},
1858 		{0, 0},
1859 		{0, 0}
1860 		}
1861 	},
1862 	{0, 1, 1, 0,		/* 0xfc */
1863 		{{2, 7},
1864 		{0, 0},
1865 		{0, 0},
1866 		{0, 0}
1867 		}
1868 	},
1869 	{1, 1, 2, 0,		/* 0xfd */
1870 		{{0, 0},
1871 		{2, 7},
1872 		{0, 0},
1873 		{0, 0}
1874 		}
1875 	},
1876 	{0, 1, 1, 0,		/* 0xfe */
1877 		{{1, 7},
1878 		{0, 0},
1879 		{0, 0},
1880 		{0, 0}
1881 		}
1882 	},
1883 	{1, 1, 1, 0,		/* 0xff */
1884 		{{0, 7},
1885 		{0, 0},
1886 		{0, 0},
1887 		{0, 0}
1888 		}
1889 	}
1890 };
1891 
1892 
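/*
 * Decide whether ifa may be used/listed under the given scoping rules.
 * Returns 1 if the address is in scope, 0 if it must be skipped (loopback,
 * unspecified, private or site-local addresses that the scope excludes).
 * If do_update is set, the cached IPv6 interface-address flags are
 * refreshed first.
 */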
int
sctp_is_address_in_scope(struct sctp_ifa *ifa,
                         struct sctp_scoping *scope,
                         int do_update)
{
1898 	if ((scope->loopback_scope == 0) &&
1899 	    (ifa->ifn_p) && SCTP_IFN_IS_IFT_LOOP(ifa->ifn_p)) {
		/*
		 * skip loopback if not in scope
		 */
1903 		return (0);
1904 	}
1905 	switch (ifa->address.sa.sa_family) {
1906 #ifdef INET
1907 	case AF_INET:
1908 		if (scope->ipv4_addr_legal) {
1909 			struct sockaddr_in *sin;
1910 
1911 			sin = &ifa->address.sin;
1912 			if (sin->sin_addr.s_addr == 0) {
				/* not in scope, unspecified */
1914 				return (0);
1915 			}
1916 			if ((scope->ipv4_local_scope == 0) &&
1917 			    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
1918 				/* private address not in scope */
1919 				return (0);
1920 			}
1921 		} else {
1922 			return (0);
1923 		}
1924 		break;
1925 #endif
1926 #ifdef INET6
1927 	case AF_INET6:
1928 		if (scope->ipv6_addr_legal) {
1929 			struct sockaddr_in6 *sin6;
1930 
1931 #if !defined(__Panda__)
			/* Must update the flags, which means any
			 * IFA locks must now be applied here.
			 */
1935 			if (do_update) {
1936 				sctp_gather_internal_ifa_flags(ifa);
1937 			}
1938 #endif
1939 			if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
1940 				return (0);
1941 			}
1942 			/* ok to use deprecated addresses? */
1943 			sin6 = &ifa->address.sin6;
1944 			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
				/* skip unspecified addresses */
1946 				return (0);
1947 			}
1948 			if (		/* (local_scope == 0) && */
1949 			    (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) {
1950 				return (0);
1951 			}
1952 			if ((scope->site_scope == 0) &&
1953 			    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
1954 				return (0);
1955 			}
1956 		} else {
1957 			return (0);
1958 		}
1959 		break;
1960 #endif
1961 #if defined(__Userspace__)
1962 	case AF_CONN:
1963 		if (!scope->conn_addr_legal) {
1964 			return (0);
1965 		}
1966 		break;
1967 #endif
1968 	default:
1969 		return (0);
1970 	}
1971 	return (1);
1972 }
1973 
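/*
 * Append an IPv4 or IPv6 address parameter describing ifa to the mbuf m.
 * If there is not enough trailing space, a new mbuf is chained onto the end
 * and the mbuf actually written to is returned; otherwise m itself is
 * returned. On success *len (if given) is increased by the parameter length;
 * unsupported address families leave the chain untouched.
 */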
static struct mbuf *
sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa, uint16_t *len)
{
1977 #if defined(INET) || defined(INET6)
1978 	struct sctp_paramhdr *parmh;
1979 	struct mbuf *mret;
1980 	uint16_t plen;
1981 #endif
1982 
1983 	switch (ifa->address.sa.sa_family) {
1984 #ifdef INET
1985 	case AF_INET:
1986 		plen = (uint16_t)sizeof(struct sctp_ipv4addr_param);
1987 		break;
1988 #endif
1989 #ifdef INET6
1990 	case AF_INET6:
1991 		plen = (uint16_t)sizeof(struct sctp_ipv6addr_param);
1992 		break;
1993 #endif
1994 	default:
1995 		return (m);
1996 	}
1997 #if defined(INET) || defined(INET6)
1998 	if (M_TRAILINGSPACE(m) >= plen) {
1999 		/* easy side we just drop it on the end */
2000 		parmh = (struct sctp_paramhdr *)(SCTP_BUF_AT(m, SCTP_BUF_LEN(m)));
2001 		mret = m;
2002 	} else {
2003 		/* Need more space */
2004 		mret = m;
2005 		while (SCTP_BUF_NEXT(mret) != NULL) {
2006 			mret = SCTP_BUF_NEXT(mret);
2007 		}
2008 		SCTP_BUF_NEXT(mret) = sctp_get_mbuf_for_msg(plen, 0, M_NOWAIT, 1, MT_DATA);
2009 		if (SCTP_BUF_NEXT(mret) == NULL) {
2010 			/* We are hosed, can't add more addresses */
2011 			return (m);
2012 		}
2013 		mret = SCTP_BUF_NEXT(mret);
2014 		parmh = mtod(mret, struct sctp_paramhdr *);
2015 	}
2016 	/* now add the parameter */
2017 	switch (ifa->address.sa.sa_family) {
2018 #ifdef INET
2019 	case AF_INET:
2020 	{
2021 		struct sctp_ipv4addr_param *ipv4p;
2022 		struct sockaddr_in *sin;
2023 
2024 		sin = &ifa->address.sin;
2025 		ipv4p = (struct sctp_ipv4addr_param *)parmh;
2026 		parmh->param_type = htons(SCTP_IPV4_ADDRESS);
2027 		parmh->param_length = htons(plen);
2028 		ipv4p->addr = sin->sin_addr.s_addr;
2029 		SCTP_BUF_LEN(mret) += plen;
2030 		break;
2031 	}
2032 #endif
2033 #ifdef INET6
2034 	case AF_INET6:
2035 	{
2036 		struct sctp_ipv6addr_param *ipv6p;
2037 		struct sockaddr_in6 *sin6;
2038 
2039 		sin6 = &ifa->address.sin6;
2040 		ipv6p = (struct sctp_ipv6addr_param *)parmh;
2041 		parmh->param_type = htons(SCTP_IPV6_ADDRESS);
2042 		parmh->param_length = htons(plen);
2043 		memcpy(ipv6p->addr, &sin6->sin6_addr,
2044 		    sizeof(ipv6p->addr));
2045 #if defined(SCTP_EMBEDDED_V6_SCOPE)
2046 		/* clear embedded scope in the address */
2047 		in6_clearscope((struct in6_addr *)ipv6p->addr);
2048 #endif
2049 		SCTP_BUF_LEN(mret) += plen;
2050 		break;
2051 	}
2052 #endif
2053 	default:
2054 		return (m);
2055 	}
2056 	if (len != NULL) {
2057 		*len += plen;
2058 	}
2059 	return (mret);
2060 #endif
2061 }
2062 
2063 
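/*
 * Add the local address parameters to the INIT or INIT-ACK chunk being
 * built in m_at. For a bound-all endpoint the system address list is
 * walked (subject to SCTP_ADDRESS_LIMIT); otherwise the endpoint's own
 * address list is used. Addresses are only listed when more than one
 * would qualify, and *chunk_len/*padding_len are kept up to date.
 */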
struct mbuf *
sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
                           struct sctp_scoping *scope,
			   struct mbuf *m_at, int cnt_inits_to,
			   uint16_t *padding_len, uint16_t *chunk_len)
{
2070 	struct sctp_vrf *vrf = NULL;
2071 	int cnt, limit_out = 0, total_count;
2072 	uint32_t vrf_id;
2073 
2074 	vrf_id = inp->def_vrf_id;
2075 	SCTP_IPI_ADDR_RLOCK();
2076 	vrf = sctp_find_vrf(vrf_id);
2077 	if (vrf == NULL) {
2078 		SCTP_IPI_ADDR_RUNLOCK();
2079 		return (m_at);
2080 	}
2081 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
2082 		struct sctp_ifa *sctp_ifap;
2083 		struct sctp_ifn *sctp_ifnp;
2084 
2085 		cnt = cnt_inits_to;
2086 		if (vrf->total_ifa_count > SCTP_COUNT_LIMIT) {
2087 			limit_out = 1;
2088 			cnt = SCTP_ADDRESS_LIMIT;
2089 			goto skip_count;
2090 		}
2091 		LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
2092 			if ((scope->loopback_scope == 0) &&
2093 			    SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
2094 				/*
2095 				 * Skip loopback devices if loopback_scope
2096 				 * not set
2097 				 */
2098 				continue;
2099 			}
2100 			LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
2101 #if defined(__FreeBSD__)
2102 #ifdef INET
2103 				if ((sctp_ifap->address.sa.sa_family == AF_INET) &&
2104 				    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2105 				                      &sctp_ifap->address.sin.sin_addr) != 0)) {
2106 					continue;
2107 				}
2108 #endif
2109 #ifdef INET6
2110 				if ((sctp_ifap->address.sa.sa_family == AF_INET6) &&
2111 				    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2112 				                      &sctp_ifap->address.sin6.sin6_addr) != 0)) {
2113 					continue;
2114 				}
2115 #endif
2116 #endif
2117 				if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
2118 					continue;
2119 				}
2120 #if defined(__Userspace__)
2121 				if (sctp_ifap->address.sa.sa_family == AF_CONN) {
2122 					continue;
2123 				}
2124 #endif
2125 				if (sctp_is_address_in_scope(sctp_ifap, scope, 1) == 0) {
2126 					continue;
2127 				}
2128 				cnt++;
2129 				if (cnt > SCTP_ADDRESS_LIMIT) {
2130 					break;
2131 				}
2132 			}
2133 			if (cnt > SCTP_ADDRESS_LIMIT) {
2134 				break;
2135 			}
2136 		}
2137 	skip_count:
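		/* Only list addresses when more than one would be included;
		 * with a single bound address the peer can take it from the
		 * packet's source (see the NAT note in the bound-specific
		 * branch below).
		 */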
2138 		if (cnt > 1) {
2139 			total_count = 0;
2140 			LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
2141 				cnt = 0;
2142 				if ((scope->loopback_scope == 0) &&
2143 				    SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
2144 					/*
2145 					 * Skip loopback devices if
2146 					 * loopback_scope not set
2147 					 */
2148 					continue;
2149 				}
2150 				LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
2151 #if defined(__FreeBSD__)
2152 #ifdef INET
2153 					if ((sctp_ifap->address.sa.sa_family == AF_INET) &&
2154 					    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2155 					                      &sctp_ifap->address.sin.sin_addr) != 0)) {
2156 						continue;
2157 					}
2158 #endif
2159 #ifdef INET6
2160 					if ((sctp_ifap->address.sa.sa_family == AF_INET6) &&
2161 					    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2162 					                      &sctp_ifap->address.sin6.sin6_addr) != 0)) {
2163 						continue;
2164 					}
2165 #endif
2166 #endif
2167 					if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
2168 						continue;
2169 					}
2170 #if defined(__Userspace__)
2171 					if (sctp_ifap->address.sa.sa_family == AF_CONN) {
2172 						continue;
2173 					}
2174 #endif
2175 					if (sctp_is_address_in_scope(sctp_ifap,
2176 								     scope, 0) == 0) {
2177 						continue;
2178 					}
2179 					if ((chunk_len != NULL) &&
2180 					    (padding_len != NULL) &&
2181 					    (*padding_len > 0)) {
2182 						memset(mtod(m_at, caddr_t) + *chunk_len, 0, *padding_len);
2183 						SCTP_BUF_LEN(m_at) += *padding_len;
2184 						*chunk_len += *padding_len;
2185 						*padding_len = 0;
2186 					}
2187 					m_at = sctp_add_addr_to_mbuf(m_at, sctp_ifap, chunk_len);
2188 					if (limit_out) {
2189 						cnt++;
2190 						total_count++;
2191 						if (cnt >= 2) {
							/* two from each interface */
2193 							break;
2194 						}
2195 						if (total_count > SCTP_ADDRESS_LIMIT) {
2196 							/* No more addresses */
2197 							break;
2198 						}
2199 					}
2200 				}
2201 			}
2202 		}
2203 	} else {
2204 		struct sctp_laddr *laddr;
2205 
2206 		cnt = cnt_inits_to;
2207 		/* First, how many ? */
2208 		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2209 			if (laddr->ifa == NULL) {
2210 				continue;
2211 			}
2212 			if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
				/* Address being deleted by the system, don't
				 * list.
				 */
2216 				continue;
2217 			if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2218 				/* Address being deleted on this ep
2219 				 * don't list.
2220 				 */
2221 				continue;
2222 			}
2223 #if defined(__Userspace__)
2224 			if (laddr->ifa->address.sa.sa_family == AF_CONN) {
2225 				continue;
2226 			}
2227 #endif
2228 			if (sctp_is_address_in_scope(laddr->ifa,
2229 						     scope, 1) == 0) {
2230 				continue;
2231 			}
2232 			cnt++;
2233 		}
2234 		/*
2235 		 * To get through a NAT we only list addresses if we have
2236 		 * more than one. That way if you just bind a single address
2237 		 * we let the source of the init dictate our address.
2238 		 */
2239 		if (cnt > 1) {
2240 			cnt = cnt_inits_to;
2241 			LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2242 				if (laddr->ifa == NULL) {
2243 					continue;
2244 				}
2245 				if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
2246 					continue;
2247 				}
2248 #if defined(__Userspace__)
2249 				if (laddr->ifa->address.sa.sa_family == AF_CONN) {
2250 					continue;
2251 				}
2252 #endif
2253 				if (sctp_is_address_in_scope(laddr->ifa,
2254 							     scope, 0) == 0) {
2255 					continue;
2256 				}
2257 				if ((chunk_len != NULL) &&
2258 				    (padding_len != NULL) &&
2259 				    (*padding_len > 0)) {
2260 					memset(mtod(m_at, caddr_t) + *chunk_len, 0, *padding_len);
2261 					SCTP_BUF_LEN(m_at) += *padding_len;
2262 					*chunk_len += *padding_len;
2263 					*padding_len = 0;
2264 				}
2265 				m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa, chunk_len);
2266 				cnt++;
2267 				if (cnt >= SCTP_ADDRESS_LIMIT) {
2268 					break;
2269 				}
2270 			}
2271 		}
2272 	}
2273 	SCTP_IPI_ADDR_RUNLOCK();
2274 	return (m_at);
2275 }
2276 
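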
static struct sctp_ifa *
sctp_is_ifa_addr_preferred(struct sctp_ifa *ifa,
			   uint8_t dest_is_loop,
			   uint8_t dest_is_priv,
			   sa_family_t fam)
{
2283 	uint8_t dest_is_global = 0;
2284 	/* dest_is_priv is true if destination is a private address */
2285 	/* dest_is_loop is true if destination is a loopback addresses */
2286 
2287 	/**
	 * Here we determine if it is a preferred address. A preferred address
	 * means it is of the same or higher scope than the destination.
2290 	 * L = loopback, P = private, G = global
2291 	 * -----------------------------------------
2292 	 *    src    |  dest | result
2293 	 *  ----------------------------------------
2294 	 *     L     |    L  |    yes
2295 	 *  -----------------------------------------
2296 	 *     P     |    L  |    yes-v4 no-v6
2297 	 *  -----------------------------------------
2298 	 *     G     |    L  |    yes-v4 no-v6
2299 	 *  -----------------------------------------
2300 	 *     L     |    P  |    no
2301 	 *  -----------------------------------------
2302 	 *     P     |    P  |    yes
2303 	 *  -----------------------------------------
2304 	 *     G     |    P  |    no
2305 	 *   -----------------------------------------
2306 	 *     L     |    G  |    no
2307 	 *   -----------------------------------------
2308 	 *     P     |    G  |    no
2309 	 *    -----------------------------------------
2310 	 *     G     |    G  |    yes
2311 	 *    -----------------------------------------
2312 	 */
2313 
2314 	if (ifa->address.sa.sa_family != fam) {
2315 		/* forget mis-matched family */
2316 		return (NULL);
2317 	}
2318 	if ((dest_is_priv == 0) && (dest_is_loop == 0)) {
2319 		dest_is_global = 1;
2320 	}
2321 	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Is destination preferred:");
2322 	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ifa->address.sa);
2323 	/* Ok the address may be ok */
2324 #ifdef INET6
2325 	if (fam == AF_INET6) {
		/* ok to use deprecated addresses? no, let's not! */
2327 		if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
2328 			SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:1\n");
2329 			return (NULL);
2330 		}
2331 		if (ifa->src_is_priv && !ifa->src_is_loop) {
2332 			if (dest_is_loop) {
2333 				SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:2\n");
2334 				return (NULL);
2335 			}
2336 		}
2337 		if (ifa->src_is_glob) {
2338 			if (dest_is_loop) {
2339 				SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:3\n");
2340 				return (NULL);
2341 			}
2342 		}
2343 	}
2344 #endif
	/* Now that we know what is what, implement our table.
	 * This could in theory be done slicker (it used to be), but this
2347 	 * is straightforward and easier to validate :-)
2348 	 */
2349 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "src_loop:%d src_priv:%d src_glob:%d\n",
2350 		ifa->src_is_loop, ifa->src_is_priv, ifa->src_is_glob);
2351 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "dest_loop:%d dest_priv:%d dest_glob:%d\n",
2352 		dest_is_loop, dest_is_priv, dest_is_global);
2353 
2354 	if ((ifa->src_is_loop) && (dest_is_priv)) {
2355 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:4\n");
2356 		return (NULL);
2357 	}
2358 	if ((ifa->src_is_glob) && (dest_is_priv)) {
2359 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:5\n");
2360 		return (NULL);
2361 	}
2362 	if ((ifa->src_is_loop) && (dest_is_global)) {
2363 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:6\n");
2364 		return (NULL);
2365 	}
2366 	if ((ifa->src_is_priv) && (dest_is_global)) {
2367 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:7\n");
2368 		return (NULL);
2369 	}
2370 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "YES\n");
	/* it's a preferred address */
2372 	return (ifa);
2373 }
2374 
static struct sctp_ifa *
sctp_is_ifa_addr_acceptable(struct sctp_ifa *ifa,
			    uint8_t dest_is_loop,
			    uint8_t dest_is_priv,
			    sa_family_t fam)
{
2381 	uint8_t dest_is_global = 0;
2382 
2383 	/**
	 * Here we determine if it is an acceptable address. An acceptable
	 * address means it is of the same or higher scope, but we also
	 * allow for NAT, which means it is OK to have a global dest and a
	 * private src.
2388 	 *
2389 	 * L = loopback, P = private, G = global
2390 	 * -----------------------------------------
2391 	 *  src    |  dest | result
2392 	 * -----------------------------------------
2393 	 *   L     |   L   |    yes
2394 	 *  -----------------------------------------
2395 	 *   P     |   L   |    yes-v4 no-v6
2396 	 *  -----------------------------------------
2397 	 *   G     |   L   |    yes
2398 	 * -----------------------------------------
2399 	 *   L     |   P   |    no
2400 	 * -----------------------------------------
2401 	 *   P     |   P   |    yes
2402 	 * -----------------------------------------
2403 	 *   G     |   P   |    yes - May not work
2404 	 * -----------------------------------------
2405 	 *   L     |   G   |    no
2406 	 * -----------------------------------------
2407 	 *   P     |   G   |    yes - May not work
2408 	 * -----------------------------------------
2409 	 *   G     |   G   |    yes
2410 	 * -----------------------------------------
2411 	 */
2412 
2413 	if (ifa->address.sa.sa_family != fam) {
2414 		/* forget non matching family */
2415 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa_fam:%d fam:%d\n",
2416 			ifa->address.sa.sa_family, fam);
2417 		return (NULL);
2418 	}
2419 	/* Ok the address may be ok */
2420 	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, &ifa->address.sa);
2421 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst_is_loop:%d dest_is_priv:%d\n",
2422 		dest_is_loop, dest_is_priv);
2423 	if ((dest_is_loop == 0) && (dest_is_priv == 0)) {
2424 		dest_is_global = 1;
2425 	}
2426 #ifdef INET6
2427 	if (fam == AF_INET6) {
2428 		/* ok to use deprecated addresses? */
2429 		if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
2430 			return (NULL);
2431 		}
2432 		if (ifa->src_is_priv) {
2433 			/* Special case, linklocal to loop */
2434 			if (dest_is_loop)
2435 				return (NULL);
2436 		}
2437 	}
2438 #endif
2439 	/*
2440 	 * Now that we know what is what, implement our table.
2441 	 * This could in theory be done slicker (it used to be), but this
2442 	 * is straightforward and easier to validate :-)
2443 	 */
2444 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_priv:%d\n",
2445 		ifa->src_is_loop,
2446 		dest_is_priv);
2447 	if ((ifa->src_is_loop == 1) && (dest_is_priv)) {
2448 		return (NULL);
2449 	}
2450 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_glob:%d\n",
2451 		ifa->src_is_loop,
2452 		dest_is_global);
2453 	if ((ifa->src_is_loop == 1) && (dest_is_global)) {
2454 		return (NULL);
2455 	}
2456 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "address is acceptable\n");
	/* it's an acceptable address */
2458 	return (ifa);
2459 }
2460 
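/*
 * Return 1 if ifa is on the association's restricted-address list,
 * 0 otherwise (including the case where there is no TCB at all).
 */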
int
sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
{
2464 	struct sctp_laddr *laddr;
2465 
2466 	if (stcb == NULL) {
2467 		/* There are no restrictions, no TCB :-) */
2468 		return (0);
2469 	}
2470 	LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) {
2471 		if (laddr->ifa == NULL) {
2472 			SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
2473 				__FUNCTION__);
2474 			continue;
2475 		}
2476 		if (laddr->ifa == ifa) {
2477 			/* Yes it is on the list */
2478 			return (1);
2479 		}
2480 	}
2481 	return (0);
2482 }
2483 
2484 
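/*
 * Return 1 if ifa is currently bound to the endpoint (present on the
 * endpoint's address list with no pending action), 0 otherwise.
 */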
int
sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa)
{
2488 	struct sctp_laddr *laddr;
2489 
2490 	if (ifa == NULL)
2491 		return (0);
2492 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2493 		if (laddr->ifa == NULL) {
2494 			SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
2495 				__FUNCTION__);
2496 			continue;
2497 		}
2498 		if ((laddr->ifa == ifa) && laddr->action == 0)
2499 			/* same pointer */
2500 			return (1);
2501 	}
2502 	return (0);
2503 }
2504 
2505 
2506 
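/*
 * Source-address selection for a bound-specific endpoint with no
 * association restrictions. Preference order: a preferred address on the
 * interface the route emits on that is also bound to the endpoint, then
 * any preferred bound address, then any acceptable bound address; NULL if
 * nothing qualifies. The chosen ifa is returned with its refcount bumped.
 */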
static struct sctp_ifa *
sctp_choose_boundspecific_inp(struct sctp_inpcb *inp,
			      sctp_route_t *ro,
			      uint32_t vrf_id,
			      int non_asoc_addr_ok,
			      uint8_t dest_is_priv,
			      uint8_t dest_is_loop,
			      sa_family_t fam)
{
2516 	struct sctp_laddr *laddr, *starting_point;
2517 	void *ifn;
2518 	int resettotop = 0;
2519 	struct sctp_ifn *sctp_ifn;
2520 	struct sctp_ifa *sctp_ifa, *sifa;
2521 	struct sctp_vrf *vrf;
2522 	uint32_t ifn_index;
2523 
2524 	vrf = sctp_find_vrf(vrf_id);
2525 	if (vrf == NULL)
2526 		return (NULL);
2527 
2528 	ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2529 	ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2530 	sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2531 	/*
	 * First question: is the ifn we will emit on in our list? If so, we
	 * want such an address. Note that we first look for a
	 * preferred address.
2535 	 */
2536 	if (sctp_ifn) {
2537 		/* is a preferred one on the interface we route out? */
2538 		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2539 #if defined(__FreeBSD__)
2540 #ifdef INET
2541 			if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
2542 			    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2543 			                      &sctp_ifa->address.sin.sin_addr) != 0)) {
2544 				continue;
2545 			}
2546 #endif
2547 #ifdef INET6
2548 			if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
2549 			    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2550 			                      &sctp_ifa->address.sin6.sin6_addr) != 0)) {
2551 				continue;
2552 			}
2553 #endif
2554 #endif
2555 			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2556 			    (non_asoc_addr_ok == 0))
2557 				continue;
2558 			sifa = sctp_is_ifa_addr_preferred(sctp_ifa,
2559 							  dest_is_loop,
2560 							  dest_is_priv, fam);
2561 			if (sifa == NULL)
2562 				continue;
2563 			if (sctp_is_addr_in_ep(inp, sifa)) {
2564 				atomic_add_int(&sifa->refcount, 1);
2565 				return (sifa);
2566 			}
2567 		}
2568 	}
2569 	/*
	 * OK, now we need to find one on the list of bound addresses.
	 * We couldn't get one on the emitting interface, so look first for
	 * a preferred one; failing that, an acceptable one; otherwise
	 * we return NULL.
2574 	 */
2575 	starting_point = inp->next_addr_touse;
2576  once_again:
2577 	if (inp->next_addr_touse == NULL) {
2578 		inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
2579 		resettotop = 1;
2580 	}
2581 	for (laddr = inp->next_addr_touse; laddr;
2582 	     laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2583 		if (laddr->ifa == NULL) {
2584 			/* address has been removed */
2585 			continue;
2586 		}
2587 		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2588 			/* address is being deleted */
2589 			continue;
2590 		}
2591 		sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop,
2592 						  dest_is_priv, fam);
2593 		if (sifa == NULL)
2594 			continue;
2595 		atomic_add_int(&sifa->refcount, 1);
2596 		return (sifa);
2597 	}
2598 	if (resettotop == 0) {
2599 		inp->next_addr_touse = NULL;
2600 		goto once_again;
2601 	}
2602 
2603 	inp->next_addr_touse = starting_point;
2604 	resettotop = 0;
2605  once_again_too:
2606 	if (inp->next_addr_touse == NULL) {
2607 		inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
2608 		resettotop = 1;
2609 	}
2610 
2611 	/* ok, what about an acceptable address in the inp */
2612 	for (laddr = inp->next_addr_touse; laddr;
2613 	     laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2614 		if (laddr->ifa == NULL) {
2615 			/* address has been removed */
2616 			continue;
2617 		}
2618 		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2619 			/* address is being deleted */
2620 			continue;
2621 		}
2622 		sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
2623 						   dest_is_priv, fam);
2624 		if (sifa == NULL)
2625 			continue;
2626 		atomic_add_int(&sifa->refcount, 1);
2627 		return (sifa);
2628 	}
2629 	if (resettotop == 0) {
2630 		inp->next_addr_touse = NULL;
2631 		goto once_again_too;
2632 	}
2633 
2634 	/*
	 * No bound address can be a source for the destination; we are in
	 * trouble.
2637 	 */
2638 	return (NULL);
2639 }
2640 
2641 
2642 
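/*
 * Like sctp_choose_boundspecific_inp(), but for an existing association:
 * addresses on the association's restricted list are skipped (unless
 * non_asoc_addr_ok is set and the address is still pending), and the
 * search rotates through asoc.last_used_address so the source addresses
 * get spread across the bound addresses.
 */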
static struct sctp_ifa *
sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp,
			       struct sctp_tcb *stcb,
			       sctp_route_t *ro,
			       uint32_t vrf_id,
			       uint8_t dest_is_priv,
			       uint8_t dest_is_loop,
			       int non_asoc_addr_ok,
			       sa_family_t fam)
{
2653 	struct sctp_laddr *laddr, *starting_point;
2654 	void *ifn;
2655 	struct sctp_ifn *sctp_ifn;
2656 	struct sctp_ifa *sctp_ifa, *sifa;
2657 	uint8_t start_at_beginning = 0;
2658 	struct sctp_vrf *vrf;
2659 	uint32_t ifn_index;
2660 
2661 	/*
2662 	 * first question, is the ifn we will emit on in our list, if so, we
2663 	 * want that one.
2664 	 */
2665 	vrf = sctp_find_vrf(vrf_id);
2666 	if (vrf == NULL)
2667 		return (NULL);
2668 
2669 	ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2670 	ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
	sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2672 
2673 	/*
2674 	 * first question, is the ifn we will emit on in our list?  If so,
2675 	 * we want that one. First we look for a preferred. Second, we go
2676 	 * for an acceptable.
2677 	 */
2678 	if (sctp_ifn) {
2679 		/* first try for a preferred address on the ep */
2680 		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2681 #if defined(__FreeBSD__)
2682 #ifdef INET
2683 			if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
2684 			    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2685 			                      &sctp_ifa->address.sin.sin_addr) != 0)) {
2686 				continue;
2687 			}
2688 #endif
2689 #ifdef INET6
2690 			if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
2691 			    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2692 			                      &sctp_ifa->address.sin6.sin6_addr) != 0)) {
2693 				continue;
2694 			}
2695 #endif
2696 #endif
2697 			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
2698 				continue;
2699 			if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
2700 				sifa = sctp_is_ifa_addr_preferred(sctp_ifa, dest_is_loop, dest_is_priv, fam);
2701 				if (sifa == NULL)
2702 					continue;
2703 				if (((non_asoc_addr_ok == 0) &&
2704 				     (sctp_is_addr_restricted(stcb, sifa))) ||
2705 				    (non_asoc_addr_ok &&
2706 				     (sctp_is_addr_restricted(stcb, sifa)) &&
2707 				     (!sctp_is_addr_pending(stcb, sifa)))) {
2708 					/* on the no-no list */
2709 					continue;
2710 				}
2711 				atomic_add_int(&sifa->refcount, 1);
2712 				return (sifa);
2713 			}
2714 		}
2715 		/* next try for an acceptable address on the ep */
2716 		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2717 #if defined(__FreeBSD__)
2718 #ifdef INET
2719 			if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
2720 			    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2721 			                      &sctp_ifa->address.sin.sin_addr) != 0)) {
2722 				continue;
2723 			}
2724 #endif
2725 #ifdef INET6
2726 			if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
2727 			    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2728 			                      &sctp_ifa->address.sin6.sin6_addr) != 0)) {
2729 				continue;
2730 			}
2731 #endif
2732 #endif
2733 			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
2734 				continue;
2735 			if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
2736 				sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv, fam);
2737 				if (sifa == NULL)
2738 					continue;
2739 				if (((non_asoc_addr_ok == 0) &&
2740 				     (sctp_is_addr_restricted(stcb, sifa))) ||
2741 				    (non_asoc_addr_ok &&
2742 				     (sctp_is_addr_restricted(stcb, sifa)) &&
2743 				     (!sctp_is_addr_pending(stcb, sifa)))) {
2744 					/* on the no-no list */
2745 					continue;
2746 				}
2747 				atomic_add_int(&sifa->refcount, 1);
2748 				return (sifa);
2749 			}
2750 		}
2751 
2752 	}
2753 	/*
2754 	 * if we can't find one like that, then we must look at all the
2755 	 * bound addresses and pick one: first a preferred one, then an
2756 	 * acceptable one.
2757 	 */
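	/*
	 * The rotation below runs two passes over the endpoint's
	 * bound-address list, each starting at asoc.last_used_address and
	 * wrapping once to the list head (tracked by start_at_beginning).
	 * The first pass accepts only preferred addresses, the second any
	 * acceptable address; restricted/pending addresses are skipped in
	 * both.  last_used_address is advanced on success, giving a simple
	 * round-robin over the bound addresses.
	 */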
2758 	starting_point = stcb->asoc.last_used_address;
2759  sctp_from_the_top:
2760 	if (stcb->asoc.last_used_address == NULL) {
2761 		start_at_beginning = 1;
2762 		stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
2763 	}
2764 	/* search beginning with the last used address */
2765 	for (laddr = stcb->asoc.last_used_address; laddr;
2766 	     laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2767 		if (laddr->ifa == NULL) {
2768 			/* address has been removed */
2769 			continue;
2770 		}
2771 		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2772 			/* address is being deleted */
2773 			continue;
2774 		}
2775 		sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop, dest_is_priv, fam);
2776 		if (sifa == NULL)
2777 			continue;
2778 		if (((non_asoc_addr_ok == 0) &&
2779 		     (sctp_is_addr_restricted(stcb, sifa))) ||
2780 		    (non_asoc_addr_ok &&
2781 		     (sctp_is_addr_restricted(stcb, sifa)) &&
2782 		     (!sctp_is_addr_pending(stcb, sifa)))) {
2783 			/* on the no-no list */
2784 			continue;
2785 		}
2786 		stcb->asoc.last_used_address = laddr;
2787 		atomic_add_int(&sifa->refcount, 1);
2788 		return (sifa);
2789 	}
2790 	if (start_at_beginning == 0) {
2791 		stcb->asoc.last_used_address = NULL;
2792 		goto sctp_from_the_top;
2793 	}
2794 	/* now try for any higher scope than the destination */
2795 	stcb->asoc.last_used_address = starting_point;
2796 	start_at_beginning = 0;
2797  sctp_from_the_top2:
2798 	if (stcb->asoc.last_used_address == NULL) {
2799 		start_at_beginning = 1;
2800 		stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
2801 	}
2802 	/* search beginning with the last used address */
2803 	for (laddr = stcb->asoc.last_used_address; laddr;
2804 	     laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2805 		if (laddr->ifa == NULL) {
2806 			/* address has been removed */
2807 			continue;
2808 		}
2809 		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2810 			/* address is being deleted */
2811 			continue;
2812 		}
2813 		sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
2814 						   dest_is_priv, fam);
2815 		if (sifa == NULL)
2816 			continue;
2817 		if (((non_asoc_addr_ok == 0) &&
2818 		     (sctp_is_addr_restricted(stcb, sifa))) ||
2819 		    (non_asoc_addr_ok &&
2820 		     (sctp_is_addr_restricted(stcb, sifa)) &&
2821 		     (!sctp_is_addr_pending(stcb, sifa)))) {
2822 			/* on the no-no list */
2823 			continue;
2824 		}
2825 		stcb->asoc.last_used_address = laddr;
2826 		atomic_add_int(&sifa->refcount, 1);
2827 		return (sifa);
2828 	}
2829 	if (start_at_beginning == 0) {
2830 		stcb->asoc.last_used_address = NULL;
2831 		goto sctp_from_the_top2;
2832 	}
2833 	return (NULL);
2834 }
2835 
2836 static struct sctp_ifa *
2837 sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn,
2838 #if defined(__FreeBSD__)
2839                                                  struct sctp_inpcb *inp,
2840 #else
2841                                                  struct sctp_inpcb *inp SCTP_UNUSED,
2842 #endif
2843 						 struct sctp_tcb *stcb,
2844 						 int non_asoc_addr_ok,
2845 						 uint8_t dest_is_loop,
2846 						 uint8_t dest_is_priv,
2847 						 int addr_wanted,
2848 						 sa_family_t fam,
2849 						 sctp_route_t *ro
2850 						 )
2851 {
2852 	struct sctp_ifa *ifa, *sifa;
2853 	int num_eligible_addr = 0;
2854 #ifdef INET6
2855 #ifdef SCTP_EMBEDDED_V6_SCOPE
2856 	struct sockaddr_in6 sin6, lsa6;
2857 
2858 	if (fam == AF_INET6) {
2859 		memcpy(&sin6, &ro->ro_dst, sizeof(struct sockaddr_in6));
2860 #ifdef SCTP_KAME
2861 		(void)sa6_recoverscope(&sin6);
2862 #else
2863 		(void)in6_recoverscope(&sin6, &sin6.sin6_addr, NULL);
2864 #endif  /* SCTP_KAME */
2865 	}
2866 #endif  /* SCTP_EMBEDDED_V6_SCOPE */
2867 #endif	/* INET6 */
2868 	LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
2869 #if defined(__FreeBSD__)
2870 #ifdef INET
2871 		if ((ifa->address.sa.sa_family == AF_INET) &&
2872 		    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2873 		                      &ifa->address.sin.sin_addr) != 0)) {
2874 			continue;
2875 		}
2876 #endif
2877 #ifdef INET6
2878 		if ((ifa->address.sa.sa_family == AF_INET6) &&
2879 		    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2880 		                      &ifa->address.sin6.sin6_addr) != 0)) {
2881 			continue;
2882 		}
2883 #endif
2884 #endif
2885 		if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2886 		    (non_asoc_addr_ok == 0))
2887 			continue;
2888 		sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
2889 						  dest_is_priv, fam);
2890 		if (sifa == NULL)
2891 			continue;
2892 #ifdef INET6
2893 		if (fam == AF_INET6 &&
2894 		    dest_is_loop &&
2895 		    sifa->src_is_loop && sifa->src_is_priv) {
2896 			/* don't allow fe80::1 to be a source for loopback ::1; we don't
2897 			 * list it to the peer, so we would get an abort.
2898 			 */
2899 			continue;
2900 		}
2901 #ifdef SCTP_EMBEDDED_V6_SCOPE
2902 		if (fam == AF_INET6 &&
2903 		    IN6_IS_ADDR_LINKLOCAL(&sifa->address.sin6.sin6_addr) &&
2904 		    IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) {
2905 			/* link-local <-> link-local must belong to the same scope. */
2906 			memcpy(&lsa6, &sifa->address.sin6, sizeof(struct sockaddr_in6));
2907 #ifdef SCTP_KAME
2908 			(void)sa6_recoverscope(&lsa6);
2909 #else
2910 			(void)in6_recoverscope(&lsa6, &lsa6.sin6_addr, NULL);
2911 #endif  /* SCTP_KAME */
2912 			if (sin6.sin6_scope_id != lsa6.sin6_scope_id) {
2913 				continue;
2914 			}
2915 		}
2916 #endif  /* SCTP_EMBEDDED_V6_SCOPE */
2917 #endif	/* INET6 */
2918 
2919 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Userspace__)
2920 		/* Check if the IPv6 address matches the next-hop.
2921 		   In the mobile case, an old IPv6 address may not have been
2922 		   deleted from the interface, so the interface carries both
2923 		   the previous and the new addresses.  We should use the one
2924 		   corresponding to the next-hop.  (by micchie)
2925 		 */
2926 #ifdef INET6
2927 		if (stcb && fam == AF_INET6 &&
2928 		    sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
2929 			if (sctp_v6src_match_nexthop(&sifa->address.sin6, ro)
2930 			    == 0) {
2931 				continue;
2932 			}
2933 		}
2934 #endif
2935 #ifdef INET
2936 		/* Avoid topologically incorrect IPv4 address */
2937 		if (stcb && fam == AF_INET &&
2938 		    sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
2939 			if (sctp_v4src_match_nexthop(sifa, ro) == 0) {
2940 				continue;
2941 			}
2942 		}
2943 #endif
2944 #endif
2945 		if (stcb) {
2946 			if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
2947 				continue;
2948 			}
2949 			if (((non_asoc_addr_ok == 0) &&
2950 			     (sctp_is_addr_restricted(stcb, sifa))) ||
2951 			    (non_asoc_addr_ok &&
2952 			     (sctp_is_addr_restricted(stcb, sifa)) &&
2953 			     (!sctp_is_addr_pending(stcb, sifa)))) {
2954 				/*
2955 				 * It is restricted for some reason..
2956 				 * probably not yet added.
2957 				 */
2958 				continue;
2959 			}
2960 		}
2961 		if (num_eligible_addr >= addr_wanted) {
2962 			return (sifa);
2963 		}
2964 		num_eligible_addr++;
2965 	}
2966 	return (NULL);
2967 }
2968 
2969 
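/*
 * Count how many addresses on the given ifn pass the basic eligibility
 * tests (FreeBSD jail checks, SCTP_ADDR_DEFER_USE, preferred-ness,
 * association scope and restriction checks).  sctp_choose_boundall() uses
 * this count to wrap its rotating index before asking for the nth
 * preferred address on the ifn.
 */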
2970 static int
2971 sctp_count_num_preferred_boundall(struct sctp_ifn *ifn,
2972 #if defined(__FreeBSD__)
2973                                   struct sctp_inpcb *inp,
2974 #else
2975                                   struct sctp_inpcb *inp SCTP_UNUSED,
2976 #endif
2977 				  struct sctp_tcb *stcb,
2978 				  int non_asoc_addr_ok,
2979 				  uint8_t dest_is_loop,
2980 				  uint8_t dest_is_priv,
2981 				  sa_family_t fam)
2982 {
2983 	struct sctp_ifa *ifa, *sifa;
2984 	int num_eligible_addr = 0;
2985 
2986 	LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
2987 #if defined(__FreeBSD__)
2988 #ifdef INET
2989 		if ((ifa->address.sa.sa_family == AF_INET) &&
2990 		    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2991 		                      &ifa->address.sin.sin_addr) != 0)) {
2992 			continue;
2993 		}
2994 #endif
2995 #ifdef INET6
2996 		if ((ifa->address.sa.sa_family == AF_INET6) &&
2997 		    (stcb != NULL) &&
2998 		    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2999 		                      &ifa->address.sin6.sin6_addr) != 0)) {
3000 			continue;
3001 		}
3002 #endif
3003 #endif
3004 		if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3005 		    (non_asoc_addr_ok == 0)) {
3006 			continue;
3007 		}
3008 		sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
3009 						  dest_is_priv, fam);
3010 		if (sifa == NULL) {
3011 			continue;
3012 		}
3013 		if (stcb) {
3014 			if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
3015 				continue;
3016 			}
3017 			if (((non_asoc_addr_ok == 0) &&
3018 			     (sctp_is_addr_restricted(stcb, sifa))) ||
3019 			    (non_asoc_addr_ok &&
3020 			     (sctp_is_addr_restricted(stcb, sifa)) &&
3021 			     (!sctp_is_addr_pending(stcb, sifa)))) {
3022 				/*
3023 				 * It is restricted for some reason..
3024 				 * probably not yet added.
3025 				 */
3026 				continue;
3027 			}
3028 		}
3029 		num_eligible_addr++;
3030 	}
3031 	return (num_eligible_addr);
3032 }
3033 
3034 static struct sctp_ifa *
3035 sctp_choose_boundall(struct sctp_inpcb *inp,
3036                      struct sctp_tcb *stcb,
3037 		     struct sctp_nets *net,
3038 		     sctp_route_t *ro,
3039 		     uint32_t vrf_id,
3040 		     uint8_t dest_is_priv,
3041 		     uint8_t dest_is_loop,
3042 		     int non_asoc_addr_ok,
3043 		     sa_family_t fam)
3044 {
3045 	int cur_addr_num = 0, num_preferred = 0;
3046 	void *ifn;
3047 	struct sctp_ifn *sctp_ifn, *looked_at = NULL, *emit_ifn;
3048 	struct sctp_ifa *sctp_ifa, *sifa;
3049 	uint32_t ifn_index;
3050 	struct sctp_vrf *vrf;
3051 #ifdef INET
3052 	int retried = 0;
3053 #endif
3054 
3055 	/*-
3056 	 * For boundall we can use any address in the association.
3057 	 * If non_asoc_addr_ok is set we can use any address (at least in
3058 	 * theory). So we look for preferred addresses first. If we find one,
3059 	 * we use it. Otherwise we next try to get an address on the
3060 	 * interface, which we should be able to do (unless non_asoc_addr_ok
3061 	 * is false and we are routed out that way). In these cases where we
3062 	 * can't use the address of the interface, we go through all the
3063 	 * ifn's looking for an address we can use and fill that in.
3064 	 * Punting means we send back address 0, which will probably cause
3065 	 * problems, since IP will then fill in the address of the route
3066 	 * ifn, which we probably already rejected; i.e. here comes an
3067 	 * abort :-<.
3068 	 */
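	/*
	 * Concretely, the search below proceeds through these plans:
	 * Plan A: a preferred address on the interface the route says we
	 *         will emit on, rotating via net->indx_of_eligible_next_to_use.
	 * Plan B: a preferred address on any other interface in the VRF.
	 * Plan C: an acceptable address on the emit interface.
	 * Plan D: an acceptable address on any interface.
	 * If even that fails and the association's IPv4 local scope was off,
	 * plans C and D are retried once with private IPv4 addresses allowed
	 * (again_with_private_addresses_allowed).
	 */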
3069 	vrf = sctp_find_vrf(vrf_id);
3070 	if (vrf == NULL)
3071 		return (NULL);
3072 
3073 	ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
3074 	ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
3075 	SCTPDBG(SCTP_DEBUG_OUTPUT2,"ifn from route:%p ifn_index:%d\n", ifn, ifn_index);
3076 	emit_ifn = looked_at = sctp_ifn = sctp_find_ifn(ifn, ifn_index);
3077 	if (sctp_ifn == NULL) {
3078 		/* ?? We don't have this guy ?? */
3079 		SCTPDBG(SCTP_DEBUG_OUTPUT2,"No ifn emit interface?\n");
3080 		goto bound_all_plan_b;
3081 	}
3082 	SCTPDBG(SCTP_DEBUG_OUTPUT2,"ifn_index:%d name:%s is emit interface\n",
3083 		ifn_index, sctp_ifn->ifn_name);
3084 
3085 	if (net) {
3086 		cur_addr_num = net->indx_of_eligible_next_to_use;
3087 	}
3088 	num_preferred = sctp_count_num_preferred_boundall(sctp_ifn,
3089 							  inp, stcb,
3090 							  non_asoc_addr_ok,
3091 							  dest_is_loop,
3092 							  dest_is_priv, fam);
3093 	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Found %d preferred source addresses for intf:%s\n",
3094 		num_preferred, sctp_ifn->ifn_name);
3095 	if (num_preferred == 0) {
3096 		/*
3097 		 * no eligible addresses, we must use some other interface
3098 		 * address if we can find one.
3099 		 */
3100 		goto bound_all_plan_b;
3101 	}
3102 	/*
3103 	 * OK, num_preferred is set to how many addresses we can use; this
3104 	 * may vary from call to call due to addresses being deprecated,
3105 	 * etc.
3106 	 */
3107 	if (cur_addr_num >= num_preferred) {
3108 		cur_addr_num = 0;
3109 	}
3110 	/*
3111 	 * select the nth address from the list (where cur_addr_num is the
3112 	 * nth) and 0 is the first one, 1 is the second one etc...
3113 	 */
3114 	SCTPDBG(SCTP_DEBUG_OUTPUT2, "cur_addr_num:%d\n", cur_addr_num);
3115 
3116 	sctp_ifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok, dest_is_loop,
3117                                                                     dest_is_priv, cur_addr_num, fam, ro);
3118 
3119 	/* if sctp_ifa is NULL, something changed; fall back to plan B. */
3120 	if (sctp_ifa) {
3121 		atomic_add_int(&sctp_ifa->refcount, 1);
3122 		if (net) {
3123 			/* save off the index of the next one we will want */
3124 			net->indx_of_eligible_next_to_use = cur_addr_num + 1;
3125 		}
3126 		return (sctp_ifa);
3127 	}
3128 	/*
3129 	 * plan_b: Look at all interfaces and find a preferred address. If
3130 	 * no preferred fall through to plan_c.
3131 	 */
3132  bound_all_plan_b:
3133 	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan B\n");
3134 	LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3135 		SCTPDBG(SCTP_DEBUG_OUTPUT2, "Examine interface %s\n",
3136 			sctp_ifn->ifn_name);
3137 		if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3138 			/* wrong base scope */
3139 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "skip\n");
3140 			continue;
3141 		}
3142 		if ((sctp_ifn == looked_at) && looked_at) {
3143 			/* already looked at this guy */
3144 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "already seen\n");
3145 			continue;
3146 		}
3147 		num_preferred = sctp_count_num_preferred_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok,
3148                                                                   dest_is_loop, dest_is_priv, fam);
3149 		SCTPDBG(SCTP_DEBUG_OUTPUT2,
3150 			"Found ifn:%p %d preferred source addresses\n",
3151 			ifn, num_preferred);
3152 		if (num_preferred == 0) {
3153 			/* None on this interface. */
3154 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "No preferred -- skipping to next\n");
3155 			continue;
3156 		}
3157 		SCTPDBG(SCTP_DEBUG_OUTPUT2,
3158 			"num preferred:%d on interface:%p cur_addr_num:%d\n",
3159 			num_preferred, (void *)sctp_ifn, cur_addr_num);
3160 
3161 		/*
3162 		 * OK, num_preferred is set to how many addresses we can
3163 		 * use; this may vary from call to call due to addresses
3164 		 * being deprecated, etc.
3165 		 */
3166 		if (cur_addr_num >= num_preferred) {
3167 			cur_addr_num = 0;
3168 		}
3169 		sifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok, dest_is_loop,
3170                                                                         dest_is_priv, cur_addr_num, fam, ro);
3171 		if (sifa == NULL)
3172 			continue;
3173 		if (net) {
3174 			net->indx_of_eligible_next_to_use = cur_addr_num + 1;
3175 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "we selected %d\n",
3176 				cur_addr_num);
3177 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "Source:");
3178 			SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
3179 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "Dest:");
3180 			SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &net->ro._l_addr.sa);
3181 		}
3182 		atomic_add_int(&sifa->refcount, 1);
3183 		return (sifa);
3184 	}
3185 #ifdef INET
3186 again_with_private_addresses_allowed:
3187 #endif
3188 	/* plan_c: do we have an acceptable address on the emit interface */
3189 	sifa = NULL;
3190 	SCTPDBG(SCTP_DEBUG_OUTPUT2,"Trying Plan C: find acceptable on interface\n");
3191 	if (emit_ifn == NULL) {
3192 		SCTPDBG(SCTP_DEBUG_OUTPUT2,"Jump to Plan D - no emit_ifn\n");
3193 		goto plan_d;
3194 	}
3195 	LIST_FOREACH(sctp_ifa, &emit_ifn->ifalist, next_ifa) {
3196 		SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifa:%p\n", (void *)sctp_ifa);
3197 #if defined(__FreeBSD__)
3198 #ifdef INET
3199 		if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3200 		    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3201 		                      &sctp_ifa->address.sin.sin_addr) != 0)) {
3202 			SCTPDBG(SCTP_DEBUG_OUTPUT2,"Jailed\n");
3203 			continue;
3204 		}
3205 #endif
3206 #ifdef INET6
3207 		if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3208 		    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3209 		                      &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3210 			SCTPDBG(SCTP_DEBUG_OUTPUT2,"Jailed\n");
3211 			continue;
3212 		}
3213 #endif
3214 #endif
3215 		if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3216 		    (non_asoc_addr_ok == 0)) {
3217 			SCTPDBG(SCTP_DEBUG_OUTPUT2,"Defer\n");
3218 			continue;
3219 		}
3220 		sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop,
3221 						   dest_is_priv, fam);
3222 		if (sifa == NULL) {
3223 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "IFA not acceptable\n");
3224 			continue;
3225 		}
3226 		if (stcb) {
3227 			if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) {
3228 				SCTPDBG(SCTP_DEBUG_OUTPUT2, "NOT in scope\n");
3229 				sifa = NULL;
3230 				continue;
3231 			}
3232 			if (((non_asoc_addr_ok == 0) &&
3233 			     (sctp_is_addr_restricted(stcb, sifa))) ||
3234 			    (non_asoc_addr_ok &&
3235 			     (sctp_is_addr_restricted(stcb, sifa)) &&
3236 			     (!sctp_is_addr_pending(stcb, sifa)))) {
3237 				/*
3238 				 * It is restricted for some
3239 				 * reason.. probably not yet added.
3240 				 */
3241 				SCTPDBG(SCTP_DEBUG_OUTPUT2, "It's restricted\n");
3242 				sifa = NULL;
3243 				continue;
3244 			}
3245 		} else {
3246 			SCTP_PRINTF("Stcb is null - no print\n");
3247 		}
3248 		atomic_add_int(&sifa->refcount, 1);
3249 		goto out;
3250 	}
3251  plan_d:
3252 	/*
3253 	 * plan_d: We are in trouble. No preferred address on the emit
3254 	 * interface. And not even a preferred address on all interfaces.
3255 	 * Go out and see if we can find an acceptable address somewhere
3256 	 * amongst all interfaces.
3257 	 */
3258 	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan D looked_at is %p\n", (void *)looked_at);
3259 	LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3260 		if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3261 			/* wrong base scope */
3262 			continue;
3263 		}
3264 		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
3265 #if defined(__FreeBSD__)
3266 #ifdef INET
3267 			if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3268 			    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3269 			                      &sctp_ifa->address.sin.sin_addr) != 0)) {
3270 				continue;
3271 			}
3272 #endif
3273 #ifdef INET6
3274 			if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3275 			    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3276 			                      &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3277 				continue;
3278 			}
3279 #endif
3280 #endif
3281 			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3282 			    (non_asoc_addr_ok == 0))
3283 				continue;
3284 			sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
3285 							   dest_is_loop,
3286 							   dest_is_priv, fam);
3287 			if (sifa == NULL)
3288 				continue;
3289 			if (stcb) {
3290 				if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) {
3291 					sifa = NULL;
3292 					continue;
3293 				}
3294 				if (((non_asoc_addr_ok == 0) &&
3295 				     (sctp_is_addr_restricted(stcb, sifa))) ||
3296 				    (non_asoc_addr_ok &&
3297 				     (sctp_is_addr_restricted(stcb, sifa)) &&
3298 				     (!sctp_is_addr_pending(stcb, sifa)))) {
3299 					/*
3300 					 * It is restricted for some
3301 					 * reason.. probably not yet added.
3302 					 */
3303 					sifa = NULL;
3304 					continue;
3305 				}
3306 			}
3307 			goto out;
3308 		}
3309 	}
3310 #ifdef INET
3311 	if ((stcb != NULL) && (retried == 0) && (stcb->asoc.scope.ipv4_local_scope == 0)) {
3312 		stcb->asoc.scope.ipv4_local_scope = 1;
3313 		retried = 1;
3314 		goto again_with_private_addresses_allowed;
3315 	} else if (retried == 1) {
3316 		stcb->asoc.scope.ipv4_local_scope = 0;
3317 	}
3318 #endif
3319 out:
3320 #ifdef INET
3321 	if (sifa) {
3322 		if (retried == 1) {
3323 			LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3324 				if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3325 					/* wrong base scope */
3326 					continue;
3327 				}
3328 				LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
3329 					struct sctp_ifa *tmp_sifa;
3330 
3331 #if defined(__FreeBSD__)
3332 #ifdef INET
3333 					if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3334 					    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3335 					                      &sctp_ifa->address.sin.sin_addr) != 0)) {
3336 						continue;
3337 					}
3338 #endif
3339 #ifdef INET6
3340 					if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3341 					    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3342 					                      &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3343 						continue;
3344 					}
3345 #endif
3346 #endif
3347 					if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3348 					    (non_asoc_addr_ok == 0))
3349 						continue;
3350 					tmp_sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
3351 					                                       dest_is_loop,
3352 					                                       dest_is_priv, fam);
3353 					if (tmp_sifa == NULL) {
3354 						continue;
3355 					}
3356 					if (tmp_sifa == sifa) {
3357 						continue;
3358 					}
3359 					if (stcb) {
3360 						if (sctp_is_address_in_scope(tmp_sifa,
3361 						                             &stcb->asoc.scope, 0) == 0) {
3362 							continue;
3363 						}
3364 						if (((non_asoc_addr_ok == 0) &&
3365 						     (sctp_is_addr_restricted(stcb, tmp_sifa))) ||
3366 						    (non_asoc_addr_ok &&
3367 						     (sctp_is_addr_restricted(stcb, tmp_sifa)) &&
3368 						     (!sctp_is_addr_pending(stcb, tmp_sifa)))) {
3369 							/*
3370 							 * It is restricted for some
3371 							 * reason.. probably not yet added.
3372 							 */
3373 							continue;
3374 						}
3375 					}
3376 					if ((tmp_sifa->address.sin.sin_family == AF_INET) &&
3377 					    (IN4_ISPRIVATE_ADDRESS(&(tmp_sifa->address.sin.sin_addr)))) {
3378 						sctp_add_local_addr_restricted(stcb, tmp_sifa);
3379 					}
3380 				}
3381 			}
3382 		}
3383 		atomic_add_int(&sifa->refcount, 1);
3384 	}
3385 #endif
3386 	return (sifa);
3387 }
3388 
3389 
3390 
3391 /* tcb may be NULL */
3392 struct sctp_ifa *
3393 sctp_source_address_selection(struct sctp_inpcb *inp,
3394 			      struct sctp_tcb *stcb,
3395 			      sctp_route_t *ro,
3396 			      struct sctp_nets *net,
3397 			      int non_asoc_addr_ok, uint32_t vrf_id)
3398 {
3399 	struct sctp_ifa *answer;
3400 	uint8_t dest_is_priv, dest_is_loop;
3401 	sa_family_t fam;
3402 #ifdef INET
3403 	struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst;
3404 #endif
3405 #ifdef INET6
3406 	struct sockaddr_in6 *to6 = (struct sockaddr_in6 *)&ro->ro_dst;
3407 #endif
3408 
3409 	/**
3410 	 * Rules: - Find the route if needed, cache if I can. - Look at
3411 	 * interface address in route, Is it in the bound list. If so we
3412 	 * have the best source. - If not we must rotate amongst the
3413 	 * addresses.
3414 	 *
3415 	 * Caveats and issues
3416 	 *
3417 	 * Do we need to pay attention to scope. We can have a private address
3418 	 * or a global address we are sourcing or sending to. So if we draw
3419 	 * it out
3420 	 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3421 	 * For V4
3422 	 * ------------------------------------------
3423 	 *      source     *      dest  *  result
3424 	 * -----------------------------------------
3425 	 * <a>  Private    *    Global  *  NAT
3426 	 * -----------------------------------------
3427 	 * <b>  Private    *    Private *  No problem
3428 	 * -----------------------------------------
3429 	 * <c>  Global     *    Private *  Huh, How will this work?
3430 	 * -----------------------------------------
3431 	 * <d>  Global     *    Global  *  No Problem
3432 	 *------------------------------------------
3433 	 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3434 	 * For V6
3435 	 *------------------------------------------
3436 	 *      source     *      dest  *  result
3437 	 * -----------------------------------------
3438 	 * <a>  Linklocal  *    Global  *
3439 	 * -----------------------------------------
3440 	 * <b>  Linklocal  * Linklocal  *  No problem
3441 	 * -----------------------------------------
3442 	 * <c>  Global     * Linklocal  *  Huh, How will this work?
3443 	 * -----------------------------------------
3444 	 * <d>  Global     *    Global  *  No Problem
3445 	 *------------------------------------------
3446 	 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3447 	 *
3448 	 * And then we add to that what happens if there are multiple addresses
3449 	 * assigned to an interface. Remember the ifa's on an ifn form a
3450 	 * linked list of addresses, so one interface can have more than one
3451 	 * IP address. What happens if we have both a private and a global
3452 	 * address? Do we then use the context of the destination to sort
3453 	 * out which one is best? And what about NATs: sending P->G may get
3454 	 * you a NAT translation, or should you prefer the G that is on the
3455 	 * interface?
3456 	 *
3457 	 * Decisions:
3458 	 *
3459 	 * - count the number of addresses on the interface.
3460 	 * - if it is one, no problem except case <c>.
3461 	 *   For <a> we will assume a NAT out there.
3462 	 * - if there are more than one, then we need to worry about scope P
3463 	 *   or G. We should prefer G -> G and P -> P if possible.
3464 	 *   Then, as a secondary choice, fall back to mixed types, with
3465 	 *   G->P being a last-ditch one.
3466 	 * - The above all works for bound-all, but for bound-specific we
3467 	 *   use the same concept and only consider the bound addresses.
3468 	 *   If the bound set is NOT assigned to the interface, then we
3469 	 *   must rotate amongst the bound addresses.
3470 	 */
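	/*
	 * Note for callers: the selection routines below return the chosen
	 * sctp_ifa with a reference held for the caller, which must be
	 * dropped with sctp_free_ifa() when the address is no longer needed
	 * (see the handling of net->ro._s_addr and _lsrc in
	 * sctp_lowlevel_chunk_output()).
	 */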
3471 	if (ro->ro_rt == NULL) {
3472 		/*
3473 		 * Need a route to cache.
3474 		 */
3475 		SCTP_RTALLOC(ro, vrf_id);
3476 	}
3477 	if (ro->ro_rt == NULL) {
3478 		return (NULL);
3479 	}
3480 #if defined(__Userspace_os_Windows)
3481 	/* On Windows the sa_family is U_SHORT or ADDRESS_FAMILY */
3482 	fam = (sa_family_t)ro->ro_dst.sa_family;
3483 #else
3484 	fam = ro->ro_dst.sa_family;
3485 #endif
3486 	dest_is_priv = dest_is_loop = 0;
3487 	/* Setup our scopes for the destination */
3488 	switch (fam) {
3489 #ifdef INET
3490 	case AF_INET:
3491 		/* Scope based on outbound address */
3492 		if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
3493 			dest_is_loop = 1;
3494 			if (net != NULL) {
3495 				/* mark it as local */
3496 				net->addr_is_local = 1;
3497 			}
3498 		} else if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) {
3499 			dest_is_priv = 1;
3500 		}
3501 		break;
3502 #endif
3503 #ifdef INET6
3504 	case AF_INET6:
3505 		/* Scope based on outbound address */
3506 #if defined(__Userspace_os_Windows)
3507 		if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr)) {
3508 #else
3509 		if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr) ||
3510 		    SCTP_ROUTE_IS_REAL_LOOP(ro)) {
3511 #endif
3512 			/*
3513 			 * If the address is a loopback address, which
3514 			 * consists of "::1" OR "fe80::1%lo0", we are loopback
3515 			 * scope. But we don't use dest_is_priv (link local
3516 			 * addresses).
3517 			 */
3518 			dest_is_loop = 1;
3519 			if (net != NULL) {
3520 				/* mark it as local */
3521 				net->addr_is_local = 1;
3522 			}
3523 		} else if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
3524 			dest_is_priv = 1;
3525 		}
3526 		break;
3527 #endif
3528 	}
3529 	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Select source addr for:");
3530 	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&ro->ro_dst);
3531 	SCTP_IPI_ADDR_RLOCK();
3532 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3533 		/*
3534 		 * Bound all case
3535 		 */
3536 		answer = sctp_choose_boundall(inp, stcb, net, ro, vrf_id,
3537 					      dest_is_priv, dest_is_loop,
3538 					      non_asoc_addr_ok, fam);
3539 		SCTP_IPI_ADDR_RUNLOCK();
3540 		return (answer);
3541 	}
3542 	/*
3543 	 * Subset bound case
3544 	 */
3545 	if (stcb) {
3546 		answer = sctp_choose_boundspecific_stcb(inp, stcb, ro,
3547 							vrf_id,	dest_is_priv,
3548 							dest_is_loop,
3549 							non_asoc_addr_ok, fam);
3550 	} else {
3551 		answer = sctp_choose_boundspecific_inp(inp, ro, vrf_id,
3552 						       non_asoc_addr_ok,
3553 						       dest_is_priv,
3554 						       dest_is_loop, fam);
3555 	}
3556 	SCTP_IPI_ADDR_RUNLOCK();
3557 	return (answer);
3558 }
3559 
3560 static int
3561 sctp_find_cmsg(int c_type, void *data, struct mbuf *control, size_t cpsize)
3562 {
3563 #if defined(__Userspace_os_Windows)
3564 	WSACMSGHDR cmh;
3565 #else
3566 	struct cmsghdr cmh;
3567 #endif
3568 	int tlen, at, found;
3569 	struct sctp_sndinfo sndinfo;
3570 	struct sctp_prinfo prinfo;
3571 	struct sctp_authinfo authinfo;
3572 
3573 	tlen = SCTP_BUF_LEN(control);
3574 	at = 0;
3575 	found = 0;
3576 	/*
3577 	 * Independent of how many mbufs, find the c_type inside the control
3578 	 * structure and copy out the data.
3579 	 */
3580 	while (at < tlen) {
3581 		if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
3582 			/* There is not enough room for one more. */
3583 			return (found);
3584 		}
3585 		m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
3586 		if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3587 			/* We don't have a complete CMSG header. */
3588 			return (found);
3589 		}
3590 		if (((int)cmh.cmsg_len + at) > tlen) {
3591 			/* We don't have the complete CMSG. */
3592 			return (found);
3593 		}
3594 		if ((cmh.cmsg_level == IPPROTO_SCTP) &&
3595 		    ((c_type == cmh.cmsg_type) ||
3596 		     ((c_type == SCTP_SNDRCV) &&
3597 		      ((cmh.cmsg_type == SCTP_SNDINFO) ||
3598 		       (cmh.cmsg_type == SCTP_PRINFO) ||
3599 		       (cmh.cmsg_type == SCTP_AUTHINFO))))) {
3600 			if (c_type == cmh.cmsg_type) {
3601 				if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < cpsize) {
3602 					return (found);
3603 				}
3604 				/* It is exactly what we want. Copy it out. */
3605 				m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), cpsize, (caddr_t)data);
3606 				return (1);
3607 			} else {
3608 				struct sctp_sndrcvinfo *sndrcvinfo;
3609 
3610 				sndrcvinfo = (struct sctp_sndrcvinfo *)data;
3611 				if (found == 0) {
3612 					if (cpsize < sizeof(struct sctp_sndrcvinfo)) {
3613 						return (found);
3614 					}
3615 					memset(sndrcvinfo, 0, sizeof(struct sctp_sndrcvinfo));
3616 				}
3617 				switch (cmh.cmsg_type) {
3618 				case SCTP_SNDINFO:
3619 					if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_sndinfo)) {
3620 						return (found);
3621 					}
3622 					m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_sndinfo), (caddr_t)&sndinfo);
3623 					sndrcvinfo->sinfo_stream = sndinfo.snd_sid;
3624 					sndrcvinfo->sinfo_flags = sndinfo.snd_flags;
3625 					sndrcvinfo->sinfo_ppid = sndinfo.snd_ppid;
3626 					sndrcvinfo->sinfo_context = sndinfo.snd_context;
3627 					sndrcvinfo->sinfo_assoc_id = sndinfo.snd_assoc_id;
3628 					break;
3629 				case SCTP_PRINFO:
3630 					if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_prinfo)) {
3631 						return (found);
3632 					}
3633 					m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_prinfo), (caddr_t)&prinfo);
3634 					if (prinfo.pr_policy != SCTP_PR_SCTP_NONE) {
3635 						sndrcvinfo->sinfo_timetolive = prinfo.pr_value;
3636 					} else {
3637 						sndrcvinfo->sinfo_timetolive = 0;
3638 					}
3639 					sndrcvinfo->sinfo_flags |= prinfo.pr_policy;
3640 					break;
3641 				case SCTP_AUTHINFO:
3642 					if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_authinfo)) {
3643 						return (found);
3644 					}
3645 					m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_authinfo), (caddr_t)&authinfo);
3646 					sndrcvinfo->sinfo_keynumber_valid = 1;
3647 					sndrcvinfo->sinfo_keynumber = authinfo.auth_keynumber;
3648 					break;
3649 				default:
3650 					return (found);
3651 				}
3652 				found = 1;
3653 			}
3654 		}
3655 		at += CMSG_ALIGN(cmh.cmsg_len);
3656 	}
3657 	return (found);
3658 }
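
/*
 * For reference, a minimal sketch (RFC 6458 style, user space) of how a
 * sender typically builds the SCTP_SNDINFO ancillary data that
 * sctp_find_cmsg() parses above.  The surrounding msghdr handling is
 * illustrative only and not part of this file:
 *
 *	struct sctp_sndinfo snd;
 *	struct msghdr msg;
 *	struct cmsghdr *cmsg;
 *	char cbuf[CMSG_SPACE(sizeof(snd))];
 *
 *	memset(&snd, 0, sizeof(snd));
 *	snd.snd_sid = 1;
 *	snd.snd_ppid = htonl(42);
 *	memset(&msg, 0, sizeof(msg));
 *	memset(cbuf, 0, sizeof(cbuf));
 *	msg.msg_control = cbuf;
 *	msg.msg_controllen = sizeof(cbuf);
 *	cmsg = CMSG_FIRSTHDR(&msg);
 *	cmsg->cmsg_level = IPPROTO_SCTP;
 *	cmsg->cmsg_type = SCTP_SNDINFO;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(snd));
 *	memcpy(CMSG_DATA(cmsg), &snd, sizeof(snd));
 *	(fill in msg_name/msg_iov as usual and pass &msg to sendmsg())
 */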
3659 
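/*
 * Apply cmsgs that matter at INIT time to a freshly created stcb:
 * SCTP_INIT carries the sctp_initmsg parameters (possibly growing the
 * outgoing stream array), and SCTP_DSTADDRV4/SCTP_DSTADDRV6 add further
 * confirmed remote addresses.  Returns 1 and sets *error on failure,
 * 0 on success.
 */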
3660 static int
3661 sctp_process_cmsgs_for_init(struct sctp_tcb *stcb, struct mbuf *control, int *error)
3662 {
3663 #if defined(__Userspace_os_Windows)
3664 	WSACMSGHDR cmh;
3665 #else
3666 	struct cmsghdr cmh;
3667 #endif
3668 	int tlen, at;
3669 	struct sctp_initmsg initmsg;
3670 #ifdef INET
3671 	struct sockaddr_in sin;
3672 #endif
3673 #ifdef INET6
3674 	struct sockaddr_in6 sin6;
3675 #endif
3676 
3677 	tlen = SCTP_BUF_LEN(control);
3678 	at = 0;
3679 	while (at < tlen) {
3680 		if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
3681 			/* There is not enough room for one more. */
3682 			*error = EINVAL;
3683 			return (1);
3684 		}
3685 		m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
3686 		if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3687 			/* We don't have a complete CMSG header. */
3688 			*error = EINVAL;
3689 			return (1);
3690 		}
3691 		if (((int)cmh.cmsg_len + at) > tlen) {
3692 			/* We don't have the complete CMSG. */
3693 			*error = EINVAL;
3694 			return (1);
3695 		}
3696 		if (cmh.cmsg_level == IPPROTO_SCTP) {
3697 			switch (cmh.cmsg_type) {
3698 			case SCTP_INIT:
3699 				if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_initmsg)) {
3700 					*error = EINVAL;
3701 					return (1);
3702 				}
3703 				m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_initmsg), (caddr_t)&initmsg);
3704 				if (initmsg.sinit_max_attempts)
3705 					stcb->asoc.max_init_times = initmsg.sinit_max_attempts;
3706 				if (initmsg.sinit_num_ostreams)
3707 					stcb->asoc.pre_open_streams = initmsg.sinit_num_ostreams;
3708 				if (initmsg.sinit_max_instreams)
3709 					stcb->asoc.max_inbound_streams = initmsg.sinit_max_instreams;
3710 				if (initmsg.sinit_max_init_timeo)
3711 					stcb->asoc.initial_init_rto_max = initmsg.sinit_max_init_timeo;
3712 				if (stcb->asoc.streamoutcnt < stcb->asoc.pre_open_streams) {
3713 					struct sctp_stream_out *tmp_str;
3714 					unsigned int i;
3715 #if defined(SCTP_DETAILED_STR_STATS)
3716 					int j;
3717 #endif
3718 
3719 					/* Default is NOT correct */
3720 					SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, default:%d pre_open:%d\n",
3721 						stcb->asoc.streamoutcnt, stcb->asoc.pre_open_streams);
3722 					SCTP_TCB_UNLOCK(stcb);
3723 					SCTP_MALLOC(tmp_str,
3724 					            struct sctp_stream_out *,
3725 					            (stcb->asoc.pre_open_streams * sizeof(struct sctp_stream_out)),
3726 					            SCTP_M_STRMO);
3727 					SCTP_TCB_LOCK(stcb);
3728 					if (tmp_str != NULL) {
3729 						SCTP_FREE(stcb->asoc.strmout, SCTP_M_STRMO);
3730 						stcb->asoc.strmout = tmp_str;
3731 						stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt = stcb->asoc.pre_open_streams;
3732 					} else {
3733 						stcb->asoc.pre_open_streams = stcb->asoc.streamoutcnt;
3734 					}
3735 					for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3736 						TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
3737 						stcb->asoc.strmout[i].chunks_on_queues = 0;
3738 						stcb->asoc.strmout[i].next_sequence_send = 0;
3739 #if defined(SCTP_DETAILED_STR_STATS)
3740 						for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
3741 							stcb->asoc.strmout[i].abandoned_sent[j] = 0;
3742 							stcb->asoc.strmout[i].abandoned_unsent[j] = 0;
3743 						}
3744 #else
3745 						stcb->asoc.strmout[i].abandoned_sent[0] = 0;
3746 						stcb->asoc.strmout[i].abandoned_unsent[0] = 0;
3747 #endif
3748 						stcb->asoc.strmout[i].stream_no = i;
3749 						stcb->asoc.strmout[i].last_msg_incomplete = 0;
3750 						stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], NULL);
3751 					}
3752 				}
3753 				break;
3754 #ifdef INET
3755 			case SCTP_DSTADDRV4:
3756 				if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in_addr)) {
3757 					*error = EINVAL;
3758 					return (1);
3759 				}
3760 				memset(&sin, 0, sizeof(struct sockaddr_in));
3761 				sin.sin_family = AF_INET;
3762 #ifdef HAVE_SIN_LEN
3763 				sin.sin_len = sizeof(struct sockaddr_in);
3764 #endif
3765 				sin.sin_port = stcb->rport;
3766 				m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
3767 				if ((sin.sin_addr.s_addr == INADDR_ANY) ||
3768 				    (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
3769 				    IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
3770 					*error = EINVAL;
3771 					return (1);
3772 				}
3773 				if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL,
3774 				                         SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3775 					*error = ENOBUFS;
3776 					return (1);
3777 				}
3778 				break;
3779 #endif
3780 #ifdef INET6
3781 			case SCTP_DSTADDRV6:
3782 				if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in6_addr)) {
3783 					*error = EINVAL;
3784 					return (1);
3785 				}
3786 				memset(&sin6, 0, sizeof(struct sockaddr_in6));
3787 				sin6.sin6_family = AF_INET6;
3788 #ifdef HAVE_SIN6_LEN
3789 				sin6.sin6_len = sizeof(struct sockaddr_in6);
3790 #endif
3791 				sin6.sin6_port = stcb->rport;
3792 				m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
3793 				if (IN6_IS_ADDR_UNSPECIFIED(&sin6.sin6_addr) ||
3794 				    IN6_IS_ADDR_MULTICAST(&sin6.sin6_addr)) {
3795 					*error = EINVAL;
3796 					return (1);
3797 				}
3798 #ifdef INET
3799 				if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
3800 					in6_sin6_2_sin(&sin, &sin6);
3801 					if ((sin.sin_addr.s_addr == INADDR_ANY) ||
3802 					    (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
3803 					    IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
3804 						*error = EINVAL;
3805 						return (1);
3806 					}
3807 					if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL,
3808 					                         SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3809 						*error = ENOBUFS;
3810 						return (1);
3811 					}
3812 				} else
3813 #endif
3814 					if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin6, NULL,
3815 					                         SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3816 						*error = ENOBUFS;
3817 						return (1);
3818 					}
3819 				break;
3820 #endif
3821 			default:
3822 				break;
3823 			}
3824 		}
3825 		at += CMSG_ALIGN(cmh.cmsg_len);
3826 	}
3827 	return (0);
3828 }
3829 
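/*
 * Scan the control mbuf for SCTP_DSTADDRV4/SCTP_DSTADDRV6 cmsgs and, for
 * each destination address found, look for an existing association on the
 * endpoint.  Returns the first match (with *net_p set by the lookup);
 * returns NULL if nothing matched, setting *error only when the ancillary
 * data itself is malformed.
 */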
3830 static struct sctp_tcb *
3831 sctp_findassociation_cmsgs(struct sctp_inpcb **inp_p,
3832                            uint16_t port,
3833                            struct mbuf *control,
3834                            struct sctp_nets **net_p,
3835                            int *error)
3836 {
3837 #if defined(__Userspace_os_Windows)
3838 	WSACMSGHDR cmh;
3839 #else
3840 	struct cmsghdr cmh;
3841 #endif
3842 	int tlen, at;
3843 	struct sctp_tcb *stcb;
3844 	struct sockaddr *addr;
3845 #ifdef INET
3846 	struct sockaddr_in sin;
3847 #endif
3848 #ifdef INET6
3849 	struct sockaddr_in6 sin6;
3850 #endif
3851 
3852 	tlen = SCTP_BUF_LEN(control);
3853 	at = 0;
3854 	while (at < tlen) {
3855 		if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
3856 			/* There is not enough room for one more. */
3857 			*error = EINVAL;
3858 			return (NULL);
3859 		}
3860 		m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
3861 		if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3862 			/* We don't have a complete CMSG header. */
3863 			*error = EINVAL;
3864 			return (NULL);
3865 		}
3866 		if (((int)cmh.cmsg_len + at) > tlen) {
3867 			/* We don't have the complete CMSG. */
3868 			*error = EINVAL;
3869 			return (NULL);
3870 		}
3871 		if (cmh.cmsg_level == IPPROTO_SCTP) {
3872 			switch (cmh.cmsg_type) {
3873 #ifdef INET
3874 			case SCTP_DSTADDRV4:
3875 				if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in_addr)) {
3876 					*error = EINVAL;
3877 					return (NULL);
3878 				}
3879 				memset(&sin, 0, sizeof(struct sockaddr_in));
3880 				sin.sin_family = AF_INET;
3881 #ifdef HAVE_SIN_LEN
3882 				sin.sin_len = sizeof(struct sockaddr_in);
3883 #endif
3884 				sin.sin_port = port;
3885 				m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
3886 				addr = (struct sockaddr *)&sin;
3887 				break;
3888 #endif
3889 #ifdef INET6
3890 			case SCTP_DSTADDRV6:
3891 				if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in6_addr)) {
3892 					*error = EINVAL;
3893 					return (NULL);
3894 				}
3895 				memset(&sin6, 0, sizeof(struct sockaddr_in6));
3896 				sin6.sin6_family = AF_INET6;
3897 #ifdef HAVE_SIN6_LEN
3898 				sin6.sin6_len = sizeof(struct sockaddr_in6);
3899 #endif
3900 				sin6.sin6_port = port;
3901 				m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
3902 #ifdef INET
3903 				if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
3904 					in6_sin6_2_sin(&sin, &sin6);
3905 					addr = (struct sockaddr *)&sin;
3906 				} else
3907 #endif
3908 					addr = (struct sockaddr *)&sin6;
3909 				break;
3910 #endif
3911 			default:
3912 				addr = NULL;
3913 				break;
3914 			}
3915 			if (addr) {
3916 				stcb = sctp_findassociation_ep_addr(inp_p, addr, net_p, NULL, NULL);
3917 				if (stcb != NULL) {
3918 					return (stcb);
3919 				}
3920 			}
3921 		}
3922 		at += CMSG_ALIGN(cmh.cmsg_len);
3923 	}
3924 	return (NULL);
3925 }
3926 
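/*
 * Build the STATE-COOKIE parameter for an INIT-ACK.  The resulting mbuf
 * chain is laid out as:
 *
 *	[param header][state cookie][copy of received INIT]
 *	[copy of sent INIT-ACK][signature (SCTP_SIGNATURE_SIZE, zeroed)]
 *
 * The signature area is zero-filled here; *signature is pointed at it so
 * the caller can fill in the HMAC once the packet is complete.  The
 * parameter length is set to the total cookie size.
 */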
3927 static struct mbuf *
3928 sctp_add_cookie(struct mbuf *init, int init_offset,
3929     struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in, uint8_t **signature)
3930 {
3931 	struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret;
3932 	struct sctp_state_cookie *stc;
3933 	struct sctp_paramhdr *ph;
3934 	uint8_t *foo;
3935 	int sig_offset;
3936 	uint16_t cookie_sz;
3937 
3938 	mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) +
3939 				      sizeof(struct sctp_paramhdr)), 0,
3940 				     M_NOWAIT, 1, MT_DATA);
3941 	if (mret == NULL) {
3942 		return (NULL);
3943 	}
3944 	copy_init = SCTP_M_COPYM(init, init_offset, M_COPYALL, M_NOWAIT);
3945 	if (copy_init == NULL) {
3946 		sctp_m_freem(mret);
3947 		return (NULL);
3948 	}
3949 #ifdef SCTP_MBUF_LOGGING
3950 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
3951 		sctp_log_mbc(copy_init, SCTP_MBUF_ICOPY);
3952 	}
3953 #endif
3954 	copy_initack = SCTP_M_COPYM(initack, initack_offset, M_COPYALL,
3955 	    M_NOWAIT);
3956 	if (copy_initack == NULL) {
3957 		sctp_m_freem(mret);
3958 		sctp_m_freem(copy_init);
3959 		return (NULL);
3960 	}
3961 #ifdef SCTP_MBUF_LOGGING
3962 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
3963 		sctp_log_mbc(copy_initack, SCTP_MBUF_ICOPY);
3964 	}
3965 #endif
3966 	/* easy side we just drop it on the end */
3967 	ph = mtod(mret, struct sctp_paramhdr *);
3968 	SCTP_BUF_LEN(mret) = sizeof(struct sctp_state_cookie) +
3969 	    sizeof(struct sctp_paramhdr);
3970 	stc = (struct sctp_state_cookie *)((caddr_t)ph +
3971 	    sizeof(struct sctp_paramhdr));
3972 	ph->param_type = htons(SCTP_STATE_COOKIE);
3973 	ph->param_length = 0;	/* fill in at the end */
3974 	/* Fill in the stc cookie data */
3975 	memcpy(stc, stc_in, sizeof(struct sctp_state_cookie));
3976 
3977 	/* tack the INIT and then the INIT-ACK onto the chain */
3978 	cookie_sz = 0;
3979 	for (m_at = mret; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3980 		cookie_sz += SCTP_BUF_LEN(m_at);
3981 		if (SCTP_BUF_NEXT(m_at) == NULL) {
3982 			SCTP_BUF_NEXT(m_at) = copy_init;
3983 			break;
3984 		}
3985 	}
3986 	for (m_at = copy_init; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3987 		cookie_sz += SCTP_BUF_LEN(m_at);
3988 		if (SCTP_BUF_NEXT(m_at) == NULL) {
3989 			SCTP_BUF_NEXT(m_at) = copy_initack;
3990 			break;
3991 		}
3992 	}
3993 	for (m_at = copy_initack; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3994 		cookie_sz += SCTP_BUF_LEN(m_at);
3995 		if (SCTP_BUF_NEXT(m_at) == NULL) {
3996 			break;
3997 		}
3998 	}
3999 	sig = sctp_get_mbuf_for_msg(SCTP_SECRET_SIZE, 0, M_NOWAIT, 1, MT_DATA);
4000 	if (sig == NULL) {
4001 		/* no space, so free the entire chain */
4002 		sctp_m_freem(mret);
4003 		return (NULL);
4004 	}
4005 	SCTP_BUF_LEN(sig) = 0;
4006 	SCTP_BUF_NEXT(m_at) = sig;
4007 	sig_offset = 0;
4008 	foo = (uint8_t *) (mtod(sig, caddr_t) + sig_offset);
4009 	memset(foo, 0, SCTP_SIGNATURE_SIZE);
4010 	*signature = foo;
4011 	SCTP_BUF_LEN(sig) += SCTP_SIGNATURE_SIZE;
4012 	cookie_sz += SCTP_SIGNATURE_SIZE;
4013 	ph->param_length = htons(cookie_sz);
4014 	return (mret);
4015 }
4016 
4017 
4018 static uint8_t
4019 sctp_get_ect(struct sctp_tcb *stcb)
4020 {
4021 	if ((stcb != NULL) && (stcb->asoc.ecn_supported == 1)) {
4022 		return (SCTP_ECT0_BIT);
4023 	} else {
4024 		return (0);
4025 	}
4026 }
4027 
4028 #if defined(INET) || defined(INET6)
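/*
 * Called when no usable source address/route could be found for 'net'.
 * If the path was reachable, notify the ULP that it went down and clear
 * its REACHABLE/PF state.  If 'net' was the primary destination, record
 * an alternate net for the association and invalidate the cached source
 * address so it gets re-selected.
 */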
4029 static void
4030 sctp_handle_no_route(struct sctp_tcb *stcb,
4031                      struct sctp_nets *net,
4032                      int so_locked)
4033 {
4034 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "dropped packet - no valid source addr\n");
4035 
4036 	if (net) {
4037 		SCTPDBG(SCTP_DEBUG_OUTPUT1, "Destination was ");
4038 		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT1, &net->ro._l_addr.sa);
4039 		if (net->dest_state & SCTP_ADDR_CONFIRMED) {
4040 			if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb) {
4041 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "no route takes interface %p down\n", (void *)net);
4042 				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
4043 			                        stcb, 0,
4044 			                        (void *)net,
4045 			                        so_locked);
4046 				net->dest_state &= ~SCTP_ADDR_REACHABLE;
4047 				net->dest_state &= ~SCTP_ADDR_PF;
4048 			}
4049 		}
4050 		if (stcb) {
4051 			if (net == stcb->asoc.primary_destination) {
4052 				/* need a new primary */
4053 				struct sctp_nets *alt;
4054 
4055 				alt = sctp_find_alternate_net(stcb, net, 0);
4056 				if (alt != net) {
4057 					if (stcb->asoc.alternate) {
4058 						sctp_free_remote_addr(stcb->asoc.alternate);
4059 					}
4060 					stcb->asoc.alternate = alt;
4061 					atomic_add_int(&stcb->asoc.alternate->ref_count, 1);
4062 					if (net->ro._s_addr) {
4063 						sctp_free_ifa(net->ro._s_addr);
4064 						net->ro._s_addr = NULL;
4065 					}
4066 					net->src_addr_selected = 0;
4067 				}
4068 			}
4069 		}
4070 	}
4071 }
4072 #endif
4073 
4074 static int
4075 sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
4076     struct sctp_tcb *stcb,	/* may be NULL */
4077     struct sctp_nets *net,
4078     struct sockaddr *to,
4079     struct mbuf *m,
4080     uint32_t auth_offset,
4081     struct sctp_auth_chunk *auth,
4082     uint16_t auth_keyid,
4083     int nofragment_flag,
4084     int ecn_ok,
4085     int out_of_asoc_ok,
4086     uint16_t src_port,
4087     uint16_t dest_port,
4088     uint32_t v_tag,
4089     uint16_t port,
4090     union sctp_sockstore *over_addr,
4091 #if defined(__FreeBSD__)
4092     uint8_t mflowtype, uint32_t mflowid,
4093 #endif
4094 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4095     int so_locked SCTP_UNUSED
4096 #else
4097     int so_locked
4098 #endif
4099     )
4100 /* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
4101 {
4102 	/**
4103 	 * Given an mbuf chain (via SCTP_BUF_NEXT()) that holds a packet header
4104 	 * WITH an SCTPHDR but no IP header, endpoint inp and sa structure:
4105 	 * - fill in the HMAC digest of any AUTH chunk in the packet.
4106 	 * - calculate and fill in the SCTP checksum.
4107 	 * - prepend an IP header.
4108 	 * - if boundall use INADDR_ANY.
4109 	 * - if boundspecific do source address selection.
4110 	 * - set the fragmentation option for IPv4.
4111 	 * - On return from IP output, check/adjust mtu size of output
4112 	 *   interface and smallest_mtu size as well.
4113 	 */
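	/*
	 * A non-zero 'port' requests UDP encapsulation (RFC 6951): a UDP
	 * header is inserted between the IP header and the SCTP common
	 * header, with the destination port set to 'port' and the source
	 * port taken from the sctp_udp_tunneling_port sysctl.  If that
	 * sysctl is 0, the packet is treated as unroutable and dropped.
	 */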
4114 	/* Will need ifdefs around this */
4115 #ifdef __Panda__
4116 	pakhandle_type o_pak;
4117 #endif
4118 	struct mbuf *newm;
4119 	struct sctphdr *sctphdr;
4120 	int packet_length;
4121 	int ret;
4122 #if defined(INET) || defined(INET6)
4123 	uint32_t vrf_id;
4124 #endif
4125 #if defined(INET) || defined(INET6)
4126 #if !defined(__Panda__)
4127 	struct mbuf *o_pak;
4128 #endif
4129 	sctp_route_t *ro = NULL;
4130 	struct udphdr *udp = NULL;
4131 #endif
4132 	uint8_t tos_value;
4133 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4134 	struct socket *so = NULL;
4135 #endif
4136 
4137 #if defined(__APPLE__)
4138 	if (so_locked) {
4139 		sctp_lock_assert(SCTP_INP_SO(inp));
4140 		SCTP_TCB_LOCK_ASSERT(stcb);
4141 	} else {
4142 		sctp_unlock_assert(SCTP_INP_SO(inp));
4143 	}
4144 #endif
4145 	if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) {
4146 		SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
4147 		sctp_m_freem(m);
4148 		return (EFAULT);
4149 	}
4150 #if defined(INET) || defined(INET6)
4151 	if (stcb) {
4152 		vrf_id = stcb->asoc.vrf_id;
4153 	} else {
4154 		vrf_id = inp->def_vrf_id;
4155 	}
4156 #endif
4157 	/* fill in the HMAC digest for any AUTH chunk in the packet */
4158 	if ((auth != NULL) && (stcb != NULL)) {
4159 		sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb, auth_keyid);
4160 	}
4161 
4162 	if (net) {
4163 		tos_value = net->dscp;
4164 	} else if (stcb) {
4165 		tos_value = stcb->asoc.default_dscp;
4166 	} else {
4167 		tos_value = inp->sctp_ep.default_dscp;
4168 	}
4169 
4170 	switch (to->sa_family) {
4171 #ifdef INET
4172 	case AF_INET:
4173 	{
4174 		struct ip *ip = NULL;
4175 		sctp_route_t iproute;
4176 		int len;
4177 
4178 		len = sizeof(struct ip) + sizeof(struct sctphdr);
4179 		if (port) {
4180 			len += sizeof(struct udphdr);
4181 		}
4182 		newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
4183 		if (newm == NULL) {
4184 			sctp_m_freem(m);
4185 			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4186 			return (ENOMEM);
4187 		}
4188 		SCTP_ALIGN_TO_END(newm, len);
4189 		SCTP_BUF_LEN(newm) = len;
4190 		SCTP_BUF_NEXT(newm) = m;
4191 		m = newm;
4192 #if defined(__FreeBSD__)
4193 		if (net != NULL) {
4194 			m->m_pkthdr.flowid = net->flowid;
4195 			M_HASHTYPE_SET(m, net->flowtype);
4196 		} else {
4197 			m->m_pkthdr.flowid = mflowid;
4198 			M_HASHTYPE_SET(m, mflowtype);
4199  		}
4200 #endif
4201 		packet_length = sctp_calculate_len(m);
4202 		ip = mtod(m, struct ip *);
4203 		ip->ip_v = IPVERSION;
4204 		ip->ip_hl = (sizeof(struct ip) >> 2);
4205 		if (tos_value == 0) {
4206 			/*
4207 			 * This means, in particular, that it is not set at
4208 			 * the SCTP layer. So use the value from the IP layer.
4209 			 */
4210 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Panda__) || defined(__Windows__) || defined(__Userspace__)
4211 			tos_value = inp->ip_inp.inp.inp_ip_tos;
4212 #else
4213 			tos_value = inp->inp_ip_tos;
4214 #endif
4215 		}
4216 		tos_value &= 0xfc;
4217 		if (ecn_ok) {
4218 			tos_value |= sctp_get_ect(stcb);
4219 		}
4220                 if ((nofragment_flag) && (port == 0)) {
4221 #if defined(__FreeBSD__)
4222 #if __FreeBSD_version >= 1000000
4223 			ip->ip_off = htons(IP_DF);
4224 #else
4225 			ip->ip_off = IP_DF;
4226 #endif
4227 #elif defined(WITH_CONVERT_IP_OFF) || defined(__APPLE__) || defined(__Userspace__)
4228 			ip->ip_off = IP_DF;
4229 #else
4230 			ip->ip_off = htons(IP_DF);
4231 #endif
4232 		} else {
4233 #if defined(__FreeBSD__) && __FreeBSD_version >= 1000000
4234 			ip->ip_off = htons(0);
4235 #else
4236 			ip->ip_off = 0;
4237 #endif
4238 		}
4239 #if defined(__FreeBSD__)
4240 		/* FreeBSD has a function for ip_id's */
4241 		ip->ip_id = ip_newid();
4242 #elif defined(RANDOM_IP_ID)
4243 		/* Apple has RANDOM_IP_ID switch */
4244 		ip->ip_id = htons(ip_randomid());
4245 #elif defined(__Userspace__)
4246                 ip->ip_id = htons(SCTP_IP_ID(inp)++);
4247 #else
4248 		ip->ip_id = SCTP_IP_ID(inp)++;
4249 #endif
4250 
4251 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Panda__) || defined(__Windows__) || defined(__Userspace__)
4252 		ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl;
4253 #else
4254 		ip->ip_ttl = inp->inp_ip_ttl;
4255 #endif
4256 #if defined(__FreeBSD__) && __FreeBSD_version >= 1000000
4257 		ip->ip_len = htons(packet_length);
4258 #else
4259 		ip->ip_len = packet_length;
4260 #endif
4261 		ip->ip_tos = tos_value;
4262 		if (port) {
4263 			ip->ip_p = IPPROTO_UDP;
4264 		} else {
4265 			ip->ip_p = IPPROTO_SCTP;
4266 		}
4267 		ip->ip_sum = 0;
4268 		if (net == NULL) {
4269 			ro = &iproute;
4270 			memset(&iproute, 0, sizeof(iproute));
4271 #ifdef HAVE_SA_LEN
4272 			memcpy(&ro->ro_dst, to, to->sa_len);
4273 #else
4274 			memcpy(&ro->ro_dst, to, sizeof(struct sockaddr_in));
4275 #endif
4276 		} else {
4277 			ro = (sctp_route_t *)&net->ro;
4278 		}
4279 		/* Now the address selection part */
4280 		ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr;
4281 
4282 		/* call the routine to select the src address */
4283 		if (net && out_of_asoc_ok == 0) {
4284 			if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED|SCTP_ADDR_IFA_UNUSEABLE))) {
4285 				sctp_free_ifa(net->ro._s_addr);
4286 				net->ro._s_addr = NULL;
4287 				net->src_addr_selected = 0;
4288 				if (ro->ro_rt) {
4289 					RTFREE(ro->ro_rt);
4290 					ro->ro_rt = NULL;
4291 				}
4292 			}
4293 			if (net->src_addr_selected == 0) {
4294 				/* Cache the source address */
4295 				net->ro._s_addr = sctp_source_address_selection(inp,stcb,
4296 										ro, net, 0,
4297 										vrf_id);
4298 				net->src_addr_selected = 1;
4299 			}
4300 			if (net->ro._s_addr == NULL) {
4301 				/* No route to host */
4302 				net->src_addr_selected = 0;
4303 				sctp_handle_no_route(stcb, net, so_locked);
4304 				SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4305 				sctp_m_freem(m);
4306 				return (EHOSTUNREACH);
4307 			}
4308 			ip->ip_src = net->ro._s_addr->address.sin.sin_addr;
4309 		} else {
4310 			if (over_addr == NULL) {
4311 				struct sctp_ifa *_lsrc;
4312 
4313 				_lsrc = sctp_source_address_selection(inp, stcb, ro,
4314 				                                      net,
4315 				                                      out_of_asoc_ok,
4316 				                                      vrf_id);
4317 				if (_lsrc == NULL) {
4318 					sctp_handle_no_route(stcb, net, so_locked);
4319 					SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4320 					sctp_m_freem(m);
4321 					return (EHOSTUNREACH);
4322 				}
4323 				ip->ip_src = _lsrc->address.sin.sin_addr;
4324 				sctp_free_ifa(_lsrc);
4325 			} else {
4326 				ip->ip_src = over_addr->sin.sin_addr;
4327 				SCTP_RTALLOC(ro, vrf_id);
4328 			}
4329 		}
4330 		if (port) {
4331 			if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
4332 				sctp_handle_no_route(stcb, net, so_locked);
4333 				SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4334 				sctp_m_freem(m);
4335 				return (EHOSTUNREACH);
4336 			}
4337 			udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
4338 			udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4339 			udp->uh_dport = port;
4340 			udp->uh_ulen = htons(packet_length - sizeof(struct ip));
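			/*
			 * The UDP length covers everything following the IP
			 * header. On FreeBSD versions honoring V_udp_cksum
			 * (presumably the net.inet.udp.cksum sysctl), the
			 * pseudo-header sum is only seeded when UDP checksums
			 * are enabled; SCTP_ENABLE_UDP_CSUM() below is expected
			 * to finish the checksum over the payload.
			 */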
4341 #if !defined(__Windows__) && !defined(__Userspace__)
4342 #if defined(__FreeBSD__) && ((__FreeBSD_version > 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000)
4343 			if (V_udp_cksum) {
4344 				udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
4345 			} else {
4346 				udp->uh_sum = 0;
4347 			}
4348 #else
4349 			udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
4350 #endif
4351 #else
4352 			udp->uh_sum = 0;
4353 #endif
4354 			sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4355 		} else {
4356 			sctphdr = (struct sctphdr *)((caddr_t)ip + sizeof(struct ip));
4357 		}
4358 
4359 		sctphdr->src_port = src_port;
4360 		sctphdr->dest_port = dest_port;
4361 		sctphdr->v_tag = v_tag;
4362 		sctphdr->checksum = 0;
4363 
4364 		/*
4365 		 * If source address selection fails and we find no route
4366 		 * then the ip_output should fail as well with a
4367 		 * NO_ROUTE_TO_HOST type error. We probably should catch
4368 		 * that somewhere and abort the association right away
4369 		 * (assuming this is an INIT being sent).
4370 		 */
4371 		if (ro->ro_rt == NULL) {
4372 			/*
4373 			 * src addr selection failed to find a route (or
4374 			 * valid source addr), so we can't get there from
4375 			 * here (yet)!
4376 			 */
4377 			sctp_handle_no_route(stcb, net, so_locked);
4378 			SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4379 			sctp_m_freem(m);
4380 			return (EHOSTUNREACH);
4381 		}
4382 		if (ro != &iproute) {
4383 			memcpy(&iproute, ro, sizeof(*ro));
4384 		}
4385 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv4 output routine from low level src addr:%x\n",
4386 			(uint32_t) (ntohl(ip->ip_src.s_addr)));
4387 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "Destination is %x\n",
4388 			(uint32_t)(ntohl(ip->ip_dst.s_addr)));
4389 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n",
4390 			(void *)ro->ro_rt);
4391 
4392 		if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4393 			/* failed to prepend data, give up */
4394 			SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4395 			sctp_m_freem(m);
4396 			return (ENOMEM);
4397 		}
4398 		SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
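		/*
		 * SCTP uses a CRC32c checksum (RFC 4960, Appendix B). For
		 * UDP-encapsulated packets it is always computed in software
		 * over the SCTP portion. Otherwise, newer FreeBSD kernels
		 * offload it by setting CSUM_SCTP and letting the driver fill
		 * it in; other platforms fall back to software, skipping the
		 * CRC on loopback when the sysctl allows it.
		 */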
4399 		if (port) {
4400 #if defined(SCTP_WITH_NO_CSUM)
4401 			SCTP_STAT_INCR(sctps_sendnocrc);
4402 #else
4403 			sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip) + sizeof(struct udphdr));
4404 			SCTP_STAT_INCR(sctps_sendswcrc);
4405 #endif
4406 #if defined(__FreeBSD__) && ((__FreeBSD_version > 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000)
4407 			if (V_udp_cksum) {
4408 				SCTP_ENABLE_UDP_CSUM(o_pak);
4409 			}
4410 #else
4411 			SCTP_ENABLE_UDP_CSUM(o_pak);
4412 #endif
4413 		} else {
4414 #if defined(SCTP_WITH_NO_CSUM)
4415 			SCTP_STAT_INCR(sctps_sendnocrc);
4416 #else
4417 #if defined(__FreeBSD__) && __FreeBSD_version >= 800000
4418 			m->m_pkthdr.csum_flags = CSUM_SCTP;
4419 			m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
4420 			SCTP_STAT_INCR(sctps_sendhwcrc);
4421 #else
4422 			if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
4423 			      (stcb) && (stcb->asoc.scope.loopback_scope))) {
4424 				sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip));
4425 				SCTP_STAT_INCR(sctps_sendswcrc);
4426 			} else {
4427 				SCTP_STAT_INCR(sctps_sendnocrc);
4428 			}
4429 #endif
4430 #endif
4431 		}
4432 #ifdef SCTP_PACKET_LOGGING
4433 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4434 			sctp_packet_log(o_pak);
4435 #endif
		/* send it out. table id is taken from stcb */
4437 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4438 		if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4439 			so = SCTP_INP_SO(inp);
4440 			SCTP_SOCKET_UNLOCK(so, 0);
4441 		}
4442 #endif
4443 		SCTP_IP_OUTPUT(ret, o_pak, ro, stcb, vrf_id);
4444 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4445 		if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4446 			atomic_add_int(&stcb->asoc.refcnt, 1);
4447 			SCTP_TCB_UNLOCK(stcb);
4448 			SCTP_SOCKET_LOCK(so, 0);
4449 			SCTP_TCB_LOCK(stcb);
4450 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4451 		}
4452 #endif
4453 		SCTP_STAT_INCR(sctps_sendpackets);
4454 		SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4455 		if (ret)
4456 			SCTP_STAT_INCR(sctps_senderrors);
4457 
4458 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "IP output returns %d\n", ret);
4459 		if (net == NULL) {
			/* free temporary routes */
4461 #if defined(__FreeBSD__) && __FreeBSD_version > 901000
4462 			RO_RTFREE(ro);
4463 #else
4464 			if (ro->ro_rt) {
4465 				RTFREE(ro->ro_rt);
4466 				ro->ro_rt = NULL;
4467 			}
4468 #endif
4469 		} else {
4470 			/* PMTU check versus smallest asoc MTU goes here */
4471 			if ((ro->ro_rt != NULL) &&
4472 			    (net->ro._s_addr)) {
4473 				uint32_t mtu;
4474 				mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4475 				if (net->port) {
4476 					mtu -= sizeof(struct udphdr);
4477 				}
4478 				if (mtu && (stcb->asoc.smallest_mtu > mtu)) {
4479 					sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4480 					net->mtu = mtu;
4481 				}
4482 			} else if (ro->ro_rt == NULL) {
4483 				/* route was freed */
4484 				if (net->ro._s_addr &&
4485 				    net->src_addr_selected) {
4486 					sctp_free_ifa(net->ro._s_addr);
4487 					net->ro._s_addr = NULL;
4488 				}
4489 				net->src_addr_selected = 0;
4490 			}
4491 		}
4492 		return (ret);
4493 	}
4494 #endif
4495 #ifdef INET6
4496 	case AF_INET6:
4497 	{
4498 		uint32_t flowlabel, flowinfo;
4499 		struct ip6_hdr *ip6h;
4500 		struct route_in6 ip6route;
4501 #if !(defined(__Panda__) || defined(__Userspace__))
4502 		struct ifnet *ifp;
4503 #endif
4504 		struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp;
4505 		int prev_scope = 0;
4506 #ifdef SCTP_EMBEDDED_V6_SCOPE
4507 		struct sockaddr_in6 lsa6_storage;
4508 		int error;
4509 #endif
4510 		u_short prev_port = 0;
4511 		int len;
4512 
4513 		if (net) {
4514 			flowlabel = net->flowlabel;
4515 		} else if (stcb) {
4516 			flowlabel = stcb->asoc.default_flowlabel;
4517 		} else {
4518 			flowlabel = inp->sctp_ep.default_flowlabel;
4519 		}
4520 		if (flowlabel == 0) {
4521 			/*
4522 			 * This means especially, that it is not set at the
4523 			 * SCTP layer. So use the value from the IP layer.
4524 			 */
4525 #if defined(__APPLE__) && (!defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION))
4526 			flowlabel = ntohl(inp->ip_inp.inp.inp_flow);
4527 #else
4528 			flowlabel = ntohl(((struct in6pcb *)inp)->in6p_flowinfo);
4529 #endif
4530 		}
4531 		flowlabel &= 0x000fffff;
4532 		len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr);
4533 		if (port) {
4534 			len += sizeof(struct udphdr);
4535 		}
4536 		newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
4537 		if (newm == NULL) {
4538 			sctp_m_freem(m);
4539 			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4540 			return (ENOMEM);
4541 		}
4542 		SCTP_ALIGN_TO_END(newm, len);
4543 		SCTP_BUF_LEN(newm) = len;
4544 		SCTP_BUF_NEXT(newm) = m;
4545 		m = newm;
4546 #if defined(__FreeBSD__)
4547 		if (net != NULL) {
4548 			m->m_pkthdr.flowid = net->flowid;
4549 			M_HASHTYPE_SET(m, net->flowtype);
4550 		} else {
4551 			m->m_pkthdr.flowid = mflowid;
4552 			M_HASHTYPE_SET(m, mflowtype);
4553  		}
4554 #endif
4555 		packet_length = sctp_calculate_len(m);
4556 
4557 		ip6h = mtod(m, struct ip6_hdr *);
4558 		/* protect *sin6 from overwrite */
4559 		sin6 = (struct sockaddr_in6 *)to;
4560 		tmp = *sin6;
4561 		sin6 = &tmp;
4562 
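		/*
		 * KAME-derived stacks embed the scope (interface) id of
		 * link-local addresses inside the address itself, so routing
		 * and source address selection see the correct scope; the
		 * scope is recovered again before the address is used as a
		 * plain sockaddr.
		 */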
4563 #ifdef SCTP_EMBEDDED_V6_SCOPE
4564 		/* KAME hack: embed scopeid */
4565 #if defined(__APPLE__)
4566 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
4567 		if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0)
4568 #else
4569 		if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0)
4570 #endif
4571 #elif defined(SCTP_KAME)
4572 		if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0)
4573 #else
4574 		if (in6_embedscope(&sin6->sin6_addr, sin6) != 0)
4575 #endif
4576 		{
4577 			SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4578 			return (EINVAL);
4579 		}
4580 #endif /* SCTP_EMBEDDED_V6_SCOPE */
4581 		if (net == NULL) {
4582 			memset(&ip6route, 0, sizeof(ip6route));
4583 			ro = (sctp_route_t *)&ip6route;
4584 #ifdef HAVE_SIN6_LEN
4585 			memcpy(&ro->ro_dst, sin6, sin6->sin6_len);
4586 #else
4587 			memcpy(&ro->ro_dst, sin6, sizeof(struct sockaddr_in6));
4588 #endif
4589 		} else {
4590 			ro = (sctp_route_t *)&net->ro;
4591 		}
4592 		/*
4593 		 * We assume here that inp_flow is in host byte order within
4594 		 * the TCB!
4595 		 */
4596 		if (tos_value == 0) {
4597 			/*
4598 			 * This means especially, that it is not set at the
4599 			 * SCTP layer. So use the value from the IP layer.
4600 			 */
4601 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Panda__) || defined(__Windows__) || defined(__Userspace__)
4602 #if defined(__APPLE__) && (!defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION))
4603 			tos_value = (ntohl(inp->ip_inp.inp.inp_flow) >> 20) & 0xff;
4604 #else
4605 			tos_value = (ntohl(((struct in6pcb *)inp)->in6p_flowinfo) >> 20) & 0xff;
4606 #endif
4607 #endif
4608 		}
4609 		tos_value &= 0xfc;
4610 		if (ecn_ok) {
4611 			tos_value |= sctp_get_ect(stcb);
4612 		}
4613 		flowinfo = 0x06;
4614 		flowinfo <<= 8;
4615 		flowinfo |= tos_value;
4616 		flowinfo <<= 20;
4617 		flowinfo |= flowlabel;
4618 		ip6h->ip6_flow = htonl(flowinfo);
4619 		if (port) {
4620 			ip6h->ip6_nxt = IPPROTO_UDP;
4621 		} else {
4622 			ip6h->ip6_nxt = IPPROTO_SCTP;
4623 		}
4624 		ip6h->ip6_plen = (packet_length - sizeof(struct ip6_hdr));
4625 		ip6h->ip6_dst = sin6->sin6_addr;
4626 
4627 		/*
4628 		 * Add SRC address selection here: we can only reuse to a
4629 		 * limited degree the kame src-addr-sel, since we can try
4630 		 * their selection but it may not be bound.
4631 		 */
4632 		bzero(&lsa6_tmp, sizeof(lsa6_tmp));
4633 		lsa6_tmp.sin6_family = AF_INET6;
4634 #ifdef HAVE_SIN6_LEN
4635 		lsa6_tmp.sin6_len = sizeof(lsa6_tmp);
4636 #endif
4637 		lsa6 = &lsa6_tmp;
4638 		if (net && out_of_asoc_ok == 0) {
4639 			if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED|SCTP_ADDR_IFA_UNUSEABLE))) {
4640 				sctp_free_ifa(net->ro._s_addr);
4641 				net->ro._s_addr = NULL;
4642 				net->src_addr_selected = 0;
4643 				if (ro->ro_rt) {
4644 					RTFREE(ro->ro_rt);
4645 					ro->ro_rt = NULL;
4646 				}
4647 			}
4648 			if (net->src_addr_selected == 0) {
4649 #ifdef SCTP_EMBEDDED_V6_SCOPE
4650 				sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4651 				/* KAME hack: embed scopeid */
4652 #if defined(__APPLE__)
4653 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
4654 				if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0)
4655 #else
4656 				if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0)
4657 #endif
4658 #elif defined(SCTP_KAME)
4659 				if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0)
4660 #else
4661 				if (in6_embedscope(&sin6->sin6_addr, sin6) != 0)
4662 #endif
4663 				{
4664 					SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4665 					return (EINVAL);
4666 				}
4667 #endif /* SCTP_EMBEDDED_V6_SCOPE */
4668 				/* Cache the source address */
4669 				net->ro._s_addr = sctp_source_address_selection(inp,
4670 										stcb,
4671 										ro,
4672 										net,
4673 										0,
4674 										vrf_id);
4675 #ifdef SCTP_EMBEDDED_V6_SCOPE
4676 #ifdef SCTP_KAME
4677 				(void)sa6_recoverscope(sin6);
4678 #else
4679 				(void)in6_recoverscope(sin6, &sin6->sin6_addr, NULL);
4680 #endif	/* SCTP_KAME */
4681 #endif	/* SCTP_EMBEDDED_V6_SCOPE */
4682 				net->src_addr_selected = 1;
4683 			}
4684 			if (net->ro._s_addr == NULL) {
4685 				SCTPDBG(SCTP_DEBUG_OUTPUT3, "V6:No route to host\n");
4686 				net->src_addr_selected = 0;
4687 				sctp_handle_no_route(stcb, net, so_locked);
4688 				SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4689 				sctp_m_freem(m);
4690 				return (EHOSTUNREACH);
4691 			}
4692 			lsa6->sin6_addr = net->ro._s_addr->address.sin6.sin6_addr;
4693 		} else {
4694 #ifdef SCTP_EMBEDDED_V6_SCOPE
4695 			sin6 = (struct sockaddr_in6 *)&ro->ro_dst;
4696 			/* KAME hack: embed scopeid */
4697 #if defined(__APPLE__)
4698 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
4699 			if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0)
4700 #else
4701 			if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0)
4702 #endif
4703 #elif defined(SCTP_KAME)
4704 			if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0)
4705 #else
4706 			if (in6_embedscope(&sin6->sin6_addr, sin6) != 0)
4707 #endif
4708 			  {
4709 				SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4710 				return (EINVAL);
4711 			  }
4712 #endif /* SCTP_EMBEDDED_V6_SCOPE */
4713 			if (over_addr == NULL) {
4714 				struct sctp_ifa *_lsrc;
4715 
4716 				_lsrc = sctp_source_address_selection(inp, stcb, ro,
4717 				                                      net,
4718 				                                      out_of_asoc_ok,
4719 				                                      vrf_id);
4720 				if (_lsrc == NULL) {
4721 					sctp_handle_no_route(stcb, net, so_locked);
4722 					SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4723 					sctp_m_freem(m);
4724 					return (EHOSTUNREACH);
4725 				}
4726 				lsa6->sin6_addr = _lsrc->address.sin6.sin6_addr;
4727 				sctp_free_ifa(_lsrc);
4728 			} else {
4729 				lsa6->sin6_addr = over_addr->sin6.sin6_addr;
4730 				SCTP_RTALLOC(ro, vrf_id);
4731 			}
4732 #ifdef SCTP_EMBEDDED_V6_SCOPE
4733 #ifdef SCTP_KAME
4734 			(void)sa6_recoverscope(sin6);
4735 #else
4736 			(void)in6_recoverscope(sin6, &sin6->sin6_addr, NULL);
4737 #endif	/* SCTP_KAME */
4738 #endif	/* SCTP_EMBEDDED_V6_SCOPE */
4739 		}
4740 		lsa6->sin6_port = inp->sctp_lport;
4741 
4742 		if (ro->ro_rt == NULL) {
4743 			/*
4744 			 * src addr selection failed to find a route (or
4745 			 * valid source addr), so we can't get there from
4746 			 * here!
4747 			 */
4748 			sctp_handle_no_route(stcb, net, so_locked);
4749 			SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4750 			sctp_m_freem(m);
4751 			return (EHOSTUNREACH);
4752 		}
4753 #ifndef SCOPEDROUTING
4754 #ifdef SCTP_EMBEDDED_V6_SCOPE
4755 		/*
4756 		 * XXX: sa6 may not have a valid sin6_scope_id in the
4757 		 * non-SCOPEDROUTING case.
4758 		 */
4759 		bzero(&lsa6_storage, sizeof(lsa6_storage));
4760 		lsa6_storage.sin6_family = AF_INET6;
4761 #ifdef HAVE_SIN6_LEN
4762 		lsa6_storage.sin6_len = sizeof(lsa6_storage);
4763 #endif
4764 #ifdef SCTP_KAME
4765 		lsa6_storage.sin6_addr = lsa6->sin6_addr;
4766 		if ((error = sa6_recoverscope(&lsa6_storage)) != 0) {
4767 #else
4768 		if ((error = in6_recoverscope(&lsa6_storage, &lsa6->sin6_addr,
4769 		    NULL)) != 0) {
4770 #endif				/* SCTP_KAME */
4771 			SCTPDBG(SCTP_DEBUG_OUTPUT3, "recover scope fails error %d\n", error);
4772 			sctp_m_freem(m);
4773 			return (error);
4774 		}
4775 		/* XXX */
4776 		lsa6_storage.sin6_addr = lsa6->sin6_addr;
4777 		lsa6_storage.sin6_port = inp->sctp_lport;
4778 		lsa6 = &lsa6_storage;
4779 #endif /* SCTP_EMBEDDED_V6_SCOPE */
4780 #endif /* SCOPEDROUTING */
4781 		ip6h->ip6_src = lsa6->sin6_addr;
4782 
4783 		if (port) {
4784 			if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
4785 				sctp_handle_no_route(stcb, net, so_locked);
4786 				SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4787 				sctp_m_freem(m);
4788 				return (EHOSTUNREACH);
4789 			}
4790 			udp = (struct udphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4791 			udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4792 			udp->uh_dport = port;
4793 			udp->uh_ulen = htons(packet_length - sizeof(struct ip6_hdr));
4794 			udp->uh_sum = 0;
4795 			sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4796 		} else {
4797 			sctphdr = (struct sctphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4798 		}
4799 
4800 		sctphdr->src_port = src_port;
4801 		sctphdr->dest_port = dest_port;
4802 		sctphdr->v_tag = v_tag;
4803 		sctphdr->checksum = 0;
4804 
4805 		/*
4806 		 * We set the hop limit now since there is a good chance
4807 		 * that our ro pointer is now filled
4808 		 */
4809 		ip6h->ip6_hlim = SCTP_GET_HLIM(inp, ro);
4810 #if !(defined(__Panda__) || defined(__Userspace__))
4811 		ifp = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
4812 #endif
4813 
4814 #ifdef SCTP_DEBUG
4815 		/* Copy to be sure something bad is not happening */
4816 		sin6->sin6_addr = ip6h->ip6_dst;
4817 		lsa6->sin6_addr = ip6h->ip6_src;
4818 #endif
4819 
4820 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv6 output routine from low level\n");
4821 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "src: ");
4822 		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)lsa6);
4823 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst: ");
4824 		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)sin6);
4825 		if (net) {
4826 			sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4827 			/* preserve the port and scope for link local send */
4828 			prev_scope = sin6->sin6_scope_id;
4829 			prev_port = sin6->sin6_port;
4830 		}
4831 
4832 		if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4833 			/* failed to prepend data, give up */
4834 			sctp_m_freem(m);
4835 			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4836 			return (ENOMEM);
4837 		}
4838 		SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
4839 		if (port) {
4840 #if defined(SCTP_WITH_NO_CSUM)
4841 			SCTP_STAT_INCR(sctps_sendnocrc);
4842 #else
4843 			sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
4844 			SCTP_STAT_INCR(sctps_sendswcrc);
4845 #endif
4846 #if defined(__Windows__)
4847 			udp->uh_sum = 0;
4848 #elif !defined(__Userspace__)
4849 			if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), packet_length - sizeof(struct ip6_hdr))) == 0) {
4850 				udp->uh_sum = 0xffff;
4851 			}
4852 #endif
4853 		} else {
4854 #if defined(SCTP_WITH_NO_CSUM)
4855 			SCTP_STAT_INCR(sctps_sendnocrc);
4856 #else
4857 #if defined(__FreeBSD__) && __FreeBSD_version >= 800000
4858 #if __FreeBSD_version < 900000
4859 			sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr));
4860 			SCTP_STAT_INCR(sctps_sendswcrc);
4861 #else
4862 #if __FreeBSD_version > 901000
4863 			m->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
4864 #else
4865 			m->m_pkthdr.csum_flags = CSUM_SCTP;
4866 #endif
4867 			m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
4868 			SCTP_STAT_INCR(sctps_sendhwcrc);
4869 #endif
4870 #else
4871 			if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
4872 			      (stcb) && (stcb->asoc.scope.loopback_scope))) {
4873 				sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr));
4874 				SCTP_STAT_INCR(sctps_sendswcrc);
4875 			} else {
4876 				SCTP_STAT_INCR(sctps_sendnocrc);
4877 			}
4878 #endif
4879 #endif
4880 		}
4881 		/* send it out. table id is taken from stcb */
4882 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4883 		if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4884 			so = SCTP_INP_SO(inp);
4885 			SCTP_SOCKET_UNLOCK(so, 0);
4886 		}
4887 #endif
4888 #ifdef SCTP_PACKET_LOGGING
4889 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4890 			sctp_packet_log(o_pak);
4891 #endif
4892 #if !(defined(__Panda__) || defined(__Userspace__))
4893 		SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, &ifp, stcb, vrf_id);
4894 #else
4895 		SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, NULL, stcb, vrf_id);
4896 #endif
4897 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4898 		if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4899 			atomic_add_int(&stcb->asoc.refcnt, 1);
4900 			SCTP_TCB_UNLOCK(stcb);
4901 			SCTP_SOCKET_LOCK(so, 0);
4902 			SCTP_TCB_LOCK(stcb);
4903 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4904 		}
4905 #endif
4906 		if (net) {
4907 			/* for link local this must be done */
4908 			sin6->sin6_scope_id = prev_scope;
4909 			sin6->sin6_port = prev_port;
4910 		}
4911 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
4912 		SCTP_STAT_INCR(sctps_sendpackets);
4913 		SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4914 		if (ret) {
4915 			SCTP_STAT_INCR(sctps_senderrors);
4916 		}
4917 		if (net == NULL) {
			/* Now if we had a temp route, free it */
4919 #if defined(__FreeBSD__) && __FreeBSD_version > 901000
4920 			RO_RTFREE(ro);
4921 #else
4922 			if (ro->ro_rt) {
4923 				RTFREE(ro->ro_rt);
4924 				ro->ro_rt = NULL;
4925 			}
4926 #endif
4927 		} else {
4928 			/* PMTU check versus smallest asoc MTU goes here */
4929 			if (ro->ro_rt == NULL) {
4930 				/* Route was freed */
4931 				if (net->ro._s_addr &&
4932 				    net->src_addr_selected) {
4933 					sctp_free_ifa(net->ro._s_addr);
4934 					net->ro._s_addr = NULL;
4935 				}
4936 				net->src_addr_selected = 0;
4937 			}
4938 			if ((ro->ro_rt != NULL) &&
4939 			    (net->ro._s_addr)) {
4940 				uint32_t mtu;
4941 				mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4942 				if (mtu &&
4943 				    (stcb->asoc.smallest_mtu > mtu)) {
4944 					sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4945 					net->mtu = mtu;
4946 					if (net->port) {
4947 						net->mtu -= sizeof(struct udphdr);
4948 					}
4949 				}
4950 			}
4951 #if !defined(__Panda__) && !defined(__Userspace__)
4952 			else if (ifp) {
4953 #if defined(__Windows__)
4954 #define ND_IFINFO(ifp)	(ifp)
4955 #define linkmtu		if_mtu
4956 #endif
4957 				if (ND_IFINFO(ifp)->linkmtu &&
4958 				    (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) {
4959 					sctp_mtu_size_reset(inp,
4960 					    &stcb->asoc,
4961 					    ND_IFINFO(ifp)->linkmtu);
4962 				}
4963 			}
4964 #endif
4965 		}
4966 		return (ret);
4967 	}
4968 #endif
4969 #if defined(__Userspace__)
4970 	case AF_CONN:
4971 	{
4972 		char *buffer;
4973 		struct sockaddr_conn *sconn;
4974 		int len;
4975 
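		/*
		 * AF_CONN is the userspace stack's raw "connection" lower
		 * layer: no IP header is built. The finished SCTP packet
		 * (CRC32c already filled in) is linearized into a flat buffer
		 * and handed to the application-supplied conn_output()
		 * callback together with the opaque address in sconn_addr.
		 */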
4976 		sconn = (struct sockaddr_conn *)to;
4977 		len = sizeof(struct sctphdr);
4978 		newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
4979 		if (newm == NULL) {
4980 			sctp_m_freem(m);
4981 			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4982 			return (ENOMEM);
4983 		}
4984 		SCTP_ALIGN_TO_END(newm, len);
4985 		SCTP_BUF_LEN(newm) = len;
4986 		SCTP_BUF_NEXT(newm) = m;
4987 		m = newm;
4988 		packet_length = sctp_calculate_len(m);
4989 		sctphdr = mtod(m, struct sctphdr *);
4990 		sctphdr->src_port = src_port;
4991 		sctphdr->dest_port = dest_port;
4992 		sctphdr->v_tag = v_tag;
4993 		sctphdr->checksum = 0;
4994 #if defined(SCTP_WITH_NO_CSUM)
4995 		SCTP_STAT_INCR(sctps_sendnocrc);
4996 #else
4997 		sctphdr->checksum = sctp_calculate_cksum(m, 0);
4998 		SCTP_STAT_INCR(sctps_sendswcrc);
4999 #endif
5000 		if (tos_value == 0) {
5001 			tos_value = inp->ip_inp.inp.inp_ip_tos;
5002 		}
5003 		tos_value &= 0xfc;
5004 		if (ecn_ok) {
5005 			tos_value |= sctp_get_ect(stcb);
5006 		}
		/* Ideally, don't alloc/free a buffer for each packet. */
5008 		if ((buffer = malloc(packet_length)) != NULL) {
5009 			m_copydata(m, 0, packet_length, buffer);
5010 			ret = SCTP_BASE_VAR(conn_output)(sconn->sconn_addr, buffer, packet_length, tos_value, nofragment_flag);
5011 			free(buffer);
5012 		} else {
5013 			ret = ENOMEM;
5014 		}
5015 		sctp_m_freem(m);
5016 		return (ret);
5017 	}
5018 #endif
5019 	default:
5020 		SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
5021 		        ((struct sockaddr *)to)->sa_family);
5022 		sctp_m_freem(m);
5023 		SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
5024 		return (EFAULT);
5025 	}
5026 }
5027 
5028 
5029 void
5030 sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
5031 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
5032     SCTP_UNUSED
5033 #endif
5034     )
5035 {
5036 	struct mbuf *m, *m_last;
5037 	struct sctp_nets *net;
5038 	struct sctp_init_chunk *init;
5039 	struct sctp_supported_addr_param *sup_addr;
5040 	struct sctp_adaptation_layer_indication *ali;
5041 	struct sctp_supported_chunk_types_param *pr_supported;
5042 	struct sctp_paramhdr *ph;
5043 	int cnt_inits_to = 0;
5044 	int ret;
5045 	uint16_t num_ext, chunk_len, padding_len, parameter_len;
5046 
5047 #if defined(__APPLE__)
5048 	if (so_locked) {
5049 		sctp_lock_assert(SCTP_INP_SO(inp));
5050 	} else {
5051 		sctp_unlock_assert(SCTP_INP_SO(inp));
5052 	}
5053 #endif
	/* INITs always go to the primary (and usually the ONLY) address */
5055 	net = stcb->asoc.primary_destination;
5056 	if (net == NULL) {
5057 		net = TAILQ_FIRST(&stcb->asoc.nets);
5058 		if (net == NULL) {
5059 			/* TSNH */
5060 			return;
5061 		}
5062 		/* we confirm any address we send an INIT to */
5063 		net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
5064 		(void)sctp_set_primary_addr(stcb, NULL, net);
5065 	} else {
5066 		/* we confirm any address we send an INIT to */
5067 		net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
5068 	}
5069 	SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT\n");
5070 #ifdef INET6
5071 	if (net->ro._l_addr.sa.sa_family == AF_INET6) {
5072 		/*
5073 		 * special hook, if we are sending to link local it will not
5074 		 * show up in our private address count.
5075 		 */
5076 		if (IN6_IS_ADDR_LINKLOCAL(&net->ro._l_addr.sin6.sin6_addr))
5077 			cnt_inits_to = 1;
5078 	}
5079 #endif
5080 	if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5081 		/* This case should not happen */
5082 		SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - failed timer?\n");
5083 		return;
5084 	}
5085 	/* start the INIT timer */
5086 	sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net);
5087 
5088 	m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_NOWAIT, 1, MT_DATA);
5089 	if (m == NULL) {
5090 		/* No memory, INIT timer will re-attempt. */
5091 		SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - mbuf?\n");
5092 		return;
5093 	}
5094 	chunk_len = (uint16_t)sizeof(struct sctp_init_chunk);
5095 	padding_len = 0;
	/* Now let's put the chunk header in place */
5097 	init = mtod(m, struct sctp_init_chunk *);
5098 	/* now the chunk header */
5099 	init->ch.chunk_type = SCTP_INITIATION;
5100 	init->ch.chunk_flags = 0;
5101 	/* fill in later from mbuf we build */
5102 	init->ch.chunk_length = 0;
5103 	/* place in my tag */
5104 	init->init.initiate_tag = htonl(stcb->asoc.my_vtag);
5105 	/* set up some of the credits. */
5106 	init->init.a_rwnd = htonl(max(inp->sctp_socket?SCTP_SB_LIMIT_RCV(inp->sctp_socket):0,
5107 	                              SCTP_MINIMAL_RWND));
5108 	init->init.num_outbound_streams = htons(stcb->asoc.pre_open_streams);
5109 	init->init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams);
5110 	init->init.initial_tsn = htonl(stcb->asoc.init_seq_number);
5111 
5112 	/* Adaptation layer indication parameter */
5113 	if (inp->sctp_ep.adaptation_layer_indicator_provided) {
5114 		parameter_len = (uint16_t)sizeof(struct sctp_adaptation_layer_indication);
5115 		ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t) + chunk_len);
5116 		ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
5117 		ali->ph.param_length = htons(parameter_len);
5118 		ali->indication = htonl(inp->sctp_ep.adaptation_layer_indicator);
5119 		chunk_len += parameter_len;
5120 	}
5121 
5122 	/* ECN parameter */
5123 	if (stcb->asoc.ecn_supported == 1) {
5124 		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5125 		ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
5126 		ph->param_type = htons(SCTP_ECN_CAPABLE);
5127 		ph->param_length = htons(parameter_len);
5128 		chunk_len += parameter_len;
5129 	}
5130 
5131 	/* PR-SCTP supported parameter */
5132 	if (stcb->asoc.prsctp_supported == 1) {
5133 		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5134 		ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
5135 		ph->param_type = htons(SCTP_PRSCTP_SUPPORTED);
5136 		ph->param_length = htons(parameter_len);
5137 		chunk_len += parameter_len;
5138 	}
5139 
5140 	/* Add NAT friendly parameter. */
5141 	if (SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly)) {
5142 		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5143 		ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
5144 		ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
5145 		ph->param_length = htons(parameter_len);
5146 		chunk_len += parameter_len;
5147 	}
5148 
5149 	/* And now tell the peer which extensions we support */
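	/*
	 * The Supported Extensions parameter (RFC 5061) carries one byte per
	 * extension chunk type, so its length is rarely a multiple of four
	 * and usually leaves padding to be added before the next parameter.
	 */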
5150 	num_ext = 0;
5151 	pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t) + chunk_len);
5152 	if (stcb->asoc.prsctp_supported == 1) {
5153 		pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
5154 	}
5155 	if (stcb->asoc.auth_supported == 1) {
5156 		pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
5157 	}
5158 	if (stcb->asoc.asconf_supported == 1) {
5159 		pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
5160 		pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
5161 	}
5162 	if (stcb->asoc.reconfig_supported == 1) {
5163 		pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
5164 	}
5165 	if (stcb->asoc.nrsack_supported == 1) {
5166 		pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
5167 	}
5168 	if (stcb->asoc.pktdrop_supported == 1) {
5169 		pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
5170 	}
5171 	if (num_ext > 0) {
5172 		parameter_len = (uint16_t)sizeof(struct sctp_supported_chunk_types_param) + num_ext;
5173 		pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
5174 		pr_supported->ph.param_length = htons(parameter_len);
5175 		padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
5176 		chunk_len += parameter_len;
5177 	}
5178 	/* add authentication parameters */
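	/*
	 * The RANDOM, HMAC-ALGO and CHUNK-LIST parameters advertise our
	 * SCTP-AUTH capabilities (RFC 4895). Each one first flushes any
	 * padding owed by the previous parameter, which may not have ended
	 * on a 32-bit boundary.
	 */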
5179 	if (stcb->asoc.auth_supported) {
5180 		/* attach RANDOM parameter, if available */
5181 		if (stcb->asoc.authinfo.random != NULL) {
5182 			struct sctp_auth_random *randp;
5183 
5184 			if (padding_len > 0) {
5185 				memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5186 				chunk_len += padding_len;
5187 				padding_len = 0;
5188 			}
5189 			randp = (struct sctp_auth_random *)(mtod(m, caddr_t) + chunk_len);
5190 			parameter_len = (uint16_t)sizeof(struct sctp_auth_random) + stcb->asoc.authinfo.random_len;
5191 			/* random key already contains the header */
5192 			memcpy(randp, stcb->asoc.authinfo.random->key, parameter_len);
5193 			padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
5194 			chunk_len += parameter_len;
5195 		}
5196 		/* add HMAC_ALGO parameter */
5197 		if (stcb->asoc.local_hmacs != NULL) {
5198 			struct sctp_auth_hmac_algo *hmacs;
5199 
5200 			if (padding_len > 0) {
5201 				memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5202 				chunk_len += padding_len;
5203 				padding_len = 0;
5204 			}
5205 			hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t) + chunk_len);
5206 			parameter_len = (uint16_t)(sizeof(struct sctp_auth_hmac_algo) +
5207 			                           stcb->asoc.local_hmacs->num_algo * sizeof(uint16_t));
5208 			hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
5209 			hmacs->ph.param_length = htons(parameter_len);
5210 			sctp_serialize_hmaclist(stcb->asoc.local_hmacs, (uint8_t *)hmacs->hmac_ids);
5211 			padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
5212 			chunk_len += parameter_len;
5213 		}
5214 		/* add CHUNKS parameter */
5215 		if (stcb->asoc.local_auth_chunks != NULL) {
5216 			struct sctp_auth_chunk_list *chunks;
5217 
5218 			if (padding_len > 0) {
5219 				memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5220 				chunk_len += padding_len;
5221 				padding_len = 0;
5222 			}
5223 			chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t) + chunk_len);
5224 			parameter_len = (uint16_t)(sizeof(struct sctp_auth_chunk_list) +
5225 			                           sctp_auth_get_chklist_size(stcb->asoc.local_auth_chunks));
5226 			chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
5227 			chunks->ph.param_length = htons(parameter_len);
5228 			sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks, chunks->chunk_types);
5229 			padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
5230 			chunk_len += parameter_len;
5231 		}
5232 	}
5233 
5234 	/* now any cookie time extensions */
5235 	if (stcb->asoc.cookie_preserve_req) {
5236 		struct sctp_cookie_perserve_param *cookie_preserve;
5237 
5238 		if (padding_len > 0) {
5239 			memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5240 			chunk_len += padding_len;
5241 			padding_len = 0;
5242 		}
5243 		parameter_len = (uint16_t)sizeof(struct sctp_cookie_perserve_param);
5244 		cookie_preserve = (struct sctp_cookie_perserve_param *)(mtod(m, caddr_t) + chunk_len);
5245 		cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE);
5246 		cookie_preserve->ph.param_length = htons(parameter_len);
5247 		cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req);
5248 		stcb->asoc.cookie_preserve_req = 0;
5249 		chunk_len += parameter_len;
5250 	}
5251 
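	/*
	 * Supported Address Types parameter (RFC 4960): a 4-byte header plus
	 * one 2-byte entry per enabled address family. With a single family
	 * the parameter is 6 bytes long and needs 2 bytes of padding, which
	 * is what 4 - 2 * i computes below.
	 */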
5252 	if (stcb->asoc.scope.ipv4_addr_legal || stcb->asoc.scope.ipv6_addr_legal) {
5253 		uint8_t i;
5254 
5255 		if (padding_len > 0) {
5256 			memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5257 			chunk_len += padding_len;
5258 			padding_len = 0;
5259 		}
5260 		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5261 		if (stcb->asoc.scope.ipv4_addr_legal) {
5262 			parameter_len += (uint16_t)sizeof(uint16_t);
5263 		}
5264 		if (stcb->asoc.scope.ipv6_addr_legal) {
5265 			parameter_len += (uint16_t)sizeof(uint16_t);
5266 		}
5267 		sup_addr = (struct sctp_supported_addr_param *)(mtod(m, caddr_t) + chunk_len);
5268 		sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE);
5269 		sup_addr->ph.param_length = htons(parameter_len);
5270 		i = 0;
5271 		if (stcb->asoc.scope.ipv4_addr_legal) {
5272 			sup_addr->addr_type[i++] = htons(SCTP_IPV4_ADDRESS);
5273 		}
5274 		if (stcb->asoc.scope.ipv6_addr_legal) {
5275 			sup_addr->addr_type[i++] = htons(SCTP_IPV6_ADDRESS);
5276 		}
5277 		padding_len = 4 - 2 * i;
5278 		chunk_len += parameter_len;
5279 	}
5280 
5281 	SCTP_BUF_LEN(m) = chunk_len;
5282 	/* now the addresses */
	/* To optimize this we could put the scoping information
	 * into a structure and remove the individual uint8_t fields from
	 * the assoc structure. Then we could simply pass it along with
	 * the address within the stcb. But for now this is a quick
	 * hack to keep the address handling teased apart.
	 */
5289 	m_last = sctp_add_addresses_to_i_ia(inp, stcb, &stcb->asoc.scope,
5290 	                                    m, cnt_inits_to,
5291 	                                    &padding_len, &chunk_len);
5292 
5293 	init->ch.chunk_length = htons(chunk_len);
5294 	if (padding_len > 0) {
5295 		if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
5296 			sctp_m_freem(m);
5297 			return;
5298 		}
5299 	}
5300 	SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - calls lowlevel_output\n");
5301 	ret = sctp_lowlevel_chunk_output(inp, stcb, net,
5302 	                                 (struct sockaddr *)&net->ro._l_addr,
5303 	                                 m, 0, NULL, 0, 0, 0, 0,
5304 	                                 inp->sctp_lport, stcb->rport, htonl(0),
5305 	                                 net->port, NULL,
5306 #if defined(__FreeBSD__)
5307 	                                 0, 0,
5308 #endif
5309 	                                 so_locked);
5310 	SCTPDBG(SCTP_DEBUG_OUTPUT4, "lowlevel_output - %d\n", ret);
5311 	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
5312 	(void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
5313 }
5314 
5315 struct mbuf *
5316 sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt,
5317 	int param_offset, int *abort_processing, struct sctp_chunkhdr *cp, int *nat_friendly)
5318 {
5319 	/*
	 * Given an mbuf containing an INIT or INIT-ACK with param_offset
	 * equal to the beginning of the parameters, i.e. (iphlen +
	 * sizeof(struct sctp_init_msg)), parse through the parameters to the
	 * end of the mbuf, verifying that all parameters are known.
	 *
	 * For unknown parameters build and return an mbuf with
	 * UNRECOGNIZED_PARAMETER errors. If the parameter flags indicate that
	 * processing of this chunk should stop, stop and set
	 * *abort_processing to 1.
	 *
	 * By having param_offset pre-set to where the parameters begin it is
	 * hoped that this routine may be reused in the future by new
	 * features.
5332 	 */
5333 	struct sctp_paramhdr *phdr, params;
5334 
5335 	struct mbuf *mat, *op_err;
5336 	char tempbuf[SCTP_PARAM_BUFFER_SIZE];
5337 	int at, limit, pad_needed;
5338 	uint16_t ptype, plen, padded_size;
5339 	int err_at;
5340 
5341 	*abort_processing = 0;
5342 	mat = in_initpkt;
5343 	err_at = 0;
5344 	limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk);
5345 	at = param_offset;
5346 	op_err = NULL;
5347 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "Check for unrecognized param's\n");
5348 	phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
5349 	while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) {
5350 		ptype = ntohs(phdr->param_type);
5351 		plen = ntohs(phdr->param_length);
5352 		if ((plen > limit) || (plen < sizeof(struct sctp_paramhdr))) {
			/* malformed parameter */
5354 			SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error %d\n", plen);
5355 			goto invalid_size;
5356 		}
5357 		limit -= SCTP_SIZE32(plen);
5358 		/*-
		 * All parameters for all chunks that we know/understand are
		 * listed here. We process them in other places and take the
		 * appropriate stop action per the upper bits. However, this
		 * is the generic routine processors can call to get back
		 * an op_err to either incorporate (INIT-ACK) or send.
5364 		 */
5365 		padded_size = SCTP_SIZE32(plen);
5366 		switch (ptype) {
			/* Parameters with variable size */
5368 		case SCTP_HEARTBEAT_INFO:
5369 		case SCTP_STATE_COOKIE:
5370 		case SCTP_UNRECOG_PARAM:
5371 		case SCTP_ERROR_CAUSE_IND:
5372 			/* ok skip fwd */
5373 			at += padded_size;
5374 			break;
			/* Parameters with variable size within a range */
5376 		case SCTP_CHUNK_LIST:
5377 		case SCTP_SUPPORTED_CHUNK_EXT:
5378 			if (padded_size > (sizeof(struct sctp_supported_chunk_types_param) + (sizeof(uint8_t) * SCTP_MAX_SUPPORTED_EXT))) {
5379 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error chklist %d\n", plen);
5380 				goto invalid_size;
5381 			}
5382 			at += padded_size;
5383 			break;
5384 		case SCTP_SUPPORTED_ADDRTYPE:
5385 			if (padded_size > SCTP_MAX_ADDR_PARAMS_SIZE) {
5386 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error supaddrtype %d\n", plen);
5387 				goto invalid_size;
5388 			}
5389 			at += padded_size;
5390 			break;
5391 		case SCTP_RANDOM:
5392 			if (padded_size > (sizeof(struct sctp_auth_random) + SCTP_RANDOM_MAX_SIZE)) {
5393 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error random %d\n", plen);
5394 				goto invalid_size;
5395 			}
5396 			at += padded_size;
5397 			break;
5398 		case SCTP_SET_PRIM_ADDR:
5399 		case SCTP_DEL_IP_ADDRESS:
5400 		case SCTP_ADD_IP_ADDRESS:
5401 			if ((padded_size != sizeof(struct sctp_asconf_addrv4_param)) &&
5402 			    (padded_size != sizeof(struct sctp_asconf_addr_param))) {
5403 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error setprim %d\n", plen);
5404 				goto invalid_size;
5405 			}
5406 			at += padded_size;
5407 			break;
			/* Parameters with a fixed size */
5409 		case SCTP_IPV4_ADDRESS:
5410 			if (padded_size != sizeof(struct sctp_ipv4addr_param)) {
5411 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv4 addr %d\n", plen);
5412 				goto invalid_size;
5413 			}
5414 			at += padded_size;
5415 			break;
5416 		case SCTP_IPV6_ADDRESS:
5417 			if (padded_size != sizeof(struct sctp_ipv6addr_param)) {
5418 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv6 addr %d\n", plen);
5419 				goto invalid_size;
5420 			}
5421 			at += padded_size;
5422 			break;
5423 		case SCTP_COOKIE_PRESERVE:
5424 			if (padded_size != sizeof(struct sctp_cookie_perserve_param)) {
5425 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error cookie-preserve %d\n", plen);
5426 				goto invalid_size;
5427 			}
5428 			at += padded_size;
5429 			break;
5430 		case SCTP_HAS_NAT_SUPPORT:
5431 			*nat_friendly = 1;
5432 			/* fall through */
5433 		case SCTP_PRSCTP_SUPPORTED:
5434 			if (padded_size != sizeof(struct sctp_paramhdr)) {
5435 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error prsctp/nat support %d\n", plen);
5436 				goto invalid_size;
5437 			}
5438 			at += padded_size;
5439 			break;
5440 		case SCTP_ECN_CAPABLE:
5441 			if (padded_size != sizeof(struct sctp_paramhdr)) {
5442 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecn %d\n", plen);
5443 				goto invalid_size;
5444 			}
5445 			at += padded_size;
5446 			break;
5447 		case SCTP_ULP_ADAPTATION:
5448 			if (padded_size != sizeof(struct sctp_adaptation_layer_indication)) {
5449 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error adapatation %d\n", plen);
5450 				goto invalid_size;
5451 			}
5452 			at += padded_size;
5453 			break;
5454 		case SCTP_SUCCESS_REPORT:
5455 			if (padded_size != sizeof(struct sctp_asconf_paramhdr)) {
5456 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error success %d\n", plen);
5457 				goto invalid_size;
5458 			}
5459 			at += padded_size;
5460 			break;
5461 		case SCTP_HOSTNAME_ADDRESS:
5462 		{
5463 			/* We can NOT handle HOST NAME addresses!! */
5464 			int l_len;
5465 
5466 			SCTPDBG(SCTP_DEBUG_OUTPUT1, "Can't handle hostname addresses.. abort processing\n");
5467 			*abort_processing = 1;
5468 			if (op_err == NULL) {
				/* Ok, need to try to get an mbuf */
5470 #ifdef INET6
5471 				l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5472 #else
5473 				l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5474 #endif
5475 				l_len += plen;
5476 				l_len += sizeof(struct sctp_paramhdr);
5477 				op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5478 				if (op_err) {
5479 					SCTP_BUF_LEN(op_err) = 0;
5480 					/*
					 * pre-reserve space for the IP and SCTP
					 * headers and the chunk header
5483 					 */
5484 #ifdef INET6
5485 					SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5486 #else
5487 					SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5488 #endif
5489 					SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5490 					SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5491 				}
5492 			}
5493 			if (op_err) {
5494 				/* If we have space */
5495 				struct sctp_paramhdr s;
5496 
5497 				if (err_at % 4) {
5498 					uint32_t cpthis = 0;
5499 
5500 					pad_needed = 4 - (err_at % 4);
5501 					m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5502 					err_at += pad_needed;
5503 				}
5504 				s.param_type = htons(SCTP_CAUSE_UNRESOLVABLE_ADDR);
5505 				s.param_length = htons(sizeof(s) + plen);
5506 				m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
5507 				err_at += sizeof(s);
5508 				phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf),plen));
5509 				if (phdr == NULL) {
5510 					sctp_m_freem(op_err);
5511 					/*
5512 					 * we are out of memory but we still
5513 					 * need to have a look at what to do
5514 					 * (the system is in trouble
5515 					 * though).
5516 					 */
5517 					return (NULL);
5518 				}
5519 				m_copyback(op_err, err_at, plen, (caddr_t)phdr);
5520 			}
5521 			return (op_err);
5522 			break;
5523 		}
5524 		default:
5525 			/*
			 * we do not recognize this parameter; figure out
			 * what to do with it.
5528 			 */
5529 			SCTPDBG(SCTP_DEBUG_OUTPUT1, "Hit default param %x\n", ptype);
5530 			if ((ptype & 0x4000) == 0x4000) {
5531 				/* Report bit is set?? */
5532 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "report op err\n");
5533 				if (op_err == NULL) {
5534 					int l_len;
5535 					/* Ok need to try to get an mbuf */
5536 #ifdef INET6
5537 					l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5538 #else
5539 					l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5540 #endif
5541 					l_len += plen;
5542 					l_len += sizeof(struct sctp_paramhdr);
5543 					op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5544 					if (op_err) {
5545 						SCTP_BUF_LEN(op_err) = 0;
5546 #ifdef INET6
5547 						SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5548 #else
5549 						SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5550 #endif
5551 						SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5552 						SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5553 					}
5554 				}
5555 				if (op_err) {
5556 					/* If we have space */
5557 					struct sctp_paramhdr s;
5558 
5559 					if (err_at % 4) {
5560 						uint32_t cpthis = 0;
5561 
5562 						pad_needed = 4 - (err_at % 4);
5563 						m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5564 						err_at += pad_needed;
5565 					}
5566 					s.param_type = htons(SCTP_UNRECOG_PARAM);
5567 					s.param_length = htons(sizeof(s) + plen);
5568 					m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
5569 					err_at += sizeof(s);
5570 					if (plen > sizeof(tempbuf)) {
5571 						plen = sizeof(tempbuf);
5572 					}
5573 					phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf),plen));
5574 					if (phdr == NULL) {
5575 						sctp_m_freem(op_err);
5576 						/*
5577 						 * we are out of memory but
5578 						 * we still need to have a
5579 						 * look at what to do (the
5580 						 * system is in trouble
5581 						 * though).
5582 						 */
5583 						op_err = NULL;
5584 						goto more_processing;
5585 					}
5586 					m_copyback(op_err, err_at, plen, (caddr_t)phdr);
5587 					err_at += plen;
5588 				}
5589 			}
5590 		more_processing:
5591 			if ((ptype & 0x8000) == 0x0000) {
5592 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "stop proc\n");
5593 				return (op_err);
5594 			} else {
5595 				/* skip this chunk and continue processing */
5596 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "move on\n");
5597 				at += SCTP_SIZE32(plen);
5598 			}
5599 			break;
5600 
5601 		}
5602 		phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
5603 	}
5604 	return (op_err);
5605  invalid_size:
5606 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "abort flag set\n");
5607 	*abort_processing = 1;
5608 	if ((op_err == NULL) && phdr) {
5609 		int l_len;
5610 #ifdef INET6
5611 		l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5612 #else
5613 		l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5614 #endif
5615 		l_len += (2 * sizeof(struct sctp_paramhdr));
5616 		op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5617 		if (op_err) {
5618 			SCTP_BUF_LEN(op_err) = 0;
5619 #ifdef INET6
5620 			SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5621 #else
5622 			SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5623 #endif
5624 			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5625 			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5626 		}
5627 	}
5628 	if ((op_err) && phdr) {
5629 		struct sctp_paramhdr s;
5630 
5631 		if (err_at % 4) {
5632 			uint32_t cpthis = 0;
5633 
5634 			pad_needed = 4 - (err_at % 4);
5635 			m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5636 			err_at += pad_needed;
5637 		}
5638 		s.param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5639 		s.param_length = htons(sizeof(s) + sizeof(struct sctp_paramhdr));
5640 		m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
5641 		err_at += sizeof(s);
5642 		/* Only copy back the p-hdr that caused the issue */
5643 		m_copyback(op_err, err_at, sizeof(struct sctp_paramhdr), (caddr_t)phdr);
5644 	}
5645 	return (op_err);
5646 }
5647 
5648 static int
5649 sctp_are_there_new_addresses(struct sctp_association *asoc,
5650     struct mbuf *in_initpkt, int offset, struct sockaddr *src)
5651 {
5652 	/*
	 * Given an INIT packet, look through the packet to verify that there
	 * are NO new addresses. As we go through the parameters, add reports
	 * of any unrecognized parameters that require an error. Also we
	 * must return (1) to drop the packet if we see an unrecognized
	 * parameter that tells us to drop the chunk.
5658 	 */
5659 	struct sockaddr *sa_touse;
5660 	struct sockaddr *sa;
5661 	struct sctp_paramhdr *phdr, params;
5662 	uint16_t ptype, plen;
5663 	uint8_t fnd;
5664 	struct sctp_nets *net;
5665 #ifdef INET
5666 	struct sockaddr_in sin4, *sa4;
5667 #endif
5668 #ifdef INET6
5669 	struct sockaddr_in6 sin6, *sa6;
5670 #endif
5671 
5672 #ifdef INET
5673 	memset(&sin4, 0, sizeof(sin4));
5674 	sin4.sin_family = AF_INET;
5675 #ifdef HAVE_SIN_LEN
5676 	sin4.sin_len = sizeof(sin4);
5677 #endif
5678 #endif
5679 #ifdef INET6
5680 	memset(&sin6, 0, sizeof(sin6));
5681 	sin6.sin6_family = AF_INET6;
5682 #ifdef HAVE_SIN6_LEN
5683 	sin6.sin6_len = sizeof(sin6);
5684 #endif
5685 #endif
5686 	/* First what about the src address of the pkt ? */
5687 	fnd = 0;
5688 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5689 		sa = (struct sockaddr *)&net->ro._l_addr;
5690 		if (sa->sa_family == src->sa_family) {
5691 #ifdef INET
5692 			if (sa->sa_family == AF_INET) {
5693 				struct sockaddr_in *src4;
5694 
5695 				sa4 = (struct sockaddr_in *)sa;
5696 				src4 = (struct sockaddr_in *)src;
5697 				if (sa4->sin_addr.s_addr == src4->sin_addr.s_addr) {
5698 					fnd = 1;
5699 					break;
5700 				}
5701 			}
5702 #endif
5703 #ifdef INET6
5704 			if (sa->sa_family == AF_INET6) {
5705 				struct sockaddr_in6 *src6;
5706 
5707 				sa6 = (struct sockaddr_in6 *)sa;
5708 				src6 = (struct sockaddr_in6 *)src;
5709 				if (SCTP6_ARE_ADDR_EQUAL(sa6, src6)) {
5710 					fnd = 1;
5711 					break;
5712 				}
5713 			}
5714 #endif
5715 		}
5716 	}
5717 	if (fnd == 0) {
		/* New address added! No need to look further. */
5719 		return (1);
5720 	}
	/* Ok so far; let's munge through the rest of the packet */
5722 	offset += sizeof(struct sctp_init_chunk);
5723 	phdr = sctp_get_next_param(in_initpkt, offset, &params, sizeof(params));
5724 	while (phdr) {
5725 		sa_touse = NULL;
5726 		ptype = ntohs(phdr->param_type);
5727 		plen = ntohs(phdr->param_length);
5728 		switch (ptype) {
5729 #ifdef INET
5730 		case SCTP_IPV4_ADDRESS:
5731 		{
5732 			struct sctp_ipv4addr_param *p4, p4_buf;
5733 
5734 			phdr = sctp_get_next_param(in_initpkt, offset,
5735 			    (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
5736 			if (plen != sizeof(struct sctp_ipv4addr_param) ||
5737 			    phdr == NULL) {
5738 				return (1);
5739 			}
5740 			p4 = (struct sctp_ipv4addr_param *)phdr;
5741 			sin4.sin_addr.s_addr = p4->addr;
5742 			sa_touse = (struct sockaddr *)&sin4;
5743 			break;
5744 		}
5745 #endif
5746 #ifdef INET6
5747 		case SCTP_IPV6_ADDRESS:
5748 		{
5749 			struct sctp_ipv6addr_param *p6, p6_buf;
5750 
5751 			phdr = sctp_get_next_param(in_initpkt, offset,
5752 			    (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
5753 			if (plen != sizeof(struct sctp_ipv6addr_param) ||
5754 			    phdr == NULL) {
5755 				return (1);
5756 			}
5757 			p6 = (struct sctp_ipv6addr_param *)phdr;
5758 			memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
5759 			    sizeof(p6->addr));
5760 			sa_touse = (struct sockaddr *)&sin6;
5761 			break;
5762 		}
5763 #endif
5764 		default:
5765 			sa_touse = NULL;
5766 			break;
5767 		}
5768 		if (sa_touse) {
5769 			/* ok, sa_touse points to one to check */
5770 			fnd = 0;
5771 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5772 				sa = (struct sockaddr *)&net->ro._l_addr;
5773 				if (sa->sa_family != sa_touse->sa_family) {
5774 					continue;
5775 				}
5776 #ifdef INET
5777 				if (sa->sa_family == AF_INET) {
5778 					sa4 = (struct sockaddr_in *)sa;
5779 					if (sa4->sin_addr.s_addr ==
5780 					    sin4.sin_addr.s_addr) {
5781 						fnd = 1;
5782 						break;
5783 					}
5784 				}
5785 #endif
5786 #ifdef INET6
5787 				if (sa->sa_family == AF_INET6) {
5788 					sa6 = (struct sockaddr_in6 *)sa;
5789 					if (SCTP6_ARE_ADDR_EQUAL(
5790 					    sa6, &sin6)) {
5791 						fnd = 1;
5792 						break;
5793 					}
5794 				}
5795 #endif
5796 			}
5797 			if (!fnd) {
5798 				/* New addr added! no need to look further */
5799 				return (1);
5800 			}
5801 		}
5802 		offset += SCTP_SIZE32(plen);
5803 		phdr = sctp_get_next_param(in_initpkt, offset, &params, sizeof(params));
5804 	}
5805 	return (0);
5806 }
5807 
5808 /*
 * Given an mbuf chain that was sent into us containing an INIT, build an
 * INIT-ACK with COOKIE and send it back. We assume that the in_initpkt has
 * been pulled up to include the IPv6/IPv4 header, the SCTP header and the
 * initial part of the INIT message (i.e. the struct sctp_init_msg).
5813  */
5814 void
5815 sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
5816                        struct mbuf *init_pkt, int iphlen, int offset,
5817                        struct sockaddr *src, struct sockaddr *dst,
5818                        struct sctphdr *sh, struct sctp_init_chunk *init_chk,
5819 #if defined(__FreeBSD__)
5820 		       uint8_t mflowtype, uint32_t mflowid,
5821 #endif
5822                        uint32_t vrf_id, uint16_t port, int hold_inp_lock)
5823 {
5824 	struct sctp_association *asoc;
5825 	struct mbuf *m, *m_tmp, *m_last, *m_cookie, *op_err;
5826 	struct sctp_init_ack_chunk *initack;
5827 	struct sctp_adaptation_layer_indication *ali;
5828 	struct sctp_supported_chunk_types_param *pr_supported;
5829 	struct sctp_paramhdr *ph;
5830 	union sctp_sockstore *over_addr;
5831 	struct sctp_scoping scp;
5832 #ifdef INET
5833 	struct sockaddr_in *dst4 = (struct sockaddr_in *)dst;
5834 	struct sockaddr_in *src4 = (struct sockaddr_in *)src;
5835 	struct sockaddr_in *sin;
5836 #endif
5837 #ifdef INET6
5838 	struct sockaddr_in6 *dst6 = (struct sockaddr_in6 *)dst;
5839 	struct sockaddr_in6 *src6 = (struct sockaddr_in6 *)src;
5840 	struct sockaddr_in6 *sin6;
5841 #endif
5842 #if defined(__Userspace__)
5843 	struct sockaddr_conn *dstconn = (struct sockaddr_conn *)dst;
5844 	struct sockaddr_conn *srcconn = (struct sockaddr_conn *)src;
5845 	struct sockaddr_conn *sconn;
5846 #endif
5847 	struct sockaddr *to;
5848 	struct sctp_state_cookie stc;
5849 	struct sctp_nets *net = NULL;
5850 	uint8_t *signature = NULL;
5851 	int cnt_inits_to = 0;
5852 	uint16_t his_limit, i_want;
5853 	int abort_flag;
5854 	int nat_friendly = 0;
5855 	struct socket *so;
5856 	uint16_t num_ext, chunk_len, padding_len, parameter_len;
5857 
5858 	if (stcb) {
5859 		asoc = &stcb->asoc;
5860 	} else {
5861 		asoc = NULL;
5862 	}
5863 	if ((asoc != NULL) &&
5864 	    (SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT) &&
5865 	    (sctp_are_there_new_addresses(asoc, init_pkt, offset, src))) {
5866 		/* new addresses, out of here in non-cookie-wait states */
5867 		/*
		 * Send an ABORT; we don't add the new-address error cause,
		 * though we do set the T bit and copy in the 0 tag. This
		 * looks no different than if no listener was present.
5871 		 */
5872 		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5873 		                             "Address added");
5874 		sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, op_err,
5875 #if defined(__FreeBSD__)
5876 		                mflowtype, mflowid,
5877 #endif
5878 		                vrf_id, port);
5879 		return;
5880 	}
5881 	abort_flag = 0;
5882 	op_err = sctp_arethere_unrecognized_parameters(init_pkt,
5883 						       (offset + sizeof(struct sctp_init_chunk)),
5884 						       &abort_flag, (struct sctp_chunkhdr *)init_chk, &nat_friendly);
5885 	if (abort_flag) {
5886 	do_a_abort:
5887 		if (op_err == NULL) {
5888 			char msg[SCTP_DIAG_INFO_LEN];
5889 
5890 			snprintf(msg, sizeof(msg), "%s:%d at %s\n", __FILE__, __LINE__, __FUNCTION__);
5891 			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5892 			                             msg);
5893 		}
5894 		sctp_send_abort(init_pkt, iphlen, src, dst, sh,
5895 				init_chk->init.initiate_tag, op_err,
5896 #if defined(__FreeBSD__)
5897 		                mflowtype, mflowid,
5898 #endif
5899 		                vrf_id, port);
5900 		return;
5901 	}
5902 	m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
5903 	if (m == NULL) {
5904 		/* No memory, INIT timer will re-attempt. */
5905 		if (op_err)
5906 			sctp_m_freem(op_err);
5907 		return;
5908 	}
5909 	chunk_len = (uint16_t)sizeof(struct sctp_init_ack_chunk);
5910 	padding_len = 0;
5911 
5912 	/*
5913 	 * We might not overwrite the identification[] completely and on
5914 	 * some platforms time_entered will contain some padding.
5915 	 * Therefore zero out the cookie to avoid putting
5916 	 * uninitialized memory on the wire.
5917 	 */
5918 	memset(&stc, 0, sizeof(struct sctp_state_cookie));
5919 
5920 	/* the time I built cookie */
5921 	(void)SCTP_GETTIME_TIMEVAL(&stc.time_entered);
5922 
5923 	/* populate any tie tags */
5924 	if (asoc != NULL) {
		/* copy the tie-tags and cookie life from the existing association */
5926 		stc.tie_tag_my_vtag = asoc->my_vtag_nonce;
5927 		stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce;
5928 		stc.cookie_life = asoc->cookie_life;
5929 		net = asoc->primary_destination;
5930 	} else {
5931 		stc.tie_tag_my_vtag = 0;
5932 		stc.tie_tag_peer_vtag = 0;
5933 		/* life I will award this cookie */
5934 		stc.cookie_life = inp->sctp_ep.def_cookie_life;
5935 	}
5936 
5937 	/* copy in the ports for later check */
5938 	stc.myport = sh->dest_port;
5939 	stc.peerport = sh->src_port;
5940 
5941 	/*
	 * If we wanted to honor cookie life extensions, we would add to
	 * stc.cookie_life. For now we should NOT honor any extension.
5944 	 */
5945 	stc.site_scope = stc.local_scope = stc.loopback_scope = 0;
5946 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
5947 		stc.ipv6_addr_legal = 1;
5948 		if (SCTP_IPV6_V6ONLY(inp)) {
5949 			stc.ipv4_addr_legal = 0;
5950 		} else {
5951 			stc.ipv4_addr_legal = 1;
5952 		}
5953 #if defined(__Userspace__)
5954 		stc.conn_addr_legal = 0;
5955 #endif
5956 	} else {
5957 		stc.ipv6_addr_legal = 0;
5958 #if defined(__Userspace__)
5959 		if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) {
5960 			stc.conn_addr_legal = 1;
5961 			stc.ipv4_addr_legal = 0;
5962 		} else {
5963 			stc.conn_addr_legal = 0;
5964 			stc.ipv4_addr_legal = 1;
5965 		}
5966 #else
5967 		stc.ipv4_addr_legal = 1;
5968 #endif
5969 	}
5970 #ifdef SCTP_DONT_DO_PRIVADDR_SCOPE
5971 	stc.ipv4_scope = 1;
5972 #else
5973 	stc.ipv4_scope = 0;
5974 #endif
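	/*
	 * If there is no existing association (net == NULL), the scope and
	 * address information for the cookie is derived from the addresses
	 * in the incoming packet; otherwise it is copied from the existing
	 * association and its primary destination.
	 */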
5975 	if (net == NULL) {
5976 		to = src;
5977 		switch (dst->sa_family) {
5978 #ifdef INET
5979 		case AF_INET:
5980 		{
5981 			/* lookup address */
5982 			stc.address[0] = src4->sin_addr.s_addr;
5983 			stc.address[1] = 0;
5984 			stc.address[2] = 0;
5985 			stc.address[3] = 0;
5986 			stc.addr_type = SCTP_IPV4_ADDRESS;
5987 			/* local from address */
5988 			stc.laddress[0] = dst4->sin_addr.s_addr;
5989 			stc.laddress[1] = 0;
5990 			stc.laddress[2] = 0;
5991 			stc.laddress[3] = 0;
5992 			stc.laddr_type = SCTP_IPV4_ADDRESS;
5993 			/* scope_id is only for v6 */
5994 			stc.scope_id = 0;
5995 #ifndef SCTP_DONT_DO_PRIVADDR_SCOPE
5996 			if (IN4_ISPRIVATE_ADDRESS(&src4->sin_addr)) {
5997 				stc.ipv4_scope = 1;
5998 			}
5999 #else
6000 			stc.ipv4_scope = 1;
6001 #endif				/* SCTP_DONT_DO_PRIVADDR_SCOPE */
6002 			/* Must use the address in this case */
6003 			if (sctp_is_address_on_local_host(src, vrf_id)) {
6004 				stc.loopback_scope = 1;
6005 				stc.ipv4_scope = 1;
6006 				stc.site_scope = 1;
6007 				stc.local_scope = 0;
6008 			}
6009 			break;
6010 		}
6011 #endif
6012 #ifdef INET6
6013 		case AF_INET6:
6014 		{
6015 			stc.addr_type = SCTP_IPV6_ADDRESS;
6016 			memcpy(&stc.address, &src6->sin6_addr, sizeof(struct in6_addr));
6017 #if defined(__FreeBSD__) && (((__FreeBSD_version < 900000) && (__FreeBSD_version >= 804000)) || (__FreeBSD_version > 900000))
6018 			stc.scope_id = in6_getscope(&src6->sin6_addr);
6019 #else
6020 			stc.scope_id = 0;
6021 #endif
6022 			if (sctp_is_address_on_local_host(src, vrf_id)) {
6023 				stc.loopback_scope = 1;
6024 				stc.local_scope = 0;
6025 				stc.site_scope = 1;
6026 				stc.ipv4_scope = 1;
6027 			} else if (IN6_IS_ADDR_LINKLOCAL(&src6->sin6_addr)) {
6028 				/*
				 * If the new destination is a LINK_LOCAL
				 * address, we must have both site and local
				 * scope in common. Don't set local scope
				 * though, since we depend on the source
				 * being added implicitly. We cannot assume,
				 * just because we share one link, that all
				 * links are common.
6036 				 */
6037 #if defined(__APPLE__)
6038 				/* Mac OS X currently doesn't have in6_getscope() */
6039 				stc.scope_id = src6->sin6_addr.s6_addr16[1];
6040 #endif
6041 				stc.local_scope = 0;
6042 				stc.site_scope = 1;
6043 				stc.ipv4_scope = 1;
6044 				/*
				 * We start counting for the private address
				 * stuff at 1, since the link-local address we
				 * source from won't show up in our scoped
				 * count.
6049 				 */
6050 				cnt_inits_to = 1;
6051 				/* pull out the scope_id from incoming pkt */
6052 			} else if (IN6_IS_ADDR_SITELOCAL(&src6->sin6_addr)) {
6053 				/*
6054 				 * If the new destination is SITE_LOCAL then
6055 				 * we must have site scope in common.
6056 				 */
6057 				stc.site_scope = 1;
6058 			}
6059 			memcpy(&stc.laddress, &dst6->sin6_addr, sizeof(struct in6_addr));
6060 			stc.laddr_type = SCTP_IPV6_ADDRESS;
6061 			break;
6062 		}
6063 #endif
6064 #if defined(__Userspace__)
6065 		case AF_CONN:
6066 		{
6067 			/* lookup address */
6068 			stc.address[0] = 0;
6069 			stc.address[1] = 0;
6070 			stc.address[2] = 0;
6071 			stc.address[3] = 0;
6072 			memcpy(&stc.address, &srcconn->sconn_addr, sizeof(void *));
6073 			stc.addr_type = SCTP_CONN_ADDRESS;
6074 			/* local from address */
6075 			stc.laddress[0] = 0;
6076 			stc.laddress[1] = 0;
6077 			stc.laddress[2] = 0;
6078 			stc.laddress[3] = 0;
6079 			memcpy(&stc.laddress, &dstconn->sconn_addr, sizeof(void *));
6080 			stc.laddr_type = SCTP_CONN_ADDRESS;
6081 			/* scope_id is only for v6 */
6082 			stc.scope_id = 0;
6083 			break;
6084 		}
6085 #endif
6086 		default:
6087 			/* TSNH */
6088 			goto do_a_abort;
6089 			break;
6090 		}
6091 	} else {
6092 		/* set the scope per the existing tcb */
6093 
6094 #ifdef INET6
6095 		struct sctp_nets *lnet;
6096 #endif
6097 
6098 		stc.loopback_scope = asoc->scope.loopback_scope;
6099 		stc.ipv4_scope = asoc->scope.ipv4_local_scope;
6100 		stc.site_scope = asoc->scope.site_scope;
6101 		stc.local_scope = asoc->scope.local_scope;
6102 #ifdef INET6
6103 		/* Why do we not consider IPv4 LL addresses? */
6104 		TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
6105 			if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) {
6106 				if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) {
6107 					/*
6108 					 * if we have a LL address, start
6109 					 * counting at 1.
6110 					 */
6111 					cnt_inits_to = 1;
6112 				}
6113 			}
6114 		}
6115 #endif
6116 		/* use the net pointer */
6117 		to = (struct sockaddr *)&net->ro._l_addr;
6118 		switch (to->sa_family) {
6119 #ifdef INET
6120 		case AF_INET:
6121 			sin = (struct sockaddr_in *)to;
6122 			stc.address[0] = sin->sin_addr.s_addr;
6123 			stc.address[1] = 0;
6124 			stc.address[2] = 0;
6125 			stc.address[3] = 0;
6126 			stc.addr_type = SCTP_IPV4_ADDRESS;
6127 			if (net->src_addr_selected == 0) {
6128 				/*
6129 				 * strange case here, the INIT should have
				 * done the selection.
6131 				 */
6132 				net->ro._s_addr = sctp_source_address_selection(inp,
6133 										stcb, (sctp_route_t *)&net->ro,
6134 										net, 0, vrf_id);
				if (net->ro._s_addr == NULL) {
					/* Don't leak the INIT-ACK mbuf or a pending op_err. */
					if (op_err != NULL) {
						sctp_m_freem(op_err);
					}
					sctp_m_freem(m);
					return;
				}
				net->src_addr_selected = 1;
6140 			}
6141 			stc.laddress[0] = net->ro._s_addr->address.sin.sin_addr.s_addr;
6142 			stc.laddress[1] = 0;
6143 			stc.laddress[2] = 0;
6144 			stc.laddress[3] = 0;
6145 			stc.laddr_type = SCTP_IPV4_ADDRESS;
6146 			/* scope_id is only for v6 */
6147 			stc.scope_id = 0;
6148 			break;
6149 #endif
6150 #ifdef INET6
6151 		case AF_INET6:
6152 			sin6 = (struct sockaddr_in6 *)to;
6153 			memcpy(&stc.address, &sin6->sin6_addr,
6154 			       sizeof(struct in6_addr));
6155 			stc.addr_type = SCTP_IPV6_ADDRESS;
6156 			stc.scope_id = sin6->sin6_scope_id;
6157 			if (net->src_addr_selected == 0) {
6158 				/*
6159 				 * strange case here, the INIT should have
6160 				 * done the selection.
6161 				 */
6162 				net->ro._s_addr = sctp_source_address_selection(inp,
6163 										stcb, (sctp_route_t *)&net->ro,
6164 										net, 0, vrf_id);
				if (net->ro._s_addr == NULL) {
					/* Don't leak the INIT-ACK mbuf or a pending op_err. */
					if (op_err != NULL) {
						sctp_m_freem(op_err);
					}
					sctp_m_freem(m);
					return;
				}
				net->src_addr_selected = 1;
6169 			}
6170 			memcpy(&stc.laddress, &net->ro._s_addr->address.sin6.sin6_addr,
6171 			       sizeof(struct in6_addr));
6172 			stc.laddr_type = SCTP_IPV6_ADDRESS;
6173 			break;
6174 #endif
6175 #if defined(__Userspace__)
6176 		case AF_CONN:
6177 			sconn = (struct sockaddr_conn *)to;
6178 			stc.address[0] = 0;
6179 			stc.address[1] = 0;
6180 			stc.address[2] = 0;
6181 			stc.address[3] = 0;
6182 			memcpy(&stc.address, &sconn->sconn_addr, sizeof(void *));
6183 			stc.addr_type = SCTP_CONN_ADDRESS;
6184 			stc.laddress[0] = 0;
6185 			stc.laddress[1] = 0;
6186 			stc.laddress[2] = 0;
6187 			stc.laddress[3] = 0;
6188 			memcpy(&stc.laddress, &sconn->sconn_addr, sizeof(void *));
6189 			stc.laddr_type = SCTP_CONN_ADDRESS;
6190 			stc.scope_id = 0;
6191 			break;
6192 #endif
6193 		}
6194 	}
	/* Now let's put the INIT-ACK chunk header in place */
6196 	initack = mtod(m, struct sctp_init_ack_chunk *);
6197 	/* Save it off for quick ref */
6198 	stc.peers_vtag = init_chk->init.initiate_tag;
6199 	/* who are we */
6200 	memcpy(stc.identification, SCTP_VERSION_STRING,
6201 	       min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification)));
6202 	memset(stc.reserved, 0, SCTP_RESERVE_SPACE);
6203 	/* now the chunk header */
6204 	initack->ch.chunk_type = SCTP_INITIATION_ACK;
6205 	initack->ch.chunk_flags = 0;
6206 	/* fill in later from mbuf we build */
6207 	initack->ch.chunk_length = 0;
6208 	/* place in my tag */
6209 	if ((asoc != NULL) &&
6210 	    ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
6211 	     (SCTP_GET_STATE(asoc) == SCTP_STATE_INUSE) ||
6212 	     (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED))) {
6213 		/* re-use the v-tags and init-seq here */
6214 		initack->init.initiate_tag = htonl(asoc->my_vtag);
6215 		initack->init.initial_tsn = htonl(asoc->init_seq_number);
6216 	} else {
6217 		uint32_t vtag, itsn;
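		/*
		 * Tag/TSN selection may need locks of broader scope than
		 * the inp/stcb locks possibly held here, so those locks are
		 * released below (with references held to keep the
		 * structures alive) and re-acquired afterwards.
		 */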
6218 		if (hold_inp_lock) {
6219 			SCTP_INP_INCR_REF(inp);
6220 			SCTP_INP_RUNLOCK(inp);
6221 		}
6222 		if (asoc) {
6223 			atomic_add_int(&asoc->refcnt, 1);
6224 			SCTP_TCB_UNLOCK(stcb);
6225 		new_tag:
6226 			vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
6227 			if ((asoc->peer_supports_nat)  && (vtag == asoc->my_vtag)) {
				/* Got a duplicate vtag from some peer behind a
				 * NAT; make sure we don't use it.
6230 				 */
6231 				goto new_tag;
6232 			}
6233 			initack->init.initiate_tag = htonl(vtag);
6234 			/* get a TSN to use too */
6235 			itsn = sctp_select_initial_TSN(&inp->sctp_ep);
6236 			initack->init.initial_tsn = htonl(itsn);
6237 			SCTP_TCB_LOCK(stcb);
6238 			atomic_add_int(&asoc->refcnt, -1);
6239 		} else {
6240 			vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
6241 			initack->init.initiate_tag = htonl(vtag);
6242 			/* get a TSN to use too */
6243 			initack->init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep));
6244 		}
6245 		if (hold_inp_lock) {
6246 			SCTP_INP_RLOCK(inp);
6247 			SCTP_INP_DECR_REF(inp);
6248 		}
6249 	}
	/* save away my tag into the cookie as well */
6251 	stc.my_vtag = initack->init.initiate_tag;
6252 
6253 	/* set up some of the credits. */
6254 	so = inp->sctp_socket;
6255 	if (so == NULL) {
		/* no socket to work with, bail out */
6257 		sctp_m_freem(m);
6258 		return;
6259 	} else {
6260 		initack->init.a_rwnd = htonl(max(SCTP_SB_LIMIT_RCV(so), SCTP_MINIMAL_RWND));
6261 	}
6262 	/* set what I want */
6263 	his_limit = ntohs(init_chk->init.num_inbound_streams);
6264 	/* choose what I want */
6265 	if (asoc != NULL) {
6266 		if (asoc->streamoutcnt > inp->sctp_ep.pre_open_stream_count) {
6267 			i_want = asoc->streamoutcnt;
6268 		} else {
6269 			i_want = inp->sctp_ep.pre_open_stream_count;
6270 		}
6271 	} else {
6272 		i_want = inp->sctp_ep.pre_open_stream_count;
6273 	}
6274 	if (his_limit < i_want) {
6275 		/* I Want more :< */
6276 		initack->init.num_outbound_streams = init_chk->init.num_inbound_streams;
6277 	} else {
6278 		/* I can have what I want :> */
6279 		initack->init.num_outbound_streams = htons(i_want);
6280 	}
6281 	/* tell him his limit. */
6282 	initack->init.num_inbound_streams =
6283 		htons(inp->sctp_ep.max_open_streams_intome);
6284 
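	/*
	 * The optional parameters below are appended back to back in the
	 * first mbuf. chunk_len tracks the running chunk length, while
	 * padding_len remembers any pad bytes owed for the previous
	 * parameter; the pad is zeroed out just before the next parameter
	 * (or the address list) is written.
	 */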
6285 	/* adaptation layer indication parameter */
6286 	if (inp->sctp_ep.adaptation_layer_indicator_provided) {
6287 		parameter_len = (uint16_t)sizeof(struct sctp_adaptation_layer_indication);
6288 		ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t) + chunk_len);
6289 		ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
6290 		ali->ph.param_length = htons(parameter_len);
6291 		ali->indication = htonl(inp->sctp_ep.adaptation_layer_indicator);
6292 		chunk_len += parameter_len;
6293 	}
6294 
6295 	/* ECN parameter */
6296 	if (((asoc != NULL) && (asoc->ecn_supported == 1)) ||
6297 	    ((asoc == NULL) && (inp->ecn_supported == 1))) {
6298 		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
6299 		ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
6300 		ph->param_type = htons(SCTP_ECN_CAPABLE);
6301 		ph->param_length = htons(parameter_len);
6302 		chunk_len += parameter_len;
6303 	}
6304 
6305 	/* PR-SCTP supported parameter */
6306 	if (((asoc != NULL) && (asoc->prsctp_supported == 1)) ||
6307 	    ((asoc == NULL) && (inp->prsctp_supported == 1))) {
6308 		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
6309 		ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
6310 		ph->param_type = htons(SCTP_PRSCTP_SUPPORTED);
6311 		ph->param_length = htons(parameter_len);
6312 		chunk_len += parameter_len;
6313 	}
6314 
6315 	/* Add NAT friendly parameter */
6316 	if (nat_friendly) {
6317 		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
6318 		ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
6319 		ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
6320 		ph->param_length = htons(parameter_len);
6321 		chunk_len += parameter_len;
6322 	}
6323 
6324 	/* And now tell the peer which extensions we support */
6325 	num_ext = 0;
6326 	pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t) + chunk_len);
6327 	if (((asoc != NULL) && (asoc->prsctp_supported == 1)) ||
6328 	    ((asoc == NULL) && (inp->prsctp_supported == 1))) {
6329 		pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
6330 	}
6331 	if (((asoc != NULL) && (asoc->auth_supported == 1)) ||
6332 	    ((asoc == NULL) && (inp->auth_supported == 1))) {
6333 		pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
6334 	}
6335 	if (((asoc != NULL) && (asoc->asconf_supported == 1)) ||
6336 	    ((asoc == NULL) && (inp->asconf_supported == 1))) {
6337 		pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
6338 		pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
6339 	}
6340 	if (((asoc != NULL) && (asoc->reconfig_supported == 1)) ||
6341 	    ((asoc == NULL) && (inp->reconfig_supported == 1))) {
6342 		pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
6343 	}
6344 	if (((asoc != NULL) && (asoc->nrsack_supported == 1)) ||
6345 	    ((asoc == NULL) && (inp->nrsack_supported == 1))) {
6346 		pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
6347 	}
6348 	if (((asoc != NULL) && (asoc->pktdrop_supported == 1)) ||
6349 	    ((asoc == NULL) && (inp->pktdrop_supported == 1))) {
6350 		pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
6351 	}
6352 	if (num_ext > 0) {
6353 		parameter_len = (uint16_t)sizeof(struct sctp_supported_chunk_types_param) + num_ext;
6354 		pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
6355 		pr_supported->ph.param_length = htons(parameter_len);
6356 		padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6357 		chunk_len += parameter_len;
6358 	}
6359 
6360 	/* add authentication parameters */
6361 	if (((asoc != NULL) && (asoc->auth_supported == 1)) ||
6362 	    ((asoc == NULL) && (inp->auth_supported == 1))) {
6363 		struct sctp_auth_random *randp;
6364 		struct sctp_auth_hmac_algo *hmacs;
6365 		struct sctp_auth_chunk_list *chunks;
6366 
6367 		if (padding_len > 0) {
6368 			memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
6369 			chunk_len += padding_len;
6370 			padding_len = 0;
6371 		}
6372 		/* generate and add RANDOM parameter */
6373 		randp = (struct sctp_auth_random *)(mtod(m, caddr_t) + chunk_len);
6374 		parameter_len = (uint16_t)sizeof(struct sctp_auth_random) +
6375 		                SCTP_AUTH_RANDOM_SIZE_DEFAULT;
6376 		randp->ph.param_type = htons(SCTP_RANDOM);
6377 		randp->ph.param_length = htons(parameter_len);
6378 		SCTP_READ_RANDOM(randp->random_data, SCTP_AUTH_RANDOM_SIZE_DEFAULT);
6379 		padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6380 		chunk_len += parameter_len;
6381 
6382 		if (padding_len > 0) {
6383 			memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
6384 			chunk_len += padding_len;
6385 			padding_len = 0;
6386 		}
6387 		/* add HMAC_ALGO parameter */
6388 		hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t) + chunk_len);
6389 		parameter_len = (uint16_t)sizeof(struct sctp_auth_hmac_algo) +
6390 		                sctp_serialize_hmaclist(inp->sctp_ep.local_hmacs,
6391 		                                        (uint8_t *)hmacs->hmac_ids);
6392 		hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
6393 		hmacs->ph.param_length = htons(parameter_len);
6394 		padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6395 		chunk_len += parameter_len;
6396 
6397 		if (padding_len > 0) {
6398 			memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
6399 			chunk_len += padding_len;
6400 			padding_len = 0;
6401 		}
6402 		/* add CHUNKS parameter */
6403 		chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t) + chunk_len);
6404 		parameter_len = (uint16_t)sizeof(struct sctp_auth_chunk_list) +
6405 		                sctp_serialize_auth_chunks(inp->sctp_ep.local_auth_chunks,
6406 		                                           chunks->chunk_types);
6407 		chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
6408 		chunks->ph.param_length = htons(parameter_len);
6409 		padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6410 		chunk_len += parameter_len;
6411 	}
6412 	SCTP_BUF_LEN(m) = chunk_len;
6413 	m_last = m;
6414 	/* now the addresses */
6415 	/* To optimize this we could put the scoping stuff
6416 	 * into a structure and remove the individual uint8's from
	 * the stc structure. Then we could just copy the
	 * address into the stc; but for now this is a quick
6419 	 * hack to get the address stuff teased apart.
6420 	 */
6421 	scp.ipv4_addr_legal = stc.ipv4_addr_legal;
6422 	scp.ipv6_addr_legal = stc.ipv6_addr_legal;
6423 #if defined(__Userspace__)
6424 	scp.conn_addr_legal = stc.conn_addr_legal;
6425 #endif
6426 	scp.loopback_scope = stc.loopback_scope;
6427 	scp.ipv4_local_scope = stc.ipv4_scope;
6428 	scp.local_scope = stc.local_scope;
6429 	scp.site_scope = stc.site_scope;
6430 	m_last = sctp_add_addresses_to_i_ia(inp, stcb, &scp, m_last,
6431 	                                    cnt_inits_to,
6432 	                                    &padding_len, &chunk_len);
	/* padding_len can only be positive if no addresses have been added */
6434 	if (padding_len > 0) {
6435 		memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
6436 		chunk_len += padding_len;
6437 		SCTP_BUF_LEN(m) += padding_len;
6438 		padding_len = 0;
6439 	}
6440 
6441 	/* tack on the operational error if present */
6442 	if (op_err) {
6443 		parameter_len = 0;
6444 		for (m_tmp = op_err; m_tmp != NULL; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
6445 			parameter_len += SCTP_BUF_LEN(m_tmp);
6446 		}
6447 		padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6448 		SCTP_BUF_NEXT(m_last) = op_err;
6449 		while (SCTP_BUF_NEXT(m_last) != NULL) {
6450 			m_last = SCTP_BUF_NEXT(m_last);
6451 		}
6452 		chunk_len += parameter_len;
6453 	}
6454 	if (padding_len > 0) {
6455 		m_last = sctp_add_pad_tombuf(m_last, padding_len);
6456 		if (m_last == NULL) {
6457 			/* Houston we have a problem, no space */
6458 			sctp_m_freem(m);
6459 			return;
6460 		}
6461 		chunk_len += padding_len;
6462 		padding_len = 0;
6463 	}
6464 	/* Now we must build a cookie */
6465 	m_cookie = sctp_add_cookie(init_pkt, offset, m, 0, &stc, &signature);
6466 	if (m_cookie == NULL) {
6467 		/* memory problem */
6468 		sctp_m_freem(m);
6469 		return;
6470 	}
6471 	/* Now append the cookie to the end and update the space/size */
6472 	SCTP_BUF_NEXT(m_last) = m_cookie;
6473 	parameter_len = 0;
6474 	for (m_tmp = m_cookie; m_tmp != NULL; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
6475 		parameter_len += SCTP_BUF_LEN(m_tmp);
6476 		if (SCTP_BUF_NEXT(m_tmp) == NULL) {
6477 			m_last = m_tmp;
6478 		}
6479 	}
6480 	padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6481 	chunk_len += parameter_len;
6482 
6483 	/* Place in the size, but we don't include
6484 	 * the last pad (if any) in the INIT-ACK.
6485 	 */
6486 	initack->ch.chunk_length = htons(chunk_len);
6487 
	/* Time to sign the cookie. We don't sign over the cookie
	 * signature itself, so we set the trailer length to exclude it.
6490 	 */
6491 	(void)sctp_hmac_m(SCTP_HMAC,
6492 			  (uint8_t *)inp->sctp_ep.secret_key[(int)(inp->sctp_ep.current_secret_number)],
6493 			  SCTP_SECRET_SIZE, m_cookie, sizeof(struct sctp_paramhdr),
6494 			  (uint8_t *)signature, SCTP_SIGNATURE_SIZE);
6495 	/*
	 * We pass 0 here to NOT set IP_DF if it's IPv4; we ignore the return
	 * value here since the timer will drive a retransmission.
6498 	 */
6499 	if (padding_len > 0) {
6500 		if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
6501 			sctp_m_freem(m);
6502 			return;
6503 		}
6504 	}
6505 	if (stc.loopback_scope) {
6506 		over_addr = (union sctp_sockstore *)dst;
6507 	} else {
6508 		over_addr = NULL;
6509 	}
6510 
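	/*
	 * Transmit the INIT-ACK. Note that no new association state is
	 * created here; everything needed to set it up later is carried
	 * inside the signed cookie and re-validated when the COOKIE-ECHO
	 * arrives.
	 */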
6511 	(void)sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0,
6512 	                                 0, 0,
6513 	                                 inp->sctp_lport, sh->src_port, init_chk->init.initiate_tag,
6514 	                                 port, over_addr,
6515 #if defined(__FreeBSD__)
6516 	                                 mflowtype, mflowid,
6517 #endif
6518 	                                 SCTP_SO_NOT_LOCKED);
6519 	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
6520 }
6521 
6522 
6523 static void
6524 sctp_prune_prsctp(struct sctp_tcb *stcb,
6525     struct sctp_association *asoc,
6526     struct sctp_sndrcvinfo *srcv,
6527     int dataout)
6528 {
6529 	int freed_spc = 0;
6530 	struct sctp_tmit_chunk *chk, *nchk;
6531 
6532 	SCTP_TCB_LOCK_ASSERT(stcb);
6533 	if ((asoc->prsctp_supported) &&
6534 	    (asoc->sent_queue_cnt_removeable > 0)) {
6535 		TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
6536 			/*
6537 			 * Look for chunks marked with the PR_SCTP flag AND
			 * the buffer space flag. If the one being sent is of
			 * equal or greater priority, then purge the old one
6540 			 * and free some space.
6541 			 */
6542 			if (PR_SCTP_BUF_ENABLED(chk->flags)) {
6543 				/*
6544 				 * This one is PR-SCTP AND buffer space
6545 				 * limited type
6546 				 */
6547 				if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
6548 					/*
					 * Lower numbers equate to higher
					 * priority, so if the one we are
					 * looking at has a larger or equal
					 * value we want to drop the data
					 * and NOT retransmit it.
6554 					 */
6555 					if (chk->data) {
6556 						/*
6557 						 * We release the book_size
6558 						 * if the mbuf is here
6559 						 */
6560 						int ret_spc;
6561 						uint8_t sent;
6562 
6563 						if (chk->sent > SCTP_DATAGRAM_UNSENT)
6564 							sent = 1;
6565 						else
6566 							sent = 0;
6567 						ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6568 						    sent,
6569 						    SCTP_SO_LOCKED);
6570 						freed_spc += ret_spc;
6571 						if (freed_spc >= dataout) {
6572 							return;
6573 						}
6574 					}	/* if chunk was present */
				}	/* if of sufficient priority */
6576 			}	/* if chunk has enabled */
6577 		}		/* tailqforeach */
6578 
6579 		TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
6580 			/* Here we must move to the sent queue and mark */
6581 			if (PR_SCTP_BUF_ENABLED(chk->flags)) {
6582 				if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
6583 					if (chk->data) {
6584 						/*
6585 						 * We release the book_size
6586 						 * if the mbuf is here
6587 						 */
6588 						int ret_spc;
6589 
6590 						ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6591 						    0, SCTP_SO_LOCKED);
6592 
6593 						freed_spc += ret_spc;
6594 						if (freed_spc >= dataout) {
6595 							return;
6596 						}
6597 					}	/* end if chk->data */
6598 				}	/* end if right class */
6599 			}	/* end if chk pr-sctp */
6600 		}		/* tailqforeachsafe (chk) */
6601 	}			/* if enabled in asoc */
6602 }
6603 
6604 int
6605 sctp_get_frag_point(struct sctp_tcb *stcb,
6606     struct sctp_association *asoc)
6607 {
6608 	int siz, ovh;
6609 
6610 	/*
6611 	 * For endpoints that have both v6 and v4 addresses we must reserve
	 * room for the IPv6 header; for those that are only dealing with v4
6613 	 * we use a larger frag point.
6614 	 */
6615 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
6616 		ovh = SCTP_MED_OVERHEAD;
6617 	} else {
6618 		ovh = SCTP_MED_V4_OVERHEAD;
6619 	}
6620 
6621 	if (stcb->asoc.sctp_frag_point > asoc->smallest_mtu)
6622 		siz = asoc->smallest_mtu - ovh;
6623 	else
6624 		siz = (stcb->asoc.sctp_frag_point - ovh);
6625 	/*
6626 	 * if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) {
6627 	 */
6628 	/* A data chunk MUST fit in a cluster */
6629 	/* siz = (MCLBYTES - sizeof(struct sctp_data_chunk)); */
6630 	/* } */
6631 
6632 	/* adjust for an AUTH chunk if DATA requires auth */
6633 	if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks))
6634 		siz -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
6635 
6636 	if (siz % 4) {
6637 		/* make it an even word boundary please */
6638 		siz -= (siz % 4);
6639 	}
6640 	return (siz);
6641 }
6642 
6643 static void
6644 sctp_set_prsctp_policy(struct sctp_stream_queue_pending *sp)
6645 {
6646 	/*
6647 	 * We assume that the user wants PR_SCTP_TTL if the user
6648 	 * provides a positive lifetime but does not specify any
6649 	 * PR_SCTP policy.
6650 	 */
6651 	if (PR_SCTP_ENABLED(sp->sinfo_flags)) {
6652 		sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
6653 	} else if (sp->timetolive > 0) {
6654 		sp->sinfo_flags |= SCTP_PR_SCTP_TTL;
6655 		sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
6656 	} else {
6657 		return;
6658 	}
6659 	switch (PR_SCTP_POLICY(sp->sinfo_flags)) {
6660 	case CHUNK_FLAGS_PR_SCTP_BUF:
6661 		/*
		 * Time to live is a priority stored in tv_sec when
		 * using the buffer-drop policy.
6664 		 */
6665 		sp->ts.tv_sec = sp->timetolive;
6666 		sp->ts.tv_usec = 0;
6667 		break;
6668 	case CHUNK_FLAGS_PR_SCTP_TTL:
6669 	{
6670 		struct timeval tv;
6671 		(void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6672 		tv.tv_sec = sp->timetolive / 1000;
6673 		tv.tv_usec = (sp->timetolive * 1000) % 1000000;
6674 		/* TODO sctp_constants.h needs alternative time macros when
6675 		 *  _KERNEL is undefined.
6676 		 */
6677 #ifndef __FreeBSD__
6678 		timeradd(&sp->ts, &tv, &sp->ts);
6679 #else
6680 		timevaladd(&sp->ts, &tv);
6681 #endif
6682 	}
6683 		break;
6684 	case CHUNK_FLAGS_PR_SCTP_RTX:
6685 		/*
		 * Time to live is the number of retransmissions,
6687 		 * stored in tv_sec.
6688 		 */
6689 		sp->ts.tv_sec = sp->timetolive;
6690 		sp->ts.tv_usec = 0;
6691 		break;
6692 	default:
6693 		SCTPDBG(SCTP_DEBUG_USRREQ1,
6694 			"Unknown PR_SCTP policy %u.\n",
6695 			PR_SCTP_POLICY(sp->sinfo_flags));
6696 		break;
6697 	}
6698 }
6699 
6700 static int
6701 sctp_msg_append(struct sctp_tcb *stcb,
6702 		struct sctp_nets *net,
6703 		struct mbuf *m,
6704 		struct sctp_sndrcvinfo *srcv, int hold_stcb_lock)
6705 {
6706 	int error = 0;
6707 	struct mbuf *at;
6708 	struct sctp_stream_queue_pending *sp = NULL;
6709 	struct sctp_stream_out *strm;
6710 
6711 	/* Given an mbuf chain, put it
6712 	 * into the association send queue and
6713 	 * place it on the wheel
6714 	 */
6715 	if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) {
6716 		/* Invalid stream number */
6717 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
6718 		error = EINVAL;
6719 		goto out_now;
6720 	}
6721 	if ((stcb->asoc.stream_locked) &&
6722 	    (stcb->asoc.stream_locked_on != srcv->sinfo_stream)) {
6723 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
6724 		error = EINVAL;
6725 		goto out_now;
6726 	}
6727 	strm = &stcb->asoc.strmout[srcv->sinfo_stream];
6728 	/* Now can we send this? */
6729 	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
6730 	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
6731 	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
6732 	    (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)) {
6733 		/* got data while shutting down */
6734 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
6735 		error = ECONNRESET;
6736 		goto out_now;
6737 	}
6738 	sctp_alloc_a_strmoq(stcb, sp);
6739 	if (sp == NULL) {
6740 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6741 		error = ENOMEM;
6742 		goto out_now;
6743 	}
6744 	sp->sinfo_flags = srcv->sinfo_flags;
6745 	sp->timetolive = srcv->sinfo_timetolive;
6746 	sp->ppid = srcv->sinfo_ppid;
6747 	sp->context = srcv->sinfo_context;
6748 	if (sp->sinfo_flags & SCTP_ADDR_OVER) {
6749 		sp->net = net;
6750 		atomic_add_int(&sp->net->ref_count, 1);
6751 	} else {
6752 		sp->net = NULL;
6753 	}
6754 	(void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6755 	sp->stream = srcv->sinfo_stream;
6756 	sp->msg_is_complete = 1;
6757 	sp->sender_all_done = 1;
6758 	sp->some_taken = 0;
6759 	sp->data = m;
6760 	sp->tail_mbuf = NULL;
6761 	sctp_set_prsctp_policy(sp);
	/* We could in theory (for sendall) pass the length
	 * in, but we would still have to hunt through the
	 * chain since we need to set up the tail_mbuf
6765 	 */
6766 	sp->length = 0;
6767 	for (at = m; at; at = SCTP_BUF_NEXT(at)) {
6768 		if (SCTP_BUF_NEXT(at) == NULL)
6769 			sp->tail_mbuf = at;
6770 		sp->length += SCTP_BUF_LEN(at);
6771 	}
6772 	if (srcv->sinfo_keynumber_valid) {
6773 		sp->auth_keyid = srcv->sinfo_keynumber;
6774 	} else {
6775 		sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
6776 	}
6777 	if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
6778 		sctp_auth_key_acquire(stcb, sp->auth_keyid);
6779 		sp->holds_key_ref = 1;
6780 	}
6781 	if (hold_stcb_lock == 0) {
6782 		SCTP_TCB_SEND_LOCK(stcb);
6783 	}
6784 	sctp_snd_sb_alloc(stcb, sp->length);
6785 	atomic_add_int(&stcb->asoc.stream_queue_cnt, 1);
6786 	TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
6787 	stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, &stcb->asoc, strm, sp, 1);
6788 	m = NULL;
6789 	if (hold_stcb_lock == 0) {
6790 		SCTP_TCB_SEND_UNLOCK(stcb);
6791 	}
6792 out_now:
6793 	if (m) {
6794 		sctp_m_freem(m);
6795 	}
6796 	return (error);
6797 }
6798 
6799 
6800 static struct mbuf *
6801 sctp_copy_mbufchain(struct mbuf *clonechain,
6802 		    struct mbuf *outchain,
6803 		    struct mbuf **endofchain,
6804 		    int can_take_mbuf,
6805 		    int sizeofcpy,
6806 		    uint8_t copy_by_ref)
6807 {
6808 	struct mbuf *m;
6809 	struct mbuf *appendchain;
6810 	caddr_t cp;
6811 	int len;
6812 
6813 	if (endofchain == NULL) {
6814 		/* error */
6815 	error_out:
6816 		if (outchain)
6817 			sctp_m_freem(outchain);
6818 		return (NULL);
6819 	}
6820 	if (can_take_mbuf) {
6821 		appendchain = clonechain;
6822 	} else {
6823 		if (!copy_by_ref &&
6824 #if defined(__Panda__)
6825 		    0
6826 #else
6827 		    (sizeofcpy <= (int)((((SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count) - 1) * MLEN) + MHLEN)))
6828 #endif
6829 		    ) {
6830 			/* Its not in a cluster */
6831 			if (*endofchain == NULL) {
6832 				/* lets get a mbuf cluster */
6833 				if (outchain == NULL) {
6834 					/* This is the general case */
6835 				new_mbuf:
6836 					outchain = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
6837 					if (outchain == NULL) {
6838 						goto error_out;
6839 					}
6840 					SCTP_BUF_LEN(outchain) = 0;
6841 					*endofchain = outchain;
6842 					/* get the prepend space */
6843 					SCTP_BUF_RESV_UF(outchain, (SCTP_FIRST_MBUF_RESV+4));
6844 				} else {
6845 					/* We really should not get a NULL in endofchain */
6846 					/* find end */
6847 					m = outchain;
6848 					while (m) {
6849 						if (SCTP_BUF_NEXT(m) == NULL) {
6850 							*endofchain = m;
6851 							break;
6852 						}
6853 						m = SCTP_BUF_NEXT(m);
6854 					}
6855 					/* sanity */
6856 					if (*endofchain == NULL) {
6857 						/* huh, TSNH XXX maybe we should panic */
6858 						sctp_m_freem(outchain);
6859 						goto new_mbuf;
6860 					}
6861 				}
6862 				/* get the new end of length */
6863 				len = M_TRAILINGSPACE(*endofchain);
6864 			} else {
6865 				/* how much is left at the end? */
6866 				len = M_TRAILINGSPACE(*endofchain);
6867 			}
6868 			/* Find the end of the data, for appending */
6869 			cp = (mtod((*endofchain), caddr_t) + SCTP_BUF_LEN((*endofchain)));
6870 
6871 			/* Now lets copy it out */
6872 			if (len >= sizeofcpy) {
6873 				/* It all fits, copy it in */
6874 				m_copydata(clonechain, 0, sizeofcpy, cp);
6875 				SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
6876 			} else {
6877 				/* fill up the end of the chain */
6878 				if (len > 0) {
6879 					m_copydata(clonechain, 0, len, cp);
6880 					SCTP_BUF_LEN((*endofchain)) += len;
6881 					/* now we need another one */
6882 					sizeofcpy -= len;
6883 				}
6884 				m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
6885 				if (m == NULL) {
6886 					/* We failed */
6887 					goto error_out;
6888 				}
6889 				SCTP_BUF_NEXT((*endofchain)) = m;
6890 				*endofchain = m;
6891 				cp = mtod((*endofchain), caddr_t);
6892 				m_copydata(clonechain, len, sizeofcpy, cp);
6893 				SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
6894 			}
6895 			return (outchain);
6896 		} else {
6897 			/* copy the old fashion way */
6898 			appendchain = SCTP_M_COPYM(clonechain, 0, M_COPYALL, M_NOWAIT);
6899 #ifdef SCTP_MBUF_LOGGING
6900 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6901 				sctp_log_mbc(appendchain, SCTP_MBUF_ICOPY);
6902 			}
6903 #endif
6904 		}
6905 	}
6906 	if (appendchain == NULL) {
6907 		/* error */
6908 		if (outchain)
6909 			sctp_m_freem(outchain);
6910 		return (NULL);
6911 	}
6912 	if (outchain) {
6913 		/* tack on to the end */
6914 		if (*endofchain != NULL) {
6915 			SCTP_BUF_NEXT(((*endofchain))) = appendchain;
6916 		} else {
6917 			m = outchain;
6918 			while (m) {
6919 				if (SCTP_BUF_NEXT(m) == NULL) {
6920 					SCTP_BUF_NEXT(m) = appendchain;
6921 					break;
6922 				}
6923 				m = SCTP_BUF_NEXT(m);
6924 			}
6925 		}
6926 		/*
6927 		 * save off the end and update the end-chain
		 * position
6929 		 */
6930 		m = appendchain;
6931 		while (m) {
6932 			if (SCTP_BUF_NEXT(m) == NULL) {
6933 				*endofchain = m;
6934 				break;
6935 			}
6936 			m = SCTP_BUF_NEXT(m);
6937 		}
6938 		return (outchain);
6939 	} else {
		/* save off the end and update the end-chain position */
6941 		m = appendchain;
6942 		while (m) {
6943 			if (SCTP_BUF_NEXT(m) == NULL) {
6944 				*endofchain = m;
6945 				break;
6946 			}
6947 			m = SCTP_BUF_NEXT(m);
6948 		}
6949 		return (appendchain);
6950 	}
6951 }
6952 
6953 static int
6954 sctp_med_chunk_output(struct sctp_inpcb *inp,
6955 		      struct sctp_tcb *stcb,
6956 		      struct sctp_association *asoc,
6957 		      int *num_out,
6958 		      int *reason_code,
6959 		      int control_only, int from_where,
6960 		      struct timeval *now, int *now_filled, int frag_point, int so_locked
6961 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
6962 		      SCTP_UNUSED
6963 #endif
6964                       );
6965 
6966 static void
6967 sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
6968     uint32_t val SCTP_UNUSED)
6969 {
6970 	struct sctp_copy_all *ca;
6971 	struct mbuf *m;
6972 	int ret = 0;
6973 	int added_control = 0;
6974 	int un_sent, do_chunk_output = 1;
6975 	struct sctp_association *asoc;
6976 	struct sctp_nets *net;
6977 
6978 	ca = (struct sctp_copy_all *)ptr;
6979 	if (ca->m == NULL) {
6980 		return;
6981 	}
6982 	if (ca->inp != inp) {
6983 		/* TSNH */
6984 		return;
6985 	}
6986 	if (ca->sndlen > 0) {
6987 		m = SCTP_M_COPYM(ca->m, 0, M_COPYALL, M_NOWAIT);
6988 		if (m == NULL) {
6989 			/* can't copy so we are done */
6990 			ca->cnt_failed++;
6991 			return;
6992 		}
6993 #ifdef SCTP_MBUF_LOGGING
6994 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6995 			sctp_log_mbc(m, SCTP_MBUF_ICOPY);
6996 		}
6997 #endif
6998 	} else {
6999 		m = NULL;
7000 	}
7001 	SCTP_TCB_LOCK_ASSERT(stcb);
7002 	if (stcb->asoc.alternate) {
7003 		net = stcb->asoc.alternate;
7004 	} else {
7005 		net = stcb->asoc.primary_destination;
7006 	}
7007 	if (ca->sndrcv.sinfo_flags & SCTP_ABORT) {
7008 		/* Abort this assoc with m as the user defined reason */
7009 		if (m != NULL) {
7010 			SCTP_BUF_PREPEND(m, sizeof(struct sctp_paramhdr), M_NOWAIT);
7011 		} else {
			m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
			                          0, M_NOWAIT, 1, MT_DATA);
			if (m != NULL) {
				/* guard against allocation failure before touching m */
				SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
			}
7015 		}
7016 		if (m != NULL) {
7017 			struct sctp_paramhdr *ph;
7018 
7019 			ph = mtod(m, struct sctp_paramhdr *);
7020 			ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
7021 			ph->param_length = htons(sizeof(struct sctp_paramhdr) + ca->sndlen);
7022 		}
7023 		/* We add one here to keep the assoc from
		 * disappearing on us.
7025 		 */
7026 		atomic_add_int(&stcb->asoc.refcnt, 1);
7027 		sctp_abort_an_association(inp, stcb, m, SCTP_SO_NOT_LOCKED);
		/* sctp_abort_an_association calls sctp_free_asoc(), but
		 * free_asoc will NOT free the association since we
		 * incremented the refcnt. We do this to prevent it being
		 * freed and things getting tricky, since we could end up
		 * (from free_asoc) calling inpcb_free, which would make a
		 * recursive lock call on the iterator lock. As a consequence
		 * the stcb comes back to us unlocked; since free_asoc
		 * returns with either no TCB or the TCB unlocked, we must
		 * relock here so the iterator timer can unlock it.
		 */
7039 		SCTP_TCB_LOCK(stcb);
7040 		atomic_add_int(&stcb->asoc.refcnt, -1);
7041 		goto no_chunk_output;
7042 	} else {
7043 		if (m) {
7044 			ret = sctp_msg_append(stcb, net, m,
7045 					      &ca->sndrcv, 1);
7046 		}
7047 		asoc = &stcb->asoc;
7048 		if (ca->sndrcv.sinfo_flags & SCTP_EOF) {
7049 			/* shutdown this assoc */
7050 			int cnt;
7051 			cnt = sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED);
7052 
7053 			if (TAILQ_EMPTY(&asoc->send_queue) &&
7054 			    TAILQ_EMPTY(&asoc->sent_queue) &&
7055 			    (cnt == 0)) {
7056 				if (asoc->locked_on_sending) {
7057 					goto abort_anyway;
7058 				}
7059 				/* there is nothing queued to send, so I'm done... */
7060 				if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
7061 				    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
7062 				    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
7063 					/* only send SHUTDOWN the first time through */
7064 					if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
7065 						SCTP_STAT_DECR_GAUGE32(sctps_currestab);
7066 					}
7067 					SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
7068 					SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
7069 					sctp_stop_timers_for_shutdown(stcb);
7070 					sctp_send_shutdown(stcb, net);
7071 					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
7072 							 net);
7073 					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
7074 							 asoc->primary_destination);
7075 					added_control = 1;
7076 					do_chunk_output = 0;
7077 				}
7078 			} else {
7079 				/*
				 * we still have (or just got) data to send, so set
7081 				 * SHUTDOWN_PENDING
7082 				 */
7083 				/*
7084 				 * XXX sockets draft says that SCTP_EOF should be
7085 				 * sent with no data.  currently, we will allow user
7086 				 * data to be sent first and move to
7087 				 * SHUTDOWN-PENDING
7088 				 */
7089 				if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
7090 				    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
7091 				    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
7092 					if (asoc->locked_on_sending) {
7093 						/* Locked to send out the data */
7094 						struct sctp_stream_queue_pending *sp;
7095 						sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
7096 						if (sp) {
7097 							if ((sp->length == 0) && (sp->msg_is_complete == 0))
7098 								asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
7099 						}
7100 					}
7101 					asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
7102 					if (TAILQ_EMPTY(&asoc->send_queue) &&
7103 					    TAILQ_EMPTY(&asoc->sent_queue) &&
7104 					    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
7105 					abort_anyway:
7106 						atomic_add_int(&stcb->asoc.refcnt, 1);
7107 						sctp_abort_an_association(stcb->sctp_ep, stcb,
7108 									  NULL, SCTP_SO_NOT_LOCKED);
7109 						atomic_add_int(&stcb->asoc.refcnt, -1);
7110 						goto no_chunk_output;
7111 					}
7112 					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
7113 							 asoc->primary_destination);
7114 				}
7115 			}
7116 
7117 		}
7118 	}
7119 	un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
7120 		   (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
7121 
7122 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
7123 	    (stcb->asoc.total_flight > 0) &&
7124 	    (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
7125 		do_chunk_output = 0;
7126 	}
7127 	if (do_chunk_output)
7128 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_NOT_LOCKED);
7129 	else if (added_control) {
7130 		int num_out, reason, now_filled = 0;
7131 		struct timeval now;
7132 		int frag_point;
7133 
7134 		frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
7135 		(void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
7136 				      &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_NOT_LOCKED);
7137 	}
7138  no_chunk_output:
7139 	if (ret) {
7140 		ca->cnt_failed++;
7141 	} else {
7142 		ca->cnt_sent++;
7143 	}
7144 }
7145 
7146 static void
7147 sctp_sendall_completes(void *ptr, uint32_t val SCTP_UNUSED)
7148 {
7149 	struct sctp_copy_all *ca;
7150 
7151 	ca = (struct sctp_copy_all *)ptr;
7152 	/*
7153 	 * Do a notify here? Kacheong suggests that the notify be done at
7154 	 * the send time.. so you would push up a notification if any send
	 * failed. Don't know if this is feasible since the only failures we
	 * have are "memory" related, and if you cannot get an mbuf to send
7157 	 * the data you surely can't get an mbuf to send up to notify the
7158 	 * user you can't send the data :->
7159 	 */
7160 
7161 	/* now free everything */
7162 	sctp_m_freem(ca->m);
7163 	SCTP_FREE(ca, SCTP_M_COPYAL);
7164 }
7165 
7166 static struct mbuf *
7167 sctp_copy_out_all(struct uio *uio, int len)
7168 {
7169 	struct mbuf *ret, *at;
7170 	int left, willcpy, cancpy, error;
7171 
7172 	ret = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_WAITOK, 1, MT_DATA);
7173 	if (ret == NULL) {
7174 		/* TSNH */
7175 		return (NULL);
7176 	}
7177 	left = len;
7178 	SCTP_BUF_LEN(ret) = 0;
7179 	/* save space for the data chunk header */
7180 	cancpy = M_TRAILINGSPACE(ret);
7181 	willcpy = min(cancpy, left);
7182 	at = ret;
7183 	while (left > 0) {
7184 		/* Align data to the end */
7185 		error = uiomove(mtod(at, caddr_t), willcpy, uio);
7186 		if (error) {
7187 	err_out_now:
7188 			sctp_m_freem(at);
7189 			return (NULL);
7190 		}
7191 		SCTP_BUF_LEN(at) = willcpy;
7192 		SCTP_BUF_NEXT_PKT(at) = SCTP_BUF_NEXT(at) = 0;
7193 		left -= willcpy;
7194 		if (left > 0) {
7195 			SCTP_BUF_NEXT(at) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 1, MT_DATA);
7196 			if (SCTP_BUF_NEXT(at) == NULL) {
7197 				goto err_out_now;
7198 			}
7199 			at = SCTP_BUF_NEXT(at);
7200 			SCTP_BUF_LEN(at) = 0;
7201 			cancpy = M_TRAILINGSPACE(at);
7202 			willcpy = min(cancpy, left);
7203 		}
7204 	}
7205 	return (ret);
7206 }
7207 
7208 static int
7209 sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m,
7210     struct sctp_sndrcvinfo *srcv)
7211 {
7212 	int ret;
7213 	struct sctp_copy_all *ca;
7214 
7215 	SCTP_MALLOC(ca, struct sctp_copy_all *, sizeof(struct sctp_copy_all),
7216 		    SCTP_M_COPYAL);
7217 	if (ca == NULL) {
7218 		sctp_m_freem(m);
7219 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
7220 		return (ENOMEM);
7221 	}
7222 	memset(ca, 0, sizeof(struct sctp_copy_all));
7223 
7224 	ca->inp = inp;
7225 	if (srcv) {
7226 		memcpy(&ca->sndrcv, srcv, sizeof(struct sctp_nonpad_sndrcvinfo));
7227 	}
7228 	/*
7229 	 * take off the sendall flag, it would be bad if we failed to do
7230 	 * this :-0
7231 	 */
7232 	ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL;
7233 	/* get length and mbuf chain */
7234 	if (uio) {
7235 #if defined(__APPLE__)
7236 #if defined(APPLE_LEOPARD)
7237 		ca->sndlen = uio->uio_resid;
7238 #else
7239 		ca->sndlen = uio_resid(uio);
7240 #endif
7241 #else
7242 		ca->sndlen = uio->uio_resid;
7243 #endif
7244 #if defined(__APPLE__)
7245 		SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 0);
7246 #endif
7247 		ca->m = sctp_copy_out_all(uio, ca->sndlen);
7248 #if defined(__APPLE__)
7249 		SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 0);
7250 #endif
7251 		if (ca->m == NULL) {
7252 			SCTP_FREE(ca, SCTP_M_COPYAL);
7253 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
7254 			return (ENOMEM);
7255 		}
7256 	} else {
7257 		/* Gather the length of the send */
7258 		struct mbuf *mat;
7259 
7260 		ca->sndlen = 0;
7261 		for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
7262 			ca->sndlen += SCTP_BUF_LEN(mat);
7263 		}
7264 	}
7265 	ret = sctp_initiate_iterator(NULL, sctp_sendall_iterator, NULL,
7266 				     SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES,
7267 				     SCTP_ASOC_ANY_STATE,
7268 				     (void *)ca, 0,
7269 				     sctp_sendall_completes, inp, 1);
7270 	if (ret) {
7271 		SCTP_PRINTF("Failed to initiate iterator for sendall\n");
7272 		SCTP_FREE(ca, SCTP_M_COPYAL);
7273 		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
7274 		return (EFAULT);
7275 	}
7276 	return (0);
7277 }
7278 
7279 
7280 void
7281 sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc)
7282 {
7283 	struct sctp_tmit_chunk *chk, *nchk;
7284 
7285 	TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
7286 		if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
7287 			TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
7288 			if (chk->data) {
7289 				sctp_m_freem(chk->data);
7290 				chk->data = NULL;
7291 			}
7292 			asoc->ctrl_queue_cnt--;
7293 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
7294 		}
7295 	}
7296 }
7297 
7298 void
7299 sctp_toss_old_asconf(struct sctp_tcb *stcb)
7300 {
7301 	struct sctp_association *asoc;
7302 	struct sctp_tmit_chunk *chk, *nchk;
7303 	struct sctp_asconf_chunk *acp;
7304 
7305 	asoc = &stcb->asoc;
7306 	TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
7307 		/* find SCTP_ASCONF chunk in queue */
7308 		if (chk->rec.chunk_id.id == SCTP_ASCONF) {
7309 			if (chk->data) {
7310 				acp = mtod(chk->data, struct sctp_asconf_chunk *);
7311 				if (SCTP_TSN_GT(ntohl(acp->serial_number), asoc->asconf_seq_out_acked)) {
7312 					/* Not Acked yet */
7313 					break;
7314 				}
7315 			}
7316 			TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next);
7317 			if (chk->data) {
7318 				sctp_m_freem(chk->data);
7319 				chk->data = NULL;
7320 			}
7321 			asoc->ctrl_queue_cnt--;
7322 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
7323 		}
7324 	}
7325 }
7326 
7327 
7328 static void
7329 sctp_clean_up_datalist(struct sctp_tcb *stcb,
7330     struct sctp_association *asoc,
7331     struct sctp_tmit_chunk **data_list,
7332     int bundle_at,
7333     struct sctp_nets *net)
7334 {
7335 	int i;
7336 	struct sctp_tmit_chunk *tp1;
7337 
7338 	for (i = 0; i < bundle_at; i++) {
7339 		/* off of the send queue */
7340 		TAILQ_REMOVE(&asoc->send_queue, data_list[i], sctp_next);
7341 		asoc->send_queue_cnt--;
7342 		if (i > 0) {
7343 			/*
			 * For any chunk other than 0 we zap the time; chunk 0
			 * gets zapped or set based on whether an RTO
			 * measurement is needed.
7347 			 */
7348 			data_list[i]->do_rtt = 0;
7349 		}
7350 		/* record time */
7351 		data_list[i]->sent_rcv_time = net->last_sent_time;
7352 		data_list[i]->rec.data.cwnd_at_send = net->cwnd;
7353 		data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.TSN_seq;
7354 		if (data_list[i]->whoTo == NULL) {
7355 			data_list[i]->whoTo = net;
7356 			atomic_add_int(&net->ref_count, 1);
7357 		}
7358 		/* on to the sent queue */
7359 		tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead);
7360 		if ((tp1) && SCTP_TSN_GT(tp1->rec.data.TSN_seq, data_list[i]->rec.data.TSN_seq)) {
7361 			struct sctp_tmit_chunk *tpp;
7362 
7363 			/* need to move back */
7364 		back_up_more:
7365 			tpp = TAILQ_PREV(tp1, sctpchunk_listhead, sctp_next);
7366 			if (tpp == NULL) {
7367 				TAILQ_INSERT_BEFORE(tp1, data_list[i], sctp_next);
7368 				goto all_done;
7369 			}
7370 			tp1 = tpp;
7371 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, data_list[i]->rec.data.TSN_seq)) {
7372 				goto back_up_more;
7373 			}
7374 			TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next);
7375 		} else {
7376 			TAILQ_INSERT_TAIL(&asoc->sent_queue,
7377 					  data_list[i],
7378 					  sctp_next);
7379 		}
7380 	all_done:
7381 		/* This does not lower until the cum-ack passes it */
7382 		asoc->sent_queue_cnt++;
7383 		if ((asoc->peers_rwnd <= 0) &&
7384 		    (asoc->total_flight == 0) &&
7385 		    (bundle_at == 1)) {
7386 			/* Mark the chunk as being a window probe */
7387 			SCTP_STAT_INCR(sctps_windowprobed);
7388 		}
7389 #ifdef SCTP_AUDITING_ENABLED
7390 		sctp_audit_log(0xC2, 3);
7391 #endif
7392 		data_list[i]->sent = SCTP_DATAGRAM_SENT;
7393 		data_list[i]->snd_count = 1;
7394 		data_list[i]->rec.data.chunk_was_revoked = 0;
7395 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
7396 			sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
7397 				       data_list[i]->whoTo->flight_size,
7398 				       data_list[i]->book_size,
7399 				       (uintptr_t)data_list[i]->whoTo,
7400 				       data_list[i]->rec.data.TSN_seq);
7401 		}
7402 		sctp_flight_size_increase(data_list[i]);
7403 		sctp_total_flight_increase(stcb, data_list[i]);
7404 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
7405 			sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
7406 			      asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
7407 		}
7408 		asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
7409 						    (uint32_t) (data_list[i]->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
7410 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
7411 			/* SWS sender side engages */
7412 			asoc->peers_rwnd = 0;
7413 		}
7414 	}
7415 	if (asoc->cc_functions.sctp_cwnd_update_packet_transmitted) {
7416 		(*asoc->cc_functions.sctp_cwnd_update_packet_transmitted)(stcb, net);
7417 	}
7418 }
7419 
7420 static void
7421 sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc, int so_locked
7422 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7423 	SCTP_UNUSED
7424 #endif
7425 )
7426 {
7427 	struct sctp_tmit_chunk *chk, *nchk;
7428 
7429 	TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
7430 		if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
7431 		    (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) ||	/* EY */
7432 		    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
7433 		    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
7434 		    (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) ||
7435 		    (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
7436 		    (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
7437 		    (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
7438 		    (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
7439 		    (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
7440 		    (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
7441 		    (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
7442 			/* Stray chunks must be cleaned up */
7443 	clean_up_anyway:
7444 			TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
7445 			if (chk->data) {
7446 				sctp_m_freem(chk->data);
7447 				chk->data = NULL;
7448 			}
7449 			asoc->ctrl_queue_cnt--;
7450 			if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)
7451 				asoc->fwd_tsn_cnt--;
7452 			sctp_free_a_chunk(stcb, chk, so_locked);
7453 		} else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
7454 			/* special handling, we must look into the param */
7455 			if (chk != asoc->str_reset) {
7456 				goto clean_up_anyway;
7457 			}
7458 		}
7459 	}
7460 }
7461 
7462 
7463 static int
7464 sctp_can_we_split_this(struct sctp_tcb *stcb,
7465                        uint32_t length,
7466                        uint32_t goal_mtu, uint32_t frag_point, int eeor_on)
7467 {
	/* Make a decision on whether I should split a
7469 	 * msg into multiple parts. This is only asked of
7470 	 * incomplete messages.
7471 	 */
7472 	if (eeor_on) {
7473 		/* If we are doing EEOR we need to always send
		 * it if it's the entire thing, since it might
		 * be all the sender is putting in the hopper.
7476 		 */
7477 		if (goal_mtu >= length) {
7478 			/*-
7479 			 * If we have data outstanding,
7480 			 * we get another chance when the sack
7481 			 * arrives to transmit - wait for more data
7482 			 */
7483 			if (stcb->asoc.total_flight == 0) {
				/* Nothing is in flight, so it is
				 * OK to send the whole message now.
				 */
7487 				return (length);
7488 			}
7489 			return (0);
7490 
7491 		} else {
7492 			/* You can fill the rest */
7493 			return (goal_mtu);
7494 		}
7495 	}
7496 	/*-
7497 	 * For those strange folk that make the send buffer
7498 	 * smaller than our fragmentation point, we can't
7499 	 * get a full msg in so we have to allow splitting.
7500 	 */
7501 	if (SCTP_SB_LIMIT_SND(stcb->sctp_socket) < frag_point) {
7502 		return (length);
7503 	}
7504 
7505 	if ((length <= goal_mtu) ||
7506 	    ((length - goal_mtu) < SCTP_BASE_SYSCTL(sctp_min_residual))) {
		/* Sub-optimal residual; don't split in non-eeor mode. */
7508 		return (0);
7509 	}
7510 	/* If we reach here length is larger
7511 	 * than the goal_mtu. Do we wish to split
	 * it for the sake of packing packets together?
7513 	 */
7514 	if (goal_mtu >= min(SCTP_BASE_SYSCTL(sctp_min_split_point), frag_point)) {
		/* It's ok to split it */
7516 		return (min(goal_mtu, frag_point));
7517 	}
7518 	/* Nope, can't split */
7519 	return (0);
7520 
7521 }
7522 
7523 static uint32_t
7524 sctp_move_to_outqueue(struct sctp_tcb *stcb,
7525                       struct sctp_stream_out *strq,
7526                       uint32_t goal_mtu,
7527                       uint32_t frag_point,
7528                       int *locked,
7529                       int *giveup,
7530                       int eeor_mode,
7531                       int *bail,
7532                       int so_locked
7533 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7534                       SCTP_UNUSED
7535 #endif
7536 	)
7537 {
7538 	/* Move from the stream to the send_queue keeping track of the total */
7539 	struct sctp_association *asoc;
7540 	struct sctp_stream_queue_pending *sp;
7541 	struct sctp_tmit_chunk *chk;
7542 	struct sctp_data_chunk *dchkh;
7543 	uint32_t to_move, length;
7544 	uint8_t rcv_flags = 0;
7545 	uint8_t some_taken;
7546 	uint8_t send_lock_up = 0;
7547 
7548 	SCTP_TCB_LOCK_ASSERT(stcb);
7549 	asoc = &stcb->asoc;
7550 one_more_time:
7551 	/*sa_ignore FREED_MEMORY*/
7552 	sp = TAILQ_FIRST(&strq->outqueue);
7553 	if (sp == NULL) {
7554 		*locked = 0;
7555 		if (send_lock_up == 0) {
7556 			SCTP_TCB_SEND_LOCK(stcb);
7557 			send_lock_up = 1;
7558 		}
7559 		sp = TAILQ_FIRST(&strq->outqueue);
7560 		if (sp) {
7561 			goto one_more_time;
7562 		}
7563 		if (strq->last_msg_incomplete) {
7564 			SCTP_PRINTF("Huh? Stream:%d lm_in_c=%d but queue is NULL\n",
7565 			            strq->stream_no,
7566 			            strq->last_msg_incomplete);
7567 			strq->last_msg_incomplete = 0;
7568 		}
7569 		to_move = 0;
7570 		if (send_lock_up) {
7571 			SCTP_TCB_SEND_UNLOCK(stcb);
7572 			send_lock_up = 0;
7573 		}
7574 		goto out_of;
7575 	}
7576 	if ((sp->msg_is_complete) && (sp->length == 0)) {
7577 		if (sp->sender_all_done) {
			/* We are doing deferred cleanup. Last
			 * time through, when we took all the data,
			 * sender_all_done was not set.
			 */
7582 			if ((sp->put_last_out == 0) && (sp->discard_rest == 0)) {
7583 				SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
7584 				SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
7585 				            sp->sender_all_done,
7586 				            sp->length,
7587 				            sp->msg_is_complete,
7588 				            sp->put_last_out,
7589 				            send_lock_up);
7590 			}
7591 			if ((TAILQ_NEXT(sp, next) == NULL) && (send_lock_up  == 0)) {
7592 				SCTP_TCB_SEND_LOCK(stcb);
7593 				send_lock_up = 1;
7594 			}
7595 			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
7596 			TAILQ_REMOVE(&strq->outqueue, sp, next);
7597 			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
7598 			if (sp->net) {
7599 				sctp_free_remote_addr(sp->net);
7600 				sp->net = NULL;
7601 			}
7602 			if (sp->data) {
7603 				sctp_m_freem(sp->data);
7604 				sp->data = NULL;
7605 			}
7606 			sctp_free_a_strmoq(stcb, sp, so_locked);
7607 			/* we can't be locked to it */
7608 			*locked = 0;
7609 			stcb->asoc.locked_on_sending = NULL;
7610 			if (send_lock_up) {
7611 				SCTP_TCB_SEND_UNLOCK(stcb);
7612 				send_lock_up = 0;
7613 			}
7614 			/* back to get the next msg */
7615 			goto one_more_time;
7616 		} else {
7617 			/* sender just finished this but
7618 			 * still holds a reference
7619 			 */
7620 			*locked = 1;
7621 			*giveup = 1;
7622 			to_move = 0;
7623 			goto out_of;
7624 		}
7625 	} else {
		/* Is there something to get? */
7627 		if (sp->length == 0) {
7628 			/* no */
7629 			*locked = 1;
7630 			*giveup = 1;
7631 			to_move = 0;
7632 			goto out_of;
7633 		} else if (sp->discard_rest) {
7634 			if (send_lock_up == 0) {
7635 				SCTP_TCB_SEND_LOCK(stcb);
7636 				send_lock_up = 1;
7637 			}
7638 			/* Whack down the size */
7639 			atomic_subtract_int(&stcb->asoc.total_output_queue_size, sp->length);
			if ((stcb->sctp_socket != NULL) &&
7641 			    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
7642 			     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
7643 				atomic_subtract_int(&stcb->sctp_socket->so_snd.sb_cc, sp->length);
7644 			}
7645 			if (sp->data) {
7646 				sctp_m_freem(sp->data);
7647 				sp->data = NULL;
7648 				sp->tail_mbuf = NULL;
7649 			}
7650 			sp->length = 0;
7651 			sp->some_taken = 1;
7652 			*locked = 1;
7653 			*giveup = 1;
7654 			to_move = 0;
7655 			goto out_of;
7656 		}
7657 	}
7658 	some_taken = sp->some_taken;
7659 	if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
7660 		sp->msg_is_complete = 1;
7661 	}
7662 re_look:
7663 	length = sp->length;
7664 	if (sp->msg_is_complete) {
7665 		/* The message is complete */
7666 		to_move = min(length, frag_point);
7667 		if (to_move == length) {
7668 			/* All of it fits in the MTU */
7669 			if (sp->some_taken) {
7670 				rcv_flags |= SCTP_DATA_LAST_FRAG;
7671 				sp->put_last_out = 1;
7672 			} else {
7673 				rcv_flags |= SCTP_DATA_NOT_FRAG;
7674 				sp->put_last_out = 1;
7675 			}
7676 		} else {
7677 			/* Not all of it fits, we fragment */
7678 			if (sp->some_taken == 0) {
7679 				rcv_flags |= SCTP_DATA_FIRST_FRAG;
7680 			}
7681 			sp->some_taken = 1;
7682 		}
7683 	} else {
7684 		to_move = sctp_can_we_split_this(stcb, length, goal_mtu, frag_point, eeor_mode);
7685 		if (to_move) {
7686 			/*-
7687 			 * We use a snapshot of length in case it
7688 			 * is expanding during the compare.
7689 			 */
7690 			uint32_t llen;
7691 
7692 			llen = length;
7693 			if (to_move >= llen) {
7694 				to_move = llen;
7695 				if (send_lock_up == 0) {
7696 					/*-
7697 					 * We are taking all of an incomplete msg
7698 					 * thus we need a send lock.
7699 					 */
7700 					SCTP_TCB_SEND_LOCK(stcb);
7701 					send_lock_up = 1;
7702 					if (sp->msg_is_complete) {
7703 						/* the sender finished the msg */
7704 						goto re_look;
7705 					}
7706 				}
7707 			}
7708 			if (sp->some_taken == 0) {
7709 				rcv_flags |= SCTP_DATA_FIRST_FRAG;
7710 				sp->some_taken = 1;
7711 			}
7712 		} else {
7713 			/* Nothing to take. */
7714 			if (sp->some_taken) {
7715 				*locked = 1;
7716 			}
7717 			*giveup = 1;
7718 			to_move = 0;
7719 			goto out_of;
7720 		}
7721 	}
7722 
7723 	/* If we reach here, we can copy out a chunk */
7724 	sctp_alloc_a_chunk(stcb, chk);
7725 	if (chk == NULL) {
7726 		/* No chunk memory */
7727 		*giveup = 1;
7728 		to_move = 0;
7729 		goto out_of;
7730 	}
	/* Set up unordered delivery if needed by looking
	 * at the user-supplied sinfo flags.
	 */
7734 	if (sp->sinfo_flags & SCTP_UNORDERED) {
7735 		rcv_flags |= SCTP_DATA_UNORDERED;
7736 	}
7737 	if ((SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) && ((sp->sinfo_flags & SCTP_EOF) == SCTP_EOF)) ||
7738 	    ((sp->sinfo_flags & SCTP_SACK_IMMEDIATELY) == SCTP_SACK_IMMEDIATELY)) {
7739 		rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
7740 	}
7741 	/* clear out the chunk before setting up */
7742 	memset(chk, 0, sizeof(*chk));
7743 	chk->rec.data.rcv_flags = rcv_flags;
7744 
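	/*
	 * Two ways to hand the data to the new chunk: if we are taking the
	 * whole remaining message we steal sp's mbuf chain by reference;
	 * otherwise we copy the first to_move bytes and trim them off the
	 * front of sp->data below.
	 */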
7745 	if (to_move >= length) {
7746 		/* we think we can steal the whole thing */
7747 		if ((sp->sender_all_done == 0) && (send_lock_up == 0)) {
7748 			SCTP_TCB_SEND_LOCK(stcb);
7749 			send_lock_up = 1;
7750 		}
7751 		if (to_move < sp->length) {
7752 			/* bail, it changed */
7753 			goto dont_do_it;
7754 		}
7755 		chk->data = sp->data;
7756 		chk->last_mbuf = sp->tail_mbuf;
7757 		/* register the stealing */
7758 		sp->data = sp->tail_mbuf = NULL;
7759 	} else {
7760 		struct mbuf *m;
7761 	dont_do_it:
7762 		chk->data = SCTP_M_COPYM(sp->data, 0, to_move, M_NOWAIT);
7763 		chk->last_mbuf = NULL;
7764 		if (chk->data == NULL) {
7765 			sp->some_taken = some_taken;
7766 			sctp_free_a_chunk(stcb, chk, so_locked);
7767 			*bail = 1;
7768 			to_move = 0;
7769 			goto out_of;
7770 		}
7771 #ifdef SCTP_MBUF_LOGGING
7772 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
7773 			sctp_log_mbc(chk->data, SCTP_MBUF_ICOPY);
7774 		}
7775 #endif
7776 		/* Pull off the data */
7777 		m_adj(sp->data, to_move);
		/* Now let's work our way down and compact it */
7779 		m = sp->data;
7780 		while (m && (SCTP_BUF_LEN(m) == 0)) {
7781 			sp->data  = SCTP_BUF_NEXT(m);
7782 			SCTP_BUF_NEXT(m) = NULL;
7783 			if (sp->tail_mbuf == m) {
7784 				/*-
7785 				 * Freeing tail? TSNH since
7786 				 * we supposedly were taking less
7787 				 * than the sp->length.
7788 				 */
7789 #ifdef INVARIANTS
				panic("Huh, freeing tail? - TSNH");
7791 #else
7792 				SCTP_PRINTF("Huh, freeing tail? - TSNH\n");
7793 				sp->tail_mbuf = sp->data = NULL;
7794 				sp->length = 0;
7795 #endif
7796 
7797 			}
7798 			sctp_m_free(m);
7799 			m = sp->data;
7800 		}
7801 	}
7802 	if (SCTP_BUF_IS_EXTENDED(chk->data)) {
7803 		chk->copy_by_ref = 1;
7804 	} else {
7805 		chk->copy_by_ref = 0;
7806 	}
	/* Get last_mbuf and a count of mbuf usage.
	 * This is ugly but hopefully it's only one mbuf.
	 */
7810 	if (chk->last_mbuf == NULL) {
7811 		chk->last_mbuf = chk->data;
7812 		while (SCTP_BUF_NEXT(chk->last_mbuf) != NULL) {
7813 			chk->last_mbuf = SCTP_BUF_NEXT(chk->last_mbuf);
7814 		}
7815 	}
7816 
7817 	if (to_move > length) {
7818 		/*- This should not happen either
7819 		 * since we always lower to_move to the size
		 * of sp->length if it is larger.
7821 		 */
7822 #ifdef INVARIANTS
7823 		panic("Huh, how can to_move be larger?");
7824 #else
7825 		SCTP_PRINTF("Huh, how can to_move be larger?\n");
7826 		sp->length = 0;
7827 #endif
7828 	} else {
7829 		atomic_subtract_int(&sp->length, to_move);
7830 	}
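	/*
	 * The DATA chunk header goes in front of the payload.  If the first
	 * mbuf lacks leading space for it, chain an empty mbuf in front now
	 * so the SCTP_BUF_PREPEND below cannot fail and free the whole
	 * chain.
	 */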
7831 	if (M_LEADINGSPACE(chk->data) < (int)sizeof(struct sctp_data_chunk)) {
7832 		/* Not enough room for a chunk header, get some */
7833 		struct mbuf *m;
7834 
7835 		m = sctp_get_mbuf_for_msg(1, 0, M_NOWAIT, 0, MT_DATA);
7836 		if (m == NULL) {
7837 			/*
7838 			 * we're in trouble here. _PREPEND below will free
7839 			 * all the data if there is no leading space, so we
7840 			 * must put the data back and restore.
7841 			 */
7842 			if (send_lock_up == 0) {
7843 				SCTP_TCB_SEND_LOCK(stcb);
7844 				send_lock_up = 1;
7845 			}
7846 			if (sp->data == NULL) {
7847 				/* unsteal the data */
7848 				sp->data = chk->data;
7849 				sp->tail_mbuf = chk->last_mbuf;
7850 			} else {
7851 				struct mbuf *m_tmp;
7852 				/* reassemble the data */
7853 				m_tmp = sp->data;
7854 				sp->data = chk->data;
7855 				SCTP_BUF_NEXT(chk->last_mbuf) = m_tmp;
7856 			}
7857 			sp->some_taken = some_taken;
7858 			atomic_add_int(&sp->length, to_move);
7859 			chk->data = NULL;
7860 			*bail = 1;
7861 			sctp_free_a_chunk(stcb, chk, so_locked);
7862 			to_move = 0;
7863 			goto out_of;
7864 		} else {
7865 			SCTP_BUF_LEN(m) = 0;
7866 			SCTP_BUF_NEXT(m) = chk->data;
7867 			chk->data = m;
7868 			M_ALIGN(chk->data, 4);
7869 		}
7870 	}
7871 	SCTP_BUF_PREPEND(chk->data, sizeof(struct sctp_data_chunk), M_NOWAIT);
7872 	if (chk->data == NULL) {
		/* HELP, TSNH since we ensured above that it would not fail? */
7874 #ifdef INVARIANTS
		panic("prepend fails HELP?");
7876 #else
7877 		SCTP_PRINTF("prepend fails HELP?\n");
7878 		sctp_free_a_chunk(stcb, chk, so_locked);
7879 #endif
7880 		*bail = 1;
7881 		to_move = 0;
7882 		goto out_of;
7883 	}
7884 	sctp_snd_sb_alloc(stcb, sizeof(struct sctp_data_chunk));
7885 	chk->book_size = chk->send_size = (to_move + sizeof(struct sctp_data_chunk));
7886 	chk->book_size_scale = 0;
7887 	chk->sent = SCTP_DATAGRAM_UNSENT;
7888 
7889 	chk->flags = 0;
7890 	chk->asoc = &stcb->asoc;
7891 	chk->pad_inplace = 0;
7892 	chk->no_fr_allowed = 0;
7893 	chk->rec.data.stream_seq = strq->next_sequence_send;
7894 	if ((rcv_flags & SCTP_DATA_LAST_FRAG) &&
7895 	    !(rcv_flags & SCTP_DATA_UNORDERED)) {
7896 		strq->next_sequence_send++;
7897 	}
7898 	chk->rec.data.stream_number = sp->stream;
7899 	chk->rec.data.payloadtype = sp->ppid;
7900 	chk->rec.data.context = sp->context;
7901 	chk->rec.data.doing_fast_retransmit = 0;
7902 
7903 	chk->rec.data.timetodrop = sp->ts;
7904 	chk->flags = sp->act_flags;
7905 
7906 	if (sp->net) {
7907 		chk->whoTo = sp->net;
7908 		atomic_add_int(&chk->whoTo->ref_count, 1);
7909 	} else
7910 		chk->whoTo = NULL;
7911 
7912 	if (sp->holds_key_ref) {
7913 		chk->auth_keyid = sp->auth_keyid;
7914 		sctp_auth_key_acquire(stcb, chk->auth_keyid);
7915 		chk->holds_key_ref = 1;
7916 	}
7917 #if defined(__FreeBSD__) || defined(__Panda__)
7918 	chk->rec.data.TSN_seq = atomic_fetchadd_int(&asoc->sending_seq, 1);
7919 #else
7920 	chk->rec.data.TSN_seq = asoc->sending_seq++;
7921 #endif
7922 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_OUTQ) {
7923 		sctp_misc_ints(SCTP_STRMOUT_LOG_SEND,
7924 		               (uintptr_t)stcb, sp->length,
7925 		               (uint32_t)((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq),
7926 		               chk->rec.data.TSN_seq);
7927 	}
7928 	dchkh = mtod(chk->data, struct sctp_data_chunk *);
7929 	/*
7930 	 * Put the rest of the things in place now. Size was done
7931 	 * earlier in previous loop prior to padding.
7932 	 */
7933 
7934 #ifdef SCTP_ASOCLOG_OF_TSNS
7935 	SCTP_TCB_LOCK_ASSERT(stcb);
7936 	if (asoc->tsn_out_at >= SCTP_TSN_LOG_SIZE) {
7937 		asoc->tsn_out_at = 0;
7938 		asoc->tsn_out_wrapped = 1;
7939 	}
7940 	asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.TSN_seq;
7941 	asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.stream_number;
7942 	asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.stream_seq;
7943 	asoc->out_tsnlog[asoc->tsn_out_at].sz = chk->send_size;
7944 	asoc->out_tsnlog[asoc->tsn_out_at].flgs = chk->rec.data.rcv_flags;
7945 	asoc->out_tsnlog[asoc->tsn_out_at].stcb = (void *)stcb;
7946 	asoc->out_tsnlog[asoc->tsn_out_at].in_pos = asoc->tsn_out_at;
7947 	asoc->out_tsnlog[asoc->tsn_out_at].in_out = 2;
7948 	asoc->tsn_out_at++;
7949 #endif
7950 
7951 	dchkh->ch.chunk_type = SCTP_DATA;
7952 	dchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
7953 	dchkh->dp.tsn = htonl(chk->rec.data.TSN_seq);
7954 	dchkh->dp.stream_id = htons(strq->stream_no);
7955 	dchkh->dp.stream_sequence = htons(chk->rec.data.stream_seq);
7956 	dchkh->dp.protocol_id = chk->rec.data.payloadtype;
7957 	dchkh->ch.chunk_length = htons(chk->send_size);
7958 	/* Now advance the chk->send_size by the actual pad needed. */
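	/*
	 * For example (hypothetical sizes): a 13 byte user fragment plus the
	 * 16 byte DATA chunk header gives a book_size of 29; SCTP_SIZE32()
	 * rounds that up to 32, so 3 pad bytes are appended and send_size
	 * becomes 32 while book_size stays 29.
	 */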
7959 	if (chk->send_size < SCTP_SIZE32(chk->book_size)) {
7960 		/* need a pad */
7961 		struct mbuf *lm;
7962 		int pads;
7963 
7964 		pads = SCTP_SIZE32(chk->book_size) - chk->send_size;
7965 		lm = sctp_pad_lastmbuf(chk->data, pads, chk->last_mbuf);
7966 		if (lm != NULL) {
7967 			chk->last_mbuf = lm;
7968 			chk->pad_inplace = 1;
7969 		}
7970 		chk->send_size += pads;
7971 	}
7972 	if (PR_SCTP_ENABLED(chk->flags)) {
7973 		asoc->pr_sctp_cnt++;
7974 	}
7975 	if (sp->msg_is_complete && (sp->length == 0) && (sp->sender_all_done)) {
		/* All done, pull and kill the message */
7977 		atomic_subtract_int(&asoc->stream_queue_cnt, 1);
7978 		if (sp->put_last_out == 0) {
7979 			SCTP_PRINTF("Gak, put out entire msg with NO end!-2\n");
7980 			SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
7981 			            sp->sender_all_done,
7982 			            sp->length,
7983 			            sp->msg_is_complete,
7984 			            sp->put_last_out,
7985 			            send_lock_up);
7986 		}
7987 		if ((send_lock_up == 0) && (TAILQ_NEXT(sp, next) == NULL)) {
7988 			SCTP_TCB_SEND_LOCK(stcb);
7989 			send_lock_up = 1;
7990 		}
7991 		TAILQ_REMOVE(&strq->outqueue, sp, next);
7992 		stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
7993 		if (sp->net) {
7994 			sctp_free_remote_addr(sp->net);
7995 			sp->net = NULL;
7996 		}
7997 		if (sp->data) {
7998 			sctp_m_freem(sp->data);
7999 			sp->data = NULL;
8000 		}
8001 		sctp_free_a_strmoq(stcb, sp, so_locked);
8002 
8003 		/* we can't be locked to it */
8004 		*locked = 0;
8005 		stcb->asoc.locked_on_sending = NULL;
8006 	} else {
8007 		/* more to go, we are locked */
8008 		*locked = 1;
8009 	}
8010 	asoc->chunks_on_out_queue++;
8011 	strq->chunks_on_queues++;
8012 	TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next);
8013 	asoc->send_queue_cnt++;
8014 out_of:
8015 	if (send_lock_up) {
8016 		SCTP_TCB_SEND_UNLOCK(stcb);
8017 	}
8018 	return (to_move);
8019 }
8020 
8021 
8022 static void
8023 sctp_fill_outqueue(struct sctp_tcb *stcb,
8024     struct sctp_nets *net, int frag_point, int eeor_mode, int *quit_now, int so_locked
8025 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
8026 	SCTP_UNUSED
8027 #endif
8028 )
8029 {
8030 	struct sctp_association *asoc;
8031 	struct sctp_stream_out *strq;
8032 	int goal_mtu, moved_how_much, total_moved = 0, bail = 0;
8033 	int locked, giveup;
8034 
8035 	SCTP_TCB_LOCK_ASSERT(stcb);
8036 	asoc = &stcb->asoc;
8037 	switch (net->ro._l_addr.sa.sa_family) {
8038 #ifdef INET
8039 		case AF_INET:
8040 			goal_mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8041 			break;
8042 #endif
8043 #ifdef INET6
8044 		case AF_INET6:
8045 			goal_mtu = net->mtu - SCTP_MIN_OVERHEAD;
8046 			break;
8047 #endif
8048 #if defined(__Userspace__)
8049 		case AF_CONN:
8050 			goal_mtu = net->mtu - sizeof(struct sctphdr);
8051 			break;
8052 #endif
8053 		default:
8054 			/* TSNH */
8055 			goal_mtu = net->mtu;
8056 			break;
8057 	}
8058 	/* Need an allowance for the data chunk header too */
8059 	goal_mtu -= sizeof(struct sctp_data_chunk);
8060 
	/* must be on an even word (4 byte) boundary */
8062 	goal_mtu &= 0xfffffffc;
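	/*
	 * For example, with a classic 1500 byte Ethernet MTU over IPv4 and
	 * no IP options, subtracting the IPv4 and SCTP common headers and
	 * the DATA chunk header leaves roughly 1452 bytes of payload per
	 * packet, which is already a multiple of 4 so the masking above
	 * changes nothing.
	 */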
8063 	if (asoc->locked_on_sending) {
8064 		/* We are stuck on one stream until the message completes. */
8065 		strq = asoc->locked_on_sending;
8066 		locked = 1;
8067 	} else {
8068 		strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
8069 		locked = 0;
8070 	}
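	/*
	 * Keep asking the stream scheduler for a stream and pulling up to
	 * goal_mtu bytes from it until the allowance is used up, the
	 * scheduler runs dry, or a move reports giveup or bail.  A stream
	 * holding a partially sent message pins us (locked) until that
	 * message completes.
	 */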
8071 	while ((goal_mtu > 0) && strq) {
8072 		giveup = 0;
8073 		bail = 0;
8074 		moved_how_much = sctp_move_to_outqueue(stcb, strq, goal_mtu, frag_point, &locked,
8075 						       &giveup, eeor_mode, &bail, so_locked);
8076 		if (moved_how_much)
8077 			stcb->asoc.ss_functions.sctp_ss_scheduled(stcb, net, asoc, strq, moved_how_much);
8078 
8079 		if (locked) {
8080 			asoc->locked_on_sending = strq;
8081 			if ((moved_how_much == 0) || (giveup) || bail)
8082 				/* no more to move for now */
8083 				break;
8084 		} else {
8085 			asoc->locked_on_sending = NULL;
8086 			if ((giveup) || bail) {
8087 				break;
8088 			}
8089 			strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
8090 			if (strq == NULL) {
8091 				break;
8092 			}
8093 		}
8094 		total_moved += moved_how_much;
8095 		goal_mtu -= (moved_how_much + sizeof(struct sctp_data_chunk));
8096 		goal_mtu &= 0xfffffffc;
8097 	}
8098 	if (bail)
8099 		*quit_now = 1;
8100 
8101 	stcb->asoc.ss_functions.sctp_ss_packet_done(stcb, net, asoc);
8102 
8103 	if (total_moved == 0) {
8104 		if ((stcb->asoc.sctp_cmt_on_off == 0) &&
8105 		    (net == stcb->asoc.primary_destination)) {
			/* ran dry for the primary network (net) */
8107 			SCTP_STAT_INCR(sctps_primary_randry);
8108 		} else if (stcb->asoc.sctp_cmt_on_off > 0) {
8109 			/* ran dry with CMT on */
8110 			SCTP_STAT_INCR(sctps_cmt_randry);
8111 		}
8112 	}
8113 }
8114 
8115 void
8116 sctp_fix_ecn_echo(struct sctp_association *asoc)
8117 {
8118 	struct sctp_tmit_chunk *chk;
8119 
8120 	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
8121 		if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
8122 			chk->sent = SCTP_DATAGRAM_UNSENT;
8123 		}
8124 	}
8125 }
8126 
8127 void
8128 sctp_move_chunks_from_net(struct sctp_tcb *stcb, struct sctp_nets *net)
8129 {
8130 	struct sctp_association *asoc;
8131 	struct sctp_tmit_chunk *chk;
8132 	struct sctp_stream_queue_pending *sp;
8133 	unsigned int i;
8134 
8135 	if (net == NULL) {
8136 		return;
8137 	}
8138 	asoc = &stcb->asoc;
8139 	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
8140 		TAILQ_FOREACH(sp, &stcb->asoc.strmout[i].outqueue, next) {
8141 			if (sp->net == net) {
8142 				sctp_free_remote_addr(sp->net);
8143 				sp->net = NULL;
8144 			}
8145 		}
8146 	}
8147 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
8148 		if (chk->whoTo == net) {
8149 			sctp_free_remote_addr(chk->whoTo);
8150 			chk->whoTo = NULL;
8151 		}
8152 	}
8153 }
8154 
8155 int
8156 sctp_med_chunk_output(struct sctp_inpcb *inp,
8157 		      struct sctp_tcb *stcb,
8158 		      struct sctp_association *asoc,
8159 		      int *num_out,
8160 		      int *reason_code,
8161 		      int control_only, int from_where,
8162 		      struct timeval *now, int *now_filled, int frag_point, int so_locked
8163 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
8164 		      SCTP_UNUSED
8165 #endif
8166 	)
8167 {
	/**
	 * Ok, this is the generic chunk service queue. We must do the
	 * following:
	 * - Service the stream queue that is next, moving any message
	 *   (note we must get a complete message, i.e. FIRST/MIDDLE and
	 *   LAST, to the out queue in one pass) and assigning TSNs.
	 * - Check to see if the cwnd/rwnd allows any output; if so, go
	 *   ahead and formulate and send the low level chunks, making
	 *   sure to also combine any control in the control chunk queue.
	 */
8177 	struct sctp_nets *net, *start_at, *sack_goes_to = NULL, *old_start_at = NULL;
8178 	struct mbuf *outchain, *endoutchain;
8179 	struct sctp_tmit_chunk *chk, *nchk;
8180 
8181 	/* temp arrays for unlinking */
8182 	struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
8183 	int no_fragmentflg, error;
8184 	unsigned int max_rwnd_per_dest, max_send_per_dest;
8185 	int one_chunk, hbflag, skip_data_for_this_net;
8186 	int asconf, cookie, no_out_cnt;
8187 	int bundle_at, ctl_cnt, no_data_chunks, eeor_mode;
8188 	unsigned int mtu, r_mtu, omtu, mx_mtu, to_out;
8189 	int tsns_sent = 0;
8190 	uint32_t auth_offset = 0;
8191 	struct sctp_auth_chunk *auth = NULL;
8192 	uint16_t auth_keyid;
8193 	int override_ok = 1;
8194 	int skip_fill_up = 0;
8195 	int data_auth_reqd = 0;
8196 	/* JRS 5/14/07 - Add flag for whether a heartbeat is sent to
8197 	   the destination. */
8198 	int quit_now = 0;
8199 
8200 #if defined(__APPLE__)
8201 	if (so_locked) {
8202 		sctp_lock_assert(SCTP_INP_SO(inp));
8203 	} else {
8204 		sctp_unlock_assert(SCTP_INP_SO(inp));
8205 	}
8206 #endif
8207 	*num_out = 0;
8208 	*reason_code = 0;
8209 	auth_keyid = stcb->asoc.authinfo.active_keyid;
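	/*
	 * A pending or received shutdown is treated like explicit EOR mode,
	 * presumably so that partially filled messages can still be pushed
	 * out even though no further user data will complete them.
	 */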
8210 	if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
8211 	    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED) ||
8212 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
8213 		eeor_mode = 1;
8214 	} else {
8215 		eeor_mode = 0;
8216 	}
8217 	ctl_cnt = no_out_cnt = asconf = cookie = 0;
8218 	/*
	 * First let's prime the pump. For each destination, if there is room
8220 	 * in the flight size, attempt to pull an MTU's worth out of the
8221 	 * stream queues into the general send_queue
8222 	 */
8223 #ifdef SCTP_AUDITING_ENABLED
8224 	sctp_audit_log(0xC2, 2);
8225 #endif
8226 	SCTP_TCB_LOCK_ASSERT(stcb);
8227 	hbflag = 0;
8228 	if ((control_only) || (asoc->stream_reset_outstanding))
8229 		no_data_chunks = 1;
8230 	else
8231 		no_data_chunks = 0;
8232 
	/* Nothing possible to send? */
8234 	if ((TAILQ_EMPTY(&asoc->control_send_queue) ||
8235 	     (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) &&
8236 	    TAILQ_EMPTY(&asoc->asconf_send_queue) &&
8237 	    TAILQ_EMPTY(&asoc->send_queue) &&
8238 	    stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
8239 	nothing_to_send:
8240 		*reason_code = 9;
8241 		return (0);
8242 	}
8243 	if (asoc->peers_rwnd == 0) {
		/* No room in the peer's rwnd */
8245 		*reason_code = 1;
8246 		if (asoc->total_flight > 0) {
8247 			/* we are allowed one chunk in flight */
8248 			no_data_chunks = 1;
8249 		}
8250 	}
8251 	if (stcb->asoc.ecn_echo_cnt_onq) {
8252 		/* Record where a sack goes, if any */
8253 		if (no_data_chunks &&
8254 		    (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) {
8255 			/* Nothing but ECNe to send - we don't do that */
8256 			goto nothing_to_send;
8257 		}
8258 		TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
8259 			if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8260 			    (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
8261 				sack_goes_to = chk->whoTo;
8262 				break;
8263 			}
8264 		}
8265 	}
8266 	max_rwnd_per_dest = ((asoc->peers_rwnd + asoc->total_flight) / asoc->numnets);
8267 	if (stcb->sctp_socket)
8268 		max_send_per_dest = SCTP_SB_LIMIT_SND(stcb->sctp_socket) / asoc->numnets;
8269 	else
8270 		max_send_per_dest = 0;
8271 	if (no_data_chunks == 0) {
8272 		/* How many non-directed chunks are there? */
8273 		TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
8274 			if (chk->whoTo == NULL) {
8275 				/* We already have non-directed
8276 				 * chunks on the queue, no need
8277 				 * to do a fill-up.
8278 				 */
8279 				skip_fill_up = 1;
8280 				break;
8281 			}
8282 		}
8283 
8284 	}
8285 	if ((no_data_chunks == 0) &&
8286 	    (skip_fill_up == 0) &&
8287 	    (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc))) {
8288 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			/*
			 * This for loop takes in each net; if it has
			 * space in its cwnd and has data sent to it
			 * (when CMT is off), then it calls
			 * sctp_fill_outqueue for the net. This gets
			 * data onto the send queue for that network.
			 *
			 * In sctp_fill_outqueue TSNs are assigned and
			 * data is copied out of the stream buffers. Note
			 * that this is mostly copy by reference (we hope).
			 */
8300 			net->window_probe = 0;
8301 			if ((net != stcb->asoc.alternate) &&
8302 			    ((net->dest_state & SCTP_ADDR_PF) ||
8303 			     (!(net->dest_state & SCTP_ADDR_REACHABLE)) ||
8304 			     (net->dest_state & SCTP_ADDR_UNCONFIRMED))) {
8305 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8306 					sctp_log_cwnd(stcb, net, 1,
8307 						      SCTP_CWND_LOG_FILL_OUTQ_CALLED);
8308 				}
				continue;
8310 			}
8311 			if ((stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) &&
8312 			    (net->flight_size == 0)) {
8313 				(*stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins)(stcb, net);
8314 			}
8315 			if (net->flight_size >= net->cwnd) {
8316 				/* skip this network, no room - can't fill */
8317 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8318 					sctp_log_cwnd(stcb, net, 3,
8319 						      SCTP_CWND_LOG_FILL_OUTQ_CALLED);
8320 				}
8321 				continue;
8322 			}
8323 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8324 				sctp_log_cwnd(stcb, net, 4, SCTP_CWND_LOG_FILL_OUTQ_CALLED);
8325 			}
8326 			sctp_fill_outqueue(stcb, net, frag_point, eeor_mode, &quit_now, so_locked);
8327 			if (quit_now) {
8328 				/* memory alloc failure */
8329 				no_data_chunks = 1;
8330 				break;
8331 			}
8332 		}
8333 	}
8334 	/* now service each destination and send out what we can for it */
8335 	/* Nothing to send? */
8336 	if (TAILQ_EMPTY(&asoc->control_send_queue) &&
8337 	    TAILQ_EMPTY(&asoc->asconf_send_queue) &&
8338 	    TAILQ_EMPTY(&asoc->send_queue)) {
8339 		*reason_code = 8;
8340 		return (0);
8341 	}
8342 
8343 	if (asoc->sctp_cmt_on_off > 0) {
8344 		/* get the last start point */
8345 		start_at = asoc->last_net_cmt_send_started;
8346 		if (start_at == NULL) {
			/* NULL, so start at the beginning */
8348 			start_at = TAILQ_FIRST(&asoc->nets);
8349 		} else {
8350 			start_at = TAILQ_NEXT(asoc->last_net_cmt_send_started, sctp_next);
8351 			if (start_at == NULL) {
8352 				start_at = TAILQ_FIRST(&asoc->nets);
8353 			}
8354 		}
8355 		asoc->last_net_cmt_send_started = start_at;
8356 	} else {
8357 		start_at = TAILQ_FIRST(&asoc->nets);
8358 	}
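	/*
	 * Control chunks queued without an explicit destination are bound
	 * here to the alternate path if one is set, otherwise to the
	 * primary destination.
	 */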
8359 	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
8360 		if (chk->whoTo == NULL) {
8361 			if (asoc->alternate) {
8362 				chk->whoTo = asoc->alternate;
8363 			} else {
8364 				chk->whoTo = asoc->primary_destination;
8365 			}
8366 			atomic_add_int(&chk->whoTo->ref_count, 1);
8367 		}
8368 	}
8369 	old_start_at = NULL;
8370 again_one_more_time:
8371 	for (net = start_at ; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
8372 		/* how much can we send? */
8373 		/* SCTPDBG("Examine for sending net:%x\n", (uint32_t)net); */
8374 		if (old_start_at && (old_start_at == net)) {
			/* through the list completely. */
8376 			break;
8377 		}
8378 		tsns_sent = 0xa;
8379 		if (TAILQ_EMPTY(&asoc->control_send_queue) &&
8380 		    TAILQ_EMPTY(&asoc->asconf_send_queue) &&
8381 		    (net->flight_size >= net->cwnd)) {
8382 			/* Nothing on control or asconf and flight is full, we can skip
8383 			 * even in the CMT case.
8384 			 */
8385 			continue;
8386 		}
8387 		bundle_at = 0;
8388 		endoutchain = outchain = NULL;
8389 		no_fragmentflg = 1;
8390 		one_chunk = 0;
8391 		if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
8392 			skip_data_for_this_net = 1;
8393 		} else {
8394 			skip_data_for_this_net = 0;
8395 		}
8396 		switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
8397 #ifdef INET
8398 		case AF_INET:
8399 			mtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
8400 			break;
8401 #endif
8402 #ifdef INET6
8403 		case AF_INET6:
8404 			mtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
8405 			break;
8406 #endif
8407 #if defined(__Userspace__)
8408 		case AF_CONN:
8409 			mtu = net->mtu - sizeof(struct sctphdr);
8410 			break;
8411 #endif
8412 		default:
8413 			/* TSNH */
8414 			mtu = net->mtu;
8415 			break;
8416 		}
8417 		mx_mtu = mtu;
8418 		to_out = 0;
8419 		if (mtu > asoc->peers_rwnd) {
8420 			if (asoc->total_flight > 0) {
8421 				/* We have a packet in flight somewhere */
8422 				r_mtu = asoc->peers_rwnd;
8423 			} else {
8424 				/* We are always allowed to send one MTU out */
8425 				one_chunk = 1;
8426 				r_mtu = mtu;
8427 			}
8428 		} else {
8429 			r_mtu = mtu;
8430 		}
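		/*
		 * From here on, mtu is what still fits in the packet being
		 * built for this path, while r_mtu is additionally capped by
		 * the peer's receive window.  one_chunk marks the special
		 * case where the window is closed but nothing is in flight,
		 * so a single MTU-sized window probe is still allowed.
		 */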
8431 		/************************/
8432 		/* ASCONF transmission */
8433 		/************************/
8434 		/* Now first lets go through the asconf queue */
8435 		TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
8436 			if (chk->rec.chunk_id.id != SCTP_ASCONF) {
8437 				continue;
8438 			}
8439 			if (chk->whoTo == NULL) {
8440 				if (asoc->alternate == NULL) {
8441 					if (asoc->primary_destination != net) {
8442 						break;
8443 					}
8444 				} else {
8445 					if (asoc->alternate != net) {
8446 						break;
8447 					}
8448 				}
8449 			} else {
8450 				if (chk->whoTo != net) {
8451 					break;
8452 				}
8453 			}
8454 			if (chk->data == NULL) {
8455 				break;
8456 			}
8457 			if (chk->sent != SCTP_DATAGRAM_UNSENT &&
8458 			    chk->sent != SCTP_DATAGRAM_RESEND) {
8459 				break;
8460 			}
8461 			/*
8462 			 * if no AUTH is yet included and this chunk
8463 			 * requires it, make sure to account for it.  We
8464 			 * don't apply the size until the AUTH chunk is
8465 			 * actually added below in case there is no room for
8466 			 * this chunk. NOTE: we overload the use of "omtu"
8467 			 * here
8468 			 */
8469 			if ((auth == NULL) &&
8470 			    sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8471 							stcb->asoc.peer_auth_chunks)) {
8472 				omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8473 			} else
8474 				omtu = 0;
8475 			/* Here we do NOT factor the r_mtu */
8476 			if ((chk->send_size < (int)(mtu - omtu)) ||
8477 			    (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
8478 				/*
				 * We probably should glom the mbuf chain
				 * from chk->data for control, but the
				 * problem is that it becomes yet one more
				 * level of tracking to do if output fails
				 * for some reason. Then we would have to
				 * reconstruct the merged control chain...
				 * el yucko. For now we take the easy way
				 * and do the copy.
8487 				 */
8488 				/*
8489 				 * Add an AUTH chunk, if chunk requires it
8490 				 * save the offset into the chain for AUTH
8491 				 */
8492 				if ((auth == NULL) &&
8493 				    (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8494 								 stcb->asoc.peer_auth_chunks))) {
8495 					outchain = sctp_add_auth_chunk(outchain,
8496 								       &endoutchain,
8497 								       &auth,
8498 								       &auth_offset,
8499 								       stcb,
8500 								       chk->rec.chunk_id.id);
8501 					SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8502 				}
8503 				outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
8504 							       (int)chk->rec.chunk_id.can_take_data,
8505 							       chk->send_size, chk->copy_by_ref);
8506 				if (outchain == NULL) {
8507 					*reason_code = 8;
8508 					SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8509 					return (ENOMEM);
8510 				}
8511 				SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8512 				/* update our MTU size */
8513 				if (mtu > (chk->send_size + omtu))
8514 					mtu -= (chk->send_size + omtu);
8515 				else
8516 					mtu = 0;
8517 				to_out += (chk->send_size + omtu);
8518 				/* Do clear IP_DF ? */
8519 				if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8520 					no_fragmentflg = 0;
8521 				}
8522 				if (chk->rec.chunk_id.can_take_data)
8523 					chk->data = NULL;
8524 				/*
8525 				 * set hb flag since we can
8526 				 * use these for RTO
8527 				 */
8528 				hbflag = 1;
8529 				asconf = 1;
8530 				/*
8531 				 * should sysctl this: don't
8532 				 * bundle data with ASCONF
8533 				 * since it requires AUTH
8534 				 */
8535 				no_data_chunks = 1;
8536 				chk->sent = SCTP_DATAGRAM_SENT;
8537 				if (chk->whoTo == NULL) {
8538 					chk->whoTo = net;
8539 					atomic_add_int(&net->ref_count, 1);
8540 				}
8541 				chk->snd_count++;
8542 				if (mtu == 0) {
8543 					/*
8544 					 * Ok we are out of room but we can
					 * output without affecting the
8546 					 * flight size since this little guy
8547 					 * is a control only packet.
8548 					 */
8549 					sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
8550 					/*
8551 					 * do NOT clear the asconf
8552 					 * flag as it is used to do
8553 					 * appropriate source address
8554 					 * selection.
8555 					 */
8556 					if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
8557 					                                        (struct sockaddr *)&net->ro._l_addr,
8558 					                                        outchain, auth_offset, auth,
8559 					                                        stcb->asoc.authinfo.active_keyid,
8560 					                                        no_fragmentflg, 0, asconf,
8561 					                                        inp->sctp_lport, stcb->rport,
8562 					                                        htonl(stcb->asoc.peer_vtag),
8563 					                                        net->port, NULL,
8564 #if defined(__FreeBSD__)
8565 					                                        0, 0,
8566 #endif
8567 					                                        so_locked))) {
8568 						if (error == ENOBUFS) {
8569 							asoc->ifp_had_enobuf = 1;
8570 							SCTP_STAT_INCR(sctps_lowlevelerr);
8571 						}
8572 						if (from_where == 0) {
8573 							SCTP_STAT_INCR(sctps_lowlevelerrusr);
8574 						}
8575 						if (*now_filled == 0) {
8576 							(void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8577 							*now_filled = 1;
8578 							*now = net->last_sent_time;
8579 						} else {
8580 							net->last_sent_time = *now;
8581 						}
8582 						hbflag = 0;
8583 						/* error, could not output */
8584 						if (error == EHOSTUNREACH) {
8585 							/*
8586 							 * Destination went
8587 							 * unreachable
8588 							 * during this send
8589 							 */
8590 							sctp_move_chunks_from_net(stcb, net);
8591 						}
8592 						*reason_code = 7;
8593 						continue;
8594 					} else
8595 						asoc->ifp_had_enobuf = 0;
8596 					if (*now_filled == 0) {
8597 						(void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8598 						*now_filled = 1;
8599 						*now = net->last_sent_time;
8600 					} else {
8601 						net->last_sent_time = *now;
8602 					}
8603 					hbflag = 0;
8604 					/*
8605 					 * increase the number we sent, if a
8606 					 * cookie is sent we don't tell them
8607 					 * any was sent out.
8608 					 */
8609 					outchain = endoutchain = NULL;
8610 					auth = NULL;
8611 					auth_offset = 0;
8612 					if (!no_out_cnt)
8613 						*num_out += ctl_cnt;
8614 					/* recalc a clean slate and setup */
8615 					switch (net->ro._l_addr.sa.sa_family) {
8616 #ifdef INET
8617 						case AF_INET:
8618 							mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8619 							break;
8620 #endif
8621 #ifdef INET6
8622 						case AF_INET6:
8623 							mtu = net->mtu - SCTP_MIN_OVERHEAD;
8624 							break;
8625 #endif
8626 #if defined(__Userspace__)
8627 						case AF_CONN:
8628 							mtu = net->mtu - sizeof(struct sctphdr);
8629 							break;
8630 #endif
8631 						default:
8632 							/* TSNH */
8633 							mtu = net->mtu;
8634 							break;
8635 					}
8636 					to_out = 0;
8637 					no_fragmentflg = 1;
8638 				}
8639 			}
8640 		}
8641 		/************************/
8642 		/* Control transmission */
8643 		/************************/
8644 		/* Now first lets go through the control queue */
8645 		TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
8646 			if ((sack_goes_to) &&
8647 			    (chk->rec.chunk_id.id == SCTP_ECN_ECHO) &&
8648 			    (chk->whoTo != sack_goes_to)) {
8649 				/*
8650 				 * if we have a sack in queue, and we are looking at an
8651 				 * ecn echo that is NOT queued to where the sack is going..
8652 				 */
8653 				if (chk->whoTo == net) {
					/* Don't transmit it to where it's going (current net) */
8655 					continue;
8656 				} else if (sack_goes_to == net) {
8657 					/* But do transmit it to this address */
8658 					goto skip_net_check;
8659 				}
8660 			}
8661 			if (chk->whoTo == NULL) {
8662 				if (asoc->alternate == NULL) {
8663 					if (asoc->primary_destination != net) {
8664 						continue;
8665 					}
8666 				} else {
8667 					if (asoc->alternate != net) {
8668 						continue;
8669 					}
8670 				}
8671 			} else {
8672 				if (chk->whoTo != net) {
8673 					continue;
8674 				}
8675 			}
8676 		skip_net_check:
8677 			if (chk->data == NULL) {
8678 				continue;
8679 			}
8680 			if (chk->sent != SCTP_DATAGRAM_UNSENT) {
8681 				/*
				 * It must be unsent. Cookies and ASCONFs
				 * hang around, but their timers will force
				 * the send when they are marked for resend.
8685 				 */
8686 				continue;
8687 			}
8688 			/*
8689 			 * if no AUTH is yet included and this chunk
8690 			 * requires it, make sure to account for it.  We
8691 			 * don't apply the size until the AUTH chunk is
8692 			 * actually added below in case there is no room for
8693 			 * this chunk. NOTE: we overload the use of "omtu"
8694 			 * here
8695 			 */
8696 			if ((auth == NULL) &&
8697 			    sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8698 							stcb->asoc.peer_auth_chunks)) {
8699 				omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8700 			} else
8701 				omtu = 0;
8702 			/* Here we do NOT factor the r_mtu */
8703 			if ((chk->send_size <= (int)(mtu - omtu)) ||
8704 			    (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
8705 				/*
				 * We probably should glom the mbuf chain
				 * from chk->data for control, but the
				 * problem is that it becomes yet one more
				 * level of tracking to do if output fails
				 * for some reason. Then we would have to
				 * reconstruct the merged control chain...
				 * el yucko. For now we take the easy way
				 * and do the copy.
8714 				 */
8715 				/*
8716 				 * Add an AUTH chunk, if chunk requires it
8717 				 * save the offset into the chain for AUTH
8718 				 */
8719 				if ((auth == NULL) &&
8720 				    (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8721 								 stcb->asoc.peer_auth_chunks))) {
8722 					outchain = sctp_add_auth_chunk(outchain,
8723 								       &endoutchain,
8724 								       &auth,
8725 								       &auth_offset,
8726 								       stcb,
8727 								       chk->rec.chunk_id.id);
8728 					SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8729 				}
8730 				outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
8731 							       (int)chk->rec.chunk_id.can_take_data,
8732 							       chk->send_size, chk->copy_by_ref);
8733 				if (outchain == NULL) {
8734 					*reason_code = 8;
8735 					SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8736 					return (ENOMEM);
8737 				}
8738 				SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8739 				/* update our MTU size */
8740 				if (mtu > (chk->send_size + omtu))
8741 					mtu -= (chk->send_size + omtu);
8742 				else
8743 					mtu = 0;
8744 				to_out += (chk->send_size + omtu);
8745 				/* Do clear IP_DF ? */
8746 				if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8747 					no_fragmentflg = 0;
8748 				}
8749 				if (chk->rec.chunk_id.can_take_data)
8750 					chk->data = NULL;
8751 				/* Mark things to be removed, if needed */
8752 				if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8753 				    (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
8754 				    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
8755 				    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
8756 				    (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
8757 				    (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
8758 				    (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
8759 				    (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
8760 				    (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
8761 				    (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
8762 				    (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
8763 					if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) {
8764 						hbflag = 1;
8765 					}
8766 					/* remove these chunks at the end */
8767 					if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8768 					    (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
8769 						/* turn off the timer */
8770 						if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
8771 							sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
8772 									inp, stcb, net, SCTP_FROM_SCTP_OUTPUT+SCTP_LOC_1);
8773 						}
8774 					}
8775 					ctl_cnt++;
8776 				} else {
8777 					/*
					 * Other chunks have timers
					 * running (e.g. COOKIE), so we
					 * just "trust" that they get
					 * sent or retransmitted.
8782 					 */
8783 					ctl_cnt++;
8784 					if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
8785 						cookie = 1;
8786 						no_out_cnt = 1;
8787 					} else if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
8788 						/*
						 * Increment the ECNE send count here.
						 * This means we may be over-zealous in
						 * our counting if the send fails, but it's
						 * the best place to do it (we used to do
						 * it when queueing the chunk, but that did
						 * not tell how many times it was sent).
8795 						 */
8796 						SCTP_STAT_INCR(sctps_sendecne);
8797 					}
8798 					chk->sent = SCTP_DATAGRAM_SENT;
8799 					if (chk->whoTo == NULL) {
8800 						chk->whoTo = net;
8801 						atomic_add_int(&net->ref_count, 1);
8802 					}
8803 					chk->snd_count++;
8804 				}
8805 				if (mtu == 0) {
8806 					/*
8807 					 * Ok we are out of room but we can
					 * output without affecting the
8809 					 * flight size since this little guy
8810 					 * is a control only packet.
8811 					 */
8812 					if (asconf) {
8813 						sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
8814 						/*
8815 						 * do NOT clear the asconf
8816 						 * flag as it is used to do
8817 						 * appropriate source address
8818 						 * selection.
8819 						 */
8820 					}
8821 					if (cookie) {
8822 						sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
8823 						cookie = 0;
8824 					}
8825 					if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
8826 					                                        (struct sockaddr *)&net->ro._l_addr,
8827 					                                        outchain,
8828 					                                        auth_offset, auth,
8829 					                                        stcb->asoc.authinfo.active_keyid,
8830 					                                        no_fragmentflg, 0, asconf,
8831 					                                        inp->sctp_lport, stcb->rport,
8832 					                                        htonl(stcb->asoc.peer_vtag),
8833 					                                        net->port, NULL,
8834 #if defined(__FreeBSD__)
8835 					                                        0, 0,
8836 #endif
8837 					                                        so_locked))) {
8838 						if (error == ENOBUFS) {
8839 							asoc->ifp_had_enobuf = 1;
8840 							SCTP_STAT_INCR(sctps_lowlevelerr);
8841 						}
8842 						if (from_where == 0) {
8843 							SCTP_STAT_INCR(sctps_lowlevelerrusr);
8844 						}
8845 						/* error, could not output */
8846 						if (hbflag) {
8847 							if (*now_filled == 0) {
8848 								(void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8849 								*now_filled = 1;
8850 								*now = net->last_sent_time;
8851 							} else {
8852 								net->last_sent_time = *now;
8853 							}
8854 							hbflag = 0;
8855 						}
8856 						if (error == EHOSTUNREACH) {
8857 							/*
8858 							 * Destination went
8859 							 * unreachable
8860 							 * during this send
8861 							 */
8862 							sctp_move_chunks_from_net(stcb, net);
8863 						}
8864 						*reason_code = 7;
8865 						continue;
8866 					} else
8867 						asoc->ifp_had_enobuf = 0;
8868 					/* Only HB or ASCONF advances time */
8869 					if (hbflag) {
8870 						if (*now_filled == 0) {
8871 							(void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8872 							*now_filled = 1;
8873 							*now = net->last_sent_time;
8874 						} else {
8875 							net->last_sent_time = *now;
8876 						}
8877 						hbflag = 0;
8878 					}
8879 					/*
8880 					 * increase the number we sent, if a
8881 					 * cookie is sent we don't tell them
8882 					 * any was sent out.
8883 					 */
8884 					outchain = endoutchain = NULL;
8885 					auth = NULL;
8886 					auth_offset = 0;
8887 					if (!no_out_cnt)
8888 						*num_out += ctl_cnt;
8889 					/* recalc a clean slate and setup */
8890 					switch (net->ro._l_addr.sa.sa_family) {
8891 #ifdef INET
8892 						case AF_INET:
8893 							mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8894 							break;
8895 #endif
8896 #ifdef INET6
8897 						case AF_INET6:
8898 							mtu = net->mtu - SCTP_MIN_OVERHEAD;
8899 							break;
8900 #endif
8901 #if defined(__Userspace__)
8902 						case AF_CONN:
8903 							mtu = net->mtu - sizeof(struct sctphdr);
8904 							break;
8905 #endif
8906 						default:
8907 							/* TSNH */
8908 							mtu = net->mtu;
8909 							break;
8910 					}
8911 					to_out = 0;
8912 					no_fragmentflg = 1;
8913 				}
8914 			}
8915 		}
8916 		/* JRI: if dest is in PF state, do not send data to it */
8917 		if ((asoc->sctp_cmt_on_off > 0) &&
8918 		    (net != stcb->asoc.alternate) &&
8919 		    (net->dest_state & SCTP_ADDR_PF)) {
8920 			goto no_data_fill;
8921 		}
8922 		if (net->flight_size >= net->cwnd) {
8923 			goto no_data_fill;
8924 		}
8925 		if ((asoc->sctp_cmt_on_off > 0) &&
8926 		    (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_RECV_BUFFER_SPLITTING) &&
8927 		    (net->flight_size > max_rwnd_per_dest)) {
8928 			goto no_data_fill;
8929 		}
8930 		/*
8931 		 * We need a specific accounting for the usage of the
8932 		 * send buffer. We also need to check the number of messages
		 * per net. For now, this is better than nothing and it
		 * is disabled by default...
8935 		 */
8936 		if ((asoc->sctp_cmt_on_off > 0) &&
8937 		    (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_SEND_BUFFER_SPLITTING) &&
8938 		    (max_send_per_dest > 0) &&
8939 		    (net->flight_size > max_send_per_dest)) {
8940 			goto no_data_fill;
8941 		}
8942 		/*********************/
8943 		/* Data transmission */
8944 		/*********************/
8945 		/*
8946 		 * if AUTH for DATA is required and no AUTH has been added
8947 		 * yet, account for this in the mtu now... if no data can be
		 * bundled, this adjustment won't matter anyway since the
8949 		 * packet will be going out...
8950 		 */
8951 		data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA,
8952 							     stcb->asoc.peer_auth_chunks);
8953 		if (data_auth_reqd && (auth == NULL)) {
8954 			mtu -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8955 		}
8956 		/* now lets add any data within the MTU constraints */
8957 		switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
8958 #ifdef INET
8959 		case AF_INET:
8960 			if (net->mtu > (sizeof(struct ip) + sizeof(struct sctphdr)))
8961 				omtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
8962 			else
8963 				omtu = 0;
8964 			break;
8965 #endif
8966 #ifdef INET6
8967 		case AF_INET6:
8968 			if (net->mtu > (sizeof(struct ip6_hdr) + sizeof(struct sctphdr)))
8969 				omtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
8970 			else
8971 				omtu = 0;
8972 			break;
8973 #endif
8974 #if defined(__Userspace__)
8975 		case AF_CONN:
8976 			if (net->mtu > sizeof(struct sctphdr)) {
8977 				omtu = net->mtu - sizeof(struct sctphdr);
8978 			} else {
8979 				omtu = 0;
8980 			}
8981 			break;
8982 #endif
8983 		default:
8984 			/* TSNH */
8985 			omtu = 0;
8986 			break;
8987 		}
8988 		if ((((asoc->state & SCTP_STATE_OPEN) == SCTP_STATE_OPEN) &&
8989 		     (skip_data_for_this_net == 0)) ||
8990 		    (cookie)) {
8991 			TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
8992 				if (no_data_chunks) {
8993 					/* let only control go out */
8994 					*reason_code = 1;
8995 					break;
8996 				}
8997 				if (net->flight_size >= net->cwnd) {
8998 					/* skip this net, no room for data */
8999 					*reason_code = 2;
9000 					break;
9001 				}
9002 				if ((chk->whoTo != NULL) &&
9003 				    (chk->whoTo != net)) {
9004 					/* Don't send the chunk on this net */
9005 					continue;
9006 				}
9007 
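				/*
				 * Without CMT, data that was not explicitly
				 * directed to a destination is only sent on
				 * the alternate path (if set) or else the
				 * primary destination; other nets skip it.
				 */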
9008 				if (asoc->sctp_cmt_on_off == 0) {
9009 					if ((asoc->alternate) &&
9010 					    (asoc->alternate != net) &&
9011 					    (chk->whoTo == NULL)) {
9012 						continue;
9013 					} else if ((net != asoc->primary_destination) &&
9014 						   (asoc->alternate == NULL) &&
9015 						   (chk->whoTo == NULL)) {
9016 						continue;
9017 					}
9018 				}
9019 				if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) {
9020 					/*-
					 * Strange, we have a chunk that is
					 * too big for its destination and
					 * yet no fragment-ok flag.
					 * Something went wrong when the
					 * PMTU changed... we did not mark
					 * this chunk for some reason? We
					 * will fix it here by letting IP
					 * fragment it for now and printing
					 * a warning. This really should not
					 * happen...
9031 					 */
9032 					SCTP_PRINTF("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n",
9033 						    chk->send_size, mtu);
9034 					chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
9035 				}
9036 				if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) &&
9037 				    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) == SCTP_STATE_SHUTDOWN_PENDING)) {
9038 					struct sctp_data_chunk *dchkh;
9039 
9040 					dchkh = mtod(chk->data, struct sctp_data_chunk *);
9041 					dchkh->ch.chunk_flags |= SCTP_DATA_SACK_IMMEDIATELY;
9042 				}
9043 				if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) ||
9044 				    ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) {
9045 					/* ok we will add this one */
9046 
9047 					/*
9048 					 * Add an AUTH chunk, if chunk
9049 					 * requires it, save the offset into
9050 					 * the chain for AUTH
9051 					 */
9052 					if (data_auth_reqd) {
9053 						if (auth == NULL) {
9054 							outchain = sctp_add_auth_chunk(outchain,
9055 										       &endoutchain,
9056 										       &auth,
9057 										       &auth_offset,
9058 										       stcb,
9059 										       SCTP_DATA);
9060 							auth_keyid = chk->auth_keyid;
9061 							override_ok = 0;
9062 							SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9063 						} else if (override_ok) {
9064 							/* use this data's keyid */
9065 							auth_keyid = chk->auth_keyid;
9066 							override_ok = 0;
9067 						} else if (auth_keyid != chk->auth_keyid) {
9068 							/* different keyid, so done bundling */
9069 							break;
9070 						}
9071 					}
9072 					outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0,
9073 								       chk->send_size, chk->copy_by_ref);
9074 					if (outchain == NULL) {
9075 						SCTPDBG(SCTP_DEBUG_OUTPUT3, "No memory?\n");
9076 						if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
9077 							sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9078 						}
9079 						*reason_code = 3;
9080 						SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
9081 						return (ENOMEM);
9082 					}
					/* update our MTU size */
9084 					/* Do clear IP_DF ? */
9085 					if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
9086 						no_fragmentflg = 0;
9087 					}
9088 					/* unsigned subtraction of mtu */
9089 					if (mtu > chk->send_size)
9090 						mtu -= chk->send_size;
9091 					else
9092 						mtu = 0;
9093 					/* unsigned subtraction of r_mtu */
9094 					if (r_mtu > chk->send_size)
9095 						r_mtu -= chk->send_size;
9096 					else
9097 						r_mtu = 0;
9098 
9099 					to_out += chk->send_size;
9100 					if ((to_out > mx_mtu) && no_fragmentflg) {
9101 #ifdef INVARIANTS
9102 						panic("Exceeding mtu of %d out size is %d", mx_mtu, to_out);
9103 #else
9104 						SCTP_PRINTF("Exceeding mtu of %d out size is %d\n",
9105 							    mx_mtu, to_out);
9106 #endif
9107 					}
9108 					chk->window_probe = 0;
9109 					data_list[bundle_at++] = chk;
9110 					if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
9111 						break;
9112 					}
9113 					if (chk->sent == SCTP_DATAGRAM_UNSENT) {
9114 						if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
9115 							SCTP_STAT_INCR_COUNTER64(sctps_outorderchunks);
9116 						} else {
9117 							SCTP_STAT_INCR_COUNTER64(sctps_outunorderchunks);
9118 						}
9119 						if (((chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) == SCTP_DATA_LAST_FRAG) &&
9120 						    ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0))
							/* Count the number of user messages that were
							 * fragmented; we do this by counting only when
							 * we see a LAST fragment.
							 */
9125 							SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs);
9126 					}
9127 					if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) {
9128 						if ((one_chunk) && (stcb->asoc.total_flight == 0)) {
9129 							data_list[0]->window_probe = 1;
9130 							net->window_probe = 1;
9131 						}
9132 						break;
9133 					}
9134 				} else {
9135 					/*
9136 					 * Must be sent in order of the
9137 					 * TSN's (on a network)
9138 					 */
9139 					break;
9140 				}
9141 			}	/* for (chunk gather loop for this net) */
9142 		}		/* if asoc.state OPEN */
9143 	no_data_fill:
9144 		/* Is there something to send for this destination? */
9145 		if (outchain) {
9146 			/* We may need to start a control timer or two */
9147 			if (asconf) {
9148 				sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp,
9149 						 stcb, net);
9150 				/*
9151 				 * do NOT clear the asconf flag as it is used
9152 				 * to do appropriate source address selection.
9153 				 */
9154 			}
9155 			if (cookie) {
9156 				sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
9157 				cookie = 0;
9158 			}
9159 			/* must start a send timer if data is being sent */
9160 			if (bundle_at && (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
9161 				/*
9162 				 * no timer running on this destination
9163 				 * restart it.
9164 				 */
9165 				sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9166 			}
9167 			/* Now send it, if there is anything to send :> */
9168 			if ((error = sctp_lowlevel_chunk_output(inp,
9169 			                                        stcb,
9170 			                                        net,
9171 			                                        (struct sockaddr *)&net->ro._l_addr,
9172 			                                        outchain,
9173 			                                        auth_offset,
9174 			                                        auth,
9175 			                                        auth_keyid,
9176 			                                        no_fragmentflg,
9177 			                                        bundle_at,
9178 			                                        asconf,
9179 			                                        inp->sctp_lport, stcb->rport,
9180 			                                        htonl(stcb->asoc.peer_vtag),
9181 			                                        net->port, NULL,
9182 #if defined(__FreeBSD__)
9183 			                                        0, 0,
9184 #endif
9185 			                                        so_locked))) {
9186 				/* error, we could not output */
9187 				if (error == ENOBUFS) {
9188 					SCTP_STAT_INCR(sctps_lowlevelerr);
9189 					asoc->ifp_had_enobuf = 1;
9190 				}
9191 				if (from_where == 0) {
9192 					SCTP_STAT_INCR(sctps_lowlevelerrusr);
9193 				}
9194 				SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
9195 				if (hbflag) {
9196 					if (*now_filled == 0) {
9197 						(void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
9198 						*now_filled = 1;
9199 						*now = net->last_sent_time;
9200 					} else {
9201 						net->last_sent_time = *now;
9202 					}
9203 					hbflag = 0;
9204 				}
9205 				if (error == EHOSTUNREACH) {
9206 					/*
9207 					 * Destination went unreachable
9208 					 * during this send
9209 					 */
9210 					sctp_move_chunks_from_net(stcb, net);
9211 				}
9212 				*reason_code = 6;
9213 				/*-
9214 				 * I add this line to be paranoid. As far as
9215 				 * I can tell the continue, takes us back to
9216 				 * the top of the for, but just to make sure
9217 				 * I will reset these again here.
9218 				 */
9219 				ctl_cnt = bundle_at = 0;
9220 				continue; /* This takes us back to the for() for the nets. */
9221 			} else {
9222 				asoc->ifp_had_enobuf = 0;
9223 			}
9224 			endoutchain = NULL;
9225 			auth = NULL;
9226 			auth_offset = 0;
9227 			if (bundle_at || hbflag) {
9228 				/* For data/asconf and hb set time */
9229 				if (*now_filled == 0) {
9230 					(void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
9231 					*now_filled = 1;
9232 					*now = net->last_sent_time;
9233 				} else {
9234 					net->last_sent_time = *now;
9235 				}
9236 			}
9237 			if (!no_out_cnt) {
9238 				*num_out += (ctl_cnt + bundle_at);
9239 			}
9240 			if (bundle_at) {
9241 				/* setup for a RTO measurement */
9242 				tsns_sent = data_list[0]->rec.data.TSN_seq;
9243 				/* fill time if not already filled */
9244 				if (*now_filled == 0) {
9245 					(void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
9246 					*now_filled = 1;
9247 					*now = asoc->time_last_sent;
9248 				} else {
9249 					asoc->time_last_sent = *now;
9250 				}
9251 				if (net->rto_needed) {
9252 					data_list[0]->do_rtt = 1;
9253 					net->rto_needed = 0;
9254 				}
9255 				SCTP_STAT_INCR_BY(sctps_senddata, bundle_at);
9256 				sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net);
9257 			}
9258 			if (one_chunk) {
9259 				break;
9260 			}
9261 		}
9262 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9263 			sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND);
9264 		}
9265 	}
9266 	if (old_start_at == NULL) {
9267 		old_start_at = start_at;
9268 		start_at = TAILQ_FIRST(&asoc->nets);
9269 		if (old_start_at)
9270 			goto again_one_more_time;
9271 	}
9272 
9273 	/*
9274 	 * At the end there should be no NON timed chunks hanging on this
9275 	 * queue.
9276 	 */
9277 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9278 		sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND);
9279 	}
9280 	if ((*num_out == 0) && (*reason_code == 0)) {
9281 		*reason_code = 4;
9282 	} else {
9283 		*reason_code = 5;
9284 	}
9285 	sctp_clean_up_ctl(stcb, asoc, so_locked);
9286 	return (0);
9287 }
9288 
9289 void
9290 sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
9291 {
9292 	/*-
9293 	 * Prepend a OPERATIONAL_ERROR chunk header and put on the end of
9294 	 * the control chunk queue.
9295 	 */
9296 	struct sctp_chunkhdr *hdr;
9297 	struct sctp_tmit_chunk *chk;
9298 	struct mbuf *mat;
9299 
9300 	SCTP_TCB_LOCK_ASSERT(stcb);
9301 	sctp_alloc_a_chunk(stcb, chk);
9302 	if (chk == NULL) {
9303 		/* no memory */
9304 		sctp_m_freem(op_err);
9305 		return;
9306 	}
9307 	chk->copy_by_ref = 0;
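	/*
	 * Prepend room for the chunk header; if this fails, op_err comes
	 * back NULL and is handled just below.
	 */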
9308 	SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_NOWAIT);
9309 	if (op_err == NULL) {
9310 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
9311 		return;
9312 	}
9313 	chk->send_size = 0;
9314 	for (mat = op_err; mat != NULL; mat = SCTP_BUF_NEXT(mat)) {
9315 		chk->send_size += SCTP_BUF_LEN(mat);
9316 	}
9317 	chk->sent = SCTP_DATAGRAM_UNSENT;
9318 	chk->snd_count = 0;
9319 	chk->asoc = &stcb->asoc;
9320 	chk->data = op_err;
9321 	chk->whoTo = NULL;
9322 	hdr = mtod(op_err, struct sctp_chunkhdr *);
9323 	hdr->chunk_type = SCTP_OPERATION_ERROR;
9324 	hdr->chunk_flags = 0;
9325 	hdr->chunk_length = htons(chk->send_size);
9326 	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue,
9327 	    chk,
9328 	    sctp_next);
9329 	chk->asoc->ctrl_queue_cnt++;
9330 }
9331 
9332 int
9333 sctp_send_cookie_echo(struct mbuf *m,
9334     int offset,
9335     struct sctp_tcb *stcb,
9336     struct sctp_nets *net)
9337 {
9338 	/*-
9339 	 * pull out the cookie and put it at the front of the control chunk
9340 	 * queue.
9341 	 */
9342 	int at;
9343 	struct mbuf *cookie;
9344 	struct sctp_paramhdr parm, *phdr;
9345 	struct sctp_chunkhdr *hdr;
9346 	struct sctp_tmit_chunk *chk;
9347 	uint16_t ptype, plen;
9348 
9349 	SCTP_TCB_LOCK_ASSERT(stcb);
9350 	/* First find the cookie in the param area */
9351 	cookie = NULL;
9352 	at = offset + sizeof(struct sctp_init_chunk);
9353 	for (;;) {
9354 		phdr = sctp_get_next_param(m, at, &parm, sizeof(parm));
9355 		if (phdr == NULL) {
9356 			return (-3);
9357 		}
9358 		ptype = ntohs(phdr->param_type);
9359 		plen = ntohs(phdr->param_length);
9360 		if (ptype == SCTP_STATE_COOKIE) {
9361 			int pad;
9362 
9363 			/* found the cookie */
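			/*
			 * Parameters are padded to a 4-byte boundary, so
			 * round plen up before copying out the whole TLV.
			 */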
9364 			if ((pad = (plen % 4))) {
9365 				plen += 4 - pad;
9366 			}
9367 			cookie = SCTP_M_COPYM(m, at, plen, M_NOWAIT);
9368 			if (cookie == NULL) {
9369 				/* No memory */
9370 				return (-2);
9371 			}
9372 #ifdef SCTP_MBUF_LOGGING
9373 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9374 				sctp_log_mbc(cookie, SCTP_MBUF_ICOPY);
9375 			}
9376 #endif
9377 			break;
9378 		}
9379 		at += SCTP_SIZE32(plen);
9380 	}
	/* ok, we got the cookie; let's change it into a COOKIE-ECHO chunk */
	/* first, overwrite the parameter header with a chunk header */
9383 	hdr = mtod(cookie, struct sctp_chunkhdr *);
9384 	hdr->chunk_type = SCTP_COOKIE_ECHO;
9385 	hdr->chunk_flags = 0;
9386 	/* get the chunk stuff now and place it in the FRONT of the queue */
9387 	sctp_alloc_a_chunk(stcb, chk);
9388 	if (chk == NULL) {
9389 		/* no memory */
9390 		sctp_m_freem(cookie);
9391 		return (-5);
9392 	}
9393 	chk->copy_by_ref = 0;
9394 	chk->rec.chunk_id.id = SCTP_COOKIE_ECHO;
9395 	chk->rec.chunk_id.can_take_data = 0;
9396 	chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9397 	chk->send_size = plen;
9398 	chk->sent = SCTP_DATAGRAM_UNSENT;
9399 	chk->snd_count = 0;
9400 	chk->asoc = &stcb->asoc;
9401 	chk->data = cookie;
9402 	chk->whoTo = net;
9403 	atomic_add_int(&chk->whoTo->ref_count, 1);
9404 	TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next);
9405 	chk->asoc->ctrl_queue_cnt++;
9406 	return (0);
9407 }
9408 
9409 void
9410 sctp_send_heartbeat_ack(struct sctp_tcb *stcb,
9411     struct mbuf *m,
9412     int offset,
9413     int chk_length,
9414     struct sctp_nets *net)
9415 {
9416 	/*
9417 	 * take a HB request and make it into a HB ack and send it.
9418 	 */
9419 	struct mbuf *outchain;
9420 	struct sctp_chunkhdr *chdr;
9421 	struct sctp_tmit_chunk *chk;
9422 
9423 
9424 	if (net == NULL)
9425 		/* must have a net pointer */
9426 		return;
9427 
9428 	outchain = SCTP_M_COPYM(m, offset, chk_length, M_NOWAIT);
9429 	if (outchain == NULL) {
9430 		/* gak out of memory */
9431 		return;
9432 	}
9433 #ifdef SCTP_MBUF_LOGGING
9434 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9435 		sctp_log_mbc(outchain, SCTP_MBUF_ICOPY);
9436 	}
9437 #endif
9438 	chdr = mtod(outchain, struct sctp_chunkhdr *);
9439 	chdr->chunk_type = SCTP_HEARTBEAT_ACK;
9440 	chdr->chunk_flags = 0;
9441 	if (chk_length % 4) {
9442 		/* need pad */
9443 		uint32_t cpthis = 0;
9444 		int padlen;
9445 
9446 		padlen = 4 - (chk_length % 4);
9447 		m_copyback(outchain, chk_length, padlen, (caddr_t)&cpthis);
9448 	}
9449 	sctp_alloc_a_chunk(stcb, chk);
9450 	if (chk == NULL) {
9451 		/* no memory */
9452 		sctp_m_freem(outchain);
9453 		return;
9454 	}
9455 	chk->copy_by_ref = 0;
9456 	chk->rec.chunk_id.id = SCTP_HEARTBEAT_ACK;
9457 	chk->rec.chunk_id.can_take_data = 1;
9458 	chk->flags = 0;
9459 	chk->send_size = chk_length;
9460 	chk->sent = SCTP_DATAGRAM_UNSENT;
9461 	chk->snd_count = 0;
9462 	chk->asoc = &stcb->asoc;
9463 	chk->data = outchain;
9464 	chk->whoTo = net;
9465 	atomic_add_int(&chk->whoTo->ref_count, 1);
9466 	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9467 	chk->asoc->ctrl_queue_cnt++;
9468 }
9469 
9470 void
9471 sctp_send_cookie_ack(struct sctp_tcb *stcb)
9472 {
9473 	/* formulate and queue a cookie-ack back to sender */
9474 	struct mbuf *cookie_ack;
9475 	struct sctp_chunkhdr *hdr;
9476 	struct sctp_tmit_chunk *chk;
9477 
9478 	SCTP_TCB_LOCK_ASSERT(stcb);
9479 
9480 	cookie_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER);
9481 	if (cookie_ack == NULL) {
		/* no mbufs */
9483 		return;
9484 	}
9485 	SCTP_BUF_RESV_UF(cookie_ack, SCTP_MIN_OVERHEAD);
9486 	sctp_alloc_a_chunk(stcb, chk);
9487 	if (chk == NULL) {
9488 		/* no memory */
9489 		sctp_m_freem(cookie_ack);
9490 		return;
9491 	}
9492 	chk->copy_by_ref = 0;
9493 	chk->rec.chunk_id.id = SCTP_COOKIE_ACK;
9494 	chk->rec.chunk_id.can_take_data = 1;
9495 	chk->flags = 0;
9496 	chk->send_size = sizeof(struct sctp_chunkhdr);
9497 	chk->sent = SCTP_DATAGRAM_UNSENT;
9498 	chk->snd_count = 0;
9499 	chk->asoc = &stcb->asoc;
9500 	chk->data = cookie_ack;
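	/*
	 * Aim the COOKIE-ACK at the source of the last control chunk
	 * received (normally the COOKIE-ECHO), if we know it.
	 */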
9501 	if (chk->asoc->last_control_chunk_from != NULL) {
9502 		chk->whoTo = chk->asoc->last_control_chunk_from;
9503 		atomic_add_int(&chk->whoTo->ref_count, 1);
9504 	} else {
9505 		chk->whoTo = NULL;
9506 	}
9507 	hdr = mtod(cookie_ack, struct sctp_chunkhdr *);
9508 	hdr->chunk_type = SCTP_COOKIE_ACK;
9509 	hdr->chunk_flags = 0;
9510 	hdr->chunk_length = htons(chk->send_size);
9511 	SCTP_BUF_LEN(cookie_ack) = chk->send_size;
9512 	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9513 	chk->asoc->ctrl_queue_cnt++;
9514 	return;
9515 }
9516 
9517 
9518 void
9519 sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net)
9520 {
9521 	/* formulate and queue a SHUTDOWN-ACK back to the sender */
9522 	struct mbuf *m_shutdown_ack;
9523 	struct sctp_shutdown_ack_chunk *ack_cp;
9524 	struct sctp_tmit_chunk *chk;
9525 
9526 	m_shutdown_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_ack_chunk), 0, M_NOWAIT, 1, MT_HEADER);
9527 	if (m_shutdown_ack == NULL) {
		/* no mbufs */
9529 		return;
9530 	}
9531 	SCTP_BUF_RESV_UF(m_shutdown_ack, SCTP_MIN_OVERHEAD);
9532 	sctp_alloc_a_chunk(stcb, chk);
9533 	if (chk == NULL) {
9534 		/* no memory */
9535 		sctp_m_freem(m_shutdown_ack);
9536 		return;
9537 	}
9538 	chk->copy_by_ref = 0;
9539 	chk->rec.chunk_id.id = SCTP_SHUTDOWN_ACK;
9540 	chk->rec.chunk_id.can_take_data = 1;
9541 	chk->flags = 0;
9542 	chk->send_size = sizeof(struct sctp_chunkhdr);
9543 	chk->sent = SCTP_DATAGRAM_UNSENT;
9544 	chk->snd_count = 0;
9546 	chk->asoc = &stcb->asoc;
9547 	chk->data = m_shutdown_ack;
9548 	chk->whoTo = net;
9549 	if (chk->whoTo) {
9550 		atomic_add_int(&chk->whoTo->ref_count, 1);
9551 	}
9552 	ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *);
9553 	ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK;
9554 	ack_cp->ch.chunk_flags = 0;
9555 	ack_cp->ch.chunk_length = htons(chk->send_size);
9556 	SCTP_BUF_LEN(m_shutdown_ack) = chk->send_size;
9557 	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9558 	chk->asoc->ctrl_queue_cnt++;
9559 	return;
9560 }
9561 
9562 void
9563 sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net)
9564 {
9565 	/* formulate and queue a SHUTDOWN to the sender */
9566 	struct mbuf *m_shutdown;
9567 	struct sctp_shutdown_chunk *shutdown_cp;
9568 	struct sctp_tmit_chunk *chk;
9569 
9570 	m_shutdown = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_chunk), 0, M_NOWAIT, 1, MT_HEADER);
9571 	if (m_shutdown == NULL) {
		/* no mbufs */
9573 		return;
9574 	}
9575 	SCTP_BUF_RESV_UF(m_shutdown, SCTP_MIN_OVERHEAD);
9576 	sctp_alloc_a_chunk(stcb, chk);
9577 	if (chk == NULL) {
9578 		/* no memory */
9579 		sctp_m_freem(m_shutdown);
9580 		return;
9581 	}
9582 	chk->copy_by_ref = 0;
9583 	chk->rec.chunk_id.id = SCTP_SHUTDOWN;
9584 	chk->rec.chunk_id.can_take_data = 1;
9585 	chk->flags = 0;
9586 	chk->send_size = sizeof(struct sctp_shutdown_chunk);
9587 	chk->sent = SCTP_DATAGRAM_UNSENT;
9588 	chk->snd_count = 0;
9590 	chk->asoc = &stcb->asoc;
9591 	chk->data = m_shutdown;
9592 	chk->whoTo = net;
9593 	if (chk->whoTo) {
9594 		atomic_add_int(&chk->whoTo->ref_count, 1);
9595 	}
9596 	shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *);
9597 	shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN;
9598 	shutdown_cp->ch.chunk_flags = 0;
9599 	shutdown_cp->ch.chunk_length = htons(chk->send_size);
9600 	shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
9601 	SCTP_BUF_LEN(m_shutdown) = chk->send_size;
9602 	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9603 	chk->asoc->ctrl_queue_cnt++;
9604 	return;
9605 }
9606 
9607 void
9608 sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net, int addr_locked)
9609 {
9610 	/*
9611 	 * formulate and queue an ASCONF to the peer.
9612 	 * ASCONF parameters should be queued on the assoc queue.
9613 	 */
9614 	struct sctp_tmit_chunk *chk;
9615 	struct mbuf *m_asconf;
9616 	int len;
9617 
9618 	SCTP_TCB_LOCK_ASSERT(stcb);
9619 
9620 	if ((!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) &&
9621 	    (!sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS))) {
9622 		/* can't send a new one if there is one in flight already */
9623 		return;
9624 	}
9625 
9626 	/* compose an ASCONF chunk, maximum length is PMTU */
9627 	m_asconf = sctp_compose_asconf(stcb, &len, addr_locked);
9628 	if (m_asconf == NULL) {
9629 		return;
9630 	}
9631 
9632 	sctp_alloc_a_chunk(stcb, chk);
9633 	if (chk == NULL) {
9634 		/* no memory */
9635 		sctp_m_freem(m_asconf);
9636 		return;
9637 	}
9638 
9639 	chk->copy_by_ref = 0;
9640 	chk->rec.chunk_id.id = SCTP_ASCONF;
9641 	chk->rec.chunk_id.can_take_data = 0;
9642 	chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9643 	chk->data = m_asconf;
9644 	chk->send_size = len;
9645 	chk->sent = SCTP_DATAGRAM_UNSENT;
9646 	chk->snd_count = 0;
9647 	chk->asoc = &stcb->asoc;
9648 	chk->whoTo = net;
9649 	if (chk->whoTo) {
9650 		atomic_add_int(&chk->whoTo->ref_count, 1);
9651 	}
9652 	TAILQ_INSERT_TAIL(&chk->asoc->asconf_send_queue, chk, sctp_next);
9653 	chk->asoc->ctrl_queue_cnt++;
9654 	return;
9655 }
9656 
9657 void
9658 sctp_send_asconf_ack(struct sctp_tcb *stcb)
9659 {
9660 	/*
9661 	 * formulate and queue a asconf-ack back to sender.
9662 	 * the asconf-ack must be stored in the tcb.
9663 	 */
9664 	struct sctp_tmit_chunk *chk;
9665 	struct sctp_asconf_ack *ack, *latest_ack;
9666 	struct mbuf *m_ack;
9667 	struct sctp_nets *net = NULL;
9668 
9669 	SCTP_TCB_LOCK_ASSERT(stcb);
9670 	/* Get the latest ASCONF-ACK */
9671 	latest_ack = TAILQ_LAST(&stcb->asoc.asconf_ack_sent, sctp_asconf_ackhead);
9672 	if (latest_ack == NULL) {
9673 		return;
9674 	}
9675 	if (latest_ack->last_sent_to != NULL &&
9676 	    latest_ack->last_sent_to == stcb->asoc.last_control_chunk_from) {
9677 		/* we're doing a retransmission */
9678 		net = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0);
9679 		if (net == NULL) {
9680 			/* no alternate */
9681 			if (stcb->asoc.last_control_chunk_from == NULL) {
9682 				if (stcb->asoc.alternate) {
9683 					net = stcb->asoc.alternate;
9684 				} else {
9685 					net = stcb->asoc.primary_destination;
9686 				}
9687 			} else {
9688 				net = stcb->asoc.last_control_chunk_from;
9689 			}
9690 		}
9691 	} else {
9692 		/* normal case */
9693 		if (stcb->asoc.last_control_chunk_from == NULL) {
9694 			if (stcb->asoc.alternate) {
9695 				net = stcb->asoc.alternate;
9696 			} else {
9697 				net = stcb->asoc.primary_destination;
9698 			}
9699 		} else {
9700 			net = stcb->asoc.last_control_chunk_from;
9701 		}
9702 	}
9703 	latest_ack->last_sent_to = net;
9704 
9705 	TAILQ_FOREACH(ack, &stcb->asoc.asconf_ack_sent, next) {
9706 		if (ack->data == NULL) {
9707 			continue;
9708 		}
9709 
9710 		/* copy the asconf_ack */
9711 		m_ack = SCTP_M_COPYM(ack->data, 0, M_COPYALL, M_NOWAIT);
9712 		if (m_ack == NULL) {
9713 			/* couldn't copy it */
9714 			return;
9715 		}
9716 #ifdef SCTP_MBUF_LOGGING
9717 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9718 			sctp_log_mbc(m_ack, SCTP_MBUF_ICOPY);
9719 		}
9720 #endif
9721 
9722 		sctp_alloc_a_chunk(stcb, chk);
9723 		if (chk == NULL) {
9724 			/* no memory */
9725 			if (m_ack)
9726 				sctp_m_freem(m_ack);
9727 			return;
9728 		}
9729 		chk->copy_by_ref = 0;
9730 		chk->rec.chunk_id.id = SCTP_ASCONF_ACK;
9731 		chk->rec.chunk_id.can_take_data = 1;
9732 		chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9733 		chk->whoTo = net;
9734 		if (chk->whoTo) {
9735 			atomic_add_int(&chk->whoTo->ref_count, 1);
9736 		}
9737 		chk->data = m_ack;
9738 		chk->send_size = ack->len;
9739 		chk->sent = SCTP_DATAGRAM_UNSENT;
9740 		chk->snd_count = 0;
9741 		chk->asoc = &stcb->asoc;
9742 
9743 		TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9744 		chk->asoc->ctrl_queue_cnt++;
9745 	}
9746 	return;
9747 }
9748 
9749 
9750 static int
9751 sctp_chunk_retransmission(struct sctp_inpcb *inp,
9752     struct sctp_tcb *stcb,
9753     struct sctp_association *asoc,
9754     int *cnt_out, struct timeval *now, int *now_filled, int *fr_done, int so_locked
9755 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
9756     SCTP_UNUSED
9757 #endif
9758     )
9759 {
9760 	/*-
9761 	 * send out one MTU of retransmission. If fast_retransmit is
9762 	 * happening we ignore the cwnd. Otherwise we obey the cwnd and
9763 	 * rwnd. For a Cookie or Asconf in the control chunk queue we
9764 	 * retransmit them by themselves.
9765 	 *
	 * For data chunks we will pick out the lowest TSNs in the sent_queue
	 * marked for resend and bundle them all together (up to an MTU for
	 * the destination). The address to send to should have been
9769 	 * selected/changed where the retransmission was marked (i.e. in FR
9770 	 * or t3-timeout routines).
9771 	 */
9772 	struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
9773 	struct sctp_tmit_chunk *chk, *fwd;
9774 	struct mbuf *m, *endofchain;
9775 	struct sctp_nets *net = NULL;
9776 	uint32_t tsns_sent = 0;
9777 	int no_fragmentflg, bundle_at, cnt_thru;
9778 	unsigned int mtu;
9779 	int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started;
9780 	struct sctp_auth_chunk *auth = NULL;
9781 	uint32_t auth_offset = 0;
9782 	uint16_t auth_keyid;
9783 	int override_ok = 1;
9784 	int data_auth_reqd = 0;
9785 	uint32_t dmtu = 0;
9786 
9787 #if defined(__APPLE__)
9788 	if (so_locked) {
9789 		sctp_lock_assert(SCTP_INP_SO(inp));
9790 	} else {
9791 		sctp_unlock_assert(SCTP_INP_SO(inp));
9792 	}
9793 #endif
9794 	SCTP_TCB_LOCK_ASSERT(stcb);
9795 	tmr_started = ctl_cnt = bundle_at = error = 0;
9796 	no_fragmentflg = 1;
9797 	fwd_tsn = 0;
9798 	*cnt_out = 0;
9799 	fwd = NULL;
9800 	endofchain = m = NULL;
9801 	auth_keyid = stcb->asoc.authinfo.active_keyid;
9802 #ifdef SCTP_AUDITING_ENABLED
9803 	sctp_audit_log(0xC3, 1);
9804 #endif
9805 	if ((TAILQ_EMPTY(&asoc->sent_queue)) &&
9806 	    (TAILQ_EMPTY(&asoc->control_send_queue))) {
9807 		SCTPDBG(SCTP_DEBUG_OUTPUT1,"SCTP hits empty queue with cnt set to %d?\n",
9808 			asoc->sent_queue_retran_cnt);
9809 		asoc->sent_queue_cnt = 0;
9810 		asoc->sent_queue_cnt_removeable = 0;
9811 		/* send back 0/0 so we enter normal transmission */
9812 		*cnt_out = 0;
9813 		return (0);
9814 	}
9815 	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
9816 		if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) ||
9817 		    (chk->rec.chunk_id.id == SCTP_STREAM_RESET) ||
9818 		    (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) {
9819 			if (chk->sent != SCTP_DATAGRAM_RESEND) {
9820 				continue;
9821 			}
9822 			if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
9823 				if (chk != asoc->str_reset) {
9824 					/*
9825 					 * not eligible for retran if its
9826 					 * not ours
9827 					 */
9828 					continue;
9829 				}
9830 			}
9831 			ctl_cnt++;
9832 			if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
9833 				fwd_tsn = 1;
9834 			}
9835 			/*
9836 			 * Add an AUTH chunk, if chunk requires it save the
9837 			 * offset into the chain for AUTH
9838 			 */
9839 			if ((auth == NULL) &&
9840 			    (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
9841 							 stcb->asoc.peer_auth_chunks))) {
9842 				m = sctp_add_auth_chunk(m, &endofchain,
9843 							&auth, &auth_offset,
9844 							stcb,
9845 							chk->rec.chunk_id.id);
9846 				SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9847 			}
9848 			m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
9849 			break;
9850 		}
9851 	}
9852 	one_chunk = 0;
9853 	cnt_thru = 0;
9854 	/* do we have control chunks to retransmit? */
9855 	if (m != NULL) {
		/* Start a timer no matter if we succeed or fail */
9857 		if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
9858 			sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo);
9859 		} else if (chk->rec.chunk_id.id == SCTP_ASCONF)
9860 			sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo);
9861 		chk->snd_count++;	/* update our count */
9862 		if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo,
9863 		                                        (struct sockaddr *)&chk->whoTo->ro._l_addr, m,
9864 		                                        auth_offset, auth, stcb->asoc.authinfo.active_keyid,
9865 		                                        no_fragmentflg, 0, 0,
9866 		                                        inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
9867 		                                        chk->whoTo->port, NULL,
9868 #if defined(__FreeBSD__)
9869 		                                        0, 0,
9870 #endif
9871 		                                        so_locked))) {
9872 			SCTP_STAT_INCR(sctps_lowlevelerr);
9873 			return (error);
9874 		}
9875 		endofchain = NULL;
9876 		auth = NULL;
9877 		auth_offset = 0;
9878 		/*
9879 		 * We don't want to mark the net->sent time here since this
9880 		 * we use this for HB and retrans cannot measure RTT
9881 		 */
9882 		/* (void)SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time); */
9883 		*cnt_out += 1;
9884 		chk->sent = SCTP_DATAGRAM_SENT;
9885 		sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
9886 		if (fwd_tsn == 0) {
9887 			return (0);
9888 		} else {
9889 			/* Clean up the fwd-tsn list */
9890 			sctp_clean_up_ctl(stcb, asoc, so_locked);
9891 			return (0);
9892 		}
9893 	}
9894 	/*
9895 	 * Ok, it is just data retransmission we need to do or that and a
9896 	 * fwd-tsn with it all.
9897 	 */
9898 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
9899 		return (SCTP_RETRAN_DONE);
9900 	}
9901 	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) ||
9902 	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT)) {
9903 		/* not yet open, resend the cookie and that is it */
9904 		return (1);
9905 	}
9906 #ifdef SCTP_AUDITING_ENABLED
9907 	sctp_auditing(20, inp, stcb, NULL);
9908 #endif
9909 	data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks);
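	/*
	 * Walk the sent queue and bundle chunks marked for retransmission,
	 * filling at most one MTU toward a single destination.
	 */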
9910 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
9911 		if (chk->sent != SCTP_DATAGRAM_RESEND) {
9912 			/* No, not sent to this net or not ready for rtx */
9913 			continue;
9914 		}
9915 		if (chk->data == NULL) {
9916 			SCTP_PRINTF("TSN:%x chk->snd_count:%d chk->sent:%d can't retran - no data\n",
9917 			            chk->rec.data.TSN_seq, chk->snd_count, chk->sent);
9918 			continue;
9919 		}
9920 		if ((SCTP_BASE_SYSCTL(sctp_max_retran_chunk)) &&
9921 		    (chk->snd_count >= SCTP_BASE_SYSCTL(sctp_max_retran_chunk))) {
9922 			/* Gak, we have exceeded max unlucky retran, abort! */
9923 			SCTP_PRINTF("Gak, chk->snd_count:%d >= max:%d - send abort\n",
9924 				    chk->snd_count,
9925 				    SCTP_BASE_SYSCTL(sctp_max_retran_chunk));
9926 			atomic_add_int(&stcb->asoc.refcnt, 1);
9927 			sctp_abort_an_association(stcb->sctp_ep, stcb, NULL, so_locked);
9928 			SCTP_TCB_LOCK(stcb);
9929 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
9930 			return (SCTP_RETRAN_EXIT);
9931 		}
9932 		/* pick up the net */
9933 		net = chk->whoTo;
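		/*
		 * Derive the usable payload space from the path MTU by
		 * subtracting the per-family IP plus SCTP overhead.
		 */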
9934 		switch (net->ro._l_addr.sa.sa_family) {
9935 #ifdef INET
9936 			case AF_INET:
9937 				mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
9938 				break;
9939 #endif
9940 #ifdef INET6
9941 			case AF_INET6:
9942 				mtu = net->mtu - SCTP_MIN_OVERHEAD;
9943 				break;
9944 #endif
9945 #if defined(__Userspace__)
9946 			case AF_CONN:
9947 				mtu = net->mtu - sizeof(struct sctphdr);
9948 				break;
9949 #endif
9950 			default:
9951 				/* TSNH */
9952 				mtu = net->mtu;
9953 				break;
9954 		}
9955 
9956 		if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) {
9957 			/* No room in peers rwnd */
9958 			uint32_t tsn;
9959 
9960 			tsn = asoc->last_acked_seq + 1;
9961 			if (tsn == chk->rec.data.TSN_seq) {
9962 				/*
9963 				 * we make a special exception for this
9964 				 * case. The peer has no rwnd but is missing
9965 				 * the lowest chunk.. which is probably what
9966 				 * is holding up the rwnd.
9967 				 */
9968 				goto one_chunk_around;
9969 			}
9970 			return (1);
9971 		}
9972 	one_chunk_around:
9973 		if (asoc->peers_rwnd < mtu) {
9974 			one_chunk = 1;
9975 			if ((asoc->peers_rwnd == 0) &&
9976 			    (asoc->total_flight == 0)) {
9977 				chk->window_probe = 1;
9978 				chk->whoTo->window_probe = 1;
9979 			}
9980 		}
9981 #ifdef SCTP_AUDITING_ENABLED
9982 		sctp_audit_log(0xC3, 2);
9983 #endif
9984 		bundle_at = 0;
9985 		m = NULL;
9986 		net->fast_retran_ip = 0;
9987 		if (chk->rec.data.doing_fast_retransmit == 0) {
9988 			/*
9989 			 * if no FR in progress skip destination that have
9990 			 * flight_size > cwnd.
9991 			 */
9992 			if (net->flight_size >= net->cwnd) {
9993 				continue;
9994 			}
9995 		} else {
9996 			/*
9997 			 * Mark the destination net to have FR recovery
9998 			 * limits put on it.
9999 			 */
10000 			*fr_done = 1;
10001 			net->fast_retran_ip = 1;
10002 		}
10003 
10004 		/*
10005 		 * if no AUTH is yet included and this chunk requires it,
10006 		 * make sure to account for it.  We don't apply the size
10007 		 * until the AUTH chunk is actually added below in case
10008 		 * there is no room for this chunk.
10009 		 */
10010 		if (data_auth_reqd && (auth == NULL)) {
10011 			dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
10012 		} else
10013 			dmtu = 0;
10014 
10015 		if ((chk->send_size <= (mtu - dmtu)) ||
10016 		    (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
10017 			/* ok we will add this one */
10018 			if (data_auth_reqd) {
10019 				if (auth == NULL) {
10020 					m = sctp_add_auth_chunk(m,
10021 								&endofchain,
10022 								&auth,
10023 								&auth_offset,
10024 								stcb,
10025 								SCTP_DATA);
10026 					auth_keyid = chk->auth_keyid;
10027 					override_ok = 0;
10028 					SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10029 				} else if (override_ok) {
10030 					auth_keyid = chk->auth_keyid;
10031 					override_ok = 0;
10032 				} else if (chk->auth_keyid != auth_keyid) {
10033 					/* different keyid, so done bundling */
10034 					break;
10035 				}
10036 			}
10037 			m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
10038 			if (m == NULL) {
10039 				SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
10040 				return (ENOMEM);
10041 			}
			/* Do we clear IP_DF? */
10043 			if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
10044 				no_fragmentflg = 0;
10045 			}
			/* update our MTU size */
10047 			if (mtu > (chk->send_size + dmtu))
10048 				mtu -= (chk->send_size + dmtu);
10049 			else
10050 				mtu = 0;
10051 			data_list[bundle_at++] = chk;
10052 			if (one_chunk && (asoc->total_flight <= 0)) {
10053 				SCTP_STAT_INCR(sctps_windowprobed);
10054 			}
10055 		}
10056 		if (one_chunk == 0) {
10057 			/*
10058 			 * now are there anymore forward from chk to pick
10059 			 * up?
10060 			 */
10061 			for (fwd = TAILQ_NEXT(chk, sctp_next); fwd != NULL; fwd = TAILQ_NEXT(fwd, sctp_next)) {
10062 				if (fwd->sent != SCTP_DATAGRAM_RESEND) {
10063 					/* Nope, not for retran */
10064 					continue;
10065 				}
10066 				if (fwd->whoTo != net) {
10067 					/* Nope, not the net in question */
10068 					continue;
10069 				}
10070 				if (data_auth_reqd && (auth == NULL)) {
10071 					dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
10072 				} else
10073 					dmtu = 0;
10074 				if (fwd->send_size <= (mtu - dmtu)) {
10075 					if (data_auth_reqd) {
10076 						if (auth == NULL) {
10077 							m = sctp_add_auth_chunk(m,
10078 										&endofchain,
10079 										&auth,
10080 										&auth_offset,
10081 										stcb,
10082 										SCTP_DATA);
10083 							auth_keyid = fwd->auth_keyid;
10084 							override_ok = 0;
10085 							SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10086 						} else if (override_ok) {
10087 							auth_keyid = fwd->auth_keyid;
10088 							override_ok = 0;
10089 						} else if (fwd->auth_keyid != auth_keyid) {
10090 							/* different keyid, so done bundling */
10091 							break;
10092 						}
10093 					}
10094 					m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref);
10095 					if (m == NULL) {
10096 						SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
10097 						return (ENOMEM);
10098 					}
					/* Do we clear IP_DF? */
10100 					if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) {
10101 						no_fragmentflg = 0;
10102 					}
					/* update our MTU size */
10104 					if (mtu > (fwd->send_size + dmtu))
10105 						mtu -= (fwd->send_size + dmtu);
10106 					else
10107 						mtu = 0;
10108 					data_list[bundle_at++] = fwd;
10109 					if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
10110 						break;
10111 					}
10112 				} else {
10113 					/* can't fit so we are done */
10114 					break;
10115 				}
10116 			}
10117 		}
10118 		/* Is there something to send for this destination? */
10119 		if (m) {
10120 			/*
10121 			 * No matter if we fail/or suceed we should start a
10122 			 * timer. A failure is like a lost IP packet :-)
10123 			 */
10124 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
10125 				/*
10126 				 * no timer running on this destination
10127 				 * restart it.
10128 				 */
10129 				sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
10130 				tmr_started = 1;
10131 			}
			/* Now let's send it, if there is anything to send :> */
10133 			if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
10134 			                                        (struct sockaddr *)&net->ro._l_addr, m,
10135 			                                        auth_offset, auth, auth_keyid,
10136 			                                        no_fragmentflg, 0, 0,
10137 			                                        inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
10138 			                                        net->port, NULL,
10139 #if defined(__FreeBSD__)
10140 			                                        0, 0,
10141 #endif
10142 			                                        so_locked))) {
10143 				/* error, we could not output */
10144 				SCTP_STAT_INCR(sctps_lowlevelerr);
10145 				return (error);
10146 			}
10147 			endofchain = NULL;
10148 			auth = NULL;
10149 			auth_offset = 0;
10150 			/* For HB's */
10151 			/*
10152 			 * We don't want to mark the net->sent time here
10153 			 * since this we use this for HB and retrans cannot
10154 			 * measure RTT
10155 			 */
10156 			/* (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); */
10157 
10158 			/* For auto-close */
10159 			cnt_thru++;
10160 			if (*now_filled == 0) {
10161 				(void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
10162 				*now = asoc->time_last_sent;
10163 				*now_filled = 1;
10164 			} else {
10165 				asoc->time_last_sent = *now;
10166 			}
10167 			*cnt_out += bundle_at;
10168 #ifdef SCTP_AUDITING_ENABLED
10169 			sctp_audit_log(0xC4, bundle_at);
10170 #endif
10171 			if (bundle_at) {
10172 				tsns_sent = data_list[0]->rec.data.TSN_seq;
10173 			}
10174 			for (i = 0; i < bundle_at; i++) {
10175 				SCTP_STAT_INCR(sctps_sendretransdata);
10176 				data_list[i]->sent = SCTP_DATAGRAM_SENT;
10177 				/*
10178 				 * When we have a revoked data, and we
10179 				 * retransmit it, then we clear the revoked
10180 				 * flag since this flag dictates if we
10181 				 * subtracted from the fs
10182 				 */
10183 				if (data_list[i]->rec.data.chunk_was_revoked) {
10184 					/* Deflate the cwnd */
10185 					data_list[i]->whoTo->cwnd -= data_list[i]->book_size;
10186 					data_list[i]->rec.data.chunk_was_revoked = 0;
10187 				}
10188 				data_list[i]->snd_count++;
10189 				sctp_ucount_decr(asoc->sent_queue_retran_cnt);
10190 				/* record the time */
10191 				data_list[i]->sent_rcv_time = asoc->time_last_sent;
10192 				if (data_list[i]->book_size_scale) {
10193 					/*
10194 					 * need to double the book size on
10195 					 * this one
10196 					 */
10197 					data_list[i]->book_size_scale = 0;
					/* Since we double the book size, we must
					 * also grow the output queue size, since it
					 * gets shrunk by this amount when we free.
					 */
10202 					atomic_add_int(&((asoc)->total_output_queue_size),data_list[i]->book_size);
10203 					data_list[i]->book_size *= 2;
10204 
10205 
10206 				} else {
10207 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
10208 						sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
10209 						      asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
10210 					}
10211 					asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
10212 									    (uint32_t) (data_list[i]->send_size +
10213 											SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
10214 				}
10215 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
10216 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_RSND,
10217 						       data_list[i]->whoTo->flight_size,
10218 						       data_list[i]->book_size,
10219 						       (uintptr_t)data_list[i]->whoTo,
10220 						       data_list[i]->rec.data.TSN_seq);
10221 				}
10222 				sctp_flight_size_increase(data_list[i]);
10223 				sctp_total_flight_increase(stcb, data_list[i]);
10224 				if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
10225 					/* SWS sender side engages */
10226 					asoc->peers_rwnd = 0;
10227 				}
10228 				if ((i == 0) &&
10229 				    (data_list[i]->rec.data.doing_fast_retransmit)) {
10230 					SCTP_STAT_INCR(sctps_sendfastretrans);
10231 					if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) &&
10232 					    (tmr_started == 0)) {
10233 						/*-
10234 						 * ok we just fast-retrans'd
10235 						 * the lowest TSN, i.e the
10236 						 * first on the list. In
10237 						 * this case we want to give
10238 						 * some more time to get a
10239 						 * SACK back without a
10240 						 * t3-expiring.
10241 						 */
10242 						sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
10243 								SCTP_FROM_SCTP_OUTPUT+SCTP_LOC_4);
10244 						sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
10245 					}
10246 				}
10247 			}
10248 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10249 				sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND);
10250 			}
10251 #ifdef SCTP_AUDITING_ENABLED
10252 			sctp_auditing(21, inp, stcb, NULL);
10253 #endif
10254 		} else {
10255 			/* None will fit */
10256 			return (1);
10257 		}
10258 		if (asoc->sent_queue_retran_cnt <= 0) {
10259 			/* all done we have no more to retran */
10260 			asoc->sent_queue_retran_cnt = 0;
10261 			break;
10262 		}
10263 		if (one_chunk) {
10264 			/* No more room in rwnd */
10265 			return (1);
10266 		}
10267 		/* stop the for loop here. we sent out a packet */
10268 		break;
10269 	}
10270 	return (0);
10271 }
10272 
10273 static void
10274 sctp_timer_validation(struct sctp_inpcb *inp,
10275     struct sctp_tcb *stcb,
10276     struct sctp_association *asoc)
10277 {
10278 	struct sctp_nets *net;
10279 
10280 	/* Validate that a timer is running somewhere */
10281 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
10282 		if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
10283 			/* Here is a timer */
10284 			return;
10285 		}
10286 	}
10287 	SCTP_TCB_LOCK_ASSERT(stcb);
10288 	/* Gak, we did not have a timer somewhere */
10289 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "Deadlock avoided starting timer on a dest at retran\n");
10290 	if (asoc->alternate) {
10291 		sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->alternate);
10292 	} else {
10293 		sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination);
10294 	}
10295 	return;
10296 }
10297 
10298 void
10299 sctp_chunk_output (struct sctp_inpcb *inp,
10300     struct sctp_tcb *stcb,
10301     int from_where,
10302     int so_locked
10303 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
10304     SCTP_UNUSED
10305 #endif
10306     )
10307 {
10308 	/*-
10309 	 * Ok this is the generic chunk service queue. we must do the
10310 	 * following:
10311 	 * - See if there are retransmits pending, if so we must
10312 	 *   do these first.
	 * - Service the stream queue that is next, moving any
	 *   message (note I must get a complete message, i.e.
	 *   FIRST/MIDDLE and LAST, to the out queue in one pass) and
	 *   assigning TSNs
	 * - Check to see if the cwnd/rwnd allows any output; if so we
	 *   go ahead and formulate and send the low level chunks, making
	 *   sure to combine any control in the control chunk queue also.
10320 	 */
10321 	struct sctp_association *asoc;
10322 	struct sctp_nets *net;
10323 	int error = 0, num_out, tot_out = 0, ret = 0, reason_code;
10324 	unsigned int burst_cnt = 0;
10325 	struct timeval now;
10326 	int now_filled = 0;
10327 	int nagle_on;
10328 	int frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
10329 	int un_sent = 0;
10330 	int fr_done;
10331 	unsigned int tot_frs = 0;
10332 
10333 #if defined(__APPLE__)
10334 	if (so_locked) {
10335 		sctp_lock_assert(SCTP_INP_SO(inp));
10336 	} else {
10337 		sctp_unlock_assert(SCTP_INP_SO(inp));
10338 	}
10339 #endif
10340 	asoc = &stcb->asoc;
10341 	/* The Nagle algorithm is only applied when handling a send call. */
10342 	if (from_where == SCTP_OUTPUT_FROM_USR_SEND) {
10343 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY)) {
10344 			nagle_on = 0;
10345 		} else {
10346 			nagle_on = 1;
10347 		}
10348 	} else {
10349 		nagle_on = 0;
10350 	}
10351 	SCTP_TCB_LOCK_ASSERT(stcb);
10352 
10353 	un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
10354 
10355 	if ((un_sent <= 0) &&
10356 	    (TAILQ_EMPTY(&asoc->control_send_queue)) &&
10357 	    (TAILQ_EMPTY(&asoc->asconf_send_queue)) &&
10358 	    (asoc->sent_queue_retran_cnt == 0)) {
		/* Nothing to do unless there is something left to be sent */
10360 		return;
10361 	}
	/* If we have something to send, data or control, AND
	 * a SACK timer is running, piggy-back the SACK.
	 */
10365 	if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
10366 		sctp_send_sack(stcb, so_locked);
10367 		(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
10368 	}
10369 	while (asoc->sent_queue_retran_cnt) {
10370 		/*-
10371 		 * Ok, it is retransmission time only, we send out only ONE
10372 		 * packet with a single call off to the retran code.
10373 		 */
10374 		if (from_where == SCTP_OUTPUT_FROM_COOKIE_ACK) {
10375 			/*-
10376 			 * Special hook for handling cookiess discarded
10377 			 * by peer that carried data. Send cookie-ack only
10378 			 * and then the next call with get the retran's.
10379 			 */
10380 			(void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
10381 						    from_where,
10382 						    &now, &now_filled, frag_point, so_locked);
10383 			return;
10384 		} else if (from_where != SCTP_OUTPUT_FROM_HB_TMR) {
			/* if it's not from a HB then do it */
10386 			fr_done = 0;
10387 			ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done, so_locked);
10388 			if (fr_done) {
10389 				tot_frs++;
10390 			}
10391 		} else {
10392 			/*
10393 			 * its from any other place, we don't allow retran
10394 			 * output (only control)
10395 			 */
10396 			ret = 1;
10397 		}
10398 		if (ret > 0) {
10399 			/* Can't send anymore */
10400 			/*-
10401 			 * now lets push out control by calling med-level
10402 			 * output once. this assures that we WILL send HB's
10403 			 * if queued too.
10404 			 */
10405 			(void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
10406 						    from_where,
10407 						    &now, &now_filled, frag_point, so_locked);
10408 #ifdef SCTP_AUDITING_ENABLED
10409 			sctp_auditing(8, inp, stcb, NULL);
10410 #endif
10411 			sctp_timer_validation(inp, stcb, asoc);
10412 			return;
10413 		}
10414 		if (ret < 0) {
10415 			/*-
10416 			 * The count was off.. retran is not happening so do
10417 			 * the normal retransmission.
10418 			 */
10419 #ifdef SCTP_AUDITING_ENABLED
10420 			sctp_auditing(9, inp, stcb, NULL);
10421 #endif
10422 			if (ret == SCTP_RETRAN_EXIT) {
10423 				return;
10424 			}
10425 			break;
10426 		}
10427 		if (from_where == SCTP_OUTPUT_FROM_T3) {
10428 			/* Only one transmission allowed out of a timeout */
10429 #ifdef SCTP_AUDITING_ENABLED
10430 			sctp_auditing(10, inp, stcb, NULL);
10431 #endif
10432 			/* Push out any control */
10433 			(void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, from_where,
10434 						    &now, &now_filled, frag_point, so_locked);
10435 			return;
10436 		}
10437 		if ((asoc->fr_max_burst > 0) && (tot_frs >= asoc->fr_max_burst)) {
10438 			/* Hit FR burst limit */
10439 			return;
10440 		}
10441 		if ((num_out == 0) && (ret == 0)) {
10442 			/* No more retrans to send */
10443 			break;
10444 		}
10445 	}
10446 #ifdef SCTP_AUDITING_ENABLED
10447 	sctp_auditing(12, inp, stcb, NULL);
10448 #endif
10449 	/* Check for bad destinations, if they exist move chunks around. */
10450 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
10451 		if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
10452 			/*-
10453 			 * if possible move things off of this address we
10454 			 * still may send below due to the dormant state but
10455 			 * we try to find an alternate address to send to
10456 			 * and if we have one we move all queued data on the
10457 			 * out wheel to this alternate address.
10458 			 */
10459 			if (net->ref_count > 1)
10460 				sctp_move_chunks_from_net(stcb, net);
10461 		} else {
10462 			/*-
10463 			 * if ((asoc->sat_network) || (net->addr_is_local))
10464 			 * { burst_limit = asoc->max_burst *
10465 			 * SCTP_SAT_NETWORK_BURST_INCR; }
10466 			 */
10467 			if (asoc->max_burst > 0) {
10468 				if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst)) {
10469 					if ((net->flight_size + (asoc->max_burst * net->mtu)) < net->cwnd) {
10470 						/* JRS - Use the congestion control given in the congestion control module */
10471 						asoc->cc_functions.sctp_cwnd_update_after_output(stcb, net, asoc->max_burst);
10472 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10473 							sctp_log_maxburst(stcb, net, 0, asoc->max_burst, SCTP_MAX_BURST_APPLIED);
10474 						}
10475 						SCTP_STAT_INCR(sctps_maxburstqueued);
10476 					}
10477 					net->fast_retran_ip = 0;
10478 				} else {
10479 					if (net->flight_size == 0) {
10480 						/* Should be decaying the cwnd here */
10481 						;
10482 					}
10483 				}
10484 			}
10485 		}
10486 
10487 	}
10488 	burst_cnt = 0;
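	/*
	 * Main send loop: keep calling the medium-level output routine
	 * until it produces no more packets, an error occurs, or the
	 * burst limit is reached.
	 */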
10489 	do {
10490 		error = sctp_med_chunk_output(inp, stcb, asoc, &num_out,
10491 					      &reason_code, 0, from_where,
10492 					      &now, &now_filled, frag_point, so_locked);
10493 		if (error) {
10494 			SCTPDBG(SCTP_DEBUG_OUTPUT1, "Error %d was returned from med-c-op\n", error);
10495 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10496 				sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP);
10497 			}
10498 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10499 				sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES);
10500 				sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES);
10501 			}
10502 			break;
10503 		}
10504 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "m-c-o put out %d\n", num_out);
10505 
10506 		tot_out += num_out;
10507 		burst_cnt++;
10508 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10509 			sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES);
10510 			if (num_out == 0) {
10511 				sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES);
10512 			}
10513 		}
10514 		if (nagle_on) {
10515 			/*
10516 			 * When the Nagle algorithm is used, look at how much
10517 			 * is unsent, then if its smaller than an MTU and we
10518 			 * have data in flight we stop, except if we are
10519 			 * handling a fragmented user message.
10520 			 */
10521 			un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
10522 			           (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
10523 			if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) &&
10524 			    (stcb->asoc.total_flight > 0) &&
10525 			    ((stcb->asoc.locked_on_sending == NULL) ||
10526 			     sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
10527 				break;
10528 			}
10529 		}
10530 		if (TAILQ_EMPTY(&asoc->control_send_queue) &&
10531 		    TAILQ_EMPTY(&asoc->send_queue) &&
10532 		    stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
10533 			/* Nothing left to send */
10534 			break;
10535 		}
10536 		if ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) <= 0) {
10537 			/* Nothing left to send */
10538 			break;
10539 		}
10540 	} while (num_out &&
10541 	         ((asoc->max_burst == 0) ||
10542 		  SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) ||
10543 		  (burst_cnt < asoc->max_burst)));
10544 
10545 	if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) == 0) {
10546 		if ((asoc->max_burst > 0) && (burst_cnt >= asoc->max_burst)) {
10547 			SCTP_STAT_INCR(sctps_maxburstqueued);
10548 			asoc->burst_limit_applied = 1;
10549 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10550 				sctp_log_maxburst(stcb, asoc->primary_destination, 0, burst_cnt, SCTP_MAX_BURST_APPLIED);
10551 			}
10552 		} else {
10553 			asoc->burst_limit_applied = 0;
10554 		}
10555 	}
10556 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10557 		sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES);
10558 	}
10559 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, we have put out %d chunks\n",
10560 		tot_out);
10561 
10562 	/*-
10563 	 * Now we need to clean up the control chunk chain if a ECNE is on
10564 	 * it. It must be marked as UNSENT again so next call will continue
10565 	 * to send it until such time that we get a CWR, to remove it.
10566 	 */
10567 	if (stcb->asoc.ecn_echo_cnt_onq)
10568 		sctp_fix_ecn_echo(asoc);
10569 	return;
10570 }
10571 
10572 
10573 int
10574 sctp_output(
10575 	struct sctp_inpcb *inp,
10576 #if defined(__Panda__)
10577 	pakhandle_type m,
10578 #else
10579 	struct mbuf *m,
10580 #endif
10581 	struct sockaddr *addr,
10582 #if defined(__Panda__)
10583 	pakhandle_type control,
10584 #else
10585 	struct mbuf *control,
10586 #endif
10587 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
10588 	struct thread *p,
10589 #elif defined(__Windows__)
10590 	PKTHREAD p,
10591 #else
10592 #if defined(__APPLE__)
10593 	struct proc *p SCTP_UNUSED,
10594 #else
10595 	struct proc *p,
10596 #endif
10597 #endif
10598 	int flags)
10599 {
10600 	if (inp == NULL) {
10601 		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
10602 		return (EINVAL);
10603 	}
10604 
10605 	if (inp->sctp_socket == NULL) {
10606 		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
10607 		return (EINVAL);
10608 	}
10609 	return (sctp_sosend(inp->sctp_socket,
10610 			    addr,
10611 			    (struct uio *)NULL,
10612 			    m,
10613 			    control,
10614 #if defined(__APPLE__) || defined(__Panda__)
10615 			    flags
10616 #else
10617 			    flags, p
10618 #endif
10619 			));
10620 }
10621 
10622 void
10623 send_forward_tsn(struct sctp_tcb *stcb,
10624 		 struct sctp_association *asoc)
10625 {
	struct sctp_tmit_chunk *chk;
10627 	struct sctp_forward_tsn_chunk *fwdtsn;
10628 	uint32_t advance_peer_ack_point;
10629 
	SCTP_TCB_LOCK_ASSERT(stcb);
10631 	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10632 		if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
			/* mark it as unsent */
10634 			chk->sent = SCTP_DATAGRAM_UNSENT;
10635 			chk->snd_count = 0;
10636 			/* Do we correct its output location? */
10637 			if (chk->whoTo) {
10638 				sctp_free_remote_addr(chk->whoTo);
10639 				chk->whoTo = NULL;
10640 			}
10641 			goto sctp_fill_in_rest;
10642 		}
10643 	}
10644 	/* Ok if we reach here we must build one */
10645 	sctp_alloc_a_chunk(stcb, chk);
10646 	if (chk == NULL) {
10647 		return;
10648 	}
10649 	asoc->fwd_tsn_cnt++;
10650 	chk->copy_by_ref = 0;
10651 	chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN;
10652 	chk->rec.chunk_id.can_take_data = 0;
10653 	chk->flags = 0;
10654 	chk->asoc = asoc;
10655 	chk->whoTo = NULL;
10656 	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
10657 	if (chk->data == NULL) {
10658 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
10659 		return;
10660 	}
10661 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
10662 	chk->sent = SCTP_DATAGRAM_UNSENT;
10663 	chk->snd_count = 0;
10664 	TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next);
10665 	asoc->ctrl_queue_cnt++;
10666 sctp_fill_in_rest:
10667 	/*-
10668 	 * Here we go through and fill out the part that deals with
10669 	 * stream/seq of the ones we skip.
10670 	 */
10671 	SCTP_BUF_LEN(chk->data) = 0;
10672 	{
10673 		struct sctp_tmit_chunk *at, *tp1, *last;
10674 		struct sctp_strseq *strseq;
10675 		unsigned int cnt_of_space, i, ovh;
10676 		unsigned int space_needed;
10677 		unsigned int cnt_of_skipped = 0;
10678 
10679 		TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
10680 			if ((at->sent != SCTP_FORWARD_TSN_SKIP) &&
10681 			    (at->sent != SCTP_DATAGRAM_NR_ACKED)) {
10682 				/* no more to look at */
10683 				break;
10684 			}
10685 			if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
10686 				/* We don't report these */
10687 				continue;
10688 			}
10689 			cnt_of_skipped++;
10690 		}
10691 		space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
10692 		    (cnt_of_skipped * sizeof(struct sctp_strseq)));
10693 
10694 		cnt_of_space = M_TRAILINGSPACE(chk->data);
10695 
10696 		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
10697 			ovh = SCTP_MIN_OVERHEAD;
10698 		} else {
10699 			ovh = SCTP_MIN_V4_OVERHEAD;
10700 		}
10701 		if (cnt_of_space > (asoc->smallest_mtu - ovh)) {
			/* trim to an MTU size */
10703 			cnt_of_space = asoc->smallest_mtu - ovh;
10704 		}
10705 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10706 			sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10707 				       0xff, 0, cnt_of_skipped,
10708 				       asoc->advanced_peer_ack_point);
10709 
10710 		}
10711 		advance_peer_ack_point = asoc->advanced_peer_ack_point;
10712 		if (cnt_of_space < space_needed) {
10713 			/*-
10714 			 * ok we must trim down the chunk by lowering the
10715 			 * advance peer ack point.
10716 			 */
10717 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10718 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10719 					       0xff, 0xff, cnt_of_space,
10720 					       space_needed);
10721 			}
10722 			cnt_of_skipped = cnt_of_space - sizeof(struct sctp_forward_tsn_chunk);
10723 			cnt_of_skipped /= sizeof(struct sctp_strseq);
10724 			/*-
10725 			 * Go through and find the TSN that will be the one
10726 			 * we report.
10727 			 */
10728 			at = TAILQ_FIRST(&asoc->sent_queue);
10729 			if (at != NULL) {
10730 				for (i = 0; i < cnt_of_skipped; i++) {
10731 					tp1 = TAILQ_NEXT(at, sctp_next);
10732 					if (tp1 == NULL) {
10733 						break;
10734 					}
10735 					at = tp1;
10736 				}
10737 			}
10738 			if (at && SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10739 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10740 					       0xff, cnt_of_skipped, at->rec.data.TSN_seq,
10741 					       asoc->advanced_peer_ack_point);
10742 			}
10743 			last = at;
10744 			/*-
10745 			 * last now points to last one I can report, update
10746 			 * peer ack point
10747 			 */
10748 			if (last)
10749 				advance_peer_ack_point = last->rec.data.TSN_seq;
10750 			space_needed = sizeof(struct sctp_forward_tsn_chunk) +
10751 			               cnt_of_skipped * sizeof(struct sctp_strseq);
10752 		}
10753 		chk->send_size = space_needed;
10754 		/* Setup the chunk */
10755 		fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *);
10756 		fwdtsn->ch.chunk_length = htons(chk->send_size);
10757 		fwdtsn->ch.chunk_flags = 0;
10758 		fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN;
10759 		fwdtsn->new_cumulative_tsn = htonl(advance_peer_ack_point);
10760 		SCTP_BUF_LEN(chk->data) = chk->send_size;
10761 		fwdtsn++;
10762 		/*-
10763 		 * Move pointer to after the fwdtsn and transfer to the
10764 		 * strseq pointer.
10765 		 */
10766 		strseq = (struct sctp_strseq *)fwdtsn;
10767 		/*-
		 * Now populate the strseq list. This is done blindly
		 * without pulling out duplicate stream info. This is
		 * inefficient but won't harm the process since the peer
		 * will look at these in sequence and will thus release
		 * anything. It could mean we exceed the PMTU and chop off
		 * some that we could have included, but this is unlikely
		 * (aka 1432/4 would mean 300+ stream/seq pairs would have
		 * to be reported in one FWD-TSN). With a bit of work we
		 * can later FIX this to optimize and pull out duplicates,
		 * but it does add more overhead. So for now... not!
10778 		 */
10779 		at = TAILQ_FIRST(&asoc->sent_queue);
10780 		for (i = 0; i < cnt_of_skipped; i++) {
10781 			tp1 = TAILQ_NEXT(at, sctp_next);
10782 			if (tp1 == NULL)
10783 				break;
10784 			if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
10785 				/* We don't report these */
10786 				i--;
10787 				at = tp1;
10788 				continue;
10789 			}
10790 			if (at->rec.data.TSN_seq == advance_peer_ack_point) {
10791 				at->rec.data.fwd_tsn_cnt = 0;
10792 			}
			strseq->stream = htons(at->rec.data.stream_number);
			strseq->sequence = htons(at->rec.data.stream_seq);
10795 			strseq++;
10796 			at = tp1;
10797 		}
10798 	}
10799 	return;
10800 }
10801 
10802 void
10803 sctp_send_sack(struct sctp_tcb *stcb, int so_locked
10804 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
10805 	SCTP_UNUSED
10806 #endif
10807 )
10808 {
10809 	/*-
10810 	 * Queue up a SACK or NR-SACK in the control queue.
10811 	 * We must first check to see if a SACK or NR-SACK is
10812 	 * somehow on the control queue.
	 * If so, we will remove the old one and reuse it.
10814 	 */
10815 	struct sctp_association *asoc;
10816 	struct sctp_tmit_chunk *chk, *a_chk;
10817 	struct sctp_sack_chunk *sack;
10818 	struct sctp_nr_sack_chunk *nr_sack;
10819 	struct sctp_gap_ack_block *gap_descriptor;
10820 	struct sack_track *selector;
10821 	int mergeable = 0;
10822 	int offset;
10823 	caddr_t limit;
10824 	uint32_t *dup;
10825 	int limit_reached = 0;
10826 	unsigned int i, siz, j;
10827 	unsigned int num_gap_blocks = 0, num_nr_gap_blocks = 0, space;
10828 	int num_dups = 0;
10829 	int space_req;
10830 	uint32_t highest_tsn;
10831 	uint8_t flags;
10832 	uint8_t type;
10833 	uint8_t tsn_map;
10834 
10835 	if (stcb->asoc.nrsack_supported == 1) {
10836 		type = SCTP_NR_SELECTIVE_ACK;
10837 	} else {
10838 		type = SCTP_SELECTIVE_ACK;
10839 	}
10840 	a_chk = NULL;
10841 	asoc = &stcb->asoc;
10842 	SCTP_TCB_LOCK_ASSERT(stcb);
10843 	if (asoc->last_data_chunk_from == NULL) {
10844 		/* Hmm we never received anything */
10845 		return;
10846 	}
10847 	sctp_slide_mapping_arrays(stcb);
10848 	sctp_set_rwnd(stcb, asoc);
10849 	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10850 		if (chk->rec.chunk_id.id == type) {
10851 			/* Hmm, found a sack already on queue, remove it */
10852 			TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
10853 			asoc->ctrl_queue_cnt--;
10854 			a_chk = chk;
10855 			if (a_chk->data) {
10856 				sctp_m_freem(a_chk->data);
10857 				a_chk->data = NULL;
10858 			}
10859 			if (a_chk->whoTo) {
10860 				sctp_free_remote_addr(a_chk->whoTo);
10861 				a_chk->whoTo = NULL;
10862 			}
10863 			break;
10864 		}
10865 	}
10866 	if (a_chk == NULL) {
10867 		sctp_alloc_a_chunk(stcb, a_chk);
10868 		if (a_chk == NULL) {
10869 			/* No memory so we drop the idea, and set a timer */
10870 			if (stcb->asoc.delayed_ack) {
10871 				sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
10872 				    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5);
10873 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
10874 				    stcb->sctp_ep, stcb, NULL);
10875 			} else {
10876 				stcb->asoc.send_sack = 1;
10877 			}
10878 			return;
10879 		}
10880 		a_chk->copy_by_ref = 0;
10881 		a_chk->rec.chunk_id.id = type;
10882 		a_chk->rec.chunk_id.can_take_data = 1;
10883 	}
10884 	/* Clear our pkt counts */
10885 	asoc->data_pkts_seen = 0;
10886 
10887 	a_chk->flags = 0;
10888 	a_chk->asoc = asoc;
10889 	a_chk->snd_count = 0;
10890 	a_chk->send_size = 0;	/* fill in later */
10891 	a_chk->sent = SCTP_DATAGRAM_UNSENT;
10892 	a_chk->whoTo = NULL;
10893 
10894 	if ((asoc->numduptsns) ||
10895 	    (!(asoc->last_data_chunk_from->dest_state & SCTP_ADDR_REACHABLE))) {
10896 		/*-
10897 		 * Ok, we have some duplicates or the destination for the
10898 		 * sack is unreachable, lets see if we can select an
10899 		 * alternate than asoc->last_data_chunk_from
10900 		 */
10901 		if ((asoc->last_data_chunk_from->dest_state & SCTP_ADDR_REACHABLE) &&
10902 		    (asoc->used_alt_onsack > asoc->numnets)) {
10903 			/* We used an alternate last time, don't use one this time */
10904 			a_chk->whoTo = NULL;
10905 		} else {
10906 			asoc->used_alt_onsack++;
10907 			a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0);
10908 		}
10909 		if (a_chk->whoTo == NULL) {
10910 			/* Nope, no alternate */
10911 			a_chk->whoTo = asoc->last_data_chunk_from;
10912 			asoc->used_alt_onsack = 0;
10913 		}
10914 	} else {
10915 		/*
10916 		 * No duplicates so we use the last place we received data
10917 		 * from.
10918 		 */
10919 		asoc->used_alt_onsack = 0;
10920 		a_chk->whoTo = asoc->last_data_chunk_from;
10921 	}
10922 	if (a_chk->whoTo) {
10923 		atomic_add_int(&a_chk->whoTo->ref_count, 1);
10924 	}
10925 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map)) {
10926 		highest_tsn = asoc->highest_tsn_inside_map;
10927 	} else {
10928 		highest_tsn = asoc->highest_tsn_inside_nr_map;
10929 	}
10930 	if (highest_tsn == asoc->cumulative_tsn) {
10931 		/* no gaps */
10932 		if (type == SCTP_SELECTIVE_ACK) {
10933 			space_req = sizeof(struct sctp_sack_chunk);
10934 		} else {
10935 			space_req = sizeof(struct sctp_nr_sack_chunk);
10936 		}
10937 	} else {
10938 		/* gaps get a cluster */
10939 		space_req = MCLBYTES;
10940 	}
10941 	/* Ok now lets formulate a MBUF with our sack */
10942 	a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_NOWAIT, 1, MT_DATA);
10943 	if ((a_chk->data == NULL) ||
10944 	    (a_chk->whoTo == NULL)) {
10945 		/* rats, no mbuf memory */
10946 		if (a_chk->data) {
10947 			/* was a problem with the destination */
10948 			sctp_m_freem(a_chk->data);
10949 			a_chk->data = NULL;
10950 		}
10951 		sctp_free_a_chunk(stcb, a_chk, so_locked);
10952 		/* sa_ignore NO_NULL_CHK */
10953 		if (stcb->asoc.delayed_ack) {
10954 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
10955 			    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_6);
10956 			sctp_timer_start(SCTP_TIMER_TYPE_RECV,
10957 			    stcb->sctp_ep, stcb, NULL);
10958 		} else {
10959 			stcb->asoc.send_sack = 1;
10960 		}
10961 		return;
10962 	}
10963 	/* ok, lets go through and fill it in */
10964 	SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD);
10965 	space = M_TRAILINGSPACE(a_chk->data);
10966 	if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) {
10967 		space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD);
10968 	}
10969 	limit = mtod(a_chk->data, caddr_t);
10970 	limit += space;
10971 
10972 	flags = 0;
10973 
10974 	if ((asoc->sctp_cmt_on_off > 0) &&
10975 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
10976 		/*-
10977 		 * CMT DAC algorithm: If 2 (i.e., binary 10) packets have been
10978 		 * received, then set the high bit to 1, else 0. Reset
10979 		 * pkts_rcvd.
10980 		 */
10981 		flags |= (asoc->cmt_dac_pkts_rcvd << 6);
10982 		asoc->cmt_dac_pkts_rcvd = 0;
10983 	}
10984 #ifdef SCTP_ASOCLOG_OF_TSNS
10985 	stcb->asoc.cumack_logsnt[stcb->asoc.cumack_log_atsnt] = asoc->cumulative_tsn;
10986 	stcb->asoc.cumack_log_atsnt++;
10987 	if (stcb->asoc.cumack_log_atsnt >= SCTP_TSN_LOG_SIZE) {
10988 		stcb->asoc.cumack_log_atsnt = 0;
10989 	}
10990 #endif
10991 	/* reset the readers interpretation */
10992 	stcb->freed_by_sorcv_sincelast = 0;
10993 
10994 	if (type == SCTP_SELECTIVE_ACK) {
10995 		sack = mtod(a_chk->data, struct sctp_sack_chunk *);
10996 		nr_sack = NULL;
10997 		gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk));
10998 		if (highest_tsn > asoc->mapping_array_base_tsn) {
10999 			siz = (((highest_tsn - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
11000 		} else {
11001 			siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + highest_tsn + 7) / 8;
11002 		}
11003 	} else {
11004 		sack = NULL;
11005 		nr_sack = mtod(a_chk->data, struct sctp_nr_sack_chunk *);
11006 		gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)nr_sack + sizeof(struct sctp_nr_sack_chunk));
11007 		if (asoc->highest_tsn_inside_map > asoc->mapping_array_base_tsn) {
11008 			siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
11009 		} else {
11010 			siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_map + 7) / 8;
11011 		}
11012 	}
11013 
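	/*-
	 * Gap ack blocks are expressed relative to the cumulative TSN, while
	 * the mapping arrays are based at mapping_array_base_tsn; 'offset'
	 * translates bit positions in the arrays into those relative values.
	 */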
11014 	if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
11015 		offset = 1;
11016 	} else {
11017 		offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
11018 	}
11019 	if (((type == SCTP_SELECTIVE_ACK) &&
11020 	     SCTP_TSN_GT(highest_tsn, asoc->cumulative_tsn)) ||
11021 	    ((type == SCTP_NR_SELECTIVE_ACK) &&
11022 	     SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->cumulative_tsn))) {
11023 		/* we have a gap .. maybe */
11024 		for (i = 0; i < siz; i++) {
11025 			tsn_map = asoc->mapping_array[i];
11026 			if (type == SCTP_SELECTIVE_ACK) {
11027 				tsn_map |= asoc->nr_mapping_array[i];
11028 			}
11029 			if (i == 0) {
11030 				/*
11031 				 * Clear all bits corresponding to TSNs
11032 				 * smaller or equal to the cumulative TSN.
11033 				 */
11034 				tsn_map &= (~0 << (1 - offset));
11035 			}
11036 			selector = &sack_array[tsn_map];
11037 			if (mergeable && selector->right_edge) {
11038 				/*
11039 				 * Backup, left and right edges were ok to
11040 				 * merge.
11041 				 */
11042 				num_gap_blocks--;
11043 				gap_descriptor--;
11044 			}
11045 			if (selector->num_entries == 0)
11046 				mergeable = 0;
11047 			else {
11048 				for (j = 0; j < selector->num_entries; j++) {
11049 					if (mergeable && selector->right_edge) {
11050 						/*
11051 						 * do a merge by NOT setting
11052 						 * the left side
11053 						 */
11054 						mergeable = 0;
11055 					} else {
11056 						/*
11057 						 * no merge, set the left
11058 						 * side
11059 						 */
11060 						mergeable = 0;
11061 						gap_descriptor->start = htons((selector->gaps[j].start + offset));
11062 					}
11063 					gap_descriptor->end = htons((selector->gaps[j].end + offset));
11064 					num_gap_blocks++;
11065 					gap_descriptor++;
11066 					if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
11067 						/* no more room */
11068 						limit_reached = 1;
11069 						break;
11070 					}
11071 				}
11072 				if (selector->left_edge) {
11073 					mergeable = 1;
11074 				}
11075 			}
11076 			if (limit_reached) {
11077 				/* Reached the limit, stop */
11078 				break;
11079 			}
11080 			offset += 8;
11081 		}
11082 	}
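	/*-
	 * For an NR-SACK, make a second pass over the nr_mapping_array to
	 * report the non-renegable gaps, provided the first pass did not
	 * already fill the chunk up to its limit.
	 */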
11083 	if ((type == SCTP_NR_SELECTIVE_ACK) &&
11084 	    (limit_reached == 0)) {
11085 
11086 		mergeable = 0;
11087 
11088 		if (asoc->highest_tsn_inside_nr_map > asoc->mapping_array_base_tsn) {
11089 			siz = (((asoc->highest_tsn_inside_nr_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
11090 		} else {
11091 			siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_nr_map + 7) / 8;
11092 		}
11093 
11094 		if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
11095 			offset = 1;
11096 		} else {
11097 			offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
11098 		}
11099 		if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->cumulative_tsn)) {
11100 			/* we have a gap .. maybe */
11101 			for (i = 0; i < siz; i++) {
11102 				tsn_map = asoc->nr_mapping_array[i];
11103 				if (i == 0) {
11104 					/*
11105 					 * Clear all bits corresponding to TSNs
11106 					 * smaller or equal to the cumulative TSN.
11107 					 */
11108 					tsn_map &= (~0 << (1 - offset));
11109 				}
11110 				selector = &sack_array[tsn_map];
11111 				if (mergeable && selector->right_edge) {
11112 					/*
11113 					 * Backup, left and right edges were ok to
11114 					 * merge.
11115 					 */
11116 					num_nr_gap_blocks--;
11117 					gap_descriptor--;
11118 				}
11119 				if (selector->num_entries == 0)
11120 					mergeable = 0;
11121 				else {
11122 					for (j = 0; j < selector->num_entries; j++) {
11123 						if (mergeable && selector->right_edge) {
11124 							/*
11125 							 * do a merge by NOT setting
11126 							 * the left side
11127 							 */
11128 							mergeable = 0;
11129 						} else {
11130 							/*
11131 							 * no merge, set the left
11132 							 * side
11133 							 */
11134 							mergeable = 0;
11135 							gap_descriptor->start = htons((selector->gaps[j].start + offset));
11136 						}
11137 						gap_descriptor->end = htons((selector->gaps[j].end + offset));
11138 						num_nr_gap_blocks++;
11139 						gap_descriptor++;
11140 						if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
11141 							/* no more room */
11142 							limit_reached = 1;
11143 							break;
11144 						}
11145 					}
11146 					if (selector->left_edge) {
11147 						mergeable = 1;
11148 					}
11149 				}
11150 				if (limit_reached) {
11151 					/* Reached the limit, stop */
11152 					break;
11153 				}
11154 				offset += 8;
11155 			}
11156 		}
11157 	}
11158 	/* now we must add any dups we are going to report. */
11159 	if ((limit_reached == 0) && (asoc->numduptsns)) {
11160 		dup = (uint32_t *) gap_descriptor;
11161 		for (i = 0; i < asoc->numduptsns; i++) {
11162 			*dup = htonl(asoc->dup_tsns[i]);
11163 			dup++;
11164 			num_dups++;
11165 			if (((caddr_t)dup + sizeof(uint32_t)) > limit) {
11166 				/* no more room */
11167 				break;
11168 			}
11169 		}
11170 		asoc->numduptsns = 0;
11171 	}
11172 	/*
11173 	 * now that the chunk is prepared queue it to the control chunk
11174 	 * queue.
11175 	 */
11176 	if (type == SCTP_SELECTIVE_ACK) {
11177 		a_chk->send_size = sizeof(struct sctp_sack_chunk) +
11178 		                   (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
11179 		                   num_dups * sizeof(int32_t);
11180 		SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
11181 		sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
11182 		sack->sack.a_rwnd = htonl(asoc->my_rwnd);
11183 		sack->sack.num_gap_ack_blks = htons(num_gap_blocks);
11184 		sack->sack.num_dup_tsns = htons(num_dups);
11185 		sack->ch.chunk_type = type;
11186 		sack->ch.chunk_flags = flags;
11187 		sack->ch.chunk_length = htons(a_chk->send_size);
11188 	} else {
11189 		a_chk->send_size = sizeof(struct sctp_nr_sack_chunk) +
11190 		                   (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
11191 		                   num_dups * sizeof(int32_t);
11192 		SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
11193 		nr_sack->nr_sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
11194 		nr_sack->nr_sack.a_rwnd = htonl(asoc->my_rwnd);
11195 		nr_sack->nr_sack.num_gap_ack_blks = htons(num_gap_blocks);
11196 		nr_sack->nr_sack.num_nr_gap_ack_blks = htons(num_nr_gap_blocks);
11197 		nr_sack->nr_sack.num_dup_tsns = htons(num_dups);
11198 		nr_sack->nr_sack.reserved = 0;
11199 		nr_sack->ch.chunk_type = type;
11200 		nr_sack->ch.chunk_flags = flags;
11201 		nr_sack->ch.chunk_length = htons(a_chk->send_size);
11202 	}
11203 	TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
11204 	asoc->my_last_reported_rwnd = asoc->my_rwnd;
11205 	asoc->ctrl_queue_cnt++;
11206 	asoc->send_sack = 0;
11207 	SCTP_STAT_INCR(sctps_sendsacks);
11208 	return;
11209 }
11210 
11211 void
11212 sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked
11213 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
11214     SCTP_UNUSED
11215 #endif
11216     )
11217 {
11218 	struct mbuf *m_abort, *m, *m_last;
11219 	struct mbuf *m_out, *m_end = NULL;
11220 	struct sctp_abort_chunk *abort;
11221 	struct sctp_auth_chunk *auth = NULL;
11222 	struct sctp_nets *net;
11223 	uint32_t vtag;
11224 	uint32_t auth_offset = 0;
11225 	uint16_t cause_len, chunk_len, padding_len;
11226 
11227 #if defined(__APPLE__)
11228 	if (so_locked) {
11229 		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
11230 	} else {
11231 		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
11232 	}
11233 #endif
11234 	SCTP_TCB_LOCK_ASSERT(stcb);
11235 	/*-
11236 	 * Add an AUTH chunk, if chunk requires it and save the offset into
11237 	 * the chain for AUTH
11238 	 */
11239 	if (sctp_auth_is_required_chunk(SCTP_ABORT_ASSOCIATION,
11240 	                                stcb->asoc.peer_auth_chunks)) {
11241 		m_out = sctp_add_auth_chunk(NULL, &m_end, &auth, &auth_offset,
11242 					    stcb, SCTP_ABORT_ASSOCIATION);
11243 		SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
11244 	} else {
11245 		m_out = NULL;
11246 	}
11247 	m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_NOWAIT, 1, MT_HEADER);
11248 	if (m_abort == NULL) {
11249 		if (m_out) {
11250 			sctp_m_freem(m_out);
11251 		}
11252 		if (operr) {
11253 			sctp_m_freem(operr);
11254 		}
11255 		return;
11256 	}
11257 	/* link in any error */
11258 	SCTP_BUF_NEXT(m_abort) = operr;
11259 	cause_len = 0;
11260 	m_last = NULL;
11261 	for (m = operr; m; m = SCTP_BUF_NEXT(m)) {
11262 		cause_len += (uint16_t)SCTP_BUF_LEN(m);
11263 		if (SCTP_BUF_NEXT(m) == NULL) {
11264 			m_last = m;
11265 		}
11266 	}
11267 	SCTP_BUF_LEN(m_abort) = sizeof(struct sctp_abort_chunk);
11268 	chunk_len = (uint16_t)sizeof(struct sctp_abort_chunk) + cause_len;
11269 	padding_len = SCTP_SIZE32(chunk_len) - chunk_len;
11270 	if (m_out == NULL) {
11271 		/* NO Auth chunk prepended, so reserve space in front */
11272 		SCTP_BUF_RESV_UF(m_abort, SCTP_MIN_OVERHEAD);
11273 		m_out = m_abort;
11274 	} else {
11275 		/* Put AUTH chunk at the front of the chain */
11276 		SCTP_BUF_NEXT(m_end) = m_abort;
11277 	}
11278 	if (stcb->asoc.alternate) {
11279 		net = stcb->asoc.alternate;
11280 	} else {
11281 		net = stcb->asoc.primary_destination;
11282 	}
11283 	/* Fill in the ABORT chunk header. */
11284 	abort = mtod(m_abort, struct sctp_abort_chunk *);
11285 	abort->ch.chunk_type = SCTP_ABORT_ASSOCIATION;
11286 	if (stcb->asoc.peer_vtag == 0) {
11287 		/* This happens iff the assoc is in COOKIE-WAIT state. */
11288 		vtag = stcb->asoc.my_vtag;
11289 		abort->ch.chunk_flags = SCTP_HAD_NO_TCB;
11290 	} else {
11291 		vtag = stcb->asoc.peer_vtag;
11292 		abort->ch.chunk_flags = 0;
11293 	}
11294 	abort->ch.chunk_length = htons(chunk_len);
11295 	/* Add padding, if necessary. */
11296 	if (padding_len > 0) {
11297 		if ((m_last == NULL) ||
11298 		    (sctp_add_pad_tombuf(m_last, padding_len) == NULL)) {
11299 			sctp_m_freem(m_out);
11300 			return;
11301 		}
11302 	}
11303 	(void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
11304 	                                 (struct sockaddr *)&net->ro._l_addr,
11305 	                                 m_out, auth_offset, auth, stcb->asoc.authinfo.active_keyid, 1, 0, 0,
11306 	                                 stcb->sctp_ep->sctp_lport, stcb->rport, htonl(vtag),
11307 	                                 stcb->asoc.primary_destination->port, NULL,
11308 #if defined(__FreeBSD__)
11309 	                                 0, 0,
11310 #endif
11311 	                                 so_locked);
11312 	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
11313 }
11314 
11315 void
11316 sctp_send_shutdown_complete(struct sctp_tcb *stcb,
11317                             struct sctp_nets *net,
11318                             int reflect_vtag)
11319 {
11320 	/* formulate and SEND a SHUTDOWN-COMPLETE */
11321 	struct mbuf *m_shutdown_comp;
11322 	struct sctp_shutdown_complete_chunk *shutdown_complete;
11323 	uint32_t vtag;
11324 	uint8_t flags;
11325 
11326 	m_shutdown_comp = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER);
11327 	if (m_shutdown_comp == NULL) {
11328 		/* no mbufs */
11329 		return;
11330 	}
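	/*-
	 * If the vtag is to be reflected, send with our own vtag and set the
	 * T-bit (SCTP_HAD_NO_TCB) in the chunk flags; otherwise use the
	 * peer's vtag.
	 */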
11331 	if (reflect_vtag) {
11332 		flags = SCTP_HAD_NO_TCB;
11333 		vtag = stcb->asoc.my_vtag;
11334 	} else {
11335 		flags = 0;
11336 		vtag = stcb->asoc.peer_vtag;
11337 	}
11338 	shutdown_complete = mtod(m_shutdown_comp, struct sctp_shutdown_complete_chunk *);
11339 	shutdown_complete->ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
11340 	shutdown_complete->ch.chunk_flags = flags;
11341 	shutdown_complete->ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
11342 	SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_chunk);
11343 	(void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
11344 	                                 (struct sockaddr *)&net->ro._l_addr,
11345 	                                 m_shutdown_comp, 0, NULL, 0, 1, 0, 0,
11346 	                                 stcb->sctp_ep->sctp_lport, stcb->rport,
11347 	                                 htonl(vtag),
11348 	                                 net->port, NULL,
11349 #if defined(__FreeBSD__)
11350 	                                 0, 0,
11351 #endif
11352 	                                 SCTP_SO_NOT_LOCKED);
11353 	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
11354 	return;
11355 }
11356 
11357 #if defined(__FreeBSD__)
11358 static void
11359 sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst,
11360                    struct sctphdr *sh, uint32_t vtag,
11361                    uint8_t type, struct mbuf *cause,
11362                    uint8_t mflowtype, uint32_t mflowid,
11363                    uint32_t vrf_id, uint16_t port)
11364 #else
11365 static void
11366 sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst,
11367                    struct sctphdr *sh, uint32_t vtag,
11368                    uint8_t type, struct mbuf *cause,
11369                    uint32_t vrf_id SCTP_UNUSED, uint16_t port)
11370 #endif
11371 {
11372 #ifdef __Panda__
11373 	pakhandle_type o_pak;
11374 #else
11375 	struct mbuf *o_pak;
11376 #endif
11377 	struct mbuf *mout;
11378 	struct sctphdr *shout;
11379 	struct sctp_chunkhdr *ch;
11380 #if defined(INET) || defined(INET6)
11381 	struct udphdr *udp;
11382 	int ret;
11383 #endif
11384 	int len, cause_len, padding_len;
11385 #ifdef INET
11386 #if defined(__APPLE__) || defined(__Panda__)
11387 	sctp_route_t ro;
11388 #endif
11389 	struct sockaddr_in *src_sin, *dst_sin;
11390 	struct ip *ip;
11391 #endif
11392 #ifdef INET6
11393 	struct sockaddr_in6 *src_sin6, *dst_sin6;
11394 	struct ip6_hdr *ip6;
11395 #endif
11396 
11397 	/* Compute the length of the cause and add final padding. */
11398 	cause_len = 0;
11399 	if (cause != NULL) {
11400 		struct mbuf *m_at, *m_last = NULL;
11401 
11402 		for (m_at = cause; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
11403 			if (SCTP_BUF_NEXT(m_at) == NULL)
11404 				m_last = m_at;
11405 			cause_len += SCTP_BUF_LEN(m_at);
11406 		}
11407 		padding_len = cause_len % 4;
11408 		if (padding_len != 0) {
11409 			padding_len = 4 - padding_len;
11410 		}
11411 		if (padding_len != 0) {
11412 			if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
11413 				sctp_m_freem(cause);
11414 				return;
11415 			}
11416 		}
11417 	} else {
11418 		padding_len = 0;
11419 	}
11420 	/* Get an mbuf for the header. */
11421 	len = sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
11422 	switch (dst->sa_family) {
11423 #ifdef INET
11424 	case AF_INET:
11425 		len += sizeof(struct ip);
11426 		break;
11427 #endif
11428 #ifdef INET6
11429 	case AF_INET6:
11430 		len += sizeof(struct ip6_hdr);
11431 		break;
11432 #endif
11433 	default:
11434 		break;
11435 	}
11436 #if defined(INET) || defined(INET6)
11437 	if (port) {
11438 		len += sizeof(struct udphdr);
11439 	}
11440 #endif
11441 #if defined(__APPLE__)
11442 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
11443 	mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_NOWAIT, 1, MT_DATA);
11444 #else
11445 	mout = sctp_get_mbuf_for_msg(len + SCTP_MAX_LINKHDR, 1, M_NOWAIT, 1, MT_DATA);
11446 #endif
11447 #else
11448 	mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_NOWAIT, 1, MT_DATA);
11449 #endif
11450 	if (mout == NULL) {
11451 		if (cause) {
11452 			sctp_m_freem(cause);
11453 		}
11454 		return;
11455 	}
11456 #if defined(__APPLE__)
11457 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
11458 	SCTP_BUF_RESV_UF(mout, max_linkhdr);
11459 #else
11460 	SCTP_BUF_RESV_UF(mout, SCTP_MAX_LINKHDR);
11461 #endif
11462 #else
11463 	SCTP_BUF_RESV_UF(mout, max_linkhdr);
11464 #endif
11465 	SCTP_BUF_LEN(mout) = len;
11466 	SCTP_BUF_NEXT(mout) = cause;
11467 #if defined(__FreeBSD__)
11468 	mout->m_pkthdr.flowid = mflowid;
11469 	M_HASHTYPE_SET(mout, mflowtype);
11470 #endif
11471 #ifdef INET
11472 	ip = NULL;
11473 #endif
11474 #ifdef INET6
11475 	ip6 = NULL;
11476 #endif
11477 	switch (dst->sa_family) {
11478 #ifdef INET
11479 	case AF_INET:
11480 		src_sin = (struct sockaddr_in *)src;
11481 		dst_sin = (struct sockaddr_in *)dst;
11482 		ip = mtod(mout, struct ip *);
11483 		ip->ip_v = IPVERSION;
11484 		ip->ip_hl = (sizeof(struct ip) >> 2);
11485 		ip->ip_tos = 0;
11486 #if defined(__FreeBSD__)
11487 		ip->ip_id = ip_newid();
11488 #elif defined(__APPLE__)
11489 #if RANDOM_IP_ID
11490 		ip->ip_id = ip_randomid();
11491 #else
11492 		ip->ip_id = htons(ip_id++);
11493 #endif
11494 #else
11495 		ip->ip_id = htons(ip_id++);
11496 #endif
11497 		ip->ip_off = 0;
11498 		ip->ip_ttl = MODULE_GLOBAL(ip_defttl);
11499 		if (port) {
11500 			ip->ip_p = IPPROTO_UDP;
11501 		} else {
11502 			ip->ip_p = IPPROTO_SCTP;
11503 		}
11504 		ip->ip_src.s_addr = dst_sin->sin_addr.s_addr;
11505 		ip->ip_dst.s_addr = src_sin->sin_addr.s_addr;
11506 		ip->ip_sum = 0;
11507 		len = sizeof(struct ip);
11508 		shout = (struct sctphdr *)((caddr_t)ip + len);
11509 		break;
11510 #endif
11511 #ifdef INET6
11512 	case AF_INET6:
11513 		src_sin6 = (struct sockaddr_in6 *)src;
11514 		dst_sin6 = (struct sockaddr_in6 *)dst;
11515 		ip6 = mtod(mout, struct ip6_hdr *);
11516 		ip6->ip6_flow = htonl(0x60000000);
11517 #if defined(__FreeBSD__)
11518 		if (V_ip6_auto_flowlabel) {
11519 			ip6->ip6_flow |= (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
11520 		}
11521 #endif
11522 #if defined(__Userspace__)
11523 		ip6->ip6_hlim = IPv6_HOP_LIMIT;
11524 #else
11525 		ip6->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
11526 #endif
11527 		if (port) {
11528 			ip6->ip6_nxt = IPPROTO_UDP;
11529 		} else {
11530 			ip6->ip6_nxt = IPPROTO_SCTP;
11531 		}
11532 		ip6->ip6_src = dst_sin6->sin6_addr;
11533 		ip6->ip6_dst = src_sin6->sin6_addr;
11534 		len = sizeof(struct ip6_hdr);
11535 		shout = (struct sctphdr *)((caddr_t)ip6 + len);
11536 		break;
11537 #endif
11538 	default:
11539 		len = 0;
11540 		shout = mtod(mout, struct sctphdr *);
11541 		break;
11542 	}
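	/*-
	 * If a UDP encapsulation port was given, insert a UDP header between
	 * the IP header and the SCTP common header, sourced from our
	 * tunneling port, so the response is sent UDP-encapsulated.
	 */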
11543 #if defined(INET) || defined(INET6)
11544 	if (port) {
11545 		if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
11546 			sctp_m_freem(mout);
11547 			return;
11548 		}
11549 		udp = (struct udphdr *)shout;
11550 		udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
11551 		udp->uh_dport = port;
11552 		udp->uh_sum = 0;
11553 		udp->uh_ulen = htons(sizeof(struct udphdr) +
11554 		                     sizeof(struct sctphdr) +
11555 		                     sizeof(struct sctp_chunkhdr) +
11556 		                     cause_len + padding_len);
11557 		len += sizeof(struct udphdr);
11558 		shout = (struct sctphdr *)((caddr_t)shout + sizeof(struct udphdr));
11559 	} else {
11560 		udp = NULL;
11561 	}
11562 #endif
11563 	shout->src_port = sh->dest_port;
11564 	shout->dest_port = sh->src_port;
11565 	shout->checksum = 0;
11566 	if (vtag) {
11567 		shout->v_tag = htonl(vtag);
11568 	} else {
11569 		shout->v_tag = sh->v_tag;
11570 	}
11571 	len += sizeof(struct sctphdr);
11572 	ch = (struct sctp_chunkhdr *)((caddr_t)shout + sizeof(struct sctphdr));
11573 	ch->chunk_type = type;
11574 	if (vtag) {
11575 		ch->chunk_flags = 0;
11576 	} else {
11577 		ch->chunk_flags = SCTP_HAD_NO_TCB;
11578 	}
11579 	ch->chunk_length = htons(sizeof(struct sctp_chunkhdr) + cause_len);
11580 	len += sizeof(struct sctp_chunkhdr);
11581 	len += cause_len + padding_len;
11582 
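	/*-
	 * At this point 'len' covers the complete packet; grab a packet
	 * header, attach the mbuf chain and fill in the address-family
	 * specific length fields and checksums before sending.
	 */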
11583 	if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
11584 		sctp_m_freem(mout);
11585 		return;
11586 	}
11587 	SCTP_ATTACH_CHAIN(o_pak, mout, len);
11588 	switch (dst->sa_family) {
11589 #ifdef INET
11590 	case AF_INET:
11591 #if defined(__APPLE__) || defined(__Panda__)
11592 		/* zap the stack pointer to the route */
11593 		bzero(&ro, sizeof(sctp_route_t));
11594 #if defined(__Panda__)
11595 		ro._l_addr.sa.sa_family = AF_INET;
11596 #endif
11597 #endif
11598 		if (port) {
11599 #if !defined(__Windows__) && !defined(__Userspace__)
11600 #if defined(__FreeBSD__) && ((__FreeBSD_version > 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000)
11601 			if (V_udp_cksum) {
11602 				udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
11603 			} else {
11604 				udp->uh_sum = 0;
11605 			}
11606 #else
11607 			udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
11608 #endif
11609 #else
11610 			udp->uh_sum = 0;
11611 #endif
11612 		}
11613 #if defined(__FreeBSD__)
11614 #if __FreeBSD_version >= 1000000
11615 		ip->ip_len = htons(len);
11616 #else
11617 		ip->ip_len = len;
11618 #endif
11619 #elif defined(__APPLE__) || defined(__Userspace__)
11620 		ip->ip_len = len;
11621 #else
11622 		ip->ip_len = htons(len);
11623 #endif
11624 		if (port) {
11625 #if defined(SCTP_WITH_NO_CSUM)
11626 			SCTP_STAT_INCR(sctps_sendnocrc);
11627 #else
11628 			shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip) + sizeof(struct udphdr));
11629 			SCTP_STAT_INCR(sctps_sendswcrc);
11630 #endif
11631 #if defined(__FreeBSD__) && ((__FreeBSD_version > 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000)
11632 			if (V_udp_cksum) {
11633 				SCTP_ENABLE_UDP_CSUM(o_pak);
11634 			}
11635 #else
11636 			SCTP_ENABLE_UDP_CSUM(o_pak);
11637 #endif
11638 		} else {
11639 #if defined(SCTP_WITH_NO_CSUM)
11640 			SCTP_STAT_INCR(sctps_sendnocrc);
11641 #else
11642 #if defined(__FreeBSD__) && __FreeBSD_version >= 800000
11643 			mout->m_pkthdr.csum_flags = CSUM_SCTP;
11644 			mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
11645 			SCTP_STAT_INCR(sctps_sendhwcrc);
11646 #else
11647 			shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip));
11648 			SCTP_STAT_INCR(sctps_sendswcrc);
11649 #endif
11650 #endif
11651 		}
11652 #ifdef SCTP_PACKET_LOGGING
11653 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
11654 			sctp_packet_log(o_pak);
11655 		}
11656 #endif
11657 #if defined(__APPLE__) || defined(__Panda__)
11658 		SCTP_IP_OUTPUT(ret, o_pak, &ro, NULL, vrf_id);
11659 		/* Free the route if we got one back */
11660 		if (ro.ro_rt) {
11661 			RTFREE(ro.ro_rt);
11662 			ro.ro_rt = NULL;
11663 		}
11664 #else
11665 		SCTP_IP_OUTPUT(ret, o_pak, NULL, NULL, vrf_id);
11666 #endif
11667 		break;
11668 #endif
11669 #ifdef INET6
11670 	case AF_INET6:
11671 		ip6->ip6_plen = len - sizeof(struct ip6_hdr);
11672 		if (port) {
11673 #if defined(SCTP_WITH_NO_CSUM)
11674 			SCTP_STAT_INCR(sctps_sendnocrc);
11675 #else
11676 			shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
11677 			SCTP_STAT_INCR(sctps_sendswcrc);
11678 #endif
11679 #if defined(__Windows__)
11680 			udp->uh_sum = 0;
11681 #elif !defined(__Userspace__)
11682 			if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
11683 				udp->uh_sum = 0xffff;
11684 			}
11685 #endif
11686 		} else {
11687 #if defined(SCTP_WITH_NO_CSUM)
11688 			SCTP_STAT_INCR(sctps_sendnocrc);
11689 #else
11690 #if defined(__FreeBSD__) && __FreeBSD_version >= 900000
11691 #if __FreeBSD_version > 901000
11692 			mout->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
11693 #else
11694 			mout->m_pkthdr.csum_flags = CSUM_SCTP;
11695 #endif
11696 			mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
11697 			SCTP_STAT_INCR(sctps_sendhwcrc);
11698 #else
11699 			shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr));
11700 			SCTP_STAT_INCR(sctps_sendswcrc);
11701 #endif
11702 #endif
11703 		}
11704 #ifdef SCTP_PACKET_LOGGING
11705 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
11706 			sctp_packet_log(o_pak);
11707 		}
11708 #endif
11709 		SCTP_IP6_OUTPUT(ret, o_pak, NULL, NULL, NULL, vrf_id);
11710 		break;
11711 #endif
11712 #if defined(__Userspace__)
11713 	case AF_CONN:
11714 	{
11715 		char *buffer;
11716 		struct sockaddr_conn *sconn;
11717 
11718 		sconn = (struct sockaddr_conn *)src;
11719 #if defined(SCTP_WITH_NO_CSUM)
11720 		SCTP_STAT_INCR(sctps_sendnocrc);
11721 #else
11722 		shout->checksum = sctp_calculate_cksum(mout, 0);
11723 		SCTP_STAT_INCR(sctps_sendswcrc);
11724 #endif
11725 #ifdef SCTP_PACKET_LOGGING
11726 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
11727 			sctp_packet_log(mout);
11728 		}
11729 #endif
11730 		/* Don't alloc/free for each packet */
11731 		if ((buffer = malloc(len)) != NULL) {
11732 			m_copydata(mout, 0, len, buffer);
11733 			SCTP_BASE_VAR(conn_output)(sconn->sconn_addr, buffer, len, 0, 0);
11734 			free(buffer);
11735 		}
11736 		sctp_m_freem(mout);
11737 		break;
11738 	}
11739 #endif
11740 	default:
11741 		SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
11742 		        dst->sa_family);
11743 		sctp_m_freem(mout);
11744 		SCTP_LTRACE_ERR_RET_PKT(mout, NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
11745 		return;
11746 	}
11747 	SCTP_STAT_INCR(sctps_sendpackets);
11748 	SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
11749 	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
11750 	return;
11751 }
11752 
11753 void
11754 sctp_send_shutdown_complete2(struct sockaddr *src, struct sockaddr *dst,
11755                              struct sctphdr *sh,
11756 #if defined(__FreeBSD__)
11757                              uint8_t mflowtype, uint32_t mflowid,
11758 #endif
11759                              uint32_t vrf_id, uint16_t port)
11760 {
11761 	sctp_send_resp_msg(src, dst, sh, 0, SCTP_SHUTDOWN_COMPLETE, NULL,
11762 #if defined(__FreeBSD__)
11763 	                   mflowtype, mflowid,
11764 #endif
11765 	                   vrf_id, port);
11766 }
11767 
11768 void
11769 sctp_send_hb(struct sctp_tcb *stcb, struct sctp_nets *net, int so_locked
11770 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
11771 	SCTP_UNUSED
11772 #endif
11773 )
11774 {
11775 	struct sctp_tmit_chunk *chk;
11776 	struct sctp_heartbeat_chunk *hb;
11777 	struct timeval now;
11778 
11779 	SCTP_TCB_LOCK_ASSERT(stcb);
11780 	if (net == NULL) {
11781 		return;
11782 	}
11783 	(void)SCTP_GETTIME_TIMEVAL(&now);
11784 	switch (net->ro._l_addr.sa.sa_family) {
11785 #ifdef INET
11786 	case AF_INET:
11787 		break;
11788 #endif
11789 #ifdef INET6
11790 	case AF_INET6:
11791 		break;
11792 #endif
11793 #if defined(__Userspace__)
11794 	case AF_CONN:
11795 		break;
11796 #endif
11797 	default:
11798 		return;
11799 	}
11800 	sctp_alloc_a_chunk(stcb, chk);
11801 	if (chk == NULL) {
11802 		SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak, can't get a chunk for hb\n");
11803 		return;
11804 	}
11805 
11806 	chk->copy_by_ref = 0;
11807 	chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST;
11808 	chk->rec.chunk_id.can_take_data = 1;
11809 	chk->flags = 0;
11810 	chk->asoc = &stcb->asoc;
11811 	chk->send_size = sizeof(struct sctp_heartbeat_chunk);
11812 
11813 	chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
11814 	if (chk->data == NULL) {
11815 		sctp_free_a_chunk(stcb, chk, so_locked);
11816 		return;
11817 	}
11818 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11819 	SCTP_BUF_LEN(chk->data) = chk->send_size;
11820 	chk->sent = SCTP_DATAGRAM_UNSENT;
11821 	chk->snd_count = 0;
11822 	chk->whoTo = net;
11823 	atomic_add_int(&chk->whoTo->ref_count, 1);
11824 	/* Now we have a mbuf that we can fill in with the details */
11825 	hb = mtod(chk->data, struct sctp_heartbeat_chunk *);
11826 	memset(hb, 0, sizeof(struct sctp_heartbeat_chunk));
11827 	/* fill out chunk header */
11828 	hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST;
11829 	hb->ch.chunk_flags = 0;
11830 	hb->ch.chunk_length = htons(chk->send_size);
11831 	/* Fill out hb parameter */
11832 	hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO);
11833 	hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param));
11834 	hb->heartbeat.hb_info.time_value_1 = now.tv_sec;
11835 	hb->heartbeat.hb_info.time_value_2 = now.tv_usec;
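	/*-
	 * The peer echoes this heartbeat info, including the timestamp
	 * above, in its HEARTBEAT-ACK, so an RTT measurement can be made
	 * for this path.
	 */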
11836 	/* Fill in the address family and length of this destination. */
11837 	hb->heartbeat.hb_info.addr_family = (uint8_t)net->ro._l_addr.sa.sa_family;
11838 #ifdef HAVE_SA_LEN
11839 	hb->heartbeat.hb_info.addr_len = net->ro._l_addr.sa.sa_len;
11840 #else
11841 	switch (net->ro._l_addr.sa.sa_family) {
11842 #ifdef INET
11843 	case AF_INET:
11844 		hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_in);
11845 		break;
11846 #endif
11847 #ifdef INET6
11848 	case AF_INET6:
11849 		hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_in6);
11850 		break;
11851 #endif
11852 #if defined(__Userspace__)
11853 	case AF_CONN:
11854 		hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_conn);
11855 		break;
11856 #endif
11857 	default:
11858 		hb->heartbeat.hb_info.addr_len = 0;
11859 		break;
11860 	}
11861 #endif
11862 	if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
11863 		/*
11864 		 * we only take from the entropy pool if the address is not
11865 		 * confirmed.
11866 		 */
11867 		net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
11868 		net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
11869 	} else {
11870 		net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0;
11871 		net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0;
11872 	}
11873 	switch (net->ro._l_addr.sa.sa_family) {
11874 #ifdef INET
11875 	case AF_INET:
11876 		memcpy(hb->heartbeat.hb_info.address,
11877 		       &net->ro._l_addr.sin.sin_addr,
11878 		       sizeof(net->ro._l_addr.sin.sin_addr));
11879 		break;
11880 #endif
11881 #ifdef INET6
11882 	case AF_INET6:
11883 		memcpy(hb->heartbeat.hb_info.address,
11884 		       &net->ro._l_addr.sin6.sin6_addr,
11885 		       sizeof(net->ro._l_addr.sin6.sin6_addr));
11886 		break;
11887 #endif
11888 #if defined(__Userspace__)
11889 	case AF_CONN:
11890 		memcpy(hb->heartbeat.hb_info.address,
11891 		       &net->ro._l_addr.sconn.sconn_addr,
11892 		       sizeof(net->ro._l_addr.sconn.sconn_addr));
11893 		break;
11894 #endif
11895 	default:
11896 		return;
11897 		break;
11898 	}
11899 	net->hb_responded = 0;
11900 	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11901 	stcb->asoc.ctrl_queue_cnt++;
11902 	SCTP_STAT_INCR(sctps_sendheartbeat);
11903 	return;
11904 }
11905 
11906 void
11907 sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
11908 		   uint32_t high_tsn)
11909 {
11910 	struct sctp_association *asoc;
11911 	struct sctp_ecne_chunk *ecne;
11912 	struct sctp_tmit_chunk *chk;
11913 
11914 	if (net == NULL) {
11915 		return;
11916 	}
11917 	asoc = &stcb->asoc;
11918 	SCTP_TCB_LOCK_ASSERT(stcb);
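	/*-
	 * If an ECN-ECHO is already queued to this destination, just raise
	 * its TSN if needed and bump its packet count instead of queueing
	 * another chunk.
	 */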
11919 	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
11920 		if ((chk->rec.chunk_id.id == SCTP_ECN_ECHO) && (net == chk->whoTo)) {
11921 			/* found a previous ECN_ECHO, update it if needed */
11922 			uint32_t cnt, ctsn;
11923 			ecne = mtod(chk->data, struct sctp_ecne_chunk *);
11924 			ctsn = ntohl(ecne->tsn);
11925 			if (SCTP_TSN_GT(high_tsn, ctsn)) {
11926 				ecne->tsn = htonl(high_tsn);
11927 				SCTP_STAT_INCR(sctps_queue_upd_ecne);
11928 			}
11929 			cnt = ntohl(ecne->num_pkts_since_cwr);
11930 			cnt++;
11931 			ecne->num_pkts_since_cwr = htonl(cnt);
11932 			return;
11933 		}
11934 	}
11935 	/* nope, could not find one to update, so we must build one */
11936 	sctp_alloc_a_chunk(stcb, chk);
11937 	if (chk == NULL) {
11938 		return;
11939 	}
11940 	SCTP_STAT_INCR(sctps_queue_upd_ecne);
11941 	chk->copy_by_ref = 0;
11942 	chk->rec.chunk_id.id = SCTP_ECN_ECHO;
11943 	chk->rec.chunk_id.can_take_data = 0;
11944 	chk->flags = 0;
11945 	chk->asoc = &stcb->asoc;
11946 	chk->send_size = sizeof(struct sctp_ecne_chunk);
11947 	chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
11948 	if (chk->data == NULL) {
11949 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11950 		return;
11951 	}
11952 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11953 	SCTP_BUF_LEN(chk->data) = chk->send_size;
11954 	chk->sent = SCTP_DATAGRAM_UNSENT;
11955 	chk->snd_count = 0;
11956 	chk->whoTo = net;
11957 	atomic_add_int(&chk->whoTo->ref_count, 1);
11958 
11959 	stcb->asoc.ecn_echo_cnt_onq++;
11960 	ecne = mtod(chk->data, struct sctp_ecne_chunk *);
11961 	ecne->ch.chunk_type = SCTP_ECN_ECHO;
11962 	ecne->ch.chunk_flags = 0;
11963 	ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk));
11964 	ecne->tsn = htonl(high_tsn);
11965 	ecne->num_pkts_since_cwr = htonl(1);
11966 	TAILQ_INSERT_HEAD(&stcb->asoc.control_send_queue, chk, sctp_next);
11967 	asoc->ctrl_queue_cnt++;
11968 }
11969 
11970 void
11971 sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
11972     struct mbuf *m, int len, int iphlen, int bad_crc)
11973 {
11974 	struct sctp_association *asoc;
11975 	struct sctp_pktdrop_chunk *drp;
11976 	struct sctp_tmit_chunk *chk;
11977 	uint8_t *datap;
11978 	int was_trunc = 0;
11979 	int fullsz = 0;
11980 	long spc;
11981 	int offset;
11982 	struct sctp_chunkhdr *ch, chunk_buf;
11983 	unsigned int chk_length;
11984 
11985 	if (!stcb) {
11986 		return;
11987 	}
11988 	asoc = &stcb->asoc;
11989 	SCTP_TCB_LOCK_ASSERT(stcb);
11990 	if (asoc->pktdrop_supported == 0) {
11991 		/*-
11992 		 * peer must declare support before I send one.
11993 		 */
11994 		return;
11995 	}
11996 	if (stcb->sctp_socket == NULL) {
11997 		return;
11998 	}
11999 	sctp_alloc_a_chunk(stcb, chk);
12000 	if (chk == NULL) {
12001 		return;
12002 	}
12003 	chk->copy_by_ref = 0;
12004 	chk->rec.chunk_id.id = SCTP_PACKET_DROPPED;
12005 	chk->rec.chunk_id.can_take_data = 1;
12006 	chk->flags = 0;
12007 	len -= iphlen;
12008 	chk->send_size = len;
12009 	/* Validate that we do not have an ABORT in here. */
12010 	offset = iphlen + sizeof(struct sctphdr);
12011 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
12012 						   sizeof(*ch), (uint8_t *) & chunk_buf);
12013 	while (ch != NULL) {
12014 		chk_length = ntohs(ch->chunk_length);
12015 		if (chk_length < sizeof(*ch)) {
12016 			/* break to abort land */
12017 			break;
12018 		}
12019 		switch (ch->chunk_type) {
12020 		case SCTP_PACKET_DROPPED:
12021 		case SCTP_ABORT_ASSOCIATION:
12022 		case SCTP_INITIATION_ACK:
12023 			/*-
12024 			 * We don't respond with a PKT-DROP to an ABORT
12025 			 * or PKT-DROP. We also do not respond to an
12026 			 * INIT-ACK, because we can't know if the initiation
12027 			 * tag is correct or not.
12028 			 */
12029 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
12030 			return;
12031 		default:
12032 			break;
12033 		}
12034 		offset += SCTP_SIZE32(chk_length);
12035 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
12036 		    sizeof(*ch), (uint8_t *) & chunk_buf);
12037 	}
12038 
12039 	if ((len + SCTP_MAX_OVERHEAD + sizeof(struct sctp_pktdrop_chunk)) >
12040 	    min(stcb->asoc.smallest_mtu, MCLBYTES)) {
12041 		/* only send 1 mtu worth, trim off the
12042 		 * excess on the end.
12043 		 */
12044 		fullsz = len;
12045 		len = min(stcb->asoc.smallest_mtu, MCLBYTES) - SCTP_MAX_OVERHEAD;
12046 		was_trunc = 1;
12047 	}
12048 	chk->asoc = &stcb->asoc;
12049 	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
12050 	if (chk->data == NULL) {
12051 jump_out:
12052 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
12053 		return;
12054 	}
12055 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12056 	drp = mtod(chk->data, struct sctp_pktdrop_chunk *);
12057 	if (drp == NULL) {
12058 		sctp_m_freem(chk->data);
12059 		chk->data = NULL;
12060 		goto jump_out;
12061 	}
12062 	chk->book_size = SCTP_SIZE32((chk->send_size + sizeof(struct sctp_pktdrop_chunk) +
12063 	    sizeof(struct sctphdr) + SCTP_MED_OVERHEAD));
12064 	chk->book_size_scale = 0;
12065 	if (was_trunc) {
12066 		drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED;
12067 		drp->trunc_len = htons(fullsz);
12068 		/* Len is already adjusted to size minus overhead above;
12069 		 * take out the pkt_drop chunk itself from it.
12070 		 */
12071 		chk->send_size = len - sizeof(struct sctp_pktdrop_chunk);
12072 		len = chk->send_size;
12073 	} else {
12074 		/* no truncation needed */
12075 		drp->ch.chunk_flags = 0;
12076 		drp->trunc_len = htons(0);
12077 	}
12078 	if (bad_crc) {
12079 		drp->ch.chunk_flags |= SCTP_BADCRC;
12080 	}
12081 	chk->send_size += sizeof(struct sctp_pktdrop_chunk);
12082 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12083 	chk->sent = SCTP_DATAGRAM_UNSENT;
12084 	chk->snd_count = 0;
12085 	if (net) {
12086 		/* we should hit here */
12087 		chk->whoTo = net;
12088 		atomic_add_int(&chk->whoTo->ref_count, 1);
12089 	} else {
12090 		chk->whoTo = NULL;
12091 	}
12092 	drp->ch.chunk_type = SCTP_PACKET_DROPPED;
12093 	drp->ch.chunk_length = htons(chk->send_size);
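	/*-
	 * Advertise our receive socket buffer limit as bottle_bw and report
	 * how much data we are currently holding in current_onq.
	 */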
12094 	spc = SCTP_SB_LIMIT_RCV(stcb->sctp_socket);
12095 	if (spc < 0) {
12096 		spc = 0;
12097 	}
12098 	drp->bottle_bw = htonl(spc);
12099 	if (asoc->my_rwnd) {
12100 		drp->current_onq = htonl(asoc->size_on_reasm_queue +
12101 		    asoc->size_on_all_streams +
12102 		    asoc->my_rwnd_control_len +
12103 		    stcb->sctp_socket->so_rcv.sb_cc);
12104 	} else {
12105 		/*-
12106 		 * If my rwnd is 0, possibly from mbuf depletion as well as
12107 		 * space used, tell the peer there is NO space aka onq == bw
12108 		 */
12109 		drp->current_onq = htonl(spc);
12110 	}
12111 	drp->reserved = 0;
12112 	datap = drp->data;
12113 	m_copydata(m, iphlen, len, (caddr_t)datap);
12114 	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
12115 	asoc->ctrl_queue_cnt++;
12116 }
12117 
12118 void
12119 sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn, uint8_t override)
12120 {
12121 	struct sctp_association *asoc;
12122 	struct sctp_cwr_chunk *cwr;
12123 	struct sctp_tmit_chunk *chk;
12124 
12125 	SCTP_TCB_LOCK_ASSERT(stcb);
12126 	if (net == NULL) {
12127 		return;
12128 	}
12129 	asoc = &stcb->asoc;
12130 	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
12131 		if ((chk->rec.chunk_id.id == SCTP_ECN_CWR) && (net == chk->whoTo)) {
12132 			/* found a previous CWR queued to the same destination, update it if needed */
12133 			uint32_t ctsn;
12134 			cwr = mtod(chk->data, struct sctp_cwr_chunk *);
12135 			ctsn = ntohl(cwr->tsn);
12136 			if (SCTP_TSN_GT(high_tsn, ctsn)) {
12137 				cwr->tsn = htonl(high_tsn);
12138 			}
12139 			if (override & SCTP_CWR_REDUCE_OVERRIDE) {
12140 				/* Make sure override is carried */
12141 				cwr->ch.chunk_flags |= SCTP_CWR_REDUCE_OVERRIDE;
12142 			}
12143 			return;
12144 		}
12145 	}
12146 	sctp_alloc_a_chunk(stcb, chk);
12147 	if (chk == NULL) {
12148 		return;
12149 	}
12150 	chk->copy_by_ref = 0;
12151 	chk->rec.chunk_id.id = SCTP_ECN_CWR;
12152 	chk->rec.chunk_id.can_take_data = 1;
12153 	chk->flags = 0;
12154 	chk->asoc = &stcb->asoc;
12155 	chk->send_size = sizeof(struct sctp_cwr_chunk);
12156 	chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
12157 	if (chk->data == NULL) {
12158 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
12159 		return;
12160 	}
12161 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12162 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12163 	chk->sent = SCTP_DATAGRAM_UNSENT;
12164 	chk->snd_count = 0;
12165 	chk->whoTo = net;
12166 	atomic_add_int(&chk->whoTo->ref_count, 1);
12167 	cwr = mtod(chk->data, struct sctp_cwr_chunk *);
12168 	cwr->ch.chunk_type = SCTP_ECN_CWR;
12169 	cwr->ch.chunk_flags = override;
12170 	cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk));
12171 	cwr->tsn = htonl(high_tsn);
12172 	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
12173 	asoc->ctrl_queue_cnt++;
12174 }
12175 
12176 void
12177 sctp_add_stream_reset_out(struct sctp_tmit_chunk *chk,
12178                           int number_entries, uint16_t * list,
12179                           uint32_t seq, uint32_t resp_seq, uint32_t last_sent)
12180 {
12181 	uint16_t len, old_len, i;
12182 	struct sctp_stream_reset_out_request *req_out;
12183 	struct sctp_chunkhdr *ch;
12184 
12185 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12186 	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12187 
12188 	/* get to new offset for the param. */
12189 	req_out = (struct sctp_stream_reset_out_request *)((caddr_t)ch + len);
12190 	/* now how long will this param be? */
12191 	len = (sizeof(struct sctp_stream_reset_out_request) + (sizeof(uint16_t) * number_entries));
12192 	req_out->ph.param_type = htons(SCTP_STR_RESET_OUT_REQUEST);
12193 	req_out->ph.param_length = htons(len);
12194 	req_out->request_seq = htonl(seq);
12195 	req_out->response_seq = htonl(resp_seq);
12196 	req_out->send_reset_at_tsn = htonl(last_sent);
12197 	if (number_entries) {
12198 		for (i = 0; i < number_entries; i++) {
12199 			req_out->list_of_streams[i] = htons(list[i]);
12200 		}
12201 	}
12202 	if (SCTP_SIZE32(len) > len) {
12203 		/*-
12204 		 * Need to worry about the pad we may end up adding to the
12205 		 * end. This is easy since the struct is either aligned to 4
12206 		 * bytes or 2 bytes off.
12207 		 */
12208 		req_out->list_of_streams[number_entries] = 0;
12209 	}
12210 	/* now fix the chunk length */
12211 	ch->chunk_length = htons(len + old_len);
12212 	chk->book_size = len + old_len;
12213 	chk->book_size_scale = 0;
12214 	chk->send_size = SCTP_SIZE32(chk->book_size);
12215 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12216 	return;
12217 }
12218 
12219 static void
12220 sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk,
12221                          int number_entries, uint16_t *list,
12222                          uint32_t seq)
12223 {
12224 	uint16_t len, old_len, i;
12225 	struct sctp_stream_reset_in_request *req_in;
12226 	struct sctp_chunkhdr *ch;
12227 
12228 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12229 	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12230 
12231 	/* get to new offset for the param. */
12232 	req_in = (struct sctp_stream_reset_in_request *)((caddr_t)ch + len);
12233 	/* now how long will this param be? */
12234 	len = (sizeof(struct sctp_stream_reset_in_request) + (sizeof(uint16_t) * number_entries));
12235 	req_in->ph.param_type = htons(SCTP_STR_RESET_IN_REQUEST);
12236 	req_in->ph.param_length = htons(len);
12237 	req_in->request_seq = htonl(seq);
12238 	if (number_entries) {
12239 		for (i = 0; i < number_entries; i++) {
12240 			req_in->list_of_streams[i] = htons(list[i]);
12241 		}
12242 	}
12243 	if (SCTP_SIZE32(len) > len) {
12244 		/*-
12245 		 * Need to worry about the pad we may end up adding to the
12246 		 * end. This is easy since the struct is either aligned to 4
12247 		 * bytes or 2 bytes off.
12248 		 */
12249 		req_in->list_of_streams[number_entries] = 0;
12250 	}
12251 	/* now fix the chunk length */
12252 	ch->chunk_length = htons(len + old_len);
12253 	chk->book_size = len + old_len;
12254 	chk->book_size_scale = 0;
12255 	chk->send_size = SCTP_SIZE32(chk->book_size);
12256 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12257 	return;
12258 }
12259 
12260 static void
12261 sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk,
12262                           uint32_t seq)
12263 {
12264 	uint16_t len, old_len;
12265 	struct sctp_stream_reset_tsn_request *req_tsn;
12266 	struct sctp_chunkhdr *ch;
12267 
12268 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12269 	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12270 
12271 	/* get to new offset for the param. */
12272 	req_tsn = (struct sctp_stream_reset_tsn_request *)((caddr_t)ch + len);
12273 	/* now how long will this param be? */
12274 	len = sizeof(struct sctp_stream_reset_tsn_request);
12275 	req_tsn->ph.param_type = htons(SCTP_STR_RESET_TSN_REQUEST);
12276 	req_tsn->ph.param_length = htons(len);
12277 	req_tsn->request_seq = htonl(seq);
12278 
12279 	/* now fix the chunk length */
12280 	ch->chunk_length = htons(len + old_len);
12281 	chk->send_size = len + old_len;
12282 	chk->book_size = SCTP_SIZE32(chk->send_size);
12283 	chk->book_size_scale = 0;
12284 	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
12285 	return;
12286 }
12287 
12288 void
12289 sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk,
12290                              uint32_t resp_seq, uint32_t result)
12291 {
12292 	uint16_t len, old_len;
12293 	struct sctp_stream_reset_response *resp;
12294 	struct sctp_chunkhdr *ch;
12295 
12296 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12297 	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12298 
12299 	/* get to new offset for the param. */
12300 	resp = (struct sctp_stream_reset_response *)((caddr_t)ch + len);
12301 	/* now how long will this param be? */
12302 	len = sizeof(struct sctp_stream_reset_response);
12303 	resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
12304 	resp->ph.param_length = htons(len);
12305 	resp->response_seq = htonl(resp_seq);
12306 	resp->result = htonl(result);
12307 
12308 	/* now fix the chunk length */
12309 	ch->chunk_length = htons(len + old_len);
12310 	chk->book_size = len + old_len;
12311 	chk->book_size_scale = 0;
12312 	chk->send_size = SCTP_SIZE32(chk->book_size);
12313 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12314 	return;
12315 }
12316 
12317 void
12318 sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk,
12319                                  uint32_t resp_seq, uint32_t result,
12320                                  uint32_t send_una, uint32_t recv_next)
12321 {
12322 	uint16_t len, old_len;
12323 	struct sctp_stream_reset_response_tsn *resp;
12324 	struct sctp_chunkhdr *ch;
12325 
12326 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12327 	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12328 
12329 	/* get to new offset for the param. */
12330 	resp = (struct sctp_stream_reset_response_tsn *)((caddr_t)ch + len);
12331 	/* now how long will this param be? */
12332 	len = sizeof(struct sctp_stream_reset_response_tsn);
12333 	resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
12334 	resp->ph.param_length = htons(len);
12335 	resp->response_seq = htonl(resp_seq);
12336 	resp->result = htonl(result);
12337 	resp->senders_next_tsn = htonl(send_una);
12338 	resp->receivers_next_tsn = htonl(recv_next);
12339 
12340 	/* now fix the chunk length */
12341 	ch->chunk_length = htons(len + old_len);
12342 	chk->book_size = len + old_len;
12343 	chk->send_size = SCTP_SIZE32(chk->book_size);
12344 	chk->book_size_scale = 0;
12345 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12346 	return;
12347 }
12348 
12349 static void
12350 sctp_add_an_out_stream(struct sctp_tmit_chunk *chk,
12351 		       uint32_t seq,
12352 		       uint16_t adding)
12353 {
12354 	uint16_t len, old_len;
12355 	struct sctp_chunkhdr *ch;
12356 	struct sctp_stream_reset_add_strm *addstr;
12357 
12358 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12359 	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12360 
12361 	/* get to new offset for the param. */
12362 	addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
12363 	/* now how long will this param be? */
12364 	len = sizeof(struct sctp_stream_reset_add_strm);
12365 
12366 	/* Fill it out. */
12367 	addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_OUT_STREAMS);
12368 	addstr->ph.param_length = htons(len);
12369 	addstr->request_seq = htonl(seq);
12370 	addstr->number_of_streams = htons(adding);
12371 	addstr->reserved = 0;
12372 
12373 	/* now fix the chunk length */
12374 	ch->chunk_length = htons(len + old_len);
12375 	chk->send_size = len + old_len;
12376 	chk->book_size = SCTP_SIZE32(chk->send_size);
12377 	chk->book_size_scale = 0;
12378 	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
12379 	return;
12380 }
12381 
12382 static void
12383 sctp_add_an_in_stream(struct sctp_tmit_chunk *chk,
12384                       uint32_t seq,
12385                       uint16_t adding)
12386 {
12387 	uint16_t len, old_len;
12388 	struct sctp_chunkhdr *ch;
12389 	struct sctp_stream_reset_add_strm *addstr;
12390 
12391 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12392 	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12393 
12394 	/* get to new offset for the param. */
12395 	addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
12396 	/* now how long will this param be? */
12397 	len = sizeof(struct sctp_stream_reset_add_strm);
12398 	/* Fill it out. */
12399 	addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_IN_STREAMS);
12400 	addstr->ph.param_length = htons(len);
12401 	addstr->request_seq = htonl(seq);
12402 	addstr->number_of_streams = htons(adding);
12403 	addstr->reserved = 0;
12404 
12405 	/* now fix the chunk length */
12406 	ch->chunk_length = htons(len + old_len);
12407 	chk->send_size = len + old_len;
12408 	chk->book_size = SCTP_SIZE32(chk->send_size);
12409 	chk->book_size_scale = 0;
12410 	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
12411 	return;
12412 }
12413 
12414 int
12415 sctp_send_str_reset_req(struct sctp_tcb *stcb,
12416                         uint16_t number_entries, uint16_t *list,
12417                         uint8_t send_out_req,
12418                         uint8_t send_in_req,
12419                         uint8_t send_tsn_req,
12420                         uint8_t add_stream,
12421                         uint16_t adding_o,
12422                         uint16_t adding_i, uint8_t peer_asked)
12423 {
12424 
12425 	struct sctp_association *asoc;
12426 	struct sctp_tmit_chunk *chk;
12427 	struct sctp_chunkhdr *ch;
12428 	uint32_t seq;
12429 
12430 	asoc = &stcb->asoc;
12431 	if (asoc->stream_reset_outstanding) {
12432 		/*-
12433 		 * Already one pending, must get ACK back to clear the flag.
12434 		 */
12435 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EBUSY);
12436 		return (EBUSY);
12437 	}
12438 	if ((send_out_req == 0) && (send_in_req == 0) && (send_tsn_req == 0) &&
12439 	    (add_stream == 0)) {
12440 		/* nothing to do */
12441 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12442 		return (EINVAL);
12443 	}
12444 	if (send_tsn_req && (send_out_req || send_in_req)) {
12445 		/* error, can't do that */
12446 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12447 		return (EINVAL);
12448 	}
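	/*-
	 * Refuse a request whose stream list cannot fit, together with the
	 * chunk and parameter headers and packet overhead, into a single
	 * cluster mbuf.
	 */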
12449 	if (number_entries > (MCLBYTES -
12450 	                      SCTP_MIN_OVERHEAD -
12451 	                      sizeof(struct sctp_chunkhdr) -
12452 	                      sizeof(struct sctp_stream_reset_out_request)) /
12453 	                     sizeof(uint16_t)) {
12454 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12455 		return (ENOMEM);
12456 	}
12457 	sctp_alloc_a_chunk(stcb, chk);
12458 	if (chk == NULL) {
12459 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12460 		return (ENOMEM);
12461 	}
12462 	chk->copy_by_ref = 0;
12463 	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
12464 	chk->rec.chunk_id.can_take_data = 0;
12465 	chk->flags = 0;
12466 	chk->asoc = &stcb->asoc;
12467 	chk->book_size = sizeof(struct sctp_chunkhdr);
12468 	chk->send_size = SCTP_SIZE32(chk->book_size);
12469 	chk->book_size_scale = 0;
12470 
12471 	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
12472 	if (chk->data == NULL) {
12473 		sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED);
12474 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12475 		return (ENOMEM);
12476 	}
12477 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12478 
12479 	/* setup chunk parameters */
12480 	chk->sent = SCTP_DATAGRAM_UNSENT;
12481 	chk->snd_count = 0;
12482 	if (stcb->asoc.alternate) {
12483 		chk->whoTo = stcb->asoc.alternate;
12484 	} else {
12485 		chk->whoTo = stcb->asoc.primary_destination;
12486 	}
12487 	atomic_add_int(&chk->whoTo->ref_count, 1);
12488 	ch = mtod(chk->data, struct sctp_chunkhdr *);
12489 	ch->chunk_type = SCTP_STREAM_RESET;
12490 	ch->chunk_flags = 0;
12491 	ch->chunk_length = htons(chk->book_size);
12492 	SCTP_BUF_LEN(chk->data) = chk->send_size;
12493 
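	/*
	 * The chunk currently holds only the chunk header.  Each of the
	 * sctp_add_*() helpers below appends one request parameter and
	 * fixes up the chunk length, send_size and mbuf length.
	 */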
12494 	seq = stcb->asoc.str_reset_seq_out;
12495 	if (send_out_req) {
12496 		sctp_add_stream_reset_out(chk, number_entries, list,
12497 					  seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1));
12498 		asoc->stream_reset_out_is_outstanding = 1;
12499 		seq++;
12500 		asoc->stream_reset_outstanding++;
12501 	}
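	/*
	 * add_stream is a bit mask: bit 0 requests additional outgoing
	 * streams (adding_o), bit 1 requests additional incoming streams
	 * (adding_i).
	 */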
12502 	if ((add_stream & 1) &&
12503 	    ((stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt) < adding_o)) {
12504 		/* Need to allocate more */
12505 		struct sctp_stream_out *oldstream;
12506 		struct sctp_stream_queue_pending *sp, *nsp;
12507 		int i;
12508 #if defined(SCTP_DETAILED_STR_STATS)
12509 		int j;
12510 #endif
12511 
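		/*
		 * Grow the outgoing stream array: allocate a larger array,
		 * migrate the existing streams (including any queued data)
		 * into it, then initialize the newly added streams.
		 */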
12512 		oldstream = stcb->asoc.strmout;
12513 		/* get some more */
12514 		SCTP_MALLOC(stcb->asoc.strmout, struct sctp_stream_out *,
12515 			    ((stcb->asoc.streamoutcnt+adding_o) * sizeof(struct sctp_stream_out)),
12516 			    SCTP_M_STRMO);
12517 		if (stcb->asoc.strmout == NULL) {
12518 			uint8_t x;
12519 			stcb->asoc.strmout = oldstream;
12520 			/* Turn off the bit */
12521 			x = add_stream & 0xfe;
12522 			add_stream = x;
12523 			goto skip_stuff;
12524 		}
12525 		/* Ok, now we proceed with copying over the old outgoing streams
12526 		 * and initializing the new ones.
12527 		 */
12528 		SCTP_TCB_SEND_LOCK(stcb);
12529 		stcb->asoc.ss_functions.sctp_ss_clear(stcb, &stcb->asoc, 0, 1);
12530 		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
12531 			TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
12532 			stcb->asoc.strmout[i].chunks_on_queues = oldstream[i].chunks_on_queues;
12533 			stcb->asoc.strmout[i].next_sequence_send = oldstream[i].next_sequence_send;
12534 			stcb->asoc.strmout[i].last_msg_incomplete = oldstream[i].last_msg_incomplete;
12535 			stcb->asoc.strmout[i].stream_no = i;
12536 			stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], &oldstream[i]);
12537 			/* now anything on those queues? */
12538 			TAILQ_FOREACH_SAFE(sp, &oldstream[i].outqueue, next, nsp) {
12539 				TAILQ_REMOVE(&oldstream[i].outqueue, sp, next);
12540 				TAILQ_INSERT_TAIL(&stcb->asoc.strmout[i].outqueue, sp, next);
12541 			}
12542 			/* Now move assoc pointers too */
12543 			if (stcb->asoc.last_out_stream == &oldstream[i]) {
12544 				stcb->asoc.last_out_stream = &stcb->asoc.strmout[i];
12545 			}
12546 			if (stcb->asoc.locked_on_sending == &oldstream[i]) {
12547 				stcb->asoc.locked_on_sending = &stcb->asoc.strmout[i];
12548 			}
12549 		}
12550 		/* now the new streams */
12551 		stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 1);
12552 		for (i = stcb->asoc.streamoutcnt; i < (stcb->asoc.streamoutcnt + adding_o); i++) {
12553 			TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
12554 			stcb->asoc.strmout[i].chunks_on_queues = 0;
12555 #if defined(SCTP_DETAILED_STR_STATS)
12556 			for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
12557 				stcb->asoc.strmout[i].abandoned_sent[j] = 0;
12558 				stcb->asoc.strmout[i].abandoned_unsent[j] = 0;
12559 			}
12560 #else
12561 			stcb->asoc.strmout[i].abandoned_sent[0] = 0;
12562 			stcb->asoc.strmout[i].abandoned_unsent[0] = 0;
12563 #endif
12564 			stcb->asoc.strmout[i].next_sequence_send = 0x0;
12565 			stcb->asoc.strmout[i].stream_no = i;
12566 			stcb->asoc.strmout[i].last_msg_incomplete = 0;
12567 			stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], NULL);
12568 		}
12569 		stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt + adding_o;
12570 		SCTP_FREE(oldstream, SCTP_M_STRMO);
12571 		SCTP_TCB_SEND_UNLOCK(stcb);
12572 	}
12573 skip_stuff:
12574 	if ((add_stream & 1) && (adding_o > 0)) {
12575 		asoc->strm_pending_add_size = adding_o;
12576 		asoc->peer_req_out = peer_asked;
12577 		sctp_add_an_out_stream(chk, seq, adding_o);
12578 		seq++;
12579 		asoc->stream_reset_outstanding++;
12580 	}
12581 	if ((add_stream & 2) && (adding_i > 0)) {
12582 		sctp_add_an_in_stream(chk, seq, adding_i);
12583 		seq++;
12584 		asoc->stream_reset_outstanding++;
12585 	}
12586 	if (send_in_req) {
12587 		sctp_add_stream_reset_in(chk, number_entries, list, seq);
12588 		seq++;
12589 		asoc->stream_reset_outstanding++;
12590 	}
12591 	if (send_tsn_req) {
12592 		sctp_add_stream_reset_tsn(chk, seq);
12593 		asoc->stream_reset_outstanding++;
12594 	}
12595 	asoc->str_reset = chk;
12596 	/* insert the chunk for sending */
12597 	TAILQ_INSERT_TAIL(&asoc->control_send_queue,
12598 			  chk,
12599 			  sctp_next);
12600 	asoc->ctrl_queue_cnt++;
12601 	sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
12602 	return (0);
12603 }
12604 
12605 void
12606 sctp_send_abort(struct mbuf *m, int iphlen, struct sockaddr *src, struct sockaddr *dst,
12607                 struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
12608 #if defined(__FreeBSD__)
12609                 uint8_t mflowtype, uint32_t mflowid,
12610 #endif
12611                 uint32_t vrf_id, uint16_t port)
12612 {
12613 	/* Don't respond to an ABORT with an ABORT. */
12614 	if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) {
12615 		if (cause)
12616 			sctp_m_freem(cause);
12617 		return;
12618 	}
12619 	sctp_send_resp_msg(src, dst, sh, vtag, SCTP_ABORT_ASSOCIATION, cause,
12620 #if defined(__FreeBSD__)
12621 	                   mflowtype, mflowid,
12622 #endif
12623 	                   vrf_id, port);
12624 	return;
12625 }
12626 
12627 void
12628 sctp_send_operr_to(struct sockaddr *src, struct sockaddr *dst,
12629                    struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
12630 #if defined(__FreeBSD__)
12631                    uint8_t mflowtype, uint32_t mflowid,
12632 #endif
12633                    uint32_t vrf_id, uint16_t port)
12634 {
12635 	sctp_send_resp_msg(src, dst, sh, vtag, SCTP_OPERATION_ERROR, cause,
12636 #if defined(__FreeBSD__)
12637 	                   mflowtype, mflowid,
12638 #endif
12639 	                   vrf_id, port);
12640 	return;
12641 }
12642 
12643 static struct mbuf *
12644 sctp_copy_resume(struct uio *uio,
12645 		 int max_send_len,
12646 #if defined(__FreeBSD__) && __FreeBSD_version > 602000
12647 		 int user_marks_eor,
12648 #endif
12649 		 int *error,
12650 		 uint32_t *sndout,
12651 		 struct mbuf **new_tail)
12652 {
12653 #if defined(__Panda__)
12654 	struct mbuf *m;
12655 
12656 	m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0,
12657 			(user_marks_eor ? M_EOR : 0));
12658 	if (m == NULL) {
12659 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
12660 		*error = ENOBUFS;
12661 	} else {
12662 		*sndout = m_length(m, NULL);
12663 		*new_tail = m_last(m);
12664 	}
12665 	return (m);
12666 #elif defined(__FreeBSD__) && __FreeBSD_version > 602000
12667 	struct mbuf *m;
12668 
12669 	m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0,
12670 		(M_PKTHDR | (user_marks_eor ? M_EOR : 0)));
12671 	if (m == NULL) {
12672 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
12673 		*error = ENOBUFS;
12674 	} else {
12675 		*sndout = m_length(m, NULL);
12676 		*new_tail = m_last(m);
12677 	}
12678 	return (m);
12679 #else
12680 	int left, cancpy, willcpy;
12681 	struct mbuf *m, *head;
12682 
12683 #if defined(__APPLE__)
12684 #if defined(APPLE_LEOPARD)
12685 	left = min(uio->uio_resid, max_send_len);
12686 #else
12687 	left = min(uio_resid(uio), max_send_len);
12688 #endif
12689 #else
12690 	left = min(uio->uio_resid, max_send_len);
12691 #endif
12692 	/* Always get a header just in case */
12693 	head = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA);
12694 	if (head == NULL) {
12695 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
12696 		*error = ENOBUFS;
12697 		return (NULL);
12698 	}
12699 	cancpy = M_TRAILINGSPACE(head);
12700 	willcpy = min(cancpy, left);
12701 	*error = uiomove(mtod(head, caddr_t), willcpy, uio);
12702 	if (*error) {
12703 		sctp_m_freem(head);
12704 		return (NULL);
12705 	}
12706 	*sndout += willcpy;
12707 	left -= willcpy;
12708 	SCTP_BUF_LEN(head) = willcpy;
12709 	m = head;
12710 	*new_tail = head;
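	/*
	 * Copy the rest of the user data into additional mbufs, linking
	 * each one onto the chain and advancing the tail pointer.
	 */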
12711 	while (left > 0) {
12712 		/* move in user data */
12713 		SCTP_BUF_NEXT(m) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA);
12714 		if (SCTP_BUF_NEXT(m) == NULL) {
12715 			sctp_m_freem(head);
12716 			*new_tail = NULL;
12717 			SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
12718 			*error = ENOBUFS;
12719 			return (NULL);
12720 		}
12721 		m = SCTP_BUF_NEXT(m);
12722 		cancpy = M_TRAILINGSPACE(m);
12723 		willcpy = min(cancpy, left);
12724 		*error = uiomove(mtod(m, caddr_t), willcpy, uio);
12725 		if (*error) {
12726 			sctp_m_freem(head);
12727 			*new_tail = NULL;
12728 			SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
12729 			*error = EFAULT;
12730 			return (NULL);
12731 		}
12732 		SCTP_BUF_LEN(m) = willcpy;
12733 		left -= willcpy;
12734 		*sndout += willcpy;
12735 		*new_tail = m;
12736 		if (left == 0) {
12737 			SCTP_BUF_NEXT(m) = NULL;
12738 		}
12739 	}
12740 	return (head);
12741 #endif
12742 }
12743 
12744 static int
12745 sctp_copy_one(struct sctp_stream_queue_pending *sp,
12746               struct uio *uio,
12747               int resv_upfront)
12748 {
12749 	int left;
12750 #if defined(__Panda__)
12751 	left = sp->length;
12752 	sp->data = m_uiotombuf(uio, M_WAITOK, sp->length,
12753 	                       resv_upfront, 0);
12754 	if (sp->data == NULL) {
12755 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
12756 		return (ENOBUFS);
12757 	}
12758 
12759 	sp->tail_mbuf = m_last(sp->data);
12760 	return (0);
12761 
12762 #elif defined(__FreeBSD__) && __FreeBSD_version > 602000
12763 	left = sp->length;
12764 	sp->data = m_uiotombuf(uio, M_WAITOK, sp->length,
12765 	                       resv_upfront, 0);
12766 	if (sp->data == NULL) {
12767 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
12768 		return (ENOBUFS);
12769 	}
12770 
12771 	sp->tail_mbuf = m_last(sp->data);
12772 	return (0);
12773 #else
12774 	int cancpy, willcpy, error;
12775 	struct mbuf *m, *head;
12776 	int cpsz = 0;
12777 
12778 	/* First one gets a header */
12779 	left = sp->length;
12780 	head = m = sctp_get_mbuf_for_msg((left + resv_upfront), 0, M_WAITOK, 0, MT_DATA);
12781 	if (m == NULL) {
12782 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
12783 		return (ENOBUFS);
12784 	}
12785 	/*-
12786 	 * Reserve the upfront space in this first mbuf now, so that if a
12787 	 * later allocation fails we won't have a bad count.
12788 	 */
12789 	SCTP_BUF_RESV_UF(m, resv_upfront);
12790 	cancpy = M_TRAILINGSPACE(m);
12791 	willcpy = min(cancpy, left);
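	/*
	 * Copy the user data in mbuf-sized pieces, allocating further
	 * mbufs (without the upfront reservation) as needed.
	 */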
12792 	while (left > 0) {
12793 		/* move in user data */
12794 		error = uiomove(mtod(m, caddr_t), willcpy, uio);
12795 		if (error) {
12796 			sctp_m_freem(head);
12797 			return (error);
12798 		}
12799 		SCTP_BUF_LEN(m) = willcpy;
12800 		left -= willcpy;
12801 		cpsz += willcpy;
12802 		if (left > 0) {
12803 			SCTP_BUF_NEXT(m) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA);
12804 			if (SCTP_BUF_NEXT(m) == NULL) {
12805 				/*
12806 				 * Allocation failed: free the whole chain
12807 				 * and report the error to the caller.
12808 				 */
12809 				sctp_m_freem(head);
12810 				SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
12811 				return (ENOBUFS);
12812 			}
12813 			m = SCTP_BUF_NEXT(m);
12814 			cancpy = M_TRAILINGSPACE(m);
12815 			willcpy = min(cancpy, left);
12816 		} else {
12817 			sp->tail_mbuf = m;
12818 			SCTP_BUF_NEXT(m) = NULL;
12819 		}
12820 	}
12821 	sp->data = head;
12822 	sp->length = cpsz;
12823 	return (0);
12824 #endif
12825 }
12826 
12827 
12828 
12829 static struct sctp_stream_queue_pending *
12830 sctp_copy_it_in(struct sctp_tcb *stcb,
12831     struct sctp_association *asoc,
12832     struct sctp_sndrcvinfo *srcv,
12833     struct uio *uio,
12834     struct sctp_nets *net,
12835     int max_send_len,
12836     int user_marks_eor,
12837     int *error)
12838 
12839 {
12840 	/*-
12841 	 * This routine must be very careful in its work. Protocol
12842 	 * processing is up and running, so care must be taken to spl...()
12843 	 * when you need to do something that may affect the stcb/asoc. The
12844 	 * sb is locked, however. While the data is being copied, protocol
12845 	 * processing should stay enabled, since copying is a slower operation...
12846 	 */
12847 	struct sctp_stream_queue_pending *sp = NULL;
12848 	int resv_in_first;
12849 
12850 	*error = 0;
12851 	/* Now can we send this? */
12852 	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
12853 	    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
12854 	    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
12855 	    (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
12856 		/* got data while shutting down */
12857 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12858 		*error = ECONNRESET;
12859 		goto out_now;
12860 	}
12861 	sctp_alloc_a_strmoq(stcb, sp);
12862 	if (sp == NULL) {
12863 		SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12864 		*error = ENOMEM;
12865 		goto out_now;
12866 	}
12867 	sp->act_flags = 0;
12868 	sp->sender_all_done = 0;
12869 	sp->sinfo_flags = srcv->sinfo_flags;
12870 	sp->timetolive = srcv->sinfo_timetolive;
12871 	sp->ppid = srcv->sinfo_ppid;
12872 	sp->context = srcv->sinfo_context;
12873 	(void)SCTP_GETTIME_TIMEVAL(&sp->ts);
12874 
12875 	sp->stream = srcv->sinfo_stream;
12876 #if defined(__APPLE__)
12877 #if defined(APPLE_LEOPARD)
12878 	sp->length = min(uio->uio_resid, max_send_len);
12879 #else
12880 	sp->length = min(uio_resid(uio), max_send_len);
12881 #endif
12882 #else
12883 	sp->length = min(uio->uio_resid, max_send_len);
12884 #endif
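	/*
	 * The message is complete if this copy takes all remaining user
	 * data and either explicit EOR is not in use, or the caller set
	 * SCTP_EOF or SCTP_EOR.
	 */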
12885 #if defined(__APPLE__)
12886 #if defined(APPLE_LEOPARD)
12887 	if ((sp->length == (uint32_t)uio->uio_resid) &&
12888 #else
12889 	if ((sp->length == (uint32_t)uio_resid(uio)) &&
12890 #endif
12891 #else
12892 	if ((sp->length == (uint32_t)uio->uio_resid) &&
12893 #endif
12894 	    ((user_marks_eor == 0) ||
12895 	     (srcv->sinfo_flags & SCTP_EOF) ||
12896 	     (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
12897 		sp->msg_is_complete = 1;
12898 	} else {
12899 		sp->msg_is_complete = 0;
12900 	}
12901 	sp->sender_all_done = 0;
12902 	sp->some_taken = 0;
12903 	sp->put_last_out = 0;
12904 	resv_in_first = sizeof(struct sctp_data_chunk);
12905 	sp->data = sp->tail_mbuf = NULL;
12906 	if (sp->length == 0) {
12907 		*error = 0;
12908 		goto skip_copy;
12909 	}
12910 	if (srcv->sinfo_keynumber_valid) {
12911 		sp->auth_keyid = srcv->sinfo_keynumber;
12912 	} else {
12913 		sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
12914 	}
12915 	if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
12916 		sctp_auth_key_acquire(stcb, sp->auth_keyid);
12917 		sp->holds_key_ref = 1;
12918 	}
12919 #if defined(__APPLE__)
12920 	SCTP_SOCKET_UNLOCK(SCTP_INP_SO(stcb->sctp_ep), 0);
12921 #endif
12922 	*error = sctp_copy_one(sp, uio, resv_in_first);
12923 #if defined(__APPLE__)
12924 	SCTP_SOCKET_LOCK(SCTP_INP_SO(stcb->sctp_ep), 0);
12925 #endif
12926  skip_copy:
12927 	if (*error) {
12928 		sctp_free_a_strmoq(stcb, sp, SCTP_SO_LOCKED);
12929 		sp = NULL;
12930 	} else {
12931 		if (sp->sinfo_flags & SCTP_ADDR_OVER) {
12932 			sp->net = net;
12933 			atomic_add_int(&sp->net->ref_count, 1);
12934 		} else {
12935 			sp->net = NULL;
12936 		}
12937 		sctp_set_prsctp_policy(sp);
12938 	}
12939 out_now:
12940 	return (sp);
12941 }
12942 
12943 
12944 int
12945 sctp_sosend(struct socket *so,
12946             struct sockaddr *addr,
12947             struct uio *uio,
12948 #ifdef __Panda__
12949             pakhandle_type top,
12950             pakhandle_type icontrol,
12951 #else
12952             struct mbuf *top,
12953             struct mbuf *control,
12954 #endif
12955 #if defined(__APPLE__) || defined(__Panda__)
12956             int flags
12957 #else
12958             int flags,
12959 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
12960             struct thread *p
12961 #elif defined(__Windows__)
12962             PKTHREAD p
12963 #else
12964 #if defined(__Userspace__)
12965             /*
12966 	     * proc is a dummy in __Userspace__ and will not be passed
12967 	     * to sctp_lower_sosend
12968 	     */
12969 #endif
12970             struct proc *p
12971 #endif
12972 #endif
12973 )
12974 {
12975 #ifdef __Panda__
12976 	struct mbuf *control = NULL;
12977 #endif
12978 #if defined(__APPLE__)
12979 	struct proc *p = current_proc();
12980 #endif
12981 	int error, use_sndinfo = 0;
12982 	struct sctp_sndrcvinfo sndrcvninfo;
12983 	struct sockaddr *addr_to_use;
12984 #if defined(INET) && defined(INET6)
12985 	struct sockaddr_in sin;
12986 #endif
12987 
12988 #if defined(__APPLE__)
12989 	SCTP_SOCKET_LOCK(so, 1);
12990 #endif
12991 #ifdef __Panda__
12992 	control = SCTP_HEADER_TO_CHAIN(icontrol);
12993 #endif
12994 	if (control) {
12995 		/* process cmsg snd/rcv info (maybe a assoc-id) */
12996 		if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&sndrcvninfo, control,
12997 		    sizeof(sndrcvninfo))) {
12998 			/* got one */
12999 			use_sndinfo = 1;
13000 		}
13001 	}
13002 	addr_to_use = addr;
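	/*
	 * If an IPv4-mapped IPv6 address was given, convert it to a plain
	 * IPv4 sockaddr before handing it to sctp_lower_sosend().
	 */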
13003 #if defined(INET) && defined(INET6)
13004 	if ((addr) && (addr->sa_family == AF_INET6)) {
13005 		struct sockaddr_in6 *sin6;
13006 
13007 		sin6 = (struct sockaddr_in6 *)addr;
13008 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
13009 			in6_sin6_2_sin(&sin, sin6);
13010 			addr_to_use = (struct sockaddr *)&sin;
13011 		}
13012 	}
13013 #endif
13014 	error = sctp_lower_sosend(so, addr_to_use, uio, top,
13015 #ifdef __Panda__
13016 				  icontrol,
13017 #else
13018 				  control,
13019 #endif
13020 				  flags,
13021 				  use_sndinfo ? &sndrcvninfo: NULL
13022 #if !(defined(__Panda__) || defined(__Userspace__))
13023 				  , p
13024 #endif
13025 		);
13026 #if defined(__APPLE__)
13027 	SCTP_SOCKET_UNLOCK(so, 1);
13028 #endif
13029 	return (error);
13030 }
13031 
13032 
13033 int
13034 sctp_lower_sosend(struct socket *so,
13035                   struct sockaddr *addr,
13036                   struct uio *uio,
13037 #ifdef __Panda__
13038                   pakhandle_type i_pak,
13039                   pakhandle_type i_control,
13040 #else
13041                   struct mbuf *i_pak,
13042                   struct mbuf *control,
13043 #endif
13044                   int flags,
13045                   struct sctp_sndrcvinfo *srcv
13046 #if !(defined( __Panda__) || defined(__Userspace__))
13047                   ,
13048 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
13049                   struct thread *p
13050 #elif defined(__Windows__)
13051                   PKTHREAD p
13052 #else
13053                   struct proc *p
13054 #endif
13055 #endif
13056 	)
13057 {
13058 	unsigned int sndlen = 0, max_len;
13059 	int error, len;
13060 	struct mbuf *top = NULL;
13061 #ifdef __Panda__
13062 	struct mbuf *control = NULL;
13063 #endif
13064 	int queue_only = 0, queue_only_for_init = 0;
13065 	int free_cnt_applied = 0;
13066 	int un_sent;
13067 	int now_filled = 0;
13068 	unsigned int inqueue_bytes = 0;
13069 	struct sctp_block_entry be;
13070 	struct sctp_inpcb *inp;
13071 	struct sctp_tcb *stcb = NULL;
13072 	struct timeval now;
13073 	struct sctp_nets *net;
13074 	struct sctp_association *asoc;
13075 	struct sctp_inpcb *t_inp;
13076 	int user_marks_eor;
13077 	int create_lock_applied = 0;
13078 	int nagle_applies = 0;
13079 	int some_on_control = 0;
13080 	int got_all_of_the_send = 0;
13081 	int hold_tcblock = 0;
13082 	int non_blocking = 0;
13083 	uint32_t local_add_more, local_soresv = 0;
13084 	uint16_t port;
13085 	uint16_t sinfo_flags;
13086 	sctp_assoc_t sinfo_assoc_id;
13087 
13088 	error = 0;
13089 	net = NULL;
13090 	stcb = NULL;
13091 	asoc = NULL;
13092 
13093 #if defined(__APPLE__)
13094 	sctp_lock_assert(so);
13095 #endif
13096 	t_inp = inp = (struct sctp_inpcb *)so->so_pcb;
13097 	if (inp == NULL) {
13098 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13099 		error = EINVAL;
13100 		if (i_pak) {
13101 			SCTP_RELEASE_PKT(i_pak);
13102 		}
13103 		return (error);
13104 	}
13105 	if ((uio == NULL) && (i_pak == NULL)) {
13106 		SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13107 		return (EINVAL);
13108 	}
13109 	user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
13110 	atomic_add_int(&inp->total_sends, 1);
13111 	if (uio) {
13112 #if defined(__APPLE__)
13113 #if defined(APPLE_LEOPARD)
13114 		if (uio->uio_resid < 0) {
13115 #else
13116 		if (uio_resid(uio) < 0) {
13117 #endif
13118 #else
13119 		if (uio->uio_resid < 0) {
13120 #endif
13121 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13122 			return (EINVAL);
13123 		}
13124 #if defined(__APPLE__)
13125 #if defined(APPLE_LEOPARD)
13126 		sndlen = uio->uio_resid;
13127 #else
13128 		sndlen = uio_resid(uio);
13129 #endif
13130 #else
13131 		sndlen = uio->uio_resid;
13132 #endif
13133 	} else {
13134 		top = SCTP_HEADER_TO_CHAIN(i_pak);
13135 #ifdef __Panda__
13136 		/*-
13137 		 * The app len indicates the data length; dgsize will not
13138 		 * have the right length for the SCTP_EOF/ABORT cases.
13139 		 */
13140 		sndlen = SCTP_APP_DATA_LEN(i_pak);
13141 		/*-
13142 		 * Set the particle len also to zero to match
13143 		 * up with app len. We only have one particle
13144 		 * if app len is zero for Panda. This is ensured
13145 		 * in the socket lib
13146 		 */
13147 		if (sndlen == 0) {
13148 			SCTP_BUF_LEN(top)  = 0;
13149 		}
13150 		/*-
13151 		 * We delink the chain from the header, but keep
13152 		 * the header around since we will need it in
13153 		 * the EAGAIN case.
13154 		 */
13155 		SCTP_DETACH_HEADER_FROM_CHAIN(i_pak);
13156 #else
13157 		sndlen = SCTP_HEADER_LEN(i_pak);
13158 #endif
13159 	}
13160 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "Send called addr:%p send length %d\n",
13161 		(void *)addr,
13162 	        sndlen);
13163 #ifdef __Panda__
13164 	if (i_control) {
13165 		control = SCTP_HEADER_TO_CHAIN(i_control);
13166 	}
13167 #endif
13168 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
13169 	    (inp->sctp_socket->so_qlimit)) {
13170 		/* The listener can NOT send */
13171 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
13172 		error = ENOTCONN;
13173 		goto out_unlocked;
13174 	}
13175 	/*-
13176 	 * Pre-screen the address: if one is given, its length field
13177 	 * (sin_len/sin6_len/sconn_len) must be set correctly!
13178 	 */
13179 	if (addr) {
13180 		union sctp_sockstore *raddr = (union sctp_sockstore *)addr;
13181 		switch (raddr->sa.sa_family) {
13182 #ifdef INET
13183 		case AF_INET:
13184 #ifdef HAVE_SIN_LEN
13185 			if (raddr->sin.sin_len != sizeof(struct sockaddr_in)) {
13186 				SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13187 				error = EINVAL;
13188 				goto out_unlocked;
13189 			}
13190 #endif
13191 			port = raddr->sin.sin_port;
13192 			break;
13193 #endif
13194 #ifdef INET6
13195 		case AF_INET6:
13196 #ifdef HAVE_SIN6_LEN
13197 			if (raddr->sin6.sin6_len != sizeof(struct sockaddr_in6)) {
13198 				SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13199 				error = EINVAL;
13200 				goto out_unlocked;
13201 			}
13202 #endif
13203 			port = raddr->sin6.sin6_port;
13204 			break;
13205 #endif
13206 #if defined(__Userspace__)
13207 		case AF_CONN:
13208 #ifdef HAVE_SCONN_LEN
13209 			if (raddr->sconn.sconn_len != sizeof(struct sockaddr_conn)) {
13210 				SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13211 				error = EINVAL;
13212 				goto out_unlocked;
13213 			}
13214 #endif
13215 			port = raddr->sconn.sconn_port;
13216 			break;
13217 #endif
13218 		default:
13219 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAFNOSUPPORT);
13220 			error = EAFNOSUPPORT;
13221 			goto out_unlocked;
13222 		}
13223 	} else
13224 		port = 0;
13225 
13226 	if (srcv) {
13227 		sinfo_flags = srcv->sinfo_flags;
13228 		sinfo_assoc_id = srcv->sinfo_assoc_id;
13229 		if (INVALID_SINFO_FLAG(sinfo_flags) ||
13230 		    PR_SCTP_INVALID_POLICY(sinfo_flags)) {
13231 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13232 			error = EINVAL;
13233 			goto out_unlocked;
13234 		}
13235 		if (srcv->sinfo_flags)
13236 			SCTP_STAT_INCR(sctps_sends_with_flags);
13237 	} else {
13238 		sinfo_flags = inp->def_send.sinfo_flags;
13239 		sinfo_assoc_id = inp->def_send.sinfo_assoc_id;
13240 	}
13241 	if (sinfo_flags & SCTP_SENDALL) {
13242 		/* it's a sendall */
13243 		error = sctp_sendall(inp, uio, top, srcv);
13244 		top = NULL;
13245 		goto out_unlocked;
13246 	}
13247 	if ((sinfo_flags & SCTP_ADDR_OVER) && (addr == NULL)) {
13248 		SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13249 		error = EINVAL;
13250 		goto out_unlocked;
13251 	}
13252 	/* now we must find the assoc */
13253 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
13254 	    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
13255 		SCTP_INP_RLOCK(inp);
13256 		stcb = LIST_FIRST(&inp->sctp_asoc_list);
13257 		if (stcb) {
13258 			SCTP_TCB_LOCK(stcb);
13259 			hold_tcblock = 1;
13260 		}
13261 		SCTP_INP_RUNLOCK(inp);
13262 	} else if (sinfo_assoc_id) {
13263 		stcb = sctp_findassociation_ep_asocid(inp, sinfo_assoc_id, 0);
13264 	} else if (addr) {
13265 		/*-
13266 		 * Since we did not use findep we must
13267 		 * increment the inp ref count ourselves, and
13268 		 * decrement it again if we don't find a tcb.
13269 		 */
13270 		SCTP_INP_WLOCK(inp);
13271 		SCTP_INP_INCR_REF(inp);
13272 		SCTP_INP_WUNLOCK(inp);
13273 		stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
13274 		if (stcb == NULL) {
13275 			SCTP_INP_WLOCK(inp);
13276 			SCTP_INP_DECR_REF(inp);
13277 			SCTP_INP_WUNLOCK(inp);
13278 		} else {
13279 			hold_tcblock = 1;
13280 		}
13281 	}
13282 	if ((stcb == NULL) && (addr)) {
13283 		/* Possible implicit send? */
13284 		SCTP_ASOC_CREATE_LOCK(inp);
13285 		create_lock_applied = 1;
13286 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
13287 		    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
13288 			/* Should I really unlock ? */
13289 			SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13290 			error = EINVAL;
13291 			goto out_unlocked;
13292 
13293 		}
13294 		if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
13295 		    (addr->sa_family == AF_INET6)) {
13296 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13297 			error = EINVAL;
13298 			goto out_unlocked;
13299 		}
13300 		SCTP_INP_WLOCK(inp);
13301 		SCTP_INP_INCR_REF(inp);
13302 		SCTP_INP_WUNLOCK(inp);
13303 		/* With the lock applied look again */
13304 		stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
13305 		if ((stcb == NULL) && (control != NULL) && (port > 0)) {
13306 			stcb = sctp_findassociation_cmsgs(&t_inp, port, control, &net, &error);
13307 		}
13308 		if (stcb == NULL) {
13309 			SCTP_INP_WLOCK(inp);
13310 			SCTP_INP_DECR_REF(inp);
13311 			SCTP_INP_WUNLOCK(inp);
13312 		} else {
13313 			hold_tcblock = 1;
13314 		}
13315 		if (error) {
13316 			goto out_unlocked;
13317 		}
13318 		if (t_inp != inp) {
13319 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
13320 			error = ENOTCONN;
13321 			goto out_unlocked;
13322 		}
13323 	}
13324 	if (stcb == NULL) {
13325 		if (addr == NULL) {
13326 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
13327 			error = ENOENT;
13328 			goto out_unlocked;
13329 		} else {
13330 			/* We must go ahead and start the INIT process */
13331 			uint32_t vrf_id;
13332 
13333 			if ((sinfo_flags & SCTP_ABORT) ||
13334 			    ((sinfo_flags & SCTP_EOF) && (sndlen == 0))) {
13335 				/*-
13336 				 * User asks to abort a non-existent assoc,
13337 				 * or EOF a non-existent assoc with no data.
13338 				 */
13339 				SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
13340 				error = ENOENT;
13341 				goto out_unlocked;
13342 			}
13343 			/* get an asoc/stcb struct */
13344 			vrf_id = inp->def_vrf_id;
13345 #ifdef INVARIANTS
13346 			if (create_lock_applied == 0) {
13347 				panic("Error, should hold create lock and I don't?");
13348 			}
13349 #endif
13350 			stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id,
13351 #if !(defined( __Panda__) || defined(__Userspace__))
13352 					       p
13353 #else
13354 					       (struct proc *)NULL
13355 #endif
13356 				);
13357 			if (stcb == NULL) {
13358 				/* The error is set up for us in the call */
13359 				goto out_unlocked;
13360 			}
13361 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
13362 				stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
13363 				/* Set the connected flag so we can queue data */
13364 				soisconnecting(so);
13365 			}
13366 			hold_tcblock = 1;
13367 			if (create_lock_applied) {
13368 				SCTP_ASOC_CREATE_UNLOCK(inp);
13369 				create_lock_applied = 0;
13370 			} else {
13371 				SCTP_PRINTF("Huh-3? create lock should have been on??\n");
13372 			}
13373 			/* Turn on the queue-only flag to prevent data from being sent */
13374 			queue_only = 1;
13375 			asoc = &stcb->asoc;
13376 			SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT);
13377 			(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
13378 
13379 			/* initialize authentication params for the assoc */
13380 			sctp_initialize_auth_params(inp, stcb);
13381 
13382 			if (control) {
13383 				if (sctp_process_cmsgs_for_init(stcb, control, &error)) {
13384 					sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_7);
13385 					hold_tcblock = 0;
13386 					stcb = NULL;
13387 					goto out_unlocked;
13388 				}
13389 			}
13390 			/* out with the INIT */
13391 			queue_only_for_init = 1;
13392 			/*-
13393 			 * we may want to dig in after this call and adjust the MTU
13394 			 * value. It defaulted to 1500 (constant) but the ro
13395 			 * structure may now have an update and thus we may need to
13396 			 * change it BEFORE we append the message.
13397 			 */
13398 		}
13399 	} else
13400 		asoc = &stcb->asoc;
13401 	if (srcv == NULL)
13402 		srcv = (struct sctp_sndrcvinfo *)&asoc->def_send;
13403 	if (srcv->sinfo_flags & SCTP_ADDR_OVER) {
13404 		if (addr)
13405 			net = sctp_findnet(stcb, addr);
13406 		else
13407 			net = NULL;
13408 		if ((net == NULL) ||
13409 		    ((port != 0) && (port != stcb->rport))) {
13410 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13411 			error = EINVAL;
13412 			goto out_unlocked;
13413 		}
13414 	} else {
13415 		if (stcb->asoc.alternate) {
13416 			net = stcb->asoc.alternate;
13417 		} else {
13418 			net = stcb->asoc.primary_destination;
13419 		}
13420 	}
13421 	atomic_add_int(&stcb->total_sends, 1);
13422 	/* Keep the stcb from being freed under our feet */
13423 	atomic_add_int(&asoc->refcnt, 1);
13424 	free_cnt_applied = 1;
13425 
13426 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT)) {
13427 		if (sndlen > asoc->smallest_mtu) {
13428 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
13429 			error = EMSGSIZE;
13430 			goto out_unlocked;
13431 		}
13432 	}
13433 #if defined(__Userspace__)
13434 	if (inp->recv_callback) {
13435 		non_blocking = 1;
13436 	}
13437 #endif
13438 	if (SCTP_SO_IS_NBIO(so)
13439 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
13440 	     || (flags & MSG_NBIO)
13441 #endif
13442 	    ) {
13443 		non_blocking = 1;
13444 	}
13445 	/* would we block? */
13446 	if (non_blocking) {
13447 		if (hold_tcblock == 0) {
13448 			SCTP_TCB_LOCK(stcb);
13449 			hold_tcblock = 1;
13450 		}
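		/*
		 * Estimate the user data already queued (excluding per-chunk
		 * header overhead) and fail rather than block if this send
		 * would not fit in the send buffer, or if too many chunks
		 * are already queued.
		 */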
13451 		inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
13452 		if ((SCTP_SB_LIMIT_SND(so) <  (sndlen + inqueue_bytes + stcb->asoc.sb_send_resv)) ||
13453 		    (stcb->asoc.chunks_on_out_queue >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
13454 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EWOULDBLOCK);
13455 			if (sndlen > SCTP_SB_LIMIT_SND(so))
13456 				error = EMSGSIZE;
13457 			else
13458 				error = EWOULDBLOCK;
13459 			goto out_unlocked;
13460 		}
13461 		stcb->asoc.sb_send_resv += sndlen;
13462 		SCTP_TCB_UNLOCK(stcb);
13463 		hold_tcblock = 0;
13464 	} else {
13465 		atomic_add_int(&stcb->asoc.sb_send_resv, sndlen);
13466 	}
13467 	local_soresv = sndlen;
13468 	if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13469 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
13470 		error = ECONNRESET;
13471 		goto out_unlocked;
13472 	}
13473 	if (create_lock_applied) {
13474 		SCTP_ASOC_CREATE_UNLOCK(inp);
13475 		create_lock_applied = 0;
13476 	}
13477 	if (asoc->stream_reset_outstanding) {
13478 		/*
13479 		 * Can't queue any data while stream reset is underway.
13480 		 */
13481 		SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAGAIN);
13482 		error = EAGAIN;
13483 		goto out_unlocked;
13484 	}
13485 	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
13486 	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
13487 		queue_only = 1;
13488 	}
13489 	/* we are now done with all control */
13490 	if (control) {
13491 		sctp_m_freem(control);
13492 		control = NULL;
13493 	}
13494 	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
13495 	    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
13496 	    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
13497 	    (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
13498 		if (srcv->sinfo_flags & SCTP_ABORT) {
13499 			;
13500 		} else {
13501 			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
13502 			error = ECONNRESET;
13503 			goto out_unlocked;
13504 		}
13505 	}
13506 	/* Ok, we will attempt a msgsnd :> */
13507 #if !(defined(__Panda__) || defined(__Windows__) || defined(__Userspace__))
13508 	if (p) {
13509 #if defined(__FreeBSD__) && __FreeBSD_version >= 603000
13510 		p->td_ru.ru_msgsnd++;
13511 #elif defined(__FreeBSD__) && __FreeBSD_version >= 500000
13512 		p->td_proc->p_stats->p_ru.ru_msgsnd++;
13513 #else
13514 		p->p_stats->p_ru.ru_msgsnd++;
13515 #endif
13516 	}
13517 #endif
13518 	/* Are we aborting? */
13519 	if (srcv->sinfo_flags & SCTP_ABORT) {
13520 		struct mbuf *mm;
13521 		int tot_demand, tot_out = 0, max_out;
13522 
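		/*
		 * Build a user-initiated abort: the user data (from top, or
		 * copied in from uio below) becomes the cause-specific
		 * information of the error cause, truncated to fit the MTU.
		 */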
13523 		SCTP_STAT_INCR(sctps_sends_with_abort);
13524 		if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
13525 		    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
13526 			/* The association has to be up before we abort it. */
13527 			/* How big is the user-initiated abort? */
13528 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13529 			error = EINVAL;
13530 			goto out;
13531 		}
13532 		if (hold_tcblock) {
13533 			SCTP_TCB_UNLOCK(stcb);
13534 			hold_tcblock = 0;
13535 		}
13536 		if (top) {
13537 			struct mbuf *cntm = NULL;
13538 
13539 			mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_WAITOK, 1, MT_DATA);
13540 			if (sndlen != 0) {
13541 				for (cntm = top; cntm; cntm = SCTP_BUF_NEXT(cntm)) {
13542 					tot_out += SCTP_BUF_LEN(cntm);
13543 				}
13544 			}
13545 		} else {
13546 			/* Must fit in an MTU */
13547 			tot_out = sndlen;
13548 			tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
13549 			if (tot_demand > SCTP_DEFAULT_ADD_MORE) {
13550 				/* Too big */
13551 				SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
13552 				error = EMSGSIZE;
13553 				goto out;
13554 			}
13555 			mm = sctp_get_mbuf_for_msg(tot_demand, 0, M_WAITOK, 1, MT_DATA);
13556 		}
13557 		if (mm == NULL) {
13558 			SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
13559 			error = ENOMEM;
13560 			goto out;
13561 		}
13562 		max_out = asoc->smallest_mtu - sizeof(struct sctp_paramhdr);
13563 		max_out -= sizeof(struct sctp_abort_msg);
13564 		if (tot_out > max_out) {
13565 			tot_out = max_out;
13566 		}
13567 		if (mm) {
13568 			struct sctp_paramhdr *ph;
13569 
13570 			/* now move forward the data pointer */
13571 			ph = mtod(mm, struct sctp_paramhdr *);
13572 			ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
13573 			ph->param_length = htons(sizeof(struct sctp_paramhdr) + tot_out);
13574 			ph++;
13575 			SCTP_BUF_LEN(mm) = tot_out + sizeof(struct sctp_paramhdr);
13576 			if (top == NULL) {
13577 #if defined(__APPLE__)
13578 				SCTP_SOCKET_UNLOCK(so, 0);
13579 #endif
13580 				error = uiomove((caddr_t)ph, (int)tot_out, uio);
13581 #if defined(__APPLE__)
13582 				SCTP_SOCKET_LOCK(so, 0);
13583 #endif
13584 				if (error) {
13585 					/*-
13586 					 * If we can't copy the user's data we
13587 					 * still abort; we just don't get to
13588 					 * send the user's note :-0
13589 					 */
13590 					sctp_m_freem(mm);
13591 					mm = NULL;
13592 				}
13593 			} else {
13594 				if (sndlen != 0) {
13595 					SCTP_BUF_NEXT(mm) = top;
13596 				}
13597 			}
13598 		}
13599 		if (hold_tcblock == 0) {
13600 			SCTP_TCB_LOCK(stcb);
13601 		}
13602 		atomic_add_int(&stcb->asoc.refcnt, -1);
13603 		free_cnt_applied = 0;
13604 		/* release this lock, otherwise we hang on ourselves */
13605 		sctp_abort_an_association(stcb->sctp_ep, stcb, mm, SCTP_SO_LOCKED);
13606 		/* now relock the stcb so everything is sane */
13607 		hold_tcblock = 0;
13608 		stcb = NULL;
13609 		/* In this case top is already chained to mm, so
13610 		 * avoid a double free: we free top below if
13611 		 * top != NULL, and the driver would free it after
13612 		 * sending the packet out.
13613 		 */
13614 		if (sndlen != 0) {
13615 			top = NULL;
13616 		}
13617 		goto out_unlocked;
13618 	}
13619 	/* Calculate the maximum we can send */
13620 	inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
13621 	if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
13622 		if (non_blocking) {
13623 			/* we already checked for non-blocking above. */
13624 			max_len = sndlen;
13625 		} else {
13626 			max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
13627 		}
13628 	} else {
13629 		max_len = 0;
13630 	}
13631 	if (hold_tcblock) {
13632 		SCTP_TCB_UNLOCK(stcb);
13633 		hold_tcblock = 0;
13634 	}
13635 	/* Is the stream no. valid? */
13636 	if (srcv->sinfo_stream >= asoc->streamoutcnt) {
13637 		/* Invalid stream number */
13638 		SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13639 		error = EINVAL;
13640 		goto out_unlocked;
13641 	}
13642 	if (asoc->strmout == NULL) {
13643 		/* huh? software error */
13644 		SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
13645 		error = EFAULT;
13646 		goto out_unlocked;
13647 	}
13648 
13649 	/* Unless E_EOR mode is on, we must make a send FIT in one call. */
13650 	if ((user_marks_eor == 0) &&
13651 	    (sndlen > SCTP_SB_LIMIT_SND(stcb->sctp_socket))) {
13652 		/* It will NEVER fit */
13653 		SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
13654 		error = EMSGSIZE;
13655 		goto out_unlocked;
13656 	}
13657 	if ((uio == NULL) && user_marks_eor) {
13658 		/*-
13659 		 * We do not support eeor mode for
13660 		 * sending with mbuf chains (like sendfile).
13661 		 */
13662 		SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13663 		error = EINVAL;
13664 		goto out_unlocked;
13665 	}
13666 
13667 	if (user_marks_eor) {
13668 		local_add_more = min(SCTP_SB_LIMIT_SND(so), SCTP_BASE_SYSCTL(sctp_add_more_threshold));
13669 	} else {
13670 		/*-
13671 		 * For non-eeor the whole message must fit in
13672 		 * the socket send buffer.
13673 		 */
13674 		local_add_more = sndlen;
13675 	}
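	/*
	 * local_add_more is how much send buffer space must be available
	 * before we proceed: the whole message for non-explicit-EOR sends,
	 * or the add-more threshold for explicit-EOR sends.
	 */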
13676 	len = 0;
13677 	if (non_blocking) {
13678 		goto skip_preblock;
13679 	}
13680 	if (((max_len <= local_add_more) &&
13681 	     (SCTP_SB_LIMIT_SND(so) >= local_add_more)) ||
13682 	    (max_len == 0) ||
13683 	    ((stcb->asoc.chunks_on_out_queue+stcb->asoc.stream_queue_cnt) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
13684 		/* No room right now! */
13685 		SOCKBUF_LOCK(&so->so_snd);
13686 		inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
13687 		while ((SCTP_SB_LIMIT_SND(so) < (inqueue_bytes + local_add_more)) ||
13688 		       ((stcb->asoc.stream_queue_cnt+stcb->asoc.chunks_on_out_queue) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
13689 			SCTPDBG(SCTP_DEBUG_OUTPUT1,"pre_block limit:%u <(inq:%d + %d) || (%d+%d > %d)\n",
13690 			        (unsigned int)SCTP_SB_LIMIT_SND(so),
13691 			        inqueue_bytes,
13692 			        local_add_more,
13693 			        stcb->asoc.stream_queue_cnt,
13694 			        stcb->asoc.chunks_on_out_queue,
13695 			        SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue));
13696 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13697 				sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA, asoc, sndlen);
13698 			}
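			/*
			 * Register a block entry so that an association
			 * tear-down can set be.error and wake us up while
			 * we sleep on the send buffer.
			 */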
13699 			be.error = 0;
13700 #if !defined(__Panda__) && !defined(__Windows__)
13701 			stcb->block_entry = &be;
13702 #endif
13703 			error = sbwait(&so->so_snd);
13704 			stcb->block_entry = NULL;
13705 			if (error || so->so_error || be.error) {
13706 				if (error == 0) {
13707 					if (so->so_error)
13708 						error = so->so_error;
13709 					if (be.error) {
13710 						error = be.error;
13711 					}
13712 				}
13713 				SOCKBUF_UNLOCK(&so->so_snd);
13714 				goto out_unlocked;
13715 			}
13716 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13717 				sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
13718 				               asoc, stcb->asoc.total_output_queue_size);
13719 			}
13720 			if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13721 				goto out_unlocked;
13722 			}
13723 			inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
13724 		}
13725 		if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
13726 			max_len = SCTP_SB_LIMIT_SND(so) -  inqueue_bytes;
13727 		} else {
13728 			max_len = 0;
13729 		}
13730 		SOCKBUF_UNLOCK(&so->so_snd);
13731 	}
13732 
13733 skip_preblock:
13734 	if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13735 		goto out_unlocked;
13736 	}
13737 #if defined(__APPLE__)
13738 	error = sblock(&so->so_snd, SBLOCKWAIT(flags));
13739 #endif
13740 	/* sndlen covers the mbuf case,
13741 	 * uio_resid covers the non-mbuf case.
13742 	 * NOTE: uio will be NULL when top/mbuf is passed.
13743 	 */
13744 	if (sndlen == 0) {
13745 		if (srcv->sinfo_flags & SCTP_EOF) {
13746 			got_all_of_the_send = 1;
13747 			goto dataless_eof;
13748 		} else {
13749 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13750 			error = EINVAL;
13751 			goto out;
13752 		}
13753 	}
13754 	if (top == NULL) {
13755 		struct sctp_stream_queue_pending *sp;
13756 		struct sctp_stream_out *strm;
13757 		uint32_t sndout;
13758 
13759 		SCTP_TCB_SEND_LOCK(stcb);
13760 		if ((asoc->stream_locked) &&
13761 		    (asoc->stream_locked_on  != srcv->sinfo_stream)) {
13762 			SCTP_TCB_SEND_UNLOCK(stcb);
13763 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13764 			error = EINVAL;
13765 			goto out;
13766 		}
13767 		SCTP_TCB_SEND_UNLOCK(stcb);
13768 
13769 		strm = &stcb->asoc.strmout[srcv->sinfo_stream];
13770 		if (strm->last_msg_incomplete == 0) {
13771 		do_a_copy_in:
13772 			sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error);
13773 			if ((sp == NULL) || (error)) {
13774 				goto out;
13775 			}
13776 			SCTP_TCB_SEND_LOCK(stcb);
13777 			if (sp->msg_is_complete) {
13778 				strm->last_msg_incomplete = 0;
13779 				asoc->stream_locked = 0;
13780 			} else {
13781 				/* We are now locked to this stream in
13782 				 * case the copy is interrupted.
13783 				 */
13784 				strm->last_msg_incomplete = 1;
13785 				asoc->stream_locked = 1;
13786 				asoc->stream_locked_on  = srcv->sinfo_stream;
13787 				sp->sender_all_done = 0;
13788 			}
13789 			sctp_snd_sb_alloc(stcb, sp->length);
13790 			atomic_add_int(&asoc->stream_queue_cnt, 1);
13791 			if (srcv->sinfo_flags & SCTP_UNORDERED) {
13792 				SCTP_STAT_INCR(sctps_sends_with_unord);
13793 			}
13794 			TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
13795 			stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, asoc, strm, sp, 1);
13796 			SCTP_TCB_SEND_UNLOCK(stcb);
13797 		} else {
13798 			SCTP_TCB_SEND_LOCK(stcb);
13799 			sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead);
13800 			SCTP_TCB_SEND_UNLOCK(stcb);
13801 			if (sp == NULL) {
13802 				/* ???? Huh ??? last msg is gone */
13803 #ifdef INVARIANTS
13804 				panic("Warning: Last msg marked incomplete, yet nothing left?");
13805 #else
13806 				SCTP_PRINTF("Warning: Last msg marked incomplete, yet nothing left?\n");
13807 				strm->last_msg_incomplete = 0;
13808 #endif
13809 				goto do_a_copy_in;
13810 
13811 			}
13812 		}
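		/*
		 * Copy the user data in pieces sized to the available send
		 * buffer space, appending each piece to the pending stream
		 * queue entry sp until the whole message has been taken in.
		 */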
13813 #if defined(__APPLE__)
13814 #if defined(APPLE_LEOPARD)
13815 		while (uio->uio_resid > 0) {
13816 #else
13817 		while (uio_resid(uio) > 0) {
13818 #endif
13819 #else
13820 		while (uio->uio_resid > 0) {
13821 #endif
13822 			/* How much room do we have? */
13823 			struct mbuf *new_tail, *mm;
13824 
13825 			if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size)
13826 				max_len = SCTP_SB_LIMIT_SND(so) - stcb->asoc.total_output_queue_size;
13827 			else
13828 				max_len = 0;
13829 
13830 			if ((max_len > SCTP_BASE_SYSCTL(sctp_add_more_threshold)) ||
13831 			    (max_len && (SCTP_SB_LIMIT_SND(so) < SCTP_BASE_SYSCTL(sctp_add_more_threshold))) ||
13832 #if defined(__APPLE__)
13833 #if defined(APPLE_LEOPARD)
13834 			    (uio->uio_resid && (uio->uio_resid <= (int)max_len))) {
13835 #else
13836 			    (uio_resid(uio) && (uio_resid(uio) <= (int)max_len))) {
13837 #endif
13838 #else
13839 			    (uio->uio_resid && (uio->uio_resid <= (int)max_len))) {
13840 #endif
13841 				sndout = 0;
13842 				new_tail = NULL;
13843 				if (hold_tcblock) {
13844 					SCTP_TCB_UNLOCK(stcb);
13845 					hold_tcblock = 0;
13846 				}
13847 #if defined(__APPLE__)
13848 				SCTP_SOCKET_UNLOCK(so, 0);
13849 #endif
13850 #if defined(__FreeBSD__) && __FreeBSD_version > 602000
13851 				mm = sctp_copy_resume(uio, max_len, user_marks_eor, &error, &sndout, &new_tail);
13852 #else
13853 				mm = sctp_copy_resume(uio, max_len, &error, &sndout, &new_tail);
13854 #endif
13855 #if defined(__APPLE__)
13856 				SCTP_SOCKET_LOCK(so, 0);
13857 #endif
13858 				if ((mm == NULL) || error) {
13859 					if (mm) {
13860 						sctp_m_freem(mm);
13861 					}
13862 					goto out;
13863 				}
13864 				/* Update the mbuf and count */
13865 				SCTP_TCB_SEND_LOCK(stcb);
13866 				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13867 					/* we need to get out.
13868 					 * Peer probably aborted.
13869 					 */
13870 					sctp_m_freem(mm);
13871 					if (stcb->asoc.state & SCTP_STATE_WAS_ABORTED) {
13872 						SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
13873 						error = ECONNRESET;
13874 					}
13875 					SCTP_TCB_SEND_UNLOCK(stcb);
13876 					goto out;
13877 				}
13878 				if (sp->tail_mbuf) {
13879 					/* tack it to the end */
13880 					SCTP_BUF_NEXT(sp->tail_mbuf) = mm;
13881 					sp->tail_mbuf = new_tail;
13882 				} else {
13883 					/* A stolen mbuf */
13884 					sp->data = mm;
13885 					sp->tail_mbuf = new_tail;
13886 				}
13887 				sctp_snd_sb_alloc(stcb, sndout);
13888 				atomic_add_int(&sp->length,sndout);
13889 				len += sndout;
13890 
13891 				/* Did we reach EOR? */
13892 #if defined(__APPLE__)
13893 #if defined(APPLE_LEOPARD)
13894 				if ((uio->uio_resid == 0) &&
13895 #else
13896 				if ((uio_resid(uio) == 0) &&
13897 #endif
13898 #else
13899 				if ((uio->uio_resid == 0) &&
13900 #endif
13901 				    ((user_marks_eor == 0) ||
13902 				     (srcv->sinfo_flags & SCTP_EOF) ||
13903 				     (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
13904 					sp->msg_is_complete = 1;
13905 				} else {
13906 					sp->msg_is_complete = 0;
13907 				}
13908 				SCTP_TCB_SEND_UNLOCK(stcb);
13909 			}
13910 #if defined(__APPLE__)
13911 #if defined(APPLE_LEOPARD)
13912 			if (uio->uio_resid == 0) {
13913 #else
13914 			if (uio_resid(uio) == 0) {
13915 #endif
13916 #else
13917 			if (uio->uio_resid == 0) {
13918 #endif
13919 				/* got it all? */
13920 				continue;
13921 			}
13922 			/* PR-SCTP? */
13923 			if ((asoc->prsctp_supported) && (asoc->sent_queue_cnt_removeable > 0)) {
13924 				/* This is ugly, but we must ensure locking order */
13925 				if (hold_tcblock == 0) {
13926 					SCTP_TCB_LOCK(stcb);
13927 					hold_tcblock = 1;
13928 				}
13929 				sctp_prune_prsctp(stcb, asoc, srcv, sndlen);
13930 				inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
13931 				if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size)
13932 					max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
13933 				else
13934 					max_len = 0;
13935 				if (max_len > 0) {
13936 					continue;
13937 				}
13938 				SCTP_TCB_UNLOCK(stcb);
13939 				hold_tcblock = 0;
13940 			}
13941 			/* wait for space now */
13942 			if (non_blocking) {
13943 				/* Non-blocking I/O is in effect, so get out */
13944 				goto skip_out_eof;
13945 			}
13946 			/* What about the INIT? Send it if needed. */
13947 			if (queue_only_for_init) {
13948 				if (hold_tcblock == 0) {
13949 					SCTP_TCB_LOCK(stcb);
13950 					hold_tcblock = 1;
13951 				}
13952 				if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
13953 					/* a collision took us forward? */
13954 					queue_only = 0;
13955 				} else {
13956 					sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
13957 					SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT);
13958 					queue_only = 1;
13959 				}
13960 			}
13961 			if ((net->flight_size > net->cwnd) &&
13962 			    (asoc->sctp_cmt_on_off == 0)) {
13963 				SCTP_STAT_INCR(sctps_send_cwnd_avoid);
13964 				queue_only = 1;
13965 			} else if (asoc->ifp_had_enobuf) {
13966 				SCTP_STAT_INCR(sctps_ifnomemqueued);
13967 				if (net->flight_size > (2 * net->mtu)) {
13968 					queue_only = 1;
13969 				}
13970 				asoc->ifp_had_enobuf = 0;
13971 			}
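			/*
			 * un_sent estimates the queued-but-unsent bytes,
			 * including per-chunk header overhead.  Nagle applies
			 * when data is in flight and less than a full MTU of
			 * new data is waiting to be sent.
			 */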
13972 			un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
13973 			           (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
13974 			if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
13975 			    (stcb->asoc.total_flight > 0) &&
13976 			    (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
13977 			    (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
13978 
13979 				/*-
13980 				 * Ok, Nagle is set on and we have data outstanding.
13981 				 * Don't send anything and let SACKs drive out the
13982 				 * data unless we have a "full" segment to send.
13983 				 */
13984 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13985 					sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
13986 				}
13987 				SCTP_STAT_INCR(sctps_naglequeued);
13988 				nagle_applies = 1;
13989 			} else {
13990 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13991 					if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
13992 						sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
13993 				}
13994 				SCTP_STAT_INCR(sctps_naglesent);
13995 				nagle_applies = 0;
13996 			}
13997 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13998 
13999 				sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
14000 					       nagle_applies, un_sent);
14001 				sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
14002 					       stcb->asoc.total_flight,
14003 					       stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
14004 			}
14005 			if (queue_only_for_init)
14006 				queue_only_for_init = 0;
14007 			if ((queue_only == 0) && (nagle_applies == 0)) {
14008 				/*-
14009 				 * We need to start chunk output
14010 				 * before blocking. Note that if
14011 				 * a lock is already applied, then
14012 				 * input via the net is in progress
14013 				 * and I don't need to start output :-D
14014 				 */
14015 				if (hold_tcblock == 0) {
14016 					if (SCTP_TCB_TRYLOCK(stcb)) {
14017 						hold_tcblock = 1;
14018 						sctp_chunk_output(inp,
14019 								  stcb,
14020 								  SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
14021 					}
14022 				} else {
14023 					sctp_chunk_output(inp,
14024 							  stcb,
14025 							  SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
14026 				}
14027 				if (hold_tcblock == 1) {
14028 					SCTP_TCB_UNLOCK(stcb);
14029 					hold_tcblock = 0;
14030 				}
14031 			}
14032 			SOCKBUF_LOCK(&so->so_snd);
14033 			/*-
14034 			 * This is a bit strange, but I think it will
14035 			 * work. The total_output_queue_size is locked and
14036 			 * protected by the TCB_LOCK, which we just released.
14037 			 * There is a race that can occur between releasing it
14038 			 * above, and me getting the socket lock, where sacks
14039 			 * come in but we have not put the SB_WAIT on the
14040 			 * so_snd buffer to get the wakeup. After the LOCK
14041 			 * is applied the sack_processing will also need to
14042 			 * LOCK the so->so_snd to do the actual sowwakeup(). So
14043 			 * once we have the socket buffer lock if we recheck the
14044 			 * size we KNOW we will get to sleep safely with the
14045 			 * wakeup flag in place.
14046 			 */
14047 			if (SCTP_SB_LIMIT_SND(so) <= (stcb->asoc.total_output_queue_size +
14048 						      min(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTP_SB_LIMIT_SND(so)))) {
14049 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
14050 #if defined(__APPLE__)
14051 #if defined(APPLE_LEOPARD)
14052 					sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
14053 						       asoc, uio->uio_resid);
14054 #else
14055 					sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
14056 						       asoc, uio_resid(uio));
14057 #endif
14058 #else
14059 					sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
14060 						       asoc, uio->uio_resid);
14061 #endif
14062 				}
14063 				be.error = 0;
14064 #if !defined(__Panda__) && !defined(__Windows__)
14065 				stcb->block_entry = &be;
14066 #endif
14067 #if defined(__APPLE__)
14068 				sbunlock(&so->so_snd, 1);
14069 #endif
14070 				error = sbwait(&so->so_snd);
14071 				stcb->block_entry = NULL;
14072 
14073 				if (error || so->so_error || be.error) {
14074 					if (error == 0) {
14075 						if (so->so_error)
14076 							error = so->so_error;
14077 						if (be.error) {
14078 							error = be.error;
14079 						}
14080 					}
14081 					SOCKBUF_UNLOCK(&so->so_snd);
14082 					goto out_unlocked;
14083 				}
14084 
14085 #if defined(__APPLE__)
14086 				error = sblock(&so->so_snd, SBLOCKWAIT(flags));
14087 #endif
14088 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
14089 					sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
14090 						       asoc, stcb->asoc.total_output_queue_size);
14091 				}
14092 			}
14093 			SOCKBUF_UNLOCK(&so->so_snd);
14094 			if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
14095 				goto out_unlocked;
14096 			}
14097 		}
		SCTP_TCB_SEND_LOCK(stcb);
		if (sp) {
			if (sp->msg_is_complete == 0) {
				strm->last_msg_incomplete = 1;
				asoc->stream_locked = 1;
				asoc->stream_locked_on = srcv->sinfo_stream;
			} else {
				sp->sender_all_done = 1;
				strm->last_msg_incomplete = 0;
				asoc->stream_locked = 0;
			}
		} else {
			SCTP_PRINTF("Huh no sp TSNH?\n");
			strm->last_msg_incomplete = 0;
			asoc->stream_locked = 0;
		}
		SCTP_TCB_SEND_UNLOCK(stcb);
#if defined(__APPLE__)
#if defined(APPLE_LEOPARD)
		if (uio->uio_resid == 0) {
#else
		if (uio_resid(uio) == 0) {
#endif
#else
		if (uio->uio_resid == 0) {
#endif
			got_all_of_the_send = 1;
		}
	} else {
		/* We send in a 0, since we do NOT have any locks */
		error = sctp_msg_append(stcb, net, top, srcv, 0);
		top = NULL;
		if (srcv->sinfo_flags & SCTP_EOF) {
			/*
			 * This should only happen for Panda for the mbuf
			 * send case, which does NOT yet support EEOR mode.
			 * Thus, we can just set this flag to do the proper
			 * EOF handling.
			 */
			got_all_of_the_send = 1;
		}
	}
	if (error) {
		goto out;
	}
dataless_eof:
	/* Was SCTP_EOF requested and has all of the user data been queued? */
	if ((srcv->sinfo_flags & SCTP_EOF) &&
	    (got_all_of_the_send == 1)) {
		int cnt;
		SCTP_STAT_INCR(sctps_sends_with_eof);
		error = 0;
		if (hold_tcblock == 0) {
			SCTP_TCB_LOCK(stcb);
			hold_tcblock = 1;
		}
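		/*
		 * With SCTP_EOF and everything queued, a graceful shutdown can
		 * begin: if nothing is left unsent or unacked we move straight
		 * to SHUTDOWN-SENT, otherwise SHUTDOWN-PENDING is set so the
		 * shutdown proceeds once the queues drain.
		 */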
		cnt = sctp_is_there_unsent_data(stcb, SCTP_SO_LOCKED);
		if (TAILQ_EMPTY(&asoc->send_queue) &&
		    TAILQ_EMPTY(&asoc->sent_queue) &&
		    (cnt == 0)) {
			if (asoc->locked_on_sending) {
				goto abort_anyway;
			}
			/* there is nothing queued to send, so I'm done... */
			if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
			    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
			    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
				struct sctp_nets *netp;

				/* only send SHUTDOWN the first time through */
				if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
				}
				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
				sctp_stop_timers_for_shutdown(stcb);
				if (stcb->asoc.alternate) {
					netp = stcb->asoc.alternate;
				} else {
					netp = stcb->asoc.primary_destination;
				}
				sctp_send_shutdown(stcb, netp);
				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
				                 netp);
				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
				                 asoc->primary_destination);
			}
		} else {
			/*-
			 * we still got (or just got) data to send, so set
			 * SHUTDOWN_PENDING
			 */
			/*-
			 * XXX sockets draft says that SCTP_EOF should be
			 * sent with no data.  Currently, we will allow user
			 * data to be sent first and move to
			 * SHUTDOWN-PENDING.
			 */
			if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
			    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
			    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
				if (hold_tcblock == 0) {
					SCTP_TCB_LOCK(stcb);
					hold_tcblock = 1;
				}
				if (asoc->locked_on_sending) {
					/* Locked to send out the data */
					struct sctp_stream_queue_pending *sp;
					sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
					if (sp) {
						if ((sp->length == 0) && (sp->msg_is_complete == 0))
							asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
					}
				}
				asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
				if (TAILQ_EMPTY(&asoc->send_queue) &&
				    TAILQ_EMPTY(&asoc->sent_queue) &&
				    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
				abort_anyway:
					if (free_cnt_applied) {
						atomic_add_int(&stcb->asoc.refcnt, -1);
						free_cnt_applied = 0;
					}
					sctp_abort_an_association(stcb->sctp_ep, stcb,
					                          NULL, SCTP_SO_LOCKED);
					/*
					 * The association has been aborted and
					 * freed; do not touch the stcb again.
					 */
					hold_tcblock = 0;
					stcb = NULL;
					goto out;
				}
				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
				                 asoc->primary_destination);
				sctp_feature_off(inp, SCTP_PCB_FLAGS_NODELAY);
			}
		}
	}
skip_out_eof:
	if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
		some_on_control = 1;
	}
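	/*
	 * If the association is still being set up, send the INIT now and
	 * leave the user data queued until the COOKIE handshake completes.
	 */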
	if (queue_only_for_init) {
		if (hold_tcblock == 0) {
			SCTP_TCB_LOCK(stcb);
			hold_tcblock = 1;
		}
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
			/* a collision took us forward? */
			queue_only = 0;
		} else {
			sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
			SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT);
			queue_only = 1;
		}
	}
	if ((net->flight_size > net->cwnd) &&
	    (stcb->asoc.sctp_cmt_on_off == 0)) {
		SCTP_STAT_INCR(sctps_send_cwnd_avoid);
		queue_only = 1;
	} else if (asoc->ifp_had_enobuf) {
		SCTP_STAT_INCR(sctps_ifnomemqueued);
		if (net->flight_size > (2 * net->mtu)) {
			queue_only = 1;
		}
		asoc->ifp_had_enobuf = 0;
	}
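	/*
	 * Estimate of the bytes still to be sent: everything queued that is
	 * not in flight, plus one data chunk header per queued stream message
	 * to account for chunk overhead.
	 */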
	un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
	           (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
	    (stcb->asoc.total_flight > 0) &&
	    (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
	    (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
		/*-
		 * Ok, Nagle is set on and we have data outstanding.
		 * Don't send anything and let SACKs drive out the
		 * data unless we have a "full" segment to send.
		 */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
			sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
		}
		SCTP_STAT_INCR(sctps_naglequeued);
		nagle_applies = 1;
	} else {
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
			if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
				sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
		}
		SCTP_STAT_INCR(sctps_naglesent);
		nagle_applies = 0;
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
		sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
		               nagle_applies, un_sent);
		sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
		               stcb->asoc.total_flight,
		               stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
	}
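	/*
	 * Three ways to kick the output engine from here: a normal send
	 * attempt when nothing is holding us back, a zero-window probe when
	 * the peer's rwnd is closed and nothing is in flight, or a
	 * control-chunk-only pass when only control traffic is queued.
	 */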
	if ((queue_only == 0) && (nagle_applies == 0) && (stcb->asoc.peers_rwnd && un_sent)) {
		/* we can attempt to send too. */
		if (hold_tcblock == 0) {
			/* If there is activity recv'ing sacks no need to send */
			if (SCTP_TCB_TRYLOCK(stcb)) {
				sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
				hold_tcblock = 1;
			}
		} else {
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
		}
	} else if ((queue_only == 0) &&
	           (stcb->asoc.peers_rwnd == 0) &&
	           (stcb->asoc.total_flight == 0)) {
		/* We get to have a probe outstanding */
		if (hold_tcblock == 0) {
			hold_tcblock = 1;
			SCTP_TCB_LOCK(stcb);
		}
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
	} else if (some_on_control) {
		int num_out, reason, frag_point;

		/* Here we do control only */
		if (hold_tcblock == 0) {
			hold_tcblock = 1;
			SCTP_TCB_LOCK(stcb);
		}
		frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
		(void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
		                            &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_LOCKED);
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT1, "USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d err:%d\n",
	        queue_only, stcb->asoc.peers_rwnd, un_sent,
	        stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue,
	        stcb->asoc.total_output_queue_size, error);

out:
#if defined(__APPLE__)
	sbunlock(&so->so_snd, 1);
#endif
out_unlocked:

	if (local_soresv && stcb) {
		atomic_subtract_int(&stcb->asoc.sb_send_resv, sndlen);
	}
	if (create_lock_applied) {
		SCTP_ASOC_CREATE_UNLOCK(inp);
	}
	if ((stcb) && hold_tcblock) {
		SCTP_TCB_UNLOCK(stcb);
	}
	if (stcb && free_cnt_applied) {
		atomic_add_int(&stcb->asoc.refcnt, -1);
	}
#ifdef INVARIANTS
#if !defined(__APPLE__)
	if (stcb) {
		if (mtx_owned(&stcb->tcb_mtx)) {
			panic("Leaving with tcb mtx owned?");
		}
		if (mtx_owned(&stcb->tcb_send_mtx)) {
			panic("Leaving with tcb send mtx owned?");
		}
	}
#endif
#endif
#ifdef __Panda__
	/*
	 * Handle the EAGAIN/ENOMEM cases by reattaching the pak header
	 * to the particle when pak is passed in, so that the caller
	 * can try again with this pak.
	 *
	 * NOTE: For other cases, including the success case,
	 * we simply want to return the header back to the free
	 * pool.
	 */
	if (top) {
		if ((error == EAGAIN) || (error == ENOMEM)) {
			SCTP_ATTACH_CHAIN(i_pak, top, sndlen);
			top = NULL;
		} else {
			(void)SCTP_RELEASE_HEADER(i_pak);
		}
	} else {
		/* This handles the case where top has been
		 * reset to NULL but pak might not yet be
		 * freed.
		 */
		if (i_pak) {
			(void)SCTP_RELEASE_HEADER(i_pak);
		}
	}
#endif
#ifdef INVARIANTS
	if (inp) {
		sctp_validate_no_locks(inp);
	} else {
		SCTP_PRINTF("Warning - inp is NULL so can't validate locks\n");
	}
#endif
	if (top) {
		sctp_m_freem(top);
	}
	if (control) {
		sctp_m_freem(control);
	}
	return (error);
}


/*
 * Generate an AUTHentication chunk, if required.
 */
struct mbuf *
sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
    struct sctp_auth_chunk **auth_ret, uint32_t * offset,
    struct sctp_tcb *stcb, uint8_t chunk)
{
	struct mbuf *m_auth;
	struct sctp_auth_chunk *auth;
	int chunk_len;
	struct mbuf *cn;

	if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) ||
	    (stcb == NULL))
		return (m);

	if (stcb->asoc.auth_supported == 0) {
		return (m);
	}
	/* does the requested chunk require auth? */
	if (!sctp_auth_is_required_chunk(chunk, stcb->asoc.peer_auth_chunks)) {
		return (m);
	}
	m_auth = sctp_get_mbuf_for_msg(sizeof(*auth), 0, M_NOWAIT, 1, MT_HEADER);
	if (m_auth == NULL) {
		/* no mbufs */
		return (m);
	}
	/* reserve some space if this will be the first mbuf */
	if (m == NULL)
		SCTP_BUF_RESV_UF(m_auth, SCTP_MIN_OVERHEAD);
	/* fill in the AUTH chunk details */
	auth = mtod(m_auth, struct sctp_auth_chunk *);
	bzero(auth, sizeof(*auth));
	auth->ch.chunk_type = SCTP_AUTHENTICATION;
	auth->ch.chunk_flags = 0;
	chunk_len = sizeof(*auth) +
	    sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
	auth->ch.chunk_length = htons(chunk_len);
	auth->hmac_id = htons(stcb->asoc.peer_hmac_id);
	/* key id and hmac digest will be computed and filled in upon send */

	/* save the offset where the auth was inserted into the chain */
	*offset = 0;
	for (cn = m; cn; cn = SCTP_BUF_NEXT(cn)) {
		*offset += SCTP_BUF_LEN(cn);
	}
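	/*
	 * *offset now holds the byte position of the AUTH chunk within the
	 * packet being built, so the HMAC can be located and filled in when
	 * the packet is finally transmitted.
	 */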

	/* update length and return pointer to the auth chunk */
	SCTP_BUF_LEN(m_auth) = chunk_len;
	m = sctp_copy_mbufchain(m_auth, m, m_end, 1, chunk_len, 0);
	if (auth_ret != NULL)
		*auth_ret = auth;

	return (m);
}
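
/*
 * Illustrative caller sketch (the variable names are assumptions for
 * illustration, not a verbatim caller from this file): a data-output path
 * would typically do something like
 *
 *	struct sctp_auth_chunk *auth = NULL;
 *	uint32_t auth_offset = 0;
 *
 *	m = sctp_add_auth_chunk(m, &endofchain, &auth, &auth_offset,
 *	                        stcb, SCTP_DATA);
 *
 * and later hand "auth" and "auth_offset" to the code that computes the
 * HMAC over the finished packet before it is sent.
 */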

#if defined(__FreeBSD__) || defined(__APPLE__)
#ifdef INET6
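/*
 * Check whether the IPv6 source address src6 is covered by a neighbor
 * discovery prefix whose advertising routers include the gateway of the
 * route in ro. Returns 1 on a match, 0 otherwise.
 */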
int
sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t *ro)
{
	struct nd_prefix *pfx = NULL;
	struct nd_pfxrouter *pfxrtr = NULL;
	struct sockaddr_in6 gw6;

	if (ro == NULL || ro->ro_rt == NULL || src6->sin6_family != AF_INET6)
		return (0);

	/* find the prefix entry covering the source address */
	LIST_FOREACH(pfx, &MODULE_GLOBAL(nd_prefix), ndpr_entry) {
		if (pfx->ndpr_stateflags & NDPRF_DETACHED)
			continue;
		if (IN6_ARE_MASKED_ADDR_EQUAL(&pfx->ndpr_prefix.sin6_addr,
		    &src6->sin6_addr, &pfx->ndpr_mask))
			break;
	}
	/* no prefix entry in the prefix list */
	if (pfx == NULL) {
		SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefix entry for ");
		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
		return (0);
	}

	SCTPDBG(SCTP_DEBUG_OUTPUT2, "v6src_match_nexthop(), Prefix entry is ");
	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);

	/* search the prefix's advertising routers for the installed gateway */
	LIST_FOREACH(pfxrtr, &pfx->ndpr_advrtrs, pfr_entry) {
		memset(&gw6, 0, sizeof(struct sockaddr_in6));
		gw6.sin6_family = AF_INET6;
#ifdef HAVE_SIN6_LEN
		gw6.sin6_len = sizeof(struct sockaddr_in6);
#endif
		memcpy(&gw6.sin6_addr, &pfxrtr->router->rtaddr,
		    sizeof(struct in6_addr));
		SCTPDBG(SCTP_DEBUG_OUTPUT2, "prefix router is ");
		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&gw6);
		SCTPDBG(SCTP_DEBUG_OUTPUT2, "installed router is ");
		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
		if (sctp_cmpaddr((struct sockaddr *)&gw6,
				ro->ro_rt->rt_gateway)) {
			SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is installed\n");
			return (1);
		}
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is not installed\n");
	return (0);
}
#endif

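/*
 * Check whether the IPv4 source address in sifa and the gateway of the
 * route in ro fall in the same subnet, using the netmask of the interface
 * that owns the source address. Returns 1 on a match, 0 otherwise.
 */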
int
sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t *ro)
{
#ifdef INET
	struct sockaddr_in *sin, *mask;
	struct ifaddr *ifa;
	struct in_addr srcnetaddr, gwnetaddr;

	if (ro == NULL || ro->ro_rt == NULL ||
	    sifa->address.sa.sa_family != AF_INET) {
		return (0);
	}
	ifa = (struct ifaddr *)sifa->ifa;
	mask = (struct sockaddr_in *)(ifa->ifa_netmask);
	sin = &sifa->address.sin;
	srcnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
	SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: src address is ");
	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
	SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", srcnetaddr.s_addr);

	sin = (struct sockaddr_in *)ro->ro_rt->rt_gateway;
	gwnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
	SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: nexthop is ");
	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
	SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", gwnetaddr.s_addr);
	if (srcnetaddr.s_addr == gwnetaddr.s_addr) {
		return (1);
	}
#endif
	return (0);
}
#elif defined(__Userspace__)
/* TODO __Userspace__ versions of sctp_vXsrc_match_nexthop(). */
int
sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t *ro)
{
	return (0);
}

int
sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t *ro)
{
	return (0);
}

#endif
