xref: /freebsd/sys/netinet/sctputil.c (revision b00ab754)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
5  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * a) Redistributions of source code must retain the above copyright notice,
12  *    this list of conditions and the following disclaimer.
13  *
14  * b) Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the distribution.
17  *
18  * c) Neither the name of Cisco Systems, Inc. nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <netinet/sctp_os.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctputil.h>
41 #include <netinet/sctp_var.h>
42 #include <netinet/sctp_sysctl.h>
43 #ifdef INET6
44 #include <netinet6/sctp6_var.h>
45 #endif
46 #include <netinet/sctp_header.h>
47 #include <netinet/sctp_output.h>
48 #include <netinet/sctp_uio.h>
49 #include <netinet/sctp_timer.h>
50 #include <netinet/sctp_indata.h>
51 #include <netinet/sctp_auth.h>
52 #include <netinet/sctp_asconf.h>
53 #include <netinet/sctp_bsd_addr.h>
54 #if defined(INET6) || defined(INET)
55 #include <netinet/tcp_var.h>
56 #endif
57 #include <netinet/udp.h>
58 #include <netinet/udp_var.h>
59 #include <sys/proc.h>
60 #ifdef INET6
61 #include <netinet/icmp6.h>
62 #endif
63 
64 
65 #ifndef KTR_SCTP
66 #define KTR_SCTP KTR_SUBSYS
67 #endif
68 
69 extern const struct sctp_cc_functions sctp_cc_functions[];
70 extern const struct sctp_ss_functions sctp_ss_functions[];
71 
void
sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/*
	 * Trace a socket-buffer accounting change: the socket buffer's
	 * byte count, the association's view of it (0 when no stcb is
	 * supplied), and the delta being applied.
	 */
	sctp_clog.x.sb.stcb = stcb;
	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
	sctp_clog.x.sb.stcb_sbcc = (stcb != NULL) ? stcb->asoc.sb_cc : 0;
	sctp_clog.x.sb.incr = incr;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SB,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
94 
95 void
96 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
97 {
98 #if defined(SCTP_LOCAL_TRACE_BUF)
99 	struct sctp_cwnd_log sctp_clog;
100 
101 	sctp_clog.x.close.inp = (void *)inp;
102 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
103 	if (stcb) {
104 		sctp_clog.x.close.stcb = (void *)stcb;
105 		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
106 	} else {
107 		sctp_clog.x.close.stcb = 0;
108 		sctp_clog.x.close.state = 0;
109 	}
110 	sctp_clog.x.close.loc = loc;
111 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
112 	    SCTP_LOG_EVENT_CLOSE,
113 	    0,
114 	    sctp_clog.x.misc.log1,
115 	    sctp_clog.x.misc.log2,
116 	    sctp_clog.x.misc.log3,
117 	    sctp_clog.x.misc.log4);
118 #endif
119 }
120 
void
rto_logging(struct sctp_nets *net, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/*
	 * Trace an RTT measurement for the given destination; the raw
	 * net->rtt value is scaled down by 1000 for the log record.
	 */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.rto.rtt = net->rtt / 1000;
	sctp_clog.x.rto.net = (void *)net;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RTT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
139 
140 void
141 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
142 {
143 #if defined(SCTP_LOCAL_TRACE_BUF)
144 	struct sctp_cwnd_log sctp_clog;
145 
146 	sctp_clog.x.strlog.stcb = stcb;
147 	sctp_clog.x.strlog.n_tsn = tsn;
148 	sctp_clog.x.strlog.n_sseq = sseq;
149 	sctp_clog.x.strlog.e_tsn = 0;
150 	sctp_clog.x.strlog.e_sseq = 0;
151 	sctp_clog.x.strlog.strm = stream;
152 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
153 	    SCTP_LOG_EVENT_STRM,
154 	    from,
155 	    sctp_clog.x.misc.log1,
156 	    sctp_clog.x.misc.log2,
157 	    sctp_clog.x.misc.log3,
158 	    sctp_clog.x.misc.log4);
159 #endif
160 }
161 
void
sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/*
	 * Trace a Nagle send/hold decision together with the current
	 * flight and output-queue totals of the association.
	 */
	sctp_clog.x.nagle.stcb = (void *)stcb;
	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_NAGLE,
	    action,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
182 
183 void
184 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
185 {
186 #if defined(SCTP_LOCAL_TRACE_BUF)
187 	struct sctp_cwnd_log sctp_clog;
188 
189 	sctp_clog.x.sack.cumack = cumack;
190 	sctp_clog.x.sack.oldcumack = old_cumack;
191 	sctp_clog.x.sack.tsn = tsn;
192 	sctp_clog.x.sack.numGaps = gaps;
193 	sctp_clog.x.sack.numDups = dups;
194 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
195 	    SCTP_LOG_EVENT_SACK,
196 	    from,
197 	    sctp_clog.x.misc.log1,
198 	    sctp_clog.x.misc.log2,
199 	    sctp_clog.x.misc.log3,
200 	    sctp_clog.x.misc.log4);
201 #endif
202 }
203 
204 void
205 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
206 {
207 #if defined(SCTP_LOCAL_TRACE_BUF)
208 	struct sctp_cwnd_log sctp_clog;
209 
210 	memset(&sctp_clog, 0, sizeof(sctp_clog));
211 	sctp_clog.x.map.base = map;
212 	sctp_clog.x.map.cum = cum;
213 	sctp_clog.x.map.high = high;
214 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
215 	    SCTP_LOG_EVENT_MAP,
216 	    from,
217 	    sctp_clog.x.misc.log1,
218 	    sctp_clog.x.misc.log2,
219 	    sctp_clog.x.misc.log3,
220 	    sctp_clog.x.misc.log4);
221 #endif
222 }
223 
224 void
225 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
226 {
227 #if defined(SCTP_LOCAL_TRACE_BUF)
228 	struct sctp_cwnd_log sctp_clog;
229 
230 	memset(&sctp_clog, 0, sizeof(sctp_clog));
231 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
232 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
233 	sctp_clog.x.fr.tsn = tsn;
234 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
235 	    SCTP_LOG_EVENT_FR,
236 	    from,
237 	    sctp_clog.x.misc.log1,
238 	    sctp_clog.x.misc.log2,
239 	    sctp_clog.x.misc.log3,
240 	    sctp_clog.x.misc.log4);
241 #endif
242 }
243 
244 #ifdef SCTP_MBUF_LOGGING
void
sctp_log_mb(struct mbuf *m, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/*
	 * Trace a single mbuf: flags, length, data pointer, and — for
	 * cluster (external-storage) mbufs — the ext base and refcount.
	 */
	sctp_clog.x.mb.mp = m;
	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
	if (!SCTP_BUF_IS_EXTENDED(m)) {
		sctp_clog.x.mb.ext = 0;
		sctp_clog.x.mb.refcnt = 0;
	} else {
		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBUF,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
271 
272 void
273 sctp_log_mbc(struct mbuf *m, int from)
274 {
275 	struct mbuf *mat;
276 
277 	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
278 		sctp_log_mb(mat, from);
279 	}
280 }
281 #endif
282 
void
sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/*
	 * Trace a stream-delivery event for 'control', optionally paired
	 * with the queue position 'poschk' it was compared against.
	 */
	if (control == NULL) {
		SCTP_PRINTF("Gak log of NULL?\n");
		return;
	}
	sctp_clog.x.strlog.stcb = control->stcb;
	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
	sctp_clog.x.strlog.strm = control->sinfo_stream;
	sctp_clog.x.strlog.e_tsn = (poschk != NULL) ? poschk->sinfo_tsn : 0;
	sctp_clog.x.strlog.e_sseq = (poschk != NULL) ? (uint16_t)poschk->mid : 0;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
313 
void
sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/*
	 * Trace a congestion-window change for destination 'net' (may be
	 * NULL).  Queue counters are clamped to 255 to fit the 8-bit log
	 * fields.
	 */
	sctp_clog.x.cwnd.net = net;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;

	if (net) {
		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
		sctp_clog.x.cwnd.inflight = net->flight_size;
		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
	}
	if (SCTP_CWNDLOG_PRESEND == from) {
		/* Pre-send events reuse this slot for the peer's rwnd. */
		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
	}
	sctp_clog.x.cwnd.cwnd_augment = augment;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_CWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
350 
void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/*
	 * Snapshot the ownership state of every lock relevant to this
	 * inp/stcb pair for debugging; SCTP_LOCK_UNKNOWN is recorded
	 * wherever the owning object is absent.
	 */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;

	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		/*
		 * NOTE(review): sock_lock is sampled from the receive-buffer
		 * mutex — confirm this matches SOCK_LOCK on this branch.
		 */
		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
396 
397 void
398 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
399 {
400 #if defined(SCTP_LOCAL_TRACE_BUF)
401 	struct sctp_cwnd_log sctp_clog;
402 
403 	memset(&sctp_clog, 0, sizeof(sctp_clog));
404 	sctp_clog.x.cwnd.net = net;
405 	sctp_clog.x.cwnd.cwnd_new_value = error;
406 	sctp_clog.x.cwnd.inflight = net->flight_size;
407 	sctp_clog.x.cwnd.cwnd_augment = burst;
408 	if (stcb->asoc.send_queue_cnt > 255)
409 		sctp_clog.x.cwnd.cnt_in_send = 255;
410 	else
411 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
412 	if (stcb->asoc.stream_queue_cnt > 255)
413 		sctp_clog.x.cwnd.cnt_in_str = 255;
414 	else
415 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
416 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
417 	    SCTP_LOG_EVENT_MAXBURST,
418 	    from,
419 	    sctp_clog.x.misc.log1,
420 	    sctp_clog.x.misc.log2,
421 	    sctp_clog.x.misc.log3,
422 	    sctp_clog.x.misc.log4);
423 #endif
424 }
425 
426 void
427 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
428 {
429 #if defined(SCTP_LOCAL_TRACE_BUF)
430 	struct sctp_cwnd_log sctp_clog;
431 
432 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
433 	sctp_clog.x.rwnd.send_size = snd_size;
434 	sctp_clog.x.rwnd.overhead = overhead;
435 	sctp_clog.x.rwnd.new_rwnd = 0;
436 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
437 	    SCTP_LOG_EVENT_RWND,
438 	    from,
439 	    sctp_clog.x.misc.log1,
440 	    sctp_clog.x.misc.log2,
441 	    sctp_clog.x.misc.log3,
442 	    sctp_clog.x.misc.log4);
443 #endif
444 }
445 
446 void
447 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
448 {
449 #if defined(SCTP_LOCAL_TRACE_BUF)
450 	struct sctp_cwnd_log sctp_clog;
451 
452 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
453 	sctp_clog.x.rwnd.send_size = flight_size;
454 	sctp_clog.x.rwnd.overhead = overhead;
455 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
456 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
457 	    SCTP_LOG_EVENT_RWND,
458 	    from,
459 	    sctp_clog.x.misc.log1,
460 	    sctp_clog.x.misc.log2,
461 	    sctp_clog.x.misc.log3,
462 	    sctp_clog.x.misc.log4);
463 #endif
464 }
465 
466 #ifdef SCTP_MBCNT_LOGGING
467 static void
468 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
469 {
470 #if defined(SCTP_LOCAL_TRACE_BUF)
471 	struct sctp_cwnd_log sctp_clog;
472 
473 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
474 	sctp_clog.x.mbcnt.size_change = book;
475 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
476 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
477 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
478 	    SCTP_LOG_EVENT_MBCNT,
479 	    from,
480 	    sctp_clog.x.misc.log1,
481 	    sctp_clog.x.misc.log2,
482 	    sctp_clog.x.misc.log3,
483 	    sctp_clog.x.misc.log4);
484 #endif
485 }
486 #endif
487 
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	/* Log four caller-supplied 32-bit values as a generic trace record. */
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
#endif
}
498 
void
sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/*
	 * Trace a socket-wakeup event: queue/flight counts (8-bit fields
	 * clamped to 0xff), endpoint wakeup flags packed into a bitmask,
	 * and the low byte of the send buffer's sb_flags.
	 */
	sctp_clog.x.wake.stcb = (void *)stcb;
	sctp_clog.x.wake.wake_cnt = wake_cnt;
	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;

	if (stcb->asoc.stream_queue_cnt < 0xff)
		sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
	else
		sctp_clog.x.wake.stream_qcnt = 0xff;

	if (stcb->asoc.chunks_on_out_queue < 0xff)
		sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
	else
		sctp_clog.x.wake.chunks_on_oque = 0xff;

	sctp_clog.x.wake.sctpflags = 0;
	/* set in the deferred mode stuff */
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
		sctp_clog.x.wake.sctpflags |= 1;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
		sctp_clog.x.wake.sctpflags |= 2;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
		sctp_clog.x.wake.sctpflags |= 4;
	/* what about the sb */
	if (stcb->sctp_socket) {
		struct socket *so = stcb->sctp_socket;

		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
	} else {
		/* 0xff marks "no socket attached". */
		sctp_clog.x.wake.sbflags = 0xff;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_WAKE,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
546 
547 void
548 sctp_log_block(uint8_t from, struct sctp_association *asoc, size_t sendlen)
549 {
550 #if defined(SCTP_LOCAL_TRACE_BUF)
551 	struct sctp_cwnd_log sctp_clog;
552 
553 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
554 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
555 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
556 	sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
557 	sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
558 	sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
559 	sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
560 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
561 	    SCTP_LOG_EVENT_BLOCK,
562 	    from,
563 	    sctp_clog.x.misc.log1,
564 	    sctp_clog.x.misc.log2,
565 	    sctp_clog.x.misc.log3,
566 	    sctp_clog.x.misc.log4);
567 #endif
568 }
569 
int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/*
	 * Stub: trace records are retrieved via ktr(4)/ktrdump rather
	 * than through this sockopt, so nothing is copied out.
	 */
	/* May need to fix this if ktrdump does not work */
	return (0);
}
576 
577 #ifdef SCTP_AUDITING_ENABLED
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];	/* circular buffer of (event, detail) byte pairs */
static int sctp_audit_indx = 0;	/* next slot to write in sctp_audit_data */
580 
static
void
sctp_print_audit_report(void)
{
	int i;
	int cnt;

	/*
	 * Dump the circular audit buffer oldest-first (index..end, then
	 * 0..index).  The byte pairs 0xe0/0x01, 0xf0/xx and 0xc0/0x01
	 * are record separators: each starts a new output line.  A line
	 * break is also forced every 14 printed pairs.
	 */
	cnt = 0;
	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			SCTP_PRINTF("\n");
			cnt = 0;
		}
		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
		    (uint32_t)sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			SCTP_PRINTF("\n");
	}
	/* Second half of the ring: slots written before the wrap point. */
	for (i = 0; i < sctp_audit_indx; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			SCTP_PRINTF("\n");
			cnt = 0;
		}
		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
		    (uint32_t)sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			SCTP_PRINTF("\n");
	}
	SCTP_PRINTF("\n");
}
629 
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/*
	 * Cross-check the association's retransmit and flight-size
	 * accounting against the actual sent queue, recording an audit
	 * trail as it goes.  On any mismatch the cached counters are
	 * corrected in place and a report is printed.  0xAA tags the
	 * entry point, 0xAF tags a failure, 0xA1/0xA2 record counts.
	 */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		/* No endpoint: record 0xAF/0x01 and bail. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		/* No association: record 0xAF/0x02 and bail. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	/* Recount retransmits and in-flight bytes from the sent queue. */
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		/* Correct the cached retransmit count and log the new value. */
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	/* Now verify the per-destination flight sizes sum to the total. */
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    (void *)lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}
759 
760 void
761 sctp_audit_log(uint8_t ev, uint8_t fd)
762 {
763 
764 	sctp_audit_data[sctp_audit_indx][0] = ev;
765 	sctp_audit_data[sctp_audit_indx][1] = fd;
766 	sctp_audit_indx++;
767 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
768 		sctp_audit_indx = 0;
769 	}
770 }
771 
772 #endif
773 
774 /*
775  * sctp_stop_timers_for_shutdown() should be called
776  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
777  * state to make sure that all timers are stopped.
778  */
779 void
780 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
781 {
782 	struct sctp_association *asoc;
783 	struct sctp_nets *net;
784 
785 	asoc = &stcb->asoc;
786 
787 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
788 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
789 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
790 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
791 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
792 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
793 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
794 		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
795 	}
796 }
797 
798 /*
799  * A list of sizes based on typical mtu's, used only if next hop size not
800  * returned. These values MUST be multiples of 4 and MUST be ordered.
801  */
static uint32_t sctp_mtu_sizes[] = {
	68,	/* IPv4 minimum MTU (RFC 791) */
	296,
	508,
	512,
	544,
	576,	/* IPv4 minimum reassembly buffer */
	1004,
	1492,	/* PPPoE over Ethernet */
	1500,	/* Ethernet */
	1536,
	2000,
	2048,
	4352,	/* FDDI */
	4464,
	8166,
	17912,
	32000,
	65532
};
822 
823 /*
824  * Return the largest MTU in sctp_mtu_sizes smaller than val.
825  * If val is smaller than the minimum, just return the largest
826  * multiple of 4 smaller or equal to val.
827  * Ensure that the result is a multiple of 4.
828  */
829 uint32_t
830 sctp_get_prev_mtu(uint32_t val)
831 {
832 	uint32_t i;
833 
834 	val &= 0x00000003;
835 	if (val <= sctp_mtu_sizes[0]) {
836 		return (val);
837 	}
838 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
839 		if (val <= sctp_mtu_sizes[i]) {
840 			break;
841 		}
842 	}
843 	KASSERT((sctp_mtu_sizes[i - 1] & 0x00000003) == 0,
844 	    ("sctp_mtu_sizes[%u] not a multiple of 4", i - 1));
845 	return (sctp_mtu_sizes[i - 1]);
846 }
847 
848 /*
849  * Return the smallest MTU in sctp_mtu_sizes larger than val.
850  * If val is larger than the maximum, just return the largest multiple of 4 smaller
851  * or equal to val.
852  * Ensure that the result is a multiple of 4.
853  */
854 uint32_t
855 sctp_get_next_mtu(uint32_t val)
856 {
857 	/* select another MTU that is just bigger than this one */
858 	uint32_t i;
859 
860 	val &= 0x00000003;
861 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
862 		if (val < sctp_mtu_sizes[i]) {
863 			KASSERT((sctp_mtu_sizes[i] & 0x00000003) == 0,
864 			    ("sctp_mtu_sizes[%u] not a multiple of 4", i));
865 			return (sctp_mtu_sizes[i]);
866 		}
867 	}
868 	return (val);
869 }
870 
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	/* HMAC(random_numbers, counter) -> random_store; bump the counter. */
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
	    sizeof(m->random_counter), (uint8_t *)m->random_store);
	m->random_counter++;
}
889 
890 uint32_t
891 sctp_select_initial_TSN(struct sctp_pcb *inp)
892 {
893 	/*
894 	 * A true implementation should use random selection process to get
895 	 * the initial stream sequence number, using RFC1750 as a good
896 	 * guideline
897 	 */
898 	uint32_t x, *xp;
899 	uint8_t *p;
900 	int store_at, new_store;
901 
902 	if (inp->initial_sequence_debug != 0) {
903 		uint32_t ret;
904 
905 		ret = inp->initial_sequence_debug;
906 		inp->initial_sequence_debug++;
907 		return (ret);
908 	}
909 retry:
910 	store_at = inp->store_at;
911 	new_store = store_at + sizeof(uint32_t);
912 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
913 		new_store = 0;
914 	}
915 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
916 		goto retry;
917 	}
918 	if (new_store == 0) {
919 		/* Refill the random store */
920 		sctp_fill_random_store(inp);
921 	}
922 	p = &inp->random_store[store_at];
923 	xp = (uint32_t *)p;
924 	x = *xp;
925 	return (x);
926 }
927 
928 uint32_t
929 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
930 {
931 	uint32_t x;
932 	struct timeval now;
933 
934 	if (check) {
935 		(void)SCTP_GETTIME_TIMEVAL(&now);
936 	}
937 	for (;;) {
938 		x = sctp_select_initial_TSN(&inp->sctp_ep);
939 		if (x == 0) {
940 			/* we never use 0 */
941 			continue;
942 		}
943 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
944 			break;
945 		}
946 	}
947 	return (x);
948 }
949 
950 int32_t
951 sctp_map_assoc_state(int kernel_state)
952 {
953 	int32_t user_state;
954 
955 	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
956 		user_state = SCTP_CLOSED;
957 	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
958 		user_state = SCTP_SHUTDOWN_PENDING;
959 	} else {
960 		switch (kernel_state & SCTP_STATE_MASK) {
961 		case SCTP_STATE_EMPTY:
962 			user_state = SCTP_CLOSED;
963 			break;
964 		case SCTP_STATE_INUSE:
965 			user_state = SCTP_CLOSED;
966 			break;
967 		case SCTP_STATE_COOKIE_WAIT:
968 			user_state = SCTP_COOKIE_WAIT;
969 			break;
970 		case SCTP_STATE_COOKIE_ECHOED:
971 			user_state = SCTP_COOKIE_ECHOED;
972 			break;
973 		case SCTP_STATE_OPEN:
974 			user_state = SCTP_ESTABLISHED;
975 			break;
976 		case SCTP_STATE_SHUTDOWN_SENT:
977 			user_state = SCTP_SHUTDOWN_SENT;
978 			break;
979 		case SCTP_STATE_SHUTDOWN_RECEIVED:
980 			user_state = SCTP_SHUTDOWN_RECEIVED;
981 			break;
982 		case SCTP_STATE_SHUTDOWN_ACK_SENT:
983 			user_state = SCTP_SHUTDOWN_ACK_SENT;
984 			break;
985 		default:
986 			user_state = SCTP_CLOSED;
987 			break;
988 		}
989 	}
990 	return (user_state);
991 }
992 
993 int
994 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
995     uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
996 {
997 	struct sctp_association *asoc;
998 
999 	/*
1000 	 * Anything set to zero is taken care of by the allocation routine's
1001 	 * bzero
1002 	 */
1003 
1004 	/*
1005 	 * Up front select what scoping to apply on addresses I tell my peer
1006 	 * Not sure what to do with these right now, we will need to come up
1007 	 * with a way to set them. We may need to pass them through from the
1008 	 * caller in the sctp_aloc_assoc() function.
1009 	 */
1010 	int i;
1011 #if defined(SCTP_DETAILED_STR_STATS)
1012 	int j;
1013 #endif
1014 
1015 	asoc = &stcb->asoc;
1016 	/* init all variables to a known value. */
1017 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
1018 	asoc->max_burst = inp->sctp_ep.max_burst;
1019 	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
1020 	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
1021 	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
1022 	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
1023 	asoc->ecn_supported = inp->ecn_supported;
1024 	asoc->prsctp_supported = inp->prsctp_supported;
1025 	asoc->idata_supported = inp->idata_supported;
1026 	asoc->auth_supported = inp->auth_supported;
1027 	asoc->asconf_supported = inp->asconf_supported;
1028 	asoc->reconfig_supported = inp->reconfig_supported;
1029 	asoc->nrsack_supported = inp->nrsack_supported;
1030 	asoc->pktdrop_supported = inp->pktdrop_supported;
1031 	asoc->idata_supported = inp->idata_supported;
1032 	asoc->sctp_cmt_pf = (uint8_t)0;
1033 	asoc->sctp_frag_point = inp->sctp_frag_point;
1034 	asoc->sctp_features = inp->sctp_features;
1035 	asoc->default_dscp = inp->sctp_ep.default_dscp;
1036 	asoc->max_cwnd = inp->max_cwnd;
1037 #ifdef INET6
1038 	if (inp->sctp_ep.default_flowlabel) {
1039 		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
1040 	} else {
1041 		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
1042 			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
1043 			asoc->default_flowlabel &= 0x000fffff;
1044 			asoc->default_flowlabel |= 0x80000000;
1045 		} else {
1046 			asoc->default_flowlabel = 0;
1047 		}
1048 	}
1049 #endif
1050 	asoc->sb_send_resv = 0;
1051 	if (override_tag) {
1052 		asoc->my_vtag = override_tag;
1053 	} else {
1054 		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
1055 	}
1056 	/* Get the nonce tags */
1057 	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1058 	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1059 	asoc->vrf_id = vrf_id;
1060 
1061 #ifdef SCTP_ASOCLOG_OF_TSNS
1062 	asoc->tsn_in_at = 0;
1063 	asoc->tsn_out_at = 0;
1064 	asoc->tsn_in_wrapped = 0;
1065 	asoc->tsn_out_wrapped = 0;
1066 	asoc->cumack_log_at = 0;
1067 	asoc->cumack_log_atsnt = 0;
1068 #endif
1069 #ifdef SCTP_FS_SPEC_LOG
1070 	asoc->fs_index = 0;
1071 #endif
1072 	asoc->refcnt = 0;
1073 	asoc->assoc_up_sent = 0;
1074 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
1075 	    sctp_select_initial_TSN(&inp->sctp_ep);
1076 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1077 	/* we are optimisitic here */
1078 	asoc->peer_supports_nat = 0;
1079 	asoc->sent_queue_retran_cnt = 0;
1080 
1081 	/* for CMT */
1082 	asoc->last_net_cmt_send_started = NULL;
1083 
1084 	/* This will need to be adjusted */
1085 	asoc->last_acked_seq = asoc->init_seq_number - 1;
1086 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1087 	asoc->asconf_seq_in = asoc->last_acked_seq;
1088 
1089 	/* here we are different, we hold the next one we expect */
1090 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
1091 
1092 	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
1093 	asoc->initial_rto = inp->sctp_ep.initial_rto;
1094 
1095 	asoc->default_mtu = inp->sctp_ep.default_mtu;
1096 	asoc->max_init_times = inp->sctp_ep.max_init_times;
1097 	asoc->max_send_times = inp->sctp_ep.max_send_times;
1098 	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1099 	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1100 	asoc->free_chunk_cnt = 0;
1101 
1102 	asoc->iam_blocking = 0;
1103 	asoc->context = inp->sctp_context;
1104 	asoc->local_strreset_support = inp->local_strreset_support;
1105 	asoc->def_send = inp->def_send;
1106 	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1107 	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1108 	asoc->pr_sctp_cnt = 0;
1109 	asoc->total_output_queue_size = 0;
1110 
1111 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1112 		asoc->scope.ipv6_addr_legal = 1;
1113 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1114 			asoc->scope.ipv4_addr_legal = 1;
1115 		} else {
1116 			asoc->scope.ipv4_addr_legal = 0;
1117 		}
1118 	} else {
1119 		asoc->scope.ipv6_addr_legal = 0;
1120 		asoc->scope.ipv4_addr_legal = 1;
1121 	}
1122 
1123 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1124 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1125 
1126 	asoc->smallest_mtu = inp->sctp_frag_point;
1127 	asoc->minrto = inp->sctp_ep.sctp_minrto;
1128 	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1129 
1130 	asoc->stream_locked_on = 0;
1131 	asoc->ecn_echo_cnt_onq = 0;
1132 	asoc->stream_locked = 0;
1133 
1134 	asoc->send_sack = 1;
1135 
1136 	LIST_INIT(&asoc->sctp_restricted_addrs);
1137 
1138 	TAILQ_INIT(&asoc->nets);
1139 	TAILQ_INIT(&asoc->pending_reply_queue);
1140 	TAILQ_INIT(&asoc->asconf_ack_sent);
1141 	/* Setup to fill the hb random cache at first HB */
1142 	asoc->hb_random_idx = 4;
1143 
1144 	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1145 
1146 	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1147 	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1148 
1149 	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1150 	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1151 
1152 	/*
1153 	 * Now the stream parameters, here we allocate space for all streams
1154 	 * that we request by default.
1155 	 */
1156 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1157 	    o_strms;
1158 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1159 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1160 	    SCTP_M_STRMO);
1161 	if (asoc->strmout == NULL) {
1162 		/* big trouble no memory */
1163 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1164 		return (ENOMEM);
1165 	}
1166 	for (i = 0; i < asoc->streamoutcnt; i++) {
1167 		/*
1168 		 * inbound side must be set to 0xffff, also NOTE when we get
1169 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1170 		 * count (streamoutcnt) but first check if we sent to any of
1171 		 * the upper streams that were dropped (if some were). Those
1172 		 * that were dropped must be notified to the upper layer as
1173 		 * failed to send.
1174 		 */
1175 		asoc->strmout[i].next_mid_ordered = 0;
1176 		asoc->strmout[i].next_mid_unordered = 0;
1177 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1178 		asoc->strmout[i].chunks_on_queues = 0;
1179 #if defined(SCTP_DETAILED_STR_STATS)
1180 		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1181 			asoc->strmout[i].abandoned_sent[j] = 0;
1182 			asoc->strmout[i].abandoned_unsent[j] = 0;
1183 		}
1184 #else
1185 		asoc->strmout[i].abandoned_sent[0] = 0;
1186 		asoc->strmout[i].abandoned_unsent[0] = 0;
1187 #endif
1188 		asoc->strmout[i].sid = i;
1189 		asoc->strmout[i].last_msg_incomplete = 0;
1190 		asoc->strmout[i].state = SCTP_STREAM_OPENING;
1191 		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
1192 	}
1193 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1194 
1195 	/* Now the mapping array */
1196 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1197 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1198 	    SCTP_M_MAP);
1199 	if (asoc->mapping_array == NULL) {
1200 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1201 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1202 		return (ENOMEM);
1203 	}
1204 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1205 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1206 	    SCTP_M_MAP);
1207 	if (asoc->nr_mapping_array == NULL) {
1208 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1209 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1210 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1211 		return (ENOMEM);
1212 	}
1213 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1214 
1215 	/* Now the init of the other outqueues */
1216 	TAILQ_INIT(&asoc->free_chunks);
1217 	TAILQ_INIT(&asoc->control_send_queue);
1218 	TAILQ_INIT(&asoc->asconf_send_queue);
1219 	TAILQ_INIT(&asoc->send_queue);
1220 	TAILQ_INIT(&asoc->sent_queue);
1221 	TAILQ_INIT(&asoc->resetHead);
1222 	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1223 	TAILQ_INIT(&asoc->asconf_queue);
1224 	/* authentication fields */
1225 	asoc->authinfo.random = NULL;
1226 	asoc->authinfo.active_keyid = 0;
1227 	asoc->authinfo.assoc_key = NULL;
1228 	asoc->authinfo.assoc_keyid = 0;
1229 	asoc->authinfo.recv_key = NULL;
1230 	asoc->authinfo.recv_keyid = 0;
1231 	LIST_INIT(&asoc->shared_keys);
1232 	asoc->marked_retrans = 0;
1233 	asoc->port = inp->sctp_ep.port;
1234 	asoc->timoinit = 0;
1235 	asoc->timodata = 0;
1236 	asoc->timosack = 0;
1237 	asoc->timoshutdown = 0;
1238 	asoc->timoheartbeat = 0;
1239 	asoc->timocookie = 0;
1240 	asoc->timoshutdownack = 0;
1241 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1242 	asoc->discontinuity_time = asoc->start_time;
1243 	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1244 		asoc->abandoned_unsent[i] = 0;
1245 		asoc->abandoned_sent[i] = 0;
1246 	}
1247 	/*
1248 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1249 	 * freed later when the association is freed.
1250 	 */
1251 	return (0);
1252 }
1253 
1254 void
1255 sctp_print_mapping_array(struct sctp_association *asoc)
1256 {
1257 	unsigned int i, limit;
1258 
1259 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1260 	    asoc->mapping_array_size,
1261 	    asoc->mapping_array_base_tsn,
1262 	    asoc->cumulative_tsn,
1263 	    asoc->highest_tsn_inside_map,
1264 	    asoc->highest_tsn_inside_nr_map);
1265 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1266 		if (asoc->mapping_array[limit - 1] != 0) {
1267 			break;
1268 		}
1269 	}
1270 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1271 	for (i = 0; i < limit; i++) {
1272 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1273 	}
1274 	if (limit % 16)
1275 		SCTP_PRINTF("\n");
1276 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1277 		if (asoc->nr_mapping_array[limit - 1]) {
1278 			break;
1279 		}
1280 	}
1281 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1282 	for (i = 0; i < limit; i++) {
1283 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1284 	}
1285 	if (limit % 16)
1286 		SCTP_PRINTF("\n");
1287 }
1288 
1289 int
1290 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1291 {
1292 	/* mapping array needs to grow */
1293 	uint8_t *new_array1, *new_array2;
1294 	uint32_t new_size;
1295 
1296 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1297 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1298 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1299 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1300 		/* can't get more, forget it */
1301 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1302 		if (new_array1) {
1303 			SCTP_FREE(new_array1, SCTP_M_MAP);
1304 		}
1305 		if (new_array2) {
1306 			SCTP_FREE(new_array2, SCTP_M_MAP);
1307 		}
1308 		return (-1);
1309 	}
1310 	memset(new_array1, 0, new_size);
1311 	memset(new_array2, 0, new_size);
1312 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1313 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1314 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1315 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1316 	asoc->mapping_array = new_array1;
1317 	asoc->nr_mapping_array = new_array2;
1318 	asoc->mapping_array_size = new_size;
1319 	return (0);
1320 }
1321 
1322 
/*
 * Core of the association iterator: walk endpoints (or a single endpoint,
 * with SCTP_ITERATOR_DO_SINGLE_INP) and, for each association matching
 * it->asoc_state, invoke it->function_assoc.  Optional per-endpoint entry
 * (function_inp) and exit (function_inp_end) callbacks run around each
 * endpoint, and function_atend runs once when the walk completes.  The
 * iterator struct itself is freed here when done.
 *
 * Locking: runs with the INP-info read lock and the iterator lock held;
 * both are dropped and reacquired periodically (every
 * SCTP_ITERATOR_MAX_AT_ONCE associations) to let other threads in, which
 * is why refcounts are bumped around the pause.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	sctp_it_ctl.cur_it = it;
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		/* drop the reference taken when the iterator was queued */
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		sctp_it_ctl.cur_it = NULL;
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	if (first_in) {
		/* it->inp is already read-locked from above */
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		/* advance before unlocking the endpoint we are leaving */
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			/* someone may have asked us to stop while unlocked */
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			/* re-take the locks and drop the temporary refs */
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1472 
/*
 * Drain the queue of pending iterators, running each via
 * sctp_iterator_work() in that iterator's vnet context.  Called with the
 * iterator work-queue (WQ) lock held; the lock is dropped while each
 * iterator runs and retaken before touching the queue again, which is why
 * the _SAFE list walk (saved next pointer) is used.
 */
void
sctp_iterator_worker(void)
{
	struct sctp_iterator *it, *nit;

	/* This function is called with the WQ lock in place */

	sctp_it_ctl.iterator_running = 1;
	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
		/* now lets work on this one */
		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
		SCTP_IPI_ITERATOR_WQ_UNLOCK();
		CURVNET_SET(it->vn);
		/* sctp_iterator_work() frees "it" when it completes */
		sctp_iterator_work(it);
		CURVNET_RESTORE();
		SCTP_IPI_ITERATOR_WQ_LOCK();
		/* sa_ignore FREED_MEMORY */
	}
	sctp_it_ctl.iterator_running = 0;
	return;
}
1494 
1495 
1496 static void
1497 sctp_handle_addr_wq(void)
1498 {
1499 	/* deal with the ADDR wq from the rtsock calls */
1500 	struct sctp_laddr *wi, *nwi;
1501 	struct sctp_asconf_iterator *asc;
1502 
1503 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1504 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1505 	if (asc == NULL) {
1506 		/* Try later, no memory */
1507 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1508 		    (struct sctp_inpcb *)NULL,
1509 		    (struct sctp_tcb *)NULL,
1510 		    (struct sctp_nets *)NULL);
1511 		return;
1512 	}
1513 	LIST_INIT(&asc->list_of_work);
1514 	asc->cnt = 0;
1515 
1516 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1517 		LIST_REMOVE(wi, sctp_nxt_addr);
1518 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1519 		asc->cnt++;
1520 	}
1521 
1522 	if (asc->cnt == 0) {
1523 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1524 	} else {
1525 		int ret;
1526 
1527 		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
1528 		    sctp_asconf_iterator_stcb,
1529 		    NULL,	/* No ep end for boundall */
1530 		    SCTP_PCB_FLAGS_BOUNDALL,
1531 		    SCTP_PCB_ANY_FEATURES,
1532 		    SCTP_ASOC_ANY_STATE,
1533 		    (void *)asc, 0,
1534 		    sctp_asconf_iterator_end, NULL, 0);
1535 		if (ret) {
1536 			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
1537 			/*
1538 			 * Freeing if we are stopping or put back on the
1539 			 * addr_wq.
1540 			 */
1541 			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
1542 				sctp_asconf_iterator_end(asc, 0);
1543 			} else {
1544 				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
1545 					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
1546 				}
1547 				SCTP_FREE(asc, SCTP_M_ASC_IT);
1548 			}
1549 		}
1550 	}
1551 }
1552 
/*
 * Callout handler for every SCTP timer type.  The opaque argument is the
 * struct sctp_timer embedded in the owning object; from it the endpoint
 * (inp), association (stcb) and destination (net) are recovered.
 *
 * The function first performs staleness/validity checks (recording
 * progress markers in tmr->stopped_from for debugging), takes references
 * and the appropriate lock (TCB lock, INP write lock, or the address-WQ
 * lock when no inp is involved), then dispatches on the timer type.
 * Handlers that destroy the tcb/inp jump to out_decr/out_no_decr so the
 * normal unlock path is skipped for the freed object.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;
	struct mbuf *op_err;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif
	int did_output;
	int type;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t)tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * (void *)tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		/* every type except ADDR_WQ requires an endpoint */
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		if ((inp->sctp_socket == NULL) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))) {
			/* socket gone and timer type doesn't run without one */
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			/* association already gone */
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	type = tmr->type;
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	/* pick exactly one lock: TCB, INP write, or the address WQ lock */
	if (stcb) {
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	} else if (inp != NULL) {
		if (type != SCTP_TIMER_TYPE_INPKILL) {
			SCTP_INP_WLOCK(inp);
		}
	} else {
		SCTP_WQ_ADDR_LOCK();
	}
	/* record in stopped what t-o occurred */
	tmr->stopped_from = type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (type) {
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/* T1-init retransmission timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* delayed SACK timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timosack);
		stcb->asoc.timosack++;
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		/* only timer type here that also requires a net */
		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoheartbeat);
		stcb->asoc.timoheartbeat++;
		if (sctp_heartbeat_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		{
			/* rotate the endpoint's cookie secret keys */
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/* guard expired: abort the association outright */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
		    "Shutdown guard timer expired");
		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/* tear down this association */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		/* tear down this endpoint */
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    type);
		break;
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t)type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	/* release whichever lock was taken above */
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	} else if (inp != NULL) {
		SCTP_INP_WUNLOCK(inp);
	} else {
		SCTP_WQ_ADDR_UNLOCK();
	}

out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type = %d)\n", type);
	CURVNET_RESTORE();
}
1988 
/*
 * Arm the timer of type t_type for the given endpoint/association/
 * destination.  Each case validates exactly the arguments it needs,
 * selects the struct sctp_timer that backs this timer type, and computes
 * the timeout in ticks.  Most retransmission-style timers use the path's
 * smoothed RTO, falling back to the association's initial RTO before the
 * first RTT measurement.  If the chosen timer is already pending it is
 * left running unchanged.  The caller must hold the TCB lock when an
 * stcb is supplied (asserted below).
 */
void
sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	uint32_t to_ticks;
	struct sctp_timer *tmr;

	/* Every timer type except the address workqueue needs an endpoint. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ADDR_WQ:
		/* Only 1 tick away :-) */
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		to_ticks = SCTP_ADDRESS_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* Here we use the RTO timer. */
		{
			int rto_val;

			if ((stcb == NULL) || (net == NULL)) {
				return;
			}
			tmr = &net->rxt_timer;
			if (net->RTO == 0) {
				/* No RTT measured yet on this path. */
				rto_val = stcb->asoc.initial_rto;
			} else {
				rto_val = net->RTO;
			}
			to_ticks = MSEC_TO_TICKS(rto_val);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/*
		 * Here we use the INIT timer default, usually about 1
		 * minute.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_RECV:
		/*
		 * Here we use the Delayed-Ack timer value from the inp,
		 * usually about 200ms.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		/* Here we use the RTO of the destination. */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		/*
		 * the net is used here so that we can add in the RTO. Even
		 * though we use a different timer. We also add the HB timer
		 * PLUS a random jitter.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		} else {
			uint32_t rndval;
			uint32_t jitter;

			/* No HB wanted on this path unless it is unconfirmed. */
			if ((net->dest_state & SCTP_ADDR_NOHB) &&
			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
				return;
			}
			/* to_ticks holds milliseconds until converted below. */
			if (net->RTO == 0) {
				to_ticks = stcb->asoc.initial_rto;
			} else {
				to_ticks = net->RTO;
			}
			/* Jitter the interval by up to +/- 50% of the RTO. */
			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
			jitter = rndval % to_ticks;
			if (jitter >= (to_ticks >> 1)) {
				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
			} else {
				to_ticks = to_ticks - jitter;
			}
			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
			    !(net->dest_state & SCTP_ADDR_PF)) {
				to_ticks += net->heart_beat_delay;
			}
			/*
			 * Now we must convert the to_ticks that are now in
			 * ms to ticks.
			 */
			to_ticks = MSEC_TO_TICKS(to_ticks);
			tmr = &net->hb_timer;
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		/*
		 * Here we can use the RTO timer from the network since one
		 * RTT was complete. If a retran happened then we will be
		 * using the RTO initial value.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/*
		 * nothing needed but the endpoint here, usually about 60
		 * minutes.
		 */
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if (stcb == NULL) {
			return;
		}
		/* Re-uses the stream-reset timer structure. */
		tmr = &stcb->asoc.strreset_timer;
		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_change
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		/*
		 * Here we use the value found in the EP for PMTU, usually
		 * about 10 minutes.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
			return;
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		/* Here we use the RTO of the destination */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/*
		 * Here we use the endpoints shutdown guard timer usually
		 * about 3 minutes.
		 */
		if (stcb == NULL) {
			return;
		}
		if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
			/* No configured guard time: fall back to 5 * max RTO. */
			to_ticks = 5 * MSEC_TO_TICKS(stcb->asoc.maxrto);
		} else {
			to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		/* Association-wide timer: a net must NOT be supplied. */
		if ((stcb == NULL) || (net != NULL)) {
			return;
		}
		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		if (stcb->asoc.sctp_autoclose_ticks == 0) {
			/*
			 * Really an error since stcb is NOT set to
			 * autoclose
			 */
			return;
		}
		to_ticks = stcb->asoc.sctp_autoclose_ticks;
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __func__, t_type);
		return;
		break;
	}
	/*
	 * NOTE(review): to_ticks is unsigned, so "<= 0" is effectively an
	 * "== 0" check; the "%d" format also mismatches uint32_t — confirm
	 * before changing, this is debug-only output.
	 */
	if ((to_ticks <= 0) || (tmr == NULL)) {
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
		    __func__, t_type, to_ticks, (void *)tmr);
		return;
	}
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * we do NOT allow you to have it already running. if it is
		 * we leave the current one up unchanged
		 */
		return;
	}
	/* At this point we can proceed. */
	if (t_type == SCTP_TIMER_TYPE_SEND) {
		stcb->asoc.num_send_timers_up++;
	}
	/* Record the context the timeout handler will run with. */
	tmr->stopped_from = 0;
	tmr->type = t_type;
	tmr->ep = (void *)inp;
	tmr->tcb = (void *)stcb;
	tmr->net = (void *)net;
	tmr->self = (void *)tmr;
	tmr->vnet = (void *)curvnet;
	tmr->ticks = sctp_get_tick_count();
	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
	return;
}
2269 
/*
 * Stop the timer of type t_type.  Mirrors sctp_timer_start(): each case
 * selects the struct sctp_timer that backs the type.  Because several
 * timer types share one structure (e.g. INIT/SEND/COOKIE all use
 * net->rxt_timer), the timer is only stopped when its recorded type
 * matches the requested one.  "from" tags who requested the stop, for
 * debugging.  The caller must hold the TCB lock when an stcb is given.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* Every timer type except the address workqueue needs an endpoint. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_change
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __func__, t_type);
		break;
	}
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		return;
	}
	/* Keep the outstanding SEND-timer count in sync (never negative). */
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	/* Invalidate the self pointer and record who stopped us. */
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2420 
2421 uint32_t
2422 sctp_calculate_len(struct mbuf *m)
2423 {
2424 	uint32_t tlen = 0;
2425 	struct mbuf *at;
2426 
2427 	at = m;
2428 	while (at) {
2429 		tlen += SCTP_BUF_LEN(at);
2430 		at = SCTP_BUF_NEXT(at);
2431 	}
2432 	return (tlen);
2433 }
2434 
2435 void
2436 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2437     struct sctp_association *asoc, uint32_t mtu)
2438 {
2439 	/*
2440 	 * Reset the P-MTU size on this association, this involves changing
2441 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2442 	 * allow the DF flag to be cleared.
2443 	 */
2444 	struct sctp_tmit_chunk *chk;
2445 	unsigned int eff_mtu, ovh;
2446 
2447 	asoc->smallest_mtu = mtu;
2448 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2449 		ovh = SCTP_MIN_OVERHEAD;
2450 	} else {
2451 		ovh = SCTP_MIN_V4_OVERHEAD;
2452 	}
2453 	eff_mtu = mtu - ovh;
2454 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2455 		if (chk->send_size > eff_mtu) {
2456 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2457 		}
2458 	}
2459 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2460 		if (chk->send_size > eff_mtu) {
2461 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2462 		}
2463 	}
2464 }
2465 
2466 
2467 /*
2468  * given an association and starting time of the current RTT period return
2469  * RTO in number of msecs net should point to the current network
2470  */
2471 
2472 uint32_t
2473 sctp_calculate_rto(struct sctp_tcb *stcb,
2474     struct sctp_association *asoc,
2475     struct sctp_nets *net,
2476     struct timeval *old,
2477     int rtt_from_sack)
2478 {
2479 	/*-
2480 	 * given an association and the starting time of the current RTT
2481 	 * period (in value1/value2) return RTO in number of msecs.
2482 	 */
2483 	int32_t rtt;		/* RTT in ms */
2484 	uint32_t new_rto;
2485 	int first_measure = 0;
2486 	struct timeval now;
2487 
2488 	/************************/
2489 	/* 1. calculate new RTT */
2490 	/************************/
2491 	/* get the current time */
2492 	if (stcb->asoc.use_precise_time) {
2493 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2494 	} else {
2495 		(void)SCTP_GETTIME_TIMEVAL(&now);
2496 	}
2497 	timevalsub(&now, old);
2498 	/* store the current RTT in us */
2499 	net->rtt = (uint64_t)1000000 *(uint64_t)now.tv_sec +
2500 	        (uint64_t)now.tv_usec;
2501 
2502 	/* compute rtt in ms */
2503 	rtt = (int32_t)(net->rtt / 1000);
2504 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2505 		/*
2506 		 * Tell the CC module that a new update has just occurred
2507 		 * from a sack
2508 		 */
2509 		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
2510 	}
2511 	/*
2512 	 * Do we need to determine the lan? We do this only on sacks i.e.
2513 	 * RTT being determined from data not non-data (HB/INIT->INITACK).
2514 	 */
2515 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2516 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2517 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2518 			net->lan_type = SCTP_LAN_INTERNET;
2519 		} else {
2520 			net->lan_type = SCTP_LAN_LOCAL;
2521 		}
2522 	}
2523 	/***************************/
2524 	/* 2. update RTTVAR & SRTT */
2525 	/***************************/
2526 	/*-
2527 	 * Compute the scaled average lastsa and the
2528 	 * scaled variance lastsv as described in van Jacobson
2529 	 * Paper "Congestion Avoidance and Control", Annex A.
2530 	 *
2531 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2532 	 * (net->lastsa >> SCTP_RTT_VAR_SHIFT) is the rttvar
2533 	 */
2534 	if (net->RTO_measured) {
2535 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2536 		net->lastsa += rtt;
2537 		if (rtt < 0) {
2538 			rtt = -rtt;
2539 		}
2540 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2541 		net->lastsv += rtt;
2542 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2543 			rto_logging(net, SCTP_LOG_RTTVAR);
2544 		}
2545 	} else {
2546 		/* First RTO measurment */
2547 		net->RTO_measured = 1;
2548 		first_measure = 1;
2549 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2550 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2551 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2552 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2553 		}
2554 	}
2555 	if (net->lastsv == 0) {
2556 		net->lastsv = SCTP_CLOCK_GRANULARITY;
2557 	}
2558 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2559 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2560 	    (stcb->asoc.sat_network_lockout == 0)) {
2561 		stcb->asoc.sat_network = 1;
2562 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2563 		stcb->asoc.sat_network = 0;
2564 		stcb->asoc.sat_network_lockout = 1;
2565 	}
2566 	/* bound it, per C6/C7 in Section 5.3.1 */
2567 	if (new_rto < stcb->asoc.minrto) {
2568 		new_rto = stcb->asoc.minrto;
2569 	}
2570 	if (new_rto > stcb->asoc.maxrto) {
2571 		new_rto = stcb->asoc.maxrto;
2572 	}
2573 	/* we are now returning the RTO */
2574 	return (new_rto);
2575 }
2576 
2577 /*
2578  * return a pointer to a contiguous piece of data from the given mbuf chain
2579  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2580  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2581  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2582  */
2583 caddr_t
2584 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr)
2585 {
2586 	uint32_t count;
2587 	uint8_t *ptr;
2588 
2589 	ptr = in_ptr;
2590 	if ((off < 0) || (len <= 0))
2591 		return (NULL);
2592 
2593 	/* find the desired start location */
2594 	while ((m != NULL) && (off > 0)) {
2595 		if (off < SCTP_BUF_LEN(m))
2596 			break;
2597 		off -= SCTP_BUF_LEN(m);
2598 		m = SCTP_BUF_NEXT(m);
2599 	}
2600 	if (m == NULL)
2601 		return (NULL);
2602 
2603 	/* is the current mbuf large enough (eg. contiguous)? */
2604 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2605 		return (mtod(m, caddr_t)+off);
2606 	} else {
2607 		/* else, it spans more than one mbuf, so save a temp copy... */
2608 		while ((m != NULL) && (len > 0)) {
2609 			count = min(SCTP_BUF_LEN(m) - off, len);
2610 			memcpy(ptr, mtod(m, caddr_t)+off, count);
2611 			len -= count;
2612 			ptr += count;
2613 			off = 0;
2614 			m = SCTP_BUF_NEXT(m);
2615 		}
2616 		if ((m == NULL) && (len > 0))
2617 			return (NULL);
2618 		else
2619 			return ((caddr_t)in_ptr);
2620 	}
2621 }
2622 
2623 
2624 
2625 struct sctp_paramhdr *
2626 sctp_get_next_param(struct mbuf *m,
2627     int offset,
2628     struct sctp_paramhdr *pull,
2629     int pull_limit)
2630 {
2631 	/* This just provides a typed signature to Peter's Pull routine */
2632 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2633 	    (uint8_t *)pull));
2634 }
2635 
2636 
2637 struct mbuf *
2638 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2639 {
2640 	struct mbuf *m_last;
2641 	caddr_t dp;
2642 
2643 	if (padlen > 3) {
2644 		return (NULL);
2645 	}
2646 	if (padlen <= M_TRAILINGSPACE(m)) {
2647 		/*
2648 		 * The easy way. We hope the majority of the time we hit
2649 		 * here :)
2650 		 */
2651 		m_last = m;
2652 	} else {
2653 		/* Hard way we must grow the mbuf chain */
2654 		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2655 		if (m_last == NULL) {
2656 			return (NULL);
2657 		}
2658 		SCTP_BUF_LEN(m_last) = 0;
2659 		SCTP_BUF_NEXT(m_last) = NULL;
2660 		SCTP_BUF_NEXT(m) = m_last;
2661 	}
2662 	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
2663 	SCTP_BUF_LEN(m_last) += padlen;
2664 	memset(dp, 0, padlen);
2665 	return (m_last);
2666 }
2667 
2668 struct mbuf *
2669 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2670 {
2671 	/* find the last mbuf in chain and pad it */
2672 	struct mbuf *m_at;
2673 
2674 	if (last_mbuf != NULL) {
2675 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2676 	} else {
2677 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2678 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2679 				return (sctp_add_pad_tombuf(m_at, padval));
2680 			}
2681 		}
2682 	}
2683 	return (NULL);
2684 }
2685 
/*
 * Deliver an SCTP_ASSOC_CHANGE notification for the given state
 * transition (COMM_UP, COMM_LOST, RESTART, CANT_STR_ASSOC, ...) to the
 * application, if it enabled the event.  For COMM_UP/RESTART the
 * notification carries the negotiated feature list; for COMM_LOST/
 * CANT_STR_ASSOC it carries the (truncated) ABORT chunk, if any.  For
 * 1-to-1 style sockets a matching socket error is also set on the fatal
 * states, and finally any sleepers on the socket are woken.
 */
static void
sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
    uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;
	unsigned int notif_len;
	uint16_t abort_len;
	unsigned int i;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

	if (stcb == NULL) {
		return;
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
		if (abort != NULL) {
			abort_len = ntohs(abort->ch.chunk_length);
			/*
			 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be
			 * contiguous.
			 */
			if (abort_len > SCTP_CHUNK_BUFFER_SIZE) {
				abort_len = SCTP_CHUNK_BUFFER_SIZE;
			}
		} else {
			abort_len = 0;
		}
		/* Size the notification for its optional trailing info. */
		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
			notif_len += abort_len;
		}
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			/* Retry with smaller value (drop the trailing info). */
			notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
			if (m_notify == NULL) {
				goto set_error;
			}
		}
		SCTP_BUF_NEXT(m_notify) = NULL;
		sac = mtod(m_notify, struct sctp_assoc_change *);
		memset(sac, 0, notif_len);
		sac->sac_type = SCTP_ASSOC_CHANGE;
		sac->sac_flags = 0;
		sac->sac_length = sizeof(struct sctp_assoc_change);
		sac->sac_state = state;
		sac->sac_error = error;
		/* XXX verify these stream counts */
		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
		sac->sac_inbound_streams = stcb->asoc.streamincnt;
		sac->sac_assoc_id = sctp_get_associd(stcb);
		if (notif_len > sizeof(struct sctp_assoc_change)) {
			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
				/* Append one byte per negotiated feature. */
				i = 0;
				if (stcb->asoc.prsctp_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
				}
				if (stcb->asoc.auth_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
				}
				if (stcb->asoc.asconf_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
				}
				if (stcb->asoc.idata_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
				}
				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
				if (stcb->asoc.reconfig_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
				}
				sac->sac_length += i;
			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
				/* Append the (possibly truncated) ABORT chunk. */
				memcpy(sac->sac_info, abort, abort_len);
				sac->sac_length += abort_len;
			}
		}
		SCTP_BUF_LEN(m_notify) = sac->sac_length;
		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
		    0, 0, stcb->asoc.context, 0, 0, 0,
		    m_notify);
		if (control != NULL) {
			control->length = SCTP_BUF_LEN(m_notify);
			control->spec_flags = M_NOTIFICATION;
			/* not that we need this */
			control->tail_mbuf = m_notify;
			sctp_add_to_readq(stcb->sctp_ep, stcb,
			    control,
			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
			    so_locked);
		} else {
			sctp_m_freem(m_notify);
		}
	}
	/*
	 * For 1-to-1 style sockets, we send up and error when an ABORT
	 * comes in.
	 */
set_error:
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		SOCK_LOCK(stcb->sctp_socket);
		if (from_peer) {
			/* Peer aborted: refused vs. reset by setup state. */
			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
				stcb->sctp_socket->so_error = ECONNREFUSED;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
				stcb->sctp_socket->so_error = ECONNRESET;
			}
		} else {
			/* Local abort: timed out vs. aborted by setup state. */
			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
				stcb->sctp_socket->so_error = ETIMEDOUT;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
				stcb->sctp_socket->so_error = ECONNABORTED;
			}
		}
		SOCK_UNLOCK(stcb->sctp_socket);
	}
	/* Wake ANY sleepers */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(stcb->sctp_ep);
	if (!so_locked) {
		/* Hold a ref across the unlock/relock needed for lock order. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
	}
#endif
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		socantrcvmore(stcb->sctp_socket);
	}
	sorwakeup(stcb->sctp_socket);
	sowwakeup(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
2846 
/*
 * Deliver an SCTP_PEER_ADDR_CHANGE notification for the peer address
 * 'sa' (state change such as reachable/unreachable plus an error code)
 * to the application, if it enabled the event.  IPv4 addresses are
 * mapped to v4-mapped IPv6 form when the socket requests it; link-local
 * IPv6 addresses have their scope id recovered or cleared for the user.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	memset(spc, 0, sizeof(struct sctp_paddr_change));
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	/* Copy the address into the notification in user-visible form. */
	switch (sa->sa_family) {
#ifdef INET
	case AF_INET:
#ifdef INET6
		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
			    (struct sockaddr_in6 *)&spc->spc_aaddr);
		} else {
			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		}
#else
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
#endif
		break;
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
				if (sin6->sin6_scope_id == 0) {
					/* recover scope_id for user */
					(void)sa6_recoverscope(sin6);
				} else {
					/* clear embedded scope_id for user */
					in6_clearscope(&sin6->sin6_addr);
				}
			}
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
2938 
2939 
/*
 * Build and queue an SCTP_SEND_FAILED (old API) or SCTP_SEND_FAILED_EVENT
 * (new API) notification for a data chunk that could not be delivered.
 * 'sent' selects SCTP_DATA_SENT vs. SCTP_DATA_UNSENT in the flags, 'error'
 * is reported to the user, and chk->data (minus its chunk header and any
 * padding) is appended to the notification as the failed payload.  The
 * chunk's mbuf chain is stolen (chk->data is set to NULL) on success paths
 * that reach the steal; the notification is dropped silently if the event
 * is not enabled, no mbuf is available, or the socket buffer is full.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	struct sctp_chunkhdr *chkhdr;
	int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	/* New-style event takes precedence when both features are on. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		notifhdr_len = sizeof(struct sctp_send_failed_event);
	} else {
		notifhdr_len = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = notifhdr_len;
	/* I-DATA chunks carry a larger header than plain DATA chunks. */
	if (stcb->asoc.idata_supported) {
		chkhdr_len = sizeof(struct sctp_idata_chunk);
	} else {
		chkhdr_len = sizeof(struct sctp_data_chunk);
	}
	/* Use some defaults in case we can't access the chunk header */
	if (chk->send_size >= chkhdr_len) {
		payload_len = chk->send_size - chkhdr_len;
	} else {
		payload_len = 0;
	}
	padding_len = 0;
	if (chk->data != NULL) {
		chkhdr = mtod(chk->data, struct sctp_chunkhdr *);
		if (chkhdr != NULL) {
			chk_len = ntohs(chkhdr->chunk_length);
			/*
			 * Only trust the on-wire length if it is plausible:
			 * it must cover the header, fit inside send_size,
			 * and leave at most 3 bytes of trailing padding.
			 */
			if ((chk_len >= chkhdr_len) &&
			    (chk->send_size >= chk_len) &&
			    (chk->send_size - chk_len < 4)) {
				padding_len = chk->send_size - chk_len;
				payload_len = chk->send_size - chkhdr_len - padding_len;
			}
		}
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, notifhdr_len);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		if (sent) {
			ssfe->ssfe_flags = SCTP_DATA_SENT;
		} else {
			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		}
		ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len);
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = chk->rec.data.sid;
		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
		ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid;
		ssfe->ssfe_info.snd_context = chk->rec.data.context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
	} else {
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, notifhdr_len);
		ssf->ssf_type = SCTP_SEND_FAILED;
		if (sent) {
			ssf->ssf_flags = SCTP_DATA_SENT;
		} else {
			ssf->ssf_flags = SCTP_DATA_UNSENT;
		}
		ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len);
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssf->ssf_info.sinfo_stream = chk->rec.data.sid;
		ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid;
		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
		ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid;
		ssf->ssf_info.sinfo_context = chk->rec.data.context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
	}
	if (chk->data != NULL) {
		/* Trim off the sctp chunk header (it should be there) */
		if (chk->send_size == chkhdr_len + payload_len + padding_len) {
			/* header off the front, padding off the back */
			m_adj(chk->data, chkhdr_len);
			m_adj(chk->data, -padding_len);
			sctp_mbuf_crush(chk->data);
			chk->send_size -= (chkhdr_len + padding_len);
		}
	}
	SCTP_BUF_NEXT(m_notify) = chk->data;
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
3073 
3074 
3075 static void
3076 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
3077     struct sctp_stream_queue_pending *sp, int so_locked
3078 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3079     SCTP_UNUSED
3080 #endif
3081 )
3082 {
3083 	struct mbuf *m_notify;
3084 	struct sctp_send_failed *ssf;
3085 	struct sctp_send_failed_event *ssfe;
3086 	struct sctp_queued_to_read *control;
3087 	int notifhdr_len;
3088 
3089 	if ((stcb == NULL) ||
3090 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
3091 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
3092 		/* event not enabled */
3093 		return;
3094 	}
3095 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3096 		notifhdr_len = sizeof(struct sctp_send_failed_event);
3097 	} else {
3098 		notifhdr_len = sizeof(struct sctp_send_failed);
3099 	}
3100 	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
3101 	if (m_notify == NULL) {
3102 		/* no space left */
3103 		return;
3104 	}
3105 	SCTP_BUF_LEN(m_notify) = notifhdr_len;
3106 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3107 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
3108 		memset(ssfe, 0, notifhdr_len);
3109 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
3110 		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
3111 		ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length);
3112 		ssfe->ssfe_error = error;
3113 		/* not exactly what the user sent in, but should be close :) */
3114 		ssfe->ssfe_info.snd_sid = sp->sid;
3115 		if (sp->some_taken) {
3116 			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
3117 		} else {
3118 			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
3119 		}
3120 		ssfe->ssfe_info.snd_ppid = sp->ppid;
3121 		ssfe->ssfe_info.snd_context = sp->context;
3122 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
3123 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
3124 	} else {
3125 		ssf = mtod(m_notify, struct sctp_send_failed *);
3126 		memset(ssf, 0, notifhdr_len);
3127 		ssf->ssf_type = SCTP_SEND_FAILED;
3128 		ssf->ssf_flags = SCTP_DATA_UNSENT;
3129 		ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length);
3130 		ssf->ssf_error = error;
3131 		/* not exactly what the user sent in, but should be close :) */
3132 		ssf->ssf_info.sinfo_stream = sp->sid;
3133 		ssf->ssf_info.sinfo_ssn = 0;
3134 		if (sp->some_taken) {
3135 			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
3136 		} else {
3137 			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
3138 		}
3139 		ssf->ssf_info.sinfo_ppid = sp->ppid;
3140 		ssf->ssf_info.sinfo_context = sp->context;
3141 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3142 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3143 	}
3144 	SCTP_BUF_NEXT(m_notify) = sp->data;
3145 
3146 	/* Steal off the mbuf */
3147 	sp->data = NULL;
3148 	/*
3149 	 * For this case, we check the actual socket buffer, since the assoc
3150 	 * is going away we don't want to overfill the socket buffer for a
3151 	 * non-reader
3152 	 */
3153 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3154 		sctp_m_freem(m_notify);
3155 		return;
3156 	}
3157 	/* append to socket */
3158 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3159 	    0, 0, stcb->asoc.context, 0, 0, 0,
3160 	    m_notify);
3161 	if (control == NULL) {
3162 		/* no memory */
3163 		sctp_m_freem(m_notify);
3164 		return;
3165 	}
3166 	control->length = SCTP_BUF_LEN(m_notify);
3167 	control->spec_flags = M_NOTIFICATION;
3168 	/* not that we need this */
3169 	control->tail_mbuf = m_notify;
3170 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3171 	    control,
3172 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3173 }
3174 
3175 
3176 
3177 static void
3178 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3179 {
3180 	struct mbuf *m_notify;
3181 	struct sctp_adaptation_event *sai;
3182 	struct sctp_queued_to_read *control;
3183 
3184 	if ((stcb == NULL) ||
3185 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3186 		/* event not enabled */
3187 		return;
3188 	}
3189 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
3190 	if (m_notify == NULL)
3191 		/* no space left */
3192 		return;
3193 	SCTP_BUF_LEN(m_notify) = 0;
3194 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3195 	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3196 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3197 	sai->sai_flags = 0;
3198 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3199 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3200 	sai->sai_assoc_id = sctp_get_associd(stcb);
3201 
3202 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3203 	SCTP_BUF_NEXT(m_notify) = NULL;
3204 
3205 	/* append to socket */
3206 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3207 	    0, 0, stcb->asoc.context, 0, 0, 0,
3208 	    m_notify);
3209 	if (control == NULL) {
3210 		/* no memory */
3211 		sctp_m_freem(m_notify);
3212 		return;
3213 	}
3214 	control->length = SCTP_BUF_LEN(m_notify);
3215 	control->spec_flags = M_NOTIFICATION;
3216 	/* not that we need this */
3217 	control->tail_mbuf = m_notify;
3218 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3219 	    control,
3220 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3221 }
3222 
/* This always must be called with the read-queue LOCKED in the INP */
/*
 * Queue an SCTP_PARTIAL_DELIVERY_EVENT.  'val' packs the stream id in the
 * upper 16 bits and the sequence number in the lower 16 bits.  Unlike the
 * other notify helpers, this one appends to the read queue by hand (the
 * caller already holds the read-queue lock) and inserts the event right
 * after the in-progress partial-delivery entry when one exists.
 */
static void
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
    uint32_t val, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_pdapi_event *pdapi;
	struct sctp_queued_to_read *control;
	struct sockbuf *sb;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
		/* event not enabled */
		return;
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* nobody can read it anyway, drop the event */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
	pdapi->pdapi_flags = 0;
	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
	pdapi->pdapi_indication = error;
	/* val carries stream (high 16 bits) and sequence (low 16 bits) */
	pdapi->pdapi_stream = (val >> 16);
	pdapi->pdapi_seq = (val & 0x0000ffff);
	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	/* account the mbuf against the socket buffer by hand */
	sb = &stcb->sctp_socket->so_rcv;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
	}
	sctp_sballoc(stcb, sb, m_notify);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
	}
	control->end_added = 1;
	/* place directly after the pdapi entry being aborted, if any */
	if (stcb->asoc.control_pdapi)
		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
	else {
		/* we really should not see this case */
		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
	}
	if (stcb->sctp_ep && stcb->sctp_socket) {
		/* This should always be the case */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * Take the socket lock if the caller did not hold it;
		 * the tcb lock is dropped/retaken around the acquisition,
		 * so re-check that the socket has not gone away.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
3315 
/*
 * Deliver an SCTP_SHUTDOWN_EVENT notification.  For one-to-one style
 * (TCP model) sockets the socket is additionally marked as unable to
 * send before the event is queued.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * Acquire the socket lock; the tcb lock is dropped and
		 * retaken around it, so verify the socket still exists.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	memset(sse, 0, sizeof(struct sctp_shutdown_event));
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3384 
3385 static void
3386 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3387     int so_locked
3388 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3389     SCTP_UNUSED
3390 #endif
3391 )
3392 {
3393 	struct mbuf *m_notify;
3394 	struct sctp_sender_dry_event *event;
3395 	struct sctp_queued_to_read *control;
3396 
3397 	if ((stcb == NULL) ||
3398 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3399 		/* event not enabled */
3400 		return;
3401 	}
3402 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3403 	if (m_notify == NULL) {
3404 		/* no space left */
3405 		return;
3406 	}
3407 	SCTP_BUF_LEN(m_notify) = 0;
3408 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3409 	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3410 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3411 	event->sender_dry_flags = 0;
3412 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3413 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3414 
3415 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3416 	SCTP_BUF_NEXT(m_notify) = NULL;
3417 
3418 	/* append to socket */
3419 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3420 	    0, 0, stcb->asoc.context, 0, 0, 0,
3421 	    m_notify);
3422 	if (control == NULL) {
3423 		/* no memory */
3424 		sctp_m_freem(m_notify);
3425 		return;
3426 	}
3427 	control->length = SCTP_BUF_LEN(m_notify);
3428 	control->spec_flags = M_NOTIFICATION;
3429 	/* not that we need this */
3430 	control->tail_mbuf = m_notify;
3431 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3432 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3433 }
3434 
3435 
3436 void
3437 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3438 {
3439 	struct mbuf *m_notify;
3440 	struct sctp_queued_to_read *control;
3441 	struct sctp_stream_change_event *stradd;
3442 
3443 	if ((stcb == NULL) ||
3444 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3445 		/* event not enabled */
3446 		return;
3447 	}
3448 	if ((stcb->asoc.peer_req_out) && flag) {
3449 		/* Peer made the request, don't tell the local user */
3450 		stcb->asoc.peer_req_out = 0;
3451 		return;
3452 	}
3453 	stcb->asoc.peer_req_out = 0;
3454 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3455 	if (m_notify == NULL)
3456 		/* no space left */
3457 		return;
3458 	SCTP_BUF_LEN(m_notify) = 0;
3459 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3460 	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3461 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3462 	stradd->strchange_flags = flag;
3463 	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3464 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3465 	stradd->strchange_instrms = numberin;
3466 	stradd->strchange_outstrms = numberout;
3467 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3468 	SCTP_BUF_NEXT(m_notify) = NULL;
3469 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3470 		/* no space */
3471 		sctp_m_freem(m_notify);
3472 		return;
3473 	}
3474 	/* append to socket */
3475 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3476 	    0, 0, stcb->asoc.context, 0, 0, 0,
3477 	    m_notify);
3478 	if (control == NULL) {
3479 		/* no memory */
3480 		sctp_m_freem(m_notify);
3481 		return;
3482 	}
3483 	control->length = SCTP_BUF_LEN(m_notify);
3484 	control->spec_flags = M_NOTIFICATION;
3485 	/* not that we need this */
3486 	control->tail_mbuf = m_notify;
3487 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3488 	    control,
3489 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3490 }
3491 
3492 void
3493 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3494 {
3495 	struct mbuf *m_notify;
3496 	struct sctp_queued_to_read *control;
3497 	struct sctp_assoc_reset_event *strasoc;
3498 
3499 	if ((stcb == NULL) ||
3500 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3501 		/* event not enabled */
3502 		return;
3503 	}
3504 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3505 	if (m_notify == NULL)
3506 		/* no space left */
3507 		return;
3508 	SCTP_BUF_LEN(m_notify) = 0;
3509 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3510 	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3511 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3512 	strasoc->assocreset_flags = flag;
3513 	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3514 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3515 	strasoc->assocreset_local_tsn = sending_tsn;
3516 	strasoc->assocreset_remote_tsn = recv_tsn;
3517 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3518 	SCTP_BUF_NEXT(m_notify) = NULL;
3519 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3520 		/* no space */
3521 		sctp_m_freem(m_notify);
3522 		return;
3523 	}
3524 	/* append to socket */
3525 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3526 	    0, 0, stcb->asoc.context, 0, 0, 0,
3527 	    m_notify);
3528 	if (control == NULL) {
3529 		/* no memory */
3530 		sctp_m_freem(m_notify);
3531 		return;
3532 	}
3533 	control->length = SCTP_BUF_LEN(m_notify);
3534 	control->spec_flags = M_NOTIFICATION;
3535 	/* not that we need this */
3536 	control->tail_mbuf = m_notify;
3537 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3538 	    control,
3539 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3540 }
3541 
3542 
3543 
3544 static void
3545 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3546     int number_entries, uint16_t *list, int flag)
3547 {
3548 	struct mbuf *m_notify;
3549 	struct sctp_queued_to_read *control;
3550 	struct sctp_stream_reset_event *strreset;
3551 	int len;
3552 
3553 	if ((stcb == NULL) ||
3554 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3555 		/* event not enabled */
3556 		return;
3557 	}
3558 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3559 	if (m_notify == NULL)
3560 		/* no space left */
3561 		return;
3562 	SCTP_BUF_LEN(m_notify) = 0;
3563 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3564 	if (len > M_TRAILINGSPACE(m_notify)) {
3565 		/* never enough room */
3566 		sctp_m_freem(m_notify);
3567 		return;
3568 	}
3569 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3570 	memset(strreset, 0, len);
3571 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3572 	strreset->strreset_flags = flag;
3573 	strreset->strreset_length = len;
3574 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3575 	if (number_entries) {
3576 		int i;
3577 
3578 		for (i = 0; i < number_entries; i++) {
3579 			strreset->strreset_stream_list[i] = ntohs(list[i]);
3580 		}
3581 	}
3582 	SCTP_BUF_LEN(m_notify) = len;
3583 	SCTP_BUF_NEXT(m_notify) = NULL;
3584 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3585 		/* no space */
3586 		sctp_m_freem(m_notify);
3587 		return;
3588 	}
3589 	/* append to socket */
3590 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3591 	    0, 0, stcb->asoc.context, 0, 0, 0,
3592 	    m_notify);
3593 	if (control == NULL) {
3594 		/* no memory */
3595 		sctp_m_freem(m_notify);
3596 		return;
3597 	}
3598 	control->length = SCTP_BUF_LEN(m_notify);
3599 	control->spec_flags = M_NOTIFICATION;
3600 	/* not that we need this */
3601 	control->tail_mbuf = m_notify;
3602 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3603 	    control,
3604 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3605 }
3606 
3607 
3608 static void
3609 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
3610 {
3611 	struct mbuf *m_notify;
3612 	struct sctp_remote_error *sre;
3613 	struct sctp_queued_to_read *control;
3614 	unsigned int notif_len;
3615 	uint16_t chunk_len;
3616 
3617 	if ((stcb == NULL) ||
3618 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
3619 		return;
3620 	}
3621 	if (chunk != NULL) {
3622 		chunk_len = ntohs(chunk->ch.chunk_length);
3623 		/*
3624 		 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be
3625 		 * contiguous.
3626 		 */
3627 		if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) {
3628 			chunk_len = SCTP_CHUNK_BUFFER_SIZE;
3629 		}
3630 	} else {
3631 		chunk_len = 0;
3632 	}
3633 	notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
3634 	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3635 	if (m_notify == NULL) {
3636 		/* Retry with smaller value. */
3637 		notif_len = (unsigned int)sizeof(struct sctp_remote_error);
3638 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3639 		if (m_notify == NULL) {
3640 			return;
3641 		}
3642 	}
3643 	SCTP_BUF_NEXT(m_notify) = NULL;
3644 	sre = mtod(m_notify, struct sctp_remote_error *);
3645 	memset(sre, 0, notif_len);
3646 	sre->sre_type = SCTP_REMOTE_ERROR;
3647 	sre->sre_flags = 0;
3648 	sre->sre_length = sizeof(struct sctp_remote_error);
3649 	sre->sre_error = error;
3650 	sre->sre_assoc_id = sctp_get_associd(stcb);
3651 	if (notif_len > sizeof(struct sctp_remote_error)) {
3652 		memcpy(sre->sre_data, chunk, chunk_len);
3653 		sre->sre_length += chunk_len;
3654 	}
3655 	SCTP_BUF_LEN(m_notify) = sre->sre_length;
3656 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3657 	    0, 0, stcb->asoc.context, 0, 0, 0,
3658 	    m_notify);
3659 	if (control != NULL) {
3660 		control->length = SCTP_BUF_LEN(m_notify);
3661 		control->spec_flags = M_NOTIFICATION;
3662 		/* not that we need this */
3663 		control->tail_mbuf = m_notify;
3664 		sctp_add_to_readq(stcb->sctp_ep, stcb,
3665 		    control,
3666 		    &stcb->sctp_socket->so_rcv, 1,
3667 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3668 	} else {
3669 		sctp_m_freem(m_notify);
3670 	}
3671 }
3672 
3673 
/*
 * Central notification dispatcher: map an SCTP_NOTIFY_* code onto the
 * sctp_notify_*() helper that builds the corresponding user-visible event.
 * 'data' is interpreted per notification type (a net, a chunk, a pending
 * stream entry, a stream list, an address, or a key id).  Notifications
 * are suppressed when the socket is gone/closed, can no longer be read,
 * or (for interface events) while the association is still handshaking.
 */
void
sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((stcb == NULL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* If the socket is gone we are out of here */
		return;
	}
	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
		/* receive side shut down, nobody would see the event */
		return;
	}
	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) {
		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
			/* Don't report these in front states */
			return;
		}
	}
	switch (notification) {
	case SCTP_NOTIFY_ASSOC_UP:
		/* COMM_UP is sent only once per association */
		if (stcb->asoc.assoc_up_sent == 0) {
			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
			stcb->asoc.assoc_up_sent = 1;
		}
		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
			sctp_notify_adaptation_layer(stcb);
		}
		if (stcb->asoc.auth_supported == 0) {
			/* recurse to report the lack of peer authentication */
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_DOWN:
		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
		break;
	case SCTP_NOTIFY_INTERFACE_DOWN:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_UP:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
		sctp_notify_send_failed2(stcb, error,
		    (struct sctp_stream_queue_pending *)data, so_locked);
		break;
	case SCTP_NOTIFY_SENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 1, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_UNSENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 0, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
		/* (constant name carries a historical typo) */
		{
			uint32_t val;

			val = *((uint32_t *)data);

			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
			break;
		}
	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
		/* CANT_STR_ASSOC while still handshaking, COMM_LOST after */
		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
		/* same as above, but flagged as remotely initiated */
		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_RESTART:
		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
		if (stcb->asoc.auth_supported == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	/* For stream resets, 'error' is the entry count, 'data' the list. */
	case SCTP_NOTIFY_STR_RESET_SEND:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN);
		break;
	case SCTP_NOTIFY_STR_RESET_RECV:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING);
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_ASCONF_ADD_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_DELETE_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_PEER_SHUTDOWN:
		sctp_notify_shutdown_event(stcb);
		break;
	/* For auth events, 'data' carries the key id as an integer. */
	case SCTP_NOTIFY_AUTH_NEW_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_AUTH_FREE_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_NO_PEER_AUTH:
		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_SENDER_DRY:
		sctp_notify_sender_dry_event(stcb, so_locked);
		break;
	case SCTP_NOTIFY_REMOTE_ERROR:
		sctp_notify_remote_error(stcb, error, data);
		break;
	default:
		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
		    __func__, notification, notification);
		break;
	}			/* end switch */
}
3853 
/*
 * Flush all outbound data still queued on the association: the sent queue,
 * the send queue, and every per-stream output queue.  Each message dropped
 * is reported to the ULP via sctp_ulp_notify() with the supplied error.
 * 'holds_lock' indicates whether the caller already owns the TCB send lock;
 * 'so_locked' is passed through to the notification path.
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	/* Nothing to report once the socket is gone or closed. */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
			/* Keep the per-stream queued-chunk count in sync. */
			if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
				asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
			} else {
				panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
			}
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/* Tell the ULP this already-sent message failed. */
			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
			asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
		} else {
			panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/* This message never made it to the wire. */
			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			/* Let the stream scheduler forget this entry too. */
			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, holds_lock);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    error, (void *)sp, so_locked);
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
					sp->tail_mbuf = NULL;
					sp->length = 0;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}
3965 
3966 void
3967 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
3968     struct sctp_abort_chunk *abort, int so_locked
3969 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3970     SCTP_UNUSED
3971 #endif
3972 )
3973 {
3974 	if (stcb == NULL) {
3975 		return;
3976 	}
3977 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3978 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3979 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3980 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3981 	}
3982 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3983 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3984 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3985 		return;
3986 	}
3987 	/* Tell them we lost the asoc */
3988 	sctp_report_all_outbound(stcb, error, 1, so_locked);
3989 	if (from_peer) {
3990 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
3991 	} else {
3992 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
3993 	}
3994 }
3995 
/*
 * Abort an association and send an ABORT back toward the peer using the
 * addresses of the triggering packet (m/iphlen/src/dst/sh).  When a TCB
 * exists, the ULP is notified, statistics are updated and the association
 * is freed; a NULL stcb sends only the ABORT (out-of-the-blue case).
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct mbuf *op_err,
    uint8_t mflowtype, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

	vtag = 0;
	if (stcb != NULL) {
		/* Use the established peer vtag/vrf so the ABORT is accepted. */
		vtag = stcb->asoc.peer_vtag;
		vrf_id = stcb->asoc.vrf_id;
	}
	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
	    mflowtype, mflowid, inp->fibnum,
	    vrf_id, port);
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
		/* Ok, now lets free it */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock-order dance: drop the TCB lock, take the socket
		 * lock, then re-take the TCB lock.  The refcount bump
		 * keeps the TCB alive across the unlocked window.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		/* An established association just went away. */
		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
#ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Dump the inbound and outbound TSN tracking logs of an association for
 * debugging.  Each log is a circular buffer: when 'wrapped' is set, the
 * oldest entries live from the current index to the end of the buffer.
 * NOTE(review): the body is guarded by 'NOSIY_PRINTS', which looks like a
 * misspelling of 'NOISY_PRINTS'; unless that exact macro is defined, this
 * function compiles to a no-op -- confirm whether that is intended.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/* Wrapped: print the older half first (tsn_in_at .. end). */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	/* Then the newer half (0 .. tsn_in_at). */
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
#endif
4104 
/*
 * Abort an existing association: send an ABORT chunk to the peer (with the
 * optional operational error 'op_err'), notify the ULP if the socket is
 * still around, and free the association.  With a NULL stcb, only a
 * pending endpoint free is completed (if the socket is gone and no other
 * associations remain).
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
				/* Last association is gone: finish freeing the PCB. */
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* notify the peer */
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* An established association just went away. */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Lock-order dance: drop the TCB lock, take the socket lock,
	 * re-take the TCB lock; the refcount keeps the TCB alive.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
4165 
/*
 * Handle a packet that matches no existing association ("out of the
 * blue").  Scans the chunks to decide the response: INIT chunks make the
 * reply subject to the blackhole sysctl, SHUTDOWN-ACK gets a
 * SHUTDOWN-COMPLETE, and ABORT / SHUTDOWN-COMPLETE / PACKET-DROPPED get
 * no reply at all.  Otherwise an ABORT carrying 'cause' is sent back.
 */
void
sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct sctp_inpcb *inp,
    struct mbuf *cause,
    uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;
	int contains_init_chunk;

	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
	/* Generate a TO address for future reference */
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
			/* Socket is gone and no associations left: free the PCB. */
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_DIRECTLY_NOCMPSET);
		}
	}
	contains_init_chunk = 0;
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
	    sizeof(*ch), (uint8_t *)&chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_INIT:
			contains_init_chunk = 1;
			break;
		case SCTP_PACKET_DROPPED:
			/* we don't respond to pkt-dropped */
			return;
		case SCTP_ABORT_ASSOCIATION:
			/* we don't respond with an ABORT to an ABORT */
			return;
		case SCTP_SHUTDOWN_COMPLETE:
			/*
			 * we ignore it since we are not waiting for it and
			 * peer is gone
			 */
			return;
		case SCTP_SHUTDOWN_ACK:
			sctp_send_shutdown_complete2(src, dst, sh,
			    mflowtype, mflowid, fibnum,
			    vrf_id, port);
			return;
		default:
			break;
		}
		/* Chunks are 32-bit padded on the wire. */
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *)&chunk_buf);
	}
	/*
	 * Send an ABORT unless blackholing is enabled: level 1 suppresses
	 * replies to INIT only, other levels suppress nothing here.
	 */
	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
	    (contains_init_chunk == 0))) {
		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
		    mflowtype, mflowid, fibnum,
		    vrf_id, port);
	}
}
4231 
4232 /*
4233  * check the inbound datagram to make sure there is not an abort inside it,
4234  * if there is return 1, else return 0.
4235  */
4236 int
4237 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtagfill)
4238 {
4239 	struct sctp_chunkhdr *ch;
4240 	struct sctp_init_chunk *init_chk, chunk_buf;
4241 	int offset;
4242 	unsigned int chk_length;
4243 
4244 	offset = iphlen + sizeof(struct sctphdr);
4245 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4246 	    (uint8_t *)&chunk_buf);
4247 	while (ch != NULL) {
4248 		chk_length = ntohs(ch->chunk_length);
4249 		if (chk_length < sizeof(*ch)) {
4250 			/* packet is probably corrupt */
4251 			break;
4252 		}
4253 		/* we seem to be ok, is it an abort? */
4254 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4255 			/* yep, tell them */
4256 			return (1);
4257 		}
4258 		if (ch->chunk_type == SCTP_INITIATION) {
4259 			/* need to update the Vtag */
4260 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4261 			    offset, sizeof(*init_chk), (uint8_t *)&chunk_buf);
4262 			if (init_chk != NULL) {
4263 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4264 			}
4265 		}
4266 		/* Nope, move to the next chunk */
4267 		offset += SCTP_SIZE32(chk_length);
4268 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4269 		    sizeof(*ch), (uint8_t *)&chunk_buf);
4270 	}
4271 	return (0);
4272 }
4273 
4274 /*
4275  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4276  * set (i.e. it's 0) so, create this function to compare link local scopes
4277  */
4278 #ifdef INET6
4279 uint32_t
4280 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4281 {
4282 	struct sockaddr_in6 a, b;
4283 
4284 	/* save copies */
4285 	a = *addr1;
4286 	b = *addr2;
4287 
4288 	if (a.sin6_scope_id == 0)
4289 		if (sa6_recoverscope(&a)) {
4290 			/* can't get scope, so can't match */
4291 			return (0);
4292 		}
4293 	if (b.sin6_scope_id == 0)
4294 		if (sa6_recoverscope(&b)) {
4295 			/* can't get scope, so can't match */
4296 			return (0);
4297 		}
4298 	if (a.sin6_scope_id != b.sin6_scope_id)
4299 		return (0);
4300 
4301 	return (1);
4302 }
4303 
4304 /*
4305  * returns a sockaddr_in6 with embedded scope recovered and removed
4306  */
4307 struct sockaddr_in6 *
4308 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4309 {
4310 	/* check and strip embedded scope junk */
4311 	if (addr->sin6_family == AF_INET6) {
4312 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4313 			if (addr->sin6_scope_id == 0) {
4314 				*store = *addr;
4315 				if (!sa6_recoverscope(store)) {
4316 					/* use the recovered scope */
4317 					addr = store;
4318 				}
4319 			} else {
4320 				/* else, return the original "to" addr */
4321 				in6_clearscope(&addr->sin6_addr);
4322 			}
4323 		}
4324 	}
4325 	return (addr);
4326 }
4327 #endif
4328 
4329 /*
4330  * are the two addresses the same?  currently a "scopeless" check returns: 1
4331  * if same, 0 if not
4332  */
4333 int
4334 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4335 {
4336 
4337 	/* must be valid */
4338 	if (sa1 == NULL || sa2 == NULL)
4339 		return (0);
4340 
4341 	/* must be the same family */
4342 	if (sa1->sa_family != sa2->sa_family)
4343 		return (0);
4344 
4345 	switch (sa1->sa_family) {
4346 #ifdef INET6
4347 	case AF_INET6:
4348 		{
4349 			/* IPv6 addresses */
4350 			struct sockaddr_in6 *sin6_1, *sin6_2;
4351 
4352 			sin6_1 = (struct sockaddr_in6 *)sa1;
4353 			sin6_2 = (struct sockaddr_in6 *)sa2;
4354 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4355 			    sin6_2));
4356 		}
4357 #endif
4358 #ifdef INET
4359 	case AF_INET:
4360 		{
4361 			/* IPv4 addresses */
4362 			struct sockaddr_in *sin_1, *sin_2;
4363 
4364 			sin_1 = (struct sockaddr_in *)sa1;
4365 			sin_2 = (struct sockaddr_in *)sa2;
4366 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4367 		}
4368 #endif
4369 	default:
4370 		/* we don't do these... */
4371 		return (0);
4372 	}
4373 }
4374 
4375 void
4376 sctp_print_address(struct sockaddr *sa)
4377 {
4378 #ifdef INET6
4379 	char ip6buf[INET6_ADDRSTRLEN];
4380 #endif
4381 
4382 	switch (sa->sa_family) {
4383 #ifdef INET6
4384 	case AF_INET6:
4385 		{
4386 			struct sockaddr_in6 *sin6;
4387 
4388 			sin6 = (struct sockaddr_in6 *)sa;
4389 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4390 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4391 			    ntohs(sin6->sin6_port),
4392 			    sin6->sin6_scope_id);
4393 			break;
4394 		}
4395 #endif
4396 #ifdef INET
4397 	case AF_INET:
4398 		{
4399 			struct sockaddr_in *sin;
4400 			unsigned char *p;
4401 
4402 			sin = (struct sockaddr_in *)sa;
4403 			p = (unsigned char *)&sin->sin_addr;
4404 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4405 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4406 			break;
4407 		}
4408 #endif
4409 	default:
4410 		SCTP_PRINTF("?\n");
4411 		break;
4412 	}
4413 }
4414 
/*
 * Migrate queued read data belonging to 'stcb' from one endpoint to
 * another (peeloff/accept path).  Works in two phases: first pull the
 * matching control structures off the old inp's read queue onto a
 * temporary list while removing their bytes from the old socket's
 * receive buffer, then append them to the new inp's read queue and
 * charge the bytes to the new socket's receive buffer.
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	/* Take the old socket's receive-buffer lock for the whole move. */
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for out target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* Uncharge each mbuf from the old socket's rcv buffer. */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* Charge each mbuf to the new socket's rcv buffer. */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4490 
/*
 * Wake up any reader sleeping on the endpoint's socket.  On platforms
 * that require the socket lock (Apple / lock testing builds) this takes
 * it first, using the TCB refcount to survive the unlocked window, and
 * bails out if the socket disappeared while the locks were dropped.
 */
void
sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((inp != NULL) && (inp->sctp_socket != NULL)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(inp);
		if (!so_locked) {
			/* Lock-order dance: TCB unlock, socket lock, TCB relock. */
			if (stcb) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
			}
			SCTP_SOCKET_LOCK(so, 1);
			if (stcb) {
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
			/* Socket may have vanished while the locks were down. */
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
4529 
/*
 * Append a filled 'control' structure to the endpoint's read queue and
 * charge its mbuf chain to the socket buffer 'sb' so select()/poll()
 * see the data.  Zero-length mbufs are stripped from the chain first.
 * 'end' marks the message complete; 'inp_read_lock_held' says whether
 * the caller already owns the INP read lock.  On failure paths the
 * control and its data are freed.
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	/* Reader can never come back: drop the data instead of queueing. */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		sctp_free_a_readq(stcb, control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	/* Notifications are not counted as received user messages. */
	if (!(control->spec_flags & M_NOTIFICATION)) {
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	/* Walk the chain: drop empty mbufs, charge the rest to sb. */
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		sctp_free_remote_addr(control->whoFrom);
		sctp_free_a_readq(stcb, control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	control->on_read_q = 1;
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	/* Finally, let any sleeping reader know data has arrived. */
	if (inp && inp->sctp_socket) {
		sctp_wakeup_the_read_socket(inp, stcb, so_locked);
	}
}
4627 
4628 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4629  *************ALTERNATE ROUTING CODE
4630  */
4631 
4632 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4633  *************ALTERNATE ROUTING CODE
4634  */
4635 
4636 struct mbuf *
4637 sctp_generate_cause(uint16_t code, char *info)
4638 {
4639 	struct mbuf *m;
4640 	struct sctp_gen_error_cause *cause;
4641 	size_t info_len;
4642 	uint16_t len;
4643 
4644 	if ((code == 0) || (info == NULL)) {
4645 		return (NULL);
4646 	}
4647 	info_len = strlen(info);
4648 	if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
4649 		return (NULL);
4650 	}
4651 	len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len);
4652 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4653 	if (m != NULL) {
4654 		SCTP_BUF_LEN(m) = len;
4655 		cause = mtod(m, struct sctp_gen_error_cause *);
4656 		cause->code = htons(code);
4657 		cause->length = htons(len);
4658 		memcpy(cause->info, info, info_len);
4659 	}
4660 	return (m);
4661 }
4662 
4663 struct mbuf *
4664 sctp_generate_no_user_data_cause(uint32_t tsn)
4665 {
4666 	struct mbuf *m;
4667 	struct sctp_error_no_user_data *no_user_data_cause;
4668 	uint16_t len;
4669 
4670 	len = (uint16_t)sizeof(struct sctp_error_no_user_data);
4671 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4672 	if (m != NULL) {
4673 		SCTP_BUF_LEN(m) = len;
4674 		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4675 		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4676 		no_user_data_cause->cause.length = htons(len);
4677 		no_user_data_cause->tsn = htonl(tsn);
4678 	}
4679 	return (m);
4680 }
4681 
#ifdef SCTP_MBCNT_LOGGING
/*
 * Logging variant of sctp_free_bufspace(): release the buffer-space
 * accounting held by chunk 'tp1' from the association's output queue
 * totals and, for 1-to-1 style sockets, from the socket send buffer.
 * Both decrements clamp at zero rather than underflowing.
 */
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
	if (tp1->data == NULL) {
		return;
	}
	asoc->chunks_on_out_queue -= chk_cnt;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
		    asoc->total_output_queue_size,
		    tp1->book_size,
		    0,
		    tp1->mbcnt);
	}
	/* Clamp the queue-size decrement at zero. */
	if (asoc->total_output_queue_size >= tp1->book_size) {
		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
	} else {
		asoc->total_output_queue_size = 0;
	}

	/* Only 1-to-1 style sockets charge the socket send buffer. */
	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
		} else {
			stcb->sctp_socket->so_snd.sb_cc = 0;

		}
	}
}

#endif
4716 
4717 int
4718 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4719     uint8_t sent, int so_locked
4720 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4721     SCTP_UNUSED
4722 #endif
4723 )
4724 {
4725 	struct sctp_stream_out *strq;
4726 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4727 	struct sctp_stream_queue_pending *sp;
4728 	uint32_t mid;
4729 	uint16_t sid;
4730 	uint8_t foundeom = 0;
4731 	int ret_sz = 0;
4732 	int notdone;
4733 	int do_wakeup_routine = 0;
4734 
4735 	sid = tp1->rec.data.sid;
4736 	mid = tp1->rec.data.mid;
4737 	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
4738 		stcb->asoc.abandoned_sent[0]++;
4739 		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4740 		stcb->asoc.strmout[sid].abandoned_sent[0]++;
4741 #if defined(SCTP_DETAILED_STR_STATS)
4742 		stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4743 #endif
4744 	} else {
4745 		stcb->asoc.abandoned_unsent[0]++;
4746 		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4747 		stcb->asoc.strmout[sid].abandoned_unsent[0]++;
4748 #if defined(SCTP_DETAILED_STR_STATS)
4749 		stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4750 #endif
4751 	}
4752 	do {
4753 		ret_sz += tp1->book_size;
4754 		if (tp1->data != NULL) {
4755 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4756 				sctp_flight_size_decrease(tp1);
4757 				sctp_total_flight_decrease(stcb, tp1);
4758 			}
4759 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4760 			stcb->asoc.peers_rwnd += tp1->send_size;
4761 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4762 			if (sent) {
4763 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4764 			} else {
4765 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4766 			}
4767 			if (tp1->data) {
4768 				sctp_m_freem(tp1->data);
4769 				tp1->data = NULL;
4770 			}
4771 			do_wakeup_routine = 1;
4772 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4773 				stcb->asoc.sent_queue_cnt_removeable--;
4774 			}
4775 		}
4776 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4777 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4778 		    SCTP_DATA_NOT_FRAG) {
4779 			/* not frag'ed we ae done   */
4780 			notdone = 0;
4781 			foundeom = 1;
4782 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4783 			/* end of frag, we are done */
4784 			notdone = 0;
4785 			foundeom = 1;
4786 		} else {
4787 			/*
4788 			 * Its a begin or middle piece, we must mark all of
4789 			 * it
4790 			 */
4791 			notdone = 1;
4792 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4793 		}
4794 	} while (tp1 && notdone);
4795 	if (foundeom == 0) {
4796 		/*
4797 		 * The multi-part message was scattered across the send and
4798 		 * sent queue.
4799 		 */
4800 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4801 			if ((tp1->rec.data.sid != sid) ||
4802 			    (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) {
4803 				break;
4804 			}
4805 			/*
4806 			 * save to chk in case we have some on stream out
4807 			 * queue. If so and we have an un-transmitted one we
4808 			 * don't have to fudge the TSN.
4809 			 */
4810 			chk = tp1;
4811 			ret_sz += tp1->book_size;
4812 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4813 			if (sent) {
4814 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4815 			} else {
4816 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4817 			}
4818 			if (tp1->data) {
4819 				sctp_m_freem(tp1->data);
4820 				tp1->data = NULL;
4821 			}
4822 			/* No flight involved here book the size to 0 */
4823 			tp1->book_size = 0;
4824 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4825 				foundeom = 1;
4826 			}
4827 			do_wakeup_routine = 1;
4828 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4829 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4830 			/*
4831 			 * on to the sent queue so we can wait for it to be
4832 			 * passed by.
4833 			 */
4834 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4835 			    sctp_next);
4836 			stcb->asoc.send_queue_cnt--;
4837 			stcb->asoc.sent_queue_cnt++;
4838 		}
4839 	}
4840 	if (foundeom == 0) {
4841 		/*
4842 		 * Still no eom found. That means there is stuff left on the
4843 		 * stream out queue.. yuck.
4844 		 */
4845 		SCTP_TCB_SEND_LOCK(stcb);
4846 		strq = &stcb->asoc.strmout[sid];
4847 		sp = TAILQ_FIRST(&strq->outqueue);
4848 		if (sp != NULL) {
4849 			sp->discard_rest = 1;
4850 			/*
4851 			 * We may need to put a chunk on the queue that
4852 			 * holds the TSN that would have been sent with the
4853 			 * LAST bit.
4854 			 */
4855 			if (chk == NULL) {
4856 				/* Yep, we have to */
4857 				sctp_alloc_a_chunk(stcb, chk);
4858 				if (chk == NULL) {
4859 					/*
4860 					 * we are hosed. All we can do is
4861 					 * nothing.. which will cause an
4862 					 * abort if the peer is paying
4863 					 * attention.
4864 					 */
4865 					goto oh_well;
4866 				}
4867 				memset(chk, 0, sizeof(*chk));
4868 				chk->rec.data.rcv_flags = 0;
4869 				chk->sent = SCTP_FORWARD_TSN_SKIP;
4870 				chk->asoc = &stcb->asoc;
4871 				if (stcb->asoc.idata_supported == 0) {
4872 					if (sp->sinfo_flags & SCTP_UNORDERED) {
4873 						chk->rec.data.mid = 0;
4874 					} else {
4875 						chk->rec.data.mid = strq->next_mid_ordered;
4876 					}
4877 				} else {
4878 					if (sp->sinfo_flags & SCTP_UNORDERED) {
4879 						chk->rec.data.mid = strq->next_mid_unordered;
4880 					} else {
4881 						chk->rec.data.mid = strq->next_mid_ordered;
4882 					}
4883 				}
4884 				chk->rec.data.sid = sp->sid;
4885 				chk->rec.data.ppid = sp->ppid;
4886 				chk->rec.data.context = sp->context;
4887 				chk->flags = sp->act_flags;
4888 				chk->whoTo = NULL;
4889 				chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4890 				strq->chunks_on_queues++;
4891 				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4892 				stcb->asoc.sent_queue_cnt++;
4893 				stcb->asoc.pr_sctp_cnt++;
4894 			}
4895 			chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4896 			if (sp->sinfo_flags & SCTP_UNORDERED) {
4897 				chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED;
4898 			}
4899 			if (stcb->asoc.idata_supported == 0) {
4900 				if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) {
4901 					strq->next_mid_ordered++;
4902 				}
4903 			} else {
4904 				if (sp->sinfo_flags & SCTP_UNORDERED) {
4905 					strq->next_mid_unordered++;
4906 				} else {
4907 					strq->next_mid_ordered++;
4908 				}
4909 			}
4910 	oh_well:
4911 			if (sp->data) {
4912 				/*
4913 				 * Pull any data to free up the SB and allow
4914 				 * sender to "add more" while we will throw
4915 				 * away :-)
4916 				 */
4917 				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
4918 				ret_sz += sp->length;
4919 				do_wakeup_routine = 1;
4920 				sp->some_taken = 1;
4921 				sctp_m_freem(sp->data);
4922 				sp->data = NULL;
4923 				sp->tail_mbuf = NULL;
4924 				sp->length = 0;
4925 			}
4926 		}
4927 		SCTP_TCB_SEND_UNLOCK(stcb);
4928 	}
4929 	if (do_wakeup_routine) {
4930 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4931 		struct socket *so;
4932 
4933 		so = SCTP_INP_SO(stcb->sctp_ep);
4934 		if (!so_locked) {
4935 			atomic_add_int(&stcb->asoc.refcnt, 1);
4936 			SCTP_TCB_UNLOCK(stcb);
4937 			SCTP_SOCKET_LOCK(so, 1);
4938 			SCTP_TCB_LOCK(stcb);
4939 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4940 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4941 				/* assoc was freed while we were unlocked */
4942 				SCTP_SOCKET_UNLOCK(so, 1);
4943 				return (ret_sz);
4944 			}
4945 		}
4946 #endif
4947 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4948 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4949 		if (!so_locked) {
4950 			SCTP_SOCKET_UNLOCK(so, 1);
4951 		}
4952 #endif
4953 	}
4954 	return (ret_sz);
4955 }
4956 
4957 /*
4958  * checks to see if the given address, sa, is one that is currently known by
4959  * the kernel note: can't distinguish the same address on multiple interfaces
4960  * and doesn't handle multiple addresses with different zone/scope id's note:
4961  * ifa_ifwithaddr() compares the entire sockaddr struct
4962  */
4963 struct sctp_ifa *
4964 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4965     int holds_lock)
4966 {
4967 	struct sctp_laddr *laddr;
4968 
4969 	if (holds_lock == 0) {
4970 		SCTP_INP_RLOCK(inp);
4971 	}
4972 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4973 		if (laddr->ifa == NULL)
4974 			continue;
4975 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4976 			continue;
4977 #ifdef INET
4978 		if (addr->sa_family == AF_INET) {
4979 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4980 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4981 				/* found him. */
4982 				if (holds_lock == 0) {
4983 					SCTP_INP_RUNLOCK(inp);
4984 				}
4985 				return (laddr->ifa);
4986 				break;
4987 			}
4988 		}
4989 #endif
4990 #ifdef INET6
4991 		if (addr->sa_family == AF_INET6) {
4992 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4993 			    &laddr->ifa->address.sin6)) {
4994 				/* found him. */
4995 				if (holds_lock == 0) {
4996 					SCTP_INP_RUNLOCK(inp);
4997 				}
4998 				return (laddr->ifa);
4999 				break;
5000 			}
5001 		}
5002 #endif
5003 	}
5004 	if (holds_lock == 0) {
5005 		SCTP_INP_RUNLOCK(inp);
5006 	}
5007 	return (NULL);
5008 }
5009 
5010 uint32_t
5011 sctp_get_ifa_hash_val(struct sockaddr *addr)
5012 {
5013 	switch (addr->sa_family) {
5014 #ifdef INET
5015 	case AF_INET:
5016 		{
5017 			struct sockaddr_in *sin;
5018 
5019 			sin = (struct sockaddr_in *)addr;
5020 			return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
5021 		}
5022 #endif
5023 #ifdef INET6
5024 	case AF_INET6:
5025 		{
5026 			struct sockaddr_in6 *sin6;
5027 			uint32_t hash_of_addr;
5028 
5029 			sin6 = (struct sockaddr_in6 *)addr;
5030 			hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
5031 			    sin6->sin6_addr.s6_addr32[1] +
5032 			    sin6->sin6_addr.s6_addr32[2] +
5033 			    sin6->sin6_addr.s6_addr32[3]);
5034 			hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
5035 			return (hash_of_addr);
5036 		}
5037 #endif
5038 	default:
5039 		break;
5040 	}
5041 	return (0);
5042 }
5043 
5044 struct sctp_ifa *
5045 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5046 {
5047 	struct sctp_ifa *sctp_ifap;
5048 	struct sctp_vrf *vrf;
5049 	struct sctp_ifalist *hash_head;
5050 	uint32_t hash_of_addr;
5051 
5052 	if (holds_lock == 0)
5053 		SCTP_IPI_ADDR_RLOCK();
5054 
5055 	vrf = sctp_find_vrf(vrf_id);
5056 	if (vrf == NULL) {
5057 		if (holds_lock == 0)
5058 			SCTP_IPI_ADDR_RUNLOCK();
5059 		return (NULL);
5060 	}
5061 	hash_of_addr = sctp_get_ifa_hash_val(addr);
5062 
5063 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5064 	if (hash_head == NULL) {
5065 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5066 		    hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark,
5067 		    (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark));
5068 		sctp_print_address(addr);
5069 		SCTP_PRINTF("No such bucket for address\n");
5070 		if (holds_lock == 0)
5071 			SCTP_IPI_ADDR_RUNLOCK();
5072 
5073 		return (NULL);
5074 	}
5075 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5076 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5077 			continue;
5078 #ifdef INET
5079 		if (addr->sa_family == AF_INET) {
5080 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5081 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5082 				/* found him. */
5083 				if (holds_lock == 0)
5084 					SCTP_IPI_ADDR_RUNLOCK();
5085 				return (sctp_ifap);
5086 				break;
5087 			}
5088 		}
5089 #endif
5090 #ifdef INET6
5091 		if (addr->sa_family == AF_INET6) {
5092 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5093 			    &sctp_ifap->address.sin6)) {
5094 				/* found him. */
5095 				if (holds_lock == 0)
5096 					SCTP_IPI_ADDR_RUNLOCK();
5097 				return (sctp_ifap);
5098 				break;
5099 			}
5100 		}
5101 #endif
5102 	}
5103 	if (holds_lock == 0)
5104 		SCTP_IPI_ADDR_RUNLOCK();
5105 	return (NULL);
5106 }
5107 
/*
 * Called after the user has pulled data off the receive socket buffer.
 * Folds the caller's freed-byte count into the tcb and, if the receive
 * window has opened by at least rwnd_req bytes beyond what was last
 * reported to the peer, sends a window-update SACK immediately;
 * otherwise the pending count is just remembered for a later call.
 *
 * *freed_so_far is consumed: it is transferred onto the tcb and reset
 * to 0.  'hold_rlock' indicates the caller holds the inp read-queue
 * lock; it is dropped around the SACK/output work and re-taken before
 * returning so the caller's locking state is preserved.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Pin the association so it cannot be freed while we work. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check: if we are being freed or shut down, no update. */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* Transfer the caller's freed count onto the tcb's running total. */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be if reported right now. */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/* Window opened enough: emit a window-update SACK now. */
		if (hold_rlock) {
			/* Drop the read lock across the SACK/output path. */
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* Re-check under the tcb lock: no reports here. */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Not enough yet: update how much we have pending. */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* Re-acquire the read lock if we dropped it for the caller. */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5190 
5191 int
5192 sctp_sorecvmsg(struct socket *so,
5193     struct uio *uio,
5194     struct mbuf **mp,
5195     struct sockaddr *from,
5196     int fromlen,
5197     int *msg_flags,
5198     struct sctp_sndrcvinfo *sinfo,
5199     int filling_sinfo)
5200 {
5201 	/*
5202 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5203 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5204 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5205 	 * On the way out we may send out any combination of:
5206 	 * MSG_NOTIFICATION MSG_EOR
5207 	 *
5208 	 */
5209 	struct sctp_inpcb *inp = NULL;
5210 	int my_len = 0;
5211 	int cp_len = 0, error = 0;
5212 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5213 	struct mbuf *m = NULL;
5214 	struct sctp_tcb *stcb = NULL;
5215 	int wakeup_read_socket = 0;
5216 	int freecnt_applied = 0;
5217 	int out_flags = 0, in_flags = 0;
5218 	int block_allowed = 1;
5219 	uint32_t freed_so_far = 0;
5220 	uint32_t copied_so_far = 0;
5221 	int in_eeor_mode = 0;
5222 	int no_rcv_needed = 0;
5223 	uint32_t rwnd_req = 0;
5224 	int hold_sblock = 0;
5225 	int hold_rlock = 0;
5226 	ssize_t slen = 0;
5227 	uint32_t held_length = 0;
5228 	int sockbuf_lock = 0;
5229 
5230 	if (uio == NULL) {
5231 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5232 		return (EINVAL);
5233 	}
5234 	if (msg_flags) {
5235 		in_flags = *msg_flags;
5236 		if (in_flags & MSG_PEEK)
5237 			SCTP_STAT_INCR(sctps_read_peeks);
5238 	} else {
5239 		in_flags = 0;
5240 	}
5241 	slen = uio->uio_resid;
5242 
5243 	/* Pull in and set up our int flags */
5244 	if (in_flags & MSG_OOB) {
5245 		/* Out of band's NOT supported */
5246 		return (EOPNOTSUPP);
5247 	}
5248 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5249 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5250 		return (EINVAL);
5251 	}
5252 	if ((in_flags & (MSG_DONTWAIT
5253 	    | MSG_NBIO
5254 	    )) ||
5255 	    SCTP_SO_IS_NBIO(so)) {
5256 		block_allowed = 0;
5257 	}
5258 	/* setup the endpoint */
5259 	inp = (struct sctp_inpcb *)so->so_pcb;
5260 	if (inp == NULL) {
5261 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5262 		return (EFAULT);
5263 	}
5264 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5265 	/* Must be at least a MTU's worth */
5266 	if (rwnd_req < SCTP_MIN_RWND)
5267 		rwnd_req = SCTP_MIN_RWND;
5268 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5269 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5270 		sctp_misc_ints(SCTP_SORECV_ENTER,
5271 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5272 	}
5273 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5274 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5275 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5276 	}
5277 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5278 	if (error) {
5279 		goto release_unlocked;
5280 	}
5281 	sockbuf_lock = 1;
5282 restart:
5283 
5284 
5285 restart_nosblocks:
5286 	if (hold_sblock == 0) {
5287 		SOCKBUF_LOCK(&so->so_rcv);
5288 		hold_sblock = 1;
5289 	}
5290 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5291 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5292 		goto out;
5293 	}
5294 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5295 		if (so->so_error) {
5296 			error = so->so_error;
5297 			if ((in_flags & MSG_PEEK) == 0)
5298 				so->so_error = 0;
5299 			goto out;
5300 		} else {
5301 			if (so->so_rcv.sb_cc == 0) {
5302 				/* indicate EOF */
5303 				error = 0;
5304 				goto out;
5305 			}
5306 		}
5307 	}
5308 	if (so->so_rcv.sb_cc <= held_length) {
5309 		if (so->so_error) {
5310 			error = so->so_error;
5311 			if ((in_flags & MSG_PEEK) == 0) {
5312 				so->so_error = 0;
5313 			}
5314 			goto out;
5315 		}
5316 		if ((so->so_rcv.sb_cc == 0) &&
5317 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5318 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5319 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5320 				/*
5321 				 * For active open side clear flags for
5322 				 * re-use passive open is blocked by
5323 				 * connect.
5324 				 */
5325 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5326 					/*
5327 					 * You were aborted, passive side
5328 					 * always hits here
5329 					 */
5330 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5331 					error = ECONNRESET;
5332 				}
5333 				so->so_state &= ~(SS_ISCONNECTING |
5334 				    SS_ISDISCONNECTING |
5335 				    SS_ISCONFIRMING |
5336 				    SS_ISCONNECTED);
5337 				if (error == 0) {
5338 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5339 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5340 						error = ENOTCONN;
5341 					}
5342 				}
5343 				goto out;
5344 			}
5345 		}
5346 		if (block_allowed) {
5347 			error = sbwait(&so->so_rcv);
5348 			if (error) {
5349 				goto out;
5350 			}
5351 			held_length = 0;
5352 			goto restart_nosblocks;
5353 		} else {
5354 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5355 			error = EWOULDBLOCK;
5356 			goto out;
5357 		}
5358 	}
5359 	if (hold_sblock == 1) {
5360 		SOCKBUF_UNLOCK(&so->so_rcv);
5361 		hold_sblock = 0;
5362 	}
5363 	/* we possibly have data we can read */
5364 	/* sa_ignore FREED_MEMORY */
5365 	control = TAILQ_FIRST(&inp->read_queue);
5366 	if (control == NULL) {
5367 		/*
5368 		 * This could be happening since the appender did the
5369 		 * increment but as not yet did the tailq insert onto the
5370 		 * read_queue
5371 		 */
5372 		if (hold_rlock == 0) {
5373 			SCTP_INP_READ_LOCK(inp);
5374 		}
5375 		control = TAILQ_FIRST(&inp->read_queue);
5376 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5377 #ifdef INVARIANTS
5378 			panic("Huh, its non zero and nothing on control?");
5379 #endif
5380 			so->so_rcv.sb_cc = 0;
5381 		}
5382 		SCTP_INP_READ_UNLOCK(inp);
5383 		hold_rlock = 0;
5384 		goto restart;
5385 	}
5386 	if ((control->length == 0) &&
5387 	    (control->do_not_ref_stcb)) {
5388 		/*
5389 		 * Clean up code for freeing assoc that left behind a
5390 		 * pdapi.. maybe a peer in EEOR that just closed after
5391 		 * sending and never indicated a EOR.
5392 		 */
5393 		if (hold_rlock == 0) {
5394 			hold_rlock = 1;
5395 			SCTP_INP_READ_LOCK(inp);
5396 		}
5397 		control->held_length = 0;
5398 		if (control->data) {
5399 			/* Hmm there is data here .. fix */
5400 			struct mbuf *m_tmp;
5401 			int cnt = 0;
5402 
5403 			m_tmp = control->data;
5404 			while (m_tmp) {
5405 				cnt += SCTP_BUF_LEN(m_tmp);
5406 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5407 					control->tail_mbuf = m_tmp;
5408 					control->end_added = 1;
5409 				}
5410 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5411 			}
5412 			control->length = cnt;
5413 		} else {
5414 			/* remove it */
5415 			TAILQ_REMOVE(&inp->read_queue, control, next);
			/* Add back any hidden data */
5417 			sctp_free_remote_addr(control->whoFrom);
5418 			sctp_free_a_readq(stcb, control);
5419 		}
5420 		if (hold_rlock) {
5421 			hold_rlock = 0;
5422 			SCTP_INP_READ_UNLOCK(inp);
5423 		}
5424 		goto restart;
5425 	}
5426 	if ((control->length == 0) &&
5427 	    (control->end_added == 1)) {
5428 		/*
5429 		 * Do we also need to check for (control->pdapi_aborted ==
5430 		 * 1)?
5431 		 */
5432 		if (hold_rlock == 0) {
5433 			hold_rlock = 1;
5434 			SCTP_INP_READ_LOCK(inp);
5435 		}
5436 		TAILQ_REMOVE(&inp->read_queue, control, next);
5437 		if (control->data) {
5438 #ifdef INVARIANTS
5439 			panic("control->data not null but control->length == 0");
5440 #else
5441 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5442 			sctp_m_freem(control->data);
5443 			control->data = NULL;
5444 #endif
5445 		}
5446 		if (control->aux_data) {
5447 			sctp_m_free(control->aux_data);
5448 			control->aux_data = NULL;
5449 		}
5450 #ifdef INVARIANTS
5451 		if (control->on_strm_q) {
5452 			panic("About to free ctl:%p so:%p and its in %d",
5453 			    control, so, control->on_strm_q);
5454 		}
5455 #endif
5456 		sctp_free_remote_addr(control->whoFrom);
5457 		sctp_free_a_readq(stcb, control);
5458 		if (hold_rlock) {
5459 			hold_rlock = 0;
5460 			SCTP_INP_READ_UNLOCK(inp);
5461 		}
5462 		goto restart;
5463 	}
5464 	if (control->length == 0) {
5465 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5466 		    (filling_sinfo)) {
5467 			/* find a more suitable one then this */
5468 			ctl = TAILQ_NEXT(control, next);
5469 			while (ctl) {
5470 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5471 				    (ctl->some_taken ||
5472 				    (ctl->spec_flags & M_NOTIFICATION) ||
5473 				    ((ctl->do_not_ref_stcb == 0) &&
5474 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5475 				    ) {
5476 					/*-
5477 					 * If we have a different TCB next, and there is data
5478 					 * present. If we have already taken some (pdapi), OR we can
5479 					 * ref the tcb and no delivery as started on this stream, we
5480 					 * take it. Note we allow a notification on a different
5481 					 * assoc to be delivered..
5482 					 */
5483 					control = ctl;
5484 					goto found_one;
5485 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5486 					    (ctl->length) &&
5487 					    ((ctl->some_taken) ||
5488 					    ((ctl->do_not_ref_stcb == 0) &&
5489 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5490 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5491 					/*-
5492 					 * If we have the same tcb, and there is data present, and we
5493 					 * have the strm interleave feature present. Then if we have
					 * taken some (pdapi) or we can refer to the tcb AND we have
					 * not started a delivery for this stream, we can take it.
					 * Note we do NOT allow a notification on the same assoc to
5497 					 * be delivered.
5498 					 */
5499 					control = ctl;
5500 					goto found_one;
5501 				}
5502 				ctl = TAILQ_NEXT(ctl, next);
5503 			}
5504 		}
5505 		/*
5506 		 * if we reach here, not suitable replacement is available
5507 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5508 		 * into the our held count, and its time to sleep again.
5509 		 */
5510 		held_length = so->so_rcv.sb_cc;
5511 		control->held_length = so->so_rcv.sb_cc;
5512 		goto restart;
5513 	}
5514 	/* Clear the held length since there is something to read */
5515 	control->held_length = 0;
5516 found_one:
5517 	/*
5518 	 * If we reach here, control has a some data for us to read off.
5519 	 * Note that stcb COULD be NULL.
5520 	 */
5521 	if (hold_rlock == 0) {
5522 		hold_rlock = 1;
5523 		SCTP_INP_READ_LOCK(inp);
5524 	}
5525 	control->some_taken++;
5526 	stcb = control->stcb;
5527 	if (stcb) {
5528 		if ((control->do_not_ref_stcb == 0) &&
5529 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5530 			if (freecnt_applied == 0)
5531 				stcb = NULL;
5532 		} else if (control->do_not_ref_stcb == 0) {
5533 			/* you can't free it on me please */
5534 			/*
5535 			 * The lock on the socket buffer protects us so the
5536 			 * free code will stop. But since we used the
5537 			 * socketbuf lock and the sender uses the tcb_lock
5538 			 * to increment, we need to use the atomic add to
5539 			 * the refcnt
5540 			 */
5541 			if (freecnt_applied) {
5542 #ifdef INVARIANTS
5543 				panic("refcnt already incremented");
5544 #else
5545 				SCTP_PRINTF("refcnt already incremented?\n");
5546 #endif
5547 			} else {
5548 				atomic_add_int(&stcb->asoc.refcnt, 1);
5549 				freecnt_applied = 1;
5550 			}
5551 			/*
5552 			 * Setup to remember how much we have not yet told
5553 			 * the peer our rwnd has opened up. Note we grab the
5554 			 * value from the tcb from last time. Note too that
5555 			 * sack sending clears this when a sack is sent,
5556 			 * which is fine. Once we hit the rwnd_req, we then
5557 			 * will go to the sctp_user_rcvd() that will not
5558 			 * lock until it KNOWs it MUST send a WUP-SACK.
5559 			 */
5560 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5561 			stcb->freed_by_sorcv_sincelast = 0;
5562 		}
5563 	}
5564 	if (stcb &&
5565 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5566 	    control->do_not_ref_stcb == 0) {
5567 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5568 	}
5569 	/* First lets get off the sinfo and sockaddr info */
5570 	if ((sinfo != NULL) && (filling_sinfo != 0)) {
5571 		sinfo->sinfo_stream = control->sinfo_stream;
5572 		sinfo->sinfo_ssn = (uint16_t)control->mid;
5573 		sinfo->sinfo_flags = control->sinfo_flags;
5574 		sinfo->sinfo_ppid = control->sinfo_ppid;
5575 		sinfo->sinfo_context = control->sinfo_context;
5576 		sinfo->sinfo_timetolive = control->sinfo_timetolive;
5577 		sinfo->sinfo_tsn = control->sinfo_tsn;
5578 		sinfo->sinfo_cumtsn = control->sinfo_cumtsn;
5579 		sinfo->sinfo_assoc_id = control->sinfo_assoc_id;
5580 		nxt = TAILQ_NEXT(control, next);
5581 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5582 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5583 			struct sctp_extrcvinfo *s_extra;
5584 
5585 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5586 			if ((nxt) &&
5587 			    (nxt->length)) {
5588 				s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5589 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5590 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5591 				}
5592 				if (nxt->spec_flags & M_NOTIFICATION) {
5593 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5594 				}
5595 				s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
5596 				s_extra->serinfo_next_length = nxt->length;
5597 				s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
5598 				s_extra->serinfo_next_stream = nxt->sinfo_stream;
5599 				if (nxt->tail_mbuf != NULL) {
5600 					if (nxt->end_added) {
5601 						s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5602 					}
5603 				}
5604 			} else {
5605 				/*
5606 				 * we explicitly 0 this, since the memcpy
5607 				 * got some other things beyond the older
5608 				 * sinfo_ that is on the control's structure
5609 				 * :-D
5610 				 */
5611 				nxt = NULL;
5612 				s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
5613 				s_extra->serinfo_next_aid = 0;
5614 				s_extra->serinfo_next_length = 0;
5615 				s_extra->serinfo_next_ppid = 0;
5616 				s_extra->serinfo_next_stream = 0;
5617 			}
5618 		}
5619 		/*
5620 		 * update off the real current cum-ack, if we have an stcb.
5621 		 */
5622 		if ((control->do_not_ref_stcb == 0) && stcb)
5623 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5624 		/*
5625 		 * mask off the high bits, we keep the actual chunk bits in
5626 		 * there.
5627 		 */
5628 		sinfo->sinfo_flags &= 0x00ff;
5629 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5630 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5631 		}
5632 	}
5633 #ifdef SCTP_ASOCLOG_OF_TSNS
5634 	{
5635 		int index, newindex;
5636 		struct sctp_pcbtsn_rlog *entry;
5637 
5638 		do {
5639 			index = inp->readlog_index;
5640 			newindex = index + 1;
5641 			if (newindex >= SCTP_READ_LOG_SIZE) {
5642 				newindex = 0;
5643 			}
5644 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5645 		entry = &inp->readlog[index];
5646 		entry->vtag = control->sinfo_assoc_id;
5647 		entry->strm = control->sinfo_stream;
5648 		entry->seq = (uint16_t)control->mid;
5649 		entry->sz = control->length;
5650 		entry->flgs = control->sinfo_flags;
5651 	}
5652 #endif
5653 	if ((fromlen > 0) && (from != NULL)) {
5654 		union sctp_sockstore store;
5655 		size_t len;
5656 
5657 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5658 #ifdef INET6
5659 		case AF_INET6:
5660 			len = sizeof(struct sockaddr_in6);
5661 			store.sin6 = control->whoFrom->ro._l_addr.sin6;
5662 			store.sin6.sin6_port = control->port_from;
5663 			break;
5664 #endif
5665 #ifdef INET
5666 		case AF_INET:
5667 #ifdef INET6
5668 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
5669 				len = sizeof(struct sockaddr_in6);
5670 				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
5671 				    &store.sin6);
5672 				store.sin6.sin6_port = control->port_from;
5673 			} else {
5674 				len = sizeof(struct sockaddr_in);
5675 				store.sin = control->whoFrom->ro._l_addr.sin;
5676 				store.sin.sin_port = control->port_from;
5677 			}
5678 #else
5679 			len = sizeof(struct sockaddr_in);
5680 			store.sin = control->whoFrom->ro._l_addr.sin;
5681 			store.sin.sin_port = control->port_from;
5682 #endif
5683 			break;
5684 #endif
5685 		default:
5686 			len = 0;
5687 			break;
5688 		}
5689 		memcpy(from, &store, min((size_t)fromlen, len));
5690 #ifdef INET6
5691 		{
5692 			struct sockaddr_in6 lsa6, *from6;
5693 
5694 			from6 = (struct sockaddr_in6 *)from;
5695 			sctp_recover_scope_mac(from6, (&lsa6));
5696 		}
5697 #endif
5698 	}
5699 	if (hold_rlock) {
5700 		SCTP_INP_READ_UNLOCK(inp);
5701 		hold_rlock = 0;
5702 	}
5703 	if (hold_sblock) {
5704 		SOCKBUF_UNLOCK(&so->so_rcv);
5705 		hold_sblock = 0;
5706 	}
5707 	/* now copy out what data we can */
5708 	if (mp == NULL) {
5709 		/* copy out each mbuf in the chain up to length */
5710 get_more_data:
5711 		m = control->data;
5712 		while (m) {
5713 			/* Move out all we can */
5714 			cp_len = (int)uio->uio_resid;
5715 			my_len = (int)SCTP_BUF_LEN(m);
5716 			if (cp_len > my_len) {
5717 				/* not enough in this buf */
5718 				cp_len = my_len;
5719 			}
5720 			if (hold_rlock) {
5721 				SCTP_INP_READ_UNLOCK(inp);
5722 				hold_rlock = 0;
5723 			}
5724 			if (cp_len > 0)
5725 				error = uiomove(mtod(m, char *), cp_len, uio);
5726 			/* re-read */
5727 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5728 				goto release;
5729 			}
5730 			if ((control->do_not_ref_stcb == 0) && stcb &&
5731 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5732 				no_rcv_needed = 1;
5733 			}
5734 			if (error) {
5735 				/* error we are out of here */
5736 				goto release;
5737 			}
5738 			SCTP_INP_READ_LOCK(inp);
5739 			hold_rlock = 1;
5740 			if (cp_len == SCTP_BUF_LEN(m)) {
5741 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5742 				    (control->end_added)) {
5743 					out_flags |= MSG_EOR;
5744 					if ((control->do_not_ref_stcb == 0) &&
5745 					    (control->stcb != NULL) &&
5746 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5747 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5748 				}
5749 				if (control->spec_flags & M_NOTIFICATION) {
5750 					out_flags |= MSG_NOTIFICATION;
5751 				}
5752 				/* we ate up the mbuf */
5753 				if (in_flags & MSG_PEEK) {
5754 					/* just looking */
5755 					m = SCTP_BUF_NEXT(m);
5756 					copied_so_far += cp_len;
5757 				} else {
5758 					/* dispose of the mbuf */
5759 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5760 						sctp_sblog(&so->so_rcv,
5761 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5762 					}
5763 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5764 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5765 						sctp_sblog(&so->so_rcv,
5766 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5767 					}
5768 					copied_so_far += cp_len;
5769 					freed_so_far += cp_len;
5770 					freed_so_far += MSIZE;
5771 					atomic_subtract_int(&control->length, cp_len);
5772 					control->data = sctp_m_free(m);
5773 					m = control->data;
5774 					/*
5775 					 * been through it all, must hold sb
5776 					 * lock ok to null tail
5777 					 */
5778 					if (control->data == NULL) {
5779 #ifdef INVARIANTS
5780 						if ((control->end_added == 0) ||
5781 						    (TAILQ_NEXT(control, next) == NULL)) {
5782 							/*
5783 							 * If the end is not
5784 							 * added, OR the
5785 							 * next is NOT null
5786 							 * we MUST have the
5787 							 * lock.
5788 							 */
5789 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5790 								panic("Hmm we don't own the lock?");
5791 							}
5792 						}
5793 #endif
5794 						control->tail_mbuf = NULL;
5795 #ifdef INVARIANTS
5796 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5797 							panic("end_added, nothing left and no MSG_EOR");
5798 						}
5799 #endif
5800 					}
5801 				}
5802 			} else {
5803 				/* Do we need to trim the mbuf? */
5804 				if (control->spec_flags & M_NOTIFICATION) {
5805 					out_flags |= MSG_NOTIFICATION;
5806 				}
5807 				if ((in_flags & MSG_PEEK) == 0) {
5808 					SCTP_BUF_RESV_UF(m, cp_len);
5809 					SCTP_BUF_LEN(m) -= cp_len;
5810 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5811 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5812 					}
5813 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5814 					if ((control->do_not_ref_stcb == 0) &&
5815 					    stcb) {
5816 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5817 					}
5818 					copied_so_far += cp_len;
5819 					freed_so_far += cp_len;
5820 					freed_so_far += MSIZE;
5821 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5822 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5823 						    SCTP_LOG_SBRESULT, 0);
5824 					}
5825 					atomic_subtract_int(&control->length, cp_len);
5826 				} else {
5827 					copied_so_far += cp_len;
5828 				}
5829 			}
5830 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5831 				break;
5832 			}
5833 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5834 			    (control->do_not_ref_stcb == 0) &&
5835 			    (freed_so_far >= rwnd_req)) {
5836 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5837 			}
5838 		}		/* end while(m) */
5839 		/*
5840 		 * At this point we have looked at it all and we either have
5841 		 * a MSG_EOR/or read all the user wants... <OR>
5842 		 * control->length == 0.
5843 		 */
5844 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5845 			/* we are done with this control */
5846 			if (control->length == 0) {
5847 				if (control->data) {
5848 #ifdef INVARIANTS
5849 					panic("control->data not null at read eor?");
5850 #else
5851 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5852 					sctp_m_freem(control->data);
5853 					control->data = NULL;
5854 #endif
5855 				}
5856 		done_with_control:
5857 				if (hold_rlock == 0) {
5858 					SCTP_INP_READ_LOCK(inp);
5859 					hold_rlock = 1;
5860 				}
5861 				TAILQ_REMOVE(&inp->read_queue, control, next);
5862 				/* Add back any hiddend data */
5863 				if (control->held_length) {
5864 					held_length = 0;
5865 					control->held_length = 0;
5866 					wakeup_read_socket = 1;
5867 				}
5868 				if (control->aux_data) {
5869 					sctp_m_free(control->aux_data);
5870 					control->aux_data = NULL;
5871 				}
5872 				no_rcv_needed = control->do_not_ref_stcb;
5873 				sctp_free_remote_addr(control->whoFrom);
5874 				control->data = NULL;
5875 #ifdef INVARIANTS
5876 				if (control->on_strm_q) {
5877 					panic("About to free ctl:%p so:%p and its in %d",
5878 					    control, so, control->on_strm_q);
5879 				}
5880 #endif
5881 				sctp_free_a_readq(stcb, control);
5882 				control = NULL;
5883 				if ((freed_so_far >= rwnd_req) &&
5884 				    (no_rcv_needed == 0))
5885 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5886 
5887 			} else {
5888 				/*
5889 				 * The user did not read all of this
5890 				 * message, turn off the returned MSG_EOR
5891 				 * since we are leaving more behind on the
5892 				 * control to read.
5893 				 */
5894 #ifdef INVARIANTS
5895 				if (control->end_added &&
5896 				    (control->data == NULL) &&
5897 				    (control->tail_mbuf == NULL)) {
5898 					panic("Gak, control->length is corrupt?");
5899 				}
5900 #endif
5901 				no_rcv_needed = control->do_not_ref_stcb;
5902 				out_flags &= ~MSG_EOR;
5903 			}
5904 		}
5905 		if (out_flags & MSG_EOR) {
5906 			goto release;
5907 		}
5908 		if ((uio->uio_resid == 0) ||
5909 		    ((in_eeor_mode) &&
5910 		    (copied_so_far >= (uint32_t)max(so->so_rcv.sb_lowat, 1)))) {
5911 			goto release;
5912 		}
5913 		/*
5914 		 * If I hit here the receiver wants more and this message is
5915 		 * NOT done (pd-api). So two questions. Can we block? if not
5916 		 * we are done. Did the user NOT set MSG_WAITALL?
5917 		 */
5918 		if (block_allowed == 0) {
5919 			goto release;
5920 		}
5921 		/*
5922 		 * We need to wait for more data a few things: - We don't
5923 		 * sbunlock() so we don't get someone else reading. - We
5924 		 * must be sure to account for the case where what is added
5925 		 * is NOT to our control when we wakeup.
5926 		 */
5927 
5928 		/*
5929 		 * Do we need to tell the transport a rwnd update might be
5930 		 * needed before we go to sleep?
5931 		 */
5932 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5933 		    ((freed_so_far >= rwnd_req) &&
5934 		    (control->do_not_ref_stcb == 0) &&
5935 		    (no_rcv_needed == 0))) {
5936 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5937 		}
5938 wait_some_more:
5939 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5940 			goto release;
5941 		}
5942 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5943 			goto release;
5944 
5945 		if (hold_rlock == 1) {
5946 			SCTP_INP_READ_UNLOCK(inp);
5947 			hold_rlock = 0;
5948 		}
5949 		if (hold_sblock == 0) {
5950 			SOCKBUF_LOCK(&so->so_rcv);
5951 			hold_sblock = 1;
5952 		}
5953 		if ((copied_so_far) && (control->length == 0) &&
5954 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5955 			goto release;
5956 		}
5957 		if (so->so_rcv.sb_cc <= control->held_length) {
5958 			error = sbwait(&so->so_rcv);
5959 			if (error) {
5960 				goto release;
5961 			}
5962 			control->held_length = 0;
5963 		}
5964 		if (hold_sblock) {
5965 			SOCKBUF_UNLOCK(&so->so_rcv);
5966 			hold_sblock = 0;
5967 		}
5968 		if (control->length == 0) {
5969 			/* still nothing here */
5970 			if (control->end_added == 1) {
5971 				/* he aborted, or is done i.e.did a shutdown */
5972 				out_flags |= MSG_EOR;
5973 				if (control->pdapi_aborted) {
5974 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5975 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5976 
5977 					out_flags |= MSG_TRUNC;
5978 				} else {
5979 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5980 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5981 				}
5982 				goto done_with_control;
5983 			}
5984 			if (so->so_rcv.sb_cc > held_length) {
5985 				control->held_length = so->so_rcv.sb_cc;
5986 				held_length = 0;
5987 			}
5988 			goto wait_some_more;
5989 		} else if (control->data == NULL) {
5990 			/*
5991 			 * we must re-sync since data is probably being
5992 			 * added
5993 			 */
5994 			SCTP_INP_READ_LOCK(inp);
5995 			if ((control->length > 0) && (control->data == NULL)) {
5996 				/*
5997 				 * big trouble.. we have the lock and its
5998 				 * corrupt?
5999 				 */
6000 #ifdef INVARIANTS
6001 				panic("Impossible data==NULL length !=0");
6002 #endif
6003 				out_flags |= MSG_EOR;
6004 				out_flags |= MSG_TRUNC;
6005 				control->length = 0;
6006 				SCTP_INP_READ_UNLOCK(inp);
6007 				goto done_with_control;
6008 			}
6009 			SCTP_INP_READ_UNLOCK(inp);
6010 			/* We will fall around to get more data */
6011 		}
6012 		goto get_more_data;
6013 	} else {
6014 		/*-
6015 		 * Give caller back the mbuf chain,
6016 		 * store in uio_resid the length
6017 		 */
6018 		wakeup_read_socket = 0;
6019 		if ((control->end_added == 0) ||
6020 		    (TAILQ_NEXT(control, next) == NULL)) {
6021 			/* Need to get rlock */
6022 			if (hold_rlock == 0) {
6023 				SCTP_INP_READ_LOCK(inp);
6024 				hold_rlock = 1;
6025 			}
6026 		}
6027 		if (control->end_added) {
6028 			out_flags |= MSG_EOR;
6029 			if ((control->do_not_ref_stcb == 0) &&
6030 			    (control->stcb != NULL) &&
6031 			    ((control->spec_flags & M_NOTIFICATION) == 0))
6032 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6033 		}
6034 		if (control->spec_flags & M_NOTIFICATION) {
6035 			out_flags |= MSG_NOTIFICATION;
6036 		}
6037 		uio->uio_resid = control->length;
6038 		*mp = control->data;
6039 		m = control->data;
6040 		while (m) {
6041 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6042 				sctp_sblog(&so->so_rcv,
6043 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6044 			}
6045 			sctp_sbfree(control, stcb, &so->so_rcv, m);
6046 			freed_so_far += SCTP_BUF_LEN(m);
6047 			freed_so_far += MSIZE;
6048 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6049 				sctp_sblog(&so->so_rcv,
6050 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6051 			}
6052 			m = SCTP_BUF_NEXT(m);
6053 		}
6054 		control->data = control->tail_mbuf = NULL;
6055 		control->length = 0;
6056 		if (out_flags & MSG_EOR) {
6057 			/* Done with this control */
6058 			goto done_with_control;
6059 		}
6060 	}
6061 release:
6062 	if (hold_rlock == 1) {
6063 		SCTP_INP_READ_UNLOCK(inp);
6064 		hold_rlock = 0;
6065 	}
6066 	if (hold_sblock == 1) {
6067 		SOCKBUF_UNLOCK(&so->so_rcv);
6068 		hold_sblock = 0;
6069 	}
6070 	sbunlock(&so->so_rcv);
6071 	sockbuf_lock = 0;
6072 
6073 release_unlocked:
6074 	if (hold_sblock) {
6075 		SOCKBUF_UNLOCK(&so->so_rcv);
6076 		hold_sblock = 0;
6077 	}
6078 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6079 		if ((freed_so_far >= rwnd_req) &&
6080 		    (control && (control->do_not_ref_stcb == 0)) &&
6081 		    (no_rcv_needed == 0))
6082 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6083 	}
6084 out:
6085 	if (msg_flags) {
6086 		*msg_flags = out_flags;
6087 	}
6088 	if (((out_flags & MSG_EOR) == 0) &&
6089 	    ((in_flags & MSG_PEEK) == 0) &&
6090 	    (sinfo) &&
6091 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6092 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6093 		struct sctp_extrcvinfo *s_extra;
6094 
6095 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6096 		s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
6097 	}
6098 	if (hold_rlock == 1) {
6099 		SCTP_INP_READ_UNLOCK(inp);
6100 	}
6101 	if (hold_sblock) {
6102 		SOCKBUF_UNLOCK(&so->so_rcv);
6103 	}
6104 	if (sockbuf_lock) {
6105 		sbunlock(&so->so_rcv);
6106 	}
6107 	if (freecnt_applied) {
6108 		/*
6109 		 * The lock on the socket buffer protects us so the free
6110 		 * code will stop. But since we used the socketbuf lock and
6111 		 * the sender uses the tcb_lock to increment, we need to use
6112 		 * the atomic add to the refcnt.
6113 		 */
6114 		if (stcb == NULL) {
6115 #ifdef INVARIANTS
6116 			panic("stcb for refcnt has gone NULL?");
6117 			goto stage_left;
6118 #else
6119 			goto stage_left;
6120 #endif
6121 		}
6122 		/* Save the value back for next time */
6123 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6124 		atomic_add_int(&stcb->asoc.refcnt, -1);
6125 	}
6126 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6127 		if (stcb) {
6128 			sctp_misc_ints(SCTP_SORECV_DONE,
6129 			    freed_so_far,
6130 			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6131 			    stcb->asoc.my_rwnd,
6132 			    so->so_rcv.sb_cc);
6133 		} else {
6134 			sctp_misc_ints(SCTP_SORECV_DONE,
6135 			    freed_so_far,
6136 			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6137 			    0,
6138 			    so->so_rcv.sb_cc);
6139 		}
6140 	}
6141 stage_left:
6142 	if (wakeup_read_socket) {
6143 		sctp_sorwakeup(inp, so);
6144 	}
6145 	return (error);
6146 }
6147 
6148 
6149 #ifdef SCTP_MBUF_LOGGING
6150 struct mbuf *
6151 sctp_m_free(struct mbuf *m)
6152 {
6153 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6154 		sctp_log_mb(m, SCTP_MBUF_IFREE);
6155 	}
6156 	return (m_free(m));
6157 }
6158 
6159 void
6160 sctp_m_freem(struct mbuf *mb)
6161 {
6162 	while (mb != NULL)
6163 		mb = sctp_m_free(mb);
6164 }
6165 
6166 #endif
6167 
int
sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
{
	/*
	 * Given a local address. For all associations that holds the
	 * address, request a peer-set-primary.
	 *
	 * The work is not done here: a work item is queued on the global
	 * address work queue and the ADDR_WQ timer is kicked so the
	 * iterator performs the SCTP_SET_PRIM_ADDR action asynchronously.
	 *
	 * Returns 0 on success, EADDRNOTAVAIL if 'sa' is not a known
	 * local address in 'vrf_id', or ENOMEM if no work item could be
	 * allocated.
	 */
	struct sctp_ifa *ifa;
	struct sctp_laddr *wi;

	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
	if (ifa == NULL) {
		/* Address is not one of ours in this VRF. */
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
		return (EADDRNOTAVAIL);
	}
	/*
	 * Now that we have the ifa we must awaken the iterator with this
	 * message.
	 */
	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
	if (wi == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	/* Now incr the count and init the wi structure */
	SCTP_INCR_LADDR_COUNT();
	memset(wi, 0, sizeof(*wi));
	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
	wi->ifa = ifa;
	wi->action = SCTP_SET_PRIM_ADDR;
	/* The work item holds a reference on the ifa until processed. */
	atomic_add_int(&ifa->refcount, 1);

	/* Now add it to the work queue */
	SCTP_WQ_ADDR_LOCK();
	/*
	 * Should this really be a tailq? As it is we will process the
	 * newest first :-0
	 */
	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
	/* Kick the ADDR_WQ timer so the queued action gets run. */
	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
	    (struct sctp_inpcb *)NULL,
	    (struct sctp_tcb *)NULL,
	    (struct sctp_nets *)NULL);
	SCTP_WQ_ADDR_UNLOCK();
	return (0);
}
6214 
6215 
6216 int
6217 sctp_soreceive(struct socket *so,
6218     struct sockaddr **psa,
6219     struct uio *uio,
6220     struct mbuf **mp0,
6221     struct mbuf **controlp,
6222     int *flagsp)
6223 {
6224 	int error, fromlen;
6225 	uint8_t sockbuf[256];
6226 	struct sockaddr *from;
6227 	struct sctp_extrcvinfo sinfo;
6228 	int filling_sinfo = 1;
6229 	int flags;
6230 	struct sctp_inpcb *inp;
6231 
6232 	inp = (struct sctp_inpcb *)so->so_pcb;
6233 	/* pickup the assoc we are reading from */
6234 	if (inp == NULL) {
6235 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6236 		return (EINVAL);
6237 	}
6238 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6239 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6240 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6241 	    (controlp == NULL)) {
6242 		/* user does not want the sndrcv ctl */
6243 		filling_sinfo = 0;
6244 	}
6245 	if (psa) {
6246 		from = (struct sockaddr *)sockbuf;
6247 		fromlen = sizeof(sockbuf);
6248 		from->sa_len = 0;
6249 	} else {
6250 		from = NULL;
6251 		fromlen = 0;
6252 	}
6253 
6254 	if (filling_sinfo) {
6255 		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6256 	}
6257 	if (flagsp != NULL) {
6258 		flags = *flagsp;
6259 	} else {
6260 		flags = 0;
6261 	}
6262 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags,
6263 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6264 	if (flagsp != NULL) {
6265 		*flagsp = flags;
6266 	}
6267 	if (controlp != NULL) {
6268 		/* copy back the sinfo in a CMSG format */
6269 		if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) {
6270 			*controlp = sctp_build_ctl_nchunk(inp,
6271 			    (struct sctp_sndrcvinfo *)&sinfo);
6272 		} else {
6273 			*controlp = NULL;
6274 		}
6275 	}
6276 	if (psa) {
6277 		/* copy back the address info */
6278 		if (from && from->sa_len) {
6279 			*psa = sodupsockaddr(from, M_NOWAIT);
6280 		} else {
6281 			*psa = NULL;
6282 		}
6283 	}
6284 	return (error);
6285 }
6286 
6287 
6288 
6289 
6290 
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	/*
	 * Add 'totaddr' packed sockaddrs starting at 'addr' to the
	 * association as confirmed remote addresses (sctp_connectx
	 * helper).  On an invalid address or an allocation failure the
	 * association is freed, *error is set (EINVAL/ENOBUFS) and the
	 * walk stops.  Returns the number of addresses actually added.
	 * The caller must not touch 'stcb' again when *error is set,
	 * since the assoc is gone.
	 */
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;
#ifdef INET
	struct sockaddr_in *sin;
#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;
#endif

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = sizeof(struct sockaddr_in);
			sin = (struct sockaddr_in *)sa;
			/* Reject wildcard, broadcast and multicast peers. */
			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
			    SCTP_DONOT_SETSCOPE,
			    SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			incr = sizeof(struct sockaddr_in6);
			sin6 = (struct sockaddr_in6 *)sa;
			/* Reject unspecified and multicast peers. */
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
			    SCTP_DONOT_SETSCOPE,
			    SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
		default:
			/*
			 * NOTE(review): for an unknown family 'incr' keeps
			 * its previous value (0 on the first entry), so the
			 * walk re-reads the same spot -- relies on the
			 * caller having pre-validated the list; confirm.
			 */
			break;
		}
		/* Step to the next packed sockaddr in the buffer. */
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6371 
6372 struct sctp_tcb *
6373 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6374     unsigned int *totaddr,
6375     unsigned int *num_v4, unsigned int *num_v6, int *error,
6376     unsigned int limit, int *bad_addr)
6377 {
6378 	struct sockaddr *sa;
6379 	struct sctp_tcb *stcb = NULL;
6380 	unsigned int incr, at, i;
6381 
6382 	at = 0;
6383 	sa = addr;
6384 	*error = *num_v6 = *num_v4 = 0;
6385 	/* account and validate addresses */
6386 	for (i = 0; i < *totaddr; i++) {
6387 		switch (sa->sa_family) {
6388 #ifdef INET
6389 		case AF_INET:
6390 			incr = (unsigned int)sizeof(struct sockaddr_in);
6391 			if (sa->sa_len != incr) {
6392 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6393 				*error = EINVAL;
6394 				*bad_addr = 1;
6395 				return (NULL);
6396 			}
6397 			(*num_v4) += 1;
6398 			break;
6399 #endif
6400 #ifdef INET6
6401 		case AF_INET6:
6402 			{
6403 				struct sockaddr_in6 *sin6;
6404 
6405 				sin6 = (struct sockaddr_in6 *)sa;
6406 				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6407 					/* Must be non-mapped for connectx */
6408 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6409 					*error = EINVAL;
6410 					*bad_addr = 1;
6411 					return (NULL);
6412 				}
6413 				incr = (unsigned int)sizeof(struct sockaddr_in6);
6414 				if (sa->sa_len != incr) {
6415 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6416 					*error = EINVAL;
6417 					*bad_addr = 1;
6418 					return (NULL);
6419 				}
6420 				(*num_v6) += 1;
6421 				break;
6422 			}
6423 #endif
6424 		default:
6425 			*totaddr = i;
6426 			incr = 0;
6427 			/* we are done */
6428 			break;
6429 		}
6430 		if (i == *totaddr) {
6431 			break;
6432 		}
6433 		SCTP_INP_INCR_REF(inp);
6434 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6435 		if (stcb != NULL) {
6436 			/* Already have or am bring up an association */
6437 			return (stcb);
6438 		} else {
6439 			SCTP_INP_DECR_REF(inp);
6440 		}
6441 		if ((at + incr) > limit) {
6442 			*totaddr = i;
6443 			break;
6444 		}
6445 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6446 	}
6447 	return ((struct sctp_tcb *)NULL);
6448 }
6449 
6450 /*
6451  * sctp_bindx(ADD) for one address.
6452  * assumes all arguments are valid/checked by caller.
6453  */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	/*
	 * sctp_bindx(SCTP_BINDX_ADD_ADDR) for a single address.  Validates
	 * the address against the socket's family/V6ONLY settings, converts
	 * a v4-mapped v6 address to plain v4, and either performs the
	 * initial bind (endpoint still unbound) or adds the address to an
	 * already-bound subset-bound endpoint.  Errors are reported via
	 * *error; 0 is left there on success.  Arguments are assumed
	 * valid/checked by the caller.
	 */
	struct sockaddr *addr_touse;
#if defined(INET) && defined(INET6)
	struct sockaddr_in sin;
#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
#ifdef INET
		struct sockaddr_in6 *sin6;

#endif
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
#ifdef INET
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* Work with the embedded v4 address from here on. */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
#endif
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		/* Endpoint not yet bound: this address becomes the bind. */
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		/*
		 * sin_port/sin6_port share the same offset, so the v4 cast
		 * is used for the port regardless of family.
		 */
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		/* Is some other endpoint already bound to this address? */
		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6579 
6580 /*
6581  * sctp_bindx(DELETE) for one address.
6582  * assumes all arguments are valid/checked by caller.
6583  */
void
sctp_bindx_delete_address(struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	/*
	 * sctp_bindx(SCTP_BINDX_REM_ADDR) for a single address.  Mirrors
	 * sctp_bindx_add_address(): validates the address against the
	 * socket's family/V6ONLY settings, converts a v4-mapped v6
	 * address to plain v4, and removes the address from a
	 * subset-bound endpoint.  Errors are reported via *error.
	 * Arguments are assumed valid/checked by the caller.
	 */
	struct sockaddr *addr_touse;
#if defined(INET) && defined(INET6)
	struct sockaddr_in sin;
#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
#ifdef INET
		struct sockaddr_in6 *sin6;
#endif

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
#ifdef INET
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* Work with the embedded v4 address from here on. */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
#endif
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6666 
6667 /*
6668  * returns the valid local address count for an assoc, taking into account
6669  * all scoping rules
6670  */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	/*
	 * Count the local addresses usable by this association, applying
	 * the association's scoping rules (loopback, private-v4,
	 * link/site-local v6) and per-jail restrictions.  Bound-all
	 * endpoints walk every ifa on the VRF; subset-bound endpoints
	 * walk the endpoint's own address list.  Runs under the global
	 * address read lock.
	 */
	int loopback_scope;
#if defined(INET)
	int ipv4_local_scope, ipv4_addr_legal;
#endif
#if defined (INET6)
	int local_scope, site_scope, ipv6_addr_legal;
#endif
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.scope.loopback_scope;
#if defined(INET)
	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
#endif
#if defined(INET6)
	local_scope = stcb->asoc.scope.local_scope;
	site_scope = stcb->asoc.scope.site_scope;
	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
#endif
	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			/* Skip loopback interfaces unless in scope. */
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
#ifdef INET
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = &sctp_ifa->address.sin;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						/* Honor jail restrictions. */
						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
						    &sin->sin_addr) != 0) {
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#endif
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = &sctp_ifa->address.sin6;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						/* Honor jail restrictions. */
						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
						    &sin6->sin6_addr) != 0) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/*
									 * bad link
									 * local
									 * address
									 */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6808 
6809 #if defined(SCTP_LOCAL_TRACE_BUF)
6810 
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	/*
	 * Append an entry to the global SCTP trace ring buffer.
	 * A slot is reserved lock-free: the CAS loop advances the shared
	 * index (wrapping from SCTP_MAX_LOGGING_SIZE back to 1) and
	 * retries until no other CPU raced us, so concurrent callers get
	 * distinct slots.  'str' is unused; only the six parameter words
	 * and a cycle-count timestamp are recorded.
	 */
	uint32_t saveindex, newindex;

	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	/* A saved index at the limit means we wrapped: write slot 0. */
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
6836 
6837 #endif
/*
 * Input routine registered with the UDP tunneling socket: called for
 * every UDP datagram arriving on the SCTP-over-UDP tunneling port.
 * Strips the UDP header out of the mbuf chain and feeds the remaining
 * SCTP packet into the normal IPv4/IPv6 SCTP input path, passing along
 * the UDP source port so replies can be tunneled back.  The mbuf chain
 * is consumed in all cases: error paths free it here, success paths
 * hand it to the input routine.
 */
static void
sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
    const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
{
	struct ip *iph;
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	struct mbuf *sp, *last;
	struct udphdr *uhdr;
	uint16_t port;

	if ((m->m_flags & M_PKTHDR) == 0) {
		/* Can't handle one that is not a pkt hdr */
		goto out;
	}
	/* Pull the src port; 'off' is the offset of the UDP header. */
	iph = mtod(m, struct ip *);
	uhdr = (struct udphdr *)((caddr_t)iph + off);
	port = uhdr->uh_sport;
	/*
	 * Split out the mbuf chain. Leave the IP header in m, place the
	 * rest in the sp.  sp then starts with the UDP header.
	 */
	sp = m_split(m, off, M_NOWAIT);
	if (sp == NULL) {
		/* Gak, drop packet, we can't do a split */
		goto out;
	}
	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
		/* Gak, packet can't have an SCTP header in it - too small */
		m_freem(sp);
		goto out;
	}
	/* Now pull up the UDP header and SCTP header together */
	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
	if (sp == NULL) {
		/* Gak pullup failed */
		goto out;
	}
	/* Trim out the UDP header, leaving the SCTP packet at the front. */
	m_adj(sp, sizeof(struct udphdr));

	/* Now reconstruct the mbuf chain: append sp after the last mbuf of m. */
	for (last = m; last->m_next; last = last->m_next);
	last->m_next = sp;
	m->m_pkthdr.len += sp->m_pkthdr.len;
	/*
	 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP
	 * checksum and it was valid. Since CSUM_DATA_VALID ==
	 * CSUM_SCTP_VALID this would imply that the HW also verified the
	 * SCTP checksum. Therefore, clear the bit.
	 */
	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
	    "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
	    m->m_pkthdr.len,
	    if_name(m->m_pkthdr.rcvif),
	    (int)m->m_pkthdr.csum_flags, CSUM_BITS);
	m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
	/*
	 * The version field occupies the same bits in the IPv4 and IPv6
	 * headers, so reading ip_v via struct ip works for both families.
	 */
	iph = mtod(m, struct ip *);
	switch (iph->ip_v) {
#ifdef INET
	case IPVERSION:
		/* Shrink the IP total length by the removed UDP header. */
		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
		sctp_input_with_port(m, off, port);
		break;
#endif
#ifdef INET6
	case IPV6_VERSION >> 4:
		ip6 = mtod(m, struct ip6_hdr *);
		/* Shrink the IPv6 payload length by the removed UDP header. */
		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
		/* Note: sctp6_input_with_port() may modify m and off. */
		sctp6_input_with_port(&m, &off, port);
		break;
#endif
	default:
		goto out;
		break;
	}
	return;
out:
	m_freem(m);
}
6920 
6921 #ifdef INET
/*
 * ICMP error handler registered for the IPv4 SCTP-over-UDP tunneling
 * socket.  'vip' points to the quoted inner (offending) IP header
 * inside the ICMP message; the ICMP header and the outer IP header are
 * recovered by stepping backwards from it.  The quoted UDP and SCTP
 * headers are validated against the association's ports and
 * verification tag (or, for packets sent without a tag, against the
 * initiate tag of a quoted INIT chunk) before the error is passed to
 * sctp_notify(), so that forged ICMP messages are ignored.
 */
static void
sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED)
{
	struct ip *outer_ip, *inner_ip;
	struct sctphdr *sh;
	struct icmp *icmp;
	struct udphdr *udp;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_init_chunk *ch;
	struct sockaddr_in src, dst;
	uint8_t type, code;

	/* Walk back from the quoted inner IP header to the enclosing headers. */
	inner_ip = (struct ip *)vip;
	icmp = (struct icmp *)((caddr_t)inner_ip -
	    (sizeof(struct icmp) - sizeof(struct ip)));
	outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
	/*
	 * Need the outer IP header, 8 bytes of ICMP header, the inner IP
	 * header, the UDP header, and the first 8 bytes (ports and
	 * verification tag) of the SCTP common header.
	 */
	if (ntohs(outer_ip->ip_len) <
	    sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) {
		return;
	}
	udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2));
	sh = (struct sctphdr *)(udp + 1);
	memset(&src, 0, sizeof(struct sockaddr_in));
	src.sin_family = AF_INET;
	src.sin_len = sizeof(struct sockaddr_in);
	src.sin_port = sh->src_port;
	src.sin_addr = inner_ip->ip_src;
	memset(&dst, 0, sizeof(struct sockaddr_in));
	dst.sin_family = AF_INET;
	dst.sin_len = sizeof(struct sockaddr_in);
	dst.sin_port = sh->dest_port;
	dst.sin_addr = inner_ip->ip_dst;
	/*
	 * 'dst' holds the dest of the packet that failed to be sent. 'src'
	 * holds our local endpoint address. Thus we reverse the dst and the
	 * src in the lookup.
	 */
	inp = NULL;
	net = NULL;
	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
	    (struct sockaddr *)&src,
	    &inp, &net, 1,
	    SCTP_DEFAULT_VRFID);
	if ((stcb != NULL) &&
	    (net != NULL) &&
	    (inp != NULL)) {
		/* Check the UDP port numbers */
		if ((udp->uh_dport != net->port) ||
		    (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
			SCTP_TCB_UNLOCK(stcb);
			return;
		}
		/* Check the verification tag */
		if (ntohl(sh->v_tag) != 0) {
			/*
			 * This must be the verification tag used for
			 * sending out packets. We don't consider packets
			 * reflecting the verification tag.
			 */
			if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		} else {
			/*
			 * No tag quoted: accept only if the quoted data is
			 * long enough to cover the SCTP common header (12),
			 * the chunk header (4), and the initiate tag (4).
			 */
			if (ntohs(outer_ip->ip_len) >=
			    sizeof(struct ip) +
			    8 + (inner_ip->ip_hl << 2) + 8 + 20) {
				/*
				 * In this case we can check if we got an
				 * INIT chunk and if the initiate tag
				 * matches.
				 */
				ch = (struct sctp_init_chunk *)(sh + 1);
				if ((ch->ch.chunk_type != SCTP_INITIATION) ||
				    (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) {
					SCTP_TCB_UNLOCK(stcb);
					return;
				}
			} else {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		}
		type = icmp->icmp_type;
		code = icmp->icmp_code;
		if ((type == ICMP_UNREACH) &&
		    (code == ICMP_UNREACH_PORT)) {
			/*
			 * The tunneling UDP port being unreachable is
			 * treated like protocol unreachable, the code a
			 * native SCTP packet would have triggered.
			 */
			code = ICMP_UNREACH_PROTOCOL;
		}
		/* Forward the error, quoted length, and next-hop MTU. */
		sctp_notify(inp, stcb, net, type, code,
		    ntohs(inner_ip->ip_len),
		    (uint32_t)ntohs(icmp->icmp_nextmtu));
	} else {
		/*
		 * Incomplete lookup: drop the reference the lookup left on
		 * inp and release the TCB lock if one was returned.
		 */
		if ((stcb == NULL) && (inp != NULL)) {
			/* reduce ref-count */
			SCTP_INP_WLOCK(inp);
			SCTP_INP_DECR_REF(inp);
			SCTP_INP_WUNLOCK(inp);
		}
		if (stcb) {
			SCTP_TCB_UNLOCK(stcb);
		}
	}
	return;
}
7029 #endif
7030 
7031 #ifdef INET6
/*
 * ICMPv6 error handler registered for the IPv6 SCTP-over-UDP tunneling
 * socket.  'd' carries a struct ip6ctlparam describing the ICMPv6
 * message and the quoted offending packet.  The quoted UDP and SCTP
 * headers are copied out of the mbuf and validated (ports, verification
 * tag or, for untagged packets, the initiate tag of a quoted INIT
 * chunk) before the error is handed to sctp6_notify(), so that forged
 * ICMPv6 messages are ignored.
 */
static void
sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED)
{
	struct ip6ctlparam *ip6cp;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctphdr sh;
	struct udphdr udp;
	struct sockaddr_in6 src, dst;
	uint8_t type, code;

	ip6cp = (struct ip6ctlparam *)d;
	/*
	 * XXX: We assume that when IPV6 is non NULL, M and OFF are valid.
	 */
	if (ip6cp->ip6c_m == NULL) {
		return;
	}
	/*
	 * Check if we can safely examine the ports and the verification tag
	 * of the SCTP common header.
	 */
	if (ip6cp->ip6c_m->m_pkthdr.len <
	    ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) {
		return;
	}
	/* Copy out the UDP header. */
	memset(&udp, 0, sizeof(struct udphdr));
	m_copydata(ip6cp->ip6c_m,
	    ip6cp->ip6c_off,
	    sizeof(struct udphdr),
	    (caddr_t)&udp);
	/* Copy out the port numbers and the verification tag (2+2+4 bytes). */
	memset(&sh, 0, sizeof(struct sctphdr));
	m_copydata(ip6cp->ip6c_m,
	    ip6cp->ip6c_off + sizeof(struct udphdr),
	    sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t),
	    (caddr_t)&sh);
	/* Build the (src, dst) pair from the quoted packet's addresses. */
	memset(&src, 0, sizeof(struct sockaddr_in6));
	src.sin6_family = AF_INET6;
	src.sin6_len = sizeof(struct sockaddr_in6);
	src.sin6_port = sh.src_port;
	src.sin6_addr = ip6cp->ip6c_ip6->ip6_src;
	/* Embed the interface scope; bail out if that fails. */
	if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
		return;
	}
	memset(&dst, 0, sizeof(struct sockaddr_in6));
	dst.sin6_family = AF_INET6;
	dst.sin6_len = sizeof(struct sockaddr_in6);
	dst.sin6_port = sh.dest_port;
	dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst;
	if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
		return;
	}
	/*
	 * 'dst' is the destination of the packet that failed, 'src' our
	 * local endpoint; the lookup takes them reversed.
	 */
	inp = NULL;
	net = NULL;
	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
	    (struct sockaddr *)&src,
	    &inp, &net, 1, SCTP_DEFAULT_VRFID);
	if ((stcb != NULL) &&
	    (net != NULL) &&
	    (inp != NULL)) {
		/* Check the UDP port numbers */
		if ((udp.uh_dport != net->port) ||
		    (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
			SCTP_TCB_UNLOCK(stcb);
			return;
		}
		/* Check the verification tag */
		if (ntohl(sh.v_tag) != 0) {
			/*
			 * This must be the verification tag used for
			 * sending out packets. We don't consider packets
			 * reflecting the verification tag.
			 */
			if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		} else {
			/*
			 * No tag quoted: accept only if the quoted data
			 * reaches through the initiate tag of an INIT chunk.
			 */
			if (ip6cp->ip6c_m->m_pkthdr.len >=
			    ip6cp->ip6c_off + sizeof(struct udphdr) +
			    sizeof(struct sctphdr) +
			    sizeof(struct sctp_chunkhdr) +
			    offsetof(struct sctp_init, a_rwnd)) {
				/*
				 * In this case we can check if we got an
				 * INIT chunk and if the initiate tag
				 * matches.
				 */
				uint32_t initiate_tag;
				uint8_t chunk_type;

				m_copydata(ip6cp->ip6c_m,
				    ip6cp->ip6c_off +
				    sizeof(struct udphdr) +
				    sizeof(struct sctphdr),
				    sizeof(uint8_t),
				    (caddr_t)&chunk_type);
				m_copydata(ip6cp->ip6c_m,
				    ip6cp->ip6c_off +
				    sizeof(struct udphdr) +
				    sizeof(struct sctphdr) +
				    sizeof(struct sctp_chunkhdr),
				    sizeof(uint32_t),
				    (caddr_t)&initiate_tag);
				if ((chunk_type != SCTP_INITIATION) ||
				    (ntohl(initiate_tag) != stcb->asoc.my_vtag)) {
					SCTP_TCB_UNLOCK(stcb);
					return;
				}
			} else {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		}
		type = ip6cp->ip6c_icmp6->icmp6_type;
		code = ip6cp->ip6c_icmp6->icmp6_code;
		if ((type == ICMP6_DST_UNREACH) &&
		    (code == ICMP6_DST_UNREACH_NOPORT)) {
			/*
			 * The tunneling UDP port being unreachable is
			 * reported as a next-header parameter problem, the
			 * error a native SCTP packet would have triggered.
			 */
			type = ICMP6_PARAM_PROB;
			code = ICMP6_PARAMPROB_NEXTHEADER;
		}
		sctp6_notify(inp, stcb, net, type, code,
		    ntohl(ip6cp->ip6c_icmp6->icmp6_mtu));
	} else {
		/*
		 * Incomplete lookup: drop the reference the lookup left on
		 * inp and release the TCB lock if one was returned.
		 */
		if ((stcb == NULL) && (inp != NULL)) {
			/* reduce inp's ref-count */
			SCTP_INP_WLOCK(inp);
			SCTP_INP_DECR_REF(inp);
			SCTP_INP_WUNLOCK(inp);
		}
		if (stcb) {
			SCTP_TCB_UNLOCK(stcb);
		}
	}
}
7170 #endif
7171 
void
sctp_over_udp_stop(void)
{
	/*
	 * Close the kernel UDP tunneling socket(s), if open, and clear
	 * the global references.  The sysctl caller is assumed to hold
	 * sctp_sysctl_info_lock() for writing.
	 */
#ifdef INET
	{
		struct socket *so = SCTP_BASE_INFO(udp4_tun_socket);

		if (so != NULL) {
			soclose(so);
			SCTP_BASE_INFO(udp4_tun_socket) = NULL;
		}
	}
#endif
#ifdef INET6
	{
		struct socket *so = SCTP_BASE_INFO(udp6_tun_socket);

		if (so != NULL) {
			soclose(so);
			SCTP_BASE_INFO(udp6_tun_socket) = NULL;
		}
	}
#endif
}
7192 
/*
 * Create, register, and bind the kernel UDP tunneling socket for each
 * address family compiled in, using the port configured through the
 * sctp_udp_tunneling_port sysctl.  On any failure the partially set up
 * state is torn down with sctp_over_udp_stop() and the error from the
 * failing call is returned; returns 0 on success.
 */
int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;
#ifdef INET
	struct sockaddr_in sin;
#endif
#ifdef INET6
	struct sockaddr_in6 sin6;
#endif
	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	/* ntohs() is harmless here: zero is zero in either byte order. */
	if (ntohs(port) == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET
	/* Create the IPv4 tunneling socket. */
	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Register the packet and ICMP callbacks with the UDP layer. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin, 0, sizeof(struct sockaddr_in));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
	    (struct sockaddr *)&sin, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
#ifdef INET6
	/* Create the IPv6 tunneling socket. */
	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Register the packet and ICMPv6 callbacks with the UDP layer. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp6_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin6, 0, sizeof(struct sockaddr_in6));
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
	    (struct sockaddr *)&sin6, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
	return (0);
}
7279 
7280 /*
7281  * sctp_min_mtu ()returns the minimum of all non-zero arguments.
7282  * If all arguments are zero, zero is returned.
7283  */
uint32_t
sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
{
	uint32_t mtu;

	/*
	 * A zero MTU means "unknown" and must not win the comparison, so
	 * a candidate replaces the current minimum only if it is non-zero
	 * and either the current minimum is zero or the candidate is
	 * smaller.  This linear scan replaces the previous eight-branch
	 * nested conditional with identical results, including returning
	 * zero when all three arguments are zero.
	 */
	mtu = mtu1;
	if ((mtu2 > 0) && ((mtu == 0) || (mtu2 < mtu))) {
		mtu = mtu2;
	}
	if ((mtu3 > 0) && ((mtu == 0) || (mtu3 < mtu))) {
		mtu = mtu3;
	}
	return (mtu);
}
7313 
7314 void
7315 sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
7316 {
7317 	struct in_conninfo inc;
7318 
7319 	memset(&inc, 0, sizeof(struct in_conninfo));
7320 	inc.inc_fibnum = fibnum;
7321 	switch (addr->sa.sa_family) {
7322 #ifdef INET
7323 	case AF_INET:
7324 		inc.inc_faddr = addr->sin.sin_addr;
7325 		break;
7326 #endif
7327 #ifdef INET6
7328 	case AF_INET6:
7329 		inc.inc_flags |= INC_ISIPV6;
7330 		inc.inc6_faddr = addr->sin6.sin6_addr;
7331 		break;
7332 #endif
7333 	default:
7334 		return;
7335 	}
7336 	tcp_hc_updatemtu(&inc, (u_long)mtu);
7337 }
7338 
7339 uint32_t
7340 sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
7341 {
7342 	struct in_conninfo inc;
7343 
7344 	memset(&inc, 0, sizeof(struct in_conninfo));
7345 	inc.inc_fibnum = fibnum;
7346 	switch (addr->sa.sa_family) {
7347 #ifdef INET
7348 	case AF_INET:
7349 		inc.inc_faddr = addr->sin.sin_addr;
7350 		break;
7351 #endif
7352 #ifdef INET6
7353 	case AF_INET6:
7354 		inc.inc_flags |= INC_ISIPV6;
7355 		inc.inc6_faddr = addr->sin6.sin6_addr;
7356 		break;
7357 #endif
7358 	default:
7359 		return (0);
7360 	}
7361 	return ((uint32_t)tcp_hc_getmtu(&inc));
7362 }
7363