xref: /freebsd/sys/netinet/sctputil.c (revision 2f513db7)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
5  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * a) Redistributions of source code must retain the above copyright notice,
12  *    this list of conditions and the following disclaimer.
13  *
14  * b) Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the distribution.
17  *
18  * c) Neither the name of Cisco Systems, Inc. nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <netinet/sctp_os.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctputil.h>
41 #include <netinet/sctp_var.h>
42 #include <netinet/sctp_sysctl.h>
43 #ifdef INET6
44 #include <netinet6/sctp6_var.h>
45 #endif
46 #include <netinet/sctp_header.h>
47 #include <netinet/sctp_output.h>
48 #include <netinet/sctp_uio.h>
49 #include <netinet/sctp_timer.h>
50 #include <netinet/sctp_indata.h>
51 #include <netinet/sctp_auth.h>
52 #include <netinet/sctp_asconf.h>
53 #include <netinet/sctp_bsd_addr.h>
54 #include <netinet/sctp_kdtrace.h>
55 #if defined(INET6) || defined(INET)
56 #include <netinet/tcp_var.h>
57 #endif
58 #include <netinet/udp.h>
59 #include <netinet/udp_var.h>
60 #include <sys/proc.h>
61 #ifdef INET6
62 #include <netinet/icmp6.h>
63 #endif
64 
65 
66 #ifndef KTR_SCTP
67 #define KTR_SCTP KTR_SUBSYS
68 #endif
69 
70 extern const struct sctp_cc_functions sctp_cc_functions[];
71 extern const struct sctp_ss_functions sctp_ss_functions[];
72 
/*
 * Log a socket-buffer accounting event into the local KTR trace ring.
 * 'incr' is the byte count being added to (positive) or removed from
 * (negative) the buffer.  Compiled out unless SCTP_LOCAL_TRACE_BUF is set.
 */
void
sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/*
	 * Zero the whole union first: the x.sb member does not span all of
	 * x.misc, so the log1..log4 words read below would otherwise leak
	 * uninitialized stack bytes into the trace.
	 */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.sb.stcb = stcb;
	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
	if (stcb)
		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
	else
		sctp_clog.x.sb.stcb_sbcc = 0;
	sctp_clog.x.sb.incr = incr;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SB,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
95 
96 void
97 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
98 {
99 #if defined(SCTP_LOCAL_TRACE_BUF)
100 	struct sctp_cwnd_log sctp_clog;
101 
102 	sctp_clog.x.close.inp = (void *)inp;
103 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
104 	if (stcb) {
105 		sctp_clog.x.close.stcb = (void *)stcb;
106 		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
107 	} else {
108 		sctp_clog.x.close.stcb = 0;
109 		sctp_clog.x.close.state = 0;
110 	}
111 	sctp_clog.x.close.loc = loc;
112 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
113 	    SCTP_LOG_EVENT_CLOSE,
114 	    0,
115 	    sctp_clog.x.misc.log1,
116 	    sctp_clog.x.misc.log2,
117 	    sctp_clog.x.misc.log3,
118 	    sctp_clog.x.misc.log4);
119 #endif
120 }
121 
/*
 * Trace an RTO/RTT measurement for the given destination; the stored RTT
 * is converted from microseconds to milliseconds.
 */
void
rto_logging(struct sctp_nets *net, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log clog;

	memset(&clog, 0, sizeof(clog));
	clog.x.rto.net = (void *)net;
	clog.x.rto.rtt = net->rtt / 1000;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RTT,
	    from,
	    clog.x.misc.log1,
	    clog.x.misc.log2,
	    clog.x.misc.log3,
	    clog.x.misc.log4);
#endif
}
140 
141 void
142 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
143 {
144 #if defined(SCTP_LOCAL_TRACE_BUF)
145 	struct sctp_cwnd_log sctp_clog;
146 
147 	sctp_clog.x.strlog.stcb = stcb;
148 	sctp_clog.x.strlog.n_tsn = tsn;
149 	sctp_clog.x.strlog.n_sseq = sseq;
150 	sctp_clog.x.strlog.e_tsn = 0;
151 	sctp_clog.x.strlog.e_sseq = 0;
152 	sctp_clog.x.strlog.strm = stream;
153 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
154 	    SCTP_LOG_EVENT_STRM,
155 	    from,
156 	    sctp_clog.x.misc.log1,
157 	    sctp_clog.x.misc.log2,
158 	    sctp_clog.x.misc.log3,
159 	    sctp_clog.x.misc.log4);
160 #endif
161 }
162 
/*
 * Trace a Nagle algorithm decision ('action') along with the current
 * flight and queue counters of the association.
 */
void
sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/* Zero first so x.misc never exposes uninitialized stack bytes. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.nagle.stcb = (void *)stcb;
	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_NAGLE,
	    action,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
183 
184 void
185 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
186 {
187 #if defined(SCTP_LOCAL_TRACE_BUF)
188 	struct sctp_cwnd_log sctp_clog;
189 
190 	sctp_clog.x.sack.cumack = cumack;
191 	sctp_clog.x.sack.oldcumack = old_cumack;
192 	sctp_clog.x.sack.tsn = tsn;
193 	sctp_clog.x.sack.numGaps = gaps;
194 	sctp_clog.x.sack.numDups = dups;
195 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
196 	    SCTP_LOG_EVENT_SACK,
197 	    from,
198 	    sctp_clog.x.misc.log1,
199 	    sctp_clog.x.misc.log2,
200 	    sctp_clog.x.misc.log3,
201 	    sctp_clog.x.misc.log4);
202 #endif
203 }
204 
205 void
206 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
207 {
208 #if defined(SCTP_LOCAL_TRACE_BUF)
209 	struct sctp_cwnd_log sctp_clog;
210 
211 	memset(&sctp_clog, 0, sizeof(sctp_clog));
212 	sctp_clog.x.map.base = map;
213 	sctp_clog.x.map.cum = cum;
214 	sctp_clog.x.map.high = high;
215 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
216 	    SCTP_LOG_EVENT_MAP,
217 	    from,
218 	    sctp_clog.x.misc.log1,
219 	    sctp_clog.x.misc.log2,
220 	    sctp_clog.x.misc.log3,
221 	    sctp_clog.x.misc.log4);
222 #endif
223 }
224 
225 void
226 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
227 {
228 #if defined(SCTP_LOCAL_TRACE_BUF)
229 	struct sctp_cwnd_log sctp_clog;
230 
231 	memset(&sctp_clog, 0, sizeof(sctp_clog));
232 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
233 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
234 	sctp_clog.x.fr.tsn = tsn;
235 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
236 	    SCTP_LOG_EVENT_FR,
237 	    from,
238 	    sctp_clog.x.misc.log1,
239 	    sctp_clog.x.misc.log2,
240 	    sctp_clog.x.misc.log3,
241 	    sctp_clog.x.misc.log4);
242 #endif
243 }
244 
245 #ifdef SCTP_MBUF_LOGGING
/*
 * Trace one mbuf's bookkeeping: flags, length, data pointer and, for
 * cluster mbufs, the external buffer base and reference count.
 */
void
sctp_log_mb(struct mbuf *m, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/* Zero first so x.misc never exposes uninitialized stack bytes. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.mb.mp = m;
	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
	if (SCTP_BUF_IS_EXTENDED(m)) {
		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
	} else {
		sctp_clog.x.mb.ext = 0;
		sctp_clog.x.mb.refcnt = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBUF,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
272 
273 void
274 sctp_log_mbc(struct mbuf *m, int from)
275 {
276 	struct mbuf *mat;
277 
278 	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
279 		sctp_log_mb(mat, from);
280 	}
281 }
282 #endif
283 
/*
 * Trace a stream reassembly-queue operation for 'control'; 'poschk', if
 * given, is the neighboring entry it is compared/inserted against.
 */
void
sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	if (control == NULL) {
		SCTP_PRINTF("Gak log of NULL?\n");
		return;
	}
	/* Zero first so x.misc never exposes uninitialized stack bytes. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.strlog.stcb = control->stcb;
	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
	sctp_clog.x.strlog.strm = control->sinfo_stream;
	if (poschk != NULL) {
		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
	} else {
		sctp_clog.x.strlog.e_tsn = 0;
		sctp_clog.x.strlog.e_sseq = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
314 
315 void
316 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
317 {
318 #if defined(SCTP_LOCAL_TRACE_BUF)
319 	struct sctp_cwnd_log sctp_clog;
320 
321 	sctp_clog.x.cwnd.net = net;
322 	if (stcb->asoc.send_queue_cnt > 255)
323 		sctp_clog.x.cwnd.cnt_in_send = 255;
324 	else
325 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
326 	if (stcb->asoc.stream_queue_cnt > 255)
327 		sctp_clog.x.cwnd.cnt_in_str = 255;
328 	else
329 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
330 
331 	if (net) {
332 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
333 		sctp_clog.x.cwnd.inflight = net->flight_size;
334 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
335 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
336 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
337 	}
338 	if (SCTP_CWNDLOG_PRESEND == from) {
339 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
340 	}
341 	sctp_clog.x.cwnd.cwnd_augment = augment;
342 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
343 	    SCTP_LOG_EVENT_CWND,
344 	    from,
345 	    sctp_clog.x.misc.log1,
346 	    sctp_clog.x.misc.log2,
347 	    sctp_clog.x.misc.log3,
348 	    sctp_clog.x.misc.log4);
349 #endif
350 }
351 
352 void
353 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
354 {
355 #if defined(SCTP_LOCAL_TRACE_BUF)
356 	struct sctp_cwnd_log sctp_clog;
357 
358 	memset(&sctp_clog, 0, sizeof(sctp_clog));
359 	if (inp) {
360 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
361 
362 	} else {
363 		sctp_clog.x.lock.sock = (void *)NULL;
364 	}
365 	sctp_clog.x.lock.inp = (void *)inp;
366 	if (stcb) {
367 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
368 	} else {
369 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
370 	}
371 	if (inp) {
372 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
373 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
374 	} else {
375 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
376 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
377 	}
378 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
379 	if (inp && (inp->sctp_socket)) {
380 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
381 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
382 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
383 	} else {
384 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
385 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
386 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
387 	}
388 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
389 	    SCTP_LOG_LOCK_EVENT,
390 	    from,
391 	    sctp_clog.x.misc.log1,
392 	    sctp_clog.x.misc.log2,
393 	    sctp_clog.x.misc.log3,
394 	    sctp_clog.x.misc.log4);
395 #endif
396 }
397 
398 void
399 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
400 {
401 #if defined(SCTP_LOCAL_TRACE_BUF)
402 	struct sctp_cwnd_log sctp_clog;
403 
404 	memset(&sctp_clog, 0, sizeof(sctp_clog));
405 	sctp_clog.x.cwnd.net = net;
406 	sctp_clog.x.cwnd.cwnd_new_value = error;
407 	sctp_clog.x.cwnd.inflight = net->flight_size;
408 	sctp_clog.x.cwnd.cwnd_augment = burst;
409 	if (stcb->asoc.send_queue_cnt > 255)
410 		sctp_clog.x.cwnd.cnt_in_send = 255;
411 	else
412 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
413 	if (stcb->asoc.stream_queue_cnt > 255)
414 		sctp_clog.x.cwnd.cnt_in_str = 255;
415 	else
416 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
417 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
418 	    SCTP_LOG_EVENT_MAXBURST,
419 	    from,
420 	    sctp_clog.x.misc.log1,
421 	    sctp_clog.x.misc.log2,
422 	    sctp_clog.x.misc.log3,
423 	    sctp_clog.x.misc.log4);
424 #endif
425 }
426 
427 void
428 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
429 {
430 #if defined(SCTP_LOCAL_TRACE_BUF)
431 	struct sctp_cwnd_log sctp_clog;
432 
433 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
434 	sctp_clog.x.rwnd.send_size = snd_size;
435 	sctp_clog.x.rwnd.overhead = overhead;
436 	sctp_clog.x.rwnd.new_rwnd = 0;
437 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
438 	    SCTP_LOG_EVENT_RWND,
439 	    from,
440 	    sctp_clog.x.misc.log1,
441 	    sctp_clog.x.misc.log2,
442 	    sctp_clog.x.misc.log3,
443 	    sctp_clog.x.misc.log4);
444 #endif
445 }
446 
447 void
448 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
449 {
450 #if defined(SCTP_LOCAL_TRACE_BUF)
451 	struct sctp_cwnd_log sctp_clog;
452 
453 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
454 	sctp_clog.x.rwnd.send_size = flight_size;
455 	sctp_clog.x.rwnd.overhead = overhead;
456 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
457 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
458 	    SCTP_LOG_EVENT_RWND,
459 	    from,
460 	    sctp_clog.x.misc.log1,
461 	    sctp_clog.x.misc.log2,
462 	    sctp_clog.x.misc.log3,
463 	    sctp_clog.x.misc.log4);
464 #endif
465 }
466 
467 #ifdef SCTP_MBCNT_LOGGING
468 static void
469 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
470 {
471 #if defined(SCTP_LOCAL_TRACE_BUF)
472 	struct sctp_cwnd_log sctp_clog;
473 
474 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
475 	sctp_clog.x.mbcnt.size_change = book;
476 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
477 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
478 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
479 	    SCTP_LOG_EVENT_MBCNT,
480 	    from,
481 	    sctp_clog.x.misc.log1,
482 	    sctp_clog.x.misc.log2,
483 	    sctp_clog.x.misc.log3,
484 	    sctp_clog.x.misc.log4);
485 #endif
486 }
487 #endif
488 
489 void
490 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
491 {
492 #if defined(SCTP_LOCAL_TRACE_BUF)
493 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
494 	    SCTP_LOG_MISC_EVENT,
495 	    from,
496 	    a, b, c, d);
497 #endif
498 }
499 
500 void
501 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
502 {
503 #if defined(SCTP_LOCAL_TRACE_BUF)
504 	struct sctp_cwnd_log sctp_clog;
505 
506 	sctp_clog.x.wake.stcb = (void *)stcb;
507 	sctp_clog.x.wake.wake_cnt = wake_cnt;
508 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
509 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
510 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
511 
512 	if (stcb->asoc.stream_queue_cnt < 0xff)
513 		sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
514 	else
515 		sctp_clog.x.wake.stream_qcnt = 0xff;
516 
517 	if (stcb->asoc.chunks_on_out_queue < 0xff)
518 		sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
519 	else
520 		sctp_clog.x.wake.chunks_on_oque = 0xff;
521 
522 	sctp_clog.x.wake.sctpflags = 0;
523 	/* set in the defered mode stuff */
524 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
525 		sctp_clog.x.wake.sctpflags |= 1;
526 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
527 		sctp_clog.x.wake.sctpflags |= 2;
528 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
529 		sctp_clog.x.wake.sctpflags |= 4;
530 	/* what about the sb */
531 	if (stcb->sctp_socket) {
532 		struct socket *so = stcb->sctp_socket;
533 
534 		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
535 	} else {
536 		sctp_clog.x.wake.sbflags = 0xff;
537 	}
538 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
539 	    SCTP_LOG_EVENT_WAKE,
540 	    from,
541 	    sctp_clog.x.misc.log1,
542 	    sctp_clog.x.misc.log2,
543 	    sctp_clog.x.misc.log3,
544 	    sctp_clog.x.misc.log4);
545 #endif
546 }
547 
548 void
549 sctp_log_block(uint8_t from, struct sctp_association *asoc, ssize_t sendlen)
550 {
551 #if defined(SCTP_LOCAL_TRACE_BUF)
552 	struct sctp_cwnd_log sctp_clog;
553 
554 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
555 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
556 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
557 	sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
558 	sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
559 	sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
560 	sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
561 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
562 	    SCTP_LOG_EVENT_BLOCK,
563 	    from,
564 	    sctp_clog.x.misc.log1,
565 	    sctp_clog.x.misc.log2,
566 	    sctp_clog.x.misc.log3,
567 	    sctp_clog.x.misc.log4);
568 #endif
569 }
570 
571 int
572 sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
573 {
574 	/* May need to fix this if ktrdump does not work */
575 	return (0);
576 }
577 
578 #ifdef SCTP_AUDITING_ENABLED
579 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
580 static int sctp_audit_indx = 0;
581 
582 static
583 void
584 sctp_print_audit_report(void)
585 {
586 	int i;
587 	int cnt;
588 
589 	cnt = 0;
590 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
591 		if ((sctp_audit_data[i][0] == 0xe0) &&
592 		    (sctp_audit_data[i][1] == 0x01)) {
593 			cnt = 0;
594 			SCTP_PRINTF("\n");
595 		} else if (sctp_audit_data[i][0] == 0xf0) {
596 			cnt = 0;
597 			SCTP_PRINTF("\n");
598 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
599 		    (sctp_audit_data[i][1] == 0x01)) {
600 			SCTP_PRINTF("\n");
601 			cnt = 0;
602 		}
603 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
604 		    (uint32_t)sctp_audit_data[i][1]);
605 		cnt++;
606 		if ((cnt % 14) == 0)
607 			SCTP_PRINTF("\n");
608 	}
609 	for (i = 0; i < sctp_audit_indx; i++) {
610 		if ((sctp_audit_data[i][0] == 0xe0) &&
611 		    (sctp_audit_data[i][1] == 0x01)) {
612 			cnt = 0;
613 			SCTP_PRINTF("\n");
614 		} else if (sctp_audit_data[i][0] == 0xf0) {
615 			cnt = 0;
616 			SCTP_PRINTF("\n");
617 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
618 		    (sctp_audit_data[i][1] == 0x01)) {
619 			SCTP_PRINTF("\n");
620 			cnt = 0;
621 		}
622 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
623 		    (uint32_t)sctp_audit_data[i][1]);
624 		cnt++;
625 		if ((cnt % 14) == 0)
626 			SCTP_PRINTF("\n");
627 	}
628 	SCTP_PRINTF("\n");
629 }
630 
/*
 * Audit the association's retransmission and flight-size bookkeeping
 * against the actual contents of the sent queue, recording progress
 * markers in the audit ring.  Any mismatch is printed, the counters are
 * corrected in place, and a full audit report is dumped.
 *
 * 'from' tags the call site; 'net' is currently unused here (presumably
 * kept for symmetry with other audit hooks — TODO confirm).
 */
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/* Entry marker: 0xAA tagged with the caller's 'from' code. */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		/* 0xAF/0x01: bailed out, no endpoint. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		/* 0xAF/0x02: bailed out, no association. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xA1: record the retransmit count the association claims. */
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	/*
	 * Recount from the sent queue: chunks marked for resend, and the
	 * bytes/count of chunks still considered in flight
	 * (sent < SCTP_DATAGRAM_RESEND).
	 */
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		/* 0xAF/0xA1: retransmit-count mismatch; fix it up. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA2: total_flight byte count mismatch; correct it. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		/* 0xAF/0xA5: in-flight chunk count mismatch; correct it. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	/* Cross-check: per-destination flight sizes must sum to the total. */
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA3: per-net sums disagree with the total. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    (void *)lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}
760 
761 void
762 sctp_audit_log(uint8_t ev, uint8_t fd)
763 {
764 
765 	sctp_audit_data[sctp_audit_indx][0] = ev;
766 	sctp_audit_data[sctp_audit_indx][1] = fd;
767 	sctp_audit_indx++;
768 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
769 		sctp_audit_indx = 0;
770 	}
771 }
772 
773 #endif
774 
775 /*
776  * sctp_stop_timers_for_shutdown() should be called
777  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
778  * state to make sure that all timers are stopped.
779  */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
	struct sctp_inpcb *inp;
	struct sctp_nets *net;

	inp = stcb->sctp_ep;

	/*
	 * Association-wide timers that make no sense once a shutdown is in
	 * progress: delayed SACK, stream reset, ASCONF and autoclose.
	 */
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_12);
	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_13);
	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_14);
	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_15);
	/* Per-destination PMTU discovery and heartbeat timers. */
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_16);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_17);
	}
}
803 
/*
 * Stop every timer associated with 'stcb', both association-wide and
 * per-destination.  The ASOCKILL timer is only stopped when
 * 'stop_assoc_kill_timer' is set, so callers tearing the association
 * down via that timer can leave it running.
 */
void
sctp_stop_association_timers(struct sctp_tcb *stcb, bool stop_assoc_kill_timer)
{
	struct sctp_inpcb *inp;
	struct sctp_nets *net;

	inp = stcb->sctp_ep;
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_18);
	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_19);
	if (stop_assoc_kill_timer) {
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_20);
	}
	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_21);
	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_22);
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNGUARD, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_23);
	/* Mobility adaptation */
	sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_24);
	/* Per-destination timers: data, init, shutdown, cookie, PMTU, HB. */
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_25);
		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_26);
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_27);
		sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_28);
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_29);
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_30);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_31);
	}
}
845 
846 /*
847  * A list of sizes based on typical mtu's, used only if next hop size not
848  * returned. These values MUST be multiples of 4 and MUST be ordered.
849  */
850 static uint32_t sctp_mtu_sizes[] = {
851 	68,
852 	296,
853 	508,
854 	512,
855 	544,
856 	576,
857 	1004,
858 	1492,
859 	1500,
860 	1536,
861 	2000,
862 	2048,
863 	4352,
864 	4464,
865 	8166,
866 	17912,
867 	32000,
868 	65532
869 };
870 
871 /*
872  * Return the largest MTU in sctp_mtu_sizes smaller than val.
873  * If val is smaller than the minimum, just return the largest
874  * multiple of 4 smaller or equal to val.
875  * Ensure that the result is a multiple of 4.
876  */
877 uint32_t
878 sctp_get_prev_mtu(uint32_t val)
879 {
880 	uint32_t i;
881 
882 	val &= 0xfffffffc;
883 	if (val <= sctp_mtu_sizes[0]) {
884 		return (val);
885 	}
886 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
887 		if (val <= sctp_mtu_sizes[i]) {
888 			break;
889 		}
890 	}
891 	KASSERT((sctp_mtu_sizes[i - 1] & 0x00000003) == 0,
892 	    ("sctp_mtu_sizes[%u] not a multiple of 4", i - 1));
893 	return (sctp_mtu_sizes[i - 1]);
894 }
895 
896 /*
897  * Return the smallest MTU in sctp_mtu_sizes larger than val.
898  * If val is larger than the maximum, just return the largest multiple of 4 smaller
899  * or equal to val.
900  * Ensure that the result is a multiple of 4.
901  */
902 uint32_t
903 sctp_get_next_mtu(uint32_t val)
904 {
905 	/* select another MTU that is just bigger than this one */
906 	uint32_t i;
907 
908 	val &= 0xfffffffc;
909 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
910 		if (val < sctp_mtu_sizes[i]) {
911 			KASSERT((sctp_mtu_sizes[i] & 0x00000003) == 0,
912 			    ("sctp_mtu_sizes[%u] not a multiple of 4", i));
913 			return (sctp_mtu_sizes[i]);
914 		}
915 	}
916 	return (val);
917 }
918 
/*
 * Refill the endpoint's random-number store by hashing (SCTP_HMAC) the
 * endpoint's secret random numbers keyed with a monotonically increasing
 * counter, then reset the read position to the start of the store.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers
	 * and our counter. The result becomes our good random numbers and
	 * we then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store, which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but that's ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
	    sizeof(m->random_counter), (uint8_t *)m->random_store);
	m->random_counter++;
}
937 
/*
 * Return an initial TSN drawn from the endpoint's random store.  The
 * read position is claimed lock-free with a compare-and-set retry loop;
 * when the store wraps it is refilled via sctp_fill_random_store().
 * If initial_sequence_debug is non-zero, sequential debug values are
 * returned instead of random ones.
 */
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	/* Wrap before running off the end of the signature-sized store. */
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	/* Claim [store_at, store_at+4) atomically; retry on a lost race. */
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	p = &inp->random_store[store_at];
	xp = (uint32_t *)p;
	x = *xp;
	return (x);
}
975 
976 uint32_t
977 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
978 {
979 	uint32_t x;
980 	struct timeval now;
981 
982 	if (check) {
983 		(void)SCTP_GETTIME_TIMEVAL(&now);
984 	}
985 	for (;;) {
986 		x = sctp_select_initial_TSN(&inp->sctp_ep);
987 		if (x == 0) {
988 			/* we never use 0 */
989 			continue;
990 		}
991 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
992 			break;
993 		}
994 	}
995 	return (x);
996 }
997 
998 int32_t
999 sctp_map_assoc_state(int kernel_state)
1000 {
1001 	int32_t user_state;
1002 
1003 	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
1004 		user_state = SCTP_CLOSED;
1005 	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
1006 		user_state = SCTP_SHUTDOWN_PENDING;
1007 	} else {
1008 		switch (kernel_state & SCTP_STATE_MASK) {
1009 		case SCTP_STATE_EMPTY:
1010 			user_state = SCTP_CLOSED;
1011 			break;
1012 		case SCTP_STATE_INUSE:
1013 			user_state = SCTP_CLOSED;
1014 			break;
1015 		case SCTP_STATE_COOKIE_WAIT:
1016 			user_state = SCTP_COOKIE_WAIT;
1017 			break;
1018 		case SCTP_STATE_COOKIE_ECHOED:
1019 			user_state = SCTP_COOKIE_ECHOED;
1020 			break;
1021 		case SCTP_STATE_OPEN:
1022 			user_state = SCTP_ESTABLISHED;
1023 			break;
1024 		case SCTP_STATE_SHUTDOWN_SENT:
1025 			user_state = SCTP_SHUTDOWN_SENT;
1026 			break;
1027 		case SCTP_STATE_SHUTDOWN_RECEIVED:
1028 			user_state = SCTP_SHUTDOWN_RECEIVED;
1029 			break;
1030 		case SCTP_STATE_SHUTDOWN_ACK_SENT:
1031 			user_state = SCTP_SHUTDOWN_ACK_SENT;
1032 			break;
1033 		default:
1034 			user_state = SCTP_CLOSED;
1035 			break;
1036 		}
1037 	}
1038 	return (user_state);
1039 }
1040 
1041 int
1042 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1043     uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
1044 {
1045 	struct sctp_association *asoc;
1046 
1047 	/*
1048 	 * Anything set to zero is taken care of by the allocation routine's
1049 	 * bzero
1050 	 */
1051 
1052 	/*
1053 	 * Up front select what scoping to apply on addresses I tell my peer
1054 	 * Not sure what to do with these right now, we will need to come up
1055 	 * with a way to set them. We may need to pass them through from the
1056 	 * caller in the sctp_aloc_assoc() function.
1057 	 */
1058 	int i;
1059 #if defined(SCTP_DETAILED_STR_STATS)
1060 	int j;
1061 #endif
1062 
1063 	asoc = &stcb->asoc;
1064 	/* init all variables to a known value. */
1065 	SCTP_SET_STATE(stcb, SCTP_STATE_INUSE);
1066 	asoc->max_burst = inp->sctp_ep.max_burst;
1067 	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
1068 	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
1069 	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
1070 	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
1071 	asoc->ecn_supported = inp->ecn_supported;
1072 	asoc->prsctp_supported = inp->prsctp_supported;
1073 	asoc->idata_supported = inp->idata_supported;
1074 	asoc->auth_supported = inp->auth_supported;
1075 	asoc->asconf_supported = inp->asconf_supported;
1076 	asoc->reconfig_supported = inp->reconfig_supported;
1077 	asoc->nrsack_supported = inp->nrsack_supported;
1078 	asoc->pktdrop_supported = inp->pktdrop_supported;
1079 	asoc->idata_supported = inp->idata_supported;
1080 	asoc->sctp_cmt_pf = (uint8_t)0;
1081 	asoc->sctp_frag_point = inp->sctp_frag_point;
1082 	asoc->sctp_features = inp->sctp_features;
1083 	asoc->default_dscp = inp->sctp_ep.default_dscp;
1084 	asoc->max_cwnd = inp->max_cwnd;
1085 #ifdef INET6
1086 	if (inp->sctp_ep.default_flowlabel) {
1087 		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
1088 	} else {
1089 		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
1090 			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
1091 			asoc->default_flowlabel &= 0x000fffff;
1092 			asoc->default_flowlabel |= 0x80000000;
1093 		} else {
1094 			asoc->default_flowlabel = 0;
1095 		}
1096 	}
1097 #endif
1098 	asoc->sb_send_resv = 0;
1099 	if (override_tag) {
1100 		asoc->my_vtag = override_tag;
1101 	} else {
1102 		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
1103 	}
1104 	/* Get the nonce tags */
1105 	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1106 	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1107 	asoc->vrf_id = vrf_id;
1108 
1109 #ifdef SCTP_ASOCLOG_OF_TSNS
1110 	asoc->tsn_in_at = 0;
1111 	asoc->tsn_out_at = 0;
1112 	asoc->tsn_in_wrapped = 0;
1113 	asoc->tsn_out_wrapped = 0;
1114 	asoc->cumack_log_at = 0;
1115 	asoc->cumack_log_atsnt = 0;
1116 #endif
1117 #ifdef SCTP_FS_SPEC_LOG
1118 	asoc->fs_index = 0;
1119 #endif
1120 	asoc->refcnt = 0;
1121 	asoc->assoc_up_sent = 0;
1122 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
1123 	    sctp_select_initial_TSN(&inp->sctp_ep);
1124 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1125 	/* we are optimisitic here */
1126 	asoc->peer_supports_nat = 0;
1127 	asoc->sent_queue_retran_cnt = 0;
1128 
1129 	/* for CMT */
1130 	asoc->last_net_cmt_send_started = NULL;
1131 
1132 	/* This will need to be adjusted */
1133 	asoc->last_acked_seq = asoc->init_seq_number - 1;
1134 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1135 	asoc->asconf_seq_in = asoc->last_acked_seq;
1136 
1137 	/* here we are different, we hold the next one we expect */
1138 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
1139 
1140 	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
1141 	asoc->initial_rto = inp->sctp_ep.initial_rto;
1142 
1143 	asoc->default_mtu = inp->sctp_ep.default_mtu;
1144 	asoc->max_init_times = inp->sctp_ep.max_init_times;
1145 	asoc->max_send_times = inp->sctp_ep.max_send_times;
1146 	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1147 	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1148 	asoc->free_chunk_cnt = 0;
1149 
1150 	asoc->iam_blocking = 0;
1151 	asoc->context = inp->sctp_context;
1152 	asoc->local_strreset_support = inp->local_strreset_support;
1153 	asoc->def_send = inp->def_send;
1154 	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1155 	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1156 	asoc->pr_sctp_cnt = 0;
1157 	asoc->total_output_queue_size = 0;
1158 
1159 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1160 		asoc->scope.ipv6_addr_legal = 1;
1161 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1162 			asoc->scope.ipv4_addr_legal = 1;
1163 		} else {
1164 			asoc->scope.ipv4_addr_legal = 0;
1165 		}
1166 	} else {
1167 		asoc->scope.ipv6_addr_legal = 0;
1168 		asoc->scope.ipv4_addr_legal = 1;
1169 	}
1170 
1171 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1172 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1173 
1174 	asoc->smallest_mtu = inp->sctp_frag_point;
1175 	asoc->minrto = inp->sctp_ep.sctp_minrto;
1176 	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1177 
1178 	asoc->stream_locked_on = 0;
1179 	asoc->ecn_echo_cnt_onq = 0;
1180 	asoc->stream_locked = 0;
1181 
1182 	asoc->send_sack = 1;
1183 
1184 	LIST_INIT(&asoc->sctp_restricted_addrs);
1185 
1186 	TAILQ_INIT(&asoc->nets);
1187 	TAILQ_INIT(&asoc->pending_reply_queue);
1188 	TAILQ_INIT(&asoc->asconf_ack_sent);
1189 	/* Setup to fill the hb random cache at first HB */
1190 	asoc->hb_random_idx = 4;
1191 
1192 	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1193 
1194 	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1195 	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1196 
1197 	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1198 	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1199 
1200 	/*
1201 	 * Now the stream parameters, here we allocate space for all streams
1202 	 * that we request by default.
1203 	 */
1204 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1205 	    o_strms;
1206 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1207 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1208 	    SCTP_M_STRMO);
1209 	if (asoc->strmout == NULL) {
1210 		/* big trouble no memory */
1211 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1212 		return (ENOMEM);
1213 	}
1214 	for (i = 0; i < asoc->streamoutcnt; i++) {
1215 		/*
1216 		 * inbound side must be set to 0xffff, also NOTE when we get
1217 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1218 		 * count (streamoutcnt) but first check if we sent to any of
1219 		 * the upper streams that were dropped (if some were). Those
1220 		 * that were dropped must be notified to the upper layer as
1221 		 * failed to send.
1222 		 */
1223 		asoc->strmout[i].next_mid_ordered = 0;
1224 		asoc->strmout[i].next_mid_unordered = 0;
1225 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1226 		asoc->strmout[i].chunks_on_queues = 0;
1227 #if defined(SCTP_DETAILED_STR_STATS)
1228 		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1229 			asoc->strmout[i].abandoned_sent[j] = 0;
1230 			asoc->strmout[i].abandoned_unsent[j] = 0;
1231 		}
1232 #else
1233 		asoc->strmout[i].abandoned_sent[0] = 0;
1234 		asoc->strmout[i].abandoned_unsent[0] = 0;
1235 #endif
1236 		asoc->strmout[i].sid = i;
1237 		asoc->strmout[i].last_msg_incomplete = 0;
1238 		asoc->strmout[i].state = SCTP_STREAM_OPENING;
1239 		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
1240 	}
1241 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1242 
1243 	/* Now the mapping array */
1244 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1245 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1246 	    SCTP_M_MAP);
1247 	if (asoc->mapping_array == NULL) {
1248 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1249 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1250 		return (ENOMEM);
1251 	}
1252 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1253 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1254 	    SCTP_M_MAP);
1255 	if (asoc->nr_mapping_array == NULL) {
1256 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1257 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1258 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1259 		return (ENOMEM);
1260 	}
1261 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1262 
1263 	/* Now the init of the other outqueues */
1264 	TAILQ_INIT(&asoc->free_chunks);
1265 	TAILQ_INIT(&asoc->control_send_queue);
1266 	TAILQ_INIT(&asoc->asconf_send_queue);
1267 	TAILQ_INIT(&asoc->send_queue);
1268 	TAILQ_INIT(&asoc->sent_queue);
1269 	TAILQ_INIT(&asoc->resetHead);
1270 	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1271 	TAILQ_INIT(&asoc->asconf_queue);
1272 	/* authentication fields */
1273 	asoc->authinfo.random = NULL;
1274 	asoc->authinfo.active_keyid = 0;
1275 	asoc->authinfo.assoc_key = NULL;
1276 	asoc->authinfo.assoc_keyid = 0;
1277 	asoc->authinfo.recv_key = NULL;
1278 	asoc->authinfo.recv_keyid = 0;
1279 	LIST_INIT(&asoc->shared_keys);
1280 	asoc->marked_retrans = 0;
1281 	asoc->port = inp->sctp_ep.port;
1282 	asoc->timoinit = 0;
1283 	asoc->timodata = 0;
1284 	asoc->timosack = 0;
1285 	asoc->timoshutdown = 0;
1286 	asoc->timoheartbeat = 0;
1287 	asoc->timocookie = 0;
1288 	asoc->timoshutdownack = 0;
1289 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1290 	asoc->discontinuity_time = asoc->start_time;
1291 	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1292 		asoc->abandoned_unsent[i] = 0;
1293 		asoc->abandoned_sent[i] = 0;
1294 	}
1295 	/*
1296 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1297 	 * freed later when the association is freed.
1298 	 */
1299 	return (0);
1300 }
1301 
1302 void
1303 sctp_print_mapping_array(struct sctp_association *asoc)
1304 {
1305 	unsigned int i, limit;
1306 
1307 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1308 	    asoc->mapping_array_size,
1309 	    asoc->mapping_array_base_tsn,
1310 	    asoc->cumulative_tsn,
1311 	    asoc->highest_tsn_inside_map,
1312 	    asoc->highest_tsn_inside_nr_map);
1313 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1314 		if (asoc->mapping_array[limit - 1] != 0) {
1315 			break;
1316 		}
1317 	}
1318 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1319 	for (i = 0; i < limit; i++) {
1320 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1321 	}
1322 	if (limit % 16)
1323 		SCTP_PRINTF("\n");
1324 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1325 		if (asoc->nr_mapping_array[limit - 1]) {
1326 			break;
1327 		}
1328 	}
1329 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1330 	for (i = 0; i < limit; i++) {
1331 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1332 	}
1333 	if (limit % 16)
1334 		SCTP_PRINTF("\n");
1335 }
1336 
1337 int
1338 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1339 {
1340 	/* mapping array needs to grow */
1341 	uint8_t *new_array1, *new_array2;
1342 	uint32_t new_size;
1343 
1344 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1345 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1346 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1347 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1348 		/* can't get more, forget it */
1349 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1350 		if (new_array1) {
1351 			SCTP_FREE(new_array1, SCTP_M_MAP);
1352 		}
1353 		if (new_array2) {
1354 			SCTP_FREE(new_array2, SCTP_M_MAP);
1355 		}
1356 		return (-1);
1357 	}
1358 	memset(new_array1, 0, new_size);
1359 	memset(new_array2, 0, new_size);
1360 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1361 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1362 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1363 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1364 	asoc->mapping_array = new_array1;
1365 	asoc->nr_mapping_array = new_array2;
1366 	asoc->mapping_array_size = new_size;
1367 	return (0);
1368 }
1369 
1370 
/*
 * Walk endpoints (and their associations), invoking the iterator's
 * callbacks on each match.  Runs with the INP_INFO read lock and the
 * ITERATOR lock held; per-inp and per-tcb locks are taken as it goes.
 * The iterator structure itself is freed here when the walk completes.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	sctp_it_ctl.cur_it = it;
	if (it->inp) {
		/* Drop the reference taken when the iterator was queued. */
		SCTP_INP_RLOCK(it->inp);
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		/* Common exit: drop locks, run the at-end callback, free it. */
		sctp_it_ctl.cur_it = NULL;
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	if (first_in) {
		/* it->inp is already read-locked from above. */
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	/* Skip endpoints whose flags/features don't match the filter. */
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		/* Per-inp callback asked to skip, or no associations here. */
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/*
			 * Pin the tcb and inp with refcounts, drop every
			 * lock, then re-acquire in the standard order.
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				/* Someone asked us to stop while we slept. */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}

		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1521 
1522 void
1523 sctp_iterator_worker(void)
1524 {
1525 	struct sctp_iterator *it;
1526 
1527 	/* This function is called with the WQ lock in place */
1528 	sctp_it_ctl.iterator_running = 1;
1529 	while ((it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead)) != NULL) {
1530 		/* now lets work on this one */
1531 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1532 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1533 		CURVNET_SET(it->vn);
1534 		sctp_iterator_work(it);
1535 		CURVNET_RESTORE();
1536 		SCTP_IPI_ITERATOR_WQ_LOCK();
1537 		/* sa_ignore FREED_MEMORY */
1538 	}
1539 	sctp_it_ctl.iterator_running = 0;
1540 	return;
1541 }
1542 
1543 
1544 static void
1545 sctp_handle_addr_wq(void)
1546 {
1547 	/* deal with the ADDR wq from the rtsock calls */
1548 	struct sctp_laddr *wi, *nwi;
1549 	struct sctp_asconf_iterator *asc;
1550 
1551 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1552 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1553 	if (asc == NULL) {
1554 		/* Try later, no memory */
1555 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1556 		    (struct sctp_inpcb *)NULL,
1557 		    (struct sctp_tcb *)NULL,
1558 		    (struct sctp_nets *)NULL);
1559 		return;
1560 	}
1561 	LIST_INIT(&asc->list_of_work);
1562 	asc->cnt = 0;
1563 
1564 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1565 		LIST_REMOVE(wi, sctp_nxt_addr);
1566 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1567 		asc->cnt++;
1568 	}
1569 
1570 	if (asc->cnt == 0) {
1571 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1572 	} else {
1573 		int ret;
1574 
1575 		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
1576 		    sctp_asconf_iterator_stcb,
1577 		    NULL,	/* No ep end for boundall */
1578 		    SCTP_PCB_FLAGS_BOUNDALL,
1579 		    SCTP_PCB_ANY_FEATURES,
1580 		    SCTP_ASOC_ANY_STATE,
1581 		    (void *)asc, 0,
1582 		    sctp_asconf_iterator_end, NULL, 0);
1583 		if (ret) {
1584 			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
1585 			/*
1586 			 * Freeing if we are stopping or put back on the
1587 			 * addr_wq.
1588 			 */
1589 			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
1590 				sctp_asconf_iterator_end(asc, 0);
1591 			} else {
1592 				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
1593 					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
1594 				}
1595 				SCTP_FREE(asc, SCTP_M_ASC_IT);
1596 			}
1597 		}
1598 	}
1599 }
1600 
1601 void
1602 sctp_timeout_handler(void *t)
1603 {
1604 	struct sctp_inpcb *inp;
1605 	struct sctp_tcb *stcb;
1606 	struct sctp_nets *net;
1607 	struct sctp_timer *tmr;
1608 	struct mbuf *op_err;
1609 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1610 	struct socket *so;
1611 #endif
1612 	int did_output;
1613 	int type;
1614 
1615 	tmr = (struct sctp_timer *)t;
1616 	inp = (struct sctp_inpcb *)tmr->ep;
1617 	stcb = (struct sctp_tcb *)tmr->tcb;
1618 	net = (struct sctp_nets *)tmr->net;
1619 	CURVNET_SET((struct vnet *)tmr->vnet);
1620 	did_output = 1;
1621 
1622 #ifdef SCTP_AUDITING_ENABLED
1623 	sctp_audit_log(0xF0, (uint8_t)tmr->type);
1624 	sctp_auditing(3, inp, stcb, net);
1625 #endif
1626 
1627 	/* sanity checks... */
1628 	if (tmr->self != (void *)tmr) {
1629 		/*
1630 		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
1631 		 * (void *)tmr);
1632 		 */
1633 		CURVNET_RESTORE();
1634 		return;
1635 	}
1636 	tmr->stopped_from = 0xa001;
1637 	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
1638 		/*
1639 		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
1640 		 * tmr->type);
1641 		 */
1642 		CURVNET_RESTORE();
1643 		return;
1644 	}
1645 	tmr->stopped_from = 0xa002;
1646 	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
1647 		CURVNET_RESTORE();
1648 		return;
1649 	}
1650 	/* if this is an iterator timeout, get the struct and clear inp */
1651 	tmr->stopped_from = 0xa003;
1652 	if (inp) {
1653 		SCTP_INP_INCR_REF(inp);
1654 		if ((inp->sctp_socket == NULL) &&
1655 		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
1656 		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
1657 		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
1658 		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
1659 		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
1660 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
1661 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
1662 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
1663 		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))) {
1664 			SCTP_INP_DECR_REF(inp);
1665 			CURVNET_RESTORE();
1666 			return;
1667 		}
1668 	}
1669 	tmr->stopped_from = 0xa004;
1670 	if (stcb) {
1671 		atomic_add_int(&stcb->asoc.refcnt, 1);
1672 		if (stcb->asoc.state == 0) {
1673 			atomic_add_int(&stcb->asoc.refcnt, -1);
1674 			if (inp) {
1675 				SCTP_INP_DECR_REF(inp);
1676 			}
1677 			CURVNET_RESTORE();
1678 			return;
1679 		}
1680 	}
1681 	type = tmr->type;
1682 	tmr->stopped_from = 0xa005;
1683 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", type);
1684 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1685 		if (inp) {
1686 			SCTP_INP_DECR_REF(inp);
1687 		}
1688 		if (stcb) {
1689 			atomic_add_int(&stcb->asoc.refcnt, -1);
1690 		}
1691 		CURVNET_RESTORE();
1692 		return;
1693 	}
1694 	tmr->stopped_from = 0xa006;
1695 
1696 	if (stcb) {
1697 		SCTP_TCB_LOCK(stcb);
1698 		atomic_add_int(&stcb->asoc.refcnt, -1);
1699 		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
1700 		    ((stcb->asoc.state == 0) ||
1701 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1702 			SCTP_TCB_UNLOCK(stcb);
1703 			if (inp) {
1704 				SCTP_INP_DECR_REF(inp);
1705 			}
1706 			CURVNET_RESTORE();
1707 			return;
1708 		}
1709 	} else if (inp != NULL) {
1710 		if (type != SCTP_TIMER_TYPE_INPKILL) {
1711 			SCTP_INP_WLOCK(inp);
1712 		}
1713 	} else {
1714 		SCTP_WQ_ADDR_LOCK();
1715 	}
1716 	/* record in stopped what t-o occurred */
1717 	tmr->stopped_from = type;
1718 
1719 	/* mark as being serviced now */
1720 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1721 		/*
1722 		 * Callout has been rescheduled.
1723 		 */
1724 		goto get_out;
1725 	}
1726 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1727 		/*
1728 		 * Not active, so no action.
1729 		 */
1730 		goto get_out;
1731 	}
1732 	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1733 
1734 	/* call the handler for the appropriate timer type */
1735 	switch (type) {
1736 	case SCTP_TIMER_TYPE_ADDR_WQ:
1737 		sctp_handle_addr_wq();
1738 		break;
1739 	case SCTP_TIMER_TYPE_SEND:
1740 		if ((stcb == NULL) || (inp == NULL)) {
1741 			break;
1742 		}
1743 		SCTP_STAT_INCR(sctps_timodata);
1744 		stcb->asoc.timodata++;
1745 		stcb->asoc.num_send_timers_up--;
1746 		if (stcb->asoc.num_send_timers_up < 0) {
1747 			stcb->asoc.num_send_timers_up = 0;
1748 		}
1749 		SCTP_TCB_LOCK_ASSERT(stcb);
1750 		if (sctp_t3rxt_timer(inp, stcb, net)) {
1751 			/* no need to unlock on tcb its gone */
1752 
1753 			goto out_decr;
1754 		}
1755 		SCTP_TCB_LOCK_ASSERT(stcb);
1756 #ifdef SCTP_AUDITING_ENABLED
1757 		sctp_auditing(4, inp, stcb, net);
1758 #endif
1759 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1760 		if ((stcb->asoc.num_send_timers_up == 0) &&
1761 		    (stcb->asoc.sent_queue_cnt > 0)) {
1762 			struct sctp_tmit_chunk *chk;
1763 
1764 			/*
1765 			 * safeguard. If there on some on the sent queue
1766 			 * somewhere but no timers running something is
1767 			 * wrong... so we start a timer on the first chunk
1768 			 * on the send queue on whatever net it is sent to.
1769 			 */
1770 			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
1771 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
1772 			    chk->whoTo);
1773 		}
1774 		break;
1775 	case SCTP_TIMER_TYPE_INIT:
1776 		if ((stcb == NULL) || (inp == NULL)) {
1777 			break;
1778 		}
1779 		SCTP_STAT_INCR(sctps_timoinit);
1780 		stcb->asoc.timoinit++;
1781 		if (sctp_t1init_timer(inp, stcb, net)) {
1782 			/* no need to unlock on tcb its gone */
1783 			goto out_decr;
1784 		}
1785 		/* We do output but not here */
1786 		did_output = 0;
1787 		break;
1788 	case SCTP_TIMER_TYPE_RECV:
1789 		if ((stcb == NULL) || (inp == NULL)) {
1790 			break;
1791 		}
1792 		SCTP_STAT_INCR(sctps_timosack);
1793 		stcb->asoc.timosack++;
1794 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
1795 #ifdef SCTP_AUDITING_ENABLED
1796 		sctp_auditing(4, inp, stcb, net);
1797 #endif
1798 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1799 		break;
1800 	case SCTP_TIMER_TYPE_SHUTDOWN:
1801 		if ((stcb == NULL) || (inp == NULL)) {
1802 			break;
1803 		}
1804 		if (sctp_shutdown_timer(inp, stcb, net)) {
1805 			/* no need to unlock on tcb its gone */
1806 			goto out_decr;
1807 		}
1808 		SCTP_STAT_INCR(sctps_timoshutdown);
1809 		stcb->asoc.timoshutdown++;
1810 #ifdef SCTP_AUDITING_ENABLED
1811 		sctp_auditing(4, inp, stcb, net);
1812 #endif
1813 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1814 		break;
1815 	case SCTP_TIMER_TYPE_HEARTBEAT:
1816 		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
1817 			break;
1818 		}
1819 		SCTP_STAT_INCR(sctps_timoheartbeat);
1820 		stcb->asoc.timoheartbeat++;
1821 		if (sctp_heartbeat_timer(inp, stcb, net)) {
1822 			/* no need to unlock on tcb its gone */
1823 			goto out_decr;
1824 		}
1825 #ifdef SCTP_AUDITING_ENABLED
1826 		sctp_auditing(4, inp, stcb, net);
1827 #endif
1828 		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
1829 			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
1830 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
1831 		}
1832 		break;
1833 	case SCTP_TIMER_TYPE_COOKIE:
1834 		if ((stcb == NULL) || (inp == NULL)) {
1835 			break;
1836 		}
1837 
1838 		if (sctp_cookie_timer(inp, stcb, net)) {
1839 			/* no need to unlock on tcb its gone */
1840 			goto out_decr;
1841 		}
1842 		SCTP_STAT_INCR(sctps_timocookie);
1843 		stcb->asoc.timocookie++;
1844 #ifdef SCTP_AUDITING_ENABLED
1845 		sctp_auditing(4, inp, stcb, net);
1846 #endif
1847 		/*
1848 		 * We consider T3 and Cookie timer pretty much the same with
1849 		 * respect to where from in chunk_output.
1850 		 */
1851 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1852 		break;
1853 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1854 		{
1855 			struct timeval tv;
1856 			int i, secret;
1857 
1858 			if (inp == NULL) {
1859 				break;
1860 			}
1861 			SCTP_STAT_INCR(sctps_timosecret);
1862 			(void)SCTP_GETTIME_TIMEVAL(&tv);
1863 			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1864 			inp->sctp_ep.last_secret_number =
1865 			    inp->sctp_ep.current_secret_number;
1866 			inp->sctp_ep.current_secret_number++;
1867 			if (inp->sctp_ep.current_secret_number >=
1868 			    SCTP_HOW_MANY_SECRETS) {
1869 				inp->sctp_ep.current_secret_number = 0;
1870 			}
1871 			secret = (int)inp->sctp_ep.current_secret_number;
1872 			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1873 				inp->sctp_ep.secret_key[secret][i] =
1874 				    sctp_select_initial_TSN(&inp->sctp_ep);
1875 			}
1876 			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
1877 		}
1878 		did_output = 0;
1879 		break;
1880 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1881 		if ((stcb == NULL) || (inp == NULL)) {
1882 			break;
1883 		}
1884 		SCTP_STAT_INCR(sctps_timopathmtu);
1885 		sctp_pathmtu_timer(inp, stcb, net);
1886 		did_output = 0;
1887 		break;
1888 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1889 		if ((stcb == NULL) || (inp == NULL)) {
1890 			break;
1891 		}
1892 		if (sctp_shutdownack_timer(inp, stcb, net)) {
1893 			/* no need to unlock on tcb its gone */
1894 			goto out_decr;
1895 		}
1896 		SCTP_STAT_INCR(sctps_timoshutdownack);
1897 		stcb->asoc.timoshutdownack++;
1898 #ifdef SCTP_AUDITING_ENABLED
1899 		sctp_auditing(4, inp, stcb, net);
1900 #endif
1901 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
1902 		break;
1903 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1904 		if ((stcb == NULL) || (inp == NULL)) {
1905 			break;
1906 		}
1907 		SCTP_STAT_INCR(sctps_timoshutdownguard);
1908 		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
1909 		    "Shutdown guard timer expired");
1910 		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
1911 		/* no need to unlock on tcb its gone */
1912 		goto out_decr;
1913 
1914 	case SCTP_TIMER_TYPE_STRRESET:
1915 		if ((stcb == NULL) || (inp == NULL)) {
1916 			break;
1917 		}
1918 		if (sctp_strreset_timer(inp, stcb, net)) {
1919 			/* no need to unlock on tcb its gone */
1920 			goto out_decr;
1921 		}
1922 		SCTP_STAT_INCR(sctps_timostrmrst);
1923 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
1924 		break;
1925 	case SCTP_TIMER_TYPE_ASCONF:
1926 		if ((stcb == NULL) || (inp == NULL)) {
1927 			break;
1928 		}
1929 		if (sctp_asconf_timer(inp, stcb, net)) {
1930 			/* no need to unlock on tcb its gone */
1931 			goto out_decr;
1932 		}
1933 		SCTP_STAT_INCR(sctps_timoasconf);
1934 #ifdef SCTP_AUDITING_ENABLED
1935 		sctp_auditing(4, inp, stcb, net);
1936 #endif
1937 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
1938 		break;
1939 	case SCTP_TIMER_TYPE_PRIM_DELETED:
1940 		if ((stcb == NULL) || (inp == NULL)) {
1941 			break;
1942 		}
1943 		sctp_delete_prim_timer(inp, stcb, net);
1944 		SCTP_STAT_INCR(sctps_timodelprim);
1945 		break;
1946 
1947 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1948 		if ((stcb == NULL) || (inp == NULL)) {
1949 			break;
1950 		}
1951 		SCTP_STAT_INCR(sctps_timoautoclose);
1952 		sctp_autoclose_timer(inp, stcb, net);
1953 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
1954 		did_output = 0;
1955 		break;
1956 	case SCTP_TIMER_TYPE_ASOCKILL:
1957 		if ((stcb == NULL) || (inp == NULL)) {
1958 			break;
1959 		}
1960 		SCTP_STAT_INCR(sctps_timoassockill);
1961 		/* Can we free it yet? */
1962 		SCTP_INP_DECR_REF(inp);
1963 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
1964 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
1965 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1966 		so = SCTP_INP_SO(inp);
1967 		atomic_add_int(&stcb->asoc.refcnt, 1);
1968 		SCTP_TCB_UNLOCK(stcb);
1969 		SCTP_SOCKET_LOCK(so, 1);
1970 		SCTP_TCB_LOCK(stcb);
1971 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1972 #endif
1973 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
1974 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
1975 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1976 		SCTP_SOCKET_UNLOCK(so, 1);
1977 #endif
1978 		/*
1979 		 * free asoc, always unlocks (or destroy's) so prevent
1980 		 * duplicate unlock or unlock of a free mtx :-0
1981 		 */
1982 		stcb = NULL;
1983 		goto out_no_decr;
1984 	case SCTP_TIMER_TYPE_INPKILL:
1985 		SCTP_STAT_INCR(sctps_timoinpkill);
1986 		if (inp == NULL) {
1987 			break;
1988 		}
1989 		/*
1990 		 * special case, take away our increment since WE are the
1991 		 * killer
1992 		 */
1993 		SCTP_INP_DECR_REF(inp);
1994 		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
1995 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
1996 		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
1997 		    SCTP_CALLED_FROM_INPKILL_TIMER);
1998 		inp = NULL;
1999 		goto out_no_decr;
2000 	default:
2001 		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
2002 		    type);
2003 		break;
2004 	}
2005 #ifdef SCTP_AUDITING_ENABLED
2006 	sctp_audit_log(0xF1, (uint8_t)type);
2007 	if (inp)
2008 		sctp_auditing(5, inp, stcb, net);
2009 #endif
2010 	if ((did_output) && stcb) {
2011 		/*
2012 		 * Now we need to clean up the control chunk chain if an
2013 		 * ECNE is on it. It must be marked as UNSENT again so next
2014 		 * call will continue to send it until such time that we get
2015 		 * a CWR, to remove it. It is, however, less likely that we
2016 		 * will find a ecn echo on the chain though.
2017 		 */
2018 		sctp_fix_ecn_echo(&stcb->asoc);
2019 	}
2020 get_out:
2021 	if (stcb) {
2022 		SCTP_TCB_UNLOCK(stcb);
2023 	} else if (inp != NULL) {
2024 		SCTP_INP_WUNLOCK(inp);
2025 	} else {
2026 		SCTP_WQ_ADDR_UNLOCK();
2027 	}
2028 
2029 out_decr:
2030 	if (inp) {
2031 		SCTP_INP_DECR_REF(inp);
2032 	}
2033 
2034 out_no_decr:
2035 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type = %d)\n", type);
2036 	CURVNET_RESTORE();
2037 }
2038 
/*
 * Start (arm) the timer of type t_type for the given endpoint/association/
 * destination.  Selects the struct sctp_timer that backs this timer type,
 * computes the duration in ticks (usually derived from the destination's
 * RTO, the association's initial RTO, or an endpoint tunable), and starts
 * the OS callout.  If the timer is already pending, it is left running
 * unchanged.  Silently returns on any missing/invalid argument for the
 * requested type.  Caller must hold the TCB lock when stcb is non-NULL.
 */
void
sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	uint32_t to_ticks;
	struct sctp_timer *tmr;

	/* Only the address work-queue timer may run without an endpoint. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	/* Don't restart timer on net that's been removed. */
	if (net != NULL && (net->dest_state & SCTP_ADDR_BEING_DELETED)) {
		return;
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ADDR_WQ:
		/* Only 1 tick away :-) */
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		to_ticks = SCTP_ADDRESS_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* Here we use the RTO timer */
		{
			int rto_val;

			if ((stcb == NULL) || (net == NULL)) {
				return;
			}
			tmr = &net->rxt_timer;
			/* RTO == 0 means no RTT measurement yet; fall
			 * back to the association's initial RTO. */
			if (net->RTO == 0) {
				rto_val = stcb->asoc.initial_rto;
			} else {
				rto_val = net->RTO;
			}
			to_ticks = MSEC_TO_TICKS(rto_val);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/*
		 * Here we use the INIT timer default usually about 1
		 * minute.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_RECV:
		/*
		 * Here we use the Delayed-Ack timer value from the inp
		 * ususually about 200ms.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		/* Here we use the RTO of the destination. */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		/*
		 * the net is used here so that we can add in the RTO. Even
		 * though we use a different timer. We also add the HB timer
		 * PLUS a random jitter.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		} else {
			uint32_t rndval;
			uint32_t jitter;

			/* No HB unless enabled, or the address is still
			 * unconfirmed (HB doubles as confirmation probe). */
			if ((net->dest_state & SCTP_ADDR_NOHB) &&
			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
				return;
			}
			if (net->RTO == 0) {
				to_ticks = stcb->asoc.initial_rto;
			} else {
				to_ticks = net->RTO;
			}
			/* Jitter the interval by up to +/- RTO/2. */
			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
			jitter = rndval % to_ticks;
			if (jitter >= (to_ticks >> 1)) {
				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
			} else {
				to_ticks = to_ticks - jitter;
			}
			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
			    !(net->dest_state & SCTP_ADDR_PF)) {
				to_ticks += net->heart_beat_delay;
			}
			/*
			 * Now we must convert the to_ticks that are now in
			 * ms to ticks.
			 */
			to_ticks = MSEC_TO_TICKS(to_ticks);
			tmr = &net->hb_timer;
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		/*
		 * Here we can use the RTO timer from the network since one
		 * RTT was compelete. If a retran happened then we will be
		 * using the RTO initial value.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/*
		 * nothing needed but the endpoint here ususually about 60
		 * minutes.
		 */
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if (stcb == NULL) {
			return;
		}
		/* Re-uses the stream-reset timer slot for the kill delay. */
		tmr = &stcb->asoc.strreset_timer;
		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		/*
		 * Here we use the value found in the EP for PMTU ususually
		 * about 10 minutes.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
			return;
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		/* Here we use the RTO of the destination */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/*
		 * Here we use the endpoints shutdown guard timer usually
		 * about 3 minutes.
		 */
		if (stcb == NULL) {
			return;
		}
		/* Tunable of 0 means "use 5 * max RTO" as the guard. */
		if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
			to_ticks = 5 * MSEC_TO_TICKS(stcb->asoc.maxrto);
		} else {
			to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		/* Note: this type requires net == NULL, unlike the rest. */
		if ((stcb == NULL) || (net != NULL)) {
			return;
		}
		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		if (stcb->asoc.sctp_autoclose_ticks == 0) {
			/*
			 * Really an error since stcb is NOT set to
			 * autoclose
			 */
			return;
		}
		to_ticks = stcb->asoc.sctp_autoclose_ticks;
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __func__, t_type);
		return;
		break;
	}
	if ((to_ticks <= 0) || (tmr == NULL)) {
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
		    __func__, t_type, to_ticks, (void *)tmr);
		return;
	}
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * we do NOT allow you to have it already running. if it is
		 * we leave the current one up unchanged
		 */
		return;
	}
	/* At this point we can proceed */
	if (t_type == SCTP_TIMER_TYPE_SEND) {
		stcb->asoc.num_send_timers_up++;
	}
	/* Record the context the timeout handler will need. */
	tmr->stopped_from = 0;
	tmr->type = t_type;
	tmr->ep = (void *)inp;
	tmr->tcb = (void *)stcb;
	tmr->net = (void *)net;
	tmr->self = (void *)tmr;
	tmr->vnet = (void *)curvnet;
	tmr->ticks = sctp_get_tick_count();
	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
	return;
}
2323 
/*
 * Stop (disarm) the timer of type t_type for the given endpoint/
 * association/destination.  Selects the backing struct sctp_timer for the
 * type, refuses to stop a timer slot that is currently in use by a
 * DIFFERENT timer type (several types share one slot, e.g. COOKIE and
 * SEND share net->rxt_timer), and records 'from' as the stop reason for
 * debugging.  Caller must hold the TCB lock when stcb is non-NULL.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* Only the address work-queue timer may run without an endpoint. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.
		 */
		if (stcb == NULL) {
			return;
		}
		/* Shares the stream-reset timer slot. */
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __func__, t_type);
		break;
	}
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		return;
	}
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		/* Keep the active-send-timer count consistent; clamp at 0. */
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	/* Clearing self tells a concurrently firing handler to bail. */
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2474 
2475 uint32_t
2476 sctp_calculate_len(struct mbuf *m)
2477 {
2478 	uint32_t tlen = 0;
2479 	struct mbuf *at;
2480 
2481 	at = m;
2482 	while (at) {
2483 		tlen += SCTP_BUF_LEN(at);
2484 		at = SCTP_BUF_NEXT(at);
2485 	}
2486 	return (tlen);
2487 }
2488 
2489 void
2490 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2491     struct sctp_association *asoc, uint32_t mtu)
2492 {
2493 	/*
2494 	 * Reset the P-MTU size on this association, this involves changing
2495 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2496 	 * allow the DF flag to be cleared.
2497 	 */
2498 	struct sctp_tmit_chunk *chk;
2499 	unsigned int eff_mtu, ovh;
2500 
2501 	asoc->smallest_mtu = mtu;
2502 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2503 		ovh = SCTP_MIN_OVERHEAD;
2504 	} else {
2505 		ovh = SCTP_MIN_V4_OVERHEAD;
2506 	}
2507 	eff_mtu = mtu - ovh;
2508 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2509 		if (chk->send_size > eff_mtu) {
2510 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2511 		}
2512 	}
2513 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2514 		if (chk->send_size > eff_mtu) {
2515 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2516 		}
2517 	}
2518 }
2519 
2520 
2521 /*
2522  * Given an association and starting time of the current RTT period, update
2523  * RTO in number of msecs. net should point to the current network.
2524  * Return 1, if an RTO update was performed, return 0 if no update was
2525  * performed due to invalid starting point.
2526  */
2527 
2528 int
2529 sctp_calculate_rto(struct sctp_tcb *stcb,
2530     struct sctp_association *asoc,
2531     struct sctp_nets *net,
2532     struct timeval *old,
2533     int rtt_from_sack)
2534 {
2535 	struct timeval now;
2536 	uint64_t rtt_us;	/* RTT in us */
2537 	int32_t rtt;		/* RTT in ms */
2538 	uint32_t new_rto;
2539 	int first_measure = 0;
2540 
2541 	/************************/
2542 	/* 1. calculate new RTT */
2543 	/************************/
2544 	/* get the current time */
2545 	if (stcb->asoc.use_precise_time) {
2546 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2547 	} else {
2548 		(void)SCTP_GETTIME_TIMEVAL(&now);
2549 	}
2550 	if ((old->tv_sec > now.tv_sec) ||
2551 	    ((old->tv_sec == now.tv_sec) && (old->tv_sec > now.tv_sec))) {
2552 		/* The starting point is in the future. */
2553 		return (0);
2554 	}
2555 	timevalsub(&now, old);
2556 	rtt_us = (uint64_t)1000000 * (uint64_t)now.tv_sec + (uint64_t)now.tv_usec;
2557 	if (rtt_us > SCTP_RTO_UPPER_BOUND * 1000) {
2558 		/* The RTT is larger than a sane value. */
2559 		return (0);
2560 	}
2561 	/* store the current RTT in us */
2562 	net->rtt = rtt_us;
2563 	/* compute rtt in ms */
2564 	rtt = (int32_t)(net->rtt / 1000);
2565 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2566 		/*
2567 		 * Tell the CC module that a new update has just occurred
2568 		 * from a sack
2569 		 */
2570 		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
2571 	}
2572 	/*
2573 	 * Do we need to determine the lan? We do this only on sacks i.e.
2574 	 * RTT being determined from data not non-data (HB/INIT->INITACK).
2575 	 */
2576 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2577 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2578 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2579 			net->lan_type = SCTP_LAN_INTERNET;
2580 		} else {
2581 			net->lan_type = SCTP_LAN_LOCAL;
2582 		}
2583 	}
2584 
2585 	/***************************/
2586 	/* 2. update RTTVAR & SRTT */
2587 	/***************************/
2588 	/*-
2589 	 * Compute the scaled average lastsa and the
2590 	 * scaled variance lastsv as described in van Jacobson
2591 	 * Paper "Congestion Avoidance and Control", Annex A.
2592 	 *
2593 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2594 	 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar
2595 	 */
2596 	if (net->RTO_measured) {
2597 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2598 		net->lastsa += rtt;
2599 		if (rtt < 0) {
2600 			rtt = -rtt;
2601 		}
2602 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2603 		net->lastsv += rtt;
2604 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2605 			rto_logging(net, SCTP_LOG_RTTVAR);
2606 		}
2607 	} else {
2608 		/* First RTO measurment */
2609 		net->RTO_measured = 1;
2610 		first_measure = 1;
2611 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2612 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2613 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2614 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2615 		}
2616 	}
2617 	if (net->lastsv == 0) {
2618 		net->lastsv = SCTP_CLOCK_GRANULARITY;
2619 	}
2620 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2621 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2622 	    (stcb->asoc.sat_network_lockout == 0)) {
2623 		stcb->asoc.sat_network = 1;
2624 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2625 		stcb->asoc.sat_network = 0;
2626 		stcb->asoc.sat_network_lockout = 1;
2627 	}
2628 	/* bound it, per C6/C7 in Section 5.3.1 */
2629 	if (new_rto < stcb->asoc.minrto) {
2630 		new_rto = stcb->asoc.minrto;
2631 	}
2632 	if (new_rto > stcb->asoc.maxrto) {
2633 		new_rto = stcb->asoc.maxrto;
2634 	}
2635 	net->RTO = new_rto;
2636 	return (1);
2637 }
2638 
2639 /*
2640  * return a pointer to a contiguous piece of data from the given mbuf chain
2641  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2642  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2643  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2644  */
2645 caddr_t
2646 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr)
2647 {
2648 	uint32_t count;
2649 	uint8_t *ptr;
2650 
2651 	ptr = in_ptr;
2652 	if ((off < 0) || (len <= 0))
2653 		return (NULL);
2654 
2655 	/* find the desired start location */
2656 	while ((m != NULL) && (off > 0)) {
2657 		if (off < SCTP_BUF_LEN(m))
2658 			break;
2659 		off -= SCTP_BUF_LEN(m);
2660 		m = SCTP_BUF_NEXT(m);
2661 	}
2662 	if (m == NULL)
2663 		return (NULL);
2664 
2665 	/* is the current mbuf large enough (eg. contiguous)? */
2666 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2667 		return (mtod(m, caddr_t)+off);
2668 	} else {
2669 		/* else, it spans more than one mbuf, so save a temp copy... */
2670 		while ((m != NULL) && (len > 0)) {
2671 			count = min(SCTP_BUF_LEN(m) - off, len);
2672 			memcpy(ptr, mtod(m, caddr_t)+off, count);
2673 			len -= count;
2674 			ptr += count;
2675 			off = 0;
2676 			m = SCTP_BUF_NEXT(m);
2677 		}
2678 		if ((m == NULL) && (len > 0))
2679 			return (NULL);
2680 		else
2681 			return ((caddr_t)in_ptr);
2682 	}
2683 }
2684 
2685 
2686 
2687 struct sctp_paramhdr *
2688 sctp_get_next_param(struct mbuf *m,
2689     int offset,
2690     struct sctp_paramhdr *pull,
2691     int pull_limit)
2692 {
2693 	/* This just provides a typed signature to Peter's Pull routine */
2694 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2695 	    (uint8_t *)pull));
2696 }
2697 
2698 
2699 struct mbuf *
2700 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2701 {
2702 	struct mbuf *m_last;
2703 	caddr_t dp;
2704 
2705 	if (padlen > 3) {
2706 		return (NULL);
2707 	}
2708 	if (padlen <= M_TRAILINGSPACE(m)) {
2709 		/*
2710 		 * The easy way. We hope the majority of the time we hit
2711 		 * here :)
2712 		 */
2713 		m_last = m;
2714 	} else {
2715 		/* Hard way we must grow the mbuf chain */
2716 		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2717 		if (m_last == NULL) {
2718 			return (NULL);
2719 		}
2720 		SCTP_BUF_LEN(m_last) = 0;
2721 		SCTP_BUF_NEXT(m_last) = NULL;
2722 		SCTP_BUF_NEXT(m) = m_last;
2723 	}
2724 	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
2725 	SCTP_BUF_LEN(m_last) += padlen;
2726 	memset(dp, 0, padlen);
2727 	return (m_last);
2728 }
2729 
2730 struct mbuf *
2731 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2732 {
2733 	/* find the last mbuf in chain and pad it */
2734 	struct mbuf *m_at;
2735 
2736 	if (last_mbuf != NULL) {
2737 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2738 	} else {
2739 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2740 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2741 				return (sctp_add_pad_tombuf(m_at, padval));
2742 			}
2743 		}
2744 	}
2745 	return (NULL);
2746 }
2747 
/*
 * Deliver an SCTP_ASSOC_CHANGE notification (state: COMM_UP, COMM_LOST,
 * RESTART, CANT_STR_ASSOC, ...) to the socket's receive queue, if the
 * application enabled association events.  For COMM_UP/RESTART the
 * supported-features list is appended; for COMM_LOST/CANT_STR_ASSOC the
 * peer's ABORT chunk (truncated to SCTP_CHUNK_BUFFER_SIZE) is appended.
 * For 1-to-1 style sockets an abortive state additionally sets so_error
 * and shuts down reading; finally any sleepers on the socket are woken.
 * Caller holds the TCB lock; so_locked says whether the socket lock is
 * already held (Apple/lock-testing builds only).
 */
static void
sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
    uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;
	unsigned int notif_len;
	uint16_t abort_len;
	unsigned int i;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

	if (stcb == NULL) {
		return;
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
		if (abort != NULL) {
			abort_len = ntohs(abort->ch.chunk_length);
			/*
			 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be
			 * contiguous.
			 */
			if (abort_len > SCTP_CHUNK_BUFFER_SIZE) {
				abort_len = SCTP_CHUNK_BUFFER_SIZE;
			}
		} else {
			abort_len = 0;
		}
		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
			notif_len += abort_len;
		}
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			/* Retry with smaller value. */
			notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
			if (m_notify == NULL) {
				/* Out of mbufs: skip the notification but
				 * still set the socket error below. */
				goto set_error;
			}
		}
		SCTP_BUF_NEXT(m_notify) = NULL;
		sac = mtod(m_notify, struct sctp_assoc_change *);
		memset(sac, 0, notif_len);
		sac->sac_type = SCTP_ASSOC_CHANGE;
		sac->sac_flags = 0;
		sac->sac_length = sizeof(struct sctp_assoc_change);
		sac->sac_state = state;
		sac->sac_error = error;
		/* XXX verify these stream counts */
		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
		sac->sac_inbound_streams = stcb->asoc.streamincnt;
		sac->sac_assoc_id = sctp_get_associd(stcb);
		if (notif_len > sizeof(struct sctp_assoc_change)) {
			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
				/* One feature code per byte of sac_info. */
				i = 0;
				if (stcb->asoc.prsctp_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
				}
				if (stcb->asoc.auth_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
				}
				if (stcb->asoc.asconf_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
				}
				if (stcb->asoc.idata_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
				}
				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
				if (stcb->asoc.reconfig_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
				}
				sac->sac_length += i;
			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
				memcpy(sac->sac_info, abort, abort_len);
				sac->sac_length += abort_len;
			}
		}
		SCTP_BUF_LEN(m_notify) = sac->sac_length;
		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
		    0, 0, stcb->asoc.context, 0, 0, 0,
		    m_notify);
		if (control != NULL) {
			control->length = SCTP_BUF_LEN(m_notify);
			control->spec_flags = M_NOTIFICATION;
			/* not that we need this */
			control->tail_mbuf = m_notify;
			sctp_add_to_readq(stcb->sctp_ep, stcb,
			    control,
			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
			    so_locked);
		} else {
			sctp_m_freem(m_notify);
		}
	}
	/*
	 * For 1-to-1 style sockets, we send up and error when an ABORT
	 * comes in.
	 */
set_error:
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		SOCK_LOCK(stcb->sctp_socket);
		if (from_peer) {
			if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
				stcb->sctp_socket->so_error = ECONNREFUSED;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
				stcb->sctp_socket->so_error = ECONNRESET;
			}
		} else {
			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
			    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
				stcb->sctp_socket->so_error = ETIMEDOUT;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
				stcb->sctp_socket->so_error = ECONNABORTED;
			}
		}
		SOCK_UNLOCK(stcb->sctp_socket);
	}
	/* Wake ANY sleepers */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(stcb->sctp_ep);
	if (!so_locked) {
		/* Hold a ref across the unlock/lock so the stcb cannot
		 * be freed while we take the socket lock first. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
	}
#endif
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		socantrcvmore(stcb->sctp_socket);
	}
	sorwakeup(stcb->sctp_socket);
	sowwakeup(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
2908 
/*
 * Deliver an SCTP_PEER_ADDR_CHANGE notification for peer address 'sa'
 * (state: SCTP_ADDR_AVAILABLE/UNREACHABLE/etc., with 'error' as detail)
 * to the socket's receive queue, if the application enabled peer-address
 * events.  The address is copied into spc_aaddr, mapped to v4-mapped-v6
 * form when the socket requests it, and link-local IPv6 scopes are made
 * user-visible.  Caller holds the TCB lock.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	memset(spc, 0, sizeof(struct sctp_paddr_change));
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	switch (sa->sa_family) {
#ifdef INET
	case AF_INET:
#ifdef INET6
		/* Present as v4-mapped IPv6 when the app asked for it. */
		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
			    (struct sockaddr_in6 *)&spc->spc_aaddr);
		} else {
			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		}
#else
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
#endif
		break;
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
				if (sin6->sin6_scope_id == 0) {
					/* recover scope_id for user */
					(void)sa6_recoverscope(sin6);
				} else {
					/* clear embedded scope_id for user */
					in6_clearscope(&sin6->sin6_addr);
				}
			}
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
3000 
3001 
/*
 * Queue an SCTP_SEND_FAILED (old API) or SCTP_SEND_FAILED_EVENT (new API)
 * notification for a data chunk that was sent (sent != 0) or never sent.
 * The user payload mbufs are stolen from 'chk' and chained behind the
 * notification header, after trimming the SCTP chunk header and padding.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	struct sctp_chunkhdr *chkhdr;
	int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}

	/* The new event format takes precedence when subscribed. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		notifhdr_len = sizeof(struct sctp_send_failed_event);
	} else {
		notifhdr_len = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = notifhdr_len;
	/* I-DATA chunks carry a larger chunk header than DATA chunks. */
	if (stcb->asoc.idata_supported) {
		chkhdr_len = sizeof(struct sctp_idata_chunk);
	} else {
		chkhdr_len = sizeof(struct sctp_data_chunk);
	}
	/* Use some defaults in case we can't access the chunk header */
	if (chk->send_size >= chkhdr_len) {
		payload_len = chk->send_size - chkhdr_len;
	} else {
		payload_len = 0;
	}
	padding_len = 0;
	if (chk->data != NULL) {
		/* Refine payload/padding from the on-wire chunk header. */
		chkhdr = mtod(chk->data, struct sctp_chunkhdr *);
		if (chkhdr != NULL) {
			chk_len = ntohs(chkhdr->chunk_length);
			/* Only trust it when the accounting is consistent. */
			if ((chk_len >= chkhdr_len) &&
			    (chk->send_size >= chk_len) &&
			    (chk->send_size - chk_len < 4)) {
				padding_len = chk->send_size - chk_len;
				payload_len = chk->send_size - chkhdr_len - padding_len;
			}
		}
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, notifhdr_len);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		if (sent) {
			ssfe->ssfe_flags = SCTP_DATA_SENT;
		} else {
			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		}
		ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len);
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = chk->rec.data.sid;
		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
		ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid;
		ssfe->ssfe_info.snd_context = chk->rec.data.context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
	} else {
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, notifhdr_len);
		ssf->ssf_type = SCTP_SEND_FAILED;
		if (sent) {
			ssf->ssf_flags = SCTP_DATA_SENT;
		} else {
			ssf->ssf_flags = SCTP_DATA_UNSENT;
		}
		ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len);
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssf->ssf_info.sinfo_stream = chk->rec.data.sid;
		ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid;
		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
		ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid;
		ssf->ssf_info.sinfo_context = chk->rec.data.context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
	}
	if (chk->data != NULL) {
		/* Trim off the sctp chunk header (it should be there) */
		if (chk->send_size == chkhdr_len + payload_len + padding_len) {
			m_adj(chk->data, chkhdr_len);
			m_adj(chk->data, -padding_len);
			sctp_mbuf_crush(chk->data);
			chk->send_size -= (chkhdr_len + padding_len);
		}
	}
	SCTP_BUF_NEXT(m_notify) = chk->data;
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
3136 
3137 
/*
 * Queue a send-failed notification for a stream-queue-pending message,
 * i.e. user data that never made it into a data chunk.  Flags are always
 * SCTP_DATA_UNSENT here; the payload mbufs are stolen from 'sp' and
 * chained behind the notification header.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int notifhdr_len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	/* The new event format takes precedence when subscribed. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		notifhdr_len = sizeof(struct sctp_send_failed_event);
	} else {
		notifhdr_len = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* no space left */
		return;
	}
	SCTP_BUF_LEN(m_notify) = notifhdr_len;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, notifhdr_len);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length);
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = sp->sid;
		/* If part was already taken, mark what remains as last frag. */
		if (sp->some_taken) {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
		}
		ssfe->ssfe_info.snd_ppid = sp->ppid;
		ssfe->ssfe_info.snd_context = sp->context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
	} else {
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, notifhdr_len);
		ssf->ssf_type = SCTP_SEND_FAILED;
		ssf->ssf_flags = SCTP_DATA_UNSENT;
		ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length);
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssf->ssf_info.sinfo_stream = sp->sid;
		ssf->ssf_info.sinfo_ssn = 0;
		/* If part was already taken, mark what remains as last frag. */
		if (sp->some_taken) {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
		}
		ssf->ssf_info.sinfo_ppid = sp->ppid;
		ssf->ssf_info.sinfo_context = sp->context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
	}
	SCTP_BUF_NEXT(m_notify) = sp->data;

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
3237 
3238 
3239 
3240 static void
3241 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3242 {
3243 	struct mbuf *m_notify;
3244 	struct sctp_adaptation_event *sai;
3245 	struct sctp_queued_to_read *control;
3246 
3247 	if ((stcb == NULL) ||
3248 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3249 		/* event not enabled */
3250 		return;
3251 	}
3252 
3253 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
3254 	if (m_notify == NULL)
3255 		/* no space left */
3256 		return;
3257 	SCTP_BUF_LEN(m_notify) = 0;
3258 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3259 	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3260 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3261 	sai->sai_flags = 0;
3262 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3263 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3264 	sai->sai_assoc_id = sctp_get_associd(stcb);
3265 
3266 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3267 	SCTP_BUF_NEXT(m_notify) = NULL;
3268 
3269 	/* append to socket */
3270 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3271 	    0, 0, stcb->asoc.context, 0, 0, 0,
3272 	    m_notify);
3273 	if (control == NULL) {
3274 		/* no memory */
3275 		sctp_m_freem(m_notify);
3276 		return;
3277 	}
3278 	control->length = SCTP_BUF_LEN(m_notify);
3279 	control->spec_flags = M_NOTIFICATION;
3280 	/* not that we need this */
3281 	control->tail_mbuf = m_notify;
3282 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3283 	    control,
3284 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3285 }
3286 
3287 /* This always must be called with the read-queue LOCKED in the INP */
static void
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
    uint32_t val, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_pdapi_event *pdapi;
	struct sctp_queued_to_read *control;
	struct sockbuf *sb;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
		/* event not enabled */
		return;
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* nobody can read it anyway */
		return;
	}

	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
	pdapi->pdapi_flags = 0;
	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
	pdapi->pdapi_indication = error;
	/* 'val' packs the stream in the high 16 bits, the seq in the low. */
	pdapi->pdapi_stream = (val >> 16);
	pdapi->pdapi_seq = (val & 0x0000ffff);
	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	/*
	 * Account for the mbuf in the socket buffer and splice the entry
	 * into the read queue by hand (instead of sctp_add_to_readq), since
	 * the caller already holds the inp read-queue lock.
	 */
	sb = &stcb->sctp_socket->so_rcv;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
	}
	sctp_sballoc(stcb, sb, m_notify);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
	}
	control->end_added = 1;
	/* Place the event right after the PD-API message it refers to. */
	if (stcb->asoc.control_pdapi)
		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
	else {
		/* we really should not see this case */
		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
	}
	if (stcb->sctp_ep && stcb->sctp_socket) {
		/* This should always be the case */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * Take the socket lock; hold a refcount across the tcb
		 * unlock/relock so the assoc cannot go away under us.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
3380 
/*
 * Handle a completed SHUTDOWN: for 1-to-1 style (and TCP-pool) sockets
 * mark the socket as unable to send, then queue an SCTP_SHUTDOWN_EVENT
 * notification if the user subscribed to it.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * Take the socket lock; hold a refcount across the tcb
		 * unlock/relock so the assoc cannot go away under us.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}

	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	memset(sse, 0, sizeof(struct sctp_shutdown_event));
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3450 
3451 static void
3452 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3453     int so_locked
3454 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3455     SCTP_UNUSED
3456 #endif
3457 )
3458 {
3459 	struct mbuf *m_notify;
3460 	struct sctp_sender_dry_event *event;
3461 	struct sctp_queued_to_read *control;
3462 
3463 	if ((stcb == NULL) ||
3464 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3465 		/* event not enabled */
3466 		return;
3467 	}
3468 
3469 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3470 	if (m_notify == NULL) {
3471 		/* no space left */
3472 		return;
3473 	}
3474 	SCTP_BUF_LEN(m_notify) = 0;
3475 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3476 	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3477 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3478 	event->sender_dry_flags = 0;
3479 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3480 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3481 
3482 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3483 	SCTP_BUF_NEXT(m_notify) = NULL;
3484 
3485 	/* append to socket */
3486 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3487 	    0, 0, stcb->asoc.context, 0, 0, 0,
3488 	    m_notify);
3489 	if (control == NULL) {
3490 		/* no memory */
3491 		sctp_m_freem(m_notify);
3492 		return;
3493 	}
3494 	control->length = SCTP_BUF_LEN(m_notify);
3495 	control->spec_flags = M_NOTIFICATION;
3496 	/* not that we need this */
3497 	control->tail_mbuf = m_notify;
3498 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3499 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3500 }
3501 
3502 
3503 void
3504 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3505 {
3506 	struct mbuf *m_notify;
3507 	struct sctp_queued_to_read *control;
3508 	struct sctp_stream_change_event *stradd;
3509 
3510 	if ((stcb == NULL) ||
3511 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3512 		/* event not enabled */
3513 		return;
3514 	}
3515 	if ((stcb->asoc.peer_req_out) && flag) {
3516 		/* Peer made the request, don't tell the local user */
3517 		stcb->asoc.peer_req_out = 0;
3518 		return;
3519 	}
3520 	stcb->asoc.peer_req_out = 0;
3521 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3522 	if (m_notify == NULL)
3523 		/* no space left */
3524 		return;
3525 	SCTP_BUF_LEN(m_notify) = 0;
3526 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3527 	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3528 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3529 	stradd->strchange_flags = flag;
3530 	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3531 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3532 	stradd->strchange_instrms = numberin;
3533 	stradd->strchange_outstrms = numberout;
3534 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3535 	SCTP_BUF_NEXT(m_notify) = NULL;
3536 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3537 		/* no space */
3538 		sctp_m_freem(m_notify);
3539 		return;
3540 	}
3541 	/* append to socket */
3542 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3543 	    0, 0, stcb->asoc.context, 0, 0, 0,
3544 	    m_notify);
3545 	if (control == NULL) {
3546 		/* no memory */
3547 		sctp_m_freem(m_notify);
3548 		return;
3549 	}
3550 	control->length = SCTP_BUF_LEN(m_notify);
3551 	control->spec_flags = M_NOTIFICATION;
3552 	/* not that we need this */
3553 	control->tail_mbuf = m_notify;
3554 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3555 	    control,
3556 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3557 }
3558 
3559 void
3560 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3561 {
3562 	struct mbuf *m_notify;
3563 	struct sctp_queued_to_read *control;
3564 	struct sctp_assoc_reset_event *strasoc;
3565 
3566 	if ((stcb == NULL) ||
3567 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3568 		/* event not enabled */
3569 		return;
3570 	}
3571 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3572 	if (m_notify == NULL)
3573 		/* no space left */
3574 		return;
3575 	SCTP_BUF_LEN(m_notify) = 0;
3576 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3577 	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3578 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3579 	strasoc->assocreset_flags = flag;
3580 	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3581 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3582 	strasoc->assocreset_local_tsn = sending_tsn;
3583 	strasoc->assocreset_remote_tsn = recv_tsn;
3584 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3585 	SCTP_BUF_NEXT(m_notify) = NULL;
3586 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3587 		/* no space */
3588 		sctp_m_freem(m_notify);
3589 		return;
3590 	}
3591 	/* append to socket */
3592 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3593 	    0, 0, stcb->asoc.context, 0, 0, 0,
3594 	    m_notify);
3595 	if (control == NULL) {
3596 		/* no memory */
3597 		sctp_m_freem(m_notify);
3598 		return;
3599 	}
3600 	control->length = SCTP_BUF_LEN(m_notify);
3601 	control->spec_flags = M_NOTIFICATION;
3602 	/* not that we need this */
3603 	control->tail_mbuf = m_notify;
3604 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3605 	    control,
3606 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3607 }
3608 
3609 
3610 
3611 static void
3612 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3613     int number_entries, uint16_t *list, int flag)
3614 {
3615 	struct mbuf *m_notify;
3616 	struct sctp_queued_to_read *control;
3617 	struct sctp_stream_reset_event *strreset;
3618 	int len;
3619 
3620 	if ((stcb == NULL) ||
3621 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3622 		/* event not enabled */
3623 		return;
3624 	}
3625 
3626 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3627 	if (m_notify == NULL)
3628 		/* no space left */
3629 		return;
3630 	SCTP_BUF_LEN(m_notify) = 0;
3631 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3632 	if (len > M_TRAILINGSPACE(m_notify)) {
3633 		/* never enough room */
3634 		sctp_m_freem(m_notify);
3635 		return;
3636 	}
3637 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3638 	memset(strreset, 0, len);
3639 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3640 	strreset->strreset_flags = flag;
3641 	strreset->strreset_length = len;
3642 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3643 	if (number_entries) {
3644 		int i;
3645 
3646 		for (i = 0; i < number_entries; i++) {
3647 			strreset->strreset_stream_list[i] = ntohs(list[i]);
3648 		}
3649 	}
3650 	SCTP_BUF_LEN(m_notify) = len;
3651 	SCTP_BUF_NEXT(m_notify) = NULL;
3652 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3653 		/* no space */
3654 		sctp_m_freem(m_notify);
3655 		return;
3656 	}
3657 	/* append to socket */
3658 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3659 	    0, 0, stcb->asoc.context, 0, 0, 0,
3660 	    m_notify);
3661 	if (control == NULL) {
3662 		/* no memory */
3663 		sctp_m_freem(m_notify);
3664 		return;
3665 	}
3666 	control->length = SCTP_BUF_LEN(m_notify);
3667 	control->spec_flags = M_NOTIFICATION;
3668 	/* not that we need this */
3669 	control->tail_mbuf = m_notify;
3670 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3671 	    control,
3672 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3673 }
3674 
3675 
/*
 * Queue an SCTP_REMOTE_ERROR notification carrying a copy of the peer's
 * ERROR chunk (if any), if the user subscribed to
 * SCTP_PCB_FLAGS_RECVPEERERR.
 */
static void
sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
{
	struct mbuf *m_notify;
	struct sctp_remote_error *sre;
	struct sctp_queued_to_read *control;
	unsigned int notif_len;
	uint16_t chunk_len;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
		/* event not enabled */
		return;
	}
	if (chunk != NULL) {
		chunk_len = ntohs(chunk->ch.chunk_length);
		/*
		 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be
		 * contiguous.
		 */
		if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) {
			chunk_len = SCTP_CHUNK_BUFFER_SIZE;
		}
	} else {
		chunk_len = 0;
	}
	notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* Retry with smaller value. */
		notif_len = (unsigned int)sizeof(struct sctp_remote_error);
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			/* give up; notification is best effort */
			return;
		}
	}
	SCTP_BUF_NEXT(m_notify) = NULL;
	sre = mtod(m_notify, struct sctp_remote_error *);
	memset(sre, 0, notif_len);
	sre->sre_type = SCTP_REMOTE_ERROR;
	sre->sre_flags = 0;
	sre->sre_length = sizeof(struct sctp_remote_error);
	sre->sre_error = error;
	sre->sre_assoc_id = sctp_get_associd(stcb);
	/* Append the chunk data only if the larger allocation succeeded. */
	if (notif_len > sizeof(struct sctp_remote_error)) {
		memcpy(sre->sre_data, chunk, chunk_len);
		sre->sre_length += chunk_len;
	}
	SCTP_BUF_LEN(m_notify) = sre->sre_length;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control != NULL) {
		control->length = SCTP_BUF_LEN(m_notify);
		control->spec_flags = M_NOTIFICATION;
		/* not that we need this */
		control->tail_mbuf = m_notify;
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
	} else {
		/* no memory */
		sctp_m_freem(m_notify);
	}
}
3740 
3741 
3742 void
3743 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3744     uint32_t error, void *data, int so_locked
3745 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3746     SCTP_UNUSED
3747 #endif
3748 )
3749 {
3750 	if ((stcb == NULL) ||
3751 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3752 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3753 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3754 		/* If the socket is gone we are out of here */
3755 		return;
3756 	}
3757 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3758 		return;
3759 	}
3760 	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
3761 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
3762 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3763 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3764 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3765 			/* Don't report these in front states */
3766 			return;
3767 		}
3768 	}
3769 	switch (notification) {
3770 	case SCTP_NOTIFY_ASSOC_UP:
3771 		if (stcb->asoc.assoc_up_sent == 0) {
3772 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
3773 			stcb->asoc.assoc_up_sent = 1;
3774 		}
3775 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3776 			sctp_notify_adaptation_layer(stcb);
3777 		}
3778 		if (stcb->asoc.auth_supported == 0) {
3779 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3780 			    NULL, so_locked);
3781 		}
3782 		break;
3783 	case SCTP_NOTIFY_ASSOC_DOWN:
3784 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
3785 		break;
3786 	case SCTP_NOTIFY_INTERFACE_DOWN:
3787 		{
3788 			struct sctp_nets *net;
3789 
3790 			net = (struct sctp_nets *)data;
3791 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3792 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3793 			break;
3794 		}
3795 	case SCTP_NOTIFY_INTERFACE_UP:
3796 		{
3797 			struct sctp_nets *net;
3798 
3799 			net = (struct sctp_nets *)data;
3800 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3801 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3802 			break;
3803 		}
3804 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3805 		{
3806 			struct sctp_nets *net;
3807 
3808 			net = (struct sctp_nets *)data;
3809 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3810 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3811 			break;
3812 		}
3813 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3814 		sctp_notify_send_failed2(stcb, error,
3815 		    (struct sctp_stream_queue_pending *)data, so_locked);
3816 		break;
3817 	case SCTP_NOTIFY_SENT_DG_FAIL:
3818 		sctp_notify_send_failed(stcb, 1, error,
3819 		    (struct sctp_tmit_chunk *)data, so_locked);
3820 		break;
3821 	case SCTP_NOTIFY_UNSENT_DG_FAIL:
3822 		sctp_notify_send_failed(stcb, 0, error,
3823 		    (struct sctp_tmit_chunk *)data, so_locked);
3824 		break;
3825 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3826 		{
3827 			uint32_t val;
3828 
3829 			val = *((uint32_t *)data);
3830 
3831 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3832 			break;
3833 		}
3834 	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
3835 		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
3836 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
3837 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
3838 		} else {
3839 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
3840 		}
3841 		break;
3842 	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
3843 		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
3844 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
3845 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
3846 		} else {
3847 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
3848 		}
3849 		break;
3850 	case SCTP_NOTIFY_ASSOC_RESTART:
3851 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
3852 		if (stcb->asoc.auth_supported == 0) {
3853 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3854 			    NULL, so_locked);
3855 		}
3856 		break;
3857 	case SCTP_NOTIFY_STR_RESET_SEND:
3858 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN);
3859 		break;
3860 	case SCTP_NOTIFY_STR_RESET_RECV:
3861 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING);
3862 		break;
3863 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3864 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3865 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
3866 		break;
3867 	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
3868 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3869 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
3870 		break;
3871 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3872 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3873 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
3874 		break;
3875 	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
3876 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3877 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
3878 		break;
3879 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3880 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3881 		    error, so_locked);
3882 		break;
3883 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3884 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3885 		    error, so_locked);
3886 		break;
3887 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3888 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3889 		    error, so_locked);
3890 		break;
3891 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3892 		sctp_notify_shutdown_event(stcb);
3893 		break;
3894 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3895 		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
3896 		    (uint16_t)(uintptr_t)data,
3897 		    so_locked);
3898 		break;
3899 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3900 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3901 		    (uint16_t)(uintptr_t)data,
3902 		    so_locked);
3903 		break;
3904 	case SCTP_NOTIFY_NO_PEER_AUTH:
3905 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3906 		    (uint16_t)(uintptr_t)data,
3907 		    so_locked);
3908 		break;
3909 	case SCTP_NOTIFY_SENDER_DRY:
3910 		sctp_notify_sender_dry_event(stcb, so_locked);
3911 		break;
3912 	case SCTP_NOTIFY_REMOTE_ERROR:
3913 		sctp_notify_remote_error(stcb, error, data);
3914 		break;
3915 	default:
3916 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3917 		    __func__, notification, notification);
3918 		break;
3919 	}			/* end switch */
3920 }
3921 
/*
 * Abort-time cleanup of all outbound data on an association: every chunk
 * still sitting on the sent queue, the send queue, or any per-stream
 * output queue is failed back to the ULP via sctp_ulp_notify()
 * (SENT_DG_FAIL / UNSENT_DG_FAIL / SPECIAL_SP_FAIL, carrying 'error')
 * and its data is freed.  'holds_lock' non-zero means the caller already
 * holds the TCB send lock, so it is neither taken nor released here.
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	/* Nothing to report once the socket is gone or closed. */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
			/* NR-acked chunks were already taken off the stream count. */
			if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
				asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
			} else {
				panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
			}
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			/*
			 * Re-check: the notification path may have consumed
			 * the mbuf chain (chk->data can be NULL afterwards).
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
			asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
		} else {
			panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			/* Re-check: notification may have consumed the data. */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, 1);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    error, (void *)sp, so_locked);
				/* Re-check: notification may have consumed the data. */
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
					sp->tail_mbuf = NULL;
					sp->length = 0;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}
4033 
4034 void
4035 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
4036     struct sctp_abort_chunk *abort, int so_locked
4037 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4038     SCTP_UNUSED
4039 #endif
4040 )
4041 {
4042 	if (stcb == NULL) {
4043 		return;
4044 	}
4045 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
4046 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
4047 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
4048 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
4049 	}
4050 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
4051 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
4052 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
4053 		return;
4054 	}
4055 	/* Tell them we lost the asoc */
4056 	sctp_report_all_outbound(stcb, error, 0, so_locked);
4057 	if (from_peer) {
4058 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
4059 	} else {
4060 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
4061 	}
4062 }
4063 
/*
 * Send an ABORT in response to the given packet and, when a TCB exists,
 * tear the association down: notify the ULP, update statistics, and free
 * the asoc.  When 'stcb' is NULL only the ABORT packet is sent, using
 * verification tag 0.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct mbuf *op_err,
    uint8_t mflowtype, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

	vtag = 0;
	if (stcb != NULL) {
		/* Use the peer's verification tag and the asoc's VRF. */
		vtag = stcb->asoc.peer_vtag;
		vrf_id = stcb->asoc.vrf_id;
	}
	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
	    mflowtype, mflowid, inp->fibnum,
	    vrf_id, port);
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
		SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED);
		/* Ok, now lets free it */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Keep the stcb referenced while its lock is dropped so it
		 * cannot be freed underneath us; the socket lock is taken
		 * before re-acquiring the TCB lock (presumably to honor
		 * socket-before-TCB lock ordering -- confirm).
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		/* One fewer established association. */
		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
4110 #ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug dump of the inbound and outbound TSN tracking logs, printing the
 * circular in_tsnlog/out_tsnlog buffers in chronological order (the
 * wrapped tail first, then the entries up to the current index).
 * NOTE(review): the body is gated on NOSIY_PRINTS, which looks like a
 * typo for "NOISY_PRINTS"; since it is never defined here, this function
 * currently compiles to a no-op -- confirm upstream intent before renaming.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/* When wrapped, the oldest entries run from tsn_in_at to the end. */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
4171 #endif
4172 
/*
 * Abort an existing association: send an ABORT chunk to the peer (with
 * the optional 'op_err' cause chain), notify the ULP unless the socket
 * is already gone, and free the association.  With a NULL 'stcb' and a
 * SOCKET_GONE endpoint that has no remaining associations, the inp
 * itself is freed instead.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED);
	}
	/* notify the peer */
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* One fewer established association. */
	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Keep the stcb referenced while its lock is dropped to take the
	 * socket lock first (presumably lock-order requirement -- confirm).
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
4233 
4234 void
4235 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
4236     struct sockaddr *src, struct sockaddr *dst,
4237     struct sctphdr *sh, struct sctp_inpcb *inp,
4238     struct mbuf *cause,
4239     uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
4240     uint32_t vrf_id, uint16_t port)
4241 {
4242 	struct sctp_chunkhdr *ch, chunk_buf;
4243 	unsigned int chk_length;
4244 	int contains_init_chunk;
4245 
4246 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
4247 	/* Generate a TO address for future reference */
4248 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4249 		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4250 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4251 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
4252 		}
4253 	}
4254 	contains_init_chunk = 0;
4255 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4256 	    sizeof(*ch), (uint8_t *)&chunk_buf);
4257 	while (ch != NULL) {
4258 		chk_length = ntohs(ch->chunk_length);
4259 		if (chk_length < sizeof(*ch)) {
4260 			/* break to abort land */
4261 			break;
4262 		}
4263 		switch (ch->chunk_type) {
4264 		case SCTP_INIT:
4265 			contains_init_chunk = 1;
4266 			break;
4267 		case SCTP_PACKET_DROPPED:
4268 			/* we don't respond to pkt-dropped */
4269 			return;
4270 		case SCTP_ABORT_ASSOCIATION:
4271 			/* we don't respond with an ABORT to an ABORT */
4272 			return;
4273 		case SCTP_SHUTDOWN_COMPLETE:
4274 			/*
4275 			 * we ignore it since we are not waiting for it and
4276 			 * peer is gone
4277 			 */
4278 			return;
4279 		case SCTP_SHUTDOWN_ACK:
4280 			sctp_send_shutdown_complete2(src, dst, sh,
4281 			    mflowtype, mflowid, fibnum,
4282 			    vrf_id, port);
4283 			return;
4284 		default:
4285 			break;
4286 		}
4287 		offset += SCTP_SIZE32(chk_length);
4288 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4289 		    sizeof(*ch), (uint8_t *)&chunk_buf);
4290 	}
4291 	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
4292 	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
4293 	    (contains_init_chunk == 0))) {
4294 		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
4295 		    mflowtype, mflowid, fibnum,
4296 		    vrf_id, port);
4297 	}
4298 }
4299 
4300 /*
4301  * check the inbound datagram to make sure there is not an abort inside it,
4302  * if there is return 1, else return 0.
4303  */
4304 int
4305 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtagfill)
4306 {
4307 	struct sctp_chunkhdr *ch;
4308 	struct sctp_init_chunk *init_chk, chunk_buf;
4309 	int offset;
4310 	unsigned int chk_length;
4311 
4312 	offset = iphlen + sizeof(struct sctphdr);
4313 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4314 	    (uint8_t *)&chunk_buf);
4315 	while (ch != NULL) {
4316 		chk_length = ntohs(ch->chunk_length);
4317 		if (chk_length < sizeof(*ch)) {
4318 			/* packet is probably corrupt */
4319 			break;
4320 		}
4321 		/* we seem to be ok, is it an abort? */
4322 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4323 			/* yep, tell them */
4324 			return (1);
4325 		}
4326 		if (ch->chunk_type == SCTP_INITIATION) {
4327 			/* need to update the Vtag */
4328 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4329 			    offset, sizeof(*init_chk), (uint8_t *)&chunk_buf);
4330 			if (init_chk != NULL) {
4331 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4332 			}
4333 		}
4334 		/* Nope, move to the next chunk */
4335 		offset += SCTP_SIZE32(chk_length);
4336 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4337 		    sizeof(*ch), (uint8_t *)&chunk_buf);
4338 	}
4339 	return (0);
4340 }
4341 
4342 /*
4343  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4344  * set (i.e. it's 0) so, create this function to compare link local scopes
4345  */
4346 #ifdef INET6
4347 uint32_t
4348 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4349 {
4350 	struct sockaddr_in6 a, b;
4351 
4352 	/* save copies */
4353 	a = *addr1;
4354 	b = *addr2;
4355 
4356 	if (a.sin6_scope_id == 0)
4357 		if (sa6_recoverscope(&a)) {
4358 			/* can't get scope, so can't match */
4359 			return (0);
4360 		}
4361 	if (b.sin6_scope_id == 0)
4362 		if (sa6_recoverscope(&b)) {
4363 			/* can't get scope, so can't match */
4364 			return (0);
4365 		}
4366 	if (a.sin6_scope_id != b.sin6_scope_id)
4367 		return (0);
4368 
4369 	return (1);
4370 }
4371 
4372 /*
4373  * returns a sockaddr_in6 with embedded scope recovered and removed
4374  */
4375 struct sockaddr_in6 *
4376 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4377 {
4378 	/* check and strip embedded scope junk */
4379 	if (addr->sin6_family == AF_INET6) {
4380 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4381 			if (addr->sin6_scope_id == 0) {
4382 				*store = *addr;
4383 				if (!sa6_recoverscope(store)) {
4384 					/* use the recovered scope */
4385 					addr = store;
4386 				}
4387 			} else {
4388 				/* else, return the original "to" addr */
4389 				in6_clearscope(&addr->sin6_addr);
4390 			}
4391 		}
4392 	}
4393 	return (addr);
4394 }
4395 #endif
4396 
4397 /*
4398  * are the two addresses the same?  currently a "scopeless" check returns: 1
4399  * if same, 0 if not
4400  */
4401 int
4402 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4403 {
4404 
4405 	/* must be valid */
4406 	if (sa1 == NULL || sa2 == NULL)
4407 		return (0);
4408 
4409 	/* must be the same family */
4410 	if (sa1->sa_family != sa2->sa_family)
4411 		return (0);
4412 
4413 	switch (sa1->sa_family) {
4414 #ifdef INET6
4415 	case AF_INET6:
4416 		{
4417 			/* IPv6 addresses */
4418 			struct sockaddr_in6 *sin6_1, *sin6_2;
4419 
4420 			sin6_1 = (struct sockaddr_in6 *)sa1;
4421 			sin6_2 = (struct sockaddr_in6 *)sa2;
4422 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4423 			    sin6_2));
4424 		}
4425 #endif
4426 #ifdef INET
4427 	case AF_INET:
4428 		{
4429 			/* IPv4 addresses */
4430 			struct sockaddr_in *sin_1, *sin_2;
4431 
4432 			sin_1 = (struct sockaddr_in *)sa1;
4433 			sin_2 = (struct sockaddr_in *)sa2;
4434 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4435 		}
4436 #endif
4437 	default:
4438 		/* we don't do these... */
4439 		return (0);
4440 	}
4441 }
4442 
4443 void
4444 sctp_print_address(struct sockaddr *sa)
4445 {
4446 #ifdef INET6
4447 	char ip6buf[INET6_ADDRSTRLEN];
4448 #endif
4449 
4450 	switch (sa->sa_family) {
4451 #ifdef INET6
4452 	case AF_INET6:
4453 		{
4454 			struct sockaddr_in6 *sin6;
4455 
4456 			sin6 = (struct sockaddr_in6 *)sa;
4457 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4458 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4459 			    ntohs(sin6->sin6_port),
4460 			    sin6->sin6_scope_id);
4461 			break;
4462 		}
4463 #endif
4464 #ifdef INET
4465 	case AF_INET:
4466 		{
4467 			struct sockaddr_in *sin;
4468 			unsigned char *p;
4469 
4470 			sin = (struct sockaddr_in *)sa;
4471 			p = (unsigned char *)&sin->sin_addr;
4472 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4473 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4474 			break;
4475 		}
4476 #endif
4477 	default:
4478 		SCTP_PRINTF("?\n");
4479 		break;
4480 	}
4481 }
4482 
/*
 * Move all read-queue control structures belonging to 'stcb' from
 * 'old_inp' to 'new_inp' (used by peeloff/accept), transferring the
 * corresponding socket-buffer accounting from the old socket's receive
 * buffer to the new one.  'waitflags' is passed to sblock(); on sblock
 * failure the data is intentionally left on the old socket.
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for out target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* Release the old socket's sb accounting for each mbuf. */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* Charge the new socket's sb accounting for each mbuf. */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4558 
/*
 * Wake up any reader sleeping on the endpoint's socket receive buffer.
 * On platforms that require the socket lock (__APPLE__ /
 * SCTP_SO_LOCK_TESTING) the lock is taken first unless the caller
 * already holds it ('so_locked'); the stcb is kept referenced while its
 * lock is dropped for the lock swap.  No-op if inp or its socket is NULL.
 */
void
sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((inp != NULL) && (inp->sctp_socket != NULL)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(inp);
		if (!so_locked) {
			/* Hold the stcb across the lock juggling. */
			if (stcb) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
			}
			SCTP_SOCKET_LOCK(so, 1);
			if (stcb) {
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
			/* Socket may have disappeared while unlocked. */
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
4597 
/*
 * Append a queued-to-read control (and its mbuf chain) to the endpoint's
 * read queue, charging the socket buffer 'sb' for each mbuf so select()
 * and friends see the data.  Zero-length mbufs are pruned from the
 * chain.  'end' marks the message complete (end_added); 'inp_read_lock_held'
 * says the caller already holds the INP read lock.  When the socket can
 * no longer be read from, the control (if not on a stream queue) and its
 * data are freed instead.  Wakes the reader on success.
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* Reader is gone: discard instead of queueing. */
		if (!control->on_strm_q) {
			sctp_free_remote_addr(control->whoFrom);
			if (control->data) {
				sctp_m_freem(control->data);
				control->data = NULL;
			}
			sctp_free_a_readq(stcb, control);
		}
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	/* Notifications don't count as received user messages. */
	if (!(control->spec_flags & M_NOTIFICATION)) {
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	/* Walk the chain, dropping empty mbufs and charging sb for the rest. */
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		if (!control->on_strm_q) {
			sctp_free_remote_addr(control->whoFrom);
			sctp_free_a_readq(stcb, control);
		}
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	control->on_read_q = 1;
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	if (inp && inp->sctp_socket) {
		sctp_wakeup_the_read_socket(inp, stcb, so_locked);
	}
}
4699 
4700 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4701  *************ALTERNATE ROUTING CODE
4702  */
4703 
4704 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4705  *************ALTERNATE ROUTING CODE
4706  */
4707 
4708 struct mbuf *
4709 sctp_generate_cause(uint16_t code, char *info)
4710 {
4711 	struct mbuf *m;
4712 	struct sctp_gen_error_cause *cause;
4713 	size_t info_len;
4714 	uint16_t len;
4715 
4716 	if ((code == 0) || (info == NULL)) {
4717 		return (NULL);
4718 	}
4719 	info_len = strlen(info);
4720 	if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
4721 		return (NULL);
4722 	}
4723 	len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len);
4724 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4725 	if (m != NULL) {
4726 		SCTP_BUF_LEN(m) = len;
4727 		cause = mtod(m, struct sctp_gen_error_cause *);
4728 		cause->code = htons(code);
4729 		cause->length = htons(len);
4730 		memcpy(cause->info, info, info_len);
4731 	}
4732 	return (m);
4733 }
4734 
4735 struct mbuf *
4736 sctp_generate_no_user_data_cause(uint32_t tsn)
4737 {
4738 	struct mbuf *m;
4739 	struct sctp_error_no_user_data *no_user_data_cause;
4740 	uint16_t len;
4741 
4742 	len = (uint16_t)sizeof(struct sctp_error_no_user_data);
4743 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4744 	if (m != NULL) {
4745 		SCTP_BUF_LEN(m) = len;
4746 		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4747 		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4748 		no_user_data_cause->cause.length = htons(len);
4749 		no_user_data_cause->tsn = htonl(tsn);
4750 	}
4751 	return (m);
4752 }
4753 
4754 #ifdef SCTP_MBCNT_LOGGING
4755 void
4756 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4757     struct sctp_tmit_chunk *tp1, int chk_cnt)
4758 {
4759 	if (tp1->data == NULL) {
4760 		return;
4761 	}
4762 	asoc->chunks_on_out_queue -= chk_cnt;
4763 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4764 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4765 		    asoc->total_output_queue_size,
4766 		    tp1->book_size,
4767 		    0,
4768 		    tp1->mbcnt);
4769 	}
4770 	if (asoc->total_output_queue_size >= tp1->book_size) {
4771 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4772 	} else {
4773 		asoc->total_output_queue_size = 0;
4774 	}
4775 
4776 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4777 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4778 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4779 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4780 		} else {
4781 			stcb->sctp_socket->so_snd.sb_cc = 0;
4782 
4783 		}
4784 	}
4785 }
4786 
4787 #endif
4788 
/*
 * Abandon a (possibly fragmented) PR-SCTP message, starting at chunk tp1.
 * Updates the abandoned-message statistics, frees the chunk data, walks
 * forward over the remaining fragments on the sent queue, and — if the
 * final fragment was not found there — continues onto the send queue and
 * finally the stream-out queue, synthesizing a chunk carrying the LAST
 * fragment bit if necessary so that a FORWARD-TSN can cover the message.
 *
 * 'sent' is non-zero when the message had (at least partially) been sent
 * to the peer; 'so_locked' says whether the caller already holds the
 * socket lock (only consulted on platforms that need the socket-lock
 * dance for the wakeup at the end).
 *
 * Returns the number of bytes released (book-keeping size).
 */
int
sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
    uint8_t sent, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_stream_out *strq;
	struct sctp_tmit_chunk *chk = NULL, *tp2;
	struct sctp_stream_queue_pending *sp;
	uint32_t mid;
	uint16_t sid;
	uint8_t foundeom = 0;
	int ret_sz = 0;
	int notdone;
	int do_wakeup_routine = 0;

	sid = tp1->rec.data.sid;
	mid = tp1->rec.data.mid;
	/*
	 * Bump the abandoned counters: "sent" stats if the message was sent
	 * (or we are past its first fragment), "unsent" stats otherwise.
	 * Index 0 aggregates all policies; PR_SCTP_POLICY() indexes the
	 * per-policy slot.
	 */
	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
		stcb->asoc.abandoned_sent[0]++;
		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
		stcb->asoc.strmout[sid].abandoned_sent[0]++;
#if defined(SCTP_DETAILED_STR_STATS)
		stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
#endif
	} else {
		stcb->asoc.abandoned_unsent[0]++;
		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
		stcb->asoc.strmout[sid].abandoned_unsent[0]++;
#if defined(SCTP_DETAILED_STR_STATS)
		stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
#endif
	}
	/*
	 * Walk the fragments of this message on the current queue, freeing
	 * each one's data and marking it SCTP_FORWARD_TSN_SKIP, until we hit
	 * the last fragment (or an unfragmented chunk) or run off the queue.
	 */
	do {
		ret_sz += tp1->book_size;
		if (tp1->data != NULL) {
			/* Only chunks not yet marked for resend count in flight. */
			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
				sctp_flight_size_decrease(tp1);
				sctp_total_flight_decrease(stcb, tp1);
			}
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			/* Give the space back to the peer's receive window. */
			stcb->asoc.peers_rwnd += tp1->send_size;
			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
			if (sent) {
				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
			} else {
				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
			}
			if (tp1->data) {
				sctp_m_freem(tp1->data);
				tp1->data = NULL;
			}
			do_wakeup_routine = 1;
			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
				stcb->asoc.sent_queue_cnt_removeable--;
			}
		}
		tp1->sent = SCTP_FORWARD_TSN_SKIP;
		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
		    SCTP_DATA_NOT_FRAG) {
			/* not frag'ed we are done */
			notdone = 0;
			foundeom = 1;
		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			/* end of frag, we are done */
			notdone = 0;
			foundeom = 1;
		} else {
			/*
			 * It's a begin or middle piece, we must mark all of
			 * it
			 */
			notdone = 1;
			tp1 = TAILQ_NEXT(tp1, sctp_next);
		}
	} while (tp1 && notdone);
	if (foundeom == 0) {
		/*
		 * The multi-part message was scattered across the send and
		 * sent queue.
		 */
		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
			/* Stop at the first chunk of a different message. */
			if ((tp1->rec.data.sid != sid) ||
			    (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) {
				break;
			}
			/*
			 * save to chk in case we have some on stream out
			 * queue. If so and we have an un-transmitted one we
			 * don't have to fudge the TSN.
			 */
			chk = tp1;
			ret_sz += tp1->book_size;
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			if (sent) {
				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
			} else {
				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
			}
			if (tp1->data) {
				sctp_m_freem(tp1->data);
				tp1->data = NULL;
			}
			/* No flight involved here book the size to 0 */
			tp1->book_size = 0;
			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				foundeom = 1;
			}
			do_wakeup_routine = 1;
			tp1->sent = SCTP_FORWARD_TSN_SKIP;
			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
			/*
			 * on to the sent queue so we can wait for it to be
			 * passed by.
			 */
			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
			    sctp_next);
			stcb->asoc.send_queue_cnt--;
			stcb->asoc.sent_queue_cnt++;
		}
	}
	if (foundeom == 0) {
		/*
		 * Still no eom found. That means there is stuff left on the
		 * stream out queue.. yuck.
		 */
		SCTP_TCB_SEND_LOCK(stcb);
		strq = &stcb->asoc.strmout[sid];
		sp = TAILQ_FIRST(&strq->outqueue);
		if (sp != NULL) {
			sp->discard_rest = 1;
			/*
			 * We may need to put a chunk on the queue that
			 * holds the TSN that would have been sent with the
			 * LAST bit.
			 */
			if (chk == NULL) {
				/* Yep, we have to */
				sctp_alloc_a_chunk(stcb, chk);
				if (chk == NULL) {
					/*
					 * we are hosed. All we can do is
					 * nothing.. which will cause an
					 * abort if the peer is paying
					 * attention.
					 */
					goto oh_well;
				}
				memset(chk, 0, sizeof(*chk));
				chk->rec.data.rcv_flags = 0;
				chk->sent = SCTP_FORWARD_TSN_SKIP;
				chk->asoc = &stcb->asoc;
				/*
				 * Pick the message-id the pending data would
				 * have used; without I-DATA, unordered
				 * messages do not carry a stream sequence.
				 */
				if (stcb->asoc.idata_supported == 0) {
					if (sp->sinfo_flags & SCTP_UNORDERED) {
						chk->rec.data.mid = 0;
					} else {
						chk->rec.data.mid = strq->next_mid_ordered;
					}
				} else {
					if (sp->sinfo_flags & SCTP_UNORDERED) {
						chk->rec.data.mid = strq->next_mid_unordered;
					} else {
						chk->rec.data.mid = strq->next_mid_ordered;
					}
				}
				chk->rec.data.sid = sp->sid;
				chk->rec.data.ppid = sp->ppid;
				chk->rec.data.context = sp->context;
				chk->flags = sp->act_flags;
				chk->whoTo = NULL;
				/* Consume a TSN so FORWARD-TSN can cover it. */
				chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
				strq->chunks_on_queues++;
				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
				stcb->asoc.sent_queue_cnt++;
				stcb->asoc.pr_sctp_cnt++;
			}
			chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
			if (sp->sinfo_flags & SCTP_UNORDERED) {
				chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED;
			}
			/* Advance the stream's next message-id counter. */
			if (stcb->asoc.idata_supported == 0) {
				if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) {
					strq->next_mid_ordered++;
				}
			} else {
				if (sp->sinfo_flags & SCTP_UNORDERED) {
					strq->next_mid_unordered++;
				} else {
					strq->next_mid_ordered++;
				}
			}
	oh_well:
			if (sp->data) {
				/*
				 * Pull any data to free up the SB and allow
				 * sender to "add more" while we will throw
				 * away :-)
				 */
				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
				ret_sz += sp->length;
				do_wakeup_routine = 1;
				sp->some_taken = 1;
				sctp_m_freem(sp->data);
				sp->data = NULL;
				sp->tail_mbuf = NULL;
				sp->length = 0;
			}
		}
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
	if (do_wakeup_routine) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/*
			 * Hold a ref across the unlock/relock so the assoc
			 * cannot be freed out from under us.
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				/* assoc was freed while we were unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return (ret_sz);
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
	return (ret_sz);
}
5028 
5029 /*
5030  * checks to see if the given address, sa, is one that is currently known by
5031  * the kernel note: can't distinguish the same address on multiple interfaces
5032  * and doesn't handle multiple addresses with different zone/scope id's note:
5033  * ifa_ifwithaddr() compares the entire sockaddr struct
5034  */
5035 struct sctp_ifa *
5036 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
5037     int holds_lock)
5038 {
5039 	struct sctp_laddr *laddr;
5040 
5041 	if (holds_lock == 0) {
5042 		SCTP_INP_RLOCK(inp);
5043 	}
5044 
5045 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
5046 		if (laddr->ifa == NULL)
5047 			continue;
5048 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
5049 			continue;
5050 #ifdef INET
5051 		if (addr->sa_family == AF_INET) {
5052 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5053 			    laddr->ifa->address.sin.sin_addr.s_addr) {
5054 				/* found him. */
5055 				if (holds_lock == 0) {
5056 					SCTP_INP_RUNLOCK(inp);
5057 				}
5058 				return (laddr->ifa);
5059 				break;
5060 			}
5061 		}
5062 #endif
5063 #ifdef INET6
5064 		if (addr->sa_family == AF_INET6) {
5065 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5066 			    &laddr->ifa->address.sin6)) {
5067 				/* found him. */
5068 				if (holds_lock == 0) {
5069 					SCTP_INP_RUNLOCK(inp);
5070 				}
5071 				return (laddr->ifa);
5072 				break;
5073 			}
5074 		}
5075 #endif
5076 	}
5077 	if (holds_lock == 0) {
5078 		SCTP_INP_RUNLOCK(inp);
5079 	}
5080 	return (NULL);
5081 }
5082 
/*
 * Compute the hash value used to bucket an address into a VRF's address
 * hash table. For IPv4 the 32-bit address is folded onto itself; for IPv6
 * the four 32-bit words are summed and folded. Unknown families hash to 0.
 */
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
	switch (addr->sa_family) {
#ifdef INET
	case AF_INET:
		{
			uint32_t v4;

			v4 = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
			/* Fold the upper half into the lower half. */
			return (v4 ^ (v4 >> 16));
		}
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *a6;
			uint32_t acc;

			a6 = (struct sockaddr_in6 *)addr;
			/* Sum the four 32-bit words of the v6 address. */
			acc = a6->sin6_addr.s6_addr32[0];
			acc += a6->sin6_addr.s6_addr32[1];
			acc += a6->sin6_addr.s6_addr32[2];
			acc += a6->sin6_addr.s6_addr32[3];
			return (acc ^ (acc >> 16));
		}
#endif
	default:
		break;
	}
	return (0);
}
5116 
5117 struct sctp_ifa *
5118 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5119 {
5120 	struct sctp_ifa *sctp_ifap;
5121 	struct sctp_vrf *vrf;
5122 	struct sctp_ifalist *hash_head;
5123 	uint32_t hash_of_addr;
5124 
5125 	if (holds_lock == 0)
5126 		SCTP_IPI_ADDR_RLOCK();
5127 
5128 	vrf = sctp_find_vrf(vrf_id);
5129 	if (vrf == NULL) {
5130 		if (holds_lock == 0)
5131 			SCTP_IPI_ADDR_RUNLOCK();
5132 		return (NULL);
5133 	}
5134 
5135 	hash_of_addr = sctp_get_ifa_hash_val(addr);
5136 
5137 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5138 	if (hash_head == NULL) {
5139 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5140 		    hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark,
5141 		    (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark));
5142 		sctp_print_address(addr);
5143 		SCTP_PRINTF("No such bucket for address\n");
5144 		if (holds_lock == 0)
5145 			SCTP_IPI_ADDR_RUNLOCK();
5146 
5147 		return (NULL);
5148 	}
5149 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5150 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5151 			continue;
5152 #ifdef INET
5153 		if (addr->sa_family == AF_INET) {
5154 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5155 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5156 				/* found him. */
5157 				if (holds_lock == 0)
5158 					SCTP_IPI_ADDR_RUNLOCK();
5159 				return (sctp_ifap);
5160 				break;
5161 			}
5162 		}
5163 #endif
5164 #ifdef INET6
5165 		if (addr->sa_family == AF_INET6) {
5166 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5167 			    &sctp_ifap->address.sin6)) {
5168 				/* found him. */
5169 				if (holds_lock == 0)
5170 					SCTP_IPI_ADDR_RUNLOCK();
5171 				return (sctp_ifap);
5172 				break;
5173 			}
5174 		}
5175 #endif
5176 	}
5177 	if (holds_lock == 0)
5178 		SCTP_IPI_ADDR_RUNLOCK();
5179 	return (NULL);
5180 }
5181 
/*
 * Called after the application has pulled *freed_so_far bytes from the
 * socket. Decides whether enough receive-window space has opened up
 * (relative to rwnd_req) to be worth sending a window-update SACK now;
 * if so, sends the SACK and kicks chunk output, otherwise just records
 * the pending amount in the TCB. 'hold_rlock' says the caller holds the
 * INP read lock, which must be dropped around the SACK machinery and
 * reacquired before returning.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Hold a reference so the assoc cannot be freed under us. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
	    (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | SCTP_STATE_SHUTDOWN_RECEIVED))) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* Fold the newly freed bytes into the running total. */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have we freed enough to look? */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/* Window opened enough: drop the read lock and report. */
		if (hold_rlock) {
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		/* Re-check under the TCB lock; the state may have changed. */
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* Reacquire the read lock if we dropped it for the caller. */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}

	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5264 
5265 int
5266 sctp_sorecvmsg(struct socket *so,
5267     struct uio *uio,
5268     struct mbuf **mp,
5269     struct sockaddr *from,
5270     int fromlen,
5271     int *msg_flags,
5272     struct sctp_sndrcvinfo *sinfo,
5273     int filling_sinfo)
5274 {
5275 	/*
5276 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5277 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5278 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5279 	 * On the way out we may send out any combination of:
5280 	 * MSG_NOTIFICATION MSG_EOR
5281 	 *
5282 	 */
5283 	struct sctp_inpcb *inp = NULL;
5284 	ssize_t my_len = 0;
5285 	ssize_t cp_len = 0;
5286 	int error = 0;
5287 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5288 	struct mbuf *m = NULL;
5289 	struct sctp_tcb *stcb = NULL;
5290 	int wakeup_read_socket = 0;
5291 	int freecnt_applied = 0;
5292 	int out_flags = 0, in_flags = 0;
5293 	int block_allowed = 1;
5294 	uint32_t freed_so_far = 0;
5295 	ssize_t copied_so_far = 0;
5296 	int in_eeor_mode = 0;
5297 	int no_rcv_needed = 0;
5298 	uint32_t rwnd_req = 0;
5299 	int hold_sblock = 0;
5300 	int hold_rlock = 0;
5301 	ssize_t slen = 0;
5302 	uint32_t held_length = 0;
5303 	int sockbuf_lock = 0;
5304 
5305 	if (uio == NULL) {
5306 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5307 		return (EINVAL);
5308 	}
5309 
5310 	if (msg_flags) {
5311 		in_flags = *msg_flags;
5312 		if (in_flags & MSG_PEEK)
5313 			SCTP_STAT_INCR(sctps_read_peeks);
5314 	} else {
5315 		in_flags = 0;
5316 	}
5317 	slen = uio->uio_resid;
5318 
5319 	/* Pull in and set up our int flags */
5320 	if (in_flags & MSG_OOB) {
5321 		/* Out of band's NOT supported */
5322 		return (EOPNOTSUPP);
5323 	}
5324 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5325 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5326 		return (EINVAL);
5327 	}
5328 	if ((in_flags & (MSG_DONTWAIT
5329 	    | MSG_NBIO
5330 	    )) ||
5331 	    SCTP_SO_IS_NBIO(so)) {
5332 		block_allowed = 0;
5333 	}
5334 	/* setup the endpoint */
5335 	inp = (struct sctp_inpcb *)so->so_pcb;
5336 	if (inp == NULL) {
5337 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5338 		return (EFAULT);
5339 	}
5340 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5341 	/* Must be at least a MTU's worth */
5342 	if (rwnd_req < SCTP_MIN_RWND)
5343 		rwnd_req = SCTP_MIN_RWND;
5344 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5345 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5346 		sctp_misc_ints(SCTP_SORECV_ENTER,
5347 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5348 	}
5349 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5350 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5351 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5352 	}
5353 
5354 
5355 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5356 	if (error) {
5357 		goto release_unlocked;
5358 	}
5359 	sockbuf_lock = 1;
5360 restart:
5361 
5362 
5363 restart_nosblocks:
5364 	if (hold_sblock == 0) {
5365 		SOCKBUF_LOCK(&so->so_rcv);
5366 		hold_sblock = 1;
5367 	}
5368 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5369 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5370 		goto out;
5371 	}
5372 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5373 		if (so->so_error) {
5374 			error = so->so_error;
5375 			if ((in_flags & MSG_PEEK) == 0)
5376 				so->so_error = 0;
5377 			goto out;
5378 		} else {
5379 			if (so->so_rcv.sb_cc == 0) {
5380 				/* indicate EOF */
5381 				error = 0;
5382 				goto out;
5383 			}
5384 		}
5385 	}
5386 	if (so->so_rcv.sb_cc <= held_length) {
5387 		if (so->so_error) {
5388 			error = so->so_error;
5389 			if ((in_flags & MSG_PEEK) == 0) {
5390 				so->so_error = 0;
5391 			}
5392 			goto out;
5393 		}
5394 		if ((so->so_rcv.sb_cc == 0) &&
5395 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5396 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5397 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5398 				/*
5399 				 * For active open side clear flags for
5400 				 * re-use passive open is blocked by
5401 				 * connect.
5402 				 */
5403 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5404 					/*
5405 					 * You were aborted, passive side
5406 					 * always hits here
5407 					 */
5408 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5409 					error = ECONNRESET;
5410 				}
5411 				so->so_state &= ~(SS_ISCONNECTING |
5412 				    SS_ISDISCONNECTING |
5413 				    SS_ISCONFIRMING |
5414 				    SS_ISCONNECTED);
5415 				if (error == 0) {
5416 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5417 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5418 						error = ENOTCONN;
5419 					}
5420 				}
5421 				goto out;
5422 			}
5423 		}
5424 		if (block_allowed) {
5425 			error = sbwait(&so->so_rcv);
5426 			if (error) {
5427 				goto out;
5428 			}
5429 			held_length = 0;
5430 			goto restart_nosblocks;
5431 		} else {
5432 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5433 			error = EWOULDBLOCK;
5434 			goto out;
5435 		}
5436 	}
5437 	if (hold_sblock == 1) {
5438 		SOCKBUF_UNLOCK(&so->so_rcv);
5439 		hold_sblock = 0;
5440 	}
5441 	/* we possibly have data we can read */
5442 	/* sa_ignore FREED_MEMORY */
5443 	control = TAILQ_FIRST(&inp->read_queue);
5444 	if (control == NULL) {
5445 		/*
5446 		 * This could be happening since the appender did the
5447 		 * increment but as not yet did the tailq insert onto the
5448 		 * read_queue
5449 		 */
5450 		if (hold_rlock == 0) {
5451 			SCTP_INP_READ_LOCK(inp);
5452 		}
5453 		control = TAILQ_FIRST(&inp->read_queue);
5454 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5455 #ifdef INVARIANTS
5456 			panic("Huh, its non zero and nothing on control?");
5457 #endif
5458 			so->so_rcv.sb_cc = 0;
5459 		}
5460 		SCTP_INP_READ_UNLOCK(inp);
5461 		hold_rlock = 0;
5462 		goto restart;
5463 	}
5464 
5465 	if ((control->length == 0) &&
5466 	    (control->do_not_ref_stcb)) {
5467 		/*
5468 		 * Clean up code for freeing assoc that left behind a
5469 		 * pdapi.. maybe a peer in EEOR that just closed after
5470 		 * sending and never indicated a EOR.
5471 		 */
5472 		if (hold_rlock == 0) {
5473 			hold_rlock = 1;
5474 			SCTP_INP_READ_LOCK(inp);
5475 		}
5476 		control->held_length = 0;
5477 		if (control->data) {
5478 			/* Hmm there is data here .. fix */
5479 			struct mbuf *m_tmp;
5480 			int cnt = 0;
5481 
5482 			m_tmp = control->data;
5483 			while (m_tmp) {
5484 				cnt += SCTP_BUF_LEN(m_tmp);
5485 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5486 					control->tail_mbuf = m_tmp;
5487 					control->end_added = 1;
5488 				}
5489 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5490 			}
5491 			control->length = cnt;
5492 		} else {
5493 			/* remove it */
5494 			TAILQ_REMOVE(&inp->read_queue, control, next);
			/* Add back any hidden data */
5496 			sctp_free_remote_addr(control->whoFrom);
5497 			sctp_free_a_readq(stcb, control);
5498 		}
5499 		if (hold_rlock) {
5500 			hold_rlock = 0;
5501 			SCTP_INP_READ_UNLOCK(inp);
5502 		}
5503 		goto restart;
5504 	}
5505 	if ((control->length == 0) &&
5506 	    (control->end_added == 1)) {
5507 		/*
5508 		 * Do we also need to check for (control->pdapi_aborted ==
5509 		 * 1)?
5510 		 */
5511 		if (hold_rlock == 0) {
5512 			hold_rlock = 1;
5513 			SCTP_INP_READ_LOCK(inp);
5514 		}
5515 		TAILQ_REMOVE(&inp->read_queue, control, next);
5516 		if (control->data) {
5517 #ifdef INVARIANTS
5518 			panic("control->data not null but control->length == 0");
5519 #else
5520 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5521 			sctp_m_freem(control->data);
5522 			control->data = NULL;
5523 #endif
5524 		}
5525 		if (control->aux_data) {
5526 			sctp_m_free(control->aux_data);
5527 			control->aux_data = NULL;
5528 		}
5529 #ifdef INVARIANTS
5530 		if (control->on_strm_q) {
5531 			panic("About to free ctl:%p so:%p and its in %d",
5532 			    control, so, control->on_strm_q);
5533 		}
5534 #endif
5535 		sctp_free_remote_addr(control->whoFrom);
5536 		sctp_free_a_readq(stcb, control);
5537 		if (hold_rlock) {
5538 			hold_rlock = 0;
5539 			SCTP_INP_READ_UNLOCK(inp);
5540 		}
5541 		goto restart;
5542 	}
5543 	if (control->length == 0) {
5544 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5545 		    (filling_sinfo)) {
5546 			/* find a more suitable one then this */
5547 			ctl = TAILQ_NEXT(control, next);
5548 			while (ctl) {
5549 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5550 				    (ctl->some_taken ||
5551 				    (ctl->spec_flags & M_NOTIFICATION) ||
5552 				    ((ctl->do_not_ref_stcb == 0) &&
5553 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5554 				    ) {
5555 					/*-
5556 					 * If we have a different TCB next, and there is data
5557 					 * present. If we have already taken some (pdapi), OR we can
5558 					 * ref the tcb and no delivery as started on this stream, we
5559 					 * take it. Note we allow a notification on a different
5560 					 * assoc to be delivered..
5561 					 */
5562 					control = ctl;
5563 					goto found_one;
5564 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5565 					    (ctl->length) &&
5566 					    ((ctl->some_taken) ||
5567 					    ((ctl->do_not_ref_stcb == 0) &&
5568 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5569 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
					/*-
					 * If we have the same tcb, and there is data present, and we
					 * have the strm interleave feature present. Then if we have
					 * taken some (pdapi) or we can refer to that tcb AND we have
					 * not started a delivery for this stream, we can take it.
					 * Note we do NOT allow a notification on the same assoc to
					 * be delivered.
					 */
5578 					control = ctl;
5579 					goto found_one;
5580 				}
5581 				ctl = TAILQ_NEXT(ctl, next);
5582 			}
5583 		}
5584 		/*
5585 		 * if we reach here, not suitable replacement is available
5586 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5587 		 * into the our held count, and its time to sleep again.
5588 		 */
5589 		held_length = so->so_rcv.sb_cc;
5590 		control->held_length = so->so_rcv.sb_cc;
5591 		goto restart;
5592 	}
5593 	/* Clear the held length since there is something to read */
5594 	control->held_length = 0;
5595 found_one:
5596 	/*
5597 	 * If we reach here, control has a some data for us to read off.
5598 	 * Note that stcb COULD be NULL.
5599 	 */
5600 	if (hold_rlock == 0) {
5601 		hold_rlock = 1;
5602 		SCTP_INP_READ_LOCK(inp);
5603 	}
5604 	control->some_taken++;
5605 	stcb = control->stcb;
5606 	if (stcb) {
5607 		if ((control->do_not_ref_stcb == 0) &&
5608 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5609 			if (freecnt_applied == 0)
5610 				stcb = NULL;
5611 		} else if (control->do_not_ref_stcb == 0) {
5612 			/* you can't free it on me please */
5613 			/*
5614 			 * The lock on the socket buffer protects us so the
5615 			 * free code will stop. But since we used the
5616 			 * socketbuf lock and the sender uses the tcb_lock
5617 			 * to increment, we need to use the atomic add to
5618 			 * the refcnt
5619 			 */
5620 			if (freecnt_applied) {
5621 #ifdef INVARIANTS
5622 				panic("refcnt already incremented");
5623 #else
5624 				SCTP_PRINTF("refcnt already incremented?\n");
5625 #endif
5626 			} else {
5627 				atomic_add_int(&stcb->asoc.refcnt, 1);
5628 				freecnt_applied = 1;
5629 			}
5630 			/*
5631 			 * Setup to remember how much we have not yet told
5632 			 * the peer our rwnd has opened up. Note we grab the
5633 			 * value from the tcb from last time. Note too that
5634 			 * sack sending clears this when a sack is sent,
5635 			 * which is fine. Once we hit the rwnd_req, we then
5636 			 * will go to the sctp_user_rcvd() that will not
5637 			 * lock until it KNOWs it MUST send a WUP-SACK.
5638 			 */
5639 			freed_so_far = (uint32_t)stcb->freed_by_sorcv_sincelast;
5640 			stcb->freed_by_sorcv_sincelast = 0;
5641 		}
5642 	}
5643 	if (stcb &&
5644 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5645 	    control->do_not_ref_stcb == 0) {
5646 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5647 	}
5648 
5649 	/* First lets get off the sinfo and sockaddr info */
5650 	if ((sinfo != NULL) && (filling_sinfo != 0)) {
5651 		sinfo->sinfo_stream = control->sinfo_stream;
5652 		sinfo->sinfo_ssn = (uint16_t)control->mid;
5653 		sinfo->sinfo_flags = control->sinfo_flags;
5654 		sinfo->sinfo_ppid = control->sinfo_ppid;
5655 		sinfo->sinfo_context = control->sinfo_context;
5656 		sinfo->sinfo_timetolive = control->sinfo_timetolive;
5657 		sinfo->sinfo_tsn = control->sinfo_tsn;
5658 		sinfo->sinfo_cumtsn = control->sinfo_cumtsn;
5659 		sinfo->sinfo_assoc_id = control->sinfo_assoc_id;
5660 		nxt = TAILQ_NEXT(control, next);
5661 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5662 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5663 			struct sctp_extrcvinfo *s_extra;
5664 
5665 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5666 			if ((nxt) &&
5667 			    (nxt->length)) {
5668 				s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5669 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5670 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5671 				}
5672 				if (nxt->spec_flags & M_NOTIFICATION) {
5673 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5674 				}
5675 				s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
5676 				s_extra->serinfo_next_length = nxt->length;
5677 				s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
5678 				s_extra->serinfo_next_stream = nxt->sinfo_stream;
5679 				if (nxt->tail_mbuf != NULL) {
5680 					if (nxt->end_added) {
5681 						s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5682 					}
5683 				}
5684 			} else {
5685 				/*
5686 				 * we explicitly 0 this, since the memcpy
5687 				 * got some other things beyond the older
5688 				 * sinfo_ that is on the control's structure
5689 				 * :-D
5690 				 */
5691 				nxt = NULL;
5692 				s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
5693 				s_extra->serinfo_next_aid = 0;
5694 				s_extra->serinfo_next_length = 0;
5695 				s_extra->serinfo_next_ppid = 0;
5696 				s_extra->serinfo_next_stream = 0;
5697 			}
5698 		}
5699 		/*
5700 		 * update off the real current cum-ack, if we have an stcb.
5701 		 */
5702 		if ((control->do_not_ref_stcb == 0) && stcb)
5703 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5704 		/*
5705 		 * mask off the high bits, we keep the actual chunk bits in
5706 		 * there.
5707 		 */
5708 		sinfo->sinfo_flags &= 0x00ff;
5709 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5710 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5711 		}
5712 	}
5713 #ifdef SCTP_ASOCLOG_OF_TSNS
5714 	{
5715 		int index, newindex;
5716 		struct sctp_pcbtsn_rlog *entry;
5717 
5718 		do {
5719 			index = inp->readlog_index;
5720 			newindex = index + 1;
5721 			if (newindex >= SCTP_READ_LOG_SIZE) {
5722 				newindex = 0;
5723 			}
5724 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5725 		entry = &inp->readlog[index];
5726 		entry->vtag = control->sinfo_assoc_id;
5727 		entry->strm = control->sinfo_stream;
5728 		entry->seq = (uint16_t)control->mid;
5729 		entry->sz = control->length;
5730 		entry->flgs = control->sinfo_flags;
5731 	}
5732 #endif
5733 	if ((fromlen > 0) && (from != NULL)) {
5734 		union sctp_sockstore store;
5735 		size_t len;
5736 
5737 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5738 #ifdef INET6
5739 		case AF_INET6:
5740 			len = sizeof(struct sockaddr_in6);
5741 			store.sin6 = control->whoFrom->ro._l_addr.sin6;
5742 			store.sin6.sin6_port = control->port_from;
5743 			break;
5744 #endif
5745 #ifdef INET
5746 		case AF_INET:
5747 #ifdef INET6
5748 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
5749 				len = sizeof(struct sockaddr_in6);
5750 				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
5751 				    &store.sin6);
5752 				store.sin6.sin6_port = control->port_from;
5753 			} else {
5754 				len = sizeof(struct sockaddr_in);
5755 				store.sin = control->whoFrom->ro._l_addr.sin;
5756 				store.sin.sin_port = control->port_from;
5757 			}
5758 #else
5759 			len = sizeof(struct sockaddr_in);
5760 			store.sin = control->whoFrom->ro._l_addr.sin;
5761 			store.sin.sin_port = control->port_from;
5762 #endif
5763 			break;
5764 #endif
5765 		default:
5766 			len = 0;
5767 			break;
5768 		}
5769 		memcpy(from, &store, min((size_t)fromlen, len));
5770 #ifdef INET6
5771 		{
5772 			struct sockaddr_in6 lsa6, *from6;
5773 
5774 			from6 = (struct sockaddr_in6 *)from;
5775 			sctp_recover_scope_mac(from6, (&lsa6));
5776 		}
5777 #endif
5778 	}
5779 	if (hold_rlock) {
5780 		SCTP_INP_READ_UNLOCK(inp);
5781 		hold_rlock = 0;
5782 	}
5783 	if (hold_sblock) {
5784 		SOCKBUF_UNLOCK(&so->so_rcv);
5785 		hold_sblock = 0;
5786 	}
5787 	/* now copy out what data we can */
5788 	if (mp == NULL) {
5789 		/* copy out each mbuf in the chain up to length */
5790 get_more_data:
5791 		m = control->data;
5792 		while (m) {
5793 			/* Move out all we can */
5794 			cp_len = uio->uio_resid;
5795 			my_len = SCTP_BUF_LEN(m);
5796 			if (cp_len > my_len) {
5797 				/* not enough in this buf */
5798 				cp_len = my_len;
5799 			}
5800 			if (hold_rlock) {
5801 				SCTP_INP_READ_UNLOCK(inp);
5802 				hold_rlock = 0;
5803 			}
5804 			if (cp_len > 0)
5805 				error = uiomove(mtod(m, char *), (int)cp_len, uio);
5806 			/* re-read */
5807 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5808 				goto release;
5809 			}
5810 
5811 			if ((control->do_not_ref_stcb == 0) && stcb &&
5812 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5813 				no_rcv_needed = 1;
5814 			}
5815 			if (error) {
5816 				/* error we are out of here */
5817 				goto release;
5818 			}
5819 			SCTP_INP_READ_LOCK(inp);
5820 			hold_rlock = 1;
5821 			if (cp_len == SCTP_BUF_LEN(m)) {
5822 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5823 				    (control->end_added)) {
5824 					out_flags |= MSG_EOR;
5825 					if ((control->do_not_ref_stcb == 0) &&
5826 					    (control->stcb != NULL) &&
5827 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5828 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5829 				}
5830 				if (control->spec_flags & M_NOTIFICATION) {
5831 					out_flags |= MSG_NOTIFICATION;
5832 				}
5833 				/* we ate up the mbuf */
5834 				if (in_flags & MSG_PEEK) {
5835 					/* just looking */
5836 					m = SCTP_BUF_NEXT(m);
5837 					copied_so_far += cp_len;
5838 				} else {
5839 					/* dispose of the mbuf */
5840 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5841 						sctp_sblog(&so->so_rcv,
5842 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5843 					}
5844 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5845 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5846 						sctp_sblog(&so->so_rcv,
5847 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5848 					}
5849 					copied_so_far += cp_len;
5850 					freed_so_far += (uint32_t)cp_len;
5851 					freed_so_far += MSIZE;
5852 					atomic_subtract_int(&control->length, cp_len);
5853 					control->data = sctp_m_free(m);
5854 					m = control->data;
5855 					/*
5856 					 * been through it all, must hold sb
5857 					 * lock ok to null tail
5858 					 */
5859 					if (control->data == NULL) {
5860 #ifdef INVARIANTS
5861 						if ((control->end_added == 0) ||
5862 						    (TAILQ_NEXT(control, next) == NULL)) {
5863 							/*
5864 							 * If the end is not
5865 							 * added, OR the
5866 							 * next is NOT null
5867 							 * we MUST have the
5868 							 * lock.
5869 							 */
5870 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5871 								panic("Hmm we don't own the lock?");
5872 							}
5873 						}
5874 #endif
5875 						control->tail_mbuf = NULL;
5876 #ifdef INVARIANTS
5877 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5878 							panic("end_added, nothing left and no MSG_EOR");
5879 						}
5880 #endif
5881 					}
5882 				}
5883 			} else {
5884 				/* Do we need to trim the mbuf? */
5885 				if (control->spec_flags & M_NOTIFICATION) {
5886 					out_flags |= MSG_NOTIFICATION;
5887 				}
5888 				if ((in_flags & MSG_PEEK) == 0) {
5889 					SCTP_BUF_RESV_UF(m, cp_len);
5890 					SCTP_BUF_LEN(m) -= (int)cp_len;
5891 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5892 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, (int)cp_len);
5893 					}
5894 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5895 					if ((control->do_not_ref_stcb == 0) &&
5896 					    stcb) {
5897 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5898 					}
5899 					copied_so_far += cp_len;
5900 					freed_so_far += (uint32_t)cp_len;
5901 					freed_so_far += MSIZE;
5902 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5903 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5904 						    SCTP_LOG_SBRESULT, 0);
5905 					}
5906 					atomic_subtract_int(&control->length, cp_len);
5907 				} else {
5908 					copied_so_far += cp_len;
5909 				}
5910 			}
5911 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5912 				break;
5913 			}
5914 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5915 			    (control->do_not_ref_stcb == 0) &&
5916 			    (freed_so_far >= rwnd_req)) {
5917 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5918 			}
5919 		}		/* end while(m) */
5920 		/*
5921 		 * At this point we have looked at it all and we either have
5922 		 * a MSG_EOR/or read all the user wants... <OR>
5923 		 * control->length == 0.
5924 		 */
5925 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5926 			/* we are done with this control */
5927 			if (control->length == 0) {
5928 				if (control->data) {
5929 #ifdef INVARIANTS
5930 					panic("control->data not null at read eor?");
5931 #else
5932 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5933 					sctp_m_freem(control->data);
5934 					control->data = NULL;
5935 #endif
5936 				}
5937 		done_with_control:
5938 				if (hold_rlock == 0) {
5939 					SCTP_INP_READ_LOCK(inp);
5940 					hold_rlock = 1;
5941 				}
5942 				TAILQ_REMOVE(&inp->read_queue, control, next);
5943 				/* Add back any hiddend data */
5944 				if (control->held_length) {
5945 					held_length = 0;
5946 					control->held_length = 0;
5947 					wakeup_read_socket = 1;
5948 				}
5949 				if (control->aux_data) {
5950 					sctp_m_free(control->aux_data);
5951 					control->aux_data = NULL;
5952 				}
5953 				no_rcv_needed = control->do_not_ref_stcb;
5954 				sctp_free_remote_addr(control->whoFrom);
5955 				control->data = NULL;
5956 #ifdef INVARIANTS
5957 				if (control->on_strm_q) {
5958 					panic("About to free ctl:%p so:%p and its in %d",
5959 					    control, so, control->on_strm_q);
5960 				}
5961 #endif
5962 				sctp_free_a_readq(stcb, control);
5963 				control = NULL;
5964 				if ((freed_so_far >= rwnd_req) &&
5965 				    (no_rcv_needed == 0))
5966 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5967 
5968 			} else {
5969 				/*
5970 				 * The user did not read all of this
5971 				 * message, turn off the returned MSG_EOR
5972 				 * since we are leaving more behind on the
5973 				 * control to read.
5974 				 */
5975 #ifdef INVARIANTS
5976 				if (control->end_added &&
5977 				    (control->data == NULL) &&
5978 				    (control->tail_mbuf == NULL)) {
5979 					panic("Gak, control->length is corrupt?");
5980 				}
5981 #endif
5982 				no_rcv_needed = control->do_not_ref_stcb;
5983 				out_flags &= ~MSG_EOR;
5984 			}
5985 		}
5986 		if (out_flags & MSG_EOR) {
5987 			goto release;
5988 		}
5989 		if ((uio->uio_resid == 0) ||
5990 		    ((in_eeor_mode) &&
5991 		    (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))) {
5992 			goto release;
5993 		}
5994 		/*
5995 		 * If I hit here the receiver wants more and this message is
5996 		 * NOT done (pd-api). So two questions. Can we block? if not
5997 		 * we are done. Did the user NOT set MSG_WAITALL?
5998 		 */
5999 		if (block_allowed == 0) {
6000 			goto release;
6001 		}
6002 		/*
6003 		 * We need to wait for more data a few things: - We don't
6004 		 * sbunlock() so we don't get someone else reading. - We
6005 		 * must be sure to account for the case where what is added
6006 		 * is NOT to our control when we wakeup.
6007 		 */
6008 
6009 		/*
6010 		 * Do we need to tell the transport a rwnd update might be
6011 		 * needed before we go to sleep?
6012 		 */
6013 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
6014 		    ((freed_so_far >= rwnd_req) &&
6015 		    (control->do_not_ref_stcb == 0) &&
6016 		    (no_rcv_needed == 0))) {
6017 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6018 		}
6019 wait_some_more:
6020 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
6021 			goto release;
6022 		}
6023 
6024 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
6025 			goto release;
6026 
6027 		if (hold_rlock == 1) {
6028 			SCTP_INP_READ_UNLOCK(inp);
6029 			hold_rlock = 0;
6030 		}
6031 		if (hold_sblock == 0) {
6032 			SOCKBUF_LOCK(&so->so_rcv);
6033 			hold_sblock = 1;
6034 		}
6035 		if ((copied_so_far) && (control->length == 0) &&
6036 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
6037 			goto release;
6038 		}
6039 		if (so->so_rcv.sb_cc <= control->held_length) {
6040 			error = sbwait(&so->so_rcv);
6041 			if (error) {
6042 				goto release;
6043 			}
6044 			control->held_length = 0;
6045 		}
6046 		if (hold_sblock) {
6047 			SOCKBUF_UNLOCK(&so->so_rcv);
6048 			hold_sblock = 0;
6049 		}
6050 		if (control->length == 0) {
6051 			/* still nothing here */
6052 			if (control->end_added == 1) {
6053 				/* he aborted, or is done i.e.did a shutdown */
6054 				out_flags |= MSG_EOR;
6055 				if (control->pdapi_aborted) {
6056 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6057 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6058 
6059 					out_flags |= MSG_TRUNC;
6060 				} else {
6061 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6062 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6063 				}
6064 				goto done_with_control;
6065 			}
6066 			if (so->so_rcv.sb_cc > held_length) {
6067 				control->held_length = so->so_rcv.sb_cc;
6068 				held_length = 0;
6069 			}
6070 			goto wait_some_more;
6071 		} else if (control->data == NULL) {
6072 			/*
6073 			 * we must re-sync since data is probably being
6074 			 * added
6075 			 */
6076 			SCTP_INP_READ_LOCK(inp);
6077 			if ((control->length > 0) && (control->data == NULL)) {
6078 				/*
6079 				 * big trouble.. we have the lock and its
6080 				 * corrupt?
6081 				 */
6082 #ifdef INVARIANTS
6083 				panic("Impossible data==NULL length !=0");
6084 #endif
6085 				out_flags |= MSG_EOR;
6086 				out_flags |= MSG_TRUNC;
6087 				control->length = 0;
6088 				SCTP_INP_READ_UNLOCK(inp);
6089 				goto done_with_control;
6090 			}
6091 			SCTP_INP_READ_UNLOCK(inp);
6092 			/* We will fall around to get more data */
6093 		}
6094 		goto get_more_data;
6095 	} else {
6096 		/*-
6097 		 * Give caller back the mbuf chain,
6098 		 * store in uio_resid the length
6099 		 */
6100 		wakeup_read_socket = 0;
6101 		if ((control->end_added == 0) ||
6102 		    (TAILQ_NEXT(control, next) == NULL)) {
6103 			/* Need to get rlock */
6104 			if (hold_rlock == 0) {
6105 				SCTP_INP_READ_LOCK(inp);
6106 				hold_rlock = 1;
6107 			}
6108 		}
6109 		if (control->end_added) {
6110 			out_flags |= MSG_EOR;
6111 			if ((control->do_not_ref_stcb == 0) &&
6112 			    (control->stcb != NULL) &&
6113 			    ((control->spec_flags & M_NOTIFICATION) == 0))
6114 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6115 		}
6116 		if (control->spec_flags & M_NOTIFICATION) {
6117 			out_flags |= MSG_NOTIFICATION;
6118 		}
6119 		uio->uio_resid = control->length;
6120 		*mp = control->data;
6121 		m = control->data;
6122 		while (m) {
6123 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6124 				sctp_sblog(&so->so_rcv,
6125 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6126 			}
6127 			sctp_sbfree(control, stcb, &so->so_rcv, m);
6128 			freed_so_far += (uint32_t)SCTP_BUF_LEN(m);
6129 			freed_so_far += MSIZE;
6130 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6131 				sctp_sblog(&so->so_rcv,
6132 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6133 			}
6134 			m = SCTP_BUF_NEXT(m);
6135 		}
6136 		control->data = control->tail_mbuf = NULL;
6137 		control->length = 0;
6138 		if (out_flags & MSG_EOR) {
6139 			/* Done with this control */
6140 			goto done_with_control;
6141 		}
6142 	}
6143 release:
6144 	if (hold_rlock == 1) {
6145 		SCTP_INP_READ_UNLOCK(inp);
6146 		hold_rlock = 0;
6147 	}
6148 	if (hold_sblock == 1) {
6149 		SOCKBUF_UNLOCK(&so->so_rcv);
6150 		hold_sblock = 0;
6151 	}
6152 
6153 	sbunlock(&so->so_rcv);
6154 	sockbuf_lock = 0;
6155 
6156 release_unlocked:
6157 	if (hold_sblock) {
6158 		SOCKBUF_UNLOCK(&so->so_rcv);
6159 		hold_sblock = 0;
6160 	}
6161 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6162 		if ((freed_so_far >= rwnd_req) &&
6163 		    (control && (control->do_not_ref_stcb == 0)) &&
6164 		    (no_rcv_needed == 0))
6165 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6166 	}
6167 out:
6168 	if (msg_flags) {
6169 		*msg_flags = out_flags;
6170 	}
6171 	if (((out_flags & MSG_EOR) == 0) &&
6172 	    ((in_flags & MSG_PEEK) == 0) &&
6173 	    (sinfo) &&
6174 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6175 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6176 		struct sctp_extrcvinfo *s_extra;
6177 
6178 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6179 		s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
6180 	}
6181 	if (hold_rlock == 1) {
6182 		SCTP_INP_READ_UNLOCK(inp);
6183 	}
6184 	if (hold_sblock) {
6185 		SOCKBUF_UNLOCK(&so->so_rcv);
6186 	}
6187 	if (sockbuf_lock) {
6188 		sbunlock(&so->so_rcv);
6189 	}
6190 
6191 	if (freecnt_applied) {
6192 		/*
6193 		 * The lock on the socket buffer protects us so the free
6194 		 * code will stop. But since we used the socketbuf lock and
6195 		 * the sender uses the tcb_lock to increment, we need to use
6196 		 * the atomic add to the refcnt.
6197 		 */
6198 		if (stcb == NULL) {
6199 #ifdef INVARIANTS
6200 			panic("stcb for refcnt has gone NULL?");
6201 			goto stage_left;
6202 #else
6203 			goto stage_left;
6204 #endif
6205 		}
6206 		/* Save the value back for next time */
6207 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6208 		atomic_add_int(&stcb->asoc.refcnt, -1);
6209 	}
6210 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6211 		if (stcb) {
6212 			sctp_misc_ints(SCTP_SORECV_DONE,
6213 			    freed_so_far,
6214 			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6215 			    stcb->asoc.my_rwnd,
6216 			    so->so_rcv.sb_cc);
6217 		} else {
6218 			sctp_misc_ints(SCTP_SORECV_DONE,
6219 			    freed_so_far,
6220 			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6221 			    0,
6222 			    so->so_rcv.sb_cc);
6223 		}
6224 	}
6225 stage_left:
6226 	if (wakeup_read_socket) {
6227 		sctp_sorwakeup(inp, so);
6228 	}
6229 	return (error);
6230 }
6231 
6232 
6233 #ifdef SCTP_MBUF_LOGGING
6234 struct mbuf *
6235 sctp_m_free(struct mbuf *m)
6236 {
6237 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6238 		sctp_log_mb(m, SCTP_MBUF_IFREE);
6239 	}
6240 	return (m_free(m));
6241 }
6242 
6243 void
6244 sctp_m_freem(struct mbuf *mb)
6245 {
6246 	while (mb != NULL)
6247 		mb = sctp_m_free(mb);
6248 }
6249 
6250 #endif
6251 
6252 int
6253 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6254 {
6255 	/*
6256 	 * Given a local address. For all associations that holds the
6257 	 * address, request a peer-set-primary.
6258 	 */
6259 	struct sctp_ifa *ifa;
6260 	struct sctp_laddr *wi;
6261 
6262 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6263 	if (ifa == NULL) {
6264 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6265 		return (EADDRNOTAVAIL);
6266 	}
6267 	/*
6268 	 * Now that we have the ifa we must awaken the iterator with this
6269 	 * message.
6270 	 */
6271 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6272 	if (wi == NULL) {
6273 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6274 		return (ENOMEM);
6275 	}
6276 	/* Now incr the count and int wi structure */
6277 	SCTP_INCR_LADDR_COUNT();
6278 	memset(wi, 0, sizeof(*wi));
6279 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6280 	wi->ifa = ifa;
6281 	wi->action = SCTP_SET_PRIM_ADDR;
6282 	atomic_add_int(&ifa->refcount, 1);
6283 
6284 	/* Now add it to the work queue */
6285 	SCTP_WQ_ADDR_LOCK();
6286 	/*
6287 	 * Should this really be a tailq? As it is we will process the
6288 	 * newest first :-0
6289 	 */
6290 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6291 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6292 	    (struct sctp_inpcb *)NULL,
6293 	    (struct sctp_tcb *)NULL,
6294 	    (struct sctp_nets *)NULL);
6295 	SCTP_WQ_ADDR_UNLOCK();
6296 	return (0);
6297 }
6298 
6299 
6300 int
6301 sctp_soreceive(struct socket *so,
6302     struct sockaddr **psa,
6303     struct uio *uio,
6304     struct mbuf **mp0,
6305     struct mbuf **controlp,
6306     int *flagsp)
6307 {
6308 	int error, fromlen;
6309 	uint8_t sockbuf[256];
6310 	struct sockaddr *from;
6311 	struct sctp_extrcvinfo sinfo;
6312 	int filling_sinfo = 1;
6313 	int flags;
6314 	struct sctp_inpcb *inp;
6315 
6316 	inp = (struct sctp_inpcb *)so->so_pcb;
6317 	/* pickup the assoc we are reading from */
6318 	if (inp == NULL) {
6319 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6320 		return (EINVAL);
6321 	}
6322 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6323 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6324 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6325 	    (controlp == NULL)) {
6326 		/* user does not want the sndrcv ctl */
6327 		filling_sinfo = 0;
6328 	}
6329 	if (psa) {
6330 		from = (struct sockaddr *)sockbuf;
6331 		fromlen = sizeof(sockbuf);
6332 		from->sa_len = 0;
6333 	} else {
6334 		from = NULL;
6335 		fromlen = 0;
6336 	}
6337 
6338 	if (filling_sinfo) {
6339 		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6340 	}
6341 	if (flagsp != NULL) {
6342 		flags = *flagsp;
6343 	} else {
6344 		flags = 0;
6345 	}
6346 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags,
6347 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6348 	if (flagsp != NULL) {
6349 		*flagsp = flags;
6350 	}
6351 	if (controlp != NULL) {
6352 		/* copy back the sinfo in a CMSG format */
6353 		if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) {
6354 			*controlp = sctp_build_ctl_nchunk(inp,
6355 			    (struct sctp_sndrcvinfo *)&sinfo);
6356 		} else {
6357 			*controlp = NULL;
6358 		}
6359 	}
6360 	if (psa) {
6361 		/* copy back the address info */
6362 		if (from && from->sa_len) {
6363 			*psa = sodupsockaddr(from, M_NOWAIT);
6364 		} else {
6365 			*psa = NULL;
6366 		}
6367 	}
6368 	return (error);
6369 }
6370 
6371 
6372 
6373 
6374 
6375 int
6376 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
6377     int totaddr, int *error)
6378 {
6379 	int added = 0;
6380 	int i;
6381 	struct sctp_inpcb *inp;
6382 	struct sockaddr *sa;
6383 	size_t incr = 0;
6384 #ifdef INET
6385 	struct sockaddr_in *sin;
6386 #endif
6387 #ifdef INET6
6388 	struct sockaddr_in6 *sin6;
6389 #endif
6390 
6391 	sa = addr;
6392 	inp = stcb->sctp_ep;
6393 	*error = 0;
6394 	for (i = 0; i < totaddr; i++) {
6395 		switch (sa->sa_family) {
6396 #ifdef INET
6397 		case AF_INET:
6398 			incr = sizeof(struct sockaddr_in);
6399 			sin = (struct sockaddr_in *)sa;
6400 			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
6401 			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
6402 			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
6403 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6404 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6405 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
6406 				*error = EINVAL;
6407 				goto out_now;
6408 			}
6409 			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
6410 			    SCTP_DONOT_SETSCOPE,
6411 			    SCTP_ADDR_IS_CONFIRMED)) {
6412 				/* assoc gone no un-lock */
6413 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6414 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6415 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
6416 				*error = ENOBUFS;
6417 				goto out_now;
6418 			}
6419 			added++;
6420 			break;
6421 #endif
6422 #ifdef INET6
6423 		case AF_INET6:
6424 			incr = sizeof(struct sockaddr_in6);
6425 			sin6 = (struct sockaddr_in6 *)sa;
6426 			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
6427 			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
6428 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6429 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6430 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
6431 				*error = EINVAL;
6432 				goto out_now;
6433 			}
6434 			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
6435 			    SCTP_DONOT_SETSCOPE,
6436 			    SCTP_ADDR_IS_CONFIRMED)) {
6437 				/* assoc gone no un-lock */
6438 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6439 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6440 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
6441 				*error = ENOBUFS;
6442 				goto out_now;
6443 			}
6444 			added++;
6445 			break;
6446 #endif
6447 		default:
6448 			break;
6449 		}
6450 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6451 	}
6452 out_now:
6453 	return (added);
6454 }
6455 
6456 int
6457 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6458     unsigned int totaddr,
6459     unsigned int *num_v4, unsigned int *num_v6,
6460     unsigned int limit)
6461 {
6462 	struct sockaddr *sa;
6463 	struct sctp_tcb *stcb;
6464 	unsigned int incr, at, i;
6465 
6466 	at = 0;
6467 	sa = addr;
6468 	*num_v6 = *num_v4 = 0;
6469 	/* account and validate addresses */
6470 	if (totaddr == 0) {
6471 		return (EINVAL);
6472 	}
6473 	for (i = 0; i < totaddr; i++) {
6474 		if (at + sizeof(struct sockaddr) > limit) {
6475 			return (EINVAL);
6476 		}
6477 		switch (sa->sa_family) {
6478 #ifdef INET
6479 		case AF_INET:
6480 			incr = (unsigned int)sizeof(struct sockaddr_in);
6481 			if (sa->sa_len != incr) {
6482 				return (EINVAL);
6483 			}
6484 			(*num_v4) += 1;
6485 			break;
6486 #endif
6487 #ifdef INET6
6488 		case AF_INET6:
6489 			{
6490 				struct sockaddr_in6 *sin6;
6491 
6492 				sin6 = (struct sockaddr_in6 *)sa;
6493 				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6494 					/* Must be non-mapped for connectx */
6495 					return (EINVAL);
6496 				}
6497 				incr = (unsigned int)sizeof(struct sockaddr_in6);
6498 				if (sa->sa_len != incr) {
6499 					return (EINVAL);
6500 				}
6501 				(*num_v6) += 1;
6502 				break;
6503 			}
6504 #endif
6505 		default:
6506 			return (EINVAL);
6507 		}
6508 		if ((at + incr) > limit) {
6509 			return (EINVAL);
6510 		}
6511 		SCTP_INP_INCR_REF(inp);
6512 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6513 		if (stcb != NULL) {
6514 			SCTP_TCB_UNLOCK(stcb);
6515 			return (EALREADY);
6516 		} else {
6517 			SCTP_INP_DECR_REF(inp);
6518 		}
6519 		at += incr;
6520 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6521 	}
6522 	return (0);
6523 }
6524 
6525 /*
6526  * sctp_bindx(ADD) for one address.
6527  * assumes all arguments are valid/checked by caller.
6528  */
6529 void
6530 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
6531     struct sockaddr *sa, sctp_assoc_t assoc_id,
6532     uint32_t vrf_id, int *error, void *p)
6533 {
6534 	struct sockaddr *addr_touse;
6535 #if defined(INET) && defined(INET6)
6536 	struct sockaddr_in sin;
6537 #endif
6538 
6539 	/* see if we're bound all already! */
6540 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6541 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6542 		*error = EINVAL;
6543 		return;
6544 	}
6545 	addr_touse = sa;
6546 #ifdef INET6
6547 	if (sa->sa_family == AF_INET6) {
6548 #ifdef INET
6549 		struct sockaddr_in6 *sin6;
6550 
6551 #endif
6552 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6553 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6554 			*error = EINVAL;
6555 			return;
6556 		}
6557 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6558 			/* can only bind v6 on PF_INET6 sockets */
6559 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6560 			*error = EINVAL;
6561 			return;
6562 		}
6563 #ifdef INET
6564 		sin6 = (struct sockaddr_in6 *)addr_touse;
6565 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6566 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6567 			    SCTP_IPV6_V6ONLY(inp)) {
6568 				/* can't bind v4-mapped on PF_INET sockets */
6569 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6570 				*error = EINVAL;
6571 				return;
6572 			}
6573 			in6_sin6_2_sin(&sin, sin6);
6574 			addr_touse = (struct sockaddr *)&sin;
6575 		}
6576 #endif
6577 	}
6578 #endif
6579 #ifdef INET
6580 	if (sa->sa_family == AF_INET) {
6581 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6582 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6583 			*error = EINVAL;
6584 			return;
6585 		}
6586 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6587 		    SCTP_IPV6_V6ONLY(inp)) {
6588 			/* can't bind v4 on PF_INET sockets */
6589 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6590 			*error = EINVAL;
6591 			return;
6592 		}
6593 	}
6594 #endif
6595 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
6596 		if (p == NULL) {
6597 			/* Can't get proc for Net/Open BSD */
6598 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6599 			*error = EINVAL;
6600 			return;
6601 		}
6602 		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
6603 		return;
6604 	}
6605 	/*
6606 	 * No locks required here since bind and mgmt_ep_sa all do their own
6607 	 * locking. If we do something for the FIX: below we may need to
6608 	 * lock in that case.
6609 	 */
6610 	if (assoc_id == 0) {
6611 		/* add the address */
6612 		struct sctp_inpcb *lep;
6613 		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
6614 
6615 		/* validate the incoming port */
6616 		if ((lsin->sin_port != 0) &&
6617 		    (lsin->sin_port != inp->sctp_lport)) {
6618 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6619 			*error = EINVAL;
6620 			return;
6621 		} else {
6622 			/* user specified 0 port, set it to existing port */
6623 			lsin->sin_port = inp->sctp_lport;
6624 		}
6625 
6626 		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
6627 		if (lep != NULL) {
6628 			/*
6629 			 * We must decrement the refcount since we have the
6630 			 * ep already and are binding. No remove going on
6631 			 * here.
6632 			 */
6633 			SCTP_INP_DECR_REF(lep);
6634 		}
6635 		if (lep == inp) {
6636 			/* already bound to it.. ok */
6637 			return;
6638 		} else if (lep == NULL) {
6639 			((struct sockaddr_in *)addr_touse)->sin_port = 0;
6640 			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6641 			    SCTP_ADD_IP_ADDRESS,
6642 			    vrf_id, NULL);
6643 		} else {
6644 			*error = EADDRINUSE;
6645 		}
6646 		if (*error)
6647 			return;
6648 	} else {
6649 		/*
6650 		 * FIX: decide whether we allow assoc based bindx
6651 		 */
6652 	}
6653 }
6654 
6655 /*
6656  * sctp_bindx(DELETE) for one address.
6657  * assumes all arguments are valid/checked by caller.
6658  */
6659 void
6660 sctp_bindx_delete_address(struct sctp_inpcb *inp,
6661     struct sockaddr *sa, sctp_assoc_t assoc_id,
6662     uint32_t vrf_id, int *error)
6663 {
6664 	struct sockaddr *addr_touse;
6665 #if defined(INET) && defined(INET6)
6666 	struct sockaddr_in sin;
6667 #endif
6668 
6669 	/* see if we're bound all already! */
6670 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6671 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6672 		*error = EINVAL;
6673 		return;
6674 	}
6675 	addr_touse = sa;
6676 #ifdef INET6
6677 	if (sa->sa_family == AF_INET6) {
6678 #ifdef INET
6679 		struct sockaddr_in6 *sin6;
6680 #endif
6681 
6682 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6683 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6684 			*error = EINVAL;
6685 			return;
6686 		}
6687 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6688 			/* can only bind v6 on PF_INET6 sockets */
6689 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6690 			*error = EINVAL;
6691 			return;
6692 		}
6693 #ifdef INET
6694 		sin6 = (struct sockaddr_in6 *)addr_touse;
6695 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6696 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6697 			    SCTP_IPV6_V6ONLY(inp)) {
6698 				/* can't bind mapped-v4 on PF_INET sockets */
6699 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6700 				*error = EINVAL;
6701 				return;
6702 			}
6703 			in6_sin6_2_sin(&sin, sin6);
6704 			addr_touse = (struct sockaddr *)&sin;
6705 		}
6706 #endif
6707 	}
6708 #endif
6709 #ifdef INET
6710 	if (sa->sa_family == AF_INET) {
6711 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6712 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6713 			*error = EINVAL;
6714 			return;
6715 		}
6716 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6717 		    SCTP_IPV6_V6ONLY(inp)) {
6718 			/* can't bind v4 on PF_INET sockets */
6719 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6720 			*error = EINVAL;
6721 			return;
6722 		}
6723 	}
6724 #endif
6725 	/*
6726 	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
6727 	 * below is ever changed we may need to lock before calling
6728 	 * association level binding.
6729 	 */
6730 	if (assoc_id == 0) {
6731 		/* delete the address */
6732 		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6733 		    SCTP_DEL_IP_ADDRESS,
6734 		    vrf_id, NULL);
6735 	} else {
6736 		/*
6737 		 * FIX: decide whether we allow assoc based bindx
6738 		 */
6739 	}
6740 }
6741 
6742 /*
6743  * returns the valid local address count for an assoc, taking into account
6744  * all scoping rules
6745  */
6746 int
6747 sctp_local_addr_count(struct sctp_tcb *stcb)
6748 {
6749 	int loopback_scope;
6750 #if defined(INET)
6751 	int ipv4_local_scope, ipv4_addr_legal;
6752 #endif
6753 #if defined (INET6)
6754 	int local_scope, site_scope, ipv6_addr_legal;
6755 #endif
6756 	struct sctp_vrf *vrf;
6757 	struct sctp_ifn *sctp_ifn;
6758 	struct sctp_ifa *sctp_ifa;
6759 	int count = 0;
6760 
6761 	/* Turn on all the appropriate scopes */
6762 	loopback_scope = stcb->asoc.scope.loopback_scope;
6763 #if defined(INET)
6764 	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
6765 	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
6766 #endif
6767 #if defined(INET6)
6768 	local_scope = stcb->asoc.scope.local_scope;
6769 	site_scope = stcb->asoc.scope.site_scope;
6770 	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
6771 #endif
6772 	SCTP_IPI_ADDR_RLOCK();
6773 	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
6774 	if (vrf == NULL) {
6775 		/* no vrf, no addresses */
6776 		SCTP_IPI_ADDR_RUNLOCK();
6777 		return (0);
6778 	}
6779 
6780 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6781 		/*
6782 		 * bound all case: go through all ifns on the vrf
6783 		 */
6784 		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
6785 			if ((loopback_scope == 0) &&
6786 			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
6787 				continue;
6788 			}
6789 			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
6790 				if (sctp_is_addr_restricted(stcb, sctp_ifa))
6791 					continue;
6792 				switch (sctp_ifa->address.sa.sa_family) {
6793 #ifdef INET
6794 				case AF_INET:
6795 					if (ipv4_addr_legal) {
6796 						struct sockaddr_in *sin;
6797 
6798 						sin = &sctp_ifa->address.sin;
6799 						if (sin->sin_addr.s_addr == 0) {
6800 							/*
6801 							 * skip unspecified
6802 							 * addrs
6803 							 */
6804 							continue;
6805 						}
6806 						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
6807 						    &sin->sin_addr) != 0) {
6808 							continue;
6809 						}
6810 						if ((ipv4_local_scope == 0) &&
6811 						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
6812 							continue;
6813 						}
6814 						/* count this one */
6815 						count++;
6816 					} else {
6817 						continue;
6818 					}
6819 					break;
6820 #endif
6821 #ifdef INET6
6822 				case AF_INET6:
6823 					if (ipv6_addr_legal) {
6824 						struct sockaddr_in6 *sin6;
6825 
6826 						sin6 = &sctp_ifa->address.sin6;
6827 						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
6828 							continue;
6829 						}
6830 						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
6831 						    &sin6->sin6_addr) != 0) {
6832 							continue;
6833 						}
6834 						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
6835 							if (local_scope == 0)
6836 								continue;
6837 							if (sin6->sin6_scope_id == 0) {
6838 								if (sa6_recoverscope(sin6) != 0)
6839 									/*
6840 									 *
6841 									 * bad
6842 									 * link
6843 									 *
6844 									 * local
6845 									 *
6846 									 * address
6847 									 */
6848 									continue;
6849 							}
6850 						}
6851 						if ((site_scope == 0) &&
6852 						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
6853 							continue;
6854 						}
6855 						/* count this one */
6856 						count++;
6857 					}
6858 					break;
6859 #endif
6860 				default:
6861 					/* TSNH */
6862 					break;
6863 				}
6864 			}
6865 		}
6866 	} else {
6867 		/*
6868 		 * subset bound case
6869 		 */
6870 		struct sctp_laddr *laddr;
6871 
6872 		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
6873 		    sctp_nxt_addr) {
6874 			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
6875 				continue;
6876 			}
6877 			/* count this one */
6878 			count++;
6879 		}
6880 	}
6881 	SCTP_IPI_ADDR_RUNLOCK();
6882 	return (count);
6883 }
6884 
6885 #if defined(SCTP_LOCAL_TRACE_BUF)
6886 
6887 void
6888 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
6889 {
6890 	uint32_t saveindex, newindex;
6891 
6892 	do {
6893 		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
6894 		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6895 			newindex = 1;
6896 		} else {
6897 			newindex = saveindex + 1;
6898 		}
6899 	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
6900 	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6901 		saveindex = 0;
6902 	}
6903 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
6904 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
6905 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
6906 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
6907 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
6908 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
6909 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
6910 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
6911 }
6912 
6913 #endif
/*
 * Input hook for SCTP packets arriving on the UDP tunneling socket
 * (see sctp_over_udp_start()).  'off' is the offset of the UDP header
 * within mbuf chain 'm'.  The UDP header is stripped and the packet is
 * handed to the normal SCTP input path, carrying along the UDP source
 * port so replies can be tunneled back.
 */
static void
sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
    const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
{
	struct ip *iph;
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	struct mbuf *sp, *last;
	struct udphdr *uhdr;
	uint16_t port;

	if ((m->m_flags & M_PKTHDR) == 0) {
		/* Can't handle one that is not a pkt hdr */
		goto out;
	}
	/* Pull the src port */
	iph = mtod(m, struct ip *);
	uhdr = (struct udphdr *)((caddr_t)iph + off);
	/* No byte swap: the port stays in network byte order. */
	port = uhdr->uh_sport;
	/*
	 * Split out the mbuf chain. Leave the IP header in m, place the
	 * rest in the sp.
	 */
	sp = m_split(m, off, M_NOWAIT);
	if (sp == NULL) {
		/* Gak, drop packet, we can't do a split */
		goto out;
	}
	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
		/* Gak, packet can't have an SCTP header in it - too small */
		m_freem(sp);
		goto out;
	}
	/* Now pull up the UDP header and SCTP header together */
	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
	if (sp == NULL) {
		/* Gak pullup failed */
		goto out;
	}
	/* Trim out the UDP header */
	m_adj(sp, sizeof(struct udphdr));

	/* Now reconstruct the mbuf chain: append sp after the IP header in m. */
	for (last = m; last->m_next; last = last->m_next);
	last->m_next = sp;
	/* Account for the re-attached payload in the pkthdr length. */
	m->m_pkthdr.len += sp->m_pkthdr.len;
	/*
	 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP
	 * checksum and it was valid. Since CSUM_DATA_VALID ==
	 * CSUM_SCTP_VALID this would imply that the HW also verified the
	 * SCTP checksum. Therefore, clear the bit.
	 */
	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
	    "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
	    m->m_pkthdr.len,
	    if_name(m->m_pkthdr.rcvif),
	    (int)m->m_pkthdr.csum_flags, CSUM_BITS);
	m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
	iph = mtod(m, struct ip *);
	/* Fix up the IP length field and dispatch by IP version. */
	switch (iph->ip_v) {
#ifdef INET
	case IPVERSION:
		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
		sctp_input_with_port(m, off, port);
		break;
#endif
#ifdef INET6
	case IPV6_VERSION >> 4:
		ip6 = mtod(m, struct ip6_hdr *);
		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
		sctp6_input_with_port(&m, &off, port);
		break;
#endif
	default:
		/* Unknown IP version (or family compiled out): drop. */
		goto out;
		break;
	}
	return;
out:
	m_freem(m);
}
6996 
6997 #ifdef INET
/*
 * ICMP error handler for the IPv4 UDP tunneling socket.  'vip' points at
 * the inner (quoted) IP header carried in the ICMP message.  The routine
 * locates the association the errored packet belonged to, validates the
 * UDP ports and the SCTP verification tag before trusting the error, and
 * then forwards the event to sctp_notify().
 */
static void
sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED)
{
	struct ip *outer_ip, *inner_ip;
	struct sctphdr *sh;
	struct icmp *icmp;
	struct udphdr *udp;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_init_chunk *ch;
	struct sockaddr_in src, dst;
	uint8_t type, code;

	/*
	 * Recover the ICMP header and the outer IP header, which precede
	 * the quoted inner IP header in memory.
	 */
	inner_ip = (struct ip *)vip;
	icmp = (struct icmp *)((caddr_t)inner_ip -
	    (sizeof(struct icmp) - sizeof(struct ip)));
	outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
	/*
	 * Require that the quote covers the inner IP header, the UDP
	 * header, and the first 8 bytes of the SCTP common header (the
	 * ports and the verification tag).
	 */
	if (ntohs(outer_ip->ip_len) <
	    sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) {
		return;
	}
	udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2));
	sh = (struct sctphdr *)(udp + 1);
	memset(&src, 0, sizeof(struct sockaddr_in));
	src.sin_family = AF_INET;
	src.sin_len = sizeof(struct sockaddr_in);
	src.sin_port = sh->src_port;
	src.sin_addr = inner_ip->ip_src;
	memset(&dst, 0, sizeof(struct sockaddr_in));
	dst.sin_family = AF_INET;
	dst.sin_len = sizeof(struct sockaddr_in);
	dst.sin_port = sh->dest_port;
	dst.sin_addr = inner_ip->ip_dst;
	/*
	 * 'dst' holds the dest of the packet that failed to be sent. 'src'
	 * holds our local endpoint address. Thus we reverse the dst and the
	 * src in the lookup.
	 */
	inp = NULL;
	net = NULL;
	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
	    (struct sockaddr *)&src,
	    &inp, &net, 1,
	    SCTP_DEFAULT_VRFID);
	if ((stcb != NULL) &&
	    (net != NULL) &&
	    (inp != NULL)) {
		/* Check the UDP port numbers */
		if ((udp->uh_dport != net->port) ||
		    (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
			SCTP_TCB_UNLOCK(stcb);
			return;
		}
		/* Check the verification tag */
		if (ntohl(sh->v_tag) != 0) {
			/*
			 * This must be the verification tag used for
			 * sending out packets. We don't consider packets
			 * reflecting the verification tag.
			 */
			if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		} else {
			/*
			 * Zero vtag: only acceptable for an INIT.  Need the
			 * quote to also cover the SCTP common header, the
			 * chunk header, and the initiate tag (8 + 20 bytes
			 * past the inner IP header).
			 */
			if (ntohs(outer_ip->ip_len) >=
			    sizeof(struct ip) +
			    8 + (inner_ip->ip_hl << 2) + 8 + 20) {
				/*
				 * In this case we can check if we got an
				 * INIT chunk and if the initiate tag
				 * matches.
				 */
				ch = (struct sctp_init_chunk *)(sh + 1);
				if ((ch->ch.chunk_type != SCTP_INITIATION) ||
				    (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) {
					SCTP_TCB_UNLOCK(stcb);
					return;
				}
			} else {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		}
		type = icmp->icmp_type;
		code = icmp->icmp_code;
		/*
		 * Port unreachable on the tunnel is reported to SCTP as
		 * protocol unreachable (presumably because the peer is not
		 * listening on the tunneling port -- verify).
		 */
		if ((type == ICMP_UNREACH) &&
		    (code == ICMP_UNREACH_PORT)) {
			code = ICMP_UNREACH_PROTOCOL;
		}
		sctp_notify(inp, stcb, net, type, code,
		    ntohs(inner_ip->ip_len),
		    (uint32_t)ntohs(icmp->icmp_nextmtu));
	} else {
		if ((stcb == NULL) && (inp != NULL)) {
			/* reduce ref-count */
			SCTP_INP_WLOCK(inp);
			SCTP_INP_DECR_REF(inp);
			SCTP_INP_WUNLOCK(inp);
		}
		if (stcb) {
			SCTP_TCB_UNLOCK(stcb);
		}
	}
	return;
}
7105 #endif
7106 
7107 #ifdef INET6
/*
 * ICMPv6 error handler for the IPv6 UDP tunneling socket.  'd' is a
 * struct ip6ctlparam describing the error and the quoted packet.  As in
 * the IPv4 variant, the association is looked up and the UDP ports and
 * SCTP verification tag are validated before sctp6_notify() is called.
 */
static void
sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED)
{
	struct ip6ctlparam *ip6cp;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctphdr sh;
	struct udphdr udp;
	struct sockaddr_in6 src, dst;
	uint8_t type, code;

	ip6cp = (struct ip6ctlparam *)d;
	/*
	 * XXX: We assume that when IPV6 is non NULL, M and OFF are valid.
	 */
	if (ip6cp->ip6c_m == NULL) {
		return;
	}
	/*
	 * Check if we can safely examine the ports and the verification tag
	 * of the SCTP common header.
	 */
	if (ip6cp->ip6c_m->m_pkthdr.len <
	    ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) {
		return;
	}
	/* Copy out the UDP header. */
	memset(&udp, 0, sizeof(struct udphdr));
	m_copydata(ip6cp->ip6c_m,
	    ip6cp->ip6c_off,
	    sizeof(struct udphdr),
	    (caddr_t)&udp);
	/* Copy out the port numbers and the verification tag. */
	memset(&sh, 0, sizeof(struct sctphdr));
	m_copydata(ip6cp->ip6c_m,
	    ip6cp->ip6c_off + sizeof(struct udphdr),
	    sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t),
	    (caddr_t)&sh);
	/* Build the source address, restoring the embedded scope. */
	memset(&src, 0, sizeof(struct sockaddr_in6));
	src.sin6_family = AF_INET6;
	src.sin6_len = sizeof(struct sockaddr_in6);
	src.sin6_port = sh.src_port;
	src.sin6_addr = ip6cp->ip6c_ip6->ip6_src;
	if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
		return;
	}
	/* Build the destination address, restoring the embedded scope. */
	memset(&dst, 0, sizeof(struct sockaddr_in6));
	dst.sin6_family = AF_INET6;
	dst.sin6_len = sizeof(struct sockaddr_in6);
	dst.sin6_port = sh.dest_port;
	dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst;
	if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
		return;
	}
	/*
	 * 'dst' is the peer the errored packet was sent to, 'src' our
	 * local address, so they are swapped in the lookup.
	 */
	inp = NULL;
	net = NULL;
	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
	    (struct sockaddr *)&src,
	    &inp, &net, 1, SCTP_DEFAULT_VRFID);
	if ((stcb != NULL) &&
	    (net != NULL) &&
	    (inp != NULL)) {
		/* Check the UDP port numbers */
		if ((udp.uh_dport != net->port) ||
		    (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
			SCTP_TCB_UNLOCK(stcb);
			return;
		}
		/* Check the verification tag */
		if (ntohl(sh.v_tag) != 0) {
			/*
			 * This must be the verification tag used for
			 * sending out packets. We don't consider packets
			 * reflecting the verification tag.
			 */
			if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		} else {
			/*
			 * Zero vtag: only acceptable for an INIT whose
			 * initiate tag matches our own vtag; verify the
			 * quote is long enough to read it.
			 */
			if (ip6cp->ip6c_m->m_pkthdr.len >=
			    ip6cp->ip6c_off + sizeof(struct udphdr) +
			    sizeof(struct sctphdr) +
			    sizeof(struct sctp_chunkhdr) +
			    offsetof(struct sctp_init, a_rwnd)) {
				/*
				 * In this case we can check if we got an
				 * INIT chunk and if the initiate tag
				 * matches.
				 */
				uint32_t initiate_tag;
				uint8_t chunk_type;

				m_copydata(ip6cp->ip6c_m,
				    ip6cp->ip6c_off +
				    sizeof(struct udphdr) +
				    sizeof(struct sctphdr),
				    sizeof(uint8_t),
				    (caddr_t)&chunk_type);
				m_copydata(ip6cp->ip6c_m,
				    ip6cp->ip6c_off +
				    sizeof(struct udphdr) +
				    sizeof(struct sctphdr) +
				    sizeof(struct sctp_chunkhdr),
				    sizeof(uint32_t),
				    (caddr_t)&initiate_tag);
				if ((chunk_type != SCTP_INITIATION) ||
				    (ntohl(initiate_tag) != stcb->asoc.my_vtag)) {
					SCTP_TCB_UNLOCK(stcb);
					return;
				}
			} else {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		}
		type = ip6cp->ip6c_icmp6->icmp6_type;
		code = ip6cp->ip6c_icmp6->icmp6_code;
		/*
		 * "Port unreachable" on the tunnel is remapped to a
		 * parameter problem / bad next header before notifying.
		 */
		if ((type == ICMP6_DST_UNREACH) &&
		    (code == ICMP6_DST_UNREACH_NOPORT)) {
			type = ICMP6_PARAM_PROB;
			code = ICMP6_PARAMPROB_NEXTHEADER;
		}
		sctp6_notify(inp, stcb, net, type, code,
		    ntohl(ip6cp->ip6c_icmp6->icmp6_mtu));
	} else {
		if ((stcb == NULL) && (inp != NULL)) {
			/* reduce inp's ref-count */
			SCTP_INP_WLOCK(inp);
			SCTP_INP_DECR_REF(inp);
			SCTP_INP_WUNLOCK(inp);
		}
		if (stcb) {
			SCTP_TCB_UNLOCK(stcb);
		}
	}
}
7246 #endif
7247 
void
sctp_over_udp_stop(void)
{
	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
#ifdef INET
	/* Close and forget the IPv4 tunneling socket, if any. */
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		soclose(SCTP_BASE_INFO(udp4_tun_socket));
		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
	}
#endif
#ifdef INET6
	/* Close and forget the IPv6 tunneling socket, if any. */
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		soclose(SCTP_BASE_INFO(udp6_tun_socket));
		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
	}
#endif
}
7268 
/*
 * Create, hook up and bind the kernel UDP sockets used for SCTP-over-UDP
 * tunneling (one per address family compiled in).  Returns 0 on success
 * or an errno value; on any failure all sockets created so far are torn
 * down via sctp_over_udp_stop().
 */
int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;
#ifdef INET
	struct sockaddr_in sin;
#endif
#ifdef INET6
	struct sockaddr_in6 sin6;
#endif
	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	/* The ntohs() is byte-order neutral for the zero check. */
	if (ntohs(port) == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET
	/* Create the IPv4 tunneling socket. */
	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin, 0, sizeof(struct sockaddr_in));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
	    (struct sockaddr *)&sin, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
#ifdef INET6
	/* Create the IPv6 tunneling socket. */
	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp6_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin6, 0, sizeof(struct sockaddr_in6));
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
	    (struct sockaddr *)&sin6, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
	return (0);
}
7355 
7356 /*
7357  * sctp_min_mtu ()returns the minimum of all non-zero arguments.
7358  * If all arguments are zero, zero is returned.
7359  */
7360 uint32_t
7361 sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
7362 {
7363 	if (mtu1 > 0) {
7364 		if (mtu2 > 0) {
7365 			if (mtu3 > 0) {
7366 				return (min(mtu1, min(mtu2, mtu3)));
7367 			} else {
7368 				return (min(mtu1, mtu2));
7369 			}
7370 		} else {
7371 			if (mtu3 > 0) {
7372 				return (min(mtu1, mtu3));
7373 			} else {
7374 				return (mtu1);
7375 			}
7376 		}
7377 	} else {
7378 		if (mtu2 > 0) {
7379 			if (mtu3 > 0) {
7380 				return (min(mtu2, mtu3));
7381 			} else {
7382 				return (mtu2);
7383 			}
7384 		} else {
7385 			return (mtu3);
7386 		}
7387 	}
7388 }
7389 
7390 void
7391 sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
7392 {
7393 	struct in_conninfo inc;
7394 
7395 	memset(&inc, 0, sizeof(struct in_conninfo));
7396 	inc.inc_fibnum = fibnum;
7397 	switch (addr->sa.sa_family) {
7398 #ifdef INET
7399 	case AF_INET:
7400 		inc.inc_faddr = addr->sin.sin_addr;
7401 		break;
7402 #endif
7403 #ifdef INET6
7404 	case AF_INET6:
7405 		inc.inc_flags |= INC_ISIPV6;
7406 		inc.inc6_faddr = addr->sin6.sin6_addr;
7407 		break;
7408 #endif
7409 	default:
7410 		return;
7411 	}
7412 	tcp_hc_updatemtu(&inc, (u_long)mtu);
7413 }
7414 
7415 uint32_t
7416 sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
7417 {
7418 	struct in_conninfo inc;
7419 
7420 	memset(&inc, 0, sizeof(struct in_conninfo));
7421 	inc.inc_fibnum = fibnum;
7422 	switch (addr->sa.sa_family) {
7423 #ifdef INET
7424 	case AF_INET:
7425 		inc.inc_faddr = addr->sin.sin_addr;
7426 		break;
7427 #endif
7428 #ifdef INET6
7429 	case AF_INET6:
7430 		inc.inc_flags |= INC_ISIPV6;
7431 		inc.inc6_faddr = addr->sin6.sin6_addr;
7432 		break;
7433 #endif
7434 	default:
7435 		return (0);
7436 	}
7437 	return ((uint32_t)tcp_hc_getmtu(&inc));
7438 }
7439 
7440 void
7441 sctp_set_state(struct sctp_tcb *stcb, int new_state)
7442 {
7443 #if defined(KDTRACE_HOOKS)
7444 	int old_state = stcb->asoc.state;
7445 #endif
7446 
7447 	KASSERT((new_state & ~SCTP_STATE_MASK) == 0,
7448 	    ("sctp_set_state: Can't set substate (new_state = %x)",
7449 	    new_state));
7450 	stcb->asoc.state = (stcb->asoc.state & ~SCTP_STATE_MASK) | new_state;
7451 	if ((new_state == SCTP_STATE_SHUTDOWN_RECEIVED) ||
7452 	    (new_state == SCTP_STATE_SHUTDOWN_SENT) ||
7453 	    (new_state == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
7454 		SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
7455 	}
7456 #if defined(KDTRACE_HOOKS)
7457 	if (((old_state & SCTP_STATE_MASK) != new_state) &&
7458 	    !(((old_state & SCTP_STATE_MASK) == SCTP_STATE_EMPTY) &&
7459 	    (new_state == SCTP_STATE_INUSE))) {
7460 		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
7461 	}
7462 #endif
7463 }
7464 
7465 void
7466 sctp_add_substate(struct sctp_tcb *stcb, int substate)
7467 {
7468 #if defined(KDTRACE_HOOKS)
7469 	int old_state = stcb->asoc.state;
7470 #endif
7471 
7472 	KASSERT((substate & SCTP_STATE_MASK) == 0,
7473 	    ("sctp_add_substate: Can't set state (substate = %x)",
7474 	    substate));
7475 	stcb->asoc.state |= substate;
7476 #if defined(KDTRACE_HOOKS)
7477 	if (((substate & SCTP_STATE_ABOUT_TO_BE_FREED) &&
7478 	    ((old_state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) ||
7479 	    ((substate & SCTP_STATE_SHUTDOWN_PENDING) &&
7480 	    ((old_state & SCTP_STATE_SHUTDOWN_PENDING) == 0))) {
7481 		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
7482 	}
7483 #endif
7484 }
7485