xref: /freebsd/sys/netinet/sctputil.c (revision f56f82e0)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #include <netinet6/sctp6_var.h>
43 #endif
44 #include <netinet/sctp_header.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_timer.h>
48 #include <netinet/sctp_indata.h>	/* for sctp_deliver_data() */
49 #include <netinet/sctp_auth.h>
50 #include <netinet/sctp_asconf.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #if defined(INET6) || defined(INET)
53 #include <netinet/tcp_var.h>
54 #endif
55 #include <netinet/udp.h>
56 #include <netinet/udp_var.h>
57 #include <sys/proc.h>
58 #ifdef INET6
59 #include <netinet/icmp6.h>
60 #endif
61 
62 
63 #ifndef KTR_SCTP
64 #define KTR_SCTP KTR_SUBSYS
65 #endif
66 
67 extern const struct sctp_cc_functions sctp_cc_functions[];
68 extern const struct sctp_ss_functions sctp_ss_functions[];
69 
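/*
 * Log a socket buffer event: record the socket buffer byte count, the
 * association's view of it (if an stcb is given), and the increment.
 */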
70 void
71 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
72 {
73 	struct sctp_cwnd_log sctp_clog;
74 
75 	sctp_clog.x.sb.stcb = stcb;
76 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
77 	if (stcb)
78 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
79 	else
80 		sctp_clog.x.sb.stcb_sbcc = 0;
81 	sctp_clog.x.sb.incr = incr;
82 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
83 	    SCTP_LOG_EVENT_SB,
84 	    from,
85 	    sctp_clog.x.misc.log1,
86 	    sctp_clog.x.misc.log2,
87 	    sctp_clog.x.misc.log3,
88 	    sctp_clog.x.misc.log4);
89 }
90 
91 void
92 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
93 {
94 	struct sctp_cwnd_log sctp_clog;
95 
96 	sctp_clog.x.close.inp = (void *)inp;
97 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
98 	if (stcb) {
99 		sctp_clog.x.close.stcb = (void *)stcb;
100 		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
101 	} else {
102 		sctp_clog.x.close.stcb = 0;
103 		sctp_clog.x.close.state = 0;
104 	}
105 	sctp_clog.x.close.loc = loc;
106 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
107 	    SCTP_LOG_EVENT_CLOSE,
108 	    0,
109 	    sctp_clog.x.misc.log1,
110 	    sctp_clog.x.misc.log2,
111 	    sctp_clog.x.misc.log3,
112 	    sctp_clog.x.misc.log4);
113 }
114 
115 void
116 rto_logging(struct sctp_nets *net, int from)
117 {
118 	struct sctp_cwnd_log sctp_clog;
119 
120 	memset(&sctp_clog, 0, sizeof(sctp_clog));
121 	sctp_clog.x.rto.net = (void *)net;
122 	sctp_clog.x.rto.rtt = net->rtt / 1000;
123 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
124 	    SCTP_LOG_EVENT_RTT,
125 	    from,
126 	    sctp_clog.x.misc.log1,
127 	    sctp_clog.x.misc.log2,
128 	    sctp_clog.x.misc.log3,
129 	    sctp_clog.x.misc.log4);
130 }
131 
132 void
133 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
134 {
135 	struct sctp_cwnd_log sctp_clog;
136 
137 	sctp_clog.x.strlog.stcb = stcb;
138 	sctp_clog.x.strlog.n_tsn = tsn;
139 	sctp_clog.x.strlog.n_sseq = sseq;
140 	sctp_clog.x.strlog.e_tsn = 0;
141 	sctp_clog.x.strlog.e_sseq = 0;
142 	sctp_clog.x.strlog.strm = stream;
143 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
144 	    SCTP_LOG_EVENT_STRM,
145 	    from,
146 	    sctp_clog.x.misc.log1,
147 	    sctp_clog.x.misc.log2,
148 	    sctp_clog.x.misc.log3,
149 	    sctp_clog.x.misc.log4);
150 }
151 
152 void
153 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
154 {
155 	struct sctp_cwnd_log sctp_clog;
156 
157 	sctp_clog.x.nagle.stcb = (void *)stcb;
158 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
159 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
160 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
161 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
162 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
163 	    SCTP_LOG_EVENT_NAGLE,
164 	    action,
165 	    sctp_clog.x.misc.log1,
166 	    sctp_clog.x.misc.log2,
167 	    sctp_clog.x.misc.log3,
168 	    sctp_clog.x.misc.log4);
169 }
170 
171 void
172 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
173 {
174 	struct sctp_cwnd_log sctp_clog;
175 
176 	sctp_clog.x.sack.cumack = cumack;
177 	sctp_clog.x.sack.oldcumack = old_cumack;
178 	sctp_clog.x.sack.tsn = tsn;
179 	sctp_clog.x.sack.numGaps = gaps;
180 	sctp_clog.x.sack.numDups = dups;
181 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
182 	    SCTP_LOG_EVENT_SACK,
183 	    from,
184 	    sctp_clog.x.misc.log1,
185 	    sctp_clog.x.misc.log2,
186 	    sctp_clog.x.misc.log3,
187 	    sctp_clog.x.misc.log4);
188 }
189 
190 void
191 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
192 {
193 	struct sctp_cwnd_log sctp_clog;
194 
195 	memset(&sctp_clog, 0, sizeof(sctp_clog));
196 	sctp_clog.x.map.base = map;
197 	sctp_clog.x.map.cum = cum;
198 	sctp_clog.x.map.high = high;
199 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
200 	    SCTP_LOG_EVENT_MAP,
201 	    from,
202 	    sctp_clog.x.misc.log1,
203 	    sctp_clog.x.misc.log2,
204 	    sctp_clog.x.misc.log3,
205 	    sctp_clog.x.misc.log4);
206 }
207 
208 void
209 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
210 {
211 	struct sctp_cwnd_log sctp_clog;
212 
213 	memset(&sctp_clog, 0, sizeof(sctp_clog));
214 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
215 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
216 	sctp_clog.x.fr.tsn = tsn;
217 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
218 	    SCTP_LOG_EVENT_FR,
219 	    from,
220 	    sctp_clog.x.misc.log1,
221 	    sctp_clog.x.misc.log2,
222 	    sctp_clog.x.misc.log3,
223 	    sctp_clog.x.misc.log4);
224 }
225 
226 #ifdef SCTP_MBUF_LOGGING
227 void
228 sctp_log_mb(struct mbuf *m, int from)
229 {
230 	struct sctp_cwnd_log sctp_clog;
231 
232 	sctp_clog.x.mb.mp = m;
233 	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
234 	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
235 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
236 	if (SCTP_BUF_IS_EXTENDED(m)) {
237 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
238 		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
239 	} else {
240 		sctp_clog.x.mb.ext = 0;
241 		sctp_clog.x.mb.refcnt = 0;
242 	}
243 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
244 	    SCTP_LOG_EVENT_MBUF,
245 	    from,
246 	    sctp_clog.x.misc.log1,
247 	    sctp_clog.x.misc.log2,
248 	    sctp_clog.x.misc.log3,
249 	    sctp_clog.x.misc.log4);
250 }
251 
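/* Log every mbuf in the chain by walking the m_next pointers. */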
252 void
253 sctp_log_mbc(struct mbuf *m, int from)
254 {
255 	struct mbuf *mat;
256 
257 	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
258 		sctp_log_mb(mat, from);
259 	}
260 }
261 #endif
262 
263 void
264 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
265 {
266 	struct sctp_cwnd_log sctp_clog;
267 
268 	if (control == NULL) {
269 		SCTP_PRINTF("Gak log of NULL?\n");
270 		return;
271 	}
272 	sctp_clog.x.strlog.stcb = control->stcb;
273 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
274 	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
275 	sctp_clog.x.strlog.strm = control->sinfo_stream;
276 	if (poschk != NULL) {
277 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
278 		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
279 	} else {
280 		sctp_clog.x.strlog.e_tsn = 0;
281 		sctp_clog.x.strlog.e_sseq = 0;
282 	}
283 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
284 	    SCTP_LOG_EVENT_STRM,
285 	    from,
286 	    sctp_clog.x.misc.log1,
287 	    sctp_clog.x.misc.log2,
288 	    sctp_clog.x.misc.log3,
289 	    sctp_clog.x.misc.log4);
290 }
291 
292 void
293 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
294 {
295 	struct sctp_cwnd_log sctp_clog;
296 
297 	sctp_clog.x.cwnd.net = net;
298 	if (stcb->asoc.send_queue_cnt > 255)
299 		sctp_clog.x.cwnd.cnt_in_send = 255;
300 	else
301 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
302 	if (stcb->asoc.stream_queue_cnt > 255)
303 		sctp_clog.x.cwnd.cnt_in_str = 255;
304 	else
305 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
306 
307 	if (net) {
308 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
309 		sctp_clog.x.cwnd.inflight = net->flight_size;
310 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
311 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
312 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
313 	}
314 	if (SCTP_CWNDLOG_PRESEND == from) {
315 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
316 	}
317 	sctp_clog.x.cwnd.cwnd_augment = augment;
318 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
319 	    SCTP_LOG_EVENT_CWND,
320 	    from,
321 	    sctp_clog.x.misc.log1,
322 	    sctp_clog.x.misc.log2,
323 	    sctp_clog.x.misc.log3,
324 	    sctp_clog.x.misc.log4);
325 }
326 
327 void
328 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
329 {
330 	struct sctp_cwnd_log sctp_clog;
331 
332 	memset(&sctp_clog, 0, sizeof(sctp_clog));
333 	if (inp) {
334 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
335 
336 	} else {
337 		sctp_clog.x.lock.sock = (void *)NULL;
338 	}
339 	sctp_clog.x.lock.inp = (void *)inp;
340 	if (stcb) {
341 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
342 	} else {
343 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
344 	}
345 	if (inp) {
346 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
347 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
348 	} else {
349 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
350 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
351 	}
352 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
353 	if (inp && (inp->sctp_socket)) {
354 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
355 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
356 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
357 	} else {
358 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
359 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
360 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
361 	}
362 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
363 	    SCTP_LOG_LOCK_EVENT,
364 	    from,
365 	    sctp_clog.x.misc.log1,
366 	    sctp_clog.x.misc.log2,
367 	    sctp_clog.x.misc.log3,
368 	    sctp_clog.x.misc.log4);
369 }
370 
371 void
372 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
373 {
374 	struct sctp_cwnd_log sctp_clog;
375 
376 	memset(&sctp_clog, 0, sizeof(sctp_clog));
377 	sctp_clog.x.cwnd.net = net;
378 	sctp_clog.x.cwnd.cwnd_new_value = error;
379 	sctp_clog.x.cwnd.inflight = net->flight_size;
380 	sctp_clog.x.cwnd.cwnd_augment = burst;
381 	if (stcb->asoc.send_queue_cnt > 255)
382 		sctp_clog.x.cwnd.cnt_in_send = 255;
383 	else
384 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
385 	if (stcb->asoc.stream_queue_cnt > 255)
386 		sctp_clog.x.cwnd.cnt_in_str = 255;
387 	else
388 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
389 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
390 	    SCTP_LOG_EVENT_MAXBURST,
391 	    from,
392 	    sctp_clog.x.misc.log1,
393 	    sctp_clog.x.misc.log2,
394 	    sctp_clog.x.misc.log3,
395 	    sctp_clog.x.misc.log4);
396 }
397 
398 void
399 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
400 {
401 	struct sctp_cwnd_log sctp_clog;
402 
403 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
404 	sctp_clog.x.rwnd.send_size = snd_size;
405 	sctp_clog.x.rwnd.overhead = overhead;
406 	sctp_clog.x.rwnd.new_rwnd = 0;
407 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
408 	    SCTP_LOG_EVENT_RWND,
409 	    from,
410 	    sctp_clog.x.misc.log1,
411 	    sctp_clog.x.misc.log2,
412 	    sctp_clog.x.misc.log3,
413 	    sctp_clog.x.misc.log4);
414 }
415 
416 void
417 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
418 {
419 	struct sctp_cwnd_log sctp_clog;
420 
421 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
422 	sctp_clog.x.rwnd.send_size = flight_size;
423 	sctp_clog.x.rwnd.overhead = overhead;
424 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
425 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
426 	    SCTP_LOG_EVENT_RWND,
427 	    from,
428 	    sctp_clog.x.misc.log1,
429 	    sctp_clog.x.misc.log2,
430 	    sctp_clog.x.misc.log3,
431 	    sctp_clog.x.misc.log4);
432 }
433 
434 #ifdef SCTP_MBCNT_LOGGING
435 static void
436 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
437 {
438 	struct sctp_cwnd_log sctp_clog;
439 
440 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
441 	sctp_clog.x.mbcnt.size_change = book;
442 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
443 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
444 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
445 	    SCTP_LOG_EVENT_MBCNT,
446 	    from,
447 	    sctp_clog.x.misc.log1,
448 	    sctp_clog.x.misc.log2,
449 	    sctp_clog.x.misc.log3,
450 	    sctp_clog.x.misc.log4);
451 }
452 #endif
453 
454 void
455 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
456 {
457 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
458 	    SCTP_LOG_MISC_EVENT,
459 	    from,
460 	    a, b, c, d);
461 }
462 
463 void
464 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
465 {
466 	struct sctp_cwnd_log sctp_clog;
467 
468 	sctp_clog.x.wake.stcb = (void *)stcb;
469 	sctp_clog.x.wake.wake_cnt = wake_cnt;
470 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
471 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
472 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
473 
474 	if (stcb->asoc.stream_queue_cnt < 0xff)
475 		sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
476 	else
477 		sctp_clog.x.wake.stream_qcnt = 0xff;
478 
479 	if (stcb->asoc.chunks_on_out_queue < 0xff)
480 		sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
481 	else
482 		sctp_clog.x.wake.chunks_on_oque = 0xff;
483 
484 	sctp_clog.x.wake.sctpflags = 0;
485 	/* set in the deferred mode stuff */
486 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
487 		sctp_clog.x.wake.sctpflags |= 1;
488 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
489 		sctp_clog.x.wake.sctpflags |= 2;
490 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
491 		sctp_clog.x.wake.sctpflags |= 4;
492 	/* what about the sb */
493 	if (stcb->sctp_socket) {
494 		struct socket *so = stcb->sctp_socket;
495 
496 		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
497 	} else {
498 		sctp_clog.x.wake.sbflags = 0xff;
499 	}
500 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
501 	    SCTP_LOG_EVENT_WAKE,
502 	    from,
503 	    sctp_clog.x.misc.log1,
504 	    sctp_clog.x.misc.log2,
505 	    sctp_clog.x.misc.log3,
506 	    sctp_clog.x.misc.log4);
507 }
508 
509 void
510 sctp_log_block(uint8_t from, struct sctp_association *asoc, size_t sendlen)
511 {
512 	struct sctp_cwnd_log sctp_clog;
513 
514 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
515 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
516 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
517 	sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
518 	sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
519 	sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
520 	sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
521 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
522 	    SCTP_LOG_EVENT_BLOCK,
523 	    from,
524 	    sctp_clog.x.misc.log1,
525 	    sctp_clog.x.misc.log2,
526 	    sctp_clog.x.misc.log3,
527 	    sctp_clog.x.misc.log4);
528 }
529 
530 int
531 sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
532 {
533 	/* May need to fix this if ktrdump does not work */
534 	return (0);
535 }
536 
537 #ifdef SCTP_AUDITING_ENABLED
538 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
539 static int sctp_audit_indx = 0;
540 
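/*
 * Dump the circular audit buffer, starting at the current index and
 * wrapping around, inserting line breaks at the recorded event markers.
 */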
541 static
542 void
543 sctp_print_audit_report(void)
544 {
545 	int i;
546 	int cnt;
547 
548 	cnt = 0;
549 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
550 		if ((sctp_audit_data[i][0] == 0xe0) &&
551 		    (sctp_audit_data[i][1] == 0x01)) {
552 			cnt = 0;
553 			SCTP_PRINTF("\n");
554 		} else if (sctp_audit_data[i][0] == 0xf0) {
555 			cnt = 0;
556 			SCTP_PRINTF("\n");
557 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
558 		    (sctp_audit_data[i][1] == 0x01)) {
559 			SCTP_PRINTF("\n");
560 			cnt = 0;
561 		}
562 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
563 		    (uint32_t)sctp_audit_data[i][1]);
564 		cnt++;
565 		if ((cnt % 14) == 0)
566 			SCTP_PRINTF("\n");
567 	}
568 	for (i = 0; i < sctp_audit_indx; i++) {
569 		if ((sctp_audit_data[i][0] == 0xe0) &&
570 		    (sctp_audit_data[i][1] == 0x01)) {
571 			cnt = 0;
572 			SCTP_PRINTF("\n");
573 		} else if (sctp_audit_data[i][0] == 0xf0) {
574 			cnt = 0;
575 			SCTP_PRINTF("\n");
576 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
577 		    (sctp_audit_data[i][1] == 0x01)) {
578 			SCTP_PRINTF("\n");
579 			cnt = 0;
580 		}
581 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
582 		    (uint32_t)sctp_audit_data[i][1]);
583 		cnt++;
584 		if ((cnt % 14) == 0)
585 			SCTP_PRINTF("\n");
586 	}
587 	SCTP_PRINTF("\n");
588 }
589 
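/*
 * Consistency check: recompute the retransmit count, the total bytes in
 * flight and the per-destination flight sizes from the sent queue, fix up
 * any mismatch in the association, and print an audit report if one was
 * found.
 */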
590 void
591 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
592     struct sctp_nets *net)
593 {
594 	int resend_cnt, tot_out, rep, tot_book_cnt;
595 	struct sctp_nets *lnet;
596 	struct sctp_tmit_chunk *chk;
597 
598 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
599 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
600 	sctp_audit_indx++;
601 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
602 		sctp_audit_indx = 0;
603 	}
604 	if (inp == NULL) {
605 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
606 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
607 		sctp_audit_indx++;
608 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
609 			sctp_audit_indx = 0;
610 		}
611 		return;
612 	}
613 	if (stcb == NULL) {
614 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
615 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
616 		sctp_audit_indx++;
617 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
618 			sctp_audit_indx = 0;
619 		}
620 		return;
621 	}
622 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
623 	sctp_audit_data[sctp_audit_indx][1] =
624 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
625 	sctp_audit_indx++;
626 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
627 		sctp_audit_indx = 0;
628 	}
629 	rep = 0;
630 	tot_book_cnt = 0;
631 	resend_cnt = tot_out = 0;
632 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
633 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
634 			resend_cnt++;
635 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
636 			tot_out += chk->book_size;
637 			tot_book_cnt++;
638 		}
639 	}
640 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
641 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
642 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
643 		sctp_audit_indx++;
644 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
645 			sctp_audit_indx = 0;
646 		}
647 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
648 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
649 		rep = 1;
650 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
651 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
652 		sctp_audit_data[sctp_audit_indx][1] =
653 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
654 		sctp_audit_indx++;
655 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
656 			sctp_audit_indx = 0;
657 		}
658 	}
659 	if (tot_out != stcb->asoc.total_flight) {
660 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
661 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
662 		sctp_audit_indx++;
663 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
664 			sctp_audit_indx = 0;
665 		}
666 		rep = 1;
667 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
668 		    (int)stcb->asoc.total_flight);
669 		stcb->asoc.total_flight = tot_out;
670 	}
671 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
672 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
673 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
674 		sctp_audit_indx++;
675 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
676 			sctp_audit_indx = 0;
677 		}
678 		rep = 1;
679 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
680 
681 		stcb->asoc.total_flight_count = tot_book_cnt;
682 	}
683 	tot_out = 0;
684 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
685 		tot_out += lnet->flight_size;
686 	}
687 	if (tot_out != stcb->asoc.total_flight) {
688 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
689 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
690 		sctp_audit_indx++;
691 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
692 			sctp_audit_indx = 0;
693 		}
694 		rep = 1;
695 		SCTP_PRINTF("real flight:%d net total was %d\n",
696 		    stcb->asoc.total_flight, tot_out);
697 		/* now corrective action */
698 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
699 
700 			tot_out = 0;
701 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
702 				if ((chk->whoTo == lnet) &&
703 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
704 					tot_out += chk->book_size;
705 				}
706 			}
707 			if (lnet->flight_size != tot_out) {
708 				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
709 				    (void *)lnet, lnet->flight_size,
710 				    tot_out);
711 				lnet->flight_size = tot_out;
712 			}
713 		}
714 	}
715 	if (rep) {
716 		sctp_print_audit_report();
717 	}
718 }
719 
720 void
721 sctp_audit_log(uint8_t ev, uint8_t fd)
722 {
723 
724 	sctp_audit_data[sctp_audit_indx][0] = ev;
725 	sctp_audit_data[sctp_audit_indx][1] = fd;
726 	sctp_audit_indx++;
727 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
728 		sctp_audit_indx = 0;
729 	}
730 }
731 
732 #endif
733 
734 /*
735  * sctp_stop_timers_for_shutdown() should be called
736  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
737  * state to make sure that all timers are stopped.
738  */
739 void
740 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
741 {
742 	struct sctp_association *asoc;
743 	struct sctp_nets *net;
744 
745 	asoc = &stcb->asoc;
746 
747 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
748 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
749 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
750 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
751 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
752 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
753 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
754 		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
755 	}
756 }
757 
758 /*
759  * A list of sizes based on typical MTUs; used only if the next hop size is
760  * not returned.
761  */
762 static uint32_t sctp_mtu_sizes[] = {
763 	68,
764 	296,
765 	508,
766 	512,
767 	544,
768 	576,
769 	1006,
770 	1492,
771 	1500,
772 	1536,
773 	2002,
774 	2048,
775 	4352,
776 	4464,
777 	8166,
778 	17914,
779 	32000,
780 	65535
781 };
782 
783 /*
784  * Return the largest MTU smaller than val. If there is no
785  * entry, just return val.
786  */
787 uint32_t
788 sctp_get_prev_mtu(uint32_t val)
789 {
790 	uint32_t i;
791 
792 	if (val <= sctp_mtu_sizes[0]) {
793 		return (val);
794 	}
795 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
796 		if (val <= sctp_mtu_sizes[i]) {
797 			break;
798 		}
799 	}
800 	return (sctp_mtu_sizes[i - 1]);
801 }
802 
803 /*
804  * Return the smallest MTU larger than val. If there is no
805  * entry, just return val.
806  */
807 uint32_t
808 sctp_get_next_mtu(uint32_t val)
809 {
810 	/* select another MTU that is just bigger than this one */
811 	uint32_t i;
812 
813 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
814 		if (val < sctp_mtu_sizes[i]) {
815 			return (sctp_mtu_sizes[i]);
816 		}
817 	}
818 	return (val);
819 }
820 
821 void
822 sctp_fill_random_store(struct sctp_pcb *m)
823 {
824 	/*
825 	 * Here we use MD5/SHA-1 to hash our good random numbers together with
826 	 * our counter. The result becomes our new good random numbers and we
827 	 * then set up to give these out. Note that we do no locking to
828 	 * protect this. That is ok, since if competing callers get here we
829 	 * will just get more gobbledygook in the random store, which is what we
830 	 * want. There is a danger that two callers will use the same random
831 	 * numbers, but that's ok too since that is random as well :->
832 	 */
833 	m->store_at = 0;
834 	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
835 	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
836 	    sizeof(m->random_counter), (uint8_t *)m->random_store);
837 	m->random_counter++;
838 }
839 
840 uint32_t
841 sctp_select_initial_TSN(struct sctp_pcb *inp)
842 {
843 	/*
844 	 * A true implementation should use a random selection process to get
845 	 * the initial stream sequence number, using RFC1750 as a good
846 	 * guideline
847 	 */
848 	uint32_t x, *xp;
849 	uint8_t *p;
850 	int store_at, new_store;
851 
852 	if (inp->initial_sequence_debug != 0) {
853 		uint32_t ret;
854 
855 		ret = inp->initial_sequence_debug;
856 		inp->initial_sequence_debug++;
857 		return (ret);
858 	}
859 retry:
860 	store_at = inp->store_at;
861 	new_store = store_at + sizeof(uint32_t);
862 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
863 		new_store = 0;
864 	}
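	/*
	 * Claim these four bytes of the random store without a lock; if
	 * another thread updated store_at first, simply try again.
	 */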
865 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
866 		goto retry;
867 	}
868 	if (new_store == 0) {
869 		/* Refill the random store */
870 		sctp_fill_random_store(inp);
871 	}
872 	p = &inp->random_store[store_at];
873 	xp = (uint32_t *)p;
874 	x = *xp;
875 	return (x);
876 }
877 
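/*
 * Pick a verification tag (never zero); if 'check' is set, only return a
 * tag that sctp_is_vtag_good() approves for this lport/rport pair.
 */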
878 uint32_t
879 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
880 {
881 	uint32_t x;
882 	struct timeval now;
883 
884 	if (check) {
885 		(void)SCTP_GETTIME_TIMEVAL(&now);
886 	}
887 	for (;;) {
888 		x = sctp_select_initial_TSN(&inp->sctp_ep);
889 		if (x == 0) {
890 			/* we never use 0 */
891 			continue;
892 		}
893 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
894 			break;
895 		}
896 	}
897 	return (x);
898 }
899 
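/*
 * Map the kernel's internal association state bits onto the SCTP_*
 * states reported to user space.
 */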
900 int32_t
901 sctp_map_assoc_state(int kernel_state)
902 {
903 	int32_t user_state;
904 
905 	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
906 		user_state = SCTP_CLOSED;
907 	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
908 		user_state = SCTP_SHUTDOWN_PENDING;
909 	} else {
910 		switch (kernel_state & SCTP_STATE_MASK) {
911 		case SCTP_STATE_EMPTY:
912 			user_state = SCTP_CLOSED;
913 			break;
914 		case SCTP_STATE_INUSE:
915 			user_state = SCTP_CLOSED;
916 			break;
917 		case SCTP_STATE_COOKIE_WAIT:
918 			user_state = SCTP_COOKIE_WAIT;
919 			break;
920 		case SCTP_STATE_COOKIE_ECHOED:
921 			user_state = SCTP_COOKIE_ECHOED;
922 			break;
923 		case SCTP_STATE_OPEN:
924 			user_state = SCTP_ESTABLISHED;
925 			break;
926 		case SCTP_STATE_SHUTDOWN_SENT:
927 			user_state = SCTP_SHUTDOWN_SENT;
928 			break;
929 		case SCTP_STATE_SHUTDOWN_RECEIVED:
930 			user_state = SCTP_SHUTDOWN_RECEIVED;
931 			break;
932 		case SCTP_STATE_SHUTDOWN_ACK_SENT:
933 			user_state = SCTP_SHUTDOWN_ACK_SENT;
934 			break;
935 		default:
936 			user_state = SCTP_CLOSED;
937 			break;
938 		}
939 	}
940 	return (user_state);
941 }
942 
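/*
 * Initialize a newly allocated association from the endpoint defaults and
 * allocate its stream and mapping arrays; returns 0 on success or ENOMEM.
 */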
943 int
944 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
945     uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
946 {
947 	struct sctp_association *asoc;
948 
949 	/*
950 	 * Anything set to zero is taken care of by the allocation routine's
951 	 * bzero
952 	 */
953 
954 	/*
955 	 * Up front, select what scoping to apply on addresses I tell my peer.
956 	 * Not sure what to do with these right now, we will need to come up
957 	 * with a way to set them. We may need to pass them through from the
958 	 * caller in the sctp_aloc_assoc() function.
959 	 */
960 	int i;
961 #if defined(SCTP_DETAILED_STR_STATS)
962 	int j;
963 #endif
964 
965 	asoc = &stcb->asoc;
966 	/* init all variables to a known value. */
967 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
968 	asoc->max_burst = inp->sctp_ep.max_burst;
969 	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
970 	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
971 	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
972 	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
973 	asoc->ecn_supported = inp->ecn_supported;
974 	asoc->prsctp_supported = inp->prsctp_supported;
975 	asoc->idata_supported = inp->idata_supported;
976 	asoc->auth_supported = inp->auth_supported;
977 	asoc->asconf_supported = inp->asconf_supported;
978 	asoc->reconfig_supported = inp->reconfig_supported;
979 	asoc->nrsack_supported = inp->nrsack_supported;
980 	asoc->pktdrop_supported = inp->pktdrop_supported;
982 	asoc->sctp_cmt_pf = (uint8_t)0;
983 	asoc->sctp_frag_point = inp->sctp_frag_point;
984 	asoc->sctp_features = inp->sctp_features;
985 	asoc->default_dscp = inp->sctp_ep.default_dscp;
986 	asoc->max_cwnd = inp->max_cwnd;
987 #ifdef INET6
988 	if (inp->sctp_ep.default_flowlabel) {
989 		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
990 	} else {
991 		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
992 			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
993 			asoc->default_flowlabel &= 0x000fffff;
994 			asoc->default_flowlabel |= 0x80000000;
995 		} else {
996 			asoc->default_flowlabel = 0;
997 		}
998 	}
999 #endif
1000 	asoc->sb_send_resv = 0;
1001 	if (override_tag) {
1002 		asoc->my_vtag = override_tag;
1003 	} else {
1004 		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
1005 	}
1006 	/* Get the nonce tags */
1007 	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1008 	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1009 	asoc->vrf_id = vrf_id;
1010 
1011 #ifdef SCTP_ASOCLOG_OF_TSNS
1012 	asoc->tsn_in_at = 0;
1013 	asoc->tsn_out_at = 0;
1014 	asoc->tsn_in_wrapped = 0;
1015 	asoc->tsn_out_wrapped = 0;
1016 	asoc->cumack_log_at = 0;
1017 	asoc->cumack_log_atsnt = 0;
1018 #endif
1019 #ifdef SCTP_FS_SPEC_LOG
1020 	asoc->fs_index = 0;
1021 #endif
1022 	asoc->refcnt = 0;
1023 	asoc->assoc_up_sent = 0;
1024 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
1025 	    sctp_select_initial_TSN(&inp->sctp_ep);
1026 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1027 	/* we are optimistic here */
1028 	asoc->peer_supports_nat = 0;
1029 	asoc->sent_queue_retran_cnt = 0;
1030 
1031 	/* for CMT */
1032 	asoc->last_net_cmt_send_started = NULL;
1033 
1034 	/* This will need to be adjusted */
1035 	asoc->last_acked_seq = asoc->init_seq_number - 1;
1036 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1037 	asoc->asconf_seq_in = asoc->last_acked_seq;
1038 
1039 	/* here we are different, we hold the next one we expect */
1040 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
1041 
1042 	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
1043 	asoc->initial_rto = inp->sctp_ep.initial_rto;
1044 
1045 	asoc->max_init_times = inp->sctp_ep.max_init_times;
1046 	asoc->max_send_times = inp->sctp_ep.max_send_times;
1047 	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1048 	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1049 	asoc->free_chunk_cnt = 0;
1050 
1051 	asoc->iam_blocking = 0;
1052 	asoc->context = inp->sctp_context;
1053 	asoc->local_strreset_support = inp->local_strreset_support;
1054 	asoc->def_send = inp->def_send;
1055 	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1056 	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1057 	asoc->pr_sctp_cnt = 0;
1058 	asoc->total_output_queue_size = 0;
1059 
1060 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1061 		asoc->scope.ipv6_addr_legal = 1;
1062 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1063 			asoc->scope.ipv4_addr_legal = 1;
1064 		} else {
1065 			asoc->scope.ipv4_addr_legal = 0;
1066 		}
1067 	} else {
1068 		asoc->scope.ipv6_addr_legal = 0;
1069 		asoc->scope.ipv4_addr_legal = 1;
1070 	}
1071 
1072 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1073 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1074 
1075 	asoc->smallest_mtu = inp->sctp_frag_point;
1076 	asoc->minrto = inp->sctp_ep.sctp_minrto;
1077 	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1078 
1079 	asoc->stream_locked_on = 0;
1080 	asoc->ecn_echo_cnt_onq = 0;
1081 	asoc->stream_locked = 0;
1082 
1083 	asoc->send_sack = 1;
1084 
1085 	LIST_INIT(&asoc->sctp_restricted_addrs);
1086 
1087 	TAILQ_INIT(&asoc->nets);
1088 	TAILQ_INIT(&asoc->pending_reply_queue);
1089 	TAILQ_INIT(&asoc->asconf_ack_sent);
1090 	/* Setup to fill the hb random cache at first HB */
1091 	asoc->hb_random_idx = 4;
1092 
1093 	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1094 
1095 	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1096 	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1097 
1098 	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1099 	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1100 
1101 	/*
1102 	 * Now the stream parameters: here we allocate space for all streams
1103 	 * that we request by default.
1104 	 */
1105 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1106 	    o_strms;
1107 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1108 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1109 	    SCTP_M_STRMO);
1110 	if (asoc->strmout == NULL) {
1111 		/* big trouble no memory */
1112 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1113 		return (ENOMEM);
1114 	}
1115 	for (i = 0; i < asoc->streamoutcnt; i++) {
1116 		/*
1117 		 * The inbound side must be set to 0xffff. Also NOTE that when we
1118 		 * get the INIT-ACK back (for the INIT sender) we MUST reduce the
1119 		 * count (streamoutcnt), but first check whether we sent to any of
1120 		 * the upper streams that were dropped (if some were). Those that
1121 		 * were dropped must be reported to the upper layer as having
1122 		 * failed to send.
1123 		 */
1124 		asoc->strmout[i].next_mid_ordered = 0;
1125 		asoc->strmout[i].next_mid_unordered = 0;
1126 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1127 		asoc->strmout[i].chunks_on_queues = 0;
1128 #if defined(SCTP_DETAILED_STR_STATS)
1129 		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1130 			asoc->strmout[i].abandoned_sent[j] = 0;
1131 			asoc->strmout[i].abandoned_unsent[j] = 0;
1132 		}
1133 #else
1134 		asoc->strmout[i].abandoned_sent[0] = 0;
1135 		asoc->strmout[i].abandoned_unsent[0] = 0;
1136 #endif
1137 		asoc->strmout[i].sid = i;
1138 		asoc->strmout[i].last_msg_incomplete = 0;
1139 		asoc->strmout[i].state = SCTP_STREAM_OPENING;
1140 		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
1141 	}
1142 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1143 
1144 	/* Now the mapping array */
1145 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1146 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1147 	    SCTP_M_MAP);
1148 	if (asoc->mapping_array == NULL) {
1149 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1150 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1151 		return (ENOMEM);
1152 	}
1153 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1154 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1155 	    SCTP_M_MAP);
1156 	if (asoc->nr_mapping_array == NULL) {
1157 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1158 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1159 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1160 		return (ENOMEM);
1161 	}
1162 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1163 
1164 	/* Now the init of the other outqueues */
1165 	TAILQ_INIT(&asoc->free_chunks);
1166 	TAILQ_INIT(&asoc->control_send_queue);
1167 	TAILQ_INIT(&asoc->asconf_send_queue);
1168 	TAILQ_INIT(&asoc->send_queue);
1169 	TAILQ_INIT(&asoc->sent_queue);
1170 	TAILQ_INIT(&asoc->resetHead);
1171 	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1172 	TAILQ_INIT(&asoc->asconf_queue);
1173 	/* authentication fields */
1174 	asoc->authinfo.random = NULL;
1175 	asoc->authinfo.active_keyid = 0;
1176 	asoc->authinfo.assoc_key = NULL;
1177 	asoc->authinfo.assoc_keyid = 0;
1178 	asoc->authinfo.recv_key = NULL;
1179 	asoc->authinfo.recv_keyid = 0;
1180 	LIST_INIT(&asoc->shared_keys);
1181 	asoc->marked_retrans = 0;
1182 	asoc->port = inp->sctp_ep.port;
1183 	asoc->timoinit = 0;
1184 	asoc->timodata = 0;
1185 	asoc->timosack = 0;
1186 	asoc->timoshutdown = 0;
1187 	asoc->timoheartbeat = 0;
1188 	asoc->timocookie = 0;
1189 	asoc->timoshutdownack = 0;
1190 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1191 	asoc->discontinuity_time = asoc->start_time;
1192 	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1193 		asoc->abandoned_unsent[i] = 0;
1194 		asoc->abandoned_sent[i] = 0;
1195 	}
1196 	/*
1197 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1198 	 * freed later when the association is freed.
1199 	 */
1200 	return (0);
1201 }
1202 
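/*
 * Debug dump of both TSN mapping arrays (renegable and non-renegable),
 * omitting the trailing all-zero entries.
 */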
1203 void
1204 sctp_print_mapping_array(struct sctp_association *asoc)
1205 {
1206 	unsigned int i, limit;
1207 
1208 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1209 	    asoc->mapping_array_size,
1210 	    asoc->mapping_array_base_tsn,
1211 	    asoc->cumulative_tsn,
1212 	    asoc->highest_tsn_inside_map,
1213 	    asoc->highest_tsn_inside_nr_map);
1214 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1215 		if (asoc->mapping_array[limit - 1] != 0) {
1216 			break;
1217 		}
1218 	}
1219 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1220 	for (i = 0; i < limit; i++) {
1221 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1222 	}
1223 	if (limit % 16)
1224 		SCTP_PRINTF("\n");
1225 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1226 		if (asoc->nr_mapping_array[limit - 1]) {
1227 			break;
1228 		}
1229 	}
1230 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1231 	for (i = 0; i < limit; i++) {
1232 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1233 	}
1234 	if (limit % 16)
1235 		SCTP_PRINTF("\n");
1236 }
1237 
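/*
 * Grow both mapping arrays so that 'needed' additional TSNs fit; on
 * allocation failure the old arrays are left untouched and -1 is returned.
 */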
1238 int
1239 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1240 {
1241 	/* mapping array needs to grow */
1242 	uint8_t *new_array1, *new_array2;
1243 	uint32_t new_size;
1244 
1245 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1246 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1247 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1248 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1249 		/* can't get more, forget it */
1250 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1251 		if (new_array1) {
1252 			SCTP_FREE(new_array1, SCTP_M_MAP);
1253 		}
1254 		if (new_array2) {
1255 			SCTP_FREE(new_array2, SCTP_M_MAP);
1256 		}
1257 		return (-1);
1258 	}
1259 	memset(new_array1, 0, new_size);
1260 	memset(new_array2, 0, new_size);
1261 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1262 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1263 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1264 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1265 	asoc->mapping_array = new_array1;
1266 	asoc->nr_mapping_array = new_array2;
1267 	asoc->mapping_array_size = new_size;
1268 	return (0);
1269 }
1270 
1271 
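/*
 * Core of the association iterator: walk the endpoint list (or a single
 * endpoint), apply the per-endpoint and per-association callbacks to every
 * match, and periodically drop the locks so other threads can make progress.
 */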
1272 static void
1273 sctp_iterator_work(struct sctp_iterator *it)
1274 {
1275 	int iteration_count = 0;
1276 	int inp_skip = 0;
1277 	int first_in = 1;
1278 	struct sctp_inpcb *tinp;
1279 
1280 	SCTP_INP_INFO_RLOCK();
1281 	SCTP_ITERATOR_LOCK();
1282 	sctp_it_ctl.cur_it = it;
1283 	if (it->inp) {
1284 		SCTP_INP_RLOCK(it->inp);
1285 		SCTP_INP_DECR_REF(it->inp);
1286 	}
1287 	if (it->inp == NULL) {
1288 		/* iterator is complete */
1289 done_with_iterator:
1290 		sctp_it_ctl.cur_it = NULL;
1291 		SCTP_ITERATOR_UNLOCK();
1292 		SCTP_INP_INFO_RUNLOCK();
1293 		if (it->function_atend != NULL) {
1294 			(*it->function_atend) (it->pointer, it->val);
1295 		}
1296 		SCTP_FREE(it, SCTP_M_ITER);
1297 		return;
1298 	}
1299 select_a_new_ep:
1300 	if (first_in) {
1301 		first_in = 0;
1302 	} else {
1303 		SCTP_INP_RLOCK(it->inp);
1304 	}
1305 	while (((it->pcb_flags) &&
1306 	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
1307 	    ((it->pcb_features) &&
1308 	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
1309 		/* endpoint flags or features don't match, so keep looking */
1310 		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1311 			SCTP_INP_RUNLOCK(it->inp);
1312 			goto done_with_iterator;
1313 		}
1314 		tinp = it->inp;
1315 		it->inp = LIST_NEXT(it->inp, sctp_list);
1316 		SCTP_INP_RUNLOCK(tinp);
1317 		if (it->inp == NULL) {
1318 			goto done_with_iterator;
1319 		}
1320 		SCTP_INP_RLOCK(it->inp);
1321 	}
1322 	/* now go through each assoc which is in the desired state */
1323 	if (it->done_current_ep == 0) {
1324 		if (it->function_inp != NULL)
1325 			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
1326 		it->done_current_ep = 1;
1327 	}
1328 	if (it->stcb == NULL) {
1329 		/* run the per instance function */
1330 		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
1331 	}
1332 	if ((inp_skip) || it->stcb == NULL) {
1333 		if (it->function_inp_end != NULL) {
1334 			inp_skip = (*it->function_inp_end) (it->inp,
1335 			    it->pointer,
1336 			    it->val);
1337 		}
1338 		SCTP_INP_RUNLOCK(it->inp);
1339 		goto no_stcb;
1340 	}
1341 	while (it->stcb) {
1342 		SCTP_TCB_LOCK(it->stcb);
1343 		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
1344 			/* not in the right state... keep looking */
1345 			SCTP_TCB_UNLOCK(it->stcb);
1346 			goto next_assoc;
1347 		}
1348 		/* see if we have hit the iterator loop limit */
1349 		iteration_count++;
1350 		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
1351 			/* Pause to let others grab the lock */
1352 			atomic_add_int(&it->stcb->asoc.refcnt, 1);
1353 			SCTP_TCB_UNLOCK(it->stcb);
1354 			SCTP_INP_INCR_REF(it->inp);
1355 			SCTP_INP_RUNLOCK(it->inp);
1356 			SCTP_ITERATOR_UNLOCK();
1357 			SCTP_INP_INFO_RUNLOCK();
1358 			SCTP_INP_INFO_RLOCK();
1359 			SCTP_ITERATOR_LOCK();
1360 			if (sctp_it_ctl.iterator_flags) {
1361 				/* We won't be staying here */
1362 				SCTP_INP_DECR_REF(it->inp);
1363 				atomic_add_int(&it->stcb->asoc.refcnt, -1);
1364 				if (sctp_it_ctl.iterator_flags &
1365 				    SCTP_ITERATOR_STOP_CUR_IT) {
1366 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
1367 					goto done_with_iterator;
1368 				}
1369 				if (sctp_it_ctl.iterator_flags &
1370 				    SCTP_ITERATOR_STOP_CUR_INP) {
1371 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
1372 					goto no_stcb;
1373 				}
1374 				/* If we reach here huh? */
1375 				SCTP_PRINTF("Unknown it ctl flag %x\n",
1376 				    sctp_it_ctl.iterator_flags);
1377 				sctp_it_ctl.iterator_flags = 0;
1378 			}
1379 			SCTP_INP_RLOCK(it->inp);
1380 			SCTP_INP_DECR_REF(it->inp);
1381 			SCTP_TCB_LOCK(it->stcb);
1382 			atomic_add_int(&it->stcb->asoc.refcnt, -1);
1383 			iteration_count = 0;
1384 		}
1385 		/* run function on this one */
1386 		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);
1387 
1388 		/*
1389 		 * we lie here, it really needs to have its own type but
1390 		 * first I must verify that this won't affect things :-0
1391 		 */
1392 		if (it->no_chunk_output == 0)
1393 			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1394 
1395 		SCTP_TCB_UNLOCK(it->stcb);
1396 next_assoc:
1397 		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1398 		if (it->stcb == NULL) {
1399 			/* Run last function */
1400 			if (it->function_inp_end != NULL) {
1401 				inp_skip = (*it->function_inp_end) (it->inp,
1402 				    it->pointer,
1403 				    it->val);
1404 			}
1405 		}
1406 	}
1407 	SCTP_INP_RUNLOCK(it->inp);
1408 no_stcb:
1409 	/* done with all assocs on this endpoint, move on to next endpoint */
1410 	it->done_current_ep = 0;
1411 	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1412 		it->inp = NULL;
1413 	} else {
1414 		it->inp = LIST_NEXT(it->inp, sctp_list);
1415 	}
1416 	if (it->inp == NULL) {
1417 		goto done_with_iterator;
1418 	}
1419 	goto select_a_new_ep;
1420 }
1421 
1422 void
1423 sctp_iterator_worker(void)
1424 {
1425 	struct sctp_iterator *it, *nit;
1426 
1427 	/* This function is called with the WQ lock in place */
1428 
1429 	sctp_it_ctl.iterator_running = 1;
1430 	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1431 		/* now let's work on this one */
1432 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1433 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1434 		CURVNET_SET(it->vn);
1435 		sctp_iterator_work(it);
1436 		CURVNET_RESTORE();
1437 		SCTP_IPI_ITERATOR_WQ_LOCK();
1438 		/* sa_ignore FREED_MEMORY */
1439 	}
1440 	sctp_it_ctl.iterator_running = 0;
1441 	return;
1442 }
1443 
1444 
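/*
 * Move all queued address-change work items onto a private list and start
 * an ASCONF iterator over the bound-all endpoints to process them.
 */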
1445 static void
1446 sctp_handle_addr_wq(void)
1447 {
1448 	/* deal with the ADDR wq from the rtsock calls */
1449 	struct sctp_laddr *wi, *nwi;
1450 	struct sctp_asconf_iterator *asc;
1451 
1452 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1453 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1454 	if (asc == NULL) {
1455 		/* Try later, no memory */
1456 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1457 		    (struct sctp_inpcb *)NULL,
1458 		    (struct sctp_tcb *)NULL,
1459 		    (struct sctp_nets *)NULL);
1460 		return;
1461 	}
1462 	LIST_INIT(&asc->list_of_work);
1463 	asc->cnt = 0;
1464 
1465 	SCTP_WQ_ADDR_LOCK();
1466 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1467 		LIST_REMOVE(wi, sctp_nxt_addr);
1468 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1469 		asc->cnt++;
1470 	}
1471 	SCTP_WQ_ADDR_UNLOCK();
1472 
1473 	if (asc->cnt == 0) {
1474 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1475 	} else {
1476 		int ret;
1477 
1478 		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
1479 		    sctp_asconf_iterator_stcb,
1480 		    NULL,	/* No ep end for boundall */
1481 		    SCTP_PCB_FLAGS_BOUNDALL,
1482 		    SCTP_PCB_ANY_FEATURES,
1483 		    SCTP_ASOC_ANY_STATE,
1484 		    (void *)asc, 0,
1485 		    sctp_asconf_iterator_end, NULL, 0);
1486 		if (ret) {
1487 			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
1488 			/*
1489 			 * Free it if we are stopping, otherwise put it back
1490 			 * on the addr_wq.
1491 			 */
1492 			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
1493 				sctp_asconf_iterator_end(asc, 0);
1494 			} else {
1495 				SCTP_WQ_ADDR_LOCK();
1496 				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
1497 					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
1498 				}
1499 				SCTP_WQ_ADDR_UNLOCK();
1500 				SCTP_FREE(asc, SCTP_M_ASC_IT);
1501 			}
1502 		}
1503 	}
1504 }
1505 
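/*
 * Common callout handler for every SCTP timer type: validate the timer,
 * take the necessary references and locks, and dispatch on tmr->type.
 */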
1506 void
1507 sctp_timeout_handler(void *t)
1508 {
1509 	struct sctp_inpcb *inp;
1510 	struct sctp_tcb *stcb;
1511 	struct sctp_nets *net;
1512 	struct sctp_timer *tmr;
1513 	struct mbuf *op_err;
1514 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1515 	struct socket *so;
1516 #endif
1517 	int did_output;
1518 	int type;
1519 
1520 	tmr = (struct sctp_timer *)t;
1521 	inp = (struct sctp_inpcb *)tmr->ep;
1522 	stcb = (struct sctp_tcb *)tmr->tcb;
1523 	net = (struct sctp_nets *)tmr->net;
1524 	CURVNET_SET((struct vnet *)tmr->vnet);
1525 	did_output = 1;
1526 
1527 #ifdef SCTP_AUDITING_ENABLED
1528 	sctp_audit_log(0xF0, (uint8_t)tmr->type);
1529 	sctp_auditing(3, inp, stcb, net);
1530 #endif
1531 
1532 	/* sanity checks... */
1533 	if (tmr->self != (void *)tmr) {
1534 		/*
1535 		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
1536 		 * (void *)tmr);
1537 		 */
1538 		CURVNET_RESTORE();
1539 		return;
1540 	}
1541 	tmr->stopped_from = 0xa001;
1542 	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
1543 		/*
1544 		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
1545 		 * tmr->type);
1546 		 */
1547 		CURVNET_RESTORE();
1548 		return;
1549 	}
1550 	tmr->stopped_from = 0xa002;
1551 	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
1552 		CURVNET_RESTORE();
1553 		return;
1554 	}
1555 	/* if this is an iterator timeout, get the struct and clear inp */
1556 	tmr->stopped_from = 0xa003;
1557 	if (inp) {
1558 		SCTP_INP_INCR_REF(inp);
1559 		if ((inp->sctp_socket == NULL) &&
1560 		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
1561 		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
1562 		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
1563 		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
1564 		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
1565 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
1566 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
1567 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
1568 		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
1569 		    ) {
1570 			SCTP_INP_DECR_REF(inp);
1571 			CURVNET_RESTORE();
1572 			return;
1573 		}
1574 	}
1575 	tmr->stopped_from = 0xa004;
1576 	if (stcb) {
1577 		atomic_add_int(&stcb->asoc.refcnt, 1);
1578 		if (stcb->asoc.state == 0) {
1579 			atomic_add_int(&stcb->asoc.refcnt, -1);
1580 			if (inp) {
1581 				SCTP_INP_DECR_REF(inp);
1582 			}
1583 			CURVNET_RESTORE();
1584 			return;
1585 		}
1586 	}
1587 	type = tmr->type;
1588 	tmr->stopped_from = 0xa005;
1589 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", type);
1590 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1591 		if (inp) {
1592 			SCTP_INP_DECR_REF(inp);
1593 		}
1594 		if (stcb) {
1595 			atomic_add_int(&stcb->asoc.refcnt, -1);
1596 		}
1597 		CURVNET_RESTORE();
1598 		return;
1599 	}
1600 	tmr->stopped_from = 0xa006;
1601 
1602 	if (stcb) {
1603 		SCTP_TCB_LOCK(stcb);
1604 		atomic_add_int(&stcb->asoc.refcnt, -1);
1605 		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
1606 		    ((stcb->asoc.state == 0) ||
1607 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1608 			SCTP_TCB_UNLOCK(stcb);
1609 			if (inp) {
1610 				SCTP_INP_DECR_REF(inp);
1611 			}
1612 			CURVNET_RESTORE();
1613 			return;
1614 		}
1615 	}
1616 	/* record in stopped_from which timeout type occurred */
1617 	tmr->stopped_from = type;
1618 
1619 	/* mark as being serviced now */
1620 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1621 		/*
1622 		 * Callout has been rescheduled.
1623 		 */
1624 		goto get_out;
1625 	}
1626 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1627 		/*
1628 		 * Not active, so no action.
1629 		 */
1630 		goto get_out;
1631 	}
1632 	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1633 
1634 	/* call the handler for the appropriate timer type */
1635 	switch (type) {
1636 	case SCTP_TIMER_TYPE_ZERO_COPY:
1637 		if (inp == NULL) {
1638 			break;
1639 		}
1640 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1641 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
1642 		}
1643 		break;
1644 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1645 		if (inp == NULL) {
1646 			break;
1647 		}
1648 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1649 			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
1650 		}
1651 		break;
1652 	case SCTP_TIMER_TYPE_ADDR_WQ:
1653 		sctp_handle_addr_wq();
1654 		break;
1655 	case SCTP_TIMER_TYPE_SEND:
1656 		if ((stcb == NULL) || (inp == NULL)) {
1657 			break;
1658 		}
1659 		SCTP_STAT_INCR(sctps_timodata);
1660 		stcb->asoc.timodata++;
1661 		stcb->asoc.num_send_timers_up--;
1662 		if (stcb->asoc.num_send_timers_up < 0) {
1663 			stcb->asoc.num_send_timers_up = 0;
1664 		}
1665 		SCTP_TCB_LOCK_ASSERT(stcb);
1666 		if (sctp_t3rxt_timer(inp, stcb, net)) {
1667 			/* no need to unlock on tcb, it's gone */
1668 
1669 			goto out_decr;
1670 		}
1671 		SCTP_TCB_LOCK_ASSERT(stcb);
1672 #ifdef SCTP_AUDITING_ENABLED
1673 		sctp_auditing(4, inp, stcb, net);
1674 #endif
1675 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1676 		if ((stcb->asoc.num_send_timers_up == 0) &&
1677 		    (stcb->asoc.sent_queue_cnt > 0)) {
1678 			struct sctp_tmit_chunk *chk;
1679 
1680 			/*
1681 			 * safeguard. If there are still chunks on the sent
1682 			 * queue but no timers running, something is
1683 			 * wrong... so we start a timer on the first chunk
1684 			 * on the sent queue, on whatever net it is sent to.
1685 			 */
1686 			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
1687 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
1688 			    chk->whoTo);
1689 		}
1690 		break;
1691 	case SCTP_TIMER_TYPE_INIT:
1692 		if ((stcb == NULL) || (inp == NULL)) {
1693 			break;
1694 		}
1695 		SCTP_STAT_INCR(sctps_timoinit);
1696 		stcb->asoc.timoinit++;
1697 		if (sctp_t1init_timer(inp, stcb, net)) {
1698 			/* no need to unlock on tcb, it's gone */
1699 			goto out_decr;
1700 		}
1701 		/* We do output but not here */
1702 		did_output = 0;
1703 		break;
1704 	case SCTP_TIMER_TYPE_RECV:
1705 		if ((stcb == NULL) || (inp == NULL)) {
1706 			break;
1707 		}
1708 		SCTP_STAT_INCR(sctps_timosack);
1709 		stcb->asoc.timosack++;
1710 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
1711 #ifdef SCTP_AUDITING_ENABLED
1712 		sctp_auditing(4, inp, stcb, net);
1713 #endif
1714 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1715 		break;
1716 	case SCTP_TIMER_TYPE_SHUTDOWN:
1717 		if ((stcb == NULL) || (inp == NULL)) {
1718 			break;
1719 		}
1720 		if (sctp_shutdown_timer(inp, stcb, net)) {
1721 			/* no need to unlock on tcb, it's gone */
1722 			goto out_decr;
1723 		}
1724 		SCTP_STAT_INCR(sctps_timoshutdown);
1725 		stcb->asoc.timoshutdown++;
1726 #ifdef SCTP_AUDITING_ENABLED
1727 		sctp_auditing(4, inp, stcb, net);
1728 #endif
1729 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1730 		break;
1731 	case SCTP_TIMER_TYPE_HEARTBEAT:
1732 		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
1733 			break;
1734 		}
1735 		SCTP_STAT_INCR(sctps_timoheartbeat);
1736 		stcb->asoc.timoheartbeat++;
1737 		if (sctp_heartbeat_timer(inp, stcb, net)) {
1738 			/* no need to unlock on tcb, it's gone */
1739 			goto out_decr;
1740 		}
1741 #ifdef SCTP_AUDITING_ENABLED
1742 		sctp_auditing(4, inp, stcb, net);
1743 #endif
1744 		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
1745 			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
1746 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
1747 		}
1748 		break;
1749 	case SCTP_TIMER_TYPE_COOKIE:
1750 		if ((stcb == NULL) || (inp == NULL)) {
1751 			break;
1752 		}
1753 		if (sctp_cookie_timer(inp, stcb, net)) {
1754 			/* no need to unlock the tcb, it's gone */
1755 			goto out_decr;
1756 		}
1757 		SCTP_STAT_INCR(sctps_timocookie);
1758 		stcb->asoc.timocookie++;
1759 #ifdef SCTP_AUDITING_ENABLED
1760 		sctp_auditing(4, inp, stcb, net);
1761 #endif
1762 		/*
1763 		 * We consider T3 and Cookie timer pretty much the same with
1764 		 * respect to where from in chunk_output.
1765 		 */
1766 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1767 		break;
1768 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1769 		{
1770 			struct timeval tv;
1771 			int i, secret;
1772 
1773 			if (inp == NULL) {
1774 				break;
1775 			}
1776 			SCTP_STAT_INCR(sctps_timosecret);
1777 			(void)SCTP_GETTIME_TIMEVAL(&tv);
1778 			SCTP_INP_WLOCK(inp);
1779 			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1780 			inp->sctp_ep.last_secret_number =
1781 			    inp->sctp_ep.current_secret_number;
1782 			inp->sctp_ep.current_secret_number++;
1783 			if (inp->sctp_ep.current_secret_number >=
1784 			    SCTP_HOW_MANY_SECRETS) {
1785 				inp->sctp_ep.current_secret_number = 0;
1786 			}
1787 			secret = (int)inp->sctp_ep.current_secret_number;
1788 			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1789 				inp->sctp_ep.secret_key[secret][i] =
1790 				    sctp_select_initial_TSN(&inp->sctp_ep);
1791 			}
1792 			SCTP_INP_WUNLOCK(inp);
1793 			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
1794 		}
1795 		did_output = 0;
1796 		break;
1797 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1798 		if ((stcb == NULL) || (inp == NULL)) {
1799 			break;
1800 		}
1801 		SCTP_STAT_INCR(sctps_timopathmtu);
1802 		sctp_pathmtu_timer(inp, stcb, net);
1803 		did_output = 0;
1804 		break;
1805 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1806 		if ((stcb == NULL) || (inp == NULL)) {
1807 			break;
1808 		}
1809 		if (sctp_shutdownack_timer(inp, stcb, net)) {
1810 			/* no need to unlock the tcb, it's gone */
1811 			goto out_decr;
1812 		}
1813 		SCTP_STAT_INCR(sctps_timoshutdownack);
1814 		stcb->asoc.timoshutdownack++;
1815 #ifdef SCTP_AUDITING_ENABLED
1816 		sctp_auditing(4, inp, stcb, net);
1817 #endif
1818 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
1819 		break;
1820 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1821 		if ((stcb == NULL) || (inp == NULL)) {
1822 			break;
1823 		}
1824 		SCTP_STAT_INCR(sctps_timoshutdownguard);
1825 		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
1826 		    "Shutdown guard timer expired");
1827 		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
1828 		/* no need to unlock the tcb, it's gone */
1829 		goto out_decr;
1830 
1831 	case SCTP_TIMER_TYPE_STRRESET:
1832 		if ((stcb == NULL) || (inp == NULL)) {
1833 			break;
1834 		}
1835 		if (sctp_strreset_timer(inp, stcb, net)) {
1836 			/* no need to unlock the tcb, it's gone */
1837 			goto out_decr;
1838 		}
1839 		SCTP_STAT_INCR(sctps_timostrmrst);
1840 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
1841 		break;
1842 	case SCTP_TIMER_TYPE_ASCONF:
1843 		if ((stcb == NULL) || (inp == NULL)) {
1844 			break;
1845 		}
1846 		if (sctp_asconf_timer(inp, stcb, net)) {
1847 			/* no need to unlock the tcb, it's gone */
1848 			goto out_decr;
1849 		}
1850 		SCTP_STAT_INCR(sctps_timoasconf);
1851 #ifdef SCTP_AUDITING_ENABLED
1852 		sctp_auditing(4, inp, stcb, net);
1853 #endif
1854 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
1855 		break;
1856 	case SCTP_TIMER_TYPE_PRIM_DELETED:
1857 		if ((stcb == NULL) || (inp == NULL)) {
1858 			break;
1859 		}
1860 		sctp_delete_prim_timer(inp, stcb, net);
1861 		SCTP_STAT_INCR(sctps_timodelprim);
1862 		break;
1863 
1864 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1865 		if ((stcb == NULL) || (inp == NULL)) {
1866 			break;
1867 		}
1868 		SCTP_STAT_INCR(sctps_timoautoclose);
1869 		sctp_autoclose_timer(inp, stcb, net);
1870 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
1871 		did_output = 0;
1872 		break;
1873 	case SCTP_TIMER_TYPE_ASOCKILL:
1874 		if ((stcb == NULL) || (inp == NULL)) {
1875 			break;
1876 		}
1877 		SCTP_STAT_INCR(sctps_timoassockill);
1878 		/* Can we free it yet? */
1879 		SCTP_INP_DECR_REF(inp);
1880 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
1881 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
1882 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1883 		so = SCTP_INP_SO(inp);
1884 		atomic_add_int(&stcb->asoc.refcnt, 1);
1885 		SCTP_TCB_UNLOCK(stcb);
1886 		SCTP_SOCKET_LOCK(so, 1);
1887 		SCTP_TCB_LOCK(stcb);
1888 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1889 #endif
1890 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
1891 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
1892 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1893 		SCTP_SOCKET_UNLOCK(so, 1);
1894 #endif
1895 		/*
1896 		 * free_assoc always unlocks (or destroys) the tcb lock, so
1897 		 * prevent a duplicate unlock or an unlock of a freed mutex.
1898 		 */
1899 		stcb = NULL;
1900 		goto out_no_decr;
1901 	case SCTP_TIMER_TYPE_INPKILL:
1902 		SCTP_STAT_INCR(sctps_timoinpkill);
1903 		if (inp == NULL) {
1904 			break;
1905 		}
1906 		/*
1907 		 * special case, take away our increment since WE are the
1908 		 * killer
1909 		 */
1910 		SCTP_INP_DECR_REF(inp);
1911 		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
1912 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
1913 		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
1914 		    SCTP_CALLED_FROM_INPKILL_TIMER);
1915 		inp = NULL;
1916 		goto out_no_decr;
1917 	default:
1918 		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
1919 		    type);
1920 		break;
1921 	}
1922 #ifdef SCTP_AUDITING_ENABLED
1923 	sctp_audit_log(0xF1, (uint8_t)type);
1924 	if (inp)
1925 		sctp_auditing(5, inp, stcb, net);
1926 #endif
1927 	if ((did_output) && stcb) {
1928 		/*
1929 		 * Now we need to clean up the control chunk chain if an
1930 		 * ECNE is on it. It must be marked as UNSENT again so the
1931 		 * next call will continue to send it until we receive a
1932 		 * CWR that removes it. It is, however, unlikely that we
1933 		 * will find an ECN echo on the chain.
1934 		 */
1935 		sctp_fix_ecn_echo(&stcb->asoc);
1936 	}
1937 get_out:
1938 	if (stcb) {
1939 		SCTP_TCB_UNLOCK(stcb);
1940 	}
1941 out_decr:
1942 	if (inp) {
1943 		SCTP_INP_DECR_REF(inp);
1944 	}
1945 out_no_decr:
1946 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type = %d)\n", type);
1947 	CURVNET_RESTORE();
1948 }
1949 
1950 void
1951 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1952     struct sctp_nets *net)
1953 {
1954 	uint32_t to_ticks;
1955 	struct sctp_timer *tmr;
1956 
1957 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1958 		return;
1959 
1960 	tmr = NULL;
1961 	if (stcb) {
1962 		SCTP_TCB_LOCK_ASSERT(stcb);
1963 	}
1964 	switch (t_type) {
1965 	case SCTP_TIMER_TYPE_ZERO_COPY:
1966 		tmr = &inp->sctp_ep.zero_copy_timer;
1967 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1968 		break;
1969 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1970 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1971 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1972 		break;
1973 	case SCTP_TIMER_TYPE_ADDR_WQ:
1974 		/* Only 1 tick away :-) */
1975 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1976 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1977 		break;
1978 	case SCTP_TIMER_TYPE_SEND:
1979 		/* Here we use the RTO timer */
1980 		{
1981 			int rto_val;
1982 
1983 			if ((stcb == NULL) || (net == NULL)) {
1984 				return;
1985 			}
1986 			tmr = &net->rxt_timer;
1987 			if (net->RTO == 0) {
1988 				rto_val = stcb->asoc.initial_rto;
1989 			} else {
1990 				rto_val = net->RTO;
1991 			}
1992 			to_ticks = MSEC_TO_TICKS(rto_val);
1993 		}
1994 		break;
1995 	case SCTP_TIMER_TYPE_INIT:
1996 		/*
1997 		 * Here we use the INIT timer default, usually about 1
1998 		 * minute.
1999 		 */
2000 		if ((stcb == NULL) || (net == NULL)) {
2001 			return;
2002 		}
2003 		tmr = &net->rxt_timer;
2004 		if (net->RTO == 0) {
2005 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2006 		} else {
2007 			to_ticks = MSEC_TO_TICKS(net->RTO);
2008 		}
2009 		break;
2010 	case SCTP_TIMER_TYPE_RECV:
2011 		/*
2012 		 * Here we use the delayed ACK timer value from the inp,
2013 		 * usually about 200 ms.
2014 		 */
2015 		if (stcb == NULL) {
2016 			return;
2017 		}
2018 		tmr = &stcb->asoc.dack_timer;
2019 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
2020 		break;
2021 	case SCTP_TIMER_TYPE_SHUTDOWN:
2022 		/* Here we use the RTO of the destination. */
2023 		if ((stcb == NULL) || (net == NULL)) {
2024 			return;
2025 		}
2026 		if (net->RTO == 0) {
2027 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2028 		} else {
2029 			to_ticks = MSEC_TO_TICKS(net->RTO);
2030 		}
2031 		tmr = &net->rxt_timer;
2032 		break;
2033 	case SCTP_TIMER_TYPE_HEARTBEAT:
2034 		/*
2035 		 * The net is used here so that we can add in the RTO, even
2036 		 * though we use a different timer. We also add the HB delay
2037 		 * PLUS a random jitter.
2038 		 */
2039 		if ((stcb == NULL) || (net == NULL)) {
2040 			return;
2041 		} else {
2042 			uint32_t rndval;
2043 			uint32_t jitter;
2044 
2045 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
2046 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
2047 				return;
2048 			}
2049 			if (net->RTO == 0) {
2050 				to_ticks = stcb->asoc.initial_rto;
2051 			} else {
2052 				to_ticks = net->RTO;
2053 			}
2054 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2055 			jitter = rndval % to_ticks;
2056 			if (jitter >= (to_ticks >> 1)) {
2057 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
2058 			} else {
2059 				to_ticks = to_ticks - jitter;
2060 			}
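			/*
			 * A worked example of the jitter above (illustration
			 * only): with RTO = 1000 ms, jitter is rndval % 1000;
			 * a jitter of 700 (>= 500) gives 1000 + (700 - 500) =
			 * 1200 ms, while a jitter of 300 gives 1000 - 300 =
			 * 700 ms, so the base value ends up roughly in
			 * [RTO/2, 3*RTO/2).  The HB delay is added below for
			 * confirmed, non-PF destinations.
			 */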
2061 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2062 			    !(net->dest_state & SCTP_ADDR_PF)) {
2063 				to_ticks += net->heart_beat_delay;
2064 			}
2065 			/*
2066 			 * Now we must convert to_ticks, which is currently in
2067 			 * milliseconds, to actual ticks.
2068 			 */
2069 			to_ticks = MSEC_TO_TICKS(to_ticks);
2070 			tmr = &net->hb_timer;
2071 		}
2072 		break;
2073 	case SCTP_TIMER_TYPE_COOKIE:
2074 		/*
2075 		 * Here we can use the RTO timer from the network since one
2076 		 * RTT was complete. If a retransmission happened then we will
2077 		 * be using the initial RTO value.
2078 		 */
2079 		if ((stcb == NULL) || (net == NULL)) {
2080 			return;
2081 		}
2082 		if (net->RTO == 0) {
2083 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2084 		} else {
2085 			to_ticks = MSEC_TO_TICKS(net->RTO);
2086 		}
2087 		tmr = &net->rxt_timer;
2088 		break;
2089 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2090 		/*
2091 		 * Nothing needed but the endpoint here; usually about 60
2092 		 * minutes.
2093 		 */
2094 		tmr = &inp->sctp_ep.signature_change;
2095 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2096 		break;
2097 	case SCTP_TIMER_TYPE_ASOCKILL:
2098 		if (stcb == NULL) {
2099 			return;
2100 		}
2101 		tmr = &stcb->asoc.strreset_timer;
2102 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2103 		break;
2104 	case SCTP_TIMER_TYPE_INPKILL:
2105 		/*
2106 		 * The inp is set up to die. We re-use the signature_change
2107 		 * timer since that has stopped and we are in the GONE
2108 		 * state.
2109 		 */
2110 		tmr = &inp->sctp_ep.signature_change;
2111 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2112 		break;
2113 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2114 		/*
2115 		 * Here we use the value found in the EP for PMTU, usually
2116 		 * about 10 minutes.
2117 		 */
2118 		if ((stcb == NULL) || (net == NULL)) {
2119 			return;
2120 		}
2121 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2122 			return;
2123 		}
2124 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2125 		tmr = &net->pmtu_timer;
2126 		break;
2127 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2128 		/* Here we use the RTO of the destination */
2129 		if ((stcb == NULL) || (net == NULL)) {
2130 			return;
2131 		}
2132 		if (net->RTO == 0) {
2133 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2134 		} else {
2135 			to_ticks = MSEC_TO_TICKS(net->RTO);
2136 		}
2137 		tmr = &net->rxt_timer;
2138 		break;
2139 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2140 		/*
2141 		 * Here we use the endpoint's shutdown guard timer, usually
2142 		 * about 3 minutes.
2143 		 */
2144 		if (stcb == NULL) {
2145 			return;
2146 		}
2147 		if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
2148 			to_ticks = 5 * MSEC_TO_TICKS(stcb->asoc.maxrto);
2149 		} else {
2150 			to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2151 		}
2152 		tmr = &stcb->asoc.shut_guard_timer;
2153 		break;
2154 	case SCTP_TIMER_TYPE_STRRESET:
2155 		/*
2156 		 * Here the timer comes from the stcb but its value is from
2157 		 * the net's RTO.
2158 		 */
2159 		if ((stcb == NULL) || (net == NULL)) {
2160 			return;
2161 		}
2162 		if (net->RTO == 0) {
2163 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2164 		} else {
2165 			to_ticks = MSEC_TO_TICKS(net->RTO);
2166 		}
2167 		tmr = &stcb->asoc.strreset_timer;
2168 		break;
2169 	case SCTP_TIMER_TYPE_ASCONF:
2170 		/*
2171 		 * Here the timer comes from the stcb but its value is from
2172 		 * the net's RTO.
2173 		 */
2174 		if ((stcb == NULL) || (net == NULL)) {
2175 			return;
2176 		}
2177 		if (net->RTO == 0) {
2178 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2179 		} else {
2180 			to_ticks = MSEC_TO_TICKS(net->RTO);
2181 		}
2182 		tmr = &stcb->asoc.asconf_timer;
2183 		break;
2184 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2185 		if ((stcb == NULL) || (net != NULL)) {
2186 			return;
2187 		}
2188 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2189 		tmr = &stcb->asoc.delete_prim_timer;
2190 		break;
2191 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2192 		if (stcb == NULL) {
2193 			return;
2194 		}
2195 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2196 			/*
2197 			 * Really an error, since the stcb is NOT set up for
2198 			 * autoclose.
2199 			 */
2200 			return;
2201 		}
2202 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2203 		tmr = &stcb->asoc.autoclose_timer;
2204 		break;
2205 	default:
2206 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2207 		    __func__, t_type);
2208 		return;
2210 	}
2211 	if ((to_ticks <= 0) || (tmr == NULL)) {
2212 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2213 		    __func__, t_type, to_ticks, (void *)tmr);
2214 		return;
2215 	}
2216 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2217 		/*
2218 		 * We do NOT allow the timer to be started if it is already
2219 		 * running; if it is, we leave the current one up unchanged.
2220 		 */
2221 		return;
2222 	}
2223 	/* At this point we can proceed */
2224 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2225 		stcb->asoc.num_send_timers_up++;
2226 	}
2227 	tmr->stopped_from = 0;
2228 	tmr->type = t_type;
2229 	tmr->ep = (void *)inp;
2230 	tmr->tcb = (void *)stcb;
2231 	tmr->net = (void *)net;
2232 	tmr->self = (void *)tmr;
2233 	tmr->vnet = (void *)curvnet;
2234 	tmr->ticks = sctp_get_tick_count();
2235 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2236 	return;
2237 }
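
/*
 * A typical caller (e.g. the T3 handler above) simply does:
 *
 *	sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
 *
 * and later cancels it with the matching sctp_timer_stop() call, passing an
 * SCTP_FROM_* location code so the stopping site can be traced.
 */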
2238 
2239 void
2240 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2241     struct sctp_nets *net, uint32_t from)
2242 {
2243 	struct sctp_timer *tmr;
2244 
2245 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2246 	    (inp == NULL))
2247 		return;
2248 
2249 	tmr = NULL;
2250 	if (stcb) {
2251 		SCTP_TCB_LOCK_ASSERT(stcb);
2252 	}
2253 	switch (t_type) {
2254 	case SCTP_TIMER_TYPE_ZERO_COPY:
2255 		tmr = &inp->sctp_ep.zero_copy_timer;
2256 		break;
2257 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
2258 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
2259 		break;
2260 	case SCTP_TIMER_TYPE_ADDR_WQ:
2261 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2262 		break;
2263 	case SCTP_TIMER_TYPE_SEND:
2264 		if ((stcb == NULL) || (net == NULL)) {
2265 			return;
2266 		}
2267 		tmr = &net->rxt_timer;
2268 		break;
2269 	case SCTP_TIMER_TYPE_INIT:
2270 		if ((stcb == NULL) || (net == NULL)) {
2271 			return;
2272 		}
2273 		tmr = &net->rxt_timer;
2274 		break;
2275 	case SCTP_TIMER_TYPE_RECV:
2276 		if (stcb == NULL) {
2277 			return;
2278 		}
2279 		tmr = &stcb->asoc.dack_timer;
2280 		break;
2281 	case SCTP_TIMER_TYPE_SHUTDOWN:
2282 		if ((stcb == NULL) || (net == NULL)) {
2283 			return;
2284 		}
2285 		tmr = &net->rxt_timer;
2286 		break;
2287 	case SCTP_TIMER_TYPE_HEARTBEAT:
2288 		if ((stcb == NULL) || (net == NULL)) {
2289 			return;
2290 		}
2291 		tmr = &net->hb_timer;
2292 		break;
2293 	case SCTP_TIMER_TYPE_COOKIE:
2294 		if ((stcb == NULL) || (net == NULL)) {
2295 			return;
2296 		}
2297 		tmr = &net->rxt_timer;
2298 		break;
2299 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2300 		/* nothing needed but the endpoint here */
2301 		tmr = &inp->sctp_ep.signature_change;
2302 		/*
2303 		 * We re-use the newcookie timer for the INP kill timer. We
2304 		 * must ensure that we do not kill it by accident.
2305 		 */
2306 		break;
2307 	case SCTP_TIMER_TYPE_ASOCKILL:
2308 		/*
2309 		 * Stop the asoc kill timer.
2310 		 */
2311 		if (stcb == NULL) {
2312 			return;
2313 		}
2314 		tmr = &stcb->asoc.strreset_timer;
2315 		break;
2316 
2317 	case SCTP_TIMER_TYPE_INPKILL:
2318 		/*
2319 		 * The inp is set up to die. We re-use the signature_change
2320 		 * timer since that has stopped and we are in the GONE
2321 		 * state.
2322 		 */
2323 		tmr = &inp->sctp_ep.signature_change;
2324 		break;
2325 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2326 		if ((stcb == NULL) || (net == NULL)) {
2327 			return;
2328 		}
2329 		tmr = &net->pmtu_timer;
2330 		break;
2331 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2332 		if ((stcb == NULL) || (net == NULL)) {
2333 			return;
2334 		}
2335 		tmr = &net->rxt_timer;
2336 		break;
2337 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2338 		if (stcb == NULL) {
2339 			return;
2340 		}
2341 		tmr = &stcb->asoc.shut_guard_timer;
2342 		break;
2343 	case SCTP_TIMER_TYPE_STRRESET:
2344 		if (stcb == NULL) {
2345 			return;
2346 		}
2347 		tmr = &stcb->asoc.strreset_timer;
2348 		break;
2349 	case SCTP_TIMER_TYPE_ASCONF:
2350 		if (stcb == NULL) {
2351 			return;
2352 		}
2353 		tmr = &stcb->asoc.asconf_timer;
2354 		break;
2355 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2356 		if (stcb == NULL) {
2357 			return;
2358 		}
2359 		tmr = &stcb->asoc.delete_prim_timer;
2360 		break;
2361 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2362 		if (stcb == NULL) {
2363 			return;
2364 		}
2365 		tmr = &stcb->asoc.autoclose_timer;
2366 		break;
2367 	default:
2368 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2369 		    __func__, t_type);
2370 		break;
2371 	}
2372 	if (tmr == NULL) {
2373 		return;
2374 	}
2375 	if ((tmr->type != t_type) && tmr->type) {
2376 		/*
2377 		 * OK, we have a timer that is under joint use, e.g. the
2378 		 * cookie timer sharing storage with the SEND timer. We are
2379 		 * therefore NOT running the timer that the caller wants
2380 		 * stopped, so just return.
2381 		 */
2382 		return;
2383 	}
2384 	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2385 		stcb->asoc.num_send_timers_up--;
2386 		if (stcb->asoc.num_send_timers_up < 0) {
2387 			stcb->asoc.num_send_timers_up = 0;
2388 		}
2389 	}
2390 	tmr->self = NULL;
2391 	tmr->stopped_from = from;
2392 	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
2393 	return;
2394 }
2395 
2396 uint32_t
2397 sctp_calculate_len(struct mbuf *m)
2398 {
2399 	uint32_t tlen = 0;
2400 	struct mbuf *at;
2401 
2402 	at = m;
2403 	while (at) {
2404 		tlen += SCTP_BUF_LEN(at);
2405 		at = SCTP_BUF_NEXT(at);
2406 	}
2407 	return (tlen);
2408 }
2409 
2410 void
2411 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2412     struct sctp_association *asoc, uint32_t mtu)
2413 {
2414 	/*
2415 	 * Reset the P-MTU size on this association. This involves changing
2416 	 * the asoc MTU and going through ANY chunk whose size plus overhead is
2417 	 * larger than mtu, to allow the DF flag to be cleared on it.
2418 	 */
2419 	struct sctp_tmit_chunk *chk;
2420 	unsigned int eff_mtu, ovh;
2421 
2422 	asoc->smallest_mtu = mtu;
2423 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2424 		ovh = SCTP_MIN_OVERHEAD;
2425 	} else {
2426 		ovh = SCTP_MIN_V4_OVERHEAD;
2427 	}
2428 	eff_mtu = mtu - ovh;
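	/*
	 * Worked example (overhead values are assumptions for illustration):
	 * with mtu = 1500 on a v4-only socket and ovh = SCTP_MIN_V4_OVERHEAD
	 * (nominally the IPv4 header plus the SCTP common header, 20 + 12 =
	 * 32 bytes), eff_mtu is 1468 and any chunk with send_size > 1468 is
	 * marked CHUNK_FLAGS_FRAGMENT_OK below.
	 */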
2429 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2430 		if (chk->send_size > eff_mtu) {
2431 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2432 		}
2433 	}
2434 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2435 		if (chk->send_size > eff_mtu) {
2436 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2437 		}
2438 	}
2439 }
2440 
2441 
2442 /*
2443  * Given an association and the starting time of the current RTT period,
2444  * return the RTO in milliseconds.  net should point to the current network.
2445  */
2446 
2447 uint32_t
2448 sctp_calculate_rto(struct sctp_tcb *stcb,
2449     struct sctp_association *asoc,
2450     struct sctp_nets *net,
2451     struct timeval *told,
2452     int safe, int rtt_from_sack)
2453 {
2454 	/*-
2455 	 * Given an association and the starting time of the current RTT
2456 	 * period (in *told), return the RTO in milliseconds.
2457 	 */
2458 	int32_t rtt;		/* RTT in ms */
2459 	uint32_t new_rto;
2460 	int first_measure = 0;
2461 	struct timeval now, then, *old;
2462 
2463 	/* Copy it out for sparc64 */
2464 	if (safe == sctp_align_unsafe_makecopy) {
2465 		old = &then;
2466 		memcpy(&then, told, sizeof(struct timeval));
2467 	} else if (safe == sctp_align_safe_nocopy) {
2468 		old = told;
2469 	} else {
2470 		/* error */
2471 		SCTP_PRINTF("Huh, bad rto calc call\n");
2472 		return (0);
2473 	}
2474 	/************************/
2475 	/* 1. calculate new RTT */
2476 	/************************/
2477 	/* get the current time */
2478 	if (stcb->asoc.use_precise_time) {
2479 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2480 	} else {
2481 		(void)SCTP_GETTIME_TIMEVAL(&now);
2482 	}
2483 	timevalsub(&now, old);
2484 	/* store the current RTT in us */
2485 	net->rtt = (uint64_t)1000000 * (uint64_t)now.tv_sec +
2486 	    (uint64_t)now.tv_usec;
2487 
2488 	/* compute rtt in ms */
2489 	rtt = (int32_t)(net->rtt / 1000);
2490 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2491 		/*
2492 		 * Tell the CC module that a new update has just occurred
2493 		 * from a sack
2494 		 */
2495 		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
2496 	}
2497 	/*
2498 	 * Do we need to determine the LAN type? We do this only on SACKs,
2499 	 * i.e. when the RTT is determined from data, not non-data (HB/INIT->INITACK).
2500 	 */
2501 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2502 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2503 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2504 			net->lan_type = SCTP_LAN_INTERNET;
2505 		} else {
2506 			net->lan_type = SCTP_LAN_LOCAL;
2507 		}
2508 	}
2509 	/***************************/
2510 	/* 2. update RTTVAR & SRTT */
2511 	/***************************/
2512 	/*-
2513 	 * Compute the scaled average lastsa and the
2514 	 * scaled variance lastsv as described in van Jacobson
2515 	 * Paper "Congestion Avoidance and Control", Annex A.
2516 	 *
2517 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2518 	 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar
2519 	 */
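	/*-
	 * Spelled out (a sketch, assuming the usual shift values
	 * SCTP_RTT_SHIFT = 3 and SCTP_RTT_VAR_SHIFT = 2, so that
	 * lastsa ~ 8 * SRTT and lastsv ~ 4 * RTTVAR):
	 *
	 *	delta   = rtt - SRTT
	 *	SRTT   += delta / 8
	 *	RTTVAR += (|delta| - RTTVAR) / 4
	 *	RTO     = SRTT + 4 * RTTVAR
	 *
	 * which is what the update below and the new_rto computation
	 * ((lastsa >> SCTP_RTT_SHIFT) + lastsv) implement.
	 */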
2520 	if (net->RTO_measured) {
2521 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2522 		net->lastsa += rtt;
2523 		if (rtt < 0) {
2524 			rtt = -rtt;
2525 		}
2526 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2527 		net->lastsv += rtt;
2528 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2529 			rto_logging(net, SCTP_LOG_RTTVAR);
2530 		}
2531 	} else {
2532 		/* First RTO measurement */
2533 		net->RTO_measured = 1;
2534 		first_measure = 1;
2535 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2536 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2537 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2538 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2539 		}
2540 	}
2541 	if (net->lastsv == 0) {
2542 		net->lastsv = SCTP_CLOCK_GRANULARITY;
2543 	}
2544 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2545 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2546 	    (stcb->asoc.sat_network_lockout == 0)) {
2547 		stcb->asoc.sat_network = 1;
2548 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2549 		stcb->asoc.sat_network = 0;
2550 		stcb->asoc.sat_network_lockout = 1;
2551 	}
2552 	/* bound it, per rules C6/C7 in RFC 4960, Section 6.3.1 */
2553 	if (new_rto < stcb->asoc.minrto) {
2554 		new_rto = stcb->asoc.minrto;
2555 	}
2556 	if (new_rto > stcb->asoc.maxrto) {
2557 		new_rto = stcb->asoc.maxrto;
2558 	}
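	/*
	 * For example, with the common defaults of RTO.Min = 1000 ms and
	 * RTO.Max = 60000 ms (RFC 4960), a computed 50 ms would be raised to
	 * 1000 ms and a computed 90000 ms capped at 60000 ms; the actual
	 * bounds come from the association's minrto/maxrto settings.
	 */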
2559 	/* we are now returning the RTO */
2560 	return (new_rto);
2561 }
2562 
2563 /*
2564  * return a pointer to a contiguous piece of data from the given mbuf chain
2565  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2566  * one mbuf, a copy is made into 'in_ptr'. The caller must ensure that the
2567  * buffer size is >= 'len'. Returns NULL if there aren't 'len' bytes in the chain.
2568  */
2569 caddr_t
2570 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr)
2571 {
2572 	uint32_t count;
2573 	uint8_t *ptr;
2574 
2575 	ptr = in_ptr;
2576 	if ((off < 0) || (len <= 0))
2577 		return (NULL);
2578 
2579 	/* find the desired start location */
2580 	while ((m != NULL) && (off > 0)) {
2581 		if (off < SCTP_BUF_LEN(m))
2582 			break;
2583 		off -= SCTP_BUF_LEN(m);
2584 		m = SCTP_BUF_NEXT(m);
2585 	}
2586 	if (m == NULL)
2587 		return (NULL);
2588 
2589 	/* is the current mbuf large enough (i.e. contiguous)? */
2590 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2591 		return (mtod(m, caddr_t)+off);
2592 	} else {
2593 		/* else, it spans more than one mbuf, so save a temp copy... */
2594 		while ((m != NULL) && (len > 0)) {
2595 			count = min(SCTP_BUF_LEN(m) - off, len);
2596 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2597 			len -= count;
2598 			ptr += count;
2599 			off = 0;
2600 			m = SCTP_BUF_NEXT(m);
2601 		}
2602 		if ((m == NULL) && (len > 0))
2603 			return (NULL);
2604 		else
2605 			return ((caddr_t)in_ptr);
2606 	}
2607 }
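
/*
 * Illustrative usage sketch (hypothetical caller, not taken from this file):
 * pull a chunk header that may span mbufs into a caller-supplied buffer.
 *
 *	struct sctp_chunkhdr buf, *ch;
 *
 *	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
 *	    sizeof(struct sctp_chunkhdr), (uint8_t *)&buf);
 *	if (ch == NULL) {
 *		return;		(fewer than sizeof(buf) bytes at offset)
 *	}
 */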
2608 
2609 
2610 
2611 struct sctp_paramhdr *
2612 sctp_get_next_param(struct mbuf *m,
2613     int offset,
2614     struct sctp_paramhdr *pull,
2615     int pull_limit)
2616 {
2617 	/* This just provides a typed signature to Peter's Pull routine */
2618 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2619 	    (uint8_t *)pull));
2620 }
2621 
2622 
2623 struct mbuf *
2624 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2625 {
2626 	struct mbuf *m_last;
2627 	caddr_t dp;
2628 
2629 	if (padlen > 3) {
2630 		return (NULL);
2631 	}
2632 	if (padlen <= M_TRAILINGSPACE(m)) {
2633 		/*
2634 		 * The easy way. We hope the majority of the time we hit
2635 		 * here :)
2636 		 */
2637 		m_last = m;
2638 	} else {
2639 		/* Hard way we must grow the mbuf chain */
2640 		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2641 		if (m_last == NULL) {
2642 			return (NULL);
2643 		}
2644 		SCTP_BUF_LEN(m_last) = 0;
2645 		SCTP_BUF_NEXT(m_last) = NULL;
2646 		SCTP_BUF_NEXT(m) = m_last;
2647 	}
2648 	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
2649 	SCTP_BUF_LEN(m_last) += padlen;
2650 	memset(dp, 0, padlen);
2651 	return (m_last);
2652 }
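
/*
 * Note: SCTP chunks are padded to a 4-byte boundary, so padlen is normally
 * computed by the caller as something like SCTP_SIZE32(len) - len, i.e.
 * 0..3 bytes (for example, a 23-byte chunk needs 1 pad byte, a 24-byte
 * chunk needs none).
 */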
2653 
2654 struct mbuf *
2655 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2656 {
2657 	/* find the last mbuf in chain and pad it */
2658 	struct mbuf *m_at;
2659 
2660 	if (last_mbuf != NULL) {
2661 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2662 	} else {
2663 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2664 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2665 				return (sctp_add_pad_tombuf(m_at, padval));
2666 			}
2667 		}
2668 	}
2669 	return (NULL);
2670 }
2671 
2672 static void
2673 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
2674     uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
2675 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2676     SCTP_UNUSED
2677 #endif
2678 )
2679 {
2680 	struct mbuf *m_notify;
2681 	struct sctp_assoc_change *sac;
2682 	struct sctp_queued_to_read *control;
2683 	unsigned int notif_len;
2684 	uint16_t abort_len;
2685 	unsigned int i;
2686 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2687 	struct socket *so;
2688 #endif
2689 
2690 	if (stcb == NULL) {
2691 		return;
2692 	}
2693 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
2694 		notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
2695 		if (abort != NULL) {
2696 			abort_len = ntohs(abort->ch.chunk_length);
2697 		} else {
2698 			abort_len = 0;
2699 		}
2700 		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2701 			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
2702 		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2703 			notif_len += abort_len;
2704 		}
2705 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2706 		if (m_notify == NULL) {
2707 			/* Retry with smaller value. */
2708 			notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
2709 			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2710 			if (m_notify == NULL) {
2711 				goto set_error;
2712 			}
2713 		}
2714 		SCTP_BUF_NEXT(m_notify) = NULL;
2715 		sac = mtod(m_notify, struct sctp_assoc_change *);
2716 		memset(sac, 0, notif_len);
2717 		sac->sac_type = SCTP_ASSOC_CHANGE;
2718 		sac->sac_flags = 0;
2719 		sac->sac_length = sizeof(struct sctp_assoc_change);
2720 		sac->sac_state = state;
2721 		sac->sac_error = error;
2722 		/* XXX verify these stream counts */
2723 		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
2724 		sac->sac_inbound_streams = stcb->asoc.streamincnt;
2725 		sac->sac_assoc_id = sctp_get_associd(stcb);
2726 		if (notif_len > sizeof(struct sctp_assoc_change)) {
2727 			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2728 				i = 0;
2729 				if (stcb->asoc.prsctp_supported == 1) {
2730 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
2731 				}
2732 				if (stcb->asoc.auth_supported == 1) {
2733 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
2734 				}
2735 				if (stcb->asoc.asconf_supported == 1) {
2736 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
2737 				}
2738 				if (stcb->asoc.idata_supported == 1) {
2739 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
2740 				}
2741 				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
2742 				if (stcb->asoc.reconfig_supported == 1) {
2743 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
2744 				}
2745 				sac->sac_length += i;
2746 			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2747 				memcpy(sac->sac_info, abort, abort_len);
2748 				sac->sac_length += abort_len;
2749 			}
2750 		}
2751 		SCTP_BUF_LEN(m_notify) = sac->sac_length;
2752 		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2753 		    0, 0, stcb->asoc.context, 0, 0, 0,
2754 		    m_notify);
2755 		if (control != NULL) {
2756 			control->length = SCTP_BUF_LEN(m_notify);
2757 			/* not that we need this */
2758 			control->tail_mbuf = m_notify;
2759 			control->spec_flags = M_NOTIFICATION;
2760 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2761 			    control,
2762 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
2763 			    so_locked);
2764 		} else {
2765 			sctp_m_freem(m_notify);
2766 		}
2767 	}
2768 	/*
2769 	 * For 1-to-1 style sockets, we send up an error when an ABORT
2770 	 * comes in.
2771 	 */
2772 set_error:
2773 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2774 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2775 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2776 		SOCK_LOCK(stcb->sctp_socket);
2777 		if (from_peer) {
2778 			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
2779 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
2780 				stcb->sctp_socket->so_error = ECONNREFUSED;
2781 			} else {
2782 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
2783 				stcb->sctp_socket->so_error = ECONNRESET;
2784 			}
2785 		} else {
2786 			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
2787 			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
2788 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
2789 				stcb->sctp_socket->so_error = ETIMEDOUT;
2790 			} else {
2791 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
2792 				stcb->sctp_socket->so_error = ECONNABORTED;
2793 			}
2794 		}
2795 		SOCK_UNLOCK(stcb->sctp_socket);
2796 	}
2797 	/* Wake ANY sleepers */
2798 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2799 	so = SCTP_INP_SO(stcb->sctp_ep);
2800 	if (!so_locked) {
2801 		atomic_add_int(&stcb->asoc.refcnt, 1);
2802 		SCTP_TCB_UNLOCK(stcb);
2803 		SCTP_SOCKET_LOCK(so, 1);
2804 		SCTP_TCB_LOCK(stcb);
2805 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2806 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2807 			SCTP_SOCKET_UNLOCK(so, 1);
2808 			return;
2809 		}
2810 	}
2811 #endif
2812 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2813 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2814 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2815 		socantrcvmore(stcb->sctp_socket);
2816 	}
2817 	sorwakeup(stcb->sctp_socket);
2818 	sowwakeup(stcb->sctp_socket);
2819 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2820 	if (!so_locked) {
2821 		SCTP_SOCKET_UNLOCK(so, 1);
2822 	}
2823 #endif
2824 }
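
/*
 * User-space sketch (not part of this file, API details may vary): an
 * application typically opts in to the association change notification
 * built above via the RFC 6458 socket API, e.g.:
 *
 *	struct sctp_event ev;
 *
 *	memset(&ev, 0, sizeof(ev));
 *	ev.se_type = SCTP_ASSOC_CHANGE;
 *	ev.se_on = 1;
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_EVENT, &ev, sizeof(ev));
 *
 * and then receives it as a message flagged with MSG_NOTIFICATION.
 */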
2825 
2826 static void
2827 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2828     struct sockaddr *sa, uint32_t error, int so_locked
2829 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2830     SCTP_UNUSED
2831 #endif
2832 )
2833 {
2834 	struct mbuf *m_notify;
2835 	struct sctp_paddr_change *spc;
2836 	struct sctp_queued_to_read *control;
2837 
2838 	if ((stcb == NULL) ||
2839 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2840 		/* event not enabled */
2841 		return;
2842 	}
2843 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
2844 	if (m_notify == NULL)
2845 		return;
2846 	SCTP_BUF_LEN(m_notify) = 0;
2847 	spc = mtod(m_notify, struct sctp_paddr_change *);
2848 	memset(spc, 0, sizeof(struct sctp_paddr_change));
2849 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2850 	spc->spc_flags = 0;
2851 	spc->spc_length = sizeof(struct sctp_paddr_change);
2852 	switch (sa->sa_family) {
2853 #ifdef INET
2854 	case AF_INET:
2855 #ifdef INET6
2856 		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
2857 			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
2858 			    (struct sockaddr_in6 *)&spc->spc_aaddr);
2859 		} else {
2860 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2861 		}
2862 #else
2863 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2864 #endif
2865 		break;
2866 #endif
2867 #ifdef INET6
2868 	case AF_INET6:
2869 		{
2870 			struct sockaddr_in6 *sin6;
2871 
2872 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2873 
2874 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2875 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2876 				if (sin6->sin6_scope_id == 0) {
2877 					/* recover scope_id for user */
2878 					(void)sa6_recoverscope(sin6);
2879 				} else {
2880 					/* clear embedded scope_id for user */
2881 					in6_clearscope(&sin6->sin6_addr);
2882 				}
2883 			}
2884 			break;
2885 		}
2886 #endif
2887 	default:
2888 		/* TSNH */
2889 		break;
2890 	}
2891 	spc->spc_state = state;
2892 	spc->spc_error = error;
2893 	spc->spc_assoc_id = sctp_get_associd(stcb);
2894 
2895 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2896 	SCTP_BUF_NEXT(m_notify) = NULL;
2897 
2898 	/* append to socket */
2899 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2900 	    0, 0, stcb->asoc.context, 0, 0, 0,
2901 	    m_notify);
2902 	if (control == NULL) {
2903 		/* no memory */
2904 		sctp_m_freem(m_notify);
2905 		return;
2906 	}
2907 	control->length = SCTP_BUF_LEN(m_notify);
2908 	control->spec_flags = M_NOTIFICATION;
2909 	/* not that we need this */
2910 	control->tail_mbuf = m_notify;
2911 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2912 	    control,
2913 	    &stcb->sctp_socket->so_rcv, 1,
2914 	    SCTP_READ_LOCK_NOT_HELD,
2915 	    so_locked);
2916 }
2917 
2918 
2919 static void
2920 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
2921     struct sctp_tmit_chunk *chk, int so_locked
2922 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2923     SCTP_UNUSED
2924 #endif
2925 )
2926 {
2927 	struct mbuf *m_notify;
2928 	struct sctp_send_failed *ssf;
2929 	struct sctp_send_failed_event *ssfe;
2930 	struct sctp_queued_to_read *control;
2931 	struct sctp_chunkhdr *chkhdr;
2932 	int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len;
2933 
2934 	if ((stcb == NULL) ||
2935 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
2936 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
2937 		/* event not enabled */
2938 		return;
2939 	}
2940 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2941 		notifhdr_len = sizeof(struct sctp_send_failed_event);
2942 	} else {
2943 		notifhdr_len = sizeof(struct sctp_send_failed);
2944 	}
2945 	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
2946 	if (m_notify == NULL)
2947 		/* no space left */
2948 		return;
2949 	SCTP_BUF_LEN(m_notify) = notifhdr_len;
2950 	if (stcb->asoc.idata_supported) {
2951 		chkhdr_len = sizeof(struct sctp_idata_chunk);
2952 	} else {
2953 		chkhdr_len = sizeof(struct sctp_data_chunk);
2954 	}
2955 	/* Use some defaults in case we can't access the chunk header */
2956 	if (chk->send_size >= chkhdr_len) {
2957 		payload_len = chk->send_size - chkhdr_len;
2958 	} else {
2959 		payload_len = 0;
2960 	}
2961 	padding_len = 0;
2962 	if (chk->data != NULL) {
2963 		chkhdr = mtod(chk->data, struct sctp_chunkhdr *);
2964 		if (chkhdr != NULL) {
2965 			chk_len = ntohs(chkhdr->chunk_length);
2966 			if ((chk_len >= chkhdr_len) &&
2967 			    (chk->send_size >= chk_len) &&
2968 			    (chk->send_size - chk_len < 4)) {
2969 				padding_len = chk->send_size - chk_len;
2970 				payload_len = chk->send_size - chkhdr_len - padding_len;
2971 			}
2972 		}
2973 	}
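	/*
	 * Worked example (sizes assumed for illustration): for a plain DATA
	 * chunk (chkhdr_len = 16) carrying 13 bytes of user data, chk_len is
	 * 29 and send_size (padded to 4 bytes) is 32, so padding_len = 3 and
	 * payload_len = 32 - 16 - 3 = 13.
	 */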
2974 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2975 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
2976 		memset(ssfe, 0, notifhdr_len);
2977 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
2978 		if (sent) {
2979 			ssfe->ssfe_flags = SCTP_DATA_SENT;
2980 		} else {
2981 			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
2982 		}
2983 		ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len);
2984 		ssfe->ssfe_error = error;
2985 		/* not exactly what the user sent in, but should be close :) */
2986 		ssfe->ssfe_info.snd_sid = chk->rec.data.sid;
2987 		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
2988 		ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid;
2989 		ssfe->ssfe_info.snd_context = chk->rec.data.context;
2990 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
2991 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
2992 	} else {
2993 		ssf = mtod(m_notify, struct sctp_send_failed *);
2994 		memset(ssf, 0, notifhdr_len);
2995 		ssf->ssf_type = SCTP_SEND_FAILED;
2996 		if (sent) {
2997 			ssf->ssf_flags = SCTP_DATA_SENT;
2998 		} else {
2999 			ssf->ssf_flags = SCTP_DATA_UNSENT;
3000 		}
3001 		ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len);
3002 		ssf->ssf_error = error;
3003 		/* not exactly what the user sent in, but should be close :) */
3004 		ssf->ssf_info.sinfo_stream = chk->rec.data.sid;
3005 		ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid;
3006 		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
3007 		ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid;
3008 		ssf->ssf_info.sinfo_context = chk->rec.data.context;
3009 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3010 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3011 	}
3012 	if (chk->data != NULL) {
3013 		/* Trim off the sctp chunk header (it should be there) */
3014 		if (chk->send_size == chkhdr_len + payload_len + padding_len) {
3015 			m_adj(chk->data, chkhdr_len);
3016 			m_adj(chk->data, -padding_len);
3017 			sctp_mbuf_crush(chk->data);
3018 			chk->send_size -= (chkhdr_len + padding_len);
3019 		}
3020 	}
3021 	SCTP_BUF_NEXT(m_notify) = chk->data;
3022 	/* Steal off the mbuf */
3023 	chk->data = NULL;
3024 	/*
3025 	 * For this case, we check the actual socket buffer; since the assoc
3026 	 * is going away, we don't want to overfill the socket buffer for a
3027 	 * non-reader.
3028 	 */
3029 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3030 		sctp_m_freem(m_notify);
3031 		return;
3032 	}
3033 	/* append to socket */
3034 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3035 	    0, 0, stcb->asoc.context, 0, 0, 0,
3036 	    m_notify);
3037 	if (control == NULL) {
3038 		/* no memory */
3039 		sctp_m_freem(m_notify);
3040 		return;
3041 	}
3042 	control->spec_flags = M_NOTIFICATION;
3043 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3044 	    control,
3045 	    &stcb->sctp_socket->so_rcv, 1,
3046 	    SCTP_READ_LOCK_NOT_HELD,
3047 	    so_locked);
3048 }
3049 
3050 
3051 static void
3052 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
3053     struct sctp_stream_queue_pending *sp, int so_locked
3054 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3055     SCTP_UNUSED
3056 #endif
3057 )
3058 {
3059 	struct mbuf *m_notify;
3060 	struct sctp_send_failed *ssf;
3061 	struct sctp_send_failed_event *ssfe;
3062 	struct sctp_queued_to_read *control;
3063 	int notifhdr_len;
3064 
3065 	if ((stcb == NULL) ||
3066 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
3067 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
3068 		/* event not enabled */
3069 		return;
3070 	}
3071 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3072 		notifhdr_len = sizeof(struct sctp_send_failed_event);
3073 	} else {
3074 		notifhdr_len = sizeof(struct sctp_send_failed);
3075 	}
3076 	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
3077 	if (m_notify == NULL) {
3078 		/* no space left */
3079 		return;
3080 	}
3081 	SCTP_BUF_LEN(m_notify) = notifhdr_len;
3082 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3083 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
3084 		memset(ssfe, 0, notifhdr_len);
3085 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
3086 		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
3087 		ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length);
3088 		ssfe->ssfe_error = error;
3089 		/* not exactly what the user sent in, but should be close :) */
3090 		ssfe->ssfe_info.snd_sid = sp->sid;
3091 		if (sp->some_taken) {
3092 			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
3093 		} else {
3094 			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
3095 		}
3096 		ssfe->ssfe_info.snd_ppid = sp->ppid;
3097 		ssfe->ssfe_info.snd_context = sp->context;
3098 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
3099 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
3100 	} else {
3101 		ssf = mtod(m_notify, struct sctp_send_failed *);
3102 		memset(ssf, 0, notifhdr_len);
3103 		ssf->ssf_type = SCTP_SEND_FAILED;
3104 		ssf->ssf_flags = SCTP_DATA_UNSENT;
3105 		ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length);
3106 		ssf->ssf_error = error;
3107 		/* not exactly what the user sent in, but should be close :) */
3108 		ssf->ssf_info.sinfo_stream = sp->sid;
3109 		ssf->ssf_info.sinfo_ssn = 0;
3110 		if (sp->some_taken) {
3111 			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
3112 		} else {
3113 			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
3114 		}
3115 		ssf->ssf_info.sinfo_ppid = sp->ppid;
3116 		ssf->ssf_info.sinfo_context = sp->context;
3117 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3118 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3119 	}
3120 	SCTP_BUF_NEXT(m_notify) = sp->data;
3121 
3122 	/* Steal off the mbuf */
3123 	sp->data = NULL;
3124 	/*
3125 	 * For this case, we check the actual socket buffer; since the assoc
3126 	 * is going away, we don't want to overfill the socket buffer for a
3127 	 * non-reader.
3128 	 */
3129 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3130 		sctp_m_freem(m_notify);
3131 		return;
3132 	}
3133 	/* append to socket */
3134 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3135 	    0, 0, stcb->asoc.context, 0, 0, 0,
3136 	    m_notify);
3137 	if (control == NULL) {
3138 		/* no memory */
3139 		sctp_m_freem(m_notify);
3140 		return;
3141 	}
3142 	control->spec_flags = M_NOTIFICATION;
3143 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3144 	    control,
3145 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3146 }
3147 
3148 
3149 
3150 static void
3151 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3152 {
3153 	struct mbuf *m_notify;
3154 	struct sctp_adaptation_event *sai;
3155 	struct sctp_queued_to_read *control;
3156 
3157 	if ((stcb == NULL) ||
3158 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3159 		/* event not enabled */
3160 		return;
3161 	}
3162 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaptation_event), 0, M_NOWAIT, 1, MT_DATA);
3163 	if (m_notify == NULL)
3164 		/* no space left */
3165 		return;
3166 	SCTP_BUF_LEN(m_notify) = 0;
3167 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3168 	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3169 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3170 	sai->sai_flags = 0;
3171 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3172 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3173 	sai->sai_assoc_id = sctp_get_associd(stcb);
3174 
3175 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3176 	SCTP_BUF_NEXT(m_notify) = NULL;
3177 
3178 	/* append to socket */
3179 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3180 	    0, 0, stcb->asoc.context, 0, 0, 0,
3181 	    m_notify);
3182 	if (control == NULL) {
3183 		/* no memory */
3184 		sctp_m_freem(m_notify);
3185 		return;
3186 	}
3187 	control->length = SCTP_BUF_LEN(m_notify);
3188 	control->spec_flags = M_NOTIFICATION;
3189 	/* not that we need this */
3190 	control->tail_mbuf = m_notify;
3191 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3192 	    control,
3193 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3194 }
3195 
3196 /* This always must be called with the read-queue LOCKED in the INP */
3197 static void
3198 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3199     uint32_t val, int so_locked
3200 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3201     SCTP_UNUSED
3202 #endif
3203 )
3204 {
3205 	struct mbuf *m_notify;
3206 	struct sctp_pdapi_event *pdapi;
3207 	struct sctp_queued_to_read *control;
3208 	struct sockbuf *sb;
3209 
3210 	if ((stcb == NULL) ||
3211 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3212 		/* event not enabled */
3213 		return;
3214 	}
3215 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3216 		return;
3217 	}
3218 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
3219 	if (m_notify == NULL)
3220 		/* no space left */
3221 		return;
3222 	SCTP_BUF_LEN(m_notify) = 0;
3223 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3224 	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
3225 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3226 	pdapi->pdapi_flags = 0;
3227 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3228 	pdapi->pdapi_indication = error;
3229 	pdapi->pdapi_stream = (val >> 16);
3230 	pdapi->pdapi_seq = (val & 0x0000ffff);
3231 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3232 
3233 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3234 	SCTP_BUF_NEXT(m_notify) = NULL;
3235 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3236 	    0, 0, stcb->asoc.context, 0, 0, 0,
3237 	    m_notify);
3238 	if (control == NULL) {
3239 		/* no memory */
3240 		sctp_m_freem(m_notify);
3241 		return;
3242 	}
3243 	control->spec_flags = M_NOTIFICATION;
3244 	control->length = SCTP_BUF_LEN(m_notify);
3245 	/* not that we need this */
3246 	control->tail_mbuf = m_notify;
3247 	control->held_length = 0;
3248 	control->length = 0;
3249 	sb = &stcb->sctp_socket->so_rcv;
3250 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3251 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3252 	}
3253 	sctp_sballoc(stcb, sb, m_notify);
3254 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3255 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3256 	}
3257 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3258 	control->end_added = 1;
3259 	if (stcb->asoc.control_pdapi)
3260 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3261 	else {
3262 		/* we really should not see this case */
3263 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3264 	}
3265 	if (stcb->sctp_ep && stcb->sctp_socket) {
3266 		/* This should always be the case */
3267 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3268 		struct socket *so;
3269 
3270 		so = SCTP_INP_SO(stcb->sctp_ep);
3271 		if (!so_locked) {
3272 			atomic_add_int(&stcb->asoc.refcnt, 1);
3273 			SCTP_TCB_UNLOCK(stcb);
3274 			SCTP_SOCKET_LOCK(so, 1);
3275 			SCTP_TCB_LOCK(stcb);
3276 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3277 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3278 				SCTP_SOCKET_UNLOCK(so, 1);
3279 				return;
3280 			}
3281 		}
3282 #endif
3283 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3284 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3285 		if (!so_locked) {
3286 			SCTP_SOCKET_UNLOCK(so, 1);
3287 		}
3288 #endif
3289 	}
3290 }
3291 
3292 static void
3293 sctp_notify_shutdown_event(struct sctp_tcb *stcb)
3294 {
3295 	struct mbuf *m_notify;
3296 	struct sctp_shutdown_event *sse;
3297 	struct sctp_queued_to_read *control;
3298 
3299 	/*
3300 	 * For TCP model AND UDP connected sockets we will send an error up
3301 	 * when a SHUTDOWN completes.
3302 	 */
3303 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3304 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3305 		/* mark socket closed for read/write and wakeup! */
3306 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3307 		struct socket *so;
3308 
3309 		so = SCTP_INP_SO(stcb->sctp_ep);
3310 		atomic_add_int(&stcb->asoc.refcnt, 1);
3311 		SCTP_TCB_UNLOCK(stcb);
3312 		SCTP_SOCKET_LOCK(so, 1);
3313 		SCTP_TCB_LOCK(stcb);
3314 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3315 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3316 			SCTP_SOCKET_UNLOCK(so, 1);
3317 			return;
3318 		}
3319 #endif
3320 		socantsendmore(stcb->sctp_socket);
3321 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3322 		SCTP_SOCKET_UNLOCK(so, 1);
3323 #endif
3324 	}
3325 	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
3326 		/* event not enabled */
3327 		return;
3328 	}
3329 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
3330 	if (m_notify == NULL)
3331 		/* no space left */
3332 		return;
3333 	sse = mtod(m_notify, struct sctp_shutdown_event *);
3334 	memset(sse, 0, sizeof(struct sctp_shutdown_event));
3335 	sse->sse_type = SCTP_SHUTDOWN_EVENT;
3336 	sse->sse_flags = 0;
3337 	sse->sse_length = sizeof(struct sctp_shutdown_event);
3338 	sse->sse_assoc_id = sctp_get_associd(stcb);
3339 
3340 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
3341 	SCTP_BUF_NEXT(m_notify) = NULL;
3342 
3343 	/* append to socket */
3344 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3345 	    0, 0, stcb->asoc.context, 0, 0, 0,
3346 	    m_notify);
3347 	if (control == NULL) {
3348 		/* no memory */
3349 		sctp_m_freem(m_notify);
3350 		return;
3351 	}
3352 	control->spec_flags = M_NOTIFICATION;
3353 	control->length = SCTP_BUF_LEN(m_notify);
3354 	/* not that we need this */
3355 	control->tail_mbuf = m_notify;
3356 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3357 	    control,
3358 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3359 }
3360 
3361 static void
3362 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3363     int so_locked
3364 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3365     SCTP_UNUSED
3366 #endif
3367 )
3368 {
3369 	struct mbuf *m_notify;
3370 	struct sctp_sender_dry_event *event;
3371 	struct sctp_queued_to_read *control;
3372 
3373 	if ((stcb == NULL) ||
3374 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3375 		/* event not enabled */
3376 		return;
3377 	}
3378 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3379 	if (m_notify == NULL) {
3380 		/* no space left */
3381 		return;
3382 	}
3383 	SCTP_BUF_LEN(m_notify) = 0;
3384 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3385 	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3386 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3387 	event->sender_dry_flags = 0;
3388 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3389 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3390 
3391 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3392 	SCTP_BUF_NEXT(m_notify) = NULL;
3393 
3394 	/* append to socket */
3395 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3396 	    0, 0, stcb->asoc.context, 0, 0, 0,
3397 	    m_notify);
3398 	if (control == NULL) {
3399 		/* no memory */
3400 		sctp_m_freem(m_notify);
3401 		return;
3402 	}
3403 	control->length = SCTP_BUF_LEN(m_notify);
3404 	control->spec_flags = M_NOTIFICATION;
3405 	/* not that we need this */
3406 	control->tail_mbuf = m_notify;
3407 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3408 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3409 }
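
/*
 * Background note: per the socket API (RFC 6458) this event is generated
 * when the stack has no more user data to send or retransmit on the
 * association; applications opt in to it just like the other notifications
 * above, which sets the SCTP_PCB_FLAGS_DRYEVNT feature checked here.
 */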
3410 
3411 
3412 void
3413 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3414 {
3415 	struct mbuf *m_notify;
3416 	struct sctp_queued_to_read *control;
3417 	struct sctp_stream_change_event *stradd;
3418 
3419 	if ((stcb == NULL) ||
3420 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3421 		/* event not enabled */
3422 		return;
3423 	}
3424 	if ((stcb->asoc.peer_req_out) && flag) {
3425 		/* Peer made the request, don't tell the local user */
3426 		stcb->asoc.peer_req_out = 0;
3427 		return;
3428 	}
3429 	stcb->asoc.peer_req_out = 0;
3430 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3431 	if (m_notify == NULL)
3432 		/* no space left */
3433 		return;
3434 	SCTP_BUF_LEN(m_notify) = 0;
3435 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3436 	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3437 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3438 	stradd->strchange_flags = flag;
3439 	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3440 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3441 	stradd->strchange_instrms = numberin;
3442 	stradd->strchange_outstrms = numberout;
3443 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3444 	SCTP_BUF_NEXT(m_notify) = NULL;
3445 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3446 		/* no space */
3447 		sctp_m_freem(m_notify);
3448 		return;
3449 	}
3450 	/* append to socket */
3451 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3452 	    0, 0, stcb->asoc.context, 0, 0, 0,
3453 	    m_notify);
3454 	if (control == NULL) {
3455 		/* no memory */
3456 		sctp_m_freem(m_notify);
3457 		return;
3458 	}
3459 	control->spec_flags = M_NOTIFICATION;
3460 	control->length = SCTP_BUF_LEN(m_notify);
3461 	/* not that we need this */
3462 	control->tail_mbuf = m_notify;
3463 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3464 	    control,
3465 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3466 }
3467 
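/*
 * Queue a SCTP_ASSOC_RESET_EVENT notification carrying the local and remote
 * TSNs at which the association was reset, if the event is enabled.
 */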
3468 void
3469 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3470 {
3471 	struct mbuf *m_notify;
3472 	struct sctp_queued_to_read *control;
3473 	struct sctp_assoc_reset_event *strasoc;
3474 
3475 	if ((stcb == NULL) ||
3476 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3477 		/* event not enabled */
3478 		return;
3479 	}
3480 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3481 	if (m_notify == NULL)
3482 		/* no space left */
3483 		return;
3484 	SCTP_BUF_LEN(m_notify) = 0;
3485 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3486 	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3487 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3488 	strasoc->assocreset_flags = flag;
3489 	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3490 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3491 	strasoc->assocreset_local_tsn = sending_tsn;
3492 	strasoc->assocreset_remote_tsn = recv_tsn;
3493 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3494 	SCTP_BUF_NEXT(m_notify) = NULL;
3495 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3496 		/* no space */
3497 		sctp_m_freem(m_notify);
3498 		return;
3499 	}
3500 	/* append to socket */
3501 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3502 	    0, 0, stcb->asoc.context, 0, 0, 0,
3503 	    m_notify);
3504 	if (control == NULL) {
3505 		/* no memory */
3506 		sctp_m_freem(m_notify);
3507 		return;
3508 	}
3509 	control->spec_flags = M_NOTIFICATION;
3510 	control->length = SCTP_BUF_LEN(m_notify);
3511 	/* not that we need this */
3512 	control->tail_mbuf = m_notify;
3513 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3514 	    control,
3515 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3516 }
3517 
3518 
3519 
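/*
 * Queue a SCTP_STREAM_RESET_EVENT notification listing the affected stream
 * ids (converted to host byte order), if the event is enabled and the
 * notification fits in the receive buffer.
 */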
3520 static void
3521 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3522     int number_entries, uint16_t *list, int flag)
3523 {
3524 	struct mbuf *m_notify;
3525 	struct sctp_queued_to_read *control;
3526 	struct sctp_stream_reset_event *strreset;
3527 	int len;
3528 
3529 	if ((stcb == NULL) ||
3530 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3531 		/* event not enabled */
3532 		return;
3533 	}
3534 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3535 	if (m_notify == NULL)
3536 		/* no space left */
3537 		return;
3538 	SCTP_BUF_LEN(m_notify) = 0;
3539 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3540 	if (len > M_TRAILINGSPACE(m_notify)) {
3541 		/* never enough room */
3542 		sctp_m_freem(m_notify);
3543 		return;
3544 	}
3545 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3546 	memset(strreset, 0, len);
3547 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3548 	strreset->strreset_flags = flag;
3549 	strreset->strreset_length = len;
3550 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3551 	if (number_entries) {
3552 		int i;
3553 
3554 		for (i = 0; i < number_entries; i++) {
3555 			strreset->strreset_stream_list[i] = ntohs(list[i]);
3556 		}
3557 	}
3558 	SCTP_BUF_LEN(m_notify) = len;
3559 	SCTP_BUF_NEXT(m_notify) = NULL;
3560 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3561 		/* no space */
3562 		sctp_m_freem(m_notify);
3563 		return;
3564 	}
3565 	/* append to socket */
3566 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3567 	    0, 0, stcb->asoc.context, 0, 0, 0,
3568 	    m_notify);
3569 	if (control == NULL) {
3570 		/* no memory */
3571 		sctp_m_freem(m_notify);
3572 		return;
3573 	}
3574 	control->spec_flags = M_NOTIFICATION;
3575 	control->length = SCTP_BUF_LEN(m_notify);
3576 	/* not that we need this */
3577 	control->tail_mbuf = m_notify;
3578 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3579 	    control,
3580 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3581 }
3582 
3583 
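/*
 * Queue a SCTP_REMOTE_ERROR notification for an ERROR chunk received from
 * the peer; the chunk itself is appended to the notification when there is
 * room for it.
 */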
3584 static void
3585 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
3586 {
3587 	struct mbuf *m_notify;
3588 	struct sctp_remote_error *sre;
3589 	struct sctp_queued_to_read *control;
3590 	unsigned int notif_len;
3591 	uint16_t chunk_len;
3592 
3593 	if ((stcb == NULL) ||
3594 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
3595 		return;
3596 	}
3597 	if (chunk != NULL) {
3598 		chunk_len = ntohs(chunk->ch.chunk_length);
3599 	} else {
3600 		chunk_len = 0;
3601 	}
3602 	notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
3603 	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3604 	if (m_notify == NULL) {
3605 		/* Retry with smaller value. */
3606 		notif_len = (unsigned int)sizeof(struct sctp_remote_error);
3607 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3608 		if (m_notify == NULL) {
3609 			return;
3610 		}
3611 	}
3612 	SCTP_BUF_NEXT(m_notify) = NULL;
3613 	sre = mtod(m_notify, struct sctp_remote_error *);
3614 	memset(sre, 0, notif_len);
3615 	sre->sre_type = SCTP_REMOTE_ERROR;
3616 	sre->sre_flags = 0;
3617 	sre->sre_length = sizeof(struct sctp_remote_error);
3618 	sre->sre_error = error;
3619 	sre->sre_assoc_id = sctp_get_associd(stcb);
3620 	if (notif_len > sizeof(struct sctp_remote_error)) {
3621 		memcpy(sre->sre_data, chunk, chunk_len);
3622 		sre->sre_length += chunk_len;
3623 	}
3624 	SCTP_BUF_LEN(m_notify) = sre->sre_length;
3625 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3626 	    0, 0, stcb->asoc.context, 0, 0, 0,
3627 	    m_notify);
3628 	if (control != NULL) {
3629 		control->length = SCTP_BUF_LEN(m_notify);
3630 		/* not that we need this */
3631 		control->tail_mbuf = m_notify;
3632 		control->spec_flags = M_NOTIFICATION;
3633 		sctp_add_to_readq(stcb->sctp_ep, stcb,
3634 		    control,
3635 		    &stcb->sctp_socket->so_rcv, 1,
3636 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3637 	} else {
3638 		sctp_m_freem(m_notify);
3639 	}
3640 }
3641 
3642 
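/*
 * Central dispatcher for notifications to the ULP: maps the SCTP_NOTIFY_*
 * codes onto the individual sctp_notify_*() helpers above. Nothing is
 * reported once the socket is gone or can no longer receive data.
 */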
3643 void
3644 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3645     uint32_t error, void *data, int so_locked
3646 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3647     SCTP_UNUSED
3648 #endif
3649 )
3650 {
3651 	if ((stcb == NULL) ||
3652 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3653 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3654 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3655 		/* If the socket is gone we are out of here */
3656 		return;
3657 	}
3658 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3659 		return;
3660 	}
3661 	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3662 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) {
3663 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3664 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3665 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3666 			/* Don't report these in front states */
3667 			return;
3668 		}
3669 	}
3670 	switch (notification) {
3671 	case SCTP_NOTIFY_ASSOC_UP:
3672 		if (stcb->asoc.assoc_up_sent == 0) {
3673 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
3674 			stcb->asoc.assoc_up_sent = 1;
3675 		}
3676 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3677 			sctp_notify_adaptation_layer(stcb);
3678 		}
3679 		if (stcb->asoc.auth_supported == 0) {
3680 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3681 			    NULL, so_locked);
3682 		}
3683 		break;
3684 	case SCTP_NOTIFY_ASSOC_DOWN:
3685 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
3686 		break;
3687 	case SCTP_NOTIFY_INTERFACE_DOWN:
3688 		{
3689 			struct sctp_nets *net;
3690 
3691 			net = (struct sctp_nets *)data;
3692 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3693 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3694 			break;
3695 		}
3696 	case SCTP_NOTIFY_INTERFACE_UP:
3697 		{
3698 			struct sctp_nets *net;
3699 
3700 			net = (struct sctp_nets *)data;
3701 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3702 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3703 			break;
3704 		}
3705 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3706 		{
3707 			struct sctp_nets *net;
3708 
3709 			net = (struct sctp_nets *)data;
3710 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3711 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3712 			break;
3713 		}
3714 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3715 		sctp_notify_send_failed2(stcb, error,
3716 		    (struct sctp_stream_queue_pending *)data, so_locked);
3717 		break;
3718 	case SCTP_NOTIFY_SENT_DG_FAIL:
3719 		sctp_notify_send_failed(stcb, 1, error,
3720 		    (struct sctp_tmit_chunk *)data, so_locked);
3721 		break;
3722 	case SCTP_NOTIFY_UNSENT_DG_FAIL:
3723 		sctp_notify_send_failed(stcb, 0, error,
3724 		    (struct sctp_tmit_chunk *)data, so_locked);
3725 		break;
3726 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3727 		{
3728 			uint32_t val;
3729 
3730 			val = *((uint32_t *)data);
3731 
3732 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3733 			break;
3734 		}
3735 	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
3736 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3737 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3738 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
3739 		} else {
3740 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
3741 		}
3742 		break;
3743 	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
3744 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3745 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3746 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
3747 		} else {
3748 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
3749 		}
3750 		break;
3751 	case SCTP_NOTIFY_ASSOC_RESTART:
3752 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
3753 		if (stcb->asoc.auth_supported == 0) {
3754 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3755 			    NULL, so_locked);
3756 		}
3757 		break;
3758 	case SCTP_NOTIFY_STR_RESET_SEND:
3759 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN);
3760 		break;
3761 	case SCTP_NOTIFY_STR_RESET_RECV:
3762 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING);
3763 		break;
3764 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3765 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3766 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
3767 		break;
3768 	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
3769 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3770 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
3771 		break;
3772 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3773 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3774 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
3775 		break;
3776 	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
3777 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3778 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
3779 		break;
3780 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3781 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3782 		    error, so_locked);
3783 		break;
3784 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3785 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3786 		    error, so_locked);
3787 		break;
3788 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3789 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3790 		    error, so_locked);
3791 		break;
3792 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3793 		sctp_notify_shutdown_event(stcb);
3794 		break;
3795 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3796 		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
3797 		    (uint16_t)(uintptr_t)data,
3798 		    so_locked);
3799 		break;
3800 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3801 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3802 		    (uint16_t)(uintptr_t)data,
3803 		    so_locked);
3804 		break;
3805 	case SCTP_NOTIFY_NO_PEER_AUTH:
3806 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3807 		    (uint16_t)(uintptr_t)data,
3808 		    so_locked);
3809 		break;
3810 	case SCTP_NOTIFY_SENDER_DRY:
3811 		sctp_notify_sender_dry_event(stcb, so_locked);
3812 		break;
3813 	case SCTP_NOTIFY_REMOTE_ERROR:
3814 		sctp_notify_remote_error(stcb, error, data);
3815 		break;
3816 	default:
3817 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3818 		    __func__, notification, notification);
3819 		break;
3820 	}			/* end switch */
3821 }
3822 
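/*
 * Flush all outbound data for the association: walk the sent, send and
 * per-stream output queues, notify the ULP of each failed (sent or unsent)
 * chunk and free the associated mbufs. The TCB send lock is taken unless
 * the caller already holds it (holds_lock).
 */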
3823 void
3824 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
3825 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3826     SCTP_UNUSED
3827 #endif
3828 )
3829 {
3830 	struct sctp_association *asoc;
3831 	struct sctp_stream_out *outs;
3832 	struct sctp_tmit_chunk *chk, *nchk;
3833 	struct sctp_stream_queue_pending *sp, *nsp;
3834 	int i;
3835 
3836 	if (stcb == NULL) {
3837 		return;
3838 	}
3839 	asoc = &stcb->asoc;
3840 	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
3841 		/* already being freed */
3842 		return;
3843 	}
3844 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3845 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3846 	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
3847 		return;
3848 	}
3849 	/* now go through all the gunk, freeing chunks */
3850 	if (holds_lock == 0) {
3851 		SCTP_TCB_SEND_LOCK(stcb);
3852 	}
3853 	/* sent queue SHOULD be empty */
3854 	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
3855 		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3856 		asoc->sent_queue_cnt--;
3857 		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
3858 			if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
3859 				asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
3860 #ifdef INVARIANTS
3861 			} else {
3862 				panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
3863 #endif
3864 			}
3865 		}
3866 		if (chk->data != NULL) {
3867 			sctp_free_bufspace(stcb, asoc, chk, 1);
3868 			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
3869 			    error, chk, so_locked);
3870 			if (chk->data) {
3871 				sctp_m_freem(chk->data);
3872 				chk->data = NULL;
3873 			}
3874 		}
3875 		sctp_free_a_chunk(stcb, chk, so_locked);
3876 		/* sa_ignore FREED_MEMORY */
3877 	}
3878 	/* pending send queue SHOULD be empty */
3879 	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
3880 		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3881 		asoc->send_queue_cnt--;
3882 		if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
3883 			asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
3884 #ifdef INVARIANTS
3885 		} else {
3886 			panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
3887 #endif
3888 		}
3889 		if (chk->data != NULL) {
3890 			sctp_free_bufspace(stcb, asoc, chk, 1);
3891 			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
3892 			    error, chk, so_locked);
3893 			if (chk->data) {
3894 				sctp_m_freem(chk->data);
3895 				chk->data = NULL;
3896 			}
3897 		}
3898 		sctp_free_a_chunk(stcb, chk, so_locked);
3899 		/* sa_ignore FREED_MEMORY */
3900 	}
3901 	for (i = 0; i < asoc->streamoutcnt; i++) {
3902 		/* For each stream */
3903 		outs = &asoc->strmout[i];
3904 		/* clean up any sends there */
3905 		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
3906 			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
3907 			TAILQ_REMOVE(&outs->outqueue, sp, next);
3908 			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, holds_lock);
3909 			sctp_free_spbufspace(stcb, asoc, sp);
3910 			if (sp->data) {
3911 				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3912 				    error, (void *)sp, so_locked);
3913 				if (sp->data) {
3914 					sctp_m_freem(sp->data);
3915 					sp->data = NULL;
3916 					sp->tail_mbuf = NULL;
3917 					sp->length = 0;
3918 				}
3919 			}
3920 			if (sp->net) {
3921 				sctp_free_remote_addr(sp->net);
3922 				sp->net = NULL;
3923 			}
3924 			/* Free the chunk */
3925 			sctp_free_a_strmoq(stcb, sp, so_locked);
3926 			/* sa_ignore FREED_MEMORY */
3927 		}
3928 	}
3929 
3930 	if (holds_lock == 0) {
3931 		SCTP_TCB_SEND_UNLOCK(stcb);
3932 	}
3933 }
3934 
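/*
 * Report an aborted association to the ULP: flush outbound data and deliver
 * either a remote- or local-abort notification, depending on from_peer.
 */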
3935 void
3936 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
3937     struct sctp_abort_chunk *abort, int so_locked
3938 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3939     SCTP_UNUSED
3940 #endif
3941 )
3942 {
3943 	if (stcb == NULL) {
3944 		return;
3945 	}
3946 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3947 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3948 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3949 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3950 	}
3951 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3952 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3953 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3954 		return;
3955 	}
3956 	/* Tell them we lost the asoc */
3957 	sctp_report_all_outbound(stcb, error, 1, so_locked);
3958 	if (from_peer) {
3959 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
3960 	} else {
3961 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
3962 	}
3963 }
3964 
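/*
 * Abort an association in response to an incoming packet: send an ABORT to
 * the peer and, if a TCB exists, notify the ULP and free the association.
 */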
3965 void
3966 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
3967     struct mbuf *m, int iphlen,
3968     struct sockaddr *src, struct sockaddr *dst,
3969     struct sctphdr *sh, struct mbuf *op_err,
3970     uint8_t mflowtype, uint32_t mflowid,
3971     uint32_t vrf_id, uint16_t port)
3972 {
3973 	uint32_t vtag;
3974 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3975 	struct socket *so;
3976 #endif
3977 
3978 	vtag = 0;
3979 	if (stcb != NULL) {
3980 		vtag = stcb->asoc.peer_vtag;
3981 		vrf_id = stcb->asoc.vrf_id;
3982 	}
3983 	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
3984 	    mflowtype, mflowid, inp->fibnum,
3985 	    vrf_id, port);
3986 	if (stcb != NULL) {
3987 		/* We have a TCB to abort, send notification too */
3988 		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
3989 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
3990 		/* Ok, now lets free it */
3991 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3992 		so = SCTP_INP_SO(inp);
3993 		atomic_add_int(&stcb->asoc.refcnt, 1);
3994 		SCTP_TCB_UNLOCK(stcb);
3995 		SCTP_SOCKET_LOCK(so, 1);
3996 		SCTP_TCB_LOCK(stcb);
3997 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3998 #endif
3999 		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4000 		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
4001 		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4002 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4003 		}
4004 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
4005 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
4006 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4007 		SCTP_SOCKET_UNLOCK(so, 1);
4008 #endif
4009 	}
4010 }
4011 #ifdef SCTP_ASOCLOG_OF_TSNS
4012 void
4013 sctp_print_out_track_log(struct sctp_tcb *stcb)
4014 {
4015 #ifdef NOSIY_PRINTS
4016 	int i;
4017 
4018 	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
4019 	SCTP_PRINTF("IN bound TSN log-aaa\n");
4020 	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
4021 		SCTP_PRINTF("None rcvd\n");
4022 		goto none_in;
4023 	}
4024 	if (stcb->asoc.tsn_in_wrapped) {
4025 		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
4026 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4027 			    stcb->asoc.in_tsnlog[i].tsn,
4028 			    stcb->asoc.in_tsnlog[i].strm,
4029 			    stcb->asoc.in_tsnlog[i].seq,
4030 			    stcb->asoc.in_tsnlog[i].flgs,
4031 			    stcb->asoc.in_tsnlog[i].sz);
4032 		}
4033 	}
4034 	if (stcb->asoc.tsn_in_at) {
4035 		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
4036 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4037 			    stcb->asoc.in_tsnlog[i].tsn,
4038 			    stcb->asoc.in_tsnlog[i].strm,
4039 			    stcb->asoc.in_tsnlog[i].seq,
4040 			    stcb->asoc.in_tsnlog[i].flgs,
4041 			    stcb->asoc.in_tsnlog[i].sz);
4042 		}
4043 	}
4044 none_in:
4045 	SCTP_PRINTF("OUT bound TSN log-aaa\n");
4046 	if ((stcb->asoc.tsn_out_at == 0) &&
4047 	    (stcb->asoc.tsn_out_wrapped == 0)) {
4048 		SCTP_PRINTF("None sent\n");
4049 	}
4050 	if (stcb->asoc.tsn_out_wrapped) {
4051 		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
4052 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4053 			    stcb->asoc.out_tsnlog[i].tsn,
4054 			    stcb->asoc.out_tsnlog[i].strm,
4055 			    stcb->asoc.out_tsnlog[i].seq,
4056 			    stcb->asoc.out_tsnlog[i].flgs,
4057 			    stcb->asoc.out_tsnlog[i].sz);
4058 		}
4059 	}
4060 	if (stcb->asoc.tsn_out_at) {
4061 		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
4062 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4063 			    stcb->asoc.out_tsnlog[i].tsn,
4064 			    stcb->asoc.out_tsnlog[i].strm,
4065 			    stcb->asoc.out_tsnlog[i].seq,
4066 			    stcb->asoc.out_tsnlog[i].flgs,
4067 			    stcb->asoc.out_tsnlog[i].sz);
4068 		}
4069 	}
4070 #endif
4071 }
4072 #endif
4073 
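/*
 * Abort an existing association from the local side: send an ABORT chunk to
 * the peer, notify the ULP (unless the socket is gone) and free the TCB.
 */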
4074 void
4075 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
4076     struct mbuf *op_err,
4077     int so_locked
4078 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4079     SCTP_UNUSED
4080 #endif
4081 )
4082 {
4083 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4084 	struct socket *so;
4085 #endif
4086 
4087 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4088 	so = SCTP_INP_SO(inp);
4089 #endif
4090 	if (stcb == NULL) {
4091 		/* Got to have a TCB */
4092 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4093 			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4094 				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4095 				    SCTP_CALLED_DIRECTLY_NOCMPSET);
4096 			}
4097 		}
4098 		return;
4099 	} else {
4100 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
4101 	}
4102 	/* notify the peer */
4103 	sctp_send_abort_tcb(stcb, op_err, so_locked);
4104 	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4105 	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
4106 	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4107 		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4108 	}
4109 	/* notify the ulp */
4110 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
4111 		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
4112 	}
4113 	/* now free the asoc */
4114 #ifdef SCTP_ASOCLOG_OF_TSNS
4115 	sctp_print_out_track_log(stcb);
4116 #endif
4117 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4118 	if (!so_locked) {
4119 		atomic_add_int(&stcb->asoc.refcnt, 1);
4120 		SCTP_TCB_UNLOCK(stcb);
4121 		SCTP_SOCKET_LOCK(so, 1);
4122 		SCTP_TCB_LOCK(stcb);
4123 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4124 	}
4125 #endif
4126 	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
4127 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
4128 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4129 	if (!so_locked) {
4130 		SCTP_SOCKET_UNLOCK(so, 1);
4131 	}
4132 #endif
4133 }
4134 
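/*
 * Handle an out-of-the-blue packet (one with no matching association).
 * Depending on the chunk types present we either stay silent, send a
 * SHUTDOWN-COMPLETE, or send an ABORT, subject to the sctp_blackhole sysctl.
 */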
4135 void
4136 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
4137     struct sockaddr *src, struct sockaddr *dst,
4138     struct sctphdr *sh, struct sctp_inpcb *inp,
4139     struct mbuf *cause,
4140     uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
4141     uint32_t vrf_id, uint16_t port)
4142 {
4143 	struct sctp_chunkhdr *ch, chunk_buf;
4144 	unsigned int chk_length;
4145 	int contains_init_chunk;
4146 
4147 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
4148 	/* Generate a TO address for future reference */
4149 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4150 		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4151 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4152 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
4153 		}
4154 	}
4155 	contains_init_chunk = 0;
4156 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4157 	    sizeof(*ch), (uint8_t *)&chunk_buf);
4158 	while (ch != NULL) {
4159 		chk_length = ntohs(ch->chunk_length);
4160 		if (chk_length < sizeof(*ch)) {
4161 			/* break to abort land */
4162 			break;
4163 		}
4164 		switch (ch->chunk_type) {
4165 		case SCTP_INIT:
4166 			contains_init_chunk = 1;
4167 			break;
4168 		case SCTP_PACKET_DROPPED:
4169 			/* we don't respond to pkt-dropped */
4170 			return;
4171 		case SCTP_ABORT_ASSOCIATION:
4172 			/* we don't respond with an ABORT to an ABORT */
4173 			return;
4174 		case SCTP_SHUTDOWN_COMPLETE:
4175 			/*
4176 			 * we ignore it since we are not waiting for it and
4177 			 * peer is gone
4178 			 */
4179 			return;
4180 		case SCTP_SHUTDOWN_ACK:
4181 			sctp_send_shutdown_complete2(src, dst, sh,
4182 			    mflowtype, mflowid, fibnum,
4183 			    vrf_id, port);
4184 			return;
4185 		default:
4186 			break;
4187 		}
4188 		offset += SCTP_SIZE32(chk_length);
4189 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4190 		    sizeof(*ch), (uint8_t *)&chunk_buf);
4191 	}
4192 	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
4193 	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
4194 	    (contains_init_chunk == 0))) {
4195 		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
4196 		    mflowtype, mflowid, fibnum,
4197 		    vrf_id, port);
4198 	}
4199 }
4200 
4201 /*
4202  * check the inbound datagram to make sure there is not an abort inside it,
4203  * if there is return 1, else return 0.
4204  */
4205 int
4206 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtagfill)
4207 {
4208 	struct sctp_chunkhdr *ch;
4209 	struct sctp_init_chunk *init_chk, chunk_buf;
4210 	int offset;
4211 	unsigned int chk_length;
4212 
4213 	offset = iphlen + sizeof(struct sctphdr);
4214 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4215 	    (uint8_t *)&chunk_buf);
4216 	while (ch != NULL) {
4217 		chk_length = ntohs(ch->chunk_length);
4218 		if (chk_length < sizeof(*ch)) {
4219 			/* packet is probably corrupt */
4220 			break;
4221 		}
4222 		/* we seem to be ok, is it an abort? */
4223 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4224 			/* yep, tell them */
4225 			return (1);
4226 		}
4227 		if (ch->chunk_type == SCTP_INITIATION) {
4228 			/* need to update the Vtag */
4229 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4230 			    offset, sizeof(*init_chk), (uint8_t *)&chunk_buf);
4231 			if (init_chk != NULL) {
4232 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4233 			}
4234 		}
4235 		/* Nope, move to the next chunk */
4236 		offset += SCTP_SIZE32(chk_length);
4237 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4238 		    sizeof(*ch), (uint8_t *)&chunk_buf);
4239 	}
4240 	return (0);
4241 }
4242 
4243 /*
4244  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4245  * set (i.e. it's 0) so, create this function to compare link local scopes
4246  */
4247 #ifdef INET6
4248 uint32_t
4249 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4250 {
4251 	struct sockaddr_in6 a, b;
4252 
4253 	/* save copies */
4254 	a = *addr1;
4255 	b = *addr2;
4256 
4257 	if (a.sin6_scope_id == 0)
4258 		if (sa6_recoverscope(&a)) {
4259 			/* can't get scope, so can't match */
4260 			return (0);
4261 		}
4262 	if (b.sin6_scope_id == 0)
4263 		if (sa6_recoverscope(&b)) {
4264 			/* can't get scope, so can't match */
4265 			return (0);
4266 		}
4267 	if (a.sin6_scope_id != b.sin6_scope_id)
4268 		return (0);
4269 
4270 	return (1);
4271 }
4272 
4273 /*
4274  * returns a sockaddr_in6 with embedded scope recovered and removed
4275  */
4276 struct sockaddr_in6 *
4277 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4278 {
4279 	/* check and strip embedded scope junk */
4280 	if (addr->sin6_family == AF_INET6) {
4281 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4282 			if (addr->sin6_scope_id == 0) {
4283 				*store = *addr;
4284 				if (!sa6_recoverscope(store)) {
4285 					/* use the recovered scope */
4286 					addr = store;
4287 				}
4288 			} else {
4289 				/* else, return the original "to" addr */
4290 				in6_clearscope(&addr->sin6_addr);
4291 			}
4292 		}
4293 	}
4294 	return (addr);
4295 }
4296 #endif
4297 
4298 /*
4299  * are the two addresses the same?  currently a "scopeless" check returns: 1
4300  * if same, 0 if not
4301  */
4302 int
4303 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4304 {
4305 
4306 	/* must be valid */
4307 	if (sa1 == NULL || sa2 == NULL)
4308 		return (0);
4309 
4310 	/* must be the same family */
4311 	if (sa1->sa_family != sa2->sa_family)
4312 		return (0);
4313 
4314 	switch (sa1->sa_family) {
4315 #ifdef INET6
4316 	case AF_INET6:
4317 		{
4318 			/* IPv6 addresses */
4319 			struct sockaddr_in6 *sin6_1, *sin6_2;
4320 
4321 			sin6_1 = (struct sockaddr_in6 *)sa1;
4322 			sin6_2 = (struct sockaddr_in6 *)sa2;
4323 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4324 			    sin6_2));
4325 		}
4326 #endif
4327 #ifdef INET
4328 	case AF_INET:
4329 		{
4330 			/* IPv4 addresses */
4331 			struct sockaddr_in *sin_1, *sin_2;
4332 
4333 			sin_1 = (struct sockaddr_in *)sa1;
4334 			sin_2 = (struct sockaddr_in *)sa2;
4335 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4336 		}
4337 #endif
4338 	default:
4339 		/* we don't do these... */
4340 		return (0);
4341 	}
4342 }
4343 
4344 void
4345 sctp_print_address(struct sockaddr *sa)
4346 {
4347 #ifdef INET6
4348 	char ip6buf[INET6_ADDRSTRLEN];
4349 #endif
4350 
4351 	switch (sa->sa_family) {
4352 #ifdef INET6
4353 	case AF_INET6:
4354 		{
4355 			struct sockaddr_in6 *sin6;
4356 
4357 			sin6 = (struct sockaddr_in6 *)sa;
4358 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4359 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4360 			    ntohs(sin6->sin6_port),
4361 			    sin6->sin6_scope_id);
4362 			break;
4363 		}
4364 #endif
4365 #ifdef INET
4366 	case AF_INET:
4367 		{
4368 			struct sockaddr_in *sin;
4369 			unsigned char *p;
4370 
4371 			sin = (struct sockaddr_in *)sa;
4372 			p = (unsigned char *)&sin->sin_addr;
4373 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4374 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4375 			break;
4376 		}
4377 #endif
4378 	default:
4379 		SCTP_PRINTF("?\n");
4380 		break;
4381 	}
4382 }
4383 
4384 void
4385 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
4386     struct sctp_inpcb *new_inp,
4387     struct sctp_tcb *stcb,
4388     int waitflags)
4389 {
4390 	/*
4391 	 * go through our old INP and pull off any control structures that
4392 	 * belong to stcb and move then to the new inp.
4393 	 */
4394 	struct socket *old_so, *new_so;
4395 	struct sctp_queued_to_read *control, *nctl;
4396 	struct sctp_readhead tmp_queue;
4397 	struct mbuf *m;
4398 	int error = 0;
4399 
4400 	old_so = old_inp->sctp_socket;
4401 	new_so = new_inp->sctp_socket;
4402 	TAILQ_INIT(&tmp_queue);
4403 	error = sblock(&old_so->so_rcv, waitflags);
4404 	if (error) {
4405 		/*
4406 		 * Gak, can't get the sblock; we have a problem. Data will be
4407 		 * left stranded, and we don't dare look at it since the
4408 		 * other thread may be reading something. Oh well, it's a
4409 		 * screwed up app that does a peeloff OR an accept while
4410 		 * reading from the main socket... actually it's only the
4411 		 * peeloff() case, since I think a read will fail on a
4412 		 * listening socket.
4413 		 */
4414 		return;
4415 	}
4416 	/* lock the socket buffers */
4417 	SCTP_INP_READ_LOCK(old_inp);
4418 	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
4419 		/* Pull off everything for our target stcb */
4420 		if (control->stcb == stcb) {
4421 			/* remove it, we want it */
4422 			TAILQ_REMOVE(&old_inp->read_queue, control, next);
4423 			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
4424 			m = control->data;
4425 			while (m) {
4426 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4427 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
4428 				}
4429 				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
4430 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4431 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4432 				}
4433 				m = SCTP_BUF_NEXT(m);
4434 			}
4435 		}
4436 	}
4437 	SCTP_INP_READ_UNLOCK(old_inp);
4438 	/* Remove the sb-lock on the old socket */
4439 
4440 	sbunlock(&old_so->so_rcv);
4441 	/* Now we move them over to the new socket buffer */
4442 	SCTP_INP_READ_LOCK(new_inp);
4443 	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
4444 		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
4445 		m = control->data;
4446 		while (m) {
4447 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4448 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4449 			}
4450 			sctp_sballoc(stcb, &new_so->so_rcv, m);
4451 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4452 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4453 			}
4454 			m = SCTP_BUF_NEXT(m);
4455 		}
4456 	}
4457 	SCTP_INP_READ_UNLOCK(new_inp);
4458 }
4459 
4460 void
4461 sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
4462     struct sctp_tcb *stcb,
4463     int so_locked
4464 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4465     SCTP_UNUSED
4466 #endif
4467 )
4468 {
4469 	if ((inp != NULL) && (inp->sctp_socket != NULL)) {
4470 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
4471 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
4472 		} else {
4473 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4474 			struct socket *so;
4475 
4476 			so = SCTP_INP_SO(inp);
4477 			if (!so_locked) {
4478 				if (stcb) {
4479 					atomic_add_int(&stcb->asoc.refcnt, 1);
4480 					SCTP_TCB_UNLOCK(stcb);
4481 				}
4482 				SCTP_SOCKET_LOCK(so, 1);
4483 				if (stcb) {
4484 					SCTP_TCB_LOCK(stcb);
4485 					atomic_subtract_int(&stcb->asoc.refcnt, 1);
4486 				}
4487 				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4488 					SCTP_SOCKET_UNLOCK(so, 1);
4489 					return;
4490 				}
4491 			}
4492 #endif
4493 			sctp_sorwakeup(inp, inp->sctp_socket);
4494 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4495 			if (!so_locked) {
4496 				SCTP_SOCKET_UNLOCK(so, 1);
4497 			}
4498 #endif
4499 		}
4500 	}
4501 }
4502 
4503 void
4504 sctp_add_to_readq(struct sctp_inpcb *inp,
4505     struct sctp_tcb *stcb,
4506     struct sctp_queued_to_read *control,
4507     struct sockbuf *sb,
4508     int end,
4509     int inp_read_lock_held,
4510     int so_locked
4511 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4512     SCTP_UNUSED
4513 #endif
4514 )
4515 {
4516 	/*
4517 	 * Here we must place the control on the end of the socket read
4518 	 * queue AND increment sb_cc so that select will work properly on
4519 	 * read.
4520 	 */
4521 	struct mbuf *m, *prev = NULL;
4522 
4523 	if (inp == NULL) {
4524 		/* Gak, TSNH!! */
4525 #ifdef INVARIANTS
4526 		panic("Gak, inp NULL on add_to_readq");
4527 #endif
4528 		return;
4529 	}
4530 	if (inp_read_lock_held == 0)
4531 		SCTP_INP_READ_LOCK(inp);
4532 	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
4533 		sctp_free_remote_addr(control->whoFrom);
4534 		if (control->data) {
4535 			sctp_m_freem(control->data);
4536 			control->data = NULL;
4537 		}
4538 		sctp_free_a_readq(stcb, control);
4539 		if (inp_read_lock_held == 0)
4540 			SCTP_INP_READ_UNLOCK(inp);
4541 		return;
4542 	}
4543 	if (!(control->spec_flags & M_NOTIFICATION)) {
4544 		atomic_add_int(&inp->total_recvs, 1);
4545 		if (!control->do_not_ref_stcb) {
4546 			atomic_add_int(&stcb->total_recvs, 1);
4547 		}
4548 	}
4549 	m = control->data;
4550 	control->held_length = 0;
4551 	control->length = 0;
4552 	while (m) {
4553 		if (SCTP_BUF_LEN(m) == 0) {
4554 			/* Skip mbufs with NO length */
4555 			if (prev == NULL) {
4556 				/* First one */
4557 				control->data = sctp_m_free(m);
4558 				m = control->data;
4559 			} else {
4560 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
4561 				m = SCTP_BUF_NEXT(prev);
4562 			}
4563 			if (m == NULL) {
4564 				control->tail_mbuf = prev;
4565 			}
4566 			continue;
4567 		}
4568 		prev = m;
4569 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4570 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4571 		}
4572 		sctp_sballoc(stcb, sb, m);
4573 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4574 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4575 		}
4576 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
4577 		m = SCTP_BUF_NEXT(m);
4578 	}
4579 	if (prev != NULL) {
4580 		control->tail_mbuf = prev;
4581 	} else {
4582 		/* Everything got collapsed out?? */
4583 		sctp_free_remote_addr(control->whoFrom);
4584 		sctp_free_a_readq(stcb, control);
4585 		if (inp_read_lock_held == 0)
4586 			SCTP_INP_READ_UNLOCK(inp);
4587 		return;
4588 	}
4589 	if (end) {
4590 		control->end_added = 1;
4591 	}
4592 	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
4593 	control->on_read_q = 1;
4594 	if (inp_read_lock_held == 0)
4595 		SCTP_INP_READ_UNLOCK(inp);
4596 	if (inp && inp->sctp_socket) {
4597 		sctp_wakeup_the_read_socket(inp, stcb, so_locked);
4598 	}
4599 }
4600 
4601 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4602  *************ALTERNATE ROUTING CODE
4603  */
4604 
4605 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4606  *************ALTERNATE ROUTING CODE
4607  */
4608 
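/*
 * Build an mbuf containing a generic error cause with the given cause code
 * and the string info as cause-specific data. Returns NULL on bad input or
 * if no mbuf is available.
 */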
4609 struct mbuf *
4610 sctp_generate_cause(uint16_t code, char *info)
4611 {
4612 	struct mbuf *m;
4613 	struct sctp_gen_error_cause *cause;
4614 	size_t info_len;
4615 	uint16_t len;
4616 
4617 	if ((code == 0) || (info == NULL)) {
4618 		return (NULL);
4619 	}
4620 	info_len = strlen(info);
4621 	if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
4622 		return (NULL);
4623 	}
4624 	len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len);
4625 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4626 	if (m != NULL) {
4627 		SCTP_BUF_LEN(m) = len;
4628 		cause = mtod(m, struct sctp_gen_error_cause *);
4629 		cause->code = htons(code);
4630 		cause->length = htons(len);
4631 		memcpy(cause->info, info, info_len);
4632 	}
4633 	return (m);
4634 }
4635 
4636 struct mbuf *
4637 sctp_generate_no_user_data_cause(uint32_t tsn)
4638 {
4639 	struct mbuf *m;
4640 	struct sctp_error_no_user_data *no_user_data_cause;
4641 	uint16_t len;
4642 
4643 	len = (uint16_t)sizeof(struct sctp_error_no_user_data);
4644 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4645 	if (m != NULL) {
4646 		SCTP_BUF_LEN(m) = len;
4647 		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4648 		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4649 		no_user_data_cause->cause.length = htons(len);
4650 		no_user_data_cause->tsn = htonl(tsn);
4651 	}
4652 	return (m);
4653 }
4654 
4655 #ifdef SCTP_MBCNT_LOGGING
4656 void
4657 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4658     struct sctp_tmit_chunk *tp1, int chk_cnt)
4659 {
4660 	if (tp1->data == NULL) {
4661 		return;
4662 	}
4663 	asoc->chunks_on_out_queue -= chk_cnt;
4664 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4665 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4666 		    asoc->total_output_queue_size,
4667 		    tp1->book_size,
4668 		    0,
4669 		    tp1->mbcnt);
4670 	}
4671 	if (asoc->total_output_queue_size >= tp1->book_size) {
4672 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4673 	} else {
4674 		asoc->total_output_queue_size = 0;
4675 	}
4676 
4677 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4678 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4679 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4680 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4681 		} else {
4682 			stcb->sctp_socket->so_snd.sb_cc = 0;
4683 
4684 		}
4685 	}
4686 }
4687 
4688 #endif
4689 
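/*
 * Abandon a PR-SCTP message: mark every fragment of the message identified
 * by tp1 (across the sent, send and stream-out queues) as skipped via
 * FORWARD-TSN, notify the ULP, free the data and return the amount of
 * buffer space released.
 */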
4690 int
4691 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4692     uint8_t sent, int so_locked
4693 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4694     SCTP_UNUSED
4695 #endif
4696 )
4697 {
4698 	struct sctp_stream_out *strq;
4699 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4700 	struct sctp_stream_queue_pending *sp;
4701 	uint32_t mid;
4702 	uint16_t sid;
4703 	uint8_t foundeom = 0;
4704 	int ret_sz = 0;
4705 	int notdone;
4706 	int do_wakeup_routine = 0;
4707 
4708 	sid = tp1->rec.data.sid;
4709 	mid = tp1->rec.data.mid;
4710 	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
4711 		stcb->asoc.abandoned_sent[0]++;
4712 		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4713 		stcb->asoc.strmout[sid].abandoned_sent[0]++;
4714 #if defined(SCTP_DETAILED_STR_STATS)
4715 		stcb->asoc.strmout[stream].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4716 #endif
4717 	} else {
4718 		stcb->asoc.abandoned_unsent[0]++;
4719 		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4720 		stcb->asoc.strmout[sid].abandoned_unsent[0]++;
4721 #if defined(SCTP_DETAILED_STR_STATS)
4722 		stcb->asoc.strmout[stream].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4723 #endif
4724 	}
4725 	do {
4726 		ret_sz += tp1->book_size;
4727 		if (tp1->data != NULL) {
4728 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4729 				sctp_flight_size_decrease(tp1);
4730 				sctp_total_flight_decrease(stcb, tp1);
4731 			}
4732 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4733 			stcb->asoc.peers_rwnd += tp1->send_size;
4734 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4735 			if (sent) {
4736 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4737 			} else {
4738 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4739 			}
4740 			if (tp1->data) {
4741 				sctp_m_freem(tp1->data);
4742 				tp1->data = NULL;
4743 			}
4744 			do_wakeup_routine = 1;
4745 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4746 				stcb->asoc.sent_queue_cnt_removeable--;
4747 			}
4748 		}
4749 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4750 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4751 		    SCTP_DATA_NOT_FRAG) {
4752 			/* not fragmented, we are done */
4753 			notdone = 0;
4754 			foundeom = 1;
4755 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4756 			/* end of frag, we are done */
4757 			notdone = 0;
4758 			foundeom = 1;
4759 		} else {
4760 			/*
4761 			 * It's a begin or middle piece; we must mark all of
4762 			 * it
4763 			 */
4764 			notdone = 1;
4765 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4766 		}
4767 	} while (tp1 && notdone);
4768 	if (foundeom == 0) {
4769 		/*
4770 		 * The multi-part message was scattered across the send and
4771 		 * sent queue.
4772 		 */
4773 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4774 			if ((tp1->rec.data.sid != sid) ||
4775 			    (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) {
4776 				break;
4777 			}
4778 			/*
4779 			 * save it in chk in case we have some on the stream out
4780 			 * queue. If so, and we have an un-transmitted one, we
4781 			 * don't have to fudge the TSN.
4782 			 */
4783 			chk = tp1;
4784 			ret_sz += tp1->book_size;
4785 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4786 			if (sent) {
4787 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4788 			} else {
4789 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4790 			}
4791 			if (tp1->data) {
4792 				sctp_m_freem(tp1->data);
4793 				tp1->data = NULL;
4794 			}
4795 			/* No flight involved here; book the size to 0 */
4796 			tp1->book_size = 0;
4797 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4798 				foundeom = 1;
4799 			}
4800 			do_wakeup_routine = 1;
4801 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4802 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4803 			/*
4804 			 * Move it on to the sent queue so we can wait for it
4805 			 * to be passed by.
4806 			 */
4807 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4808 			    sctp_next);
4809 			stcb->asoc.send_queue_cnt--;
4810 			stcb->asoc.sent_queue_cnt++;
4811 		}
4812 	}
4813 	if (foundeom == 0) {
4814 		/*
4815 		 * Still no eom found. That means there is stuff left on the
4816 		 * stream out queue.. yuck.
4817 		 */
4818 		SCTP_TCB_SEND_LOCK(stcb);
4819 		strq = &stcb->asoc.strmout[sid];
4820 		sp = TAILQ_FIRST(&strq->outqueue);
4821 		if (sp != NULL) {
4822 			sp->discard_rest = 1;
4823 			/*
4824 			 * We may need to put a chunk on the queue that
4825 			 * holds the TSN that would have been sent with the
4826 			 * LAST bit.
4827 			 */
4828 			if (chk == NULL) {
4829 				/* Yep, we have to */
4830 				sctp_alloc_a_chunk(stcb, chk);
4831 				if (chk == NULL) {
4832 					/*
4833 					 * we are hosed. All we can do is
4834 					 * nothing.. which will cause an
4835 					 * abort if the peer is paying
4836 					 * attention.
4837 					 */
4838 					goto oh_well;
4839 				}
4840 				memset(chk, 0, sizeof(*chk));
4841 				chk->rec.data.rcv_flags = 0;
4842 				chk->sent = SCTP_FORWARD_TSN_SKIP;
4843 				chk->asoc = &stcb->asoc;
4844 				if (stcb->asoc.idata_supported == 0) {
4845 					if (sp->sinfo_flags & SCTP_UNORDERED) {
4846 						chk->rec.data.mid = 0;
4847 					} else {
4848 						chk->rec.data.mid = strq->next_mid_ordered;
4849 					}
4850 				} else {
4851 					if (sp->sinfo_flags & SCTP_UNORDERED) {
4852 						chk->rec.data.mid = strq->next_mid_unordered;
4853 					} else {
4854 						chk->rec.data.mid = strq->next_mid_ordered;
4855 					}
4856 				}
4857 				chk->rec.data.sid = sp->sid;
4858 				chk->rec.data.ppid = sp->ppid;
4859 				chk->rec.data.context = sp->context;
4860 				chk->flags = sp->act_flags;
4861 				chk->whoTo = NULL;
4862 				chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4863 				strq->chunks_on_queues++;
4864 				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4865 				stcb->asoc.sent_queue_cnt++;
4866 				stcb->asoc.pr_sctp_cnt++;
4867 			}
4868 			chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4869 			if (sp->sinfo_flags & SCTP_UNORDERED) {
4870 				chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED;
4871 			}
4872 			if (stcb->asoc.idata_supported == 0) {
4873 				if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) {
4874 					strq->next_mid_ordered++;
4875 				}
4876 			} else {
4877 				if (sp->sinfo_flags & SCTP_UNORDERED) {
4878 					strq->next_mid_unordered++;
4879 				} else {
4880 					strq->next_mid_ordered++;
4881 				}
4882 			}
4883 	oh_well:
4884 			if (sp->data) {
4885 				/*
4886 				 * Pull any data to free up the SB and allow the
4887 				 * sender to "add more" while we throw this
4888 				 * away :-)
4889 				 */
4890 				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
4891 				ret_sz += sp->length;
4892 				do_wakeup_routine = 1;
4893 				sp->some_taken = 1;
4894 				sctp_m_freem(sp->data);
4895 				sp->data = NULL;
4896 				sp->tail_mbuf = NULL;
4897 				sp->length = 0;
4898 			}
4899 		}
4900 		SCTP_TCB_SEND_UNLOCK(stcb);
4901 	}
4902 	if (do_wakeup_routine) {
4903 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4904 		struct socket *so;
4905 
4906 		so = SCTP_INP_SO(stcb->sctp_ep);
4907 		if (!so_locked) {
4908 			atomic_add_int(&stcb->asoc.refcnt, 1);
4909 			SCTP_TCB_UNLOCK(stcb);
4910 			SCTP_SOCKET_LOCK(so, 1);
4911 			SCTP_TCB_LOCK(stcb);
4912 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4913 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4914 				/* assoc was freed while we were unlocked */
4915 				SCTP_SOCKET_UNLOCK(so, 1);
4916 				return (ret_sz);
4917 			}
4918 		}
4919 #endif
4920 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4921 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4922 		if (!so_locked) {
4923 			SCTP_SOCKET_UNLOCK(so, 1);
4924 		}
4925 #endif
4926 	}
4927 	return (ret_sz);
4928 }
4929 
4930 /*
4931  * checks to see if the given address, sa, is one that is currently known by
4932  * the kernel. Note: can't distinguish the same address on multiple interfaces
4933  * and doesn't handle multiple addresses with different zone/scope id's. Note:
4934  * ifa_ifwithaddr() compares the entire sockaddr struct
4935  */
4936 struct sctp_ifa *
4937 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4938     int holds_lock)
4939 {
4940 	struct sctp_laddr *laddr;
4941 
4942 	if (holds_lock == 0) {
4943 		SCTP_INP_RLOCK(inp);
4944 	}
4945 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4946 		if (laddr->ifa == NULL)
4947 			continue;
4948 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4949 			continue;
4950 #ifdef INET
4951 		if (addr->sa_family == AF_INET) {
4952 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4953 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4954 				/* found him. */
4955 				if (holds_lock == 0) {
4956 					SCTP_INP_RUNLOCK(inp);
4957 				}
4958 				return (laddr->ifa);
4959 				break;
4960 			}
4961 		}
4962 #endif
4963 #ifdef INET6
4964 		if (addr->sa_family == AF_INET6) {
4965 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4966 			    &laddr->ifa->address.sin6)) {
4967 				/* found him. */
4968 				if (holds_lock == 0) {
4969 					SCTP_INP_RUNLOCK(inp);
4970 				}
4971 				return (laddr->ifa);
4972 				break;
4973 			}
4974 		}
4975 #endif
4976 	}
4977 	if (holds_lock == 0) {
4978 		SCTP_INP_RUNLOCK(inp);
4979 	}
4980 	return (NULL);
4981 }
4982 
4983 uint32_t
4984 sctp_get_ifa_hash_val(struct sockaddr *addr)
4985 {
4986 	switch (addr->sa_family) {
4987 #ifdef INET
4988 	case AF_INET:
4989 		{
4990 			struct sockaddr_in *sin;
4991 
4992 			sin = (struct sockaddr_in *)addr;
4993 			return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
4994 		}
4995 #endif
4996 #ifdef INET6
4997 	case AF_INET6:
4998 		{
4999 			struct sockaddr_in6 *sin6;
5000 			uint32_t hash_of_addr;
5001 
5002 			sin6 = (struct sockaddr_in6 *)addr;
5003 			hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
5004 			    sin6->sin6_addr.s6_addr32[1] +
5005 			    sin6->sin6_addr.s6_addr32[2] +
5006 			    sin6->sin6_addr.s6_addr32[3]);
5007 			hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
5008 			return (hash_of_addr);
5009 		}
5010 #endif
5011 	default:
5012 		break;
5013 	}
5014 	return (0);
5015 }
5016 
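/*
 * Look up addr in the given VRF's address hash table and return the
 * matching sctp_ifa, or NULL if it is not a known local address.
 */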
5017 struct sctp_ifa *
5018 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5019 {
5020 	struct sctp_ifa *sctp_ifap;
5021 	struct sctp_vrf *vrf;
5022 	struct sctp_ifalist *hash_head;
5023 	uint32_t hash_of_addr;
5024 
5025 	if (holds_lock == 0)
5026 		SCTP_IPI_ADDR_RLOCK();
5027 
5028 	vrf = sctp_find_vrf(vrf_id);
5029 	if (vrf == NULL) {
5030 		if (holds_lock == 0)
5031 			SCTP_IPI_ADDR_RUNLOCK();
5032 		return (NULL);
5033 	}
5034 	hash_of_addr = sctp_get_ifa_hash_val(addr);
5035 
5036 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5037 	if (hash_head == NULL) {
5038 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5039 		    hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark,
5040 		    (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark));
5041 		sctp_print_address(addr);
5042 		SCTP_PRINTF("No such bucket for address\n");
5043 		if (holds_lock == 0)
5044 			SCTP_IPI_ADDR_RUNLOCK();
5045 
5046 		return (NULL);
5047 	}
5048 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5049 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5050 			continue;
5051 #ifdef INET
5052 		if (addr->sa_family == AF_INET) {
5053 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5054 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5055 				/* found him. */
5056 				if (holds_lock == 0)
5057 					SCTP_IPI_ADDR_RUNLOCK();
5058 				return (sctp_ifap);
5059 				break;
5060 			}
5061 		}
5062 #endif
5063 #ifdef INET6
5064 		if (addr->sa_family == AF_INET6) {
5065 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5066 			    &sctp_ifap->address.sin6)) {
5067 				/* found him. */
5068 				if (holds_lock == 0)
5069 					SCTP_IPI_ADDR_RUNLOCK();
5070 				return (sctp_ifap);
5071 				break;
5072 			}
5073 		}
5074 #endif
5075 	}
5076 	if (holds_lock == 0)
5077 		SCTP_IPI_ADDR_RUNLOCK();
5078 	return (NULL);
5079 }
5080 
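/*
 * Called as the user consumes data from the receive buffer: if the receive
 * window has grown by at least rwnd_req bytes since the last report, send a
 * window-update SACK right away rather than waiting for the timer.
 */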
5081 static void
5082 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock,
5083     uint32_t rwnd_req)
5084 {
5085 	/* User pulled some data, do we need a rwnd update? */
5086 	int r_unlocked = 0;
5087 	uint32_t dif, rwnd;
5088 	struct socket *so = NULL;
5089 
5090 	if (stcb == NULL)
5091 		return;
5092 
5093 	atomic_add_int(&stcb->asoc.refcnt, 1);
5094 
5095 	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
5096 	    SCTP_STATE_SHUTDOWN_RECEIVED |
5097 	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
5098 		/* Pre-check: if we are freeing, no update */
5099 		goto no_lock;
5100 	}
5101 	SCTP_INP_INCR_REF(stcb->sctp_ep);
5102 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5103 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5104 		goto out;
5105 	}
5106 	so = stcb->sctp_socket;
5107 	if (so == NULL) {
5108 		goto out;
5109 	}
5110 	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
5111 	/* Have you freed enough to look? */
5112 	*freed_so_far = 0;
5113 	/* Yep, it's worth a look and the lock overhead */
5114 
5115 	/* Figure out what the rwnd would be */
5116 	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
5117 	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
5118 		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
5119 	} else {
5120 		dif = 0;
5121 	}
5122 	if (dif >= rwnd_req) {
5123 		if (hold_rlock) {
5124 			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5125 			r_unlocked = 1;
5126 		}
5127 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5128 			/*
5129 			 * One last check before we allow the guy possibly
5130 			 * to get in. There is a race where the guy has not
5131 			 * reached the gate; in that case we just bail out.
5132 			 */
5133 			goto out;
5134 		}
5135 		SCTP_TCB_LOCK(stcb);
5136 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5137 			/* No reports here */
5138 			SCTP_TCB_UNLOCK(stcb);
5139 			goto out;
5140 		}
5141 		SCTP_STAT_INCR(sctps_wu_sacks_sent);
5142 		sctp_send_sack(stcb, SCTP_SO_LOCKED);
5143 
5144 		sctp_chunk_output(stcb->sctp_ep, stcb,
5145 		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
5146 		/* make sure no timer is running */
5147 		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
5148 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
5149 		SCTP_TCB_UNLOCK(stcb);
5150 	} else {
5151 		/* Update how much we have pending */
5152 		stcb->freed_by_sorcv_sincelast = dif;
5153 	}
5154 out:
5155 	if (so && r_unlocked && hold_rlock) {
5156 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5157 	}
5158 	SCTP_INP_DECR_REF(stcb->sctp_ep);
5159 no_lock:
5160 	atomic_add_int(&stcb->asoc.refcnt, -1);
5161 	return;
5162 }
5163 
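/*
 * Protocol-specific receive path: pulls queued data off the endpoint's read
 * queue into uio (or, with mp != NULL, as an mbuf chain), optionally filling
 * in the source address and sinfo, honoring the MSG_* flag semantics
 * described below.
 */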
5164 int
5165 sctp_sorecvmsg(struct socket *so,
5166     struct uio *uio,
5167     struct mbuf **mp,
5168     struct sockaddr *from,
5169     int fromlen,
5170     int *msg_flags,
5171     struct sctp_sndrcvinfo *sinfo,
5172     int filling_sinfo)
5173 {
5174 	/*
5175 	 * MSG flags we will look at: MSG_DONTWAIT - non-blocking I/O.
5176 	 * MSG_PEEK - Look, don't touch :-D (only valid with the OUT mbuf copy
5177 	 * mp == NULL, thus uio is the copy method to userland). MSG_WAITALL - ??
5178 	 * On the way out we may set any combination of:
5179 	 * MSG_NOTIFICATION, MSG_EOR.
5180 	 *
5181 	 */
5182 	struct sctp_inpcb *inp = NULL;
5183 	int my_len = 0;
5184 	int cp_len = 0, error = 0;
5185 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5186 	struct mbuf *m = NULL;
5187 	struct sctp_tcb *stcb = NULL;
5188 	int wakeup_read_socket = 0;
5189 	int freecnt_applied = 0;
5190 	int out_flags = 0, in_flags = 0;
5191 	int block_allowed = 1;
5192 	uint32_t freed_so_far = 0;
5193 	uint32_t copied_so_far = 0;
5194 	int in_eeor_mode = 0;
5195 	int no_rcv_needed = 0;
5196 	uint32_t rwnd_req = 0;
5197 	int hold_sblock = 0;
5198 	int hold_rlock = 0;
5199 	ssize_t slen = 0;
5200 	uint32_t held_length = 0;
5201 	int sockbuf_lock = 0;
5202 
5203 	if (uio == NULL) {
5204 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5205 		return (EINVAL);
5206 	}
5207 	if (msg_flags) {
5208 		in_flags = *msg_flags;
5209 		if (in_flags & MSG_PEEK)
5210 			SCTP_STAT_INCR(sctps_read_peeks);
5211 	} else {
5212 		in_flags = 0;
5213 	}
5214 	slen = uio->uio_resid;
5215 
5216 	/* Pull in and set up our int flags */
5217 	if (in_flags & MSG_OOB) {
5218 		/* Out-of-band data is NOT supported */
5219 		return (EOPNOTSUPP);
5220 	}
5221 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5222 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5223 		return (EINVAL);
5224 	}
5225 	if ((in_flags & (MSG_DONTWAIT
5226 	    | MSG_NBIO
5227 	    )) ||
5228 	    SCTP_SO_IS_NBIO(so)) {
5229 		block_allowed = 0;
5230 	}
5231 	/* setup the endpoint */
5232 	inp = (struct sctp_inpcb *)so->so_pcb;
5233 	if (inp == NULL) {
5234 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5235 		return (EFAULT);
5236 	}
5237 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5238 	/* Must be at least an MTU's worth */
5239 	if (rwnd_req < SCTP_MIN_RWND)
5240 		rwnd_req = SCTP_MIN_RWND;
5241 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5242 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5243 		sctp_misc_ints(SCTP_SORECV_ENTER,
5244 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5245 	}
5246 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5247 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5248 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5249 	}
5250 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5251 	if (error) {
5252 		goto release_unlocked;
5253 	}
5254 	sockbuf_lock = 1;
5255 restart:
5256 
5257 
5258 restart_nosblocks:
5259 	if (hold_sblock == 0) {
5260 		SOCKBUF_LOCK(&so->so_rcv);
5261 		hold_sblock = 1;
5262 	}
5263 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5264 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5265 		goto out;
5266 	}
5267 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5268 		if (so->so_error) {
5269 			error = so->so_error;
5270 			if ((in_flags & MSG_PEEK) == 0)
5271 				so->so_error = 0;
5272 			goto out;
5273 		} else {
5274 			if (so->so_rcv.sb_cc == 0) {
5275 				/* indicate EOF */
5276 				error = 0;
5277 				goto out;
5278 			}
5279 		}
5280 	}
5281 	if (so->so_rcv.sb_cc <= held_length) {
5282 		if (so->so_error) {
5283 			error = so->so_error;
5284 			if ((in_flags & MSG_PEEK) == 0) {
5285 				so->so_error = 0;
5286 			}
5287 			goto out;
5288 		}
5289 		if ((so->so_rcv.sb_cc == 0) &&
5290 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5291 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5292 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5293 				/*
5294 				 * For active open side clear flags for
5295 				 * re-use passive open is blocked by
5296 				 * connect.
5297 				 */
5298 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5299 					/*
5300 					 * You were aborted, passive side
5301 					 * always hits here
5302 					 */
5303 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5304 					error = ECONNRESET;
5305 				}
5306 				so->so_state &= ~(SS_ISCONNECTING |
5307 				    SS_ISDISCONNECTING |
5308 				    SS_ISCONFIRMING |
5309 				    SS_ISCONNECTED);
5310 				if (error == 0) {
5311 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5312 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5313 						error = ENOTCONN;
5314 					}
5315 				}
5316 				goto out;
5317 			}
5318 		}
5319 		if (block_allowed) {
5320 			error = sbwait(&so->so_rcv);
5321 			if (error) {
5322 				goto out;
5323 			}
5324 			held_length = 0;
5325 			goto restart_nosblocks;
5326 		} else {
5327 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5328 			error = EWOULDBLOCK;
5329 			goto out;
5330 		}
5331 	}
5332 	if (hold_sblock == 1) {
5333 		SOCKBUF_UNLOCK(&so->so_rcv);
5334 		hold_sblock = 0;
5335 	}
5336 	/* we possibly have data we can read */
5337 	/* sa_ignore FREED_MEMORY */
5338 	control = TAILQ_FIRST(&inp->read_queue);
5339 	if (control == NULL) {
5340 		/*
5341 		 * This could be happening since the appender did the
5342 		 * increment but has not yet done the tailq insert onto the
5343 		 * read_queue
5344 		 */
5345 		if (hold_rlock == 0) {
5346 			SCTP_INP_READ_LOCK(inp);
5347 		}
5348 		control = TAILQ_FIRST(&inp->read_queue);
5349 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5350 #ifdef INVARIANTS
5351 			panic("Huh, its non zero and nothing on control?");
5352 #endif
5353 			so->so_rcv.sb_cc = 0;
5354 		}
5355 		SCTP_INP_READ_UNLOCK(inp);
5356 		hold_rlock = 0;
5357 		goto restart;
5358 	}
5359 	if ((control->length == 0) &&
5360 	    (control->do_not_ref_stcb)) {
5361 		/*
5362 		 * Clean up code for freeing an assoc that left behind a
5363 		 * pdapi.. maybe a peer in EEOR mode that just closed after
5364 		 * sending and never indicated an EOR.
5365 		 */
5366 		if (hold_rlock == 0) {
5367 			hold_rlock = 1;
5368 			SCTP_INP_READ_LOCK(inp);
5369 		}
5370 		control->held_length = 0;
5371 		if (control->data) {
5372 			/* Hmm there is data here .. fix */
5373 			struct mbuf *m_tmp;
5374 			int cnt = 0;
5375 
5376 			m_tmp = control->data;
5377 			while (m_tmp) {
5378 				cnt += SCTP_BUF_LEN(m_tmp);
5379 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5380 					control->tail_mbuf = m_tmp;
5381 					control->end_added = 1;
5382 				}
5383 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5384 			}
5385 			control->length = cnt;
5386 		} else {
5387 			/* remove it */
5388 			TAILQ_REMOVE(&inp->read_queue, control, next);
5389 			/* Add back any hidden data */
5390 			sctp_free_remote_addr(control->whoFrom);
5391 			sctp_free_a_readq(stcb, control);
5392 		}
5393 		if (hold_rlock) {
5394 			hold_rlock = 0;
5395 			SCTP_INP_READ_UNLOCK(inp);
5396 		}
5397 		goto restart;
5398 	}
5399 	if ((control->length == 0) &&
5400 	    (control->end_added == 1)) {
5401 		/*
5402 		 * Do we also need to check for (control->pdapi_aborted ==
5403 		 * 1)?
5404 		 */
5405 		if (hold_rlock == 0) {
5406 			hold_rlock = 1;
5407 			SCTP_INP_READ_LOCK(inp);
5408 		}
5409 		TAILQ_REMOVE(&inp->read_queue, control, next);
5410 		if (control->data) {
5411 #ifdef INVARIANTS
5412 			panic("control->data not null but control->length == 0");
5413 #else
5414 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5415 			sctp_m_freem(control->data);
5416 			control->data = NULL;
5417 #endif
5418 		}
5419 		if (control->aux_data) {
5420 			sctp_m_free(control->aux_data);
5421 			control->aux_data = NULL;
5422 		}
5423 #ifdef INVARIANTS
5424 		if (control->on_strm_q) {
5425 			panic("About to free ctl:%p so:%p and its in %d",
5426 			    control, so, control->on_strm_q);
5427 		}
5428 #endif
5429 		sctp_free_remote_addr(control->whoFrom);
5430 		sctp_free_a_readq(stcb, control);
5431 		if (hold_rlock) {
5432 			hold_rlock = 0;
5433 			SCTP_INP_READ_UNLOCK(inp);
5434 		}
5435 		goto restart;
5436 	}
5437 	if (control->length == 0) {
5438 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5439 		    (filling_sinfo)) {
5440 			/* find a more suitable one than this */
5441 			ctl = TAILQ_NEXT(control, next);
5442 			while (ctl) {
5443 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5444 				    (ctl->some_taken ||
5445 				    (ctl->spec_flags & M_NOTIFICATION) ||
5446 				    ((ctl->do_not_ref_stcb == 0) &&
5447 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5448 				    ) {
5449 					/*-
5450 					 * If we have a different TCB next, and there is data
5451 					 * present, and either we have already taken some (pdapi) OR
5452 					 * we can ref the tcb and no delivery has started on this
5453 					 * stream, we take it. Note we allow a notification on a
5454 					 * different assoc to be delivered.
5455 					 */
5456 					control = ctl;
5457 					goto found_one;
5458 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5459 					    (ctl->length) &&
5460 					    ((ctl->some_taken) ||
5461 					    ((ctl->do_not_ref_stcb == 0) &&
5462 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5463 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5464 					/*-
5465 					 * If we have the same tcb, and there is data present, and we
5466 					 * have the stream interleave feature present, then if we have
5467 					 * taken some (pdapi) or we can refer to that tcb AND we have
5468 					 * not started a delivery for this stream, we can take it.
5469 					 * Note we do NOT allow a notification on the same assoc to
5470 					 * be delivered.
5471 					 */
5472 					control = ctl;
5473 					goto found_one;
5474 				}
5475 				ctl = TAILQ_NEXT(ctl, next);
5476 			}
5477 		}
5478 		/*
5479 		 * If we reach here, no suitable replacement is available
5480 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5481 		 * into our held count, and it's time to sleep again.
5482 		 */
5483 		held_length = so->so_rcv.sb_cc;
5484 		control->held_length = so->so_rcv.sb_cc;
5485 		goto restart;
5486 	}
5487 	/* Clear the held length since there is something to read */
5488 	control->held_length = 0;
5489 found_one:
5490 	/*
5491 	 * If we reach here, control has some data for us to read off.
5492 	 * Note that stcb COULD be NULL.
5493 	 */
5494 	if (hold_rlock == 0) {
5495 		hold_rlock = 1;
5496 		SCTP_INP_READ_LOCK(inp);
5497 	}
5498 	control->some_taken++;
5499 	stcb = control->stcb;
5500 	if (stcb) {
5501 		if ((control->do_not_ref_stcb == 0) &&
5502 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5503 			if (freecnt_applied == 0)
5504 				stcb = NULL;
5505 		} else if (control->do_not_ref_stcb == 0) {
5506 			/* you can't free it on me please */
5507 			/*
5508 			 * The lock on the socket buffer protects us so the
5509 			 * free code will stop. But since we used the
5510 			 * socketbuf lock and the sender uses the tcb_lock
5511 			 * to increment, we need to use the atomic add to
5512 			 * the refcnt
5513 			 */
5514 			if (freecnt_applied) {
5515 #ifdef INVARIANTS
5516 				panic("refcnt already incremented");
5517 #else
5518 				SCTP_PRINTF("refcnt already incremented?\n");
5519 #endif
5520 			} else {
5521 				atomic_add_int(&stcb->asoc.refcnt, 1);
5522 				freecnt_applied = 1;
5523 			}
5524 			/*
5525 			 * Setup to remember how much we have not yet told
5526 			 * the peer our rwnd has opened up. Note we grab the
5527 			 * value from the tcb from last time. Note too that
5528 			 * sack sending clears this when a sack is sent,
5529 			 * which is fine. Once we hit the rwnd_req, we then
5530 			 * will go to the sctp_user_rcvd() that will not
5531 			 * lock until it KNOWs it MUST send a WUP-SACK.
5532 			 */
5533 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5534 			stcb->freed_by_sorcv_sincelast = 0;
5535 		}
5536 	}
5537 	if (stcb &&
5538 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5539 	    control->do_not_ref_stcb == 0) {
5540 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5541 	}
5542 	/* First lets get off the sinfo and sockaddr info */
5543 	if ((sinfo != NULL) && (filling_sinfo != 0)) {
5544 		sinfo->sinfo_stream = control->sinfo_stream;
5545 		sinfo->sinfo_ssn = (uint16_t)control->mid;
5546 		sinfo->sinfo_flags = control->sinfo_flags;
5547 		sinfo->sinfo_ppid = control->sinfo_ppid;
5548 		sinfo->sinfo_context = control->sinfo_context;
5549 		sinfo->sinfo_timetolive = control->sinfo_timetolive;
5550 		sinfo->sinfo_tsn = control->sinfo_tsn;
5551 		sinfo->sinfo_cumtsn = control->sinfo_cumtsn;
5552 		sinfo->sinfo_assoc_id = control->sinfo_assoc_id;
5553 		nxt = TAILQ_NEXT(control, next);
5554 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5555 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5556 			struct sctp_extrcvinfo *s_extra;
5557 
5558 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5559 			if ((nxt) &&
5560 			    (nxt->length)) {
5561 				s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5562 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5563 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5564 				}
5565 				if (nxt->spec_flags & M_NOTIFICATION) {
5566 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5567 				}
5568 				s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
5569 				s_extra->serinfo_next_length = nxt->length;
5570 				s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
5571 				s_extra->serinfo_next_stream = nxt->sinfo_stream;
5572 				if (nxt->tail_mbuf != NULL) {
5573 					if (nxt->end_added) {
5574 						s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5575 					}
5576 				}
5577 			} else {
5578 				/*
5579 				 * we explicitly zero this, since the memcpy
5580 				 * may have picked up other things beyond the
5581 				 * older sinfo_ fields on the control's structure
5582 				 * :-D
5583 				 */
5584 				nxt = NULL;
5585 				s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
5586 				s_extra->serinfo_next_aid = 0;
5587 				s_extra->serinfo_next_length = 0;
5588 				s_extra->serinfo_next_ppid = 0;
5589 				s_extra->serinfo_next_stream = 0;
5590 			}
5591 		}
5592 		/*
5593 		 * update off the real current cum-ack, if we have an stcb.
5594 		 */
5595 		if ((control->do_not_ref_stcb == 0) && stcb)
5596 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5597 		/*
5598 		 * mask off the high bits, we keep the actual chunk bits in
5599 		 * there.
5600 		 */
5601 		sinfo->sinfo_flags &= 0x00ff;
5602 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5603 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5604 		}
5605 	}
5606 #ifdef SCTP_ASOCLOG_OF_TSNS
5607 	{
5608 		int index, newindex;
5609 		struct sctp_pcbtsn_rlog *entry;
5610 
5611 		do {
5612 			index = inp->readlog_index;
5613 			newindex = index + 1;
5614 			if (newindex >= SCTP_READ_LOG_SIZE) {
5615 				newindex = 0;
5616 			}
5617 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5618 		entry = &inp->readlog[index];
5619 		entry->vtag = control->sinfo_assoc_id;
5620 		entry->strm = control->sinfo_stream;
5621 		entry->seq = (uint16_t)control->mid;
5622 		entry->sz = control->length;
5623 		entry->flgs = control->sinfo_flags;
5624 	}
5625 #endif
5626 	if ((fromlen > 0) && (from != NULL)) {
5627 		union sctp_sockstore store;
5628 		size_t len;
5629 
5630 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5631 #ifdef INET6
5632 		case AF_INET6:
5633 			len = sizeof(struct sockaddr_in6);
5634 			store.sin6 = control->whoFrom->ro._l_addr.sin6;
5635 			store.sin6.sin6_port = control->port_from;
5636 			break;
5637 #endif
5638 #ifdef INET
5639 		case AF_INET:
5640 #ifdef INET6
5641 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
5642 				len = sizeof(struct sockaddr_in6);
5643 				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
5644 				    &store.sin6);
5645 				store.sin6.sin6_port = control->port_from;
5646 			} else {
5647 				len = sizeof(struct sockaddr_in);
5648 				store.sin = control->whoFrom->ro._l_addr.sin;
5649 				store.sin.sin_port = control->port_from;
5650 			}
5651 #else
5652 			len = sizeof(struct sockaddr_in);
5653 			store.sin = control->whoFrom->ro._l_addr.sin;
5654 			store.sin.sin_port = control->port_from;
5655 #endif
5656 			break;
5657 #endif
5658 		default:
5659 			len = 0;
5660 			break;
5661 		}
5662 		memcpy(from, &store, min((size_t)fromlen, len));
5663 #ifdef INET6
5664 		{
5665 			struct sockaddr_in6 lsa6, *from6;
5666 
5667 			from6 = (struct sockaddr_in6 *)from;
5668 			sctp_recover_scope_mac(from6, (&lsa6));
5669 		}
5670 #endif
5671 	}
5672 	if (hold_rlock) {
5673 		SCTP_INP_READ_UNLOCK(inp);
5674 		hold_rlock = 0;
5675 	}
5676 	if (hold_sblock) {
5677 		SOCKBUF_UNLOCK(&so->so_rcv);
5678 		hold_sblock = 0;
5679 	}
5680 	/* now copy out what data we can */
5681 	if (mp == NULL) {
5682 		/* copy out each mbuf in the chain up to length */
5683 get_more_data:
5684 		m = control->data;
5685 		while (m) {
5686 			/* Move out all we can */
5687 			cp_len = (int)uio->uio_resid;
5688 			my_len = (int)SCTP_BUF_LEN(m);
5689 			if (cp_len > my_len) {
5690 				/* not enough in this buf */
5691 				cp_len = my_len;
5692 			}
5693 			if (hold_rlock) {
5694 				SCTP_INP_READ_UNLOCK(inp);
5695 				hold_rlock = 0;
5696 			}
5697 			if (cp_len > 0)
5698 				error = uiomove(mtod(m, char *), cp_len, uio);
5699 			/* re-read */
5700 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5701 				goto release;
5702 			}
5703 			if ((control->do_not_ref_stcb == 0) && stcb &&
5704 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5705 				no_rcv_needed = 1;
5706 			}
5707 			if (error) {
5708 				/* error we are out of here */
5709 				goto release;
5710 			}
5711 			SCTP_INP_READ_LOCK(inp);
5712 			hold_rlock = 1;
5713 			if (cp_len == SCTP_BUF_LEN(m)) {
5714 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5715 				    (control->end_added)) {
5716 					out_flags |= MSG_EOR;
5717 					if ((control->do_not_ref_stcb == 0) &&
5718 					    (control->stcb != NULL) &&
5719 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5720 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5721 				}
5722 				if (control->spec_flags & M_NOTIFICATION) {
5723 					out_flags |= MSG_NOTIFICATION;
5724 				}
5725 				/* we ate up the mbuf */
5726 				if (in_flags & MSG_PEEK) {
5727 					/* just looking */
5728 					m = SCTP_BUF_NEXT(m);
5729 					copied_so_far += cp_len;
5730 				} else {
5731 					/* dispose of the mbuf */
5732 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5733 						sctp_sblog(&so->so_rcv,
5734 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5735 					}
5736 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5737 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5738 						sctp_sblog(&so->so_rcv,
5739 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5740 					}
5741 					copied_so_far += cp_len;
5742 					freed_so_far += cp_len;
5743 					freed_so_far += MSIZE;
5744 					atomic_subtract_int(&control->length, cp_len);
5745 					control->data = sctp_m_free(m);
5746 					m = control->data;
5747 					/*
5748 					 * been through it all; holding the sb
5749 					 * lock, it is OK to null the tail
5750 					 */
5751 					if (control->data == NULL) {
5752 #ifdef INVARIANTS
5753 						if ((control->end_added == 0) ||
5754 						    (TAILQ_NEXT(control, next) == NULL)) {
5755 							/*
5756 							 * If the end is not
5757 							 * added, OR the
5758 							 * next is NOT null
5759 							 * we MUST have the
5760 							 * lock.
5761 							 */
5762 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5763 								panic("Hmm we don't own the lock?");
5764 							}
5765 						}
5766 #endif
5767 						control->tail_mbuf = NULL;
5768 #ifdef INVARIANTS
5769 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5770 							panic("end_added, nothing left and no MSG_EOR");
5771 						}
5772 #endif
5773 					}
5774 				}
5775 			} else {
5776 				/* Do we need to trim the mbuf? */
5777 				if (control->spec_flags & M_NOTIFICATION) {
5778 					out_flags |= MSG_NOTIFICATION;
5779 				}
5780 				if ((in_flags & MSG_PEEK) == 0) {
5781 					SCTP_BUF_RESV_UF(m, cp_len);
5782 					SCTP_BUF_LEN(m) -= cp_len;
5783 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5784 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5785 					}
5786 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5787 					if ((control->do_not_ref_stcb == 0) &&
5788 					    stcb) {
5789 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5790 					}
5791 					copied_so_far += cp_len;
5792 					freed_so_far += cp_len;
5793 					freed_so_far += MSIZE;
5794 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5795 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5796 						    SCTP_LOG_SBRESULT, 0);
5797 					}
5798 					atomic_subtract_int(&control->length, cp_len);
5799 				} else {
5800 					copied_so_far += cp_len;
5801 				}
5802 			}
5803 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5804 				break;
5805 			}
5806 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5807 			    (control->do_not_ref_stcb == 0) &&
5808 			    (freed_so_far >= rwnd_req)) {
5809 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5810 			}
5811 		}		/* end while(m) */
5812 		/*
5813 		 * At this point we have looked at it all and we either have
5814 		 * a MSG_EOR/or read all the user wants... <OR>
5815 		 * control->length == 0.
5816 		 */
5817 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5818 			/* we are done with this control */
5819 			if (control->length == 0) {
5820 				if (control->data) {
5821 #ifdef INVARIANTS
5822 					panic("control->data not null at read eor?");
5823 #else
5824 					SCTP_PRINTF("Strange, data left in the control buffer .. invariants would panic?\n");
5825 					sctp_m_freem(control->data);
5826 					control->data = NULL;
5827 #endif
5828 				}
5829 		done_with_control:
5830 				if (hold_rlock == 0) {
5831 					SCTP_INP_READ_LOCK(inp);
5832 					hold_rlock = 1;
5833 				}
5834 				TAILQ_REMOVE(&inp->read_queue, control, next);
5835 				/* Add back any hidden data */
5836 				if (control->held_length) {
5837 					held_length = 0;
5838 					control->held_length = 0;
5839 					wakeup_read_socket = 1;
5840 				}
5841 				if (control->aux_data) {
5842 					sctp_m_free(control->aux_data);
5843 					control->aux_data = NULL;
5844 				}
5845 				no_rcv_needed = control->do_not_ref_stcb;
5846 				sctp_free_remote_addr(control->whoFrom);
5847 				control->data = NULL;
5848 #ifdef INVARIANTS
5849 				if (control->on_strm_q) {
5850 					panic("About to free ctl:%p so:%p and its in %d",
5851 					    control, so, control->on_strm_q);
5852 				}
5853 #endif
5854 				sctp_free_a_readq(stcb, control);
5855 				control = NULL;
5856 				if ((freed_so_far >= rwnd_req) &&
5857 				    (no_rcv_needed == 0))
5858 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5859 
5860 			} else {
5861 				/*
5862 				 * The user did not read all of this
5863 				 * message, turn off the returned MSG_EOR
5864 				 * since we are leaving more behind on the
5865 				 * control to read.
5866 				 */
5867 #ifdef INVARIANTS
5868 				if (control->end_added &&
5869 				    (control->data == NULL) &&
5870 				    (control->tail_mbuf == NULL)) {
5871 					panic("Gak, control->length is corrupt?");
5872 				}
5873 #endif
5874 				no_rcv_needed = control->do_not_ref_stcb;
5875 				out_flags &= ~MSG_EOR;
5876 			}
5877 		}
5878 		if (out_flags & MSG_EOR) {
5879 			goto release;
5880 		}
5881 		if ((uio->uio_resid == 0) ||
5882 		    ((in_eeor_mode) &&
5883 		    (copied_so_far >= (uint32_t)max(so->so_rcv.sb_lowat, 1)))) {
5884 			goto release;
5885 		}
5886 		/*
5887 		 * If I hit here the receiver wants more and this message is
5888 		 * NOT done (pd-api). So two questions: can we block? If not,
5889 		 * we are done. Did the user NOT set MSG_WAITALL?
5890 		 */
5891 		if (block_allowed == 0) {
5892 			goto release;
5893 		}
5894 		/*
5895 		 * We need to wait for more data; a few things: - We don't
5896 		 * sbunlock() so we don't get someone else reading. - We
5897 		 * must be sure to account for the case where what is added
5898 		 * is NOT for our control when we wake up.
5899 		 */
5900 
5901 		/*
5902 		 * Do we need to tell the transport a rwnd update might be
5903 		 * needed before we go to sleep?
5904 		 */
5905 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5906 		    ((freed_so_far >= rwnd_req) &&
5907 		    (control->do_not_ref_stcb == 0) &&
5908 		    (no_rcv_needed == 0))) {
5909 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5910 		}
5911 wait_some_more:
5912 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5913 			goto release;
5914 		}
5915 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5916 			goto release;
5917 
5918 		if (hold_rlock == 1) {
5919 			SCTP_INP_READ_UNLOCK(inp);
5920 			hold_rlock = 0;
5921 		}
5922 		if (hold_sblock == 0) {
5923 			SOCKBUF_LOCK(&so->so_rcv);
5924 			hold_sblock = 1;
5925 		}
5926 		if ((copied_so_far) && (control->length == 0) &&
5927 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5928 			goto release;
5929 		}
5930 		if (so->so_rcv.sb_cc <= control->held_length) {
5931 			error = sbwait(&so->so_rcv);
5932 			if (error) {
5933 				goto release;
5934 			}
5935 			control->held_length = 0;
5936 		}
5937 		if (hold_sblock) {
5938 			SOCKBUF_UNLOCK(&so->so_rcv);
5939 			hold_sblock = 0;
5940 		}
5941 		if (control->length == 0) {
5942 			/* still nothing here */
5943 			if (control->end_added == 1) {
5944 				/* the peer aborted, or is done, i.e. did a shutdown */
5945 				out_flags |= MSG_EOR;
5946 				if (control->pdapi_aborted) {
5947 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5948 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5949 
5950 					out_flags |= MSG_TRUNC;
5951 				} else {
5952 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5953 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5954 				}
5955 				goto done_with_control;
5956 			}
5957 			if (so->so_rcv.sb_cc > held_length) {
5958 				control->held_length = so->so_rcv.sb_cc;
5959 				held_length = 0;
5960 			}
5961 			goto wait_some_more;
5962 		} else if (control->data == NULL) {
5963 			/*
5964 			 * we must re-sync since data is probably being
5965 			 * added
5966 			 */
5967 			SCTP_INP_READ_LOCK(inp);
5968 			if ((control->length > 0) && (control->data == NULL)) {
5969 				/*
5970 			 * big trouble.. we have the lock and it's
5971 				 * corrupt?
5972 				 */
5973 #ifdef INVARIANTS
5974 				panic("Impossible data==NULL length !=0");
5975 #endif
5976 				out_flags |= MSG_EOR;
5977 				out_flags |= MSG_TRUNC;
5978 				control->length = 0;
5979 				SCTP_INP_READ_UNLOCK(inp);
5980 				goto done_with_control;
5981 			}
5982 			SCTP_INP_READ_UNLOCK(inp);
5983 			/* We will fall around to get more data */
5984 		}
5985 		goto get_more_data;
5986 	} else {
5987 		/*-
5988 		 * Give caller back the mbuf chain,
5989 		 * store in uio_resid the length
5990 		 */
5991 		wakeup_read_socket = 0;
5992 		if ((control->end_added == 0) ||
5993 		    (TAILQ_NEXT(control, next) == NULL)) {
5994 			/* Need to get rlock */
5995 			if (hold_rlock == 0) {
5996 				SCTP_INP_READ_LOCK(inp);
5997 				hold_rlock = 1;
5998 			}
5999 		}
6000 		if (control->end_added) {
6001 			out_flags |= MSG_EOR;
6002 			if ((control->do_not_ref_stcb == 0) &&
6003 			    (control->stcb != NULL) &&
6004 			    ((control->spec_flags & M_NOTIFICATION) == 0))
6005 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6006 		}
6007 		if (control->spec_flags & M_NOTIFICATION) {
6008 			out_flags |= MSG_NOTIFICATION;
6009 		}
6010 		uio->uio_resid = control->length;
6011 		*mp = control->data;
6012 		m = control->data;
6013 		while (m) {
6014 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6015 				sctp_sblog(&so->so_rcv,
6016 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6017 			}
6018 			sctp_sbfree(control, stcb, &so->so_rcv, m);
6019 			freed_so_far += SCTP_BUF_LEN(m);
6020 			freed_so_far += MSIZE;
6021 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6022 				sctp_sblog(&so->so_rcv,
6023 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6024 			}
6025 			m = SCTP_BUF_NEXT(m);
6026 		}
6027 		control->data = control->tail_mbuf = NULL;
6028 		control->length = 0;
6029 		if (out_flags & MSG_EOR) {
6030 			/* Done with this control */
6031 			goto done_with_control;
6032 		}
6033 	}
6034 release:
6035 	if (hold_rlock == 1) {
6036 		SCTP_INP_READ_UNLOCK(inp);
6037 		hold_rlock = 0;
6038 	}
6039 	if (hold_sblock == 1) {
6040 		SOCKBUF_UNLOCK(&so->so_rcv);
6041 		hold_sblock = 0;
6042 	}
6043 	sbunlock(&so->so_rcv);
6044 	sockbuf_lock = 0;
6045 
6046 release_unlocked:
6047 	if (hold_sblock) {
6048 		SOCKBUF_UNLOCK(&so->so_rcv);
6049 		hold_sblock = 0;
6050 	}
6051 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6052 		if ((freed_so_far >= rwnd_req) &&
6053 		    (control && (control->do_not_ref_stcb == 0)) &&
6054 		    (no_rcv_needed == 0))
6055 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6056 	}
6057 out:
6058 	if (msg_flags) {
6059 		*msg_flags = out_flags;
6060 	}
6061 	if (((out_flags & MSG_EOR) == 0) &&
6062 	    ((in_flags & MSG_PEEK) == 0) &&
6063 	    (sinfo) &&
6064 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6065 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6066 		struct sctp_extrcvinfo *s_extra;
6067 
6068 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6069 		s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
6070 	}
6071 	if (hold_rlock == 1) {
6072 		SCTP_INP_READ_UNLOCK(inp);
6073 	}
6074 	if (hold_sblock) {
6075 		SOCKBUF_UNLOCK(&so->so_rcv);
6076 	}
6077 	if (sockbuf_lock) {
6078 		sbunlock(&so->so_rcv);
6079 	}
6080 	if (freecnt_applied) {
6081 		/*
6082 		 * The lock on the socket buffer protects us so the free
6083 		 * code will stop. But since we used the socketbuf lock and
6084 		 * the sender uses the tcb_lock to increment, we need to use
6085 		 * the atomic add to the refcnt.
6086 		 */
6087 		if (stcb == NULL) {
6088 #ifdef INVARIANTS
6089 			panic("stcb for refcnt has gone NULL?");
6090 			goto stage_left;
6091 #else
6092 			goto stage_left;
6093 #endif
6094 		}
6095 		/* Save the value back for next time */
6096 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6097 		atomic_add_int(&stcb->asoc.refcnt, -1);
6098 	}
6099 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6100 		if (stcb) {
6101 			sctp_misc_ints(SCTP_SORECV_DONE,
6102 			    freed_so_far,
6103 			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6104 			    stcb->asoc.my_rwnd,
6105 			    so->so_rcv.sb_cc);
6106 		} else {
6107 			sctp_misc_ints(SCTP_SORECV_DONE,
6108 			    freed_so_far,
6109 			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6110 			    0,
6111 			    so->so_rcv.sb_cc);
6112 		}
6113 	}
6114 stage_left:
6115 	if (wakeup_read_socket) {
6116 		sctp_sorwakeup(inp, so);
6117 	}
6118 	return (error);
6119 }
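
/*
 * Illustrative userland sketch, not compiled here: roughly how the out_flags
 * set by sctp_sorecvmsg() surface to an application using sctp_recvmsg(3).
 * A large message may be returned in pieces (pd-api); MSG_EOR is only set on
 * the call that returns the final piece, and MSG_NOTIFICATION marks event
 * notifications rather than user data.  Assumptions: fd is an already-open
 * SCTP socket, and handle_notification(), consume_data() and
 * message_complete() are hypothetical helpers.
 *
 *	#include <sys/socket.h>
 *	#include <string.h>
 *	#include <netinet/sctp.h>
 *
 *	char buf[2048];
 *	struct sctp_sndrcvinfo sinfo;
 *	int flags;
 *	ssize_t n;
 *
 *	for (;;) {
 *		flags = 0;
 *		memset(&sinfo, 0, sizeof(sinfo));
 *		n = sctp_recvmsg(fd, buf, sizeof(buf), NULL, NULL,
 *		    &sinfo, &flags);
 *		if (n <= 0)
 *			break;
 *		if (flags & MSG_NOTIFICATION) {
 *			handle_notification(buf, n);
 *			continue;
 *		}
 *		consume_data(buf, n, sinfo.sinfo_stream);
 *		if (flags & MSG_EOR)
 *			message_complete();
 *	}
 */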
6120 
6121 
6122 #ifdef SCTP_MBUF_LOGGING
6123 struct mbuf *
6124 sctp_m_free(struct mbuf *m)
6125 {
6126 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6127 		sctp_log_mb(m, SCTP_MBUF_IFREE);
6128 	}
6129 	return (m_free(m));
6130 }
6131 
6132 void
6133 sctp_m_freem(struct mbuf *mb)
6134 {
6135 	while (mb != NULL)
6136 		mb = sctp_m_free(mb);
6137 }
6138 
6139 #endif
6140 
6141 int
6142 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6143 {
6144 	/*
6145 	 * Given a local address, for all associations that hold the
6146 	 * address, request a peer-set-primary.
6147 	 */
6148 	struct sctp_ifa *ifa;
6149 	struct sctp_laddr *wi;
6150 
6151 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6152 	if (ifa == NULL) {
6153 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6154 		return (EADDRNOTAVAIL);
6155 	}
6156 	/*
6157 	 * Now that we have the ifa we must awaken the iterator with this
6158 	 * message.
6159 	 */
6160 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6161 	if (wi == NULL) {
6162 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6163 		return (ENOMEM);
6164 	}
6165 	/* Now incr the count and init the wi structure */
6166 	SCTP_INCR_LADDR_COUNT();
6167 	bzero(wi, sizeof(*wi));
6168 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6169 	wi->ifa = ifa;
6170 	wi->action = SCTP_SET_PRIM_ADDR;
6171 	atomic_add_int(&ifa->refcount, 1);
6172 
6173 	/* Now add it to the work queue */
6174 	SCTP_WQ_ADDR_LOCK();
6175 	/*
6176 	 * Should this really be a tailq? As it is we will process the
6177 	 * newest first :-0
6178 	 */
6179 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6180 	SCTP_WQ_ADDR_UNLOCK();
6181 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6182 	    (struct sctp_inpcb *)NULL,
6183 	    (struct sctp_tcb *)NULL,
6184 	    (struct sctp_nets *)NULL);
6185 	return (0);
6186 }
6187 
6188 
6189 int
6190 sctp_soreceive(struct socket *so,
6191     struct sockaddr **psa,
6192     struct uio *uio,
6193     struct mbuf **mp0,
6194     struct mbuf **controlp,
6195     int *flagsp)
6196 {
6197 	int error, fromlen;
6198 	uint8_t sockbuf[256];
6199 	struct sockaddr *from;
6200 	struct sctp_extrcvinfo sinfo;
6201 	int filling_sinfo = 1;
6202 	struct sctp_inpcb *inp;
6203 
6204 	inp = (struct sctp_inpcb *)so->so_pcb;
6205 	/* pickup the assoc we are reading from */
6206 	if (inp == NULL) {
6207 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6208 		return (EINVAL);
6209 	}
6210 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6211 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6212 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6213 	    (controlp == NULL)) {
6214 		/* user does not want the sndrcv ctl */
6215 		filling_sinfo = 0;
6216 	}
6217 	if (psa) {
6218 		from = (struct sockaddr *)sockbuf;
6219 		fromlen = sizeof(sockbuf);
6220 		from->sa_len = 0;
6221 	} else {
6222 		from = NULL;
6223 		fromlen = 0;
6224 	}
6225 
6226 	if (filling_sinfo) {
6227 		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6228 	}
6229 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6230 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6231 	if (controlp != NULL) {
6232 		/* copy back the sinfo in a CMSG format */
6233 		if (filling_sinfo)
6234 			*controlp = sctp_build_ctl_nchunk(inp,
6235 			    (struct sctp_sndrcvinfo *)&sinfo);
6236 		else
6237 			*controlp = NULL;
6238 	}
6239 	if (psa) {
6240 		/* copy back the address info */
6241 		if (from && from->sa_len) {
6242 			*psa = sodupsockaddr(from, M_NOWAIT);
6243 		} else {
6244 			*psa = NULL;
6245 		}
6246 	}
6247 	return (error);
6248 }
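
/*
 * Illustrative userland sketch, not compiled here: the filling_sinfo path
 * above only yields receive-info ancillary data if the application has
 * subscribed to it, for example via the RFC 6458 SCTP_RECVRCVINFO socket
 * option shown below (the older sctp_data_io_event subscription behaves
 * similarly).  fd is an already-open SCTP socket (assumption).
 *
 *	#include <sys/socket.h>
 *	#include <netinet/sctp.h>
 *	#include <err.h>
 *
 *	int on = 1;
 *
 *	if (setsockopt(fd, IPPROTO_SCTP, SCTP_RECVRCVINFO,
 *	    &on, sizeof(on)) < 0)
 *		err(1, "setsockopt(SCTP_RECVRCVINFO)");
 */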
6249 
6250 
6251 
6252 
6253 
6254 int
6255 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
6256     int totaddr, int *error)
6257 {
6258 	int added = 0;
6259 	int i;
6260 	struct sctp_inpcb *inp;
6261 	struct sockaddr *sa;
6262 	size_t incr = 0;
6263 #ifdef INET
6264 	struct sockaddr_in *sin;
6265 #endif
6266 #ifdef INET6
6267 	struct sockaddr_in6 *sin6;
6268 #endif
6269 
6270 	sa = addr;
6271 	inp = stcb->sctp_ep;
6272 	*error = 0;
6273 	for (i = 0; i < totaddr; i++) {
6274 		switch (sa->sa_family) {
6275 #ifdef INET
6276 		case AF_INET:
6277 			incr = sizeof(struct sockaddr_in);
6278 			sin = (struct sockaddr_in *)sa;
6279 			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
6280 			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
6281 			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
6282 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6283 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6284 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
6285 				*error = EINVAL;
6286 				goto out_now;
6287 			}
6288 			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
6289 			    SCTP_DONOT_SETSCOPE,
6290 			    SCTP_ADDR_IS_CONFIRMED)) {
6291 				/* assoc is gone, no unlock needed */
6292 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6293 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6294 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
6295 				*error = ENOBUFS;
6296 				goto out_now;
6297 			}
6298 			added++;
6299 			break;
6300 #endif
6301 #ifdef INET6
6302 		case AF_INET6:
6303 			incr = sizeof(struct sockaddr_in6);
6304 			sin6 = (struct sockaddr_in6 *)sa;
6305 			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
6306 			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
6307 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6308 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6309 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
6310 				*error = EINVAL;
6311 				goto out_now;
6312 			}
6313 			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
6314 			    SCTP_DONOT_SETSCOPE,
6315 			    SCTP_ADDR_IS_CONFIRMED)) {
6316 				/* assoc is gone, no unlock needed */
6317 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6318 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6319 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
6320 				*error = ENOBUFS;
6321 				goto out_now;
6322 			}
6323 			added++;
6324 			break;
6325 #endif
6326 		default:
6327 			break;
6328 		}
6329 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6330 	}
6331 out_now:
6332 	return (added);
6333 }
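
/*
 * Illustrative userland sketch, not compiled here: the connectx helpers
 * above parse a packed array of sockaddrs, which is how sctp_connectx(3)
 * passes several peer addresses in one call.  The entries are laid out back
 * to back, each sized according to its address family.  Addresses and port
 * below are examples only.
 *
 *	#include <sys/socket.h>
 *	#include <arpa/inet.h>
 *	#include <string.h>
 *	#include <netinet/sctp.h>
 *	#include <err.h>
 *
 *	struct sockaddr_in sin[2];
 *	sctp_assoc_t assoc_id;
 *	int fd;
 *
 *	fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
 *	memset(sin, 0, sizeof(sin));
 *	sin[0].sin_family = sin[1].sin_family = AF_INET;
 *	sin[0].sin_len = sin[1].sin_len = sizeof(struct sockaddr_in);
 *	sin[0].sin_port = sin[1].sin_port = htons(5001);
 *	inet_pton(AF_INET, "192.0.2.1", &sin[0].sin_addr);
 *	inet_pton(AF_INET, "198.51.100.1", &sin[1].sin_addr);
 *	if (sctp_connectx(fd, (struct sockaddr *)sin, 2, &assoc_id) < 0)
 *		err(1, "sctp_connectx");
 */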
6334 
6335 struct sctp_tcb *
6336 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6337     unsigned int *totaddr,
6338     unsigned int *num_v4, unsigned int *num_v6, int *error,
6339     unsigned int limit, int *bad_addr)
6340 {
6341 	struct sockaddr *sa;
6342 	struct sctp_tcb *stcb = NULL;
6343 	unsigned int incr, at, i;
6344 
6345 	at = 0;
6346 	sa = addr;
6347 	*error = *num_v6 = *num_v4 = 0;
6348 	/* account and validate addresses */
6349 	for (i = 0; i < *totaddr; i++) {
6350 		switch (sa->sa_family) {
6351 #ifdef INET
6352 		case AF_INET:
6353 			incr = (unsigned int)sizeof(struct sockaddr_in);
6354 			if (sa->sa_len != incr) {
6355 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6356 				*error = EINVAL;
6357 				*bad_addr = 1;
6358 				return (NULL);
6359 			}
6360 			(*num_v4) += 1;
6361 			break;
6362 #endif
6363 #ifdef INET6
6364 		case AF_INET6:
6365 			{
6366 				struct sockaddr_in6 *sin6;
6367 
6368 				sin6 = (struct sockaddr_in6 *)sa;
6369 				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6370 					/* Must be non-mapped for connectx */
6371 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6372 					*error = EINVAL;
6373 					*bad_addr = 1;
6374 					return (NULL);
6375 				}
6376 				incr = (unsigned int)sizeof(struct sockaddr_in6);
6377 				if (sa->sa_len != incr) {
6378 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6379 					*error = EINVAL;
6380 					*bad_addr = 1;
6381 					return (NULL);
6382 				}
6383 				(*num_v6) += 1;
6384 				break;
6385 			}
6386 #endif
6387 		default:
6388 			*totaddr = i;
6389 			incr = 0;
6390 			/* we are done */
6391 			break;
6392 		}
6393 		if (i == *totaddr) {
6394 			break;
6395 		}
6396 		SCTP_INP_INCR_REF(inp);
6397 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6398 		if (stcb != NULL) {
6399 			/* Already have or am bringing up an association */
6400 			return (stcb);
6401 		} else {
6402 			SCTP_INP_DECR_REF(inp);
6403 		}
6404 		if ((at + incr) > limit) {
6405 			*totaddr = i;
6406 			break;
6407 		}
6408 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6409 	}
6410 	return ((struct sctp_tcb *)NULL);
6411 }
6412 
6413 /*
6414  * sctp_bindx(ADD) for one address.
6415  * assumes all arguments are valid/checked by caller.
6416  */
6417 void
6418 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
6419     struct sockaddr *sa, sctp_assoc_t assoc_id,
6420     uint32_t vrf_id, int *error, void *p)
6421 {
6422 	struct sockaddr *addr_touse;
6423 #if defined(INET) && defined(INET6)
6424 	struct sockaddr_in sin;
6425 #endif
6426 
6427 	/* see if we're bound all already! */
6428 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6429 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6430 		*error = EINVAL;
6431 		return;
6432 	}
6433 	addr_touse = sa;
6434 #ifdef INET6
6435 	if (sa->sa_family == AF_INET6) {
6436 #ifdef INET
6437 		struct sockaddr_in6 *sin6;
6438 
6439 #endif
6440 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6441 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6442 			*error = EINVAL;
6443 			return;
6444 		}
6445 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6446 			/* can only bind v6 on PF_INET6 sockets */
6447 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6448 			*error = EINVAL;
6449 			return;
6450 		}
6451 #ifdef INET
6452 		sin6 = (struct sockaddr_in6 *)addr_touse;
6453 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6454 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6455 			    SCTP_IPV6_V6ONLY(inp)) {
6456 				/* can't bind v4-mapped addrs on a v6-only socket */
6457 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6458 				*error = EINVAL;
6459 				return;
6460 			}
6461 			in6_sin6_2_sin(&sin, sin6);
6462 			addr_touse = (struct sockaddr *)&sin;
6463 		}
6464 #endif
6465 	}
6466 #endif
6467 #ifdef INET
6468 	if (sa->sa_family == AF_INET) {
6469 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6470 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6471 			*error = EINVAL;
6472 			return;
6473 		}
6474 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6475 		    SCTP_IPV6_V6ONLY(inp)) {
6476 			/* can't bind v4 addrs on a v6-only socket */
6477 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6478 			*error = EINVAL;
6479 			return;
6480 		}
6481 	}
6482 #endif
6483 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
6484 		if (p == NULL) {
6485 			/* Can't get proc for Net/Open BSD */
6486 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6487 			*error = EINVAL;
6488 			return;
6489 		}
6490 		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
6491 		return;
6492 	}
6493 	/*
6494 	 * No locks required here since bind and mgmt_ep_sa all do their own
6495 	 * locking. If we do something for the FIX: below we may need to
6496 	 * lock in that case.
6497 	 */
6498 	if (assoc_id == 0) {
6499 		/* add the address */
6500 		struct sctp_inpcb *lep;
6501 		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
6502 
6503 		/* validate the incoming port */
6504 		if ((lsin->sin_port != 0) &&
6505 		    (lsin->sin_port != inp->sctp_lport)) {
6506 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6507 			*error = EINVAL;
6508 			return;
6509 		} else {
6510 			/* user specified 0 port, set it to existing port */
6511 			lsin->sin_port = inp->sctp_lport;
6512 		}
6513 
6514 		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
6515 		if (lep != NULL) {
6516 			/*
6517 			 * We must decrement the refcount since we have the
6518 			 * ep already and are binding. No remove going on
6519 			 * here.
6520 			 */
6521 			SCTP_INP_DECR_REF(lep);
6522 		}
6523 		if (lep == inp) {
6524 			/* already bound to it.. ok */
6525 			return;
6526 		} else if (lep == NULL) {
6527 			((struct sockaddr_in *)addr_touse)->sin_port = 0;
6528 			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6529 			    SCTP_ADD_IP_ADDRESS,
6530 			    vrf_id, NULL);
6531 		} else {
6532 			*error = EADDRINUSE;
6533 		}
6534 		if (*error)
6535 			return;
6536 	} else {
6537 		/*
6538 		 * FIX: decide whether we allow assoc based bindx
6539 		 */
6540 	}
6541 }
6542 
6543 /*
6544  * sctp_bindx(DELETE) for one address.
6545  * assumes all arguments are valid/checked by caller.
6546  */
6547 void
6548 sctp_bindx_delete_address(struct sctp_inpcb *inp,
6549     struct sockaddr *sa, sctp_assoc_t assoc_id,
6550     uint32_t vrf_id, int *error)
6551 {
6552 	struct sockaddr *addr_touse;
6553 #if defined(INET) && defined(INET6)
6554 	struct sockaddr_in sin;
6555 #endif
6556 
6557 	/* see if we're bound all already! */
6558 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6559 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6560 		*error = EINVAL;
6561 		return;
6562 	}
6563 	addr_touse = sa;
6564 #ifdef INET6
6565 	if (sa->sa_family == AF_INET6) {
6566 #ifdef INET
6567 		struct sockaddr_in6 *sin6;
6568 #endif
6569 
6570 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6571 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6572 			*error = EINVAL;
6573 			return;
6574 		}
6575 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6576 			/* can only bind v6 on PF_INET6 sockets */
6577 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6578 			*error = EINVAL;
6579 			return;
6580 		}
6581 #ifdef INET
6582 		sin6 = (struct sockaddr_in6 *)addr_touse;
6583 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6584 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6585 			    SCTP_IPV6_V6ONLY(inp)) {
6586 				/* can't bind v4-mapped addrs on a v6-only socket */
6587 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6588 				*error = EINVAL;
6589 				return;
6590 			}
6591 			in6_sin6_2_sin(&sin, sin6);
6592 			addr_touse = (struct sockaddr *)&sin;
6593 		}
6594 #endif
6595 	}
6596 #endif
6597 #ifdef INET
6598 	if (sa->sa_family == AF_INET) {
6599 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6600 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6601 			*error = EINVAL;
6602 			return;
6603 		}
6604 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6605 		    SCTP_IPV6_V6ONLY(inp)) {
6606 			/* can't bind v4 addrs on a v6-only socket */
6607 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6608 			*error = EINVAL;
6609 			return;
6610 		}
6611 	}
6612 #endif
6613 	/*
6614 	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
6615 	 * below is ever changed we may need to lock before calling
6616 	 * association level binding.
6617 	 */
6618 	if (assoc_id == 0) {
6619 		/* delete the address */
6620 		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6621 		    SCTP_DEL_IP_ADDRESS,
6622 		    vrf_id, NULL);
6623 	} else {
6624 		/*
6625 		 * FIX: decide whether we allow assoc based bindx
6626 		 */
6627 	}
6628 }
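
/*
 * Illustrative userland sketch, not compiled here: the two bindx helpers
 * above back the sctp_bindx(3) library call, which adds or removes
 * addresses on a subset-bound (non-boundall) socket.  Address and port are
 * examples only.
 *
 *	#include <sys/socket.h>
 *	#include <arpa/inet.h>
 *	#include <string.h>
 *	#include <netinet/sctp.h>
 *	#include <err.h>
 *
 *	struct sockaddr_in sin;
 *	int fd;
 *
 *	fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
 *	memset(&sin, 0, sizeof(sin));
 *	sin.sin_family = AF_INET;
 *	sin.sin_len = sizeof(sin);
 *	sin.sin_port = htons(5001);
 *	inet_pton(AF_INET, "192.0.2.10", &sin.sin_addr);
 *	if (sctp_bindx(fd, (struct sockaddr *)&sin, 1,
 *	    SCTP_BINDX_ADD_ADDR) < 0)
 *		err(1, "sctp_bindx(ADD)");
 *
 *	(later, to remove the same address:)
 *	if (sctp_bindx(fd, (struct sockaddr *)&sin, 1,
 *	    SCTP_BINDX_REM_ADDR) < 0)
 *		err(1, "sctp_bindx(REM)");
 */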
6629 
6630 /*
6631  * returns the valid local address count for an assoc, taking into account
6632  * all scoping rules
6633  */
6634 int
6635 sctp_local_addr_count(struct sctp_tcb *stcb)
6636 {
6637 	int loopback_scope;
6638 #if defined(INET)
6639 	int ipv4_local_scope, ipv4_addr_legal;
6640 #endif
6641 #if defined (INET6)
6642 	int local_scope, site_scope, ipv6_addr_legal;
6643 #endif
6644 	struct sctp_vrf *vrf;
6645 	struct sctp_ifn *sctp_ifn;
6646 	struct sctp_ifa *sctp_ifa;
6647 	int count = 0;
6648 
6649 	/* Turn on all the appropriate scopes */
6650 	loopback_scope = stcb->asoc.scope.loopback_scope;
6651 #if defined(INET)
6652 	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
6653 	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
6654 #endif
6655 #if defined(INET6)
6656 	local_scope = stcb->asoc.scope.local_scope;
6657 	site_scope = stcb->asoc.scope.site_scope;
6658 	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
6659 #endif
6660 	SCTP_IPI_ADDR_RLOCK();
6661 	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
6662 	if (vrf == NULL) {
6663 		/* no vrf, no addresses */
6664 		SCTP_IPI_ADDR_RUNLOCK();
6665 		return (0);
6666 	}
6667 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6668 		/*
6669 		 * bound all case: go through all ifns on the vrf
6670 		 */
6671 		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
6672 			if ((loopback_scope == 0) &&
6673 			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
6674 				continue;
6675 			}
6676 			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
6677 				if (sctp_is_addr_restricted(stcb, sctp_ifa))
6678 					continue;
6679 				switch (sctp_ifa->address.sa.sa_family) {
6680 #ifdef INET
6681 				case AF_INET:
6682 					if (ipv4_addr_legal) {
6683 						struct sockaddr_in *sin;
6684 
6685 						sin = &sctp_ifa->address.sin;
6686 						if (sin->sin_addr.s_addr == 0) {
6687 							/*
6688 							 * skip unspecified
6689 							 * addrs
6690 							 */
6691 							continue;
6692 						}
6693 						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
6694 						    &sin->sin_addr) != 0) {
6695 							continue;
6696 						}
6697 						if ((ipv4_local_scope == 0) &&
6698 						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
6699 							continue;
6700 						}
6701 						/* count this one */
6702 						count++;
6703 					} else {
6704 						continue;
6705 					}
6706 					break;
6707 #endif
6708 #ifdef INET6
6709 				case AF_INET6:
6710 					if (ipv6_addr_legal) {
6711 						struct sockaddr_in6 *sin6;
6712 
6713 						sin6 = &sctp_ifa->address.sin6;
6714 						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
6715 							continue;
6716 						}
6717 						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
6718 						    &sin6->sin6_addr) != 0) {
6719 							continue;
6720 						}
6721 						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
6722 							if (local_scope == 0)
6723 								continue;
6724 							if (sin6->sin6_scope_id == 0) {
6725 								if (sa6_recoverscope(sin6) != 0)
6726 									/*
6727 									 * bad
6728 									 * link
6729 									 * local
6730 									 * address
6734 									 */
6735 									continue;
6736 							}
6737 						}
6738 						if ((site_scope == 0) &&
6739 						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
6740 							continue;
6741 						}
6742 						/* count this one */
6743 						count++;
6744 					}
6745 					break;
6746 #endif
6747 				default:
6748 					/* TSNH */
6749 					break;
6750 				}
6751 			}
6752 		}
6753 	} else {
6754 		/*
6755 		 * subset bound case
6756 		 */
6757 		struct sctp_laddr *laddr;
6758 
6759 		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
6760 		    sctp_nxt_addr) {
6761 			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
6762 				continue;
6763 			}
6764 			/* count this one */
6765 			count++;
6766 		}
6767 	}
6768 	SCTP_IPI_ADDR_RUNLOCK();
6769 	return (count);
6770 }
6771 
6772 #if defined(SCTP_LOCAL_TRACE_BUF)
6773 
6774 void
6775 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
6776 {
6777 	uint32_t saveindex, newindex;
6778 
6779 	do {
6780 		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
6781 		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6782 			newindex = 1;
6783 		} else {
6784 			newindex = saveindex + 1;
6785 		}
6786 	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
6787 	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6788 		saveindex = 0;
6789 	}
6790 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
6791 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
6792 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
6793 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
6794 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
6795 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
6796 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
6797 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
6798 }
6799 
6800 #endif
6801 static void
6802 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
6803     const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
6804 {
6805 	struct ip *iph;
6806 #ifdef INET6
6807 	struct ip6_hdr *ip6;
6808 #endif
6809 	struct mbuf *sp, *last;
6810 	struct udphdr *uhdr;
6811 	uint16_t port;
6812 
6813 	if ((m->m_flags & M_PKTHDR) == 0) {
6814 		/* Can't handle one that is not a pkt hdr */
6815 		goto out;
6816 	}
6817 	/* Pull the src port */
6818 	iph = mtod(m, struct ip *);
6819 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6820 	port = uhdr->uh_sport;
6821 	/*
6822 	 * Split out the mbuf chain. Leave the IP header in m, place the
6823 	 * rest in the sp.
6824 	 */
6825 	sp = m_split(m, off, M_NOWAIT);
6826 	if (sp == NULL) {
6827 		/* Gak, drop packet, we can't do a split */
6828 		goto out;
6829 	}
6830 	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
6831 		/* Gak, packet can't have an SCTP header in it - too small */
6832 		m_freem(sp);
6833 		goto out;
6834 	}
6835 	/* Now pull up the UDP header and SCTP header together */
6836 	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
6837 	if (sp == NULL) {
6838 		/* Gak pullup failed */
6839 		goto out;
6840 	}
6841 	/* Trim out the UDP header */
6842 	m_adj(sp, sizeof(struct udphdr));
6843 
6844 	/* Now reconstruct the mbuf chain */
6845 	for (last = m; last->m_next; last = last->m_next);
6846 	last->m_next = sp;
6847 	m->m_pkthdr.len += sp->m_pkthdr.len;
6848 	/*
6849 	 * The CSUM_DATA_VALID flag indicates that the HW checked the UDP
6850 	 * checksum and it was valid. Since CSUM_DATA_VALID ==
6851 	 * CSUM_SCTP_VALID this would imply that the HW also verified the
6852 	 * SCTP checksum. Therefore, clear the bit.
6853 	 */
6854 	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
6855 	    "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
6856 	    m->m_pkthdr.len,
6857 	    if_name(m->m_pkthdr.rcvif),
6858 	    (int)m->m_pkthdr.csum_flags, CSUM_BITS);
6859 	m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
6860 	iph = mtod(m, struct ip *);
6861 	switch (iph->ip_v) {
6862 #ifdef INET
6863 	case IPVERSION:
6864 		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
6865 		sctp_input_with_port(m, off, port);
6866 		break;
6867 #endif
6868 #ifdef INET6
6869 	case IPV6_VERSION >> 4:
6870 		ip6 = mtod(m, struct ip6_hdr *);
6871 		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
6872 		sctp6_input_with_port(&m, &off, port);
6873 		break;
6874 #endif
6875 	default:
6876 		goto out;
6877 		break;
6878 	}
6879 	return;
6880 out:
6881 	m_freem(m);
6882 }
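
/*
 * Illustrative sketch, not compiled here: UDP encapsulation (RFC 6951) is
 * typically enabled by setting the tunneling port sysctl so this callback
 * receives traffic, and then telling the stack which remote encapsulation
 * port a peer uses.  The sysctl name and the SCTP_REMOTE_UDP_ENCAPS_PORT
 * option below reflect the userland interface as an assumption; "peer" is a
 * hypothetical, already-filled sockaddr_storage and fd an SCTP socket.
 *
 *	# sysctl net.inet.sctp.udp_tunneling_port=9899
 *
 *	struct sctp_udpencaps encaps;
 *
 *	memset(&encaps, 0, sizeof(encaps));
 *	memcpy(&encaps.sue_address, &peer, peer.ss_len);
 *	encaps.sue_port = htons(9899);
 *	if (setsockopt(fd, IPPROTO_SCTP, SCTP_REMOTE_UDP_ENCAPS_PORT,
 *	    &encaps, sizeof(encaps)) < 0)
 *		err(1, "setsockopt(SCTP_REMOTE_UDP_ENCAPS_PORT)");
 */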
6883 
6884 #ifdef INET
6885 static void
6886 sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED)
6887 {
6888 	struct ip *outer_ip, *inner_ip;
6889 	struct sctphdr *sh;
6890 	struct icmp *icmp;
6891 	struct udphdr *udp;
6892 	struct sctp_inpcb *inp;
6893 	struct sctp_tcb *stcb;
6894 	struct sctp_nets *net;
6895 	struct sctp_init_chunk *ch;
6896 	struct sockaddr_in src, dst;
6897 	uint8_t type, code;
6898 
6899 	inner_ip = (struct ip *)vip;
6900 	icmp = (struct icmp *)((caddr_t)inner_ip -
6901 	    (sizeof(struct icmp) - sizeof(struct ip)));
6902 	outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
6903 	if (ntohs(outer_ip->ip_len) <
6904 	    sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) {
6905 		return;
6906 	}
6907 	udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2));
6908 	sh = (struct sctphdr *)(udp + 1);
6909 	memset(&src, 0, sizeof(struct sockaddr_in));
6910 	src.sin_family = AF_INET;
6911 	src.sin_len = sizeof(struct sockaddr_in);
6912 	src.sin_port = sh->src_port;
6913 	src.sin_addr = inner_ip->ip_src;
6914 	memset(&dst, 0, sizeof(struct sockaddr_in));
6915 	dst.sin_family = AF_INET;
6916 	dst.sin_len = sizeof(struct sockaddr_in);
6917 	dst.sin_port = sh->dest_port;
6918 	dst.sin_addr = inner_ip->ip_dst;
6919 	/*
6920 	 * 'dst' holds the dest of the packet that failed to be sent. 'src'
6921 	 * holds our local endpoint address. Thus we reverse the dst and the
6922 	 * src in the lookup.
6923 	 */
6924 	inp = NULL;
6925 	net = NULL;
6926 	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
6927 	    (struct sockaddr *)&src,
6928 	    &inp, &net, 1,
6929 	    SCTP_DEFAULT_VRFID);
6930 	if ((stcb != NULL) &&
6931 	    (net != NULL) &&
6932 	    (inp != NULL)) {
6933 		/* Check the UDP port numbers */
6934 		if ((udp->uh_dport != net->port) ||
6935 		    (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
6936 			SCTP_TCB_UNLOCK(stcb);
6937 			return;
6938 		}
6939 		/* Check the verification tag */
6940 		if (ntohl(sh->v_tag) != 0) {
6941 			/*
6942 			 * This must be the verification tag used for
6943 			 * sending out packets. We don't consider packets
6944 			 * reflecting the verification tag.
6945 			 */
6946 			if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) {
6947 				SCTP_TCB_UNLOCK(stcb);
6948 				return;
6949 			}
6950 		} else {
6951 			if (ntohs(outer_ip->ip_len) >=
6952 			    sizeof(struct ip) +
6953 			    8 + (inner_ip->ip_hl << 2) + 8 + 20) {
6954 				/*
6955 				 * In this case we can check if we got an
6956 				 * INIT chunk and if the initiate tag
6957 				 * matches.
6958 				 */
6959 				ch = (struct sctp_init_chunk *)(sh + 1);
6960 				if ((ch->ch.chunk_type != SCTP_INITIATION) ||
6961 				    (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) {
6962 					SCTP_TCB_UNLOCK(stcb);
6963 					return;
6964 				}
6965 			} else {
6966 				SCTP_TCB_UNLOCK(stcb);
6967 				return;
6968 			}
6969 		}
6970 		type = icmp->icmp_type;
6971 		code = icmp->icmp_code;
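		/*
		 * A port unreachable for the encapsulating UDP port is
		 * reported to the association as a protocol unreachable,
		 * which is what a non-tunneled SCTP packet would have
		 * triggered.
		 */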
6972 		if ((type == ICMP_UNREACH) &&
6973 		    (code == ICMP_UNREACH_PORT)) {
6974 			code = ICMP_UNREACH_PROTOCOL;
6975 		}
6976 		sctp_notify(inp, stcb, net, type, code,
6977 		    ntohs(inner_ip->ip_len),
6978 		    (uint32_t)ntohs(icmp->icmp_nextmtu));
6979 	} else {
6980 		if ((stcb == NULL) && (inp != NULL)) {
6981 			/* reduce ref-count */
6982 			SCTP_INP_WLOCK(inp);
6983 			SCTP_INP_DECR_REF(inp);
6984 			SCTP_INP_WUNLOCK(inp);
6985 		}
6986 		if (stcb) {
6987 			SCTP_TCB_UNLOCK(stcb);
6988 		}
6989 	}
6990 	return;
6991 }
6992 #endif
6993 
6994 #ifdef INET6
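/*
 * Handler for ICMPv6 errors reported against SCTP packets tunneled over UDP
 * (registered below in sctp_over_udp_start() via udp_set_kernel_tunneling()).
 * Unlike the IPv4 variant, the relevant headers are copied out of the mbuf
 * chain with m_copydata() before the UDP ports and the verification tag
 * (or initiate tag) are validated and the error is passed to sctp6_notify().
 */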
6995 static void
6996 sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED)
6997 {
6998 	struct ip6ctlparam *ip6cp;
6999 	struct sctp_inpcb *inp;
7000 	struct sctp_tcb *stcb;
7001 	struct sctp_nets *net;
7002 	struct sctphdr sh;
7003 	struct udphdr udp;
7004 	struct sockaddr_in6 src, dst;
7005 	uint8_t type, code;
7006 
7007 	ip6cp = (struct ip6ctlparam *)d;
7008 	/*
7009 	 * XXX: We assume that when IPV6 is non-NULL, M and OFF are valid.
7010 	 */
7011 	if (ip6cp->ip6c_m == NULL) {
7012 		return;
7013 	}
7014 	/*
7015 	 * Check if we can safely examine the ports and the verification tag
7016 	 * of the SCTP common header.
7017 	 */
7018 	if (ip6cp->ip6c_m->m_pkthdr.len <
7019 	    ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) {
7020 		return;
7021 	}
7022 	/* Copy out the UDP header. */
7023 	memset(&udp, 0, sizeof(struct udphdr));
7024 	m_copydata(ip6cp->ip6c_m,
7025 	    ip6cp->ip6c_off,
7026 	    sizeof(struct udphdr),
7027 	    (caddr_t)&udp);
7028 	/* Copy out the port numbers and the verification tag. */
7029 	memset(&sh, 0, sizeof(struct sctphdr));
7030 	m_copydata(ip6cp->ip6c_m,
7031 	    ip6cp->ip6c_off + sizeof(struct udphdr),
7032 	    sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t),
7033 	    (caddr_t)&sh);
7034 	memset(&src, 0, sizeof(struct sockaddr_in6));
7035 	src.sin6_family = AF_INET6;
7036 	src.sin6_len = sizeof(struct sockaddr_in6);
7037 	src.sin6_port = sh.src_port;
7038 	src.sin6_addr = ip6cp->ip6c_ip6->ip6_src;
7039 	if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
7040 		return;
7041 	}
7042 	memset(&dst, 0, sizeof(struct sockaddr_in6));
7043 	dst.sin6_family = AF_INET6;
7044 	dst.sin6_len = sizeof(struct sockaddr_in6);
7045 	dst.sin6_port = sh.dest_port;
7046 	dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst;
7047 	if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
7048 		return;
7049 	}
7050 	inp = NULL;
7051 	net = NULL;
7052 	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
7053 	    (struct sockaddr *)&src,
7054 	    &inp, &net, 1, SCTP_DEFAULT_VRFID);
7055 	if ((stcb != NULL) &&
7056 	    (net != NULL) &&
7057 	    (inp != NULL)) {
7058 		/* Check the UDP port numbers */
7059 		if ((udp.uh_dport != net->port) ||
7060 		    (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
7061 			SCTP_TCB_UNLOCK(stcb);
7062 			return;
7063 		}
7064 		/* Check the verification tag */
7065 		if (ntohl(sh.v_tag) != 0) {
7066 			/*
7067 			 * This must be the verification tag used for
7068 			 * sending out packets. We don't consider packets
7069 			 * reflecting the verification tag.
7070 			 */
7071 			if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) {
7072 				SCTP_TCB_UNLOCK(stcb);
7073 				return;
7074 			}
7075 		} else {
7076 			if (ip6cp->ip6c_m->m_pkthdr.len >=
7077 			    ip6cp->ip6c_off + sizeof(struct udphdr) +
7078 			    sizeof(struct sctphdr) +
7079 			    sizeof(struct sctp_chunkhdr) +
7080 			    offsetof(struct sctp_init, a_rwnd)) {
7081 				/*
7082 				 * In this case we can check if we got an
7083 				 * INIT chunk and if the initiate tag
7084 				 * matches.
7085 				 */
7086 				uint32_t initiate_tag;
7087 				uint8_t chunk_type;
7088 
7089 				m_copydata(ip6cp->ip6c_m,
7090 				    ip6cp->ip6c_off +
7091 				    sizeof(struct udphdr) +
7092 				    sizeof(struct sctphdr),
7093 				    sizeof(uint8_t),
7094 				    (caddr_t)&chunk_type);
7095 				m_copydata(ip6cp->ip6c_m,
7096 				    ip6cp->ip6c_off +
7097 				    sizeof(struct udphdr) +
7098 				    sizeof(struct sctphdr) +
7099 				    sizeof(struct sctp_chunkhdr),
7100 				    sizeof(uint32_t),
7101 				    (caddr_t)&initiate_tag);
7102 				if ((chunk_type != SCTP_INITIATION) ||
7103 				    (ntohl(initiate_tag) != stcb->asoc.my_vtag)) {
7104 					SCTP_TCB_UNLOCK(stcb);
7105 					return;
7106 				}
7107 			} else {
7108 				SCTP_TCB_UNLOCK(stcb);
7109 				return;
7110 			}
7111 		}
7112 		type = ip6cp->ip6c_icmp6->icmp6_type;
7113 		code = ip6cp->ip6c_icmp6->icmp6_code;
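		/*
		 * Map a port unreachable for the encapsulating UDP port to
		 * the parameter problem / unrecognized next header error
		 * that a non-tunneled SCTP packet would have produced.
		 */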
7114 		if ((type == ICMP6_DST_UNREACH) &&
7115 		    (code == ICMP6_DST_UNREACH_NOPORT)) {
7116 			type = ICMP6_PARAM_PROB;
7117 			code = ICMP6_PARAMPROB_NEXTHEADER;
7118 		}
7119 		sctp6_notify(inp, stcb, net, type, code,
7120 		    ntohl(ip6cp->ip6c_icmp6->icmp6_mtu));
7121 	} else {
7122 		if ((stcb == NULL) && (inp != NULL)) {
7123 			/* reduce inp's ref-count */
7124 			SCTP_INP_WLOCK(inp);
7125 			SCTP_INP_DECR_REF(inp);
7126 			SCTP_INP_WUNLOCK(inp);
7127 		}
7128 		if (stcb) {
7129 			SCTP_TCB_UNLOCK(stcb);
7130 		}
7131 	}
7132 }
7133 #endif
7134 
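/*
 * Tear down SCTP over UDP tunneling: close the IPv4 and/or IPv6 kernel
 * UDP sockets, if they exist.  The caller must hold sctp_sysctl_info_lock()
 * for writing (see the comment in the function body).
 */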
7135 void
7136 sctp_over_udp_stop(void)
7137 {
7138 	/*
7139 	 * This function assumes the sysctl caller holds sctp_sysctl_info_lock()
7140 	 * for writing!
7141 	 */
7142 #ifdef INET
7143 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7144 		soclose(SCTP_BASE_INFO(udp4_tun_socket));
7145 		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
7146 	}
7147 #endif
7148 #ifdef INET6
7149 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7150 		soclose(SCTP_BASE_INFO(udp6_tun_socket));
7151 		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
7152 	}
7153 #endif
7154 }
7155 
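/*
 * Start SCTP over UDP tunneling: create a kernel UDP socket per address
 * family, install sctp_recv_udp_tunneled_packet() and the matching ICMP
 * handler via udp_set_kernel_tunneling(), and bind the socket to the
 * configured sctp_udp_tunneling_port.  Returns 0 on success or an errno
 * value; on any failure, sockets opened so far are closed again via
 * sctp_over_udp_stop().
 */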
7156 int
7157 sctp_over_udp_start(void)
7158 {
7159 	uint16_t port;
7160 	int ret;
7161 #ifdef INET
7162 	struct sockaddr_in sin;
7163 #endif
7164 #ifdef INET6
7165 	struct sockaddr_in6 sin6;
7166 #endif
7167 	/*
7168 	 * This function assumes the sysctl caller holds sctp_sysctl_info_lock()
7169 	 * for writing!
7170 	 */
7171 	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
7172 	if (ntohs(port) == 0) {
7173 		/* Must have a port set */
7174 		return (EINVAL);
7175 	}
7176 #ifdef INET
7177 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7178 		/* Already running -- must stop first */
7179 		return (EALREADY);
7180 	}
7181 #endif
7182 #ifdef INET6
7183 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7184 		/* Already running -- must stop first */
7185 		return (EALREADY);
7186 	}
7187 #endif
7188 #ifdef INET
7189 	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
7190 	    SOCK_DGRAM, IPPROTO_UDP,
7191 	    curthread->td_ucred, curthread))) {
7192 		sctp_over_udp_stop();
7193 		return (ret);
7194 	}
7195 	/* Call the special UDP hook. */
7196 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
7197 	    sctp_recv_udp_tunneled_packet,
7198 	    sctp_recv_icmp_tunneled_packet,
7199 	    NULL))) {
7200 		sctp_over_udp_stop();
7201 		return (ret);
7202 	}
7203 	/* Ok, we have a socket, bind it to the port. */
7204 	memset(&sin, 0, sizeof(struct sockaddr_in));
7205 	sin.sin_len = sizeof(struct sockaddr_in);
7206 	sin.sin_family = AF_INET;
7207 	sin.sin_port = htons(port);
7208 	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
7209 	    (struct sockaddr *)&sin, curthread))) {
7210 		sctp_over_udp_stop();
7211 		return (ret);
7212 	}
7213 #endif
7214 #ifdef INET6
7215 	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
7216 	    SOCK_DGRAM, IPPROTO_UDP,
7217 	    curthread->td_ucred, curthread))) {
7218 		sctp_over_udp_stop();
7219 		return (ret);
7220 	}
7221 	/* Call the special UDP hook. */
7222 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
7223 	    sctp_recv_udp_tunneled_packet,
7224 	    sctp_recv_icmp6_tunneled_packet,
7225 	    NULL))) {
7226 		sctp_over_udp_stop();
7227 		return (ret);
7228 	}
7229 	/* Ok, we have a socket, bind it to the port. */
7230 	memset(&sin6, 0, sizeof(struct sockaddr_in6));
7231 	sin6.sin6_len = sizeof(struct sockaddr_in6);
7232 	sin6.sin6_family = AF_INET6;
7233 	sin6.sin6_port = htons(port);
7234 	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
7235 	    (struct sockaddr *)&sin6, curthread))) {
7236 		sctp_over_udp_stop();
7237 		return (ret);
7238 	}
7239 #endif
7240 	return (0);
7241 }
7242 
7243 #if defined(INET6) || defined(INET)
7244 
7245 /*
7246  * sctp_min_mtu() returns the minimum of all non-zero arguments.
7247  * If all arguments are zero, zero is returned.
7248  */
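/*
 * For example, sctp_min_mtu(0, 1400, 9000) returns 1400 and
 * sctp_min_mtu(0, 0, 0) returns 0.
 */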
7249 uint32_t
7250 sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
7251 {
7252 	if (mtu1 > 0) {
7253 		if (mtu2 > 0) {
7254 			if (mtu3 > 0) {
7255 				return (min(mtu1, min(mtu2, mtu3)));
7256 			} else {
7257 				return (min(mtu1, mtu2));
7258 			}
7259 		} else {
7260 			if (mtu3 > 0) {
7261 				return (min(mtu1, mtu3));
7262 			} else {
7263 				return (mtu1);
7264 			}
7265 		}
7266 	} else {
7267 		if (mtu2 > 0) {
7268 			if (mtu3 > 0) {
7269 				return (min(mtu2, mtu3));
7270 			} else {
7271 				return (mtu2);
7272 			}
7273 		} else {
7274 			return (mtu3);
7275 		}
7276 	}
7277 }
7278 
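/*
 * Record the path MTU for the given peer address and FIB in the host cache
 * shared with TCP (tcp_hc_updatemtu()).  Unsupported address families are
 * silently ignored.
 */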
7279 void
7280 sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
7281 {
7282 	struct in_conninfo inc;
7283 
7284 	memset(&inc, 0, sizeof(struct in_conninfo));
7285 	inc.inc_fibnum = fibnum;
7286 	switch (addr->sa.sa_family) {
7287 #ifdef INET
7288 	case AF_INET:
7289 		inc.inc_faddr = addr->sin.sin_addr;
7290 		break;
7291 #endif
7292 #ifdef INET6
7293 	case AF_INET6:
7294 		inc.inc_flags |= INC_ISIPV6;
7295 		inc.inc6_faddr = addr->sin6.sin6_addr;
7296 		break;
7297 #endif
7298 	default:
7299 		return;
7300 	}
7301 	tcp_hc_updatemtu(&inc, (u_long)mtu);
7302 }
7303 
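/*
 * Look up a cached path MTU for the given peer address and FIB via
 * tcp_hc_getmtu().  Returns 0 if the address family is not supported or
 * no entry is cached.
 */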
7304 uint32_t
7305 sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
7306 {
7307 	struct in_conninfo inc;
7308 
7309 	memset(&inc, 0, sizeof(struct in_conninfo));
7310 	inc.inc_fibnum = fibnum;
7311 	switch (addr->sa.sa_family) {
7312 #ifdef INET
7313 	case AF_INET:
7314 		inc.inc_faddr = addr->sin.sin_addr;
7315 		break;
7316 #endif
7317 #ifdef INET6
7318 	case AF_INET6:
7319 		inc.inc_flags |= INC_ISIPV6;
7320 		inc.inc6_faddr = addr->sin6.sin6_addr;
7321 		break;
7322 #endif
7323 	default:
7324 		return (0);
7325 	}
7326 	return ((uint32_t)tcp_hc_getmtu(&inc));
7327 }
7328 #endif
7329