xref: /freebsd/sys/netinet/sctputil.c (revision 39beb93c)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * a) Redistributions of source code must retain the above copyright notice,
8  *   this list of conditions and the following disclaimer.
9  *
10  * b) Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *   the documentation and/or other materials provided with the distribution.
13  *
14  * c) Neither the name of Cisco Systems, Inc. nor the names of its
15  *    contributors may be used to endorse or promote products derived
16  *    from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $	 */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #endif
43 #include <netinet/sctp_header.h>
44 #include <netinet/sctp_output.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
47 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
48 #include <netinet/sctp_auth.h>
49 #include <netinet/sctp_asconf.h>
50 #include <netinet/sctp_cc_functions.h>
51 
52 #define NUMBER_OF_MTU_SIZES 18
53 
54 
55 #if defined(__Windows__) && !defined(SCTP_LOCAL_TRACE_BUF)
56 #include "eventrace_netinet.h"
57 #include "sctputil.tmh"		/* this is the file that will be auto
58 				 * generated */
59 #else
60 #ifndef KTR_SCTP
61 #define KTR_SCTP KTR_SUBSYS
62 #endif
63 #endif
64 
65 void
66 sctp_sblog(struct sockbuf *sb,
67     struct sctp_tcb *stcb, int from, int incr)
68 {
69 	struct sctp_cwnd_log sctp_clog;
70 
71 	sctp_clog.x.sb.stcb = stcb;
72 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
73 	if (stcb)
74 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
75 	else
76 		sctp_clog.x.sb.stcb_sbcc = 0;
77 	sctp_clog.x.sb.incr = incr;
78 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
79 	    SCTP_LOG_EVENT_SB,
80 	    from,
81 	    sctp_clog.x.misc.log1,
82 	    sctp_clog.x.misc.log2,
83 	    sctp_clog.x.misc.log3,
84 	    sctp_clog.x.misc.log4);
85 }
86 
87 void
88 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
89 {
90 	struct sctp_cwnd_log sctp_clog;
91 
92 	sctp_clog.x.close.inp = (void *)inp;
93 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
94 	if (stcb) {
95 		sctp_clog.x.close.stcb = (void *)stcb;
96 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
97 	} else {
98 		sctp_clog.x.close.stcb = 0;
99 		sctp_clog.x.close.state = 0;
100 	}
101 	sctp_clog.x.close.loc = loc;
102 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
103 	    SCTP_LOG_EVENT_CLOSE,
104 	    0,
105 	    sctp_clog.x.misc.log1,
106 	    sctp_clog.x.misc.log2,
107 	    sctp_clog.x.misc.log3,
108 	    sctp_clog.x.misc.log4);
109 }
110 
111 
112 void
113 rto_logging(struct sctp_nets *net, int from)
114 {
115 	struct sctp_cwnd_log sctp_clog;
116 
117 	memset(&sctp_clog, 0, sizeof(sctp_clog));
118 	sctp_clog.x.rto.net = (void *)net;
119 	sctp_clog.x.rto.rtt = net->prev_rtt;
120 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
121 	    SCTP_LOG_EVENT_RTT,
122 	    from,
123 	    sctp_clog.x.misc.log1,
124 	    sctp_clog.x.misc.log2,
125 	    sctp_clog.x.misc.log3,
126 	    sctp_clog.x.misc.log4);
127 
128 }
129 
130 void
131 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
132 {
133 	struct sctp_cwnd_log sctp_clog;
134 
135 	sctp_clog.x.strlog.stcb = stcb;
136 	sctp_clog.x.strlog.n_tsn = tsn;
137 	sctp_clog.x.strlog.n_sseq = sseq;
138 	sctp_clog.x.strlog.e_tsn = 0;
139 	sctp_clog.x.strlog.e_sseq = 0;
140 	sctp_clog.x.strlog.strm = stream;
141 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
142 	    SCTP_LOG_EVENT_STRM,
143 	    from,
144 	    sctp_clog.x.misc.log1,
145 	    sctp_clog.x.misc.log2,
146 	    sctp_clog.x.misc.log3,
147 	    sctp_clog.x.misc.log4);
148 
149 }
150 
151 void
152 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
153 {
154 	struct sctp_cwnd_log sctp_clog;
155 
156 	sctp_clog.x.nagle.stcb = (void *)stcb;
157 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
158 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
159 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
160 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
161 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
162 	    SCTP_LOG_EVENT_NAGLE,
163 	    action,
164 	    sctp_clog.x.misc.log1,
165 	    sctp_clog.x.misc.log2,
166 	    sctp_clog.x.misc.log3,
167 	    sctp_clog.x.misc.log4);
168 }
169 
170 
171 void
172 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
173 {
174 	struct sctp_cwnd_log sctp_clog;
175 
176 	sctp_clog.x.sack.cumack = cumack;
177 	sctp_clog.x.sack.oldcumack = old_cumack;
178 	sctp_clog.x.sack.tsn = tsn;
179 	sctp_clog.x.sack.numGaps = gaps;
180 	sctp_clog.x.sack.numDups = dups;
181 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
182 	    SCTP_LOG_EVENT_SACK,
183 	    from,
184 	    sctp_clog.x.misc.log1,
185 	    sctp_clog.x.misc.log2,
186 	    sctp_clog.x.misc.log3,
187 	    sctp_clog.x.misc.log4);
188 }
189 
190 void
191 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
192 {
193 	struct sctp_cwnd_log sctp_clog;
194 
195 	memset(&sctp_clog, 0, sizeof(sctp_clog));
196 	sctp_clog.x.map.base = map;
197 	sctp_clog.x.map.cum = cum;
198 	sctp_clog.x.map.high = high;
199 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
200 	    SCTP_LOG_EVENT_MAP,
201 	    from,
202 	    sctp_clog.x.misc.log1,
203 	    sctp_clog.x.misc.log2,
204 	    sctp_clog.x.misc.log3,
205 	    sctp_clog.x.misc.log4);
206 }
207 
208 void
209 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
210     int from)
211 {
212 	struct sctp_cwnd_log sctp_clog;
213 
214 	memset(&sctp_clog, 0, sizeof(sctp_clog));
215 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
216 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
217 	sctp_clog.x.fr.tsn = tsn;
218 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
219 	    SCTP_LOG_EVENT_FR,
220 	    from,
221 	    sctp_clog.x.misc.log1,
222 	    sctp_clog.x.misc.log2,
223 	    sctp_clog.x.misc.log3,
224 	    sctp_clog.x.misc.log4);
225 
226 }
227 
228 
229 void
230 sctp_log_mb(struct mbuf *m, int from)
231 {
232 	struct sctp_cwnd_log sctp_clog;
233 
234 	sctp_clog.x.mb.mp = m;
235 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
236 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
237 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
238 	if (SCTP_BUF_IS_EXTENDED(m)) {
239 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
240 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
241 	} else {
242 		sctp_clog.x.mb.ext = 0;
243 		sctp_clog.x.mb.refcnt = 0;
244 	}
245 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
246 	    SCTP_LOG_EVENT_MBUF,
247 	    from,
248 	    sctp_clog.x.misc.log1,
249 	    sctp_clog.x.misc.log2,
250 	    sctp_clog.x.misc.log3,
251 	    sctp_clog.x.misc.log4);
252 }
253 
254 
255 void
256 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
257     int from)
258 {
259 	struct sctp_cwnd_log sctp_clog;
260 
261 	if (control == NULL) {
262 		SCTP_PRINTF("Gak log of NULL?\n");
263 		return;
264 	}
265 	sctp_clog.x.strlog.stcb = control->stcb;
266 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
267 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
268 	sctp_clog.x.strlog.strm = control->sinfo_stream;
269 	if (poschk != NULL) {
270 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
271 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
272 	} else {
273 		sctp_clog.x.strlog.e_tsn = 0;
274 		sctp_clog.x.strlog.e_sseq = 0;
275 	}
276 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
277 	    SCTP_LOG_EVENT_STRM,
278 	    from,
279 	    sctp_clog.x.misc.log1,
280 	    sctp_clog.x.misc.log2,
281 	    sctp_clog.x.misc.log3,
282 	    sctp_clog.x.misc.log4);
283 
284 }
285 
286 void
287 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
288 {
289 	struct sctp_cwnd_log sctp_clog;
290 
291 	sctp_clog.x.cwnd.net = net;
292 	if (stcb->asoc.send_queue_cnt > 255)
293 		sctp_clog.x.cwnd.cnt_in_send = 255;
294 	else
295 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
296 	if (stcb->asoc.stream_queue_cnt > 255)
297 		sctp_clog.x.cwnd.cnt_in_str = 255;
298 	else
299 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
300 
301 	if (net) {
302 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
303 		sctp_clog.x.cwnd.inflight = net->flight_size;
304 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
305 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
306 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
307 	}
308 	if (SCTP_CWNDLOG_PRESEND == from) {
309 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
310 	}
311 	sctp_clog.x.cwnd.cwnd_augment = augment;
312 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
313 	    SCTP_LOG_EVENT_CWND,
314 	    from,
315 	    sctp_clog.x.misc.log1,
316 	    sctp_clog.x.misc.log2,
317 	    sctp_clog.x.misc.log3,
318 	    sctp_clog.x.misc.log4);
319 
320 }
321 
322 void
323 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
324 {
325 	struct sctp_cwnd_log sctp_clog;
326 
327 	memset(&sctp_clog, 0, sizeof(sctp_clog));
328 	if (inp) {
329 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
330 
331 	} else {
332 		sctp_clog.x.lock.sock = (void *)NULL;
333 	}
334 	sctp_clog.x.lock.inp = (void *)inp;
335 	if (stcb) {
336 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
337 	} else {
338 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
339 	}
340 	if (inp) {
341 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
342 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
343 	} else {
344 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
345 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
346 	}
347 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
348 	if (inp->sctp_socket) {
349 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
350 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
351 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
352 	} else {
353 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
354 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
355 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
356 	}
357 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
358 	    SCTP_LOG_LOCK_EVENT,
359 	    from,
360 	    sctp_clog.x.misc.log1,
361 	    sctp_clog.x.misc.log2,
362 	    sctp_clog.x.misc.log3,
363 	    sctp_clog.x.misc.log4);
364 
365 }
366 
367 void
368 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
369 {
370 	struct sctp_cwnd_log sctp_clog;
371 
372 	memset(&sctp_clog, 0, sizeof(sctp_clog));
373 	sctp_clog.x.cwnd.net = net;
374 	sctp_clog.x.cwnd.cwnd_new_value = error;
375 	sctp_clog.x.cwnd.inflight = net->flight_size;
376 	sctp_clog.x.cwnd.cwnd_augment = burst;
377 	if (stcb->asoc.send_queue_cnt > 255)
378 		sctp_clog.x.cwnd.cnt_in_send = 255;
379 	else
380 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
381 	if (stcb->asoc.stream_queue_cnt > 255)
382 		sctp_clog.x.cwnd.cnt_in_str = 255;
383 	else
384 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
385 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
386 	    SCTP_LOG_EVENT_MAXBURST,
387 	    from,
388 	    sctp_clog.x.misc.log1,
389 	    sctp_clog.x.misc.log2,
390 	    sctp_clog.x.misc.log3,
391 	    sctp_clog.x.misc.log4);
392 
393 }
394 
395 void
396 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
397 {
398 	struct sctp_cwnd_log sctp_clog;
399 
400 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
401 	sctp_clog.x.rwnd.send_size = snd_size;
402 	sctp_clog.x.rwnd.overhead = overhead;
403 	sctp_clog.x.rwnd.new_rwnd = 0;
404 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
405 	    SCTP_LOG_EVENT_RWND,
406 	    from,
407 	    sctp_clog.x.misc.log1,
408 	    sctp_clog.x.misc.log2,
409 	    sctp_clog.x.misc.log3,
410 	    sctp_clog.x.misc.log4);
411 }
412 
413 void
414 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
415 {
416 	struct sctp_cwnd_log sctp_clog;
417 
418 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
419 	sctp_clog.x.rwnd.send_size = flight_size;
420 	sctp_clog.x.rwnd.overhead = overhead;
421 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
422 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
423 	    SCTP_LOG_EVENT_RWND,
424 	    from,
425 	    sctp_clog.x.misc.log1,
426 	    sctp_clog.x.misc.log2,
427 	    sctp_clog.x.misc.log3,
428 	    sctp_clog.x.misc.log4);
429 }
430 
431 void
432 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
433 {
434 	struct sctp_cwnd_log sctp_clog;
435 
436 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
437 	sctp_clog.x.mbcnt.size_change = book;
438 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
439 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
440 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
441 	    SCTP_LOG_EVENT_MBCNT,
442 	    from,
443 	    sctp_clog.x.misc.log1,
444 	    sctp_clog.x.misc.log2,
445 	    sctp_clog.x.misc.log3,
446 	    sctp_clog.x.misc.log4);
447 
448 }
449 
/*
 * Log four caller-supplied 32-bit values as a generic trace event
 * (SCTP_LOG_MISC_EVENT); the meaning of a..d depends on the 'from'
 * location code supplied by the caller.
 */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}
458 
459 void
460 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int from)
461 {
462 	struct sctp_cwnd_log sctp_clog;
463 
464 	sctp_clog.x.wake.stcb = (void *)stcb;
465 	sctp_clog.x.wake.wake_cnt = wake_cnt;
466 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
467 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
468 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
469 
470 	if (stcb->asoc.stream_queue_cnt < 0xff)
471 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
472 	else
473 		sctp_clog.x.wake.stream_qcnt = 0xff;
474 
475 	if (stcb->asoc.chunks_on_out_queue < 0xff)
476 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
477 	else
478 		sctp_clog.x.wake.chunks_on_oque = 0xff;
479 
480 	sctp_clog.x.wake.sctpflags = 0;
481 	/* set in the defered mode stuff */
482 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
483 		sctp_clog.x.wake.sctpflags |= 1;
484 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
485 		sctp_clog.x.wake.sctpflags |= 2;
486 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
487 		sctp_clog.x.wake.sctpflags |= 4;
488 	/* what about the sb */
489 	if (stcb->sctp_socket) {
490 		struct socket *so = stcb->sctp_socket;
491 
492 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
493 	} else {
494 		sctp_clog.x.wake.sbflags = 0xff;
495 	}
496 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
497 	    SCTP_LOG_EVENT_WAKE,
498 	    from,
499 	    sctp_clog.x.misc.log1,
500 	    sctp_clog.x.misc.log2,
501 	    sctp_clog.x.misc.log3,
502 	    sctp_clog.x.misc.log4);
503 
504 }
505 
506 void
507 sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc, int sendlen)
508 {
509 	struct sctp_cwnd_log sctp_clog;
510 
511 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
512 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
513 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
514 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
515 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
516 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
517 	sctp_clog.x.blk.sndlen = sendlen;
518 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
519 	    SCTP_LOG_EVENT_BLOCK,
520 	    from,
521 	    sctp_clog.x.misc.log1,
522 	    sctp_clog.x.misc.log2,
523 	    sctp_clog.x.misc.log3,
524 	    sctp_clog.x.misc.log4);
525 
526 }
527 
int
sctp_fill_stat_log(void *optval, size_t *optsize)
{
	/*
	 * Stub: log extraction is done externally (e.g. via ktrdump),
	 * so there is nothing to copy out here.  Always succeeds.
	 */
	return (0);
}
534 
535 #ifdef SCTP_AUDITING_ENABLED
/* Circular audit trace: each entry is a two-byte (event, detail) pair. */
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
/* Next slot to write into sctp_audit_data; wraps at SCTP_AUDIT_SIZE. */
static int sctp_audit_indx = 0;
538 
539 static
540 void
541 sctp_print_audit_report(void)
542 {
543 	int i;
544 	int cnt;
545 
546 	cnt = 0;
547 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
548 		if ((sctp_audit_data[i][0] == 0xe0) &&
549 		    (sctp_audit_data[i][1] == 0x01)) {
550 			cnt = 0;
551 			SCTP_PRINTF("\n");
552 		} else if (sctp_audit_data[i][0] == 0xf0) {
553 			cnt = 0;
554 			SCTP_PRINTF("\n");
555 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
556 		    (sctp_audit_data[i][1] == 0x01)) {
557 			SCTP_PRINTF("\n");
558 			cnt = 0;
559 		}
560 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
561 		    (uint32_t) sctp_audit_data[i][1]);
562 		cnt++;
563 		if ((cnt % 14) == 0)
564 			SCTP_PRINTF("\n");
565 	}
566 	for (i = 0; i < sctp_audit_indx; i++) {
567 		if ((sctp_audit_data[i][0] == 0xe0) &&
568 		    (sctp_audit_data[i][1] == 0x01)) {
569 			cnt = 0;
570 			SCTP_PRINTF("\n");
571 		} else if (sctp_audit_data[i][0] == 0xf0) {
572 			cnt = 0;
573 			SCTP_PRINTF("\n");
574 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
575 		    (sctp_audit_data[i][1] == 0x01)) {
576 			SCTP_PRINTF("\n");
577 			cnt = 0;
578 		}
579 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
580 		    (uint32_t) sctp_audit_data[i][1]);
581 		cnt++;
582 		if ((cnt % 14) == 0)
583 			SCTP_PRINTF("\n");
584 	}
585 	SCTP_PRINTF("\n");
586 }
587 
588 void
589 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
590     struct sctp_nets *net)
591 {
592 	int resend_cnt, tot_out, rep, tot_book_cnt;
593 	struct sctp_nets *lnet;
594 	struct sctp_tmit_chunk *chk;
595 
596 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
597 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
598 	sctp_audit_indx++;
599 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
600 		sctp_audit_indx = 0;
601 	}
602 	if (inp == NULL) {
603 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
604 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
605 		sctp_audit_indx++;
606 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
607 			sctp_audit_indx = 0;
608 		}
609 		return;
610 	}
611 	if (stcb == NULL) {
612 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
613 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
614 		sctp_audit_indx++;
615 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
616 			sctp_audit_indx = 0;
617 		}
618 		return;
619 	}
620 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
621 	sctp_audit_data[sctp_audit_indx][1] =
622 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
623 	sctp_audit_indx++;
624 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
625 		sctp_audit_indx = 0;
626 	}
627 	rep = 0;
628 	tot_book_cnt = 0;
629 	resend_cnt = tot_out = 0;
630 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
631 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
632 			resend_cnt++;
633 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
634 			tot_out += chk->book_size;
635 			tot_book_cnt++;
636 		}
637 	}
638 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
639 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
640 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
641 		sctp_audit_indx++;
642 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
643 			sctp_audit_indx = 0;
644 		}
645 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
646 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
647 		rep = 1;
648 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
649 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
650 		sctp_audit_data[sctp_audit_indx][1] =
651 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
652 		sctp_audit_indx++;
653 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
654 			sctp_audit_indx = 0;
655 		}
656 	}
657 	if (tot_out != stcb->asoc.total_flight) {
658 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
659 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
660 		sctp_audit_indx++;
661 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
662 			sctp_audit_indx = 0;
663 		}
664 		rep = 1;
665 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
666 		    (int)stcb->asoc.total_flight);
667 		stcb->asoc.total_flight = tot_out;
668 	}
669 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
670 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
671 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
672 		sctp_audit_indx++;
673 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
674 			sctp_audit_indx = 0;
675 		}
676 		rep = 1;
677 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book);
678 
679 		stcb->asoc.total_flight_count = tot_book_cnt;
680 	}
681 	tot_out = 0;
682 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
683 		tot_out += lnet->flight_size;
684 	}
685 	if (tot_out != stcb->asoc.total_flight) {
686 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
687 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
688 		sctp_audit_indx++;
689 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
690 			sctp_audit_indx = 0;
691 		}
692 		rep = 1;
693 		SCTP_PRINTF("real flight:%d net total was %d\n",
694 		    stcb->asoc.total_flight, tot_out);
695 		/* now corrective action */
696 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
697 
698 			tot_out = 0;
699 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
700 				if ((chk->whoTo == lnet) &&
701 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
702 					tot_out += chk->book_size;
703 				}
704 			}
705 			if (lnet->flight_size != tot_out) {
706 				SCTP_PRINTF("net:%x flight was %d corrected to %d\n",
707 				    (uint32_t) lnet, lnet->flight_size,
708 				    tot_out);
709 				lnet->flight_size = tot_out;
710 			}
711 		}
712 	}
713 	if (rep) {
714 		sctp_print_audit_report();
715 	}
716 }
717 
718 void
719 sctp_audit_log(uint8_t ev, uint8_t fd)
720 {
721 
722 	sctp_audit_data[sctp_audit_indx][0] = ev;
723 	sctp_audit_data[sctp_audit_indx][1] = fd;
724 	sctp_audit_indx++;
725 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
726 		sctp_audit_indx = 0;
727 	}
728 }
729 
730 #endif
731 
732 /*
733  * a list of sizes based on typical mtu's, used only if next hop size not
734  * returned.
735  */
/*
 * NOTE: NUMBER_OF_MTU_SIZES (currently 18) must stay equal to the
 * number of entries here — find_next_best_mtu() iterates using that
 * macro.  Entries must remain in ascending order.
 */
static int sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};
756 
757 void
758 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
759 {
760 	struct sctp_association *asoc;
761 	struct sctp_nets *net;
762 
763 	asoc = &stcb->asoc;
764 
765 	(void)SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer);
766 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
767 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
768 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
769 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
770 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
771 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
772 		(void)SCTP_OS_TIMER_STOP(&net->fr_timer.timer);
773 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
774 	}
775 }
776 
777 int
778 find_next_best_mtu(int totsz)
779 {
780 	int i, perfer;
781 
782 	/*
783 	 * if we are in here we must find the next best fit based on the
784 	 * size of the dg that failed to be sent.
785 	 */
786 	perfer = 0;
787 	for (i = 0; i < NUMBER_OF_MTU_SIZES; i++) {
788 		if (totsz < sctp_mtu_sizes[i]) {
789 			perfer = i - 1;
790 			if (perfer < 0)
791 				perfer = 0;
792 			break;
793 		}
794 	}
795 	return (sctp_mtu_sizes[perfer]);
796 }
797 
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Refill the endpoint's random store: HMAC (SCTP_HMAC) the seed
	 * random_numbers keyed by the running random_counter, and make the
	 * digest the new random_store.  store_at is reset so consumers
	 * restart from the beginning, and the counter is bumped so the
	 * next refill produces different output.
	 *
	 * Intentionally unlocked: if competing callers race here we just
	 * get more mixing in the store, which is acceptable.  There is a
	 * small chance two callers draw the same values, but that is
	 * still random output.
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	m->random_counter++;
}
816 
817 uint32_t
818 sctp_select_initial_TSN(struct sctp_pcb *inp)
819 {
820 	/*
821 	 * A true implementation should use random selection process to get
822 	 * the initial stream sequence number, using RFC1750 as a good
823 	 * guideline
824 	 */
825 	uint32_t x, *xp;
826 	uint8_t *p;
827 	int store_at, new_store;
828 
829 	if (inp->initial_sequence_debug != 0) {
830 		uint32_t ret;
831 
832 		ret = inp->initial_sequence_debug;
833 		inp->initial_sequence_debug++;
834 		return (ret);
835 	}
836 retry:
837 	store_at = inp->store_at;
838 	new_store = store_at + sizeof(uint32_t);
839 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
840 		new_store = 0;
841 	}
842 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
843 		goto retry;
844 	}
845 	if (new_store == 0) {
846 		/* Refill the random store */
847 		sctp_fill_random_store(inp);
848 	}
849 	p = &inp->random_store[store_at];
850 	xp = (uint32_t *) p;
851 	x = *xp;
852 	return (x);
853 }
854 
855 uint32_t
856 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int save_in_twait)
857 {
858 	u_long x, not_done;
859 	struct timeval now;
860 
861 	(void)SCTP_GETTIME_TIMEVAL(&now);
862 	not_done = 1;
863 	while (not_done) {
864 		x = sctp_select_initial_TSN(&inp->sctp_ep);
865 		if (x == 0) {
866 			/* we never use 0 */
867 			continue;
868 		}
869 		if (sctp_is_vtag_good(inp, x, lport, rport, &now, save_in_twait)) {
870 			not_done = 0;
871 		}
872 	}
873 	return (x);
874 }
875 
876 int
877 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
878     int for_a_init, uint32_t override_tag, uint32_t vrf_id)
879 {
880 	struct sctp_association *asoc;
881 
882 	/*
883 	 * Anything set to zero is taken care of by the allocation routine's
884 	 * bzero
885 	 */
886 
887 	/*
888 	 * Up front select what scoping to apply on addresses I tell my peer
889 	 * Not sure what to do with these right now, we will need to come up
890 	 * with a way to set them. We may need to pass them through from the
891 	 * caller in the sctp_aloc_assoc() function.
892 	 */
893 	int i;
894 
895 	asoc = &stcb->asoc;
896 	/* init all variables to a known value. */
897 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
898 	asoc->max_burst = m->sctp_ep.max_burst;
899 	asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
900 	asoc->cookie_life = m->sctp_ep.def_cookie_life;
901 	asoc->sctp_cmt_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_on_off);
902 	/* EY Init nr_sack variable */
903 	asoc->sctp_nr_sack_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
904 	/* JRS 5/21/07 - Init CMT PF variables */
905 	asoc->sctp_cmt_pf = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_pf);
906 	asoc->sctp_frag_point = m->sctp_frag_point;
907 #ifdef INET
908 	asoc->default_tos = m->ip_inp.inp.inp_ip_tos;
909 #else
910 	asoc->default_tos = 0;
911 #endif
912 
913 #ifdef INET6
914 	asoc->default_flowlabel = ((struct in6pcb *)m)->in6p_flowinfo;
915 #else
916 	asoc->default_flowlabel = 0;
917 #endif
918 	asoc->sb_send_resv = 0;
919 	if (override_tag) {
920 #ifdef MICHAELS_EXPERIMENT
921 		if (sctp_is_in_timewait(override_tag, stcb->sctp_ep->sctp_lport, stcb->rport)) {
922 			/*
923 			 * It must be in the time-wait hash, we put it there
924 			 * when we aloc one. If not the peer is playing
925 			 * games.
926 			 */
927 			asoc->my_vtag = override_tag;
928 		} else {
929 			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
930 #ifdef INVARIANTS
931 			panic("Huh is_in_timewait fails");
932 #endif
933 			return (ENOMEM);
934 		}
935 #else
936 		asoc->my_vtag = override_tag;
937 #endif
938 	} else {
939 		asoc->my_vtag = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
940 	}
941 	/* Get the nonce tags */
942 	asoc->my_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
943 	asoc->peer_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
944 	asoc->vrf_id = vrf_id;
945 
946 	if (sctp_is_feature_on(m, SCTP_PCB_FLAGS_DONOT_HEARTBEAT))
947 		asoc->hb_is_disabled = 1;
948 	else
949 		asoc->hb_is_disabled = 0;
950 
951 #ifdef SCTP_ASOCLOG_OF_TSNS
952 	asoc->tsn_in_at = 0;
953 	asoc->tsn_out_at = 0;
954 	asoc->tsn_in_wrapped = 0;
955 	asoc->tsn_out_wrapped = 0;
956 	asoc->cumack_log_at = 0;
957 	asoc->cumack_log_atsnt = 0;
958 #endif
959 #ifdef SCTP_FS_SPEC_LOG
960 	asoc->fs_index = 0;
961 #endif
962 	asoc->refcnt = 0;
963 	asoc->assoc_up_sent = 0;
964 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
965 	    sctp_select_initial_TSN(&m->sctp_ep);
966 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
967 	/* we are optimisitic here */
968 	asoc->peer_supports_pktdrop = 1;
969 	asoc->peer_supports_nat = 0;
970 	asoc->sent_queue_retran_cnt = 0;
971 
972 	/* for CMT */
973 	asoc->last_net_data_came_from = NULL;
974 
975 	/* This will need to be adjusted */
976 	asoc->last_cwr_tsn = asoc->init_seq_number - 1;
977 	asoc->last_acked_seq = asoc->init_seq_number - 1;
978 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
979 	asoc->asconf_seq_in = asoc->last_acked_seq;
980 
981 	/* here we are different, we hold the next one we expect */
982 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
983 
984 	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
985 	asoc->initial_rto = m->sctp_ep.initial_rto;
986 
987 	asoc->max_init_times = m->sctp_ep.max_init_times;
988 	asoc->max_send_times = m->sctp_ep.max_send_times;
989 	asoc->def_net_failure = m->sctp_ep.def_net_failure;
990 	asoc->free_chunk_cnt = 0;
991 
992 	asoc->iam_blocking = 0;
993 	/* ECN Nonce initialization */
994 	asoc->context = m->sctp_context;
995 	asoc->def_send = m->def_send;
996 	asoc->ecn_nonce_allowed = 0;
997 	asoc->receiver_nonce_sum = 1;
998 	asoc->nonce_sum_expect_base = 1;
999 	asoc->nonce_sum_check = 1;
1000 	asoc->nonce_resync_tsn = 0;
1001 	asoc->nonce_wait_for_ecne = 0;
1002 	asoc->nonce_wait_tsn = 0;
1003 	asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1004 	asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
1005 	asoc->pr_sctp_cnt = 0;
1006 	asoc->total_output_queue_size = 0;
1007 
1008 	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1009 		struct in6pcb *inp6;
1010 
1011 		/* Its a V6 socket */
1012 		inp6 = (struct in6pcb *)m;
1013 		asoc->ipv6_addr_legal = 1;
1014 		/* Now look at the binding flag to see if V4 will be legal */
1015 		if (SCTP_IPV6_V6ONLY(inp6) == 0) {
1016 			asoc->ipv4_addr_legal = 1;
1017 		} else {
1018 			/* V4 addresses are NOT legal on the association */
1019 			asoc->ipv4_addr_legal = 0;
1020 		}
1021 	} else {
1022 		/* Its a V4 socket, no - V6 */
1023 		asoc->ipv4_addr_legal = 1;
1024 		asoc->ipv6_addr_legal = 0;
1025 	}
1026 
1027 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
1028 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);
1029 
1030 	asoc->smallest_mtu = m->sctp_frag_point;
1031 #ifdef SCTP_PRINT_FOR_B_AND_M
1032 	SCTP_PRINTF("smallest_mtu init'd with asoc to :%d\n",
1033 	    asoc->smallest_mtu);
1034 #endif
1035 	asoc->minrto = m->sctp_ep.sctp_minrto;
1036 	asoc->maxrto = m->sctp_ep.sctp_maxrto;
1037 
1038 	asoc->locked_on_sending = NULL;
1039 	asoc->stream_locked_on = 0;
1040 	asoc->ecn_echo_cnt_onq = 0;
1041 	asoc->stream_locked = 0;
1042 
1043 	asoc->send_sack = 1;
1044 
1045 	LIST_INIT(&asoc->sctp_restricted_addrs);
1046 
1047 	TAILQ_INIT(&asoc->nets);
1048 	TAILQ_INIT(&asoc->pending_reply_queue);
1049 	TAILQ_INIT(&asoc->asconf_ack_sent);
1050 	/* Setup to fill the hb random cache at first HB */
1051 	asoc->hb_random_idx = 4;
1052 
1053 	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
1054 
1055 	/*
1056 	 * JRS - Pick the default congestion control module based on the
1057 	 * sysctl.
1058 	 */
1059 	switch (m->sctp_ep.sctp_default_cc_module) {
1060 		/* JRS - Standard TCP congestion control */
1061 	case SCTP_CC_RFC2581:
1062 		{
1063 			stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1064 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1065 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1066 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1067 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1068 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1069 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1070 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1071 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1072 			break;
1073 		}
1074 		/* JRS - High Speed TCP congestion control (Floyd) */
1075 	case SCTP_CC_HSTCP:
1076 		{
1077 			stcb->asoc.congestion_control_module = SCTP_CC_HSTCP;
1078 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1079 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_hs_cwnd_update_after_sack;
1080 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_hs_cwnd_update_after_fr;
1081 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1082 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1083 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1084 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1085 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1086 			break;
1087 		}
1088 		/* JRS - HTCP congestion control */
1089 	case SCTP_CC_HTCP:
1090 		{
1091 			stcb->asoc.congestion_control_module = SCTP_CC_HTCP;
1092 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_htcp_set_initial_cc_param;
1093 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_htcp_cwnd_update_after_sack;
1094 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_htcp_cwnd_update_after_fr;
1095 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_htcp_cwnd_update_after_timeout;
1096 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_htcp_cwnd_update_after_ecn_echo;
1097 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1098 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1099 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_htcp_cwnd_update_after_fr_timer;
1100 			break;
1101 		}
1102 		/* JRS - By default, use RFC2581 */
1103 	default:
1104 		{
1105 			stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1106 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1107 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1108 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1109 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1110 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1111 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1112 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1113 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1114 			break;
1115 		}
1116 	}
1117 
1118 	/*
1119 	 * Now the stream parameters, here we allocate space for all streams
1120 	 * that we request by default.
1121 	 */
1122 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1123 	    m->sctp_ep.pre_open_stream_count;
1124 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1125 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1126 	    SCTP_M_STRMO);
1127 	if (asoc->strmout == NULL) {
1128 		/* big trouble no memory */
1129 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1130 		return (ENOMEM);
1131 	}
1132 	for (i = 0; i < asoc->streamoutcnt; i++) {
1133 		/*
1134 		 * inbound side must be set to 0xffff, also NOTE when we get
1135 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1136 		 * count (streamoutcnt) but first check if we sent to any of
1137 		 * the upper streams that were dropped (if some were). Those
1138 		 * that were dropped must be notified to the upper layer as
1139 		 * failed to send.
1140 		 */
1141 		asoc->strmout[i].next_sequence_sent = 0x0;
1142 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1143 		asoc->strmout[i].stream_no = i;
1144 		asoc->strmout[i].last_msg_incomplete = 0;
1145 		asoc->strmout[i].next_spoke.tqe_next = 0;
1146 		asoc->strmout[i].next_spoke.tqe_prev = 0;
1147 	}
1148 	/* Now the mapping array */
1149 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1150 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1151 	    SCTP_M_MAP);
1152 	if (asoc->mapping_array == NULL) {
1153 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1154 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1155 		return (ENOMEM);
1156 	}
1157 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1158 	/* EY  - initialize the nr_mapping_array just like mapping array */
1159 	asoc->nr_mapping_array_size = SCTP_INITIAL_NR_MAPPING_ARRAY;
1160 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->nr_mapping_array_size,
1161 	    SCTP_M_MAP);
1162 	/*
1163 	 * if (asoc->nr_mapping_array == NULL) { SCTP_FREE(asoc->strmout,
1164 	 * SCTP_M_STRMO); SCTP_LTRACE_ERR_RET(NULL, stcb, NULL,
1165 	 * SCTP_FROM_SCTPUTIL, ENOMEM); return (ENOMEM); }
1166 	 */
1167 	memset(asoc->nr_mapping_array, 0, asoc->nr_mapping_array_size);
1168 
1169 	/* Now the init of the other outqueues */
1170 	TAILQ_INIT(&asoc->free_chunks);
1171 	TAILQ_INIT(&asoc->out_wheel);
1172 	TAILQ_INIT(&asoc->control_send_queue);
1173 	TAILQ_INIT(&asoc->asconf_send_queue);
1174 	TAILQ_INIT(&asoc->send_queue);
1175 	TAILQ_INIT(&asoc->sent_queue);
1176 	TAILQ_INIT(&asoc->reasmqueue);
1177 	TAILQ_INIT(&asoc->resetHead);
1178 	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
1179 	TAILQ_INIT(&asoc->asconf_queue);
1180 	/* authentication fields */
1181 	asoc->authinfo.random = NULL;
1182 	asoc->authinfo.active_keyid = 0;
1183 	asoc->authinfo.assoc_key = NULL;
1184 	asoc->authinfo.assoc_keyid = 0;
1185 	asoc->authinfo.recv_key = NULL;
1186 	asoc->authinfo.recv_keyid = 0;
1187 	LIST_INIT(&asoc->shared_keys);
1188 	asoc->marked_retrans = 0;
1189 	asoc->timoinit = 0;
1190 	asoc->timodata = 0;
1191 	asoc->timosack = 0;
1192 	asoc->timoshutdown = 0;
1193 	asoc->timoheartbeat = 0;
1194 	asoc->timocookie = 0;
1195 	asoc->timoshutdownack = 0;
1196 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1197 	asoc->discontinuity_time = asoc->start_time;
1198 	/*
1199 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1200 	 * freed later whe the association is freed.
1201 	 */
1202 	return (0);
1203 }
1204 
1205 int
1206 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1207 {
1208 	/* mapping array needs to grow */
1209 	uint8_t *new_array;
1210 	uint32_t new_size;
1211 
1212 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1213 	SCTP_MALLOC(new_array, uint8_t *, new_size, SCTP_M_MAP);
1214 	if (new_array == NULL) {
1215 		/* can't get more, forget it */
1216 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n",
1217 		    new_size);
1218 		return (-1);
1219 	}
1220 	memset(new_array, 0, new_size);
1221 	memcpy(new_array, asoc->mapping_array, asoc->mapping_array_size);
1222 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1223 	asoc->mapping_array = new_array;
1224 	asoc->mapping_array_size = new_size;
1225 	return (0);
1226 }
1227 
1228 /* EY - nr_sack version of the above method */
1229 int
1230 sctp_expand_nr_mapping_array(struct sctp_association *asoc, uint32_t needed)
1231 {
1232 	/* nr mapping array needs to grow */
1233 	uint8_t *new_array;
1234 	uint32_t new_size;
1235 
1236 	new_size = asoc->nr_mapping_array_size + ((needed + 7) / 8 + SCTP_NR_MAPPING_ARRAY_INCR);
1237 	SCTP_MALLOC(new_array, uint8_t *, new_size, SCTP_M_MAP);
1238 	if (new_array == NULL) {
1239 		/* can't get more, forget it */
1240 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n",
1241 		    new_size);
1242 		return (-1);
1243 	}
1244 	memset(new_array, 0, new_size);
1245 	memcpy(new_array, asoc->nr_mapping_array, asoc->nr_mapping_array_size);
1246 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1247 	asoc->nr_mapping_array = new_array;
1248 	asoc->nr_mapping_array_size = new_size;
1249 	return (0);
1250 }
1251 
1252 #if defined(SCTP_USE_THREAD_BASED_ITERATOR)
/*
 * Run one queued iterator to completion: walk the global endpoint (inp)
 * list, and for each endpoint whose flags/features match the iterator's
 * filters, invoke the per-endpoint and per-association callbacks.  The
 * iterator structure itself is freed here when the walk finishes.
 *
 * Lock order used below: ITERATOR lock -> INP lock -> TCB lock.  The
 * TCB walk periodically drops and re-takes locks (see the
 * SCTP_ITERATOR_MAX_AT_ONCE pause) to let other threads make progress.
 * NOTE(review): lock/refcount choreography here is deliberate and
 * order-sensitive; code is left byte-identical, comments only.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;

	SCTP_ITERATOR_LOCK();
	/* Drop the reference taken when this iterator was queued. */
	if (it->inp) {
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		/* Fire the completion callback, then free the iterator. */
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	SCTP_INP_WLOCK(it->inp);
	/* Skip endpoints that don't match the requested flags/features. */
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			/* single-endpoint mode: nothing else to try */
			SCTP_INP_WUNLOCK(it->inp);
			goto done_with_iterator;
		}
		SCTP_INP_WUNLOCK(it->inp);
		it->inp = LIST_NEXT(it->inp, sctp_list);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_WLOCK(it->inp);
	}

	/* Downgrade: the assoc walk only needs a read lock on the inp. */
	SCTP_INP_WUNLOCK(it->inp);
	SCTP_INP_RLOCK(it->inp);

	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	/* Per-endpoint callback asked to skip, or no assocs: wrap up ep. */
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/* refcnt holds the stcb alive while unlocked */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);

			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_ITERATOR_LOCK();
			SCTP_INP_RLOCK(it->inp);

			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	/*
	 * NOTE(review): the WLOCK/WUNLOCK pair below looks like a
	 * serialization point (waiting out any writer) rather than
	 * protecting state -- confirm before simplifying.
	 */
	SCTP_INP_WLOCK(it->inp);
	SCTP_INP_WUNLOCK(it->inp);
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		SCTP_INP_INFO_RLOCK();
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_INFO_RUNLOCK();
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1379 
1380 void
1381 sctp_iterator_worker(void)
1382 {
1383 	struct sctp_iterator *it = NULL;
1384 
1385 	/* This function is called with the WQ lock in place */
1386 
1387 	SCTP_BASE_INFO(iterator_running) = 1;
1388 again:
1389 	it = TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead));
1390 	while (it) {
1391 		/* now lets work on this one */
1392 		TAILQ_REMOVE(&SCTP_BASE_INFO(iteratorhead), it, sctp_nxt_itr);
1393 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1394 		sctp_iterator_work(it);
1395 		SCTP_IPI_ITERATOR_WQ_LOCK();
1396 		/* sa_ignore FREED_MEMORY */
1397 		it = TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead));
1398 	}
1399 	if (TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead))) {
1400 		goto again;
1401 	}
1402 	SCTP_BASE_INFO(iterator_running) = 0;
1403 	return;
1404 }
1405 
1406 #endif
1407 
1408 
1409 static void
1410 sctp_handle_addr_wq(void)
1411 {
1412 	/* deal with the ADDR wq from the rtsock calls */
1413 	struct sctp_laddr *wi;
1414 	struct sctp_asconf_iterator *asc;
1415 
1416 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1417 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1418 	if (asc == NULL) {
1419 		/* Try later, no memory */
1420 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1421 		    (struct sctp_inpcb *)NULL,
1422 		    (struct sctp_tcb *)NULL,
1423 		    (struct sctp_nets *)NULL);
1424 		return;
1425 	}
1426 	LIST_INIT(&asc->list_of_work);
1427 	asc->cnt = 0;
1428 	SCTP_IPI_ITERATOR_WQ_LOCK();
1429 	wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
1430 	while (wi != NULL) {
1431 		LIST_REMOVE(wi, sctp_nxt_addr);
1432 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1433 		asc->cnt++;
1434 		wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
1435 	}
1436 	SCTP_IPI_ITERATOR_WQ_UNLOCK();
1437 	if (asc->cnt == 0) {
1438 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1439 	} else {
1440 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1441 		    sctp_asconf_iterator_stcb,
1442 		    NULL,	/* No ep end for boundall */
1443 		    SCTP_PCB_FLAGS_BOUNDALL,
1444 		    SCTP_PCB_ANY_FEATURES,
1445 		    SCTP_ASOC_ANY_STATE,
1446 		    (void *)asc, 0,
1447 		    sctp_asconf_iterator_end, NULL, 0);
1448 	}
1449 }
1450 
/*
 * NOTE(review): file-scope, non-static scratch variables written by
 * sctp_timeout_handler() below (T3 retransmit case).  Being globals,
 * concurrent timer expirations could race on them -- presumably benign
 * since each value is only read immediately after it is written, but
 * consider making them locals (and `static` if any external reference
 * exists is ruled out).  TODO confirm nothing outside this file uses them.
 */
int retcode = 0;
int cur_oerr = 0;
1453 
/*
 * Central callout handler for every SCTP timer type.  `t` is the
 * struct sctp_timer embedded in the owning object; its ep/tcb/net
 * fields are recovered below.  The function:
 *   1) sanity-checks the timer (self pointer, valid type, still active),
 *   2) takes a reference on the inp and a refcount/lock on the stcb so
 *      neither can be freed while the handler runs,
 *   3) dispatches on tmr->type, and
 *   4) releases references via the get_out/out_decr/out_no_decr tails.
 * Cases that free the stcb or inp themselves jump to out_decr /
 * out_no_decr to avoid double-unlock of freed objects.
 * NOTE(review): tmr->stopped_from is bread-crumb debugging state showing
 * how far the handler progressed; code left byte-identical, comments only.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output, type;
	struct sctp_iterator *it = NULL;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * tmr);
		 */
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		return;
	}
	tmr->stopped_from = 0xa002;
	/* Only the ADDR_WQ timer may legitimately fire without an inp. */
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	if (tmr->type == SCTP_TIMER_TYPE_ITERATOR) {
		it = (struct sctp_iterator *)inp;
		inp = NULL;
	}
	type = tmr->type;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		/*
		 * If the socket is gone, only the timer types that must
		 * still run (teardown/cleanup related) are allowed through.
		 */
		if ((inp->sctp_socket == 0) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			SCTP_INP_DECR_REF(inp);
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* Pin the stcb while we examine its state. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			/* association already gone; bail out */
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/* timer was stopped while we were getting here */
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		SCTP_TCB_LOCK(stcb);
		/* lock held now; the temporary refcount can be dropped */
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			return;
		}
	}
	/* record in stopped what t-o occured */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_ITERATOR:
		SCTP_STAT_INCR(sctps_timoiterator);
		sctp_iterator_timer(it);
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		cur_oerr = stcb->asoc.overall_error_count;
		retcode = sctp_t3rxt_timer(inp, stcb, net);
		if (retcode) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)
		    ) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/* T1-init retransmission timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* delayed-SACK timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		} {
			int abort_flag;

			SCTP_STAT_INCR(sctps_timosack);
			stcb->asoc.timosack++;
			if (stcb->asoc.cumulative_tsn != stcb->asoc.highest_tsn_inside_map)
				sctp_sack_check(stcb, 0, 0, &abort_flag);

			/*
			 * EY if nr_sacks used then send an nr-sack , a sack
			 * otherwise
			 */
			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
				sctp_send_nr_sack(stcb);
			else
				sctp_send_sack(stcb);
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		{
			struct sctp_nets *lnet;
			int cnt_of_unconf = 0;

			if ((stcb == NULL) || (inp == NULL)) {
				break;
			}
			SCTP_STAT_INCR(sctps_timoheartbeat);
			stcb->asoc.timoheartbeat++;
			/* count destinations still unconfirmed but reachable */
			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
					cnt_of_unconf++;
				}
			}
			if (cnt_of_unconf == 0) {
				/*
				 * NOTE(review): lnet is NULL here (loop ran
				 * off the list); sctp_heartbeat_timer is
				 * presumably expected to handle that --
				 * confirm against its definition.
				 */
				if (sctp_heartbeat_timer(inp, stcb, lnet,
				    cnt_of_unconf)) {
					/* no need to unlock on tcb its gone */
					goto out_decr;
				}
			}
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(4, inp, stcb, lnet);
#endif
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
			    stcb->sctp_ep, stcb, lnet);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		{
			/* rotate the endpoint's cookie secret keys */
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		/* guard expired: abort the whole association */
		sctp_abort_an_association(inp, stcb,
		    SCTP_SHUTDOWN_GUARD_EXPIRES, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_EARLYFR:
		/* Need to do FR of things for net */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoearlyfr);
		sctp_early_fr_timer(inp, stcb, net);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/* lock order: socket lock before TCB lock on these platforms */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_DIRECTLY_NOCMPSET);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	};
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    type);
}
1930 
/*
 * Arm the timer of the given type for this endpoint/association/network.
 * Selects the appropriate struct sctp_timer and computes the timeout in
 * ticks; RTO-based timers fall back to the association's initial_rto
 * when the net has no RTT measurement yet (net->RTO == 0).  If the
 * chosen timer is already pending it is left running unchanged.
 */
void
sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int to_ticks;
	struct sctp_timer *tmr;

	/* Only the address work-queue timer may run without an endpoint. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
		return;

	to_ticks = 0;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		/* Only 1 tick away :-) */
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		to_ticks = SCTP_ADDRESS_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ITERATOR:
		{
			struct sctp_iterator *it;

			/* for the iterator, 'inp' actually carries the iterator */
			it = (struct sctp_iterator *)inp;
			tmr = &it->tmr;
			to_ticks = SCTP_ITERATOR_TICKS;
		}
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* Here we use the RTO timer */
		{
			int rto_val;

			if ((stcb == NULL) || (net == NULL)) {
				return;
			}
			tmr = &net->rxt_timer;
			if (net->RTO == 0) {
				/* no RTT measured yet on this net */
				rto_val = stcb->asoc.initial_rto;
			} else {
				rto_val = net->RTO;
			}
			to_ticks = MSEC_TO_TICKS(rto_val);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/*
		 * Here we use the INIT timer default usually about 1
		 * minute.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_RECV:
		/*
		 * Here we use the Delayed-Ack timer value from the inp
		 * ususually about 200ms.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		/* Here we use the RTO of the destination. */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		/*
		 * the net is used here so that we can add in the RTO. Even
		 * though we use a different timer. We also add the HB timer
		 * PLUS a random jitter.
		 */
		if ((inp == NULL) || (stcb == NULL)) {
			return;
		} else {
			uint32_t rndval;
			uint8_t this_random;
			int cnt_of_unconf = 0;
			struct sctp_nets *lnet;

			/* count reachable-but-unconfirmed destinations */
			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
					cnt_of_unconf++;
				}
			}
			if (cnt_of_unconf) {
				/* heartbeat the unconfirmed addresses now */
				net = lnet = NULL;
				(void)sctp_heartbeat_timer(inp, stcb, lnet, cnt_of_unconf);
			}
			if (stcb->asoc.hb_random_idx > 3) {
				/* random pool exhausted - refill all 4 bytes */
				rndval = sctp_select_initial_TSN(&inp->sctp_ep);
				memcpy(stcb->asoc.hb_random_values, &rndval,
				    sizeof(stcb->asoc.hb_random_values));
				stcb->asoc.hb_random_idx = 0;
			}
			this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
			stcb->asoc.hb_random_idx++;
			stcb->asoc.hb_ect_randombit = 0;
			/*
			 * this_random will be 0 - 256 ms RTO is in ms.
			 */
			if ((stcb->asoc.hb_is_disabled) &&
			    (cnt_of_unconf == 0)) {
				return;
			}
			if (net) {
				int delay;

				delay = stcb->asoc.heart_beat_delay;
				TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
					/* no extra delay while unconfirmed addrs exist */
					if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
					    ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) &&
					    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
						delay = 0;
					}
				}
				if (net->RTO == 0) {
					/* Never been checked */
					to_ticks = this_random + stcb->asoc.initial_rto + delay;
				} else {
					/* set rto_val to the ms */
					to_ticks = delay + net->RTO + this_random;
				}
			} else {
				if (cnt_of_unconf) {
					to_ticks = this_random + stcb->asoc.initial_rto;
				} else {
					to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto;
				}
			}
			/*
			 * Now we must convert the to_ticks that are now in
			 * ms to ticks.
			 */
			to_ticks = MSEC_TO_TICKS(to_ticks);
			tmr = &stcb->asoc.hb_timer;
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		/*
		 * Here we can use the RTO timer from the network since one
		 * RTT was compelete. If a retran happened then we will be
		 * using the RTO initial value.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/*
		 * nothing needed but the endpoint here ususually about 60
		 * minutes.
		 */
		if (inp == NULL) {
			return;
		}
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if (stcb == NULL) {
			return;
		}
		/* re-uses the stream-reset timer slot */
		tmr = &stcb->asoc.strreset_timer;
		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		if (inp == NULL) {
			return;
		}
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		/*
		 * Here we use the value found in the EP for PMTU ususually
		 * about 10 minutes.
		 */
		if ((stcb == NULL) || (inp == NULL)) {
			return;
		}
		if (net == NULL) {
			return;
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		/* Here we use the RTO of the destination */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/*
		 * Here we use the endpoints shutdown guard timer usually
		 * about 3 minutes.
		 */
		if ((inp == NULL) || (stcb == NULL)) {
			return;
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_EARLYFR:
		{
			unsigned int msec;

			if ((stcb == NULL) || (net == NULL)) {
				return;
			}
			if (net->flight_size > net->cwnd) {
				/* no need to start */
				return;
			}
			SCTP_STAT_INCR(sctps_earlyfrstart);
			if (net->lastsa == 0) {
				/* Hmm no rtt estimate yet? */
				msec = stcb->asoc.initial_rto >> 2;
			} else {
				msec = ((net->lastsa >> 2) + net->lastsv) >> 1;
			}
			/* clamp to the configured early-FR floor */
			if (msec < SCTP_BASE_SYSCTL(sctp_early_fr_msec)) {
				msec = SCTP_BASE_SYSCTL(sctp_early_fr_msec);
				if (msec < SCTP_MINFR_MSEC_FLOOR) {
					msec = SCTP_MINFR_MSEC_FLOOR;
				}
			}
			to_ticks = MSEC_TO_TICKS(msec);
			tmr = &net->fr_timer;
		}
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (net != NULL)) {
			return;
		}
		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		if (stcb->asoc.sctp_autoclose_ticks == 0) {
			/*
			 * Really an error since stcb is NOT set to
			 * autoclose
			 */
			return;
		}
		to_ticks = stcb->asoc.sctp_autoclose_ticks;
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		return;
		break;
	};
	if ((to_ticks <= 0) || (tmr == NULL)) {
		/* every case above must have set both; this is a bug trap */
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
		    __FUNCTION__, t_type, to_ticks, tmr);
		return;
	}
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * we do NOT allow you to have it already running. if it is
		 * we leave the current one up unchanged
		 */
		return;
	}
	/* At this point we can proceed */
	if (t_type == SCTP_TIMER_TYPE_SEND) {
		stcb->asoc.num_send_timers_up++;
	}
	tmr->stopped_from = 0;
	tmr->type = t_type;
	tmr->ep = (void *)inp;
	tmr->tcb = (void *)stcb;
	tmr->net = (void *)net;
	tmr->self = (void *)tmr;
	tmr->ticks = sctp_get_tick_count();
	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
	return;
}
2293 
/*
 * Stop the timer of the given type.  Because some sctp_timer slots are
 * shared between timer types (e.g. strreset/asockill, newcookie/inpkill,
 * rxt timer for SEND/INIT/COOKIE/...), the stop is a no-op when the slot
 * currently runs a different type.  'from' records who stopped it, for
 * debugging.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* Only the address work-queue timer may be stopped without an inp. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_EARLYFR:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->fr_timer;
		SCTP_STAT_INCR(sctps_earlyfrstop);
		break;
	case SCTP_TIMER_TYPE_ITERATOR:
		{
			struct sctp_iterator *it;

			/* for the iterator, 'inp' actually carries the iterator */
			it = (struct sctp_iterator *)inp;
			tmr = &it->tmr;
		}
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		break;
	};
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		return;
	}
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		/* keep the outstanding SEND-timer count consistent */
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2465 
2466 uint32_t
2467 sctp_calculate_len(struct mbuf *m)
2468 {
2469 	uint32_t tlen = 0;
2470 	struct mbuf *at;
2471 
2472 	at = m;
2473 	while (at) {
2474 		tlen += SCTP_BUF_LEN(at);
2475 		at = SCTP_BUF_NEXT(at);
2476 	}
2477 	return (tlen);
2478 }
2479 
2480 void
2481 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2482     struct sctp_association *asoc, uint32_t mtu)
2483 {
2484 	/*
2485 	 * Reset the P-MTU size on this association, this involves changing
2486 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2487 	 * allow the DF flag to be cleared.
2488 	 */
2489 	struct sctp_tmit_chunk *chk;
2490 	unsigned int eff_mtu, ovh;
2491 
2492 #ifdef SCTP_PRINT_FOR_B_AND_M
2493 	SCTP_PRINTF("sctp_mtu_size_reset(%p, asoc:%p mtu:%d\n",
2494 	    inp, asoc, mtu);
2495 #endif
2496 	asoc->smallest_mtu = mtu;
2497 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2498 		ovh = SCTP_MIN_OVERHEAD;
2499 	} else {
2500 		ovh = SCTP_MIN_V4_OVERHEAD;
2501 	}
2502 	eff_mtu = mtu - ovh;
2503 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2504 
2505 		if (chk->send_size > eff_mtu) {
2506 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2507 		}
2508 	}
2509 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2510 		if (chk->send_size > eff_mtu) {
2511 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2512 		}
2513 	}
2514 }
2515 
2516 
2517 /*
2518  * given an association and starting time of the current RTT period return
2519  * RTO in number of msecs net should point to the current network
2520  */
/*
 * Compute a new RTO (in ms) for 'net' from the RTT sample that started
 * at *told and ends now, using Van Jacobson's integer SRTT/RTTVAR
 * algorithm, then bound the result by the association's min/max RTO.
 * 'safe' selects whether *told must first be copied to an aligned
 * local (sparc64 alignment workaround).
 */
uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *told,
    int safe)
{
	/*-
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in number of msecs.
	 */
	int calc_time = 0;	/* measured RTT in ms */
	int o_calctime;		/* unmodified copy of calc_time */
	uint32_t new_rto = 0;
	int first_measure = 0;
	struct timeval now, then, *old;

	/* Copy it out for sparc64 */
	if (safe == sctp_align_unsafe_makecopy) {
		old = &then;
		memcpy(&then, told, sizeof(struct timeval));
	} else if (safe == sctp_align_safe_nocopy) {
		old = told;
	} else {
		/* error */
		SCTP_PRINTF("Huh, bad rto calc call\n");
		return (0);
	}
	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	(void)SCTP_GETTIME_TIMEVAL(&now);
	/* compute the RTT value */
	if ((u_long)now.tv_sec > (u_long)old->tv_sec) {
		calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000;
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time += (((u_long)now.tv_usec -
			    (u_long)old->tv_usec) / 1000);
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* Borrow 1,000ms from current calculation */
			calc_time -= 1000;
			/* Add in the slop over */
			calc_time += ((int)now.tv_usec / 1000);
			/* Add in the pre-second ms's */
			calc_time += (((int)1000000 - (int)old->tv_usec) / 1000);
		}
	} else if ((u_long)now.tv_sec == (u_long)old->tv_sec) {
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time = ((u_long)now.tv_usec -
			    (u_long)old->tv_usec) / 1000;
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* impossible .. garbage in nothing out */
			goto calc_rto;
		} else if ((u_long)now.tv_usec == (u_long)old->tv_usec) {
			/*
			 * We have to have 1 usec :-D this must be the
			 * loopback.
			 */
			calc_time = 1;
		} else {
			/* impossible .. garbage in nothing out */
			/* NOTE(review): unreachable - the three cases above are exhaustive */
			goto calc_rto;
		}
	} else {
		/* Clock wrapped? */
		goto calc_rto;
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	o_calctime = calc_time;
	/* this is Van Jacobson's integer version */
	if (net->RTO_measured) {
		/* lastsa holds 8*SRTT; calc_time becomes the error term */
		calc_time -= (net->lastsa >> SCTP_RTT_SHIFT);	/* take away 1/8th when
								 * shift=3 */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
		net->prev_rtt = o_calctime;
		net->lastsa += calc_time;	/* add 7/8th into sa when
						 * shift=3 */
		if (calc_time < 0) {
			/* fold in |error| for the variance update */
			calc_time = -calc_time;
		}
		calc_time -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);	/* take away 1/4 when
									 * VAR shift=2 */
		net->lastsv += calc_time;
		if (net->lastsv == 0) {
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
	} else {
		/* First RTO measurment */
		net->RTO_measured = 1;
		net->lastsa = calc_time << SCTP_RTT_SHIFT;	/* Multiply by 8 when
								 * shift=3 */
		net->lastsv = calc_time;
		if (net->lastsv == 0) {
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
		first_measure = 1;
		net->prev_rtt = o_calctime;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
calc_rto:
	/* RTO = SRTT + RTTVAR (RFC 4960 uses SRTT + 4*RTTVAR; lastsv is scaled) */
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}
2646 
2647 /*
2648  * return a pointer to a contiguous piece of data from the given mbuf chain
2649  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2650  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2651  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2652  */
2653 caddr_t
2654 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2655 {
2656 	uint32_t count;
2657 	uint8_t *ptr;
2658 
2659 	ptr = in_ptr;
2660 	if ((off < 0) || (len <= 0))
2661 		return (NULL);
2662 
2663 	/* find the desired start location */
2664 	while ((m != NULL) && (off > 0)) {
2665 		if (off < SCTP_BUF_LEN(m))
2666 			break;
2667 		off -= SCTP_BUF_LEN(m);
2668 		m = SCTP_BUF_NEXT(m);
2669 	}
2670 	if (m == NULL)
2671 		return (NULL);
2672 
2673 	/* is the current mbuf large enough (eg. contiguous)? */
2674 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2675 		return (mtod(m, caddr_t)+off);
2676 	} else {
2677 		/* else, it spans more than one mbuf, so save a temp copy... */
2678 		while ((m != NULL) && (len > 0)) {
2679 			count = min(SCTP_BUF_LEN(m) - off, len);
2680 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2681 			len -= count;
2682 			ptr += count;
2683 			off = 0;
2684 			m = SCTP_BUF_NEXT(m);
2685 		}
2686 		if ((m == NULL) && (len > 0))
2687 			return (NULL);
2688 		else
2689 			return ((caddr_t)in_ptr);
2690 	}
2691 }
2692 
2693 
2694 
2695 struct sctp_paramhdr *
2696 sctp_get_next_param(struct mbuf *m,
2697     int offset,
2698     struct sctp_paramhdr *pull,
2699     int pull_limit)
2700 {
2701 	/* This just provides a typed signature to Peter's Pull routine */
2702 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2703 	    (uint8_t *) pull));
2704 }
2705 
2706 
2707 int
2708 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2709 {
2710 	/*
2711 	 * add padlen bytes of 0 filled padding to the end of the mbuf. If
2712 	 * padlen is > 3 this routine will fail.
2713 	 */
2714 	uint8_t *dp;
2715 	int i;
2716 
2717 	if (padlen > 3) {
2718 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2719 		return (ENOBUFS);
2720 	}
2721 	if (padlen <= M_TRAILINGSPACE(m)) {
2722 		/*
2723 		 * The easy way. We hope the majority of the time we hit
2724 		 * here :)
2725 		 */
2726 		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2727 		SCTP_BUF_LEN(m) += padlen;
2728 	} else {
2729 		/* Hard way we must grow the mbuf */
2730 		struct mbuf *tmp;
2731 
2732 		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
2733 		if (tmp == NULL) {
2734 			/* Out of space GAK! we are in big trouble. */
2735 			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
2736 			return (ENOSPC);
2737 		}
2738 		/* setup and insert in middle */
2739 		SCTP_BUF_LEN(tmp) = padlen;
2740 		SCTP_BUF_NEXT(tmp) = NULL;
2741 		SCTP_BUF_NEXT(m) = tmp;
2742 		dp = mtod(tmp, uint8_t *);
2743 	}
2744 	/* zero out the pad */
2745 	for (i = 0; i < padlen; i++) {
2746 		*dp = 0;
2747 		dp++;
2748 	}
2749 	return (0);
2750 }
2751 
2752 int
2753 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2754 {
2755 	/* find the last mbuf in chain and pad it */
2756 	struct mbuf *m_at;
2757 
2758 	m_at = m;
2759 	if (last_mbuf) {
2760 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2761 	} else {
2762 		while (m_at) {
2763 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2764 				return (sctp_add_pad_tombuf(m_at, padval));
2765 			}
2766 			m_at = SCTP_BUF_NEXT(m_at);
2767 		}
2768 	}
2769 	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
2770 	return (EFAULT);
2771 }
2772 
/* Debug counter: bumped each time sctp_notify_assoc_change wakes sleepers. */
int sctp_asoc_change_wake = 0;
2774 
/*
 * Deliver an SCTP_ASSOC_CHANGE notification (event/error) to the
 * application via the socket's read queue.  For TCP-model or connected
 * one-to-one sockets, COMM_LOST/CANT_STR_ASSOC additionally sets
 * so_error (ECONNREFUSED while still in COOKIE_WAIT, else ECONNRESET)
 * and wakes any sleepers.  'data' is currently unused here.  On Apple
 * (or with SCTP_SO_LOCK_TESTING) the socket lock must be taken before
 * waking; so_locked says whether the caller already holds it.
 */
static void
sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an ABORT comes in.
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((event == SCTP_COMM_LOST) || (event == SCTP_CANT_STR_ASSOC))) {
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
			stcb->sctp_socket->so_error = ECONNREFUSED;
		} else {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
			stcb->sctp_socket->so_error = ECONNRESET;
		}
		/* Wake ANY sleepers */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/*
			 * Lock-order dance: hold a refcnt on the asoc so
			 * the stcb cannot go away while the TCB lock is
			 * dropped to take the socket lock.
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				/* socket closed while we dropped the lock */
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sorwakeup(stcb->sctp_socket);
		sowwakeup(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
		sctp_asoc_change_wake++;
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;

	/* build the sctp_assoc_change notification in the mbuf */
	sac = mtod(m_notify, struct sctp_assoc_change *);
	sac->sac_type = SCTP_ASSOC_CHANGE;
	sac->sac_flags = 0;
	sac->sac_length = sizeof(struct sctp_assoc_change);
	sac->sac_state = event;
	sac->sac_error = error;
	/* XXX verify these stream counts */
	sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
	sac->sac_inbound_streams = stcb->asoc.streamincnt;
	sac->sac_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_change);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, so_locked);
	if (event == SCTP_COMM_LOST) {
		/* Wake up any sleeper */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/* same refcnt/lock-order dance as above */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
2891 
2892 static void
2893 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2894     struct sockaddr *sa, uint32_t error)
2895 {
2896 	struct mbuf *m_notify;
2897 	struct sctp_paddr_change *spc;
2898 	struct sctp_queued_to_read *control;
2899 
2900 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2901 		/* event not enabled */
2902 		return;
2903 	}
2904 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
2905 	if (m_notify == NULL)
2906 		return;
2907 	SCTP_BUF_LEN(m_notify) = 0;
2908 	spc = mtod(m_notify, struct sctp_paddr_change *);
2909 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2910 	spc->spc_flags = 0;
2911 	spc->spc_length = sizeof(struct sctp_paddr_change);
2912 	switch (sa->sa_family) {
2913 	case AF_INET:
2914 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2915 		break;
2916 #ifdef INET6
2917 	case AF_INET6:
2918 		{
2919 			struct sockaddr_in6 *sin6;
2920 
2921 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2922 
2923 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2924 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2925 				if (sin6->sin6_scope_id == 0) {
2926 					/* recover scope_id for user */
2927 					(void)sa6_recoverscope(sin6);
2928 				} else {
2929 					/* clear embedded scope_id for user */
2930 					in6_clearscope(&sin6->sin6_addr);
2931 				}
2932 			}
2933 			break;
2934 		}
2935 #endif
2936 	default:
2937 		/* TSNH */
2938 		break;
2939 	}
2940 	spc->spc_state = state;
2941 	spc->spc_error = error;
2942 	spc->spc_assoc_id = sctp_get_associd(stcb);
2943 
2944 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2945 	SCTP_BUF_NEXT(m_notify) = NULL;
2946 
2947 	/* append to socket */
2948 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2949 	    0, 0, 0, 0, 0, 0,
2950 	    m_notify);
2951 	if (control == NULL) {
2952 		/* no memory */
2953 		sctp_m_freem(m_notify);
2954 		return;
2955 	}
2956 	control->length = SCTP_BUF_LEN(m_notify);
2957 	control->spec_flags = M_NOTIFICATION;
2958 	/* not that we need this */
2959 	control->tail_mbuf = m_notify;
2960 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2961 	    control,
2962 	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
2963 }
2964 
2965 
2966 static void
2967 sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
2968     struct sctp_tmit_chunk *chk, int so_locked
2969 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2970     SCTP_UNUSED
2971 #endif
2972 )
2973 {
2974 	struct mbuf *m_notify;
2975 	struct sctp_send_failed *ssf;
2976 	struct sctp_queued_to_read *control;
2977 	int length;
2978 
2979 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
2980 		/* event not enabled */
2981 		return;
2982 	}
2983 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
2984 	if (m_notify == NULL)
2985 		/* no space left */
2986 		return;
2987 	length = sizeof(struct sctp_send_failed) + chk->send_size;
2988 	length -= sizeof(struct sctp_data_chunk);
2989 	SCTP_BUF_LEN(m_notify) = 0;
2990 	ssf = mtod(m_notify, struct sctp_send_failed *);
2991 	ssf->ssf_type = SCTP_SEND_FAILED;
2992 	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
2993 		ssf->ssf_flags = SCTP_DATA_UNSENT;
2994 	else
2995 		ssf->ssf_flags = SCTP_DATA_SENT;
2996 	ssf->ssf_length = length;
2997 	ssf->ssf_error = error;
2998 	/* not exactly what the user sent in, but should be close :) */
2999 	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
3000 	ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
3001 	ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
3002 	ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
3003 	ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
3004 	ssf->ssf_info.sinfo_context = chk->rec.data.context;
3005 	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3006 	ssf->ssf_assoc_id = sctp_get_associd(stcb);
3007 
3008 	SCTP_BUF_NEXT(m_notify) = chk->data;
3009 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
3010 	if (chk->data) {
3011 		/*
3012 		 * trim off the sctp chunk header(it should be there)
3013 		 */
3014 		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
3015 			m_adj(chk->data, sizeof(struct sctp_data_chunk));
3016 			sctp_mbuf_crush(chk->data);
3017 			chk->send_size -= sizeof(struct sctp_data_chunk);
3018 		}
3019 	}
3020 	/* Steal off the mbuf */
3021 	chk->data = NULL;
3022 	/*
3023 	 * For this case, we check the actual socket buffer, since the assoc
3024 	 * is going away we don't want to overfill the socket buffer for a
3025 	 * non-reader
3026 	 */
3027 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3028 		sctp_m_freem(m_notify);
3029 		return;
3030 	}
3031 	/* append to socket */
3032 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3033 	    0, 0, 0, 0, 0, 0,
3034 	    m_notify);
3035 	if (control == NULL) {
3036 		/* no memory */
3037 		sctp_m_freem(m_notify);
3038 		return;
3039 	}
3040 	control->spec_flags = M_NOTIFICATION;
3041 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3042 	    control,
3043 	    &stcb->sctp_socket->so_rcv, 1, so_locked);
3044 }
3045 
3046 
3047 static void
3048 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
3049     struct sctp_stream_queue_pending *sp, int so_locked
3050 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3051     SCTP_UNUSED
3052 #endif
3053 )
3054 {
3055 	struct mbuf *m_notify;
3056 	struct sctp_send_failed *ssf;
3057 	struct sctp_queued_to_read *control;
3058 	int length;
3059 
3060 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
3061 		/* event not enabled */
3062 		return;
3063 	}
3064 	length = sizeof(struct sctp_send_failed) + sp->length;
3065 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
3066 	if (m_notify == NULL)
3067 		/* no space left */
3068 		return;
3069 	SCTP_BUF_LEN(m_notify) = 0;
3070 	ssf = mtod(m_notify, struct sctp_send_failed *);
3071 	ssf->ssf_type = SCTP_SEND_FAILED;
3072 	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
3073 		ssf->ssf_flags = SCTP_DATA_UNSENT;
3074 	else
3075 		ssf->ssf_flags = SCTP_DATA_SENT;
3076 	ssf->ssf_length = length;
3077 	ssf->ssf_error = error;
3078 	/* not exactly what the user sent in, but should be close :) */
3079 	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
3080 	ssf->ssf_info.sinfo_stream = sp->stream;
3081 	ssf->ssf_info.sinfo_ssn = sp->strseq;
3082 	if (sp->some_taken) {
3083 		ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
3084 	} else {
3085 		ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
3086 	}
3087 	ssf->ssf_info.sinfo_ppid = sp->ppid;
3088 	ssf->ssf_info.sinfo_context = sp->context;
3089 	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3090 	ssf->ssf_assoc_id = sctp_get_associd(stcb);
3091 	SCTP_BUF_NEXT(m_notify) = sp->data;
3092 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
3093 
3094 	/* Steal off the mbuf */
3095 	sp->data = NULL;
3096 	/*
3097 	 * For this case, we check the actual socket buffer, since the assoc
3098 	 * is going away we don't want to overfill the socket buffer for a
3099 	 * non-reader
3100 	 */
3101 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3102 		sctp_m_freem(m_notify);
3103 		return;
3104 	}
3105 	/* append to socket */
3106 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3107 	    0, 0, 0, 0, 0, 0,
3108 	    m_notify);
3109 	if (control == NULL) {
3110 		/* no memory */
3111 		sctp_m_freem(m_notify);
3112 		return;
3113 	}
3114 	control->spec_flags = M_NOTIFICATION;
3115 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3116 	    control,
3117 	    &stcb->sctp_socket->so_rcv, 1, so_locked);
3118 }
3119 
3120 
3121 
3122 static void
3123 sctp_notify_adaptation_layer(struct sctp_tcb *stcb,
3124     uint32_t error)
3125 {
3126 	struct mbuf *m_notify;
3127 	struct sctp_adaptation_event *sai;
3128 	struct sctp_queued_to_read *control;
3129 
3130 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3131 		/* event not enabled */
3132 		return;
3133 	}
3134 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
3135 	if (m_notify == NULL)
3136 		/* no space left */
3137 		return;
3138 	SCTP_BUF_LEN(m_notify) = 0;
3139 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3140 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3141 	sai->sai_flags = 0;
3142 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3143 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3144 	sai->sai_assoc_id = sctp_get_associd(stcb);
3145 
3146 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3147 	SCTP_BUF_NEXT(m_notify) = NULL;
3148 
3149 	/* append to socket */
3150 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3151 	    0, 0, 0, 0, 0, 0,
3152 	    m_notify);
3153 	if (control == NULL) {
3154 		/* no memory */
3155 		sctp_m_freem(m_notify);
3156 		return;
3157 	}
3158 	control->length = SCTP_BUF_LEN(m_notify);
3159 	control->spec_flags = M_NOTIFICATION;
3160 	/* not that we need this */
3161 	control->tail_mbuf = m_notify;
3162 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3163 	    control,
3164 	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
3165 }
3166 
3167 /* This always must be called with the read-queue LOCKED in the INP */
3168 void
3169 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3170     int nolock, uint32_t val)
3171 {
3172 	struct mbuf *m_notify;
3173 	struct sctp_pdapi_event *pdapi;
3174 	struct sctp_queued_to_read *control;
3175 	struct sockbuf *sb;
3176 
3177 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3178 		/* event not enabled */
3179 		return;
3180 	}
3181 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
3182 	if (m_notify == NULL)
3183 		/* no space left */
3184 		return;
3185 	SCTP_BUF_LEN(m_notify) = 0;
3186 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3187 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3188 	pdapi->pdapi_flags = 0;
3189 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3190 	pdapi->pdapi_indication = error;
3191 	pdapi->pdapi_stream = (val >> 16);
3192 	pdapi->pdapi_seq = (val & 0x0000ffff);
3193 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3194 
3195 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3196 	SCTP_BUF_NEXT(m_notify) = NULL;
3197 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3198 	    0, 0, 0, 0, 0, 0,
3199 	    m_notify);
3200 	if (control == NULL) {
3201 		/* no memory */
3202 		sctp_m_freem(m_notify);
3203 		return;
3204 	}
3205 	control->spec_flags = M_NOTIFICATION;
3206 	control->length = SCTP_BUF_LEN(m_notify);
3207 	/* not that we need this */
3208 	control->tail_mbuf = m_notify;
3209 	control->held_length = 0;
3210 	control->length = 0;
3211 	if (nolock == 0) {
3212 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
3213 	}
3214 	sb = &stcb->sctp_socket->so_rcv;
3215 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3216 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3217 	}
3218 	sctp_sballoc(stcb, sb, m_notify);
3219 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3220 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3221 	}
3222 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3223 	control->end_added = 1;
3224 	if (stcb->asoc.control_pdapi)
3225 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3226 	else {
3227 		/* we really should not see this case */
3228 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3229 	}
3230 	if (nolock == 0) {
3231 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
3232 	}
3233 	if (stcb->sctp_ep && stcb->sctp_socket) {
3234 		/* This should always be the case */
3235 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3236 	}
3237 }
3238 
/*
 * Queue an SCTP_SHUTDOWN_EVENT notification when the peer has initiated a
 * shutdown.  For one-to-one style (TCP model) sockets the socket is also
 * marked as no longer writable before the event is queued.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		/*
		 * Lock-order dance: the socket lock must be taken before
		 * the TCB lock, so bump the refcount (keeps the TCB alive),
		 * drop the TCB lock, take the socket lock, then re-acquire
		 * the TCB lock.  Do not reorder these statements.
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* the socket was closed while we were unlocked */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
}
3306 
3307 static void
3308 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3309     int so_locked
3310 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3311     SCTP_UNUSED
3312 #endif
3313 )
3314 {
3315 	struct mbuf *m_notify;
3316 	struct sctp_sender_dry_event *event;
3317 	struct sctp_queued_to_read *control;
3318 
3319 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_DRYEVNT)) {
3320 		/* event not enabled */
3321 		return;
3322 	}
3323 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_DONTWAIT, 1, MT_DATA);
3324 	if (m_notify == NULL) {
3325 		/* no space left */
3326 		return;
3327 	}
3328 	SCTP_BUF_LEN(m_notify) = 0;
3329 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3330 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3331 	event->sender_dry_flags = 0;
3332 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3333 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3334 
3335 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3336 	SCTP_BUF_NEXT(m_notify) = NULL;
3337 
3338 	/* append to socket */
3339 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3340 	    0, 0, 0, 0, 0, 0, m_notify);
3341 	if (control == NULL) {
3342 		/* no memory */
3343 		sctp_m_freem(m_notify);
3344 		return;
3345 	}
3346 	control->length = SCTP_BUF_LEN(m_notify);
3347 	control->spec_flags = M_NOTIFICATION;
3348 	/* not that we need this */
3349 	control->tail_mbuf = m_notify;
3350 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3351 	    &stcb->sctp_socket->so_rcv, 1, so_locked);
3352 }
3353 
3354 
3355 static void
3356 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, int number_entries, int flag)
3357 {
3358 	struct mbuf *m_notify;
3359 	struct sctp_queued_to_read *control;
3360 	struct sctp_stream_reset_event *strreset;
3361 	int len;
3362 
3363 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3364 		/* event not enabled */
3365 		return;
3366 	}
3367 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3368 	if (m_notify == NULL)
3369 		/* no space left */
3370 		return;
3371 	SCTP_BUF_LEN(m_notify) = 0;
3372 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3373 	if (len > M_TRAILINGSPACE(m_notify)) {
3374 		/* never enough room */
3375 		sctp_m_freem(m_notify);
3376 		return;
3377 	}
3378 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3379 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3380 	strreset->strreset_flags = SCTP_STRRESET_ADD_STREAM | flag;
3381 	strreset->strreset_length = len;
3382 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3383 	strreset->strreset_list[0] = number_entries;
3384 
3385 	SCTP_BUF_LEN(m_notify) = len;
3386 	SCTP_BUF_NEXT(m_notify) = NULL;
3387 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3388 		/* no space */
3389 		sctp_m_freem(m_notify);
3390 		return;
3391 	}
3392 	/* append to socket */
3393 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3394 	    0, 0, 0, 0, 0, 0,
3395 	    m_notify);
3396 	if (control == NULL) {
3397 		/* no memory */
3398 		sctp_m_freem(m_notify);
3399 		return;
3400 	}
3401 	control->spec_flags = M_NOTIFICATION;
3402 	control->length = SCTP_BUF_LEN(m_notify);
3403 	/* not that we need this */
3404 	control->tail_mbuf = m_notify;
3405 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3406 	    control,
3407 	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
3408 }
3409 
3410 
3411 static void
3412 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3413     int number_entries, uint16_t * list, int flag)
3414 {
3415 	struct mbuf *m_notify;
3416 	struct sctp_queued_to_read *control;
3417 	struct sctp_stream_reset_event *strreset;
3418 	int len;
3419 
3420 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3421 		/* event not enabled */
3422 		return;
3423 	}
3424 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3425 	if (m_notify == NULL)
3426 		/* no space left */
3427 		return;
3428 	SCTP_BUF_LEN(m_notify) = 0;
3429 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3430 	if (len > M_TRAILINGSPACE(m_notify)) {
3431 		/* never enough room */
3432 		sctp_m_freem(m_notify);
3433 		return;
3434 	}
3435 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3436 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3437 	if (number_entries == 0) {
3438 		strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
3439 	} else {
3440 		strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
3441 	}
3442 	strreset->strreset_length = len;
3443 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3444 	if (number_entries) {
3445 		int i;
3446 
3447 		for (i = 0; i < number_entries; i++) {
3448 			strreset->strreset_list[i] = ntohs(list[i]);
3449 		}
3450 	}
3451 	SCTP_BUF_LEN(m_notify) = len;
3452 	SCTP_BUF_NEXT(m_notify) = NULL;
3453 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3454 		/* no space */
3455 		sctp_m_freem(m_notify);
3456 		return;
3457 	}
3458 	/* append to socket */
3459 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3460 	    0, 0, 0, 0, 0, 0,
3461 	    m_notify);
3462 	if (control == NULL) {
3463 		/* no memory */
3464 		sctp_m_freem(m_notify);
3465 		return;
3466 	}
3467 	control->spec_flags = M_NOTIFICATION;
3468 	control->length = SCTP_BUF_LEN(m_notify);
3469 	/* not that we need this */
3470 	control->tail_mbuf = m_notify;
3471 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3472 	    control,
3473 	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
3474 }
3475 
3476 
3477 void
3478 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3479     uint32_t error, void *data, int so_locked
3480 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3481     SCTP_UNUSED
3482 #endif
3483 )
3484 {
3485 	if ((stcb == NULL) ||
3486 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3487 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3488 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3489 		/* If the socket is gone we are out of here */
3490 		return;
3491 	}
3492 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3493 		return;
3494 	}
3495 	if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3496 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
3497 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3498 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3499 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3500 			/* Don't report these in front states */
3501 			return;
3502 		}
3503 	}
3504 	switch (notification) {
3505 	case SCTP_NOTIFY_ASSOC_UP:
3506 		if (stcb->asoc.assoc_up_sent == 0) {
3507 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, so_locked);
3508 			stcb->asoc.assoc_up_sent = 1;
3509 		}
3510 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3511 			sctp_notify_adaptation_layer(stcb, error);
3512 		}
3513 		if (stcb->asoc.peer_supports_auth == 0) {
3514 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3515 			    NULL, so_locked);
3516 		}
3517 		break;
3518 	case SCTP_NOTIFY_ASSOC_DOWN:
3519 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, so_locked);
3520 		break;
3521 	case SCTP_NOTIFY_INTERFACE_DOWN:
3522 		{
3523 			struct sctp_nets *net;
3524 
3525 			net = (struct sctp_nets *)data;
3526 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3527 			    (struct sockaddr *)&net->ro._l_addr, error);
3528 			break;
3529 		}
3530 	case SCTP_NOTIFY_INTERFACE_UP:
3531 		{
3532 			struct sctp_nets *net;
3533 
3534 			net = (struct sctp_nets *)data;
3535 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3536 			    (struct sockaddr *)&net->ro._l_addr, error);
3537 			break;
3538 		}
3539 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3540 		{
3541 			struct sctp_nets *net;
3542 
3543 			net = (struct sctp_nets *)data;
3544 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3545 			    (struct sockaddr *)&net->ro._l_addr, error);
3546 			break;
3547 		}
3548 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3549 		sctp_notify_send_failed2(stcb, error,
3550 		    (struct sctp_stream_queue_pending *)data, so_locked);
3551 		break;
3552 	case SCTP_NOTIFY_DG_FAIL:
3553 		sctp_notify_send_failed(stcb, error,
3554 		    (struct sctp_tmit_chunk *)data, so_locked);
3555 		break;
3556 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3557 		{
3558 			uint32_t val;
3559 
3560 			val = *((uint32_t *) data);
3561 
3562 			sctp_notify_partial_delivery_indication(stcb, error, 0, val);
3563 		}
3564 		break;
3565 	case SCTP_NOTIFY_STRDATA_ERR:
3566 		break;
3567 	case SCTP_NOTIFY_ASSOC_ABORTED:
3568 		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3569 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
3570 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, NULL, so_locked);
3571 		} else {
3572 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, NULL, so_locked);
3573 		}
3574 		break;
3575 	case SCTP_NOTIFY_PEER_OPENED_STREAM:
3576 		break;
3577 	case SCTP_NOTIFY_STREAM_OPENED_OK:
3578 		break;
3579 	case SCTP_NOTIFY_ASSOC_RESTART:
3580 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data, so_locked);
3581 		if (stcb->asoc.peer_supports_auth == 0) {
3582 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3583 			    NULL, so_locked);
3584 		}
3585 		break;
3586 	case SCTP_NOTIFY_HB_RESP:
3587 		break;
3588 	case SCTP_NOTIFY_STR_RESET_INSTREAM_ADD_OK:
3589 		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_INBOUND_STR);
3590 		break;
3591 	case SCTP_NOTIFY_STR_RESET_ADD_OK:
3592 		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_OUTBOUND_STR);
3593 		break;
3594 	case SCTP_NOTIFY_STR_RESET_ADD_FAIL:
3595 		sctp_notify_stream_reset_add(stcb, error, (SCTP_STRRESET_FAILED | SCTP_STRRESET_OUTBOUND_STR));
3596 		break;
3597 
3598 	case SCTP_NOTIFY_STR_RESET_SEND:
3599 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR);
3600 		break;
3601 	case SCTP_NOTIFY_STR_RESET_RECV:
3602 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR);
3603 		break;
3604 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3605 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_FAILED));
3606 		break;
3607 
3608 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3609 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_INBOUND_STR | SCTP_STRRESET_FAILED));
3610 		break;
3611 
3612 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3613 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3614 		    error);
3615 		break;
3616 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3617 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3618 		    error);
3619 		break;
3620 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3621 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3622 		    error);
3623 		break;
3624 	case SCTP_NOTIFY_ASCONF_SUCCESS:
3625 		break;
3626 	case SCTP_NOTIFY_ASCONF_FAILED:
3627 		break;
3628 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3629 		sctp_notify_shutdown_event(stcb);
3630 		break;
3631 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3632 		sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error,
3633 		    (uint16_t) (uintptr_t) data,
3634 		    so_locked);
3635 		break;
3636 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3637 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3638 		    (uint16_t) (uintptr_t) data,
3639 		    so_locked);
3640 		break;
3641 	case SCTP_NOTIFY_NO_PEER_AUTH:
3642 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3643 		    (uint16_t) (uintptr_t) data,
3644 		    so_locked);
3645 		break;
3646 	case SCTP_NOTIFY_SENDER_DRY:
3647 		sctp_notify_sender_dry_event(stcb, so_locked);
3648 		break;
3649 	default:
3650 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3651 		    __FUNCTION__, notification, notification);
3652 		break;
3653 	}			/* end switch */
3654 }
3655 
3656 void
3657 sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock, int so_locked
3658 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3659     SCTP_UNUSED
3660 #endif
3661 )
3662 {
3663 	struct sctp_association *asoc;
3664 	struct sctp_stream_out *outs;
3665 	struct sctp_tmit_chunk *chk;
3666 	struct sctp_stream_queue_pending *sp;
3667 	int i;
3668 
3669 	asoc = &stcb->asoc;
3670 
3671 	if (stcb == NULL) {
3672 		return;
3673 	}
3674 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3675 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3676 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3677 		return;
3678 	}
3679 	/* now through all the gunk freeing chunks */
3680 	if (holds_lock == 0) {
3681 		SCTP_TCB_SEND_LOCK(stcb);
3682 	}
3683 	/* sent queue SHOULD be empty */
3684 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3685 		chk = TAILQ_FIRST(&asoc->sent_queue);
3686 		while (chk) {
3687 			TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3688 			asoc->sent_queue_cnt--;
3689 			sctp_free_bufspace(stcb, asoc, chk, 1);
3690 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3691 			    SCTP_NOTIFY_DATAGRAM_SENT, chk, so_locked);
3692 			if (chk->data) {
3693 				sctp_m_freem(chk->data);
3694 				chk->data = NULL;
3695 			}
3696 			sctp_free_a_chunk(stcb, chk);
3697 			/* sa_ignore FREED_MEMORY */
3698 			chk = TAILQ_FIRST(&asoc->sent_queue);
3699 		}
3700 	}
3701 	/* pending send queue SHOULD be empty */
3702 	if (!TAILQ_EMPTY(&asoc->send_queue)) {
3703 		chk = TAILQ_FIRST(&asoc->send_queue);
3704 		while (chk) {
3705 			TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3706 			asoc->send_queue_cnt--;
3707 			sctp_free_bufspace(stcb, asoc, chk, 1);
3708 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, SCTP_NOTIFY_DATAGRAM_UNSENT, chk, so_locked);
3709 			if (chk->data) {
3710 				sctp_m_freem(chk->data);
3711 				chk->data = NULL;
3712 			}
3713 			sctp_free_a_chunk(stcb, chk);
3714 			/* sa_ignore FREED_MEMORY */
3715 			chk = TAILQ_FIRST(&asoc->send_queue);
3716 		}
3717 	}
3718 	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3719 		/* For each stream */
3720 		outs = &stcb->asoc.strmout[i];
3721 		/* clean up any sends there */
3722 		stcb->asoc.locked_on_sending = NULL;
3723 		sp = TAILQ_FIRST(&outs->outqueue);
3724 		while (sp) {
3725 			stcb->asoc.stream_queue_cnt--;
3726 			TAILQ_REMOVE(&outs->outqueue, sp, next);
3727 			sctp_free_spbufspace(stcb, asoc, sp);
3728 			sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3729 			    SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp, so_locked);
3730 			if (sp->data) {
3731 				sctp_m_freem(sp->data);
3732 				sp->data = NULL;
3733 			}
3734 			if (sp->net)
3735 				sctp_free_remote_addr(sp->net);
3736 			sp->net = NULL;
3737 			/* Free the chunk */
3738 			sctp_free_a_strmoq(stcb, sp);
3739 			/* sa_ignore FREED_MEMORY */
3740 			sp = TAILQ_FIRST(&outs->outqueue);
3741 		}
3742 	}
3743 
3744 	if (holds_lock == 0) {
3745 		SCTP_TCB_SEND_UNLOCK(stcb);
3746 	}
3747 }
3748 
3749 void
3750 sctp_abort_notification(struct sctp_tcb *stcb, int error, int so_locked
3751 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3752     SCTP_UNUSED
3753 #endif
3754 )
3755 {
3756 
3757 	if (stcb == NULL) {
3758 		return;
3759 	}
3760 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3761 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3762 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3763 		return;
3764 	}
3765 	/* Tell them we lost the asoc */
3766 	sctp_report_all_outbound(stcb, 1, so_locked);
3767 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3768 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3769 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3770 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3771 	}
3772 	sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL, so_locked);
3773 }
3774 
/*
 * Abort an association in response to an inbound packet: notify the ULP
 * (when a TCB exists), send an ABORT to the peer built from the offending
 * packet 'm'/'sh', and free the TCB.  With no TCB, a socket already marked
 * gone may have its inpcb freed here instead.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, sh, vtag, op_err, vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock-order dance: take the socket lock before re-taking
		 * the TCB lock, holding a refcount so the TCB cannot be
		 * freed while unlocked.  Do not reorder these statements.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	} else {
		/* No TCB: free the inpcb if the socket is already gone. */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
	}
}
3820 
#ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the association's circular in/out TSN logs.
 * The logs wrap at SCTP_TSN_LOG_SIZE, so the wrapped tail (tsn_*_at ..
 * end) is printed first, then the head (0 .. tsn_*_at).
 *
 * NOTE(review): the inner guard is spelled 'NOSIY_PRINTS' (NOISY?);
 * unless that exact macro is defined the whole body compiles out.
 * Confirm before renaming, as callers rely only on the symbol existing.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	if (stcb->asoc.tsn_in_wrapped) {
		/* wrapped: print the old entries past the write cursor first */
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		/* then the newer entries up to the write cursor */
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		/* wrapped: old entries past the write cursor first */
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		/* then the newer entries up to the write cursor */
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}

#endif
3884 
/*
 * Abort an existing association from the local side: notify the ULP,
 * send an ABORT chunk (with optional cause 'op_err') to the peer, update
 * statistics, and free the association.  'so_locked' indicates whether
 * the caller already holds the socket lock (Apple/lock-testing builds).
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    int error, struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	vtag = stcb->asoc.peer_vtag;
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
		sctp_abort_notification(stcb, error, so_locked);
	/* notify the peer */
#if defined(SCTP_PANIC_ON_ABORT)
	panic("aborting an association");
#endif
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Lock-order dance: take the socket lock before re-taking the TCB
	 * lock, holding a refcount so the TCB cannot be freed while
	 * unlocked.  Only needed when the caller did not lock the socket.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
3950 
3951 void
3952 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
3953     struct sctp_inpcb *inp, struct mbuf *op_err, uint32_t vrf_id, uint16_t port)
3954 {
3955 	struct sctp_chunkhdr *ch, chunk_buf;
3956 	unsigned int chk_length;
3957 
3958 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
3959 	/* Generate a TO address for future reference */
3960 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
3961 		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
3962 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
3963 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
3964 		}
3965 	}
3966 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3967 	    sizeof(*ch), (uint8_t *) & chunk_buf);
3968 	while (ch != NULL) {
3969 		chk_length = ntohs(ch->chunk_length);
3970 		if (chk_length < sizeof(*ch)) {
3971 			/* break to abort land */
3972 			break;
3973 		}
3974 		switch (ch->chunk_type) {
3975 		case SCTP_COOKIE_ECHO:
3976 			/* We hit here only if the assoc is being freed */
3977 			return;
3978 		case SCTP_PACKET_DROPPED:
3979 			/* we don't respond to pkt-dropped */
3980 			return;
3981 		case SCTP_ABORT_ASSOCIATION:
3982 			/* we don't respond with an ABORT to an ABORT */
3983 			return;
3984 		case SCTP_SHUTDOWN_COMPLETE:
3985 			/*
3986 			 * we ignore it since we are not waiting for it and
3987 			 * peer is gone
3988 			 */
3989 			return;
3990 		case SCTP_SHUTDOWN_ACK:
3991 			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, port);
3992 			return;
3993 		default:
3994 			break;
3995 		}
3996 		offset += SCTP_SIZE32(chk_length);
3997 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3998 		    sizeof(*ch), (uint8_t *) & chunk_buf);
3999 	}
4000 	sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
4001 }
4002 
4003 /*
4004  * check the inbound datagram to make sure there is not an abort inside it,
4005  * if there is return 1, else return 0.
4006  */
4007 int
4008 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4009 {
4010 	struct sctp_chunkhdr *ch;
4011 	struct sctp_init_chunk *init_chk, chunk_buf;
4012 	int offset;
4013 	unsigned int chk_length;
4014 
4015 	offset = iphlen + sizeof(struct sctphdr);
4016 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4017 	    (uint8_t *) & chunk_buf);
4018 	while (ch != NULL) {
4019 		chk_length = ntohs(ch->chunk_length);
4020 		if (chk_length < sizeof(*ch)) {
4021 			/* packet is probably corrupt */
4022 			break;
4023 		}
4024 		/* we seem to be ok, is it an abort? */
4025 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4026 			/* yep, tell them */
4027 			return (1);
4028 		}
4029 		if (ch->chunk_type == SCTP_INITIATION) {
4030 			/* need to update the Vtag */
4031 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4032 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4033 			if (init_chk != NULL) {
4034 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4035 			}
4036 		}
4037 		/* Nope, move to the next chunk */
4038 		offset += SCTP_SIZE32(chk_length);
4039 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4040 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4041 	}
4042 	return (0);
4043 }
4044 
4045 /*
4046  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4047  * set (i.e. it's 0) so, create this function to compare link local scopes
4048  */
4049 #ifdef INET6
4050 uint32_t
4051 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4052 {
4053 	struct sockaddr_in6 a, b;
4054 
4055 	/* save copies */
4056 	a = *addr1;
4057 	b = *addr2;
4058 
4059 	if (a.sin6_scope_id == 0)
4060 		if (sa6_recoverscope(&a)) {
4061 			/* can't get scope, so can't match */
4062 			return (0);
4063 		}
4064 	if (b.sin6_scope_id == 0)
4065 		if (sa6_recoverscope(&b)) {
4066 			/* can't get scope, so can't match */
4067 			return (0);
4068 		}
4069 	if (a.sin6_scope_id != b.sin6_scope_id)
4070 		return (0);
4071 
4072 	return (1);
4073 }
4074 
4075 /*
4076  * returns a sockaddr_in6 with embedded scope recovered and removed
4077  */
4078 struct sockaddr_in6 *
4079 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4080 {
4081 	/* check and strip embedded scope junk */
4082 	if (addr->sin6_family == AF_INET6) {
4083 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4084 			if (addr->sin6_scope_id == 0) {
4085 				*store = *addr;
4086 				if (!sa6_recoverscope(store)) {
4087 					/* use the recovered scope */
4088 					addr = store;
4089 				}
4090 			} else {
4091 				/* else, return the original "to" addr */
4092 				in6_clearscope(&addr->sin6_addr);
4093 			}
4094 		}
4095 	}
4096 	return (addr);
4097 }
4098 
4099 #endif
4100 
4101 /*
4102  * are the two addresses the same?  currently a "scopeless" check returns: 1
4103  * if same, 0 if not
4104  */
4105 int
4106 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4107 {
4108 
4109 	/* must be valid */
4110 	if (sa1 == NULL || sa2 == NULL)
4111 		return (0);
4112 
4113 	/* must be the same family */
4114 	if (sa1->sa_family != sa2->sa_family)
4115 		return (0);
4116 
4117 	switch (sa1->sa_family) {
4118 #ifdef INET6
4119 	case AF_INET6:
4120 		{
4121 			/* IPv6 addresses */
4122 			struct sockaddr_in6 *sin6_1, *sin6_2;
4123 
4124 			sin6_1 = (struct sockaddr_in6 *)sa1;
4125 			sin6_2 = (struct sockaddr_in6 *)sa2;
4126 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4127 			    sin6_2));
4128 		}
4129 #endif
4130 	case AF_INET:
4131 		{
4132 			/* IPv4 addresses */
4133 			struct sockaddr_in *sin_1, *sin_2;
4134 
4135 			sin_1 = (struct sockaddr_in *)sa1;
4136 			sin_2 = (struct sockaddr_in *)sa2;
4137 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4138 		}
4139 	default:
4140 		/* we don't do these... */
4141 		return (0);
4142 	}
4143 }
4144 
4145 void
4146 sctp_print_address(struct sockaddr *sa)
4147 {
4148 #ifdef INET6
4149 	char ip6buf[INET6_ADDRSTRLEN];
4150 
4151 	ip6buf[0] = 0;
4152 #endif
4153 
4154 	switch (sa->sa_family) {
4155 #ifdef INET6
4156 	case AF_INET6:
4157 		{
4158 			struct sockaddr_in6 *sin6;
4159 
4160 			sin6 = (struct sockaddr_in6 *)sa;
4161 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4162 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4163 			    ntohs(sin6->sin6_port),
4164 			    sin6->sin6_scope_id);
4165 			break;
4166 		}
4167 #endif
4168 	case AF_INET:
4169 		{
4170 			struct sockaddr_in *sin;
4171 			unsigned char *p;
4172 
4173 			sin = (struct sockaddr_in *)sa;
4174 			p = (unsigned char *)&sin->sin_addr;
4175 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4176 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4177 			break;
4178 		}
4179 	default:
4180 		SCTP_PRINTF("?\n");
4181 		break;
4182 	}
4183 }
4184 
4185 void
4186 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
4187 {
4188 	switch (iph->ip_v) {
4189 		case IPVERSION:
4190 		{
4191 			struct sockaddr_in lsa, fsa;
4192 
4193 			bzero(&lsa, sizeof(lsa));
4194 			lsa.sin_len = sizeof(lsa);
4195 			lsa.sin_family = AF_INET;
4196 			lsa.sin_addr = iph->ip_src;
4197 			lsa.sin_port = sh->src_port;
4198 			bzero(&fsa, sizeof(fsa));
4199 			fsa.sin_len = sizeof(fsa);
4200 			fsa.sin_family = AF_INET;
4201 			fsa.sin_addr = iph->ip_dst;
4202 			fsa.sin_port = sh->dest_port;
4203 			SCTP_PRINTF("src: ");
4204 			sctp_print_address((struct sockaddr *)&lsa);
4205 			SCTP_PRINTF("dest: ");
4206 			sctp_print_address((struct sockaddr *)&fsa);
4207 			break;
4208 		}
4209 #ifdef INET6
4210 	case IPV6_VERSION >> 4:
4211 		{
4212 			struct ip6_hdr *ip6;
4213 			struct sockaddr_in6 lsa6, fsa6;
4214 
4215 			ip6 = (struct ip6_hdr *)iph;
4216 			bzero(&lsa6, sizeof(lsa6));
4217 			lsa6.sin6_len = sizeof(lsa6);
4218 			lsa6.sin6_family = AF_INET6;
4219 			lsa6.sin6_addr = ip6->ip6_src;
4220 			lsa6.sin6_port = sh->src_port;
4221 			bzero(&fsa6, sizeof(fsa6));
4222 			fsa6.sin6_len = sizeof(fsa6);
4223 			fsa6.sin6_family = AF_INET6;
4224 			fsa6.sin6_addr = ip6->ip6_dst;
4225 			fsa6.sin6_port = sh->dest_port;
4226 			SCTP_PRINTF("src: ");
4227 			sctp_print_address((struct sockaddr *)&lsa6);
4228 			SCTP_PRINTF("dest: ");
4229 			sctp_print_address((struct sockaddr *)&fsa6);
4230 			break;
4231 		}
4232 #endif
4233 	default:
4234 		/* TSNH */
4235 		break;
4236 	}
4237 }
4238 
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.  Any queued data
	 * is debited from the old socket's receive buffer and credited
	 * to the new one.  The move is done in two passes via tmp_queue
	 * so the old INP's read lock is never held at the same time as
	 * the new INP's.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	/* keep concurrent readers off the old receive buffer */
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	control = TAILQ_FIRST(&old_inp->read_queue);
	/* Pull off all for out target stcb */
	while (control) {
		/* save the successor before unlinking control */
		nctl = TAILQ_NEXT(control, next);
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* debit each mbuf from the old socket's rcv buffer */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
		control = nctl;
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	control = TAILQ_FIRST(&tmp_queue);
	SCTP_INP_READ_LOCK(new_inp);
	while (control) {
		nctl = TAILQ_NEXT(control, next);
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* credit each mbuf to the new socket's rcv buffer */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
		control = nctl;
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4320 
4321 void
4322 sctp_add_to_readq(struct sctp_inpcb *inp,
4323     struct sctp_tcb *stcb,
4324     struct sctp_queued_to_read *control,
4325     struct sockbuf *sb,
4326     int end,
4327     int so_locked
4328 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4329     SCTP_UNUSED
4330 #endif
4331 )
4332 {
4333 	/*
4334 	 * Here we must place the control on the end of the socket read
4335 	 * queue AND increment sb_cc so that select will work properly on
4336 	 * read.
4337 	 */
4338 	struct mbuf *m, *prev = NULL;
4339 
4340 	if (inp == NULL) {
4341 		/* Gak, TSNH!! */
4342 #ifdef INVARIANTS
4343 		panic("Gak, inp NULL on add_to_readq");
4344 #endif
4345 		return;
4346 	}
4347 	SCTP_INP_READ_LOCK(inp);
4348 	if (!(control->spec_flags & M_NOTIFICATION)) {
4349 		atomic_add_int(&inp->total_recvs, 1);
4350 		if (!control->do_not_ref_stcb) {
4351 			atomic_add_int(&stcb->total_recvs, 1);
4352 		}
4353 	}
4354 	m = control->data;
4355 	control->held_length = 0;
4356 	control->length = 0;
4357 	while (m) {
4358 		if (SCTP_BUF_LEN(m) == 0) {
4359 			/* Skip mbufs with NO length */
4360 			if (prev == NULL) {
4361 				/* First one */
4362 				control->data = sctp_m_free(m);
4363 				m = control->data;
4364 			} else {
4365 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
4366 				m = SCTP_BUF_NEXT(prev);
4367 			}
4368 			if (m == NULL) {
4369 				control->tail_mbuf = prev;;
4370 			}
4371 			continue;
4372 		}
4373 		prev = m;
4374 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4375 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4376 		}
4377 		sctp_sballoc(stcb, sb, m);
4378 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4379 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4380 		}
4381 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
4382 		m = SCTP_BUF_NEXT(m);
4383 	}
4384 	if (prev != NULL) {
4385 		control->tail_mbuf = prev;
4386 	} else {
4387 		/* Everything got collapsed out?? */
4388 		return;
4389 	}
4390 	if (end) {
4391 		control->end_added = 1;
4392 	}
4393 	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
4394 	SCTP_INP_READ_UNLOCK(inp);
4395 	if (inp && inp->sctp_socket) {
4396 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
4397 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
4398 		} else {
4399 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4400 			struct socket *so;
4401 
4402 			so = SCTP_INP_SO(inp);
4403 			if (!so_locked) {
4404 				atomic_add_int(&stcb->asoc.refcnt, 1);
4405 				SCTP_TCB_UNLOCK(stcb);
4406 				SCTP_SOCKET_LOCK(so, 1);
4407 				SCTP_TCB_LOCK(stcb);
4408 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
4409 				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4410 					SCTP_SOCKET_UNLOCK(so, 1);
4411 					return;
4412 				}
4413 			}
4414 #endif
4415 			sctp_sorwakeup(inp, inp->sctp_socket);
4416 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4417 			if (!so_locked) {
4418 				SCTP_SOCKET_UNLOCK(so, 1);
4419 			}
4420 #endif
4421 		}
4422 	}
4423 }
4424 
4425 
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 *
	 * Returns 0 on success, -1 when there is nothing usable to append
	 * to (NULL/complete control, or an empty chain).
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	/*
	 * Walk the chain: free zero-length mbufs, total the rest into
	 * len and (when sb is given) charge them to the socket buffer.
	 */
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			/*
			 * NOTE(review): stcb is dereferenced here without a
			 * NULL check even though stcb == NULL is tolerated
			 * above -- confirm callers on this platform always
			 * pass a TCB.
			 */
			so = SCTP_INP_SO(inp);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4567 
4568 
4569 
4570 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4571  *************ALTERNATE ROUTING CODE
4572  */
4573 
4574 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4575  *************ALTERNATE ROUTING CODE
4576  */
4577 
4578 struct mbuf *
4579 sctp_generate_invmanparam(int err)
4580 {
4581 	/* Return a MBUF with a invalid mandatory parameter */
4582 	struct mbuf *m;
4583 
4584 	m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
4585 	if (m) {
4586 		struct sctp_paramhdr *ph;
4587 
4588 		SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
4589 		ph = mtod(m, struct sctp_paramhdr *);
4590 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
4591 		ph->param_type = htons(err);
4592 	}
4593 	return (m);
4594 }
4595 
4596 #ifdef SCTP_MBCNT_LOGGING
4597 void
4598 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4599     struct sctp_tmit_chunk *tp1, int chk_cnt)
4600 {
4601 	if (tp1->data == NULL) {
4602 		return;
4603 	}
4604 	asoc->chunks_on_out_queue -= chk_cnt;
4605 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4606 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4607 		    asoc->total_output_queue_size,
4608 		    tp1->book_size,
4609 		    0,
4610 		    tp1->mbcnt);
4611 	}
4612 	if (asoc->total_output_queue_size >= tp1->book_size) {
4613 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4614 	} else {
4615 		asoc->total_output_queue_size = 0;
4616 	}
4617 
4618 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4619 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4620 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4621 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4622 		} else {
4623 			stcb->sctp_socket->so_snd.sb_cc = 0;
4624 
4625 		}
4626 	}
4627 }
4628 
4629 #endif
4630 
int
sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
    int reason, struct sctpchunk_listhead *queue, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Release a PR-SCTP chunk and, for a fragmented message, every
	 * following fragment up to the last one: free the data, notify
	 * the ULP of the failure, and mark each chunk for FORWARD-TSN
	 * skipping.  Returns the number of book_size bytes released.
	 */
	int ret_sz = 0;
	int notdone;
	uint8_t foundeom = 0;

	do {
		ret_sz += tp1->book_size;
		tp1->sent = SCTP_FORWARD_TSN_SKIP;
		if (tp1->data) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

#endif
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			sctp_flight_size_decrease(tp1);
			sctp_total_flight_decrease(stcb, tp1);
			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
			sctp_m_freem(tp1->data);
			tp1->data = NULL;
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			/*
			 * Drop the TCB lock (holding a refcount so the
			 * assoc cannot vanish) to take the socket lock
			 * first, then retake the TCB lock.
			 */
			so = SCTP_INP_SO(stcb->sctp_ep);
			if (!so_locked) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				SCTP_SOCKET_LOCK(so, 1);
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
					/*
					 * assoc was freed while we were
					 * unlocked
					 */
					SCTP_SOCKET_UNLOCK(so, 1);
					return (ret_sz);
				}
			}
#endif
			sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
		if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
			stcb->asoc.sent_queue_cnt_removeable--;
		}
		if (queue == &stcb->asoc.send_queue) {
			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
			/* on to the sent queue */
			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
			    sctp_next);
			stcb->asoc.sent_queue_cnt++;
		}
		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
		    SCTP_DATA_NOT_FRAG) {
			/* not frag'ed we are done */
			notdone = 0;
			foundeom = 1;
		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			/* end of frag, we are done */
			notdone = 0;
			foundeom = 1;
		} else {
			/*
			 * Its a begin or middle piece, we must mark all of
			 * it
			 */
			notdone = 1;
			tp1 = TAILQ_NEXT(tp1, sctp_next);
		}
	} while (tp1 && notdone);
	if ((foundeom == 0) && (queue == &stcb->asoc.sent_queue)) {
		/*
		 * The multi-part message was scattered across the send and
		 * sent queue.
		 */
		tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
		/*
		 * recurse through the send_queue too, starting at the
		 * beginning.
		 */
		if (tp1) {
			ret_sz += sctp_release_pr_sctp_chunk(stcb, tp1, reason,
			    &stcb->asoc.send_queue, so_locked);
		} else {
			SCTP_PRINTF("hmm, nothing on the send queue and no EOM?\n");
		}
	}
	return (ret_sz);
}
4729 
4730 /*
4731  * checks to see if the given address, sa, is one that is currently known by
4732  * the kernel note: can't distinguish the same address on multiple interfaces
4733  * and doesn't handle multiple addresses with different zone/scope id's note:
4734  * ifa_ifwithaddr() compares the entire sockaddr struct
4735  */
4736 struct sctp_ifa *
4737 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4738     int holds_lock)
4739 {
4740 	struct sctp_laddr *laddr;
4741 
4742 	if (holds_lock == 0) {
4743 		SCTP_INP_RLOCK(inp);
4744 	}
4745 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4746 		if (laddr->ifa == NULL)
4747 			continue;
4748 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4749 			continue;
4750 		if (addr->sa_family == AF_INET) {
4751 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4752 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4753 				/* found him. */
4754 				if (holds_lock == 0) {
4755 					SCTP_INP_RUNLOCK(inp);
4756 				}
4757 				return (laddr->ifa);
4758 				break;
4759 			}
4760 		}
4761 #ifdef INET6
4762 		if (addr->sa_family == AF_INET6) {
4763 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4764 			    &laddr->ifa->address.sin6)) {
4765 				/* found him. */
4766 				if (holds_lock == 0) {
4767 					SCTP_INP_RUNLOCK(inp);
4768 				}
4769 				return (laddr->ifa);
4770 				break;
4771 			}
4772 		}
4773 #endif
4774 	}
4775 	if (holds_lock == 0) {
4776 		SCTP_INP_RUNLOCK(inp);
4777 	}
4778 	return (NULL);
4779 }
4780 
4781 uint32_t
4782 sctp_get_ifa_hash_val(struct sockaddr *addr)
4783 {
4784 	if (addr->sa_family == AF_INET) {
4785 		struct sockaddr_in *sin;
4786 
4787 		sin = (struct sockaddr_in *)addr;
4788 		return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
4789 	} else if (addr->sa_family == AF_INET6) {
4790 		struct sockaddr_in6 *sin6;
4791 		uint32_t hash_of_addr;
4792 
4793 		sin6 = (struct sockaddr_in6 *)addr;
4794 		hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
4795 		    sin6->sin6_addr.s6_addr32[1] +
4796 		    sin6->sin6_addr.s6_addr32[2] +
4797 		    sin6->sin6_addr.s6_addr32[3]);
4798 		hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
4799 		return (hash_of_addr);
4800 	}
4801 	return (0);
4802 }
4803 
4804 struct sctp_ifa *
4805 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4806 {
4807 	struct sctp_ifa *sctp_ifap;
4808 	struct sctp_vrf *vrf;
4809 	struct sctp_ifalist *hash_head;
4810 	uint32_t hash_of_addr;
4811 
4812 	if (holds_lock == 0)
4813 		SCTP_IPI_ADDR_RLOCK();
4814 
4815 	vrf = sctp_find_vrf(vrf_id);
4816 	if (vrf == NULL) {
4817 stage_right:
4818 		if (holds_lock == 0)
4819 			SCTP_IPI_ADDR_RUNLOCK();
4820 		return (NULL);
4821 	}
4822 	hash_of_addr = sctp_get_ifa_hash_val(addr);
4823 
4824 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
4825 	if (hash_head == NULL) {
4826 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
4827 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
4828 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
4829 		sctp_print_address(addr);
4830 		SCTP_PRINTF("No such bucket for address\n");
4831 		if (holds_lock == 0)
4832 			SCTP_IPI_ADDR_RUNLOCK();
4833 
4834 		return (NULL);
4835 	}
4836 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
4837 		if (sctp_ifap == NULL) {
4838 #ifdef INVARIANTS
4839 			panic("Huh LIST_FOREACH corrupt");
4840 			goto stage_right;
4841 #else
4842 			SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
4843 			goto stage_right;
4844 #endif
4845 		}
4846 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
4847 			continue;
4848 		if (addr->sa_family == AF_INET) {
4849 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4850 			    sctp_ifap->address.sin.sin_addr.s_addr) {
4851 				/* found him. */
4852 				if (holds_lock == 0)
4853 					SCTP_IPI_ADDR_RUNLOCK();
4854 				return (sctp_ifap);
4855 				break;
4856 			}
4857 		}
4858 #ifdef INET6
4859 		if (addr->sa_family == AF_INET6) {
4860 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4861 			    &sctp_ifap->address.sin6)) {
4862 				/* found him. */
4863 				if (holds_lock == 0)
4864 					SCTP_IPI_ADDR_RUNLOCK();
4865 				return (sctp_ifap);
4866 				break;
4867 			}
4868 		}
4869 #endif
4870 	}
4871 	if (holds_lock == 0)
4872 		SCTP_IPI_ADDR_RUNLOCK();
4873 	return (NULL);
4874 }
4875 
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/*
	 * User pulled some data, do we need a rwnd update?  If the
	 * receive window has grown by at least rwnd_req since the last
	 * report, send a (nr-)SACK immediately; otherwise just remember
	 * how much is pending in freed_by_sorcv_sincelast.
	 */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* hold a reference so the assoc cannot be freed under us */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/* window grew by at least rwnd_req: worth advertising */
		if (hold_rlock) {
			/* drop the caller's read lock before locking the TCB */
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		/*
		 * EY if nr_sacks used then send an nr-sack , a sack
		 * otherwise
		 */
		if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
			sctp_send_nr_sack(stcb);
		else
			sctp_send_sack(stcb);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* retake the read lock if we dropped it for the caller */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
4964 
/*
 * SCTP-specific soreceive() work-horse: pull one (possibly partial) message
 * off the endpoint's read_queue into either the caller's uio (mp == NULL)
 * or an mbuf chain (*mp).  Handles blocking/non-blocking modes, MSG_PEEK,
 * partial-delivery (pd-api), notification delivery, and window-update SACK
 * triggering via sctp_user_rcvd().  Lock state is tracked in hold_sblock
 * (socket-buffer lock), hold_rlock (inp read-queue lock) and sockbuf_lock
 * (sblock() I/O serialization); freecnt_applied records whether we hold a
 * refcount on the stcb.  Returns 0 or an errno.
 */
int
sctp_sorecvmsg(struct socket *so,
    struct uio *uio,
    struct mbuf **mp,
    struct sockaddr *from,
    int fromlen,
    int *msg_flags,
    struct sctp_sndrcvinfo *sinfo,
    int filling_sinfo)
{
	/*
	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
	 * On the way out we may send out any combination of:
	 * MSG_NOTIFICATION MSG_EOR
	 *
	 */
	struct sctp_inpcb *inp = NULL;
	int my_len = 0;
	int cp_len = 0, error = 0;
	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
	struct mbuf *m = NULL, *embuf = NULL;
	struct sctp_tcb *stcb = NULL;
	int wakeup_read_socket = 0;
	int freecnt_applied = 0;	/* 1 iff we hold a refcnt on stcb */
	int out_flags = 0, in_flags = 0;
	int block_allowed = 1;
	uint32_t freed_so_far = 0;	/* bytes freed from sb since last rwnd report */
	uint32_t copied_so_far = 0;	/* bytes handed to the user this call */
	int in_eeor_mode = 0;
	int no_rcv_needed = 0;
	uint32_t rwnd_req = 0;		/* threshold that triggers a window-update SACK */
	int hold_sblock = 0;		/* 1 iff SOCKBUF_LOCK(&so->so_rcv) held */
	int hold_rlock = 0;		/* 1 iff SCTP_INP_READ_LOCK(inp) held */
	int slen = 0;
	uint32_t held_length = 0;
	int sockbuf_lock = 0;		/* 1 iff sblock(&so->so_rcv) held */

	if (uio == NULL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		return (EINVAL);
	}
	if (msg_flags) {
		in_flags = *msg_flags;
		if (in_flags & MSG_PEEK)
			SCTP_STAT_INCR(sctps_read_peeks);
	} else {
		in_flags = 0;
	}
	slen = uio->uio_resid;

	/* Pull in and set up our int flags */
	if (in_flags & MSG_OOB) {
		/* Out of band's NOT supported */
		return (EOPNOTSUPP);
	}
	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
		/* Peeking is only valid with the uio copy method */
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		return (EINVAL);
	}
	if ((in_flags & (MSG_DONTWAIT
	    | MSG_NBIO
	    )) ||
	    SCTP_SO_IS_NBIO(so)) {
		block_allowed = 0;
	}
	/* setup the endpoint */
	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
		return (EFAULT);
	}
	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
	/* Must be at least a MTU's worth */
	if (rwnd_req < SCTP_MIN_RWND)
		rwnd_req = SCTP_MIN_RWND;
	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
		sctp_misc_ints(SCTP_SORECV_ENTER,
		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
		sctp_misc_ints(SCTP_SORECV_ENTERPL,
		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
	}
	/* Serialize readers on this socket buffer for the whole call */
	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
	sockbuf_lock = 1;
	if (error) {
		goto release_unlocked;
	}
restart:
	/* full restart: re-fetch the head of the read_queue */

restart_nosblocks:
	/* restart keeping any sblock; (re)take the socket-buffer lock */
	if (hold_sblock == 0) {
		SOCKBUF_LOCK(&so->so_rcv);
		hold_sblock = 1;
	}
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
		if (so->so_error) {
			error = so->so_error;
			if ((in_flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto out;
		} else {
			if (so->so_rcv.sb_cc == 0) {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
				/* indicate EOF */
				error = 0;
				goto out;
			}
		}
	}
	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
		/* we need to wait for data */
		if ((so->so_rcv.sb_cc == 0) &&
		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
				/*
				 * For active open side clear flags for
				 * re-use passive open is blocked by
				 * connect.
				 */
				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
					/*
					 * You were aborted, passive side
					 * always hits here
					 */
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
					error = ECONNRESET;
					/*
					 * You get this once if you are
					 * active open side
					 */
					if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
						/*
						 * Remove flag if on the
						 * active open side
						 */
						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
					}
				}
				so->so_state &= ~(SS_ISCONNECTING |
				    SS_ISDISCONNECTING |
				    SS_ISCONFIRMING |
				    SS_ISCONNECTED);
				if (error == 0) {
					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
						error = ENOTCONN;
					} else {
						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
					}
				}
				goto out;
			}
		}
		error = sbwait(&so->so_rcv);
		if (error) {
			goto out;
		}
		held_length = 0;
		goto restart_nosblocks;
	} else if (so->so_rcv.sb_cc == 0) {
		/* non-blocking and nothing buffered */
		if (so->so_error) {
			error = so->so_error;
			if ((in_flags & MSG_PEEK) == 0)
				so->so_error = 0;
		} else {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
					/*
					 * For active open side clear flags
					 * for re-use passive open is
					 * blocked by connect.
					 */
					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
						/*
						 * You were aborted, passive
						 * side always hits here
						 */
						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
						error = ECONNRESET;
						/*
						 * You get this once if you
						 * are active open side
						 */
						if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
							/*
							 * Remove flag if on
							 * the active open
							 * side
							 */
							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
						}
					}
					so->so_state &= ~(SS_ISCONNECTING |
					    SS_ISDISCONNECTING |
					    SS_ISCONFIRMING |
					    SS_ISCONNECTED);
					if (error == 0) {
						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
							error = ENOTCONN;
						} else {
							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
						}
					}
					goto out;
				}
			}
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
			error = EWOULDBLOCK;
		}
		goto out;
	}
	if (hold_sblock == 1) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	/* we possibly have data we can read */
	/* sa_ignore FREED_MEMORY */
	control = TAILQ_FIRST(&inp->read_queue);
	if (control == NULL) {
		/*
		 * This could be happening since the appender did the
		 * increment but as not yet did the tailq insert onto the
		 * read_queue
		 */
		if (hold_rlock == 0) {
			SCTP_INP_READ_LOCK(inp);
			hold_rlock = 1;
		}
		control = TAILQ_FIRST(&inp->read_queue);
		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
#ifdef INVARIANTS
			panic("Huh, its non zero and nothing on control?");
#endif
			so->so_rcv.sb_cc = 0;
		}
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
		goto restart;
	}
	if ((control->length == 0) &&
	    (control->do_not_ref_stcb)) {
		/*
		 * Clean up code for freeing assoc that left behind a
		 * pdapi.. maybe a peer in EEOR that just closed after
		 * sending and never indicated a EOR.
		 */
		if (hold_rlock == 0) {
			hold_rlock = 1;
			SCTP_INP_READ_LOCK(inp);
		}
		control->held_length = 0;
		if (control->data) {
			/* Hmm there is data here .. fix */
			struct mbuf *m_tmp;
			int cnt = 0;

			/* Recount the chain length and mark it complete */
			m_tmp = control->data;
			while (m_tmp) {
				cnt += SCTP_BUF_LEN(m_tmp);
				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
					control->tail_mbuf = m_tmp;
					control->end_added = 1;
				}
				m_tmp = SCTP_BUF_NEXT(m_tmp);
			}
			control->length = cnt;
		} else {
			/* remove it */
			TAILQ_REMOVE(&inp->read_queue, control, next);
			/* Add back any hiddend data */
			sctp_free_remote_addr(control->whoFrom);
			sctp_free_a_readq(stcb, control);
		}
		if (hold_rlock) {
			hold_rlock = 0;
			SCTP_INP_READ_UNLOCK(inp);
		}
		goto restart;
	}
	if (control->length == 0) {
		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
		    (filling_sinfo)) {
			/* find a more suitable one then this */
			ctl = TAILQ_NEXT(control, next);
			while (ctl) {
				if ((ctl->stcb != control->stcb) && (ctl->length) &&
				    (ctl->some_taken ||
				    (ctl->spec_flags & M_NOTIFICATION) ||
				    ((ctl->do_not_ref_stcb == 0) &&
				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
				    ) {
					/*-
					 * If we have a different TCB next, and there is data
					 * present. If we have already taken some (pdapi), OR we can
					 * ref the tcb and no delivery as started on this stream, we
					 * take it. Note we allow a notification on a different
					 * assoc to be delivered..
					 */
					control = ctl;
					goto found_one;
				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
					    (ctl->length) &&
					    ((ctl->some_taken) ||
					    ((ctl->do_not_ref_stcb == 0) &&
					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
					    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
				    ) {
					/*-
					 * If we have the same tcb, and there is data present, and we
					 * have the strm interleave feature present. Then if we have
					 * taken some (pdapi) or we can refer to tht tcb AND we have
					 * not started a delivery for this stream, we can take it.
					 * Note we do NOT allow a notificaiton on the same assoc to
					 * be delivered.
					 */
					control = ctl;
					goto found_one;
				}
				ctl = TAILQ_NEXT(ctl, next);
			}
		}
		/*
		 * if we reach here, not suitable replacement is available
		 * <or> fragment interleave is NOT on. So stuff the sb_cc
		 * into the our held count, and its time to sleep again.
		 */
		held_length = so->so_rcv.sb_cc;
		control->held_length = so->so_rcv.sb_cc;
		goto restart;
	}
	/* Clear the held length since there is something to read */
	control->held_length = 0;
	if (hold_rlock) {
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
	}
found_one:
	/*
	 * If we reach here, control has a some data for us to read off.
	 * Note that stcb COULD be NULL.
	 */
	control->some_taken++;
	if (hold_sblock) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	stcb = control->stcb;
	if (stcb) {
		if ((control->do_not_ref_stcb == 0) &&
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
			if (freecnt_applied == 0)
				stcb = NULL;
		} else if (control->do_not_ref_stcb == 0) {
			/* you can't free it on me please */
			/*
			 * The lock on the socket buffer protects us so the
			 * free code will stop. But since we used the
			 * socketbuf lock and the sender uses the tcb_lock
			 * to increment, we need to use the atomic add to
			 * the refcnt
			 */
			if (freecnt_applied) {
#ifdef INVARIANTS
				panic("refcnt already incremented");
#else
				printf("refcnt already incremented?\n");
#endif
			} else {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				freecnt_applied = 1;
			}
			/*
			 * Setup to remember how much we have not yet told
			 * the peer our rwnd has opened up. Note we grab the
			 * value from the tcb from last time. Note too that
			 * sack sending clears this when a sack is sent,
			 * which is fine. Once we hit the rwnd_req, we then
			 * will go to the sctp_user_rcvd() that will not
			 * lock until it KNOWs it MUST send a WUP-SACK.
			 */
			freed_so_far = stcb->freed_by_sorcv_sincelast;
			stcb->freed_by_sorcv_sincelast = 0;
		}
	}
	if (stcb &&
	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
	    control->do_not_ref_stcb == 0) {
		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
	}
	/* First lets get off the sinfo and sockaddr info */
	if ((sinfo) && filling_sinfo) {
		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
		nxt = TAILQ_NEXT(control, next);
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
			/* Fill in the extended "next message" info as well */
			struct sctp_extrcvinfo *s_extra;

			s_extra = (struct sctp_extrcvinfo *)sinfo;
			if ((nxt) &&
			    (nxt->length)) {
				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
				if (nxt->sinfo_flags & SCTP_UNORDERED) {
					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
				}
				if (nxt->spec_flags & M_NOTIFICATION) {
					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
				}
				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
				s_extra->sreinfo_next_length = nxt->length;
				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
				if (nxt->tail_mbuf != NULL) {
					if (nxt->end_added) {
						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
					}
				}
			} else {
				/*
				 * we explicitly 0 this, since the memcpy
				 * got some other things beyond the older
				 * sinfo_ that is on the control's structure
				 * :-D
				 */
				nxt = NULL;
				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
				s_extra->sreinfo_next_aid = 0;
				s_extra->sreinfo_next_length = 0;
				s_extra->sreinfo_next_ppid = 0;
				s_extra->sreinfo_next_stream = 0;
			}
		}
		/*
		 * update off the real current cum-ack, if we have an stcb.
		 */
		if ((control->do_not_ref_stcb == 0) && stcb)
			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
		/*
		 * mask off the high bits, we keep the actual chunk bits in
		 * there.
		 */
		sinfo->sinfo_flags &= 0x00ff;
		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
			sinfo->sinfo_flags |= SCTP_UNORDERED;
		}
	}
#ifdef SCTP_ASOCLOG_OF_TSNS
	{
		/* Record this read in the per-inp TSN read log (lock-free slot claim) */
		int index, newindex;
		struct sctp_pcbtsn_rlog *entry;

		do {
			index = inp->readlog_index;
			newindex = index + 1;
			if (newindex >= SCTP_READ_LOG_SIZE) {
				newindex = 0;
			}
		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
		entry = &inp->readlog[index];
		entry->vtag = control->sinfo_assoc_id;
		entry->strm = control->sinfo_stream;
		entry->seq = control->sinfo_ssn;
		entry->sz = control->length;
		entry->flgs = control->sinfo_flags;
	}
#endif
	if (fromlen && from) {
		/* Copy out the peer address, bounded by the caller's buffer */
		struct sockaddr *to;

#ifdef INET
		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin.sin_len);
		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
		((struct sockaddr_in *)from)->sin_port = control->port_from;
#else
		/* No AF_INET use AF_INET6 */
		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin6.sin6_len);
		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
		((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
#endif

		to = from;
#if defined(INET) && defined(INET6)
		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
		    (to->sa_family == AF_INET) &&
		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
			/* Present the v4 peer as a v4-mapped v6 address */
			struct sockaddr_in *sin;
			struct sockaddr_in6 sin6;

			sin = (struct sockaddr_in *)to;
			bzero(&sin6, sizeof(sin6));
			sin6.sin6_family = AF_INET6;
			sin6.sin6_len = sizeof(struct sockaddr_in6);
			sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
			bcopy(&sin->sin_addr,
			    &sin6.sin6_addr.s6_addr32[3],
			    sizeof(sin6.sin6_addr.s6_addr32[3]));
			sin6.sin6_port = sin->sin_port;
			memcpy(from, (caddr_t)&sin6, sizeof(sin6));
		}
#endif
#if defined(INET6)
		{
			struct sockaddr_in6 lsa6, *to6;

			to6 = (struct sockaddr_in6 *)to;
			sctp_recover_scope_mac(to6, (&lsa6));
		}
#endif
	}
	/* now copy out what data we can */
	if (mp == NULL) {
		/* copy out each mbuf in the chain up to length */
get_more_data:
		m = control->data;
		while (m) {
			/* Move out all we can */
			cp_len = (int)uio->uio_resid;
			my_len = (int)SCTP_BUF_LEN(m);
			if (cp_len > my_len) {
				/* not enough in this buf */
				cp_len = my_len;
			}
			if (hold_rlock) {
				/* uiomove may sleep/fault; drop the read lock first */
				SCTP_INP_READ_UNLOCK(inp);
				hold_rlock = 0;
			}
			if (cp_len > 0)
				error = uiomove(mtod(m, char *), cp_len, uio);
			/* re-read */
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				goto release;
			}
			if ((control->do_not_ref_stcb == 0) && stcb &&
			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
				no_rcv_needed = 1;
			}
			if (error) {
				/* error we are out of here */
				goto release;
			}
			if ((SCTP_BUF_NEXT(m) == NULL) &&
			    (cp_len >= SCTP_BUF_LEN(m)) &&
			    ((control->end_added == 0) ||
			    (control->end_added &&
			    (TAILQ_NEXT(control, next) == NULL)))
			    ) {
				SCTP_INP_READ_LOCK(inp);
				hold_rlock = 1;
			}
			if (cp_len == SCTP_BUF_LEN(m)) {
				if ((SCTP_BUF_NEXT(m) == NULL) &&
				    (control->end_added)) {
					out_flags |= MSG_EOR;
					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
				}
				if (control->spec_flags & M_NOTIFICATION) {
					out_flags |= MSG_NOTIFICATION;
				}
				/* we ate up the mbuf */
				if (in_flags & MSG_PEEK) {
					/* just looking */
					m = SCTP_BUF_NEXT(m);
					copied_so_far += cp_len;
				} else {
					/* dispose of the mbuf */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv,
						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
					}
					sctp_sbfree(control, stcb, &so->so_rcv, m);
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv,
						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
					}
					embuf = m;
					copied_so_far += cp_len;
					freed_so_far += cp_len;
					freed_so_far += MSIZE;
					atomic_subtract_int(&control->length, cp_len);
					control->data = sctp_m_free(m);
					m = control->data;
					/*
					 * been through it all, must hold sb
					 * lock ok to null tail
					 */
					if (control->data == NULL) {
#ifdef INVARIANTS
						if ((control->end_added == 0) ||
						    (TAILQ_NEXT(control, next) == NULL)) {
							/*
							 * If the end is not
							 * added, OR the
							 * next is null, we
							 * MUST have the
							 * lock (taken above
							 * before the last
							 * mbuf was freed).
							 */
							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
								panic("Hmm we don't own the lock?");
							}
						}
#endif
						control->tail_mbuf = NULL;
#ifdef INVARIANTS
						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
							panic("end_added, nothing left and no MSG_EOR");
						}
#endif
					}
				}
			} else {
				/* Do we need to trim the mbuf? */
				if (control->spec_flags & M_NOTIFICATION) {
					out_flags |= MSG_NOTIFICATION;
				}
				if ((in_flags & MSG_PEEK) == 0) {
					/* consume the leading cp_len bytes of this mbuf */
					SCTP_BUF_RESV_UF(m, cp_len);
					SCTP_BUF_LEN(m) -= cp_len;
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
					}
					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
					if ((control->do_not_ref_stcb == 0) &&
					    stcb) {
						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
					}
					copied_so_far += cp_len;
					embuf = m;
					freed_so_far += cp_len;
					freed_so_far += MSIZE;
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
						    SCTP_LOG_SBRESULT, 0);
					}
					atomic_subtract_int(&control->length, cp_len);
				} else {
					copied_so_far += cp_len;
				}
			}
			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
				break;
			}
			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
			    (control->do_not_ref_stcb == 0) &&
			    (freed_so_far >= rwnd_req)) {
				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
			}
		}		/* end while(m) */
		/*
		 * At this point we have looked at it all and we either have
		 * a MSG_EOR/or read all the user wants... <OR>
		 * control->length == 0.
		 */
		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
			/* we are done with this control */
			if (control->length == 0) {
				if (control->data) {
#ifdef INVARIANTS
					panic("control->data not null at read eor?");
#else
					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
					sctp_m_freem(control->data);
					control->data = NULL;
#endif
				}
		done_with_control:
				if (TAILQ_NEXT(control, next) == NULL) {
					/*
					 * If we don't have a next we need a
					 * lock, if there is a next
					 * interrupt is filling ahead of us
					 * and we don't need a lock to
					 * remove this guy (which is the
					 * head of the queue).
					 */
					if (hold_rlock == 0) {
						SCTP_INP_READ_LOCK(inp);
						hold_rlock = 1;
					}
				}
				TAILQ_REMOVE(&inp->read_queue, control, next);
				/* Add back any hiddend data */
				if (control->held_length) {
					held_length = 0;
					control->held_length = 0;
					wakeup_read_socket = 1;
				}
				if (control->aux_data) {
					sctp_m_free(control->aux_data);
					control->aux_data = NULL;
				}
				no_rcv_needed = control->do_not_ref_stcb;
				sctp_free_remote_addr(control->whoFrom);
				control->data = NULL;
				sctp_free_a_readq(stcb, control);
				control = NULL;
				if ((freed_so_far >= rwnd_req) &&
				    (no_rcv_needed == 0))
					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);

			} else {
				/*
				 * The user did not read all of this
				 * message, turn off the returned MSG_EOR
				 * since we are leaving more behind on the
				 * control to read.
				 */
#ifdef INVARIANTS
				if (control->end_added &&
				    (control->data == NULL) &&
				    (control->tail_mbuf == NULL)) {
					panic("Gak, control->length is corrupt?");
				}
#endif
				no_rcv_needed = control->do_not_ref_stcb;
				out_flags &= ~MSG_EOR;
			}
		}
		if (out_flags & MSG_EOR) {
			goto release;
		}
		if ((uio->uio_resid == 0) ||
		    ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
		    ) {
			goto release;
		}
		/*
		 * If I hit here the receiver wants more and this message is
		 * NOT done (pd-api). So two questions. Can we block? if not
		 * we are done. Did the user NOT set MSG_WAITALL?
		 */
		if (block_allowed == 0) {
			goto release;
		}
		/*
		 * We need to wait for more data a few things: - We don't
		 * sbunlock() so we don't get someone else reading. - We
		 * must be sure to account for the case where what is added
		 * is NOT to our control when we wakeup.
		 */

		/*
		 * Do we need to tell the transport a rwnd update might be
		 * needed before we go to sleep?
		 */
		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
		    ((freed_so_far >= rwnd_req) &&
		    (control->do_not_ref_stcb == 0) &&
		    (no_rcv_needed == 0))) {
			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
		}
wait_some_more:
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
			goto release;
		}
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
			goto release;

		if (hold_rlock == 1) {
			SCTP_INP_READ_UNLOCK(inp);
			hold_rlock = 0;
		}
		if (hold_sblock == 0) {
			SOCKBUF_LOCK(&so->so_rcv);
			hold_sblock = 1;
		}
		if ((copied_so_far) && (control->length == 0) &&
		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))
		    ) {
			goto release;
		}
		if (so->so_rcv.sb_cc <= control->held_length) {
			error = sbwait(&so->so_rcv);
			if (error) {
				goto release;
			}
			control->held_length = 0;
		}
		if (hold_sblock) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			hold_sblock = 0;
		}
		if (control->length == 0) {
			/* still nothing here */
			if (control->end_added == 1) {
				/* he aborted, or is done i.e.did a shutdown */
				out_flags |= MSG_EOR;
				if (control->pdapi_aborted) {
					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;

					out_flags |= MSG_TRUNC;
				} else {
					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
				}
				goto done_with_control;
			}
			if (so->so_rcv.sb_cc > held_length) {
				control->held_length = so->so_rcv.sb_cc;
				held_length = 0;
			}
			goto wait_some_more;
		} else if (control->data == NULL) {
			/*
			 * we must re-sync since data is probably being
			 * added
			 */
			SCTP_INP_READ_LOCK(inp);
			if ((control->length > 0) && (control->data == NULL)) {
				/*
				 * big trouble.. we have the lock and its
				 * corrupt?
				 */
#ifdef INVARIANTS
				panic("Impossible data==NULL length !=0");
#endif
				out_flags |= MSG_EOR;
				out_flags |= MSG_TRUNC;
				control->length = 0;
				SCTP_INP_READ_UNLOCK(inp);
				goto done_with_control;
			}
			SCTP_INP_READ_UNLOCK(inp);
			/* We will fall around to get more data */
		}
		goto get_more_data;
	} else {
		/*-
		 * Give caller back the mbuf chain,
		 * store in uio_resid the length
		 */
		wakeup_read_socket = 0;
		if ((control->end_added == 0) ||
		    (TAILQ_NEXT(control, next) == NULL)) {
			/* Need to get rlock */
			if (hold_rlock == 0) {
				SCTP_INP_READ_LOCK(inp);
				hold_rlock = 1;
			}
		}
		if (control->end_added) {
			out_flags |= MSG_EOR;
			if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
		}
		if (control->spec_flags & M_NOTIFICATION) {
			out_flags |= MSG_NOTIFICATION;
		}
		uio->uio_resid = control->length;
		*mp = control->data;
		m = control->data;
		while (m) {
			/* Account the whole chain out of the socket buffer */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&so->so_rcv,
				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
			}
			sctp_sbfree(control, stcb, &so->so_rcv, m);
			freed_so_far += SCTP_BUF_LEN(m);
			freed_so_far += MSIZE;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&so->so_rcv,
				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
		control->data = control->tail_mbuf = NULL;
		control->length = 0;
		if (out_flags & MSG_EOR) {
			/* Done with this control */
			goto done_with_control;
		}
	}
release:
	/* Drop everything we may still hold, then the sblock itself */
	if (hold_rlock == 1) {
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
	}
	if (hold_sblock == 1) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	sbunlock(&so->so_rcv);
	sockbuf_lock = 0;

release_unlocked:
	if (hold_sblock) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
		if ((freed_so_far >= rwnd_req) &&
		    (control && (control->do_not_ref_stcb == 0)) &&
		    (no_rcv_needed == 0))
			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
	}
out:
	if (msg_flags) {
		*msg_flags = out_flags;
	}
	if (((out_flags & MSG_EOR) == 0) &&
	    ((in_flags & MSG_PEEK) == 0) &&
	    (sinfo) &&
	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO))) {
		/* partial read: there is no valid "next message" to report */
		struct sctp_extrcvinfo *s_extra;

		s_extra = (struct sctp_extrcvinfo *)sinfo;
		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
	}
	if (hold_rlock == 1) {
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
	}
	if (hold_sblock) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	if (sockbuf_lock) {
		sbunlock(&so->so_rcv);
	}
	if (freecnt_applied) {
		/*
		 * The lock on the socket buffer protects us so the free
		 * code will stop. But since we used the socketbuf lock and
		 * the sender uses the tcb_lock to increment, we need to use
		 * the atomic add to the refcnt.
		 */
		if (stcb == NULL) {
#ifdef INVARIANTS
			panic("stcb for refcnt has gone NULL?");
			goto stage_left;
#else
			goto stage_left;
#endif
		}
		atomic_add_int(&stcb->asoc.refcnt, -1);
		freecnt_applied = 0;
		/* Save the value back for next time */
		stcb->freed_by_sorcv_sincelast = freed_so_far;
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
		if (stcb) {
			sctp_misc_ints(SCTP_SORECV_DONE,
			    freed_so_far,
			    ((uio) ? (slen - uio->uio_resid) : slen),
			    stcb->asoc.my_rwnd,
			    so->so_rcv.sb_cc);
		} else {
			sctp_misc_ints(SCTP_SORECV_DONE,
			    freed_so_far,
			    ((uio) ? (slen - uio->uio_resid) : slen),
			    0,
			    so->so_rcv.sb_cc);
		}
	}
stage_left:
	if (wakeup_read_socket) {
		sctp_sorwakeup(inp, so);
	}
	return (error);
}
5936 
5937 
5938 #ifdef SCTP_MBUF_LOGGING
5939 struct mbuf *
5940 sctp_m_free(struct mbuf *m)
5941 {
5942 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
5943 		if (SCTP_BUF_IS_EXTENDED(m)) {
5944 			sctp_log_mb(m, SCTP_MBUF_IFREE);
5945 		}
5946 	}
5947 	return (m_free(m));
5948 }
5949 
5950 void
5951 sctp_m_freem(struct mbuf *mb)
5952 {
5953 	while (mb != NULL)
5954 		mb = sctp_m_free(mb);
5955 }
5956 
5957 #endif
5958 
5959 int
5960 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
5961 {
5962 	/*
5963 	 * Given a local address. For all associations that holds the
5964 	 * address, request a peer-set-primary.
5965 	 */
5966 	struct sctp_ifa *ifa;
5967 	struct sctp_laddr *wi;
5968 
5969 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
5970 	if (ifa == NULL) {
5971 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
5972 		return (EADDRNOTAVAIL);
5973 	}
5974 	/*
5975 	 * Now that we have the ifa we must awaken the iterator with this
5976 	 * message.
5977 	 */
5978 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
5979 	if (wi == NULL) {
5980 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
5981 		return (ENOMEM);
5982 	}
5983 	/* Now incr the count and int wi structure */
5984 	SCTP_INCR_LADDR_COUNT();
5985 	bzero(wi, sizeof(*wi));
5986 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
5987 	wi->ifa = ifa;
5988 	wi->action = SCTP_SET_PRIM_ADDR;
5989 	atomic_add_int(&ifa->refcount, 1);
5990 
5991 	/* Now add it to the work queue */
5992 	SCTP_IPI_ITERATOR_WQ_LOCK();
5993 	/*
5994 	 * Should this really be a tailq? As it is we will process the
5995 	 * newest first :-0
5996 	 */
5997 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
5998 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
5999 	    (struct sctp_inpcb *)NULL,
6000 	    (struct sctp_tcb *)NULL,
6001 	    (struct sctp_nets *)NULL);
6002 	SCTP_IPI_ITERATOR_WQ_UNLOCK();
6003 	return (0);
6004 }
6005 
6006 
6007 int
6008 sctp_soreceive(struct socket *so,
6009     struct sockaddr **psa,
6010     struct uio *uio,
6011     struct mbuf **mp0,
6012     struct mbuf **controlp,
6013     int *flagsp)
6014 {
6015 	int error, fromlen;
6016 	uint8_t sockbuf[256];
6017 	struct sockaddr *from;
6018 	struct sctp_extrcvinfo sinfo;
6019 	int filling_sinfo = 1;
6020 	struct sctp_inpcb *inp;
6021 
6022 	inp = (struct sctp_inpcb *)so->so_pcb;
6023 	/* pickup the assoc we are reading from */
6024 	if (inp == NULL) {
6025 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6026 		return (EINVAL);
6027 	}
6028 	if ((sctp_is_feature_off(inp,
6029 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6030 	    (controlp == NULL)) {
6031 		/* user does not want the sndrcv ctl */
6032 		filling_sinfo = 0;
6033 	}
6034 	if (psa) {
6035 		from = (struct sockaddr *)sockbuf;
6036 		fromlen = sizeof(sockbuf);
6037 		from->sa_len = 0;
6038 	} else {
6039 		from = NULL;
6040 		fromlen = 0;
6041 	}
6042 
6043 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6044 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6045 	if ((controlp) && (filling_sinfo)) {
6046 		/* copy back the sinfo in a CMSG format */
6047 		if (filling_sinfo)
6048 			*controlp = sctp_build_ctl_nchunk(inp,
6049 			    (struct sctp_sndrcvinfo *)&sinfo);
6050 		else
6051 			*controlp = NULL;
6052 	}
6053 	if (psa) {
6054 		/* copy back the address info */
6055 		if (from && from->sa_len) {
6056 			*psa = sodupsockaddr(from, M_NOWAIT);
6057 		} else {
6058 			*psa = NULL;
6059 		}
6060 	}
6061 	return (error);
6062 }
6063 
6064 
6065 int
6066 sctp_l_soreceive(struct socket *so,
6067     struct sockaddr **name,
6068     struct uio *uio,
6069     char **controlp,
6070     int *controllen,
6071     int *flag)
6072 {
6073 	int error, fromlen;
6074 	uint8_t sockbuf[256];
6075 	struct sockaddr *from;
6076 	struct sctp_extrcvinfo sinfo;
6077 	int filling_sinfo = 1;
6078 	struct sctp_inpcb *inp;
6079 
6080 	inp = (struct sctp_inpcb *)so->so_pcb;
6081 	/* pickup the assoc we are reading from */
6082 	if (inp == NULL) {
6083 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6084 		return (EINVAL);
6085 	}
6086 	if ((sctp_is_feature_off(inp,
6087 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6088 	    (controlp == NULL)) {
6089 		/* user does not want the sndrcv ctl */
6090 		filling_sinfo = 0;
6091 	}
6092 	if (name) {
6093 		from = (struct sockaddr *)sockbuf;
6094 		fromlen = sizeof(sockbuf);
6095 		from->sa_len = 0;
6096 	} else {
6097 		from = NULL;
6098 		fromlen = 0;
6099 	}
6100 
6101 	error = sctp_sorecvmsg(so, uio,
6102 	    (struct mbuf **)NULL,
6103 	    from, fromlen, flag,
6104 	    (struct sctp_sndrcvinfo *)&sinfo,
6105 	    filling_sinfo);
6106 	if ((controlp) && (filling_sinfo)) {
6107 		/*
6108 		 * copy back the sinfo in a CMSG format note that the caller
6109 		 * has reponsibility for freeing the memory.
6110 		 */
6111 		if (filling_sinfo)
6112 			*controlp = sctp_build_ctl_cchunk(inp,
6113 			    controllen,
6114 			    (struct sctp_sndrcvinfo *)&sinfo);
6115 	}
6116 	if (name) {
6117 		/* copy back the address info */
6118 		if (from && from->sa_len) {
6119 			*name = sodupsockaddr(from, M_WAIT);
6120 		} else {
6121 			*name = NULL;
6122 		}
6123 	}
6124 	return (error);
6125 }
6126 
6127 
6128 
6129 
6130 
6131 
6132 
/*
 * Add the totaddr packed addresses in 'addr' (a back-to-back array of
 * sockaddr_in / sockaddr_in6 structures) to the association as
 * confirmed remote addresses.  Returns the number of addresses that
 * were added.  If sctp_add_remote_addr() fails, the association has
 * already been freed when we return; *error is set to ENOBUFS and the
 * count added so far is returned.
 *
 * NOTE(review): an entry whose family is neither AF_INET nor AF_INET6
 * re-uses the previous entry's size (0 for the first entry), so the
 * walk would stall on such an entry.  Callers are expected to have
 * validated the array first (see sctp_connectx_helper_find()) —
 * confirm before relying on this with unvalidated input.
 */
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		if (sa->sa_family == AF_INET) {
			incr = sizeof(struct sockaddr_in);
			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
		} else if (sa->sa_family == AF_INET6) {
			incr = sizeof(struct sockaddr_in6);
			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
		}
		/* advance to the next packed sockaddr */
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6173 
6174 struct sctp_tcb *
6175 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6176     int *totaddr, int *num_v4, int *num_v6, int *error,
6177     int limit, int *bad_addr)
6178 {
6179 	struct sockaddr *sa;
6180 	struct sctp_tcb *stcb = NULL;
6181 	size_t incr, at, i;
6182 
6183 	at = incr = 0;
6184 	sa = addr;
6185 	*error = *num_v6 = *num_v4 = 0;
6186 	/* account and validate addresses */
6187 	for (i = 0; i < (size_t)*totaddr; i++) {
6188 		if (sa->sa_family == AF_INET) {
6189 			(*num_v4) += 1;
6190 			incr = sizeof(struct sockaddr_in);
6191 			if (sa->sa_len != incr) {
6192 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6193 				*error = EINVAL;
6194 				*bad_addr = 1;
6195 				return (NULL);
6196 			}
6197 		} else if (sa->sa_family == AF_INET6) {
6198 			struct sockaddr_in6 *sin6;
6199 
6200 			sin6 = (struct sockaddr_in6 *)sa;
6201 			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6202 				/* Must be non-mapped for connectx */
6203 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6204 				*error = EINVAL;
6205 				*bad_addr = 1;
6206 				return (NULL);
6207 			}
6208 			(*num_v6) += 1;
6209 			incr = sizeof(struct sockaddr_in6);
6210 			if (sa->sa_len != incr) {
6211 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6212 				*error = EINVAL;
6213 				*bad_addr = 1;
6214 				return (NULL);
6215 			}
6216 		} else {
6217 			*totaddr = i;
6218 			/* we are done */
6219 			break;
6220 		}
6221 		SCTP_INP_INCR_REF(inp);
6222 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6223 		if (stcb != NULL) {
6224 			/* Already have or am bring up an association */
6225 			return (stcb);
6226 		} else {
6227 			SCTP_INP_DECR_REF(inp);
6228 		}
6229 		if ((at + incr) > (size_t)limit) {
6230 			*totaddr = i;
6231 			break;
6232 		}
6233 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6234 	}
6235 	return ((struct sctp_tcb *)NULL);
6236 }
6237 
6238 /*
6239  * sctp_bindx(ADD) for one address.
6240  * assumes all arguments are valid/checked by caller.
6241  */
/*
 * Parameters:
 *   so/inp   - socket and its SCTP endpoint; must NOT be bound-all.
 *   sa       - address to add (AF_INET or AF_INET6); a v4-mapped v6
 *              address is converted to plain v4 first.
 *   assoc_id - 0 operates on the endpoint; non-zero (assoc-level
 *              bindx) is currently unimplemented (see FIX below).
 *   vrf_id   - routing instance used for the endpoint lookup.
 *   *error   - set to 0 on success or an errno value on failure.
 *   p        - calling proc/thread; required when this is the
 *              endpoint's very first bind.
 */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* fold a v4-mapped v6 address down to plain v4 */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		/*
		 * Endpoint not bound at all yet: this first address
		 * becomes the primary bind via sctp_inpcb_bind().
		 */
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		/* is some other endpoint already bound to this addr/port? */
		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* clear the port so mgmt matches on address alone */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6363 
6364 /*
6365  * sctp_bindx(DELETE) for one address.
6366  * assumes all arguments are valid/checked by caller.
6367  */
/*
 * Parameters:
 *   so/inp   - socket and its SCTP endpoint; must NOT be bound-all.
 *   sa       - address to delete (AF_INET or AF_INET6); a v4-mapped
 *              v6 address is converted to plain v4 first.
 *   assoc_id - 0 operates on the endpoint; non-zero (assoc-level
 *              bindx) is currently unimplemented (see FIX below).
 *   vrf_id   - routing instance passed to the address management.
 *   *error   - set to an errno value on failure (untouched on the
 *              successful endpoint-level path only if mgmt succeeds).
 */
void
sctp_bindx_delete_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* fold a v4-mapped v6 address down to plain v4 */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6446 
6447 /*
6448  * returns the valid local address count for an assoc, taking into account
6449  * all scoping rules
6450  */
/*
 * Count the local addresses usable by this association, applying the
 * association's scope flags (loopback / IPv4-private / link-local /
 * site-local) and the endpoint's address-family restrictions.  Holds
 * the global address table read lock while walking.
 */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
	int ipv4_addr_legal, ipv6_addr_legal;
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.loopback_scope;
	ipv4_local_scope = stcb->asoc.ipv4_local_scope;
	local_scope = stcb->asoc.local_scope;
	site_scope = stcb->asoc.site_scope;
	/* which families may the endpoint use at all? */
	ipv4_addr_legal = ipv6_addr_legal = 0;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) {
			/* dual-stack v6 socket may also use v4 */
			ipv4_addr_legal = 1;
		}
	} else {
		ipv4_addr_legal = 1;
	}

	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/* bad link local address */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6582 
6583 #if defined(SCTP_LOCAL_TRACE_BUF)
6584 
/*
 * Record an entry in the global circular SCTP trace log.
 *
 * A slot is claimed lock-free: the CAS loop advances the shared index
 * and retries on contention, so concurrent tracers each obtain a
 * distinct slot.  The entry itself is then filled without further
 * synchronization, so a reader can observe a partially written entry;
 * this is a debug facility and that is tolerated.
 */
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	uint32_t saveindex, newindex;

	/* atomically reserve the next slot (index wraps back to 1) */
	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		/* the index had wrapped; we own slot 0 */
		saveindex = 0;
	}
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
6610 
6611 #endif
/* We will eventually need to add support
 * for binding the ports and such here
 * so we can do full UDP tunneling. In
 * the meantime, unsupported cases return an error.
 */
6617 #include <netinet/udp.h>
6618 #include <netinet/udp_var.h>
6619 #include <sys/proc.h>
6620 #ifdef INET6
6621 #include <netinet6/sctp6_var.h>
6622 #endif
6623 
6624 static void
6625 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
6626 {
6627 	struct ip *iph;
6628 	struct mbuf *sp, *last;
6629 	struct udphdr *uhdr;
6630 	uint16_t port = 0, len;
6631 	int header_size = sizeof(struct udphdr) + sizeof(struct sctphdr);
6632 
6633 	/*
6634 	 * Split out the mbuf chain. Leave the IP header in m, place the
6635 	 * rest in the sp.
6636 	 */
6637 	if ((m->m_flags & M_PKTHDR) == 0) {
6638 		/* Can't handle one that is not a pkt hdr */
6639 		goto out;
6640 	}
6641 	/* pull the src port */
6642 	iph = mtod(m, struct ip *);
6643 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6644 
6645 	port = uhdr->uh_sport;
6646 	sp = m_split(m, off, M_DONTWAIT);
6647 	if (sp == NULL) {
6648 		/* Gak, drop packet, we can't do a split */
6649 		goto out;
6650 	}
6651 	if (sp->m_pkthdr.len < header_size) {
6652 		/* Gak, packet can't have an SCTP header in it - to small */
6653 		m_freem(sp);
6654 		goto out;
6655 	}
6656 	/* ok now pull up the UDP header and SCTP header together */
6657 	sp = m_pullup(sp, header_size);
6658 	if (sp == NULL) {
6659 		/* Gak pullup failed */
6660 		goto out;
6661 	}
6662 	/* trim out the UDP header */
6663 	m_adj(sp, sizeof(struct udphdr));
6664 
6665 	/* Now reconstruct the mbuf chain */
6666 	/* 1) find last one */
6667 	last = m;
6668 	while (last->m_next != NULL) {
6669 		last = last->m_next;
6670 	}
6671 	last->m_next = sp;
6672 	m->m_pkthdr.len += sp->m_pkthdr.len;
6673 	last = m;
6674 	while (last != NULL) {
6675 		last = last->m_next;
6676 	}
6677 	/* Now its ready for sctp_input or sctp6_input */
6678 	iph = mtod(m, struct ip *);
6679 	switch (iph->ip_v) {
6680 	case IPVERSION:
6681 		{
6682 			/* its IPv4 */
6683 			len = SCTP_GET_IPV4_LENGTH(iph);
6684 			len -= sizeof(struct udphdr);
6685 			SCTP_GET_IPV4_LENGTH(iph) = len;
6686 			sctp_input_with_port(m, off, port);
6687 			break;
6688 		}
6689 #ifdef INET6
6690 	case IPV6_VERSION >> 4:
6691 		{
6692 			/* its IPv6 - NOT supported */
6693 			goto out;
6694 			break;
6695 
6696 		}
6697 #endif
6698 	default:
6699 		{
6700 			m_freem(m);
6701 			break;
6702 		}
6703 	}
6704 	return;
6705 out:
6706 	m_freem(m);
6707 }
6708 
6709 void
6710 sctp_over_udp_stop(void)
6711 {
6712 	struct socket *sop;
6713 
6714 	/*
6715 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6716 	 * for writting!
6717 	 */
6718 	if (SCTP_BASE_INFO(udp_tun_socket) == NULL) {
6719 		/* Nothing to do */
6720 		return;
6721 	}
6722 	sop = SCTP_BASE_INFO(udp_tun_socket);
6723 	soclose(sop);
6724 	SCTP_BASE_INFO(udp_tun_socket) = NULL;
6725 }
6726 int
6727 sctp_over_udp_start(void)
6728 {
6729 	uint16_t port;
6730 	int ret;
6731 	struct sockaddr_in sin;
6732 	struct socket *sop = NULL;
6733 	struct thread *th;
6734 	struct ucred *cred;
6735 
6736 	/*
6737 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6738 	 * for writting!
6739 	 */
6740 	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
6741 	if (port == 0) {
6742 		/* Must have a port set */
6743 		return (EINVAL);
6744 	}
6745 	if (SCTP_BASE_INFO(udp_tun_socket) != NULL) {
6746 		/* Already running -- must stop first */
6747 		return (EALREADY);
6748 	}
6749 	th = curthread;
6750 	cred = th->td_ucred;
6751 	if ((ret = socreate(PF_INET, &sop,
6752 	    SOCK_DGRAM, IPPROTO_UDP, cred, th))) {
6753 		return (ret);
6754 	}
6755 	SCTP_BASE_INFO(udp_tun_socket) = sop;
6756 	/* call the special UDP hook */
6757 	ret = udp_set_kernel_tunneling(sop, sctp_recv_udp_tunneled_packet);
6758 	if (ret) {
6759 		goto exit_stage_left;
6760 	}
6761 	/* Ok we have a socket, bind it to the port */
6762 	memset(&sin, 0, sizeof(sin));
6763 	sin.sin_len = sizeof(sin);
6764 	sin.sin_family = AF_INET;
6765 	sin.sin_port = htons(port);
6766 	ret = sobind(sop, (struct sockaddr *)&sin, th);
6767 	if (ret) {
6768 		/* Close up we cant get the port */
6769 exit_stage_left:
6770 		sctp_over_udp_stop();
6771 		return (ret);
6772 	}
6773 	/*
6774 	 * Ok we should now get UDP packets directly to our input routine
6775 	 * sctp_recv_upd_tunneled_packet().
6776 	 */
6777 	return (0);
6778 }
6779