1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #ifdef __FreeBSD__
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD: head/sys/netinet/sctputil.c 280439 2015-03-24 14:51:46Z tuexen $");
36 #endif
37 
38 #include <netinet/sctp_os.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctputil.h>
41 #include <netinet/sctp_var.h>
42 #include <netinet/sctp_sysctl.h>
43 #ifdef INET6
44 #if defined(__Userspace__) || defined(__FreeBSD__)
45 #include <netinet6/sctp6_var.h>
46 #endif
47 #endif
48 #include <netinet/sctp_header.h>
49 #include <netinet/sctp_output.h>
50 #include <netinet/sctp_uio.h>
51 #include <netinet/sctp_timer.h>
#include <netinet/sctp_indata.h> /* for sctp_deliver_data() */
53 #include <netinet/sctp_auth.h>
54 #include <netinet/sctp_asconf.h>
55 #include <netinet/sctp_bsd_addr.h>
56 #if defined(__Userspace__)
57 #include <netinet/sctp_constants.h>
58 #endif
59 #if defined(__FreeBSD__)
60 #include <netinet/udp.h>
61 #include <netinet/udp_var.h>
62 #include <sys/proc.h>
63 #endif
64 
65 #if defined(__APPLE__)
66 #define APPLE_FILE_NO 8
67 #endif
68 
69 #if defined(__Windows__)
70 #if !defined(SCTP_LOCAL_TRACE_BUF)
71 #include "eventrace_netinet.h"
72 #include "sctputil.tmh" /* this is the file that will be auto generated */
73 #endif
74 #else
75 #ifndef KTR_SCTP
76 #define KTR_SCTP KTR_SUBSYS
77 #endif
78 #endif
79 
80 extern struct sctp_cc_functions sctp_cc_functions[];
81 extern struct sctp_ss_functions sctp_ss_functions[];
82 
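/*
 * The logging helpers below share a common pattern: each one packs its
 * event-specific data into the sctp_cwnd_log union and then emits the four
 * overlapping x.misc.log1..log4 words through SCTP_CTR6() into the kernel
 * trace stream (KTR_SCTP).  They are compiled in only on FreeBSD or when
 * SCTP_LOCAL_TRACE_BUF is defined.
 */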
83 void
sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
85 {
86 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
87 	struct sctp_cwnd_log sctp_clog;
88 
89 	sctp_clog.x.sb.stcb = stcb;
90 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
91 	if (stcb)
92 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
93 	else
94 		sctp_clog.x.sb.stcb_sbcc = 0;
95 	sctp_clog.x.sb.incr = incr;
96 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
97 	     SCTP_LOG_EVENT_SB,
98 	     from,
99 	     sctp_clog.x.misc.log1,
100 	     sctp_clog.x.misc.log2,
101 	     sctp_clog.x.misc.log3,
102 	     sctp_clog.x.misc.log4);
103 #endif
104 }
105 
106 void
sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
108 {
109 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
110 	struct sctp_cwnd_log sctp_clog;
111 
112 	sctp_clog.x.close.inp = (void *)inp;
113 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
114 	if (stcb) {
115 		sctp_clog.x.close.stcb = (void *)stcb;
116 		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
117 	} else {
118 		sctp_clog.x.close.stcb = 0;
119 		sctp_clog.x.close.state = 0;
120 	}
121 	sctp_clog.x.close.loc = loc;
122 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
123 	     SCTP_LOG_EVENT_CLOSE,
124 	     0,
125 	     sctp_clog.x.misc.log1,
126 	     sctp_clog.x.misc.log2,
127 	     sctp_clog.x.misc.log3,
128 	     sctp_clog.x.misc.log4);
129 #endif
130 }
131 
132 void
rto_logging(struct sctp_nets *net, int from)
134 {
135 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
136 	struct sctp_cwnd_log sctp_clog;
137 
138 	memset(&sctp_clog, 0, sizeof(sctp_clog));
139 	sctp_clog.x.rto.net = (void *) net;
140 	sctp_clog.x.rto.rtt = net->rtt / 1000;
141 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
142 	     SCTP_LOG_EVENT_RTT,
143 	     from,
144 	     sctp_clog.x.misc.log1,
145 	     sctp_clog.x.misc.log2,
146 	     sctp_clog.x.misc.log3,
147 	     sctp_clog.x.misc.log4);
148 #endif
149 }
150 
151 void
sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
153 {
154 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
155 	struct sctp_cwnd_log sctp_clog;
156 
157 	sctp_clog.x.strlog.stcb = stcb;
158 	sctp_clog.x.strlog.n_tsn = tsn;
159 	sctp_clog.x.strlog.n_sseq = sseq;
160 	sctp_clog.x.strlog.e_tsn = 0;
161 	sctp_clog.x.strlog.e_sseq = 0;
162 	sctp_clog.x.strlog.strm = stream;
163 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
164 	     SCTP_LOG_EVENT_STRM,
165 	     from,
166 	     sctp_clog.x.misc.log1,
167 	     sctp_clog.x.misc.log2,
168 	     sctp_clog.x.misc.log3,
169 	     sctp_clog.x.misc.log4);
170 #endif
171 }
172 
173 void
sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
175 {
176 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
177 	struct sctp_cwnd_log sctp_clog;
178 
179 	sctp_clog.x.nagle.stcb = (void *)stcb;
180 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
181 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
182 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
183 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
184 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
185 	     SCTP_LOG_EVENT_NAGLE,
186 	     action,
187 	     sctp_clog.x.misc.log1,
188 	     sctp_clog.x.misc.log2,
189 	     sctp_clog.x.misc.log3,
190 	     sctp_clog.x.misc.log4);
191 #endif
192 }
193 
194 void
sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
196 {
197 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
198 	struct sctp_cwnd_log sctp_clog;
199 
200 	sctp_clog.x.sack.cumack = cumack;
201 	sctp_clog.x.sack.oldcumack = old_cumack;
202 	sctp_clog.x.sack.tsn = tsn;
203 	sctp_clog.x.sack.numGaps = gaps;
204 	sctp_clog.x.sack.numDups = dups;
205 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
206 	     SCTP_LOG_EVENT_SACK,
207 	     from,
208 	     sctp_clog.x.misc.log1,
209 	     sctp_clog.x.misc.log2,
210 	     sctp_clog.x.misc.log3,
211 	     sctp_clog.x.misc.log4);
212 #endif
213 }
214 
215 void
sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
217 {
218 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
219 	struct sctp_cwnd_log sctp_clog;
220 
221 	memset(&sctp_clog, 0, sizeof(sctp_clog));
222 	sctp_clog.x.map.base = map;
223 	sctp_clog.x.map.cum = cum;
224 	sctp_clog.x.map.high = high;
225 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
226 	     SCTP_LOG_EVENT_MAP,
227 	     from,
228 	     sctp_clog.x.misc.log1,
229 	     sctp_clog.x.misc.log2,
230 	     sctp_clog.x.misc.log3,
231 	     sctp_clog.x.misc.log4);
232 #endif
233 }
234 
235 void
sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
237 {
238 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
239 	struct sctp_cwnd_log sctp_clog;
240 
241 	memset(&sctp_clog, 0, sizeof(sctp_clog));
242 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
243 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
244 	sctp_clog.x.fr.tsn = tsn;
245 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
246 	     SCTP_LOG_EVENT_FR,
247 	     from,
248 	     sctp_clog.x.misc.log1,
249 	     sctp_clog.x.misc.log2,
250 	     sctp_clog.x.misc.log3,
251 	     sctp_clog.x.misc.log4);
252 #endif
253 }
254 
255 #ifdef SCTP_MBUF_LOGGING
256 void
sctp_log_mb(struct mbuf *m, int from)
258 {
259 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
260 	struct sctp_cwnd_log sctp_clog;
261 
262 	sctp_clog.x.mb.mp = m;
263 	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
264 	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
265 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
266 	if (SCTP_BUF_IS_EXTENDED(m)) {
267 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
268 #if defined(__APPLE__)
269 		/* APPLE does not use a ref_cnt, but a forward/backward ref queue */
270 #else
271 		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
272 #endif
273 	} else {
274 		sctp_clog.x.mb.ext = 0;
275 		sctp_clog.x.mb.refcnt = 0;
276 	}
277 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
278 	     SCTP_LOG_EVENT_MBUF,
279 	     from,
280 	     sctp_clog.x.misc.log1,
281 	     sctp_clog.x.misc.log2,
282 	     sctp_clog.x.misc.log3,
283 	     sctp_clog.x.misc.log4);
284 #endif
285 }
286 
287 void
sctp_log_mbc(struct mbuf *m, int from)
289 {
290 	struct mbuf *mat;
291 
292 	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
293 		sctp_log_mb(mat, from);
294 	}
295 }
296 #endif
297 
298 void
sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
300 {
301 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
302 	struct sctp_cwnd_log sctp_clog;
303 
304 	if (control == NULL) {
305 		SCTP_PRINTF("Gak log of NULL?\n");
306 		return;
307 	}
308 	sctp_clog.x.strlog.stcb = control->stcb;
309 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
310 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
311 	sctp_clog.x.strlog.strm = control->sinfo_stream;
312 	if (poschk != NULL) {
313 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
314 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
315 	} else {
316 		sctp_clog.x.strlog.e_tsn = 0;
317 		sctp_clog.x.strlog.e_sseq = 0;
318 	}
319 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
320 	     SCTP_LOG_EVENT_STRM,
321 	     from,
322 	     sctp_clog.x.misc.log1,
323 	     sctp_clog.x.misc.log2,
324 	     sctp_clog.x.misc.log3,
325 	     sctp_clog.x.misc.log4);
326 #endif
327 }
328 
329 void
sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
331 {
332 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
333 	struct sctp_cwnd_log sctp_clog;
334 
335 	sctp_clog.x.cwnd.net = net;
336 	if (stcb->asoc.send_queue_cnt > 255)
337 		sctp_clog.x.cwnd.cnt_in_send = 255;
338 	else
339 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
340 	if (stcb->asoc.stream_queue_cnt > 255)
341 		sctp_clog.x.cwnd.cnt_in_str = 255;
342 	else
343 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
344 
345 	if (net) {
346 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
347 		sctp_clog.x.cwnd.inflight = net->flight_size;
348 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
349 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
350 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
351 	}
352 	if (SCTP_CWNDLOG_PRESEND == from) {
353 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
354 	}
355 	sctp_clog.x.cwnd.cwnd_augment = augment;
356 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
357 	     SCTP_LOG_EVENT_CWND,
358 	     from,
359 	     sctp_clog.x.misc.log1,
360 	     sctp_clog.x.misc.log2,
361 	     sctp_clog.x.misc.log3,
362 	     sctp_clog.x.misc.log4);
363 #endif
364 }
365 
366 #ifndef __APPLE__
367 void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
369 {
370 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
371 	struct sctp_cwnd_log sctp_clog;
372 
373 	memset(&sctp_clog, 0, sizeof(sctp_clog));
374 	if (inp) {
375 		sctp_clog.x.lock.sock = (void *) inp->sctp_socket;
376 
377 	} else {
378 		sctp_clog.x.lock.sock = (void *) NULL;
379 	}
380 	sctp_clog.x.lock.inp = (void *) inp;
381 #if (defined(__FreeBSD__) && __FreeBSD_version >= 503000) || (defined(__APPLE__))
382 	if (stcb) {
383 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
384 	} else {
385 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
386 	}
387 	if (inp) {
388 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
389 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
390 	} else {
391 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
392 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
393 	}
394 #if (defined(__FreeBSD__) && __FreeBSD_version <= 602000)
395 	sctp_clog.x.lock.info_lock = mtx_owned(&SCTP_BASE_INFO(ipi_ep_mtx));
396 #else
397 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
398 #endif
399 	if (inp && (inp->sctp_socket)) {
400 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
401 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
402 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
403 	} else {
404 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
405 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
406 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
407 	}
408 #endif
409 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
410 	     SCTP_LOG_LOCK_EVENT,
411 	     from,
412 	     sctp_clog.x.misc.log1,
413 	     sctp_clog.x.misc.log2,
414 	     sctp_clog.x.misc.log3,
415 	     sctp_clog.x.misc.log4);
416 #endif
417 }
418 #endif
419 
420 void
sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
422 {
423 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
424 	struct sctp_cwnd_log sctp_clog;
425 
426 	memset(&sctp_clog, 0, sizeof(sctp_clog));
427 	sctp_clog.x.cwnd.net = net;
428 	sctp_clog.x.cwnd.cwnd_new_value = error;
429 	sctp_clog.x.cwnd.inflight = net->flight_size;
430 	sctp_clog.x.cwnd.cwnd_augment = burst;
431 	if (stcb->asoc.send_queue_cnt > 255)
432 		sctp_clog.x.cwnd.cnt_in_send = 255;
433 	else
434 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
435 	if (stcb->asoc.stream_queue_cnt > 255)
436 		sctp_clog.x.cwnd.cnt_in_str = 255;
437 	else
438 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
439 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
440 	     SCTP_LOG_EVENT_MAXBURST,
441 	     from,
442 	     sctp_clog.x.misc.log1,
443 	     sctp_clog.x.misc.log2,
444 	     sctp_clog.x.misc.log3,
445 	     sctp_clog.x.misc.log4);
446 #endif
447 }
448 
449 void
sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
451 {
452 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
453 	struct sctp_cwnd_log sctp_clog;
454 
455 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
456 	sctp_clog.x.rwnd.send_size = snd_size;
457 	sctp_clog.x.rwnd.overhead = overhead;
458 	sctp_clog.x.rwnd.new_rwnd = 0;
459 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
460 	     SCTP_LOG_EVENT_RWND,
461 	     from,
462 	     sctp_clog.x.misc.log1,
463 	     sctp_clog.x.misc.log2,
464 	     sctp_clog.x.misc.log3,
465 	     sctp_clog.x.misc.log4);
466 #endif
467 }
468 
469 void
sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
471 {
472 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
473 	struct sctp_cwnd_log sctp_clog;
474 
475 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
476 	sctp_clog.x.rwnd.send_size = flight_size;
477 	sctp_clog.x.rwnd.overhead = overhead;
478 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
479 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
480 	     SCTP_LOG_EVENT_RWND,
481 	     from,
482 	     sctp_clog.x.misc.log1,
483 	     sctp_clog.x.misc.log2,
484 	     sctp_clog.x.misc.log3,
485 	     sctp_clog.x.misc.log4);
486 #endif
487 }
488 
489 #ifdef SCTP_MBCNT_LOGGING
490 static void
sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
492 {
493 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
494 	struct sctp_cwnd_log sctp_clog;
495 
496 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
497 	sctp_clog.x.mbcnt.size_change = book;
498 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
499 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
500 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
501 	     SCTP_LOG_EVENT_MBCNT,
502 	     from,
503 	     sctp_clog.x.misc.log1,
504 	     sctp_clog.x.misc.log2,
505 	     sctp_clog.x.misc.log3,
506 	     sctp_clog.x.misc.log4);
507 #endif
508 }
509 #endif
510 
511 void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
513 {
514 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
515 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
516 	     SCTP_LOG_MISC_EVENT,
517 	     from,
518 	     a, b, c, d);
519 #endif
520 }
521 
522 void
sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
524 {
525 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
526 	struct sctp_cwnd_log sctp_clog;
527 
528 	sctp_clog.x.wake.stcb = (void *)stcb;
529 	sctp_clog.x.wake.wake_cnt = wake_cnt;
530 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
531 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
532 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
533 
534 	if (stcb->asoc.stream_queue_cnt < 0xff)
535 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
536 	else
537 		sctp_clog.x.wake.stream_qcnt = 0xff;
538 
539 	if (stcb->asoc.chunks_on_out_queue < 0xff)
540 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
541 	else
542 		sctp_clog.x.wake.chunks_on_oque = 0xff;
543 
544 	sctp_clog.x.wake.sctpflags = 0;
	/* set in the deferred mode stuff */
546 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
547 		sctp_clog.x.wake.sctpflags |= 1;
548 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
549 		sctp_clog.x.wake.sctpflags |= 2;
550 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
551 		sctp_clog.x.wake.sctpflags |= 4;
552 	/* what about the sb */
553 	if (stcb->sctp_socket) {
554 		struct socket *so = stcb->sctp_socket;
555 
556 		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
557 	} else {
558 		sctp_clog.x.wake.sbflags = 0xff;
559 	}
560 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
561 	     SCTP_LOG_EVENT_WAKE,
562 	     from,
563 	     sctp_clog.x.misc.log1,
564 	     sctp_clog.x.misc.log2,
565 	     sctp_clog.x.misc.log3,
566 	     sctp_clog.x.misc.log4);
567 #endif
568 }
569 
570 void
sctp_log_block(uint8_t from, struct sctp_association *asoc, int sendlen)
572 {
573 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
574 	struct sctp_cwnd_log sctp_clog;
575 
576 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
577 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
578 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
579 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
580 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
581 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight/1024);
582 	sctp_clog.x.blk.sndlen = sendlen;
583 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
584 	     SCTP_LOG_EVENT_BLOCK,
585 	     from,
586 	     sctp_clog.x.misc.log1,
587 	     sctp_clog.x.misc.log2,
588 	     sctp_clog.x.misc.log3,
589 	     sctp_clog.x.misc.log4);
590 #endif
591 }
592 
593 int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
595 {
596 	/* May need to fix this if ktrdump does not work */
597 	return (0);
598 }
599 
600 #ifdef SCTP_AUDITING_ENABLED
601 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
602 static int sctp_audit_indx = 0;
603 
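/*
 * Dump the circular audit buffer, oldest entries first (starting at
 * sctp_audit_indx and wrapping around), printing 14 byte-pairs per line and
 * forcing a line break at the 0xe0/0x01, 0xf0 and 0xc0/0x01 markers.
 */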
604 static
605 void
sctp_print_audit_report(void)
607 {
608 	int i;
609 	int cnt;
610 
611 	cnt = 0;
612 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
613 		if ((sctp_audit_data[i][0] == 0xe0) &&
614 		    (sctp_audit_data[i][1] == 0x01)) {
615 			cnt = 0;
616 			SCTP_PRINTF("\n");
617 		} else if (sctp_audit_data[i][0] == 0xf0) {
618 			cnt = 0;
619 			SCTP_PRINTF("\n");
620 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
621 		    (sctp_audit_data[i][1] == 0x01)) {
622 			SCTP_PRINTF("\n");
623 			cnt = 0;
624 		}
625 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
626 			    (uint32_t) sctp_audit_data[i][1]);
627 		cnt++;
628 		if ((cnt % 14) == 0)
629 			SCTP_PRINTF("\n");
630 	}
631 	for (i = 0; i < sctp_audit_indx; i++) {
632 		if ((sctp_audit_data[i][0] == 0xe0) &&
633 		    (sctp_audit_data[i][1] == 0x01)) {
634 			cnt = 0;
635 			SCTP_PRINTF("\n");
636 		} else if (sctp_audit_data[i][0] == 0xf0) {
637 			cnt = 0;
638 			SCTP_PRINTF("\n");
639 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
640 		    (sctp_audit_data[i][1] == 0x01)) {
641 			SCTP_PRINTF("\n");
642 			cnt = 0;
643 		}
644 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
645 			    (uint32_t) sctp_audit_data[i][1]);
646 		cnt++;
647 		if ((cnt % 14) == 0)
648 			SCTP_PRINTF("\n");
649 	}
650 	SCTP_PRINTF("\n");
651 }
652 
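/*
 * Cross-check the association's retransmit count and flight-size bookkeeping
 * against the sent queue and the per-net flight sizes, recording each step in
 * the audit ring.  Any mismatch is corrected in place, reported via
 * SCTP_PRINTF(), and triggers a full audit report.
 */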
653 void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
655     struct sctp_nets *net)
656 {
657 	int resend_cnt, tot_out, rep, tot_book_cnt;
658 	struct sctp_nets *lnet;
659 	struct sctp_tmit_chunk *chk;
660 
661 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
662 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
663 	sctp_audit_indx++;
664 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
665 		sctp_audit_indx = 0;
666 	}
667 	if (inp == NULL) {
668 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
669 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
670 		sctp_audit_indx++;
671 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
672 			sctp_audit_indx = 0;
673 		}
674 		return;
675 	}
676 	if (stcb == NULL) {
677 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
678 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
679 		sctp_audit_indx++;
680 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
681 			sctp_audit_indx = 0;
682 		}
683 		return;
684 	}
685 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
686 	sctp_audit_data[sctp_audit_indx][1] =
687 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
688 	sctp_audit_indx++;
689 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
690 		sctp_audit_indx = 0;
691 	}
692 	rep = 0;
693 	tot_book_cnt = 0;
694 	resend_cnt = tot_out = 0;
695 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
696 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
697 			resend_cnt++;
698 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
699 			tot_out += chk->book_size;
700 			tot_book_cnt++;
701 		}
702 	}
703 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
704 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
705 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
706 		sctp_audit_indx++;
707 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
708 			sctp_audit_indx = 0;
709 		}
710 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
711 			    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
712 		rep = 1;
713 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
714 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
715 		sctp_audit_data[sctp_audit_indx][1] =
716 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
717 		sctp_audit_indx++;
718 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
719 			sctp_audit_indx = 0;
720 		}
721 	}
722 	if (tot_out != stcb->asoc.total_flight) {
723 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
724 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
725 		sctp_audit_indx++;
726 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
727 			sctp_audit_indx = 0;
728 		}
729 		rep = 1;
730 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
731 			    (int)stcb->asoc.total_flight);
732 		stcb->asoc.total_flight = tot_out;
733 	}
734 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
735 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
736 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
737 		sctp_audit_indx++;
738 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
739 			sctp_audit_indx = 0;
740 		}
741 		rep = 1;
742 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
743 
744 		stcb->asoc.total_flight_count = tot_book_cnt;
745 	}
746 	tot_out = 0;
747 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
748 		tot_out += lnet->flight_size;
749 	}
750 	if (tot_out != stcb->asoc.total_flight) {
751 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
752 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
753 		sctp_audit_indx++;
754 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
755 			sctp_audit_indx = 0;
756 		}
757 		rep = 1;
758 		SCTP_PRINTF("real flight:%d net total was %d\n",
759 			    stcb->asoc.total_flight, tot_out);
760 		/* now corrective action */
761 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
762 
763 			tot_out = 0;
764 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
765 				if ((chk->whoTo == lnet) &&
766 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
767 					tot_out += chk->book_size;
768 				}
769 			}
770 			if (lnet->flight_size != tot_out) {
771 				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
772 					    (void *)lnet, lnet->flight_size,
773 					    tot_out);
774 				lnet->flight_size = tot_out;
775 			}
776 		}
777 	}
778 	if (rep) {
779 		sctp_print_audit_report();
780 	}
781 }
782 
783 void
sctp_audit_log(uint8_t ev, uint8_t fd)
785 {
786 
787 	sctp_audit_data[sctp_audit_indx][0] = ev;
788 	sctp_audit_data[sctp_audit_indx][1] = fd;
789 	sctp_audit_indx++;
790 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
791 		sctp_audit_indx = 0;
792 	}
793 }
794 
795 #endif
796 
797 /*
798  * sctp_stop_timers_for_shutdown() should be called
799  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
800  * state to make sure that all timers are stopped.
801  */
802 void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
804 {
805 	struct sctp_association *asoc;
806 	struct sctp_nets *net;
807 
808 	asoc = &stcb->asoc;
809 
810 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
811 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
812 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
813 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
814 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
815 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
816 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
817 		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
818 	}
819 }
820 
/*
 * A list of sizes based on typical MTUs, used only if the next-hop size is
 * not returned.
 */
825 static uint32_t sctp_mtu_sizes[] = {
826 	68,
827 	296,
828 	508,
829 	512,
830 	544,
831 	576,
832 	1006,
833 	1492,
834 	1500,
835 	1536,
836 	2002,
837 	2048,
838 	4352,
839 	4464,
840 	8166,
841 	17914,
842 	32000,
843 	65535
844 };
845 
846 /*
847  * Return the largest MTU smaller than val. If there is no
848  * entry, just return val.
849  */
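/*
 * For example, sctp_get_prev_mtu(1400) returns 1006, while
 * sctp_get_prev_mtu(68) returns 68 unchanged since no smaller table entry
 * exists.
 */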
850 uint32_t
sctp_get_prev_mtu(uint32_t val)
852 {
853 	uint32_t i;
854 
855 	if (val <= sctp_mtu_sizes[0]) {
856 		return (val);
857 	}
858 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
859 		if (val <= sctp_mtu_sizes[i]) {
860 			break;
861 		}
862 	}
863 	return (sctp_mtu_sizes[i - 1]);
864 }
865 
866 /*
867  * Return the smallest MTU larger than val. If there is no
868  * entry, just return val.
869  */
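/*
 * For example, sctp_get_next_mtu(1400) returns 1492, while
 * sctp_get_next_mtu(65535) returns 65535 unchanged because no larger table
 * entry exists.
 */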
870 uint32_t
sctp_get_next_mtu(uint32_t val)
872 {
873 	/* select another MTU that is just bigger than this one */
874 	uint32_t i;
875 
876 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
877 		if (val < sctp_mtu_sizes[i]) {
878 			return (sctp_mtu_sizes[i]);
879 		}
880 	}
881 	return (val);
882 }
883 
884 void
sctp_fill_random_store(struct sctp_pcb *m)
886 {
	/*
	 * Here we use MD5/SHA-1 to hash with our good random numbers and
	 * our counter. The result becomes our good random numbers and we
	 * then set up to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbledygook in the random store, which is what we
	 * want. There is a danger that two callers will use the same random
	 * numbers, but that's ok too since that is random as well :->
	 */
896 	m->store_at = 0;
897 	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
898 	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
899 	    sizeof(m->random_counter), (uint8_t *)m->random_store);
900 	m->random_counter++;
901 }
902 
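/*
 * Draw the next 32-bit value from the random store.  Callers reserve a
 * 4-byte slot lock-free via atomic_cmpset_int() on store_at; when the index
 * wraps back to 0 the store is refilled by sctp_fill_random_store().  A
 * non-zero initial_sequence_debug value overrides this and hands out a
 * simple counter for debugging.
 */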
903 uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
905 {
	/*
	 * A true implementation should use a random selection process to
	 * get the initial TSN, using RFC 1750 as a good guideline.
	 */
911 	uint32_t x, *xp;
912 	uint8_t *p;
913 	int store_at, new_store;
914 
915 	if (inp->initial_sequence_debug != 0) {
916 		uint32_t ret;
917 
918 		ret = inp->initial_sequence_debug;
919 		inp->initial_sequence_debug++;
920 		return (ret);
921 	}
922  retry:
923 	store_at = inp->store_at;
924 	new_store = store_at + sizeof(uint32_t);
925 	if (new_store >= (SCTP_SIGNATURE_SIZE-3)) {
926 		new_store = 0;
927 	}
928 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
929 		goto retry;
930 	}
931 	if (new_store == 0) {
932 		/* Refill the random store */
933 		sctp_fill_random_store(inp);
934 	}
935 	p = &inp->random_store[store_at];
936 	xp = (uint32_t *)p;
937 	x = *xp;
938 	return (x);
939 }
940 
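/*
 * Pick a verification tag: keep drawing values from the random store until
 * we get one that is non-zero and, when 'check' is set, is not already in
 * use for this lport/rport pair according to sctp_is_vtag_good().
 */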
941 uint32_t
sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
943 {
944 	uint32_t x;
945 	struct timeval now;
946 
947 	if (check) {
948 		(void)SCTP_GETTIME_TIMEVAL(&now);
949 	}
950 	for (;;) {
951 		x = sctp_select_initial_TSN(&inp->sctp_ep);
952 		if (x == 0) {
953 			/* we never use 0 */
954 			continue;
955 		}
956 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
957 			break;
958 		}
959 	}
960 	return (x);
961 }
962 
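/*
 * Initialize a freshly allocated association from the endpoint's defaults:
 * supported extensions, timer and RTO parameters, verification tags, initial
 * TSNs and the address scoping rules.  The per-stream output array and the
 * two TSN mapping arrays are allocated here; ENOMEM is returned (and any
 * partial allocations freed) on failure.
 */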
963 int
sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
965                uint32_t override_tag, uint32_t vrf_id)
966 {
967 	struct sctp_association *asoc;
968 	/*
969 	 * Anything set to zero is taken care of by the allocation routine's
970 	 * bzero
971 	 */
972 
	/*
	 * Up front select what scoping to apply on addresses I tell my peer.
	 * Not sure what to do with these right now, we will need to come up
	 * with a way to set them. We may need to pass them through from the
	 * caller in the sctp_aloc_assoc() function.
	 */
979 	int i;
980 #if defined(SCTP_DETAILED_STR_STATS)
981 	int j;
982 #endif
983 
984 	asoc = &stcb->asoc;
985 	/* init all variables to a known value. */
986 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
987 	asoc->max_burst = inp->sctp_ep.max_burst;
988 	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
989 	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
990 	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
991 	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
992 	asoc->ecn_supported = inp->ecn_supported;
993 	asoc->prsctp_supported = inp->prsctp_supported;
994 	asoc->auth_supported = inp->auth_supported;
995 	asoc->asconf_supported = inp->asconf_supported;
996 	asoc->reconfig_supported = inp->reconfig_supported;
997 	asoc->nrsack_supported = inp->nrsack_supported;
998 	asoc->pktdrop_supported = inp->pktdrop_supported;
999 	asoc->sctp_cmt_pf = (uint8_t)0;
1000 	asoc->sctp_frag_point = inp->sctp_frag_point;
1001 	asoc->sctp_features = inp->sctp_features;
1002 	asoc->default_dscp = inp->sctp_ep.default_dscp;
1003 	asoc->max_cwnd = inp->max_cwnd;
1004 #ifdef INET6
1005 	if (inp->sctp_ep.default_flowlabel) {
1006 		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
1007 	} else {
1008 		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
1009 			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
1010 			asoc->default_flowlabel &= 0x000fffff;
1011 			asoc->default_flowlabel |= 0x80000000;
1012 		} else {
1013 			asoc->default_flowlabel = 0;
1014 		}
1015 	}
1016 #endif
1017 	asoc->sb_send_resv = 0;
1018 	if (override_tag) {
1019 		asoc->my_vtag = override_tag;
1020 	} else {
1021 		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport,  1);
1022 	}
1023 	/* Get the nonce tags */
1024 	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1025 	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1026 	asoc->vrf_id = vrf_id;
1027 
1028 #ifdef SCTP_ASOCLOG_OF_TSNS
1029 	asoc->tsn_in_at = 0;
1030 	asoc->tsn_out_at = 0;
1031 	asoc->tsn_in_wrapped = 0;
1032 	asoc->tsn_out_wrapped = 0;
1033 	asoc->cumack_log_at = 0;
1034 	asoc->cumack_log_atsnt = 0;
1035 #endif
1036 #ifdef SCTP_FS_SPEC_LOG
1037 	asoc->fs_index = 0;
1038 #endif
1039 	asoc->refcnt = 0;
1040 	asoc->assoc_up_sent = 0;
1041 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
1042 	    sctp_select_initial_TSN(&inp->sctp_ep);
1043 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
	/* we are optimistic here */
1045 	asoc->peer_supports_nat = 0;
1046 	asoc->sent_queue_retran_cnt = 0;
1047 
1048 	/* for CMT */
1049         asoc->last_net_cmt_send_started = NULL;
1050 
1051 	/* This will need to be adjusted */
1052 	asoc->last_acked_seq = asoc->init_seq_number - 1;
1053 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1054 	asoc->asconf_seq_in = asoc->last_acked_seq;
1055 
1056 	/* here we are different, we hold the next one we expect */
1057 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
1058 
1059 	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
1060 	asoc->initial_rto = inp->sctp_ep.initial_rto;
1061 
1062 	asoc->max_init_times = inp->sctp_ep.max_init_times;
1063 	asoc->max_send_times = inp->sctp_ep.max_send_times;
1064 	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1065 	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1066 	asoc->free_chunk_cnt = 0;
1067 
1068 	asoc->iam_blocking = 0;
1069 	asoc->context = inp->sctp_context;
1070 	asoc->local_strreset_support = inp->local_strreset_support;
1071 	asoc->def_send = inp->def_send;
1072 	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1073 	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1074 	asoc->pr_sctp_cnt = 0;
1075 	asoc->total_output_queue_size = 0;
1076 
1077 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1078 		asoc->scope.ipv6_addr_legal = 1;
1079 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1080 			asoc->scope.ipv4_addr_legal = 1;
1081 		} else {
1082 			asoc->scope.ipv4_addr_legal = 0;
1083 		}
1084 #if defined(__Userspace__)
1085 			asoc->scope.conn_addr_legal = 0;
1086 #endif
1087 	} else {
1088 		asoc->scope.ipv6_addr_legal = 0;
1089 #if defined(__Userspace__)
1090 		if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) {
1091 			asoc->scope.conn_addr_legal = 1;
1092 			asoc->scope.ipv4_addr_legal = 0;
1093 		} else {
1094 			asoc->scope.conn_addr_legal = 0;
1095 			asoc->scope.ipv4_addr_legal = 1;
1096 		}
1097 #else
1098 		asoc->scope.ipv4_addr_legal = 1;
1099 #endif
1100 	}
1101 
1102 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1103 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1104 
1105 	asoc->smallest_mtu = inp->sctp_frag_point;
1106 	asoc->minrto = inp->sctp_ep.sctp_minrto;
1107 	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1108 
1109 	asoc->locked_on_sending = NULL;
1110 	asoc->stream_locked_on = 0;
1111 	asoc->ecn_echo_cnt_onq = 0;
1112 	asoc->stream_locked = 0;
1113 
1114 	asoc->send_sack = 1;
1115 
1116 	LIST_INIT(&asoc->sctp_restricted_addrs);
1117 
1118 	TAILQ_INIT(&asoc->nets);
1119 	TAILQ_INIT(&asoc->pending_reply_queue);
1120 	TAILQ_INIT(&asoc->asconf_ack_sent);
1121 	/* Setup to fill the hb random cache at first HB */
1122 	asoc->hb_random_idx = 4;
1123 
1124 	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1125 
1126 	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1127 	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1128 
1129 	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1130 	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1131 
1132 	/*
1133 	 * Now the stream parameters, here we allocate space for all streams
1134 	 * that we request by default.
1135 	 */
1136 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1137 	    inp->sctp_ep.pre_open_stream_count;
1138 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1139 		    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1140 		    SCTP_M_STRMO);
1141 	if (asoc->strmout == NULL) {
1142 		/* big trouble no memory */
1143 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1144 		return (ENOMEM);
1145 	}
1146 	for (i = 0; i < asoc->streamoutcnt; i++) {
		/*
		 * The inbound side must be set to 0xffff. Also NOTE that
		 * when we get the INIT-ACK back (for the INIT sender) we
		 * MUST reduce the count (streamoutcnt), but first check if
		 * we sent to any of the upper streams that were dropped
		 * (if some were). Those that were dropped must be notified
		 * to the upper layer as failed to send.
		 */
1155 		asoc->strmout[i].next_sequence_send = 0x0;
1156 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1157 		asoc->strmout[i].chunks_on_queues = 0;
1158 #if defined(SCTP_DETAILED_STR_STATS)
1159 		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1160 			asoc->strmout[i].abandoned_sent[j] = 0;
1161 			asoc->strmout[i].abandoned_unsent[j] = 0;
1162 		}
1163 #else
1164 		asoc->strmout[i].abandoned_sent[0] = 0;
1165 		asoc->strmout[i].abandoned_unsent[0] = 0;
1166 #endif
1167 		asoc->strmout[i].stream_no = i;
1168 		asoc->strmout[i].last_msg_incomplete = 0;
1169 		asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
1170 	}
1171 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1172 
1173 	/* Now the mapping array */
1174 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1175 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1176 		    SCTP_M_MAP);
1177 	if (asoc->mapping_array == NULL) {
1178 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1179 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1180 		return (ENOMEM);
1181 	}
1182 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1183 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1184 	    SCTP_M_MAP);
1185 	if (asoc->nr_mapping_array == NULL) {
1186 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1187 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1188 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1189 		return (ENOMEM);
1190 	}
1191 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1192 
1193 	/* Now the init of the other outqueues */
1194 	TAILQ_INIT(&asoc->free_chunks);
1195 	TAILQ_INIT(&asoc->control_send_queue);
1196 	TAILQ_INIT(&asoc->asconf_send_queue);
1197 	TAILQ_INIT(&asoc->send_queue);
1198 	TAILQ_INIT(&asoc->sent_queue);
1199 	TAILQ_INIT(&asoc->reasmqueue);
1200 	TAILQ_INIT(&asoc->resetHead);
1201 	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1202 	TAILQ_INIT(&asoc->asconf_queue);
1203 	/* authentication fields */
1204 	asoc->authinfo.random = NULL;
1205 	asoc->authinfo.active_keyid = 0;
1206 	asoc->authinfo.assoc_key = NULL;
1207 	asoc->authinfo.assoc_keyid = 0;
1208 	asoc->authinfo.recv_key = NULL;
1209 	asoc->authinfo.recv_keyid = 0;
1210 	LIST_INIT(&asoc->shared_keys);
1211 	asoc->marked_retrans = 0;
1212 	asoc->port = inp->sctp_ep.port;
1213 	asoc->timoinit = 0;
1214 	asoc->timodata = 0;
1215 	asoc->timosack = 0;
1216 	asoc->timoshutdown = 0;
1217 	asoc->timoheartbeat = 0;
1218 	asoc->timocookie = 0;
1219 	asoc->timoshutdownack = 0;
1220 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1221 	asoc->discontinuity_time = asoc->start_time;
1222 	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1223 		asoc->abandoned_unsent[i] = 0;
1224 		asoc->abandoned_sent[i] = 0;
1225 	}
1226 	/* sa_ignore MEMLEAK {memory is put in the assoc mapping array and freed later when
1227 	 * the association is freed.
1228 	 */
1229 	return (0);
1230 }
1231 
1232 void
sctp_print_mapping_array(struct sctp_association *asoc)
1234 {
1235 	unsigned int i, limit;
1236 
1237 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1238 	            asoc->mapping_array_size,
1239 	            asoc->mapping_array_base_tsn,
1240 	            asoc->cumulative_tsn,
1241 	            asoc->highest_tsn_inside_map,
1242 	            asoc->highest_tsn_inside_nr_map);
1243 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1244 		if (asoc->mapping_array[limit - 1] != 0) {
1245 			break;
1246 		}
1247 	}
1248 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1249 	for (i = 0; i < limit; i++) {
1250 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1251 	}
1252 	if (limit % 16)
1253 		SCTP_PRINTF("\n");
1254 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1255 		if (asoc->nr_mapping_array[limit - 1]) {
1256 			break;
1257 		}
1258 	}
1259 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1260 	for (i = 0; i < limit; i++) {
1261 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ': '\n');
1262 	}
1263 	if (limit % 16)
1264 		SCTP_PRINTF("\n");
1265 }
1266 
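/*
 * Grow both TSN mapping arrays (renegable and non-renegable) together so
 * they stay the same size.  The new size adds enough bytes for 'needed'
 * additional TSN bits (rounded up to whole bytes) plus
 * SCTP_MAPPING_ARRAY_INCR slack; the old contents are copied over and the
 * old arrays freed.
 */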
1267 int
sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1269 {
1270 	/* mapping array needs to grow */
1271 	uint8_t *new_array1, *new_array2;
1272 	uint32_t new_size;
1273 
1274 	new_size = asoc->mapping_array_size + ((needed+7)/8 + SCTP_MAPPING_ARRAY_INCR);
1275 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1276 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1277 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1278 		/* can't get more, forget it */
1279 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1280 		if (new_array1) {
1281 			SCTP_FREE(new_array1, SCTP_M_MAP);
1282 		}
1283 		if (new_array2) {
1284 			SCTP_FREE(new_array2, SCTP_M_MAP);
1285 		}
1286 		return (-1);
1287 	}
1288 	memset(new_array1, 0, new_size);
1289 	memset(new_array2, 0, new_size);
1290 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1291 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1292 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1293 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1294 	asoc->mapping_array = new_array1;
1295 	asoc->nr_mapping_array = new_array2;
1296 	asoc->mapping_array_size = new_size;
1297 	return (0);
1298 }
1299 
1300 
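/*
 * Run one iterator to completion: walk every endpoint whose flags and
 * features match the request, invoke the per-endpoint and per-association
 * callbacks on associations in the desired state, and finally call the
 * at-end function before freeing the iterator.  Every
 * SCTP_ITERATOR_MAX_AT_ONCE associations the locks are dropped briefly so
 * other threads can make progress, and the control flags are re-checked for
 * stop requests.
 */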
1301 static void
sctp_iterator_work(struct sctp_iterator *it)
1303 {
1304 	int iteration_count = 0;
1305 	int inp_skip = 0;
1306 	int first_in = 1;
1307 	struct sctp_inpcb *tinp;
1308 
1309 	SCTP_INP_INFO_RLOCK();
1310 	SCTP_ITERATOR_LOCK();
1311 	sctp_it_ctl.cur_it = it;
1312 	if (it->inp) {
1313 		SCTP_INP_RLOCK(it->inp);
1314 		SCTP_INP_DECR_REF(it->inp);
1315 	}
1316 	if (it->inp == NULL) {
1317 		/* iterator is complete */
1318 done_with_iterator:
1319 		sctp_it_ctl.cur_it = NULL;
1320 		SCTP_ITERATOR_UNLOCK();
1321 		SCTP_INP_INFO_RUNLOCK();
1322 		if (it->function_atend != NULL) {
1323 			(*it->function_atend) (it->pointer, it->val);
1324 		}
1325 		SCTP_FREE(it, SCTP_M_ITER);
1326 		return;
1327 	}
1328 select_a_new_ep:
1329 	if (first_in) {
1330 		first_in = 0;
1331 	} else {
1332 		SCTP_INP_RLOCK(it->inp);
1333 	}
1334 	while (((it->pcb_flags) &&
1335 		((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
1336 	       ((it->pcb_features) &&
1337 		((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
1338 		/* endpoint flags or features don't match, so keep looking */
1339 		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1340 			SCTP_INP_RUNLOCK(it->inp);
1341 			goto done_with_iterator;
1342 		}
1343 		tinp = it->inp;
1344 		it->inp = LIST_NEXT(it->inp, sctp_list);
1345 		SCTP_INP_RUNLOCK(tinp);
1346 		if (it->inp == NULL) {
1347 			goto done_with_iterator;
1348 		}
1349 		SCTP_INP_RLOCK(it->inp);
1350 	}
1351 	/* now go through each assoc which is in the desired state */
1352 	if (it->done_current_ep == 0) {
1353 		if (it->function_inp != NULL)
1354 			inp_skip = (*it->function_inp)(it->inp, it->pointer, it->val);
1355 		it->done_current_ep = 1;
1356 	}
1357 	if (it->stcb == NULL) {
1358 		/* run the per instance function */
1359 		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
1360 	}
1361 	if ((inp_skip) || it->stcb == NULL) {
1362 		if (it->function_inp_end != NULL) {
1363 			inp_skip = (*it->function_inp_end)(it->inp,
1364 							   it->pointer,
1365 							   it->val);
1366 		}
1367 		SCTP_INP_RUNLOCK(it->inp);
1368 		goto no_stcb;
1369 	}
1370 	while (it->stcb) {
1371 		SCTP_TCB_LOCK(it->stcb);
1372 		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
1373 			/* not in the right state... keep looking */
1374 			SCTP_TCB_UNLOCK(it->stcb);
1375 			goto next_assoc;
1376 		}
1377 		/* see if we have limited out the iterator loop */
1378 		iteration_count++;
1379 		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
1380 			/* Pause to let others grab the lock */
1381 			atomic_add_int(&it->stcb->asoc.refcnt, 1);
1382 			SCTP_TCB_UNLOCK(it->stcb);
1383 			SCTP_INP_INCR_REF(it->inp);
1384 			SCTP_INP_RUNLOCK(it->inp);
1385 			SCTP_ITERATOR_UNLOCK();
1386 			SCTP_INP_INFO_RUNLOCK();
1387 			SCTP_INP_INFO_RLOCK();
1388 			SCTP_ITERATOR_LOCK();
1389 			if (sctp_it_ctl.iterator_flags) {
1390 				/* We won't be staying here */
1391 				SCTP_INP_DECR_REF(it->inp);
1392 				atomic_add_int(&it->stcb->asoc.refcnt, -1);
1393 #if !defined(__FreeBSD__)
1394 				if (sctp_it_ctl.iterator_flags &
1395 				   SCTP_ITERATOR_MUST_EXIT) {
1396 					goto done_with_iterator;
1397 				}
1398 #endif
1399 				if (sctp_it_ctl.iterator_flags &
1400 				   SCTP_ITERATOR_STOP_CUR_IT) {
1401 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
1402 					goto done_with_iterator;
1403 				}
1404 				if (sctp_it_ctl.iterator_flags &
1405 				   SCTP_ITERATOR_STOP_CUR_INP) {
1406 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
1407 					goto no_stcb;
1408 				}
1409 				/* If we reach here huh? */
1410 				SCTP_PRINTF("Unknown it ctl flag %x\n",
1411 					    sctp_it_ctl.iterator_flags);
1412 				sctp_it_ctl.iterator_flags = 0;
1413 			}
1414 			SCTP_INP_RLOCK(it->inp);
1415 			SCTP_INP_DECR_REF(it->inp);
1416 			SCTP_TCB_LOCK(it->stcb);
1417 			atomic_add_int(&it->stcb->asoc.refcnt, -1);
1418 			iteration_count = 0;
1419 		}
1420 
1421 		/* run function on this one */
1422 		(*it->function_assoc)(it->inp, it->stcb, it->pointer, it->val);
1423 
		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't affect things :-0
		 */
1428 		if (it->no_chunk_output == 0)
1429 			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1430 
1431 		SCTP_TCB_UNLOCK(it->stcb);
1432 	next_assoc:
1433 		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1434 		if (it->stcb == NULL) {
1435 			/* Run last function */
1436 			if (it->function_inp_end != NULL) {
1437 				inp_skip = (*it->function_inp_end)(it->inp,
1438 								   it->pointer,
1439 								   it->val);
1440 			}
1441 		}
1442 	}
1443 	SCTP_INP_RUNLOCK(it->inp);
1444  no_stcb:
1445 	/* done with all assocs on this endpoint, move on to next endpoint */
1446 	it->done_current_ep = 0;
1447 	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1448 		it->inp = NULL;
1449 	} else {
1450 		it->inp = LIST_NEXT(it->inp, sctp_list);
1451 	}
1452 	if (it->inp == NULL) {
1453 		goto done_with_iterator;
1454 	}
1455 	goto select_a_new_ep;
1456 }
1457 
1458 void
sctp_iterator_worker(void)
1460 {
1461 	struct sctp_iterator *it, *nit;
1462 
1463 	/* This function is called with the WQ lock in place */
1464 
1465 	sctp_it_ctl.iterator_running = 1;
1466 	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1467 		/* now lets work on this one */
1468 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1469 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1470 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1471 		CURVNET_SET(it->vn);
1472 #endif
1473 		sctp_iterator_work(it);
1474 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1475 		CURVNET_RESTORE();
1476 #endif
1477 		SCTP_IPI_ITERATOR_WQ_LOCK();
1478 #if !defined(__FreeBSD__)
1479 		if (sctp_it_ctl.iterator_flags & SCTP_ITERATOR_MUST_EXIT) {
1480 			break;
1481 		}
1482 #endif
1483 	        /*sa_ignore FREED_MEMORY*/
1484 	}
1485 	sctp_it_ctl.iterator_running = 0;
1486 	return;
1487 }
1488 
1489 
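/*
 * Drain the address work queue (filled from routing-socket address change
 * events) into an asconf iterator that visits every bound-all endpoint.  If
 * the iterator structure cannot be allocated, the ADDR_WQ timer is restarted
 * so the work is retried later.
 */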
1490 static void
sctp_handle_addr_wq(void)
1492 {
1493 	/* deal with the ADDR wq from the rtsock calls */
1494 	struct sctp_laddr *wi, *nwi;
1495 	struct sctp_asconf_iterator *asc;
1496 
1497 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1498 		    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1499 	if (asc == NULL) {
1500 		/* Try later, no memory */
1501 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1502 				 (struct sctp_inpcb *)NULL,
1503 				 (struct sctp_tcb *)NULL,
1504 				 (struct sctp_nets *)NULL);
1505 		return;
1506 	}
1507 	LIST_INIT(&asc->list_of_work);
1508 	asc->cnt = 0;
1509 
1510 	SCTP_WQ_ADDR_LOCK();
1511 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1512 		LIST_REMOVE(wi, sctp_nxt_addr);
1513 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1514 		asc->cnt++;
1515 	}
1516 	SCTP_WQ_ADDR_UNLOCK();
1517 
1518 	if (asc->cnt == 0) {
1519 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1520 	} else {
1521 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1522 					     sctp_asconf_iterator_stcb,
1523 					     NULL, /* No ep end for boundall */
1524 					     SCTP_PCB_FLAGS_BOUNDALL,
1525 					     SCTP_PCB_ANY_FEATURES,
1526 					     SCTP_ASOC_ANY_STATE,
1527 					     (void *)asc, 0,
1528 					     sctp_asconf_iterator_end, NULL, 0);
1529 	}
1530 }
1531 
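/*
 * Common callout handler for every SCTP timer type.  It first validates the
 * timer (self pointer and type), takes references on the inp and stcb while
 * recording progress in stopped_from, bails out if the callout was
 * rescheduled or deactivated in the meantime, and only then dispatches on
 * tmr->type to the specific timer action.
 */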
1532 void
sctp_timeout_handler(void *t)
1534 {
1535 	struct sctp_inpcb *inp;
1536 	struct sctp_tcb *stcb;
1537 	struct sctp_nets *net;
1538 	struct sctp_timer *tmr;
1539 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1540 	struct socket *so;
1541 #endif
1542 	int did_output, type;
1543 
1544 	tmr = (struct sctp_timer *)t;
1545 	inp = (struct sctp_inpcb *)tmr->ep;
1546 	stcb = (struct sctp_tcb *)tmr->tcb;
1547 	net = (struct sctp_nets *)tmr->net;
1548 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1549 	CURVNET_SET((struct vnet *)tmr->vnet);
1550 #endif
1551 	did_output = 1;
1552 
1553 #ifdef SCTP_AUDITING_ENABLED
1554 	sctp_audit_log(0xF0, (uint8_t) tmr->type);
1555 	sctp_auditing(3, inp, stcb, net);
1556 #endif
1557 
1558 	/* sanity checks... */
1559 	if (tmr->self != (void *)tmr) {
1560 		/*
1561 		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
1562 		 *             (void *)tmr);
1563 		 */
1564 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1565 		CURVNET_RESTORE();
1566 #endif
1567 		return;
1568 	}
1569 	tmr->stopped_from = 0xa001;
1570 	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
1571 		/*
1572 		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
1573 		 * tmr->type);
1574 		 */
1575 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1576 		CURVNET_RESTORE();
1577 #endif
1578 		return;
1579 	}
1580 	tmr->stopped_from = 0xa002;
1581 	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
1582 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1583 		CURVNET_RESTORE();
1584 #endif
1585 		return;
1586 	}
1587 	/* if this is an iterator timeout, get the struct and clear inp */
1588 	tmr->stopped_from = 0xa003;
1589 	type = tmr->type;
1590 	if (inp) {
1591 		SCTP_INP_INCR_REF(inp);
1592 		if ((inp->sctp_socket == NULL) &&
1593 		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
1594 		     (tmr->type != SCTP_TIMER_TYPE_INIT) &&
1595 		     (tmr->type != SCTP_TIMER_TYPE_SEND) &&
1596 		     (tmr->type != SCTP_TIMER_TYPE_RECV) &&
1597 		     (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
1598 		     (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
1599 		     (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
1600 		     (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
1601 		     (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
1602 			) {
1603 			SCTP_INP_DECR_REF(inp);
1604 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1605 			CURVNET_RESTORE();
1606 #endif
1607 			return;
1608 		}
1609 	}
1610 	tmr->stopped_from = 0xa004;
1611 	if (stcb) {
1612 		atomic_add_int(&stcb->asoc.refcnt, 1);
1613 		if (stcb->asoc.state == 0) {
1614 			atomic_add_int(&stcb->asoc.refcnt, -1);
1615 			if (inp) {
1616 				SCTP_INP_DECR_REF(inp);
1617 			}
1618 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1619 			CURVNET_RESTORE();
1620 #endif
1621 			return;
1622 		}
1623 	}
1624 	tmr->stopped_from = 0xa005;
1625 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
1626 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1627 		if (inp) {
1628 			SCTP_INP_DECR_REF(inp);
1629 		}
1630 		if (stcb) {
1631 			atomic_add_int(&stcb->asoc.refcnt, -1);
1632 		}
1633 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1634 		CURVNET_RESTORE();
1635 #endif
1636 		return;
1637 	}
1638 	tmr->stopped_from = 0xa006;
1639 
1640 	if (stcb) {
1641 		SCTP_TCB_LOCK(stcb);
1642 		atomic_add_int(&stcb->asoc.refcnt, -1);
1643 		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
1644 		    ((stcb->asoc.state == 0) ||
1645 		     (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1646 			SCTP_TCB_UNLOCK(stcb);
1647 			if (inp) {
1648 				SCTP_INP_DECR_REF(inp);
1649 			}
1650 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1651 			CURVNET_RESTORE();
1652 #endif
1653 			return;
1654 		}
1655 	}
1656 	/* record in stopped_from which timeout occurred */
1657 	tmr->stopped_from = tmr->type;
1658 
1659 	/* mark as being serviced now */
1660 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1661 		/*
1662 		 * Callout has been rescheduled.
1663 		 */
1664 		goto get_out;
1665 	}
1666 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1667 		/*
1668 		 * Not active, so no action.
1669 		 */
1670 		goto get_out;
1671 	}
1672 	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1673 
1674 	/* call the handler for the appropriate timer type */
1675 	switch (tmr->type) {
1676 	case SCTP_TIMER_TYPE_ZERO_COPY:
1677 		if (inp == NULL) {
1678 			break;
1679 		}
1680 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1681 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
1682 		}
1683 		break;
1684 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1685 		if (inp == NULL) {
1686 			break;
1687 		}
1688 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1689 			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
1690 		}
1691 		break;
1692 	case SCTP_TIMER_TYPE_ADDR_WQ:
1693 		sctp_handle_addr_wq();
1694 		break;
1695 	case SCTP_TIMER_TYPE_SEND:
1696 		if ((stcb == NULL) || (inp == NULL)) {
1697 			break;
1698 		}
1699 		SCTP_STAT_INCR(sctps_timodata);
1700 		stcb->asoc.timodata++;
1701 		stcb->asoc.num_send_timers_up--;
1702 		if (stcb->asoc.num_send_timers_up < 0) {
1703 			stcb->asoc.num_send_timers_up = 0;
1704 		}
1705 		SCTP_TCB_LOCK_ASSERT(stcb);
1706 		if (sctp_t3rxt_timer(inp, stcb, net)) {
1707 			/* no need to unlock the tcb, it's gone */
1708 
1709 			goto out_decr;
1710 		}
1711 		SCTP_TCB_LOCK_ASSERT(stcb);
1712 #ifdef SCTP_AUDITING_ENABLED
1713 		sctp_auditing(4, inp, stcb, net);
1714 #endif
1715 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1716 		if ((stcb->asoc.num_send_timers_up == 0) &&
1717 		    (stcb->asoc.sent_queue_cnt > 0)) {
1718 			struct sctp_tmit_chunk *chk;
1719 
1720 			/*
1721 			 * Safeguard: if there is something on the sent queue
1722 			 * but no send timer running, something is wrong, so we
1723 			 * start a timer on the first chunk of the sent queue,
1724 			 * on whatever net it was sent to.
1725 			 */
1726 			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
1727 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
1728 			    chk->whoTo);
1729 		}
1730 		break;
1731 	case SCTP_TIMER_TYPE_INIT:
1732 		if ((stcb == NULL) || (inp == NULL)) {
1733 			break;
1734 		}
1735 		SCTP_STAT_INCR(sctps_timoinit);
1736 		stcb->asoc.timoinit++;
1737 		if (sctp_t1init_timer(inp, stcb, net)) {
1738 			/* no need to unlock the tcb, it's gone */
1739 			goto out_decr;
1740 		}
1741 		/* We do output but not here */
1742 		did_output = 0;
1743 		break;
1744 	case SCTP_TIMER_TYPE_RECV:
1745 		if ((stcb == NULL) || (inp == NULL)) {
1746 			break;
1747 		}
1748 		SCTP_STAT_INCR(sctps_timosack);
1749 		stcb->asoc.timosack++;
1750 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
1751 #ifdef SCTP_AUDITING_ENABLED
1752 		sctp_auditing(4, inp, stcb, net);
1753 #endif
1754 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1755 		break;
1756 	case SCTP_TIMER_TYPE_SHUTDOWN:
1757 		if ((stcb == NULL) || (inp == NULL)) {
1758 			break;
1759 		}
1760 		if (sctp_shutdown_timer(inp, stcb, net)) {
1761 			/* no need to unlock the tcb, it's gone */
1762 			goto out_decr;
1763 		}
1764 		SCTP_STAT_INCR(sctps_timoshutdown);
1765 		stcb->asoc.timoshutdown++;
1766 #ifdef SCTP_AUDITING_ENABLED
1767 		sctp_auditing(4, inp, stcb, net);
1768 #endif
1769 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1770 		break;
1771 	case SCTP_TIMER_TYPE_HEARTBEAT:
1772 		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
1773 			break;
1774 		}
1775 		SCTP_STAT_INCR(sctps_timoheartbeat);
1776 		stcb->asoc.timoheartbeat++;
1777 		if (sctp_heartbeat_timer(inp, stcb, net)) {
1778 			/* no need to unlock the tcb, it's gone */
1779 			goto out_decr;
1780 		}
1781 #ifdef SCTP_AUDITING_ENABLED
1782 		sctp_auditing(4, inp, stcb, net);
1783 #endif
1784 		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
1785 			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
1786 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
1787 		}
1788 		break;
1789 	case SCTP_TIMER_TYPE_COOKIE:
1790 		if ((stcb == NULL) || (inp == NULL)) {
1791 			break;
1792 		}
1793 
1794 		if (sctp_cookie_timer(inp, stcb, net)) {
1795 			/* no need to unlock the tcb, it's gone */
1796 			goto out_decr;
1797 		}
1798 		SCTP_STAT_INCR(sctps_timocookie);
1799 		stcb->asoc.timocookie++;
1800 #ifdef SCTP_AUDITING_ENABLED
1801 		sctp_auditing(4, inp, stcb, net);
1802 #endif
1803 		/*
1804 		 * We consider T3 and Cookie timer pretty much the same with
1805 		 * respect to where from in chunk_output.
1806 		 */
1807 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1808 		break;
1809 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1810 		{
1811 			struct timeval tv;
1812 			int i, secret;
1813 			if (inp == NULL) {
1814 				break;
1815 			}
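			/*
			 * Rotate the cookie secret: note the time of the change,
			 * remember which secret was last in use, advance the index
			 * (wrapping at SCTP_HOW_MANY_SECRETS), and fill the new slot
			 * with fresh pseudo-random words.
			 */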
1816 			SCTP_STAT_INCR(sctps_timosecret);
1817 			(void)SCTP_GETTIME_TIMEVAL(&tv);
1818 			SCTP_INP_WLOCK(inp);
1819 			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1820 			inp->sctp_ep.last_secret_number =
1821 			    inp->sctp_ep.current_secret_number;
1822 			inp->sctp_ep.current_secret_number++;
1823 			if (inp->sctp_ep.current_secret_number >=
1824 			    SCTP_HOW_MANY_SECRETS) {
1825 				inp->sctp_ep.current_secret_number = 0;
1826 			}
1827 			secret = (int)inp->sctp_ep.current_secret_number;
1828 			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1829 				inp->sctp_ep.secret_key[secret][i] =
1830 				    sctp_select_initial_TSN(&inp->sctp_ep);
1831 			}
1832 			SCTP_INP_WUNLOCK(inp);
1833 			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
1834 		}
1835 		did_output = 0;
1836 		break;
1837 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1838 		if ((stcb == NULL) || (inp == NULL)) {
1839 			break;
1840 		}
1841 		SCTP_STAT_INCR(sctps_timopathmtu);
1842 		sctp_pathmtu_timer(inp, stcb, net);
1843 		did_output = 0;
1844 		break;
1845 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1846 		if ((stcb == NULL) || (inp == NULL)) {
1847 			break;
1848 		}
1849 		if (sctp_shutdownack_timer(inp, stcb, net)) {
1850 			/* no need to unlock the tcb, it's gone */
1851 			goto out_decr;
1852 		}
1853 		SCTP_STAT_INCR(sctps_timoshutdownack);
1854 		stcb->asoc.timoshutdownack++;
1855 #ifdef SCTP_AUDITING_ENABLED
1856 		sctp_auditing(4, inp, stcb, net);
1857 #endif
1858 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
1859 		break;
1860 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1861 		if ((stcb == NULL) || (inp == NULL)) {
1862 			break;
1863 		}
1864 		SCTP_STAT_INCR(sctps_timoshutdownguard);
1865 		sctp_abort_an_association(inp, stcb, NULL, SCTP_SO_NOT_LOCKED);
1866 		/* no need to unlock the tcb, it's gone */
1867 		goto out_decr;
1868 
1869 	case SCTP_TIMER_TYPE_STRRESET:
1870 		if ((stcb == NULL) || (inp == NULL)) {
1871 			break;
1872 		}
1873 		if (sctp_strreset_timer(inp, stcb, net)) {
1874 			/* no need to unlock the tcb, it's gone */
1875 			goto out_decr;
1876 		}
1877 		SCTP_STAT_INCR(sctps_timostrmrst);
1878 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
1879 		break;
1880 	case SCTP_TIMER_TYPE_ASCONF:
1881 		if ((stcb == NULL) || (inp == NULL)) {
1882 			break;
1883 		}
1884 		if (sctp_asconf_timer(inp, stcb, net)) {
1885 			/* no need to unlock the tcb, it's gone */
1886 			goto out_decr;
1887 		}
1888 		SCTP_STAT_INCR(sctps_timoasconf);
1889 #ifdef SCTP_AUDITING_ENABLED
1890 		sctp_auditing(4, inp, stcb, net);
1891 #endif
1892 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
1893 		break;
1894 	case SCTP_TIMER_TYPE_PRIM_DELETED:
1895 		if ((stcb == NULL) || (inp == NULL)) {
1896 			break;
1897 		}
1898 		sctp_delete_prim_timer(inp, stcb, net);
1899 		SCTP_STAT_INCR(sctps_timodelprim);
1900 		break;
1901 
1902 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1903 		if ((stcb == NULL) || (inp == NULL)) {
1904 			break;
1905 		}
1906 		SCTP_STAT_INCR(sctps_timoautoclose);
1907 		sctp_autoclose_timer(inp, stcb, net);
1908 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
1909 		did_output = 0;
1910 		break;
1911 	case SCTP_TIMER_TYPE_ASOCKILL:
1912 		if ((stcb == NULL) || (inp == NULL)) {
1913 			break;
1914 		}
1915 		SCTP_STAT_INCR(sctps_timoassockill);
1916 		/* Can we free it yet? */
1917 		SCTP_INP_DECR_REF(inp);
1918 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL+SCTP_LOC_1);
1919 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
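		/*
		 * Hold an extra reference while the TCB lock is dropped,
		 * apparently so the socket lock can be taken before the TCB
		 * lock on these platforms without the assoc disappearing
		 * underneath us.
		 */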
1920 		so = SCTP_INP_SO(inp);
1921 		atomic_add_int(&stcb->asoc.refcnt, 1);
1922 		SCTP_TCB_UNLOCK(stcb);
1923 		SCTP_SOCKET_LOCK(so, 1);
1924 		SCTP_TCB_LOCK(stcb);
1925 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1926 #endif
1927 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL+SCTP_LOC_2);
1928 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1929 		SCTP_SOCKET_UNLOCK(so, 1);
1930 #endif
1931 		/*
1932 		 * sctp_free_assoc() always unlocks (or destroys) the lock, so
1933 		 * clear stcb to prevent a duplicate unlock or an unlock of a freed mutex.
1934 		 */
1935 		stcb = NULL;
1936 		goto out_no_decr;
1937 	case SCTP_TIMER_TYPE_INPKILL:
1938 		SCTP_STAT_INCR(sctps_timoinpkill);
1939 		if (inp == NULL) {
1940 			break;
1941 		}
1942 		/*
1943 		 * special case, take away our increment since WE are the
1944 		 * killer
1945 		 */
1946 		SCTP_INP_DECR_REF(inp);
1947 		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL+SCTP_LOC_3);
1948 #if defined(__APPLE__)
1949 		SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 1);
1950 #endif
1951 		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
1952 				SCTP_CALLED_FROM_INPKILL_TIMER);
1953 #if defined(__APPLE__)
1954 		SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 1);
1955 #endif
1956 		inp = NULL;
1957 		goto out_no_decr;
1958 	default:
1959 		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
1960 			tmr->type);
1961 		break;
1962 	}
1963 #ifdef SCTP_AUDITING_ENABLED
1964 	sctp_audit_log(0xF1, (uint8_t) tmr->type);
1965 	if (inp)
1966 		sctp_auditing(5, inp, stcb, net);
1967 #endif
1968 	if ((did_output) && stcb) {
1969 		/*
1970 		 * Now we need to clean up the control chunk chain if an
1971 		 * ECNE is on it. It must be marked as UNSENT again so the next
1972 		 * call will continue to send it until we get a CWR to remove
1973 		 * it. It is, however, unlikely that we will find an ECN echo
1974 		 * on the chain.
1975 		 */
1976 		sctp_fix_ecn_echo(&stcb->asoc);
1977 	}
1978 get_out:
1979 	if (stcb) {
1980 		SCTP_TCB_UNLOCK(stcb);
1981 	}
1982 
1983 out_decr:
1984 	if (inp) {
1985 		SCTP_INP_DECR_REF(inp);
1986 	}
1987 
1988 out_no_decr:
1989 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
1990 			  type);
1991 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1992 	CURVNET_RESTORE();
1993 #endif
1994 }
1995 
1996 void
1997 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1998     struct sctp_nets *net)
1999 {
2000 	uint32_t to_ticks;
2001 	struct sctp_timer *tmr;
2002 
2003 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
2004 		return;
2005 
2006 	tmr = NULL;
2007 	if (stcb) {
2008 		SCTP_TCB_LOCK_ASSERT(stcb);
2009 	}
2010 	switch (t_type) {
2011 	case SCTP_TIMER_TYPE_ZERO_COPY:
2012 		tmr = &inp->sctp_ep.zero_copy_timer;
2013 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
2014 		break;
2015 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
2016 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
2017 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
2018 		break;
2019 	case SCTP_TIMER_TYPE_ADDR_WQ:
2020 		/* Only 1 tick away :-) */
2021 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2022 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
2023 		break;
2024 	case SCTP_TIMER_TYPE_SEND:
2025 		/* Here we use the RTO timer */
2026 		{
2027 			int rto_val;
2028 
2029 			if ((stcb == NULL) || (net == NULL)) {
2030 				return;
2031 			}
2032 			tmr = &net->rxt_timer;
2033 			if (net->RTO == 0) {
2034 				rto_val = stcb->asoc.initial_rto;
2035 			} else {
2036 				rto_val = net->RTO;
2037 			}
2038 			to_ticks = MSEC_TO_TICKS(rto_val);
2039 		}
2040 		break;
2041 	case SCTP_TIMER_TYPE_INIT:
2042 		/*
2043 		 * Here we use the INIT timer default usually about 1
2044 		 * minute.
2045 		 */
2046 		if ((stcb == NULL) || (net == NULL)) {
2047 			return;
2048 		}
2049 		tmr = &net->rxt_timer;
2050 		if (net->RTO == 0) {
2051 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2052 		} else {
2053 			to_ticks = MSEC_TO_TICKS(net->RTO);
2054 		}
2055 		break;
2056 	case SCTP_TIMER_TYPE_RECV:
2057 		/*
2058 		 * Here we use the Delayed-Ack timer value from the inp,
2059 		 * usually about 200ms.
2060 		 */
2061 		if (stcb == NULL) {
2062 			return;
2063 		}
2064 		tmr = &stcb->asoc.dack_timer;
2065 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
2066 		break;
2067 	case SCTP_TIMER_TYPE_SHUTDOWN:
2068 		/* Here we use the RTO of the destination. */
2069 		if ((stcb == NULL) || (net == NULL)) {
2070 			return;
2071 		}
2072 		if (net->RTO == 0) {
2073 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2074 		} else {
2075 			to_ticks = MSEC_TO_TICKS(net->RTO);
2076 		}
2077 		tmr = &net->rxt_timer;
2078 		break;
2079 	case SCTP_TIMER_TYPE_HEARTBEAT:
2080 		/*
2081 		 * The net is used here so that we can add in the RTO, even
2082 		 * though we use a different timer. We also add in the HB delay
2083 		 * PLUS a random jitter.
2084 		 */
2085 		if ((stcb == NULL) || (net == NULL)) {
2086 			return;
2087 		} else {
2088 			uint32_t rndval;
2089 			uint32_t jitter;
2090 
2091 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
2092 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
2093 				return;
2094 			}
2095 			if (net->RTO == 0) {
2096 				to_ticks = stcb->asoc.initial_rto;
2097 			} else {
2098 				to_ticks = net->RTO;
2099 			}
2100 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2101 			jitter = rndval % to_ticks;
2102 			if (jitter >= (to_ticks >> 1)) {
2103 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
2104 			} else {
2105 				to_ticks = to_ticks - jitter;
2106 			}
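			/*
			 * The net effect is a roughly uniform spread of +/- 50%
			 * around the RTO: e.g. with an RTO of 1000 ms and jitter
			 * 700, we get 1000 + (700 - 500) = 1200 ms; with jitter
			 * 300, we get 1000 - 300 = 700 ms.
			 */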
2107 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2108 			    !(net->dest_state & SCTP_ADDR_PF)) {
2109 				to_ticks += net->heart_beat_delay;
2110 			}
2111 			/*
2112 			 * to_ticks currently holds milliseconds; convert it
2113 			 * to ticks now.
2114 			 */
2115 			to_ticks = MSEC_TO_TICKS(to_ticks);
2116 			tmr = &net->hb_timer;
2117 		}
2118 		break;
2119 	case SCTP_TIMER_TYPE_COOKIE:
2120 		/*
2121 		 * Here we can use the RTO from the network since one
2122 		 * RTT was complete. If a retransmission happened, then we will
2123 		 * be using the initial RTO value.
2124 		 */
2125 		if ((stcb == NULL) || (net == NULL)) {
2126 			return;
2127 		}
2128 		if (net->RTO == 0) {
2129 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2130 		} else {
2131 			to_ticks = MSEC_TO_TICKS(net->RTO);
2132 		}
2133 		tmr = &net->rxt_timer;
2134 		break;
2135 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2136 		/*
2137 		 * nothing needed but the endpoint here, usually about 60
2138 		 * minutes.
2139 		 */
2140 		tmr = &inp->sctp_ep.signature_change;
2141 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2142 		break;
2143 	case SCTP_TIMER_TYPE_ASOCKILL:
2144 		if (stcb == NULL) {
2145 			return;
2146 		}
2147 		tmr = &stcb->asoc.strreset_timer;
2148 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2149 		break;
2150 	case SCTP_TIMER_TYPE_INPKILL:
2151 		/*
2152 		 * The inp is set up to die. We re-use the signature_change
2153 		 * timer since that has stopped and we are in the GONE
2154 		 * state.
2155 		 */
2156 		tmr = &inp->sctp_ep.signature_change;
2157 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2158 		break;
2159 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2160 		/*
2161 		 * Here we use the value found in the EP for PMTU, usually
2162 		 * about 10 minutes.
2163 		 */
2164 		if ((stcb == NULL) || (net == NULL)) {
2165 			return;
2166 		}
2167 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2168 			return;
2169 		}
2170 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2171 		tmr = &net->pmtu_timer;
2172 		break;
2173 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2174 		/* Here we use the RTO of the destination */
2175 		if ((stcb == NULL) || (net == NULL)) {
2176 			return;
2177 		}
2178 		if (net->RTO == 0) {
2179 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2180 		} else {
2181 			to_ticks = MSEC_TO_TICKS(net->RTO);
2182 		}
2183 		tmr = &net->rxt_timer;
2184 		break;
2185 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2186 		/*
2187 		 * Here we use the endpoint's shutdown guard timer, usually
2188 		 * about 3 minutes.
2189 		 */
2190 		if (stcb == NULL) {
2191 			return;
2192 		}
2193 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2194 		tmr = &stcb->asoc.shut_guard_timer;
2195 		break;
2196 	case SCTP_TIMER_TYPE_STRRESET:
2197 		/*
2198 		 * Here the timer comes from the stcb but its value is from
2199 		 * the net's RTO.
2200 		 */
2201 		if ((stcb == NULL) || (net == NULL)) {
2202 			return;
2203 		}
2204 		if (net->RTO == 0) {
2205 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2206 		} else {
2207 			to_ticks = MSEC_TO_TICKS(net->RTO);
2208 		}
2209 		tmr = &stcb->asoc.strreset_timer;
2210 		break;
2211 	case SCTP_TIMER_TYPE_ASCONF:
2212 		/*
2213 		 * Here the timer comes from the stcb but its value is from
2214 		 * the net's RTO.
2215 		 */
2216 		if ((stcb == NULL) || (net == NULL)) {
2217 			return;
2218 		}
2219 		if (net->RTO == 0) {
2220 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2221 		} else {
2222 			to_ticks = MSEC_TO_TICKS(net->RTO);
2223 		}
2224 		tmr = &stcb->asoc.asconf_timer;
2225 		break;
2226 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2227 		if ((stcb == NULL) || (net != NULL)) {
2228 			return;
2229 		}
2230 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2231 		tmr = &stcb->asoc.delete_prim_timer;
2232 		break;
2233 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2234 		if (stcb == NULL) {
2235 			return;
2236 		}
2237 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2238 			/*
2239 			 * Really an error since stcb is NOT set to
2240 			 * autoclose
2241 			 */
2242 			return;
2243 		}
2244 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2245 		tmr = &stcb->asoc.autoclose_timer;
2246 		break;
2247 	default:
2248 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2249 			__FUNCTION__, t_type);
2250 		return;
2251 		break;
2252 	}
2253 	if ((to_ticks <= 0) || (tmr == NULL)) {
2254 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2255 			__FUNCTION__, t_type, to_ticks, (void *)tmr);
2256 		return;
2257 	}
2258 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2259 		/*
2260 		 * we do NOT allow the timer to already be running; if it is,
2261 		 * we leave the current one up unchanged
2262 		 */
2263 		return;
2264 	}
2265 	/* At this point we can proceed */
2266 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2267 		stcb->asoc.num_send_timers_up++;
2268 	}
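	/*
	 * Record who owns this timer; tmr->self is what the timeout handler
	 * checks to detect a stale timer (sctp_timer_stop() clears it).
	 */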
2269 	tmr->stopped_from = 0;
2270 	tmr->type = t_type;
2271 	tmr->ep = (void *)inp;
2272 	tmr->tcb = (void *)stcb;
2273 	tmr->net = (void *)net;
2274 	tmr->self = (void *)tmr;
2275 #if defined(__FreeBSD__) && __FreeBSD_version >= 800000
2276 	tmr->vnet = (void *)curvnet;
2277 #endif
2278 #ifndef __Panda__
2279 	tmr->ticks = sctp_get_tick_count();
2280 #endif
2281 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2282 	return;
2283 }
2284 
2285 void
2286 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2287     struct sctp_nets *net, uint32_t from)
2288 {
2289 	struct sctp_timer *tmr;
2290 
2291 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2292 	    (inp == NULL))
2293 		return;
2294 
2295 	tmr = NULL;
2296 	if (stcb) {
2297 		SCTP_TCB_LOCK_ASSERT(stcb);
2298 	}
2299 	switch (t_type) {
2300 	case SCTP_TIMER_TYPE_ZERO_COPY:
2301 		tmr = &inp->sctp_ep.zero_copy_timer;
2302 		break;
2303 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
2304 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
2305 		break;
2306 	case SCTP_TIMER_TYPE_ADDR_WQ:
2307 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2308 		break;
2309 	case SCTP_TIMER_TYPE_SEND:
2310 		if ((stcb == NULL) || (net == NULL)) {
2311 			return;
2312 		}
2313 		tmr = &net->rxt_timer;
2314 		break;
2315 	case SCTP_TIMER_TYPE_INIT:
2316 		if ((stcb == NULL) || (net == NULL)) {
2317 			return;
2318 		}
2319 		tmr = &net->rxt_timer;
2320 		break;
2321 	case SCTP_TIMER_TYPE_RECV:
2322 		if (stcb == NULL) {
2323 			return;
2324 		}
2325 		tmr = &stcb->asoc.dack_timer;
2326 		break;
2327 	case SCTP_TIMER_TYPE_SHUTDOWN:
2328 		if ((stcb == NULL) || (net == NULL)) {
2329 			return;
2330 		}
2331 		tmr = &net->rxt_timer;
2332 		break;
2333 	case SCTP_TIMER_TYPE_HEARTBEAT:
2334 		if ((stcb == NULL) || (net == NULL)) {
2335 			return;
2336 		}
2337 		tmr = &net->hb_timer;
2338 		break;
2339 	case SCTP_TIMER_TYPE_COOKIE:
2340 		if ((stcb == NULL) || (net == NULL)) {
2341 			return;
2342 		}
2343 		tmr = &net->rxt_timer;
2344 		break;
2345 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2346 		/* nothing needed but the endpoint here */
2347 		tmr = &inp->sctp_ep.signature_change;
2348 		/*
2349 		 * We re-use the newcookie timer for the INP kill timer. We
2350 		 * must ensure that we do not kill it by accident.
2351 		 */
2352 		break;
2353 	case SCTP_TIMER_TYPE_ASOCKILL:
2354 		/*
2355 		 * Stop the asoc kill timer.
2356 		 */
2357 		if (stcb == NULL) {
2358 			return;
2359 		}
2360 		tmr = &stcb->asoc.strreset_timer;
2361 		break;
2362 
2363 	case SCTP_TIMER_TYPE_INPKILL:
2364 		/*
2365 		 * The inp is set up to die. We re-use the signature_change
2366 		 * timer since that has stopped and we are in the GONE
2367 		 * state.
2368 		 */
2369 		tmr = &inp->sctp_ep.signature_change;
2370 		break;
2371 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2372 		if ((stcb == NULL) || (net == NULL)) {
2373 			return;
2374 		}
2375 		tmr = &net->pmtu_timer;
2376 		break;
2377 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2378 		if ((stcb == NULL) || (net == NULL)) {
2379 			return;
2380 		}
2381 		tmr = &net->rxt_timer;
2382 		break;
2383 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2384 		if (stcb == NULL) {
2385 			return;
2386 		}
2387 		tmr = &stcb->asoc.shut_guard_timer;
2388 		break;
2389 	case SCTP_TIMER_TYPE_STRRESET:
2390 		if (stcb == NULL) {
2391 			return;
2392 		}
2393 		tmr = &stcb->asoc.strreset_timer;
2394 		break;
2395 	case SCTP_TIMER_TYPE_ASCONF:
2396 		if (stcb == NULL) {
2397 			return;
2398 		}
2399 		tmr = &stcb->asoc.asconf_timer;
2400 		break;
2401 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2402 		if (stcb == NULL) {
2403 			return;
2404 		}
2405 		tmr = &stcb->asoc.delete_prim_timer;
2406 		break;
2407 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2408 		if (stcb == NULL) {
2409 			return;
2410 		}
2411 		tmr = &stcb->asoc.autoclose_timer;
2412 		break;
2413 	default:
2414 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2415 			__FUNCTION__, t_type);
2416 		break;
2417 	}
2418 	if (tmr == NULL) {
2419 		return;
2420 	}
2421 	if ((tmr->type != t_type) && tmr->type) {
2422 		/*
2423 		 * OK, we have a timer that is under joint use, e.g. the cookie
2424 		 * timer sharing storage with the SEND timer. We are therefore
2425 		 * NOT running the timer that the caller wants stopped, so just
2426 		 * return.
2427 		 */
2428 		return;
2429 	}
2430 	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2431 		stcb->asoc.num_send_timers_up--;
2432 		if (stcb->asoc.num_send_timers_up < 0) {
2433 			stcb->asoc.num_send_timers_up = 0;
2434 		}
2435 	}
2436 	tmr->self = NULL;
2437 	tmr->stopped_from = from;
2438 	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
2439 	return;
2440 }
2441 
2442 uint32_t
2443 sctp_calculate_len(struct mbuf *m)
2444 {
2445 	uint32_t tlen = 0;
2446 	struct mbuf *at;
2447 
2448 	at = m;
2449 	while (at) {
2450 		tlen += SCTP_BUF_LEN(at);
2451 		at = SCTP_BUF_NEXT(at);
2452 	}
2453 	return (tlen);
2454 }
2455 
2456 void
2457 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2458     struct sctp_association *asoc, uint32_t mtu)
2459 {
2460 	/*
2461 	 * Reset the P-MTU size on this association, this involves changing
2462 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2463 	 * allow the DF flag to be cleared.
2464 	 */
2465 	struct sctp_tmit_chunk *chk;
2466 	unsigned int eff_mtu, ovh;
2467 
2468 	asoc->smallest_mtu = mtu;
2469 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2470 		ovh = SCTP_MIN_OVERHEAD;
2471 	} else {
2472 		ovh = SCTP_MIN_V4_OVERHEAD;
2473 	}
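	/*
	 * eff_mtu is the room left for a chunk after the IP + SCTP common
	 * header overhead; any chunk larger than that is flagged as OK to
	 * fragment below.
	 */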
2474 	eff_mtu = mtu - ovh;
2475 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2476 		if (chk->send_size > eff_mtu) {
2477 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2478 		}
2479 	}
2480 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2481 		if (chk->send_size > eff_mtu) {
2482 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2483 		}
2484 	}
2485 }
2486 
2487 
2488 /*
2489  * Given an association and the starting time of the current RTT period,
2490  * return the RTO in msecs. net should point to the current network.
2491  */
2492 
2493 uint32_t
2494 sctp_calculate_rto(struct sctp_tcb *stcb,
2495 		   struct sctp_association *asoc,
2496 		   struct sctp_nets *net,
2497 		   struct timeval *told,
2498 		   int safe, int rtt_from_sack)
2499 {
2500 	/*-
2501 	 * given an association and the starting time of the current RTT
2502 	 * period (in *told), return the RTO in msecs.
2503 	 */
2504 	int32_t rtt; /* RTT in ms */
2505 	uint32_t new_rto;
2506 	int first_measure = 0;
2507 	struct timeval now, then, *old;
2508 
2509 	/* Copy it out for sparc64 */
2510 	if (safe == sctp_align_unsafe_makecopy) {
2511 		old = &then;
2512 		memcpy(&then, told, sizeof(struct timeval));
2513 	} else if (safe == sctp_align_safe_nocopy) {
2514 		old = told;
2515 	} else {
2516 		/* error */
2517 		SCTP_PRINTF("Huh, bad rto calc call\n");
2518 		return (0);
2519 	}
2520 	/************************/
2521 	/* 1. calculate new RTT */
2522 	/************************/
2523 	/* get the current time */
2524 	if (stcb->asoc.use_precise_time) {
2525 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2526 	} else {
2527 		(void)SCTP_GETTIME_TIMEVAL(&now);
2528 	}
2529 	timevalsub(&now, old);
2530 	/* store the current RTT in us */
2531 	net->rtt = (uint64_t)1000000 * (uint64_t)now.tv_sec +
2532 	           (uint64_t)now.tv_usec;
2533 	/* compute rtt in ms */
2534 	rtt = (int32_t)(net->rtt / 1000);
2535 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2536 		/* Tell the CC module that a new update has just occurred from a sack */
2537 		(*asoc->cc_functions.sctp_rtt_calculated)(stcb, net, &now);
2538 	}
2539 	/* Do we need to determine the LAN type? We do this only
2540 	 * on SACKs, i.e. when the RTT is determined from data, not
2541 	 * from non-data (HB/INIT->INIT-ACK).
2542 	 */
2543 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2544 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2545 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2546 			net->lan_type = SCTP_LAN_INTERNET;
2547 		} else {
2548 			net->lan_type = SCTP_LAN_LOCAL;
2549 		}
2550 	}
2551 
2552 	/***************************/
2553 	/* 2. update RTTVAR & SRTT */
2554 	/***************************/
2555 	/*-
2556 	 * Compute the scaled average lastsa and the
2557 	 * scaled variance lastsv as described in Van Jacobson's
2558 	 * paper "Congestion Avoidance and Control", Annex A.
2559 	 *
2560 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2561 	 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar
2562 	 */
2563 	if (net->RTO_measured) {
2564 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2565 		net->lastsa += rtt;
2566 		if (rtt < 0) {
2567 			rtt = -rtt;
2568 		}
2569 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2570 		net->lastsv += rtt;
2571 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2572 			rto_logging(net, SCTP_LOG_RTTVAR);
2573 		}
2574 	} else {
2575 		/* First RTO measurement */
2576 		net->RTO_measured = 1;
2577 		first_measure = 1;
2578 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2579 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2580 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2581 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2582 		}
2583 	}
2584 	if (net->lastsv == 0) {
2585 		net->lastsv = SCTP_CLOCK_GRANULARITY;
2586 	}
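	/*
	 * The line below is RTO = srtt + 4 * rttvar in disguise: lastsa holds
	 * srtt scaled by SCTP_RTT_SHIFT and lastsv holds rttvar scaled by
	 * SCTP_RTT_VAR_SHIFT. Assuming the stock shift values of 3 and 2, a
	 * first measurement of 100 ms gives lastsa = 800 and lastsv = 200,
	 * i.e. srtt = 100 ms, rttvar = 50 ms, new_rto = 100 + 200 = 300 ms.
	 */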
2587 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2588 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2589 	    (stcb->asoc.sat_network_lockout == 0)) {
2590 		stcb->asoc.sat_network = 1;
2591 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2592 		stcb->asoc.sat_network = 0;
2593 		stcb->asoc.sat_network_lockout = 1;
2594 	}
2595 	/* bound it, per C6/C7 in Section 5.3.1 */
2596 	if (new_rto < stcb->asoc.minrto) {
2597 		new_rto = stcb->asoc.minrto;
2598 	}
2599 	if (new_rto > stcb->asoc.maxrto) {
2600 		new_rto = stcb->asoc.maxrto;
2601 	}
2602 	/* we are now returning the RTO */
2603 	return (new_rto);
2604 }
2605 
2606 /*
2607  * return a pointer to a contiguous piece of data from the given mbuf chain
2608  * starting at 'off' for 'len' bytes. If the desired piece spans more than
2609  * one mbuf, a copy is made into 'in_ptr'. The caller must ensure that the
2610  * buffer is >= 'len' bytes. Returns NULL if there aren't 'len' bytes in the chain.
2611  */
2612 caddr_t
2613 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2614 {
2615 	uint32_t count;
2616 	uint8_t *ptr;
2617 
2618 	ptr = in_ptr;
2619 	if ((off < 0) || (len <= 0))
2620 		return (NULL);
2621 
2622 	/* find the desired start location */
2623 	while ((m != NULL) && (off > 0)) {
2624 		if (off < SCTP_BUF_LEN(m))
2625 			break;
2626 		off -= SCTP_BUF_LEN(m);
2627 		m = SCTP_BUF_NEXT(m);
2628 	}
2629 	if (m == NULL)
2630 		return (NULL);
2631 
2632 	/* is the current mbuf large enough (eg. contiguous)? */
2633 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2634 		return (mtod(m, caddr_t) + off);
2635 	} else {
2636 		/* else, it spans more than one mbuf, so save a temp copy... */
2637 		while ((m != NULL) && (len > 0)) {
2638 			count = min(SCTP_BUF_LEN(m) - off, len);
2639 			bcopy(mtod(m, caddr_t) + off, ptr, count);
2640 			len -= count;
2641 			ptr += count;
2642 			off = 0;
2643 			m = SCTP_BUF_NEXT(m);
2644 		}
2645 		if ((m == NULL) && (len > 0))
2646 			return (NULL);
2647 		else
2648 			return ((caddr_t)in_ptr);
2649 	}
2650 }
2651 
2652 
2653 
2654 struct sctp_paramhdr *
2655 sctp_get_next_param(struct mbuf *m,
2656     int offset,
2657     struct sctp_paramhdr *pull,
2658     int pull_limit)
2659 {
2660 	/* This just provides a typed signature to Peter's Pull routine */
2661 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2662 	    (uint8_t *) pull));
2663 }
2664 
2665 
2666 struct mbuf *
2667 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2668 {
2669 	struct mbuf *m_last;
2670 	caddr_t dp;
2671 
2672 	if (padlen > 3) {
2673 		return (NULL);
2674 	}
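	/*
	 * padlen is the number of zero bytes (at most 3) needed to bring the
	 * chunk out to a 4-byte boundary, per the usual SCTP padding rule.
	 */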
2675 	if (padlen <= M_TRAILINGSPACE(m)) {
2676 		/*
2677 		 * The easy way. We hope the majority of the time we hit
2678 		 * here :)
2679 		 */
2680 		m_last = m;
2681 	} else {
2682 		/* Hard way we must grow the mbuf chain */
2683 		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2684 		if (m_last == NULL) {
2685 			return (NULL);
2686 		}
2687 		SCTP_BUF_LEN(m_last) = 0;
2688 		SCTP_BUF_NEXT(m_last) = NULL;
2689 		SCTP_BUF_NEXT(m) = m_last;
2690 	}
2691 	dp = mtod(m_last, caddr_t) + SCTP_BUF_LEN(m_last);
2692 	SCTP_BUF_LEN(m_last) += padlen;
2693 	memset(dp, 0, padlen);
2694 	return (m_last);
2695 }
2696 
2697 struct mbuf *
2698 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2699 {
2700 	/* find the last mbuf in chain and pad it */
2701 	struct mbuf *m_at;
2702 
2703 	if (last_mbuf != NULL) {
2704 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2705 	} else {
2706 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2707 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2708 				return (sctp_add_pad_tombuf(m_at, padval));
2709 			}
2710 		}
2711 	}
2712 	return (NULL);
2713 }
2714 
2715 static void
2716 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
2717     uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
2718 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2719     SCTP_UNUSED
2720 #endif
2721     )
2722 {
2723 	struct mbuf *m_notify;
2724 	struct sctp_assoc_change *sac;
2725 	struct sctp_queued_to_read *control;
2726 	size_t notif_len, abort_len;
2727 	unsigned int i;
2728 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2729 	struct socket *so;
2730 #endif
2731 
2732 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
2733 		notif_len = sizeof(struct sctp_assoc_change);
2734 		if (abort != NULL) {
2735 			abort_len = ntohs(abort->ch.chunk_length);
2736 		} else {
2737 			abort_len = 0;
2738 		}
2739 		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2740 			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
2741 		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2742 			notif_len += abort_len;
2743 		}
2744 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2745 		if (m_notify == NULL) {
2746 			/* Retry with smaller value. */
2747 			notif_len = sizeof(struct sctp_assoc_change);
2748 			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2749 			if (m_notify == NULL) {
2750 				goto set_error;
2751 			}
2752 		}
2753 		SCTP_BUF_NEXT(m_notify) = NULL;
2754 		sac = mtod(m_notify, struct sctp_assoc_change *);
2755 		memset(sac, 0, notif_len);
2756 		sac->sac_type = SCTP_ASSOC_CHANGE;
2757 		sac->sac_flags = 0;
2758 		sac->sac_length = sizeof(struct sctp_assoc_change);
2759 		sac->sac_state = state;
2760 		sac->sac_error = error;
2761 		/* XXX verify these stream counts */
2762 		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
2763 		sac->sac_inbound_streams = stcb->asoc.streamincnt;
2764 		sac->sac_assoc_id = sctp_get_associd(stcb);
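		/*
		 * For COMM_UP/RESTART append one byte per supported feature to
		 * sac_info; for COMM_LOST/CANT_STR_ASSOC append the ABORT chunk.
		 * sac_length grows by whatever was appended.
		 */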
2765 		if (notif_len > sizeof(struct sctp_assoc_change)) {
2766 			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2767 				i = 0;
2768 				if (stcb->asoc.prsctp_supported == 1) {
2769 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
2770 				}
2771 				if (stcb->asoc.auth_supported == 1) {
2772 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
2773 				}
2774 				if (stcb->asoc.asconf_supported == 1) {
2775 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
2776 				}
2777 				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
2778 				if (stcb->asoc.reconfig_supported == 1) {
2779 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
2780 				}
2781 				sac->sac_length += i;
2782 			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2783 				memcpy(sac->sac_info, abort, abort_len);
2784 				sac->sac_length += abort_len;
2785 			}
2786 		}
2787 		SCTP_BUF_LEN(m_notify) = sac->sac_length;
2788 		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2789 		                                 0, 0, stcb->asoc.context, 0, 0, 0,
2790 		                                 m_notify);
2791 		if (control != NULL) {
2792 			control->length = SCTP_BUF_LEN(m_notify);
2793 			/* not that we need this */
2794 			control->tail_mbuf = m_notify;
2795 			control->spec_flags = M_NOTIFICATION;
2796 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2797 			                  control,
2798 			                  &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
2799 			                  so_locked);
2800 		} else {
2801 			sctp_m_freem(m_notify);
2802 		}
2803 	}
2804 	/*
2805 	 * For 1-to-1 style sockets, we send up an error when an ABORT
2806 	 * comes in.
2807 	 */
2808 set_error:
2809 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2810 	     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2811 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2812 		SOCK_LOCK(stcb->sctp_socket);
2813 		if (from_peer) {
2814 			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
2815 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
2816 				stcb->sctp_socket->so_error = ECONNREFUSED;
2817 			} else {
2818 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
2819 				stcb->sctp_socket->so_error = ECONNRESET;
2820 			}
2821 		} else {
2822 			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
2823 			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
2824 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
2825 				stcb->sctp_socket->so_error = ETIMEDOUT;
2826 			} else {
2827 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
2828 				stcb->sctp_socket->so_error = ECONNABORTED;
2829 			}
2830 		}
2831 	}
2832 	/* Wake ANY sleepers */
2833 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2834 	so = SCTP_INP_SO(stcb->sctp_ep);
2835 	if (!so_locked) {
2836 		atomic_add_int(&stcb->asoc.refcnt, 1);
2837 		SCTP_TCB_UNLOCK(stcb);
2838 		SCTP_SOCKET_LOCK(so, 1);
2839 		SCTP_TCB_LOCK(stcb);
2840 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2841 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2842 			SCTP_SOCKET_UNLOCK(so, 1);
2843 			return;
2844 		}
2845 	}
2846 #endif
2847 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2848 	     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2849 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2850 #if defined(__APPLE__)
2851 		socantrcvmore(stcb->sctp_socket);
2852 #else
2853 		socantrcvmore_locked(stcb->sctp_socket);
2854 #endif
2855 	}
2856 	sorwakeup(stcb->sctp_socket);
2857 	sowwakeup(stcb->sctp_socket);
2858 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2859 	if (!so_locked) {
2860 		SCTP_SOCKET_UNLOCK(so, 1);
2861 	}
2862 #endif
2863 }
2864 
2865 static void
2866 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2867     struct sockaddr *sa, uint32_t error, int so_locked
2868 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2869     SCTP_UNUSED
2870 #endif
2871 )
2872 {
2873 	struct mbuf *m_notify;
2874 	struct sctp_paddr_change *spc;
2875 	struct sctp_queued_to_read *control;
2876 
2877 	if ((stcb == NULL) ||
2878 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2879 		/* event not enabled */
2880 		return;
2881 	}
2882 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
2883 	if (m_notify == NULL)
2884 		return;
2885 	SCTP_BUF_LEN(m_notify) = 0;
2886 	spc = mtod(m_notify, struct sctp_paddr_change *);
2887 	memset(spc, 0, sizeof(struct sctp_paddr_change));
2888 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2889 	spc->spc_flags = 0;
2890 	spc->spc_length = sizeof(struct sctp_paddr_change);
2891 	switch (sa->sa_family) {
2892 #ifdef INET
2893 	case AF_INET:
2894 #ifdef INET6
2895 		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
2896 			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
2897 			                    (struct sockaddr_in6 *)&spc->spc_aaddr);
2898 		} else {
2899 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2900 		}
2901 #else
2902 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2903 #endif
2904 		break;
2905 #endif
2906 #ifdef INET6
2907 	case AF_INET6:
2908 	{
2909 #ifdef SCTP_EMBEDDED_V6_SCOPE
2910 		struct sockaddr_in6 *sin6;
2911 #endif /* SCTP_EMBEDDED_V6_SCOPE */
2912 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2913 
2914 #ifdef SCTP_EMBEDDED_V6_SCOPE
2915 		sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2916 		if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2917 			if (sin6->sin6_scope_id == 0) {
2918 				/* recover scope_id for user */
2919 #ifdef SCTP_KAME
2920 				(void)sa6_recoverscope(sin6);
2921 #else
2922 				(void)in6_recoverscope(sin6, &sin6->sin6_addr,
2923 						       NULL);
2924 #endif
2925 			} else {
2926 				/* clear embedded scope_id for user */
2927 				in6_clearscope(&sin6->sin6_addr);
2928 			}
2929 		}
2930 #endif /* SCTP_EMBEDDED_V6_SCOPE */
2931 		break;
2932 	}
2933 #endif
2934 #if defined(__Userspace__)
2935 	case AF_CONN:
2936 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_conn));
2937 		break;
2938 #endif
2939 	default:
2940 		/* TSNH */
2941 		break;
2942 	}
2943 	spc->spc_state = state;
2944 	spc->spc_error = error;
2945 	spc->spc_assoc_id = sctp_get_associd(stcb);
2946 
2947 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2948 	SCTP_BUF_NEXT(m_notify) = NULL;
2949 
2950 	/* append to socket */
2951 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2952 	                                 0, 0, stcb->asoc.context, 0, 0, 0,
2953 	                                 m_notify);
2954 	if (control == NULL) {
2955 		/* no memory */
2956 		sctp_m_freem(m_notify);
2957 		return;
2958 	}
2959 	control->length = SCTP_BUF_LEN(m_notify);
2960 	control->spec_flags = M_NOTIFICATION;
2961 	/* not that we need this */
2962 	control->tail_mbuf = m_notify;
2963 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2964 	                  control,
2965 	                  &stcb->sctp_socket->so_rcv, 1,
2966 	                  SCTP_READ_LOCK_NOT_HELD,
2967 	                  so_locked);
2968 }
2969 
2970 
2971 static void
2972 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
2973     struct sctp_tmit_chunk *chk, int so_locked
2974 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2975     SCTP_UNUSED
2976 #endif
2977     )
2978 {
2979 	struct mbuf *m_notify;
2980 	struct sctp_send_failed *ssf;
2981 	struct sctp_send_failed_event *ssfe;
2982 	struct sctp_queued_to_read *control;
2983 	int length;
2984 
2985 	if ((stcb == NULL) ||
2986 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
2987 	     sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
2988 		/* event not enabled */
2989 		return;
2990 	}
2991 
2992 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2993 		length = sizeof(struct sctp_send_failed_event);
2994 	} else {
2995 		length = sizeof(struct sctp_send_failed);
2996 	}
2997 	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
2998 	if (m_notify == NULL)
2999 		/* no space left */
3000 		return;
3001 	SCTP_BUF_LEN(m_notify) = 0;
3002 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3003 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
3004 		memset(ssfe, 0, length);
3005 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
3006 		if (sent) {
3007 			ssfe->ssfe_flags = SCTP_DATA_SENT;
3008 		} else {
3009 			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
3010 		}
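		/*
		 * Report the size of the user data: the chunk's send_size minus
		 * the DATA chunk header, which is trimmed off the mbuf chain
		 * further below.
		 */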
3011 		length += chk->send_size;
3012 		length -= sizeof(struct sctp_data_chunk);
3013 		ssfe->ssfe_length = length;
3014 		ssfe->ssfe_error = error;
3015 		/* not exactly what the user sent in, but should be close :) */
3016 		ssfe->ssfe_info.snd_sid = chk->rec.data.stream_number;
3017 		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
3018 		ssfe->ssfe_info.snd_ppid = chk->rec.data.payloadtype;
3019 		ssfe->ssfe_info.snd_context = chk->rec.data.context;
3020 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
3021 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
3022 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
3023 	} else {
3024 		ssf = mtod(m_notify, struct sctp_send_failed *);
3025 		memset(ssf, 0, length);
3026 		ssf->ssf_type = SCTP_SEND_FAILED;
3027 		if (sent) {
3028 			ssf->ssf_flags = SCTP_DATA_SENT;
3029 		} else {
3030 			ssf->ssf_flags = SCTP_DATA_UNSENT;
3031 		}
3032 		length += chk->send_size;
3033 		length -= sizeof(struct sctp_data_chunk);
3034 		ssf->ssf_length = length;
3035 		ssf->ssf_error = error;
3036 		/* not exactly what the user sent in, but should be close :) */
3037 		bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
3038 		ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
3039 		ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
3040 		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
3041 		ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
3042 		ssf->ssf_info.sinfo_context = chk->rec.data.context;
3043 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3044 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3045 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
3046 	}
3047 	if (chk->data) {
3048 		/*
3049 		 * trim off the sctp chunk header (it should
3050 		 * be there)
3051 		 */
3052 		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
3053 			m_adj(chk->data, sizeof(struct sctp_data_chunk));
3054 			sctp_mbuf_crush(chk->data);
3055 			chk->send_size -= sizeof(struct sctp_data_chunk);
3056 		}
3057 	}
3058 	SCTP_BUF_NEXT(m_notify) = chk->data;
3059 	/* Steal off the mbuf */
3060 	chk->data = NULL;
3061 	/*
3062 	 * For this case we check the actual socket buffer: since the assoc
3063 	 * is going away, we don't want to overfill the socket buffer for a
3064 	 * non-reader.
3065 	 */
3066 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3067 		sctp_m_freem(m_notify);
3068 		return;
3069 	}
3070 	/* append to socket */
3071 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3072 	                                 0, 0, stcb->asoc.context, 0, 0, 0,
3073 	                                 m_notify);
3074 	if (control == NULL) {
3075 		/* no memory */
3076 		sctp_m_freem(m_notify);
3077 		return;
3078 	}
3079 	control->spec_flags = M_NOTIFICATION;
3080 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3081 	                  control,
3082 	                  &stcb->sctp_socket->so_rcv, 1,
3083 	                  SCTP_READ_LOCK_NOT_HELD,
3084 	                  so_locked);
3085 }
3086 
3087 
3088 static void
3089 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
3090 			 struct sctp_stream_queue_pending *sp, int so_locked
3091 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3092                          SCTP_UNUSED
3093 #endif
3094                          )
3095 {
3096 	struct mbuf *m_notify;
3097 	struct sctp_send_failed *ssf;
3098 	struct sctp_send_failed_event *ssfe;
3099 	struct sctp_queued_to_read *control;
3100 	int length;
3101 
3102 	if ((stcb == NULL) ||
3103 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
3104 	     sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
3105 		/* event not enabled */
3106 		return;
3107 	}
3108 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3109 		length = sizeof(struct sctp_send_failed_event);
3110 	} else {
3111 		length = sizeof(struct sctp_send_failed);
3112 	}
3113 	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
3114 	if (m_notify == NULL) {
3115 		/* no space left */
3116 		return;
3117 	}
3118 	SCTP_BUF_LEN(m_notify) = 0;
3119 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3120 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
3121 		memset(ssfe, 0, length);
3122 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
3123 		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
3124 		length += sp->length;
3125 		ssfe->ssfe_length = length;
3126 		ssfe->ssfe_error = error;
3127 		/* not exactly what the user sent in, but should be close :) */
3128 		ssfe->ssfe_info.snd_sid = sp->stream;
3129 		if (sp->some_taken) {
3130 			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
3131 		} else {
3132 			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
3133 		}
3134 		ssfe->ssfe_info.snd_ppid = sp->ppid;
3135 		ssfe->ssfe_info.snd_context = sp->context;
3136 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
3137 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
3138 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
3139 	} else {
3140 		ssf = mtod(m_notify, struct sctp_send_failed *);
3141 		memset(ssf, 0, length);
3142 		ssf->ssf_type = SCTP_SEND_FAILED;
3143 		ssf->ssf_flags = SCTP_DATA_UNSENT;
3144 		length += sp->length;
3145 		ssf->ssf_length = length;
3146 		ssf->ssf_error = error;
3147 		/* not exactly what the user sent in, but should be close :) */
3148 		ssf->ssf_info.sinfo_stream = sp->stream;
3149 		ssf->ssf_info.sinfo_ssn = 0;
3150 		if (sp->some_taken) {
3151 			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
3152 		} else {
3153 			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
3154 		}
3155 		ssf->ssf_info.sinfo_ppid = sp->ppid;
3156 		ssf->ssf_info.sinfo_context = sp->context;
3157 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3158 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3159 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
3160 	}
3161 	SCTP_BUF_NEXT(m_notify) = sp->data;
3162 
3163 	/* Steal off the mbuf */
3164 	sp->data = NULL;
3165 	/*
3166 	 * For this case we check the actual socket buffer: since the assoc
3167 	 * is going away, we don't want to overfill the socket buffer for a
3168 	 * non-reader.
3169 	 */
3170 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3171 		sctp_m_freem(m_notify);
3172 		return;
3173 	}
3174 	/* append to socket */
3175 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3176 	                                 0, 0, stcb->asoc.context, 0, 0, 0,
3177 	                                 m_notify);
3178 	if (control == NULL) {
3179 		/* no memory */
3180 		sctp_m_freem(m_notify);
3181 		return;
3182 	}
3183 	control->spec_flags = M_NOTIFICATION;
3184 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3185 	    control,
3186 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3187 }
3188 
3189 
3190 
3191 static void
3192 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3193 {
3194 	struct mbuf *m_notify;
3195 	struct sctp_adaptation_event *sai;
3196 	struct sctp_queued_to_read *control;
3197 
3198 	if ((stcb == NULL) ||
3199 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3200 		/* event not enabled */
3201 		return;
3202 	}
3203 
3204 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaptation_event), 0, M_NOWAIT, 1, MT_DATA);
3205 	if (m_notify == NULL)
3206 		/* no space left */
3207 		return;
3208 	SCTP_BUF_LEN(m_notify) = 0;
3209 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3210 	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3211 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3212 	sai->sai_flags = 0;
3213 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3214 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3215 	sai->sai_assoc_id = sctp_get_associd(stcb);
3216 
3217 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3218 	SCTP_BUF_NEXT(m_notify) = NULL;
3219 
3220 	/* append to socket */
3221 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3222 	                                 0, 0, stcb->asoc.context, 0, 0, 0,
3223 	                                 m_notify);
3224 	if (control == NULL) {
3225 		/* no memory */
3226 		sctp_m_freem(m_notify);
3227 		return;
3228 	}
3229 	control->length = SCTP_BUF_LEN(m_notify);
3230 	control->spec_flags = M_NOTIFICATION;
3231 	/* not that we need this */
3232 	control->tail_mbuf = m_notify;
3233 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3234 	    control,
3235 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3236 }
3237 
3238 /* This always must be called with the read-queue LOCKED in the INP */
3239 static void
3240 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3241 					uint32_t val, int so_locked
3242 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3243                              SCTP_UNUSED
3244 #endif
3245                                         )
3246 {
3247 	struct mbuf *m_notify;
3248 	struct sctp_pdapi_event *pdapi;
3249 	struct sctp_queued_to_read *control;
3250 	struct sockbuf *sb;
3251 
3252 	if ((stcb == NULL) ||
3253 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3254 		/* event not enabled */
3255 		return;
3256 	}
3257 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3258 		return;
3259 	}
3260 
3261 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
3262 	if (m_notify == NULL)
3263 		/* no space left */
3264 		return;
3265 	SCTP_BUF_LEN(m_notify) = 0;
3266 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3267 	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
3268 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3269 	pdapi->pdapi_flags = 0;
3270 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3271 	pdapi->pdapi_indication = error;
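	/* val packs the stream id in the upper 16 bits and the sequence number in the lower 16 bits */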
3272 	pdapi->pdapi_stream = (val >> 16);
3273 	pdapi->pdapi_seq = (val & 0x0000ffff);
3274 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3275 
3276 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3277 	SCTP_BUF_NEXT(m_notify) = NULL;
3278 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3279 					 0, 0, stcb->asoc.context, 0, 0, 0,
3280 					 m_notify);
3281 	if (control == NULL) {
3282 		/* no memory */
3283 		sctp_m_freem(m_notify);
3284 		return;
3285 	}
3286 	control->spec_flags = M_NOTIFICATION;
3287 	control->length = SCTP_BUF_LEN(m_notify);
3288 	/* not that we need this */
3289 	control->tail_mbuf = m_notify;
3290 	control->held_length = 0;
3291 	control->length = 0;
3292 	sb = &stcb->sctp_socket->so_rcv;
3293 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3294 		sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3295 	}
3296 	sctp_sballoc(stcb, sb, m_notify);
3297 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3298 		sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
3299 	}
3300 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3301 	control->end_added = 1;
3302 	if (stcb->asoc.control_pdapi)
3303 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi,  control, next);
3304 	else {
3305 		/* we really should not see this case */
3306 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3307 	}
3308 	if (stcb->sctp_ep && stcb->sctp_socket) {
3309 		/* This should always be the case */
3310 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3311 		struct socket *so;
3312 
3313 		so = SCTP_INP_SO(stcb->sctp_ep);
3314 		if (!so_locked) {
3315 			atomic_add_int(&stcb->asoc.refcnt, 1);
3316 			SCTP_TCB_UNLOCK(stcb);
3317 			SCTP_SOCKET_LOCK(so, 1);
3318 			SCTP_TCB_LOCK(stcb);
3319 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3320 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3321 				SCTP_SOCKET_UNLOCK(so, 1);
3322 				return;
3323 			}
3324 		}
3325 #endif
3326 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3327 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3328 		if (!so_locked) {
3329 			SCTP_SOCKET_UNLOCK(so, 1);
3330 		}
3331 #endif
3332 	}
3333 }
3334 
3335 static void
3336 sctp_notify_shutdown_event(struct sctp_tcb *stcb)
3337 {
3338 	struct mbuf *m_notify;
3339 	struct sctp_shutdown_event *sse;
3340 	struct sctp_queued_to_read *control;
3341 
3342 	/*
3343 	 * For TCP model AND UDP connected sockets we will send an error up
3344 	 * when a SHUTDOWN completes
3345 	 */
3346 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3347 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3348 		/* mark socket closed for read/write and wakeup! */
3349 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3350 		struct socket *so;
3351 
3352 		so = SCTP_INP_SO(stcb->sctp_ep);
3353 		atomic_add_int(&stcb->asoc.refcnt, 1);
3354 		SCTP_TCB_UNLOCK(stcb);
3355 		SCTP_SOCKET_LOCK(so, 1);
3356 		SCTP_TCB_LOCK(stcb);
3357 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3358 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3359 			SCTP_SOCKET_UNLOCK(so, 1);
3360 			return;
3361 		}
3362 #endif
3363 		socantsendmore(stcb->sctp_socket);
3364 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3365 		SCTP_SOCKET_UNLOCK(so, 1);
3366 #endif
3367 	}
3368 	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
3369 		/* event not enabled */
3370 		return;
3371 	}
3372 
3373 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
3374 	if (m_notify == NULL)
3375 		/* no space left */
3376 		return;
3377 	sse = mtod(m_notify, struct sctp_shutdown_event *);
3378 	memset(sse, 0, sizeof(struct sctp_shutdown_event));
3379 	sse->sse_type = SCTP_SHUTDOWN_EVENT;
3380 	sse->sse_flags = 0;
3381 	sse->sse_length = sizeof(struct sctp_shutdown_event);
3382 	sse->sse_assoc_id = sctp_get_associd(stcb);
3383 
3384 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
3385 	SCTP_BUF_NEXT(m_notify) = NULL;
3386 
3387 	/* append to socket */
3388 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3389 	                                 0, 0, stcb->asoc.context, 0, 0, 0,
3390 	                                 m_notify);
3391 	if (control == NULL) {
3392 		/* no memory */
3393 		sctp_m_freem(m_notify);
3394 		return;
3395 	}
3396 	control->spec_flags = M_NOTIFICATION;
3397 	control->length = SCTP_BUF_LEN(m_notify);
3398 	/* not that we need this */
3399 	control->tail_mbuf = m_notify;
3400 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3401 	    control,
3402 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3403 }
3404 
3405 static void
3406 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3407                              int so_locked
3408 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3409                              SCTP_UNUSED
3410 #endif
3411                              )
3412 {
3413 	struct mbuf *m_notify;
3414 	struct sctp_sender_dry_event *event;
3415 	struct sctp_queued_to_read *control;
3416 
3417 	if ((stcb == NULL) ||
3418 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3419 		/* event not enabled */
3420 		return;
3421 	}
3422 
3423 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3424 	if (m_notify == NULL) {
3425 		/* no space left */
3426 		return;
3427 	}
3428 	SCTP_BUF_LEN(m_notify) = 0;
3429 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3430 	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3431 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3432 	event->sender_dry_flags = 0;
3433 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3434 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3435 
3436 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3437 	SCTP_BUF_NEXT(m_notify) = NULL;
3438 
3439 	/* append to socket */
3440 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3441 	                                 0, 0, stcb->asoc.context, 0, 0, 0,
3442 	                                 m_notify);
3443 	if (control == NULL) {
3444 		/* no memory */
3445 		sctp_m_freem(m_notify);
3446 		return;
3447 	}
3448 	control->length = SCTP_BUF_LEN(m_notify);
3449 	control->spec_flags = M_NOTIFICATION;
3450 	/* not that we need this */
3451 	control->tail_mbuf = m_notify;
3452 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3453 	                  &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3454 }
3455 
3456 
3457 void
3458 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3459 {
3460 	struct mbuf *m_notify;
3461 	struct sctp_queued_to_read *control;
3462 	struct sctp_stream_change_event *stradd;
3463 
3464 	if ((stcb == NULL) ||
3465 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3466 		/* event not enabled */
3467 		return;
3468 	}
3469 	if ((stcb->asoc.peer_req_out) && flag) {
3470 		/* Peer made the request, don't tell the local user */
3471 		stcb->asoc.peer_req_out = 0;
3472 		return;
3473 	}
3474 	stcb->asoc.peer_req_out = 0;
3475 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3476 	if (m_notify == NULL)
3477 		/* no space left */
3478 		return;
3479 	SCTP_BUF_LEN(m_notify) = 0;
3480 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3481 	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3482 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3483 	stradd->strchange_flags = flag;
3484 	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3485 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3486 	stradd->strchange_instrms = numberin;
3487 	stradd->strchange_outstrms = numberout;
3488 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3489 	SCTP_BUF_NEXT(m_notify) = NULL;
3490 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3491 		/* no space */
3492 		sctp_m_freem(m_notify);
3493 		return;
3494 	}
3495 	/* append to socket */
3496 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3497 	                                 0, 0, stcb->asoc.context, 0, 0, 0,
3498 	                                 m_notify);
3499 	if (control == NULL) {
3500 		/* no memory */
3501 		sctp_m_freem(m_notify);
3502 		return;
3503 	}
3504 	control->spec_flags = M_NOTIFICATION;
3505 	control->length = SCTP_BUF_LEN(m_notify);
3506 	/* not that we need this */
3507 	control->tail_mbuf = m_notify;
3508 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3509 	    control,
3510 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3511 }
3512 
3513 void
3514 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3515 {
3516 	struct mbuf *m_notify;
3517 	struct sctp_queued_to_read *control;
3518 	struct sctp_assoc_reset_event *strasoc;
3519 
3520 	if ((stcb == NULL) ||
3521 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3522 		/* event not enabled */
3523 		return;
3524 	}
3525 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3526 	if (m_notify == NULL)
3527 		/* no space left */
3528 		return;
3529 	SCTP_BUF_LEN(m_notify) = 0;
3530 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3531 	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3532 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3533 	strasoc->assocreset_flags = flag;
3534 	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3535 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3536 	strasoc->assocreset_local_tsn = sending_tsn;
3537 	strasoc->assocreset_remote_tsn = recv_tsn;
3538 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3539 	SCTP_BUF_NEXT(m_notify) = NULL;
3540 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3541 		/* no space */
3542 		sctp_m_freem(m_notify);
3543 		return;
3544 	}
3545 	/* append to socket */
3546 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3547 	                                 0, 0, stcb->asoc.context, 0, 0, 0,
3548 	                                 m_notify);
3549 	if (control == NULL) {
3550 		/* no memory */
3551 		sctp_m_freem(m_notify);
3552 		return;
3553 	}
3554 	control->spec_flags = M_NOTIFICATION;
3555 	control->length = SCTP_BUF_LEN(m_notify);
3556 	/* not that we need this */
3557 	control->tail_mbuf = m_notify;
3558 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3559 	    control,
3560 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3561 }
3562 
3563 
3564 
3565 static void
3566 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3567     int number_entries, uint16_t * list, int flag)
3568 {
3569 	struct mbuf *m_notify;
3570 	struct sctp_queued_to_read *control;
3571 	struct sctp_stream_reset_event *strreset;
3572 	int len;
3573 
3574 	if ((stcb == NULL) ||
3575 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3576 		/* event not enabled */
3577 		return;
3578 	}
3579 
3580 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3581 	if (m_notify == NULL)
3582 		/* no space left */
3583 		return;
3584 	SCTP_BUF_LEN(m_notify) = 0;
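	/* the notification is a fixed header followed by a variable-length list of stream ids */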
3585 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3586 	if (len > M_TRAILINGSPACE(m_notify)) {
3587 		/* never enough room */
3588 		sctp_m_freem(m_notify);
3589 		return;
3590 	}
3591 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3592 	memset(strreset, 0, len);
3593 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3594 	strreset->strreset_flags = flag;
3595 	strreset->strreset_length = len;
3596 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3597 	if (number_entries) {
3598 		int i;
3599 
3600 		for (i = 0; i < number_entries; i++) {
3601 			strreset->strreset_stream_list[i] = ntohs(list[i]);
3602 		}
3603 	}
3604 	SCTP_BUF_LEN(m_notify) = len;
3605 	SCTP_BUF_NEXT(m_notify) = NULL;
3606 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3607 		/* no space */
3608 		sctp_m_freem(m_notify);
3609 		return;
3610 	}
3611 	/* append to socket */
3612 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3613 	                                 0, 0, stcb->asoc.context, 0, 0, 0,
3614 	                                 m_notify);
3615 	if (control == NULL) {
3616 		/* no memory */
3617 		sctp_m_freem(m_notify);
3618 		return;
3619 	}
3620 	control->spec_flags = M_NOTIFICATION;
3621 	control->length = SCTP_BUF_LEN(m_notify);
3622 	/* not that we need this */
3623 	control->tail_mbuf = m_notify;
3624 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3625 	                  control,
3626 	                  &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3627 }
3628 
3629 
3630 static void
3631 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
3632 {
3633 	struct mbuf *m_notify;
3634 	struct sctp_remote_error *sre;
3635 	struct sctp_queued_to_read *control;
3636 	size_t notif_len, chunk_len;
3637 
3638 	if ((stcb == NULL) ||
3639 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
3640 		return;
3641 	}
3642 	if (chunk != NULL) {
3643 		chunk_len = ntohs(chunk->ch.chunk_length);
3644 	} else {
3645 		chunk_len = 0;
3646 	}
3647 	notif_len = sizeof(struct sctp_remote_error) + chunk_len;
3648 	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3649 	if (m_notify == NULL) {
3650 		/* Retry with smaller value. */
3651 		notif_len = sizeof(struct sctp_remote_error);
3652 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3653 		if (m_notify == NULL) {
3654 			return;
3655 		}
3656 	}
3657 	SCTP_BUF_NEXT(m_notify) = NULL;
3658 	sre = mtod(m_notify, struct sctp_remote_error *);
3659 	memset(sre, 0, notif_len);
3660 	sre->sre_type = SCTP_REMOTE_ERROR;
3661 	sre->sre_flags = 0;
3662 	sre->sre_length = sizeof(struct sctp_remote_error);
3663 	sre->sre_error = error;
3664 	sre->sre_assoc_id = sctp_get_associd(stcb);
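	/* if the larger mbuf allocation succeeded, append the raw ERROR chunk after the fixed header */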
3665 	if (notif_len > sizeof(struct sctp_remote_error)) {
3666 		memcpy(sre->sre_data, chunk, chunk_len);
3667 		sre->sre_length += chunk_len;
3668 	}
3669 	SCTP_BUF_LEN(m_notify) = sre->sre_length;
3670 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3671 	                                 0, 0, stcb->asoc.context, 0, 0, 0,
3672 	                                 m_notify);
3673 	if (control != NULL) {
3674 		control->length = SCTP_BUF_LEN(m_notify);
3675 		/* not that we need this */
3676 		control->tail_mbuf = m_notify;
3677 		control->spec_flags = M_NOTIFICATION;
3678 		sctp_add_to_readq(stcb->sctp_ep, stcb,
3679 		                  control,
3680 		                  &stcb->sctp_socket->so_rcv, 1,
3681 				  SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3682 	} else {
3683 		sctp_m_freem(m_notify);
3684 	}
3685 }
3686 
3687 
3688 void
3689 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3690     uint32_t error, void *data, int so_locked
3691 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3692     SCTP_UNUSED
3693 #endif
3694     )
3695 {
3696 	if ((stcb == NULL) ||
3697 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3698 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3699 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3700 		/* If the socket is gone we are out of here */
3701 		return;
3702 	}
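	/* newer FreeBSD and Windows keep the can't-receive flag in sb_state; older stacks use so_state */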
3703 #if (defined(__FreeBSD__) && __FreeBSD_version > 500000) || defined(__Windows__)
3704 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3705 #else
3706 	if (stcb->sctp_socket->so_state & SS_CANTRCVMORE) {
3707 #endif
3708 		return;
3709 	}
3710 #if defined(__APPLE__)
3711 	if (so_locked) {
3712 		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
3713 	} else {
3714 		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
3715 	}
3716 #endif
3717 	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3718 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) {
3719 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3720 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3721 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3722 			/* Don't report these in front states */
3723 			return;
3724 		}
3725 	}
3726 	switch (notification) {
3727 	case SCTP_NOTIFY_ASSOC_UP:
3728 		if (stcb->asoc.assoc_up_sent == 0) {
3729 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
3730 			stcb->asoc.assoc_up_sent = 1;
3731 		}
3732 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3733 			sctp_notify_adaptation_layer(stcb);
3734 		}
3735 		if (stcb->asoc.auth_supported == 0) {
3736 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3737 			                NULL, so_locked);
3738 		}
3739 		break;
3740 	case SCTP_NOTIFY_ASSOC_DOWN:
3741 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
3742 #if defined(__Userspace__)
3743 		if (stcb->sctp_ep->recv_callback) {
3744 			if (stcb->sctp_socket) {
3745 				union sctp_sockstore addr;
3746 				struct sctp_rcvinfo rcv;
3747 
3748 				memset(&addr, 0, sizeof(union sctp_sockstore));
3749 				memset(&rcv, 0, sizeof(struct sctp_rcvinfo));
3750 				atomic_add_int(&stcb->asoc.refcnt, 1);
3751 				SCTP_TCB_UNLOCK(stcb);
3752 				stcb->sctp_ep->recv_callback(stcb->sctp_socket, addr, NULL, 0, rcv, 0, stcb->sctp_ep->ulp_info);
3753 				SCTP_TCB_LOCK(stcb);
3754 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
3755 			}
3756 		}
3757 #endif
3758 		break;
3759 	case SCTP_NOTIFY_INTERFACE_DOWN:
3760 		{
3761 			struct sctp_nets *net;
3762 
3763 			net = (struct sctp_nets *)data;
3764 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3765 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3766 			break;
3767 		}
3768 	case SCTP_NOTIFY_INTERFACE_UP:
3769 		{
3770 			struct sctp_nets *net;
3771 
3772 			net = (struct sctp_nets *)data;
3773 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3774 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3775 			break;
3776 		}
3777 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3778 		{
3779 			struct sctp_nets *net;
3780 
3781 			net = (struct sctp_nets *)data;
3782 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3783 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3784 			break;
3785 		}
3786 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3787 		sctp_notify_send_failed2(stcb, error,
3788 		                         (struct sctp_stream_queue_pending *)data, so_locked);
3789 		break;
3790 	case SCTP_NOTIFY_SENT_DG_FAIL:
3791 		sctp_notify_send_failed(stcb, 1, error,
3792 		    (struct sctp_tmit_chunk *)data, so_locked);
3793 		break;
3794 	case SCTP_NOTIFY_UNSENT_DG_FAIL:
3795 		sctp_notify_send_failed(stcb, 0, error,
3796 		                        (struct sctp_tmit_chunk *)data, so_locked);
3797 		break;
3798 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3799 		{
3800 			uint32_t val;
3801 			val = *((uint32_t *)data);
3802 
3803 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3804 		break;
3805 		}
3806 	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
3807 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3808 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3809 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
3810 		} else {
3811 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
3812 		}
3813 		break;
3814 	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
3815 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3816 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3817 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
3818 		} else {
3819 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
3820 		}
3821 		break;
3822 	case SCTP_NOTIFY_ASSOC_RESTART:
3823 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
3824 		if (stcb->asoc.auth_supported == 0) {
3825 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3826 			                NULL, so_locked);
3827 		}
3828 		break;
3829 	case SCTP_NOTIFY_STR_RESET_SEND:
3830 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
3831 		break;
3832 	case SCTP_NOTIFY_STR_RESET_RECV:
3833 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
3834 		break;
3835 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3836 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3837 		                         (SCTP_STREAM_RESET_OUTGOING_SSN|SCTP_STREAM_RESET_FAILED));
3838 		break;
3839 	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
3840 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3841 		                         (SCTP_STREAM_RESET_OUTGOING_SSN|SCTP_STREAM_RESET_DENIED));
3842 		break;
3843 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3844 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3845 		                         (SCTP_STREAM_RESET_INCOMING|SCTP_STREAM_RESET_FAILED));
3846 		break;
3847 	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
3848 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3849 		                         (SCTP_STREAM_RESET_INCOMING|SCTP_STREAM_RESET_DENIED));
3850 		break;
3851 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3852 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3853 		    error, so_locked);
3854 		break;
3855 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3856 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3857 		                             error, so_locked);
3858 		break;
3859 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3860 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3861 		                             error, so_locked);
3862 		break;
3863 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3864 		sctp_notify_shutdown_event(stcb);
3865 		break;
3866 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3867 		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
3868 		                           (uint16_t)(uintptr_t)data,
3869 		                           so_locked);
3870 		break;
3871 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3872 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3873 		                           (uint16_t)(uintptr_t)data,
3874 		                           so_locked);
3875 		break;
3876 	case SCTP_NOTIFY_NO_PEER_AUTH:
3877 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3878 		                           (uint16_t)(uintptr_t)data,
3879 		                           so_locked);
3880 		break;
3881 	case SCTP_NOTIFY_SENDER_DRY:
3882 		sctp_notify_sender_dry_event(stcb, so_locked);
3883 		break;
3884 	case SCTP_NOTIFY_REMOTE_ERROR:
3885 		sctp_notify_remote_error(stcb, error, data);
3886 		break;
3887 	default:
3888 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3889 			__FUNCTION__, notification, notification);
3890 		break;
3891 	}			/* end switch */
3892 }
3893 
3894 void
3895 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
3896 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3897     SCTP_UNUSED
3898 #endif
3899     )
3900 {
3901 	struct sctp_association *asoc;
3902 	struct sctp_stream_out *outs;
3903 	struct sctp_tmit_chunk *chk, *nchk;
3904 	struct sctp_stream_queue_pending *sp, *nsp;
3905 	int i;
3906 
3907 	if (stcb == NULL) {
3908 		return;
3909 	}
3910 	asoc = &stcb->asoc;
3911 	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
3912 		/* already being freed */
3913 		return;
3914 	}
3915 #if defined(__APPLE__)
3916 	if (so_locked) {
3917 		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
3918 	} else {
3919 		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
3920 	}
3921 #endif
3922 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3923 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3924 	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
3925 		return;
3926 	}
3927 	/* now go through all the gunk, freeing chunks */
3928 	if (holds_lock == 0) {
3929 		SCTP_TCB_SEND_LOCK(stcb);
3930 	}
3931 	/* sent queue SHOULD be empty */
3932 	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
3933 		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3934 		asoc->sent_queue_cnt--;
3935 		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
3936 			if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
3937 				asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
3938 #ifdef INVARIANTS
3939 			} else {
3940 				panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
3941 #endif
3942 			}
3943 		}
3944 		if (chk->data != NULL) {
3945 			sctp_free_bufspace(stcb, asoc, chk, 1);
3946 			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
3947 			                error, chk, so_locked);
3948 			if (chk->data) {
3949 				sctp_m_freem(chk->data);
3950 				chk->data = NULL;
3951 			}
3952 		}
3953 		sctp_free_a_chunk(stcb, chk, so_locked);
3954 		/*sa_ignore FREED_MEMORY*/
3955 	}
3956 	/* pending send queue SHOULD be empty */
3957 	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
3958 		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3959 		asoc->send_queue_cnt--;
3960 		if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
3961 			asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
3962 #ifdef INVARIANTS
3963 		} else {
3964 			panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
3965 #endif
3966 		}
3967 		if (chk->data != NULL) {
3968 			sctp_free_bufspace(stcb, asoc, chk, 1);
3969 			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
3970 			                error, chk, so_locked);
3971 			if (chk->data) {
3972 				sctp_m_freem(chk->data);
3973 				chk->data = NULL;
3974 			}
3975 		}
3976 		sctp_free_a_chunk(stcb, chk, so_locked);
3977 		/*sa_ignore FREED_MEMORY*/
3978 	}
3979 	for (i = 0; i < asoc->streamoutcnt; i++) {
3980 		/* For each stream */
3981 		outs = &asoc->strmout[i];
3982 		/* clean up any sends there */
3983 		asoc->locked_on_sending = NULL;
3984 		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
3985 			asoc->stream_queue_cnt--;
3986 			TAILQ_REMOVE(&outs->outqueue, sp, next);
3987 			sctp_free_spbufspace(stcb, asoc, sp);
3988 			if (sp->data) {
3989 				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3990 						error, (void *)sp, so_locked);
3991 				if (sp->data) {
3992 					sctp_m_freem(sp->data);
3993 					sp->data = NULL;
3994 					sp->tail_mbuf = NULL;
3995 					sp->length = 0;
3996 				}
3997 			}
3998 			if (sp->net) {
3999 				sctp_free_remote_addr(sp->net);
4000 				sp->net = NULL;
4001 			}
4002 			/* Free the chunk */
4003 			sctp_free_a_strmoq(stcb, sp, so_locked);
4004 			/*sa_ignore FREED_MEMORY*/
4005 		}
4006 	}
4007 
4008 	if (holds_lock == 0) {
4009 		SCTP_TCB_SEND_UNLOCK(stcb);
4010 	}
4011 }
4012 
4013 void
4014 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
4015 			struct sctp_abort_chunk *abort, int so_locked
4016 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4017     SCTP_UNUSED
4018 #endif
4019     )
4020 {
4021 	if (stcb == NULL) {
4022 		return;
4023 	}
4024 #if defined(__APPLE__)
4025 	if (so_locked) {
4026 		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
4027 	} else {
4028 		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
4029 	}
4030 #endif
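	/* for 1-to-1 style (or peeled-off) connected sockets, remember that this association was aborted */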
4031 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
4032 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
4033 	     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
4034 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
4035 	}
4036 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
4037 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
4038 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
4039 		return;
4040 	}
4041 	/* Tell them we lost the asoc */
4042 	sctp_report_all_outbound(stcb, error, 1, so_locked);
4043 	if (from_peer) {
4044 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
4045 	} else {
4046 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
4047 	}
4048 }
4049 
4050 void
4051 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
4052                        struct mbuf *m, int iphlen,
4053                        struct sockaddr *src, struct sockaddr *dst,
4054                        struct sctphdr *sh, struct mbuf *op_err,
4055 #if defined(__FreeBSD__)
4056                        uint8_t mflowtype, uint32_t mflowid,
4057 #endif
4058                        uint32_t vrf_id, uint16_t port)
4059 {
4060 	uint32_t vtag;
4061 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4062 	struct socket *so;
4063 #endif
4064 
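	/* if we have an association, abort with the peer's verification tag; otherwise use a vtag of zero */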
4065 	vtag = 0;
4066 	if (stcb != NULL) {
4067 		vtag = stcb->asoc.peer_vtag;
4068 		vrf_id = stcb->asoc.vrf_id;
4069 	}
4070 	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
4071 #if defined(__FreeBSD__)
4072 	                mflowtype, mflowid,
4073 #endif
4074 	                vrf_id, port);
4075 	if (stcb != NULL) {
4076 		/* We have a TCB to abort, send notification too */
4077 		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
4078 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
4079 		/* Ok, now lets free it */
4080 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4081 		so = SCTP_INP_SO(inp);
4082 		atomic_add_int(&stcb->asoc.refcnt, 1);
4083 		SCTP_TCB_UNLOCK(stcb);
4084 		SCTP_SOCKET_LOCK(so, 1);
4085 		SCTP_TCB_LOCK(stcb);
4086 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4087 #endif
4088 		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4089 		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
4090 		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4091 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4092 		}
4093 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL+SCTP_LOC_4);
4094 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4095 		SCTP_SOCKET_UNLOCK(so, 1);
4096 #endif
4097 	}
4098 }
4099 #ifdef SCTP_ASOCLOG_OF_TSNS
4100 void
4101 sctp_print_out_track_log(struct sctp_tcb *stcb)
4102 {
4103 #ifdef NOSIY_PRINTS
4104 	int i;
4105 	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
4106 	SCTP_PRINTF("IN bound TSN log-aaa\n");
4107 	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
4108 		SCTP_PRINTF("None rcvd\n");
4109 		goto none_in;
4110 	}
4111 	if (stcb->asoc.tsn_in_wrapped) {
4112 		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
4113 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4114 				    stcb->asoc.in_tsnlog[i].tsn,
4115 				    stcb->asoc.in_tsnlog[i].strm,
4116 				    stcb->asoc.in_tsnlog[i].seq,
4117 				    stcb->asoc.in_tsnlog[i].flgs,
4118 				    stcb->asoc.in_tsnlog[i].sz);
4119 		}
4120 	}
4121 	if (stcb->asoc.tsn_in_at) {
4122 		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
4123 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4124 				    stcb->asoc.in_tsnlog[i].tsn,
4125 				    stcb->asoc.in_tsnlog[i].strm,
4126 				    stcb->asoc.in_tsnlog[i].seq,
4127 				    stcb->asoc.in_tsnlog[i].flgs,
4128 				    stcb->asoc.in_tsnlog[i].sz);
4129 		}
4130 	}
4131  none_in:
4132 	SCTP_PRINTF("OUT bound TSN log-aaa\n");
4133 	if ((stcb->asoc.tsn_out_at == 0) &&
4134 	    (stcb->asoc.tsn_out_wrapped == 0)) {
4135 		SCTP_PRINTF("None sent\n");
4136 	}
4137 	if (stcb->asoc.tsn_out_wrapped) {
4138 		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
4139 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4140 				    stcb->asoc.out_tsnlog[i].tsn,
4141 				    stcb->asoc.out_tsnlog[i].strm,
4142 				    stcb->asoc.out_tsnlog[i].seq,
4143 				    stcb->asoc.out_tsnlog[i].flgs,
4144 				    stcb->asoc.out_tsnlog[i].sz);
4145 		}
4146 	}
4147 	if (stcb->asoc.tsn_out_at) {
4148 		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
4149 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4150 				    stcb->asoc.out_tsnlog[i].tsn,
4151 				    stcb->asoc.out_tsnlog[i].strm,
4152 				    stcb->asoc.out_tsnlog[i].seq,
4153 				    stcb->asoc.out_tsnlog[i].flgs,
4154 				    stcb->asoc.out_tsnlog[i].sz);
4155 		}
4156 	}
4157 #endif
4158 }
4159 #endif
4160 
4161 void
4162 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
4163                           struct mbuf *op_err,
4164                           int so_locked
4165 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4166                           SCTP_UNUSED
4167 #endif
4168 )
4169 {
4170 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4171 	struct socket *so;
4172 #endif
4173 
4174 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4175 	so = SCTP_INP_SO(inp);
4176 #endif
4177 #if defined(__APPLE__)
4178 	if (so_locked) {
4179 		sctp_lock_assert(SCTP_INP_SO(inp));
4180 	} else {
4181 		sctp_unlock_assert(SCTP_INP_SO(inp));
4182 	}
4183 #endif
4184 	if (stcb == NULL) {
4185 		/* Got to have a TCB */
4186 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4187 			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4188 #if defined(__APPLE__)
4189 				if (!so_locked) {
4190 					SCTP_SOCKET_LOCK(so, 1);
4191 				}
4192 #endif
4193 				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4194 						SCTP_CALLED_DIRECTLY_NOCMPSET);
4195 #if defined(__APPLE__)
4196 				if (!so_locked) {
4197 					SCTP_SOCKET_UNLOCK(so, 1);
4198 				}
4199 #endif
4200 			}
4201 		}
4202 		return;
4203 	} else {
4204 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
4205 	}
4206 	/* notify the peer */
4207 	sctp_send_abort_tcb(stcb, op_err, so_locked);
4208 	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4209 	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
4210 	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4211 		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4212 	}
4213 	/* notify the ulp */
4214 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
4215 		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
4216 	}
4217 	/* now free the asoc */
4218 #ifdef SCTP_ASOCLOG_OF_TSNS
4219 	sctp_print_out_track_log(stcb);
4220 #endif
4221 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4222 	if (!so_locked) {
4223 		atomic_add_int(&stcb->asoc.refcnt, 1);
4224 		SCTP_TCB_UNLOCK(stcb);
4225 		SCTP_SOCKET_LOCK(so, 1);
4226 		SCTP_TCB_LOCK(stcb);
4227 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4228 	}
4229 #endif
4230 	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL+SCTP_LOC_5);
4231 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4232 	if (!so_locked) {
4233 		SCTP_SOCKET_UNLOCK(so, 1);
4234 	}
4235 #endif
4236 }
4237 
4238 void
4239 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
4240                  struct sockaddr *src, struct sockaddr *dst,
4241                  struct sctphdr *sh, struct sctp_inpcb *inp,
4242                  struct mbuf *cause,
4243 #if defined(__FreeBSD__)
4244                  uint8_t mflowtype, uint32_t mflowid,
4245 #endif
4246                  uint32_t vrf_id, uint16_t port)
4247 {
4248 	struct sctp_chunkhdr *ch, chunk_buf;
4249 	unsigned int chk_length;
4250 	int contains_init_chunk;
4251 
4252 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
4253 	/* Generate a TO address for future reference */
4254 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4255 		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4256 #if defined(__APPLE__)
4257 			SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 1);
4258 #endif
4259 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4260 					SCTP_CALLED_DIRECTLY_NOCMPSET);
4261 #if defined(__APPLE__)
4262 			SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 1);
4263 #endif
4264 		}
4265 	}
4266 	contains_init_chunk = 0;
4267 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4268 	    sizeof(*ch), (uint8_t *) & chunk_buf);
4269 	while (ch != NULL) {
4270 		chk_length = ntohs(ch->chunk_length);
4271 		if (chk_length < sizeof(*ch)) {
4272 			/* break to abort land */
4273 			break;
4274 		}
4275 		switch (ch->chunk_type) {
4276 		case SCTP_INIT:
4277 			contains_init_chunk = 1;
4278 			break;
4279 		case SCTP_PACKET_DROPPED:
4280 			/* we don't respond to pkt-dropped */
4281 			return;
4282 		case SCTP_ABORT_ASSOCIATION:
4283 			/* we don't respond with an ABORT to an ABORT */
4284 			return;
4285 		case SCTP_SHUTDOWN_COMPLETE:
4286 			/*
4287 			 * we ignore it since we are not waiting for it and
4288 			 * peer is gone
4289 			 */
4290 			return;
4291 		case SCTP_SHUTDOWN_ACK:
4292 			sctp_send_shutdown_complete2(src, dst, sh,
4293 #if defined(__FreeBSD__)
4294 			                             mflowtype, mflowid,
4295 #endif
4296 			                             vrf_id, port);
4297 			return;
4298 		default:
4299 			break;
4300 		}
4301 		offset += SCTP_SIZE32(chk_length);
4302 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4303 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4304 	}
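	/*
	 * sctp_blackhole: 0 means always answer OOTB packets with an ABORT,
	 * 1 means stay silent only when the packet contains an INIT,
	 * and any other value means never send an ABORT.
	 */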
4305 	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
4306 	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
4307 	     (contains_init_chunk == 0))) {
4308 		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
4309 #if defined(__FreeBSD__)
4310 		                mflowtype, mflowid,
4311 #endif
4312 		                vrf_id, port);
4313 	}
4314 }
4315 
4316 /*
4317  * check the inbound datagram to make sure there is not an abort inside it,
4318  * if there is return 1, else return 0.
4319  */
4320 int
4321 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4322 {
4323 	struct sctp_chunkhdr *ch;
4324 	struct sctp_init_chunk *init_chk, chunk_buf;
4325 	int offset;
4326 	unsigned int chk_length;
4327 
4328 	offset = iphlen + sizeof(struct sctphdr);
4329 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4330 	    (uint8_t *) & chunk_buf);
4331 	while (ch != NULL) {
4332 		chk_length = ntohs(ch->chunk_length);
4333 		if (chk_length < sizeof(*ch)) {
4334 			/* packet is probably corrupt */
4335 			break;
4336 		}
4337 		/* we seem to be ok, is it an abort? */
4338 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4339 			/* yep, tell them */
4340 			return (1);
4341 		}
4342 		if (ch->chunk_type == SCTP_INITIATION) {
4343 			/* need to update the Vtag */
4344 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4345 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4346 			if (init_chk != NULL) {
4347 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4348 			}
4349 		}
4350 		/* Nope, move to the next chunk */
4351 		offset += SCTP_SIZE32(chk_length);
4352 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4353 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4354 	}
4355 	return (0);
4356 }
4357 
4358 /*
4359  * currently (2/02), ifa_addr embeds the scope_id but doesn't have sin6_scope_id
4360  * set (i.e. it's 0), so create this function to compare link-local scopes
4361  */
4362 #ifdef INET6
4363 uint32_t
4364 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4365 {
4366 #if defined(__Userspace__)
4367     /*__Userspace__ Returning 1 here always */
4368 #endif
4369 #if defined(SCTP_EMBEDDED_V6_SCOPE)
4370 	struct sockaddr_in6 a, b;
4371 
4372 	/* save copies */
4373 	a = *addr1;
4374 	b = *addr2;
4375 
4376 	if (a.sin6_scope_id == 0)
4377 #ifdef SCTP_KAME
4378 		if (sa6_recoverscope(&a)) {
4379 #else
4380 		if (in6_recoverscope(&a, &a.sin6_addr, NULL)) {
4381 #endif				/* SCTP_KAME */
4382 			/* can't get scope, so can't match */
4383 			return (0);
4384 		}
4385 	if (b.sin6_scope_id == 0)
4386 #ifdef SCTP_KAME
4387 		if (sa6_recoverscope(&b)) {
4388 #else
4389 		if (in6_recoverscope(&b, &b.sin6_addr, NULL)) {
4390 #endif				/* SCTP_KAME */
4391 			/* can't get scope, so can't match */
4392 			return (0);
4393 		}
4394 	if (a.sin6_scope_id != b.sin6_scope_id)
4395 		return (0);
4396 #else
4397 	if (addr1->sin6_scope_id != addr2->sin6_scope_id)
4398 		return (0);
4399 #endif /* SCTP_EMBEDDED_V6_SCOPE */
4400 
4401 	return (1);
4402 }
4403 
4404 #if defined(SCTP_EMBEDDED_V6_SCOPE)
4405 /*
4406  * returns a sockaddr_in6 with embedded scope recovered and removed
4407  */
4408 struct sockaddr_in6 *
4409 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4410 {
4411 	/* check and strip embedded scope junk */
4412 	if (addr->sin6_family == AF_INET6) {
4413 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4414 			if (addr->sin6_scope_id == 0) {
4415 				*store = *addr;
4416 #ifdef SCTP_KAME
4417 				if (!sa6_recoverscope(store)) {
4418 #else
4419 				if (!in6_recoverscope(store, &store->sin6_addr,
4420 				    NULL)) {
4421 #endif /* SCTP_KAME */
4422 					/* use the recovered scope */
4423 					addr = store;
4424 				}
4425 			} else {
4426 				/* else, return the original "to" addr */
4427 				in6_clearscope(&addr->sin6_addr);
4428 			}
4429 		}
4430 	}
4431 	return (addr);
4432 }
4433 #endif /* SCTP_EMBEDDED_V6_SCOPE */
4434 #endif
4435 
4436 /*
4437  * are the two addresses the same?  currently a "scopeless" check returns: 1
4438  * if same, 0 if not
4439  */
4440 int
4441 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4442 {
4443 
4444 	/* must be valid */
4445 	if (sa1 == NULL || sa2 == NULL)
4446 		return (0);
4447 
4448 	/* must be the same family */
4449 	if (sa1->sa_family != sa2->sa_family)
4450 		return (0);
4451 
4452 	switch (sa1->sa_family) {
4453 #ifdef INET6
4454 	case AF_INET6:
4455 	{
4456 		/* IPv6 addresses */
4457 		struct sockaddr_in6 *sin6_1, *sin6_2;
4458 
4459 		sin6_1 = (struct sockaddr_in6 *)sa1;
4460 		sin6_2 = (struct sockaddr_in6 *)sa2;
4461 		return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4462 		    sin6_2));
4463 	}
4464 #endif
4465 #ifdef INET
4466 	case AF_INET:
4467 	{
4468 		/* IPv4 addresses */
4469 		struct sockaddr_in *sin_1, *sin_2;
4470 
4471 		sin_1 = (struct sockaddr_in *)sa1;
4472 		sin_2 = (struct sockaddr_in *)sa2;
4473 		return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4474 	}
4475 #endif
4476 #if defined(__Userspace__)
4477 	case AF_CONN:
4478 	{
4479 		struct sockaddr_conn *sconn_1, *sconn_2;
4480 
4481 		sconn_1 = (struct sockaddr_conn *)sa1;
4482 		sconn_2 = (struct sockaddr_conn *)sa2;
4483 		return (sconn_1->sconn_addr == sconn_2->sconn_addr);
4484 	}
4485 #endif
4486 	default:
4487 		/* we don't do these... */
4488 		return (0);
4489 	}
4490 }
4491 
4492 void
4493 sctp_print_address(struct sockaddr *sa)
4494 {
4495 #ifdef INET6
4496 #if defined(__FreeBSD__) && __FreeBSD_version >= 700000
4497 	char ip6buf[INET6_ADDRSTRLEN];
4498 #endif
4499 #endif
4500 
4501 	switch (sa->sa_family) {
4502 #ifdef INET6
4503 	case AF_INET6:
4504 	{
4505 		struct sockaddr_in6 *sin6;
4506 
4507 		sin6 = (struct sockaddr_in6 *)sa;
4508 #if defined(__Userspace__)
4509 		SCTP_PRINTF("IPv6 address: %x:%x:%x:%x:%x:%x:%x:%x:port:%d scope:%u\n",
4510 			    ntohs(sin6->sin6_addr.s6_addr16[0]),
4511 			    ntohs(sin6->sin6_addr.s6_addr16[1]),
4512 			    ntohs(sin6->sin6_addr.s6_addr16[2]),
4513 			    ntohs(sin6->sin6_addr.s6_addr16[3]),
4514 			    ntohs(sin6->sin6_addr.s6_addr16[4]),
4515 			    ntohs(sin6->sin6_addr.s6_addr16[5]),
4516 			    ntohs(sin6->sin6_addr.s6_addr16[6]),
4517 			    ntohs(sin6->sin6_addr.s6_addr16[7]),
4518 			    ntohs(sin6->sin6_port),
4519 			    sin6->sin6_scope_id);
4520 #else
4521 #if defined(__FreeBSD__) && __FreeBSD_version >= 700000
4522 		SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4523 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4524 			    ntohs(sin6->sin6_port),
4525 			    sin6->sin6_scope_id);
4526 #else
4527 		SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4528 			    ip6_sprintf(&sin6->sin6_addr),
4529 			    ntohs(sin6->sin6_port),
4530 			    sin6->sin6_scope_id);
4531 #endif
4532 #endif
4533 		break;
4534 	}
4535 #endif
4536 #ifdef INET
4537 	case AF_INET:
4538 	{
4539 		struct sockaddr_in *sin;
4540 		unsigned char *p;
4541 
4542 		sin = (struct sockaddr_in *)sa;
4543 		p = (unsigned char *)&sin->sin_addr;
4544 		SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4545 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4546 		break;
4547 	}
4548 #endif
4549 #if defined(__Userspace__)
4550 	case AF_CONN:
4551 	{
4552 		struct sockaddr_conn *sconn;
4553 
4554 		sconn = (struct sockaddr_conn *)sa;
4555 		SCTP_PRINTF("AF_CONN address: %p\n", sconn->sconn_addr);
4556 		break;
4557 	}
4558 #endif
4559 	default:
4560 		SCTP_PRINTF("?\n");
4561 		break;
4562 	}
4563 }
4564 
4565 void
4566 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
4567     struct sctp_inpcb *new_inp,
4568     struct sctp_tcb *stcb,
4569     int waitflags)
4570 {
4571 	/*
4572 	 * go through our old INP and pull off any control structures that
4573 	 * belong to stcb and move them to the new inp.
4574 	 */
4575 	struct socket *old_so, *new_so;
4576 	struct sctp_queued_to_read *control, *nctl;
4577 	struct sctp_readhead tmp_queue;
4578 	struct mbuf *m;
4579 	int error = 0;
4580 
4581 	old_so = old_inp->sctp_socket;
4582 	new_so = new_inp->sctp_socket;
4583 	TAILQ_INIT(&tmp_queue);
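	/* first collect matching entries onto a temporary queue under the old inp's read lock, then charge them to the new socket below */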
4584 #if defined(__FreeBSD__) && __FreeBSD_version < 700000
4585 	SOCKBUF_LOCK(&(old_so->so_rcv));
4586 #endif
4587 #if defined(__FreeBSD__) || defined(__APPLE__)
4588 	error = sblock(&old_so->so_rcv, waitflags);
4589 #endif
4590 #if defined(__FreeBSD__) && __FreeBSD_version < 700000
4591 	SOCKBUF_UNLOCK(&(old_so->so_rcv));
4592 #endif
4593 	if (error) {
4594 		/* Gak, can't get sblock, we have a problem.
4595 		 * Data will be left stranded, and we
4596 		 * don't dare look at it since the
4597 		 * other thread may be reading something.
4598 		 * Oh well, it's a screwed up app that does
4599 		 * a peeloff OR an accept while reading
4600 		 * from the main socket... actually it's
4601 		 * only the peeloff() case, since I think
4602 		 * read will fail on a listening socket.
4603 		 */
4604 		return;
4605 	}
4606 	/* lock the socket buffers */
4607 	SCTP_INP_READ_LOCK(old_inp);
4608 	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
4609 		/* Pull off all for our target stcb */
4610 		if (control->stcb == stcb) {
4611 			/* remove it we want it */
4612 			TAILQ_REMOVE(&old_inp->read_queue, control, next);
4613 			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
4614 			m = control->data;
4615 			while (m) {
4616 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4617 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE,SCTP_BUF_LEN(m));
4618 				}
4619 				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
4620 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4621 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
4622 				}
4623 				m = SCTP_BUF_NEXT(m);
4624 			}
4625 		}
4626 	}
4627 	SCTP_INP_READ_UNLOCK(old_inp);
4628 	/* Remove the sb-lock on the old socket */
4629 #if defined(__FreeBSD__) && __FreeBSD_version < 700000
4630 	SOCKBUF_LOCK(&(old_so->so_rcv));
4631 #endif
4632 #if defined(__APPLE__)
4633 	sbunlock(&old_so->so_rcv, 1);
4634 #endif
4635 
4636 #if defined(__FreeBSD__)
4637 	sbunlock(&old_so->so_rcv);
4638 #endif
4639 #if defined(__FreeBSD__) && __FreeBSD_version < 700000
4640 	SOCKBUF_UNLOCK(&(old_so->so_rcv));
4641 #endif
4642 	/* Now we move them over to the new socket buffer */
4643 	SCTP_INP_READ_LOCK(new_inp);
4644 	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
4645 		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
4646 		m = control->data;
4647 		while (m) {
4648 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4649 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4650 			}
4651 			sctp_sballoc(stcb, &new_so->so_rcv, m);
4652 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4653 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
4654 			}
4655 			m = SCTP_BUF_NEXT(m);
4656 		}
4657 	}
4658 	SCTP_INP_READ_UNLOCK(new_inp);
4659 }
4660 
4661 void
4662 sctp_add_to_readq(struct sctp_inpcb *inp,
4663     struct sctp_tcb *stcb,
4664     struct sctp_queued_to_read *control,
4665     struct sockbuf *sb,
4666     int end,
4667     int inp_read_lock_held,
4668     int so_locked
4669 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4670     SCTP_UNUSED
4671 #endif
4672     )
4673 {
4674 	/*
4675 	 * Here we must place the control on the end of the socket read
4676 	 * queue AND increment sb_cc so that select will work properly on
4677 	 * read.
4678 	 */
4679 	struct mbuf *m, *prev = NULL;
4680 
4681 	if (inp == NULL) {
4682 		/* Gak, TSNH!! */
4683 #ifdef INVARIANTS
4684 		panic("Gak, inp NULL on add_to_readq");
4685 #endif
4686 		return;
4687 	}
4688 #if defined(__APPLE__)
4689 	if (so_locked) {
4690 		sctp_lock_assert(SCTP_INP_SO(inp));
4691 	} else {
4692 		sctp_unlock_assert(SCTP_INP_SO(inp));
4693 	}
4694 #endif
4695 	if (inp_read_lock_held == 0)
4696 		SCTP_INP_READ_LOCK(inp);
4697 	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
4698 		sctp_free_remote_addr(control->whoFrom);
4699 		if (control->data) {
4700 			sctp_m_freem(control->data);
4701 			control->data = NULL;
4702 		}
4703 		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
4704 		if (inp_read_lock_held == 0)
4705 			SCTP_INP_READ_UNLOCK(inp);
4706 		return;
4707 	}
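	/* only user data, not notifications, counts toward the receive statistics */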
4708 	if (!(control->spec_flags & M_NOTIFICATION)) {
4709 		atomic_add_int(&inp->total_recvs, 1);
4710 		if (!control->do_not_ref_stcb) {
4711 			atomic_add_int(&stcb->total_recvs, 1);
4712 		}
4713 	}
4714 	m = control->data;
4715 	control->held_length = 0;
4716 	control->length = 0;
4717 	while (m) {
4718 		if (SCTP_BUF_LEN(m) == 0) {
4719 			/* Skip mbufs with NO length */
4720 			if (prev == NULL) {
4721 				/* First one */
4722 				control->data = sctp_m_free(m);
4723 				m = control->data;
4724 			} else {
4725 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
4726 				m = SCTP_BUF_NEXT(prev);
4727 			}
4728 			if (m == NULL) {
4729 				control->tail_mbuf = prev;
4730 			}
4731 			continue;
4732 		}
4733 		prev = m;
4734 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4735 			sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4736 		}
4737 		sctp_sballoc(stcb, sb, m);
4738 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4739 			sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
4740 		}
4741 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
4742 		m = SCTP_BUF_NEXT(m);
4743 	}
4744 	if (prev != NULL) {
4745 		control->tail_mbuf = prev;
4746 	} else {
4747 		/* Everything got collapsed out?? */
4748 		sctp_free_remote_addr(control->whoFrom);
4749 		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
4750 		if (inp_read_lock_held == 0)
4751 			SCTP_INP_READ_UNLOCK(inp);
4752 		return;
4753 	}
4754 	if (end) {
4755 		control->end_added = 1;
4756 	}
4757 #if defined(__Userspace__)
4758 	if (inp->recv_callback) {
4759 		if (inp_read_lock_held == 0)
4760 			SCTP_INP_READ_UNLOCK(inp);
4761 		if (control->end_added == 1) {
4762 			struct socket *so;
4763 			struct mbuf *m;
4764 			char *buffer;
4765 			struct sctp_rcvinfo rcv;
4766 			union sctp_sockstore addr;
4767 			int flags;
4768 
4769 			if ((buffer = malloc(control->length)) == NULL) {
4770 				return;
4771 			}
4772 			so = stcb->sctp_socket;
4773 			for (m = control->data; m; m = SCTP_BUF_NEXT(m)) {
4774 				sctp_sbfree(control, control->stcb, &so->so_rcv, m);
4775 			}
4776 			atomic_add_int(&stcb->asoc.refcnt, 1);
4777 			SCTP_TCB_UNLOCK(stcb);
4778 			m_copydata(control->data, 0, control->length, buffer);
4779 			memset(&rcv, 0, sizeof(struct sctp_rcvinfo));
4780 			rcv.rcv_sid = control->sinfo_stream;
4781 			rcv.rcv_ssn = control->sinfo_ssn;
4782 			rcv.rcv_flags = control->sinfo_flags;
4783 			rcv.rcv_ppid = control->sinfo_ppid;
4784 			rcv.rcv_tsn = control->sinfo_tsn;
4785 			rcv.rcv_cumtsn = control->sinfo_cumtsn;
4786 			rcv.rcv_context = control->sinfo_context;
4787 			rcv.rcv_assoc_id = control->sinfo_assoc_id;
4788 			memset(&addr, 0, sizeof(union sctp_sockstore));
4789 			switch (control->whoFrom->ro._l_addr.sa.sa_family) {
4790 #ifdef INET
4791 			case AF_INET:
4792 				addr.sin = control->whoFrom->ro._l_addr.sin;
4793 				break;
4794 #endif
4795 #ifdef INET6
4796 			case AF_INET6:
4797 				addr.sin6 = control->whoFrom->ro._l_addr.sin6;
4798 				break;
4799 #endif
4800 			case AF_CONN:
4801 				addr.sconn = control->whoFrom->ro._l_addr.sconn;
4802 				break;
4803 			default:
4804 				addr.sa = control->whoFrom->ro._l_addr.sa;
4805 				break;
4806 			}
4807 			flags = MSG_EOR;
4808 			if (control->spec_flags & M_NOTIFICATION) {
4809 				flags |= MSG_NOTIFICATION;
4810 			}
4811 			inp->recv_callback(so, addr, buffer, control->length, rcv, flags, inp->ulp_info);
4812 			SCTP_TCB_LOCK(stcb);
4813 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4814 			sctp_free_remote_addr(control->whoFrom);
4815 			control->whoFrom = NULL;
4816 			sctp_m_freem(control->data);
4817 			control->data = NULL;
4818 			control->length = 0;
4819 			sctp_free_a_readq(stcb, control);
4820 		}
4821 		return;
4822 	}
4823 #endif
4824 	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
4825 	if (inp_read_lock_held == 0)
4826 		SCTP_INP_READ_UNLOCK(inp);
4827 	if (inp && inp->sctp_socket) {
4828 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
4829 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
4830 		} else {
4831 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4832 			struct socket *so;
4833 
4834 			so = SCTP_INP_SO(inp);
4835 			if (!so_locked) {
4836 				if (stcb) {
4837 					atomic_add_int(&stcb->asoc.refcnt, 1);
4838 					SCTP_TCB_UNLOCK(stcb);
4839 				}
4840 				SCTP_SOCKET_LOCK(so, 1);
4841 				if (stcb) {
4842 					SCTP_TCB_LOCK(stcb);
4843 					atomic_subtract_int(&stcb->asoc.refcnt, 1);
4844 				}
4845 				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4846 					SCTP_SOCKET_UNLOCK(so, 1);
4847 					return;
4848 				}
4849 			}
4850 #endif
4851 			sctp_sorwakeup(inp, inp->sctp_socket);
4852 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4853 			if (!so_locked) {
4854 				SCTP_SOCKET_UNLOCK(so, 1);
4855 			}
4856 #endif
4857 		}
4858 	}
4859 }
4860 
4861 
4862 int
4863 sctp_append_to_readq(struct sctp_inpcb *inp,
4864     struct sctp_tcb *stcb,
4865     struct sctp_queued_to_read *control,
4866     struct mbuf *m,
4867     int end,
4868     int ctls_cumack,
4869     struct sockbuf *sb)
4870 {
4871 	/*
4872 	 * A partial delivery API event is underway. OR we are appending on
4873 	 * the reassembly queue.
4874 	 *
4875 	 * If PDAPI this means we need to add m to the end of the data.
4876 	 * Increase the length in the control AND increment the sb_cc.
4877 	 * Otherwise sb is NULL and all we need to do is put it at the end
4878 	 * of the mbuf chain.
4879 	 */
4880 	int len = 0;
4881 	struct mbuf *mm, *tail = NULL, *prev = NULL;
4882 
4883 	if (inp) {
4884 		SCTP_INP_READ_LOCK(inp);
4885 	}
4886 	if (control == NULL) {
4887 	get_out:
4888 		if (inp) {
4889 			SCTP_INP_READ_UNLOCK(inp);
4890 		}
4891 		return (-1);
4892 	}
4893 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
4894 		SCTP_INP_READ_UNLOCK(inp);
4895 		return (0);
4896 	}
4897 	if (control->end_added) {
4898 		/* huh this one is complete? */
4899 		goto get_out;
4900 	}
4901 	mm = m;
4902 	if (mm == NULL) {
4903 		goto get_out;
4904 	}
4905 
4906 	while (mm) {
4907 		if (SCTP_BUF_LEN(mm) == 0) {
4908 			/* Skip mbufs with NO length */
4909 			if (prev == NULL) {
4910 				/* First one */
4911 				m = sctp_m_free(mm);
4912 				mm = m;
4913 			} else {
4914 				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
4915 				mm = SCTP_BUF_NEXT(prev);
4916 			}
4917 			continue;
4918 		}
4919 		prev = mm;
4920 		len += SCTP_BUF_LEN(mm);
4921 		if (sb) {
4922 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4923 				sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
4924 			}
4925 			sctp_sballoc(stcb, sb, mm);
4926 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4927 				sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
4928 			}
4929 		}
4930 		mm = SCTP_BUF_NEXT(mm);
4931 	}
4932 	if (prev) {
4933 		tail = prev;
4934 	} else {
4935 		/* Really there should always be a prev */
4936 		if (m == NULL) {
4937 			/* Huh nothing left? */
4938 #ifdef INVARIANTS
4939 			panic("Nothing left to add?");
4940 #else
4941 			goto get_out;
4942 #endif
4943 		}
4944 		tail = m;
4945 	}
4946 	if (control->tail_mbuf) {
4947 		/* append */
4948 		SCTP_BUF_NEXT(control->tail_mbuf) = m;
4949 		control->tail_mbuf = tail;
4950 	} else {
4951 		/* nothing there */
4952 #ifdef INVARIANTS
4953 		if (control->data != NULL) {
4954 			panic("This should NOT happen");
4955 		}
4956 #endif
4957 		control->data = m;
4958 		control->tail_mbuf = tail;
4959 	}
4960 	atomic_add_int(&control->length, len);
4961 	if (end) {
4962 		/* message is complete */
4963 		if (stcb && (control == stcb->asoc.control_pdapi)) {
4964 			stcb->asoc.control_pdapi = NULL;
4965 		}
4966 		control->held_length = 0;
4967 		control->end_added = 1;
4968 	}
4969 	if (stcb == NULL) {
4970 		control->do_not_ref_stcb = 1;
4971 	}
4972 	/*
4973 	 * When we are appending in partial delivery, the cum-ack is used
4974 	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
4975 	 * is populated in the outbound sinfo structure from the true cumack
4976 	 * if the association exists...
4977 	 */
4978 	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
4979 #if defined(__Userspace__)
4980 	if (inp->recv_callback) {
4981 		uint32_t pd_point, length;
4982 
4983 		length = control->length;
4984 		if (stcb != NULL && stcb->sctp_socket != NULL) {
4985 			pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
4986 			               stcb->sctp_ep->partial_delivery_point);
4987 		} else {
4988 			pd_point = inp->partial_delivery_point;
4989 		}
4990 		if ((control->end_added == 1) || (length >= pd_point)) {
4991 			struct socket *so;
4992 			char *buffer;
4993 			struct sctp_rcvinfo rcv;
4994 			union sctp_sockstore addr;
4995 			int flags;
4996 
			if ((buffer = malloc(control->length)) == NULL) {
				/* Don't leak the inp read lock taken above. */
				SCTP_INP_READ_UNLOCK(inp);
				return (-1);
			}
5000 			so = stcb->sctp_socket;
5001 			for (m = control->data; m; m = SCTP_BUF_NEXT(m)) {
5002 				sctp_sbfree(control, control->stcb, &so->so_rcv, m);
5003 			}
5004 			m_copydata(control->data, 0, control->length, buffer);
5005 			memset(&rcv, 0, sizeof(struct sctp_rcvinfo));
5006 			rcv.rcv_sid = control->sinfo_stream;
5007 			rcv.rcv_ssn = control->sinfo_ssn;
5008 			rcv.rcv_flags = control->sinfo_flags;
5009 			rcv.rcv_ppid = control->sinfo_ppid;
5010 			rcv.rcv_tsn = control->sinfo_tsn;
5011 			rcv.rcv_cumtsn = control->sinfo_cumtsn;
5012 			rcv.rcv_context = control->sinfo_context;
5013 			rcv.rcv_assoc_id = control->sinfo_assoc_id;
5014 			memset(&addr, 0, sizeof(union sctp_sockstore));
5015 			switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5016 #ifdef INET
5017 			case AF_INET:
5018 				addr.sin = control->whoFrom->ro._l_addr.sin;
5019 				break;
5020 #endif
5021 #ifdef INET6
5022 			case AF_INET6:
5023 				addr.sin6 = control->whoFrom->ro._l_addr.sin6;
5024 				break;
5025 #endif
5026 			case AF_CONN:
5027 				addr.sconn = control->whoFrom->ro._l_addr.sconn;
5028 				break;
5029 			default:
5030 				addr.sa = control->whoFrom->ro._l_addr.sa;
5031 				break;
5032 			}
5033 			flags = 0;
5034 			if (control->end_added == 1) {
5035 				flags |= MSG_EOR;
5036 			}
5037 			if (control->spec_flags & M_NOTIFICATION) {
5038 				flags |= MSG_NOTIFICATION;
5039 			}
5040 			sctp_m_freem(control->data);
5041 			control->data = NULL;
5042 			control->tail_mbuf = NULL;
5043 			control->length = 0;
5044 			if (control->end_added) {
5045 				sctp_free_remote_addr(control->whoFrom);
5046 				control->whoFrom = NULL;
5047 				sctp_free_a_readq(stcb, control);
5048 			} else {
5049 				control->some_taken = 1;
5050 			}
5051 			atomic_add_int(&stcb->asoc.refcnt, 1);
5052 			SCTP_TCB_UNLOCK(stcb);
5053 			inp->recv_callback(so, addr, buffer, length, rcv, flags, inp->ulp_info);
5054 			SCTP_TCB_LOCK(stcb);
5055 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
5056 		}
5057 		if (inp)
5058 			SCTP_INP_READ_UNLOCK(inp);
5059 		return (0);
5060 	}
5061 #endif
5062 	if (inp) {
5063 		SCTP_INP_READ_UNLOCK(inp);
5064 	}
5065 	if (inp && inp->sctp_socket) {
5066 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
5067 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
5068 		} else {
5069 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5070 			struct socket *so;
5071 
5072 			so = SCTP_INP_SO(inp);
5073 			if (stcb) {
5074 				atomic_add_int(&stcb->asoc.refcnt, 1);
5075 				SCTP_TCB_UNLOCK(stcb);
5076 			}
5077 			SCTP_SOCKET_LOCK(so, 1);
5078 			if (stcb) {
5079 				SCTP_TCB_LOCK(stcb);
5080 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
5081 			}
5082 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5083 				SCTP_SOCKET_UNLOCK(so, 1);
5084 				return (0);
5085 			}
5086 #endif
5087 			sctp_sorwakeup(inp, inp->sctp_socket);
5088 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5089 			SCTP_SOCKET_UNLOCK(so, 1);
5090 #endif
5091 		}
5092 	}
5093 	return (0);
5094 }
5095 
5096 
5097 
5098 /*************HOLD THIS COMMENT FOR PATCH FILE OF
5099  *************ALTERNATE ROUTING CODE
5100  */
5101 
5102 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
5103  *************ALTERNATE ROUTING CODE
5104  */
5105 
5106 struct mbuf *
5107 sctp_generate_cause(uint16_t code, char *info)
5108 {
5109 	struct mbuf *m;
5110 	struct sctp_gen_error_cause *cause;
5111 	size_t info_len, len;
5112 
5113 	if ((code == 0) || (info == NULL)) {
5114 		return (NULL);
5115 	}
5116 	info_len = strlen(info);
5117 	len = sizeof(struct sctp_paramhdr) + info_len;
5118 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
5119 	if (m != NULL) {
5120 		SCTP_BUF_LEN(m) = len;
5121 		cause = mtod(m, struct sctp_gen_error_cause *);
5122 		cause->code = htons(code);
5123 		cause->length = htons((uint16_t)len);
5124 		memcpy(cause->info, info, info_len);
5125 	}
5126 	return (m);
5127 }
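/*
 * Editorial usage sketch (illustrative only, not part of the original code):
 * a caller that wants to report a textual error cause might do, e.g.,
 *
 *	struct mbuf *op_err;
 *
 *	op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION,
 *	                             "association setup failed");
 *
 * The resulting mbuf carries a struct sctp_gen_error_cause (cause code and
 * length in network byte order) followed by the un-padded info bytes; any
 * padding to a 4-byte boundary is left to the code that bundles the cause
 * into a chunk.
 */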
5128 
5129 struct mbuf *
5130 sctp_generate_no_user_data_cause(uint32_t tsn)
5131 {
5132 	struct mbuf *m;
5133 	struct sctp_error_no_user_data *no_user_data_cause;
5134 	size_t len;
5135 
5136 	len = sizeof(struct sctp_error_no_user_data);
5137 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
5138 	if (m != NULL) {
5139 		SCTP_BUF_LEN(m) = len;
5140 		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
5141 		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
5142 		no_user_data_cause->cause.length = htons((uint16_t)len);
5143 		no_user_data_cause->tsn = tsn; /* tsn is passed in as NBO */
5144 	}
5145 	return (m);
5146 }
5147 
5148 #ifdef SCTP_MBCNT_LOGGING
5149 void
5150 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
5151     struct sctp_tmit_chunk *tp1, int chk_cnt)
5152 {
5153 	if (tp1->data == NULL) {
5154 		return;
5155 	}
5156 	asoc->chunks_on_out_queue -= chk_cnt;
5157 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
5158 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
5159 			       asoc->total_output_queue_size,
5160 			       tp1->book_size,
5161 			       0,
5162 			       tp1->mbcnt);
5163 	}
5164 	if (asoc->total_output_queue_size >= tp1->book_size) {
5165 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
5166 	} else {
5167 		asoc->total_output_queue_size = 0;
5168 	}
5169 
5170 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
5171 				  ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
5172 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
5173 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
5174 		} else {
5175 			stcb->sctp_socket->so_snd.sb_cc = 0;
5176 
5177 		}
5178 	}
5179 }
5180 
5181 #endif
5182 
5183 int
5184 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
5185 			   uint8_t sent, int so_locked
5186 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
5187 			   SCTP_UNUSED
5188 #endif
5189 	)
5190 {
5191 	struct sctp_stream_out *strq;
5192 	struct sctp_tmit_chunk *chk = NULL, *tp2;
5193 	struct sctp_stream_queue_pending *sp;
5194 	uint16_t stream = 0, seq = 0;
5195 	uint8_t foundeom = 0;
5196 	int ret_sz = 0;
5197 	int notdone;
5198 	int do_wakeup_routine = 0;
5199 
5200 #if defined(__APPLE__)
5201 	if (so_locked) {
5202 		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
5203 	} else {
5204 		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
5205 	}
5206 #endif
5207 	stream = tp1->rec.data.stream_number;
5208 	seq = tp1->rec.data.stream_seq;
5209 	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
5210 		stcb->asoc.abandoned_sent[0]++;
5211 		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
5212 		stcb->asoc.strmout[stream].abandoned_sent[0]++;
5213 #if defined(SCTP_DETAILED_STR_STATS)
5214 		stcb->asoc.strmout[stream].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
5215 #endif
5216 	} else {
5217 		stcb->asoc.abandoned_unsent[0]++;
5218 		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
5219 		stcb->asoc.strmout[stream].abandoned_unsent[0]++;
5220 #if defined(SCTP_DETAILED_STR_STATS)
5221 		stcb->asoc.strmout[stream].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
5222 #endif
5223 	}
5224 	do {
5225 		ret_sz += tp1->book_size;
5226 		if (tp1->data != NULL) {
5227 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5228 				sctp_flight_size_decrease(tp1);
5229 				sctp_total_flight_decrease(stcb, tp1);
5230 			}
5231 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
5232 			stcb->asoc.peers_rwnd += tp1->send_size;
5233 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
5234 			if (sent) {
5235 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
5236 			} else {
5237 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
5238 			}
5239 			if (tp1->data) {
5240 				sctp_m_freem(tp1->data);
5241 				tp1->data = NULL;
5242 			}
5243 			do_wakeup_routine = 1;
5244 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
5245 				stcb->asoc.sent_queue_cnt_removeable--;
5246 			}
5247 		}
5248 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
5249 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
5250 		    SCTP_DATA_NOT_FRAG) {
			/* not frag'ed, we are done */
5252 			notdone = 0;
5253 			foundeom = 1;
5254 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
5255 			/* end of frag, we are done */
5256 			notdone = 0;
5257 			foundeom = 1;
5258 		} else {
			/*
			 * It's a begin or middle piece; we must mark all
			 * of it.
			 */
5263 			notdone = 1;
5264 			tp1 = TAILQ_NEXT(tp1, sctp_next);
5265 		}
5266 	} while (tp1 && notdone);
5267 	if (foundeom == 0) {
5268 		/*
5269 		 * The multi-part message was scattered across the send and
5270 		 * sent queue.
5271 		 */
5272 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
5273 			if ((tp1->rec.data.stream_number != stream) ||
5274 			    (tp1->rec.data.stream_seq != seq)) {
5275 				break;
5276 			}
			/* Save this chunk in chk in case we have some on the
			 * stream out queue. If so and we have an
			 * un-transmitted one, we don't have to fudge the TSN.
			 */
5281 			chk = tp1;
5282 			ret_sz += tp1->book_size;
5283 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
5284 			if (sent) {
5285 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
5286 			} else {
5287 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
5288 			}
5289 			if (tp1->data) {
5290 				sctp_m_freem(tp1->data);
5291 				tp1->data = NULL;
5292 			}
			/* No flight involved here; book the size to 0 */
5294 			tp1->book_size = 0;
5295 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
5296 				foundeom = 1;
5297 			}
5298 			do_wakeup_routine = 1;
5299 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
5300 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
5301 			/* on to the sent queue so we can wait for it to be passed by. */
5302 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
5303 					  sctp_next);
5304 			stcb->asoc.send_queue_cnt--;
5305 			stcb->asoc.sent_queue_cnt++;
5306 		}
5307 	}
5308 	if (foundeom == 0) {
		/*
		 * Still no EOM found. That means there
		 * is stuff left on the stream out queue... yuck.
		 */
5313 		SCTP_TCB_SEND_LOCK(stcb);
5314 		strq = &stcb->asoc.strmout[stream];
5315 		sp = TAILQ_FIRST(&strq->outqueue);
5316 		if (sp != NULL) {
5317 			sp->discard_rest = 1;
5318 			/*
5319 			 * We may need to put a chunk on the
5320 			 * queue that holds the TSN that
5321 			 * would have been sent with the LAST
5322 			 * bit.
5323 			 */
5324 			if (chk == NULL) {
5325 				/* Yep, we have to */
5326 				sctp_alloc_a_chunk(stcb, chk);
5327 				if (chk == NULL) {
					/* We are hosed. All we can
					 * do is nothing, which will
					 * cause an abort if the peer is
					 * paying attention.
					 */
5333 					goto oh_well;
5334 				}
5335 				memset(chk, 0, sizeof(*chk));
5336 				chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
5337 				chk->sent = SCTP_FORWARD_TSN_SKIP;
5338 				chk->asoc = &stcb->asoc;
5339 				chk->rec.data.stream_seq = strq->next_sequence_send;
5340 				chk->rec.data.stream_number = sp->stream;
5341 				chk->rec.data.payloadtype = sp->ppid;
5342 				chk->rec.data.context = sp->context;
5343 				chk->flags = sp->act_flags;
5344 				chk->whoTo = NULL;
5345 #if defined(__FreeBSD__) || defined(__Panda__)
5346 				chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
5347 #else
5348 				chk->rec.data.TSN_seq = stcb->asoc.sending_seq++;
5349 #endif
5350 				strq->chunks_on_queues++;
5351 				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
5352 				stcb->asoc.sent_queue_cnt++;
5353 				stcb->asoc.pr_sctp_cnt++;
5354 			} else {
5355 				chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
5356 			}
5357 			strq->next_sequence_send++;
5358 		oh_well:
5359 			if (sp->data) {
				/* Pull any data to free up the SB and
				 * allow the sender to "add more" while we
				 * throw this away :-)
				 */
5364 				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
5365 				ret_sz += sp->length;
5366 				do_wakeup_routine = 1;
5367 				sp->some_taken = 1;
5368 				sctp_m_freem(sp->data);
5369 				sp->data = NULL;
5370 				sp->tail_mbuf = NULL;
5371 				sp->length = 0;
5372 			}
5373 		}
5374 		SCTP_TCB_SEND_UNLOCK(stcb);
5375 	}
5376 	if (do_wakeup_routine) {
5377 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5378 		struct socket *so;
5379 
5380 		so = SCTP_INP_SO(stcb->sctp_ep);
5381 		if (!so_locked) {
5382 			atomic_add_int(&stcb->asoc.refcnt, 1);
5383 			SCTP_TCB_UNLOCK(stcb);
5384 			SCTP_SOCKET_LOCK(so, 1);
5385 			SCTP_TCB_LOCK(stcb);
5386 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
5387 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
5388 				/* assoc was freed while we were unlocked */
5389 				SCTP_SOCKET_UNLOCK(so, 1);
5390 				return (ret_sz);
5391 			}
5392 		}
5393 #endif
5394 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
5395 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5396 		if (!so_locked) {
5397 			SCTP_SOCKET_UNLOCK(so, 1);
5398 		}
5399 #endif
5400 	}
5401 	return (ret_sz);
5402 }
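/*
 * Editorial summary (a sketch of the flow above, not original commentary):
 * sctp_release_pr_sctp_chunk() abandons a possibly fragmented PR-SCTP
 * message in up to three passes:
 *  1) walk forward from tp1 until an unfragmented chunk or the LAST fragment
 *     is seen, freeing data and marking chunks SCTP_FORWARD_TSN_SKIP;
 *  2) if no EOM was found, continue through the send queue for the same
 *     stream/sequence, moving those chunks onto the sent queue;
 *  3) if there is still no EOM, discard what remains on the stream out
 *     queue, fabricating a LAST-fragment marker chunk if needed so a
 *     FORWARD-TSN can cover the whole message.
 * The return value accumulates the book_size of everything released.
 */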
5403 
/*
 * Checks to see if the given address, addr, is one that is currently known
 * by the endpoint.
 * Note: can't distinguish the same address on multiple interfaces and
 * doesn't handle multiple addresses with different zone/scope IDs.
 * Note: ifa_ifwithaddr() compares the entire sockaddr struct.
 */
5410 struct sctp_ifa *
5411 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
5412 		    int holds_lock)
5413 {
5414 	struct sctp_laddr *laddr;
5415 
5416 	if (holds_lock == 0) {
5417 		SCTP_INP_RLOCK(inp);
5418 	}
5419 
5420 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
5421 		if (laddr->ifa == NULL)
5422 			continue;
5423 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
5424 			continue;
5425 #ifdef INET
5426 		if (addr->sa_family == AF_INET) {
5427 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5428 			    laddr->ifa->address.sin.sin_addr.s_addr) {
5429 				/* found him. */
5430 				if (holds_lock == 0) {
5431 					SCTP_INP_RUNLOCK(inp);
5432 				}
5433 				return (laddr->ifa);
5434 				break;
5435 			}
5436 		}
5437 #endif
5438 #ifdef INET6
5439 		if (addr->sa_family == AF_INET6) {
5440 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5441 						 &laddr->ifa->address.sin6)) {
5442 				/* found him. */
5443 				if (holds_lock == 0) {
5444 					SCTP_INP_RUNLOCK(inp);
5445 				}
5446 				return (laddr->ifa);
5447 				break;
5448 			}
5449 		}
5450 #endif
5451 #if defined(__Userspace__)
5452 		if (addr->sa_family == AF_CONN) {
5453 			if (((struct sockaddr_conn *)addr)->sconn_addr == laddr->ifa->address.sconn.sconn_addr) {
5454 				/* found him. */
5455 				if (holds_lock == 0) {
5456 					SCTP_INP_RUNLOCK(inp);
5457 				}
5458 				return (laddr->ifa);
5459 				break;
5460 			}
5461 		}
5462 #endif
5463 	}
5464 	if (holds_lock == 0) {
5465 		SCTP_INP_RUNLOCK(inp);
5466 	}
5467 	return (NULL);
5468 }
5469 
5470 uint32_t
5471 sctp_get_ifa_hash_val(struct sockaddr *addr)
5472 {
5473 	switch (addr->sa_family) {
5474 #ifdef INET
5475 	case AF_INET:
5476 	{
5477 		struct sockaddr_in *sin;
5478 
5479 		sin = (struct sockaddr_in *)addr;
5480 		return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
5481 	}
5482 #endif
5483 #ifdef INET6
5484 	case AF_INET6:
5485 	{
5486 		struct sockaddr_in6 *sin6;
5487 		uint32_t hash_of_addr;
5488 
5489 		sin6 = (struct sockaddr_in6 *)addr;
5490 #if !defined(__Windows__) && !defined(__Userspace_os_FreeBSD) && !defined(__Userspace_os_Darwin) && !defined(__Userspace_os_Windows)
5491 		hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
5492 				sin6->sin6_addr.s6_addr32[1] +
5493 				sin6->sin6_addr.s6_addr32[2] +
5494 				sin6->sin6_addr.s6_addr32[3]);
5495 #else
5496 		hash_of_addr = (((uint32_t *)&sin6->sin6_addr)[0] +
5497 				((uint32_t *)&sin6->sin6_addr)[1] +
5498 				((uint32_t *)&sin6->sin6_addr)[2] +
5499 				((uint32_t *)&sin6->sin6_addr)[3]);
5500 #endif
5501 		hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
5502 		return (hash_of_addr);
5503 	}
5504 #endif
5505 #if defined(__Userspace__)
5506 	case AF_CONN:
5507 	{
5508 		struct sockaddr_conn *sconn;
5509 		uintptr_t temp;
5510 
5511 		sconn = (struct sockaddr_conn *)addr;
5512 		temp = (uintptr_t)sconn->sconn_addr;
5513 		return ((uint32_t)(temp ^ (temp >> 16)));
5514 	}
5515 #endif
5516 	default:
5517 		break;
5518 	}
5519 	return (0);
5520 }
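/*
 * Editorial note (illustrative values only): the hash folds the address into
 * 32 bits and XORs the upper half into the lower half.  For an IPv4 address
 * stored as 0xc0a80001 this yields 0xc0a80001 ^ 0x0000c0a8 = 0xc0a8c0a9;
 * IPv6 first sums the four 32-bit words of the address, and AF_CONN folds
 * the pointer value the same way.
 */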
5521 
5522 struct sctp_ifa *
5523 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5524 {
5525 	struct sctp_ifa *sctp_ifap;
5526 	struct sctp_vrf *vrf;
5527 	struct sctp_ifalist *hash_head;
5528 	uint32_t hash_of_addr;
5529 
5530 	if (holds_lock == 0)
5531 		SCTP_IPI_ADDR_RLOCK();
5532 
5533 	vrf = sctp_find_vrf(vrf_id);
5534 	if (vrf == NULL) {
5535 		if (holds_lock == 0)
5536 			SCTP_IPI_ADDR_RUNLOCK();
5537 		return (NULL);
5538 	}
5539 
5540 	hash_of_addr = sctp_get_ifa_hash_val(addr);
5541 
5542 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5543 	if (hash_head == NULL) {
5544 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5545 			    hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark,
5546 			    (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark));
5547 		sctp_print_address(addr);
5548 		SCTP_PRINTF("No such bucket for address\n");
5549 		if (holds_lock == 0)
5550 			SCTP_IPI_ADDR_RUNLOCK();
5551 
5552 		return (NULL);
5553 	}
5554 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5555 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5556 			continue;
5557 #ifdef INET
5558 		if (addr->sa_family == AF_INET) {
5559 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5560 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5561 				/* found him. */
5562 				if (holds_lock == 0)
5563 					SCTP_IPI_ADDR_RUNLOCK();
5564 				return (sctp_ifap);
5565 				break;
5566 			}
5567 		}
5568 #endif
5569 #ifdef INET6
5570 		if (addr->sa_family == AF_INET6) {
5571 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5572 						 &sctp_ifap->address.sin6)) {
5573 				/* found him. */
5574 				if (holds_lock == 0)
5575 					SCTP_IPI_ADDR_RUNLOCK();
5576 				return (sctp_ifap);
5577 				break;
5578 			}
5579 		}
5580 #endif
5581 #if defined(__Userspace__)
5582 		if (addr->sa_family == AF_CONN) {
5583 			if (((struct sockaddr_conn *)addr)->sconn_addr == sctp_ifap->address.sconn.sconn_addr) {
5584 				/* found him. */
5585 				if (holds_lock == 0)
5586 					SCTP_IPI_ADDR_RUNLOCK();
5587 				return (sctp_ifap);
5588 				break;
5589 			}
5590 		}
5591 #endif
5592 	}
5593 	if (holds_lock == 0)
5594 		SCTP_IPI_ADDR_RUNLOCK();
5595 	return (NULL);
5596 }
5597 
5598 static void
5599 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock,
5600 	       uint32_t rwnd_req)
5601 {
5602 	/* User pulled some data, do we need a rwnd update? */
5603 	int r_unlocked = 0;
5604 	uint32_t dif, rwnd;
5605 	struct socket *so = NULL;
5606 
5607 	if (stcb == NULL)
5608 		return;
5609 
5610 	atomic_add_int(&stcb->asoc.refcnt, 1);
5611 
5612 	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
5613 				SCTP_STATE_SHUTDOWN_RECEIVED |
5614 				SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check: if we are being freed, no update is needed */
5616 		goto no_lock;
5617 	}
5618 	SCTP_INP_INCR_REF(stcb->sctp_ep);
5619 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5620 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5621 		goto out;
5622 	}
5623 	so = stcb->sctp_socket;
5624 	if (so == NULL) {
5625 		goto out;
5626 	}
5627 	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have we freed enough to be worth a look? */
	*freed_so_far = 0;
	/* Yep, it's worth a look and the lock overhead. */
5631 
5632 	/* Figure out what the rwnd would be */
5633 	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
5634 	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
5635 		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
5636 	} else {
5637 		dif = 0;
5638 	}
5639 	if (dif >= rwnd_req) {
5640 		if (hold_rlock) {
5641 			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5642 			r_unlocked = 1;
5643 		}
5644 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race where the guy has not
			 * reached the gate; in that case skip the update and
			 * bail out.
			 */
5650 			goto out;
5651 		}
5652 		SCTP_TCB_LOCK(stcb);
5653 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5654 			/* No reports here */
5655 			SCTP_TCB_UNLOCK(stcb);
5656 			goto out;
5657 		}
5658 		SCTP_STAT_INCR(sctps_wu_sacks_sent);
5659 		sctp_send_sack(stcb, SCTP_SO_LOCKED);
5660 
5661 		sctp_chunk_output(stcb->sctp_ep, stcb,
5662 				  SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
5663 		/* make sure no timer is running */
5664 		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL+SCTP_LOC_6);
5665 		SCTP_TCB_UNLOCK(stcb);
5666 	} else {
5667 		/* Update how much we have pending */
5668 		stcb->freed_by_sorcv_sincelast = dif;
5669 	}
5670  out:
5671 	if (so && r_unlocked && hold_rlock) {
5672 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5673 	}
5674 
5675 	SCTP_INP_DECR_REF(stcb->sctp_ep);
5676  no_lock:
5677 	atomic_add_int(&stcb->asoc.refcnt, -1);
5678 	return;
5679 }
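/*
 * Editorial note (a sketch of the policy above, not original commentary):
 * with dif = current rwnd - my_last_reported_rwnd, a window-update SACK is
 * only sent once dif >= rwnd_req; the caller below passes
 * SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT, clamped to at least
 * SCTP_MIN_RWND.  Smaller openings are merely remembered in
 * freed_by_sorcv_sincelast for a later call.
 */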
5680 
5681 int
5682 sctp_sorecvmsg(struct socket *so,
5683     struct uio *uio,
5684     struct mbuf **mp,
5685     struct sockaddr *from,
5686     int fromlen,
5687     int *msg_flags,
5688     struct sctp_sndrcvinfo *sinfo,
5689     int filling_sinfo)
5690 {
	/*
	 * MSG flags we will look at:
	 * MSG_DONTWAIT - non-blocking IO.
	 * MSG_PEEK     - look, don't touch :-D (only valid without the mbuf
	 *                copy-out path, i.e. mp == NULL, so uio is the copy
	 *                method to userland).
	 * MSG_WAITALL  - ??
	 * On the way out we may send out any combination of:
	 * MSG_NOTIFICATION MSG_EOR
	 */
5699 	struct sctp_inpcb *inp = NULL;
5700 	int my_len = 0;
5701 	int cp_len = 0, error = 0;
5702 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5703 	struct mbuf *m = NULL;
5704 	struct sctp_tcb *stcb = NULL;
5705 	int wakeup_read_socket = 0;
5706 	int freecnt_applied = 0;
5707 	int out_flags = 0, in_flags = 0;
5708 	int block_allowed = 1;
5709 	uint32_t freed_so_far = 0;
5710 	uint32_t copied_so_far = 0;
5711 	int in_eeor_mode = 0;
5712 	int no_rcv_needed = 0;
5713 	uint32_t rwnd_req = 0;
5714 	int hold_sblock = 0;
5715 	int hold_rlock = 0;
5716 	int slen = 0;
5717 	uint32_t held_length = 0;
5718 #if defined(__FreeBSD__) && __FreeBSD_version >= 700000
5719 	int sockbuf_lock = 0;
5720 #endif
5721 
5722 	if (uio == NULL) {
5723 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5724 		return (EINVAL);
5725 	}
5726 
5727 	if (msg_flags) {
5728 		in_flags = *msg_flags;
5729 		if (in_flags & MSG_PEEK)
5730 			SCTP_STAT_INCR(sctps_read_peeks);
5731 	} else {
5732 		in_flags = 0;
5733 	}
5734 #if defined(__APPLE__)
5735 #if defined(APPLE_LEOPARD)
5736 	slen = uio->uio_resid;
5737 #else
5738 	slen = uio_resid(uio);
5739 #endif
5740 #else
5741 	slen = uio->uio_resid;
5742 #endif
5743 
5744 	/* Pull in and set up our int flags */
5745 	if (in_flags & MSG_OOB) {
5746 		/* Out of band's NOT supported */
5747 		return (EOPNOTSUPP);
5748 	}
5749 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5750 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5751 		return (EINVAL);
5752 	}
5753 	if ((in_flags & (MSG_DONTWAIT
5754 #if defined(__FreeBSD__) && __FreeBSD_version > 500000
5755 			 | MSG_NBIO
5756 #endif
5757 		     )) ||
5758 	    SCTP_SO_IS_NBIO(so)) {
5759 		block_allowed = 0;
5760 	}
5761 	/* setup the endpoint */
5762 	inp = (struct sctp_inpcb *)so->so_pcb;
5763 	if (inp == NULL) {
5764 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5765 		return (EFAULT);
5766 	}
5767 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5768 	/* Must be at least a MTU's worth */
5769 	if (rwnd_req < SCTP_MIN_RWND)
5770 		rwnd_req = SCTP_MIN_RWND;
5771 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5772 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5773 #if defined(__APPLE__)
5774 #if defined(APPLE_LEOPARD)
5775 		sctp_misc_ints(SCTP_SORECV_ENTER,
5776 			       rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5777 #else
5778 		sctp_misc_ints(SCTP_SORECV_ENTER,
5779 			       rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio_resid(uio));
5780 #endif
5781 #else
5782 		sctp_misc_ints(SCTP_SORECV_ENTER,
5783 			       rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5784 #endif
5785 	}
5786 #if (defined(__FreeBSD__) && __FreeBSD_version < 700000) || defined(__Userspace__)
5787 	SOCKBUF_LOCK(&so->so_rcv);
5788 	hold_sblock = 1;
5789 #endif
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5791 #if defined(__APPLE__)
5792 #if defined(APPLE_LEOPARD)
5793 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5794 			       rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5795 #else
5796 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5797 			       rwnd_req, block_allowed, so->so_rcv.sb_cc, uio_resid(uio));
5798 #endif
5799 #else
5800 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5801 			       rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5802 #endif
5803 	}
5804 
5805 #if defined(__APPLE__)
5806 	error = sblock(&so->so_rcv, SBLOCKWAIT(in_flags));
5807 #endif
5808 
5809 #if defined(__FreeBSD__)
5810 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5811 #endif
5812 	if (error) {
5813 		goto release_unlocked;
5814 	}
5815 #if defined(__FreeBSD__) && __FreeBSD_version >= 700000
	sockbuf_lock = 1;
5817 #endif
5818  restart:
5819 #if (defined(__FreeBSD__) && __FreeBSD_version < 700000) || defined(__Userspace__)
5820 	if (hold_sblock == 0) {
5821 		SOCKBUF_LOCK(&so->so_rcv);
5822 		hold_sblock = 1;
5823 	}
5824 #endif
5825 #if defined(__APPLE__)
5826 	sbunlock(&so->so_rcv, 1);
5827 #endif
5828 
5829 #if defined(__FreeBSD__) && __FreeBSD_version < 700000
5830 	sbunlock(&so->so_rcv);
5831 #endif
5832 
5833  restart_nosblocks:
5834 	if (hold_sblock == 0) {
5835 		SOCKBUF_LOCK(&so->so_rcv);
5836 		hold_sblock = 1;
5837 	}
5838 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5839 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5840 		goto out;
5841 	}
5842 #if (defined(__FreeBSD__) && __FreeBSD_version > 500000) || defined(__Windows__)
5843 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5844 #else
5845 	if ((so->so_state & SS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5846 #endif
5847 		if (so->so_error) {
5848 			error = so->so_error;
5849 			if ((in_flags & MSG_PEEK) == 0)
5850 				so->so_error = 0;
5851 			goto out;
5852 		} else {
5853 			if (so->so_rcv.sb_cc == 0) {
5854 				/* indicate EOF */
5855 				error = 0;
5856 				goto out;
5857 			}
5858 		}
5859 	}
5860 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5861 		/* we need to wait for data */
5862 		if ((so->so_rcv.sb_cc == 0) &&
5863 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5864 		     (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5865 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
				/* For the active open side clear flags for
				 * re-use; the passive open is blocked by
				 * connect.
				 */
5869 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5870 					/* You were aborted, passive side always hits here */
5871 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5872 					error = ECONNRESET;
5873 				}
5874 				so->so_state &= ~(SS_ISCONNECTING |
5875 						  SS_ISDISCONNECTING |
5876 						  SS_ISCONFIRMING |
5877 						  SS_ISCONNECTED);
5878 				if (error == 0) {
5879 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5880 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5881 						error = ENOTCONN;
5882 					}
5883 				}
5884 				goto out;
5885 			}
5886 		}
5887 		error = sbwait(&so->so_rcv);
5888 		if (error) {
5889 			goto out;
5890 		}
5891 		held_length = 0;
5892 		goto restart_nosblocks;
5893 	} else if (so->so_rcv.sb_cc == 0) {
5894 		if (so->so_error) {
5895 			error = so->so_error;
5896 			if ((in_flags & MSG_PEEK) == 0)
5897 				so->so_error = 0;
5898 		} else {
5899 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5900 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5901 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
					/* For the active open side clear flags for
					 * re-use; the passive open is blocked by
					 * connect.
					 */
5905 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5906 						/* You were aborted, passive side always hits here */
5907 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5908 						error = ECONNRESET;
5909 					}
5910 					so->so_state &= ~(SS_ISCONNECTING |
5911 							  SS_ISDISCONNECTING |
5912 							  SS_ISCONFIRMING |
5913 							  SS_ISCONNECTED);
5914 					if (error == 0) {
5915 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5916 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5917 							error = ENOTCONN;
5918 						}
5919 					}
5920 					goto out;
5921 				}
5922 			}
5923 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5924 			error = EWOULDBLOCK;
5925 		}
5926 		goto out;
5927 	}
5928 	if (hold_sblock == 1) {
5929 		SOCKBUF_UNLOCK(&so->so_rcv);
5930 		hold_sblock = 0;
5931 	}
5932 #if defined(__APPLE__)
5933 	error = sblock(&so->so_rcv, SBLOCKWAIT(in_flags));
5934 #endif
5935 #if defined(__FreeBSD__) && __FreeBSD_version < 700000
5936 	error = sblock(&so->so_rcv, (block_allowed ? M_WAITOK : 0));
5937 #endif
5938 	/* we possibly have data we can read */
5939 	/*sa_ignore FREED_MEMORY*/
5940 	control = TAILQ_FIRST(&inp->read_queue);
5941 	if (control == NULL) {
		/* This could be happening since
		 * the appender did the increment but has not
		 * yet done the tailq insert onto the read_queue.
		 */
5946 		if (hold_rlock == 0) {
5947 			SCTP_INP_READ_LOCK(inp);
5948 		}
5949 		control = TAILQ_FIRST(&inp->read_queue);
5950 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5951 #ifdef INVARIANTS
5952 			panic("Huh, its non zero and nothing on control?");
5953 #endif
5954 			so->so_rcv.sb_cc = 0;
5955 		}
5956 		SCTP_INP_READ_UNLOCK(inp);
5957 		hold_rlock = 0;
5958 		goto restart;
5959 	}
5960 
5961 	if ((control->length == 0) &&
5962 	    (control->do_not_ref_stcb)) {
		/* Clean-up code for freeing an assoc that left behind a pdapi...
		 * maybe a peer in EEOR mode that just closed after sending and
		 * never indicated an EOR.
		 */
5967 		if (hold_rlock == 0) {
5968 			hold_rlock = 1;
5969 			SCTP_INP_READ_LOCK(inp);
5970 		}
5971 		control->held_length = 0;
5972 		if (control->data) {
5973 			/* Hmm there is data here .. fix */
5974 			struct mbuf *m_tmp;
5975 			int cnt = 0;
5976 			m_tmp = control->data;
5977 			while (m_tmp) {
5978 				cnt += SCTP_BUF_LEN(m_tmp);
5979 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5980 					control->tail_mbuf = m_tmp;
5981 					control->end_added = 1;
5982 				}
5983 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5984 			}
5985 			control->length = cnt;
5986 		} else {
5987 			/* remove it */
5988 			TAILQ_REMOVE(&inp->read_queue, control, next);
			/* Add back any hidden data */
5990 			sctp_free_remote_addr(control->whoFrom);
5991 			sctp_free_a_readq(stcb, control);
5992 		}
5993 		if (hold_rlock) {
5994 			hold_rlock = 0;
5995 			SCTP_INP_READ_UNLOCK(inp);
5996 		}
5997 		goto restart;
5998 	}
5999 	if ((control->length == 0) &&
6000 	    (control->end_added == 1)) {
6001 		/* Do we also need to check for (control->pdapi_aborted == 1)? */
6002 		if (hold_rlock == 0) {
6003 			hold_rlock = 1;
6004 			SCTP_INP_READ_LOCK(inp);
6005 		}
6006 		TAILQ_REMOVE(&inp->read_queue, control, next);
6007 		if (control->data) {
6008 #ifdef INVARIANTS
6009 			panic("control->data not null but control->length == 0");
6010 #else
6011 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
6012 			sctp_m_freem(control->data);
6013 			control->data = NULL;
6014 #endif
6015 		}
6016 		if (control->aux_data) {
6017 			sctp_m_free (control->aux_data);
6018 			control->aux_data = NULL;
6019 		}
6020 		sctp_free_remote_addr(control->whoFrom);
6021 		sctp_free_a_readq(stcb, control);
6022 		if (hold_rlock) {
6023 			hold_rlock = 0;
6024 			SCTP_INP_READ_UNLOCK(inp);
6025 		}
6026 		goto restart;
6027 	}
6028 	if (control->length == 0) {
6029 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
6030 		    (filling_sinfo)) {
			/* find a more suitable one than this */
6032 			ctl = TAILQ_NEXT(control, next);
6033 			while (ctl) {
6034 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
6035 				    (ctl->some_taken ||
6036 				     (ctl->spec_flags & M_NOTIFICATION) ||
6037 				     ((ctl->do_not_ref_stcb == 0) &&
6038 				      (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
6039 					) {
					/*-
					 * If we have a different TCB next, and there is data
					 * present, and we have already taken some (pdapi), OR we can
					 * ref the tcb and no delivery has started on this stream, we
					 * take it. Note we allow a notification on a different
					 * assoc to be delivered.
					 */
6047 					control = ctl;
6048 					goto found_one;
6049 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
6050 					   (ctl->length) &&
6051 					   ((ctl->some_taken) ||
6052 					    ((ctl->do_not_ref_stcb == 0) &&
6053 					     ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
6054 					     (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
					/*-
					 * If we have the same tcb, and there is data present, and we
					 * have the strm interleave feature present, then if we have
					 * taken some (pdapi) or we can refer to that tcb AND we have
					 * not started a delivery for this stream, we can take it.
					 * Note we do NOT allow a notification on the same assoc to
					 * be delivered.
					 */
6063 					control = ctl;
6064 					goto found_one;
6065 				}
6066 				ctl = TAILQ_NEXT(ctl, next);
6067 			}
6068 		}
		/*
		 * If we reach here, no suitable replacement is available
		 * <or> fragment interleave is NOT on. So stuff the sb_cc
		 * into our held count, and it's time to sleep again.
		 */
6074 		held_length = so->so_rcv.sb_cc;
6075 		control->held_length = so->so_rcv.sb_cc;
6076 		goto restart;
6077 	}
6078 	/* Clear the held length since there is something to read */
6079 	control->held_length = 0;
6080 	if (hold_rlock) {
6081 		SCTP_INP_READ_UNLOCK(inp);
6082 		hold_rlock = 0;
6083 	}
6084  found_one:
	/*
	 * If we reach here, control has some data for us to read off.
	 * Note that stcb COULD be NULL.
	 */
6089 	control->some_taken++;
6090 	if (hold_sblock) {
6091 		SOCKBUF_UNLOCK(&so->so_rcv);
6092 		hold_sblock = 0;
6093 	}
6094 	stcb = control->stcb;
6095 	if (stcb) {
6096 		if ((control->do_not_ref_stcb == 0) &&
6097 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
6098 			if (freecnt_applied == 0)
6099 				stcb = NULL;
6100 		} else if (control->do_not_ref_stcb == 0) {
6101 			/* you can't free it on me please */
6102 			/*
6103 			 * The lock on the socket buffer protects us so the
6104 			 * free code will stop. But since we used the socketbuf
6105 			 * lock and the sender uses the tcb_lock to increment,
6106 			 * we need to use the atomic add to the refcnt
6107 			 */
6108 			if (freecnt_applied) {
6109 #ifdef INVARIANTS
6110 				panic("refcnt already incremented");
6111 #else
6112 				SCTP_PRINTF("refcnt already incremented?\n");
6113 #endif
6114 			} else {
6115 				atomic_add_int(&stcb->asoc.refcnt, 1);
6116 				freecnt_applied = 1;
6117 			}
6118 			/*
6119 			 * Setup to remember how much we have not yet told
6120 			 * the peer our rwnd has opened up. Note we grab
6121 			 * the value from the tcb from last time.
6122 			 * Note too that sack sending clears this when a sack
6123 			 * is sent, which is fine. Once we hit the rwnd_req,
6124 			 * we then will go to the sctp_user_rcvd() that will
6125 			 * not lock until it KNOWs it MUST send a WUP-SACK.
6126 			 */
6127 			freed_so_far = stcb->freed_by_sorcv_sincelast;
6128 			stcb->freed_by_sorcv_sincelast = 0;
6129 		}
	}
6131 	if (stcb &&
6132 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
6133 	    control->do_not_ref_stcb == 0) {
6134 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
6135 	}
6136 
	/* First, let's pull off the sinfo and sockaddr info */
6138 	if ((sinfo) && filling_sinfo) {
6139 		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
6140 		nxt = TAILQ_NEXT(control, next);
6141 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6142 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
6143 			struct sctp_extrcvinfo *s_extra;
6144 			s_extra = (struct sctp_extrcvinfo *)sinfo;
6145 			if ((nxt) &&
6146 			    (nxt->length)) {
6147 				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
6148 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
6149 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
6150 				}
6151 				if (nxt->spec_flags & M_NOTIFICATION) {
6152 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
6153 				}
6154 				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
6155 				s_extra->sreinfo_next_length = nxt->length;
6156 				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
6157 				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
6158 				if (nxt->tail_mbuf != NULL) {
6159 					if (nxt->end_added) {
6160 						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
6161 					}
6162 				}
6163 			} else {
				/* We explicitly zero this, since the memcpy picked
				 * up other fields beyond the older sinfo_* that
				 * are on the control's structure :-D
				 */
6168 				nxt = NULL;
6169 				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
6170 				s_extra->sreinfo_next_aid = 0;
6171 				s_extra->sreinfo_next_length = 0;
6172 				s_extra->sreinfo_next_ppid = 0;
6173 				s_extra->sreinfo_next_stream = 0;
6174 			}
6175 		}
6176 		/*
6177 		 * update off the real current cum-ack, if we have an stcb.
6178 		 */
6179 		if ((control->do_not_ref_stcb == 0) && stcb)
6180 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
6181 		/*
6182 		 * mask off the high bits, we keep the actual chunk bits in
6183 		 * there.
6184 		 */
6185 		sinfo->sinfo_flags &= 0x00ff;
6186 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
6187 			sinfo->sinfo_flags |= SCTP_UNORDERED;
6188 		}
6189 	}
6190 #ifdef SCTP_ASOCLOG_OF_TSNS
6191 	{
6192 		int index, newindex;
6193 		struct sctp_pcbtsn_rlog *entry;
6194 		do {
6195 			index = inp->readlog_index;
6196 			newindex = index + 1;
6197 			if (newindex >= SCTP_READ_LOG_SIZE) {
6198 				newindex = 0;
6199 			}
6200 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
6201 		entry = &inp->readlog[index];
6202 		entry->vtag = control->sinfo_assoc_id;
6203 		entry->strm = control->sinfo_stream;
6204 		entry->seq = control->sinfo_ssn;
6205 		entry->sz = control->length;
6206 		entry->flgs = control->sinfo_flags;
6207 	}
6208 #endif
6209 	if ((fromlen > 0) && (from != NULL)) {
6210 		union sctp_sockstore store;
6211 		size_t len;
6212 
6213 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
6214 #ifdef INET6
6215 			case AF_INET6:
6216 				len = sizeof(struct sockaddr_in6);
6217 				store.sin6 = control->whoFrom->ro._l_addr.sin6;
6218 				store.sin6.sin6_port = control->port_from;
6219 				break;
6220 #endif
6221 #ifdef INET
6222 			case AF_INET:
6223 #ifdef INET6
6224 				if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
6225 					len = sizeof(struct sockaddr_in6);
6226 					in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
6227 							    &store.sin6);
6228 					store.sin6.sin6_port = control->port_from;
6229 				} else {
6230 					len = sizeof(struct sockaddr_in);
6231 					store.sin = control->whoFrom->ro._l_addr.sin;
6232 					store.sin.sin_port = control->port_from;
6233 				}
6234 #else
6235 				len = sizeof(struct sockaddr_in);
6236 				store.sin = control->whoFrom->ro._l_addr.sin;
6237 				store.sin.sin_port = control->port_from;
6238 #endif
6239 				break;
6240 #endif
6241 #if defined(__Userspace__)
6242 			case AF_CONN:
6243 				len = sizeof(struct sockaddr_conn);
6244 				store.sconn = control->whoFrom->ro._l_addr.sconn;
6245 				store.sconn.sconn_port = control->port_from;
6246 				break;
6247 #endif
6248 			default:
6249 				len = 0;
6250 				break;
6251 		}
6252 		memcpy(from, &store, min((size_t)fromlen, len));
6253 #if defined(SCTP_EMBEDDED_V6_SCOPE)
6254 #ifdef INET6
6255 		{
6256 			struct sockaddr_in6 lsa6, *from6;
6257 
6258 			from6 = (struct sockaddr_in6 *)from;
6259 			sctp_recover_scope_mac(from6, (&lsa6));
6260 		}
6261 #endif
6262 #endif
6263 	}
6264 	/* now copy out what data we can */
6265 	if (mp == NULL) {
6266 		/* copy out each mbuf in the chain up to length */
6267 	get_more_data:
6268 		m = control->data;
6269 		while (m) {
6270 			/* Move out all we can */
6271 #if defined(__APPLE__)
6272 #if defined(APPLE_LEOPARD)
6273 			cp_len = (int)uio->uio_resid;
6274 #else
6275 			cp_len = (int)uio_resid(uio);
6276 #endif
6277 #else
6278 			cp_len = (int)uio->uio_resid;
6279 #endif
6280 			my_len = (int)SCTP_BUF_LEN(m);
6281 			if (cp_len > my_len) {
6282 				/* not enough in this buf */
6283 				cp_len = my_len;
6284 			}
6285 			if (hold_rlock) {
6286 				SCTP_INP_READ_UNLOCK(inp);
6287 				hold_rlock = 0;
6288 			}
6289 #if defined(__APPLE__)
6290 			SCTP_SOCKET_UNLOCK(so, 0);
6291 #endif
6292 			if (cp_len > 0)
6293 				error = uiomove(mtod(m, char *), cp_len, uio);
6294 #if defined(__APPLE__)
6295 			SCTP_SOCKET_LOCK(so, 0);
6296 #endif
6297 			/* re-read */
6298 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
6299 				goto release;
6300 			}
6301 
6302 			if ((control->do_not_ref_stcb == 0) && stcb &&
6303 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
6304 				no_rcv_needed = 1;
6305 			}
6306 			if (error) {
6307 				/* error we are out of here */
6308 				goto release;
6309 			}
6310 			if ((SCTP_BUF_NEXT(m) == NULL) &&
6311 			    (cp_len >= SCTP_BUF_LEN(m)) &&
6312 			    ((control->end_added == 0) ||
6313 			     (control->end_added &&
6314 			      (TAILQ_NEXT(control, next) == NULL)))
6315 				) {
6316 				SCTP_INP_READ_LOCK(inp);
6317 				hold_rlock = 1;
6318 			}
6319 			if (cp_len == SCTP_BUF_LEN(m)) {
				if ((SCTP_BUF_NEXT(m) == NULL) &&
6321 				    (control->end_added)) {
6322 					out_flags |= MSG_EOR;
6323 					if ((control->do_not_ref_stcb == 0)  &&
6324 					    (control->stcb != NULL) &&
6325 					    ((control->spec_flags & M_NOTIFICATION) == 0))
6326 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6327 				}
6328 				if (control->spec_flags & M_NOTIFICATION) {
6329 					out_flags |= MSG_NOTIFICATION;
6330 				}
6331 				/* we ate up the mbuf */
6332 				if (in_flags & MSG_PEEK) {
6333 					/* just looking */
6334 					m = SCTP_BUF_NEXT(m);
6335 					copied_so_far += cp_len;
6336 				} else {
6337 					/* dispose of the mbuf */
6338 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6339 						sctp_sblog(&so->so_rcv,
6340 						   control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6341 					}
6342 					sctp_sbfree(control, stcb, &so->so_rcv, m);
6343 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6344 						sctp_sblog(&so->so_rcv,
6345 						   control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
6346 					}
6347 					copied_so_far += cp_len;
6348 					freed_so_far += cp_len;
6349 					freed_so_far += MSIZE;
6350 					atomic_subtract_int(&control->length, cp_len);
6351 					control->data = sctp_m_free(m);
6352 					m = control->data;
					/* been through it all; we must hold the sb lock, so it's OK to null the tail */
6354 					if (control->data == NULL) {
6355 #ifdef INVARIANTS
6356 #if !defined(__APPLE__)
6357 						if ((control->end_added == 0) ||
6358 						    (TAILQ_NEXT(control, next) == NULL)) {
						/* If the end is not added, OR the
						 * next is NULL, we MUST have the lock.
						 */
6362 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
6363 								panic("Hmm we don't own the lock?");
6364 							}
6365 						}
6366 #endif
6367 #endif
6368 						control->tail_mbuf = NULL;
6369 #ifdef INVARIANTS
6370 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
6371 							panic("end_added, nothing left and no MSG_EOR");
6372 						}
6373 #endif
6374 					}
6375 				}
6376 			} else {
6377 				/* Do we need to trim the mbuf? */
6378 				if (control->spec_flags & M_NOTIFICATION) {
6379 					out_flags |= MSG_NOTIFICATION;
6380 				}
6381 				if ((in_flags & MSG_PEEK) == 0) {
6382 					SCTP_BUF_RESV_UF(m, cp_len);
6383 					SCTP_BUF_LEN(m) -= cp_len;
6384 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6385 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE, cp_len);
6386 					}
6387 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
6388 					if ((control->do_not_ref_stcb == 0) &&
6389 					    stcb) {
6390 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
6391 					}
6392 					copied_so_far += cp_len;
6393 					freed_so_far += cp_len;
6394 					freed_so_far += MSIZE;
6395 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6396 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb?NULL:stcb,
6397 							   SCTP_LOG_SBRESULT, 0);
6398 					}
6399 					atomic_subtract_int(&control->length, cp_len);
6400 				} else {
6401 					copied_so_far += cp_len;
6402 				}
6403 			}
6404 #if defined(__APPLE__)
6405 #if defined(APPLE_LEOPARD)
6406 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
6407 #else
6408 			if ((out_flags & MSG_EOR) || (uio_resid(uio) == 0)) {
6409 #endif
6410 #else
6411 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
6412 #endif
6413 				break;
6414 			}
6415 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
6416 			    (control->do_not_ref_stcb == 0) &&
6417 			    (freed_so_far >= rwnd_req)) {
6418 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6419 			}
6420 		} /* end while(m) */
6421 		/*
6422 		 * At this point we have looked at it all and we either have
6423 		 * a MSG_EOR/or read all the user wants... <OR>
6424 		 * control->length == 0.
6425 		 */
6426 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
6427 			/* we are done with this control */
6428 			if (control->length == 0) {
6429 				if (control->data) {
6430 #ifdef INVARIANTS
6431 					panic("control->data not null at read eor?");
6432 #else
					SCTP_PRINTF("Strange, data left in the control buffer. Invariants would panic?\n");
6434 					sctp_m_freem(control->data);
6435 					control->data = NULL;
6436 #endif
6437 				}
6438 			done_with_control:
6439 				if (TAILQ_NEXT(control, next) == NULL) {
					/* If we don't have a next we need a
					 * lock; if there is a next, the interrupt
					 * is filling ahead of us and we don't
					 * need a lock to remove this guy
					 * (which is the head of the queue).
					 */
6446 					if (hold_rlock == 0) {
6447 						SCTP_INP_READ_LOCK(inp);
6448 						hold_rlock = 1;
6449 					}
6450 				}
6451 				TAILQ_REMOVE(&inp->read_queue, control, next);
				/* Add back any hidden data */
6453 				if (control->held_length) {
6454 					held_length = 0;
6455 					control->held_length = 0;
6456 					wakeup_read_socket = 1;
6457 				}
6458 				if (control->aux_data) {
6459 					sctp_m_free (control->aux_data);
6460 					control->aux_data = NULL;
6461 				}
6462 				no_rcv_needed = control->do_not_ref_stcb;
6463 				sctp_free_remote_addr(control->whoFrom);
6464 				control->data = NULL;
6465 				sctp_free_a_readq(stcb, control);
6466 				control = NULL;
6467 				if ((freed_so_far >= rwnd_req) &&
6468 				    (no_rcv_needed == 0))
6469 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6470 
6471 			} else {
6472 				/*
6473 				 * The user did not read all of this
6474 				 * message, turn off the returned MSG_EOR
6475 				 * since we are leaving more behind on the
6476 				 * control to read.
6477 				 */
6478 #ifdef INVARIANTS
6479 				if (control->end_added &&
6480 				    (control->data == NULL) &&
6481 				    (control->tail_mbuf == NULL)) {
6482 					panic("Gak, control->length is corrupt?");
6483 				}
6484 #endif
6485 				no_rcv_needed = control->do_not_ref_stcb;
6486 				out_flags &= ~MSG_EOR;
6487 			}
6488 		}
6489 		if (out_flags & MSG_EOR) {
6490 			goto release;
6491 		}
6492 #if defined(__APPLE__)
6493 #if defined(APPLE_LEOPARD)
6494 		if ((uio->uio_resid == 0) ||
6495 #else
6496 		if ((uio_resid(uio) == 0) ||
6497 #endif
6498 #else
6499 		if ((uio->uio_resid == 0) ||
6500 #endif
6501 		    ((in_eeor_mode) &&
6502 		     (copied_so_far >= (uint32_t)max(so->so_rcv.sb_lowat, 1)))) {
6503 			goto release;
6504 		}
6505 		/*
6506 		 * If I hit here the receiver wants more and this message is
		 * NOT done (pd-api). So two questions: can we block? If not,
		 * we are done. Did the user NOT set MSG_WAITALL?
6509 		 */
6510 		if (block_allowed == 0) {
6511 			goto release;
6512 		}
6513 		/*
		 * We need to wait for more data. A few things: - We don't
		 * sbunlock() so we don't get someone else reading. - We
		 * must be sure to account for the case where what is added
		 * is NOT to our control when we wake up.
6518 		 */
6519 
6520 		/* Do we need to tell the transport a rwnd update might be
6521 		 * needed before we go to sleep?
6522 		 */
6523 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
6524 		    ((freed_so_far >= rwnd_req) &&
6525 		     (control->do_not_ref_stcb == 0) &&
6526 		     (no_rcv_needed == 0))) {
6527 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6528 		}
6529 	wait_some_more:
6530 #if (defined(__FreeBSD__) && __FreeBSD_version > 500000) || defined(__Windows__)
6531 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
6532 			goto release;
6533 		}
6534 #else
6535 		if (so->so_state & SS_CANTRCVMORE) {
6536 			goto release;
6537 		}
6538 #endif
6539 
6540 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
6541 			goto release;
6542 
6543 		if (hold_rlock == 1) {
6544 			SCTP_INP_READ_UNLOCK(inp);
6545 			hold_rlock = 0;
6546 		}
6547 		if (hold_sblock == 0) {
6548 			SOCKBUF_LOCK(&so->so_rcv);
6549 			hold_sblock = 1;
6550 		}
6551 		if ((copied_so_far) && (control->length == 0) &&
6552 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
6553 			goto release;
6554 		}
6555 #if defined(__APPLE__)
6556 		sbunlock(&so->so_rcv, 1);
6557 #endif
6558 		if (so->so_rcv.sb_cc <= control->held_length) {
6559 			error = sbwait(&so->so_rcv);
6560 			if (error) {
6561 #if defined(__FreeBSD__)
6562 				goto release;
6563 #else
6564 				goto release_unlocked;
6565 #endif
6566 			}
6567 			control->held_length = 0;
6568 		}
6569 #if defined(__APPLE__)
6570 		error = sblock(&so->so_rcv, SBLOCKWAIT(in_flags));
6571 #endif
6572 		if (hold_sblock) {
6573 			SOCKBUF_UNLOCK(&so->so_rcv);
6574 			hold_sblock = 0;
6575 		}
6576 		if (control->length == 0) {
6577 			/* still nothing here */
6578 			if (control->end_added == 1) {
				/* he aborted, or is done, i.e., did a shutdown */
6580 				out_flags |= MSG_EOR;
6581 				if (control->pdapi_aborted) {
6582 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6583 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6584 
6585 					out_flags |= MSG_TRUNC;
6586 				} else {
6587 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6588 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6589 				}
6590 				goto done_with_control;
6591 			}
6592 			if (so->so_rcv.sb_cc > held_length) {
6593 				control->held_length = so->so_rcv.sb_cc;
6594 				held_length = 0;
6595 			}
6596 			goto wait_some_more;
6597 		} else if (control->data == NULL) {
6598 			/* we must re-sync since data
6599 			 * is probably being added
6600 			 */
6601 			SCTP_INP_READ_LOCK(inp);
6602 			if ((control->length > 0) && (control->data == NULL)) {
				/* big trouble: we have the lock and it's corrupt? */
6604 #ifdef INVARIANTS
6605 				panic ("Impossible data==NULL length !=0");
6606 #endif
6607 				out_flags |= MSG_EOR;
6608 				out_flags |= MSG_TRUNC;
6609 				control->length = 0;
6610 				SCTP_INP_READ_UNLOCK(inp);
6611 				goto done_with_control;
6612 			}
6613 			SCTP_INP_READ_UNLOCK(inp);
6614 			/* We will fall around to get more data */
6615 		}
6616 		goto get_more_data;
6617 	} else {
6618 		/*-
6619 		 * Give caller back the mbuf chain,
6620 		 * store in uio_resid the length
6621 		 */
6622 		wakeup_read_socket = 0;
6623 		if ((control->end_added == 0) ||
6624 		    (TAILQ_NEXT(control, next) == NULL)) {
6625 			/* Need to get rlock */
6626 			if (hold_rlock == 0) {
6627 				SCTP_INP_READ_LOCK(inp);
6628 				hold_rlock = 1;
6629 			}
6630 		}
6631 		if (control->end_added) {
6632 			out_flags |= MSG_EOR;
6633 			if ((control->do_not_ref_stcb == 0) &&
6634 			    (control->stcb != NULL) &&
6635 			    ((control->spec_flags & M_NOTIFICATION) == 0))
6636 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6637 		}
6638 		if (control->spec_flags & M_NOTIFICATION) {
6639 			out_flags |= MSG_NOTIFICATION;
6640 		}
6641 #if defined(__APPLE__)
6642 #if defined(APPLE_LEOPARD)
6643 		uio->uio_resid = control->length;
6644 #else
6645 		uio_setresid(uio, control->length);
6646 #endif
6647 #else
6648 		uio->uio_resid = control->length;
6649 #endif
6650 		*mp = control->data;
6651 		m = control->data;
6652 		while (m) {
6653 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6654 				sctp_sblog(&so->so_rcv,
6655 				   control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6656 			}
6657 			sctp_sbfree(control, stcb, &so->so_rcv, m);
6658 			freed_so_far += SCTP_BUF_LEN(m);
6659 			freed_so_far += MSIZE;
6660 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6661 				sctp_sblog(&so->so_rcv,
6662 				   control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
6663 			}
6664 			m = SCTP_BUF_NEXT(m);
6665 		}
6666 		control->data = control->tail_mbuf = NULL;
6667 		control->length = 0;
6668 		if (out_flags & MSG_EOR) {
6669 			/* Done with this control */
6670 			goto done_with_control;
6671 		}
6672 	}
6673  release:
6674 	if (hold_rlock == 1) {
6675 		SCTP_INP_READ_UNLOCK(inp);
6676 		hold_rlock = 0;
6677 	}
6678 #if (defined(__FreeBSD__) && __FreeBSD_version < 700000) || defined(__Userspace__)
6679 	if (hold_sblock == 0) {
6680 		SOCKBUF_LOCK(&so->so_rcv);
6681 		hold_sblock = 1;
6682 	}
6683 #else
6684 	if (hold_sblock == 1) {
6685 		SOCKBUF_UNLOCK(&so->so_rcv);
6686 		hold_sblock = 0;
6687 	}
6688 #endif
6689 #if defined(__APPLE__)
6690 	sbunlock(&so->so_rcv, 1);
6691 #endif
6692 
6693 #if defined(__FreeBSD__)
6694 	sbunlock(&so->so_rcv);
6695 #if defined(__FreeBSD__) && __FreeBSD_version >= 700000
6696 	sockbuf_lock = 0;
6697 #endif
6698 #endif
6699 
6700  release_unlocked:
6701 	if (hold_sblock) {
6702 		SOCKBUF_UNLOCK(&so->so_rcv);
6703 		hold_sblock = 0;
6704 	}
6705 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6706 		if ((freed_so_far >= rwnd_req) &&
6707 		    (control && (control->do_not_ref_stcb == 0)) &&
6708 		    (no_rcv_needed == 0))
6709 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6710 	}
6711  out:
6712 	if (msg_flags) {
6713 		*msg_flags = out_flags;
6714 	}
6715 	if (((out_flags & MSG_EOR) == 0) &&
6716 	    ((in_flags & MSG_PEEK) == 0) &&
6717 	    (sinfo) &&
6718 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6719 	     sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6720 		struct sctp_extrcvinfo *s_extra;
6721 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6722 		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
6723 	}
6724 	if (hold_rlock == 1) {
6725 		SCTP_INP_READ_UNLOCK(inp);
6726 	}
6727 	if (hold_sblock) {
6728 		SOCKBUF_UNLOCK(&so->so_rcv);
6729 	}
6730 #if defined(__FreeBSD__) && __FreeBSD_version >= 700000
6731 	if (sockbuf_lock) {
6732 		sbunlock(&so->so_rcv);
6733 	}
6734 #endif
6735 
6736 	if (freecnt_applied) {
6737 		/*
6738 		 * The lock on the socket buffer protects us so the free
6739 		 * code will stop. But since we used the socketbuf lock and
6740 		 * the sender uses the tcb_lock to increment, we need to use
6741 		 * the atomic add to the refcnt.
6742 		 */
6743 		if (stcb == NULL) {
6744 #ifdef INVARIANTS
6745 			panic("stcb for refcnt has gone NULL?");
6746 			goto stage_left;
6747 #else
6748 			goto stage_left;
6749 #endif
6750 		}
6751 		atomic_add_int(&stcb->asoc.refcnt, -1);
6752 		/* Save the value back for next time */
6753 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6754 	}
6755 	if (SCTP_BASE_SYSCTL(sctp_logging_level) &SCTP_RECV_RWND_LOGGING_ENABLE) {
6756 		if (stcb) {
6757 			sctp_misc_ints(SCTP_SORECV_DONE,
6758 				       freed_so_far,
6759 #if defined(__APPLE__)
6760 #if defined(APPLE_LEOPARD)
6761 				       ((uio) ? (slen - uio->uio_resid) : slen),
6762 #else
6763 				       ((uio) ? (slen - uio_resid(uio)) : slen),
6764 #endif
6765 #else
6766 				       ((uio) ? (slen - uio->uio_resid) : slen),
6767 #endif
6768 				       stcb->asoc.my_rwnd,
6769 				       so->so_rcv.sb_cc);
6770 		} else {
6771 			sctp_misc_ints(SCTP_SORECV_DONE,
6772 				       freed_so_far,
6773 #if defined(__APPLE__)
6774 #if defined(APPLE_LEOPARD)
6775 				       ((uio) ? (slen - uio->uio_resid) : slen),
6776 #else
6777 				       ((uio) ? (slen - uio_resid(uio)) : slen),
6778 #endif
6779 #else
6780 				       ((uio) ? (slen - uio->uio_resid) : slen),
6781 #endif
6782 				       0,
6783 				       so->so_rcv.sb_cc);
6784 		}
6785 	}
6786  stage_left:
6787 	if (wakeup_read_socket) {
6788 		sctp_sorwakeup(inp, so);
6789 	}
6790 	return (error);
6791 }
6792 
6793 
6794 #ifdef SCTP_MBUF_LOGGING
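/*
 * Logging wrappers around m_free()/m_freem(): when the sysctl logging level
 * has SCTP_MBUF_LOGGING_ENABLE set, each mbuf released through these helpers
 * is recorded with sctp_log_mb() before being returned to the allocator.
 */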
6795 struct mbuf *
6796 sctp_m_free(struct mbuf *m)
6797 {
6798 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6799 		sctp_log_mb(m, SCTP_MBUF_IFREE);
6800 	}
6801 	return (m_free(m));
6802 }
6803 
6804 void sctp_m_freem(struct mbuf *mb)
6805 {
6806 	while (mb != NULL)
6807 		mb = sctp_m_free(mb);
6808 }
6809 
6810 #endif
6811 
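/*
 * sctp_dynamic_set_primary() does not act on associations directly: it looks
 * up the sctp_ifa for the given address, queues a SCTP_SET_PRIM_ADDR work
 * item on the global address work queue, and starts the ADDR_WQ timer so the
 * request is processed asynchronously.
 */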
6812 int
6813 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6814 {
6815 	/* Given a local address, request a peer-set-primary
6816 	 * for all associations that hold the address.
6817 	 */
6818 	struct sctp_ifa *ifa;
6819 	struct sctp_laddr *wi;
6820 
6821 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6822 	if (ifa == NULL) {
6823 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6824 		return (EADDRNOTAVAIL);
6825 	}
6826 	/* Now that we have the ifa we must awaken the
6827 	 * iterator with this message.
6828 	 */
6829 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6830 	if (wi == NULL) {
6831 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6832 		return (ENOMEM);
6833 	}
6834 	/* Now increment the count and init the wi structure */
6835 	SCTP_INCR_LADDR_COUNT();
6836 	bzero(wi, sizeof(*wi));
6837 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6838 	wi->ifa = ifa;
6839 	wi->action = SCTP_SET_PRIM_ADDR;
6840 	atomic_add_int(&ifa->refcount, 1);
6841 
6842 	/* Now add it to the work queue */
6843 	SCTP_WQ_ADDR_LOCK();
6844 	/*
6845 	 * Should this really be a tailq? As it is we will process the
6846 	 * newest first :-0
6847 	 */
6848 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6849 	SCTP_WQ_ADDR_UNLOCK();
6850 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6851 			 (struct sctp_inpcb *)NULL,
6852 			 (struct sctp_tcb *)NULL,
6853 			 (struct sctp_nets *)NULL);
6854 	return (0);
6855 }
6856 
6857 #if defined(__Userspace__)
6858 /* no sctp_soreceive for __Userspace__ now */
6859 #endif
6860 
6861 #if !defined(__Userspace__)
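/*
 * soreceive() entry point for SCTP sockets.  This is a wrapper around
 * sctp_sorecvmsg() that, when the relevant socket options are enabled,
 * converts the received sctp_extrcvinfo into a control-message chain and
 * duplicates the peer address for the caller.
 */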
6862 int
6863 sctp_soreceive(	struct socket *so,
6864 		struct sockaddr **psa,
6865 		struct uio *uio,
6866 		struct mbuf **mp0,
6867 		struct mbuf **controlp,
6868 		int *flagsp)
6869 {
6870 	int error, fromlen;
6871 	uint8_t sockbuf[256];
6872 	struct sockaddr *from;
6873 	struct sctp_extrcvinfo sinfo;
6874 	int filling_sinfo = 1;
6875 	struct sctp_inpcb *inp;
6876 
6877 	inp = (struct sctp_inpcb *)so->so_pcb;
6878 	/* pick up the assoc we are reading from */
6879 	if (inp == NULL) {
6880 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6881 		return (EINVAL);
6882 	}
6883 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6884 	     sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6885 	     sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6886 	    (controlp == NULL)) {
6887 		/* user does not want the sndrcv ctl */
6888 		filling_sinfo = 0;
6889 	}
6890 	if (psa) {
6891 		from = (struct sockaddr *)sockbuf;
6892 		fromlen = sizeof(sockbuf);
6893 #ifdef HAVE_SA_LEN
6894 		from->sa_len = 0;
6895 #endif
6896 	} else {
6897 		from = NULL;
6898 		fromlen = 0;
6899 	}
6900 
6901 #if defined(__APPLE__)
6902 	SCTP_SOCKET_LOCK(so, 1);
6903 #endif
6904 	if (filling_sinfo) {
6905 		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6906 	}
6907 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6908 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6909 	if (controlp != NULL) {
6910 		/* copy back the sinfo in a CMSG format */
6911 		if (filling_sinfo)
6912 			*controlp = sctp_build_ctl_nchunk(inp,
6913 			                                  (struct sctp_sndrcvinfo *)&sinfo);
6914 		else
6915 			*controlp = NULL;
6916 	}
6917 	if (psa) {
6918 		/* copy back the address info */
6919 #ifdef HAVE_SA_LEN
6920 		if (from && from->sa_len) {
6921 #else
6922 		if (from) {
6923 #endif
6924 #if (defined(__FreeBSD__) && __FreeBSD_version > 500000) || defined(__Windows__)
6925 			*psa = sodupsockaddr(from, M_NOWAIT);
6926 #else
6927 			*psa = dup_sockaddr(from, mp0 == 0);
6928 #endif
6929 		} else {
6930 			*psa = NULL;
6931 		}
6932 	}
6933 #if defined(__APPLE__)
6934 	SCTP_SOCKET_UNLOCK(so, 1);
6935 #endif
6936 	return (error);
6937 }
6938 
6939 
6940 #if (defined(__FreeBSD__) && __FreeBSD_version < 603000) || defined(__Windows__)
6941 /*
6942  * General routine to allocate a hash table with control of memory flags.
6943  * It is in FreeBSD 7.0 and beyond for sure, so this copy is only needed for older versions and Windows. :-)
6944  */
6945 void *
6946 sctp_hashinit_flags(int elements, struct malloc_type *type,
6947                     u_long *hashmask, int flags)
6948 {
6949 	long hashsize;
6950 	LIST_HEAD(generic, generic) *hashtbl;
6951 	int i;
6952 
6953 
6954 	if (elements <= 0) {
6955 #ifdef INVARIANTS
6956 		panic("hashinit: bad elements");
6957 #else
6958 		SCTP_PRINTF("hashinit: bad elements?");
6959 		elements = 1;
6960 #endif
6961 	}
6962 	for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
6963 		continue;
6964 	hashsize >>= 1;
6965 	if (flags & HASH_WAITOK)
6966 		hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
6967 	else if (flags & HASH_NOWAIT)
6968 		hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_NOWAIT);
6969 	else {
6970 #ifdef INVARIANTS
6971 		panic("flag incorrect in hashinit_flags");
6972 #else
6973 		return (NULL);
6974 #endif
6975 	}
6976 
6977 	/* no memory? */
6978 	if (hashtbl == NULL)
6979 		return (NULL);
6980 
6981 	for (i = 0; i < hashsize; i++)
6982 		LIST_INIT(&hashtbl[i]);
6983 	*hashmask = hashsize - 1;
6984 	return (hashtbl);
6985 }
6986 #endif
6987 
6988 #else /*  __Userspace__ ifdef above sctp_soreceive */
6989 /*
6990  * __Userspace__: define sctp_hashinit_flags() and sctp_hashdestroy() for userland.
6991  * NOTE: we don't want multiple definitions here, so the sctp_hashinit_flags()
6992  * above for __FreeBSD__ must be excluded.
6993  *
6994  */
6995 
6996 void *
6997 sctp_hashinit_flags(int elements, struct malloc_type *type,
6998                     u_long *hashmask, int flags)
6999 {
7000 	long hashsize;
7001 	LIST_HEAD(generic, generic) *hashtbl;
7002 	int i;
7003 
7004 	if (elements <= 0) {
7005 		SCTP_PRINTF("hashinit: bad elements?");
7006 #ifdef INVARIANTS
7007 		return (NULL);
7008 #else
7009 		elements = 1;
7010 #endif
7011 	}
7012 	for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
7013 		continue;
7014 	hashsize >>= 1;
7015 	/* Cannot use MALLOC here because it has to be declared or defined
7016 	 * using MALLOC_DECLARE or MALLOC_DEFINE first. */
7017 	if (flags & HASH_WAITOK)
7018 		hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl));
7019 	else if (flags & HASH_NOWAIT)
7020 		hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl));
7021 	else {
7022 #ifdef INVARIANTS
7023 		SCTP_PRINTF("flag incorrect in hashinit_flags.\n");
7024 #endif
7025 		return (NULL);
7026 	}
7027 
7028 	/* no memory? */
7029 	if (hashtbl == NULL)
7030 		return (NULL);
7031 
7032 	for (i = 0; i < hashsize; i++)
7033 		LIST_INIT(&hashtbl[i]);
7034 	*hashmask = hashsize - 1;
7035 	return (hashtbl);
7036 }
7037 
7038 
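/*
 * Userland replacement for hashdestroy(): free a table allocated by
 * sctp_hashinit_flags() above, but only if every hash chain is empty.
 */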
7039 void
7040 sctp_hashdestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask)
7041 {
7042 	LIST_HEAD(generic, generic) *hashtbl, *hp;
7043 
7044 	hashtbl = vhashtbl;
7045 	for (hp = hashtbl; hp <= &hashtbl[hashmask]; hp++)
7046 		if (!LIST_EMPTY(hp)) {
7047 			SCTP_PRINTF("hashdestroy: hash not empty.\n");
7048 			return;
7049 		}
7050 	FREE(hashtbl, type);
7051 }
7052 
7053 
7054 void
7055 sctp_hashfreedestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask)
7056 {
7057 	LIST_HEAD(generic, generic) *hashtbl/*, *hp*/;
7058 	/*
7059 	LIST_ENTRY(type) *start, *temp;
7060 	 */
7061 	hashtbl = vhashtbl;
7062 	/* Apparently temp is not dynamically allocated, so attempts to
7063 	   free it result in an error.
7064 	for (hp = hashtbl; hp <= &hashtbl[hashmask]; hp++)
7065 		if (!LIST_EMPTY(hp)) {
7066 			start = LIST_FIRST(hp);
7067 			while (start != NULL) {
7068 				temp = start;
7069 				start = start->le_next;
7070 				SCTP_PRINTF("%s: %p \n", __func__, (void *)temp);
7071 				FREE(temp, type);
7072 			}
7073 		}
7074 	 */
7075 	FREE(hashtbl, type);
7076 }
7077 
7078 
7079 #endif
7080 
7081 
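/*
 * sctp_connectx() helper: walk a packed list of sockaddrs and add each one as
 * a remote address of the given association.  Returns the number of addresses
 * added; on an invalid address or allocation failure the association is
 * freed, *error is set, and the caller must not touch the stcb afterwards.
 */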
7082 int
7083 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
7084 			 int totaddr, int *error)
7085 {
7086 	int added = 0;
7087 	int i;
7088 	struct sctp_inpcb *inp;
7089 	struct sockaddr *sa;
7090 	size_t incr = 0;
7091 #ifdef INET
7092 	struct sockaddr_in *sin;
7093 #endif
7094 #ifdef INET6
7095 	struct sockaddr_in6 *sin6;
7096 #endif
7097 
7098 	sa = addr;
7099 	inp = stcb->sctp_ep;
7100 	*error = 0;
7101 	for (i = 0; i < totaddr; i++) {
7102 		switch (sa->sa_family) {
7103 #ifdef INET
7104 		case AF_INET:
7105 			incr = sizeof(struct sockaddr_in);
7106 			sin = (struct sockaddr_in *)sa;
7107 			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
7108 			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
7109 			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
7110 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7111 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ+SCTP_LOC_7);
7112 				*error = EINVAL;
7113 				goto out_now;
7114 			}
7115 			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
7116 				/* assoc is gone; no unlock needed */
7117 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
7118 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ+SCTP_LOC_7);
7119 				*error = ENOBUFS;
7120 				goto out_now;
7121 			}
7122 			added++;
7123 			break;
7124 #endif
7125 #ifdef INET6
7126 		case AF_INET6:
7127 			incr = sizeof(struct sockaddr_in6);
7128 			sin6 = (struct sockaddr_in6 *)sa;
7129 			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
7130 			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
7131 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7132 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ+SCTP_LOC_8);
7133 				*error = EINVAL;
7134 				goto out_now;
7135 			}
7136 			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
7137 				/* assoc is gone; no unlock needed */
7138 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
7139 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ+SCTP_LOC_8);
7140 				*error = ENOBUFS;
7141 				goto out_now;
7142 			}
7143 			added++;
7144 			break;
7145 #endif
7146 #if defined(__Userspace__)
7147 		case AF_CONN:
7148 			incr = sizeof(struct sockaddr_in6);
7149 			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
7150 				/* assoc is gone; no unlock needed */
7151 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
7152 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ+SCTP_LOC_8);
7153 				*error = ENOBUFS;
7154 				goto out_now;
7155 			}
7156 			added++;
7157 			break;
7158 #endif
7159 		default:
7160 			break;
7161 		}
7162 		sa = (struct sockaddr *)((caddr_t)sa + incr);
7163 	}
7164  out_now:
7165 	return (added);
7166 }
7167 
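/*
 * sctp_connectx() helper: validate and count the IPv4/IPv6 addresses in a
 * packed sockaddr list.  If one of them already belongs to an existing
 * association, that stcb is returned with the endpoint reference still held;
 * otherwise NULL is returned and the v4/v6 counts are reported to the caller.
 */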
7168 struct sctp_tcb *
7169 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
7170 			  int *totaddr, int *num_v4, int *num_v6, int *error,
7171 			  int limit, int *bad_addr)
7172 {
7173 	struct sockaddr *sa;
7174 	struct sctp_tcb *stcb = NULL;
7175 	size_t incr, at, i;
7176 	at = incr = 0;
7177 	sa = addr;
7178 
7179 	*error = *num_v6 = *num_v4 = 0;
7180 	/* account and validate addresses */
7181 	for (i = 0; i < (size_t)*totaddr; i++) {
7182 		switch (sa->sa_family) {
7183 #ifdef INET
7184 		case AF_INET:
7185 			(*num_v4) += 1;
7186 			incr = sizeof(struct sockaddr_in);
7187 #ifdef HAVE_SA_LEN
7188 			if (sa->sa_len != incr) {
7189 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7190 				*error = EINVAL;
7191 				*bad_addr = 1;
7192 				return (NULL);
7193 			}
7194 #endif
7195 			break;
7196 #endif
7197 #ifdef INET6
7198 		case AF_INET6:
7199 		{
7200 			struct sockaddr_in6 *sin6;
7201 
7202 			sin6 = (struct sockaddr_in6 *)sa;
7203 			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
7204 				/* Must be non-mapped for connectx */
7205 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7206 				*error = EINVAL;
7207 				*bad_addr = 1;
7208 				return (NULL);
7209 			}
7210 			(*num_v6) += 1;
7211 			incr = sizeof(struct sockaddr_in6);
7212 #ifdef HAVE_SA_LEN
7213 			if (sa->sa_len != incr) {
7214 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7215 				*error = EINVAL;
7216 				*bad_addr = 1;
7217 				return (NULL);
7218 			}
7219 #endif
7220 			break;
7221 		}
7222 #endif
7223 		default:
7224 			*totaddr = i;
7225 			/* we are done */
7226 			break;
7227 		}
7228 		if (i == (size_t)*totaddr) {
7229 			break;
7230 		}
7231 		SCTP_INP_INCR_REF(inp);
7232 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
7233 		if (stcb != NULL) {
7234 			/* Already have or am bringing up an association */
7235 			return (stcb);
7236 		} else {
7237 			SCTP_INP_DECR_REF(inp);
7238 		}
7239 		if ((at + incr) > (size_t)limit) {
7240 			*totaddr = i;
7241 			break;
7242 		}
7243 		sa = (struct sockaddr *)((caddr_t)sa + incr);
7244 	}
7245 	return ((struct sctp_tcb *)NULL);
7246 }
7247 
7248 /*
7249  * sctp_bindx(ADD) for one address.
7250  * assumes all arguments are valid/checked by caller.
7251  */
7252 void
7253 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
7254 		       struct sockaddr *sa, sctp_assoc_t assoc_id,
7255 		       uint32_t vrf_id, int *error, void *p)
7256 {
7257 	struct sockaddr *addr_touse;
7258 #if defined(INET) && defined(INET6)
7259 	struct sockaddr_in sin;
7260 #endif
7261 #ifdef SCTP_MVRF
7262 	int i, fnd = 0;
7263 #endif
7264 
7265 	/* see if we're bound all already! */
7266 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
7267 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7268 		*error = EINVAL;
7269 		return;
7270 	}
7271 #ifdef SCTP_MVRF
7272 	/* Is the VRF one we have */
7273 	for (i = 0; i < inp->num_vrfs; i++) {
7274 		if (vrf_id == inp->m_vrf_ids[i]) {
7275 			fnd = 1;
7276 			break;
7277 		}
7278 	}
7279 	if (!fnd) {
7280 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7281 		*error = EINVAL;
7282 		return;
7283 	}
7284 #endif
7285 	addr_touse = sa;
7286 #ifdef INET6
7287 	if (sa->sa_family == AF_INET6) {
7288 #ifdef INET
7289 		struct sockaddr_in6 *sin6;
7290 
7291 #endif
7292 #ifdef HAVE_SA_LEN
7293 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
7294 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7295 			*error = EINVAL;
7296 			return;
7297 		}
7298 #endif
7299 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
7300 			/* can only bind v6 on PF_INET6 sockets */
7301 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7302 			*error = EINVAL;
7303 			return;
7304 		}
7305 #ifdef INET
7306 		sin6 = (struct sockaddr_in6 *)addr_touse;
7307 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
7308 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
7309 			    SCTP_IPV6_V6ONLY(inp)) {
7310 				/* can't bind v4-mapped addresses on a v6-only socket */
7311 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7312 				*error = EINVAL;
7313 				return;
7314 			}
7315 			in6_sin6_2_sin(&sin, sin6);
7316 			addr_touse = (struct sockaddr *)&sin;
7317 		}
7318 #endif
7319 	}
7320 #endif
7321 #ifdef INET
7322 	if (sa->sa_family == AF_INET) {
7323 #ifdef HAVE_SA_LEN
7324 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
7325 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7326 			*error = EINVAL;
7327 			return;
7328 		}
7329 #endif
7330 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
7331 		    SCTP_IPV6_V6ONLY(inp)) {
7332 			/* can't bind v4 addresses on a v6-only socket */
7333 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7334 			*error = EINVAL;
7335 			return;
7336 		}
7337 	}
7338 #endif
7339 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
7340 #if !(defined(__Panda__) || defined(__Windows__))
7341 		if (p == NULL) {
7342 			/* Can't get proc for Net/Open BSD */
7343 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7344 			*error = EINVAL;
7345 			return;
7346 		}
7347 #endif
7348 		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
7349 		return;
7350 	}
7351 	/*
7352 	 * No locks required here since bind and mgmt_ep_sa
7353 	 * each do their own locking. If we ever implement the
7354 	 * FIX below, we may need to lock in that case.
7355 	 */
7356 	if (assoc_id == 0) {
7357 		/* add the address */
7358 		struct sctp_inpcb *lep;
7359 		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
7360 
7361 		/* validate the incoming port */
7362 		if ((lsin->sin_port != 0) &&
7363 		    (lsin->sin_port != inp->sctp_lport)) {
7364 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7365 			*error = EINVAL;
7366 			return;
7367 		} else {
7368 			/* user specified 0 port, set it to existing port */
7369 			lsin->sin_port = inp->sctp_lport;
7370 		}
7371 
7372 		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
7373 		if (lep != NULL) {
7374 			/*
7375 			 * We must decrement the refcount
7376 			 * since we have the ep already and
7377 			 * are binding. No remove going on
7378 			 * here.
7379 			 */
7380 			SCTP_INP_DECR_REF(lep);
7381 		}
7382 		if (lep == inp) {
7383 			/* already bound to it.. ok */
7384 			return;
7385 		} else if (lep == NULL) {
7386 			((struct sockaddr_in *)addr_touse)->sin_port = 0;
7387 			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
7388 						      SCTP_ADD_IP_ADDRESS,
7389 						      vrf_id, NULL);
7390 		} else {
7391 			*error = EADDRINUSE;
7392 		}
7393 		if (*error)
7394 			return;
7395 	} else {
7396 		/*
7397 		 * FIX: decide whether we allow assoc based
7398 		 * bindx
7399 		 */
7400 	}
7401 }
7402 
7403 /*
7404  * sctp_bindx(DELETE) for one address.
7405  * assumes all arguments are valid/checked by caller.
7406  */
7407 void
7408 sctp_bindx_delete_address(struct sctp_inpcb *inp,
7409 			  struct sockaddr *sa, sctp_assoc_t assoc_id,
7410 			  uint32_t vrf_id, int *error)
7411 {
7412 	struct sockaddr *addr_touse;
7413 #if defined(INET) && defined(INET6)
7414 	struct sockaddr_in sin;
7415 #endif
7416 #ifdef SCTP_MVRF
7417 	int i, fnd = 0;
7418 #endif
7419 
7420 	/* see if we're bound all already! */
7421 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
7422 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7423 		*error = EINVAL;
7424 		return;
7425 	}
7426 #ifdef SCTP_MVRF
7427 	/* Is the VRF one we have */
7428 	for (i = 0; i < inp->num_vrfs; i++) {
7429 		if (vrf_id == inp->m_vrf_ids[i]) {
7430 			fnd = 1;
7431 			break;
7432 		}
7433 	}
7434 	if (!fnd) {
7435 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7436 		*error = EINVAL;
7437 		return;
7438 	}
7439 #endif
7440 	addr_touse = sa;
7441 #ifdef INET6
7442 	if (sa->sa_family == AF_INET6) {
7443 #ifdef INET
7444 		struct sockaddr_in6 *sin6;
7445 #endif
7446 
7447 #ifdef HAVE_SA_LEN
7448 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
7449 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7450 			*error = EINVAL;
7451 			return;
7452 		}
7453 #endif
7454 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
7455 			/* can only bind v6 on PF_INET6 sockets */
7456 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7457 			*error = EINVAL;
7458 			return;
7459 		}
7460 #ifdef INET
7461 		sin6 = (struct sockaddr_in6 *)addr_touse;
7462 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
7463 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
7464 			    SCTP_IPV6_V6ONLY(inp)) {
7465 				/* can't bind v4-mapped addresses on a v6-only socket */
7466 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7467 				*error = EINVAL;
7468 				return;
7469 			}
7470 			in6_sin6_2_sin(&sin, sin6);
7471 			addr_touse = (struct sockaddr *)&sin;
7472 		}
7473 #endif
7474 	}
7475 #endif
7476 #ifdef INET
7477 	if (sa->sa_family == AF_INET) {
7478 #ifdef HAVE_SA_LEN
7479 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
7480 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7481 			*error = EINVAL;
7482 			return;
7483 		}
7484 #endif
7485 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
7486 		    SCTP_IPV6_V6ONLY(inp)) {
7487 			/* can't bind v4 addresses on a v6-only socket */
7488 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7489 			*error = EINVAL;
7490 			return;
7491 		}
7492 	}
7493 #endif
7494 	/*
7495 	 * No lock required; mgmt_ep_sa does its own locking.
7496 	 * If the FIX: below is ever changed we may need to
7497 	 * lock before calling association level binding.
7498 	 */
7499 	if (assoc_id == 0) {
7500 		/* delete the address */
7501 		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
7502 					      SCTP_DEL_IP_ADDRESS,
7503 					      vrf_id, NULL);
7504 	} else {
7505 		/*
7506 		 * FIX: decide whether we allow assoc based
7507 		 * bindx
7508 		 */
7509 	}
7510 }
7511 
7512 /*
7513  * returns the valid local address count for an assoc, taking into account
7514  * all scoping rules
7515  */
7516 int
7517 sctp_local_addr_count(struct sctp_tcb *stcb)
7518 {
7519 	int loopback_scope;
7520 #if defined(INET)
7521 	int ipv4_local_scope, ipv4_addr_legal;
7522 #endif
7523 #if defined (INET6)
7524 	int local_scope, site_scope, ipv6_addr_legal;
7525 #endif
7526 #if defined(__Userspace__)
7527 	int conn_addr_legal;
7528 #endif
7529 	struct sctp_vrf *vrf;
7530 	struct sctp_ifn *sctp_ifn;
7531 	struct sctp_ifa *sctp_ifa;
7532 	int count = 0;
7533 
7534 	/* Turn on all the appropriate scopes */
7535 	loopback_scope = stcb->asoc.scope.loopback_scope;
7536 #if defined(INET)
7537 	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
7538 	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
7539 #endif
7540 #if defined(INET6)
7541 	local_scope = stcb->asoc.scope.local_scope;
7542 	site_scope = stcb->asoc.scope.site_scope;
7543 	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
7544 #endif
7545 #if defined(__Userspace__)
7546 	conn_addr_legal = stcb->asoc.scope.conn_addr_legal;
7547 #endif
7548 	SCTP_IPI_ADDR_RLOCK();
7549 	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
7550 	if (vrf == NULL) {
7551 		/* no vrf, no addresses */
7552 		SCTP_IPI_ADDR_RUNLOCK();
7553 		return (0);
7554 	}
7555 
7556 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
7557 		/*
7558 		 * bound all case: go through all ifns on the vrf
7559 		 */
7560 		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
7561 			if ((loopback_scope == 0) &&
7562 			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
7563 				continue;
7564 			}
7565 			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
7566 				if (sctp_is_addr_restricted(stcb, sctp_ifa))
7567 					continue;
7568 				switch (sctp_ifa->address.sa.sa_family) {
7569 #ifdef INET
7570 				case AF_INET:
7571 					if (ipv4_addr_legal) {
7572 						struct sockaddr_in *sin;
7573 
7574 						sin = &sctp_ifa->address.sin;
7575 						if (sin->sin_addr.s_addr == 0) {
7576 							/* skip unspecified addrs */
7577 							continue;
7578 						}
7579 #if defined(__FreeBSD__)
7580 						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
7581 						                     &sin->sin_addr) != 0) {
7582 							continue;
7583 						}
7584 #endif
7585 						if ((ipv4_local_scope == 0) &&
7586 						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
7587 							continue;
7588 						}
7589 						/* count this one */
7590 						count++;
7591 					} else {
7592 						continue;
7593 					}
7594 					break;
7595 #endif
7596 #ifdef INET6
7597 				case AF_INET6:
7598 					if (ipv6_addr_legal) {
7599 						struct sockaddr_in6 *sin6;
7600 
7601 #if defined(SCTP_EMBEDDED_V6_SCOPE) && !defined(SCTP_KAME)
7602 						struct sockaddr_in6 lsa6;
7603 #endif
7604 						sin6 = &sctp_ifa->address.sin6;
7605 						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
7606 							continue;
7607 						}
7608 #if defined(__FreeBSD__)
7609 						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
7610 						                     &sin6->sin6_addr) != 0) {
7611 							continue;
7612 						}
7613 #endif
7614 						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
7615 							if (local_scope == 0)
7616 								continue;
7617 #if defined(SCTP_EMBEDDED_V6_SCOPE)
7618 							if (sin6->sin6_scope_id == 0) {
7619 #ifdef SCTP_KAME
7620 								if (sa6_recoverscope(sin6) != 0)
7621 									/*
7622 									 * bad link
7623 									 * local
7624 									 * address
7625 									 */
7626 									continue;
7627 #else
7628 								lsa6 = *sin6;
7629 								if (in6_recoverscope(&lsa6,
7630 								                     &lsa6.sin6_addr,
7631 								                     NULL))
7632 									/*
7633 									 * bad link
7634 									 * local
7635 									 * address
7636 									 */
7637 									continue;
7638 								sin6 = &lsa6;
7639 #endif /* SCTP_KAME */
7640 							}
7641 #endif /* SCTP_EMBEDDED_V6_SCOPE */
7642 						}
7643 						if ((site_scope == 0) &&
7644 						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
7645 							continue;
7646 						}
7647 						/* count this one */
7648 						count++;
7649 					}
7650 					break;
7651 #endif
7652 #if defined(__Userspace__)
7653 				case AF_CONN:
7654 					if (conn_addr_legal) {
7655 						count++;
7656 					}
7657 					break;
7658 #endif
7659 				default:
7660 					/* TSNH */
7661 					break;
7662 				}
7663 			}
7664 		}
7665 	} else {
7666 		/*
7667 		 * subset bound case
7668 		 */
7669 		struct sctp_laddr *laddr;
7670 		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
7671 			     sctp_nxt_addr) {
7672 			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
7673 				continue;
7674 			}
7675 			/* count this one */
7676 			count++;
7677 		}
7678 	}
7679 	SCTP_IPI_ADDR_RUNLOCK();
7680 	return (count);
7681 }
7682 
7683 #if defined(SCTP_LOCAL_TRACE_BUF)
7684 
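/*
 * Record one entry in the local trace buffer.  The slot index is advanced
 * with atomic_cmpset_int() so concurrent callers each claim their own slot
 * without locking; the buffer wraps around at SCTP_MAX_LOGGING_SIZE entries.
 */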
7685 void
7686 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
7687 {
7688 	uint32_t saveindex, newindex;
7689 
7690 #if defined(__Windows__)
7691 	if (SCTP_BASE_SYSCTL(sctp_log) == NULL) {
7692 		return;
7693 	}
7694 	do {
7695 		saveindex = SCTP_BASE_SYSCTL(sctp_log)->index;
7696 		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
7697 			newindex = 1;
7698 		} else {
7699 			newindex = saveindex + 1;
7700 		}
7701 	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log)->index, saveindex, newindex) == 0);
7702 	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
7703 		saveindex = 0;
7704 	}
7705 	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
7706 	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].subsys = subsys;
7707 	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[0] = a;
7708 	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[1] = b;
7709 	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[2] = c;
7710 	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[3] = d;
7711 	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[4] = e;
7712 	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[5] = f;
7713 #else
7714 	do {
7715 		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
7716 		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
7717 			newindex = 1;
7718 		} else {
7719 			newindex = saveindex + 1;
7720 		}
7721 	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
7722 	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
7723 		saveindex = 0;
7724 	}
7725 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
7726 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
7727 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
7728 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
7729 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
7730 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
7731 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
7732 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
7733 #endif
7734 }
7735 
7736 #endif
7737 #if defined(__FreeBSD__)
7738 #if __FreeBSD_version >= 800044
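/*
 * Callback registered via udp_set_kernel_tunneling().  It takes a UDP
 * encapsulated SCTP packet, remembers the UDP source port, strips the UDP
 * header, and re-injects the result into the normal IPv4/IPv6 SCTP input
 * path.
 */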
7739 static void
7740 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored,
7741     const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
7742 {
7743 	struct ip *iph;
7744 #ifdef INET6
7745 	struct ip6_hdr *ip6;
7746 #endif
7747 	struct mbuf *sp, *last;
7748 	struct udphdr *uhdr;
7749 	uint16_t port;
7750 
7751 	if ((m->m_flags & M_PKTHDR) == 0) {
7752 		/* Can't handle one that is not a pkt hdr */
7753 		goto out;
7754 	}
7755 	/* Pull the src port */
7756 	iph = mtod(m, struct ip *);
7757 	uhdr = (struct udphdr *)((caddr_t)iph + off);
7758 	port = uhdr->uh_sport;
7759 	/* Split out the mbuf chain: leave the
7760 	 * IP header in m and place the
7761 	 * rest in sp.
7762 	 */
7763 	sp = m_split(m, off, M_NOWAIT);
7764 	if (sp == NULL) {
7765 		/* Gak, drop packet, we can't do a split */
7766 		goto out;
7767 	}
7768 	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
7769 		/* Gak, packet can't have an SCTP header in it - too small */
7770 		m_freem(sp);
7771 		goto out;
7772 	}
7773 	/* Now pull up the UDP header and SCTP header together */
7774 	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
7775 	if (sp == NULL) {
7776 		/* Gak pullup failed */
7777 		goto out;
7778 	}
7779 	/* Trim out the UDP header */
7780 	m_adj(sp, sizeof(struct udphdr));
7781 
7782 	/* Now reconstruct the mbuf chain */
7783 	for (last = m; last->m_next; last = last->m_next);
7784 	last->m_next = sp;
7785 	m->m_pkthdr.len += sp->m_pkthdr.len;
7786 	iph = mtod(m, struct ip *);
7787 	switch (iph->ip_v) {
7788 #ifdef INET
7789 	case IPVERSION:
7790 #if __FreeBSD_version >= 1000000
7791 		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
7792 #else
7793 		iph->ip_len -= sizeof(struct udphdr);
7794 #endif
7795 		sctp_input_with_port(m, off, port);
7796 		break;
7797 #endif
7798 #ifdef INET6
7799 	case IPV6_VERSION >> 4:
7800 		ip6 = mtod(m, struct ip6_hdr *);
7801 		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
7802 		sctp6_input_with_port(&m, &off, port);
7803 		break;
7804 #endif
7805 	default:
7806 		goto out;
7807 		break;
7808 	}
7809 	return;
7810  out:
7811 	m_freem(m);
7812 }
7813 #endif
7814 
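/*
 * Close the kernel UDP tunneling sockets opened by sctp_over_udp_start(),
 * if any.
 */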
7815 void
7816 sctp_over_udp_stop(void)
7817 {
7818 	/*
7819 	 * This function assumes the sysctl caller holds sctp_sysctl_info_lock() for writing!
7820 	 */
7821 #ifdef INET
7822 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7823 		soclose(SCTP_BASE_INFO(udp4_tun_socket));
7824 		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
7825 	}
7826 #endif
7827 #ifdef INET6
7828 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7829 		soclose(SCTP_BASE_INFO(udp6_tun_socket));
7830 		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
7831 	}
7832 #endif
7833 }
7834 
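/*
 * Start SCTP-over-UDP encapsulation: create a kernel UDP socket per address
 * family, register sctp_recv_udp_tunneled_packet() as the tunneling callback,
 * and bind the socket(s) to the port configured via the
 * sctp_udp_tunneling_port sysctl.  Returns 0 or an errno value; EALREADY if
 * the sockets already exist.
 */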
7835 int
7836 sctp_over_udp_start(void)
7837 {
7838 #if __FreeBSD_version >= 800044
7839 	uint16_t port;
7840 	int ret;
7841 #ifdef INET
7842 	struct sockaddr_in sin;
7843 #endif
7844 #ifdef INET6
7845 	struct sockaddr_in6 sin6;
7846 #endif
7847 	/*
7848 	 * This function assumes the sysctl caller holds sctp_sysctl_info_lock() for writing!
7849 	 */
7850 	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
7851 	if (ntohs(port) == 0) {
7852 		/* Must have a port set */
7853 		return (EINVAL);
7854 	}
7855 #ifdef INET
7856 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7857 		/* Already running -- must stop first */
7858 		return (EALREADY);
7859 	}
7860 #endif
7861 #ifdef INET6
7862 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7863 		/* Already running -- must stop first */
7864 		return (EALREADY);
7865 	}
7866 #endif
7867 #ifdef INET
7868 	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
7869 	                    SOCK_DGRAM, IPPROTO_UDP,
7870 	                    curthread->td_ucred, curthread))) {
7871 		sctp_over_udp_stop();
7872 		return (ret);
7873 	}
7874 	/* Call the special UDP hook. */
7875 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
7876 	                                    sctp_recv_udp_tunneled_packet, NULL))) {
7877 		sctp_over_udp_stop();
7878 		return (ret);
7879 	}
7880 	/* Ok, we have a socket, bind it to the port. */
7881 	memset(&sin, 0, sizeof(struct sockaddr_in));
7882 	sin.sin_len = sizeof(struct sockaddr_in);
7883 	sin.sin_family = AF_INET;
7884 	sin.sin_port = htons(port);
7885 	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
7886 	                  (struct sockaddr *)&sin, curthread))) {
7887 		sctp_over_udp_stop();
7888 		return (ret);
7889 	}
7890 #endif
7891 #ifdef INET6
7892 	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
7893 	                    SOCK_DGRAM, IPPROTO_UDP,
7894 	                    curthread->td_ucred, curthread))) {
7895 		sctp_over_udp_stop();
7896 		return (ret);
7897 	}
7898 	/* Call the special UDP hook. */
7899 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
7900 	                                    sctp_recv_udp_tunneled_packet, NULL))) {
7901 		sctp_over_udp_stop();
7902 		return (ret);
7903 	}
7904 	/* Ok, we have a socket, bind it to the port. */
7905 	memset(&sin6, 0, sizeof(struct sockaddr_in6));
7906 	sin6.sin6_len = sizeof(struct sockaddr_in6);
7907 	sin6.sin6_family = AF_INET6;
7908 	sin6.sin6_port = htons(port);
7909 	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
7910 	                  (struct sockaddr *)&sin6, curthread))) {
7911 		sctp_over_udp_stop();
7912 		return (ret);
7913 	}
7914 #endif
7915 	return (0);
7916 #else
7917 	return (ENOTSUP);
7918 #endif
7919 }
7920 #endif
7921