1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
5 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * a) Redistributions of source code must retain the above copyright notice,
12 * this list of conditions and the following disclaimer.
13 *
14 * b) Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the distribution.
17 *
18 * c) Neither the name of Cisco Systems, Inc. nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35 #ifdef __FreeBSD__
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD: head/sys/netinet/sctputil.c 326163 2017-11-24 12:18:48Z tuexen $");
38 #endif
39
40 #include <netinet/sctp_os.h>
41 #include <netinet/sctp_pcb.h>
42 #include <netinet/sctputil.h>
43 #include <netinet/sctp_var.h>
44 #include <netinet/sctp_sysctl.h>
45 #ifdef INET6
46 #if defined(__Userspace__) || defined(__FreeBSD__)
47 #include <netinet6/sctp6_var.h>
48 #endif
49 #endif
50 #include <netinet/sctp_header.h>
51 #include <netinet/sctp_output.h>
52 #include <netinet/sctp_uio.h>
53 #include <netinet/sctp_timer.h>
54 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
55 #include <netinet/sctp_auth.h>
56 #include <netinet/sctp_asconf.h>
57 #include <netinet/sctp_bsd_addr.h>
58 #if defined(__Userspace__)
59 #include <netinet/sctp_constants.h>
60 #endif
61 #if defined(__FreeBSD__)
62 #if defined(INET6) || defined(INET)
63 #include <netinet/tcp_var.h>
64 #endif
65 #include <netinet/udp.h>
66 #include <netinet/udp_var.h>
67 #include <sys/proc.h>
68 #ifdef INET6
69 #include <netinet/icmp6.h>
70 #endif
71 #endif
72
73 #if defined(__APPLE__)
74 #define APPLE_FILE_NO 8
75 #endif
76
77 #if defined(__Windows__)
78 #if !defined(SCTP_LOCAL_TRACE_BUF)
79 #include "eventrace_netinet.h"
80 #include "sctputil.tmh" /* this is the file that will be auto generated */
81 #endif
82 #else
83 #ifndef KTR_SCTP
84 #define KTR_SCTP KTR_SUBSYS
85 #endif
86 #endif
87
88 extern const struct sctp_cc_functions sctp_cc_functions[];
89 extern const struct sctp_ss_functions sctp_ss_functions[];
90
void
sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
{
#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
	/*
	 * Trace a socket-buffer accounting event: the socket buffer's byte
	 * count, the association's view of it, and the applied delta.
	 * 'from' tags the call site; 'stcb' may be NULL.
	 */
	struct sctp_cwnd_log sctp_clog;

	/* Zero first so the misc-overlay readout never logs stale bytes. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.sb.stcb = stcb;
	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
	if (stcb) {
		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
	} else {
		sctp_clog.x.sb.stcb_sbcc = 0;
	}
	sctp_clog.x.sb.incr = incr;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
		  SCTP_LOG_EVENT_SB,
		  from,
		  sctp_clog.x.misc.log1,
		  sctp_clog.x.misc.log2,
		  sctp_clog.x.misc.log3,
		  sctp_clog.x.misc.log4);
#endif
}
113
114 void
sctp_log_closing(struct sctp_inpcb * inp,struct sctp_tcb * stcb,int16_t loc)115 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
116 {
117 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
118 struct sctp_cwnd_log sctp_clog;
119
120 sctp_clog.x.close.inp = (void *)inp;
121 sctp_clog.x.close.sctp_flags = inp->sctp_flags;
122 if (stcb) {
123 sctp_clog.x.close.stcb = (void *)stcb;
124 sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
125 } else {
126 sctp_clog.x.close.stcb = 0;
127 sctp_clog.x.close.state = 0;
128 }
129 sctp_clog.x.close.loc = loc;
130 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
131 SCTP_LOG_EVENT_CLOSE,
132 0,
133 sctp_clog.x.misc.log1,
134 sctp_clog.x.misc.log2,
135 sctp_clog.x.misc.log3,
136 sctp_clog.x.misc.log4);
137 #endif
138 }
139
void
rto_logging(struct sctp_nets *net, int from)
{
#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
	/*
	 * Trace an RTT measurement for destination 'net'.  'from' tags the
	 * call site.  'net' must be non-NULL (dereferenced below).
	 */
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.rto.net = (void *) net;
	/* net->rtt divided by 1000 — presumably usec -> msec; confirm units. */
	sctp_clog.x.rto.rtt = net->rtt / 1000;
	/* The record is emitted as four raw words via the 'misc' union view. */
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
		  SCTP_LOG_EVENT_RTT,
		  from,
		  sctp_clog.x.misc.log1,
		  sctp_clog.x.misc.log2,
		  sctp_clog.x.misc.log3,
		  sctp_clog.x.misc.log4);
#endif
}
158
159 void
sctp_log_strm_del_alt(struct sctp_tcb * stcb,uint32_t tsn,uint16_t sseq,uint16_t stream,int from)160 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
161 {
162 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
163 struct sctp_cwnd_log sctp_clog;
164
165 sctp_clog.x.strlog.stcb = stcb;
166 sctp_clog.x.strlog.n_tsn = tsn;
167 sctp_clog.x.strlog.n_sseq = sseq;
168 sctp_clog.x.strlog.e_tsn = 0;
169 sctp_clog.x.strlog.e_sseq = 0;
170 sctp_clog.x.strlog.strm = stream;
171 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
172 SCTP_LOG_EVENT_STRM,
173 from,
174 sctp_clog.x.misc.log1,
175 sctp_clog.x.misc.log2,
176 sctp_clog.x.misc.log3,
177 sctp_clog.x.misc.log4);
178 #endif
179 }
180
void
sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
{
#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
	/*
	 * Trace a Nagle (send coalescing) decision: flight and queue state
	 * at the moment 'action' was taken.  'stcb' must be non-NULL.
	 */
	struct sctp_cwnd_log sctp_clog;

	/* Zero first so the misc-overlay readout never logs stale bytes. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.nagle.stcb = (void *)stcb;
	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
		  SCTP_LOG_EVENT_NAGLE,
		  action,
		  sctp_clog.x.misc.log1,
		  sctp_clog.x.misc.log2,
		  sctp_clog.x.misc.log3,
		  sctp_clog.x.misc.log4);
#endif
}
201
202 void
sctp_log_sack(uint32_t old_cumack,uint32_t cumack,uint32_t tsn,uint16_t gaps,uint16_t dups,int from)203 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
204 {
205 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
206 struct sctp_cwnd_log sctp_clog;
207
208 sctp_clog.x.sack.cumack = cumack;
209 sctp_clog.x.sack.oldcumack = old_cumack;
210 sctp_clog.x.sack.tsn = tsn;
211 sctp_clog.x.sack.numGaps = gaps;
212 sctp_clog.x.sack.numDups = dups;
213 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
214 SCTP_LOG_EVENT_SACK,
215 from,
216 sctp_clog.x.misc.log1,
217 sctp_clog.x.misc.log2,
218 sctp_clog.x.misc.log3,
219 sctp_clog.x.misc.log4);
220 #endif
221 }
222
void
sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
{
#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
	/*
	 * Trace the receiver mapping-array state: base TSN of the map, the
	 * cumulative-ack point and the highest TSN seen.  'from' tags the
	 * call site.
	 */
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.map.base = map;
	sctp_clog.x.map.cum = cum;
	sctp_clog.x.map.high = high;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
		  SCTP_LOG_EVENT_MAP,
		  from,
		  sctp_clog.x.misc.log1,
		  sctp_clog.x.misc.log2,
		  sctp_clog.x.misc.log3,
		  sctp_clog.x.misc.log4);
#endif
}
242
void
sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
{
#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
	/*
	 * Trace fast-retransmit bookkeeping: the largest TSN in the SACK,
	 * the largest newly-acked TSN, and the TSN being considered.
	 * 'from' tags the call site.
	 */
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.fr.largest_tsn = biggest_tsn;
	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
	sctp_clog.x.fr.tsn = tsn;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
		  SCTP_LOG_EVENT_FR,
		  from,
		  sctp_clog.x.misc.log1,
		  sctp_clog.x.misc.log2,
		  sctp_clog.x.misc.log3,
		  sctp_clog.x.misc.log4);
#endif
}
262
263 #ifdef SCTP_MBUF_LOGGING
void
sctp_log_mb(struct mbuf *m, int from)
{
#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
	/*
	 * Trace one mbuf: flags, length, data pointer and, for cluster
	 * (extended) mbufs, the external buffer base and reference count.
	 * 'from' tags the call site.
	 */
	struct sctp_cwnd_log sctp_clog;

	/*
	 * Zero first: on __APPLE__ the extended branch below never sets
	 * 'refcnt', so without this the record would log an uninitialized
	 * byte through the misc overlay.
	 */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.mb.mp = m;
	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
	if (SCTP_BUF_IS_EXTENDED(m)) {
		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
#if defined(__APPLE__)
		/* APPLE does not use a ref_cnt, but a forward/backward ref queue */
#else
		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
#endif
	} else {
		sctp_clog.x.mb.ext = 0;
		sctp_clog.x.mb.refcnt = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
		  SCTP_LOG_EVENT_MBUF,
		  from,
		  sctp_clog.x.misc.log1,
		  sctp_clog.x.misc.log2,
		  sctp_clog.x.misc.log3,
		  sctp_clog.x.misc.log4);
#endif
}
294
295 void
sctp_log_mbc(struct mbuf * m,int from)296 sctp_log_mbc(struct mbuf *m, int from)
297 {
298 struct mbuf *mat;
299
300 for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
301 sctp_log_mb(mat, from);
302 }
303 }
304 #endif
305
void
sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
{
#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
	/*
	 * Trace a stream delivery event for 'control', optionally paired
	 * with the queue position it was checked against ('poschk', may be
	 * NULL).  A NULL 'control' is reported and ignored.
	 */
	struct sctp_cwnd_log sctp_clog;

	if (control == NULL) {
		SCTP_PRINTF("Gak log of NULL?\n");
		return;
	}
	/*
	 * Zero first: strlog contains padding inside the bytes read back
	 * through the misc overlay (matches the memset-using siblings).
	 */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.strlog.stcb = control->stcb;
	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
	sctp_clog.x.strlog.strm = control->sinfo_stream;
	if (poschk != NULL) {
		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
	} else {
		sctp_clog.x.strlog.e_tsn = 0;
		sctp_clog.x.strlog.e_sseq = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
		  SCTP_LOG_EVENT_STRM,
		  from,
		  sctp_clog.x.misc.log1,
		  sctp_clog.x.misc.log2,
		  sctp_clog.x.misc.log3,
		  sctp_clog.x.misc.log4);
#endif
}
336
337 void
sctp_log_cwnd(struct sctp_tcb * stcb,struct sctp_nets * net,int augment,uint8_t from)338 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
339 {
340 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
341 struct sctp_cwnd_log sctp_clog;
342
343 sctp_clog.x.cwnd.net = net;
344 if (stcb->asoc.send_queue_cnt > 255)
345 sctp_clog.x.cwnd.cnt_in_send = 255;
346 else
347 sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
348 if (stcb->asoc.stream_queue_cnt > 255)
349 sctp_clog.x.cwnd.cnt_in_str = 255;
350 else
351 sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
352
353 if (net) {
354 sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
355 sctp_clog.x.cwnd.inflight = net->flight_size;
356 sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
357 sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
358 sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
359 }
360 if (SCTP_CWNDLOG_PRESEND == from) {
361 sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
362 }
363 sctp_clog.x.cwnd.cwnd_augment = augment;
364 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
365 SCTP_LOG_EVENT_CWND,
366 from,
367 sctp_clog.x.misc.log1,
368 sctp_clog.x.misc.log2,
369 sctp_clog.x.misc.log3,
370 sctp_clog.x.misc.log4);
371 #endif
372 }
373
374 #ifndef __APPLE__
void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
	/*
	 * Snapshot lock-ownership state for lock-order debugging: which of
	 * the association, endpoint, create, info and socket-buffer locks
	 * the current thread holds.  'inp' and 'stcb' may be NULL; unknown
	 * states are recorded as SCTP_LOCK_UNKNOWN.  'from' tags the site.
	 */
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *) inp->sctp_socket;

	} else {
		sctp_clog.x.lock.sock = (void *) NULL;
	}
	sctp_clog.x.lock.inp = (void *) inp;
	/* Ownership queries only exist on platforms with mtx/rwlock APIs. */
#if (defined(__FreeBSD__) && __FreeBSD_version >= 503000) || (defined(__APPLE__))
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	/* ipi_ep_mtx changed from a mutex to an rwlock after FreeBSD 6.2. */
#if (defined(__FreeBSD__) && __FreeBSD_version <= 602000)
	sctp_clog.x.lock.info_lock = mtx_owned(&SCTP_BASE_INFO(ipi_ep_mtx));
#else
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
#endif
	if (inp && (inp->sctp_socket)) {
		/*
		 * NOTE(review): sock_lock and sockrcvbuf_lock both sample
		 * so_rcv.sb_mtx here — looks intentional (the receive
		 * buffer mutex doubles as the socket lock) but confirm.
		 */
		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
#endif
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
		  SCTP_LOG_LOCK_EVENT,
		  from,
		  sctp_clog.x.misc.log1,
		  sctp_clog.x.misc.log2,
		  sctp_clog.x.misc.log3,
		  sctp_clog.x.misc.log4);
#endif
}
426 #endif
427
428 void
sctp_log_maxburst(struct sctp_tcb * stcb,struct sctp_nets * net,int error,int burst,uint8_t from)429 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
430 {
431 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
432 struct sctp_cwnd_log sctp_clog;
433
434 memset(&sctp_clog, 0, sizeof(sctp_clog));
435 sctp_clog.x.cwnd.net = net;
436 sctp_clog.x.cwnd.cwnd_new_value = error;
437 sctp_clog.x.cwnd.inflight = net->flight_size;
438 sctp_clog.x.cwnd.cwnd_augment = burst;
439 if (stcb->asoc.send_queue_cnt > 255)
440 sctp_clog.x.cwnd.cnt_in_send = 255;
441 else
442 sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
443 if (stcb->asoc.stream_queue_cnt > 255)
444 sctp_clog.x.cwnd.cnt_in_str = 255;
445 else
446 sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
447 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
448 SCTP_LOG_EVENT_MAXBURST,
449 from,
450 sctp_clog.x.misc.log1,
451 sctp_clog.x.misc.log2,
452 sctp_clog.x.misc.log3,
453 sctp_clog.x.misc.log4);
454 #endif
455 }
456
void
sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
{
#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
	/*
	 * Trace a peer-rwnd consumption event: the peer's receive window,
	 * the size being sent against it, and the per-chunk overhead.
	 * 'from' tags the call site.
	 */
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = snd_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = 0;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
		  SCTP_LOG_EVENT_RWND,
		  from,
		  sctp_clog.x.misc.log1,
		  sctp_clog.x.misc.log2,
		  sctp_clog.x.misc.log3,
		  sctp_clog.x.misc.log4);
#endif
}
476
void
sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
{
#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
	/*
	 * Trace a recomputation of the peer's receive window: the previous
	 * value, current flight size, overhead, and the newly advertised
	 * a_rwnd.  'from' tags the call site.
	 */
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = flight_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
		  SCTP_LOG_EVENT_RWND,
		  from,
		  sctp_clog.x.misc.log1,
		  sctp_clog.x.misc.log2,
		  sctp_clog.x.misc.log3,
		  sctp_clog.x.misc.log4);
#endif
}
496
497 #ifdef SCTP_MBCNT_LOGGING
static void
sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
{
#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
	/*
	 * Trace mbuf-count accounting: total queued bytes, the byte change
	 * being booked, total queued mbuf count, and the mbuf-count change.
	 * Only compiled under SCTP_MBCNT_LOGGING; 'from' tags the site.
	 */
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.mbcnt.total_queue_size = total_oq;
	sctp_clog.x.mbcnt.size_change = book;
	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
		  SCTP_LOG_EVENT_MBCNT,
		  from,
		  sctp_clog.x.misc.log1,
		  sctp_clog.x.misc.log2,
		  sctp_clog.x.misc.log3,
		  sctp_clog.x.misc.log4);
#endif
}
517 #endif
518
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
	/*
	 * Generic trace helper: log four arbitrary 32-bit values as a
	 * MISC event; 'from' tags the call site.
	 */
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
		  SCTP_LOG_MISC_EVENT,
		  from,
		  a, b, c, d);
#endif
}
529
void
sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
{
#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
	/*
	 * Trace a socket-wakeup decision: queue depths, the endpoint's
	 * deferred-wakeup flags (bit-packed below) and the send buffer's
	 * flags at the time of the call.  'stcb' must be non-NULL.
	 */
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.wake.stcb = (void *)stcb;
	sctp_clog.x.wake.wake_cnt = wake_cnt;
	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;

	/* Queue counts are clamped to fit the 8-bit log fields. */
	if (stcb->asoc.stream_queue_cnt < 0xff)
		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
	else
		sctp_clog.x.wake.stream_qcnt = 0xff;

	if (stcb->asoc.chunks_on_out_queue < 0xff)
		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
	else
		sctp_clog.x.wake.chunks_on_oque = 0xff;

	sctp_clog.x.wake.sctpflags = 0;
	/* set in the defered mode stuff */
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
		sctp_clog.x.wake.sctpflags |= 1;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
		sctp_clog.x.wake.sctpflags |= 2;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
		sctp_clog.x.wake.sctpflags |= 4;
	/* what about the sb */
	if (stcb->sctp_socket) {
		struct socket *so = stcb->sctp_socket;

		/* Low byte of the send buffer flags. */
		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
	} else {
		/* No socket attached: flags unavailable. */
		sctp_clog.x.wake.sbflags = 0xff;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
		  SCTP_LOG_EVENT_WAKE,
		  from,
		  sctp_clog.x.misc.log1,
		  sctp_clog.x.misc.log2,
		  sctp_clog.x.misc.log3,
		  sctp_clog.x.misc.log4);
#endif
}
577
578 void
sctp_log_block(uint8_t from,struct sctp_association * asoc,size_t sendlen)579 sctp_log_block(uint8_t from, struct sctp_association *asoc, size_t sendlen)
580 {
581 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
582 struct sctp_cwnd_log sctp_clog;
583
584 sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
585 sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
586 sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
587 sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
588 sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
589 sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight/1024);
590 sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
591 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
592 SCTP_LOG_EVENT_BLOCK,
593 from,
594 sctp_clog.x.misc.log1,
595 sctp_clog.x.misc.log2,
596 sctp_clog.x.misc.log3,
597 sctp_clog.x.misc.log4);
598 #endif
599 }
600
int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/*
	 * Statistics-log extraction is not implemented in this build; the
	 * trace records are expected to be recovered with ktrdump instead.
	 * Always reports success.
	 */
	/* May need to fix this if ktrdump does not work */
	return (0);
}
607
608 #ifdef SCTP_AUDITING_ENABLED
/*
 * Circular audit trail of (event, detail) byte pairs, with the next
 * write slot; only compiled under SCTP_AUDITING_ENABLED.
 */
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
static int sctp_audit_indx = 0;
611
static
void
sctp_print_audit_report(void)
{
	int i;
	int cnt;

	/*
	 * Dump the circular audit buffer in chronological order: first
	 * from the current write index to the end (oldest records), then
	 * from the start up to the write index (newest).  The marker
	 * records 0xe0/0x01, 0xf0/xx and 0xc0/0x01 start a fresh output
	 * line; otherwise records wrap every 14 per line.
	 */
	cnt = 0;
	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			SCTP_PRINTF("\n");
			cnt = 0;
		}
		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
			    (uint32_t) sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			SCTP_PRINTF("\n");
	}
	/* Second half of the ring: the most recent entries. */
	for (i = 0; i < sctp_audit_indx; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			SCTP_PRINTF("\n");
			cnt = 0;
		}
		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
			    (uint32_t) sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			SCTP_PRINTF("\n");
	}
	SCTP_PRINTF("\n");
}
660
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
	      struct sctp_nets *net)
{
	/*
	 * Consistency audit of the association's retransmission and
	 * flight-size accounting.  Recomputes the retransmit count, total
	 * flight and per-destination flight sizes from the sent queue,
	 * records any mismatch in the audit ring, corrects the cached
	 * counters in place, and prints a report if anything was off.
	 * 'from' tags the call site; 'inp'/'stcb' may be NULL (recorded
	 * as 0xAF/0x01 or 0xAF/0x02 and the audit is skipped).
	 */
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/* 0xAA marks an audit entry point. */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xA1 records the cached retransmit count before the recount. */
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	/*
	 * Recount from the sent queue: chunks marked for resend, and
	 * chunks still counted in flight (sent below RESEND).
	 */
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		/* 0xAF/0xA1: retransmit-count mismatch; fix the cache. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
			    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA2: total-flight mismatch; fix the cache. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
			    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		/* 0xAF/0xA5: flight chunk-count mismatch; fix the cache. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	/* Cross-check: sum of per-destination flight sizes. */
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA3: per-net flight sums disagree with the total. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
			    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
					    (void *)lnet, lnet->flight_size,
					    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}
790
791 void
sctp_audit_log(uint8_t ev,uint8_t fd)792 sctp_audit_log(uint8_t ev, uint8_t fd)
793 {
794
795 sctp_audit_data[sctp_audit_indx][0] = ev;
796 sctp_audit_data[sctp_audit_indx][1] = fd;
797 sctp_audit_indx++;
798 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
799 sctp_audit_indx = 0;
800 }
801 }
802
803 #endif
804
805 /*
806 * sctp_stop_timers_for_shutdown() should be called
807 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
808 * state to make sure that all timers are stopped.
809 */
810 void
sctp_stop_timers_for_shutdown(struct sctp_tcb * stcb)811 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
812 {
813 struct sctp_association *asoc;
814 struct sctp_nets *net;
815
816 asoc = &stcb->asoc;
817
818 (void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
819 (void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
820 (void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
821 (void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
822 (void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
823 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
824 (void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
825 (void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
826 }
827 }
828
829 /*
830 * a list of sizes based on typical mtu's, used only if next hop size not
831 * returned.
832 */
833 static uint32_t sctp_mtu_sizes[] = {
834 68,
835 296,
836 508,
837 512,
838 544,
839 576,
840 1006,
841 1492,
842 1500,
843 1536,
844 2002,
845 2048,
846 4352,
847 4464,
848 8166,
849 17914,
850 32000,
851 65535
852 };
853
854 /*
855 * Return the largest MTU smaller than val. If there is no
856 * entry, just return val.
857 */
858 uint32_t
sctp_get_prev_mtu(uint32_t val)859 sctp_get_prev_mtu(uint32_t val)
860 {
861 uint32_t i;
862
863 if (val <= sctp_mtu_sizes[0]) {
864 return (val);
865 }
866 for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
867 if (val <= sctp_mtu_sizes[i]) {
868 break;
869 }
870 }
871 return (sctp_mtu_sizes[i - 1]);
872 }
873
874 /*
875 * Return the smallest MTU larger than val. If there is no
876 * entry, just return val.
877 */
878 uint32_t
sctp_get_next_mtu(uint32_t val)879 sctp_get_next_mtu(uint32_t val)
880 {
881 /* select another MTU that is just bigger than this one */
882 uint32_t i;
883
884 for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
885 if (val < sctp_mtu_sizes[i]) {
886 return (sctp_mtu_sizes[i]);
887 }
888 }
889 return (val);
890 }
891
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	/* Reset the hand-out cursor, then regenerate the store by HMACing
	 * the seed material with the monotonically increasing counter. */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
	    sizeof(m->random_counter), (uint8_t *)m->random_store);
	m->random_counter++;
}
910
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	/* Debug hook: a nonzero seed yields a simple increasing sequence. */
	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
 retry:
	/*
	 * Lock-free claim of a 4-byte slot in the random store: advance
	 * store_at via compare-and-set, retrying on contention.  The -3
	 * keeps the 4-byte read below inside the store.
	 */
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	if (new_store >= (SCTP_SIGNATURE_SIZE-3)) {
		new_store = 0;
	}
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	/*
	 * NOTE(review): type-punned (and possibly unaligned) read of the
	 * byte store — assumed safe on all supported platforms; confirm.
	 */
	p = &inp->random_store[store_at];
	xp = (uint32_t *)p;
	x = *xp;
	return (x);
}
948
949 uint32_t
sctp_select_a_tag(struct sctp_inpcb * inp,uint16_t lport,uint16_t rport,int check)950 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
951 {
952 uint32_t x;
953 struct timeval now;
954
955 if (check) {
956 (void)SCTP_GETTIME_TIMEVAL(&now);
957 }
958 for (;;) {
959 x = sctp_select_initial_TSN(&inp->sctp_ep);
960 if (x == 0) {
961 /* we never use 0 */
962 continue;
963 }
964 if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
965 break;
966 }
967 }
968 return (x);
969 }
970
971 int32_t
sctp_map_assoc_state(int kernel_state)972 sctp_map_assoc_state(int kernel_state)
973 {
974 int32_t user_state;
975
976 if (kernel_state & SCTP_STATE_WAS_ABORTED) {
977 user_state = SCTP_CLOSED;
978 } else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
979 user_state = SCTP_SHUTDOWN_PENDING;
980 } else {
981 switch (kernel_state & SCTP_STATE_MASK) {
982 case SCTP_STATE_EMPTY:
983 user_state = SCTP_CLOSED;
984 break;
985 case SCTP_STATE_INUSE:
986 user_state = SCTP_CLOSED;
987 break;
988 case SCTP_STATE_COOKIE_WAIT:
989 user_state = SCTP_COOKIE_WAIT;
990 break;
991 case SCTP_STATE_COOKIE_ECHOED:
992 user_state = SCTP_COOKIE_ECHOED;
993 break;
994 case SCTP_STATE_OPEN:
995 user_state = SCTP_ESTABLISHED;
996 break;
997 case SCTP_STATE_SHUTDOWN_SENT:
998 user_state = SCTP_SHUTDOWN_SENT;
999 break;
1000 case SCTP_STATE_SHUTDOWN_RECEIVED:
1001 user_state = SCTP_SHUTDOWN_RECEIVED;
1002 break;
1003 case SCTP_STATE_SHUTDOWN_ACK_SENT:
1004 user_state = SCTP_SHUTDOWN_ACK_SENT;
1005 break;
1006 default:
1007 user_state = SCTP_CLOSED;
1008 break;
1009 }
1010 }
1011 return (user_state);
1012 }
1013
1014 int
sctp_init_asoc(struct sctp_inpcb * inp,struct sctp_tcb * stcb,uint32_t override_tag,uint32_t vrf_id,uint16_t o_strms)1015 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1016 uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
1017 {
1018 struct sctp_association *asoc;
1019 /*
1020 * Anything set to zero is taken care of by the allocation routine's
1021 * bzero
1022 */
1023
1024 /*
1025 * Up front select what scoping to apply on addresses I tell my peer
1026 * Not sure what to do with these right now, we will need to come up
1027 * with a way to set them. We may need to pass them through from the
1028 * caller in the sctp_aloc_assoc() function.
1029 */
1030 int i;
1031 #if defined(SCTP_DETAILED_STR_STATS)
1032 int j;
1033 #endif
1034
1035 asoc = &stcb->asoc;
1036 /* init all variables to a known value. */
1037 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
1038 asoc->max_burst = inp->sctp_ep.max_burst;
1039 asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
1040 asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
1041 asoc->cookie_life = inp->sctp_ep.def_cookie_life;
1042 asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
1043 asoc->ecn_supported = inp->ecn_supported;
1044 asoc->prsctp_supported = inp->prsctp_supported;
1045 asoc->idata_supported = inp->idata_supported;
1046 asoc->auth_supported = inp->auth_supported;
1047 asoc->asconf_supported = inp->asconf_supported;
1048 asoc->reconfig_supported = inp->reconfig_supported;
1049 asoc->nrsack_supported = inp->nrsack_supported;
1050 asoc->pktdrop_supported = inp->pktdrop_supported;
1051 asoc->idata_supported = inp->idata_supported;
1052 asoc->sctp_cmt_pf = (uint8_t)0;
1053 asoc->sctp_frag_point = inp->sctp_frag_point;
1054 asoc->sctp_features = inp->sctp_features;
1055 asoc->default_dscp = inp->sctp_ep.default_dscp;
1056 asoc->max_cwnd = inp->max_cwnd;
1057 #ifdef INET6
1058 if (inp->sctp_ep.default_flowlabel) {
1059 asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
1060 } else {
1061 if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
1062 asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
1063 asoc->default_flowlabel &= 0x000fffff;
1064 asoc->default_flowlabel |= 0x80000000;
1065 } else {
1066 asoc->default_flowlabel = 0;
1067 }
1068 }
1069 #endif
1070 asoc->sb_send_resv = 0;
1071 if (override_tag) {
1072 asoc->my_vtag = override_tag;
1073 } else {
1074 asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
1075 }
1076 /* Get the nonce tags */
1077 asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1078 asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1079 asoc->vrf_id = vrf_id;
1080
1081 #ifdef SCTP_ASOCLOG_OF_TSNS
1082 asoc->tsn_in_at = 0;
1083 asoc->tsn_out_at = 0;
1084 asoc->tsn_in_wrapped = 0;
1085 asoc->tsn_out_wrapped = 0;
1086 asoc->cumack_log_at = 0;
1087 asoc->cumack_log_atsnt = 0;
1088 #endif
1089 #ifdef SCTP_FS_SPEC_LOG
1090 asoc->fs_index = 0;
1091 #endif
1092 asoc->refcnt = 0;
1093 asoc->assoc_up_sent = 0;
1094 asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
1095 sctp_select_initial_TSN(&inp->sctp_ep);
1096 asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1097 /* we are optimisitic here */
1098 asoc->peer_supports_nat = 0;
1099 asoc->sent_queue_retran_cnt = 0;
1100
1101 /* for CMT */
1102 asoc->last_net_cmt_send_started = NULL;
1103
1104 /* This will need to be adjusted */
1105 asoc->last_acked_seq = asoc->init_seq_number - 1;
1106 asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1107 asoc->asconf_seq_in = asoc->last_acked_seq;
1108
1109 /* here we are different, we hold the next one we expect */
1110 asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
1111
1112 asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
1113 asoc->initial_rto = inp->sctp_ep.initial_rto;
1114
1115 asoc->default_mtu = inp->sctp_ep.default_mtu;
1116 asoc->max_init_times = inp->sctp_ep.max_init_times;
1117 asoc->max_send_times = inp->sctp_ep.max_send_times;
1118 asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1119 asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1120 asoc->free_chunk_cnt = 0;
1121
1122 asoc->iam_blocking = 0;
1123 asoc->context = inp->sctp_context;
1124 asoc->local_strreset_support = inp->local_strreset_support;
1125 asoc->def_send = inp->def_send;
1126 asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1127 asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1128 asoc->pr_sctp_cnt = 0;
1129 asoc->total_output_queue_size = 0;
1130
1131 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1132 asoc->scope.ipv6_addr_legal = 1;
1133 if (SCTP_IPV6_V6ONLY(inp) == 0) {
1134 asoc->scope.ipv4_addr_legal = 1;
1135 } else {
1136 asoc->scope.ipv4_addr_legal = 0;
1137 }
1138 #if defined(__Userspace__)
1139 asoc->scope.conn_addr_legal = 0;
1140 #endif
1141 } else {
1142 asoc->scope.ipv6_addr_legal = 0;
1143 #if defined(__Userspace__)
1144 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) {
1145 asoc->scope.conn_addr_legal = 1;
1146 asoc->scope.ipv4_addr_legal = 0;
1147 } else {
1148 asoc->scope.conn_addr_legal = 0;
1149 asoc->scope.ipv4_addr_legal = 1;
1150 }
1151 #else
1152 asoc->scope.ipv4_addr_legal = 1;
1153 #endif
1154 }
1155
1156 asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1157 asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1158
1159 asoc->smallest_mtu = inp->sctp_frag_point;
1160 asoc->minrto = inp->sctp_ep.sctp_minrto;
1161 asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1162
1163 asoc->stream_locked_on = 0;
1164 asoc->ecn_echo_cnt_onq = 0;
1165 asoc->stream_locked = 0;
1166
1167 asoc->send_sack = 1;
1168
1169 LIST_INIT(&asoc->sctp_restricted_addrs);
1170
1171 TAILQ_INIT(&asoc->nets);
1172 TAILQ_INIT(&asoc->pending_reply_queue);
1173 TAILQ_INIT(&asoc->asconf_ack_sent);
1174 /* Setup to fill the hb random cache at first HB */
1175 asoc->hb_random_idx = 4;
1176
1177 asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1178
1179 stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1180 stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1181
1182 stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1183 stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1184
1185 /*
1186 * Now the stream parameters, here we allocate space for all streams
1187 * that we request by default.
1188 */
1189 asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1190 o_strms;
1191 SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1192 asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1193 SCTP_M_STRMO);
1194 if (asoc->strmout == NULL) {
1195 /* big trouble no memory */
1196 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1197 return (ENOMEM);
1198 }
1199 for (i = 0; i < asoc->streamoutcnt; i++) {
1200 /*
1201 * inbound side must be set to 0xffff, also NOTE when we get
1202 * the INIT-ACK back (for INIT sender) we MUST reduce the
1203 * count (streamoutcnt) but first check if we sent to any of
1204 * the upper streams that were dropped (if some were). Those
1205 * that were dropped must be notified to the upper layer as
1206 * failed to send.
1207 */
1208 asoc->strmout[i].next_mid_ordered = 0;
1209 asoc->strmout[i].next_mid_unordered = 0;
1210 TAILQ_INIT(&asoc->strmout[i].outqueue);
1211 asoc->strmout[i].chunks_on_queues = 0;
1212 #if defined(SCTP_DETAILED_STR_STATS)
1213 for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1214 asoc->strmout[i].abandoned_sent[j] = 0;
1215 asoc->strmout[i].abandoned_unsent[j] = 0;
1216 }
1217 #else
1218 asoc->strmout[i].abandoned_sent[0] = 0;
1219 asoc->strmout[i].abandoned_unsent[0] = 0;
1220 #endif
1221 asoc->strmout[i].sid = i;
1222 asoc->strmout[i].last_msg_incomplete = 0;
1223 asoc->strmout[i].state = SCTP_STREAM_OPENING;
1224 asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
1225 }
1226 asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1227
1228 /* Now the mapping array */
1229 asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1230 SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1231 SCTP_M_MAP);
1232 if (asoc->mapping_array == NULL) {
1233 SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1234 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1235 return (ENOMEM);
1236 }
1237 memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1238 SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1239 SCTP_M_MAP);
1240 if (asoc->nr_mapping_array == NULL) {
1241 SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1242 SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1243 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1244 return (ENOMEM);
1245 }
1246 memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1247
1248 /* Now the init of the other outqueues */
1249 TAILQ_INIT(&asoc->free_chunks);
1250 TAILQ_INIT(&asoc->control_send_queue);
1251 TAILQ_INIT(&asoc->asconf_send_queue);
1252 TAILQ_INIT(&asoc->send_queue);
1253 TAILQ_INIT(&asoc->sent_queue);
1254 TAILQ_INIT(&asoc->resetHead);
1255 asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1256 TAILQ_INIT(&asoc->asconf_queue);
1257 /* authentication fields */
1258 asoc->authinfo.random = NULL;
1259 asoc->authinfo.active_keyid = 0;
1260 asoc->authinfo.assoc_key = NULL;
1261 asoc->authinfo.assoc_keyid = 0;
1262 asoc->authinfo.recv_key = NULL;
1263 asoc->authinfo.recv_keyid = 0;
1264 LIST_INIT(&asoc->shared_keys);
1265 asoc->marked_retrans = 0;
1266 asoc->port = inp->sctp_ep.port;
1267 asoc->timoinit = 0;
1268 asoc->timodata = 0;
1269 asoc->timosack = 0;
1270 asoc->timoshutdown = 0;
1271 asoc->timoheartbeat = 0;
1272 asoc->timocookie = 0;
1273 asoc->timoshutdownack = 0;
1274 (void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1275 asoc->discontinuity_time = asoc->start_time;
1276 for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1277 asoc->abandoned_unsent[i] = 0;
1278 asoc->abandoned_sent[i] = 0;
1279 }
1280 /* sa_ignore MEMLEAK {memory is put in the assoc mapping array and freed later when
1281 * the association is freed.
1282 */
1283 return (0);
1284 }
1285
1286 void
sctp_print_mapping_array(struct sctp_association * asoc)1287 sctp_print_mapping_array(struct sctp_association *asoc)
1288 {
1289 unsigned int i, limit;
1290
1291 SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1292 asoc->mapping_array_size,
1293 asoc->mapping_array_base_tsn,
1294 asoc->cumulative_tsn,
1295 asoc->highest_tsn_inside_map,
1296 asoc->highest_tsn_inside_nr_map);
1297 for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1298 if (asoc->mapping_array[limit - 1] != 0) {
1299 break;
1300 }
1301 }
1302 SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1303 for (i = 0; i < limit; i++) {
1304 SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1305 }
1306 if (limit % 16)
1307 SCTP_PRINTF("\n");
1308 for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1309 if (asoc->nr_mapping_array[limit - 1]) {
1310 break;
1311 }
1312 }
1313 SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1314 for (i = 0; i < limit; i++) {
1315 SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ': '\n');
1316 }
1317 if (limit % 16)
1318 SCTP_PRINTF("\n");
1319 }
1320
1321 int
sctp_expand_mapping_array(struct sctp_association * asoc,uint32_t needed)1322 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1323 {
1324 /* mapping array needs to grow */
1325 uint8_t *new_array1, *new_array2;
1326 uint32_t new_size;
1327
1328 new_size = asoc->mapping_array_size + ((needed+7)/8 + SCTP_MAPPING_ARRAY_INCR);
1329 SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1330 SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1331 if ((new_array1 == NULL) || (new_array2 == NULL)) {
1332 /* can't get more, forget it */
1333 SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1334 if (new_array1) {
1335 SCTP_FREE(new_array1, SCTP_M_MAP);
1336 }
1337 if (new_array2) {
1338 SCTP_FREE(new_array2, SCTP_M_MAP);
1339 }
1340 return (-1);
1341 }
1342 memset(new_array1, 0, new_size);
1343 memset(new_array2, 0, new_size);
1344 memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1345 memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1346 SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1347 SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1348 asoc->mapping_array = new_array1;
1349 asoc->nr_mapping_array = new_array2;
1350 asoc->mapping_array_size = new_size;
1351 return (0);
1352 }
1353
1354
/*
 * Walk endpoints (and their associations) on behalf of a queued iterator,
 * invoking the iterator's callbacks.  Lock order here is
 * INP_INFO (read) -> ITERATOR -> INP (read) -> TCB; the function
 * periodically drops and re-takes the outer locks so other threads can
 * make progress, and it frees 'it' itself when the walk completes.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	sctp_it_ctl.cur_it = it;
	if (it->inp) {
		/* Drop the reference the queuing side took on the start inp. */
		SCTP_INP_RLOCK(it->inp);
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		/* Common exit: release locks, run the at-end callback, free it. */
		sctp_it_ctl.cur_it = NULL;
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	if (first_in) {
		/* The start inp is already read-locked from above. */
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	       ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	       ((it->pcb_features) &&
	       ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		/* Hand-over-hand: advance to the next inp before unlocking. */
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp)(it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end)(it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/* Hold the stcb alive via refcnt while all locks are dropped. */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
#if !defined(__FreeBSD__)
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_MUST_EXIT) {
					goto done_with_iterator;
				}
#endif
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			/* Resume: re-lock, drop the temporary references. */
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}

		/* run function on this one */
		(*it->function_assoc)(it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end)(it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1511
1512 void
sctp_iterator_worker(void)1513 sctp_iterator_worker(void)
1514 {
1515 struct sctp_iterator *it, *nit;
1516
1517 /* This function is called with the WQ lock in place */
1518
1519 sctp_it_ctl.iterator_running = 1;
1520 TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1521 /* now lets work on this one */
1522 TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1523 SCTP_IPI_ITERATOR_WQ_UNLOCK();
1524 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1525 CURVNET_SET(it->vn);
1526 #endif
1527 sctp_iterator_work(it);
1528 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1529 CURVNET_RESTORE();
1530 #endif
1531 SCTP_IPI_ITERATOR_WQ_LOCK();
1532 #if !defined(__FreeBSD__)
1533 if (sctp_it_ctl.iterator_flags & SCTP_ITERATOR_MUST_EXIT) {
1534 break;
1535 }
1536 #endif
1537 /*sa_ignore FREED_MEMORY*/
1538 }
1539 sctp_it_ctl.iterator_running = 0;
1540 return;
1541 }
1542
1543
1544 static void
sctp_handle_addr_wq(void)1545 sctp_handle_addr_wq(void)
1546 {
1547 /* deal with the ADDR wq from the rtsock calls */
1548 struct sctp_laddr *wi, *nwi;
1549 struct sctp_asconf_iterator *asc;
1550
1551 SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1552 sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1553 if (asc == NULL) {
1554 /* Try later, no memory */
1555 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1556 (struct sctp_inpcb *)NULL,
1557 (struct sctp_tcb *)NULL,
1558 (struct sctp_nets *)NULL);
1559 return;
1560 }
1561 LIST_INIT(&asc->list_of_work);
1562 asc->cnt = 0;
1563
1564 LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1565 LIST_REMOVE(wi, sctp_nxt_addr);
1566 LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1567 asc->cnt++;
1568 }
1569
1570 if (asc->cnt == 0) {
1571 SCTP_FREE(asc, SCTP_M_ASC_IT);
1572 } else {
1573 int ret;
1574
1575 ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
1576 sctp_asconf_iterator_stcb,
1577 NULL, /* No ep end for boundall */
1578 SCTP_PCB_FLAGS_BOUNDALL,
1579 SCTP_PCB_ANY_FEATURES,
1580 SCTP_ASOC_ANY_STATE,
1581 (void *)asc, 0,
1582 sctp_asconf_iterator_end, NULL, 0);
1583 if (ret) {
1584 SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
1585 /* Freeing if we are stopping or put back on the addr_wq. */
1586 if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
1587 sctp_asconf_iterator_end(asc, 0);
1588 } else {
1589 LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
1590 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
1591 }
1592 SCTP_FREE(asc, SCTP_M_ASC_IT);
1593 }
1594 }
1595 }
1596 }
1597
1598 void
sctp_timeout_handler(void * t)1599 sctp_timeout_handler(void *t)
1600 {
1601 struct sctp_inpcb *inp;
1602 struct sctp_tcb *stcb;
1603 struct sctp_nets *net;
1604 struct sctp_timer *tmr;
1605 struct mbuf *op_err;
1606 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1607 struct socket *so;
1608 #endif
1609 int did_output;
1610 int type;
1611
1612 tmr = (struct sctp_timer *)t;
1613 inp = (struct sctp_inpcb *)tmr->ep;
1614 stcb = (struct sctp_tcb *)tmr->tcb;
1615 net = (struct sctp_nets *)tmr->net;
1616 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1617 CURVNET_SET((struct vnet *)tmr->vnet);
1618 #endif
1619 did_output = 1;
1620
1621 #ifdef SCTP_AUDITING_ENABLED
1622 sctp_audit_log(0xF0, (uint8_t) tmr->type);
1623 sctp_auditing(3, inp, stcb, net);
1624 #endif
1625
1626 /* sanity checks... */
1627 if (tmr->self != (void *)tmr) {
1628 /*
1629 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
1630 * (void *)tmr);
1631 */
1632 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1633 CURVNET_RESTORE();
1634 #endif
1635 return;
1636 }
1637 tmr->stopped_from = 0xa001;
1638 if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
1639 /*
1640 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
1641 * tmr->type);
1642 */
1643 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1644 CURVNET_RESTORE();
1645 #endif
1646 return;
1647 }
1648 tmr->stopped_from = 0xa002;
1649 if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
1650 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1651 CURVNET_RESTORE();
1652 #endif
1653 return;
1654 }
1655 /* if this is an iterator timeout, get the struct and clear inp */
1656 tmr->stopped_from = 0xa003;
1657 if (inp) {
1658 SCTP_INP_INCR_REF(inp);
1659 if ((inp->sctp_socket == NULL) &&
1660 ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
1661 (tmr->type != SCTP_TIMER_TYPE_INIT) &&
1662 (tmr->type != SCTP_TIMER_TYPE_SEND) &&
1663 (tmr->type != SCTP_TIMER_TYPE_RECV) &&
1664 (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
1665 (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
1666 (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
1667 (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
1668 (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))) {
1669 SCTP_INP_DECR_REF(inp);
1670 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1671 CURVNET_RESTORE();
1672 #endif
1673 return;
1674 }
1675 }
1676 tmr->stopped_from = 0xa004;
1677 if (stcb) {
1678 atomic_add_int(&stcb->asoc.refcnt, 1);
1679 if (stcb->asoc.state == 0) {
1680 atomic_add_int(&stcb->asoc.refcnt, -1);
1681 if (inp) {
1682 SCTP_INP_DECR_REF(inp);
1683 }
1684 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1685 CURVNET_RESTORE();
1686 #endif
1687 return;
1688 }
1689 }
1690 type = tmr->type;
1691 tmr->stopped_from = 0xa005;
1692 SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", type);
1693 if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1694 if (inp) {
1695 SCTP_INP_DECR_REF(inp);
1696 }
1697 if (stcb) {
1698 atomic_add_int(&stcb->asoc.refcnt, -1);
1699 }
1700 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1701 CURVNET_RESTORE();
1702 #endif
1703 return;
1704 }
1705 tmr->stopped_from = 0xa006;
1706
1707 if (stcb) {
1708 SCTP_TCB_LOCK(stcb);
1709 atomic_add_int(&stcb->asoc.refcnt, -1);
1710 if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
1711 ((stcb->asoc.state == 0) ||
1712 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1713 SCTP_TCB_UNLOCK(stcb);
1714 if (inp) {
1715 SCTP_INP_DECR_REF(inp);
1716 }
1717 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1718 CURVNET_RESTORE();
1719 #endif
1720 return;
1721 }
1722 } else if (inp != NULL) {
1723 if (type != SCTP_TIMER_TYPE_INPKILL) {
1724 SCTP_INP_WLOCK(inp);
1725 }
1726 } else {
1727 SCTP_WQ_ADDR_LOCK();
1728 }
1729 /* record in stopped what t-o occurred */
1730 tmr->stopped_from = type;
1731
1732 /* mark as being serviced now */
1733 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1734 /*
1735 * Callout has been rescheduled.
1736 */
1737 goto get_out;
1738 }
1739 if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1740 /*
1741 * Not active, so no action.
1742 */
1743 goto get_out;
1744 }
1745 SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1746
1747 /* call the handler for the appropriate timer type */
1748 switch (type) {
1749 case SCTP_TIMER_TYPE_ADDR_WQ:
1750 sctp_handle_addr_wq();
1751 break;
1752 case SCTP_TIMER_TYPE_SEND:
1753 if ((stcb == NULL) || (inp == NULL)) {
1754 break;
1755 }
1756 SCTP_STAT_INCR(sctps_timodata);
1757 stcb->asoc.timodata++;
1758 stcb->asoc.num_send_timers_up--;
1759 if (stcb->asoc.num_send_timers_up < 0) {
1760 stcb->asoc.num_send_timers_up = 0;
1761 }
1762 SCTP_TCB_LOCK_ASSERT(stcb);
1763 if (sctp_t3rxt_timer(inp, stcb, net)) {
1764 /* no need to unlock on tcb its gone */
1765
1766 goto out_decr;
1767 }
1768 SCTP_TCB_LOCK_ASSERT(stcb);
1769 #ifdef SCTP_AUDITING_ENABLED
1770 sctp_auditing(4, inp, stcb, net);
1771 #endif
1772 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1773 if ((stcb->asoc.num_send_timers_up == 0) &&
1774 (stcb->asoc.sent_queue_cnt > 0)) {
1775 struct sctp_tmit_chunk *chk;
1776
1777 /*
1778 * safeguard. If there on some on the sent queue
1779 * somewhere but no timers running something is
1780 * wrong... so we start a timer on the first chunk
1781 * on the send queue on whatever net it is sent to.
1782 */
1783 chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
1784 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
1785 chk->whoTo);
1786 }
1787 break;
1788 case SCTP_TIMER_TYPE_INIT:
1789 if ((stcb == NULL) || (inp == NULL)) {
1790 break;
1791 }
1792 SCTP_STAT_INCR(sctps_timoinit);
1793 stcb->asoc.timoinit++;
1794 if (sctp_t1init_timer(inp, stcb, net)) {
1795 /* no need to unlock on tcb its gone */
1796 goto out_decr;
1797 }
1798 /* We do output but not here */
1799 did_output = 0;
1800 break;
1801 case SCTP_TIMER_TYPE_RECV:
1802 if ((stcb == NULL) || (inp == NULL)) {
1803 break;
1804 }
1805 SCTP_STAT_INCR(sctps_timosack);
1806 stcb->asoc.timosack++;
1807 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
1808 #ifdef SCTP_AUDITING_ENABLED
1809 sctp_auditing(4, inp, stcb, net);
1810 #endif
1811 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1812 break;
1813 case SCTP_TIMER_TYPE_SHUTDOWN:
1814 if ((stcb == NULL) || (inp == NULL)) {
1815 break;
1816 }
1817 if (sctp_shutdown_timer(inp, stcb, net)) {
1818 /* no need to unlock on tcb its gone */
1819 goto out_decr;
1820 }
1821 SCTP_STAT_INCR(sctps_timoshutdown);
1822 stcb->asoc.timoshutdown++;
1823 #ifdef SCTP_AUDITING_ENABLED
1824 sctp_auditing(4, inp, stcb, net);
1825 #endif
1826 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1827 break;
1828 case SCTP_TIMER_TYPE_HEARTBEAT:
1829 if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
1830 break;
1831 }
1832 SCTP_STAT_INCR(sctps_timoheartbeat);
1833 stcb->asoc.timoheartbeat++;
1834 if (sctp_heartbeat_timer(inp, stcb, net)) {
1835 /* no need to unlock on tcb its gone */
1836 goto out_decr;
1837 }
1838 #ifdef SCTP_AUDITING_ENABLED
1839 sctp_auditing(4, inp, stcb, net);
1840 #endif
1841 if (!(net->dest_state & SCTP_ADDR_NOHB)) {
1842 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
1843 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
1844 }
1845 break;
1846 case SCTP_TIMER_TYPE_COOKIE:
1847 if ((stcb == NULL) || (inp == NULL)) {
1848 break;
1849 }
1850
1851 if (sctp_cookie_timer(inp, stcb, net)) {
1852 /* no need to unlock on tcb its gone */
1853 goto out_decr;
1854 }
1855 SCTP_STAT_INCR(sctps_timocookie);
1856 stcb->asoc.timocookie++;
1857 #ifdef SCTP_AUDITING_ENABLED
1858 sctp_auditing(4, inp, stcb, net);
1859 #endif
1860 /*
1861 * We consider T3 and Cookie timer pretty much the same with
1862 * respect to where from in chunk_output.
1863 */
1864 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1865 break;
1866 case SCTP_TIMER_TYPE_NEWCOOKIE:
1867 {
1868 struct timeval tv;
1869 int i, secret;
1870 if (inp == NULL) {
1871 break;
1872 }
1873 SCTP_STAT_INCR(sctps_timosecret);
1874 (void)SCTP_GETTIME_TIMEVAL(&tv);
1875 inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1876 inp->sctp_ep.last_secret_number =
1877 inp->sctp_ep.current_secret_number;
1878 inp->sctp_ep.current_secret_number++;
1879 if (inp->sctp_ep.current_secret_number >=
1880 SCTP_HOW_MANY_SECRETS) {
1881 inp->sctp_ep.current_secret_number = 0;
1882 }
1883 secret = (int)inp->sctp_ep.current_secret_number;
1884 for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1885 inp->sctp_ep.secret_key[secret][i] =
1886 sctp_select_initial_TSN(&inp->sctp_ep);
1887 }
1888 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
1889 }
1890 did_output = 0;
1891 break;
1892 case SCTP_TIMER_TYPE_PATHMTURAISE:
1893 if ((stcb == NULL) || (inp == NULL)) {
1894 break;
1895 }
1896 SCTP_STAT_INCR(sctps_timopathmtu);
1897 sctp_pathmtu_timer(inp, stcb, net);
1898 did_output = 0;
1899 break;
1900 case SCTP_TIMER_TYPE_SHUTDOWNACK:
1901 if ((stcb == NULL) || (inp == NULL)) {
1902 break;
1903 }
1904 if (sctp_shutdownack_timer(inp, stcb, net)) {
1905 /* no need to unlock on tcb its gone */
1906 goto out_decr;
1907 }
1908 SCTP_STAT_INCR(sctps_timoshutdownack);
1909 stcb->asoc.timoshutdownack++;
1910 #ifdef SCTP_AUDITING_ENABLED
1911 sctp_auditing(4, inp, stcb, net);
1912 #endif
1913 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
1914 break;
1915 case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1916 if ((stcb == NULL) || (inp == NULL)) {
1917 break;
1918 }
1919 SCTP_STAT_INCR(sctps_timoshutdownguard);
1920 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
1921 "Shutdown guard timer expired");
1922 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
1923 /* no need to unlock on tcb its gone */
1924 goto out_decr;
1925
1926 case SCTP_TIMER_TYPE_STRRESET:
1927 if ((stcb == NULL) || (inp == NULL)) {
1928 break;
1929 }
1930 if (sctp_strreset_timer(inp, stcb, net)) {
1931 /* no need to unlock on tcb its gone */
1932 goto out_decr;
1933 }
1934 SCTP_STAT_INCR(sctps_timostrmrst);
1935 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
1936 break;
1937 case SCTP_TIMER_TYPE_ASCONF:
1938 if ((stcb == NULL) || (inp == NULL)) {
1939 break;
1940 }
1941 if (sctp_asconf_timer(inp, stcb, net)) {
1942 /* no need to unlock on tcb its gone */
1943 goto out_decr;
1944 }
1945 SCTP_STAT_INCR(sctps_timoasconf);
1946 #ifdef SCTP_AUDITING_ENABLED
1947 sctp_auditing(4, inp, stcb, net);
1948 #endif
1949 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
1950 break;
1951 case SCTP_TIMER_TYPE_PRIM_DELETED:
1952 if ((stcb == NULL) || (inp == NULL)) {
1953 break;
1954 }
1955 sctp_delete_prim_timer(inp, stcb, net);
1956 SCTP_STAT_INCR(sctps_timodelprim);
1957 break;
1958
1959 case SCTP_TIMER_TYPE_AUTOCLOSE:
1960 if ((stcb == NULL) || (inp == NULL)) {
1961 break;
1962 }
1963 SCTP_STAT_INCR(sctps_timoautoclose);
1964 sctp_autoclose_timer(inp, stcb, net);
1965 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
1966 did_output = 0;
1967 break;
1968 case SCTP_TIMER_TYPE_ASOCKILL:
1969 if ((stcb == NULL) || (inp == NULL)) {
1970 break;
1971 }
1972 SCTP_STAT_INCR(sctps_timoassockill);
1973 /* Can we free it yet? */
1974 SCTP_INP_DECR_REF(inp);
1975 sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
1976 SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
1977 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1978 so = SCTP_INP_SO(inp);
1979 atomic_add_int(&stcb->asoc.refcnt, 1);
1980 SCTP_TCB_UNLOCK(stcb);
1981 SCTP_SOCKET_LOCK(so, 1);
1982 SCTP_TCB_LOCK(stcb);
1983 atomic_subtract_int(&stcb->asoc.refcnt, 1);
1984 #endif
1985 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
1986 SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
1987 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1988 SCTP_SOCKET_UNLOCK(so, 1);
1989 #endif
1990 /*
1991 * free asoc, always unlocks (or destroy's) so prevent
1992 * duplicate unlock or unlock of a free mtx :-0
1993 */
1994 stcb = NULL;
1995 goto out_no_decr;
1996 case SCTP_TIMER_TYPE_INPKILL:
1997 SCTP_STAT_INCR(sctps_timoinpkill);
1998 if (inp == NULL) {
1999 break;
2000 }
2001 /*
2002 * special case, take away our increment since WE are the
2003 * killer
2004 */
2005 SCTP_INP_DECR_REF(inp);
2006 sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
2007 SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
2008 #if defined(__APPLE__)
2009 SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 1);
2010 #endif
2011 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
2012 SCTP_CALLED_FROM_INPKILL_TIMER);
2013 #if defined(__APPLE__)
2014 SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 1);
2015 #endif
2016 inp = NULL;
2017 goto out_no_decr;
2018 default:
2019 SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
2020 type);
2021 break;
2022 }
2023 #ifdef SCTP_AUDITING_ENABLED
2024 sctp_audit_log(0xF1, (uint8_t) type);
2025 if (inp)
2026 sctp_auditing(5, inp, stcb, net);
2027 #endif
2028 if ((did_output) && stcb) {
2029 /*
2030 * Now we need to clean up the control chunk chain if an
2031 * ECNE is on it. It must be marked as UNSENT again so next
2032 * call will continue to send it until such time that we get
2033 * a CWR, to remove it. It is, however, less likely that we
2034 * will find a ecn echo on the chain though.
2035 */
2036 sctp_fix_ecn_echo(&stcb->asoc);
2037 }
2038 get_out:
2039 if (stcb) {
2040 SCTP_TCB_UNLOCK(stcb);
2041 } else if (inp != NULL) {
2042 SCTP_INP_WUNLOCK(inp);
2043 } else {
2044 SCTP_WQ_ADDR_UNLOCK();
2045 }
2046
2047 out_decr:
2048 if (inp) {
2049 SCTP_INP_DECR_REF(inp);
2050 }
2051
2052 out_no_decr:
2053 SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type = %d)\n", type);
2054 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
2055 CURVNET_RESTORE();
2056 #endif
2057 }
2058
/*
 * Arm the timer of type 't_type' for the given endpoint/association/
 * destination.  Each case below selects the sctp_timer structure that
 * backs this timer type and computes the timeout in ticks, after which
 * the OS timer is started with sctp_timeout_handler as the callback.
 * If a timer of this type is already pending it is left running
 * unchanged.  The TCB lock must be held whenever 'stcb' is non-NULL.
 */
void
sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	uint32_t to_ticks;
	struct sctp_timer *tmr;

	/* Only the address work-queue timer may run without an endpoint. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ADDR_WQ:
		/* Only 1 tick away :-) */
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		to_ticks = SCTP_ADDRESS_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* Here we use the RTO timer */
		{
			int rto_val;

			if ((stcb == NULL) || (net == NULL)) {
				return;
			}
			tmr = &net->rxt_timer;
			if (net->RTO == 0) {
				/* No RTT measured yet: fall back to the initial RTO. */
				rto_val = stcb->asoc.initial_rto;
			} else {
				rto_val = net->RTO;
			}
			to_ticks = MSEC_TO_TICKS(rto_val);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/*
		 * Here we use the INIT timer default usually about 1
		 * minute.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_RECV:
		/*
		 * Here we use the Delayed-Ack timer value from the inp
		 * ususually about 200ms.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		/* Here we use the RTO of the destination. */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		/*
		 * the net is used here so that we can add in the RTO. Even
		 * though we use a different timer. We also add the HB timer
		 * PLUS a random jitter.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		} else {
			uint32_t rndval;
			uint32_t jitter;

			/* No HB on this destination unless it is unconfirmed. */
			if ((net->dest_state & SCTP_ADDR_NOHB) &&
			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
				return;
			}
			if (net->RTO == 0) {
				to_ticks = stcb->asoc.initial_rto;
			} else {
				to_ticks = net->RTO;
			}
			/* Jitter the RTO by up to +/- 50%. */
			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
			jitter = rndval % to_ticks;
			if (jitter >= (to_ticks >> 1)) {
				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
			} else {
				to_ticks = to_ticks - jitter;
			}
			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
			    !(net->dest_state & SCTP_ADDR_PF)) {
				to_ticks += net->heart_beat_delay;
			}
			/*
			 * Now we must convert the to_ticks that are now in
			 * ms to ticks.
			 */
			to_ticks = MSEC_TO_TICKS(to_ticks);
			tmr = &net->hb_timer;
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		/*
		 * Here we can use the RTO timer from the network since one
		 * RTT was compelete. If a retran happened then we will be
		 * using the RTO initial value.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/*
		 * nothing needed but the endpoint here ususually about 60
		 * minutes.
		 */
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if (stcb == NULL) {
			return;
		}
		/* Re-uses the stream-reset timer slot of the association. */
		tmr = &stcb->asoc.strreset_timer;
		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		/*
		 * Here we use the value found in the EP for PMTU ususually
		 * about 10 minutes.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		/* No PMTU discovery on this destination: nothing to raise. */
		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
			return;
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		/* Here we use the RTO of the destination */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/*
		 * Here we use the endpoints shutdown guard timer usually
		 * about 3 minutes.
		 */
		if (stcb == NULL) {
			return;
		}
		if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
			/* Unconfigured: default to 5 times the maximum RTO. */
			to_ticks = 5 * MSEC_TO_TICKS(stcb->asoc.maxrto);
		} else {
			to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		/* Note: this type requires net == NULL, unlike the others. */
		if ((stcb == NULL) || (net != NULL)) {
			return;
		}
		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		if (stcb->asoc.sctp_autoclose_ticks == 0) {
			/*
			 * Really an error since stcb is NOT set to
			 * autoclose
			 */
			return;
		}
		to_ticks = stcb->asoc.sctp_autoclose_ticks;
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __func__, t_type);
		return;
		break;
	}
	/*
	 * NOTE(review): to_ticks is unsigned, so "<= 0" only catches an
	 * exact zero timeout; negative values cannot occur here.
	 */
	if ((to_ticks <= 0) || (tmr == NULL)) {
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
		    __func__, t_type, to_ticks, (void *)tmr);
		return;
	}
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * we do NOT allow you to have it already running. if it is
		 * we leave the current one up unchanged
		 */
		return;
	}
	/* At this point we can proceed */
	if (t_type == SCTP_TIMER_TYPE_SEND) {
		stcb->asoc.num_send_timers_up++;
	}
	/* Record the timer's identity for the timeout handler. */
	tmr->stopped_from = 0;
	tmr->type = t_type;
	tmr->ep = (void *)inp;
	tmr->tcb = (void *)stcb;
	tmr->net = (void *)net;
	tmr->self = (void *)tmr;
#if defined(__FreeBSD__) && __FreeBSD_version >= 800000
	tmr->vnet = (void *)curvnet;
#endif
#ifndef __Panda__
	tmr->ticks = sctp_get_tick_count();
#endif
	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
	return;
}
2343
/*
 * Stop the timer of type 't_type' for the given endpoint/association/
 * destination.  Each case selects the sctp_timer structure that backs
 * this timer type; if that structure is currently armed for a DIFFERENT
 * type (some slots are shared between types), the running timer is left
 * alone.  'from' records the caller's location for debugging.  The TCB
 * lock must be held whenever 'stcb' is non-NULL.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* Only the address work-queue timer may run without an endpoint. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.
		 */
		if (stcb == NULL) {
			return;
		}
		/* Shares the stream-reset timer slot; see sctp_timer_start(). */
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __func__, t_type);
		break;
	}
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped. So just
		 * return.
		 */
		return;
	}
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		/* Keep the outstanding-send-timer count consistent. */
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2494
2495 uint32_t
sctp_calculate_len(struct mbuf * m)2496 sctp_calculate_len(struct mbuf *m)
2497 {
2498 uint32_t tlen = 0;
2499 struct mbuf *at;
2500
2501 at = m;
2502 while (at) {
2503 tlen += SCTP_BUF_LEN(at);
2504 at = SCTP_BUF_NEXT(at);
2505 }
2506 return (tlen);
2507 }
2508
2509 void
sctp_mtu_size_reset(struct sctp_inpcb * inp,struct sctp_association * asoc,uint32_t mtu)2510 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2511 struct sctp_association *asoc, uint32_t mtu)
2512 {
2513 /*
2514 * Reset the P-MTU size on this association, this involves changing
2515 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2516 * allow the DF flag to be cleared.
2517 */
2518 struct sctp_tmit_chunk *chk;
2519 unsigned int eff_mtu, ovh;
2520
2521 asoc->smallest_mtu = mtu;
2522 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2523 ovh = SCTP_MIN_OVERHEAD;
2524 } else {
2525 ovh = SCTP_MIN_V4_OVERHEAD;
2526 }
2527 eff_mtu = mtu - ovh;
2528 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2529 if (chk->send_size > eff_mtu) {
2530 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2531 }
2532 }
2533 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2534 if (chk->send_size > eff_mtu) {
2535 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2536 }
2537 }
2538 }
2539
2540
2541 /*
2542 * given an association and starting time of the current RTT period return
2543 * RTO in number of msecs net should point to the current network
2544 */
2545
uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *old,
    int rtt_from_sack)
{
	/*-
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in number of msecs.
	 */
	int32_t rtt;		/* RTT in ms */
	uint32_t new_rto;
	int first_measure = 0;
	struct timeval now;

	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	if (stcb->asoc.use_precise_time) {
		(void)SCTP_GETPTIME_TIMEVAL(&now);
	} else {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	/* now = elapsed time since 'old' (start of this RTT period) */
	timevalsub(&now, old);
	/* store the current RTT in us */
	net->rtt = (uint64_t)1000000 * (uint64_t)now.tv_sec +
	    (uint64_t)now.tv_usec;
	/* compute rtt in ms */
	rtt = (int32_t)(net->rtt / 1000);
	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
		/* Tell the CC module that a new update has just occurred from a sack */
		(*asoc->cc_functions.sctp_rtt_calculated)(stcb, net, &now);
	}
	/* Do we need to determine the lan? We do this only
	 * on sacks i.e. RTT being determined from data not
	 * non-data (HB/INIT->INITACK).
	 */
	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
			net->lan_type = SCTP_LAN_INTERNET;
		} else {
			net->lan_type = SCTP_LAN_LOCAL;
		}
	}

	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	/*-
	 * Compute the scaled average lastsa and the
	 * scaled variance lastsv as described in van Jacobson
	 * Paper "Congestion Avoidance and Control", Annex A.
	 *
	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
	 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar
	 */
	if (net->RTO_measured) {
		/* rtt becomes the (signed) error against the smoothed RTT */
		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
		net->lastsa += rtt;
		if (rtt < 0) {
			rtt = -rtt;
		}
		/* |error| feeds the variance estimator */
		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
		net->lastsv += rtt;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
	} else {
		/* First RTO measurment */
		net->RTO_measured = 1;
		first_measure = 1;
		/* Seed srtt with this RTT and rttvar with RTT/2. */
		net->lastsa = rtt << SCTP_RTT_SHIFT;
		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
	/* Variance must never collapse to zero: enforce a floor. */
	if (net->lastsv == 0) {
		net->lastsv = SCTP_CLOCK_GRANULARITY;
	}
	/* RTO = srtt + rttvar (both already scaled consistently) */
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		/* Latch: once we fall out of satellite mode, never re-enter. */
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}
2647
2648 /*
2649 * return a pointer to a contiguous piece of data from the given mbuf chain
2650 * starting at 'off' for 'len' bytes. If the desired piece spans more than
2651 * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2652 * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2653 */
2654 caddr_t
sctp_m_getptr(struct mbuf * m,int off,int len,uint8_t * in_ptr)2655 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2656 {
2657 uint32_t count;
2658 uint8_t *ptr;
2659
2660 ptr = in_ptr;
2661 if ((off < 0) || (len <= 0))
2662 return (NULL);
2663
2664 /* find the desired start location */
2665 while ((m != NULL) && (off > 0)) {
2666 if (off < SCTP_BUF_LEN(m))
2667 break;
2668 off -= SCTP_BUF_LEN(m);
2669 m = SCTP_BUF_NEXT(m);
2670 }
2671 if (m == NULL)
2672 return (NULL);
2673
2674 /* is the current mbuf large enough (eg. contiguous)? */
2675 if ((SCTP_BUF_LEN(m) - off) >= len) {
2676 return (mtod(m, caddr_t) + off);
2677 } else {
2678 /* else, it spans more than one mbuf, so save a temp copy... */
2679 while ((m != NULL) && (len > 0)) {
2680 count = min(SCTP_BUF_LEN(m) - off, len);
2681 memcpy(ptr, mtod(m, caddr_t) + off, count);
2682 len -= count;
2683 ptr += count;
2684 off = 0;
2685 m = SCTP_BUF_NEXT(m);
2686 }
2687 if ((m == NULL) && (len > 0))
2688 return (NULL);
2689 else
2690 return ((caddr_t)in_ptr);
2691 }
2692 }
2693
2694
2695
2696 struct sctp_paramhdr *
sctp_get_next_param(struct mbuf * m,int offset,struct sctp_paramhdr * pull,int pull_limit)2697 sctp_get_next_param(struct mbuf *m,
2698 int offset,
2699 struct sctp_paramhdr *pull,
2700 int pull_limit)
2701 {
2702 /* This just provides a typed signature to Peter's Pull routine */
2703 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2704 (uint8_t *) pull));
2705 }
2706
2707
2708 struct mbuf *
sctp_add_pad_tombuf(struct mbuf * m,int padlen)2709 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2710 {
2711 struct mbuf *m_last;
2712 caddr_t dp;
2713
2714 if (padlen > 3) {
2715 return (NULL);
2716 }
2717 if (padlen <= M_TRAILINGSPACE(m)) {
2718 /*
2719 * The easy way. We hope the majority of the time we hit
2720 * here :)
2721 */
2722 m_last = m;
2723 } else {
2724 /* Hard way we must grow the mbuf chain */
2725 m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2726 if (m_last == NULL) {
2727 return (NULL);
2728 }
2729 SCTP_BUF_LEN(m_last) = 0;
2730 SCTP_BUF_NEXT(m_last) = NULL;
2731 SCTP_BUF_NEXT(m) = m_last;
2732 }
2733 dp = mtod(m_last, caddr_t) + SCTP_BUF_LEN(m_last);
2734 SCTP_BUF_LEN(m_last) += padlen;
2735 memset(dp, 0, padlen);
2736 return (m_last);
2737 }
2738
2739 struct mbuf *
sctp_pad_lastmbuf(struct mbuf * m,int padval,struct mbuf * last_mbuf)2740 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2741 {
2742 /* find the last mbuf in chain and pad it */
2743 struct mbuf *m_at;
2744
2745 if (last_mbuf != NULL) {
2746 return (sctp_add_pad_tombuf(last_mbuf, padval));
2747 } else {
2748 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2749 if (SCTP_BUF_NEXT(m_at) == NULL) {
2750 return (sctp_add_pad_tombuf(m_at, padval));
2751 }
2752 }
2753 }
2754 return (NULL);
2755 }
2756
/*
 * Deliver an SCTP_ASSOC_CHANGE notification ('state' with optional
 * 'error'/'abort' detail) to the association's socket, then for
 * 1-to-1 style sockets map COMM_LOST/CANT_STR_ASSOC to a socket error
 * (ECONNREFUSED/ECONNRESET from the peer, ETIMEDOUT/ECONNABORTED
 * locally) and wake any sleepers.  'so_locked' tells the platforms
 * that need it whether the socket lock is already held.
 */
static void
sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
    uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
    )
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;
	unsigned int notif_len;
	uint16_t abort_len;
	unsigned int i;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

	if (stcb == NULL) {
		return;
	}
	/* Build and queue the notification only if the user asked for it. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
		if (abort != NULL) {
			abort_len = ntohs(abort->ch.chunk_length);
		} else {
			abort_len = 0;
		}
		/* Size the trailing sac_info: feature list or abort chunk copy. */
		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
			notif_len += abort_len;
		}
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			/* Retry with smaller value. */
			notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
			if (m_notify == NULL) {
				goto set_error;
			}
		}
		SCTP_BUF_NEXT(m_notify) = NULL;
		sac = mtod(m_notify, struct sctp_assoc_change *);
		memset(sac, 0, notif_len);
		sac->sac_type = SCTP_ASSOC_CHANGE;
		sac->sac_flags = 0;
		sac->sac_length = sizeof(struct sctp_assoc_change);
		sac->sac_state = state;
		sac->sac_error = error;
		/* XXX verify these stream counts */
		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
		sac->sac_inbound_streams = stcb->asoc.streamincnt;
		sac->sac_assoc_id = sctp_get_associd(stcb);
		if (notif_len > sizeof(struct sctp_assoc_change)) {
			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
				/* Advertise negotiated features, one byte each. */
				i = 0;
				if (stcb->asoc.prsctp_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
				}
				if (stcb->asoc.auth_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
				}
				if (stcb->asoc.asconf_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
				}
				if (stcb->asoc.idata_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
				}
				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
				if (stcb->asoc.reconfig_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
				}
				sac->sac_length += i;
			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
				/* Hand the raw ABORT chunk to the application. */
				memcpy(sac->sac_info, abort, abort_len);
				sac->sac_length += abort_len;
			}
		}
		SCTP_BUF_LEN(m_notify) = sac->sac_length;
		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
		    0, 0, stcb->asoc.context, 0, 0, 0,
		    m_notify);
		if (control != NULL) {
			control->length = SCTP_BUF_LEN(m_notify);
			control->spec_flags = M_NOTIFICATION;
			/* not that we need this */
			control->tail_mbuf = m_notify;
			sctp_add_to_readq(stcb->sctp_ep, stcb,
			    control,
			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
			    so_locked);
		} else {
			sctp_m_freem(m_notify);
		}
	}
	/*
	 * For 1-to-1 style sockets, we send up and error when an ABORT
	 * comes in.
	 */
set_error:
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		SOCK_LOCK(stcb->sctp_socket);
		if (from_peer) {
			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
				/* Peer aborted before the handshake completed. */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
				stcb->sctp_socket->so_error = ECONNREFUSED;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
				stcb->sctp_socket->so_error = ECONNRESET;
			}
		} else {
			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
				/* Local failure during the handshake. */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
				stcb->sctp_socket->so_error = ETIMEDOUT;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
				stcb->sctp_socket->so_error = ECONNABORTED;
			}
		}
		SOCK_UNLOCK(stcb->sctp_socket);
	}
	/* Wake ANY sleepers */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(stcb->sctp_ep);
	if (!so_locked) {
		/* Hold a ref across the TCB-unlock/socket-lock dance. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
	}
#endif
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		socantrcvmore(stcb->sctp_socket);
	}
	sorwakeup(stcb->sctp_socket);
	sowwakeup(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
2910
/*
 * Deliver an SCTP_PEER_ADDR_CHANGE notification for address 'sa'
 * entering 'state' (with optional 'error') to the association's
 * socket, if the user enabled SCTP_PCB_FLAGS_RECVPADDREVNT.  The
 * address is copied into the notification in the form the user
 * expects (possibly v4-mapped-v6, with scope handling for link-local
 * IPv6 addresses).
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
    )
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	memset(spc, 0, sizeof(struct sctp_paddr_change));
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	/* Copy the address in the representation the user expects. */
	switch (sa->sa_family) {
#ifdef INET
	case AF_INET:
#ifdef INET6
		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
			/* User asked for v4 addresses as v4-mapped v6. */
			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
			    (struct sockaddr_in6 *)&spc->spc_aaddr);
		} else {
			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		}
#else
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
#endif
		break;
#endif
#ifdef INET6
	case AF_INET6:
	{
#ifdef SCTP_EMBEDDED_V6_SCOPE
		struct sockaddr_in6 *sin6;
#endif /* SCTP_EMBEDDED_V6_SCOPE */
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

#ifdef SCTP_EMBEDDED_V6_SCOPE
		sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
		if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
			if (sin6->sin6_scope_id == 0) {
				/* recover scope_id for user */
#ifdef SCTP_KAME
				(void)sa6_recoverscope(sin6);
#else
				(void)in6_recoverscope(sin6, &sin6->sin6_addr,
				    NULL);
#endif
			} else {
				/* clear embedded scope_id for user */
				in6_clearscope(&sin6->sin6_addr);
			}
		}
#endif /* SCTP_EMBEDDED_V6_SCOPE */
		break;
	}
#endif
#if defined(__Userspace__)
	case AF_CONN:
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_conn));
		break;
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
3015
3016
/*
 * Queue an SCTP_SEND_FAILED (old API) or SCTP_SEND_FAILED_EVENT (new API)
 * notification for a chunk that could not be delivered.  'sent' selects the
 * SCTP_DATA_SENT vs. SCTP_DATA_UNSENT flag.  The chunk's mbuf chain is
 * stolen (chk->data is set to NULL) and appended to the notification as the
 * undelivered payload, after trimming the DATA/I-DATA chunk header and any
 * trailing padding.  No-op when neither failure event is subscribed.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	struct sctp_chunkhdr *chkhdr;
	int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	     sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}

	/* Pick the notification format the application subscribed to. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		notifhdr_len = sizeof(struct sctp_send_failed_event);
	} else {
		notifhdr_len = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = notifhdr_len;
	/* I-DATA chunks carry a larger header than plain DATA chunks. */
	if (stcb->asoc.idata_supported) {
		chkhdr_len = sizeof(struct sctp_idata_chunk);
	} else {
		chkhdr_len = sizeof(struct sctp_data_chunk);
	}
	/* Use some defaults in case we can't access the chunk header */
	if (chk->send_size >= chkhdr_len) {
		payload_len = chk->send_size - chkhdr_len;
	} else {
		payload_len = 0;
	}
	padding_len = 0;
	if (chk->data != NULL) {
		chkhdr = mtod(chk->data, struct sctp_chunkhdr *);
		if (chkhdr != NULL) {
			chk_len = ntohs(chkhdr->chunk_length);
			/*
			 * Only trust the header-derived lengths when they are
			 * self-consistent: header fits, chunk fits in send_size,
			 * and the leftover (padding) is less than one 4-byte
			 * alignment unit.
			 */
			if ((chk_len >= chkhdr_len) &&
			    (chk->send_size >= chk_len) &&
			    (chk->send_size - chk_len < 4)) {
				padding_len = chk->send_size - chk_len;
				payload_len = chk->send_size - chkhdr_len - padding_len;
			}
		}
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		/* New-style sctp_send_failed_event. */
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, notifhdr_len);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		if (sent) {
			ssfe->ssfe_flags = SCTP_DATA_SENT;
		} else {
			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		}
		ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len);
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = chk->rec.data.sid;
		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
		ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid;
		ssfe->ssfe_info.snd_context = chk->rec.data.context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
	} else {
		/* Deprecated sctp_send_failed layout. */
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, notifhdr_len);
		ssf->ssf_type = SCTP_SEND_FAILED;
		if (sent) {
			ssf->ssf_flags = SCTP_DATA_SENT;
		} else {
			ssf->ssf_flags = SCTP_DATA_UNSENT;
		}
		ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len);
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssf->ssf_info.sinfo_stream = chk->rec.data.sid;
		ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid;
		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
		ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid;
		ssf->ssf_info.sinfo_context = chk->rec.data.context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
	}
	if (chk->data != NULL) {
		/* Trim off the sctp chunk header (it should be there) */
		if (chk->send_size == chkhdr_len + payload_len + padding_len) {
			m_adj(chk->data, chkhdr_len);
			m_adj(chk->data, -padding_len);
			sctp_mbuf_crush(chk->data);
			chk->send_size -= (chkhdr_len + padding_len);
		}
	}
	SCTP_BUF_NEXT(m_notify) = chk->data;
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* Drops both the notification and the stolen payload chain. */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
3151
3152
/*
 * Like sctp_notify_send_failed(), but for a message still sitting on the
 * stream queue (never handed to the wire), described by a
 * sctp_stream_queue_pending.  Always reports SCTP_DATA_UNSENT; the pending
 * data chain is stolen (sp->data set to NULL) and attached as the payload.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int notifhdr_len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	     sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	/* Pick the notification format the application subscribed to. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		notifhdr_len = sizeof(struct sctp_send_failed_event);
	} else {
		notifhdr_len = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* no space left */
		return;
	}
	SCTP_BUF_LEN(m_notify) = notifhdr_len;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		/* New-style sctp_send_failed_event. */
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, notifhdr_len);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length);
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = sp->sid;
		if (sp->some_taken) {
			/* part of the message already left as earlier fragments */
			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
		}
		ssfe->ssfe_info.snd_ppid = sp->ppid;
		ssfe->ssfe_info.snd_context = sp->context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
	} else {
		/* Deprecated sctp_send_failed layout. */
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, notifhdr_len);
		ssf->ssf_type = SCTP_SEND_FAILED;
		ssf->ssf_flags = SCTP_DATA_UNSENT;
		ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length);
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssf->ssf_info.sinfo_stream = sp->sid;
		ssf->ssf_info.sinfo_ssn = 0;
		if (sp->some_taken) {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
		}
		ssf->ssf_info.sinfo_ppid = sp->ppid;
		ssf->ssf_info.sinfo_context = sp->context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
	}
	SCTP_BUF_NEXT(m_notify) = sp->data;

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* Drops both the notification and the stolen payload chain. */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
3252
3253
3254
3255 static void
sctp_notify_adaptation_layer(struct sctp_tcb * stcb)3256 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3257 {
3258 struct mbuf *m_notify;
3259 struct sctp_adaptation_event *sai;
3260 struct sctp_queued_to_read *control;
3261
3262 if ((stcb == NULL) ||
3263 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3264 /* event not enabled */
3265 return;
3266 }
3267
3268 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
3269 if (m_notify == NULL)
3270 /* no space left */
3271 return;
3272 SCTP_BUF_LEN(m_notify) = 0;
3273 sai = mtod(m_notify, struct sctp_adaptation_event *);
3274 memset(sai, 0, sizeof(struct sctp_adaptation_event));
3275 sai->sai_type = SCTP_ADAPTATION_INDICATION;
3276 sai->sai_flags = 0;
3277 sai->sai_length = sizeof(struct sctp_adaptation_event);
3278 sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3279 sai->sai_assoc_id = sctp_get_associd(stcb);
3280
3281 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3282 SCTP_BUF_NEXT(m_notify) = NULL;
3283
3284 /* append to socket */
3285 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3286 0, 0, stcb->asoc.context, 0, 0, 0,
3287 m_notify);
3288 if (control == NULL) {
3289 /* no memory */
3290 sctp_m_freem(m_notify);
3291 return;
3292 }
3293 control->length = SCTP_BUF_LEN(m_notify);
3294 control->spec_flags = M_NOTIFICATION;
3295 /* not that we need this */
3296 control->tail_mbuf = m_notify;
3297 sctp_add_to_readq(stcb->sctp_ep, stcb,
3298 control,
3299 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3300 }
3301
/*
 * Queue an SCTP_PARTIAL_DELIVERY_EVENT notification.  'val' packs the
 * stream number in its upper 16 bits and the sequence number in the lower
 * 16.  Unlike the other notifiers this inserts directly into the INP's
 * read queue (after the in-progress partial-delivery entry when present)
 * instead of going through sctp_add_to_readq(), so:
 * This always must be called with the read-queue LOCKED in the INP.
 */
static void
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
    uint32_t val, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_pdapi_event *pdapi;
	struct sctp_queued_to_read *control;
	struct sockbuf *sb;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
		/* event not enabled */
		return;
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* nobody can read it anyway */
		return;
	}

	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
	pdapi->pdapi_flags = 0;
	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
	pdapi->pdapi_indication = error;
	/* unpack stream (high 16 bits) and sequence (low 16 bits) */
	pdapi->pdapi_stream = (val >> 16);
	pdapi->pdapi_seq = (val & 0x0000ffff);
	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sb = &stcb->sctp_socket->so_rcv;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
	}
	/* account the notification against the receive buffer ourselves */
	sctp_sballoc(stcb, sb, m_notify);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
	}
	control->end_added = 1;
	if (stcb->asoc.control_pdapi)
		/* slot it right behind the partially-delivered message */
		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
	else {
		/* we really should not see this case */
		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
	}
	if (stcb->sctp_ep && stcb->sctp_socket) {
		/* This should always be the case */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * Apple needs the socket lock for the wakeup; take it while
		 * holding an stcb refcount so the tcb can't go away during
		 * the unlock/lock window.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
3395
/*
 * Handle a completed SHUTDOWN from the peer: for one-to-one style (and
 * connected one-to-many) sockets mark the socket as unable to send more,
 * then queue an SCTP_SHUTDOWN_EVENT notification if subscribed.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * Apple requires the socket lock around socantsendmore();
		 * hold an stcb refcount across the unlock/lock window.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}

	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	memset(sse, 0, sizeof(struct sctp_shutdown_event));
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3465
3466 static void
sctp_notify_sender_dry_event(struct sctp_tcb * stcb,int so_locked SCTP_UNUSED)3467 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3468 int so_locked
3469 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3470 SCTP_UNUSED
3471 #endif
3472 )
3473 {
3474 struct mbuf *m_notify;
3475 struct sctp_sender_dry_event *event;
3476 struct sctp_queued_to_read *control;
3477
3478 if ((stcb == NULL) ||
3479 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3480 /* event not enabled */
3481 return;
3482 }
3483
3484 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3485 if (m_notify == NULL) {
3486 /* no space left */
3487 return;
3488 }
3489 SCTP_BUF_LEN(m_notify) = 0;
3490 event = mtod(m_notify, struct sctp_sender_dry_event *);
3491 memset(event, 0, sizeof(struct sctp_sender_dry_event));
3492 event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3493 event->sender_dry_flags = 0;
3494 event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3495 event->sender_dry_assoc_id = sctp_get_associd(stcb);
3496
3497 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3498 SCTP_BUF_NEXT(m_notify) = NULL;
3499
3500 /* append to socket */
3501 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3502 0, 0, stcb->asoc.context, 0, 0, 0,
3503 m_notify);
3504 if (control == NULL) {
3505 /* no memory */
3506 sctp_m_freem(m_notify);
3507 return;
3508 }
3509 control->length = SCTP_BUF_LEN(m_notify);
3510 control->spec_flags = M_NOTIFICATION;
3511 /* not that we need this */
3512 control->tail_mbuf = m_notify;
3513 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3514 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3515 }
3516
3517
3518 void
sctp_notify_stream_reset_add(struct sctp_tcb * stcb,uint16_t numberin,uint16_t numberout,int flag)3519 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3520 {
3521 struct mbuf *m_notify;
3522 struct sctp_queued_to_read *control;
3523 struct sctp_stream_change_event *stradd;
3524
3525 if ((stcb == NULL) ||
3526 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3527 /* event not enabled */
3528 return;
3529 }
3530 if ((stcb->asoc.peer_req_out) && flag) {
3531 /* Peer made the request, don't tell the local user */
3532 stcb->asoc.peer_req_out = 0;
3533 return;
3534 }
3535 stcb->asoc.peer_req_out = 0;
3536 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3537 if (m_notify == NULL)
3538 /* no space left */
3539 return;
3540 SCTP_BUF_LEN(m_notify) = 0;
3541 stradd = mtod(m_notify, struct sctp_stream_change_event *);
3542 memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3543 stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3544 stradd->strchange_flags = flag;
3545 stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3546 stradd->strchange_assoc_id = sctp_get_associd(stcb);
3547 stradd->strchange_instrms = numberin;
3548 stradd->strchange_outstrms = numberout;
3549 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3550 SCTP_BUF_NEXT(m_notify) = NULL;
3551 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3552 /* no space */
3553 sctp_m_freem(m_notify);
3554 return;
3555 }
3556 /* append to socket */
3557 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3558 0, 0, stcb->asoc.context, 0, 0, 0,
3559 m_notify);
3560 if (control == NULL) {
3561 /* no memory */
3562 sctp_m_freem(m_notify);
3563 return;
3564 }
3565 control->length = SCTP_BUF_LEN(m_notify);
3566 control->spec_flags = M_NOTIFICATION;
3567 /* not that we need this */
3568 control->tail_mbuf = m_notify;
3569 sctp_add_to_readq(stcb->sctp_ep, stcb,
3570 control,
3571 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3572 }
3573
3574 void
sctp_notify_stream_reset_tsn(struct sctp_tcb * stcb,uint32_t sending_tsn,uint32_t recv_tsn,int flag)3575 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3576 {
3577 struct mbuf *m_notify;
3578 struct sctp_queued_to_read *control;
3579 struct sctp_assoc_reset_event *strasoc;
3580
3581 if ((stcb == NULL) ||
3582 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3583 /* event not enabled */
3584 return;
3585 }
3586 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3587 if (m_notify == NULL)
3588 /* no space left */
3589 return;
3590 SCTP_BUF_LEN(m_notify) = 0;
3591 strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3592 memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3593 strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3594 strasoc->assocreset_flags = flag;
3595 strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3596 strasoc->assocreset_assoc_id= sctp_get_associd(stcb);
3597 strasoc->assocreset_local_tsn = sending_tsn;
3598 strasoc->assocreset_remote_tsn = recv_tsn;
3599 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3600 SCTP_BUF_NEXT(m_notify) = NULL;
3601 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3602 /* no space */
3603 sctp_m_freem(m_notify);
3604 return;
3605 }
3606 /* append to socket */
3607 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3608 0, 0, stcb->asoc.context, 0, 0, 0,
3609 m_notify);
3610 if (control == NULL) {
3611 /* no memory */
3612 sctp_m_freem(m_notify);
3613 return;
3614 }
3615 control->length = SCTP_BUF_LEN(m_notify);
3616 control->spec_flags = M_NOTIFICATION;
3617 /* not that we need this */
3618 control->tail_mbuf = m_notify;
3619 sctp_add_to_readq(stcb->sctp_ep, stcb,
3620 control,
3621 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3622 }
3623
3624
3625
3626 static void
sctp_notify_stream_reset(struct sctp_tcb * stcb,int number_entries,uint16_t * list,int flag)3627 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3628 int number_entries, uint16_t * list, int flag)
3629 {
3630 struct mbuf *m_notify;
3631 struct sctp_queued_to_read *control;
3632 struct sctp_stream_reset_event *strreset;
3633 int len;
3634
3635 if ((stcb == NULL) ||
3636 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3637 /* event not enabled */
3638 return;
3639 }
3640
3641 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3642 if (m_notify == NULL)
3643 /* no space left */
3644 return;
3645 SCTP_BUF_LEN(m_notify) = 0;
3646 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3647 if (len > M_TRAILINGSPACE(m_notify)) {
3648 /* never enough room */
3649 sctp_m_freem(m_notify);
3650 return;
3651 }
3652 strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3653 memset(strreset, 0, len);
3654 strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3655 strreset->strreset_flags = flag;
3656 strreset->strreset_length = len;
3657 strreset->strreset_assoc_id = sctp_get_associd(stcb);
3658 if (number_entries) {
3659 int i;
3660
3661 for (i = 0; i < number_entries; i++) {
3662 strreset->strreset_stream_list[i] = ntohs(list[i]);
3663 }
3664 }
3665 SCTP_BUF_LEN(m_notify) = len;
3666 SCTP_BUF_NEXT(m_notify) = NULL;
3667 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3668 /* no space */
3669 sctp_m_freem(m_notify);
3670 return;
3671 }
3672 /* append to socket */
3673 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3674 0, 0, stcb->asoc.context, 0, 0, 0,
3675 m_notify);
3676 if (control == NULL) {
3677 /* no memory */
3678 sctp_m_freem(m_notify);
3679 return;
3680 }
3681 control->length = SCTP_BUF_LEN(m_notify);
3682 control->spec_flags = M_NOTIFICATION;
3683 /* not that we need this */
3684 control->tail_mbuf = m_notify;
3685 sctp_add_to_readq(stcb->sctp_ep, stcb,
3686 control,
3687 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3688 }
3689
3690
/*
 * Queue an SCTP_REMOTE_ERROR notification for an ERROR chunk received
 * from the peer.  When 'chunk' is non-NULL its full contents (header
 * included) are appended after the fixed notification header; if that
 * larger allocation fails the notification is sent without the chunk.
 */
static void
sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
{
	struct mbuf *m_notify;
	struct sctp_remote_error *sre;
	struct sctp_queued_to_read *control;
	unsigned int notif_len;
	uint16_t chunk_len;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
		/* event not enabled */
		return;
	}
	if (chunk != NULL) {
		chunk_len = ntohs(chunk->ch.chunk_length);
	} else {
		chunk_len = 0;
	}
	notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* Retry with smaller value. */
		notif_len = (unsigned int)sizeof(struct sctp_remote_error);
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			return;
		}
	}
	SCTP_BUF_NEXT(m_notify) = NULL;
	sre = mtod(m_notify, struct sctp_remote_error *);
	memset(sre, 0, notif_len);
	sre->sre_type = SCTP_REMOTE_ERROR;
	sre->sre_flags = 0;
	sre->sre_length = sizeof(struct sctp_remote_error);
	sre->sre_error = error;
	sre->sre_assoc_id = sctp_get_associd(stcb);
	/* notif_len only exceeds the header when the large alloc succeeded */
	if (notif_len > sizeof(struct sctp_remote_error)) {
		memcpy(sre->sre_data, chunk, chunk_len);
		sre->sre_length += chunk_len;
	}
	SCTP_BUF_LEN(m_notify) = sre->sre_length;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control != NULL) {
		control->length = SCTP_BUF_LEN(m_notify);
		control->spec_flags = M_NOTIFICATION;
		/* not that we need this */
		control->tail_mbuf = m_notify;
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
	} else {
		/* no memory for the read-queue entry */
		sctp_m_freem(m_notify);
	}
}
3748
3749
/*
 * Central dispatcher that translates internal SCTP events into the
 * socket-API notifications the application subscribed to.  'data' and
 * 'error' are interpreted per notification type (e.g. 'data' is a
 * sctp_nets* for interface events, a chunk/pending pointer for send
 * failures, and 'error' doubles as the entry count for the stream-reset
 * cases).  Silently returns when the socket is gone or can't receive.
 */
void
sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((stcb == NULL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* If the socket is gone we are out of here */
		return;
	}
	/* The cant-receive flag lives in different places per platform. */
#if (defined(__FreeBSD__) && __FreeBSD_version > 500000) || defined(__Windows__)
	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
#else
	if (stcb->sctp_socket->so_state & SS_CANTRCVMORE) {
#endif
		return;
	}
#if defined(__APPLE__)
	/* sanity check the expected socket-lock state */
	if (so_locked) {
		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
	} else {
		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
	}
#endif
	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) {
		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
			/* Don't report these in front states */
			return;
		}
	}
	switch (notification) {
	case SCTP_NOTIFY_ASSOC_UP:
		/* COMM_UP is reported at most once per association */
		if (stcb->asoc.assoc_up_sent == 0) {
			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
			stcb->asoc.assoc_up_sent = 1;
		}
		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
			sctp_notify_adaptation_layer(stcb);
		}
		if (stcb->asoc.auth_supported == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_DOWN:
		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
#if defined(__Userspace__)
		/*
		 * Userspace stack: invoke the registered receive callback
		 * with a zero-length read to signal the association ended.
		 * TCB lock is dropped around the upcall; a refcount keeps
		 * the stcb alive.
		 */
		if (stcb->sctp_ep->recv_callback) {
			if (stcb->sctp_socket) {
				union sctp_sockstore addr;
				struct sctp_rcvinfo rcv;

				memset(&addr, 0, sizeof(union sctp_sockstore));
				memset(&rcv, 0, sizeof(struct sctp_rcvinfo));
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				stcb->sctp_ep->recv_callback(stcb->sctp_socket, addr, NULL, 0, rcv, 0, stcb->sctp_ep->ulp_info);
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
		}
#endif
		break;
	case SCTP_NOTIFY_INTERFACE_DOWN:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_UP:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
		/* message never left the stream queue */
		sctp_notify_send_failed2(stcb, error,
		    (struct sctp_stream_queue_pending *)data, so_locked);
		break;
	case SCTP_NOTIFY_SENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 1, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_UNSENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 0, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	/* NB: constant name is historically misspelled ("DELVIERY") */
	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
		{
			uint32_t val;
			val = *((uint32_t *)data);

			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
			break;
		}
	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
		/* aborts in the handshake states map to CANT_STR_ASSOC */
		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_RESTART:
		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
		if (stcb->asoc.auth_supported == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	/* For the stream-reset cases 'error' carries the entry count. */
	case SCTP_NOTIFY_STR_RESET_SEND:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
		break;
	case SCTP_NOTIFY_STR_RESET_RECV:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN|SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN|SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_INCOMING|SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_INCOMING|SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_ASCONF_ADD_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_DELETE_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_PEER_SHUTDOWN:
		sctp_notify_shutdown_event(stcb);
		break;
	/* For AUTH cases 'data' is the key id smuggled through the pointer. */
	case SCTP_NOTIFY_AUTH_NEW_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_AUTH_FREE_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_NO_PEER_AUTH:
		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_SENDER_DRY:
		sctp_notify_sender_dry_event(stcb, so_locked);
		break;
	case SCTP_NOTIFY_REMOTE_ERROR:
		sctp_notify_remote_error(stcb, error, data);
		break;
	default:
		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
			__func__, notification, notification);
		break;
	}			/* end switch */
}
3955
/*
 * Report every queued outbound DATA chunk on this association as failed,
 * using "error" as the reason, and free the chunks.  Walks the sent queue,
 * the pending send queue, and each stream's output queue in turn.
 * "holds_lock" is non-zero when the caller already holds the TCB send lock;
 * "so_locked" says whether the socket lock is held (only meaningful on
 * platforms with socket locking, e.g. __APPLE__).
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
	SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
#if defined(__APPLE__)
	/* Sanity-check that the caller's claim about the socket lock holds. */
	if (so_locked) {
		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
	} else {
		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
	}
#endif
	/* Nothing to report once the socket is gone or closed. */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		/* NR-acked chunks no longer count against the stream's queue. */
		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
			if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
				asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
			} else {
				panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
			}
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/* Tell the ULP this already-sent datagram failed. */
			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/*sa_ignore FREED_MEMORY*/
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
			asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
		} else {
			panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/* These chunks were never put on the wire. */
			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/*sa_ignore FREED_MEMORY*/
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			/* Let the stream scheduler drop its reference too. */
			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, holds_lock);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				/* Messages still sitting in the stream queue fail as well. */
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    error, (void *)sp, so_locked);
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
					sp->tail_mbuf = NULL;
					sp->length = 0;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/*sa_ignore FREED_MEMORY*/
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}
4074
/*
 * Notify the ULP that this association was aborted.  "from_peer" selects
 * the remote-abort vs. local-abort notification; "abort" is the received
 * ABORT chunk, if any (may be NULL).  All outstanding outbound data is
 * reported as failed first via sctp_report_all_outbound().
 */
void
sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
    struct sctp_abort_chunk *abort, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
	SCTP_UNUSED
#endif
)
{
	if (stcb == NULL) {
		return;
	}
#if defined(__APPLE__)
	/* Verify the caller's socket-lock claim. */
	if (so_locked) {
		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
	} else {
		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
	}
#endif
	/* On 1-to-1 style (TCP-model) sockets, remember the abort happened. */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
	}
	/* No one to notify if the socket is already gone or closed. */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* Tell them we lost the asoc */
	sctp_report_all_outbound(stcb, error, 1, so_locked);
	if (from_peer) {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
	} else {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
	}
}
4111
/*
 * Abort an association: send an ABORT to the peer and, if we have a TCB,
 * notify the ULP and free the association.  When stcb is NULL only the
 * out-of-the-blue ABORT is sent.  The peer's vtag and VRF are taken from
 * the TCB when present.  Called with the TCB lock held (when stcb != NULL);
 * sctp_free_assoc() consumes the TCB.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct mbuf *op_err,
#if defined(__FreeBSD__)
    uint8_t mflowtype, uint32_t mflowid,
#endif
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

	vtag = 0;
	if (stcb != NULL) {
		/* Use the association's peer vtag and VRF for the ABORT. */
		vtag = stcb->asoc.peer_vtag;
		vrf_id = stcb->asoc.vrf_id;
	}
	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
#if defined(__FreeBSD__)
	    mflowtype, mflowid, inp->fibnum,
#endif
	    vrf_id, port);
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
		/* Ok, now lets free it */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Acquire the socket lock with the TCB lock dropped; hold a
		 * refcount so the TCB cannot disappear in the window.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		/* Established associations leave the "current established" gauge. */
		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
4162 #ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the in/out TSN tracking logs for an association.
 * NOTE(review): the guard below is spelled "NOSIY_PRINTS" — presumably a
 * typo for "NOISY_PRINTS".  Unless that exact macro is defined somewhere,
 * the whole body compiles away and this function is a no-op; confirm
 * before relying on its output.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;
	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/* When the log wrapped, print the older tail (tsn_in_at..end) first. */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
				    stcb->asoc.in_tsnlog[i].tsn,
				    stcb->asoc.in_tsnlog[i].strm,
				    stcb->asoc.in_tsnlog[i].seq,
				    stcb->asoc.in_tsnlog[i].flgs,
				    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
				    stcb->asoc.in_tsnlog[i].tsn,
				    stcb->asoc.in_tsnlog[i].strm,
				    stcb->asoc.in_tsnlog[i].seq,
				    stcb->asoc.in_tsnlog[i].flgs,
				    stcb->asoc.in_tsnlog[i].sz);
		}
	}
 none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	/* Same wrap-aware ordering for the outbound log. */
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
				    stcb->asoc.out_tsnlog[i].tsn,
				    stcb->asoc.out_tsnlog[i].strm,
				    stcb->asoc.out_tsnlog[i].seq,
				    stcb->asoc.out_tsnlog[i].flgs,
				    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
				    stcb->asoc.out_tsnlog[i].tsn,
				    stcb->asoc.out_tsnlog[i].strm,
				    stcb->asoc.out_tsnlog[i].seq,
				    stcb->asoc.out_tsnlog[i].flgs,
				    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
4222 #endif
4223
/*
 * Abort an existing association: send an ABORT carrying "op_err" to the
 * peer, notify the ULP (unless the socket is gone), and free the TCB.
 * With stcb == NULL, only a pending inpcb free is completed.  Called with
 * the TCB lock held when stcb != NULL; the TCB is consumed by
 * sctp_free_assoc().
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
                          struct mbuf *op_err,
                          int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
                          SCTP_UNUSED
#endif
)
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
#if defined(__APPLE__)
	/* Verify the caller's socket-lock claim. */
	if (so_locked) {
		sctp_lock_assert(SCTP_INP_SO(inp));
	} else {
		sctp_unlock_assert(SCTP_INP_SO(inp));
	}
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
				/* Last association gone: finish freeing the endpoint. */
#if defined(__APPLE__)
				if (!so_locked) {
					SCTP_SOCKET_LOCK(so, 1);
				}
#endif
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
						SCTP_CALLED_DIRECTLY_NOCMPSET);
#if defined(__APPLE__)
				if (!so_locked) {
					SCTP_SOCKET_UNLOCK(so, 1);
				}
#endif
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* notify the peer */
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* Established associations leave the "current established" gauge. */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Take the socket lock with the TCB lock dropped; a refcount keeps
	 * the TCB alive across the window.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
			      SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
4301
/*
 * Handle an out-of-the-blue (OOTB) packet, i.e. one that matches no
 * existing association.  Scans the chunks to decide the proper response:
 * some chunk types must not be answered at all (ABORT, PACKET-DROPPED,
 * SHUTDOWN-COMPLETE), SHUTDOWN-ACK gets a SHUTDOWN-COMPLETE, and
 * everything else gets an ABORT — subject to the sctp_blackhole sysctl.
 */
void
sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
                 struct sockaddr *src, struct sockaddr *dst,
                 struct sctphdr *sh, struct sctp_inpcb *inp,
                 struct mbuf *cause,
#if defined(__FreeBSD__)
                 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
#endif
                 uint32_t vrf_id, uint16_t port)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;
	int contains_init_chunk;

	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
	/* Generate a TO address for future reference */
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
			/* Endpoint is pending free with no associations left. */
#if defined(__APPLE__)
			SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 1);
#endif
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
					SCTP_CALLED_DIRECTLY_NOCMPSET);
#if defined(__APPLE__)
			SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 1);
#endif
		}
	}
	contains_init_chunk = 0;
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
						   sizeof(*ch), (uint8_t *) & chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_INIT:
			/* Remembered for the blackhole decision below. */
			contains_init_chunk = 1;
			break;
		case SCTP_PACKET_DROPPED:
			/* we don't respond to pkt-dropped */
			return;
		case SCTP_ABORT_ASSOCIATION:
			/* we don't respond with an ABORT to an ABORT */
			return;
		case SCTP_SHUTDOWN_COMPLETE:
			/*
			 * we ignore it since we are not waiting for it and
			 * peer is gone
			 */
			return;
		case SCTP_SHUTDOWN_ACK:
			sctp_send_shutdown_complete2(src, dst, sh,
#if defined(__FreeBSD__)
						     mflowtype, mflowid, fibnum,
#endif
						     vrf_id, port);
			return;
		default:
			break;
		}
		/* Chunks are padded to 32-bit boundaries. */
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
							   sizeof(*ch), (uint8_t *) & chunk_buf);
	}
	/*
	 * blackhole sysctl: 0 = always respond, 1 = stay silent only for
	 * packets containing an INIT, >1 = never respond.
	 */
	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
	     (contains_init_chunk == 0))) {
		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
#if defined(__FreeBSD__)
				mflowtype, mflowid, fibnum,
#endif
				vrf_id, port);
	}
}
4379
4380 /*
4381 * check the inbound datagram to make sure there is not an abort inside it,
4382 * if there is return 1, else return 0.
4383 */
4384 int
4385 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4386 {
4387 struct sctp_chunkhdr *ch;
4388 struct sctp_init_chunk *init_chk, chunk_buf;
4389 int offset;
4390 unsigned int chk_length;
4391
4392 offset = iphlen + sizeof(struct sctphdr);
4393 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4394 (uint8_t *) & chunk_buf);
4395 while (ch != NULL) {
4396 chk_length = ntohs(ch->chunk_length);
4397 if (chk_length < sizeof(*ch)) {
4398 /* packet is probably corrupt */
4399 break;
4400 }
4401 /* we seem to be ok, is it an abort? */
4402 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4403 /* yep, tell them */
4404 return (1);
4405 }
4406 if (ch->chunk_type == SCTP_INITIATION) {
4407 /* need to update the Vtag */
4408 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4409 offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4410 if (init_chk != NULL) {
4411 *vtagfill = ntohl(init_chk->init.initiate_tag);
4412 }
4413 }
4414 /* Nope, move to the next chunk */
4415 offset += SCTP_SIZE32(chk_length);
4416 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4417 sizeof(*ch), (uint8_t *) & chunk_buf);
4418 }
4419 return (0);
4420 }
4421
4422 /*
4423 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4424 * set (i.e. it's 0) so, create this function to compare link local scopes
4425 */
4426 #ifdef INET6
/*
 * Compare the scopes of two IPv6 addresses; returns 1 when they match,
 * 0 otherwise.  With SCTP_EMBEDDED_V6_SCOPE, zero scope ids are first
 * recovered from the embedded form on local copies; failure to recover
 * counts as a mismatch.  Note the brace-less outer "if"s: each recover
 * call is the sole body of the preceding zero-scope check.
 */
uint32_t
sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
{
#if defined(__Userspace__)
	/*__Userspace__ Returning 1 here always */
#endif
#if defined(SCTP_EMBEDDED_V6_SCOPE)
	struct sockaddr_in6 a, b;

	/* save copies */
	a = *addr1;
	b = *addr2;

	if (a.sin6_scope_id == 0)
#ifdef SCTP_KAME
		if (sa6_recoverscope(&a)) {
#else
		if (in6_recoverscope(&a, &a.sin6_addr, NULL)) {
#endif /* SCTP_KAME */
			/* can't get scope, so can't match */
			return (0);
		}
	if (b.sin6_scope_id == 0)
#ifdef SCTP_KAME
		if (sa6_recoverscope(&b)) {
#else
		if (in6_recoverscope(&b, &b.sin6_addr, NULL)) {
#endif /* SCTP_KAME */
			/* can't get scope, so can't match */
			return (0);
		}
	if (a.sin6_scope_id != b.sin6_scope_id)
		return (0);
#else
	/* Without embedded scopes, compare the scope ids directly. */
	if (addr1->sin6_scope_id != addr2->sin6_scope_id)
		return (0);
#endif /* SCTP_EMBEDDED_V6_SCOPE */

	return (1);
}
4467
4468 #if defined(SCTP_EMBEDDED_V6_SCOPE)
/*
 * Return a sockaddr_in6 with any embedded scope recovered and removed.
 * If addr has a zero scope id, a copy with the recovered scope is built
 * in *store and returned; otherwise the embedded scope is cleared from
 * addr in place and addr itself is returned.  Only link-local addresses
 * are touched.
 */
struct sockaddr_in6 *
sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
{
	/* check and strip embedded scope junk */
	if (addr->sin6_family == AF_INET6) {
		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
			if (addr->sin6_scope_id == 0) {
				*store = *addr;
#ifdef SCTP_KAME
				if (!sa6_recoverscope(store)) {
#else
				if (!in6_recoverscope(store, &store->sin6_addr,
						      NULL)) {
#endif /* SCTP_KAME */
					/* use the recovered scope */
					addr = store;
				}
			} else {
				/* else, return the original "to" addr */
				in6_clearscope(&addr->sin6_addr);
			}
		}
	}
	return (addr);
}
4497 #endif /* SCTP_EMBEDDED_V6_SCOPE */
4498 #endif
4499
4500 /*
4501 * are the two addresses the same? currently a "scopeless" check returns: 1
4502 * if same, 0 if not
4503 */
4504 int
4505 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4506 {
4507
4508 /* must be valid */
4509 if (sa1 == NULL || sa2 == NULL)
4510 return (0);
4511
4512 /* must be the same family */
4513 if (sa1->sa_family != sa2->sa_family)
4514 return (0);
4515
4516 switch (sa1->sa_family) {
4517 #ifdef INET6
4518 case AF_INET6:
4519 {
4520 /* IPv6 addresses */
4521 struct sockaddr_in6 *sin6_1, *sin6_2;
4522
4523 sin6_1 = (struct sockaddr_in6 *)sa1;
4524 sin6_2 = (struct sockaddr_in6 *)sa2;
4525 return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4526 sin6_2));
4527 }
4528 #endif
4529 #ifdef INET
4530 case AF_INET:
4531 {
4532 /* IPv4 addresses */
4533 struct sockaddr_in *sin_1, *sin_2;
4534
4535 sin_1 = (struct sockaddr_in *)sa1;
4536 sin_2 = (struct sockaddr_in *)sa2;
4537 return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4538 }
4539 #endif
4540 #if defined(__Userspace__)
4541 case AF_CONN:
4542 {
4543 struct sockaddr_conn *sconn_1, *sconn_2;
4544
4545 sconn_1 = (struct sockaddr_conn *)sa1;
4546 sconn_2 = (struct sockaddr_conn *)sa2;
4547 return (sconn_1->sconn_addr == sconn_2->sconn_addr);
4548 }
4549 #endif
4550 default:
4551 /* we don't do these... */
4552 return (0);
4553 }
4554 }
4555
/*
 * Debug helper: print a socket address (IPv4, IPv6 or AF_CONN) in a
 * human-readable form via SCTP_PRINTF.  Unknown families print "?".
 */
void
sctp_print_address(struct sockaddr *sa)
{
#ifdef INET6
#if defined(__FreeBSD__) && __FreeBSD_version >= 700000
	char ip6buf[INET6_ADDRSTRLEN];
#endif
#endif

	switch (sa->sa_family) {
#ifdef INET6
	case AF_INET6:
	{
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *)sa;
#if defined(__Userspace__)
		/* Userspace: format the 16-bit groups by hand. */
		SCTP_PRINTF("IPv6 address: %x:%x:%x:%x:%x:%x:%x:%x:port:%d scope:%u\n",
			    ntohs(sin6->sin6_addr.s6_addr16[0]),
			    ntohs(sin6->sin6_addr.s6_addr16[1]),
			    ntohs(sin6->sin6_addr.s6_addr16[2]),
			    ntohs(sin6->sin6_addr.s6_addr16[3]),
			    ntohs(sin6->sin6_addr.s6_addr16[4]),
			    ntohs(sin6->sin6_addr.s6_addr16[5]),
			    ntohs(sin6->sin6_addr.s6_addr16[6]),
			    ntohs(sin6->sin6_addr.s6_addr16[7]),
			    ntohs(sin6->sin6_port),
			    sin6->sin6_scope_id);
#else
#if defined(__FreeBSD__) && __FreeBSD_version >= 700000
		/* Newer FreeBSD: caller-supplied buffer variant of ip6_sprintf. */
		SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
			    ntohs(sin6->sin6_port),
			    sin6->sin6_scope_id);
#else
		SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
			    ip6_sprintf(&sin6->sin6_addr),
			    ntohs(sin6->sin6_port),
			    sin6->sin6_scope_id);
#endif
#endif
		break;
	}
#endif
#ifdef INET
	case AF_INET:
	{
		struct sockaddr_in *sin;
		unsigned char *p;

		/* Print the IPv4 address byte by byte in network order. */
		sin = (struct sockaddr_in *)sa;
		p = (unsigned char *)&sin->sin_addr;
		SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
		break;
	}
#endif
#if defined(__Userspace__)
	case AF_CONN:
	{
		struct sockaddr_conn *sconn;

		sconn = (struct sockaddr_conn *)sa;
		SCTP_PRINTF("AF_CONN address: %p\n", sconn->sconn_addr);
		break;
	}
#endif
	default:
		SCTP_PRINTF("?\n");
		break;
	}
}
4628
/*
 * Move every read-queue control structure belonging to "stcb" from the
 * old inp/socket to the new one (used by peeloff/accept).  Receive-buffer
 * accounting is released on the old socket and re-charged on the new one.
 * On FreeBSD/APPLE the old socket's receive buffer is sb-locked for the
 * duration; if that lock cannot be obtained the data is left in place.
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
				 struct sctp_inpcb *new_inp,
				 struct sctp_tcb *stcb,
				 int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
#if defined(__FreeBSD__) || defined(__APPLE__)
	int error = 0;
#endif

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
#if defined(__FreeBSD__) || defined(__APPLE__)
#if defined(__FreeBSD__) && __FreeBSD_version < 700000
	SOCKBUF_LOCK(&(old_so->so_rcv));
#endif
	error = sblock(&old_so->so_rcv, waitflags);
#if defined(__FreeBSD__) && __FreeBSD_version < 700000
	SOCKBUF_UNLOCK(&(old_so->so_rcv));
#endif
	if (error) {
		/* Gak, can't get sblock, we have a problem.
		 * data will be left stranded.. and we
		 * don't dare look at it since the
		 * other thread may be reading something.
		 * Oh well, its a screwed up app that does
		 * a peeloff OR a accept while reading
		 * from the main socket... actually its
		 * only the peeloff() case, since I think
		 * read will fail on a listening socket..
		 */
		return;
	}
#endif
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for out target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* Release the old socket's rcv-buffer accounting. */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE,SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */
#if defined(__FreeBSD__) && __FreeBSD_version < 700000
	SOCKBUF_LOCK(&(old_so->so_rcv));
#endif
#if defined(__APPLE__)
	sbunlock(&old_so->so_rcv, 1);
#endif

#if defined(__FreeBSD__)
	sbunlock(&old_so->so_rcv);
#endif
#if defined(__FreeBSD__) && __FreeBSD_version < 700000
	SOCKBUF_UNLOCK(&(old_so->so_rcv));
#endif
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* Charge the data against the new socket's rcv buffer. */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4726
/*
 * Wake up any reader sleeping on the endpoint's socket.  On platforms
 * with socket locking, the socket lock is acquired first (dropping and
 * re-taking the TCB lock around it, with a refcount pinning the TCB);
 * the wakeup is skipped if the socket went away in that window.
 */
void
sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
			    struct sctp_tcb *stcb,
			    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
			    SCTP_UNUSED
#endif
)
{
	if ((inp != NULL) && (inp->sctp_socket != NULL)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(inp);
		if (!so_locked) {
			if (stcb) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
			}
			SCTP_SOCKET_LOCK(so, 1);
			if (stcb) {
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* Socket disappeared while we juggled locks. */
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
4765 #if defined(__Userspace__)
4766
4767 void
4768 sctp_invoke_recv_callback(struct sctp_inpcb *inp,
4769 struct sctp_tcb *stcb,
4770 struct sctp_queued_to_read *control,
4771 int inp_read_lock_held)
4772 {
4773 uint32_t pd_point, length;
4774
4775 if ((inp->recv_callback == NULL) ||
4776 (stcb == NULL) ||
4777 (stcb->sctp_socket == NULL)) {
4778 return;
4779 }
4780
4781 length = control->length;
4782 if (stcb != NULL && stcb->sctp_socket != NULL) {
4783 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
4784 stcb->sctp_ep->partial_delivery_point);
4785 } else {
4786 pd_point = inp->partial_delivery_point;
4787 }
4788 if ((control->end_added == 1) || (length >= pd_point)) {
4789 struct socket *so;
4790 struct mbuf *m;
4791 char *buffer;
4792 struct sctp_rcvinfo rcv;
4793 union sctp_sockstore addr;
4794 int flags;
4795
4796 if ((buffer = malloc(length)) == NULL) {
4797 return;
4798 }
4799 if (inp_read_lock_held == 0) {
4800 SCTP_INP_READ_LOCK(inp);
4801 }
4802 so = stcb->sctp_socket;
4803 for (m = control->data; m; m = SCTP_BUF_NEXT(m)) {
4804 sctp_sbfree(control, control->stcb, &so->so_rcv, m);
4805 }
4806 m_copydata(control->data, 0, length, buffer);
4807 memset(&rcv, 0, sizeof(struct sctp_rcvinfo));
4808 rcv.rcv_sid = control->sinfo_stream;
4809 rcv.rcv_ssn = (uint16_t)control->mid;
4810 rcv.rcv_flags = control->sinfo_flags;
4811 rcv.rcv_ppid = control->sinfo_ppid;
4812 rcv.rcv_tsn = control->sinfo_tsn;
4813 rcv.rcv_cumtsn = control->sinfo_cumtsn;
4814 rcv.rcv_context = control->sinfo_context;
4815 rcv.rcv_assoc_id = control->sinfo_assoc_id;
4816 memset(&addr, 0, sizeof(union sctp_sockstore));
4817 switch (control->whoFrom->ro._l_addr.sa.sa_family) {
4818 #ifdef INET
4819 case AF_INET:
4820 addr.sin = control->whoFrom->ro._l_addr.sin;
4821 break;
4822 #endif
4823 #ifdef INET6
4824 case AF_INET6:
4825 addr.sin6 = control->whoFrom->ro._l_addr.sin6;
4826 break;
4827 #endif
4828 case AF_CONN:
4829 addr.sconn = control->whoFrom->ro._l_addr.sconn;
4830 break;
4831 default:
4832 addr.sa = control->whoFrom->ro._l_addr.sa;
4833 break;
4834 }
4835 flags = 0;
4836 if (control->end_added == 1) {
4837 flags |= MSG_EOR;
4838 }
4839 if (control->spec_flags & M_NOTIFICATION) {
4840 flags |= MSG_NOTIFICATION;
4841 }
4842 sctp_m_freem(control->data);
4843 control->data = NULL;
4844 control->tail_mbuf = NULL;
4845 control->length = 0;
4846 if (control->end_added) {
4847 TAILQ_REMOVE(&stcb->sctp_ep->read_queue, control, next);
4848 control->on_read_q = 0;
4849 sctp_free_remote_addr(control->whoFrom);
4850 control->whoFrom = NULL;
4851 sctp_free_a_readq(stcb, control);
4852 }
4853 atomic_add_int(&stcb->asoc.refcnt, 1);
4854 SCTP_TCB_UNLOCK(stcb);
4855 if (inp_read_lock_held == 0) {
4856 SCTP_INP_READ_UNLOCK(inp);
4857 }
4858 inp->recv_callback(so, addr, buffer, length, rcv, flags, inp->ulp_info);
4859 SCTP_TCB_LOCK(stcb);
4860 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4861 }
4862 }
4863 #endif
4864
/*
 * Append a control structure (with its mbuf chain) to the endpoint's read
 * queue and charge the data against the socket buffer "sb" so select/poll
 * see it.  Zero-length mbufs are pruned from the chain; if everything
 * collapses away the control is freed instead of queued.  "end" marks the
 * message complete; "inp_read_lock_held" and "so_locked" describe the
 * caller's lock state.  Finally the reader is woken (and, on userspace
 * builds, the receive callback is invoked).
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
		  struct sctp_tcb *stcb,
		  struct sctp_queued_to_read *control,
		  struct sockbuf *sb,
		  int end,
		  int inp_read_lock_held,
		  int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
		  SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
#if defined(__APPLE__)
	/* Verify the caller's socket-lock claim. */
	if (so_locked) {
		sctp_lock_assert(SCTP_INP_SO(inp));
	} else {
		sctp_unlock_assert(SCTP_INP_SO(inp));
	}
#endif
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* Reader side closed: drop everything instead of queueing. */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		sctp_free_a_readq(stcb, control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	/* Notifications are not counted as user receives. */
	if (!(control->spec_flags & M_NOTIFICATION)) {
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	/* Walk the chain: prune empty mbufs, charge the rest against sb. */
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		sctp_free_remote_addr(control->whoFrom);
		sctp_free_a_readq(stcb, control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	control->on_read_q = 1;
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
#if defined(__Userspace__)
	sctp_invoke_recv_callback(inp, stcb, control, inp_read_lock_held);
#endif
	if (inp && inp->sctp_socket) {
		sctp_wakeup_the_read_socket(inp, stcb, so_locked);
	}
}
4972
4973 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4974 *************ALTERNATE ROUTING CODE
4975 */
4976
4977 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4978 *************ALTERNATE ROUTING CODE
4979 */
4980
4981 struct mbuf *
4982 sctp_generate_cause(uint16_t code, char *info)
4983 {
4984 struct mbuf *m;
4985 struct sctp_gen_error_cause *cause;
4986 size_t info_len;
4987 uint16_t len;
4988
4989 if ((code == 0) || (info == NULL)) {
4990 return (NULL);
4991 }
4992 info_len = strlen(info);
4993 if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
4994 return (NULL);
4995 }
4996 len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len);
4997 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4998 if (m != NULL) {
4999 SCTP_BUF_LEN(m) = len;
5000 cause = mtod(m, struct sctp_gen_error_cause *);
5001 cause->code = htons(code);
5002 cause->length = htons(len);
5003 memcpy(cause->info, info, info_len);
5004 }
5005 return (m);
5006 }
5007
5008 struct mbuf *
5009 sctp_generate_no_user_data_cause(uint32_t tsn)
5010 {
5011 struct mbuf *m;
5012 struct sctp_error_no_user_data *no_user_data_cause;
5013 uint16_t len;
5014
5015 len = (uint16_t)sizeof(struct sctp_error_no_user_data);
5016 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
5017 if (m != NULL) {
5018 SCTP_BUF_LEN(m) = len;
5019 no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
5020 no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
5021 no_user_data_cause->cause.length = htons(len);
5022 no_user_data_cause->tsn = htonl(tsn);
5023 }
5024 return (m);
5025 }
5026
5027 #ifdef SCTP_MBCNT_LOGGING
/*
 * Release the buffer-space accounting held by chunk tp1: subtract
 * chk_cnt from the association's outbound chunk count, take the
 * chunk's booked size off the total output queue size and, for
 * 1-to-1 style (TCP-pool/TCP-type) sockets, off the socket send
 * buffer byte count as well.  Compiled only when SCTP_MBCNT_LOGGING
 * is defined (a non-logging variant presumably exists elsewhere --
 * confirm against the header).
 */
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
	if (tp1->data == NULL) {
		/* No data attached: nothing was booked for this chunk. */
		return;
	}
	asoc->chunks_on_out_queue -= chk_cnt;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
		    asoc->total_output_queue_size,
		    tp1->book_size,
		    0,
		    tp1->mbcnt);
	}
	/* Clamp at zero instead of underflowing the unsigned counter. */
	if (asoc->total_output_queue_size >= tp1->book_size) {
		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
	} else {
		asoc->total_output_queue_size = 0;
	}

	/* Only 1-to-1 style sockets charge bytes to the send buffer. */
	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
		} else {
			/* Clamp at zero, as above. */
			stcb->sctp_socket->so_snd.sb_cc = 0;

		}
	}
}
5059
5060 #endif
5061
/*
 * Abandon the (possibly fragmented) PR-SCTP message that chunk tp1 belongs
 * to.  Every fragment of the message has its data freed, is marked
 * SCTP_FORWARD_TSN_SKIP, and triggers a SENT/UNSENT datagram-failure
 * notification to the user ('sent' selects which).  Fragments may live on
 * the sent queue, the send queue, and the stream-out queue; all three are
 * walked as needed.  Returns the number of booked bytes released.
 *
 * Locking: caller holds the TCB lock.  The TCB send lock is taken here
 * when the stream-out queue must be modified.  On Apple/lock-testing
 * builds the socket lock may be dropped and re-taken for the wakeup.
 */
int
sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
    uint8_t sent, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_stream_out *strq;
	struct sctp_tmit_chunk *chk = NULL, *tp2;
	struct sctp_stream_queue_pending *sp;
	uint32_t mid;
	uint16_t sid;
	uint8_t foundeom = 0;		/* set once the message's last fragment is seen */
	int ret_sz = 0;			/* booked bytes released, returned to caller */
	int notdone;
	int do_wakeup_routine = 0;	/* wake the sending socket if we freed anything */

#if defined(__APPLE__)
	if (so_locked) {
		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
	} else {
		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
	}
#endif
	sid = tp1->rec.data.sid;
	mid = tp1->rec.data.mid;
	/*
	 * Statistics: count the abandonment as "sent" if it was ever put on
	 * the wire, or if tp1 is not the first fragment (earlier fragments
	 * were then already transmitted); otherwise count it as "unsent".
	 */
	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
		stcb->asoc.abandoned_sent[0]++;
		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
		stcb->asoc.strmout[sid].abandoned_sent[0]++;
#if defined(SCTP_DETAILED_STR_STATS)
		stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
#endif
	} else {
		stcb->asoc.abandoned_unsent[0]++;
		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
		stcb->asoc.strmout[sid].abandoned_unsent[0]++;
#if defined(SCTP_DETAILED_STR_STATS)
		stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
#endif
	}
	/*
	 * Pass 1: walk forward from tp1 on its current queue, releasing each
	 * fragment of this message until the last fragment (or an unfragmented
	 * chunk) is found.
	 */
	do {
		ret_sz += tp1->book_size;
		if (tp1->data != NULL) {
			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
				/* Still counted in flight: remove it. */
				sctp_flight_size_decrease(tp1);
				sctp_total_flight_decrease(stcb, tp1);
			}
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			/* Give the peer's window credit back. */
			stcb->asoc.peers_rwnd += tp1->send_size;
			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
			if (sent) {
				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
			} else {
				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
			}
			if (tp1->data) {
				sctp_m_freem(tp1->data);
				tp1->data = NULL;
			}
			do_wakeup_routine = 1;
			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
				stcb->asoc.sent_queue_cnt_removeable--;
			}
		}
		tp1->sent = SCTP_FORWARD_TSN_SKIP;
		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
		    SCTP_DATA_NOT_FRAG) {
			/* not frag'ed we are done */
			notdone = 0;
			foundeom = 1;
		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			/* end of frag, we are done */
			notdone = 0;
			foundeom = 1;
		} else {
			/*
			 * Its a begin or middle piece, we must mark all of
			 * it
			 */
			notdone = 1;
			tp1 = TAILQ_NEXT(tp1, sctp_next);
		}
	} while (tp1 && notdone);
	if (foundeom == 0) {
		/*
		 * The multi-part message was scattered across the send and
		 * sent queue.  Pass 2: continue releasing fragments from the
		 * head of the send queue.
		 */
		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
			if ((tp1->rec.data.sid != sid) ||
			    (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) {
				/* Different message: fragments of ours stop here. */
				break;
			}
			/* save to chk in case we have some on stream out
			 * queue. If so and we have an un-transmitted one
			 * we don't have to fudge the TSN.
			 */
			chk = tp1;
			ret_sz += tp1->book_size;
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			if (sent) {
				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
			} else {
				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
			}
			if (tp1->data) {
				sctp_m_freem(tp1->data);
				tp1->data = NULL;
			}
			/* No flight involved here book the size to 0 */
			tp1->book_size = 0;
			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				foundeom = 1;
			}
			do_wakeup_routine = 1;
			tp1->sent = SCTP_FORWARD_TSN_SKIP;
			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
			/* on to the sent queue so we can wait for it to be passed by. */
			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
			    sctp_next);
			stcb->asoc.send_queue_cnt--;
			stcb->asoc.sent_queue_cnt++;
		}
	}
	if (foundeom == 0) {
		/*
		 * Still no eom found. That means there
		 * is stuff left on the stream out queue.. yuck.
		 */
		SCTP_TCB_SEND_LOCK(stcb);
		strq = &stcb->asoc.strmout[sid];
		sp = TAILQ_FIRST(&strq->outqueue);
		if (sp != NULL) {
			/* Tell the output path to drop the rest of this message. */
			sp->discard_rest = 1;
			/*
			 * We may need to put a chunk on the
			 * queue that holds the TSN that
			 * would have been sent with the LAST
			 * bit.
			 */
			if (chk == NULL) {
				/* Yep, we have to */
				sctp_alloc_a_chunk(stcb, chk);
				if (chk == NULL) {
					/* we are hosed. All we can
					 * do is nothing.. which will
					 * cause an abort if the peer is
					 * paying attention.
					 */
					goto oh_well;
				}
				/* Build a placeholder LAST-fragment chunk. */
				memset(chk, 0, sizeof(*chk));
				chk->rec.data.rcv_flags = 0;
				chk->sent = SCTP_FORWARD_TSN_SKIP;
				chk->asoc = &stcb->asoc;
				if (stcb->asoc.idata_supported == 0) {
					if (sp->sinfo_flags & SCTP_UNORDERED) {
						chk->rec.data.mid = 0;
					} else {
						chk->rec.data.mid = strq->next_mid_ordered;
					}
				} else {
					if (sp->sinfo_flags & SCTP_UNORDERED) {
						chk->rec.data.mid = strq->next_mid_unordered;
					} else {
						chk->rec.data.mid = strq->next_mid_ordered;
					}
				}
				chk->rec.data.sid = sp->sid;
				chk->rec.data.ppid = sp->ppid;
				chk->rec.data.context = sp->context;
				chk->flags = sp->act_flags;
				chk->whoTo = NULL;
				/* Consume a TSN for the placeholder. */
#if defined(__FreeBSD__) || defined(__Panda__)
				chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
#else
				chk->rec.data.tsn = stcb->asoc.sending_seq++;
#endif
				strq->chunks_on_queues++;
				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
				stcb->asoc.sent_queue_cnt++;
				stcb->asoc.pr_sctp_cnt++;
			}
			/* Mark the (reused or new) chunk as the message's end. */
			chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
			if (sp->sinfo_flags & SCTP_UNORDERED) {
				chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED;
			}
			/* Advance the stream's MID so later messages stay in sequence. */
			if (stcb->asoc.idata_supported == 0) {
				if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) {
					strq->next_mid_ordered++;
				}
			} else {
				if (sp->sinfo_flags & SCTP_UNORDERED) {
					strq->next_mid_unordered++;
				} else {
					strq->next_mid_ordered++;
				}
			}
		oh_well:
			if (sp->data) {
				/* Pull any data to free up the SB and
				 * allow sender to "add more" while we
				 * will throw away :-)
				 */
				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
				ret_sz += sp->length;
				do_wakeup_routine = 1;
				sp->some_taken = 1;
				sctp_m_freem(sp->data);
				sp->data = NULL;
				sp->tail_mbuf = NULL;
				sp->length = 0;
			}
		}
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
	if (do_wakeup_routine) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/* Take a ref so the assoc survives dropping the TCB lock. */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				/* assoc was freed while we were unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return (ret_sz);
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
	return (ret_sz);
}
5307
5308 /*
5309 * checks to see if the given address, sa, is one that is currently known by
5310 * the kernel note: can't distinguish the same address on multiple interfaces
5311 * and doesn't handle multiple addresses with different zone/scope id's note:
5312 * ifa_ifwithaddr() compares the entire sockaddr struct
5313 */
5314 struct sctp_ifa *
5315 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
5316 int holds_lock)
5317 {
5318 struct sctp_laddr *laddr;
5319
5320 if (holds_lock == 0) {
5321 SCTP_INP_RLOCK(inp);
5322 }
5323
5324 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
5325 if (laddr->ifa == NULL)
5326 continue;
5327 if (addr->sa_family != laddr->ifa->address.sa.sa_family)
5328 continue;
5329 #ifdef INET
5330 if (addr->sa_family == AF_INET) {
5331 if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5332 laddr->ifa->address.sin.sin_addr.s_addr) {
5333 /* found him. */
5334 if (holds_lock == 0) {
5335 SCTP_INP_RUNLOCK(inp);
5336 }
5337 return (laddr->ifa);
5338 break;
5339 }
5340 }
5341 #endif
5342 #ifdef INET6
5343 if (addr->sa_family == AF_INET6) {
5344 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5345 &laddr->ifa->address.sin6)) {
5346 /* found him. */
5347 if (holds_lock == 0) {
5348 SCTP_INP_RUNLOCK(inp);
5349 }
5350 return (laddr->ifa);
5351 break;
5352 }
5353 }
5354 #endif
5355 #if defined(__Userspace__)
5356 if (addr->sa_family == AF_CONN) {
5357 if (((struct sockaddr_conn *)addr)->sconn_addr == laddr->ifa->address.sconn.sconn_addr) {
5358 /* found him. */
5359 if (holds_lock == 0) {
5360 SCTP_INP_RUNLOCK(inp);
5361 }
5362 return (laddr->ifa);
5363 break;
5364 }
5365 }
5366 #endif
5367 }
5368 if (holds_lock == 0) {
5369 SCTP_INP_RUNLOCK(inp);
5370 }
5371 return (NULL);
5372 }
5373
/*
 * Fold an address down to a 32-bit hash-table bucket value.  Only the
 * address itself contributes (ports and scope ids are ignored);
 * address families with no compiled-in support hash to 0.
 */
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
	uint32_t h;

	switch (addr->sa_family) {
#ifdef INET
	case AF_INET:
	{
		struct sockaddr_in *sin4;

		sin4 = (struct sockaddr_in *)addr;
		/* XOR-fold the high half of the IPv4 address into the low half. */
		h = sin4->sin_addr.s_addr;
		return (h ^ (h >> 16));
	}
#endif
#ifdef INET6
	case AF_INET6:
	{
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *)addr;
		/* Sum the four 32-bit words (wrap-around is fine), then fold. */
#if !defined(__Windows__) && !defined(__Userspace_os_FreeBSD) && !defined(__Userspace_os_Darwin) && !defined(__Userspace_os_Windows)
		h = sin6->sin6_addr.s6_addr32[0] +
		    sin6->sin6_addr.s6_addr32[1] +
		    sin6->sin6_addr.s6_addr32[2] +
		    sin6->sin6_addr.s6_addr32[3];
#else
		/* Platforms lacking s6_addr32: access the words directly. */
		h = ((uint32_t *)&sin6->sin6_addr)[0] +
		    ((uint32_t *)&sin6->sin6_addr)[1] +
		    ((uint32_t *)&sin6->sin6_addr)[2] +
		    ((uint32_t *)&sin6->sin6_addr)[3];
#endif
		return (h ^ (h >> 16));
	}
#endif
#if defined(__Userspace__)
	case AF_CONN:
	{
		uintptr_t p;

		/* AF_CONN addresses are opaque pointers; fold the pointer bits. */
		p = (uintptr_t)((struct sockaddr_conn *)addr)->sconn_addr;
		return ((uint32_t)(p ^ (p >> 16)));
	}
#endif
	default:
		break;
	}
	return (0);
}
5425
5426 struct sctp_ifa *
5427 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5428 {
5429 struct sctp_ifa *sctp_ifap;
5430 struct sctp_vrf *vrf;
5431 struct sctp_ifalist *hash_head;
5432 uint32_t hash_of_addr;
5433
5434 if (holds_lock == 0)
5435 SCTP_IPI_ADDR_RLOCK();
5436
5437 vrf = sctp_find_vrf(vrf_id);
5438 if (vrf == NULL) {
5439 if (holds_lock == 0)
5440 SCTP_IPI_ADDR_RUNLOCK();
5441 return (NULL);
5442 }
5443
5444 hash_of_addr = sctp_get_ifa_hash_val(addr);
5445
5446 hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5447 if (hash_head == NULL) {
5448 SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5449 hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark,
5450 (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark));
5451 sctp_print_address(addr);
5452 SCTP_PRINTF("No such bucket for address\n");
5453 if (holds_lock == 0)
5454 SCTP_IPI_ADDR_RUNLOCK();
5455
5456 return (NULL);
5457 }
5458 LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5459 if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5460 continue;
5461 #ifdef INET
5462 if (addr->sa_family == AF_INET) {
5463 if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5464 sctp_ifap->address.sin.sin_addr.s_addr) {
5465 /* found him. */
5466 if (holds_lock == 0)
5467 SCTP_IPI_ADDR_RUNLOCK();
5468 return (sctp_ifap);
5469 break;
5470 }
5471 }
5472 #endif
5473 #ifdef INET6
5474 if (addr->sa_family == AF_INET6) {
5475 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5476 &sctp_ifap->address.sin6)) {
5477 /* found him. */
5478 if (holds_lock == 0)
5479 SCTP_IPI_ADDR_RUNLOCK();
5480 return (sctp_ifap);
5481 break;
5482 }
5483 }
5484 #endif
5485 #if defined(__Userspace__)
5486 if (addr->sa_family == AF_CONN) {
5487 if (((struct sockaddr_conn *)addr)->sconn_addr == sctp_ifap->address.sconn.sconn_addr) {
5488 /* found him. */
5489 if (holds_lock == 0)
5490 SCTP_IPI_ADDR_RUNLOCK();
5491 return (sctp_ifap);
5492 break;
5493 }
5494 }
5495 #endif
5496 }
5497 if (holds_lock == 0)
5498 SCTP_IPI_ADDR_RUNLOCK();
5499 return (NULL);
5500 }
5501
/*
 * Called after the user has consumed *freed_so_far bytes from the socket
 * receive buffer: decide whether the receive window has opened by at
 * least rwnd_req bytes since the last report and, if so, send a
 * window-update SACK (and kick the output path).  Otherwise just record
 * the pending amount in stcb->freed_by_sorcv_sincelast.
 *
 * hold_rlock != 0 means the caller holds the INP read-queue lock; it is
 * dropped around the SACK transmission and re-taken before returning.
 * A refcount on the association is held for the duration so it cannot
 * be freed under us.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Pin the association while we work. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* Accumulate the caller's freed bytes, then reset their counter. */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		/* Window shrank; nothing to report. */
		dif = 0;
	}
	if (dif >= rwnd_req) {
		if (hold_rlock) {
			/* Drop the read lock before taking the TCB lock below. */
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	if (so && r_unlocked && hold_rlock) {
		/* Re-acquire the read lock for the caller. */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}

	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5585
5586 int
5587 sctp_sorecvmsg(struct socket *so,
5588 struct uio *uio,
5589 struct mbuf **mp,
5590 struct sockaddr *from,
5591 int fromlen,
5592 int *msg_flags,
5593 struct sctp_sndrcvinfo *sinfo,
5594 int filling_sinfo)
5595 {
5596 /*
5597 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5598 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5599 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5600 * On the way out we may send out any combination of:
5601 * MSG_NOTIFICATION MSG_EOR
5602 *
5603 */
5604 struct sctp_inpcb *inp = NULL;
5605 int my_len = 0;
5606 int cp_len = 0, error = 0;
5607 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5608 struct mbuf *m = NULL;
5609 struct sctp_tcb *stcb = NULL;
5610 int wakeup_read_socket = 0;
5611 int freecnt_applied = 0;
5612 int out_flags = 0, in_flags = 0;
5613 int block_allowed = 1;
5614 uint32_t freed_so_far = 0;
5615 uint32_t copied_so_far = 0;
5616 int in_eeor_mode = 0;
5617 int no_rcv_needed = 0;
5618 uint32_t rwnd_req = 0;
5619 int hold_sblock = 0;
5620 int hold_rlock = 0;
5621 ssize_t slen = 0;
5622 uint32_t held_length = 0;
5623 #if defined(__FreeBSD__) && __FreeBSD_version >= 700000
5624 int sockbuf_lock = 0;
5625 #endif
5626
5627 if (uio == NULL) {
5628 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5629 return (EINVAL);
5630 }
5631
5632 if (msg_flags) {
5633 in_flags = *msg_flags;
5634 if (in_flags & MSG_PEEK)
5635 SCTP_STAT_INCR(sctps_read_peeks);
5636 } else {
5637 in_flags = 0;
5638 }
5639 #if defined(__APPLE__)
5640 #if defined(APPLE_LEOPARD)
5641 slen = uio->uio_resid;
5642 #else
5643 slen = uio_resid(uio);
5644 #endif
5645 #else
5646 slen = uio->uio_resid;
5647 #endif
5648
5649 /* Pull in and set up our int flags */
5650 if (in_flags & MSG_OOB) {
5651 /* Out of band's NOT supported */
5652 return (EOPNOTSUPP);
5653 }
5654 if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5655 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5656 return (EINVAL);
5657 }
5658 if ((in_flags & (MSG_DONTWAIT
5659 #if defined(__FreeBSD__) && __FreeBSD_version > 500000
5660 | MSG_NBIO
5661 #endif
5662 )) ||
5663 SCTP_SO_IS_NBIO(so)) {
5664 block_allowed = 0;
5665 }
5666 /* setup the endpoint */
5667 inp = (struct sctp_inpcb *)so->so_pcb;
5668 if (inp == NULL) {
5669 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5670 return (EFAULT);
5671 }
5672 rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5673 /* Must be at least a MTU's worth */
5674 if (rwnd_req < SCTP_MIN_RWND)
5675 rwnd_req = SCTP_MIN_RWND;
5676 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5677 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5678 #if defined(__APPLE__)
5679 #if defined(APPLE_LEOPARD)
5680 sctp_misc_ints(SCTP_SORECV_ENTER,
5681 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5682 #else
5683 sctp_misc_ints(SCTP_SORECV_ENTER,
5684 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio_resid(uio));
5685 #endif
5686 #else
5687 sctp_misc_ints(SCTP_SORECV_ENTER,
5688 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5689 #endif
5690 }
5691 #if (defined(__FreeBSD__) && __FreeBSD_version < 700000) || defined(__Userspace__)
5692 SOCKBUF_LOCK(&so->so_rcv);
5693 hold_sblock = 1;
5694 #endif
5695 if (SCTP_BASE_SYSCTL(sctp_logging_level) &SCTP_RECV_RWND_LOGGING_ENABLE) {
5696 #if defined(__APPLE__)
5697 #if defined(APPLE_LEOPARD)
5698 sctp_misc_ints(SCTP_SORECV_ENTERPL,
5699 rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5700 #else
5701 sctp_misc_ints(SCTP_SORECV_ENTERPL,
5702 rwnd_req, block_allowed, so->so_rcv.sb_cc, uio_resid(uio));
5703 #endif
5704 #else
5705 sctp_misc_ints(SCTP_SORECV_ENTERPL,
5706 rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5707 #endif
5708 }
5709
5710 #if defined(__APPLE__)
5711 error = sblock(&so->so_rcv, SBLOCKWAIT(in_flags));
5712 #endif
5713
5714 #if defined(__FreeBSD__)
5715 error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5716 #endif
5717 if (error) {
5718 goto release_unlocked;
5719 }
5720 #if defined(__FreeBSD__) && __FreeBSD_version >= 700000
5721 sockbuf_lock = 1;
5722 #endif
5723 restart:
5724 #if (defined(__FreeBSD__) && __FreeBSD_version < 700000) || defined(__Userspace__)
5725 if (hold_sblock == 0) {
5726 SOCKBUF_LOCK(&so->so_rcv);
5727 hold_sblock = 1;
5728 }
5729 #endif
5730 #if defined(__APPLE__)
5731 sbunlock(&so->so_rcv, 1);
5732 #endif
5733
5734 #if defined(__FreeBSD__) && __FreeBSD_version < 700000
5735 sbunlock(&so->so_rcv);
5736 #endif
5737
5738 restart_nosblocks:
5739 if (hold_sblock == 0) {
5740 SOCKBUF_LOCK(&so->so_rcv);
5741 hold_sblock = 1;
5742 }
5743 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5744 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5745 goto out;
5746 }
5747 #if (defined(__FreeBSD__) && __FreeBSD_version > 500000) || defined(__Windows__)
5748 if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5749 #else
5750 if ((so->so_state & SS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5751 #endif
5752 if (so->so_error) {
5753 error = so->so_error;
5754 if ((in_flags & MSG_PEEK) == 0)
5755 so->so_error = 0;
5756 goto out;
5757 } else {
5758 if (so->so_rcv.sb_cc == 0) {
5759 /* indicate EOF */
5760 error = 0;
5761 goto out;
5762 }
5763 }
5764 }
5765 if (so->so_rcv.sb_cc <= held_length) {
5766 if (so->so_error) {
5767 error = so->so_error;
5768 if ((in_flags & MSG_PEEK) == 0) {
5769 so->so_error = 0;
5770 }
5771 goto out;
5772 }
5773 if ((so->so_rcv.sb_cc == 0) &&
5774 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5775 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5776 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5777 /* For active open side clear flags for re-use
5778 * passive open is blocked by connect.
5779 */
5780 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5781 /* You were aborted, passive side always hits here */
5782 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5783 error = ECONNRESET;
5784 }
5785 so->so_state &= ~(SS_ISCONNECTING |
5786 SS_ISDISCONNECTING |
5787 SS_ISCONFIRMING |
5788 SS_ISCONNECTED);
5789 if (error == 0) {
5790 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5791 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5792 error = ENOTCONN;
5793 }
5794 }
5795 goto out;
5796 }
5797 }
5798 if (block_allowed) {
5799 error = sbwait(&so->so_rcv);
5800 if (error) {
5801 goto out;
5802 }
5803 held_length = 0;
5804 goto restart_nosblocks;
5805 } else {
5806 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5807 error = EWOULDBLOCK;
5808 goto out;
5809 }
5810 }
5811 if (hold_sblock == 1) {
5812 SOCKBUF_UNLOCK(&so->so_rcv);
5813 hold_sblock = 0;
5814 }
5815 #if defined(__APPLE__)
5816 error = sblock(&so->so_rcv, SBLOCKWAIT(in_flags));
5817 #endif
5818 #if defined(__FreeBSD__) && __FreeBSD_version < 700000
5819 error = sblock(&so->so_rcv, (block_allowed ? M_WAITOK : 0));
5820 #endif
5821 /* we possibly have data we can read */
5822 /*sa_ignore FREED_MEMORY*/
5823 control = TAILQ_FIRST(&inp->read_queue);
5824 if (control == NULL) {
5825 /* This could be happening since
5826 * the appender did the increment but as not
5827 * yet did the tailq insert onto the read_queue
5828 */
5829 if (hold_rlock == 0) {
5830 SCTP_INP_READ_LOCK(inp);
5831 }
5832 control = TAILQ_FIRST(&inp->read_queue);
5833 if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5834 #ifdef INVARIANTS
5835 panic("Huh, its non zero and nothing on control?");
5836 #endif
5837 so->so_rcv.sb_cc = 0;
5838 }
5839 SCTP_INP_READ_UNLOCK(inp);
5840 hold_rlock = 0;
5841 goto restart;
5842 }
5843
5844 if ((control->length == 0) &&
5845 (control->do_not_ref_stcb)) {
5846 /* Clean up code for freeing assoc that left behind a pdapi..
5847 * maybe a peer in EEOR that just closed after sending and
5848 * never indicated a EOR.
5849 */
5850 if (hold_rlock == 0) {
5851 hold_rlock = 1;
5852 SCTP_INP_READ_LOCK(inp);
5853 }
5854 control->held_length = 0;
5855 if (control->data) {
5856 /* Hmm there is data here .. fix */
5857 struct mbuf *m_tmp;
5858 int cnt = 0;
5859 m_tmp = control->data;
5860 while (m_tmp) {
5861 cnt += SCTP_BUF_LEN(m_tmp);
5862 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5863 control->tail_mbuf = m_tmp;
5864 control->end_added = 1;
5865 }
5866 m_tmp = SCTP_BUF_NEXT(m_tmp);
5867 }
5868 control->length = cnt;
5869 } else {
5870 /* remove it */
5871 TAILQ_REMOVE(&inp->read_queue, control, next);
5872 /* Add back any hiddend data */
5873 sctp_free_remote_addr(control->whoFrom);
5874 sctp_free_a_readq(stcb, control);
5875 }
5876 if (hold_rlock) {
5877 hold_rlock = 0;
5878 SCTP_INP_READ_UNLOCK(inp);
5879 }
5880 goto restart;
5881 }
5882 if ((control->length == 0) &&
5883 (control->end_added == 1)) {
5884 /* Do we also need to check for (control->pdapi_aborted == 1)? */
5885 if (hold_rlock == 0) {
5886 hold_rlock = 1;
5887 SCTP_INP_READ_LOCK(inp);
5888 }
5889 TAILQ_REMOVE(&inp->read_queue, control, next);
5890 if (control->data) {
5891 #ifdef INVARIANTS
5892 panic("control->data not null but control->length == 0");
5893 #else
5894 SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5895 sctp_m_freem(control->data);
5896 control->data = NULL;
5897 #endif
5898 }
5899 if (control->aux_data) {
5900 sctp_m_free (control->aux_data);
5901 control->aux_data = NULL;
5902 }
5903 #ifdef INVARIANTS
5904 if (control->on_strm_q) {
5905 panic("About to free ctl:%p so:%p and its in %d",
5906 control, so, control->on_strm_q);
5907 }
5908 #endif
5909 sctp_free_remote_addr(control->whoFrom);
5910 sctp_free_a_readq(stcb, control);
5911 if (hold_rlock) {
5912 hold_rlock = 0;
5913 SCTP_INP_READ_UNLOCK(inp);
5914 }
5915 goto restart;
5916 }
5917 if (control->length == 0) {
5918 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5919 (filling_sinfo)) {
5920 /* find a more suitable one then this */
5921 ctl = TAILQ_NEXT(control, next);
5922 while (ctl) {
5923 if ((ctl->stcb != control->stcb) && (ctl->length) &&
5924 (ctl->some_taken ||
5925 (ctl->spec_flags & M_NOTIFICATION) ||
5926 ((ctl->do_not_ref_stcb == 0) &&
5927 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5928 ) {
5929 /*-
5930 * If we have a different TCB next, and there is data
5931 * present. If we have already taken some (pdapi), OR we can
5932 * ref the tcb and no delivery as started on this stream, we
5933 * take it. Note we allow a notification on a different
5934 * assoc to be delivered..
5935 */
5936 control = ctl;
5937 goto found_one;
5938 } else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5939 (ctl->length) &&
5940 ((ctl->some_taken) ||
5941 ((ctl->do_not_ref_stcb == 0) &&
5942 ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5943 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5944 /*-
5945 * If we have the same tcb, and there is data present, and we
5946 * have the strm interleave feature present. Then if we have
5947 * taken some (pdapi) or we can refer to tht tcb AND we have
5948 * not started a delivery for this stream, we can take it.
5949 * Note we do NOT allow a notificaiton on the same assoc to
5950 * be delivered.
5951 */
5952 control = ctl;
5953 goto found_one;
5954 }
5955 ctl = TAILQ_NEXT(ctl, next);
5956 }
5957 }
5958 /*
5959 * if we reach here, not suitable replacement is available
5960 * <or> fragment interleave is NOT on. So stuff the sb_cc
5961 * into the our held count, and its time to sleep again.
5962 */
5963 held_length = so->so_rcv.sb_cc;
5964 control->held_length = so->so_rcv.sb_cc;
5965 goto restart;
5966 }
5967 /* Clear the held length since there is something to read */
5968 control->held_length = 0;
5969 found_one:
5970 /*
5971 * If we reach here, control has a some data for us to read off.
5972 * Note that stcb COULD be NULL.
5973 */
5974 if (hold_rlock == 0) {
5975 hold_rlock = 1;
5976 SCTP_INP_READ_LOCK(inp);
5977 }
5978 control->some_taken++;
5979 stcb = control->stcb;
5980 if (stcb) {
5981 if ((control->do_not_ref_stcb == 0) &&
5982 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5983 if (freecnt_applied == 0)
5984 stcb = NULL;
5985 } else if (control->do_not_ref_stcb == 0) {
5986 /* you can't free it on me please */
5987 /*
5988 * The lock on the socket buffer protects us so the
5989 * free code will stop. But since we used the socketbuf
5990 * lock and the sender uses the tcb_lock to increment,
5991 * we need to use the atomic add to the refcnt
5992 */
5993 if (freecnt_applied) {
5994 #ifdef INVARIANTS
5995 panic("refcnt already incremented");
5996 #else
5997 SCTP_PRINTF("refcnt already incremented?\n");
5998 #endif
5999 } else {
6000 atomic_add_int(&stcb->asoc.refcnt, 1);
6001 freecnt_applied = 1;
6002 }
6003 /*
6004 * Setup to remember how much we have not yet told
6005 * the peer our rwnd has opened up. Note we grab
6006 * the value from the tcb from last time.
6007 * Note too that sack sending clears this when a sack
6008 * is sent, which is fine. Once we hit the rwnd_req,
6009 * we then will go to the sctp_user_rcvd() that will
6010 * not lock until it KNOWs it MUST send a WUP-SACK.
6011 */
6012 freed_so_far = stcb->freed_by_sorcv_sincelast;
6013 stcb->freed_by_sorcv_sincelast = 0;
6014 }
6015 }
6016 if (stcb &&
6017 ((control->spec_flags & M_NOTIFICATION) == 0) &&
6018 control->do_not_ref_stcb == 0) {
6019 stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
6020 }
6021
6022 /* First lets get off the sinfo and sockaddr info */
6023 if ((sinfo != NULL) && (filling_sinfo != 0)) {
6024 sinfo->sinfo_stream = control->sinfo_stream;
6025 sinfo->sinfo_ssn = (uint16_t)control->mid;
6026 sinfo->sinfo_flags = control->sinfo_flags;
6027 sinfo->sinfo_ppid = control->sinfo_ppid;
6028 sinfo->sinfo_context =control->sinfo_context;
6029 sinfo->sinfo_timetolive = control->sinfo_timetolive;
6030 sinfo->sinfo_tsn = control->sinfo_tsn;
6031 sinfo->sinfo_cumtsn = control->sinfo_cumtsn;
6032 sinfo->sinfo_assoc_id = control->sinfo_assoc_id;
6033 nxt = TAILQ_NEXT(control, next);
6034 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6035 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
6036 struct sctp_extrcvinfo *s_extra;
6037 s_extra = (struct sctp_extrcvinfo *)sinfo;
6038 if ((nxt) &&
6039 (nxt->length)) {
6040 s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
6041 if (nxt->sinfo_flags & SCTP_UNORDERED) {
6042 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
6043 }
6044 if (nxt->spec_flags & M_NOTIFICATION) {
6045 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
6046 }
6047 s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
6048 s_extra->serinfo_next_length = nxt->length;
6049 s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
6050 s_extra->serinfo_next_stream = nxt->sinfo_stream;
6051 if (nxt->tail_mbuf != NULL) {
6052 if (nxt->end_added) {
6053 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
6054 }
6055 }
6056 } else {
6057 /* we explicitly 0 this, since the memcpy got
6058 * some other things beyond the older sinfo_
6059 * that is on the control's structure :-D
6060 */
6061 nxt = NULL;
6062 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
6063 s_extra->serinfo_next_aid = 0;
6064 s_extra->serinfo_next_length = 0;
6065 s_extra->serinfo_next_ppid = 0;
6066 s_extra->serinfo_next_stream = 0;
6067 }
6068 }
6069 /*
6070 * update off the real current cum-ack, if we have an stcb.
6071 */
6072 if ((control->do_not_ref_stcb == 0) && stcb)
6073 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
6074 /*
6075 * mask off the high bits, we keep the actual chunk bits in
6076 * there.
6077 */
6078 sinfo->sinfo_flags &= 0x00ff;
6079 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
6080 sinfo->sinfo_flags |= SCTP_UNORDERED;
6081 }
6082 }
6083 #ifdef SCTP_ASOCLOG_OF_TSNS
6084 {
6085 int index, newindex;
6086 struct sctp_pcbtsn_rlog *entry;
6087 do {
6088 index = inp->readlog_index;
6089 newindex = index + 1;
6090 if (newindex >= SCTP_READ_LOG_SIZE) {
6091 newindex = 0;
6092 }
6093 } while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
6094 entry = &inp->readlog[index];
6095 entry->vtag = control->sinfo_assoc_id;
6096 entry->strm = control->sinfo_stream;
6097 entry->seq = (uint16_t)control->mid;
6098 entry->sz = control->length;
6099 entry->flgs = control->sinfo_flags;
6100 }
6101 #endif
6102 if ((fromlen > 0) && (from != NULL)) {
6103 union sctp_sockstore store;
6104 size_t len;
6105
6106 switch (control->whoFrom->ro._l_addr.sa.sa_family) {
6107 #ifdef INET6
6108 case AF_INET6:
6109 len = sizeof(struct sockaddr_in6);
6110 store.sin6 = control->whoFrom->ro._l_addr.sin6;
6111 store.sin6.sin6_port = control->port_from;
6112 break;
6113 #endif
6114 #ifdef INET
6115 case AF_INET:
6116 #ifdef INET6
6117 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
6118 len = sizeof(struct sockaddr_in6);
6119 in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
6120 &store.sin6);
6121 store.sin6.sin6_port = control->port_from;
6122 } else {
6123 len = sizeof(struct sockaddr_in);
6124 store.sin = control->whoFrom->ro._l_addr.sin;
6125 store.sin.sin_port = control->port_from;
6126 }
6127 #else
6128 len = sizeof(struct sockaddr_in);
6129 store.sin = control->whoFrom->ro._l_addr.sin;
6130 store.sin.sin_port = control->port_from;
6131 #endif
6132 break;
6133 #endif
6134 #if defined(__Userspace__)
6135 case AF_CONN:
6136 len = sizeof(struct sockaddr_conn);
6137 store.sconn = control->whoFrom->ro._l_addr.sconn;
6138 store.sconn.sconn_port = control->port_from;
6139 break;
6140 #endif
6141 default:
6142 len = 0;
6143 break;
6144 }
6145 memcpy(from, &store, min((size_t)fromlen, len));
6146 #if defined(SCTP_EMBEDDED_V6_SCOPE)
6147 #ifdef INET6
6148 {
6149 struct sockaddr_in6 lsa6, *from6;
6150
6151 from6 = (struct sockaddr_in6 *)from;
6152 sctp_recover_scope_mac(from6, (&lsa6));
6153 }
6154 #endif
6155 #endif
6156 }
6157 if (hold_rlock) {
6158 SCTP_INP_READ_UNLOCK(inp);
6159 hold_rlock = 0;
6160 }
6161 if (hold_sblock) {
6162 SOCKBUF_UNLOCK(&so->so_rcv);
6163 hold_sblock = 0;
6164 }
6165 /* now copy out what data we can */
6166 if (mp == NULL) {
6167 /* copy out each mbuf in the chain up to length */
6168 get_more_data:
6169 m = control->data;
6170 while (m) {
6171 /* Move out all we can */
6172 #if defined(__APPLE__)
6173 #if defined(APPLE_LEOPARD)
6174 cp_len = (int)uio->uio_resid;
6175 #else
6176 cp_len = (int)uio_resid(uio);
6177 #endif
6178 #else
6179 cp_len = (int)uio->uio_resid;
6180 #endif
6181 my_len = (int)SCTP_BUF_LEN(m);
6182 if (cp_len > my_len) {
6183 /* not enough in this buf */
6184 cp_len = my_len;
6185 }
6186 if (hold_rlock) {
6187 SCTP_INP_READ_UNLOCK(inp);
6188 hold_rlock = 0;
6189 }
6190 #if defined(__APPLE__)
6191 SCTP_SOCKET_UNLOCK(so, 0);
6192 #endif
6193 if (cp_len > 0)
6194 error = uiomove(mtod(m, char *), cp_len, uio);
6195 #if defined(__APPLE__)
6196 SCTP_SOCKET_LOCK(so, 0);
6197 #endif
6198 /* re-read */
6199 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
6200 goto release;
6201 }
6202
6203 if ((control->do_not_ref_stcb == 0) && stcb &&
6204 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
6205 no_rcv_needed = 1;
6206 }
6207 if (error) {
6208 /* error we are out of here */
6209 goto release;
6210 }
6211 SCTP_INP_READ_LOCK(inp);
6212 hold_rlock = 1;
6213 if (cp_len == SCTP_BUF_LEN(m)) {
6214 if ((SCTP_BUF_NEXT(m)== NULL) &&
6215 (control->end_added)) {
6216 out_flags |= MSG_EOR;
6217 if ((control->do_not_ref_stcb == 0) &&
6218 (control->stcb != NULL) &&
6219 ((control->spec_flags & M_NOTIFICATION) == 0))
6220 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6221 }
6222 if (control->spec_flags & M_NOTIFICATION) {
6223 out_flags |= MSG_NOTIFICATION;
6224 }
6225 /* we ate up the mbuf */
6226 if (in_flags & MSG_PEEK) {
6227 /* just looking */
6228 m = SCTP_BUF_NEXT(m);
6229 copied_so_far += cp_len;
6230 } else {
6231 /* dispose of the mbuf */
6232 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6233 sctp_sblog(&so->so_rcv,
6234 control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6235 }
6236 sctp_sbfree(control, stcb, &so->so_rcv, m);
6237 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6238 sctp_sblog(&so->so_rcv,
6239 control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
6240 }
6241 copied_so_far += cp_len;
6242 freed_so_far += cp_len;
6243 freed_so_far += MSIZE;
6244 atomic_subtract_int(&control->length, cp_len);
6245 control->data = sctp_m_free(m);
6246 m = control->data;
6247 /* been through it all, must hold sb lock ok to null tail */
6248 if (control->data == NULL) {
6249 #ifdef INVARIANTS
6250 #if defined(__FreeBSD__)
6251 if ((control->end_added == 0) ||
6252 (TAILQ_NEXT(control, next) == NULL)) {
6253 /* If the end is not added, OR the
6254 * next is NOT null we MUST have the lock.
6255 */
6256 if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
6257 panic("Hmm we don't own the lock?");
6258 }
6259 }
6260 #endif
6261 #endif
6262 control->tail_mbuf = NULL;
6263 #ifdef INVARIANTS
6264 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
6265 panic("end_added, nothing left and no MSG_EOR");
6266 }
6267 #endif
6268 }
6269 }
6270 } else {
6271 /* Do we need to trim the mbuf? */
6272 if (control->spec_flags & M_NOTIFICATION) {
6273 out_flags |= MSG_NOTIFICATION;
6274 }
6275 if ((in_flags & MSG_PEEK) == 0) {
6276 SCTP_BUF_RESV_UF(m, cp_len);
6277 SCTP_BUF_LEN(m) -= cp_len;
6278 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6279 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE, cp_len);
6280 }
6281 atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
6282 if ((control->do_not_ref_stcb == 0) &&
6283 stcb) {
6284 atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
6285 }
6286 copied_so_far += cp_len;
6287 freed_so_far += cp_len;
6288 freed_so_far += MSIZE;
6289 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6290 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb?NULL:stcb,
6291 SCTP_LOG_SBRESULT, 0);
6292 }
6293 atomic_subtract_int(&control->length, cp_len);
6294 } else {
6295 copied_so_far += cp_len;
6296 }
6297 }
6298 #if defined(__APPLE__)
6299 #if defined(APPLE_LEOPARD)
6300 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
6301 #else
6302 if ((out_flags & MSG_EOR) || (uio_resid(uio) == 0)) {
6303 #endif
6304 #else
6305 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
6306 #endif
6307 break;
6308 }
6309 if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
6310 (control->do_not_ref_stcb == 0) &&
6311 (freed_so_far >= rwnd_req)) {
6312 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6313 }
6314 } /* end while(m) */
6315 /*
6316 * At this point we have looked at it all and we either have
6317 * a MSG_EOR/or read all the user wants... <OR>
6318 * control->length == 0.
6319 */
6320 if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
6321 /* we are done with this control */
6322 if (control->length == 0) {
6323 if (control->data) {
6324 #ifdef INVARIANTS
6325 panic("control->data not null at read eor?");
6326 #else
6327 SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
6328 sctp_m_freem(control->data);
6329 control->data = NULL;
6330 #endif
6331 }
6332 done_with_control:
6333 if (hold_rlock == 0) {
6334 SCTP_INP_READ_LOCK(inp);
6335 hold_rlock = 1;
6336 }
6337 TAILQ_REMOVE(&inp->read_queue, control, next);
6338 /* Add back any hiddend data */
6339 if (control->held_length) {
6340 held_length = 0;
6341 control->held_length = 0;
6342 wakeup_read_socket = 1;
6343 }
6344 if (control->aux_data) {
6345 sctp_m_free (control->aux_data);
6346 control->aux_data = NULL;
6347 }
6348 no_rcv_needed = control->do_not_ref_stcb;
6349 sctp_free_remote_addr(control->whoFrom);
6350 control->data = NULL;
6351 #ifdef INVARIANTS
6352 if (control->on_strm_q) {
6353 panic("About to free ctl:%p so:%p and its in %d",
6354 control, so, control->on_strm_q);
6355 }
6356 #endif
6357 sctp_free_a_readq(stcb, control);
6358 control = NULL;
6359 if ((freed_so_far >= rwnd_req) &&
6360 (no_rcv_needed == 0))
6361 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6362
6363 } else {
6364 /*
6365 * The user did not read all of this
6366 * message, turn off the returned MSG_EOR
6367 * since we are leaving more behind on the
6368 * control to read.
6369 */
6370 #ifdef INVARIANTS
6371 if (control->end_added &&
6372 (control->data == NULL) &&
6373 (control->tail_mbuf == NULL)) {
6374 panic("Gak, control->length is corrupt?");
6375 }
6376 #endif
6377 no_rcv_needed = control->do_not_ref_stcb;
6378 out_flags &= ~MSG_EOR;
6379 }
6380 }
6381 if (out_flags & MSG_EOR) {
6382 goto release;
6383 }
6384 #if defined(__APPLE__)
6385 #if defined(APPLE_LEOPARD)
6386 if ((uio->uio_resid == 0) ||
6387 #else
6388 if ((uio_resid(uio) == 0) ||
6389 #endif
6390 #else
6391 if ((uio->uio_resid == 0) ||
6392 #endif
6393 ((in_eeor_mode) &&
6394 (copied_so_far >= (uint32_t)max(so->so_rcv.sb_lowat, 1)))) {
6395 goto release;
6396 }
6397 /*
6398 * If I hit here the receiver wants more and this message is
6399 * NOT done (pd-api). So two questions. Can we block? if not
6400 * we are done. Did the user NOT set MSG_WAITALL?
6401 */
6402 if (block_allowed == 0) {
6403 goto release;
6404 }
6405 /*
6406 * We need to wait for more data a few things: - We don't
6407 * sbunlock() so we don't get someone else reading. - We
6408 * must be sure to account for the case where what is added
6409 * is NOT to our control when we wakeup.
6410 */
6411
6412 /* Do we need to tell the transport a rwnd update might be
6413 * needed before we go to sleep?
6414 */
6415 if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
6416 ((freed_so_far >= rwnd_req) &&
6417 (control->do_not_ref_stcb == 0) &&
6418 (no_rcv_needed == 0))) {
6419 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6420 }
6421 wait_some_more:
6422 #if (defined(__FreeBSD__) && __FreeBSD_version > 500000) || defined(__Windows__)
6423 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
6424 goto release;
6425 }
6426 #else
6427 if (so->so_state & SS_CANTRCVMORE) {
6428 goto release;
6429 }
6430 #endif
6431
6432 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
6433 goto release;
6434
6435 if (hold_rlock == 1) {
6436 SCTP_INP_READ_UNLOCK(inp);
6437 hold_rlock = 0;
6438 }
6439 if (hold_sblock == 0) {
6440 SOCKBUF_LOCK(&so->so_rcv);
6441 hold_sblock = 1;
6442 }
6443 if ((copied_so_far) && (control->length == 0) &&
6444 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
6445 goto release;
6446 }
6447 #if defined(__APPLE__)
6448 sbunlock(&so->so_rcv, 1);
6449 #endif
6450 if (so->so_rcv.sb_cc <= control->held_length) {
6451 error = sbwait(&so->so_rcv);
6452 if (error) {
6453 #if defined(__FreeBSD__)
6454 goto release;
6455 #else
6456 goto release_unlocked;
6457 #endif
6458 }
6459 control->held_length = 0;
6460 }
6461 #if defined(__APPLE__)
6462 error = sblock(&so->so_rcv, SBLOCKWAIT(in_flags));
6463 #endif
6464 if (hold_sblock) {
6465 SOCKBUF_UNLOCK(&so->so_rcv);
6466 hold_sblock = 0;
6467 }
6468 if (control->length == 0) {
6469 /* still nothing here */
6470 if (control->end_added == 1) {
6471 /* he aborted, or is done i.e.did a shutdown */
6472 out_flags |= MSG_EOR;
6473 if (control->pdapi_aborted) {
6474 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6475 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6476
6477 out_flags |= MSG_TRUNC;
6478 } else {
6479 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6480 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6481 }
6482 goto done_with_control;
6483 }
6484 if (so->so_rcv.sb_cc > held_length) {
6485 control->held_length = so->so_rcv.sb_cc;
6486 held_length = 0;
6487 }
6488 goto wait_some_more;
6489 } else if (control->data == NULL) {
6490 /* we must re-sync since data
6491 * is probably being added
6492 */
6493 SCTP_INP_READ_LOCK(inp);
6494 if ((control->length > 0) && (control->data == NULL)) {
6495 /* big trouble.. we have the lock and its corrupt? */
6496 #ifdef INVARIANTS
6497 panic ("Impossible data==NULL length !=0");
6498 #endif
6499 out_flags |= MSG_EOR;
6500 out_flags |= MSG_TRUNC;
6501 control->length = 0;
6502 SCTP_INP_READ_UNLOCK(inp);
6503 goto done_with_control;
6504 }
6505 SCTP_INP_READ_UNLOCK(inp);
6506 /* We will fall around to get more data */
6507 }
6508 goto get_more_data;
6509 } else {
6510 /*-
6511 * Give caller back the mbuf chain,
6512 * store in uio_resid the length
6513 */
6514 wakeup_read_socket = 0;
6515 if ((control->end_added == 0) ||
6516 (TAILQ_NEXT(control, next) == NULL)) {
6517 /* Need to get rlock */
6518 if (hold_rlock == 0) {
6519 SCTP_INP_READ_LOCK(inp);
6520 hold_rlock = 1;
6521 }
6522 }
6523 if (control->end_added) {
6524 out_flags |= MSG_EOR;
6525 if ((control->do_not_ref_stcb == 0) &&
6526 (control->stcb != NULL) &&
6527 ((control->spec_flags & M_NOTIFICATION) == 0))
6528 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6529 }
6530 if (control->spec_flags & M_NOTIFICATION) {
6531 out_flags |= MSG_NOTIFICATION;
6532 }
6533 #if defined(__APPLE__)
6534 #if defined(APPLE_LEOPARD)
6535 uio->uio_resid = control->length;
6536 #else
6537 uio_setresid(uio, control->length);
6538 #endif
6539 #else
6540 uio->uio_resid = control->length;
6541 #endif
6542 *mp = control->data;
6543 m = control->data;
6544 while (m) {
6545 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6546 sctp_sblog(&so->so_rcv,
6547 control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6548 }
6549 sctp_sbfree(control, stcb, &so->so_rcv, m);
6550 freed_so_far += SCTP_BUF_LEN(m);
6551 freed_so_far += MSIZE;
6552 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6553 sctp_sblog(&so->so_rcv,
6554 control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
6555 }
6556 m = SCTP_BUF_NEXT(m);
6557 }
6558 control->data = control->tail_mbuf = NULL;
6559 control->length = 0;
6560 if (out_flags & MSG_EOR) {
6561 /* Done with this control */
6562 goto done_with_control;
6563 }
6564 }
6565 release:
6566 if (hold_rlock == 1) {
6567 SCTP_INP_READ_UNLOCK(inp);
6568 hold_rlock = 0;
6569 }
6570 #if (defined(__FreeBSD__) && __FreeBSD_version < 700000) || defined(__Userspace__)
6571 if (hold_sblock == 0) {
6572 SOCKBUF_LOCK(&so->so_rcv);
6573 hold_sblock = 1;
6574 }
6575 #else
6576 if (hold_sblock == 1) {
6577 SOCKBUF_UNLOCK(&so->so_rcv);
6578 hold_sblock = 0;
6579 }
6580 #endif
6581 #if defined(__APPLE__)
6582 sbunlock(&so->so_rcv, 1);
6583 #endif
6584
6585 #if defined(__FreeBSD__)
6586 sbunlock(&so->so_rcv);
6587 #if defined(__FreeBSD__) && __FreeBSD_version >= 700000
6588 sockbuf_lock = 0;
6589 #endif
6590 #endif
6591
6592 release_unlocked:
6593 if (hold_sblock) {
6594 SOCKBUF_UNLOCK(&so->so_rcv);
6595 hold_sblock = 0;
6596 }
6597 if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6598 if ((freed_so_far >= rwnd_req) &&
6599 (control && (control->do_not_ref_stcb == 0)) &&
6600 (no_rcv_needed == 0))
6601 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6602 }
6603 out:
6604 if (msg_flags) {
6605 *msg_flags = out_flags;
6606 }
6607 if (((out_flags & MSG_EOR) == 0) &&
6608 ((in_flags & MSG_PEEK) == 0) &&
6609 (sinfo) &&
6610 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6611 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6612 struct sctp_extrcvinfo *s_extra;
6613 s_extra = (struct sctp_extrcvinfo *)sinfo;
6614 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
6615 }
6616 if (hold_rlock == 1) {
6617 SCTP_INP_READ_UNLOCK(inp);
6618 }
6619 if (hold_sblock) {
6620 SOCKBUF_UNLOCK(&so->so_rcv);
6621 }
6622 #if defined(__FreeBSD__) && __FreeBSD_version >= 700000
6623 if (sockbuf_lock) {
6624 sbunlock(&so->so_rcv);
6625 }
6626 #endif
6627
6628 if (freecnt_applied) {
6629 /*
6630 * The lock on the socket buffer protects us so the free
6631 * code will stop. But since we used the socketbuf lock and
6632 * the sender uses the tcb_lock to increment, we need to use
6633 * the atomic add to the refcnt.
6634 */
6635 if (stcb == NULL) {
6636 #ifdef INVARIANTS
6637 panic("stcb for refcnt has gone NULL?");
6638 goto stage_left;
6639 #else
6640 goto stage_left;
6641 #endif
6642 }
6643 /* Save the value back for next time */
6644 stcb->freed_by_sorcv_sincelast = freed_so_far;
6645 atomic_add_int(&stcb->asoc.refcnt, -1);
6646 }
6647 if (SCTP_BASE_SYSCTL(sctp_logging_level) &SCTP_RECV_RWND_LOGGING_ENABLE) {
6648 if (stcb) {
6649 sctp_misc_ints(SCTP_SORECV_DONE,
6650 freed_so_far,
6651 #if defined(__APPLE__)
6652 #if defined(APPLE_LEOPARD)
6653 ((uio) ? (slen - uio->uio_resid) : slen),
6654 #else
6655 ((uio) ? (slen - uio_resid(uio)) : slen),
6656 #endif
6657 #else
6658 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6659 #endif
6660 stcb->asoc.my_rwnd,
6661 so->so_rcv.sb_cc);
6662 } else {
6663 sctp_misc_ints(SCTP_SORECV_DONE,
6664 freed_so_far,
6665 #if defined(__APPLE__)
6666 #if defined(APPLE_LEOPARD)
6667 ((uio) ? (slen - uio->uio_resid) : slen),
6668 #else
6669 ((uio) ? (slen - uio_resid(uio)) : slen),
6670 #endif
6671 #else
6672 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6673 #endif
6674 0,
6675 so->so_rcv.sb_cc);
6676 }
6677 }
6678 stage_left:
6679 if (wakeup_read_socket) {
6680 sctp_sorwakeup(inp, so);
6681 }
6682 return (error);
6683 }
6684
6685
6686 #ifdef SCTP_MBUF_LOGGING
6687 struct mbuf *
6688 sctp_m_free(struct mbuf *m)
6689 {
6690 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6691 sctp_log_mb(m, SCTP_MBUF_IFREE);
6692 }
6693 return (m_free(m));
6694 }
6695
6696 void sctp_m_freem(struct mbuf *mb)
6697 {
6698 while (mb != NULL)
6699 mb = sctp_m_free(mb);
6700 }
6701
6702 #endif
6703
int
sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
{
	/* Given a local address. For all associations
	 * that holds the address, request a peer-set-primary.
	 *
	 * Returns 0 on success, EADDRNOTAVAIL if the address is not a
	 * known local interface address in the given VRF, or ENOMEM if
	 * no work-queue entry could be allocated.
	 */
	struct sctp_ifa *ifa;
	struct sctp_laddr *wi;

	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
	if (ifa == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
		return (EADDRNOTAVAIL);
	}
	/* Now that we have the ifa we must awaken the
	 * iterator with this message.
	 */
	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
	if (wi == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	/* Now incr the count and int wi structure */
	SCTP_INCR_LADDR_COUNT();
	memset(wi, 0, sizeof(*wi));
	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
	wi->ifa = ifa;
	wi->action = SCTP_SET_PRIM_ADDR;
	/* Hold a reference on the ifa for as long as the work item lives. */
	atomic_add_int(&ifa->refcount, 1);

	/* Now add it to the work queue */
	SCTP_WQ_ADDR_LOCK();
	/*
	 * Should this really be a tailq? As it is we will process the
	 * newest first :-0
	 */
	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
	/* Kick the address work-queue timer so the new entry gets processed. */
	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
			 (struct sctp_inpcb *)NULL,
			 (struct sctp_tcb *)NULL,
			 (struct sctp_nets *)NULL);
	SCTP_WQ_ADDR_UNLOCK();
	return (0);
}
6748
6749 #if defined(__Userspace__)
6750 /* no sctp_soreceive for __Userspace__ now */
6751 #endif
6752
6753 #if !defined(__Userspace__)
/*
 * Protocol-specific soreceive() entry point for SCTP.  Wraps
 * sctp_sorecvmsg(): receives one message (or part of one) into uio/mp0,
 * then converts the returned sctp_sndrcvinfo into a control-message
 * mbuf chain (*controlp) and the peer address into a freshly allocated
 * sockaddr (*psa).  Returns 0 or an errno value.
 */
int
sctp_soreceive( struct socket *so,
		struct sockaddr **psa,
		struct uio *uio,
		struct mbuf **mp0,
		struct mbuf **controlp,
		int *flagsp)
{
	int error, fromlen;
	uint8_t sockbuf[256];	/* scratch buffer to hold the peer address */
	struct sockaddr *from;
	struct sctp_extrcvinfo sinfo;
	int filling_sinfo = 1;	/* should sctp_sorecvmsg() fill in sinfo? */
	struct sctp_inpcb *inp;

	inp = (struct sctp_inpcb *)so->so_pcb;
	/* pickup the assoc we are reading from */
	if (inp == NULL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		return (EINVAL);
	}
	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	     sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	     sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
	    (controlp == NULL)) {
		/* user does not want the sndrcv ctl */
		filling_sinfo = 0;
	}
	if (psa) {
		from = (struct sockaddr *)sockbuf;
		fromlen = sizeof(sockbuf);
#ifdef HAVE_SA_LEN
		/* Zero sa_len so we can tell later whether it was filled in. */
		from->sa_len = 0;
#endif
	} else {
		from = NULL;
		fromlen = 0;
	}

#if defined(__APPLE__)
	SCTP_SOCKET_LOCK(so, 1);
#endif
	if (filling_sinfo) {
		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
	}
	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
	if (controlp != NULL) {
		/* copy back the sinfo in a CMSG format */
		if (filling_sinfo)
			*controlp = sctp_build_ctl_nchunk(inp,
			    (struct sctp_sndrcvinfo *)&sinfo);
		else
			*controlp = NULL;
	}
	if (psa) {
		/* copy back the address info */
#ifdef HAVE_SA_LEN
		if (from && from->sa_len) {
#else
		if (from) {
#endif
#if (defined(__FreeBSD__) && __FreeBSD_version > 500000) || defined(__Windows__)
			*psa = sodupsockaddr(from, M_NOWAIT);
#else
			*psa = dup_sockaddr(from, mp0 == 0);
#endif
		} else {
			*psa = NULL;
		}
	}
#if defined(__APPLE__)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
	return (error);
}
6830
6831
6832 #if (defined(__FreeBSD__) && __FreeBSD_version < 603000) || defined(__Windows__)
6833 /*
6834 * General routine to allocate a hash table with control of memory flags.
6835 * is in 7.0 and beyond for sure :-)
6836 */
6837 void *
6838 sctp_hashinit_flags(int elements, struct malloc_type *type,
6839 u_long *hashmask, int flags)
6840 {
6841 long hashsize;
6842 LIST_HEAD(generic, generic) *hashtbl;
6843 int i;
6844
6845
6846 if (elements <= 0) {
6847 #ifdef INVARIANTS
6848 panic("hashinit: bad elements");
6849 #else
6850 SCTP_PRINTF("hashinit: bad elements?");
6851 elements = 1;
6852 #endif
6853 }
6854 for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
6855 continue;
6856 hashsize >>= 1;
6857 if (flags & HASH_WAITOK)
6858 hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
6859 else if (flags & HASH_NOWAIT)
6860 hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_NOWAIT);
6861 else {
6862 #ifdef INVARIANTS
6863 panic("flag incorrect in hashinit_flags");
6864 #else
6865 return (NULL);
6866 #endif
6867 }
6868
6869 /* no memory? */
6870 if (hashtbl == NULL)
6871 return (NULL);
6872
6873 for (i = 0; i < hashsize; i++)
6874 LIST_INIT(&hashtbl[i]);
6875 *hashmask = hashsize - 1;
6876 return (hashtbl);
6877 }
6878 #endif
6879
6880 #else /* __Userspace__ ifdef above sctp_soreceive */
6881 /*
6882 * __Userspace__ Defining sctp_hashinit_flags() and sctp_hashdestroy() for userland.
6883 * NOTE: We don't want multiple definitions here. So sctp_hashinit_flags() above for
6884 *__FreeBSD__ must be excluded.
6885 *
6886 */
6887
6888 void *
6889 sctp_hashinit_flags(int elements, struct malloc_type *type,
6890 u_long *hashmask, int flags)
6891 {
6892 long hashsize;
6893 LIST_HEAD(generic, generic) *hashtbl;
6894 int i;
6895
6896 if (elements <= 0) {
6897 SCTP_PRINTF("hashinit: bad elements?");
6898 #ifdef INVARIANTS
6899 return (NULL);
6900 #else
6901 elements = 1;
6902 #endif
6903 }
6904 for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
6905 continue;
6906 hashsize >>= 1;
6907 /*cannot use MALLOC here because it has to be declared or defined
6908 using MALLOC_DECLARE or MALLOC_DEFINE first. */
6909 if (flags & HASH_WAITOK)
6910 hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl));
6911 else if (flags & HASH_NOWAIT)
6912 hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl));
6913 else {
6914 #ifdef INVARIANTS
6915 SCTP_PRINTF("flag incorrect in hashinit_flags.\n");
6916 #endif
6917 return (NULL);
6918 }
6919
6920 /* no memory? */
6921 if (hashtbl == NULL)
6922 return (NULL);
6923
6924 for (i = 0; i < hashsize; i++)
6925 LIST_INIT(&hashtbl[i]);
6926 *hashmask = hashsize - 1;
6927 return (hashtbl);
6928 }
6929
6930
6931 void
6932 sctp_hashdestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask)
6933 {
6934 LIST_HEAD(generic, generic) *hashtbl, *hp;
6935
6936 hashtbl = vhashtbl;
6937 for (hp = hashtbl; hp <= &hashtbl[hashmask]; hp++)
6938 if (!LIST_EMPTY(hp)) {
6939 SCTP_PRINTF("hashdestroy: hash not empty.\n");
6940 return;
6941 }
6942 FREE(hashtbl, type);
6943 }
6944
6945
/*
 * Free a hash table allocated with sctp_hashinit_flags() WITHOUT
 * checking that the buckets are empty.  Note that any entries still
 * chained in the buckets are NOT freed here (see the disabled code
 * below and its explanation); the caller is responsible for emptying
 * the table first.
 */
void
sctp_hashfreedestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask)
{
	LIST_HEAD(generic, generic) *hashtbl/*, *hp*/;
	/*
	 LIST_ENTRY(type) *start, *temp;
	 */
	hashtbl = vhashtbl;
	/* Apparently temp is not dynamically allocated, so attempts to
	   free it results in error.
	for (hp = hashtbl; hp <= &hashtbl[hashmask]; hp++)
		if (!LIST_EMPTY(hp)) {
			start = LIST_FIRST(hp);
			while (start != NULL) {
				temp = start;
				start = start->le_next;
				SCTP_PRINTF("%s: %p \n", __func__, (void *)temp);
				FREE(temp, type);
			}
		}
	*/
	FREE(hashtbl, type);
}
6969
6970
6971 #endif
6972
6973
6974 int
6975 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
6976 int totaddr, int *error)
6977 {
6978 int added = 0;
6979 int i;
6980 struct sctp_inpcb *inp;
6981 struct sockaddr *sa;
6982 size_t incr = 0;
6983 #ifdef INET
6984 struct sockaddr_in *sin;
6985 #endif
6986 #ifdef INET6
6987 struct sockaddr_in6 *sin6;
6988 #endif
6989
6990 sa = addr;
6991 inp = stcb->sctp_ep;
6992 *error = 0;
6993 for (i = 0; i < totaddr; i++) {
6994 switch (sa->sa_family) {
6995 #ifdef INET
6996 case AF_INET:
6997 incr = sizeof(struct sockaddr_in);
6998 sin = (struct sockaddr_in *)sa;
6999 if ((sin->sin_addr.s_addr == INADDR_ANY) ||
7000 (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
7001 IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
7002 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7003 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
7004 SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
7005 *error = EINVAL;
7006 goto out_now;
7007 }
7008 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
7009 SCTP_DONOT_SETSCOPE,
7010 SCTP_ADDR_IS_CONFIRMED)) {
7011 /* assoc gone no un-lock */
7012 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
7013 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
7014 SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
7015 *error = ENOBUFS;
7016 goto out_now;
7017 }
7018 added++;
7019 break;
7020 #endif
7021 #ifdef INET6
7022 case AF_INET6:
7023 incr = sizeof(struct sockaddr_in6);
7024 sin6 = (struct sockaddr_in6 *)sa;
7025 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
7026 IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
7027 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7028 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
7029 SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
7030 *error = EINVAL;
7031 goto out_now;
7032 }
7033 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
7034 SCTP_DONOT_SETSCOPE,
7035 SCTP_ADDR_IS_CONFIRMED)) {
7036 /* assoc gone no un-lock */
7037 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
7038 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
7039 SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
7040 *error = ENOBUFS;
7041 goto out_now;
7042 }
7043 added++;
7044 break;
7045 #endif
7046 #if defined(__Userspace__)
7047 case AF_CONN:
7048 incr = sizeof(struct sockaddr_in6);
7049 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
7050 SCTP_DONOT_SETSCOPE,
7051 SCTP_ADDR_IS_CONFIRMED)) {
7052 /* assoc gone no un-lock */
7053 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
7054 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
7055 SCTP_FROM_SCTPUTIL + SCTP_LOC_11);
7056 *error = ENOBUFS;
7057 goto out_now;
7058 }
7059 added++;
7060 break;
7061 #endif
7062 default:
7063 break;
7064 }
7065 sa = (struct sockaddr *)((caddr_t)sa + incr);
7066 }
7067 out_now:
7068 return (added);
7069 }
7070
/*
 * Scan the packed address list passed to sctp_connectx(), validating
 * and counting the addresses (*num_v4 / *num_v6), and look up whether
 * any of them already belongs to an existing association on inp.
 *
 * Returns the existing stcb if one is found (with the inp reference
 * taken by SCTP_INP_INCR_REF still held for the caller), or NULL when
 * no association matches.  On an invalid address, *error is set to
 * EINVAL, *bad_addr is set, and NULL is returned.  *totaddr is trimmed
 * to the number of addresses actually consumed (an unknown family or
 * exceeding the limit stops the scan early).
 */
struct sctp_tcb *
sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
			  unsigned int *totaddr,
			  unsigned int *num_v4, unsigned int *num_v6, int *error,
			  unsigned int limit, int *bad_addr)
{
	struct sockaddr *sa;
	struct sctp_tcb *stcb = NULL;
	unsigned int incr, at, i;

	at = 0;	/* bytes consumed so far, checked against limit */
	sa = addr;
	*error = *num_v6 = *num_v4 = 0;
	/* account and validate addresses */
	for (i = 0; i < *totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = (unsigned int)sizeof(struct sockaddr_in);
#ifdef HAVE_SA_LEN
			if (sa->sa_len != incr) {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				*bad_addr = 1;
				return (NULL);
			}
#endif
			(*num_v4) += 1;
			break;
#endif
#ifdef INET6
		case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			sin6 = (struct sockaddr_in6 *)sa;
			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
				/* Must be non-mapped for connectx */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				*bad_addr = 1;
				return (NULL);
			}
			incr = (unsigned int)sizeof(struct sockaddr_in6);
#ifdef HAVE_SA_LEN
			if (sa->sa_len != incr) {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				*bad_addr = 1;
				return (NULL);
			}
#endif
			(*num_v6) += 1;
			break;
		}
#endif
		default:
			/* Unknown family terminates the scan here. */
			*totaddr = i;
			incr = 0;
			/* we are done */
			break;
		}
		if (i == *totaddr) {
			break;
		}
		/* Hold a ref on inp across the lookup; drop it on a miss. */
		SCTP_INP_INCR_REF(inp);
		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
		if (stcb != NULL) {
			/* Already have or am bring up an association */
			return (stcb);
		} else {
			SCTP_INP_DECR_REF(inp);
		}
		if ((at + incr) > limit) {
			/* Next address would exceed the caller's buffer. */
			*totaddr = i;
			break;
		}
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
	return ((struct sctp_tcb *)NULL);
}
7152
7153 /*
7154 * sctp_bindx(ADD) for one address.
7155 * assumes all arguments are valid/checked by caller.
7156 */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
                       struct sockaddr *sa, sctp_assoc_t assoc_id,
                       uint32_t vrf_id, int *error, void *p)
{
    struct sockaddr *addr_touse;
#if defined(INET) && defined(INET6)
    /* scratch space for converting a v4-mapped v6 address to plain v4 */
    struct sockaddr_in sin;
#endif
#ifdef SCTP_MVRF
    int i, fnd = 0;
#endif

    /* see if we're bound all already! */
    if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
        SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
        *error = EINVAL;
        return;
    }
#ifdef SCTP_MVRF
    /* Is the VRF one we have */
    for (i = 0; i < inp->num_vrfs; i++) {
        if (vrf_id == inp->m_vrf_ids[i]) {
            fnd = 1;
            break;
        }
    }
    if (!fnd) {
        SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
        *error = EINVAL;
        return;
    }
#endif
    addr_touse = sa;
#ifdef INET6
    if (sa->sa_family == AF_INET6) {
#ifdef INET
        struct sockaddr_in6 *sin6;

#endif
#ifdef HAVE_SA_LEN
        /* reject addresses whose length doesn't match the family */
        if (sa->sa_len != sizeof(struct sockaddr_in6)) {
            SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
            *error = EINVAL;
            return;
        }
#endif
        if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
            /* can only bind v6 on PF_INET6 sockets */
            SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
            *error = EINVAL;
            return;
        }
#ifdef INET
        sin6 = (struct sockaddr_in6 *)addr_touse;
        if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
            if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
                SCTP_IPV6_V6ONLY(inp)) {
                /* can't bind v4-mapped on PF_INET sockets */
                SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
                *error = EINVAL;
                return;
            }
            /* strip the v4-mapped form and bind the embedded v4 address */
            in6_sin6_2_sin(&sin, sin6);
            addr_touse = (struct sockaddr *)&sin;
        }
#endif
    }
#endif
#ifdef INET
    if (sa->sa_family == AF_INET) {
#ifdef HAVE_SA_LEN
        if (sa->sa_len != sizeof(struct sockaddr_in)) {
            SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
            *error = EINVAL;
            return;
        }
#endif
        if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
            SCTP_IPV6_V6ONLY(inp)) {
            /* can't bind v4 on PF_INET sockets */
            SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
            *error = EINVAL;
            return;
        }
    }
#endif
    if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
        /* endpoint not bound at all yet: do a regular bind instead */
#if !(defined(__Panda__) || defined(__Windows__) || defined(__Userspace__))
        if (p == NULL) {
            /* Can't get proc for Net/Open BSD */
            SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
            *error = EINVAL;
            return;
        }
#endif
        *error = sctp_inpcb_bind(so, addr_touse, NULL, p);
        return;
    }
    /*
     * No locks required here since bind and mgmt_ep_sa
     * all do their own locking. If we do something for
     * the FIX: below we may need to lock in that case.
     */
    if (assoc_id == 0) {
        /* add the address */
        struct sctp_inpcb *lep;
        /*
         * NOTE(review): the sockaddr_in cast is used for the port check
         * for both families -- presumably sin_port and sin6_port share
         * the same offset; confirm against the struct layouts.
         */
        struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

        /* validate the incoming port */
        if ((lsin->sin_port != 0) &&
            (lsin->sin_port != inp->sctp_lport)) {
            SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
            *error = EINVAL;
            return;
        } else {
            /* user specified 0 port, set it to existing port */
            lsin->sin_port = inp->sctp_lport;
        }

        /* is some endpoint already bound to this address/port pair? */
        lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
        if (lep != NULL) {
            /*
             * We must decrement the refcount
             * since we have the ep already and
             * are binding. No remove going on
             * here.
             */
            SCTP_INP_DECR_REF(lep);
        }
        if (lep == inp) {
            /* already bound to it.. ok */
            return;
        } else if (lep == NULL) {
            /* address is free: clear the port and add the address */
            ((struct sockaddr_in *)addr_touse)->sin_port = 0;
            *error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
                                          SCTP_ADD_IP_ADDRESS,
                                          vrf_id, NULL);
        } else {
            *error = EADDRINUSE;
        }
        if (*error)
            return;
    } else {
        /*
         * FIX: decide whether we allow assoc based
         * bindx
         */
    }
}
7307
7308 /*
7309 * sctp_bindx(DELETE) for one address.
7310 * assumes all arguments are valid/checked by caller.
7311 */
void
sctp_bindx_delete_address(struct sctp_inpcb *inp,
                          struct sockaddr *sa, sctp_assoc_t assoc_id,
                          uint32_t vrf_id, int *error)
{
    struct sockaddr *addr_touse;
#if defined(INET) && defined(INET6)
    /* scratch space for converting a v4-mapped v6 address to plain v4 */
    struct sockaddr_in sin;
#endif
#ifdef SCTP_MVRF
    int i, fnd = 0;
#endif

    /* see if we're bound all already! */
    if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
        SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
        *error = EINVAL;
        return;
    }
#ifdef SCTP_MVRF
    /* Is the VRF one we have */
    for (i = 0; i < inp->num_vrfs; i++) {
        if (vrf_id == inp->m_vrf_ids[i]) {
            fnd = 1;
            break;
        }
    }
    if (!fnd) {
        SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
        *error = EINVAL;
        return;
    }
#endif
    addr_touse = sa;
#ifdef INET6
    if (sa->sa_family == AF_INET6) {
#ifdef INET
        struct sockaddr_in6 *sin6;
#endif

#ifdef HAVE_SA_LEN
        /* reject addresses whose length doesn't match the family */
        if (sa->sa_len != sizeof(struct sockaddr_in6)) {
            SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
            *error = EINVAL;
            return;
        }
#endif
        if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
            /* can only bind v6 on PF_INET6 sockets */
            SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
            *error = EINVAL;
            return;
        }
#ifdef INET
        sin6 = (struct sockaddr_in6 *)addr_touse;
        if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
            if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
                SCTP_IPV6_V6ONLY(inp)) {
                /* can't bind mapped-v4 on PF_INET sockets */
                SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
                *error = EINVAL;
                return;
            }
            /* strip the v4-mapped form and delete the embedded v4 address */
            in6_sin6_2_sin(&sin, sin6);
            addr_touse = (struct sockaddr *)&sin;
        }
#endif
    }
#endif
#ifdef INET
    if (sa->sa_family == AF_INET) {
#ifdef HAVE_SA_LEN
        if (sa->sa_len != sizeof(struct sockaddr_in)) {
            SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
            *error = EINVAL;
            return;
        }
#endif
        if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
            SCTP_IPV6_V6ONLY(inp)) {
            /* can't bind v4 on PF_INET sockets */
            SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
            *error = EINVAL;
            return;
        }
    }
#endif
    /*
     * No lock required mgmt_ep_sa does its own locking.
     * If the FIX: below is ever changed we may need to
     * lock before calling association level binding.
     */
    if (assoc_id == 0) {
        /* delete the address */
        *error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
                                      SCTP_DEL_IP_ADDRESS,
                                      vrf_id, NULL);
    } else {
        /*
         * FIX: decide whether we allow assoc based
         * bindx
         */
    }
}
7416
7417 /*
7418 * returns the valid local address count for an assoc, taking into account
7419 * all scoping rules
7420 */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
    int loopback_scope;
#if defined(INET)
    int ipv4_local_scope, ipv4_addr_legal;
#endif
#if defined (INET6)
    int local_scope, site_scope, ipv6_addr_legal;
#endif
#if defined(__Userspace__)
    int conn_addr_legal;
#endif
    struct sctp_vrf *vrf;
    struct sctp_ifn *sctp_ifn;
    struct sctp_ifa *sctp_ifa;
    int count = 0;

    /* Turn on all the appropriate scopes */
    loopback_scope = stcb->asoc.scope.loopback_scope;
#if defined(INET)
    ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
    ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
#endif
#if defined(INET6)
    local_scope = stcb->asoc.scope.local_scope;
    site_scope = stcb->asoc.scope.site_scope;
    ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
#endif
#if defined(__Userspace__)
    conn_addr_legal = stcb->asoc.scope.conn_addr_legal;
#endif
    /* address lists are walked under the global address read lock */
    SCTP_IPI_ADDR_RLOCK();
    vrf = sctp_find_vrf(stcb->asoc.vrf_id);
    if (vrf == NULL) {
        /* no vrf, no addresses */
        SCTP_IPI_ADDR_RUNLOCK();
        return (0);
    }

    if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
        /*
         * bound all case: go through all ifns on the vrf
         */
        LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
            if ((loopback_scope == 0) &&
                SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
                continue;
            }
            LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
                /* skip addresses restricted for this association */
                if (sctp_is_addr_restricted(stcb, sctp_ifa))
                    continue;
                switch (sctp_ifa->address.sa.sa_family) {
#ifdef INET
                case AF_INET:
                    if (ipv4_addr_legal) {
                        struct sockaddr_in *sin;

                        sin = &sctp_ifa->address.sin;
                        if (sin->sin_addr.s_addr == 0) {
                            /* skip unspecified addrs */
                            continue;
                        }
#if defined(__FreeBSD__)
                        /* skip addresses not visible inside our jail */
                        if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
                                             &sin->sin_addr) != 0) {
                            continue;
                        }
#endif
                        if ((ipv4_local_scope == 0) &&
                            (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
                            continue;
                        }
                        /* count this one */
                        count++;
                    } else {
                        continue;
                    }
                    break;
#endif
#ifdef INET6
                case AF_INET6:
                    if (ipv6_addr_legal) {
                        struct sockaddr_in6 *sin6;

#if defined(SCTP_EMBEDDED_V6_SCOPE) && !defined(SCTP_KAME)
                        struct sockaddr_in6 lsa6;
#endif
                        sin6 = &sctp_ifa->address.sin6;
                        if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
                            continue;
                        }
#if defined(__FreeBSD__)
                        /* skip addresses not visible inside our jail */
                        if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
                                             &sin6->sin6_addr) != 0) {
                            continue;
                        }
#endif
                        if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
                            if (local_scope == 0)
                                continue;
#if defined(SCTP_EMBEDDED_V6_SCOPE)
                            /* recover the scope id if it was not set */
                            if (sin6->sin6_scope_id == 0) {
#ifdef SCTP_KAME
                                if (sa6_recoverscope(sin6) != 0)
                                    /*
                                     * bad link
                                     * local
                                     * address
                                     */
                                    continue;
#else
                                lsa6 = *sin6;
                                if (in6_recoverscope(&lsa6,
                                                     &lsa6.sin6_addr,
                                                     NULL))
                                    /*
                                     * bad link
                                     * local
                                     * address
                                     */
                                    continue;
                                sin6 = &lsa6;
#endif /* SCTP_KAME */
                            }
#endif /* SCTP_EMBEDDED_V6_SCOPE */
                        }
                        if ((site_scope == 0) &&
                            (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
                            continue;
                        }
                        /* count this one */
                        count++;
                    }
                    break;
#endif
#if defined(__Userspace__)
                case AF_CONN:
                    if (conn_addr_legal) {
                        count++;
                    }
                    break;
#endif
                default:
                    /* TSNH */
                    break;
                }
            }
        }
    } else {
        /*
         * subset bound case
         */
        struct sctp_laddr *laddr;
        LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
                     sctp_nxt_addr) {
            if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
                continue;
            }
            /* count this one */
            count++;
        }
    }
    SCTP_IPI_ADDR_RUNLOCK();
    return (count);
}
7587
7588 #if defined(SCTP_LOCAL_TRACE_BUF)
7589
/*
 * Append one entry to the circular SCTP trace log.  A slot is claimed
 * lock-free by advancing the shared index with compare-and-swap; the
 * subsystem id and the six parameter words are then stored in it.
 */
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
    uint32_t saveindex, newindex;

#if defined(__Windows__)
    /* on Windows the log is heap allocated and may not exist yet */
    if (SCTP_BASE_SYSCTL(sctp_log) == NULL) {
        return;
    }
    /* claim the next slot; the index wraps back to 1 at the end */
    do {
        saveindex = SCTP_BASE_SYSCTL(sctp_log)->index;
        if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
            newindex = 1;
        } else {
            newindex = saveindex + 1;
        }
    } while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log)->index, saveindex, newindex) == 0);
    if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
        saveindex = 0;
    }
    SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
    SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].subsys = subsys;
    SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[0] = a;
    SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[1] = b;
    SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[2] = c;
    SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[3] = d;
    SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[4] = e;
    SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[5] = f;
#else
    /* same scheme; sctp_log is an embedded struct, not a pointer, here */
    do {
        saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
        if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
            newindex = 1;
        } else {
            newindex = saveindex + 1;
        }
    } while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
    if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
        saveindex = 0;
    }
    SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
    SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
    SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
    SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
    SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
    SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
    SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
    SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
#endif
}
7640
7641 #endif
7642 #if defined(__FreeBSD__)
7643 #if __FreeBSD_version >= 800044
/*
 * Input hook for UDP-encapsulated SCTP: strip the UDP header out of the
 * mbuf chain and hand the packet to the normal SCTP input path, keeping
 * the UDP source port so replies can be tunneled back.  Consumes m on
 * every path.
 */
static void
sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
                              const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
{
    struct ip *iph;
#ifdef INET6
    struct ip6_hdr *ip6;
#endif
    struct mbuf *sp, *last;
    struct udphdr *uhdr;
    uint16_t port;

    if ((m->m_flags & M_PKTHDR) == 0) {
        /* Can't handle one that is not a pkt hdr */
        goto out;
    }
    /* Pull the src port */
    iph = mtod(m, struct ip *);
    uhdr = (struct udphdr *)((caddr_t)iph + off);
    port = uhdr->uh_sport;
    /* Split out the mbuf chain. Leave the
     * IP header in m, place the
     * rest in the sp.
     */
    sp = m_split(m, off, M_NOWAIT);
    if (sp == NULL) {
        /* Gak, drop packet, we can't do a split */
        goto out;
    }
    if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
        /* Gak, packet can't have an SCTP header in it - too small */
        m_freem(sp);
        goto out;
    }
    /* Now pull up the UDP header and SCTP header together */
    sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
    if (sp == NULL) {
        /* Gak pullup failed */
        goto out;
    }
    /* Trim out the UDP header */
    m_adj(sp, sizeof(struct udphdr));

    /* Now reconstruct the mbuf chain */
    for (last = m; last->m_next; last = last->m_next);
    last->m_next = sp;
    m->m_pkthdr.len += sp->m_pkthdr.len;
    /*
     * The CSUM_DATA_VALID flags indicates that the HW checked the
     * UDP checksum and it was valid.
     * Since CSUM_DATA_VALID == CSUM_SCTP_VALID this would imply that
     * the HW also verified the SCTP checksum. Therefore, clear the bit.
     */
#if __FreeBSD_version > 1000049
    SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
            "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
            m->m_pkthdr.len,
            if_name(m->m_pkthdr.rcvif),
            (int)m->m_pkthdr.csum_flags, CSUM_BITS);
#else
    SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
            "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%x.\n",
            m->m_pkthdr.len,
            if_name(m->m_pkthdr.rcvif),
            m->m_pkthdr.csum_flags);
#endif
    m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
    iph = mtod(m, struct ip *);
    /* dispatch on the IP version of the outer header */
    switch (iph->ip_v) {
#ifdef INET
    case IPVERSION:
        /* shrink ip_len to account for the removed UDP header */
#if __FreeBSD_version >= 1000000
        iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
#else
        iph->ip_len -= sizeof(struct udphdr);
#endif
        sctp_input_with_port(m, off, port);
        break;
#endif
#ifdef INET6
    case IPV6_VERSION >> 4:
        /* shrink the v6 payload length as well */
        ip6 = mtod(m, struct ip6_hdr *);
        ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
        sctp6_input_with_port(&m, &off, port);
        break;
#endif
    default:
        goto out;
        break;
    }
    return;
out:
    m_freem(m);
}
7738 #endif
7739
7740 #if __FreeBSD_version >= 1100000
7741 #ifdef INET
/*
 * ICMP error handler for the SCTP-over-UDP tunneling socket: map the
 * quoted inner packet back to the SCTP association it belongs to,
 * validate ports and verification tag, and notify the association.
 */
static void
sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED)
{
    struct ip *outer_ip, *inner_ip;
    struct sctphdr *sh;
    struct icmp *icmp;
    struct udphdr *udp;
    struct sctp_inpcb *inp;
    struct sctp_tcb *stcb;
    struct sctp_nets *net;
    struct sctp_init_chunk *ch;
    struct sockaddr_in src, dst;
    uint8_t type, code;

    /*
     * vip points at the quoted inner IP header; step backwards to the
     * ICMP header and the outer IP header that precede it in memory.
     */
    inner_ip = (struct ip *)vip;
    icmp = (struct icmp *)((caddr_t)inner_ip -
                           (sizeof(struct icmp) - sizeof(struct ip)));
    outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
    /*
     * make sure enough of the quoted packet is present to cover the
     * UDP header and the first 8 bytes of the SCTP common header
     */
    if (ntohs(outer_ip->ip_len) <
        sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) {
        return;
    }
    udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2));
    sh = (struct sctphdr *)(udp + 1);
    memset(&src, 0, sizeof(struct sockaddr_in));
    src.sin_family = AF_INET;
#ifdef HAVE_SIN_LEN
    src.sin_len = sizeof(struct sockaddr_in);
#endif
    src.sin_port = sh->src_port;
    src.sin_addr = inner_ip->ip_src;
    memset(&dst, 0, sizeof(struct sockaddr_in));
    dst.sin_family = AF_INET;
#ifdef HAVE_SIN_LEN
    dst.sin_len = sizeof(struct sockaddr_in);
#endif
    dst.sin_port = sh->dest_port;
    dst.sin_addr = inner_ip->ip_dst;
    /*
     * 'dst' holds the dest of the packet that failed to be sent.
     * 'src' holds our local endpoint address. Thus we reverse
     * the dst and the src in the lookup.
     */
    inp = NULL;
    net = NULL;
    stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
                                        (struct sockaddr *)&src,
                                        &inp, &net, 1,
                                        SCTP_DEFAULT_VRFID);
    if ((stcb != NULL) &&
        (net != NULL) &&
        (inp != NULL)) {
        /* Check the UDP port numbers */
        if ((udp->uh_dport != net->port) ||
            (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
            SCTP_TCB_UNLOCK(stcb);
            return;
        }
        /* Check the verification tag */
        if (ntohl(sh->v_tag) != 0) {
            /*
             * This must be the verification tag used
             * for sending out packets. We don't
             * consider packets reflecting the
             * verification tag.
             */
            if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) {
                SCTP_TCB_UNLOCK(stcb);
                return;
            }
        } else {
            if (ntohs(outer_ip->ip_len) >=
                sizeof(struct ip) +
                8 + (inner_ip->ip_hl << 2) + 8 + 20) {
                /*
                 * In this case we can check if we
                 * got an INIT chunk and if the
                 * initiate tag matches.
                 */
                ch = (struct sctp_init_chunk *)(sh + 1);
                if ((ch->ch.chunk_type != SCTP_INITIATION) ||
                    (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) {
                    SCTP_TCB_UNLOCK(stcb);
                    return;
                }
            } else {
                SCTP_TCB_UNLOCK(stcb);
                return;
            }
        }
        type = icmp->icmp_type;
        code = icmp->icmp_code;
        if ((type == ICMP_UNREACH) &&
            (code == ICMP_UNREACH_PORT)) {
            /*
             * port unreachable refers to the encapsulating UDP
             * port; report it to SCTP as protocol unreachable
             */
            code = ICMP_UNREACH_PROTOCOL;
        }
        sctp_notify(inp, stcb, net, type, code,
                    ntohs(inner_ip->ip_len),
                    (uint32_t)ntohs(icmp->icmp_nextmtu));
    } else {
#if defined(__FreeBSD__) && __FreeBSD_version < 500000
        /*
         * XXX must be fixed for 5.x and higher, leave for
         * 4.x
         */
        if (PRC_IS_REDIRECT(cmd) && (inp != NULL)) {
            in_rtchange((struct inpcb *)inp,
                        inetctlerrmap[cmd]);
        }
#endif
        if ((stcb == NULL) && (inp != NULL)) {
            /* reduce ref-count */
            SCTP_INP_WLOCK(inp);
            SCTP_INP_DECR_REF(inp);
            SCTP_INP_WUNLOCK(inp);
        }
        if (stcb) {
            SCTP_TCB_UNLOCK(stcb);
        }
    }
    return;
}
7864 #endif
7865
7866 #ifdef INET6
/*
 * ICMPv6 error handler for the SCTP-over-UDP tunneling socket: map the
 * reported error back to the SCTP association, validate ports and
 * verification tag, and notify the association.
 */
static void
sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED)
{
    struct ip6ctlparam *ip6cp;
    struct sctp_inpcb *inp;
    struct sctp_tcb *stcb;
    struct sctp_nets *net;
    struct sctphdr sh;
    struct udphdr udp;
    struct sockaddr_in6 src, dst;
    uint8_t type, code;

    ip6cp = (struct ip6ctlparam *)d;
    /*
     * XXX: We assume that when IPV6 is non NULL, M and OFF are
     * valid.
     */
    if (ip6cp->ip6c_m == NULL) {
        return;
    }
    /* Check if we can safely examine the ports and the
     * verification tag of the SCTP common header.
     */
    if (ip6cp->ip6c_m->m_pkthdr.len <
        ip6cp->ip6c_off + sizeof(struct udphdr)+ offsetof(struct sctphdr, checksum)) {
        return;
    }
    /* Copy out the UDP header. */
    memset(&udp, 0, sizeof(struct udphdr));
    m_copydata(ip6cp->ip6c_m,
               ip6cp->ip6c_off,
               sizeof(struct udphdr),
               (caddr_t)&udp);
    /* Copy out the port numbers and the verification tag. */
    memset(&sh, 0, sizeof(struct sctphdr));
    m_copydata(ip6cp->ip6c_m,
               ip6cp->ip6c_off + sizeof(struct udphdr),
               sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t),
               (caddr_t)&sh);
    memset(&src, 0, sizeof(struct sockaddr_in6));
    src.sin6_family = AF_INET6;
#ifdef HAVE_SIN6_LEN
    src.sin6_len = sizeof(struct sockaddr_in6);
#endif
    src.sin6_port = sh.src_port;
    src.sin6_addr = ip6cp->ip6c_ip6->ip6_src;
#if defined(__FreeBSD__)
    /* re-embed the receiving interface's scope in the address */
    if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
        return;
    }
#endif
    memset(&dst, 0, sizeof(struct sockaddr_in6));
    dst.sin6_family = AF_INET6;
#ifdef HAVE_SIN6_LEN
    dst.sin6_len = sizeof(struct sockaddr_in6);
#endif
    dst.sin6_port = sh.dest_port;
    dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst;
#if defined(__FreeBSD__)
    if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
        return;
    }
#endif
    inp = NULL;
    net = NULL;
    /* dst/src are swapped: dst is the peer the failed packet went to */
    stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
                                        (struct sockaddr *)&src,
                                        &inp, &net, 1, SCTP_DEFAULT_VRFID);
    if ((stcb != NULL) &&
        (net != NULL) &&
        (inp != NULL)) {
        /* Check the UDP port numbers */
        if ((udp.uh_dport != net->port) ||
            (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
            SCTP_TCB_UNLOCK(stcb);
            return;
        }
        /* Check the verification tag */
        if (ntohl(sh.v_tag) != 0) {
            /*
             * This must be the verification tag used for
             * sending out packets. We don't consider
             * packets reflecting the verification tag.
             */
            if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) {
                SCTP_TCB_UNLOCK(stcb);
                return;
            }
        } else {
#if defined(__FreeBSD__)
            if (ip6cp->ip6c_m->m_pkthdr.len >=
                ip6cp->ip6c_off + sizeof(struct udphdr) +
                                  sizeof(struct sctphdr) +
                                  sizeof(struct sctp_chunkhdr) +
                                  offsetof(struct sctp_init, a_rwnd)) {
                /*
                 * In this case we can check if we
                 * got an INIT chunk and if the
                 * initiate tag matches.
                 */
                uint32_t initiate_tag;
                uint8_t chunk_type;

                m_copydata(ip6cp->ip6c_m,
                           ip6cp->ip6c_off +
                           sizeof(struct udphdr) +
                           sizeof(struct sctphdr),
                           sizeof(uint8_t),
                           (caddr_t)&chunk_type);
                m_copydata(ip6cp->ip6c_m,
                           ip6cp->ip6c_off +
                           sizeof(struct udphdr) +
                           sizeof(struct sctphdr) +
                           sizeof(struct sctp_chunkhdr),
                           sizeof(uint32_t),
                           (caddr_t)&initiate_tag);
                if ((chunk_type != SCTP_INITIATION) ||
                    (ntohl(initiate_tag) != stcb->asoc.my_vtag)) {
                    SCTP_TCB_UNLOCK(stcb);
                    return;
                }
            } else {
                SCTP_TCB_UNLOCK(stcb);
                return;
            }
#else
            SCTP_TCB_UNLOCK(stcb);
            return;
#endif
        }
        type = ip6cp->ip6c_icmp6->icmp6_type;
        code = ip6cp->ip6c_icmp6->icmp6_code;
        if ((type == ICMP6_DST_UNREACH) &&
            (code == ICMP6_DST_UNREACH_NOPORT)) {
            /*
             * nobody listening on the tunneling UDP port; report
             * it as an unrecognized next header instead
             */
            type = ICMP6_PARAM_PROB;
            code = ICMP6_PARAMPROB_NEXTHEADER;
        }
        sctp6_notify(inp, stcb, net, type, code,
                     ntohl(ip6cp->ip6c_icmp6->icmp6_mtu));
    } else {
#if defined(__FreeBSD__) && __FreeBSD_version < 500000
        if (PRC_IS_REDIRECT(cmd) && (inp != NULL)) {
            in6_rtchange((struct in6pcb *)inp,
                         inet6ctlerrmap[cmd]);
        }
#endif
        if ((stcb == NULL) && (inp != NULL)) {
            /* reduce inp's ref-count */
            SCTP_INP_WLOCK(inp);
            SCTP_INP_DECR_REF(inp);
            SCTP_INP_WUNLOCK(inp);
        }
        if (stcb) {
            SCTP_TCB_UNLOCK(stcb);
        }
    }
}
8024 #endif
8025 #endif
8026
/*
 * Tear down the SCTP-over-UDP tunneling sockets, if any are open.
 * The sysctl caller is assumed to hold sctp_sysctl_info_lock() for
 * writing.
 */
void
sctp_over_udp_stop(void)
{
#ifdef INET
    struct socket *tun4;

    tun4 = SCTP_BASE_INFO(udp4_tun_socket);
    if (tun4 != NULL) {
        soclose(tun4);
        SCTP_BASE_INFO(udp4_tun_socket) = NULL;
    }
#endif
#ifdef INET6
    struct socket *tun6;

    tun6 = SCTP_BASE_INFO(udp6_tun_socket);
    if (tun6 != NULL) {
        soclose(tun6);
        SCTP_BASE_INFO(udp6_tun_socket) = NULL;
    }
#endif
}
8046
/*
 * Bring up the SCTP-over-UDP tunneling sockets on the configured sysctl
 * port, registering the packet and ICMP hooks for each address family.
 * Returns 0 on success or an errno; on any failure everything opened so
 * far is torn down via sctp_over_udp_stop().
 */
int
sctp_over_udp_start(void)
{
#if __FreeBSD_version >= 800044
    uint16_t port;
    int ret;
#ifdef INET
    struct sockaddr_in sin;
#endif
#ifdef INET6
    struct sockaddr_in6 sin6;
#endif
    /*
     * This function assumes sysctl caller holds sctp_sysctl_info_lock() for writing!
     */
    port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
    /*
     * NOTE(review): ntohs() is a no-op for a zero check, and port is
     * passed through htons() below as if host order -- confirm the
     * sysctl stores the port in host byte order.
     */
    if (ntohs(port) == 0) {
        /* Must have a port set */
        return (EINVAL);
    }
#ifdef INET
    if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
        /* Already running -- must stop first */
        return (EALREADY);
    }
#endif
#ifdef INET6
    if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
        /* Already running -- must stop first */
        return (EALREADY);
    }
#endif
#ifdef INET
    if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
                        SOCK_DGRAM, IPPROTO_UDP,
                        curthread->td_ucred, curthread))) {
        sctp_over_udp_stop();
        return (ret);
    }
    /* Call the special UDP hook. */
    if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
                                        sctp_recv_udp_tunneled_packet,
#if __FreeBSD_version >= 1100000
                                        sctp_recv_icmp_tunneled_packet,
#endif
                                        NULL))) {
        sctp_over_udp_stop();
        return (ret);
    }
    /* Ok, we have a socket, bind it to the port. */
    memset(&sin, 0, sizeof(struct sockaddr_in));
    sin.sin_len = sizeof(struct sockaddr_in);
    sin.sin_family = AF_INET;
    sin.sin_port = htons(port);
    if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
                      (struct sockaddr *)&sin, curthread))) {
        sctp_over_udp_stop();
        return (ret);
    }
#endif
#ifdef INET6
    if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
                        SOCK_DGRAM, IPPROTO_UDP,
                        curthread->td_ucred, curthread))) {
        sctp_over_udp_stop();
        return (ret);
    }
    /* Call the special UDP hook. */
    if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
                                        sctp_recv_udp_tunneled_packet,
#if __FreeBSD_version >= 1100000
                                        sctp_recv_icmp6_tunneled_packet,
#endif
                                        NULL))) {
        sctp_over_udp_stop();
        return (ret);
    }
    /* Ok, we have a socket, bind it to the port. */
    memset(&sin6, 0, sizeof(struct sockaddr_in6));
    sin6.sin6_len = sizeof(struct sockaddr_in6);
    sin6.sin6_family = AF_INET6;
    sin6.sin6_port = htons(port);
    if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
                      (struct sockaddr *)&sin6, curthread))) {
        sctp_over_udp_stop();
        return (ret);
    }
#endif
    return (0);
#else
    /* UDP tunneling requires FreeBSD 8.0 or newer */
    return (ENOTSUP);
#endif
}
8140 #endif
8141
8142 /*
8143 * sctp_min_mtu ()returns the minimum of all non-zero arguments.
8144 * If all arguments are zero, zero is returned.
8145 */
8146 uint32_t
8147 sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
8148 {
8149 if (mtu1 > 0) {
8150 if (mtu2 > 0) {
8151 if (mtu3 > 0) {
8152 return (min(mtu1, min(mtu2, mtu3)));
8153 } else {
8154 return (min(mtu1, mtu2));
8155 }
8156 } else {
8157 if (mtu3 > 0) {
8158 return (min(mtu1, mtu3));
8159 } else {
8160 return (mtu1);
8161 }
8162 }
8163 } else {
8164 if (mtu2 > 0) {
8165 if (mtu3 > 0) {
8166 return (min(mtu2, mtu3));
8167 } else {
8168 return (mtu2);
8169 }
8170 } else {
8171 return (mtu3);
8172 }
8173 }
8174 }
8175
8176 #if defined(__FreeBSD__)
8177 void
8178 sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
8179 {
8180 struct in_conninfo inc;
8181
8182 memset(&inc, 0, sizeof(struct in_conninfo));
8183 inc.inc_fibnum = fibnum;
8184 switch (addr->sa.sa_family) {
8185 #ifdef INET
8186 case AF_INET:
8187 inc.inc_faddr = addr->sin.sin_addr;
8188 break;
8189 #endif
8190 #ifdef INET6
8191 case AF_INET6:
8192 inc.inc_flags |= INC_ISIPV6;
8193 inc.inc6_faddr = addr->sin6.sin6_addr;
8194 break;
8195 #endif
8196 default:
8197 return;
8198 }
8199 tcp_hc_updatemtu(&inc, (u_long)mtu);
8200 }
8201
8202 uint32_t
8203 sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
8204 {
8205 struct in_conninfo inc;
8206
8207 memset(&inc, 0, sizeof(struct in_conninfo));
8208 inc.inc_fibnum = fibnum;
8209 switch (addr->sa.sa_family) {
8210 #ifdef INET
8211 case AF_INET:
8212 inc.inc_faddr = addr->sin.sin_addr;
8213 break;
8214 #endif
8215 #ifdef INET6
8216 case AF_INET6:
8217 inc.inc_flags |= INC_ISIPV6;
8218 inc.inc6_faddr = addr->sin6.sin6_addr;
8219 break;
8220 #endif
8221 default:
8222 return (0);
8223 }
8224 return ((uint32_t)tcp_hc_getmtu(&inc));
8225 }
8226 #endif
8227