1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
5 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * a) Redistributions of source code must retain the above copyright notice,
12 * this list of conditions and the following disclaimer.
13 *
14 * b) Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the distribution.
17 *
18 * c) Neither the name of Cisco Systems, Inc. nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35 #if defined(__FreeBSD__) && !defined(__Userspace__)
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD: head/sys/netinet/sctputil.c 362462 2020-06-21 09:56:09Z tuexen $");
38 #endif
39
40 #include <netinet/sctp_os.h>
41 #include <netinet/sctp_pcb.h>
42 #include <netinet/sctputil.h>
43 #include <netinet/sctp_var.h>
44 #include <netinet/sctp_sysctl.h>
45 #ifdef INET6
46 #if defined(__Userspace__) || defined(__FreeBSD__)
47 #include <netinet6/sctp6_var.h>
48 #endif
49 #endif
50 #include <netinet/sctp_header.h>
51 #include <netinet/sctp_output.h>
52 #include <netinet/sctp_uio.h>
53 #include <netinet/sctp_timer.h>
54 #include <netinet/sctp_indata.h>
55 #include <netinet/sctp_auth.h>
56 #include <netinet/sctp_asconf.h>
57 #include <netinet/sctp_bsd_addr.h>
58 #if defined(__Userspace__)
59 #include <netinet/sctp_constants.h>
60 #endif
61 #if defined(__FreeBSD__) && !defined(__Userspace__)
62 #include <netinet/sctp_kdtrace.h>
63 #if defined(INET6) || defined(INET)
64 #include <netinet/tcp_var.h>
65 #endif
66 #include <netinet/udp.h>
67 #include <netinet/udp_var.h>
68 #include <sys/proc.h>
69 #ifdef INET6
70 #include <netinet/icmp6.h>
71 #endif
72 #endif
73
74 #if defined(_WIN32) && !defined(__Userspace__)
75 #if !defined(SCTP_LOCAL_TRACE_BUF)
76 #include "eventrace_netinet.h"
77 #include "sctputil.tmh" /* this is the file that will be auto generated */
78 #endif
79 #else
80 #ifndef KTR_SCTP
81 #define KTR_SCTP KTR_SUBSYS
82 #endif
83 #endif
84
85 extern const struct sctp_cc_functions sctp_cc_functions[];
86 extern const struct sctp_ss_functions sctp_ss_functions[];
87
void
sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/*
	 * Zero the whole record first: the trace macro below reads the
	 * union through x.misc, so any byte not written through x.sb
	 * would otherwise be indeterminate.  Sibling loggers such as
	 * rto_logging() already do this.
	 */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.sb.stcb = stcb;
	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
	if (stcb)
		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
	else
		sctp_clog.x.sb.stcb_sbcc = 0;
	sctp_clog.x.sb.incr = incr;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SB,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
110
111 void
sctp_log_closing(struct sctp_inpcb * inp,struct sctp_tcb * stcb,int16_t loc)112 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
113 {
114 #if defined(SCTP_LOCAL_TRACE_BUF)
115 struct sctp_cwnd_log sctp_clog;
116
117 sctp_clog.x.close.inp = (void *)inp;
118 sctp_clog.x.close.sctp_flags = inp->sctp_flags;
119 if (stcb) {
120 sctp_clog.x.close.stcb = (void *)stcb;
121 sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
122 } else {
123 sctp_clog.x.close.stcb = 0;
124 sctp_clog.x.close.state = 0;
125 }
126 sctp_clog.x.close.loc = loc;
127 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
128 SCTP_LOG_EVENT_CLOSE,
129 0,
130 sctp_clog.x.misc.log1,
131 sctp_clog.x.misc.log2,
132 sctp_clog.x.misc.log3,
133 sctp_clog.x.misc.log4);
134 #endif
135 }
136
void
rto_logging(struct sctp_nets *net, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log clog;

	/*
	 * Trace the RTT for this destination; net->rtt is divided by
	 * 1000 here (presumably usec -> msec — confirm against struct
	 * sctp_nets documentation).
	 */
	memset(&clog, 0, sizeof(clog));
	clog.x.rto.net = (void *)net;
	clog.x.rto.rtt = net->rtt / 1000;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RTT, from,
	    clog.x.misc.log1, clog.x.misc.log2,
	    clog.x.misc.log3, clog.x.misc.log4);
#endif
}
155
156 void
sctp_log_strm_del_alt(struct sctp_tcb * stcb,uint32_t tsn,uint16_t sseq,uint16_t stream,int from)157 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
158 {
159 #if defined(SCTP_LOCAL_TRACE_BUF)
160 struct sctp_cwnd_log sctp_clog;
161
162 sctp_clog.x.strlog.stcb = stcb;
163 sctp_clog.x.strlog.n_tsn = tsn;
164 sctp_clog.x.strlog.n_sseq = sseq;
165 sctp_clog.x.strlog.e_tsn = 0;
166 sctp_clog.x.strlog.e_sseq = 0;
167 sctp_clog.x.strlog.strm = stream;
168 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
169 SCTP_LOG_EVENT_STRM,
170 from,
171 sctp_clog.x.misc.log1,
172 sctp_clog.x.misc.log2,
173 sctp_clog.x.misc.log3,
174 sctp_clog.x.misc.log4);
175 #endif
176 }
177
void
sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/* Zero first: the trace macro reads the union via x.misc. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.nagle.stcb = (void *)stcb;
	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_NAGLE,
	    action,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
198
199 void
sctp_log_sack(uint32_t old_cumack,uint32_t cumack,uint32_t tsn,uint16_t gaps,uint16_t dups,int from)200 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
201 {
202 #if defined(SCTP_LOCAL_TRACE_BUF)
203 struct sctp_cwnd_log sctp_clog;
204
205 sctp_clog.x.sack.cumack = cumack;
206 sctp_clog.x.sack.oldcumack = old_cumack;
207 sctp_clog.x.sack.tsn = tsn;
208 sctp_clog.x.sack.numGaps = gaps;
209 sctp_clog.x.sack.numDups = dups;
210 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
211 SCTP_LOG_EVENT_SACK,
212 from,
213 sctp_clog.x.misc.log1,
214 sctp_clog.x.misc.log2,
215 sctp_clog.x.misc.log3,
216 sctp_clog.x.misc.log4);
217 #endif
218 }
219
220 void
sctp_log_map(uint32_t map,uint32_t cum,uint32_t high,int from)221 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
222 {
223 #if defined(SCTP_LOCAL_TRACE_BUF)
224 struct sctp_cwnd_log sctp_clog;
225
226 memset(&sctp_clog, 0, sizeof(sctp_clog));
227 sctp_clog.x.map.base = map;
228 sctp_clog.x.map.cum = cum;
229 sctp_clog.x.map.high = high;
230 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
231 SCTP_LOG_EVENT_MAP,
232 from,
233 sctp_clog.x.misc.log1,
234 sctp_clog.x.misc.log2,
235 sctp_clog.x.misc.log3,
236 sctp_clog.x.misc.log4);
237 #endif
238 }
239
240 void
sctp_log_fr(uint32_t biggest_tsn,uint32_t biggest_new_tsn,uint32_t tsn,int from)241 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
242 {
243 #if defined(SCTP_LOCAL_TRACE_BUF)
244 struct sctp_cwnd_log sctp_clog;
245
246 memset(&sctp_clog, 0, sizeof(sctp_clog));
247 sctp_clog.x.fr.largest_tsn = biggest_tsn;
248 sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
249 sctp_clog.x.fr.tsn = tsn;
250 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
251 SCTP_LOG_EVENT_FR,
252 from,
253 sctp_clog.x.misc.log1,
254 sctp_clog.x.misc.log2,
255 sctp_clog.x.misc.log3,
256 sctp_clog.x.misc.log4);
257 #endif
258 }
259
260 #ifdef SCTP_MBUF_LOGGING
void
sctp_log_mb(struct mbuf *m, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/*
	 * Zero first: the trace macro reads the union via x.misc, and on
	 * the APPLE branch below x.mb.refcnt is never assigned at all.
	 */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.mb.mp = m;
	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
	if (SCTP_BUF_IS_EXTENDED(m)) {
		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
#if defined(__APPLE__) && !defined(__Userspace__)
		/* APPLE does not use a ref_cnt, but a forward/backward ref queue */
#else
		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
#endif
	} else {
		sctp_clog.x.mb.ext = 0;
		sctp_clog.x.mb.refcnt = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBUF,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
291
292 void
sctp_log_mbc(struct mbuf * m,int from)293 sctp_log_mbc(struct mbuf *m, int from)
294 {
295 struct mbuf *mat;
296
297 for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
298 sctp_log_mb(mat, from);
299 }
300 }
301 #endif
302
void
sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	if (control == NULL) {
		SCTP_PRINTF("Gak log of NULL?\n");
		return;
	}
	/* Zero first: the trace macro reads the union via x.misc. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.strlog.stcb = control->stcb;
	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
	sctp_clog.x.strlog.strm = control->sinfo_stream;
	if (poschk != NULL) {
		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
	} else {
		sctp_clog.x.strlog.e_tsn = 0;
		sctp_clog.x.strlog.e_sseq = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
333
334 void
sctp_log_cwnd(struct sctp_tcb * stcb,struct sctp_nets * net,int augment,uint8_t from)335 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
336 {
337 #if defined(SCTP_LOCAL_TRACE_BUF)
338 struct sctp_cwnd_log sctp_clog;
339
340 sctp_clog.x.cwnd.net = net;
341 if (stcb->asoc.send_queue_cnt > 255)
342 sctp_clog.x.cwnd.cnt_in_send = 255;
343 else
344 sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
345 if (stcb->asoc.stream_queue_cnt > 255)
346 sctp_clog.x.cwnd.cnt_in_str = 255;
347 else
348 sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
349
350 if (net) {
351 sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
352 sctp_clog.x.cwnd.inflight = net->flight_size;
353 sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
354 sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
355 sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
356 }
357 if (SCTP_CWNDLOG_PRESEND == from) {
358 sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
359 }
360 sctp_clog.x.cwnd.cwnd_augment = augment;
361 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
362 SCTP_LOG_EVENT_CWND,
363 from,
364 sctp_clog.x.misc.log1,
365 sctp_clog.x.misc.log2,
366 sctp_clog.x.misc.log3,
367 sctp_clog.x.misc.log4);
368 #endif
369 }
370
371 #if !defined(__APPLE__) && !defined(__Userspace__)
void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/*
	 * Snapshot the ownership state of every lock relevant to this
	 * inp/stcb pair and emit it as one trace record.  Both inp and
	 * stcb may be NULL; missing locks are logged as SCTP_LOCK_UNKNOWN.
	 */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *) inp->sctp_socket;

	} else {
		sctp_clog.x.lock.sock = (void *) NULL;
	}
	sctp_clog.x.lock.inp = (void *) inp;
#if defined(__FreeBSD__)
	/* mtx_owned()/rw_wowned() report whether *this* thread holds the lock. */
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		/*
		 * NOTE(review): sock_lock and sockrcvbuf_lock both sample
		 * so_rcv.sb_mtx here — presumably the socket lock aliases
		 * the receive-buffer mutex on this platform; confirm.
		 */
		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
#endif
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
419 #endif
420
421 void
sctp_log_maxburst(struct sctp_tcb * stcb,struct sctp_nets * net,int error,int burst,uint8_t from)422 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
423 {
424 #if defined(SCTP_LOCAL_TRACE_BUF)
425 struct sctp_cwnd_log sctp_clog;
426
427 memset(&sctp_clog, 0, sizeof(sctp_clog));
428 sctp_clog.x.cwnd.net = net;
429 sctp_clog.x.cwnd.cwnd_new_value = error;
430 sctp_clog.x.cwnd.inflight = net->flight_size;
431 sctp_clog.x.cwnd.cwnd_augment = burst;
432 if (stcb->asoc.send_queue_cnt > 255)
433 sctp_clog.x.cwnd.cnt_in_send = 255;
434 else
435 sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
436 if (stcb->asoc.stream_queue_cnt > 255)
437 sctp_clog.x.cwnd.cnt_in_str = 255;
438 else
439 sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
440 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
441 SCTP_LOG_EVENT_MAXBURST,
442 from,
443 sctp_clog.x.misc.log1,
444 sctp_clog.x.misc.log2,
445 sctp_clog.x.misc.log3,
446 sctp_clog.x.misc.log4);
447 #endif
448 }
449
450 void
sctp_log_rwnd(uint8_t from,uint32_t peers_rwnd,uint32_t snd_size,uint32_t overhead)451 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
452 {
453 #if defined(SCTP_LOCAL_TRACE_BUF)
454 struct sctp_cwnd_log sctp_clog;
455
456 sctp_clog.x.rwnd.rwnd = peers_rwnd;
457 sctp_clog.x.rwnd.send_size = snd_size;
458 sctp_clog.x.rwnd.overhead = overhead;
459 sctp_clog.x.rwnd.new_rwnd = 0;
460 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
461 SCTP_LOG_EVENT_RWND,
462 from,
463 sctp_clog.x.misc.log1,
464 sctp_clog.x.misc.log2,
465 sctp_clog.x.misc.log3,
466 sctp_clog.x.misc.log4);
467 #endif
468 }
469
470 void
sctp_log_rwnd_set(uint8_t from,uint32_t peers_rwnd,uint32_t flight_size,uint32_t overhead,uint32_t a_rwndval)471 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
472 {
473 #if defined(SCTP_LOCAL_TRACE_BUF)
474 struct sctp_cwnd_log sctp_clog;
475
476 sctp_clog.x.rwnd.rwnd = peers_rwnd;
477 sctp_clog.x.rwnd.send_size = flight_size;
478 sctp_clog.x.rwnd.overhead = overhead;
479 sctp_clog.x.rwnd.new_rwnd = a_rwndval;
480 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
481 SCTP_LOG_EVENT_RWND,
482 from,
483 sctp_clog.x.misc.log1,
484 sctp_clog.x.misc.log2,
485 sctp_clog.x.misc.log3,
486 sctp_clog.x.misc.log4);
487 #endif
488 }
489
490 #ifdef SCTP_MBCNT_LOGGING
491 static void
sctp_log_mbcnt(uint8_t from,uint32_t total_oq,uint32_t book,uint32_t total_mbcnt_q,uint32_t mbcnt)492 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
493 {
494 #if defined(SCTP_LOCAL_TRACE_BUF)
495 struct sctp_cwnd_log sctp_clog;
496
497 sctp_clog.x.mbcnt.total_queue_size = total_oq;
498 sctp_clog.x.mbcnt.size_change = book;
499 sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
500 sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
501 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
502 SCTP_LOG_EVENT_MBCNT,
503 from,
504 sctp_clog.x.misc.log1,
505 sctp_clog.x.misc.log2,
506 sctp_clog.x.misc.log3,
507 sctp_clog.x.misc.log4);
508 #endif
509 }
510 #endif
511
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	/* Generic trace record: four caller-chosen 32-bit values, no union. */
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
#endif
}
522
523 void
sctp_wakeup_log(struct sctp_tcb * stcb,uint32_t wake_cnt,int from)524 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
525 {
526 #if defined(SCTP_LOCAL_TRACE_BUF)
527 struct sctp_cwnd_log sctp_clog;
528
529 sctp_clog.x.wake.stcb = (void *)stcb;
530 sctp_clog.x.wake.wake_cnt = wake_cnt;
531 sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
532 sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
533 sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
534
535 if (stcb->asoc.stream_queue_cnt < 0xff)
536 sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
537 else
538 sctp_clog.x.wake.stream_qcnt = 0xff;
539
540 if (stcb->asoc.chunks_on_out_queue < 0xff)
541 sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
542 else
543 sctp_clog.x.wake.chunks_on_oque = 0xff;
544
545 sctp_clog.x.wake.sctpflags = 0;
546 /* set in the defered mode stuff */
547 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
548 sctp_clog.x.wake.sctpflags |= 1;
549 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
550 sctp_clog.x.wake.sctpflags |= 2;
551 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
552 sctp_clog.x.wake.sctpflags |= 4;
553 /* what about the sb */
554 if (stcb->sctp_socket) {
555 struct socket *so = stcb->sctp_socket;
556
557 sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
558 } else {
559 sctp_clog.x.wake.sbflags = 0xff;
560 }
561 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
562 SCTP_LOG_EVENT_WAKE,
563 from,
564 sctp_clog.x.misc.log1,
565 sctp_clog.x.misc.log2,
566 sctp_clog.x.misc.log3,
567 sctp_clog.x.misc.log4);
568 #endif
569 }
570
571 void
sctp_log_block(uint8_t from,struct sctp_association * asoc,ssize_t sendlen)572 sctp_log_block(uint8_t from, struct sctp_association *asoc, ssize_t sendlen)
573 {
574 #if defined(SCTP_LOCAL_TRACE_BUF)
575 struct sctp_cwnd_log sctp_clog;
576
577 sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
578 sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
579 sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
580 sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
581 sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
582 sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight/1024);
583 sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
584 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
585 SCTP_LOG_EVENT_BLOCK,
586 from,
587 sctp_clog.x.misc.log1,
588 sctp_clog.x.misc.log2,
589 sctp_clog.x.misc.log3,
590 sctp_clog.x.misc.log4);
591 #endif
592 }
593
int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/*
	 * Stub: stat-log extraction is expected to happen via ktrdump
	 * instead of this sockopt path, so this always reports success.
	 */
	/* May need to fix this if ktrdump does not work */
	return (0);
}
600
601 #ifdef SCTP_AUDITING_ENABLED
/* Ring buffer of (event, detail) byte pairs recorded by sctp_audit_log(). */
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
/* Next write slot in sctp_audit_data; wraps to 0 at SCTP_AUDIT_SIZE. */
static int sctp_audit_indx = 0;
604
605 static
606 void
sctp_print_audit_report(void)607 sctp_print_audit_report(void)
608 {
609 int i;
610 int cnt;
611
612 cnt = 0;
613 for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
614 if ((sctp_audit_data[i][0] == 0xe0) &&
615 (sctp_audit_data[i][1] == 0x01)) {
616 cnt = 0;
617 SCTP_PRINTF("\n");
618 } else if (sctp_audit_data[i][0] == 0xf0) {
619 cnt = 0;
620 SCTP_PRINTF("\n");
621 } else if ((sctp_audit_data[i][0] == 0xc0) &&
622 (sctp_audit_data[i][1] == 0x01)) {
623 SCTP_PRINTF("\n");
624 cnt = 0;
625 }
626 SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
627 (uint32_t) sctp_audit_data[i][1]);
628 cnt++;
629 if ((cnt % 14) == 0)
630 SCTP_PRINTF("\n");
631 }
632 for (i = 0; i < sctp_audit_indx; i++) {
633 if ((sctp_audit_data[i][0] == 0xe0) &&
634 (sctp_audit_data[i][1] == 0x01)) {
635 cnt = 0;
636 SCTP_PRINTF("\n");
637 } else if (sctp_audit_data[i][0] == 0xf0) {
638 cnt = 0;
639 SCTP_PRINTF("\n");
640 } else if ((sctp_audit_data[i][0] == 0xc0) &&
641 (sctp_audit_data[i][1] == 0x01)) {
642 SCTP_PRINTF("\n");
643 cnt = 0;
644 }
645 SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
646 (uint32_t) sctp_audit_data[i][1]);
647 cnt++;
648 if ((cnt % 14) == 0)
649 SCTP_PRINTF("\n");
650 }
651 SCTP_PRINTF("\n");
652 }
653
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	/*
	 * Consistency-check the association's retransmit and in-flight
	 * accounting against the sent queue, recording a breadcrumb trail
	 * in sctp_audit_data[].  On any mismatch the cached counters are
	 * corrected in place and a full audit report is printed.
	 */
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/* 0xAA <from>: entry marker for this audit call. */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		/* 0xAF 0x01: bailed out, no endpoint. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		/* 0xAF 0x02: bailed out, no association. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xA1 <cnt>: the retransmit count we are about to verify. */
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	/*
	 * Recompute, from the sent queue itself: how many chunks are
	 * marked for retransmission, and the byte/chunk totals still
	 * considered in flight (sent < SCTP_DATAGRAM_RESEND).
	 */
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		/* 0xAF 0xA1: retransmit-count mismatch; fix the cache. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
			    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		/* 0xA2 <cnt>: the corrected retransmit count. */
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF 0xA2: total_flight mismatch; fix the cache. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
			    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		/* 0xAF 0xA5: flight chunk-count mismatch; fix the cache. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	/*
	 * Cross-check: the sum of per-destination flight sizes must also
	 * equal the (now corrected) association total_flight.
	 */
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF 0xA3: per-net flight sums disagree with the total. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
			    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		/* Rebuild each net's flight_size from its in-flight chunks. */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
					    (void *)lnet, lnet->flight_size,
					    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		/* Something was off: dump the whole audit ring. */
		sctp_print_audit_report();
	}
}
783
784 void
sctp_audit_log(uint8_t ev,uint8_t fd)785 sctp_audit_log(uint8_t ev, uint8_t fd)
786 {
787
788 sctp_audit_data[sctp_audit_indx][0] = ev;
789 sctp_audit_data[sctp_audit_indx][1] = fd;
790 sctp_audit_indx++;
791 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
792 sctp_audit_indx = 0;
793 }
794 }
795
796 #endif
797
798 /*
799 * The conversion from time to ticks and vice versa is done by rounding
800 * upwards. This way we can test in the code the time to be positive and
801 * know that this corresponds to a positive number of ticks.
802 */
803
804 uint32_t
sctp_msecs_to_ticks(uint32_t msecs)805 sctp_msecs_to_ticks(uint32_t msecs)
806 {
807 uint64_t temp;
808 uint32_t ticks;
809
810 if (hz == 1000) {
811 ticks = msecs;
812 } else {
813 temp = (((uint64_t)msecs * hz) + 999) / 1000;
814 if (temp > UINT32_MAX) {
815 ticks = UINT32_MAX;
816 } else {
817 ticks = (uint32_t)temp;
818 }
819 }
820 return (ticks);
821 }
822
823 uint32_t
sctp_ticks_to_msecs(uint32_t ticks)824 sctp_ticks_to_msecs(uint32_t ticks)
825 {
826 uint64_t temp;
827 uint32_t msecs;
828
829 if (hz == 1000) {
830 msecs = ticks;
831 } else {
832 temp = (((uint64_t)ticks * 1000) + (hz - 1)) / hz;
833 if (temp > UINT32_MAX) {
834 msecs = UINT32_MAX;
835 } else {
836 msecs = (uint32_t)temp;
837 }
838 }
839 return (msecs);
840 }
841
842 uint32_t
sctp_secs_to_ticks(uint32_t secs)843 sctp_secs_to_ticks(uint32_t secs)
844 {
845 uint64_t temp;
846 uint32_t ticks;
847
848 temp = (uint64_t)secs * hz;
849 if (temp > UINT32_MAX) {
850 ticks = UINT32_MAX;
851 } else {
852 ticks = (uint32_t)temp;
853 }
854 return (ticks);
855 }
856
857 uint32_t
sctp_ticks_to_secs(uint32_t ticks)858 sctp_ticks_to_secs(uint32_t ticks)
859 {
860 uint64_t temp;
861 uint32_t secs;
862
863 temp = ((uint64_t)ticks + (hz - 1)) / hz;
864 if (temp > UINT32_MAX) {
865 secs = UINT32_MAX;
866 } else {
867 secs = (uint32_t)temp;
868 }
869 return (secs);
870 }
871
872 /*
873 * sctp_stop_timers_for_shutdown() should be called
874 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
875 * state to make sure that all timers are stopped.
876 */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
	struct sctp_inpcb *inp;
	struct sctp_nets *net;

	inp = stcb->sctp_ep;

	/* Association-wide timers first... */
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
			SCTP_FROM_SCTPUTIL + SCTP_LOC_12);
	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
			SCTP_FROM_SCTPUTIL + SCTP_LOC_13);
	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
			SCTP_FROM_SCTPUTIL + SCTP_LOC_14);
	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
			SCTP_FROM_SCTPUTIL + SCTP_LOC_15);
	/* ...then the per-destination PMTU and heartbeat timers. */
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
				SCTP_FROM_SCTPUTIL + SCTP_LOC_16);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
				SCTP_FROM_SCTPUTIL + SCTP_LOC_17);
	}
}
900
/*
 * Stop every timer belonging to the association: all association-wide
 * timers, optionally the ASOCKILL timer, and all per-destination timers.
 */
void
sctp_stop_association_timers(struct sctp_tcb *stcb, bool stop_assoc_kill_timer)
{
	struct sctp_inpcb *inp;
	struct sctp_nets *net;

	inp = stcb->sctp_ep;
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
			SCTP_FROM_SCTPUTIL + SCTP_LOC_18);
	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
			SCTP_FROM_SCTPUTIL + SCTP_LOC_19);
	/* The kill timer may be kept running when the caller has re-armed it. */
	if (stop_assoc_kill_timer) {
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
				SCTP_FROM_SCTPUTIL + SCTP_LOC_20);
	}
	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
			SCTP_FROM_SCTPUTIL + SCTP_LOC_21);
	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
			SCTP_FROM_SCTPUTIL + SCTP_LOC_22);
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNGUARD, inp, stcb, NULL,
			SCTP_FROM_SCTPUTIL + SCTP_LOC_23);
	/* Mobility adaptation */
	sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, inp, stcb, NULL,
			SCTP_FROM_SCTPUTIL + SCTP_LOC_24);
	/* Per-destination timers. */
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
				SCTP_FROM_SCTPUTIL + SCTP_LOC_25);
		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net,
				SCTP_FROM_SCTPUTIL + SCTP_LOC_26);
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, net,
				SCTP_FROM_SCTPUTIL + SCTP_LOC_27);
		sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net,
				SCTP_FROM_SCTPUTIL + SCTP_LOC_28);
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, net,
				SCTP_FROM_SCTPUTIL + SCTP_LOC_29);
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
				SCTP_FROM_SCTPUTIL + SCTP_LOC_30);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
				SCTP_FROM_SCTPUTIL + SCTP_LOC_31);
	}
}
942
943 /*
944 * A list of sizes based on typical mtu's, used only if next hop size not
945 * returned. These values MUST be multiples of 4 and MUST be ordered.
946 */
/* Ascending table of common link MTUs; entries must stay multiples of 4. */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1004,
	1492,
	1500,
	1536,
	2000,
	2048,
	4352,
	4464,
	8168,
	17912,
	32000,
	65532
};
967
968 /*
969 * Return the largest MTU in sctp_mtu_sizes smaller than val.
970 * If val is smaller than the minimum, just return the largest
971 * multiple of 4 smaller or equal to val.
972 * Ensure that the result is a multiple of 4.
973 */
974 uint32_t
sctp_get_prev_mtu(uint32_t val)975 sctp_get_prev_mtu(uint32_t val)
976 {
977 uint32_t i;
978
979 val &= 0xfffffffc;
980 if (val <= sctp_mtu_sizes[0]) {
981 return (val);
982 }
983 for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
984 if (val <= sctp_mtu_sizes[i]) {
985 break;
986 }
987 }
988 KASSERT((sctp_mtu_sizes[i - 1] & 0x00000003) == 0,
989 ("sctp_mtu_sizes[%u] not a multiple of 4", i - 1));
990 return (sctp_mtu_sizes[i - 1]);
991 }
992
993 /*
994 * Return the smallest MTU in sctp_mtu_sizes larger than val.
995 * If val is larger than the maximum, just return the largest multiple of 4 smaller
996 * or equal to val.
997 * Ensure that the result is a multiple of 4.
998 */
999 uint32_t
sctp_get_next_mtu(uint32_t val)1000 sctp_get_next_mtu(uint32_t val)
1001 {
1002 /* select another MTU that is just bigger than this one */
1003 uint32_t i;
1004
1005 val &= 0xfffffffc;
1006 for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
1007 if (val < sctp_mtu_sizes[i]) {
1008 KASSERT((sctp_mtu_sizes[i] & 0x00000003) == 0,
1009 ("sctp_mtu_sizes[%u] not a multiple of 4", i));
1010 return (sctp_mtu_sizes[i]);
1011 }
1012 }
1013 return (val);
1014 }
1015
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	/* Restart handing out bytes from the beginning of the store. */
	m->store_at = 0;
#if defined(__Userspace__) && defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION)
	/*
	 * Fuzzing builds bypass the HMAC generator and fill the store with
	 * plain rand() bytes; randomness quality is irrelevant here.
	 */
	for (int i = 0; i < (int) (sizeof(m->random_store) / sizeof(m->random_store[0])); i++) {
		m->random_store[i] = (uint8_t) rand();
	}
#else
	/* Refill the store: HMAC the seed material keyed by the counter. */
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
	    sizeof(m->random_counter), (uint8_t *)m->random_store);
#endif
	/* Bump the counter so the next refill produces different output. */
	m->random_counter++;
}
1040
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	/* Debug override: hand out a simple incrementing sequence. */
	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	/*
	 * Claim the next 4-byte slot of the random store with a CAS so
	 * that concurrent callers never consume the same slot; wrap to 0
	 * when the remaining space cannot hold a full uint32_t.
	 */
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	if (new_store >= (SCTP_SIGNATURE_SIZE-3)) {
		new_store = 0;
	}
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	/* Read the 4 random bytes we claimed (store_at is 4-byte aligned). */
	p = &inp->random_store[store_at];
	xp = (uint32_t *)p;
	x = *xp;
	return (x);
}
1078
1079 uint32_t
sctp_select_a_tag(struct sctp_inpcb * inp,uint16_t lport,uint16_t rport,int check)1080 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
1081 {
1082 uint32_t x;
1083 struct timeval now;
1084
1085 if (check) {
1086 (void)SCTP_GETTIME_TIMEVAL(&now);
1087 }
1088 for (;;) {
1089 x = sctp_select_initial_TSN(&inp->sctp_ep);
1090 if (x == 0) {
1091 /* we never use 0 */
1092 continue;
1093 }
1094 if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
1095 break;
1096 }
1097 }
1098 return (x);
1099 }
1100
1101 int32_t
sctp_map_assoc_state(int kernel_state)1102 sctp_map_assoc_state(int kernel_state)
1103 {
1104 int32_t user_state;
1105
1106 if (kernel_state & SCTP_STATE_WAS_ABORTED) {
1107 user_state = SCTP_CLOSED;
1108 } else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
1109 user_state = SCTP_SHUTDOWN_PENDING;
1110 } else {
1111 switch (kernel_state & SCTP_STATE_MASK) {
1112 case SCTP_STATE_EMPTY:
1113 user_state = SCTP_CLOSED;
1114 break;
1115 case SCTP_STATE_INUSE:
1116 user_state = SCTP_CLOSED;
1117 break;
1118 case SCTP_STATE_COOKIE_WAIT:
1119 user_state = SCTP_COOKIE_WAIT;
1120 break;
1121 case SCTP_STATE_COOKIE_ECHOED:
1122 user_state = SCTP_COOKIE_ECHOED;
1123 break;
1124 case SCTP_STATE_OPEN:
1125 user_state = SCTP_ESTABLISHED;
1126 break;
1127 case SCTP_STATE_SHUTDOWN_SENT:
1128 user_state = SCTP_SHUTDOWN_SENT;
1129 break;
1130 case SCTP_STATE_SHUTDOWN_RECEIVED:
1131 user_state = SCTP_SHUTDOWN_RECEIVED;
1132 break;
1133 case SCTP_STATE_SHUTDOWN_ACK_SENT:
1134 user_state = SCTP_SHUTDOWN_ACK_SENT;
1135 break;
1136 default:
1137 user_state = SCTP_CLOSED;
1138 break;
1139 }
1140 }
1141 return (user_state);
1142 }
1143
/*
 * Initialize every field of a freshly allocated association (stcb->asoc)
 * from the endpoint defaults held in "inp".  Returns 0 on success or
 * ENOMEM if the stream-out or mapping arrays cannot be allocated; on
 * failure, everything allocated here is freed again before returning.
 */
int
sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
{
	struct sctp_association *asoc;
	/*
	 * Anything set to zero is taken care of by the allocation routine's
	 * bzero
	 */

	/*
	 * Up front select what scoping to apply on addresses I tell my peer
	 * Not sure what to do with these right now, we will need to come up
	 * with a way to set them. We may need to pass them through from the
	 * caller in the sctp_aloc_assoc() function.
	 */
	int i;
#if defined(SCTP_DETAILED_STR_STATS)
	int j;
#endif

	asoc = &stcb->asoc;
	/* init all variables to a known value. */
	SCTP_SET_STATE(stcb, SCTP_STATE_INUSE);
	/* Copy the per-endpoint defaults into the new association. */
	asoc->max_burst = inp->sctp_ep.max_burst;
	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
	asoc->heart_beat_delay = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
	asoc->ecn_supported = inp->ecn_supported;
	asoc->prsctp_supported = inp->prsctp_supported;
	asoc->auth_supported = inp->auth_supported;
	asoc->asconf_supported = inp->asconf_supported;
	asoc->reconfig_supported = inp->reconfig_supported;
	asoc->nrsack_supported = inp->nrsack_supported;
	asoc->pktdrop_supported = inp->pktdrop_supported;
	asoc->idata_supported = inp->idata_supported;
	asoc->sctp_cmt_pf = (uint8_t)0;
	asoc->sctp_frag_point = inp->sctp_frag_point;
	asoc->sctp_features = inp->sctp_features;
	asoc->default_dscp = inp->sctp_ep.default_dscp;
	asoc->max_cwnd = inp->max_cwnd;
#ifdef INET6
	if (inp->sctp_ep.default_flowlabel) {
		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
	} else {
		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
			/* Pick a random 20-bit label; high bit marks it as set. */
			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
			asoc->default_flowlabel &= 0x000fffff;
			asoc->default_flowlabel |= 0x80000000;
		} else {
			asoc->default_flowlabel = 0;
		}
	}
#endif
	asoc->sb_send_resv = 0;
	if (override_tag) {
		asoc->my_vtag = override_tag;
	} else {
		/* check == 1: ensure the tag is not already in use. */
		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
	}
	/* Get the nonce tags */
	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
	asoc->vrf_id = vrf_id;

#ifdef SCTP_ASOCLOG_OF_TSNS
	asoc->tsn_in_at = 0;
	asoc->tsn_out_at = 0;
	asoc->tsn_in_wrapped = 0;
	asoc->tsn_out_wrapped = 0;
	asoc->cumack_log_at = 0;
	asoc->cumack_log_atsnt = 0;
#endif
#ifdef SCTP_FS_SPEC_LOG
	asoc->fs_index = 0;
#endif
	asoc->refcnt = 0;
	asoc->assoc_up_sent = 0;
	/* All outgoing sequence spaces start from one random initial TSN. */
	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
	    sctp_select_initial_TSN(&inp->sctp_ep);
	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
	/* we are optimisitic here */
	asoc->peer_supports_nat = 0;
	asoc->sent_queue_retran_cnt = 0;

	/* for CMT */
	asoc->last_net_cmt_send_started = NULL;

	/* This will need to be adjusted */
	asoc->last_acked_seq = asoc->init_seq_number - 1;
	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
	asoc->asconf_seq_in = asoc->last_acked_seq;

	/* here we are different, we hold the next one we expect */
	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;

	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
	asoc->initial_rto = inp->sctp_ep.initial_rto;

	asoc->default_mtu = inp->sctp_ep.default_mtu;
	asoc->max_init_times = inp->sctp_ep.max_init_times;
	asoc->max_send_times = inp->sctp_ep.max_send_times;
	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
	asoc->free_chunk_cnt = 0;

	asoc->iam_blocking = 0;
	asoc->context = inp->sctp_context;
	asoc->local_strreset_support = inp->local_strreset_support;
	asoc->def_send = inp->def_send;
	asoc->delayed_ack = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
	asoc->pr_sctp_cnt = 0;
	asoc->total_output_queue_size = 0;

	/* Work out which address families this association may use. */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		asoc->scope.ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(inp) == 0) {
			asoc->scope.ipv4_addr_legal = 1;
		} else {
			asoc->scope.ipv4_addr_legal = 0;
		}
#if defined(__Userspace__)
		asoc->scope.conn_addr_legal = 0;
#endif
	} else {
		asoc->scope.ipv6_addr_legal = 0;
#if defined(__Userspace__)
		if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) {
			asoc->scope.conn_addr_legal = 1;
			asoc->scope.ipv4_addr_legal = 0;
		} else {
			asoc->scope.conn_addr_legal = 0;
			asoc->scope.ipv4_addr_legal = 1;
		}
#else
		asoc->scope.ipv4_addr_legal = 1;
#endif
	}

	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);

	asoc->smallest_mtu = inp->sctp_frag_point;
	asoc->minrto = inp->sctp_ep.sctp_minrto;
	asoc->maxrto = inp->sctp_ep.sctp_maxrto;

	asoc->stream_locked_on = 0;
	asoc->ecn_echo_cnt_onq = 0;
	asoc->stream_locked = 0;

	asoc->send_sack = 1;

	LIST_INIT(&asoc->sctp_restricted_addrs);

	TAILQ_INIT(&asoc->nets);
	TAILQ_INIT(&asoc->pending_reply_queue);
	TAILQ_INIT(&asoc->asconf_ack_sent);
	/* Setup to fill the hb random cache at first HB */
	asoc->hb_random_idx = 4;

	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;

	/* Hook up the endpoint's congestion-control and stream-scheduler modules. */
	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];

	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];

	/*
	 * Now the stream parameters, here we allocate space for all streams
	 * that we request by default.
	 */
	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
	    o_strms;
	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
		    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
		    SCTP_M_STRMO);
	if (asoc->strmout == NULL) {
		/* big trouble no memory */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/*
		 * inbound side must be set to 0xffff, also NOTE when we get
		 * the INIT-ACK back (for INIT sender) we MUST reduce the
		 * count (streamoutcnt) but first check if we sent to any of
		 * the upper streams that were dropped (if some were). Those
		 * that were dropped must be notified to the upper layer as
		 * failed to send.
		 */
		asoc->strmout[i].next_mid_ordered = 0;
		asoc->strmout[i].next_mid_unordered = 0;
		TAILQ_INIT(&asoc->strmout[i].outqueue);
		asoc->strmout[i].chunks_on_queues = 0;
#if defined(SCTP_DETAILED_STR_STATS)
		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
			asoc->strmout[i].abandoned_sent[j] = 0;
			asoc->strmout[i].abandoned_unsent[j] = 0;
		}
#else
		asoc->strmout[i].abandoned_sent[0] = 0;
		asoc->strmout[i].abandoned_unsent[0] = 0;
#endif
		asoc->strmout[i].sid = i;
		asoc->strmout[i].last_msg_incomplete = 0;
		asoc->strmout[i].state = SCTP_STREAM_OPENING;
		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
	}
	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);

	/* Now the mapping array */
	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
		    SCTP_M_MAP);
	if (asoc->mapping_array == NULL) {
		/* Unwind the stream-out allocation before failing. */
		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
		    SCTP_M_MAP);
	if (asoc->nr_mapping_array == NULL) {
		/* Unwind both earlier allocations before failing. */
		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);

	/* Now the init of the other outqueues */
	TAILQ_INIT(&asoc->free_chunks);
	TAILQ_INIT(&asoc->control_send_queue);
	TAILQ_INIT(&asoc->asconf_send_queue);
	TAILQ_INIT(&asoc->send_queue);
	TAILQ_INIT(&asoc->sent_queue);
	TAILQ_INIT(&asoc->resetHead);
	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
	TAILQ_INIT(&asoc->asconf_queue);
	/* authentication fields */
	asoc->authinfo.random = NULL;
	asoc->authinfo.active_keyid = 0;
	asoc->authinfo.assoc_key = NULL;
	asoc->authinfo.assoc_keyid = 0;
	asoc->authinfo.recv_key = NULL;
	asoc->authinfo.recv_keyid = 0;
	LIST_INIT(&asoc->shared_keys);
	asoc->marked_retrans = 0;
	asoc->port = inp->sctp_ep.port;
	asoc->timoinit = 0;
	asoc->timodata = 0;
	asoc->timosack = 0;
	asoc->timoshutdown = 0;
	asoc->timoheartbeat = 0;
	asoc->timocookie = 0;
	asoc->timoshutdownack = 0;
	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
	asoc->discontinuity_time = asoc->start_time;
	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
		asoc->abandoned_unsent[i] = 0;
		asoc->abandoned_sent[i] = 0;
	}
	/* sa_ignore MEMLEAK {memory is put in the assoc mapping array and freed later when
	 * the association is freed.
	 */
	return (0);
}
1414
1415 void
sctp_print_mapping_array(struct sctp_association * asoc)1416 sctp_print_mapping_array(struct sctp_association *asoc)
1417 {
1418 unsigned int i, limit;
1419
1420 SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1421 asoc->mapping_array_size,
1422 asoc->mapping_array_base_tsn,
1423 asoc->cumulative_tsn,
1424 asoc->highest_tsn_inside_map,
1425 asoc->highest_tsn_inside_nr_map);
1426 for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1427 if (asoc->mapping_array[limit - 1] != 0) {
1428 break;
1429 }
1430 }
1431 SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1432 for (i = 0; i < limit; i++) {
1433 SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1434 }
1435 if (limit % 16)
1436 SCTP_PRINTF("\n");
1437 for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1438 if (asoc->nr_mapping_array[limit - 1]) {
1439 break;
1440 }
1441 }
1442 SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1443 for (i = 0; i < limit; i++) {
1444 SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ': '\n');
1445 }
1446 if (limit % 16)
1447 SCTP_PRINTF("\n");
1448 }
1449
1450 int
sctp_expand_mapping_array(struct sctp_association * asoc,uint32_t needed)1451 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1452 {
1453 /* mapping array needs to grow */
1454 uint8_t *new_array1, *new_array2;
1455 uint32_t new_size;
1456
1457 new_size = asoc->mapping_array_size + ((needed+7)/8 + SCTP_MAPPING_ARRAY_INCR);
1458 SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1459 SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1460 if ((new_array1 == NULL) || (new_array2 == NULL)) {
1461 /* can't get more, forget it */
1462 SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1463 if (new_array1) {
1464 SCTP_FREE(new_array1, SCTP_M_MAP);
1465 }
1466 if (new_array2) {
1467 SCTP_FREE(new_array2, SCTP_M_MAP);
1468 }
1469 return (-1);
1470 }
1471 memset(new_array1, 0, new_size);
1472 memset(new_array2, 0, new_size);
1473 memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1474 memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1475 SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1476 SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1477 asoc->mapping_array = new_array1;
1478 asoc->nr_mapping_array = new_array2;
1479 asoc->mapping_array_size = new_size;
1480 return (0);
1481 }
1482
1483
/*
 * Walk the endpoints/associations selected by the iterator "it", calling
 * its per-endpoint and per-association callbacks, then free the iterator.
 * Runs under the INP-info read lock and the iterator lock; both are
 * periodically dropped (see the pause block below) so other threads can
 * make progress during long walks.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
#if defined(__FreeBSD__) && !defined(__Userspace__)
	struct epoch_tracker et;
#endif
	struct sctp_inpcb *tinp;
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;

#if defined(__FreeBSD__) && !defined(__Userspace__)
	NET_EPOCH_ENTER(et);
#endif
	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	sctp_it_ctl.cur_it = it;
	if (it->inp) {
		/* Convert the queued reference into a held read lock. */
		SCTP_INP_RLOCK(it->inp);
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		/* Drop locks, run the completion callback, free the iterator. */
		sctp_it_ctl.cur_it = NULL;
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
#if defined(__FreeBSD__) && !defined(__Userspace__)
		NET_EPOCH_EXIT(et);
#endif
		return;
	}
select_a_new_ep:
	if (first_in) {
		/* Current inp is already read-locked from the setup above. */
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	/* Skip endpoints whose flags/features don't match the filter. */
	while (((it->pcb_flags) &&
	        ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	       ((it->pcb_features) &&
	        ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		it->stcb = NULL;
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp)(it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		/* Endpoint callback asked to skip, or no associations here. */
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end)(it->inp,
							   it->pointer,
							   it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/*
			 * Pin the current stcb/inp with references, drop all
			 * locks, then re-acquire them.  While the locks were
			 * dropped, someone may have asked us to stop via
			 * sctp_it_ctl.iterator_flags.
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
#if !(defined(__FreeBSD__) && !defined(__Userspace__))
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_MUST_EXIT) {
					goto done_with_iterator;
				}
#endif
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
					    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			/* Re-lock and release the pinning references. */
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		KASSERT(it->inp == it->stcb->sctp_ep,
		        ("%s: stcb %p does not belong to inp %p, but inp %p",
		         __func__, it->stcb, it->inp, it->stcb->sctp_ep));

		/* run function on this one */
		(*it->function_assoc)(it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end)(it->inp,
								   it->pointer,
								   it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	it->stcb = NULL;
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1654
1655 void
sctp_iterator_worker(void)1656 sctp_iterator_worker(void)
1657 {
1658 struct sctp_iterator *it;
1659
1660 /* This function is called with the WQ lock in place */
1661 sctp_it_ctl.iterator_running = 1;
1662 while ((it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead)) != NULL) {
1663 /* now lets work on this one */
1664 TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1665 SCTP_IPI_ITERATOR_WQ_UNLOCK();
1666 #if defined(__FreeBSD__) && !defined(__Userspace__)
1667 CURVNET_SET(it->vn);
1668 #endif
1669 sctp_iterator_work(it);
1670 #if defined(__FreeBSD__) && !defined(__Userspace__)
1671 CURVNET_RESTORE();
1672 #endif
1673 SCTP_IPI_ITERATOR_WQ_LOCK();
1674 #if !defined(__FreeBSD__) && !defined(__Userspace__)
1675 if (sctp_it_ctl.iterator_flags & SCTP_ITERATOR_MUST_EXIT) {
1676 break;
1677 }
1678 #endif
1679 /*sa_ignore FREED_MEMORY*/
1680 }
1681 sctp_it_ctl.iterator_running = 0;
1682 return;
1683 }
1684
1685
1686 static void
sctp_handle_addr_wq(void)1687 sctp_handle_addr_wq(void)
1688 {
1689 /* deal with the ADDR wq from the rtsock calls */
1690 struct sctp_laddr *wi, *nwi;
1691 struct sctp_asconf_iterator *asc;
1692
1693 SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1694 sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1695 if (asc == NULL) {
1696 /* Try later, no memory */
1697 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1698 (struct sctp_inpcb *)NULL,
1699 (struct sctp_tcb *)NULL,
1700 (struct sctp_nets *)NULL);
1701 return;
1702 }
1703 LIST_INIT(&asc->list_of_work);
1704 asc->cnt = 0;
1705
1706 LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1707 LIST_REMOVE(wi, sctp_nxt_addr);
1708 LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1709 asc->cnt++;
1710 }
1711
1712 if (asc->cnt == 0) {
1713 SCTP_FREE(asc, SCTP_M_ASC_IT);
1714 } else {
1715 int ret;
1716
1717 ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
1718 sctp_asconf_iterator_stcb,
1719 NULL, /* No ep end for boundall */
1720 SCTP_PCB_FLAGS_BOUNDALL,
1721 SCTP_PCB_ANY_FEATURES,
1722 SCTP_ASOC_ANY_STATE,
1723 (void *)asc, 0,
1724 sctp_asconf_iterator_end, NULL, 0);
1725 if (ret) {
1726 SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
1727 /* Freeing if we are stopping or put back on the addr_wq. */
1728 if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
1729 sctp_asconf_iterator_end(asc, 0);
1730 } else {
1731 LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
1732 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
1733 }
1734 SCTP_FREE(asc, SCTP_M_ASC_IT);
1735 }
1736 }
1737 }
1738 }
1739
1740 /*-
1741 * The following table shows which pointers for the inp, stcb, or net are
1742 * stored for each timer after it was started.
1743 *
1744 *|Name |Timer |inp |stcb|net |
1745 *|-----------------------------|-----------------------------|----|----|----|
1746 *|SCTP_TIMER_TYPE_SEND |net->rxt_timer |Yes |Yes |Yes |
1747 *|SCTP_TIMER_TYPE_INIT |net->rxt_timer |Yes |Yes |Yes |
1748 *|SCTP_TIMER_TYPE_RECV |stcb->asoc.dack_timer |Yes |Yes |No |
1749 *|SCTP_TIMER_TYPE_SHUTDOWN |net->rxt_timer |Yes |Yes |Yes |
1750 *|SCTP_TIMER_TYPE_HEARTBEAT |net->hb_timer |Yes |Yes |Yes |
1751 *|SCTP_TIMER_TYPE_COOKIE |net->rxt_timer |Yes |Yes |Yes |
1752 *|SCTP_TIMER_TYPE_NEWCOOKIE |inp->sctp_ep.signature_change|Yes |No |No |
1753 *|SCTP_TIMER_TYPE_PATHMTURAISE |net->pmtu_timer |Yes |Yes |Yes |
1754 *|SCTP_TIMER_TYPE_SHUTDOWNACK |net->rxt_timer |Yes |Yes |Yes |
1755 *|SCTP_TIMER_TYPE_ASCONF |stcb->asoc.asconf_timer |Yes |Yes |Yes |
1756 *|SCTP_TIMER_TYPE_SHUTDOWNGUARD|stcb->asoc.shut_guard_timer |Yes |Yes |No |
1757 *|SCTP_TIMER_TYPE_AUTOCLOSE |stcb->asoc.autoclose_timer |Yes |Yes |No |
1758 *|SCTP_TIMER_TYPE_STRRESET |stcb->asoc.strreset_timer |Yes |Yes |No |
1759 *|SCTP_TIMER_TYPE_INPKILL |inp->sctp_ep.signature_change|Yes |No |No |
1760 *|SCTP_TIMER_TYPE_ASOCKILL |stcb->asoc.strreset_timer |Yes |Yes |No |
1761 *|SCTP_TIMER_TYPE_ADDR_WQ |SCTP_BASE_INFO(addr_wq_timer)|No |No |No |
1762 *|SCTP_TIMER_TYPE_PRIM_DELETED |stcb->asoc.delete_prim_timer |Yes |Yes |No |
1763 */
1764
1765 void
sctp_timeout_handler(void * t)1766 sctp_timeout_handler(void *t)
1767 {
1768 #if defined(__FreeBSD__) && !defined(__Userspace__)
1769 struct epoch_tracker et;
1770 #endif
1771 struct timeval tv;
1772 struct sctp_inpcb *inp;
1773 struct sctp_tcb *stcb;
1774 struct sctp_nets *net;
1775 struct sctp_timer *tmr;
1776 struct mbuf *op_err;
1777 #if defined(__APPLE__) && !defined(__Userspace__)
1778 struct socket *so;
1779 #endif
1780 #if defined(__Userspace__)
1781 struct socket *upcall_socket = NULL;
1782 #endif
1783 int did_output;
1784 int type;
1785 int i, secret;
1786
1787 tmr = (struct sctp_timer *)t;
1788 inp = (struct sctp_inpcb *)tmr->ep;
1789 stcb = (struct sctp_tcb *)tmr->tcb;
1790 net = (struct sctp_nets *)tmr->net;
1791 #if defined(__FreeBSD__) && !defined(__Userspace__)
1792 CURVNET_SET((struct vnet *)tmr->vnet);
1793 #endif
1794 did_output = 1;
1795
1796 #ifdef SCTP_AUDITING_ENABLED
1797 sctp_audit_log(0xF0, (uint8_t) tmr->type);
1798 sctp_auditing(3, inp, stcb, net);
1799 #endif
1800
1801 /* sanity checks... */
1802 KASSERT(tmr->self == NULL || tmr->self == tmr,
1803 ("sctp_timeout_handler: tmr->self corrupted"));
1804 KASSERT(SCTP_IS_TIMER_TYPE_VALID(tmr->type),
1805 ("sctp_timeout_handler: invalid timer type %d", tmr->type));
1806 type = tmr->type;
1807 KASSERT(stcb == NULL || stcb->sctp_ep == inp,
1808 ("sctp_timeout_handler of type %d: inp = %p, stcb->sctp_ep %p",
1809 type, stcb, stcb->sctp_ep));
1810 if (inp) {
1811 SCTP_INP_INCR_REF(inp);
1812 }
1813 tmr->stopped_from = 0xa001;
1814 if (stcb) {
1815 atomic_add_int(&stcb->asoc.refcnt, 1);
1816 if (stcb->asoc.state == 0) {
1817 atomic_add_int(&stcb->asoc.refcnt, -1);
1818 if (inp) {
1819 SCTP_INP_DECR_REF(inp);
1820 }
1821 SCTPDBG(SCTP_DEBUG_TIMER2,
1822 "Timer type %d handler exiting due to CLOSED association.\n",
1823 type);
1824 #if defined(__FreeBSD__) && !defined(__Userspace__)
1825 CURVNET_RESTORE();
1826 #endif
1827 return;
1828 }
1829 }
1830 tmr->stopped_from = 0xa002;
1831 SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d goes off.\n", type);
1832 if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1833 if (inp) {
1834 SCTP_INP_DECR_REF(inp);
1835 }
1836 if (stcb) {
1837 atomic_add_int(&stcb->asoc.refcnt, -1);
1838 }
1839 SCTPDBG(SCTP_DEBUG_TIMER2,
1840 "Timer type %d handler exiting due to not being active.\n",
1841 type);
1842 #if defined(__FreeBSD__) && !defined(__Userspace__)
1843 CURVNET_RESTORE();
1844 #endif
1845 return;
1846 }
1847
1848 tmr->stopped_from = 0xa003;
1849 if (stcb) {
1850 SCTP_TCB_LOCK(stcb);
1851 atomic_add_int(&stcb->asoc.refcnt, -1);
1852 if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
1853 ((stcb->asoc.state == 0) ||
1854 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1855 SCTP_TCB_UNLOCK(stcb);
1856 if (inp) {
1857 SCTP_INP_DECR_REF(inp);
1858 }
1859 SCTPDBG(SCTP_DEBUG_TIMER2,
1860 "Timer type %d handler exiting due to CLOSED association.\n",
1861 type);
1862 #if defined(__FreeBSD__) && !defined(__Userspace__)
1863 CURVNET_RESTORE();
1864 #endif
1865 return;
1866 }
1867 } else if (inp != NULL) {
1868 SCTP_INP_WLOCK(inp);
1869 } else {
1870 SCTP_WQ_ADDR_LOCK();
1871 }
1872
1873 /* Record in stopped_from which timeout occurred. */
1874 tmr->stopped_from = type;
1875 #if defined(__FreeBSD__) && !defined(__Userspace__)
1876 NET_EPOCH_ENTER(et);
1877 #endif
1878 /* mark as being serviced now */
1879 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1880 /*
1881 * Callout has been rescheduled.
1882 */
1883 goto get_out;
1884 }
1885 if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1886 /*
1887 * Not active, so no action.
1888 */
1889 goto get_out;
1890 }
1891 SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1892
1893 #if defined(__Userspace__)
1894 if ((stcb != NULL) &&
1895 !(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) &&
1896 (stcb->sctp_socket != NULL)) {
1897 upcall_socket = stcb->sctp_socket;
1898 SOCK_LOCK(upcall_socket);
1899 soref(upcall_socket);
1900 SOCK_UNLOCK(upcall_socket);
1901 }
1902 #endif
1903 /* call the handler for the appropriate timer type */
1904 switch (type) {
1905 case SCTP_TIMER_TYPE_SEND:
1906 KASSERT(inp != NULL && stcb != NULL && net != NULL,
1907 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1908 type, inp, stcb, net));
1909 SCTP_STAT_INCR(sctps_timodata);
1910 stcb->asoc.timodata++;
1911 stcb->asoc.num_send_timers_up--;
1912 if (stcb->asoc.num_send_timers_up < 0) {
1913 stcb->asoc.num_send_timers_up = 0;
1914 }
1915 SCTP_TCB_LOCK_ASSERT(stcb);
1916 if (sctp_t3rxt_timer(inp, stcb, net)) {
1917 /* no need to unlock on tcb its gone */
1918
1919 goto out_decr;
1920 }
1921 SCTP_TCB_LOCK_ASSERT(stcb);
1922 #ifdef SCTP_AUDITING_ENABLED
1923 sctp_auditing(4, inp, stcb, net);
1924 #endif
1925 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1926 if ((stcb->asoc.num_send_timers_up == 0) &&
1927 (stcb->asoc.sent_queue_cnt > 0)) {
1928 struct sctp_tmit_chunk *chk;
1929
1930 /*
1931 * Safeguard. If there on some on the sent queue
1932 * somewhere but no timers running something is
1933 * wrong... so we start a timer on the first chunk
1934 * on the send queue on whatever net it is sent to.
1935 */
1936 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
1937 if (chk->whoTo != NULL) {
1938 break;
1939 }
1940 }
1941 if (chk != NULL) {
1942 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo);
1943 }
1944 }
1945 break;
1946 case SCTP_TIMER_TYPE_INIT:
1947 KASSERT(inp != NULL && stcb != NULL && net != NULL,
1948 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1949 type, inp, stcb, net));
1950 SCTP_STAT_INCR(sctps_timoinit);
1951 stcb->asoc.timoinit++;
1952 if (sctp_t1init_timer(inp, stcb, net)) {
1953 /* no need to unlock on tcb its gone */
1954 goto out_decr;
1955 }
1956 /* We do output but not here */
1957 did_output = 0;
1958 break;
1959 case SCTP_TIMER_TYPE_RECV:
1960 KASSERT(inp != NULL && stcb != NULL && net == NULL,
1961 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1962 type, inp, stcb, net));
1963 SCTP_STAT_INCR(sctps_timosack);
1964 stcb->asoc.timosack++;
1965 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
1966 #ifdef SCTP_AUDITING_ENABLED
1967 sctp_auditing(4, inp, stcb, NULL);
1968 #endif
1969 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1970 break;
1971 case SCTP_TIMER_TYPE_SHUTDOWN:
1972 KASSERT(inp != NULL && stcb != NULL && net != NULL,
1973 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1974 type, inp, stcb, net));
1975 SCTP_STAT_INCR(sctps_timoshutdown);
1976 stcb->asoc.timoshutdown++;
1977 if (sctp_shutdown_timer(inp, stcb, net)) {
1978 /* no need to unlock on tcb its gone */
1979 goto out_decr;
1980 }
1981 #ifdef SCTP_AUDITING_ENABLED
1982 sctp_auditing(4, inp, stcb, net);
1983 #endif
1984 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1985 break;
1986 case SCTP_TIMER_TYPE_HEARTBEAT:
1987 KASSERT(inp != NULL && stcb != NULL && net != NULL,
1988 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1989 type, inp, stcb, net));
1990 SCTP_STAT_INCR(sctps_timoheartbeat);
1991 stcb->asoc.timoheartbeat++;
1992 if (sctp_heartbeat_timer(inp, stcb, net)) {
1993 /* no need to unlock on tcb its gone */
1994 goto out_decr;
1995 }
1996 #ifdef SCTP_AUDITING_ENABLED
1997 sctp_auditing(4, inp, stcb, net);
1998 #endif
1999 if (!(net->dest_state & SCTP_ADDR_NOHB)) {
2000 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
2001 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
2002 }
2003 break;
2004 case SCTP_TIMER_TYPE_COOKIE:
2005 KASSERT(inp != NULL && stcb != NULL && net != NULL,
2006 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2007 type, inp, stcb, net));
2008 SCTP_STAT_INCR(sctps_timocookie);
2009 stcb->asoc.timocookie++;
2010 if (sctp_cookie_timer(inp, stcb, net)) {
2011 /* no need to unlock on tcb its gone */
2012 goto out_decr;
2013 }
2014 #ifdef SCTP_AUDITING_ENABLED
2015 sctp_auditing(4, inp, stcb, net);
2016 #endif
2017 /*
2018 * We consider T3 and Cookie timer pretty much the same with
2019 * respect to where from in chunk_output.
2020 */
2021 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
2022 break;
2023 case SCTP_TIMER_TYPE_NEWCOOKIE:
2024 KASSERT(inp != NULL && stcb == NULL && net == NULL,
2025 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2026 type, inp, stcb, net));
2027 SCTP_STAT_INCR(sctps_timosecret);
2028 (void)SCTP_GETTIME_TIMEVAL(&tv);
2029 inp->sctp_ep.time_of_secret_change = tv.tv_sec;
2030 inp->sctp_ep.last_secret_number =
2031 inp->sctp_ep.current_secret_number;
2032 inp->sctp_ep.current_secret_number++;
2033 if (inp->sctp_ep.current_secret_number >=
2034 SCTP_HOW_MANY_SECRETS) {
2035 inp->sctp_ep.current_secret_number = 0;
2036 }
2037 secret = (int)inp->sctp_ep.current_secret_number;
2038 for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
2039 inp->sctp_ep.secret_key[secret][i] =
2040 sctp_select_initial_TSN(&inp->sctp_ep);
2041 }
2042 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL);
2043 did_output = 0;
2044 break;
2045 case SCTP_TIMER_TYPE_PATHMTURAISE:
2046 KASSERT(inp != NULL && stcb != NULL && net != NULL,
2047 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2048 type, inp, stcb, net));
2049 SCTP_STAT_INCR(sctps_timopathmtu);
2050 sctp_pathmtu_timer(inp, stcb, net);
2051 did_output = 0;
2052 break;
2053 case SCTP_TIMER_TYPE_SHUTDOWNACK:
2054 KASSERT(inp != NULL && stcb != NULL && net != NULL,
2055 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2056 type, inp, stcb, net));
2057 if (sctp_shutdownack_timer(inp, stcb, net)) {
2058 /* no need to unlock on tcb its gone */
2059 goto out_decr;
2060 }
2061 SCTP_STAT_INCR(sctps_timoshutdownack);
2062 stcb->asoc.timoshutdownack++;
2063 #ifdef SCTP_AUDITING_ENABLED
2064 sctp_auditing(4, inp, stcb, net);
2065 #endif
2066 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
2067 break;
2068 case SCTP_TIMER_TYPE_ASCONF:
2069 KASSERT(inp != NULL && stcb != NULL && net != NULL,
2070 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2071 type, inp, stcb, net));
2072 SCTP_STAT_INCR(sctps_timoasconf);
2073 if (sctp_asconf_timer(inp, stcb, net)) {
2074 /* no need to unlock on tcb its gone */
2075 goto out_decr;
2076 }
2077 #ifdef SCTP_AUDITING_ENABLED
2078 sctp_auditing(4, inp, stcb, net);
2079 #endif
2080 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
2081 break;
2082 case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2083 KASSERT(inp != NULL && stcb != NULL && net == NULL,
2084 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2085 type, inp, stcb, net));
2086 SCTP_STAT_INCR(sctps_timoshutdownguard);
2087 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
2088 "Shutdown guard timer expired");
2089 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2090 /* no need to unlock on tcb its gone */
2091 goto out_decr;
2092 case SCTP_TIMER_TYPE_AUTOCLOSE:
2093 KASSERT(inp != NULL && stcb != NULL && net == NULL,
2094 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2095 type, inp, stcb, net));
2096 SCTP_STAT_INCR(sctps_timoautoclose);
2097 sctp_autoclose_timer(inp, stcb);
2098 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
2099 did_output = 0;
2100 break;
2101 case SCTP_TIMER_TYPE_STRRESET:
2102 KASSERT(inp != NULL && stcb != NULL && net == NULL,
2103 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2104 type, inp, stcb, net));
2105 SCTP_STAT_INCR(sctps_timostrmrst);
2106 if (sctp_strreset_timer(inp, stcb)) {
2107 /* no need to unlock on tcb its gone */
2108 goto out_decr;
2109 }
2110 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
2111 break;
2112 case SCTP_TIMER_TYPE_INPKILL:
2113 KASSERT(inp != NULL && stcb == NULL && net == NULL,
2114 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2115 type, inp, stcb, net));
2116 SCTP_STAT_INCR(sctps_timoinpkill);
2117 /*
2118 * special case, take away our increment since WE are the
2119 * killer
2120 */
2121 sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
2122 SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
2123 #if defined(__APPLE__) && !defined(__Userspace__)
2124 SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 1);
2125 #endif
2126 SCTP_INP_DECR_REF(inp);
2127 SCTP_INP_WUNLOCK(inp);
2128 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
2129 SCTP_CALLED_FROM_INPKILL_TIMER);
2130 #if defined(__APPLE__) && !defined(__Userspace__)
2131 SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 1);
2132 #endif
2133 inp = NULL;
2134 goto out_no_decr;
2135 case SCTP_TIMER_TYPE_ASOCKILL:
2136 KASSERT(inp != NULL && stcb != NULL && net == NULL,
2137 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2138 type, inp, stcb, net));
2139 SCTP_STAT_INCR(sctps_timoassockill);
2140 /* Can we free it yet? */
2141 SCTP_INP_DECR_REF(inp);
2142 sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
2143 SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
2144 #if defined(__APPLE__) && !defined(__Userspace__)
2145 so = SCTP_INP_SO(inp);
2146 atomic_add_int(&stcb->asoc.refcnt, 1);
2147 SCTP_TCB_UNLOCK(stcb);
2148 SCTP_SOCKET_LOCK(so, 1);
2149 SCTP_TCB_LOCK(stcb);
2150 atomic_subtract_int(&stcb->asoc.refcnt, 1);
2151 #endif
2152 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
2153 SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
2154 #if defined(__APPLE__) && !defined(__Userspace__)
2155 SCTP_SOCKET_UNLOCK(so, 1);
2156 #endif
2157 /*
2158 * free asoc, always unlocks (or destroy's) so prevent
2159 * duplicate unlock or unlock of a free mtx :-0
2160 */
2161 stcb = NULL;
2162 goto out_no_decr;
2163 case SCTP_TIMER_TYPE_ADDR_WQ:
2164 KASSERT(inp == NULL && stcb == NULL && net == NULL,
2165 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2166 type, inp, stcb, net));
2167 sctp_handle_addr_wq();
2168 break;
2169 case SCTP_TIMER_TYPE_PRIM_DELETED:
2170 KASSERT(inp != NULL && stcb != NULL && net == NULL,
2171 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2172 type, inp, stcb, net));
2173 SCTP_STAT_INCR(sctps_timodelprim);
2174 sctp_delete_prim_timer(inp, stcb);
2175 break;
2176 default:
2177 #ifdef INVARIANTS
2178 panic("Unknown timer type %d", type);
2179 #else
2180 goto get_out;
2181 #endif
2182 }
2183 #ifdef SCTP_AUDITING_ENABLED
2184 sctp_audit_log(0xF1, (uint8_t) type);
2185 if (inp)
2186 sctp_auditing(5, inp, stcb, net);
2187 #endif
2188 if ((did_output) && stcb) {
2189 /*
2190 * Now we need to clean up the control chunk chain if an
2191 * ECNE is on it. It must be marked as UNSENT again so next
2192 * call will continue to send it until such time that we get
2193 * a CWR, to remove it. It is, however, less likely that we
2194 * will find a ecn echo on the chain though.
2195 */
2196 sctp_fix_ecn_echo(&stcb->asoc);
2197 }
2198 get_out:
2199 if (stcb) {
2200 SCTP_TCB_UNLOCK(stcb);
2201 } else if (inp != NULL) {
2202 SCTP_INP_WUNLOCK(inp);
2203 } else {
2204 SCTP_WQ_ADDR_UNLOCK();
2205 }
2206
2207 out_decr:
2208 #if defined(__Userspace__)
2209 if (upcall_socket != NULL) {
2210 if ((upcall_socket->so_upcall != NULL) &&
2211 (upcall_socket->so_error != 0)) {
2212 (*upcall_socket->so_upcall)(upcall_socket, upcall_socket->so_upcallarg, M_NOWAIT);
2213 }
2214 ACCEPT_LOCK();
2215 SOCK_LOCK(upcall_socket);
2216 sorele(upcall_socket);
2217 }
2218 #endif
2219 if (inp) {
2220 SCTP_INP_DECR_REF(inp);
2221 }
2222
2223 out_no_decr:
2224 SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d handler finished.\n", type);
2225 #if defined(__FreeBSD__) && !defined(__Userspace__)
2226 CURVNET_RESTORE();
2227 NET_EPOCH_EXIT(et);
2228 #endif
2229 }
2230
2231 /*-
2232 * The following table shows which parameters must be provided
2233 * when calling sctp_timer_start(). For parameters not being
2234 * provided, NULL must be used.
2235 *
2236 * |Name |inp |stcb|net |
2237 * |-----------------------------|----|----|----|
2238 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes |
2239 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes |
2240 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No |
2241 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes |
2242 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes |
2243 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes |
2244 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No |
2245 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes |
2246 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes |
2247 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |Yes |
2248 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No |
2249 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No |
2250 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |Yes |
2251 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No |
2252 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No |
2253 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No |
2254 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No |
2255 *
2256 */
2257
2258 void
sctp_timer_start(int t_type,struct sctp_inpcb * inp,struct sctp_tcb * stcb,struct sctp_nets * net)2259 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2260 struct sctp_nets *net)
2261 {
2262 struct sctp_timer *tmr;
2263 uint32_t to_ticks;
2264 uint32_t rndval, jitter;
2265
2266 KASSERT(stcb == NULL || stcb->sctp_ep == inp,
2267 ("sctp_timer_start of type %d: inp = %p, stcb->sctp_ep %p",
2268 t_type, stcb, stcb->sctp_ep));
2269 tmr = NULL;
2270 to_ticks = 0;
2271 if (stcb != NULL) {
2272 SCTP_TCB_LOCK_ASSERT(stcb);
2273 } else if (inp != NULL) {
2274 SCTP_INP_WLOCK_ASSERT(inp);
2275 } else {
2276 SCTP_WQ_ADDR_LOCK_ASSERT();
2277 }
2278 if (stcb != NULL) {
2279 /* Don't restart timer on association that's about to be killed. */
2280 if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) &&
2281 (t_type != SCTP_TIMER_TYPE_ASOCKILL)) {
2282 SCTPDBG(SCTP_DEBUG_TIMER2,
2283 "Timer type %d not started: inp=%p, stcb=%p, net=%p (stcb deleted).\n",
2284 t_type, inp, stcb, net);
2285 return;
2286 }
2287 /* Don't restart timer on net that's been removed. */
2288 if (net != NULL && (net->dest_state & SCTP_ADDR_BEING_DELETED)) {
2289 SCTPDBG(SCTP_DEBUG_TIMER2,
2290 "Timer type %d not started: inp=%p, stcb=%p, net=%p (net deleted).\n",
2291 t_type, inp, stcb, net);
2292 return;
2293 }
2294 }
2295 switch (t_type) {
2296 case SCTP_TIMER_TYPE_SEND:
2297 /* Here we use the RTO timer. */
2298 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2299 #ifdef INVARIANTS
2300 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2301 t_type, inp, stcb, net);
2302 #else
2303 return;
2304 #endif
2305 }
2306 tmr = &net->rxt_timer;
2307 if (net->RTO == 0) {
2308 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
2309 } else {
2310 to_ticks = sctp_msecs_to_ticks(net->RTO);
2311 }
2312 break;
2313 case SCTP_TIMER_TYPE_INIT:
2314 /*
2315 * Here we use the INIT timer default usually about 1
2316 * second.
2317 */
2318 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2319 #ifdef INVARIANTS
2320 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2321 t_type, inp, stcb, net);
2322 #else
2323 return;
2324 #endif
2325 }
2326 tmr = &net->rxt_timer;
2327 if (net->RTO == 0) {
2328 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
2329 } else {
2330 to_ticks = sctp_msecs_to_ticks(net->RTO);
2331 }
2332 break;
2333 case SCTP_TIMER_TYPE_RECV:
2334 /*
2335 * Here we use the Delayed-Ack timer value from the inp,
2336 * ususually about 200ms.
2337 */
2338 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2339 #ifdef INVARIANTS
2340 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2341 t_type, inp, stcb, net);
2342 #else
2343 return;
2344 #endif
2345 }
2346 tmr = &stcb->asoc.dack_timer;
2347 to_ticks = sctp_msecs_to_ticks(stcb->asoc.delayed_ack);
2348 break;
2349 case SCTP_TIMER_TYPE_SHUTDOWN:
2350 /* Here we use the RTO of the destination. */
2351 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2352 #ifdef INVARIANTS
2353 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2354 t_type, inp, stcb, net);
2355 #else
2356 return;
2357 #endif
2358 }
2359 tmr = &net->rxt_timer;
2360 if (net->RTO == 0) {
2361 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
2362 } else {
2363 to_ticks = sctp_msecs_to_ticks(net->RTO);
2364 }
2365 break;
2366 case SCTP_TIMER_TYPE_HEARTBEAT:
2367 /*
2368 * The net is used here so that we can add in the RTO. Even
2369 * though we use a different timer. We also add the HB timer
2370 * PLUS a random jitter.
2371 */
2372 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2373 #ifdef INVARIANTS
2374 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2375 t_type, inp, stcb, net);
2376 #else
2377 return;
2378 #endif
2379 }
2380 if ((net->dest_state & SCTP_ADDR_NOHB) &&
2381 !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
2382 SCTPDBG(SCTP_DEBUG_TIMER2,
2383 "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n",
2384 t_type, inp, stcb, net);
2385 return;
2386 }
2387 tmr = &net->hb_timer;
2388 if (net->RTO == 0) {
2389 to_ticks = stcb->asoc.initial_rto;
2390 } else {
2391 to_ticks = net->RTO;
2392 }
2393 rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2394 jitter = rndval % to_ticks;
2395 if (jitter >= (to_ticks >> 1)) {
2396 to_ticks = to_ticks + (jitter - (to_ticks >> 1));
2397 } else {
2398 to_ticks = to_ticks - jitter;
2399 }
2400 if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2401 !(net->dest_state & SCTP_ADDR_PF)) {
2402 to_ticks += net->heart_beat_delay;
2403 }
2404 /*
2405 * Now we must convert the to_ticks that are now in
2406 * ms to ticks.
2407 */
2408 to_ticks = sctp_msecs_to_ticks(to_ticks);
2409 break;
2410 case SCTP_TIMER_TYPE_COOKIE:
2411 /*
2412 * Here we can use the RTO timer from the network since one
2413 * RTT was complete. If a retransmission happened then we will
2414 * be using the RTO initial value.
2415 */
2416 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2417 #ifdef INVARIANTS
2418 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2419 t_type, inp, stcb, net);
2420 #else
2421 return;
2422 #endif
2423 }
2424 tmr = &net->rxt_timer;
2425 if (net->RTO == 0) {
2426 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
2427 } else {
2428 to_ticks = sctp_msecs_to_ticks(net->RTO);
2429 }
2430 break;
2431 case SCTP_TIMER_TYPE_NEWCOOKIE:
2432 /*
2433 * Nothing needed but the endpoint here ususually about 60
2434 * minutes.
2435 */
2436 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) {
2437 #ifdef INVARIANTS
2438 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2439 t_type, inp, stcb, net);
2440 #else
2441 return;
2442 #endif
2443 }
2444 tmr = &inp->sctp_ep.signature_change;
2445 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2446 break;
2447 case SCTP_TIMER_TYPE_PATHMTURAISE:
2448 /*
2449 * Here we use the value found in the EP for PMTUD, ususually
2450 * about 10 minutes.
2451 */
2452 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2453 #ifdef INVARIANTS
2454 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2455 t_type, inp, stcb, net);
2456 #else
2457 return;
2458 #endif
2459 }
2460 if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2461 SCTPDBG(SCTP_DEBUG_TIMER2,
2462 "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n",
2463 t_type, inp, stcb, net);
2464 return;
2465 }
2466 tmr = &net->pmtu_timer;
2467 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2468 break;
2469 case SCTP_TIMER_TYPE_SHUTDOWNACK:
2470 /* Here we use the RTO of the destination. */
2471 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2472 #ifdef INVARIANTS
2473 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2474 t_type, inp, stcb, net);
2475 #else
2476 return;
2477 #endif
2478 }
2479 tmr = &net->rxt_timer;
2480 if (net->RTO == 0) {
2481 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
2482 } else {
2483 to_ticks = sctp_msecs_to_ticks(net->RTO);
2484 }
2485 break;
2486 case SCTP_TIMER_TYPE_ASCONF:
2487 /*
2488 * Here the timer comes from the stcb but its value is from
2489 * the net's RTO.
2490 */
2491 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2492 #ifdef INVARIANTS
2493 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2494 t_type, inp, stcb, net);
2495 #else
2496 return;
2497 #endif
2498 }
2499 tmr = &stcb->asoc.asconf_timer;
2500 if (net->RTO == 0) {
2501 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
2502 } else {
2503 to_ticks = sctp_msecs_to_ticks(net->RTO);
2504 }
2505 break;
2506 case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2507 /*
2508 * Here we use the endpoints shutdown guard timer usually
2509 * about 3 minutes.
2510 */
2511 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2512 #ifdef INVARIANTS
2513 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2514 t_type, inp, stcb, net);
2515 #else
2516 return;
2517 #endif
2518 }
2519 tmr = &stcb->asoc.shut_guard_timer;
2520 if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
2521 if (stcb->asoc.maxrto < UINT32_MAX / 5) {
2522 to_ticks = sctp_msecs_to_ticks(5 * stcb->asoc.maxrto);
2523 } else {
2524 to_ticks = sctp_msecs_to_ticks(UINT32_MAX);
2525 }
2526 } else {
2527 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2528 }
2529 break;
2530 case SCTP_TIMER_TYPE_AUTOCLOSE:
2531 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2532 #ifdef INVARIANTS
2533 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2534 t_type, inp, stcb, net);
2535 #else
2536 return;
2537 #endif
2538 }
2539 tmr = &stcb->asoc.autoclose_timer;
2540 to_ticks = stcb->asoc.sctp_autoclose_ticks;
2541 break;
2542 case SCTP_TIMER_TYPE_STRRESET:
2543 /*
2544 * Here the timer comes from the stcb but its value is from
2545 * the net's RTO.
2546 */
2547 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2548 #ifdef INVARIANTS
2549 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2550 t_type, inp, stcb, net);
2551 #else
2552 return;
2553 #endif
2554 }
2555 tmr = &stcb->asoc.strreset_timer;
2556 if (net->RTO == 0) {
2557 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
2558 } else {
2559 to_ticks = sctp_msecs_to_ticks(net->RTO);
2560 }
2561 break;
2562 case SCTP_TIMER_TYPE_INPKILL:
2563 /*
2564 * The inp is setup to die. We re-use the signature_chage
2565 * timer since that has stopped and we are in the GONE
2566 * state.
2567 */
2568 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) {
2569 #ifdef INVARIANTS
2570 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2571 t_type, inp, stcb, net);
2572 #else
2573 return;
2574 #endif
2575 }
2576 tmr = &inp->sctp_ep.signature_change;
2577 to_ticks = sctp_msecs_to_ticks(SCTP_INP_KILL_TIMEOUT);
2578 break;
2579 case SCTP_TIMER_TYPE_ASOCKILL:
2580 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2581 #ifdef INVARIANTS
2582 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2583 t_type, inp, stcb, net);
2584 #else
2585 return;
2586 #endif
2587 }
2588 tmr = &stcb->asoc.strreset_timer;
2589 to_ticks = sctp_msecs_to_ticks(SCTP_ASOC_KILL_TIMEOUT);
2590 break;
2591 case SCTP_TIMER_TYPE_ADDR_WQ:
2592 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) {
2593 #ifdef INVARIANTS
2594 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2595 t_type, inp, stcb, net);
2596 #else
2597 return;
2598 #endif
2599 }
2600 /* Only 1 tick away :-) */
2601 tmr = &SCTP_BASE_INFO(addr_wq_timer);
2602 to_ticks = SCTP_ADDRESS_TICK_DELAY;
2603 break;
2604 case SCTP_TIMER_TYPE_PRIM_DELETED:
2605 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2606 #ifdef INVARIANTS
2607 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2608 t_type, inp, stcb, net);
2609 #else
2610 return;
2611 #endif
2612 }
2613 tmr = &stcb->asoc.delete_prim_timer;
2614 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
2615 break;
2616 default:
2617 #ifdef INVARIANTS
2618 panic("Unknown timer type %d", t_type);
2619 #else
2620 return;
2621 #endif
2622 }
2623 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type));
2624 KASSERT(to_ticks > 0, ("to_ticks == 0 for timer type %d", t_type));
2625 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2626 /*
2627 * We do NOT allow you to have it already running. If it is,
2628 * we leave the current one up unchanged.
2629 */
2630 SCTPDBG(SCTP_DEBUG_TIMER2,
2631 "Timer type %d already running: inp=%p, stcb=%p, net=%p.\n",
2632 t_type, inp, stcb, net);
2633 return;
2634 }
2635 /* At this point we can proceed. */
2636 if (t_type == SCTP_TIMER_TYPE_SEND) {
2637 stcb->asoc.num_send_timers_up++;
2638 }
2639 tmr->stopped_from = 0;
2640 tmr->type = t_type;
2641 tmr->ep = (void *)inp;
2642 tmr->tcb = (void *)stcb;
2643 if (t_type == SCTP_TIMER_TYPE_STRRESET) {
2644 tmr->net = NULL;
2645 } else {
2646 tmr->net = (void *)net;
2647 }
2648 tmr->self = (void *)tmr;
2649 #if defined(__FreeBSD__) && !defined(__Userspace__)
2650 tmr->vnet = (void *)curvnet;
2651 #endif
2652 tmr->ticks = sctp_get_tick_count();
2653 if (SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr) == 0) {
2654 SCTPDBG(SCTP_DEBUG_TIMER2,
2655 "Timer type %d started: ticks=%u, inp=%p, stcb=%p, net=%p.\n",
2656 t_type, to_ticks, inp, stcb, net);
2657 } else {
2658 /*
2659 * This should not happen, since we checked for pending
2660 * above.
2661 */
2662 SCTPDBG(SCTP_DEBUG_TIMER2,
2663 "Timer type %d restarted: ticks=%u, inp=%p, stcb=%p, net=%p.\n",
2664 t_type, to_ticks, inp, stcb, net);
2665 }
2666 return;
2667 }
2668
2669 /*-
2670 * The following table shows which parameters must be provided
2671 * when calling sctp_timer_stop(). For parameters not being
2672 * provided, NULL must be used.
2673 *
2674 * |Name |inp |stcb|net |
2675 * |-----------------------------|----|----|----|
2676 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes |
2677 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes |
2678 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No |
2679 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes |
2680 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes |
2681 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes |
2682 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No |
2683 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes |
2684 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes |
2685 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |No |
2686 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No |
2687 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No |
2688 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |No |
2689 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No |
2690 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No |
2691 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No |
2692 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No |
2693 *
2694 */
2695
2696 void
sctp_timer_stop(int t_type,struct sctp_inpcb * inp,struct sctp_tcb * stcb,struct sctp_nets * net,uint32_t from)2697 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2698 struct sctp_nets *net, uint32_t from)
2699 {
2700 struct sctp_timer *tmr;
2701
2702 KASSERT(stcb == NULL || stcb->sctp_ep == inp,
2703 ("sctp_timer_stop of type %d: inp = %p, stcb->sctp_ep %p",
2704 t_type, stcb, stcb->sctp_ep));
2705 if (stcb != NULL) {
2706 SCTP_TCB_LOCK_ASSERT(stcb);
2707 } else if (inp != NULL) {
2708 SCTP_INP_WLOCK_ASSERT(inp);
2709 } else {
2710 SCTP_WQ_ADDR_LOCK_ASSERT();
2711 }
2712 tmr = NULL;
2713 switch (t_type) {
2714 case SCTP_TIMER_TYPE_SEND:
2715 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2716 #ifdef INVARIANTS
2717 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2718 t_type, inp, stcb, net);
2719 #else
2720 return;
2721 #endif
2722 }
2723 tmr = &net->rxt_timer;
2724 break;
2725 case SCTP_TIMER_TYPE_INIT:
2726 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2727 #ifdef INVARIANTS
2728 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2729 t_type, inp, stcb, net);
2730 #else
2731 return;
2732 #endif
2733 }
2734 tmr = &net->rxt_timer;
2735 break;
2736 case SCTP_TIMER_TYPE_RECV:
2737 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2738 #ifdef INVARIANTS
2739 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2740 t_type, inp, stcb, net);
2741 #else
2742 return;
2743 #endif
2744 }
2745 tmr = &stcb->asoc.dack_timer;
2746 break;
2747 case SCTP_TIMER_TYPE_SHUTDOWN:
2748 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2749 #ifdef INVARIANTS
2750 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2751 t_type, inp, stcb, net);
2752 #else
2753 return;
2754 #endif
2755 }
2756 tmr = &net->rxt_timer;
2757 break;
2758 case SCTP_TIMER_TYPE_HEARTBEAT:
2759 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2760 #ifdef INVARIANTS
2761 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2762 t_type, inp, stcb, net);
2763 #else
2764 return;
2765 #endif
2766 }
2767 tmr = &net->hb_timer;
2768 break;
2769 case SCTP_TIMER_TYPE_COOKIE:
2770 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2771 #ifdef INVARIANTS
2772 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2773 t_type, inp, stcb, net);
2774 #else
2775 return;
2776 #endif
2777 }
2778 tmr = &net->rxt_timer;
2779 break;
2780 case SCTP_TIMER_TYPE_NEWCOOKIE:
2781 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) {
2782 #ifdef INVARIANTS
2783 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2784 t_type, inp, stcb, net);
2785 #else
2786 return;
2787 #endif
2788 }
2789 tmr = &inp->sctp_ep.signature_change;
2790 break;
2791 case SCTP_TIMER_TYPE_PATHMTURAISE:
2792 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2793 #ifdef INVARIANTS
2794 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2795 t_type, inp, stcb, net);
2796 #else
2797 return;
2798 #endif
2799 }
2800 tmr = &net->pmtu_timer;
2801 break;
2802 case SCTP_TIMER_TYPE_SHUTDOWNACK:
2803 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2804 #ifdef INVARIANTS
2805 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2806 t_type, inp, stcb, net);
2807 #else
2808 return;
2809 #endif
2810 }
2811 tmr = &net->rxt_timer;
2812 break;
2813 case SCTP_TIMER_TYPE_ASCONF:
2814 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2815 #ifdef INVARIANTS
2816 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2817 t_type, inp, stcb, net);
2818 #else
2819 return;
2820 #endif
2821 }
2822 tmr = &stcb->asoc.asconf_timer;
2823 break;
2824 case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2825 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2826 #ifdef INVARIANTS
2827 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2828 t_type, inp, stcb, net);
2829 #else
2830 return;
2831 #endif
2832 }
2833 tmr = &stcb->asoc.shut_guard_timer;
2834 break;
2835 case SCTP_TIMER_TYPE_AUTOCLOSE:
2836 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2837 #ifdef INVARIANTS
2838 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2839 t_type, inp, stcb, net);
2840 #else
2841 return;
2842 #endif
2843 }
2844 tmr = &stcb->asoc.autoclose_timer;
2845 break;
2846 case SCTP_TIMER_TYPE_STRRESET:
2847 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2848 #ifdef INVARIANTS
2849 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2850 t_type, inp, stcb, net);
2851 #else
2852 return;
2853 #endif
2854 }
2855 tmr = &stcb->asoc.strreset_timer;
2856 break;
2857 case SCTP_TIMER_TYPE_INPKILL:
2858 /*
2859 * The inp is setup to die. We re-use the signature_chage
2860 * timer since that has stopped and we are in the GONE
2861 * state.
2862 */
2863 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) {
2864 #ifdef INVARIANTS
2865 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2866 t_type, inp, stcb, net);
2867 #else
2868 return;
2869 #endif
2870 }
2871 tmr = &inp->sctp_ep.signature_change;
2872 break;
2873 case SCTP_TIMER_TYPE_ASOCKILL:
2874 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2875 #ifdef INVARIANTS
2876 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2877 t_type, inp, stcb, net);
2878 #else
2879 return;
2880 #endif
2881 }
2882 tmr = &stcb->asoc.strreset_timer;
2883 break;
2884 case SCTP_TIMER_TYPE_ADDR_WQ:
2885 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) {
2886 #ifdef INVARIANTS
2887 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2888 t_type, inp, stcb, net);
2889 #else
2890 return;
2891 #endif
2892 }
2893 tmr = &SCTP_BASE_INFO(addr_wq_timer);
2894 break;
2895 case SCTP_TIMER_TYPE_PRIM_DELETED:
2896 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2897 #ifdef INVARIANTS
2898 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2899 t_type, inp, stcb, net);
2900 #else
2901 return;
2902 #endif
2903 }
2904 tmr = &stcb->asoc.delete_prim_timer;
2905 break;
2906 default:
2907 #ifdef INVARIANTS
2908 panic("Unknown timer type %d", t_type);
2909 #else
2910 return;
2911 #endif
2912 }
2913 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type));
2914 if ((tmr->type != SCTP_TIMER_TYPE_NONE) &&
2915 (tmr->type != t_type)) {
2916 /*
2917 * Ok we have a timer that is under joint use. Cookie timer
2918 * per chance with the SEND timer. We therefore are NOT
2919 * running the timer that the caller wants stopped. So just
2920 * return.
2921 */
2922 SCTPDBG(SCTP_DEBUG_TIMER2,
2923 "Shared timer type %d not running: inp=%p, stcb=%p, net=%p.\n",
2924 t_type, inp, stcb, net);
2925 return;
2926 }
2927 if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2928 stcb->asoc.num_send_timers_up--;
2929 if (stcb->asoc.num_send_timers_up < 0) {
2930 stcb->asoc.num_send_timers_up = 0;
2931 }
2932 }
2933 tmr->self = NULL;
2934 tmr->stopped_from = from;
2935 if (SCTP_OS_TIMER_STOP(&tmr->timer) == 1) {
2936 KASSERT(tmr->ep == inp,
2937 ("sctp_timer_stop of type %d: inp = %p, tmr->inp = %p",
2938 t_type, inp, tmr->ep));
2939 KASSERT(tmr->tcb == stcb,
2940 ("sctp_timer_stop of type %d: stcb = %p, tmr->stcb = %p",
2941 t_type, stcb, tmr->tcb));
2942 KASSERT(((t_type == SCTP_TIMER_TYPE_ASCONF) && (tmr->net != NULL)) ||
2943 ((t_type != SCTP_TIMER_TYPE_ASCONF) && (tmr->net == net)),
2944 ("sctp_timer_stop of type %d: net = %p, tmr->net = %p",
2945 t_type, net, tmr->net));
2946 SCTPDBG(SCTP_DEBUG_TIMER2,
2947 "Timer type %d stopped: inp=%p, stcb=%p, net=%p.\n",
2948 t_type, inp, stcb, net);
2949 tmr->ep = NULL;
2950 tmr->tcb = NULL;
2951 tmr->net = NULL;
2952 } else {
2953 SCTPDBG(SCTP_DEBUG_TIMER2,
2954 "Timer type %d not stopped: inp=%p, stcb=%p, net=%p.\n",
2955 t_type, inp, stcb, net);
2956 }
2957 return;
2958 }
2959
2960 uint32_t
sctp_calculate_len(struct mbuf * m)2961 sctp_calculate_len(struct mbuf *m)
2962 {
2963 uint32_t tlen = 0;
2964 struct mbuf *at;
2965
2966 at = m;
2967 while (at) {
2968 tlen += SCTP_BUF_LEN(at);
2969 at = SCTP_BUF_NEXT(at);
2970 }
2971 return (tlen);
2972 }
2973
2974 void
sctp_mtu_size_reset(struct sctp_inpcb * inp,struct sctp_association * asoc,uint32_t mtu)2975 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2976 struct sctp_association *asoc, uint32_t mtu)
2977 {
2978 /*
2979 * Reset the P-MTU size on this association, this involves changing
2980 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2981 * allow the DF flag to be cleared.
2982 */
2983 struct sctp_tmit_chunk *chk;
2984 unsigned int eff_mtu, ovh;
2985
2986 asoc->smallest_mtu = mtu;
2987 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2988 ovh = SCTP_MIN_OVERHEAD;
2989 } else {
2990 ovh = SCTP_MIN_V4_OVERHEAD;
2991 }
2992 eff_mtu = mtu - ovh;
2993 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2994 if (chk->send_size > eff_mtu) {
2995 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2996 }
2997 }
2998 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2999 if (chk->send_size > eff_mtu) {
3000 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
3001 }
3002 }
3003 }
3004
3005
3006 /*
3007 * Given an association and starting time of the current RTT period, update
3008 * RTO in number of msecs. net should point to the current network.
3009 * Return 1, if an RTO update was performed, return 0 if no update was
3010 * performed due to invalid starting point.
3011 */
3012
int
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *old,
    int rtt_from_sack)
{
	/*
	 * Compute a new RTT sample from 'old' (the send time of the
	 * measured packet) to now, update the scaled srtt/rttvar state
	 * on 'net', and derive a new bounded RTO.  Returns 1 if the
	 * RTO was updated, 0 if the sample was rejected (start time in
	 * the future, or RTT above SCTP_RTO_UPPER_BOUND).
	 */
	struct timeval now;
	uint64_t rtt_us;	/* RTT in us */
	int32_t rtt;		/* RTT in ms */
	uint32_t new_rto;
	int first_measure = 0;

	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	if (stcb->asoc.use_precise_time) {
		(void)SCTP_GETPTIME_TIMEVAL(&now);
	} else {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	if ((old->tv_sec > now.tv_sec) ||
	    ((old->tv_sec == now.tv_sec) && (old->tv_usec > now.tv_usec))) {
		/* The starting point is in the future. */
		return (0);
	}
	/* now = now - *old: the elapsed round-trip time. */
	timevalsub(&now, old);
	rtt_us = (uint64_t)1000000 * (uint64_t)now.tv_sec + (uint64_t)now.tv_usec;
	if (rtt_us > SCTP_RTO_UPPER_BOUND * 1000) {
		/* The RTT is larger than a sane value. */
		return (0);
	}
	/* store the current RTT in us */
	net->rtt = rtt_us;
	/* compute rtt in ms */
	rtt = (int32_t)(net->rtt / 1000);
	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
		/* Tell the CC module that a new update has just occurred from a sack */
		(*asoc->cc_functions.sctp_rtt_calculated)(stcb, net, &now);
	}
	/* Do we need to determine the lan? We do this only
	 * on sacks i.e. RTT being determined from data not
	 * non-data (HB/INIT->INITACK).
	 */
	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
			net->lan_type = SCTP_LAN_INTERNET;
		} else {
			net->lan_type = SCTP_LAN_LOCAL;
		}
	}

	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	/*-
	 * Compute the scaled average lastsa and the
	 * scaled variance lastsv as described in van Jacobson
	 * Paper "Congestion Avoidance and Control", Annex A.
	 *
	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
	 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar
	 */
	if (net->RTO_measured) {
		/* 'rtt' becomes the error term (sample - srtt) below. */
		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
		net->lastsa += rtt;
		if (rtt < 0) {
			rtt = -rtt;
		}
		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
		net->lastsv += rtt;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
	} else {
		/* First RTO measurement: seed srtt/rttvar from the sample. */
		net->RTO_measured = 1;
		first_measure = 1;
		net->lastsa = rtt << SCTP_RTT_SHIFT;
		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
	if (net->lastsv == 0) {
		/* Never let the variance term collapse to zero. */
		net->lastsv = SCTP_CLOCK_GRANULARITY;
	}
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		/* RTO dropped back below the threshold: latch it off. */
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	net->RTO = new_rto;
	return (1);
}
3120
3121 /*
3122 * return a pointer to a contiguous piece of data from the given mbuf chain
3123 * starting at 'off' for 'len' bytes. If the desired piece spans more than
3124 * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
 * is >= 'len'. Returns NULL if there aren't 'len' bytes in the chain.
3126 */
3127 caddr_t
sctp_m_getptr(struct mbuf * m,int off,int len,uint8_t * in_ptr)3128 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
3129 {
3130 uint32_t count;
3131 uint8_t *ptr;
3132
3133 ptr = in_ptr;
3134 if ((off < 0) || (len <= 0))
3135 return (NULL);
3136
3137 /* find the desired start location */
3138 while ((m != NULL) && (off > 0)) {
3139 if (off < SCTP_BUF_LEN(m))
3140 break;
3141 off -= SCTP_BUF_LEN(m);
3142 m = SCTP_BUF_NEXT(m);
3143 }
3144 if (m == NULL)
3145 return (NULL);
3146
3147 /* is the current mbuf large enough (eg. contiguous)? */
3148 if ((SCTP_BUF_LEN(m) - off) >= len) {
3149 return (mtod(m, caddr_t) + off);
3150 } else {
3151 /* else, it spans more than one mbuf, so save a temp copy... */
3152 while ((m != NULL) && (len > 0)) {
3153 count = min(SCTP_BUF_LEN(m) - off, len);
3154 memcpy(ptr, mtod(m, caddr_t) + off, count);
3155 len -= count;
3156 ptr += count;
3157 off = 0;
3158 m = SCTP_BUF_NEXT(m);
3159 }
3160 if ((m == NULL) && (len > 0))
3161 return (NULL);
3162 else
3163 return ((caddr_t)in_ptr);
3164 }
3165 }
3166
3167
3168
struct sctp_paramhdr *
sctp_get_next_param(struct mbuf *m,
    int offset,
    struct sctp_paramhdr *pull,
    int pull_limit)
{
	/*
	 * Typed convenience wrapper around sctp_m_getptr(): fetch
	 * 'pull_limit' bytes at 'offset' in chain 'm', using 'pull'
	 * as the bounce buffer, and view the result as a parameter
	 * header.
	 */
	return ((struct sctp_paramhdr *)
	    sctp_m_getptr(m, offset, pull_limit, (uint8_t *)pull));
}
3179
3180
3181 struct mbuf *
sctp_add_pad_tombuf(struct mbuf * m,int padlen)3182 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
3183 {
3184 struct mbuf *m_last;
3185 caddr_t dp;
3186
3187 if (padlen > 3) {
3188 return (NULL);
3189 }
3190 if (padlen <= M_TRAILINGSPACE(m)) {
3191 /*
3192 * The easy way. We hope the majority of the time we hit
3193 * here :)
3194 */
3195 m_last = m;
3196 } else {
3197 /* Hard way we must grow the mbuf chain */
3198 m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
3199 if (m_last == NULL) {
3200 return (NULL);
3201 }
3202 SCTP_BUF_LEN(m_last) = 0;
3203 SCTP_BUF_NEXT(m_last) = NULL;
3204 SCTP_BUF_NEXT(m) = m_last;
3205 }
3206 dp = mtod(m_last, caddr_t) + SCTP_BUF_LEN(m_last);
3207 SCTP_BUF_LEN(m_last) += padlen;
3208 memset(dp, 0, padlen);
3209 return (m_last);
3210 }
3211
3212 struct mbuf *
sctp_pad_lastmbuf(struct mbuf * m,int padval,struct mbuf * last_mbuf)3213 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
3214 {
3215 /* find the last mbuf in chain and pad it */
3216 struct mbuf *m_at;
3217
3218 if (last_mbuf != NULL) {
3219 return (sctp_add_pad_tombuf(last_mbuf, padval));
3220 } else {
3221 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3222 if (SCTP_BUF_NEXT(m_at) == NULL) {
3223 return (sctp_add_pad_tombuf(m_at, padval));
3224 }
3225 }
3226 }
3227 return (NULL);
3228 }
3229
static void
sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
    uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked)
{
	/*
	 * Deliver an SCTP_ASSOC_CHANGE notification for 'state' (with
	 * optional ABORT chunk payload) to the socket's read queue if
	 * the user enabled association events.  For 1-to-1 style
	 * sockets a COMM_LOST/CANT_STR_ASSOC additionally sets
	 * so_error and shuts down the receive side, then any sleepers
	 * on the socket are woken.
	 */
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;
	unsigned int notif_len;
	uint16_t abort_len;
	unsigned int i;
#if defined(__APPLE__) && !defined(__Userspace__)
	struct socket *so;
#endif

	if (stcb == NULL) {
		return;
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
		if (abort != NULL) {
			abort_len = ntohs(abort->ch.chunk_length);
			/*
			 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be
			 * contiguous.
			 */
			if (abort_len > SCTP_CHUNK_BUFFER_SIZE) {
				abort_len = SCTP_CHUNK_BUFFER_SIZE;
			}
		} else {
			abort_len = 0;
		}
		/* Size the trailing sac_info area by notification kind. */
		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
			notif_len += abort_len;
		}
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			/* Retry with smaller value. */
			notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
			if (m_notify == NULL) {
				goto set_error;
			}
		}
		SCTP_BUF_NEXT(m_notify) = NULL;
		sac = mtod(m_notify, struct sctp_assoc_change *);
		memset(sac, 0, notif_len);
		sac->sac_type = SCTP_ASSOC_CHANGE;
		sac->sac_flags = 0;
		sac->sac_length = sizeof(struct sctp_assoc_change);
		sac->sac_state = state;
		sac->sac_error = error;
		/* XXX verify these stream counts */
		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
		sac->sac_inbound_streams = stcb->asoc.streamincnt;
		sac->sac_assoc_id = sctp_get_associd(stcb);
		/* Only filled if the full-size allocation succeeded. */
		if (notif_len > sizeof(struct sctp_assoc_change)) {
			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
				/* List the features negotiated with the peer. */
				i = 0;
				if (stcb->asoc.prsctp_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
				}
				if (stcb->asoc.auth_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
				}
				if (stcb->asoc.asconf_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
				}
				if (stcb->asoc.idata_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
				}
				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
				if (stcb->asoc.reconfig_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
				}
				sac->sac_length += i;
			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
				/* Attach the (truncated) ABORT chunk. */
				memcpy(sac->sac_info, abort, abort_len);
				sac->sac_length += abort_len;
			}
		}
		SCTP_BUF_LEN(m_notify) = sac->sac_length;
		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
		    0, 0, stcb->asoc.context, 0, 0, 0,
		    m_notify);
		if (control != NULL) {
			control->length = SCTP_BUF_LEN(m_notify);
			control->spec_flags = M_NOTIFICATION;
			/* not that we need this */
			control->tail_mbuf = m_notify;
			sctp_add_to_readq(stcb->sctp_ep, stcb,
			    control,
			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
			    so_locked);
		} else {
			sctp_m_freem(m_notify);
		}
	}
	/*
	 * For 1-to-1 style sockets, we send up an error when an ABORT
	 * comes in.
	 */
set_error:
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		SOCK_LOCK(stcb->sctp_socket);
		if (from_peer) {
			if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
				stcb->sctp_socket->so_error = ECONNREFUSED;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
				stcb->sctp_socket->so_error = ECONNRESET;
			}
		} else {
			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
			    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
				stcb->sctp_socket->so_error = ETIMEDOUT;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
				stcb->sctp_socket->so_error = ECONNABORTED;
			}
		}
		SOCK_UNLOCK(stcb->sctp_socket);
	}
	/* Wake ANY sleepers */
#if defined(__APPLE__) && !defined(__Userspace__)
	/*
	 * On Apple the socket lock must be taken before waking; drop
	 * and reacquire the TCB lock around it, holding a refcount so
	 * the TCB cannot go away in between.
	 */
	so = SCTP_INP_SO(stcb->sctp_ep);
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
	}
#endif
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		socantrcvmore(stcb->sctp_socket);
	}
	sorwakeup(stcb->sctp_socket);
	sowwakeup(stcb->sctp_socket);
#if defined(__APPLE__) && !defined(__Userspace__)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
3386
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error, int so_locked)
{
	/*
	 * Deliver an SCTP_PEER_ADDR_CHANGE notification for peer
	 * address 'sa' entering 'state' (with 'error') to the
	 * socket's read queue, if the user enabled peer address
	 * events.
	 */
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	memset(spc, 0, sizeof(struct sctp_paddr_change));
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	/* Copy the address into spc_aaddr in the user-visible form. */
	switch (sa->sa_family) {
#ifdef INET
	case AF_INET:
#ifdef INET6
		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
			/* User asked for v4-mapped v6 addresses. */
			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
			    (struct sockaddr_in6 *)&spc->spc_aaddr);
		} else {
			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		}
#else
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
#endif
		break;
#endif
#ifdef INET6
	case AF_INET6:
	{
#ifdef SCTP_EMBEDDED_V6_SCOPE
		struct sockaddr_in6 *sin6;
#endif /* SCTP_EMBEDDED_V6_SCOPE */
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

#ifdef SCTP_EMBEDDED_V6_SCOPE
		sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
		if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
			if (sin6->sin6_scope_id == 0) {
				/* recover scope_id for user */
#ifdef SCTP_KAME
				(void)sa6_recoverscope(sin6);
#else
				(void)in6_recoverscope(sin6, &sin6->sin6_addr,
				    NULL);
#endif
			} else {
				/* clear embedded scope_id for user */
				in6_clearscope(&sin6->sin6_addr);
			}
		}
#endif /* SCTP_EMBEDDED_V6_SCOPE */
		break;
	}
#endif
#if defined(__Userspace__)
	case AF_CONN:
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_conn));
		break;
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
3487
3488
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked)
{
	/*
	 * Deliver an SCTP_SEND_FAILED (old API) or SCTP_SEND_FAILED_EVENT
	 * (new API) notification for an already-chunked message 'chk'
	 * that could not be (fully) delivered.  The chunk's data mbufs
	 * are stripped of their DATA/I-DATA header and padding and
	 * handed to the user as the notification payload; 'chk->data'
	 * ownership transfers to the notification ('chk->data' is set
	 * to NULL).
	 */
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	struct sctp_chunkhdr *chkhdr;
	int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	     sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}

	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		notifhdr_len = sizeof(struct sctp_send_failed_event);
	} else {
		notifhdr_len = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = notifhdr_len;
	if (stcb->asoc.idata_supported) {
		chkhdr_len = sizeof(struct sctp_idata_chunk);
	} else {
		chkhdr_len = sizeof(struct sctp_data_chunk);
	}
	/* Use some defaults in case we can't access the chunk header */
	if (chk->send_size >= chkhdr_len) {
		payload_len = chk->send_size - chkhdr_len;
	} else {
		payload_len = 0;
	}
	padding_len = 0;
	if (chk->data != NULL) {
		chkhdr = mtod(chk->data, struct sctp_chunkhdr *);
		if (chkhdr != NULL) {
			chk_len = ntohs(chkhdr->chunk_length);
			/* Sanity-check the header before trusting its length. */
			if ((chk_len >= chkhdr_len) &&
			    (chk->send_size >= chk_len) &&
			    (chk->send_size - chk_len < 4)) {
				padding_len = chk->send_size - chk_len;
				payload_len = chk->send_size - chkhdr_len - padding_len;
			}
		}
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		/* New-style sctp_send_failed_event notification. */
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, notifhdr_len);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		if (sent) {
			ssfe->ssfe_flags = SCTP_DATA_SENT;
		} else {
			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		}
		ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len);
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = chk->rec.data.sid;
		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
		ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid;
		ssfe->ssfe_info.snd_context = chk->rec.data.context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
	} else {
		/* Old-style (deprecated) sctp_send_failed notification. */
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, notifhdr_len);
		ssf->ssf_type = SCTP_SEND_FAILED;
		if (sent) {
			ssf->ssf_flags = SCTP_DATA_SENT;
		} else {
			ssf->ssf_flags = SCTP_DATA_UNSENT;
		}
		ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len);
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssf->ssf_info.sinfo_stream = chk->rec.data.sid;
		ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid;
		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
		ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid;
		ssf->ssf_info.sinfo_context = chk->rec.data.context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
	}
	if (chk->data != NULL) {
		/* Trim off the sctp chunk header (it should be there) */
		if (chk->send_size == chkhdr_len + payload_len + padding_len) {
			m_adj(chk->data, chkhdr_len);
			m_adj(chk->data, -padding_len);
			sctp_mbuf_crush(chk->data);
			chk->send_size -= (chkhdr_len + padding_len);
		}
	}
	SCTP_BUF_NEXT(m_notify) = chk->data;
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
3619
3620
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked)
{
	/*
	 * Like sctp_notify_send_failed(), but for a message still on
	 * the stream queue (never chunked), so it is always reported
	 * as SCTP_DATA_UNSENT and no chunk header needs trimming.
	 * Ownership of 'sp->data' transfers to the notification
	 * ('sp->data' is set to NULL).
	 */
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int notifhdr_len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	     sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		notifhdr_len = sizeof(struct sctp_send_failed_event);
	} else {
		notifhdr_len = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* no space left */
		return;
	}
	SCTP_BUF_LEN(m_notify) = notifhdr_len;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		/* New-style sctp_send_failed_event notification. */
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, notifhdr_len);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length);
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = sp->sid;
		if (sp->some_taken) {
			/* Part of the message was already chunked off. */
			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
		}
		ssfe->ssfe_info.snd_ppid = sp->ppid;
		ssfe->ssfe_info.snd_context = sp->context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
	} else {
		/* Old-style (deprecated) sctp_send_failed notification. */
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, notifhdr_len);
		ssf->ssf_type = SCTP_SEND_FAILED;
		ssf->ssf_flags = SCTP_DATA_UNSENT;
		ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length);
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssf->ssf_info.sinfo_stream = sp->sid;
		ssf->ssf_info.sinfo_ssn = 0;
		if (sp->some_taken) {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
		}
		ssf->ssf_info.sinfo_ppid = sp->ppid;
		ssf->ssf_info.sinfo_context = sp->context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
	}
	SCTP_BUF_NEXT(m_notify) = sp->data;

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
3716
3717
3718
3719 static void
sctp_notify_adaptation_layer(struct sctp_tcb * stcb)3720 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3721 {
3722 struct mbuf *m_notify;
3723 struct sctp_adaptation_event *sai;
3724 struct sctp_queued_to_read *control;
3725
3726 if ((stcb == NULL) ||
3727 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3728 /* event not enabled */
3729 return;
3730 }
3731
3732 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
3733 if (m_notify == NULL)
3734 /* no space left */
3735 return;
3736 SCTP_BUF_LEN(m_notify) = 0;
3737 sai = mtod(m_notify, struct sctp_adaptation_event *);
3738 memset(sai, 0, sizeof(struct sctp_adaptation_event));
3739 sai->sai_type = SCTP_ADAPTATION_INDICATION;
3740 sai->sai_flags = 0;
3741 sai->sai_length = sizeof(struct sctp_adaptation_event);
3742 sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3743 sai->sai_assoc_id = sctp_get_associd(stcb);
3744
3745 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3746 SCTP_BUF_NEXT(m_notify) = NULL;
3747
3748 /* append to socket */
3749 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3750 0, 0, stcb->asoc.context, 0, 0, 0,
3751 m_notify);
3752 if (control == NULL) {
3753 /* no memory */
3754 sctp_m_freem(m_notify);
3755 return;
3756 }
3757 control->length = SCTP_BUF_LEN(m_notify);
3758 control->spec_flags = M_NOTIFICATION;
3759 /* not that we need this */
3760 control->tail_mbuf = m_notify;
3761 sctp_add_to_readq(stcb->sctp_ep, stcb,
3762 control,
3763 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3764 }
3765
3766 /* This always must be called with the read-queue LOCKED in the INP */
static void
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
    uint32_t val, int so_locked)
{
	/*
	 * Deliver an SCTP_PARTIAL_DELIVERY_EVENT notification.  'val'
	 * packs the stream id in the upper 16 bits and the sequence
	 * number in the lower 16.  The caller must hold the INP
	 * read-queue lock: the entry is inserted into the read queue
	 * directly here rather than via sctp_add_to_readq().
	 */
	struct mbuf *m_notify;
	struct sctp_pdapi_event *pdapi;
	struct sctp_queued_to_read *control;
	struct sockbuf *sb;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
		/* event not enabled */
		return;
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* Nobody can read from this socket any more. */
		return;
	}

	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
	pdapi->pdapi_flags = 0;
	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
	pdapi->pdapi_indication = error;
	/* Unpack stream and sequence number from 'val'. */
	pdapi->pdapi_stream = (val >> 16);
	pdapi->pdapi_seq = (val & 0x0000ffff);
	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sb = &stcb->sctp_socket->so_rcv;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
	}
	/* Account the notification against the receive buffer. */
	sctp_sballoc(stcb, sb, m_notify);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
	}
	control->end_added = 1;
	/* Insert right after the message being partially delivered. */
	if (stcb->asoc.control_pdapi)
		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
	else {
		/* we really should not see this case */
		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
	}
	if (stcb->sctp_ep && stcb->sctp_socket) {
		/* This should always be the case */
#if defined(__APPLE__) && !defined(__Userspace__)
		/*
		 * Apple needs the socket lock for the wakeup; drop and
		 * reacquire the TCB lock around taking it, holding a
		 * refcount so the TCB stays alive in between.
		 */
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) && !defined(__Userspace__)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
3855
/*
 * Queue an SCTP_SHUTDOWN_EVENT notification on the socket's read queue.
 * For one-to-one style (TCP model) and connected one-to-many sockets this
 * also marks the socket as unable to send, waking any blocked writers.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined(__APPLE__) && !defined(__Userspace__)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		/*
		 * Lock-order dance: drop the TCB lock before taking the
		 * socket lock, holding a refcount so the stcb cannot be
		 * freed while unlocked.
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* Socket went away while we were unlocked. */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined(__APPLE__) && !defined(__Userspace__)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}

	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	/* Build the sockets-API shutdown event in the mbuf. */
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	memset(sse, 0, sizeof(struct sctp_shutdown_event));
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3925
3926 static void
sctp_notify_sender_dry_event(struct sctp_tcb * stcb,int so_locked)3927 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3928 int so_locked)
3929 {
3930 struct mbuf *m_notify;
3931 struct sctp_sender_dry_event *event;
3932 struct sctp_queued_to_read *control;
3933
3934 if ((stcb == NULL) ||
3935 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3936 /* event not enabled */
3937 return;
3938 }
3939
3940 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3941 if (m_notify == NULL) {
3942 /* no space left */
3943 return;
3944 }
3945 SCTP_BUF_LEN(m_notify) = 0;
3946 event = mtod(m_notify, struct sctp_sender_dry_event *);
3947 memset(event, 0, sizeof(struct sctp_sender_dry_event));
3948 event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3949 event->sender_dry_flags = 0;
3950 event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3951 event->sender_dry_assoc_id = sctp_get_associd(stcb);
3952
3953 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3954 SCTP_BUF_NEXT(m_notify) = NULL;
3955
3956 /* append to socket */
3957 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3958 0, 0, stcb->asoc.context, 0, 0, 0,
3959 m_notify);
3960 if (control == NULL) {
3961 /* no memory */
3962 sctp_m_freem(m_notify);
3963 return;
3964 }
3965 control->length = SCTP_BUF_LEN(m_notify);
3966 control->spec_flags = M_NOTIFICATION;
3967 /* not that we need this */
3968 control->tail_mbuf = m_notify;
3969 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3970 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3971 }
3972
3973
3974 void
sctp_notify_stream_reset_add(struct sctp_tcb * stcb,uint16_t numberin,uint16_t numberout,int flag)3975 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3976 {
3977 struct mbuf *m_notify;
3978 struct sctp_queued_to_read *control;
3979 struct sctp_stream_change_event *stradd;
3980
3981 if ((stcb == NULL) ||
3982 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3983 /* event not enabled */
3984 return;
3985 }
3986 if ((stcb->asoc.peer_req_out) && flag) {
3987 /* Peer made the request, don't tell the local user */
3988 stcb->asoc.peer_req_out = 0;
3989 return;
3990 }
3991 stcb->asoc.peer_req_out = 0;
3992 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3993 if (m_notify == NULL)
3994 /* no space left */
3995 return;
3996 SCTP_BUF_LEN(m_notify) = 0;
3997 stradd = mtod(m_notify, struct sctp_stream_change_event *);
3998 memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3999 stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
4000 stradd->strchange_flags = flag;
4001 stradd->strchange_length = sizeof(struct sctp_stream_change_event);
4002 stradd->strchange_assoc_id = sctp_get_associd(stcb);
4003 stradd->strchange_instrms = numberin;
4004 stradd->strchange_outstrms = numberout;
4005 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
4006 SCTP_BUF_NEXT(m_notify) = NULL;
4007 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
4008 /* no space */
4009 sctp_m_freem(m_notify);
4010 return;
4011 }
4012 /* append to socket */
4013 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
4014 0, 0, stcb->asoc.context, 0, 0, 0,
4015 m_notify);
4016 if (control == NULL) {
4017 /* no memory */
4018 sctp_m_freem(m_notify);
4019 return;
4020 }
4021 control->length = SCTP_BUF_LEN(m_notify);
4022 control->spec_flags = M_NOTIFICATION;
4023 /* not that we need this */
4024 control->tail_mbuf = m_notify;
4025 sctp_add_to_readq(stcb->sctp_ep, stcb,
4026 control,
4027 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
4028 }
4029
4030 void
sctp_notify_stream_reset_tsn(struct sctp_tcb * stcb,uint32_t sending_tsn,uint32_t recv_tsn,int flag)4031 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
4032 {
4033 struct mbuf *m_notify;
4034 struct sctp_queued_to_read *control;
4035 struct sctp_assoc_reset_event *strasoc;
4036
4037 if ((stcb == NULL) ||
4038 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
4039 /* event not enabled */
4040 return;
4041 }
4042 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
4043 if (m_notify == NULL)
4044 /* no space left */
4045 return;
4046 SCTP_BUF_LEN(m_notify) = 0;
4047 strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
4048 memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
4049 strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
4050 strasoc->assocreset_flags = flag;
4051 strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
4052 strasoc->assocreset_assoc_id= sctp_get_associd(stcb);
4053 strasoc->assocreset_local_tsn = sending_tsn;
4054 strasoc->assocreset_remote_tsn = recv_tsn;
4055 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
4056 SCTP_BUF_NEXT(m_notify) = NULL;
4057 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
4058 /* no space */
4059 sctp_m_freem(m_notify);
4060 return;
4061 }
4062 /* append to socket */
4063 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
4064 0, 0, stcb->asoc.context, 0, 0, 0,
4065 m_notify);
4066 if (control == NULL) {
4067 /* no memory */
4068 sctp_m_freem(m_notify);
4069 return;
4070 }
4071 control->length = SCTP_BUF_LEN(m_notify);
4072 control->spec_flags = M_NOTIFICATION;
4073 /* not that we need this */
4074 control->tail_mbuf = m_notify;
4075 sctp_add_to_readq(stcb->sctp_ep, stcb,
4076 control,
4077 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
4078 }
4079
4080
4081
4082 static void
sctp_notify_stream_reset(struct sctp_tcb * stcb,int number_entries,uint16_t * list,int flag)4083 sctp_notify_stream_reset(struct sctp_tcb *stcb,
4084 int number_entries, uint16_t * list, int flag)
4085 {
4086 struct mbuf *m_notify;
4087 struct sctp_queued_to_read *control;
4088 struct sctp_stream_reset_event *strreset;
4089 int len;
4090
4091 if ((stcb == NULL) ||
4092 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
4093 /* event not enabled */
4094 return;
4095 }
4096
4097 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
4098 if (m_notify == NULL)
4099 /* no space left */
4100 return;
4101 SCTP_BUF_LEN(m_notify) = 0;
4102 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
4103 if (len > M_TRAILINGSPACE(m_notify)) {
4104 /* never enough room */
4105 sctp_m_freem(m_notify);
4106 return;
4107 }
4108 strreset = mtod(m_notify, struct sctp_stream_reset_event *);
4109 memset(strreset, 0, len);
4110 strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
4111 strreset->strreset_flags = flag;
4112 strreset->strreset_length = len;
4113 strreset->strreset_assoc_id = sctp_get_associd(stcb);
4114 if (number_entries) {
4115 int i;
4116
4117 for (i = 0; i < number_entries; i++) {
4118 strreset->strreset_stream_list[i] = ntohs(list[i]);
4119 }
4120 }
4121 SCTP_BUF_LEN(m_notify) = len;
4122 SCTP_BUF_NEXT(m_notify) = NULL;
4123 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
4124 /* no space */
4125 sctp_m_freem(m_notify);
4126 return;
4127 }
4128 /* append to socket */
4129 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
4130 0, 0, stcb->asoc.context, 0, 0, 0,
4131 m_notify);
4132 if (control == NULL) {
4133 /* no memory */
4134 sctp_m_freem(m_notify);
4135 return;
4136 }
4137 control->length = SCTP_BUF_LEN(m_notify);
4138 control->spec_flags = M_NOTIFICATION;
4139 /* not that we need this */
4140 control->tail_mbuf = m_notify;
4141 sctp_add_to_readq(stcb->sctp_ep, stcb,
4142 control,
4143 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
4144 }
4145
4146
4147 static void
sctp_notify_remote_error(struct sctp_tcb * stcb,uint16_t error,struct sctp_error_chunk * chunk)4148 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
4149 {
4150 struct mbuf *m_notify;
4151 struct sctp_remote_error *sre;
4152 struct sctp_queued_to_read *control;
4153 unsigned int notif_len;
4154 uint16_t chunk_len;
4155
4156 if ((stcb == NULL) ||
4157 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
4158 return;
4159 }
4160 if (chunk != NULL) {
4161 chunk_len = ntohs(chunk->ch.chunk_length);
4162 /*
4163 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be
4164 * contiguous.
4165 */
4166 if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) {
4167 chunk_len = SCTP_CHUNK_BUFFER_SIZE;
4168 }
4169 } else {
4170 chunk_len = 0;
4171 }
4172 notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
4173 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
4174 if (m_notify == NULL) {
4175 /* Retry with smaller value. */
4176 notif_len = (unsigned int)sizeof(struct sctp_remote_error);
4177 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
4178 if (m_notify == NULL) {
4179 return;
4180 }
4181 }
4182 SCTP_BUF_NEXT(m_notify) = NULL;
4183 sre = mtod(m_notify, struct sctp_remote_error *);
4184 memset(sre, 0, notif_len);
4185 sre->sre_type = SCTP_REMOTE_ERROR;
4186 sre->sre_flags = 0;
4187 sre->sre_length = sizeof(struct sctp_remote_error);
4188 sre->sre_error = error;
4189 sre->sre_assoc_id = sctp_get_associd(stcb);
4190 if (notif_len > sizeof(struct sctp_remote_error)) {
4191 memcpy(sre->sre_data, chunk, chunk_len);
4192 sre->sre_length += chunk_len;
4193 }
4194 SCTP_BUF_LEN(m_notify) = sre->sre_length;
4195 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
4196 0, 0, stcb->asoc.context, 0, 0, 0,
4197 m_notify);
4198 if (control != NULL) {
4199 control->length = SCTP_BUF_LEN(m_notify);
4200 control->spec_flags = M_NOTIFICATION;
4201 /* not that we need this */
4202 control->tail_mbuf = m_notify;
4203 sctp_add_to_readq(stcb->sctp_ep, stcb,
4204 control,
4205 &stcb->sctp_socket->so_rcv, 1,
4206 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
4207 } else {
4208 sctp_m_freem(m_notify);
4209 }
4210 }
4211
4212
/*
 * Central dispatcher for upper-layer (socket) notifications.  Maps a
 * SCTP_NOTIFY_* code to the helper that builds and queues the matching
 * sockets-API event.  The meaning of 'data' depends on the notification
 * code (a net, a chunk, a stream list, ...); 'so_locked' is forwarded to
 * helpers that need to know whether the socket lock is already held.
 */
void
sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked)
{
	if ((stcb == NULL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* If the socket is gone we are out of here */
		return;
	}
	/* No point queueing events the application can no longer read. */
#if (defined(__FreeBSD__) || defined(_WIN32)) && !defined(__Userspace__)
	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
#else
	if (stcb->sctp_socket->so_state & SS_CANTRCVMORE) {
#endif
		return;
	}
#if defined(__APPLE__) && !defined(__Userspace__)
	if (so_locked) {
		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
	} else {
		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
	}
#endif
	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
	    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
			/* Don't report these in front states */
			return;
		}
	}
	switch (notification) {
	case SCTP_NOTIFY_ASSOC_UP:
		/* COMM_UP is sent at most once per association. */
		if (stcb->asoc.assoc_up_sent == 0) {
			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
			stcb->asoc.assoc_up_sent = 1;
		}
		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
			sctp_notify_adaptation_layer(stcb);
		}
		if (stcb->asoc.auth_supported == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_DOWN:
		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
#if defined(__Userspace__)
		if (stcb->sctp_ep->recv_callback) {
			if (stcb->sctp_socket) {
				union sctp_sockstore addr;
				struct sctp_rcvinfo rcv;

				/*
				 * Signal EOF to the userland callback with a
				 * zero-length read; drop the TCB lock around
				 * the callback, pinning the stcb via refcnt.
				 */
				memset(&addr, 0, sizeof(union sctp_sockstore));
				memset(&rcv, 0, sizeof(struct sctp_rcvinfo));
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				stcb->sctp_ep->recv_callback(stcb->sctp_socket, addr, NULL, 0, rcv, 0, stcb->sctp_ep->ulp_info);
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
		}
#endif
		break;
	case SCTP_NOTIFY_INTERFACE_DOWN:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_UP:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
		sctp_notify_send_failed2(stcb, error,
		    (struct sctp_stream_queue_pending *)data, so_locked);
		break;
	case SCTP_NOTIFY_SENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 1, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_UNSENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 0, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	/* NB: constant name misspells "DELIVERY" upstream; kept for ABI. */
	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
		{
			uint32_t val;
			val = *((uint32_t *)data);

			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
			break;
		}
	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
		/* In front states a failed setup is CANT_STR_ASSOC. */
		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
		    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
		    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_RESTART:
		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
		if (stcb->asoc.auth_supported == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_STR_RESET_SEND:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
		break;
	case SCTP_NOTIFY_STR_RESET_RECV:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN|SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN|SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_INCOMING|SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_INCOMING|SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_ASCONF_ADD_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_DELETE_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_PEER_SHUTDOWN:
		sctp_notify_shutdown_event(stcb);
		break;
	case SCTP_NOTIFY_AUTH_NEW_KEY:
		/* For AUTH events 'data' carries the key number by value. */
		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_AUTH_FREE_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_NO_PEER_AUTH:
		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_SENDER_DRY:
		sctp_notify_sender_dry_event(stcb, so_locked);
		break;
	case SCTP_NOTIFY_REMOTE_ERROR:
		sctp_notify_remote_error(stcb, error, data);
		break;
	default:
		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
			__func__, notification, notification);
		break;
	}			/* end switch */
}
4414
/*
 * Flush every outbound queue of the association (sent, send, and
 * per-stream pending queues), notifying the ULP of each dropped chunk
 * with a SENT/UNSENT/SPECIAL_SP fail event and releasing its resources.
 * 'holds_lock' is nonzero when the caller already owns the TCB send
 * lock; 'so_locked' is forwarded to the notification helpers.
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
#if defined(__APPLE__) && !defined(__Userspace__)
	if (so_locked) {
		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
	} else {
		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
	}
#endif
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
			/*
			 * Keep the per-stream chunk count consistent;
			 * with INVARIANTS an underflow is a panic instead
			 * of a silent skip.
			 */
			if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
				asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
			} else {
				panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
			}
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			/* notify helper may have consumed the data */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/*sa_ignore FREED_MEMORY*/
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
			asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
		} else {
			panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			/* notify helper may have consumed the data */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/*sa_ignore FREED_MEMORY*/
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, 1);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    error, (void *)sp, so_locked);
				/* notify helper may have consumed the data */
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
					sp->tail_mbuf = NULL;
					sp->length = 0;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/*sa_ignore FREED_MEMORY*/
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}
4529
/*
 * Notify the ULP that the association was aborted: flush all outbound
 * queues, then raise the appropriate assoc-change event.  'from_peer'
 * selects REM_ABORTED (peer sent the ABORT, 'abort' points at it) vs
 * LOC_ABORTED (we aborted locally).
 */
void
sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
    struct sctp_abort_chunk *abort, int so_locked)
{
	if (stcb == NULL) {
		return;
	}
#if defined(__APPLE__) && !defined(__Userspace__)
	if (so_locked) {
		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
	} else {
		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
	}
#endif
	/* One-to-one style sockets remember the abort for ECONNRESET. */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* Socket already gone; nothing to deliver events to. */
		return;
	}
	/* Tell them we lost the asoc */
	sctp_report_all_outbound(stcb, error, 0, so_locked);
	if (from_peer) {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
	} else {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
	}
}
4562
/*
 * Send an ABORT for the packet in 'm' and, when a TCB exists, notify
 * the ULP, update abort statistics, and free the association.  The vtag
 * used in the outgoing ABORT is the peer's vtag if an association is
 * known, otherwise 0 (out-of-the-blue style).
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct mbuf *op_err,
#if defined(__FreeBSD__) && !defined(__Userspace__)
    uint8_t mflowtype, uint32_t mflowid,
#endif
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;
#if defined(__APPLE__) && !defined(__Userspace__)
	struct socket *so;
#endif

	vtag = 0;
	if (stcb != NULL) {
		/* Use association values when we have them. */
		vtag = stcb->asoc.peer_vtag;
		vrf_id = stcb->asoc.vrf_id;
	}
	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
#if defined(__FreeBSD__) && !defined(__Userspace__)
	    mflowtype, mflowid, inp->fibnum,
#endif
	    vrf_id, port);
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
		SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED);
		/* Ok, now lets free it */
#if defined(__APPLE__) && !defined(__Userspace__)
		/*
		 * Lock-order dance: take the socket lock with the TCB
		 * lock dropped, pinning the stcb via refcnt meanwhile.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		/* Established associations also decrement the gauge. */
		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined(__APPLE__) && !defined(__Userspace__)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
4613 #ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the inbound and outbound TSN tracking logs for an
 * association.  The body is compiled only when NOSIY_PRINTS (sic, the
 * historical macro name) is defined; otherwise this is a no-op.  Each
 * log is a ring buffer: when wrapped, entries from the write index to
 * the end are printed first, then the entries before the write index.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;
	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	if (stcb->asoc.tsn_in_wrapped) {
		/* older entries: write index to end of the ring */
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
				    stcb->asoc.in_tsnlog[i].tsn,
				    stcb->asoc.in_tsnlog[i].strm,
				    stcb->asoc.in_tsnlog[i].seq,
				    stcb->asoc.in_tsnlog[i].flgs,
				    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		/* newer entries: start of the ring to the write index */
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
				    stcb->asoc.in_tsnlog[i].tsn,
				    stcb->asoc.in_tsnlog[i].strm,
				    stcb->asoc.in_tsnlog[i].seq,
				    stcb->asoc.in_tsnlog[i].flgs,
				    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
				    stcb->asoc.out_tsnlog[i].tsn,
				    stcb->asoc.out_tsnlog[i].strm,
				    stcb->asoc.out_tsnlog[i].seq,
				    stcb->asoc.out_tsnlog[i].flgs,
				    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
				    stcb->asoc.out_tsnlog[i].tsn,
				    stcb->asoc.out_tsnlog[i].strm,
				    stcb->asoc.out_tsnlog[i].seq,
				    stcb->asoc.out_tsnlog[i].flgs,
				    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
4673 #endif
4674
4675 void
4676 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
4677 struct mbuf *op_err,
4678 int so_locked)
4679 {
4680 #if defined(__APPLE__) && !defined(__Userspace__)
4681 struct socket *so;
4682 #endif
4683
4684 #if defined(__APPLE__) && !defined(__Userspace__)
4685 so = SCTP_INP_SO(inp);
4686 #endif
4687 #if defined(__APPLE__) && !defined(__Userspace__)
4688 if (so_locked) {
4689 sctp_lock_assert(SCTP_INP_SO(inp));
4690 } else {
4691 sctp_unlock_assert(SCTP_INP_SO(inp));
4692 }
4693 #endif
4694 if (stcb == NULL) {
4695 /* Got to have a TCB */
4696 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4697 if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4698 #if defined(__APPLE__) && !defined(__Userspace__)
4699 if (!so_locked) {
4700 SCTP_SOCKET_LOCK(so, 1);
4701 }
4702 #endif
4703 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4704 SCTP_CALLED_DIRECTLY_NOCMPSET);
4705 #if defined(__APPLE__) && !defined(__Userspace__)
4706 if (!so_locked) {
4707 SCTP_SOCKET_UNLOCK(so, 1);
4708 }
4709 #endif
4710 }
4711 }
4712 return;
4713 } else {
4714 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED);
4715 }
4716 /* notify the peer */
4717 sctp_send_abort_tcb(stcb, op_err, so_locked);
4718 SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4719 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4720 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4721 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4722 }
4723 /* notify the ulp */
4724 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
4725 sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
4726 }
4727 /* now free the asoc */
4728 #ifdef SCTP_ASOCLOG_OF_TSNS
4729 sctp_print_out_track_log(stcb);
4730 #endif
4731 #if defined(__APPLE__) && !defined(__Userspace__)
4732 if (!so_locked) {
4733 atomic_add_int(&stcb->asoc.refcnt, 1);
4734 SCTP_TCB_UNLOCK(stcb);
4735 SCTP_SOCKET_LOCK(so, 1);
4736 SCTP_TCB_LOCK(stcb);
4737 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4738 }
4739 #endif
4740 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
4741 SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
4742 #if defined(__APPLE__) && !defined(__Userspace__)
4743 if (!so_locked) {
4744 SCTP_SOCKET_UNLOCK(so, 1);
4745 }
4746 #endif
4747 }
4748
/*
 * Handle an out-of-the-blue (OOTB) packet: one for which no association
 * exists.  Walk the chunks to decide whether a response is required
 * (never respond to ABORT, SHUTDOWN COMPLETE, or PACKET DROPPED; answer
 * SHUTDOWN ACK with SHUTDOWN COMPLETE), then, subject to the blackhole
 * sysctl policy, send an ABORT with 'cause' as the error cause.
 */
void
sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
                 struct sockaddr *src, struct sockaddr *dst,
                 struct sctphdr *sh, struct sctp_inpcb *inp,
                 struct mbuf *cause,
#if defined(__FreeBSD__) && !defined(__Userspace__)
                 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
#endif
                 uint32_t vrf_id, uint16_t port)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;
	int contains_init_chunk;

	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
	/* Generate a TO address for future reference */
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
			/* endpoint has no associations left; tear it down */
#if defined(__APPLE__) && !defined(__Userspace__)
			SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 1);
#endif
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
					SCTP_CALLED_DIRECTLY_NOCMPSET);
#if defined(__APPLE__) && !defined(__Userspace__)
			SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 1);
#endif
		}
	}
	contains_init_chunk = 0;
	/* Walk every chunk in the packet via the contiguous scratch buffer. */
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
						   sizeof(*ch), (uint8_t *) & chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_INIT:
			/* remembered for the blackhole policy check below */
			contains_init_chunk = 1;
			break;
		case SCTP_PACKET_DROPPED:
			/* we don't respond to pkt-dropped */
			return;
		case SCTP_ABORT_ASSOCIATION:
			/* we don't respond with an ABORT to an ABORT */
			return;
		case SCTP_SHUTDOWN_COMPLETE:
			/*
			 * we ignore it since we are not waiting for it and
			 * peer is gone
			 */
			return;
		case SCTP_SHUTDOWN_ACK:
			sctp_send_shutdown_complete2(src, dst, sh,
#if defined(__FreeBSD__) && !defined(__Userspace__)
			                             mflowtype, mflowid, fibnum,
#endif
			                             vrf_id, port);
			return;
		default:
			break;
		}
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
							   sizeof(*ch), (uint8_t *) & chunk_buf);
	}
	/*
	 * blackhole sysctl: 0 = always respond, 1 = stay silent only for
	 * packets containing an INIT, >1 = never respond.
	 */
	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
	     (contains_init_chunk == 0))) {
		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
#if defined(__FreeBSD__) && !defined(__Userspace__)
		                mflowtype, mflowid, fibnum,
#endif
		                vrf_id, port);
	}
}
4826
4827 /*
4828 * check the inbound datagram to make sure there is not an abort inside it,
4829 * if there is return 1, else return 0.
4830 */
4831 int
4832 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4833 {
4834 struct sctp_chunkhdr *ch;
4835 struct sctp_init_chunk *init_chk, chunk_buf;
4836 int offset;
4837 unsigned int chk_length;
4838
4839 offset = iphlen + sizeof(struct sctphdr);
4840 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4841 (uint8_t *) & chunk_buf);
4842 while (ch != NULL) {
4843 chk_length = ntohs(ch->chunk_length);
4844 if (chk_length < sizeof(*ch)) {
4845 /* packet is probably corrupt */
4846 break;
4847 }
4848 /* we seem to be ok, is it an abort? */
4849 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4850 /* yep, tell them */
4851 return (1);
4852 }
4853 if (ch->chunk_type == SCTP_INITIATION) {
4854 /* need to update the Vtag */
4855 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4856 offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4857 if (init_chk != NULL) {
4858 *vtagfill = ntohl(init_chk->init.initiate_tag);
4859 }
4860 }
4861 /* Nope, move to the next chunk */
4862 offset += SCTP_SIZE32(chk_length);
4863 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4864 sizeof(*ch), (uint8_t *) & chunk_buf);
4865 }
4866 return (0);
4867 }
4868
4869 /*
4870 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4871 * set (i.e. it's 0) so, create this function to compare link local scopes
4872 */
4873 #ifdef INET6
/*
 * Compare the scopes of two IPv6 addresses.  Returns 1 when both addresses
 * resolve to the same scope id, 0 otherwise (including when a scope cannot
 * be recovered).  On stacks that embed the scope id inside the address
 * (SCTP_EMBEDDED_V6_SCOPE), a zero sin6_scope_id is first recovered from
 * the embedded form before comparing.
 *
 * NOTE: the inner if-statements open their brace inside the SCTP_KAME
 * conditional and close it after the #endif; the two preprocessor branches
 * are alternatives for the same statement.
 */
uint32_t
sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
{
#if defined(__Userspace__)
	/*__Userspace__ Returning 1 here always */
#endif
#if defined(SCTP_EMBEDDED_V6_SCOPE)
	struct sockaddr_in6 a, b;

	/* save copies so recovery doesn't mutate the caller's addresses */
	a = *addr1;
	b = *addr2;

	if (a.sin6_scope_id == 0)
#ifdef SCTP_KAME
		if (sa6_recoverscope(&a)) {
#else
		if (in6_recoverscope(&a, &a.sin6_addr, NULL)) {
#endif				/* SCTP_KAME */
			/* can't get scope, so can't match */
			return (0);
		}
	if (b.sin6_scope_id == 0)
#ifdef SCTP_KAME
		if (sa6_recoverscope(&b)) {
#else
		if (in6_recoverscope(&b, &b.sin6_addr, NULL)) {
#endif				/* SCTP_KAME */
			/* can't get scope, so can't match */
			return (0);
		}
	if (a.sin6_scope_id != b.sin6_scope_id)
		return (0);
#else
	/* non-embedded case: the scope ids are directly comparable */
	if (addr1->sin6_scope_id != addr2->sin6_scope_id)
		return (0);
#endif				/* SCTP_EMBEDDED_V6_SCOPE */

	return (1);
}
4914
4915 #if defined(SCTP_EMBEDDED_V6_SCOPE)
4916 /*
4917 * returns a sockaddr_in6 with embedded scope recovered and removed
4918 */
/*
 * returns a sockaddr_in6 with embedded scope recovered and removed
 *
 * For a link-local IPv6 address: if sin6_scope_id is 0, copy the address
 * into 'store', recover the scope id from the embedded form, and return
 * 'store'; otherwise strip the embedded scope in place and return the
 * original pointer.  Non-IPv6 and non-link-local addresses pass through
 * unchanged.  The return value aliases either 'addr' or 'store'.
 */
struct sockaddr_in6 *
sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
{
	/* check and strip embedded scope junk */
	if (addr->sin6_family == AF_INET6) {
		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
			if (addr->sin6_scope_id == 0) {
				*store = *addr;
#ifdef SCTP_KAME
				/* recoverscope returns 0 on success */
				if (!sa6_recoverscope(store)) {
#else
				if (!in6_recoverscope(store, &store->sin6_addr,
				    NULL)) {
#endif /* SCTP_KAME */
					/* use the recovered scope */
					addr = store;
				}
			} else {
				/* else, return the original "to" addr */
				in6_clearscope(&addr->sin6_addr);
			}
		}
	}
	return (addr);
}
4944 #endif /* SCTP_EMBEDDED_V6_SCOPE */
4945 #endif
4946
4947 /*
4948 * are the two addresses the same? currently a "scopeless" check returns: 1
4949 * if same, 0 if not
4950 */
4951 int
4952 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4953 {
4954
4955 /* must be valid */
4956 if (sa1 == NULL || sa2 == NULL)
4957 return (0);
4958
4959 /* must be the same family */
4960 if (sa1->sa_family != sa2->sa_family)
4961 return (0);
4962
4963 switch (sa1->sa_family) {
4964 #ifdef INET6
4965 case AF_INET6:
4966 {
4967 /* IPv6 addresses */
4968 struct sockaddr_in6 *sin6_1, *sin6_2;
4969
4970 sin6_1 = (struct sockaddr_in6 *)sa1;
4971 sin6_2 = (struct sockaddr_in6 *)sa2;
4972 return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4973 sin6_2));
4974 }
4975 #endif
4976 #ifdef INET
4977 case AF_INET:
4978 {
4979 /* IPv4 addresses */
4980 struct sockaddr_in *sin_1, *sin_2;
4981
4982 sin_1 = (struct sockaddr_in *)sa1;
4983 sin_2 = (struct sockaddr_in *)sa2;
4984 return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4985 }
4986 #endif
4987 #if defined(__Userspace__)
4988 case AF_CONN:
4989 {
4990 struct sockaddr_conn *sconn_1, *sconn_2;
4991
4992 sconn_1 = (struct sockaddr_conn *)sa1;
4993 sconn_2 = (struct sockaddr_conn *)sa2;
4994 return (sconn_1->sconn_addr == sconn_2->sconn_addr);
4995 }
4996 #endif
4997 default:
4998 /* we don't do these... */
4999 return (0);
5000 }
5001 }
5002
/*
 * Debug helper: print a human-readable form of the given address (IPv6,
 * IPv4, or the userland AF_CONN pseudo-family) via SCTP_PRINTF.  Unknown
 * families print "?".  The IPv6 formatting differs per platform because
 * ip6_sprintf() has different signatures on FreeBSD vs. other stacks.
 */
void
sctp_print_address(struct sockaddr *sa)
{
#ifdef INET6
#if defined(__FreeBSD__) && !defined(__Userspace__)
	char ip6buf[INET6_ADDRSTRLEN];
#endif
#endif

	switch (sa->sa_family) {
#ifdef INET6
	case AF_INET6:
	{
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *)sa;
#if defined(__Userspace__)
		/* userland: print the eight 16-bit groups by hand */
		SCTP_PRINTF("IPv6 address: %x:%x:%x:%x:%x:%x:%x:%x:port:%d scope:%u\n",
			    ntohs(sin6->sin6_addr.s6_addr16[0]),
			    ntohs(sin6->sin6_addr.s6_addr16[1]),
			    ntohs(sin6->sin6_addr.s6_addr16[2]),
			    ntohs(sin6->sin6_addr.s6_addr16[3]),
			    ntohs(sin6->sin6_addr.s6_addr16[4]),
			    ntohs(sin6->sin6_addr.s6_addr16[5]),
			    ntohs(sin6->sin6_addr.s6_addr16[6]),
			    ntohs(sin6->sin6_addr.s6_addr16[7]),
			    ntohs(sin6->sin6_port),
			    sin6->sin6_scope_id);
#else
#if defined(__FreeBSD__) && !defined(__Userspace__)
		/* FreeBSD's ip6_sprintf takes a caller-supplied buffer */
		SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
			    ntohs(sin6->sin6_port),
			    sin6->sin6_scope_id);
#else
		SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
			    ip6_sprintf(&sin6->sin6_addr),
			    ntohs(sin6->sin6_port),
			    sin6->sin6_scope_id);
#endif
#endif
		break;
	}
#endif
#ifdef INET
	case AF_INET:
	{
		struct sockaddr_in *sin;
		unsigned char *p;

		/* dotted-quad via the raw address bytes */
		sin = (struct sockaddr_in *)sa;
		p = (unsigned char *)&sin->sin_addr;
		SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
		break;
	}
#endif
#if defined(__Userspace__)
	case AF_CONN:
	{
		struct sockaddr_conn *sconn;

		sconn = (struct sockaddr_conn *)sa;
		SCTP_PRINTF("AF_CONN address: %p\n", sconn->sconn_addr);
		break;
	}
#endif
	default:
		SCTP_PRINTF("?\n");
		break;
	}
}
5075
/*
 * Move all queued-to-read control structures that belong to 'stcb' from
 * old_inp's read queue to new_inp's (used by peeloff/accept).  Socket
 * buffer accounting is transferred along with the data: each mbuf is
 * sbfree'd from the old socket's receive buffer and sballoc'd into the
 * new one.  On FreeBSD/APPLE the old socket's receive buffer is sblock'd
 * for the duration; if that lock cannot be obtained the move is silently
 * abandoned (see comment below).
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
				 struct sctp_inpcb *new_inp,
				 struct sctp_tcb *stcb,
				 int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;	/* staging queue between the two inps */
	struct mbuf *m;
#if (defined(__FreeBSD__) || defined(__APPLE__)) && !defined(__Userspace__)
	int error = 0;
#endif

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
#if (defined(__FreeBSD__) || defined(__APPLE__)) && !defined(__Userspace__)
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/* Gak, can't get sblock, we have a problem.
		 * data will be left stranded.. and we
		 * don't dare look at it since the
		 * other thread may be reading something.
		 * Oh well, its a screwed up app that does
		 * a peeloff OR a accept while reading
		 * from the main socket... actually its
		 * only the peeloff() case, since I think
		 * read will fail on a listening socket..
		 */
		return;
	}
#endif
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	/* Phase 1: detach matching controls onto tmp_queue and release
	 * their socket-buffer accounting from the old socket. */
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for out target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE,SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */
#if defined(__APPLE__) && !defined(__Userspace__)
	sbunlock(&old_so->so_rcv, 1);
#endif

#if defined(__FreeBSD__) && !defined(__Userspace__)
	sbunlock(&old_so->so_rcv);
#endif
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	/* Phase 2: append staged controls to the new inp and charge the
	 * data to the new socket's receive buffer. */
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
5161
/*
 * Wake up any reader sleeping on the endpoint's socket (sorwakeup).
 * 'so_locked' is only meaningful on Apple kernels, where the socket lock
 * must be taken around the wakeup; taking it requires dropping the TCB
 * lock, so the stcb is ref-counted across the gap to keep it alive.
 */
void
sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
			    struct sctp_tcb *stcb,
			    int so_locked
#if !(defined(__APPLE__) && !defined(__Userspace__))
			    SCTP_UNUSED
#endif
)
{
	if ((inp != NULL) && (inp->sctp_socket != NULL)) {
#if defined(__APPLE__) && !defined(__Userspace__)
		struct socket *so;

		so = SCTP_INP_SO(inp);
		if (!so_locked) {
			if (stcb) {
				/* hold the assoc while the TCB lock is dropped */
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
			}
			SCTP_SOCKET_LOCK(so, 1);
			if (stcb) {
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* socket vanished while we were unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) && !defined(__Userspace__)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
5200 #if defined(__Userspace__)
5201
5202 void
5203 sctp_invoke_recv_callback(struct sctp_inpcb *inp,
5204 struct sctp_tcb *stcb,
5205 struct sctp_queued_to_read *control,
5206 int inp_read_lock_held)
5207 {
5208 uint32_t pd_point, length;
5209
5210 if ((inp->recv_callback == NULL) ||
5211 (stcb == NULL) ||
5212 (stcb->sctp_socket == NULL)) {
5213 return;
5214 }
5215
5216 length = control->length;
5217 if (stcb != NULL && stcb->sctp_socket != NULL) {
5218 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
5219 stcb->sctp_ep->partial_delivery_point);
5220 } else {
5221 pd_point = inp->partial_delivery_point;
5222 }
5223 if ((control->end_added == 1) || (length >= pd_point)) {
5224 struct socket *so;
5225 struct mbuf *m;
5226 char *buffer;
5227 struct sctp_rcvinfo rcv;
5228 union sctp_sockstore addr;
5229 int flags;
5230
5231 if ((buffer = malloc(length)) == NULL) {
5232 return;
5233 }
5234 if (inp_read_lock_held == 0) {
5235 SCTP_INP_READ_LOCK(inp);
5236 }
5237 so = stcb->sctp_socket;
5238 for (m = control->data; m; m = SCTP_BUF_NEXT(m)) {
5239 sctp_sbfree(control, control->stcb, &so->so_rcv, m);
5240 }
5241 m_copydata(control->data, 0, length, buffer);
5242 memset(&rcv, 0, sizeof(struct sctp_rcvinfo));
5243 rcv.rcv_sid = control->sinfo_stream;
5244 rcv.rcv_ssn = (uint16_t)control->mid;
5245 rcv.rcv_flags = control->sinfo_flags;
5246 rcv.rcv_ppid = control->sinfo_ppid;
5247 rcv.rcv_tsn = control->sinfo_tsn;
5248 rcv.rcv_cumtsn = control->sinfo_cumtsn;
5249 rcv.rcv_context = control->sinfo_context;
5250 rcv.rcv_assoc_id = control->sinfo_assoc_id;
5251 memset(&addr, 0, sizeof(union sctp_sockstore));
5252 switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5253 #ifdef INET
5254 case AF_INET:
5255 addr.sin = control->whoFrom->ro._l_addr.sin;
5256 break;
5257 #endif
5258 #ifdef INET6
5259 case AF_INET6:
5260 addr.sin6 = control->whoFrom->ro._l_addr.sin6;
5261 break;
5262 #endif
5263 case AF_CONN:
5264 addr.sconn = control->whoFrom->ro._l_addr.sconn;
5265 break;
5266 default:
5267 addr.sa = control->whoFrom->ro._l_addr.sa;
5268 break;
5269 }
5270 flags = 0;
5271 if (control->end_added == 1) {
5272 flags |= MSG_EOR;
5273 }
5274 if (control->spec_flags & M_NOTIFICATION) {
5275 flags |= MSG_NOTIFICATION;
5276 }
5277 sctp_m_freem(control->data);
5278 control->data = NULL;
5279 control->tail_mbuf = NULL;
5280 control->length = 0;
5281 if (control->end_added) {
5282 TAILQ_REMOVE(&stcb->sctp_ep->read_queue, control, next);
5283 control->on_read_q = 0;
5284 sctp_free_remote_addr(control->whoFrom);
5285 control->whoFrom = NULL;
5286 sctp_free_a_readq(stcb, control);
5287 }
5288 atomic_add_int(&stcb->asoc.refcnt, 1);
5289 SCTP_TCB_UNLOCK(stcb);
5290 if (inp_read_lock_held == 0) {
5291 SCTP_INP_READ_UNLOCK(inp);
5292 }
5293 inp->recv_callback(so, addr, buffer, length, rcv, flags, inp->ulp_info);
5294 SCTP_TCB_LOCK(stcb);
5295 atomic_subtract_int(&stcb->asoc.refcnt, 1);
5296 }
5297 }
5298 #endif
5299
/*
 * Append 'control' (and its mbuf chain) to the endpoint's read queue so a
 * reader can pick it up, charging the data to socket buffer 'sb' so that
 * select()/poll() see it.  Zero-length mbufs are pruned from the chain on
 * the way in.  'end' marks the message complete (sets end_added).  If the
 * socket can no longer read, or the chain collapses to nothing, the
 * control is released instead (unless it is still linked on a stream
 * queue).  Finally the reader is woken (and, in userland builds, the
 * registered recv callback is invoked).
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
		  struct sctp_tcb *stcb,
		  struct sctp_queued_to_read *control,
		  struct sockbuf *sb,
		  int end,
		  int inp_read_lock_held,
		  int so_locked)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
#if defined(__APPLE__) && !defined(__Userspace__)
	if (so_locked) {
		sctp_lock_assert(SCTP_INP_SO(inp));
	} else {
		sctp_unlock_assert(SCTP_INP_SO(inp));
	}
#endif
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* nothing will ever read this; free it unless a stream
		 * queue still owns it */
		if (!control->on_strm_q) {
			sctp_free_remote_addr(control->whoFrom);
			if (control->data) {
				sctp_m_freem(control->data);
				control->data = NULL;
			}
			sctp_free_a_readq(stcb, control);
		}
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (!(control->spec_flags & M_NOTIFICATION)) {
		/* count real user data, not event notifications */
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	/* Walk the chain: drop empty mbufs, charge the rest to 'sb' and
	 * accumulate control->length. */
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		if (!control->on_strm_q) {
			sctp_free_remote_addr(control->whoFrom);
			sctp_free_a_readq(stcb, control);
		}
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	control->on_read_q = 1;
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
#if defined(__Userspace__)
	sctp_invoke_recv_callback(inp, stcb, control, inp_read_lock_held);
#endif
	if (inp && inp->sctp_socket) {
		sctp_wakeup_the_read_socket(inp, stcb, so_locked);
	}
}
5407
5408 /*************HOLD THIS COMMENT FOR PATCH FILE OF
5409 *************ALTERNATE ROUTING CODE
5410 */
5411
5412 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
5413 *************ALTERNATE ROUTING CODE
5414 */
5415
5416 struct mbuf *
5417 sctp_generate_cause(uint16_t code, char *info)
5418 {
5419 struct mbuf *m;
5420 struct sctp_gen_error_cause *cause;
5421 size_t info_len;
5422 uint16_t len;
5423
5424 if ((code == 0) || (info == NULL)) {
5425 return (NULL);
5426 }
5427 info_len = strlen(info);
5428 if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
5429 return (NULL);
5430 }
5431 len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len);
5432 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
5433 if (m != NULL) {
5434 SCTP_BUF_LEN(m) = len;
5435 cause = mtod(m, struct sctp_gen_error_cause *);
5436 cause->code = htons(code);
5437 cause->length = htons(len);
5438 memcpy(cause->info, info, info_len);
5439 }
5440 return (m);
5441 }
5442
5443 struct mbuf *
5444 sctp_generate_no_user_data_cause(uint32_t tsn)
5445 {
5446 struct mbuf *m;
5447 struct sctp_error_no_user_data *no_user_data_cause;
5448 uint16_t len;
5449
5450 len = (uint16_t)sizeof(struct sctp_error_no_user_data);
5451 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
5452 if (m != NULL) {
5453 SCTP_BUF_LEN(m) = len;
5454 no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
5455 no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
5456 no_user_data_cause->cause.length = htons(len);
5457 no_user_data_cause->tsn = htonl(tsn);
5458 }
5459 return (m);
5460 }
5461
5462 #ifdef SCTP_MBCNT_LOGGING
/*
 * Release the output-queue accounting held by chunk 'tp1': decrement the
 * association's chunk count and total queued bytes (clamped at zero) and,
 * for TCP-model sockets, the socket send-buffer byte count as well.
 * Only compiled when SCTP_MBCNT_LOGGING is enabled; logs the decrease
 * before applying it.
 */
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
		   struct sctp_tmit_chunk *tp1, int chk_cnt)
{
	if (tp1->data == NULL) {
		/* no data, nothing was charged */
		return;
	}
	asoc->chunks_on_out_queue -= chk_cnt;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
		/* log the pre-decrement totals */
		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
			       asoc->total_output_queue_size,
			       tp1->book_size,
			       0,
			       tp1->mbcnt);
	}
	/* clamp at zero rather than underflowing the unsigned counter */
	if (asoc->total_output_queue_size >= tp1->book_size) {
		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
	} else {
		asoc->total_output_queue_size = 0;
	}

	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
				  ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
		} else {
			stcb->sctp_socket->so_snd.sb_cc = 0;

		}
	}
}
5494
5495 #endif
5496
/*
 * Abandon a PR-SCTP message: starting at chunk 'tp1' (on the sent queue),
 * mark every fragment of the same message as SCTP_FORWARD_TSN_SKIP, free
 * its data, notify the ULP, and restore flight/rwnd accounting.  The
 * message may span three places, handled in order:
 *   1) the sent queue (the do/while loop),
 *   2) the send queue (first foundeom == 0 pass), and
 *   3) the stream output queue (second foundeom == 0 pass), where the
 *      remainder is flagged discard_rest and, if necessary, a dummy
 *      LAST_FRAG chunk is fabricated so a FORWARD-TSN can cover it.
 * 'sent' selects which abandonment statistics/notification to use.
 * Returns the number of book-kept bytes released.
 */
int
sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
			   uint8_t sent, int so_locked)
{
	struct sctp_stream_out *strq;
	struct sctp_tmit_chunk *chk = NULL, *tp2;
	struct sctp_stream_queue_pending *sp;
	uint32_t mid;
	uint16_t sid;
	uint8_t foundeom = 0;
	int ret_sz = 0;
	int notdone;
	int do_wakeup_routine = 0;

#if defined(__APPLE__) && !defined(__Userspace__)
	if (so_locked) {
		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
	} else {
		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
	}
#endif
	/* stream id / message id identify all fragments of this message */
	sid = tp1->rec.data.sid;
	mid = tp1->rec.data.mid;
	/* an already-started message counts as "sent" for the statistics */
	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
		stcb->asoc.abandoned_sent[0]++;
		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
		stcb->asoc.strmout[sid].abandoned_sent[0]++;
#if defined(SCTP_DETAILED_STR_STATS)
		stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
#endif
	} else {
		stcb->asoc.abandoned_unsent[0]++;
		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
		stcb->asoc.strmout[sid].abandoned_unsent[0]++;
#if defined(SCTP_DETAILED_STR_STATS)
		stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
#endif
	}
	/* Phase 1: walk the sent queue from tp1 until the message's last
	 * fragment (or the queue ends). */
	do {
		ret_sz += tp1->book_size;
		if (tp1->data != NULL) {
			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
				/* chunk was in flight; undo the accounting */
				sctp_flight_size_decrease(tp1);
				sctp_total_flight_decrease(stcb, tp1);
			}
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			stcb->asoc.peers_rwnd += tp1->send_size;
			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
			if (sent) {
				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
			} else {
				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
			}
			if (tp1->data) {
				sctp_m_freem(tp1->data);
				tp1->data = NULL;
			}
			do_wakeup_routine = 1;
			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
				stcb->asoc.sent_queue_cnt_removeable--;
			}
		}
		tp1->sent = SCTP_FORWARD_TSN_SKIP;
		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
		    SCTP_DATA_NOT_FRAG) {
			/* not frag'ed we ae done */
			notdone = 0;
			foundeom = 1;
		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			/* end of frag, we are done */
			notdone = 0;
			foundeom = 1;
		} else {
			/*
			 * Its a begin or middle piece, we must mark all of
			 * it
			 */
			notdone = 1;
			tp1 = TAILQ_NEXT(tp1, sctp_next);
		}
	} while (tp1 && notdone);
	/* Phase 2: the message continued onto the (un-transmitted) send
	 * queue; skip those fragments too. */
	if (foundeom == 0) {
		/*
		 * The multi-part message was scattered across the send and
		 * sent queue.
		 */
		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
			if ((tp1->rec.data.sid != sid) ||
			    (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) {
				/* a different message begins here; stop */
				break;
			}
			/* save to chk in case we have some on stream out
			 * queue. If so and we have an un-transmitted one
			 * we don't have to fudge the TSN.
			 */
			chk = tp1;
			ret_sz += tp1->book_size;
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			if (sent) {
				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
			} else {
				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
			}
			if (tp1->data) {
				sctp_m_freem(tp1->data);
				tp1->data = NULL;
			}
			/* No flight involved here book the size to 0 */
			tp1->book_size = 0;
			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				foundeom = 1;
			}
			do_wakeup_routine = 1;
			tp1->sent = SCTP_FORWARD_TSN_SKIP;
			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
			/* on to the sent queue so we can wait for it to be passed by. */
			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
					  sctp_next);
			stcb->asoc.send_queue_cnt--;
			stcb->asoc.sent_queue_cnt++;
		}
	}
	/* Phase 3: still more of the message waiting on the stream output
	 * queue; discard it and, if needed, fabricate a LAST_FRAG marker. */
	if (foundeom == 0) {
		/*
		 * Still no eom found. That means there
		 * is stuff left on the stream out queue.. yuck.
		 */
		SCTP_TCB_SEND_LOCK(stcb);
		strq = &stcb->asoc.strmout[sid];
		sp = TAILQ_FIRST(&strq->outqueue);
		if (sp != NULL) {
			sp->discard_rest = 1;
			/*
			 * We may need to put a chunk on the
			 * queue that holds the TSN that
			 * would have been sent with the LAST
			 * bit.
			 */
			if (chk == NULL) {
				/* Yep, we have to */
				sctp_alloc_a_chunk(stcb, chk);
				if (chk == NULL) {
					/* we are hosed. All we can
					 * do is nothing.. which will
					 * cause an abort if the peer is
					 * paying attention.
					 */
					goto oh_well;
				}
				memset(chk, 0, sizeof(*chk));
				chk->rec.data.rcv_flags = 0;
				chk->sent = SCTP_FORWARD_TSN_SKIP;
				chk->asoc = &stcb->asoc;
				/* pick the message id the LAST fragment
				 * would have carried */
				if (stcb->asoc.idata_supported == 0) {
					if (sp->sinfo_flags & SCTP_UNORDERED) {
						chk->rec.data.mid = 0;
					} else {
						chk->rec.data.mid = strq->next_mid_ordered;
					}
				} else {
					if (sp->sinfo_flags & SCTP_UNORDERED) {
						chk->rec.data.mid = strq->next_mid_unordered;
					} else {
						chk->rec.data.mid = strq->next_mid_ordered;
					}
				}
				chk->rec.data.sid = sp->sid;
				chk->rec.data.ppid = sp->ppid;
				chk->rec.data.context = sp->context;
				chk->flags = sp->act_flags;
				chk->whoTo = NULL;
				/* consume the next TSN for the dummy chunk */
#if defined(__FreeBSD__) && !defined(__Userspace__)
				chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
#else
				chk->rec.data.tsn = stcb->asoc.sending_seq++;
#endif
				strq->chunks_on_queues++;
				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
				stcb->asoc.sent_queue_cnt++;
				stcb->asoc.pr_sctp_cnt++;
			}
			chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
			if (sp->sinfo_flags & SCTP_UNORDERED) {
				chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED;
			}
			/* advance the stream's message id past this message */
			if (stcb->asoc.idata_supported == 0) {
				if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) {
					strq->next_mid_ordered++;
				}
			} else {
				if (sp->sinfo_flags & SCTP_UNORDERED) {
					strq->next_mid_unordered++;
				} else {
					strq->next_mid_ordered++;
				}
			}
		oh_well:
			if (sp->data) {
				/* Pull any data to free up the SB and
				 * allow sender to "add more" while we
				 * will throw away :-)
				 */
				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
				ret_sz += sp->length;
				do_wakeup_routine = 1;
				sp->some_taken = 1;
				sctp_m_freem(sp->data);
				sp->data = NULL;
				sp->tail_mbuf = NULL;
				sp->length = 0;
			}
		}
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
	if (do_wakeup_routine) {
#if defined(__APPLE__) && !defined(__Userspace__)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/* hold the assoc while the TCB lock is dropped to
			 * acquire the socket lock */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				/* assoc was freed while we were unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return (ret_sz);
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) && !defined(__Userspace__)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
	return (ret_sz);
}
5738
5739 /*
5740 * checks to see if the given address, sa, is one that is currently known by
5741 * the kernel note: can't distinguish the same address on multiple interfaces
5742 * and doesn't handle multiple addresses with different zone/scope id's note:
5743 * ifa_ifwithaddr() compares the entire sockaddr struct
5744 */
5745 struct sctp_ifa *
5746 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
5747 int holds_lock)
5748 {
5749 struct sctp_laddr *laddr;
5750
5751 if (holds_lock == 0) {
5752 SCTP_INP_RLOCK(inp);
5753 }
5754
5755 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
5756 if (laddr->ifa == NULL)
5757 continue;
5758 if (addr->sa_family != laddr->ifa->address.sa.sa_family)
5759 continue;
5760 #ifdef INET
5761 if (addr->sa_family == AF_INET) {
5762 if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5763 laddr->ifa->address.sin.sin_addr.s_addr) {
5764 /* found him. */
5765 break;
5766 }
5767 }
5768 #endif
5769 #ifdef INET6
5770 if (addr->sa_family == AF_INET6) {
5771 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5772 &laddr->ifa->address.sin6)) {
5773 /* found him. */
5774 break;
5775 }
5776 }
5777 #endif
5778 #if defined(__Userspace__)
5779 if (addr->sa_family == AF_CONN) {
5780 if (((struct sockaddr_conn *)addr)->sconn_addr == laddr->ifa->address.sconn.sconn_addr) {
5781 /* found him. */
5782 break;
5783 }
5784 }
5785 #endif
5786 }
5787 if (holds_lock == 0) {
5788 SCTP_INP_RUNLOCK(inp);
5789 }
5790 return (laddr->ifa);
5791 }
5792
/*
 * Compute a 32-bit hash of an address for the VRF address hash table.
 * IPv4 folds the upper half of the address into the lower half; IPv6 sums
 * the four 32-bit words first; AF_CONN hashes the opaque pointer the same
 * way.  Unsupported families hash to 0.
 */
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
	switch (addr->sa_family) {
#ifdef INET
	case AF_INET:
	{
		uint32_t v4;

		v4 = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
		return (v4 ^ (v4 >> 16));
	}
#endif
#ifdef INET6
	case AF_INET6:
	{
		struct sockaddr_in6 *sin6;
		uint32_t sum;

		sin6 = (struct sockaddr_in6 *)addr;
		/* platforms differ in whether in6_addr exposes s6_addr32 */
#if !defined(_WIN32) && !(defined(__FreeBSD__) && defined(__Userspace__)) && !defined(__APPLE__)
		sum = sin6->sin6_addr.s6_addr32[0] +
		      sin6->sin6_addr.s6_addr32[1] +
		      sin6->sin6_addr.s6_addr32[2] +
		      sin6->sin6_addr.s6_addr32[3];
#else
		sum = ((uint32_t *)&sin6->sin6_addr)[0] +
		      ((uint32_t *)&sin6->sin6_addr)[1] +
		      ((uint32_t *)&sin6->sin6_addr)[2] +
		      ((uint32_t *)&sin6->sin6_addr)[3];
#endif
		return (sum ^ (sum >> 16));
	}
#endif
#if defined(__Userspace__)
	case AF_CONN:
	{
		uintptr_t ptr_bits;

		ptr_bits = (uintptr_t)((struct sockaddr_conn *)addr)->sconn_addr;
		return ((uint32_t)(ptr_bits ^ (ptr_bits >> 16)));
	}
#endif
	default:
		/* unknown family: everything lands in bucket 0 */
		return (0);
	}
}
5844
/*
 * Look up 'addr' in the given VRF's address hash table.  Returns the
 * matching sctp_ifa, or NULL when the VRF does not exist, the bucket is
 * missing, or no entry matches (LIST_FOREACH leaves sctp_ifap NULL after
 * a full traversal, so the final return is NULL-safe).  Takes the global
 * address read lock unless the caller already holds it.
 */
struct sctp_ifa *
sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
{
	struct sctp_ifa *sctp_ifap;
	struct sctp_vrf *vrf;
	struct sctp_ifalist *hash_head;
	uint32_t hash_of_addr;

	if (holds_lock == 0)
		SCTP_IPI_ADDR_RLOCK();

	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL) {
		if (holds_lock == 0)
			SCTP_IPI_ADDR_RUNLOCK();
		return (NULL);
	}

	hash_of_addr = sctp_get_ifa_hash_val(addr);

	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
	if (hash_head == NULL) {
		/* should not happen; log diagnostics and bail */
		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
			    hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark,
			    (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark));
		sctp_print_address(addr);
		SCTP_PRINTF("No such bucket for address\n");
		if (holds_lock == 0)
			SCTP_IPI_ADDR_RUNLOCK();

		return (NULL);
	}
	/* scan the bucket for an exact family + address match */
	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
			continue;
#ifdef INET
		if (addr->sa_family == AF_INET) {
			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
			    sctp_ifap->address.sin.sin_addr.s_addr) {
				/* found him. */
				break;
			}
		}
#endif
#ifdef INET6
		if (addr->sa_family == AF_INET6) {
			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
			    &sctp_ifap->address.sin6)) {
				/* found him. */
				break;
			}
		}
#endif
#if defined(__Userspace__)
		if (addr->sa_family == AF_CONN) {
			if (((struct sockaddr_conn *)addr)->sconn_addr == sctp_ifap->address.sconn.sconn_addr) {
				/* found him. */
				break;
			}
		}
#endif
	}
	if (holds_lock == 0)
		SCTP_IPI_ADDR_RUNLOCK();
	return (sctp_ifap);
}
5911
/*
 * The user just consumed data from the receive path.  Decide whether the
 * receive window has re-opened enough (at least rwnd_req bytes beyond the
 * last value we reported to the peer) to justify sending a window-update
 * SACK, and if so, send it and kick the output path.
 *
 * stcb         - association the data was read from; may be NULL (no-op).
 * freed_so_far - in/out: bytes freed by the reader since the last call;
 *                accumulated into stcb->freed_by_sorcv_sincelast and
 *                reset to 0 here.
 * hold_rlock   - non-zero if the caller holds the INP read-queue lock;
 *                it is dropped around the send and re-taken on exit.
 * rwnd_req     - threshold (bytes) the window must have grown by before
 *                a window-update SACK is worth sending.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
#if defined(__FreeBSD__) && !defined(__Userspace__)
	struct epoch_tracker et;
#endif
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Hold a reference so the association cannot be freed under us. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
	    (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | SCTP_STATE_SHUTDOWN_RECEIVED))) {
		/*
		 * Pre-check: the association is shutting down or about to
		 * be freed, so no window update is needed.
		 */
		goto no_lock;
	}
	/* Also pin the endpoint while we work on it. */
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* Accumulate what the reader freed since the last report ... */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* ... and reset the caller's running count. */
	*freed_so_far = 0;

	/*
	 * Figure out what the rwnd would be if we advertised now, and how
	 * much it has grown (dif) since the last value reported to the
	 * peer.  A shrinking window yields dif == 0, i.e. no update.
	 */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/*
		 * Worth an update.  Drop the read-queue lock (if held)
		 * before taking the TCB lock to respect lock ordering.
		 */
		if (hold_rlock) {
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* Freeing raced in after the lock; no reports here. */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
#if defined(__FreeBSD__) && !defined(__Userspace__)
		/* FreeBSD kernel: the output path must run inside a net epoch. */
		NET_EPOCH_ENTER(et);
#endif
		/* Send the window-update SACK and push any queued output. */
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
#if defined(__FreeBSD__) && !defined(__Userspace__)
		NET_EPOCH_EXIT(et);
#endif
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Not enough growth yet; remember how much is pending. */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* Re-take the read-queue lock if we dropped it for the caller. */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}

	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	/* Drop the association reference taken on entry. */
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
6003
6004 int
6005 sctp_sorecvmsg(struct socket *so,
6006 struct uio *uio,
6007 struct mbuf **mp,
6008 struct sockaddr *from,
6009 int fromlen,
6010 int *msg_flags,
6011 struct sctp_sndrcvinfo *sinfo,
6012 int filling_sinfo)
6013 {
6014 /*
6015 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
6016 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
6017 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
6018 * On the way out we may send out any combination of:
6019 * MSG_NOTIFICATION MSG_EOR
6020 *
6021 */
6022 struct sctp_inpcb *inp = NULL;
6023 ssize_t my_len = 0;
6024 ssize_t cp_len = 0;
6025 int error = 0;
6026 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
6027 struct mbuf *m = NULL;
6028 struct sctp_tcb *stcb = NULL;
6029 int wakeup_read_socket = 0;
6030 int freecnt_applied = 0;
6031 int out_flags = 0, in_flags = 0;
6032 int block_allowed = 1;
6033 uint32_t freed_so_far = 0;
6034 ssize_t copied_so_far = 0;
6035 int in_eeor_mode = 0;
6036 int no_rcv_needed = 0;
6037 uint32_t rwnd_req = 0;
6038 int hold_sblock = 0;
6039 int hold_rlock = 0;
6040 ssize_t slen = 0;
6041 uint32_t held_length = 0;
6042 #if defined(__FreeBSD__) && !defined(__Userspace__)
6043 int sockbuf_lock = 0;
6044 #endif
6045
6046 if (uio == NULL) {
6047 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6048 return (EINVAL);
6049 }
6050
6051 if (msg_flags) {
6052 in_flags = *msg_flags;
6053 if (in_flags & MSG_PEEK)
6054 SCTP_STAT_INCR(sctps_read_peeks);
6055 } else {
6056 in_flags = 0;
6057 }
6058 #if defined(__APPLE__) && !defined(__Userspace__)
6059 #if defined(APPLE_LEOPARD)
6060 slen = uio->uio_resid;
6061 #else
6062 slen = uio_resid(uio);
6063 #endif
6064 #else
6065 slen = uio->uio_resid;
6066 #endif
6067
6068 /* Pull in and set up our int flags */
6069 if (in_flags & MSG_OOB) {
6070 /* Out of band's NOT supported */
6071 return (EOPNOTSUPP);
6072 }
6073 if ((in_flags & MSG_PEEK) && (mp != NULL)) {
6074 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6075 return (EINVAL);
6076 }
6077 if ((in_flags & (MSG_DONTWAIT
6078 #if defined(__FreeBSD__) && !defined(__Userspace__)
6079 | MSG_NBIO
6080 #endif
6081 )) ||
6082 SCTP_SO_IS_NBIO(so)) {
6083 block_allowed = 0;
6084 }
6085 /* setup the endpoint */
6086 inp = (struct sctp_inpcb *)so->so_pcb;
6087 if (inp == NULL) {
6088 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
6089 return (EFAULT);
6090 }
6091 rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
6092 /* Must be at least a MTU's worth */
6093 if (rwnd_req < SCTP_MIN_RWND)
6094 rwnd_req = SCTP_MIN_RWND;
6095 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
6096 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6097 #if defined(__APPLE__) && !defined(__Userspace__)
6098 #if defined(APPLE_LEOPARD)
6099 sctp_misc_ints(SCTP_SORECV_ENTER,
6100 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
6101 #else
6102 sctp_misc_ints(SCTP_SORECV_ENTER,
6103 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio_resid(uio));
6104 #endif
6105 #else
6106 sctp_misc_ints(SCTP_SORECV_ENTER,
6107 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
6108 #endif
6109 }
6110 #if defined(__Userspace__)
6111 SOCKBUF_LOCK(&so->so_rcv);
6112 hold_sblock = 1;
6113 #endif
6114 if (SCTP_BASE_SYSCTL(sctp_logging_level) &SCTP_RECV_RWND_LOGGING_ENABLE) {
6115 #if defined(__APPLE__) && !defined(__Userspace__)
6116 #if defined(APPLE_LEOPARD)
6117 sctp_misc_ints(SCTP_SORECV_ENTERPL,
6118 rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
6119 #else
6120 sctp_misc_ints(SCTP_SORECV_ENTERPL,
6121 rwnd_req, block_allowed, so->so_rcv.sb_cc, uio_resid(uio));
6122 #endif
6123 #else
6124 sctp_misc_ints(SCTP_SORECV_ENTERPL,
6125 rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
6126 #endif
6127 }
6128
6129 #if defined(__APPLE__) && !defined(__Userspace__)
6130 error = sblock(&so->so_rcv, SBLOCKWAIT(in_flags));
6131 #endif
6132
6133 #if defined(__FreeBSD__) && !defined(__Userspace__)
6134 error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
6135 #endif
6136 if (error) {
6137 goto release_unlocked;
6138 }
6139 #if defined(__FreeBSD__) && !defined(__Userspace__)
6140 sockbuf_lock = 1;
6141 #endif
6142 restart:
6143 #if defined(__Userspace__)
6144 if (hold_sblock == 0) {
6145 SOCKBUF_LOCK(&so->so_rcv);
6146 hold_sblock = 1;
6147 }
6148 #endif
6149 #if defined(__APPLE__) && !defined(__Userspace__)
6150 sbunlock(&so->so_rcv, 1);
6151 #endif
6152
6153 restart_nosblocks:
6154 if (hold_sblock == 0) {
6155 SOCKBUF_LOCK(&so->so_rcv);
6156 hold_sblock = 1;
6157 }
6158 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
6159 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
6160 goto out;
6161 }
6162 #if (defined(__FreeBSD__) || defined(_WIN32)) && !defined(__Userspace__)
6163 if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
6164 #else
6165 if ((so->so_state & SS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
6166 #endif
6167 if (so->so_error) {
6168 error = so->so_error;
6169 if ((in_flags & MSG_PEEK) == 0)
6170 so->so_error = 0;
6171 goto out;
6172 } else {
6173 if (so->so_rcv.sb_cc == 0) {
6174 /* indicate EOF */
6175 error = 0;
6176 goto out;
6177 }
6178 }
6179 }
6180 if (so->so_rcv.sb_cc <= held_length) {
6181 if (so->so_error) {
6182 error = so->so_error;
6183 if ((in_flags & MSG_PEEK) == 0) {
6184 so->so_error = 0;
6185 }
6186 goto out;
6187 }
6188 if ((so->so_rcv.sb_cc == 0) &&
6189 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
6190 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
6191 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
6192 /* For active open side clear flags for re-use
6193 * passive open is blocked by connect.
6194 */
6195 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
6196 /* You were aborted, passive side always hits here */
6197 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
6198 error = ECONNRESET;
6199 }
6200 so->so_state &= ~(SS_ISCONNECTING |
6201 SS_ISDISCONNECTING |
6202 SS_ISCONFIRMING |
6203 SS_ISCONNECTED);
6204 if (error == 0) {
6205 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
6206 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
6207 error = ENOTCONN;
6208 }
6209 }
6210 goto out;
6211 }
6212 }
6213 if (block_allowed) {
6214 error = sbwait(&so->so_rcv);
6215 if (error) {
6216 goto out;
6217 }
6218 held_length = 0;
6219 goto restart_nosblocks;
6220 } else {
6221 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
6222 error = EWOULDBLOCK;
6223 goto out;
6224 }
6225 }
6226 if (hold_sblock == 1) {
6227 SOCKBUF_UNLOCK(&so->so_rcv);
6228 hold_sblock = 0;
6229 }
6230 #if defined(__APPLE__) && !defined(__Userspace__)
6231 error = sblock(&so->so_rcv, SBLOCKWAIT(in_flags));
6232 #endif
6233 /* we possibly have data we can read */
6234 /*sa_ignore FREED_MEMORY*/
6235 control = TAILQ_FIRST(&inp->read_queue);
6236 if (control == NULL) {
6237 /* This could be happening since
6238 * the appender did the increment but as not
6239 * yet did the tailq insert onto the read_queue
6240 */
6241 if (hold_rlock == 0) {
6242 SCTP_INP_READ_LOCK(inp);
6243 }
6244 control = TAILQ_FIRST(&inp->read_queue);
6245 if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
6246 #ifdef INVARIANTS
6247 panic("Huh, its non zero and nothing on control?");
6248 #endif
6249 so->so_rcv.sb_cc = 0;
6250 }
6251 SCTP_INP_READ_UNLOCK(inp);
6252 hold_rlock = 0;
6253 goto restart;
6254 }
6255
6256 if ((control->length == 0) &&
6257 (control->do_not_ref_stcb)) {
6258 /* Clean up code for freeing assoc that left behind a pdapi..
6259 * maybe a peer in EEOR that just closed after sending and
6260 * never indicated a EOR.
6261 */
6262 if (hold_rlock == 0) {
6263 hold_rlock = 1;
6264 SCTP_INP_READ_LOCK(inp);
6265 }
6266 control->held_length = 0;
6267 if (control->data) {
6268 /* Hmm there is data here .. fix */
6269 struct mbuf *m_tmp;
6270 int cnt = 0;
6271 m_tmp = control->data;
6272 while (m_tmp) {
6273 cnt += SCTP_BUF_LEN(m_tmp);
6274 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
6275 control->tail_mbuf = m_tmp;
6276 control->end_added = 1;
6277 }
6278 m_tmp = SCTP_BUF_NEXT(m_tmp);
6279 }
6280 control->length = cnt;
6281 } else {
6282 /* remove it */
6283 TAILQ_REMOVE(&inp->read_queue, control, next);
6284 /* Add back any hiddend data */
6285 sctp_free_remote_addr(control->whoFrom);
6286 sctp_free_a_readq(stcb, control);
6287 }
6288 if (hold_rlock) {
6289 hold_rlock = 0;
6290 SCTP_INP_READ_UNLOCK(inp);
6291 }
6292 goto restart;
6293 }
6294 if ((control->length == 0) &&
6295 (control->end_added == 1)) {
6296 /* Do we also need to check for (control->pdapi_aborted == 1)? */
6297 if (hold_rlock == 0) {
6298 hold_rlock = 1;
6299 SCTP_INP_READ_LOCK(inp);
6300 }
6301 TAILQ_REMOVE(&inp->read_queue, control, next);
6302 if (control->data) {
6303 #ifdef INVARIANTS
6304 panic("control->data not null but control->length == 0");
6305 #else
6306 SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
6307 sctp_m_freem(control->data);
6308 control->data = NULL;
6309 #endif
6310 }
6311 if (control->aux_data) {
6312 sctp_m_free (control->aux_data);
6313 control->aux_data = NULL;
6314 }
6315 #ifdef INVARIANTS
6316 if (control->on_strm_q) {
6317 panic("About to free ctl:%p so:%p and its in %d",
6318 control, so, control->on_strm_q);
6319 }
6320 #endif
6321 sctp_free_remote_addr(control->whoFrom);
6322 sctp_free_a_readq(stcb, control);
6323 if (hold_rlock) {
6324 hold_rlock = 0;
6325 SCTP_INP_READ_UNLOCK(inp);
6326 }
6327 goto restart;
6328 }
6329 if (control->length == 0) {
6330 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
6331 (filling_sinfo)) {
6332 /* find a more suitable one then this */
6333 ctl = TAILQ_NEXT(control, next);
6334 while (ctl) {
6335 if ((ctl->stcb != control->stcb) && (ctl->length) &&
6336 (ctl->some_taken ||
6337 (ctl->spec_flags & M_NOTIFICATION) ||
6338 ((ctl->do_not_ref_stcb == 0) &&
6339 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
6340 ) {
6341 /*-
6342 * If we have a different TCB next, and there is data
6343 * present. If we have already taken some (pdapi), OR we can
6344 * ref the tcb and no delivery as started on this stream, we
6345 * take it. Note we allow a notification on a different
6346 * assoc to be delivered..
6347 */
6348 control = ctl;
6349 goto found_one;
6350 } else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
6351 (ctl->length) &&
6352 ((ctl->some_taken) ||
6353 ((ctl->do_not_ref_stcb == 0) &&
6354 ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
6355 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
6356 /*-
6357 * If we have the same tcb, and there is data present, and we
6358 * have the strm interleave feature present. Then if we have
6359 * taken some (pdapi) or we can refer to tht tcb AND we have
6360 * not started a delivery for this stream, we can take it.
6361 * Note we do NOT allow a notificaiton on the same assoc to
6362 * be delivered.
6363 */
6364 control = ctl;
6365 goto found_one;
6366 }
6367 ctl = TAILQ_NEXT(ctl, next);
6368 }
6369 }
6370 /*
6371 * if we reach here, not suitable replacement is available
6372 * <or> fragment interleave is NOT on. So stuff the sb_cc
6373 * into the our held count, and its time to sleep again.
6374 */
6375 held_length = so->so_rcv.sb_cc;
6376 control->held_length = so->so_rcv.sb_cc;
6377 goto restart;
6378 }
6379 /* Clear the held length since there is something to read */
6380 control->held_length = 0;
6381 found_one:
6382 /*
6383 * If we reach here, control has a some data for us to read off.
6384 * Note that stcb COULD be NULL.
6385 */
6386 if (hold_rlock == 0) {
6387 hold_rlock = 1;
6388 SCTP_INP_READ_LOCK(inp);
6389 }
6390 control->some_taken++;
6391 stcb = control->stcb;
6392 if (stcb) {
6393 if ((control->do_not_ref_stcb == 0) &&
6394 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
6395 if (freecnt_applied == 0)
6396 stcb = NULL;
6397 } else if (control->do_not_ref_stcb == 0) {
6398 /* you can't free it on me please */
6399 /*
6400 * The lock on the socket buffer protects us so the
6401 * free code will stop. But since we used the socketbuf
6402 * lock and the sender uses the tcb_lock to increment,
6403 * we need to use the atomic add to the refcnt
6404 */
6405 if (freecnt_applied) {
6406 #ifdef INVARIANTS
6407 panic("refcnt already incremented");
6408 #else
6409 SCTP_PRINTF("refcnt already incremented?\n");
6410 #endif
6411 } else {
6412 atomic_add_int(&stcb->asoc.refcnt, 1);
6413 freecnt_applied = 1;
6414 }
6415 /*
6416 * Setup to remember how much we have not yet told
6417 * the peer our rwnd has opened up. Note we grab
6418 * the value from the tcb from last time.
6419 * Note too that sack sending clears this when a sack
6420 * is sent, which is fine. Once we hit the rwnd_req,
6421 * we then will go to the sctp_user_rcvd() that will
6422 * not lock until it KNOWs it MUST send a WUP-SACK.
6423 */
6424 freed_so_far = (uint32_t)stcb->freed_by_sorcv_sincelast;
6425 stcb->freed_by_sorcv_sincelast = 0;
6426 }
6427 }
6428 if (stcb &&
6429 ((control->spec_flags & M_NOTIFICATION) == 0) &&
6430 control->do_not_ref_stcb == 0) {
6431 stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
6432 }
6433
6434 /* First lets get off the sinfo and sockaddr info */
6435 if ((sinfo != NULL) && (filling_sinfo != 0)) {
6436 sinfo->sinfo_stream = control->sinfo_stream;
6437 sinfo->sinfo_ssn = (uint16_t)control->mid;
6438 sinfo->sinfo_flags = control->sinfo_flags;
6439 sinfo->sinfo_ppid = control->sinfo_ppid;
6440 sinfo->sinfo_context =control->sinfo_context;
6441 sinfo->sinfo_timetolive = control->sinfo_timetolive;
6442 sinfo->sinfo_tsn = control->sinfo_tsn;
6443 sinfo->sinfo_cumtsn = control->sinfo_cumtsn;
6444 sinfo->sinfo_assoc_id = control->sinfo_assoc_id;
6445 nxt = TAILQ_NEXT(control, next);
6446 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6447 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
6448 struct sctp_extrcvinfo *s_extra;
6449 s_extra = (struct sctp_extrcvinfo *)sinfo;
6450 if ((nxt) &&
6451 (nxt->length)) {
6452 s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
6453 if (nxt->sinfo_flags & SCTP_UNORDERED) {
6454 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
6455 }
6456 if (nxt->spec_flags & M_NOTIFICATION) {
6457 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
6458 }
6459 s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
6460 s_extra->serinfo_next_length = nxt->length;
6461 s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
6462 s_extra->serinfo_next_stream = nxt->sinfo_stream;
6463 if (nxt->tail_mbuf != NULL) {
6464 if (nxt->end_added) {
6465 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
6466 }
6467 }
6468 } else {
6469 /* we explicitly 0 this, since the memcpy got
6470 * some other things beyond the older sinfo_
6471 * that is on the control's structure :-D
6472 */
6473 nxt = NULL;
6474 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
6475 s_extra->serinfo_next_aid = 0;
6476 s_extra->serinfo_next_length = 0;
6477 s_extra->serinfo_next_ppid = 0;
6478 s_extra->serinfo_next_stream = 0;
6479 }
6480 }
6481 /*
6482 * update off the real current cum-ack, if we have an stcb.
6483 */
6484 if ((control->do_not_ref_stcb == 0) && stcb)
6485 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
6486 /*
6487 * mask off the high bits, we keep the actual chunk bits in
6488 * there.
6489 */
6490 sinfo->sinfo_flags &= 0x00ff;
6491 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
6492 sinfo->sinfo_flags |= SCTP_UNORDERED;
6493 }
6494 }
6495 #ifdef SCTP_ASOCLOG_OF_TSNS
6496 {
6497 int index, newindex;
6498 struct sctp_pcbtsn_rlog *entry;
6499 do {
6500 index = inp->readlog_index;
6501 newindex = index + 1;
6502 if (newindex >= SCTP_READ_LOG_SIZE) {
6503 newindex = 0;
6504 }
6505 } while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
6506 entry = &inp->readlog[index];
6507 entry->vtag = control->sinfo_assoc_id;
6508 entry->strm = control->sinfo_stream;
6509 entry->seq = (uint16_t)control->mid;
6510 entry->sz = control->length;
6511 entry->flgs = control->sinfo_flags;
6512 }
6513 #endif
6514 if ((fromlen > 0) && (from != NULL)) {
6515 union sctp_sockstore store;
6516 size_t len;
6517
6518 switch (control->whoFrom->ro._l_addr.sa.sa_family) {
6519 #ifdef INET6
6520 case AF_INET6:
6521 len = sizeof(struct sockaddr_in6);
6522 store.sin6 = control->whoFrom->ro._l_addr.sin6;
6523 store.sin6.sin6_port = control->port_from;
6524 break;
6525 #endif
6526 #ifdef INET
6527 case AF_INET:
6528 #ifdef INET6
6529 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
6530 len = sizeof(struct sockaddr_in6);
6531 in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
6532 &store.sin6);
6533 store.sin6.sin6_port = control->port_from;
6534 } else {
6535 len = sizeof(struct sockaddr_in);
6536 store.sin = control->whoFrom->ro._l_addr.sin;
6537 store.sin.sin_port = control->port_from;
6538 }
6539 #else
6540 len = sizeof(struct sockaddr_in);
6541 store.sin = control->whoFrom->ro._l_addr.sin;
6542 store.sin.sin_port = control->port_from;
6543 #endif
6544 break;
6545 #endif
6546 #if defined(__Userspace__)
6547 case AF_CONN:
6548 len = sizeof(struct sockaddr_conn);
6549 store.sconn = control->whoFrom->ro._l_addr.sconn;
6550 store.sconn.sconn_port = control->port_from;
6551 break;
6552 #endif
6553 default:
6554 len = 0;
6555 break;
6556 }
6557 memcpy(from, &store, min((size_t)fromlen, len));
6558 #if defined(SCTP_EMBEDDED_V6_SCOPE)
6559 #ifdef INET6
6560 {
6561 struct sockaddr_in6 lsa6, *from6;
6562
6563 from6 = (struct sockaddr_in6 *)from;
6564 sctp_recover_scope_mac(from6, (&lsa6));
6565 }
6566 #endif
6567 #endif
6568 }
6569 if (hold_rlock) {
6570 SCTP_INP_READ_UNLOCK(inp);
6571 hold_rlock = 0;
6572 }
6573 if (hold_sblock) {
6574 SOCKBUF_UNLOCK(&so->so_rcv);
6575 hold_sblock = 0;
6576 }
6577 /* now copy out what data we can */
6578 if (mp == NULL) {
6579 /* copy out each mbuf in the chain up to length */
6580 get_more_data:
6581 m = control->data;
6582 while (m) {
6583 /* Move out all we can */
6584 #if defined(__APPLE__) && !defined(__Userspace__)
6585 #if defined(APPLE_LEOPARD)
6586 cp_len = uio->uio_resid;
6587 #else
6588 cp_len = uio_resid(uio);
6589 #endif
6590 #else
6591 cp_len = uio->uio_resid;
6592 #endif
6593 my_len = SCTP_BUF_LEN(m);
6594 if (cp_len > my_len) {
6595 /* not enough in this buf */
6596 cp_len = my_len;
6597 }
6598 if (hold_rlock) {
6599 SCTP_INP_READ_UNLOCK(inp);
6600 hold_rlock = 0;
6601 }
6602 #if defined(__APPLE__) && !defined(__Userspace__)
6603 SCTP_SOCKET_UNLOCK(so, 0);
6604 #endif
6605 if (cp_len > 0)
6606 error = uiomove(mtod(m, char *), (int)cp_len, uio);
6607 #if defined(__APPLE__) && !defined(__Userspace__)
6608 SCTP_SOCKET_LOCK(so, 0);
6609 #endif
6610 /* re-read */
6611 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
6612 goto release;
6613 }
6614
6615 if ((control->do_not_ref_stcb == 0) && stcb &&
6616 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
6617 no_rcv_needed = 1;
6618 }
6619 if (error) {
6620 /* error we are out of here */
6621 goto release;
6622 }
6623 SCTP_INP_READ_LOCK(inp);
6624 hold_rlock = 1;
6625 if (cp_len == SCTP_BUF_LEN(m)) {
6626 if ((SCTP_BUF_NEXT(m)== NULL) &&
6627 (control->end_added)) {
6628 out_flags |= MSG_EOR;
6629 if ((control->do_not_ref_stcb == 0) &&
6630 (control->stcb != NULL) &&
6631 ((control->spec_flags & M_NOTIFICATION) == 0))
6632 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6633 }
6634 if (control->spec_flags & M_NOTIFICATION) {
6635 out_flags |= MSG_NOTIFICATION;
6636 }
6637 /* we ate up the mbuf */
6638 if (in_flags & MSG_PEEK) {
6639 /* just looking */
6640 m = SCTP_BUF_NEXT(m);
6641 copied_so_far += cp_len;
6642 } else {
6643 /* dispose of the mbuf */
6644 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6645 sctp_sblog(&so->so_rcv,
6646 control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6647 }
6648 sctp_sbfree(control, stcb, &so->so_rcv, m);
6649 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6650 sctp_sblog(&so->so_rcv,
6651 control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
6652 }
6653 copied_so_far += cp_len;
6654 freed_so_far += (uint32_t)cp_len;
6655 freed_so_far += MSIZE;
6656 atomic_subtract_int(&control->length, cp_len);
6657 control->data = sctp_m_free(m);
6658 m = control->data;
6659 /* been through it all, must hold sb lock ok to null tail */
6660 if (control->data == NULL) {
6661 #ifdef INVARIANTS
6662 #if defined(__FreeBSD__) && !defined(__Userspace__)
6663 if ((control->end_added == 0) ||
6664 (TAILQ_NEXT(control, next) == NULL)) {
6665 /* If the end is not added, OR the
6666 * next is NOT null we MUST have the lock.
6667 */
6668 if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
6669 panic("Hmm we don't own the lock?");
6670 }
6671 }
6672 #endif
6673 #endif
6674 control->tail_mbuf = NULL;
6675 #ifdef INVARIANTS
6676 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
6677 panic("end_added, nothing left and no MSG_EOR");
6678 }
6679 #endif
6680 }
6681 }
6682 } else {
6683 /* Do we need to trim the mbuf? */
6684 if (control->spec_flags & M_NOTIFICATION) {
6685 out_flags |= MSG_NOTIFICATION;
6686 }
6687 if ((in_flags & MSG_PEEK) == 0) {
6688 SCTP_BUF_RESV_UF(m, cp_len);
6689 SCTP_BUF_LEN(m) -= (int)cp_len;
6690 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6691 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE, (int)cp_len);
6692 }
6693 atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
6694 if ((control->do_not_ref_stcb == 0) &&
6695 stcb) {
6696 atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
6697 }
6698 copied_so_far += cp_len;
6699 freed_so_far += (uint32_t)cp_len;
6700 freed_so_far += MSIZE;
6701 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6702 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb?NULL:stcb,
6703 SCTP_LOG_SBRESULT, 0);
6704 }
6705 atomic_subtract_int(&control->length, cp_len);
6706 } else {
6707 copied_so_far += cp_len;
6708 }
6709 }
6710 #if defined(__APPLE__) && !defined(__Userspace__)
6711 #if defined(APPLE_LEOPARD)
6712 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
6713 #else
6714 if ((out_flags & MSG_EOR) || (uio_resid(uio) == 0)) {
6715 #endif
6716 #else
6717 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
6718 #endif
6719 break;
6720 }
6721 if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
6722 (control->do_not_ref_stcb == 0) &&
6723 (freed_so_far >= rwnd_req)) {
6724 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6725 }
6726 } /* end while(m) */
6727 /*
6728 * At this point we have looked at it all and we either have
6729 * a MSG_EOR/or read all the user wants... <OR>
6730 * control->length == 0.
6731 */
6732 if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
6733 /* we are done with this control */
6734 if (control->length == 0) {
6735 if (control->data) {
6736 #ifdef INVARIANTS
6737 panic("control->data not null at read eor?");
6738 #else
6739 SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
6740 sctp_m_freem(control->data);
6741 control->data = NULL;
6742 #endif
6743 }
6744 done_with_control:
6745 if (hold_rlock == 0) {
6746 SCTP_INP_READ_LOCK(inp);
6747 hold_rlock = 1;
6748 }
6749 TAILQ_REMOVE(&inp->read_queue, control, next);
6750 /* Add back any hiddend data */
6751 if (control->held_length) {
6752 held_length = 0;
6753 control->held_length = 0;
6754 wakeup_read_socket = 1;
6755 }
6756 if (control->aux_data) {
6757 sctp_m_free (control->aux_data);
6758 control->aux_data = NULL;
6759 }
6760 no_rcv_needed = control->do_not_ref_stcb;
6761 sctp_free_remote_addr(control->whoFrom);
6762 control->data = NULL;
6763 #ifdef INVARIANTS
6764 if (control->on_strm_q) {
6765 panic("About to free ctl:%p so:%p and its in %d",
6766 control, so, control->on_strm_q);
6767 }
6768 #endif
6769 sctp_free_a_readq(stcb, control);
6770 control = NULL;
6771 if ((freed_so_far >= rwnd_req) &&
6772 (no_rcv_needed == 0))
6773 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6774
6775 } else {
6776 /*
6777 * The user did not read all of this
6778 * message, turn off the returned MSG_EOR
6779 * since we are leaving more behind on the
6780 * control to read.
6781 */
6782 #ifdef INVARIANTS
6783 if (control->end_added &&
6784 (control->data == NULL) &&
6785 (control->tail_mbuf == NULL)) {
6786 panic("Gak, control->length is corrupt?");
6787 }
6788 #endif
6789 no_rcv_needed = control->do_not_ref_stcb;
6790 out_flags &= ~MSG_EOR;
6791 }
6792 }
6793 if (out_flags & MSG_EOR) {
6794 goto release;
6795 }
6796 #if defined(__APPLE__) && !defined(__Userspace__)
6797 #if defined(APPLE_LEOPARD)
6798 if ((uio->uio_resid == 0) ||
6799 #else
6800 if ((uio_resid(uio) == 0) ||
6801 #endif
6802 #else
6803 if ((uio->uio_resid == 0) ||
6804 #endif
6805 ((in_eeor_mode) &&
6806 (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))) {
6807 goto release;
6808 }
6809 /*
6810 * If I hit here the receiver wants more and this message is
6811 * NOT done (pd-api). So two questions. Can we block? if not
6812 * we are done. Did the user NOT set MSG_WAITALL?
6813 */
6814 if (block_allowed == 0) {
6815 goto release;
6816 }
6817 /*
6818 * We need to wait for more data a few things: - We don't
6819 * sbunlock() so we don't get someone else reading. - We
6820 * must be sure to account for the case where what is added
6821 * is NOT to our control when we wakeup.
6822 */
6823
6824 /* Do we need to tell the transport a rwnd update might be
6825 * needed before we go to sleep?
6826 */
6827 if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
6828 ((freed_so_far >= rwnd_req) &&
6829 (control->do_not_ref_stcb == 0) &&
6830 (no_rcv_needed == 0))) {
6831 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6832 }
6833 wait_some_more:
6834 #if (defined(__FreeBSD__) || defined(_WIN32)) && !defined(__Userspace__)
6835 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
6836 goto release;
6837 }
6838 #else
6839 if (so->so_state & SS_CANTRCVMORE) {
6840 goto release;
6841 }
6842 #endif
6843
6844 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
6845 goto release;
6846
6847 if (hold_rlock == 1) {
6848 SCTP_INP_READ_UNLOCK(inp);
6849 hold_rlock = 0;
6850 }
6851 if (hold_sblock == 0) {
6852 SOCKBUF_LOCK(&so->so_rcv);
6853 hold_sblock = 1;
6854 }
6855 if ((copied_so_far) && (control->length == 0) &&
6856 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
6857 goto release;
6858 }
6859 #if defined(__APPLE__) && !defined(__Userspace__)
6860 sbunlock(&so->so_rcv, 1);
6861 #endif
6862 if (so->so_rcv.sb_cc <= control->held_length) {
6863 error = sbwait(&so->so_rcv);
6864 if (error) {
6865 #if defined(__FreeBSD__) && !defined(__Userspace__)
6866 goto release;
6867 #else
6868 goto release_unlocked;
6869 #endif
6870 }
6871 control->held_length = 0;
6872 }
6873 #if defined(__APPLE__) && !defined(__Userspace__)
6874 error = sblock(&so->so_rcv, SBLOCKWAIT(in_flags));
6875 #endif
6876 if (hold_sblock) {
6877 SOCKBUF_UNLOCK(&so->so_rcv);
6878 hold_sblock = 0;
6879 }
6880 if (control->length == 0) {
6881 /* still nothing here */
6882 if (control->end_added == 1) {
6883 /* he aborted, or is done i.e.did a shutdown */
6884 out_flags |= MSG_EOR;
6885 if (control->pdapi_aborted) {
6886 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6887 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6888
6889 out_flags |= MSG_TRUNC;
6890 } else {
6891 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6892 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6893 }
6894 goto done_with_control;
6895 }
6896 if (so->so_rcv.sb_cc > held_length) {
6897 control->held_length = so->so_rcv.sb_cc;
6898 held_length = 0;
6899 }
6900 goto wait_some_more;
6901 } else if (control->data == NULL) {
6902 /* we must re-sync since data
6903 * is probably being added
6904 */
6905 SCTP_INP_READ_LOCK(inp);
6906 if ((control->length > 0) && (control->data == NULL)) {
6907 /* big trouble.. we have the lock and its corrupt? */
6908 #ifdef INVARIANTS
6909 panic ("Impossible data==NULL length !=0");
6910 #endif
6911 out_flags |= MSG_EOR;
6912 out_flags |= MSG_TRUNC;
6913 control->length = 0;
6914 SCTP_INP_READ_UNLOCK(inp);
6915 goto done_with_control;
6916 }
6917 SCTP_INP_READ_UNLOCK(inp);
6918 /* We will fall around to get more data */
6919 }
6920 goto get_more_data;
6921 } else {
6922 /*-
6923 * Give caller back the mbuf chain,
6924 * store in uio_resid the length
6925 */
6926 wakeup_read_socket = 0;
6927 if ((control->end_added == 0) ||
6928 (TAILQ_NEXT(control, next) == NULL)) {
6929 /* Need to get rlock */
6930 if (hold_rlock == 0) {
6931 SCTP_INP_READ_LOCK(inp);
6932 hold_rlock = 1;
6933 }
6934 }
6935 if (control->end_added) {
6936 out_flags |= MSG_EOR;
6937 if ((control->do_not_ref_stcb == 0) &&
6938 (control->stcb != NULL) &&
6939 ((control->spec_flags & M_NOTIFICATION) == 0))
6940 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6941 }
6942 if (control->spec_flags & M_NOTIFICATION) {
6943 out_flags |= MSG_NOTIFICATION;
6944 }
6945 #if defined(__APPLE__) && !defined(__Userspace__)
6946 #if defined(APPLE_LEOPARD)
6947 uio->uio_resid = control->length;
6948 #else
6949 uio_setresid(uio, control->length);
6950 #endif
6951 #else
6952 uio->uio_resid = control->length;
6953 #endif
6954 *mp = control->data;
6955 m = control->data;
6956 while (m) {
6957 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6958 sctp_sblog(&so->so_rcv,
6959 control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6960 }
6961 sctp_sbfree(control, stcb, &so->so_rcv, m);
6962 freed_so_far += (uint32_t)SCTP_BUF_LEN(m);
6963 freed_so_far += MSIZE;
6964 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6965 sctp_sblog(&so->so_rcv,
6966 control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
6967 }
6968 m = SCTP_BUF_NEXT(m);
6969 }
6970 control->data = control->tail_mbuf = NULL;
6971 control->length = 0;
6972 if (out_flags & MSG_EOR) {
6973 /* Done with this control */
6974 goto done_with_control;
6975 }
6976 }
6977 release:
6978 if (hold_rlock == 1) {
6979 SCTP_INP_READ_UNLOCK(inp);
6980 hold_rlock = 0;
6981 }
6982 #if defined(__Userspace__)
6983 if (hold_sblock == 0) {
6984 SOCKBUF_LOCK(&so->so_rcv);
6985 hold_sblock = 1;
6986 }
6987 #else
6988 if (hold_sblock == 1) {
6989 SOCKBUF_UNLOCK(&so->so_rcv);
6990 hold_sblock = 0;
6991 }
6992 #endif
6993 #if defined(__APPLE__) && !defined(__Userspace__)
6994 sbunlock(&so->so_rcv, 1);
6995 #endif
6996
6997 #if defined(__FreeBSD__) && !defined(__Userspace__)
6998 sbunlock(&so->so_rcv);
6999 sockbuf_lock = 0;
7000 #endif
7001
7002 release_unlocked:
7003 if (hold_sblock) {
7004 SOCKBUF_UNLOCK(&so->so_rcv);
7005 hold_sblock = 0;
7006 }
7007 if ((stcb) && (in_flags & MSG_PEEK) == 0) {
7008 if ((freed_so_far >= rwnd_req) &&
7009 (control && (control->do_not_ref_stcb == 0)) &&
7010 (no_rcv_needed == 0))
7011 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
7012 }
7013 out:
7014 if (msg_flags) {
7015 *msg_flags = out_flags;
7016 }
7017 if (((out_flags & MSG_EOR) == 0) &&
7018 ((in_flags & MSG_PEEK) == 0) &&
7019 (sinfo) &&
7020 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
7021 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
7022 struct sctp_extrcvinfo *s_extra;
7023 s_extra = (struct sctp_extrcvinfo *)sinfo;
7024 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
7025 }
7026 if (hold_rlock == 1) {
7027 SCTP_INP_READ_UNLOCK(inp);
7028 }
7029 if (hold_sblock) {
7030 SOCKBUF_UNLOCK(&so->so_rcv);
7031 }
7032 #if defined(__FreeBSD__) && !defined(__Userspace__)
7033 if (sockbuf_lock) {
7034 sbunlock(&so->so_rcv);
7035 }
7036 #endif
7037
7038 if (freecnt_applied) {
7039 /*
7040 * The lock on the socket buffer protects us so the free
7041 * code will stop. But since we used the socketbuf lock and
7042 * the sender uses the tcb_lock to increment, we need to use
7043 * the atomic add to the refcnt.
7044 */
7045 if (stcb == NULL) {
7046 #ifdef INVARIANTS
7047 panic("stcb for refcnt has gone NULL?");
7048 goto stage_left;
7049 #else
7050 goto stage_left;
7051 #endif
7052 }
7053 /* Save the value back for next time */
7054 stcb->freed_by_sorcv_sincelast = freed_so_far;
7055 atomic_add_int(&stcb->asoc.refcnt, -1);
7056 }
7057 if (SCTP_BASE_SYSCTL(sctp_logging_level) &SCTP_RECV_RWND_LOGGING_ENABLE) {
7058 if (stcb) {
7059 sctp_misc_ints(SCTP_SORECV_DONE,
7060 freed_so_far,
7061 #if defined(__APPLE__) && !defined(__Userspace__)
7062 #if defined(APPLE_LEOPARD)
7063 ((uio) ? (slen - uio->uio_resid) : slen),
7064 #else
7065 ((uio) ? (slen - uio_resid(uio)) : slen),
7066 #endif
7067 #else
7068 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
7069 #endif
7070 stcb->asoc.my_rwnd,
7071 so->so_rcv.sb_cc);
7072 } else {
7073 sctp_misc_ints(SCTP_SORECV_DONE,
7074 freed_so_far,
7075 #if defined(__APPLE__) && !defined(__Userspace__)
7076 #if defined(APPLE_LEOPARD)
7077 ((uio) ? (slen - uio->uio_resid) : slen),
7078 #else
7079 ((uio) ? (slen - uio_resid(uio)) : slen),
7080 #endif
7081 #else
7082 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
7083 #endif
7084 0,
7085 so->so_rcv.sb_cc);
7086 }
7087 }
7088 stage_left:
7089 if (wakeup_read_socket) {
7090 sctp_sorwakeup(inp, so);
7091 }
7092 return (error);
7093 }
7094
7095
7096 #ifdef SCTP_MBUF_LOGGING
7097 struct mbuf *
7098 sctp_m_free(struct mbuf *m)
7099 {
7100 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
7101 sctp_log_mb(m, SCTP_MBUF_IFREE);
7102 }
7103 return (m_free(m));
7104 }
7105
7106 void
7107 sctp_m_freem(struct mbuf *mb)
7108 {
7109 while (mb != NULL)
7110 mb = sctp_m_free(mb);
7111 }
7112
7113 #endif
7114
/*
 * Queue a peer-set-primary request for a local address: every
 * association holding the address will be asked (by the address
 * work-queue iterator, asynchronously) to make it primary.
 * Returns 0 on success, or EADDRNOTAVAIL / ENOMEM.
 */
int
sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
{
	/* Given a local address. For all associations
	 * that holds the address, request a peer-set-primary.
	 */
	struct sctp_ifa *ifa;
	struct sctp_laddr *wi;

	/* Resolve the sockaddr to one of our known interface addresses. */
	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
	if (ifa == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
		return (EADDRNOTAVAIL);
	}
	/* Now that we have the ifa we must awaken the
	 * iterator with this message.
	 */
	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
	if (wi == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	/* Now incr the count and int wi structure */
	SCTP_INCR_LADDR_COUNT();
	memset(wi, 0, sizeof(*wi));
	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
	wi->ifa = ifa;
	wi->action = SCTP_SET_PRIM_ADDR;
	/* Hold a reference on the ifa while it sits on the work queue. */
	atomic_add_int(&ifa->refcount, 1);

	/* Now add it to the work queue */
	SCTP_WQ_ADDR_LOCK();
	/*
	 * Should this really be a tailq? As it is we will process the
	 * newest first :-0
	 */
	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
	/* Arm the timer so the work queue gets drained. */
	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
	                 (struct sctp_inpcb *)NULL,
	                 (struct sctp_tcb *)NULL,
	                 (struct sctp_nets *)NULL);
	SCTP_WQ_ADDR_UNLOCK();
	return (0);
}
7159
7160 #if defined(__Userspace__)
7161 /* no sctp_soreceive for __Userspace__ now */
7162 #endif
7163
7164 #if !defined(__Userspace__)
/*
 * Protocol-specific soreceive() entry point: receive one message (or
 * notification) on an SCTP socket.  Thin wrapper around
 * sctp_sorecvmsg() that additionally:
 *   - decides whether ancillary sndrcv info should be collected,
 *   - copies the peer address back to *psa (if requested),
 *   - converts the collected sinfo into a control-message mbuf chain
 *     in *controlp (if requested and the message is not a notification).
 * Returns 0 or an errno value.
 */
int
sctp_soreceive(	struct socket *so,
		struct sockaddr **psa,
		struct uio *uio,
		struct mbuf **mp0,
		struct mbuf **controlp,
		int *flagsp)
{
	int error, fromlen;
	uint8_t sockbuf[256];	/* scratch space for the peer address */
	struct sockaddr *from;
	struct sctp_extrcvinfo sinfo;
	int filling_sinfo = 1;
	int flags;
	struct sctp_inpcb *inp;

	inp = (struct sctp_inpcb *)so->so_pcb;
	/* pickup the assoc we are reading from */
	if (inp == NULL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		return (EINVAL);
	}
	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	     sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	     sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
	    (controlp == NULL)) {
		/* user does not want the sndrcv ctl */
		filling_sinfo = 0;
	}
	if (psa) {
		from = (struct sockaddr *)sockbuf;
		fromlen = sizeof(sockbuf);
#ifdef HAVE_SA_LEN
		from->sa_len = 0;
#endif
	} else {
		from = NULL;
		fromlen = 0;
	}

#if defined(__APPLE__) && !defined(__Userspace__)
	SCTP_SOCKET_LOCK(so, 1);
#endif
	if (filling_sinfo) {
		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
	}
	if (flagsp != NULL) {
		flags = *flagsp;
	} else {
		flags = 0;
	}
	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags,
	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
	if (flagsp != NULL) {
		*flagsp = flags;
	}
	if (controlp != NULL) {
		/* copy back the sinfo in a CMSG format */
		if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) {
			*controlp = sctp_build_ctl_nchunk(inp,
			    (struct sctp_sndrcvinfo *)&sinfo);
		} else {
			/* notifications carry no sndrcv ancillary data */
			*controlp = NULL;
		}
	}
	if (psa) {
		/* copy back the address info */
#ifdef HAVE_SA_LEN
		if (from && from->sa_len) {
#else
		if (from) {
#endif
#if (defined(__FreeBSD__) || defined(_WIN32)) && !defined(__Userspace__)
			*psa = sodupsockaddr(from, M_NOWAIT);
#else
			*psa = dup_sockaddr(from, mp0 == 0);
#endif
		} else {
			*psa = NULL;
		}
	}
#if defined(__APPLE__) && !defined(__Userspace__)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
	return (error);
}
7251
7252
7253 #if defined(_WIN32) && !defined(__Userspace__)
7254 /*
7255 * General routine to allocate a hash table with control of memory flags.
7256 * is in 7.0 and beyond for sure :-)
7257 */
7258 void *
7259 sctp_hashinit_flags(int elements, struct malloc_type *type,
7260 u_long *hashmask, int flags)
7261 {
7262 long hashsize;
7263 LIST_HEAD(generic, generic) *hashtbl;
7264 int i;
7265
7266
7267 if (elements <= 0) {
7268 #ifdef INVARIANTS
7269 panic("hashinit: bad elements");
7270 #else
7271 SCTP_PRINTF("hashinit: bad elements?");
7272 elements = 1;
7273 #endif
7274 }
7275 for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
7276 continue;
7277 hashsize >>= 1;
7278 if (flags & HASH_WAITOK)
7279 hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
7280 else if (flags & HASH_NOWAIT)
7281 hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_NOWAIT);
7282 else {
7283 #ifdef INVARIANTS
7284 panic("flag incorrect in hashinit_flags");
7285 #else
7286 return (NULL);
7287 #endif
7288 }
7289
7290 /* no memory? */
7291 if (hashtbl == NULL)
7292 return (NULL);
7293
7294 for (i = 0; i < hashsize; i++)
7295 LIST_INIT(&hashtbl[i]);
7296 *hashmask = hashsize - 1;
7297 return (hashtbl);
7298 }
7299 #endif
7300
7301 #else /* __Userspace__ ifdef above sctp_soreceive */
7302 /*
7303 * __Userspace__ Defining sctp_hashinit_flags() and sctp_hashdestroy() for userland.
7304 * NOTE: We don't want multiple definitions here. So sctp_hashinit_flags() above for
7305 *__FreeBSD__ must be excluded.
7306 *
7307 */
7308
/*
 * Userspace variant of sctp_hashinit_flags(): allocate a power-of-two
 * sized hash table of LIST heads and store (size - 1) in *hashmask.
 * NOTE(review): both HASH_WAITOK and HASH_NOWAIT map to plain libc
 * malloc() here — in userland there is no wait/nowait distinction, so
 * the identical branches appear intentional.
 */
void *
sctp_hashinit_flags(int elements, struct malloc_type *type,
                    u_long *hashmask, int flags)
{
	long hashsize;
	LIST_HEAD(generic, generic) *hashtbl;
	int i;

	if (elements <= 0) {
		SCTP_PRINTF("hashinit: bad elements?");
#ifdef INVARIANTS
		return (NULL);
#else
		/* best effort: fall back to a one-bucket table */
		elements = 1;
#endif
	}
	/* Find the largest power of two not exceeding elements. */
	for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
		continue;
	hashsize >>= 1;
	/*cannot use MALLOC here because it has to be declared or defined
	  using MALLOC_DECLARE or MALLOC_DEFINE first. */
	if (flags & HASH_WAITOK)
		hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl));
	else if (flags & HASH_NOWAIT)
		hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl));
	else {
#ifdef INVARIANTS
		SCTP_PRINTF("flag incorrect in hashinit_flags.\n");
#endif
		return (NULL);
	}

	/* no memory? */
	if (hashtbl == NULL)
		return (NULL);

	for (i = 0; i < hashsize; i++)
		LIST_INIT(&hashtbl[i]);
	/* hashsize is a power of two, so size - 1 is a valid bucket mask. */
	*hashmask = hashsize - 1;
	return (hashtbl);
}
7350
7351
7352 void
7353 sctp_hashdestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask)
7354 {
7355 LIST_HEAD(generic, generic) *hashtbl, *hp;
7356
7357 hashtbl = vhashtbl;
7358 for (hp = hashtbl; hp <= &hashtbl[hashmask]; hp++)
7359 if (!LIST_EMPTY(hp)) {
7360 SCTP_PRINTF("hashdestroy: hash not empty.\n");
7361 return;
7362 }
7363 FREE(hashtbl, type);
7364 }
7365
7366
/*
 * Destroy a hash table unconditionally, without checking that the
 * chains are empty.  The per-entry freeing loop below was disabled
 * (see the retained comment): the list entries are embedded in larger
 * objects and are not individually heap-allocated, so freeing them
 * here would be wrong.  Only the bucket array itself is released.
 */
void
sctp_hashfreedestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask)
{
	LIST_HEAD(generic, generic) *hashtbl/*, *hp*/;
	/*
	LIST_ENTRY(type) *start, *temp;
	*/
	hashtbl = vhashtbl;
	/* Apparently temp is not dynamically allocated, so attempts to
	   free it results in error.
	for (hp = hashtbl; hp <= &hashtbl[hashmask]; hp++)
		if (!LIST_EMPTY(hp)) {
			start = LIST_FIRST(hp);
			while (start != NULL) {
				temp = start;
				start = start->le_next;
				SCTP_PRINTF("%s: %p \n", __func__, (void *)temp);
				FREE(temp, type);
			}
		}
	*/
	FREE(hashtbl, type);
}
7390
7391
7392 #endif
7393
7394
/*
 * Add a packed list of totaddr addresses (as produced for sctp_connectx)
 * to an association as confirmed remote addresses.
 * On any failure the association is FREED by this routine (via
 * sctp_free_assoc) and *error is set; callers must not touch stcb after
 * a non-zero *error.  Returns the number of addresses actually added.
 * Unknown address families are silently skipped; note that incr stays 0
 * for them, so sa is not advanced (the caller is expected to have
 * validated the list with sctp_connectx_helper_find first).
 */
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
			 int totaddr, int *error)
{
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;
#ifdef INET
	struct sockaddr_in *sin;
#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;
#endif

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = sizeof(struct sockaddr_in);
			sin = (struct sockaddr_in *)sa;
			/* reject wildcard, broadcast and multicast peers */
			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				                      SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
			                         SCTP_DONOT_SETSCOPE,
			                         SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				                      SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			incr = sizeof(struct sockaddr_in6);
			sin6 = (struct sockaddr_in6 *)sa;
			/* reject unspecified and multicast peers */
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				                      SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
			                         SCTP_DONOT_SETSCOPE,
			                         SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				                      SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
#if defined(__Userspace__)
		case AF_CONN:
			incr = sizeof(struct sockaddr_conn);
			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
			                         SCTP_DONOT_SETSCOPE,
			                         SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				                      SCTP_FROM_SCTPUTIL + SCTP_LOC_11);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
		default:
			break;
		}
		/* advance to the next packed sockaddr */
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
 out_now:
	return (added);
}
7491
/*
 * Validate a packed address list for sctp_connectx before any state is
 * created: count the IPv4/IPv6 addresses into *num_v4/*num_v6, verify
 * each sockaddr lies entirely within the user buffer (limit bytes),
 * reject v4-mapped IPv6 and unknown families, and make sure no address
 * already belongs to an existing association on this endpoint.
 * Returns 0 on success, EINVAL on malformed input, or EALREADY if an
 * association already exists for one of the addresses.
 */
int
sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
			  unsigned int totaddr,
			  unsigned int *num_v4, unsigned int *num_v6,
			  unsigned int limit)
{
	struct sockaddr *sa;
	struct sctp_tcb *stcb;
	unsigned int incr, at, i;

	at = 0;
	sa = addr;
	*num_v6 = *num_v4 = 0;
	/* account and validate addresses */
	if (totaddr == 0) {
		return (EINVAL);
	}
	for (i = 0; i < totaddr; i++) {
		/* sa_family must be readable before we look at it */
		if (at + sizeof(struct sockaddr) > limit) {
			return (EINVAL);
		}
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = (unsigned int)sizeof(struct sockaddr_in);
#ifdef HAVE_SA_LEN
			if (sa->sa_len != incr) {
				return (EINVAL);
			}
#endif
			(*num_v4) += 1;
			break;
#endif
#ifdef INET6
		case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			sin6 = (struct sockaddr_in6 *)sa;
			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
				/* Must be non-mapped for connectx */
				return (EINVAL);
			}
			incr = (unsigned int)sizeof(struct sockaddr_in6);
#ifdef HAVE_SA_LEN
			if (sa->sa_len != incr) {
				return (EINVAL);
			}
#endif
			(*num_v6) += 1;
			break;
		}
#endif
		default:
			return (EINVAL);
		}
		/* the full sockaddr must also fit inside the buffer */
		if ((at + incr) > limit) {
			return (EINVAL);
		}
		/* hold the inp ref while the lookup may unlock it */
		SCTP_INP_INCR_REF(inp);
		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
		if (stcb != NULL) {
			SCTP_TCB_UNLOCK(stcb);
			return (EALREADY);
		} else {
			SCTP_INP_DECR_REF(inp);
		}
		at += incr;
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
	return (0);
}
7564
7565 /*
7566 * sctp_bindx(ADD) for one address.
7567 * assumes all arguments are valid/checked by caller.
7568 */
/*
 * sctp_bindx(ADD) for one address.
 * assumes all arguments are valid/checked by caller.
 *
 * Validates the address (family, length, socket binding mode, port),
 * converts a v4-mapped IPv6 address to plain IPv4 when allowed, and
 * either performs the initial bind (if the endpoint is still unbound)
 * or adds the address to the endpoint's bound-address list.
 * Result is reported through *error (0 on success).
 */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
                       struct sockaddr *sa, uint32_t vrf_id, int *error,
                       void *p)
{
#if defined(INET) && defined(INET6)
	struct sockaddr_in sin;
#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;
#endif
#ifdef INET
	struct sockaddr_in *sinp;
#endif
	struct sockaddr *addr_to_use;
	struct sctp_inpcb *lep;
#ifdef SCTP_MVRF
	int i;
#endif
	uint16_t port;

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
#ifdef SCTP_MVRF
	/* Is the VRF one we have */
	for (i = 0; i < inp->num_vrfs; i++) {
		if (vrf_id == inp->m_vrf_ids[i]) {
			break;
		}
	}
	if (i == inp->num_vrfs) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
#endif
	switch (sa->sa_family) {
#ifdef INET6
	case AF_INET6:
#ifdef HAVE_SA_LEN
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
#endif
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)sa;
		port = sin6->sin6_port;
#ifdef INET
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* strip the mapping and bind as plain IPv4 */
			in6_sin6_2_sin(&sin, sin6);
			addr_to_use = (struct sockaddr *)&sin;
		} else {
			addr_to_use = sa;
		}
#else
		addr_to_use = sa;
#endif
		break;
#endif
#ifdef INET
	case AF_INET:
#ifdef HAVE_SA_LEN
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
#endif
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sinp = (struct sockaddr_in *)sa;
		port = sinp->sin_port;
		addr_to_use = sa;
		break;
#endif
	default:
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		/* first address on an unbound endpoint: do a regular bind */
#if !(defined(_WIN32) || defined(__Userspace__))
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
#endif
		*error = sctp_inpcb_bind(so, addr_to_use, NULL, p);
		return;
	}
	/* Validate the incoming port. */
	if ((port != 0) && (port != inp->sctp_lport)) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	/* Make sure no other endpoint has this address bound already. */
	lep = sctp_pcb_findep(addr_to_use, 1, 0, vrf_id);
	if (lep == NULL) {
		/* add the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_to_use,
		                              SCTP_ADD_IP_ADDRESS, vrf_id);
	} else {
		if (lep != inp) {
			*error = EADDRINUSE;
		}
		/* drop the reference taken by sctp_pcb_findep() */
		SCTP_INP_DECR_REF(lep);
	}
}
7702
7703 /*
7704 * sctp_bindx(DELETE) for one address.
7705 * assumes all arguments are valid/checked by caller.
7706 */
/*
 * sctp_bindx(DELETE) for one address.
 * assumes all arguments are valid/checked by caller.
 *
 * Mirrors sctp_bindx_add_address(): validates family/length/binding
 * mode, converts a v4-mapped IPv6 address to plain IPv4 when allowed,
 * then asks the address-management code to remove the address from the
 * endpoint.  Result is reported through *error (0 on success).
 */
void
sctp_bindx_delete_address(struct sctp_inpcb *inp,
                          struct sockaddr *sa, uint32_t vrf_id, int *error)
{
	struct sockaddr *addr_to_use;
#if defined(INET) && defined(INET6)
	struct sockaddr_in6 *sin6;
	struct sockaddr_in sin;
#endif
#ifdef SCTP_MVRF
	int i;
#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
#ifdef SCTP_MVRF
	/* Is the VRF one we have */
	for (i = 0; i < inp->num_vrfs; i++) {
		if (vrf_id == inp->m_vrf_ids[i]) {
			break;
		}
	}
	if (i == inp->num_vrfs) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
#endif
	switch (sa->sa_family) {
#ifdef INET6
	case AF_INET6:
#ifdef HAVE_SA_LEN
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
#endif
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
#ifdef INET
		sin6 = (struct sockaddr_in6 *)sa;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* strip the mapping and delete as plain IPv4 */
			in6_sin6_2_sin(&sin, sin6);
			addr_to_use = (struct sockaddr *)&sin;
		} else {
			addr_to_use = sa;
		}
#else
		addr_to_use = sa;
#endif
		break;
#endif
#ifdef INET
	case AF_INET:
#ifdef HAVE_SA_LEN
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
#endif
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		addr_to_use = sa;
		break;
#endif
	default:
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	/* No lock required mgmt_ep_sa does its own locking. */
	*error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, SCTP_DEL_IP_ADDRESS,
	                              vrf_id);
}
7803
7804 /*
7805 * returns the valid local address count for an assoc, taking into account
7806 * all scoping rules
7807 */
/*
 * returns the valid local address count for an assoc, taking into account
 * all scoping rules
 *
 * Bound-all endpoints walk every ifn/ifa in the association's VRF and
 * apply loopback/private/link-local/site-local scope filters (plus
 * FreeBSD jail visibility checks); subset-bound endpoints simply count
 * the endpoint's bound-address list minus restricted addresses.
 */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	int loopback_scope;
#if defined(INET)
	int ipv4_local_scope, ipv4_addr_legal;
#endif
#if defined(INET6)
	int local_scope, site_scope, ipv6_addr_legal;
#endif
#if defined(__Userspace__)
	int conn_addr_legal;
#endif
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.scope.loopback_scope;
#if defined(INET)
	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
#endif
#if defined(INET6)
	local_scope = stcb->asoc.scope.local_scope;
	site_scope = stcb->asoc.scope.site_scope;
	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
#endif
#if defined(__Userspace__)
	conn_addr_legal = stcb->asoc.scope.conn_addr_legal;
#endif
	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}

	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
#ifdef INET
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = &sctp_ifa->address.sin;
						if (sin->sin_addr.s_addr == 0) {
							/* skip unspecified addrs */
							continue;
						}
#if defined(__FreeBSD__) && !defined(__Userspace__)
						/* jail may not see this address */
						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
						                     &sin->sin_addr) != 0) {
							continue;
						}
#endif
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#endif
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

#if defined(SCTP_EMBEDDED_V6_SCOPE) && !defined(SCTP_KAME)
						struct sockaddr_in6 lsa6;
#endif
						sin6 = &sctp_ifa->address.sin6;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
#if defined(__FreeBSD__) && !defined(__Userspace__)
						/* jail may not see this address */
						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
						                     &sin6->sin6_addr) != 0) {
							continue;
						}
#endif
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
#if defined(SCTP_EMBEDDED_V6_SCOPE)
							/* recover the embedded scope id if absent */
							if (sin6->sin6_scope_id == 0) {
#ifdef SCTP_KAME
								if (sa6_recoverscope(sin6) != 0)
									/*
									 * bad link
									 * local
									 * address
									 */
									continue;
#else
								lsa6 = *sin6;
								if (in6_recoverscope(&lsa6,
								                     &lsa6.sin6_addr,
								                     NULL))
									/*
									 * bad link
									 * local
									 * address
									 */
									continue;
								sin6 = &lsa6;
#endif /* SCTP_KAME */
							}
#endif /* SCTP_EMBEDDED_V6_SCOPE */
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
#if defined(__Userspace__)
				case AF_CONN:
					if (conn_addr_legal) {
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case
		 */
		struct sctp_laddr *laddr;
		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		             sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
7974
7975 #if defined(SCTP_LOCAL_TRACE_BUF)
7976
/*
 * Append one entry to the global lock-free trace ring buffer.
 * A CAS loop claims the next slot index (wrapping to 1, with the
 * post-loop clamp mapping the wrap case back to slot 0), then the
 * entry fields are filled in without further synchronization.
 * The _WIN32 kernel build accesses sctp_log through a pointer (it may
 * be unallocated, hence the NULL check); other builds embed the
 * structure directly.  The two branches are otherwise identical.
 */
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	uint32_t saveindex, newindex;

#if defined(_WIN32) && !defined(__Userspace__)
	if (SCTP_BASE_SYSCTL(sctp_log) == NULL) {
		return;
	}
	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log)->index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log)->index, saveindex, newindex) == 0);
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[5] = f;
#else
	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
#endif
}
8027
8028 #endif
8029 #if defined(__FreeBSD__) && !defined(__Userspace__)
/*
 * udp_tun_func_t callback: receive an SCTP packet encapsulated in UDP
 * (RFC 6951 style tunneling).  Strips the UDP header out of the mbuf
 * chain while preserving the IP header, fixes up the IP length field,
 * clears the HW checksum-valid flag (see comment below), and re-injects
 * the packet into the normal SCTP input path, tagged with the UDP
 * source port.  Consumes the mbuf in all cases.
 */
static void
sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
                              const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
{
	struct ip *iph;
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	struct mbuf *sp, *last;
	struct udphdr *uhdr;
	uint16_t port;

	if ((m->m_flags & M_PKTHDR) == 0) {
		/* Can't handle one that is not a pkt hdr */
		goto out;
	}
	/* Pull the src port */
	iph = mtod(m, struct ip *);
	uhdr = (struct udphdr *)((caddr_t)iph + off);
	port = uhdr->uh_sport;
	/* Split out the mbuf chain. Leave the
	 * IP header in m, place the
	 * rest in the sp.
	 */
	sp = m_split(m, off, M_NOWAIT);
	if (sp == NULL) {
		/* Gak, drop packet, we can't do a split */
		goto out;
	}
	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
		/* Gak, packet can't have an SCTP header in it - too small */
		m_freem(sp);
		goto out;
	}
	/* Now pull up the UDP header and SCTP header together */
	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
	if (sp == NULL) {
		/* Gak pullup failed */
		goto out;
	}
	/* Trim out the UDP header */
	m_adj(sp, sizeof(struct udphdr));

	/* Now reconstruct the mbuf chain */
	for (last = m; last->m_next; last = last->m_next);
	last->m_next = sp;
	m->m_pkthdr.len += sp->m_pkthdr.len;
	/*
	 * The CSUM_DATA_VALID flags indicates that the HW checked the
	 * UDP checksum and it was valid.
	 * Since CSUM_DATA_VALID == CSUM_SCTP_VALID this would imply that
	 * the HW also verified the SCTP checksum. Therefore, clear the bit.
	 */
	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
	        "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
	        m->m_pkthdr.len,
	        if_name(m->m_pkthdr.rcvif),
	        (int)m->m_pkthdr.csum_flags, CSUM_BITS);
	m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
	iph = mtod(m, struct ip *);
	switch (iph->ip_v) {
#ifdef INET
	case IPVERSION:
		/* shrink the IP total length by the removed UDP header */
		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
		sctp_input_with_port(m, off, port);
		break;
#endif
#ifdef INET6
	case IPV6_VERSION >> 4:
		ip6 = mtod(m, struct ip6_hdr *);
		/* shrink the IPv6 payload length by the removed UDP header */
		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
		sctp6_input_with_port(&m, &off, port);
		break;
#endif
	default:
		goto out;
		break;
	}
	return;
 out:
	m_freem(m);
}
8112
8113 #ifdef INET
/*
 * ICMP error callback for the IPv4 SCTP-over-UDP tunneling socket.
 * 'vip' points at the inner (quoted) IP header carried in the ICMP
 * message.  The routine verifies that enough quoted data is present,
 * locates the embedded UDP and SCTP headers, looks up the association,
 * and checks the UDP ports and the SCTP verification tag (or, when the
 * tag is zero, the initiate tag of a quoted INIT chunk) before forwarding
 * the error to sctp_notify().  The tag checks reject blindly spoofed
 * ICMP messages.
 *
 * Locking: sctp_findassociation_addr_sa() returns with the TCB locked
 * (or with the inp refcount raised when only an endpoint matches); every
 * exit path below releases whatever was acquired.
 */
static void
sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED)
{
	struct ip *outer_ip, *inner_ip;
	struct sctphdr *sh;
	struct icmp *icmp;
	struct udphdr *udp;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_init_chunk *ch;
	struct sockaddr_in src, dst;
	uint8_t type, code;

	inner_ip = (struct ip *)vip;
	/*
	 * Walk backwards from the quoted inner IP header to the ICMP
	 * header, and from there to the outer IP header.
	 */
	icmp = (struct icmp *)((caddr_t)inner_ip -
	    (sizeof(struct icmp) - sizeof(struct ip)));
	outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
	/*
	 * Require: outer IP header + 8 bytes of ICMP + inner IP header +
	 * UDP header + 8 bytes of SCTP common header (ports + v_tag).
	 */
	if (ntohs(outer_ip->ip_len) <
	    sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) {
		return;
	}
	udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2));
	sh = (struct sctphdr *)(udp + 1);
	memset(&src, 0, sizeof(struct sockaddr_in));
	src.sin_family = AF_INET;
#ifdef HAVE_SIN_LEN
	src.sin_len = sizeof(struct sockaddr_in);
#endif
	src.sin_port = sh->src_port;
	src.sin_addr = inner_ip->ip_src;
	memset(&dst, 0, sizeof(struct sockaddr_in));
	dst.sin_family = AF_INET;
#ifdef HAVE_SIN_LEN
	dst.sin_len = sizeof(struct sockaddr_in);
#endif
	dst.sin_port = sh->dest_port;
	dst.sin_addr = inner_ip->ip_dst;
	/*
	 * 'dst' holds the dest of the packet that failed to be sent.
	 * 'src' holds our local endpoint address. Thus we reverse
	 * the dst and the src in the lookup.
	 */
	inp = NULL;
	net = NULL;
	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
	                                    (struct sockaddr *)&src,
	                                    &inp, &net, 1,
	                                    SCTP_DEFAULT_VRFID);
	if ((stcb != NULL) &&
	    (net != NULL) &&
	    (inp != NULL)) {
		/* Check the UDP port numbers */
		if ((udp->uh_dport != net->port) ||
		    (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
			SCTP_TCB_UNLOCK(stcb);
			return;
		}
		/* Check the verification tag */
		if (ntohl(sh->v_tag) != 0) {
			/*
			 * This must be the verification tag used
			 * for sending out packets. We don't
			 * consider packets reflecting the
			 * verification tag.
			 */
			if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		} else {
			/*
			 * Zero tag: only acceptable for a quoted INIT.
			 * Need 8 more bytes of UDP header plus 20 bytes
			 * (SCTP common header + chunk header + initiate
			 * tag) of quoted data to verify it.
			 */
			if (ntohs(outer_ip->ip_len) >=
			    sizeof(struct ip) +
			    8 + (inner_ip->ip_hl << 2) + 8 + 20) {
				/*
				 * In this case we can check if we
				 * got an INIT chunk and if the
				 * initiate tag matches.
				 */
				ch = (struct sctp_init_chunk *)(sh + 1);
				if ((ch->ch.chunk_type != SCTP_INITIATION) ||
				    (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) {
					SCTP_TCB_UNLOCK(stcb);
					return;
				}
			} else {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		}
		type = icmp->icmp_type;
		code = icmp->icmp_code;
		if ((type == ICMP_UNREACH) &&
		    (code == ICMP_UNREACH_PORT)) {
			/*
			 * UDP port unreachable means the tunnel endpoint is
			 * gone; report it as protocol unreachable to SCTP.
			 */
			code = ICMP_UNREACH_PROTOCOL;
		}
		sctp_notify(inp, stcb, net, type, code,
		            ntohs(inner_ip->ip_len),
		            (uint32_t)ntohs(icmp->icmp_nextmtu));
#if defined(__Userspace__)
		if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) &&
		    (stcb->sctp_socket != NULL)) {
			struct socket *upcall_socket;

			/* Run the socket upcall if one is set and an error is pending. */
			upcall_socket = stcb->sctp_socket;
			SOCK_LOCK(upcall_socket);
			soref(upcall_socket);
			SOCK_UNLOCK(upcall_socket);
			if ((upcall_socket->so_upcall != NULL) &&
			    (upcall_socket->so_error != 0)) {
				(*upcall_socket->so_upcall)(upcall_socket, upcall_socket->so_upcallarg, M_NOWAIT);
			}
			ACCEPT_LOCK();
			SOCK_LOCK(upcall_socket);
			sorele(upcall_socket);
		}
#endif
	} else {
		if ((stcb == NULL) && (inp != NULL)) {
			/* reduce ref-count */
			SCTP_INP_WLOCK(inp);
			SCTP_INP_DECR_REF(inp);
			SCTP_INP_WUNLOCK(inp);
		}
		if (stcb) {
			SCTP_TCB_UNLOCK(stcb);
		}
	}
	return;
}
8244 #endif
8245
8246 #ifdef INET6
/*
 * ICMPv6 error callback for the IPv6 SCTP-over-UDP tunneling socket.
 * 'd' is a struct ip6ctlparam describing the ICMPv6 message and the mbuf
 * holding the quoted offending packet.  Unlike the IPv4 variant, the
 * quoted headers are copied out of the mbuf chain with m_copydata()
 * rather than accessed in place.  The routine checks the UDP ports and
 * the SCTP verification tag (or, for a zero tag, the initiate tag of a
 * quoted INIT chunk) before forwarding the error to sctp6_notify().
 *
 * Locking: sctp_findassociation_addr_sa() returns with the TCB locked
 * (or with the inp refcount raised when only an endpoint matches); every
 * exit path below releases whatever was acquired.
 */
static void
sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED)
{
	struct ip6ctlparam *ip6cp;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctphdr sh;
	struct udphdr udp;
	struct sockaddr_in6 src, dst;
	uint8_t type, code;

	ip6cp = (struct ip6ctlparam *)d;
	/*
	 * XXX: We assume that when IPV6 is non NULL, M and OFF are
	 * valid.
	 */
	if (ip6cp->ip6c_m == NULL) {
		return;
	}
	/* Check if we can safely examine the ports and the
	 * verification tag of the SCTP common header.
	 */
	if (ip6cp->ip6c_m->m_pkthdr.len <
	    ip6cp->ip6c_off + sizeof(struct udphdr)+ offsetof(struct sctphdr, checksum)) {
		return;
	}
	/* Copy out the UDP header. */
	memset(&udp, 0, sizeof(struct udphdr));
	m_copydata(ip6cp->ip6c_m,
	           ip6cp->ip6c_off,
	           sizeof(struct udphdr),
	           (caddr_t)&udp);
	/* Copy out the port numbers and the verification tag. */
	memset(&sh, 0, sizeof(struct sctphdr));
	m_copydata(ip6cp->ip6c_m,
	           ip6cp->ip6c_off + sizeof(struct udphdr),
	           sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t),
	           (caddr_t)&sh);
	memset(&src, 0, sizeof(struct sockaddr_in6));
	src.sin6_family = AF_INET6;
#ifdef HAVE_SIN6_LEN
	src.sin6_len = sizeof(struct sockaddr_in6);
#endif
	src.sin6_port = sh.src_port;
	src.sin6_addr = ip6cp->ip6c_ip6->ip6_src;
#if defined(__FreeBSD__) && !defined(__Userspace__)
	/* Re-embed the scope id for link-local addresses. */
	if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
		return;
	}
#endif
	memset(&dst, 0, sizeof(struct sockaddr_in6));
	dst.sin6_family = AF_INET6;
#ifdef HAVE_SIN6_LEN
	dst.sin6_len = sizeof(struct sockaddr_in6);
#endif
	dst.sin6_port = sh.dest_port;
	dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst;
#if defined(__FreeBSD__) && !defined(__Userspace__)
	if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
		return;
	}
#endif
	/*
	 * 'dst' is the peer of the failed packet, 'src' our local address;
	 * reverse them for the association lookup (as in the IPv4 case).
	 */
	inp = NULL;
	net = NULL;
	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
	                                    (struct sockaddr *)&src,
	                                    &inp, &net, 1, SCTP_DEFAULT_VRFID);
	if ((stcb != NULL) &&
	    (net != NULL) &&
	    (inp != NULL)) {
		/* Check the UDP port numbers */
		if ((udp.uh_dport != net->port) ||
		    (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
			SCTP_TCB_UNLOCK(stcb);
			return;
		}
		/* Check the verification tag */
		if (ntohl(sh.v_tag) != 0) {
			/*
			 * This must be the verification tag used for
			 * sending out packets. We don't consider
			 * packets reflecting the verification tag.
			 */
			if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		} else {
#if defined(__FreeBSD__) && !defined(__Userspace__)
			/*
			 * Zero tag: only acceptable for a quoted INIT whose
			 * initiate tag we can read and verify.
			 */
			if (ip6cp->ip6c_m->m_pkthdr.len >=
			    ip6cp->ip6c_off + sizeof(struct udphdr) +
			                      sizeof(struct sctphdr) +
			                      sizeof(struct sctp_chunkhdr) +
			                      offsetof(struct sctp_init, a_rwnd)) {
				/*
				 * In this case we can check if we
				 * got an INIT chunk and if the
				 * initiate tag matches.
				 */
				uint32_t initiate_tag;
				uint8_t chunk_type;

				m_copydata(ip6cp->ip6c_m,
				           ip6cp->ip6c_off +
				           sizeof(struct udphdr) +
				           sizeof(struct sctphdr),
				           sizeof(uint8_t),
				           (caddr_t)&chunk_type);
				m_copydata(ip6cp->ip6c_m,
				           ip6cp->ip6c_off +
				           sizeof(struct udphdr) +
				           sizeof(struct sctphdr) +
				           sizeof(struct sctp_chunkhdr),
				           sizeof(uint32_t),
				           (caddr_t)&initiate_tag);
				if ((chunk_type != SCTP_INITIATION) ||
				    (ntohl(initiate_tag) != stcb->asoc.my_vtag)) {
					SCTP_TCB_UNLOCK(stcb);
					return;
				}
			} else {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
#else
			SCTP_TCB_UNLOCK(stcb);
			return;
#endif
		}
		type = ip6cp->ip6c_icmp6->icmp6_type;
		code = ip6cp->ip6c_icmp6->icmp6_code;
		if ((type == ICMP6_DST_UNREACH) &&
		    (code == ICMP6_DST_UNREACH_NOPORT)) {
			/*
			 * UDP port unreachable means the tunnel endpoint is
			 * gone; report it as an unrecognized next header.
			 */
			type = ICMP6_PARAM_PROB;
			code = ICMP6_PARAMPROB_NEXTHEADER;
		}
		sctp6_notify(inp, stcb, net, type, code,
		             ntohl(ip6cp->ip6c_icmp6->icmp6_mtu));
#if defined(__Userspace__)
		if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) &&
		    (stcb->sctp_socket != NULL)) {
			struct socket *upcall_socket;

			/* Run the socket upcall if one is set and an error is pending. */
			upcall_socket = stcb->sctp_socket;
			SOCK_LOCK(upcall_socket);
			soref(upcall_socket);
			SOCK_UNLOCK(upcall_socket);
			if ((upcall_socket->so_upcall != NULL) &&
			    (upcall_socket->so_error != 0)) {
				(*upcall_socket->so_upcall)(upcall_socket, upcall_socket->so_upcallarg, M_NOWAIT);
			}
			ACCEPT_LOCK();
			SOCK_LOCK(upcall_socket);
			sorele(upcall_socket);
		}
#endif
	} else {
		if ((stcb == NULL) && (inp != NULL)) {
			/* reduce inp's ref-count */
			SCTP_INP_WLOCK(inp);
			SCTP_INP_DECR_REF(inp);
			SCTP_INP_WUNLOCK(inp);
		}
		if (stcb) {
			SCTP_TCB_UNLOCK(stcb);
		}
	}
}
8416 #endif
8417
/*
 * Stop SCTP-over-UDP tunneling: close and clear the per-address-family
 * tunneling sockets created by sctp_over_udp_start().  Safe to call when
 * no tunneling socket exists.
 */
void
sctp_over_udp_stop(void)
{
	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock() for writing!
	 */
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		soclose(SCTP_BASE_INFO(udp4_tun_socket));
		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		soclose(SCTP_BASE_INFO(udp6_tun_socket));
		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
	}
#endif
}
8437
/*
 * Start SCTP-over-UDP tunneling: for each enabled address family, create
 * a kernel UDP socket, register the tunneling packet/ICMP hooks via
 * udp_set_kernel_tunneling(), and bind the socket to the configured
 * tunneling port.
 *
 * Returns 0 on success, or an errno value (EINVAL if no port is
 * configured, EALREADY if tunneling is already running, or the error
 * from socket setup).  On any setup failure the partially created state
 * is torn down via sctp_over_udp_stop() before returning.
 */
int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;
#ifdef INET
	struct sockaddr_in sin;
#endif
#ifdef INET6
	struct sockaddr_in6 sin6;
#endif
	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock() for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (ntohs(port) == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET
	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
	                    SOCK_DGRAM, IPPROTO_UDP,
	                    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
	                                    sctp_recv_udp_tunneled_packet,
	                                    sctp_recv_icmp_tunneled_packet,
	                                    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin, 0, sizeof(struct sockaddr_in));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
	                  (struct sockaddr *)&sin, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
#ifdef INET6
	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
	                    SOCK_DGRAM, IPPROTO_UDP,
	                    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
	                                    sctp_recv_udp_tunneled_packet,
	                                    sctp_recv_icmp6_tunneled_packet,
	                                    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin6, 0, sizeof(struct sockaddr_in6));
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
	                  (struct sockaddr *)&sin6, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
	return (0);
}
8523 #endif
8524
/*
 * sctp_min_mtu() returns the smallest of the non-zero arguments;
 * an argument of zero means "not set" and is skipped.
 * If all arguments are zero, zero is returned.
 */
uint32_t
sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
{
	uint32_t smallest;

	/* Start with "unset" and fold in each non-zero candidate. */
	smallest = 0;
	if (mtu1 > 0) {
		smallest = mtu1;
	}
	if ((mtu2 > 0) && ((smallest == 0) || (mtu2 < smallest))) {
		smallest = mtu2;
	}
	if ((mtu3 > 0) && ((smallest == 0) || (mtu3 < smallest))) {
		smallest = mtu3;
	}
	return (smallest);
}
8558
8559 #if defined(__FreeBSD__) && !defined(__Userspace__)
8560 void
8561 sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
8562 {
8563 struct in_conninfo inc;
8564
8565 memset(&inc, 0, sizeof(struct in_conninfo));
8566 inc.inc_fibnum = fibnum;
8567 switch (addr->sa.sa_family) {
8568 #ifdef INET
8569 case AF_INET:
8570 inc.inc_faddr = addr->sin.sin_addr;
8571 break;
8572 #endif
8573 #ifdef INET6
8574 case AF_INET6:
8575 inc.inc_flags |= INC_ISIPV6;
8576 inc.inc6_faddr = addr->sin6.sin6_addr;
8577 break;
8578 #endif
8579 default:
8580 return;
8581 }
8582 tcp_hc_updatemtu(&inc, (u_long)mtu);
8583 }
8584
8585 uint32_t
8586 sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
8587 {
8588 struct in_conninfo inc;
8589
8590 memset(&inc, 0, sizeof(struct in_conninfo));
8591 inc.inc_fibnum = fibnum;
8592 switch (addr->sa.sa_family) {
8593 #ifdef INET
8594 case AF_INET:
8595 inc.inc_faddr = addr->sin.sin_addr;
8596 break;
8597 #endif
8598 #ifdef INET6
8599 case AF_INET6:
8600 inc.inc_flags |= INC_ISIPV6;
8601 inc.inc6_faddr = addr->sin6.sin6_addr;
8602 break;
8603 #endif
8604 default:
8605 return (0);
8606 }
8607 return ((uint32_t)tcp_hc_getmtu(&inc));
8608 }
8609 #endif
8610
/*
 * Set the association's main state to new_state while preserving any
 * substate bits.  new_state must contain no bits outside SCTP_STATE_MASK
 * (asserted).  Entering any of the SHUTDOWN states clears the
 * SHUTDOWN_PENDING substate.  Fires the state__change DTrace probe when
 * the masked state actually changes, except for the initial
 * EMPTY -> INUSE transition.
 */
void
sctp_set_state(struct sctp_tcb *stcb, int new_state)
{
#if defined(KDTRACE_HOOKS)
	int old_state = stcb->asoc.state;
#endif

	KASSERT((new_state & ~SCTP_STATE_MASK) == 0,
	        ("sctp_set_state: Can't set substate (new_state = %x)",
	        new_state));
	/* Replace the state bits, keep the substate bits. */
	stcb->asoc.state = (stcb->asoc.state & ~SCTP_STATE_MASK) | new_state;
	if ((new_state == SCTP_STATE_SHUTDOWN_RECEIVED) ||
	    (new_state == SCTP_STATE_SHUTDOWN_SENT) ||
	    (new_state == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
	}
#if defined(KDTRACE_HOOKS)
	if (((old_state & SCTP_STATE_MASK) != new_state) &&
	    !(((old_state & SCTP_STATE_MASK) == SCTP_STATE_EMPTY) &&
	      (new_state == SCTP_STATE_INUSE))) {
		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
	}
#endif
}
8635
/*
 * Add substate bit(s) to the association without changing the main state.
 * substate must contain no bits inside SCTP_STATE_MASK (asserted).
 * Fires the state__change DTrace probe when ABOUT_TO_BE_FREED or
 * SHUTDOWN_PENDING is newly set.
 */
void
sctp_add_substate(struct sctp_tcb *stcb, int substate)
{
#if defined(KDTRACE_HOOKS)
	int old_state = stcb->asoc.state;
#endif

	KASSERT((substate & SCTP_STATE_MASK) == 0,
	        ("sctp_add_substate: Can't set state (substate = %x)",
	        substate));
	stcb->asoc.state |= substate;
#if defined(KDTRACE_HOOKS)
	if (((substate & SCTP_STATE_ABOUT_TO_BE_FREED) &&
	     ((old_state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) ||
	    ((substate & SCTP_STATE_SHUTDOWN_PENDING) &&
	     ((old_state & SCTP_STATE_SHUTDOWN_PENDING) == 0))) {
		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
	}
#endif
}
8656
8657