1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
5 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * a) Redistributions of source code must retain the above copyright notice,
12 * this list of conditions and the following disclaimer.
13 *
14 * b) Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the distribution.
17 *
18 * c) Neither the name of Cisco Systems, Inc. nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35 #if defined(__FreeBSD__) && !defined(__Userspace__)
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD: head/sys/netinet/sctputil.c 366482 2020-10-06 11:08:52Z tuexen $");
38 #endif
39
40 #include <netinet/sctp_os.h>
41 #include <netinet/sctp_pcb.h>
42 #include <netinet/sctputil.h>
43 #include <netinet/sctp_var.h>
44 #include <netinet/sctp_sysctl.h>
45 #ifdef INET6
46 #if defined(__Userspace__) || defined(__FreeBSD__)
47 #include <netinet6/sctp6_var.h>
48 #endif
49 #endif
50 #include <netinet/sctp_header.h>
51 #include <netinet/sctp_output.h>
52 #include <netinet/sctp_uio.h>
53 #include <netinet/sctp_timer.h>
54 #include <netinet/sctp_indata.h>
55 #include <netinet/sctp_auth.h>
56 #include <netinet/sctp_asconf.h>
57 #include <netinet/sctp_bsd_addr.h>
58 #if defined(__Userspace__)
59 #include <netinet/sctp_constants.h>
60 #endif
61 #if defined(__FreeBSD__) && !defined(__Userspace__)
62 #include <netinet/sctp_kdtrace.h>
63 #if defined(INET6) || defined(INET)
64 #include <netinet/tcp_var.h>
65 #endif
66 #include <netinet/udp.h>
67 #include <netinet/udp_var.h>
68 #include <sys/proc.h>
69 #ifdef INET6
70 #include <netinet/icmp6.h>
71 #endif
72 #endif
73
74 #if defined(_WIN32) && !defined(__Userspace__)
75 #if !defined(SCTP_LOCAL_TRACE_BUF)
76 #include "eventrace_netinet.h"
77 #include "sctputil.tmh" /* this is the file that will be auto generated */
78 #endif
79 #else
80 #ifndef KTR_SCTP
81 #define KTR_SCTP KTR_SUBSYS
82 #endif
83 #endif
84
85 extern const struct sctp_cc_functions sctp_cc_functions[];
86 extern const struct sctp_ss_functions sctp_ss_functions[];
87
void
sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/*
	 * Trace a socket-buffer accounting change.  Zero the entry first:
	 * SCTP_CTR6 reads it back through the "misc" union view, which can
	 * cover bytes never written via the "sb" view (matches rto_logging()).
	 */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.sb.stcb = stcb;
	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
	if (stcb)
		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
	else
		sctp_clog.x.sb.stcb_sbcc = 0;
	sctp_clog.x.sb.incr = incr;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SB,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
110
111 void
sctp_log_closing(struct sctp_inpcb * inp,struct sctp_tcb * stcb,int16_t loc)112 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
113 {
114 #if defined(SCTP_LOCAL_TRACE_BUF)
115 struct sctp_cwnd_log sctp_clog;
116
117 sctp_clog.x.close.inp = (void *)inp;
118 sctp_clog.x.close.sctp_flags = inp->sctp_flags;
119 if (stcb) {
120 sctp_clog.x.close.stcb = (void *)stcb;
121 sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
122 } else {
123 sctp_clog.x.close.stcb = 0;
124 sctp_clog.x.close.state = 0;
125 }
126 sctp_clog.x.close.loc = loc;
127 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
128 SCTP_LOG_EVENT_CLOSE,
129 0,
130 sctp_clog.x.misc.log1,
131 sctp_clog.x.misc.log2,
132 sctp_clog.x.misc.log3,
133 sctp_clog.x.misc.log4);
134 #endif
135 }
136
void
rto_logging(struct sctp_nets *net, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log clog;

	/* Trace the current RTT estimate for this destination address. */
	memset(&clog, 0, sizeof(clog));
	clog.x.rto.net = (void *)net;
	clog.x.rto.rtt = net->rtt / 1000;	/* scale down by 1000 */
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RTT, from,
	    clog.x.misc.log1, clog.x.misc.log2,
	    clog.x.misc.log3, clog.x.misc.log4);
#endif
}
155
156 void
sctp_log_strm_del_alt(struct sctp_tcb * stcb,uint32_t tsn,uint16_t sseq,uint16_t stream,int from)157 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
158 {
159 #if defined(SCTP_LOCAL_TRACE_BUF)
160 struct sctp_cwnd_log sctp_clog;
161
162 sctp_clog.x.strlog.stcb = stcb;
163 sctp_clog.x.strlog.n_tsn = tsn;
164 sctp_clog.x.strlog.n_sseq = sseq;
165 sctp_clog.x.strlog.e_tsn = 0;
166 sctp_clog.x.strlog.e_sseq = 0;
167 sctp_clog.x.strlog.strm = stream;
168 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
169 SCTP_LOG_EVENT_STRM,
170 from,
171 sctp_clog.x.misc.log1,
172 sctp_clog.x.misc.log2,
173 sctp_clog.x.misc.log3,
174 sctp_clog.x.misc.log4);
175 #endif
176 }
177
void
sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/*
	 * Trace a Nagle-algorithm decision together with queue/flight counts.
	 * Zero first: SCTP_CTR6 reads the entry via the "misc" union view.
	 */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.nagle.stcb = (void *)stcb;
	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_NAGLE,
	    action,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
198
199 void
sctp_log_sack(uint32_t old_cumack,uint32_t cumack,uint32_t tsn,uint16_t gaps,uint16_t dups,int from)200 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
201 {
202 #if defined(SCTP_LOCAL_TRACE_BUF)
203 struct sctp_cwnd_log sctp_clog;
204
205 sctp_clog.x.sack.cumack = cumack;
206 sctp_clog.x.sack.oldcumack = old_cumack;
207 sctp_clog.x.sack.tsn = tsn;
208 sctp_clog.x.sack.numGaps = gaps;
209 sctp_clog.x.sack.numDups = dups;
210 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
211 SCTP_LOG_EVENT_SACK,
212 from,
213 sctp_clog.x.misc.log1,
214 sctp_clog.x.misc.log2,
215 sctp_clog.x.misc.log3,
216 sctp_clog.x.misc.log4);
217 #endif
218 }
219
220 void
sctp_log_map(uint32_t map,uint32_t cum,uint32_t high,int from)221 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
222 {
223 #if defined(SCTP_LOCAL_TRACE_BUF)
224 struct sctp_cwnd_log sctp_clog;
225
226 memset(&sctp_clog, 0, sizeof(sctp_clog));
227 sctp_clog.x.map.base = map;
228 sctp_clog.x.map.cum = cum;
229 sctp_clog.x.map.high = high;
230 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
231 SCTP_LOG_EVENT_MAP,
232 from,
233 sctp_clog.x.misc.log1,
234 sctp_clog.x.misc.log2,
235 sctp_clog.x.misc.log3,
236 sctp_clog.x.misc.log4);
237 #endif
238 }
239
240 void
sctp_log_fr(uint32_t biggest_tsn,uint32_t biggest_new_tsn,uint32_t tsn,int from)241 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
242 {
243 #if defined(SCTP_LOCAL_TRACE_BUF)
244 struct sctp_cwnd_log sctp_clog;
245
246 memset(&sctp_clog, 0, sizeof(sctp_clog));
247 sctp_clog.x.fr.largest_tsn = biggest_tsn;
248 sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
249 sctp_clog.x.fr.tsn = tsn;
250 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
251 SCTP_LOG_EVENT_FR,
252 from,
253 sctp_clog.x.misc.log1,
254 sctp_clog.x.misc.log2,
255 sctp_clog.x.misc.log3,
256 sctp_clog.x.misc.log4);
257 #endif
258 }
259
260 #ifdef SCTP_MBUF_LOGGING
void
sctp_log_mb(struct mbuf *m, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/*
	 * Trace one mbuf's flags, length, data pointer and (if the mbuf
	 * carries external storage) its ext base/refcount.  Zero first:
	 * on __APPLE__ the refcnt field is never written, and SCTP_CTR6
	 * reads the entry via the "misc" union view.
	 */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.mb.mp = m;
	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
	if (SCTP_BUF_IS_EXTENDED(m)) {
		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
#if defined(__APPLE__) && !defined(__Userspace__)
		/* APPLE does not use a ref_cnt, but a forward/backward ref queue */
#else
		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
#endif
	} else {
		sctp_clog.x.mb.ext = 0;
		sctp_clog.x.mb.refcnt = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBUF,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
291
292 void
sctp_log_mbc(struct mbuf * m,int from)293 sctp_log_mbc(struct mbuf *m, int from)
294 {
295 struct mbuf *mat;
296
297 for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
298 sctp_log_mb(mat, from);
299 }
300 }
301 #endif
302
void
sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/*
	 * Trace a stream-delivery event for 'control', optionally paired
	 * with the queue position 'poschk' it was checked against.
	 */
	if (control == NULL) {
		SCTP_PRINTF("Gak log of NULL?\n");
		return;
	}
	/* Zero first: SCTP_CTR6 reads the entry via the "misc" union view. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.strlog.stcb = control->stcb;
	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
	sctp_clog.x.strlog.strm = control->sinfo_stream;
	if (poschk != NULL) {
		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
	} else {
		sctp_clog.x.strlog.e_tsn = 0;
		sctp_clog.x.strlog.e_sseq = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
333
334 void
sctp_log_cwnd(struct sctp_tcb * stcb,struct sctp_nets * net,int augment,uint8_t from)335 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
336 {
337 #if defined(SCTP_LOCAL_TRACE_BUF)
338 struct sctp_cwnd_log sctp_clog;
339
340 sctp_clog.x.cwnd.net = net;
341 if (stcb->asoc.send_queue_cnt > 255)
342 sctp_clog.x.cwnd.cnt_in_send = 255;
343 else
344 sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
345 if (stcb->asoc.stream_queue_cnt > 255)
346 sctp_clog.x.cwnd.cnt_in_str = 255;
347 else
348 sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
349
350 if (net) {
351 sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
352 sctp_clog.x.cwnd.inflight = net->flight_size;
353 sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
354 sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
355 sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
356 }
357 if (SCTP_CWNDLOG_PRESEND == from) {
358 sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
359 }
360 sctp_clog.x.cwnd.cwnd_augment = augment;
361 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
362 SCTP_LOG_EVENT_CWND,
363 from,
364 sctp_clog.x.misc.log1,
365 sctp_clog.x.misc.log2,
366 sctp_clog.x.misc.log3,
367 sctp_clog.x.misc.log4);
368 #endif
369 }
370
371 #if !defined(__APPLE__) && !defined(__Userspace__)
void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	/*
	 * Trace a snapshot of lock ownership for the given endpoint and
	 * association.  Each field records whether the current thread owns
	 * the corresponding mutex (FreeBSD kernel build only); entries for
	 * unavailable objects are marked SCTP_LOCK_UNKNOWN.
	 */
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *) inp->sctp_socket;

	} else {
		sctp_clog.x.lock.sock = (void *) NULL;
	}
	sctp_clog.x.lock.inp = (void *) inp;
#if defined(__FreeBSD__)
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		/*
		 * NOTE(review): sock_lock and sockrcvbuf_lock both sample
		 * so_rcv.sb_mtx — looks intentional for this trace, but
		 * confirm sock_lock was not meant to sample a socket-level
		 * lock instead.
		 */
		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
#endif
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
419 #endif
420
421 void
sctp_log_maxburst(struct sctp_tcb * stcb,struct sctp_nets * net,int error,int burst,uint8_t from)422 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
423 {
424 #if defined(SCTP_LOCAL_TRACE_BUF)
425 struct sctp_cwnd_log sctp_clog;
426
427 memset(&sctp_clog, 0, sizeof(sctp_clog));
428 sctp_clog.x.cwnd.net = net;
429 sctp_clog.x.cwnd.cwnd_new_value = error;
430 sctp_clog.x.cwnd.inflight = net->flight_size;
431 sctp_clog.x.cwnd.cwnd_augment = burst;
432 if (stcb->asoc.send_queue_cnt > 255)
433 sctp_clog.x.cwnd.cnt_in_send = 255;
434 else
435 sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
436 if (stcb->asoc.stream_queue_cnt > 255)
437 sctp_clog.x.cwnd.cnt_in_str = 255;
438 else
439 sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
440 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
441 SCTP_LOG_EVENT_MAXBURST,
442 from,
443 sctp_clog.x.misc.log1,
444 sctp_clog.x.misc.log2,
445 sctp_clog.x.misc.log3,
446 sctp_clog.x.misc.log4);
447 #endif
448 }
449
450 void
sctp_log_rwnd(uint8_t from,uint32_t peers_rwnd,uint32_t snd_size,uint32_t overhead)451 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
452 {
453 #if defined(SCTP_LOCAL_TRACE_BUF)
454 struct sctp_cwnd_log sctp_clog;
455
456 sctp_clog.x.rwnd.rwnd = peers_rwnd;
457 sctp_clog.x.rwnd.send_size = snd_size;
458 sctp_clog.x.rwnd.overhead = overhead;
459 sctp_clog.x.rwnd.new_rwnd = 0;
460 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
461 SCTP_LOG_EVENT_RWND,
462 from,
463 sctp_clog.x.misc.log1,
464 sctp_clog.x.misc.log2,
465 sctp_clog.x.misc.log3,
466 sctp_clog.x.misc.log4);
467 #endif
468 }
469
470 void
sctp_log_rwnd_set(uint8_t from,uint32_t peers_rwnd,uint32_t flight_size,uint32_t overhead,uint32_t a_rwndval)471 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
472 {
473 #if defined(SCTP_LOCAL_TRACE_BUF)
474 struct sctp_cwnd_log sctp_clog;
475
476 sctp_clog.x.rwnd.rwnd = peers_rwnd;
477 sctp_clog.x.rwnd.send_size = flight_size;
478 sctp_clog.x.rwnd.overhead = overhead;
479 sctp_clog.x.rwnd.new_rwnd = a_rwndval;
480 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
481 SCTP_LOG_EVENT_RWND,
482 from,
483 sctp_clog.x.misc.log1,
484 sctp_clog.x.misc.log2,
485 sctp_clog.x.misc.log3,
486 sctp_clog.x.misc.log4);
487 #endif
488 }
489
490 #ifdef SCTP_MBCNT_LOGGING
491 static void
sctp_log_mbcnt(uint8_t from,uint32_t total_oq,uint32_t book,uint32_t total_mbcnt_q,uint32_t mbcnt)492 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
493 {
494 #if defined(SCTP_LOCAL_TRACE_BUF)
495 struct sctp_cwnd_log sctp_clog;
496
497 sctp_clog.x.mbcnt.total_queue_size = total_oq;
498 sctp_clog.x.mbcnt.size_change = book;
499 sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
500 sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
501 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
502 SCTP_LOG_EVENT_MBCNT,
503 from,
504 sctp_clog.x.misc.log1,
505 sctp_clog.x.misc.log2,
506 sctp_clog.x.misc.log3,
507 sctp_clog.x.misc.log4);
508 #endif
509 }
510 #endif
511
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	/* Generic trace hook: log four caller-supplied words verbatim. */
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
#endif
}
522
523 void
sctp_wakeup_log(struct sctp_tcb * stcb,uint32_t wake_cnt,int from)524 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
525 {
526 #if defined(SCTP_LOCAL_TRACE_BUF)
527 struct sctp_cwnd_log sctp_clog;
528
529 sctp_clog.x.wake.stcb = (void *)stcb;
530 sctp_clog.x.wake.wake_cnt = wake_cnt;
531 sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
532 sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
533 sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
534
535 if (stcb->asoc.stream_queue_cnt < 0xff)
536 sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
537 else
538 sctp_clog.x.wake.stream_qcnt = 0xff;
539
540 if (stcb->asoc.chunks_on_out_queue < 0xff)
541 sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
542 else
543 sctp_clog.x.wake.chunks_on_oque = 0xff;
544
545 sctp_clog.x.wake.sctpflags = 0;
546 /* set in the defered mode stuff */
547 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
548 sctp_clog.x.wake.sctpflags |= 1;
549 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
550 sctp_clog.x.wake.sctpflags |= 2;
551 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
552 sctp_clog.x.wake.sctpflags |= 4;
553 /* what about the sb */
554 if (stcb->sctp_socket) {
555 struct socket *so = stcb->sctp_socket;
556
557 sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
558 } else {
559 sctp_clog.x.wake.sbflags = 0xff;
560 }
561 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
562 SCTP_LOG_EVENT_WAKE,
563 from,
564 sctp_clog.x.misc.log1,
565 sctp_clog.x.misc.log2,
566 sctp_clog.x.misc.log3,
567 sctp_clog.x.misc.log4);
568 #endif
569 }
570
571 void
sctp_log_block(uint8_t from,struct sctp_association * asoc,ssize_t sendlen)572 sctp_log_block(uint8_t from, struct sctp_association *asoc, ssize_t sendlen)
573 {
574 #if defined(SCTP_LOCAL_TRACE_BUF)
575 struct sctp_cwnd_log sctp_clog;
576
577 sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
578 sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
579 sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
580 sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
581 sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
582 sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight/1024);
583 sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
584 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
585 SCTP_LOG_EVENT_BLOCK,
586 from,
587 sctp_clog.x.misc.log1,
588 sctp_clog.x.misc.log2,
589 sctp_clog.x.misc.log3,
590 sctp_clog.x.misc.log4);
591 #endif
592 }
593
int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/*
	 * Stub: the trace log is retrieved via the kernel tracing tools
	 * rather than through this socket option, so nothing is filled in.
	 */
	/* May need to fix this if ktrdump does not work */
	return (0);
}
600
601 #ifdef SCTP_AUDITING_ENABLED
602 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
603 static int sctp_audit_indx = 0;
604
605 static
606 void
sctp_print_audit_report(void)607 sctp_print_audit_report(void)
608 {
609 int i;
610 int cnt;
611
612 cnt = 0;
613 for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
614 if ((sctp_audit_data[i][0] == 0xe0) &&
615 (sctp_audit_data[i][1] == 0x01)) {
616 cnt = 0;
617 SCTP_PRINTF("\n");
618 } else if (sctp_audit_data[i][0] == 0xf0) {
619 cnt = 0;
620 SCTP_PRINTF("\n");
621 } else if ((sctp_audit_data[i][0] == 0xc0) &&
622 (sctp_audit_data[i][1] == 0x01)) {
623 SCTP_PRINTF("\n");
624 cnt = 0;
625 }
626 SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
627 (uint32_t) sctp_audit_data[i][1]);
628 cnt++;
629 if ((cnt % 14) == 0)
630 SCTP_PRINTF("\n");
631 }
632 for (i = 0; i < sctp_audit_indx; i++) {
633 if ((sctp_audit_data[i][0] == 0xe0) &&
634 (sctp_audit_data[i][1] == 0x01)) {
635 cnt = 0;
636 SCTP_PRINTF("\n");
637 } else if (sctp_audit_data[i][0] == 0xf0) {
638 cnt = 0;
639 SCTP_PRINTF("\n");
640 } else if ((sctp_audit_data[i][0] == 0xc0) &&
641 (sctp_audit_data[i][1] == 0x01)) {
642 SCTP_PRINTF("\n");
643 cnt = 0;
644 }
645 SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
646 (uint32_t) sctp_audit_data[i][1]);
647 cnt++;
648 if ((cnt % 14) == 0)
649 SCTP_PRINTF("\n");
650 }
651 SCTP_PRINTF("\n");
652 }
653
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	/*
	 * Cross-check the association's cached counters (retransmit count,
	 * total flight size/count, per-net flight sizes) against the actual
	 * contents of the sent queue, recording each step in the circular
	 * audit buffer.  Mismatches are printed, corrected in place, and
	 * trigger a full audit-report dump.  'net' is currently unused.
	 */
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/* Record the audit event itself: 0xAA plus the caller location. */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		/* 0xAF/0x01: audit requested without an endpoint. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		/* 0xAF/0x02: audit requested without an association. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xA1: snapshot of the cached retransmit count (low byte). */
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	/* Recount retransmits and in-flight bytes from the sent queue. */
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		/* 0xAF/0xA1: cached retransmit count was wrong; fix it. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
			    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		/* 0xA2: record the corrected retransmit count. */
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA2: cached total flight size was wrong; fix it. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
			    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		/* 0xAF/0xA5: cached flight chunk count was wrong; fix it. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	/* Now verify the per-destination flight sizes sum to the total. */
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA3: per-net flight sizes disagree with the total. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
			    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
					    (void *)lnet, lnet->flight_size,
					    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}
782
783 void
sctp_audit_log(uint8_t ev,uint8_t fd)784 sctp_audit_log(uint8_t ev, uint8_t fd)
785 {
786
787 sctp_audit_data[sctp_audit_indx][0] = ev;
788 sctp_audit_data[sctp_audit_indx][1] = fd;
789 sctp_audit_indx++;
790 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
791 sctp_audit_indx = 0;
792 }
793 }
794
795 #endif
796
797 /*
798 * The conversion from time to ticks and vice versa is done by rounding
799 * upwards. This way we can test in the code the time to be positive and
800 * know that this corresponds to a positive number of ticks.
801 */
802
803 uint32_t
sctp_msecs_to_ticks(uint32_t msecs)804 sctp_msecs_to_ticks(uint32_t msecs)
805 {
806 uint64_t temp;
807 uint32_t ticks;
808
809 if (hz == 1000) {
810 ticks = msecs;
811 } else {
812 temp = (((uint64_t)msecs * hz) + 999) / 1000;
813 if (temp > UINT32_MAX) {
814 ticks = UINT32_MAX;
815 } else {
816 ticks = (uint32_t)temp;
817 }
818 }
819 return (ticks);
820 }
821
822 uint32_t
sctp_ticks_to_msecs(uint32_t ticks)823 sctp_ticks_to_msecs(uint32_t ticks)
824 {
825 uint64_t temp;
826 uint32_t msecs;
827
828 if (hz == 1000) {
829 msecs = ticks;
830 } else {
831 temp = (((uint64_t)ticks * 1000) + (hz - 1)) / hz;
832 if (temp > UINT32_MAX) {
833 msecs = UINT32_MAX;
834 } else {
835 msecs = (uint32_t)temp;
836 }
837 }
838 return (msecs);
839 }
840
841 uint32_t
sctp_secs_to_ticks(uint32_t secs)842 sctp_secs_to_ticks(uint32_t secs)
843 {
844 uint64_t temp;
845 uint32_t ticks;
846
847 temp = (uint64_t)secs * hz;
848 if (temp > UINT32_MAX) {
849 ticks = UINT32_MAX;
850 } else {
851 ticks = (uint32_t)temp;
852 }
853 return (ticks);
854 }
855
856 uint32_t
sctp_ticks_to_secs(uint32_t ticks)857 sctp_ticks_to_secs(uint32_t ticks)
858 {
859 uint64_t temp;
860 uint32_t secs;
861
862 temp = ((uint64_t)ticks + (hz - 1)) / hz;
863 if (temp > UINT32_MAX) {
864 secs = UINT32_MAX;
865 } else {
866 secs = (uint32_t)temp;
867 }
868 return (secs);
869 }
870
871 /*
872 * sctp_stop_timers_for_shutdown() should be called
873 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
874 * state to make sure that all timers are stopped.
875 */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
	/*
	 * Stop every timer that must not run once the association enters
	 * SHUTDOWN_SENT or SHUTDOWN_ACK_SENT: the association-wide RECV,
	 * stream-reset, ASCONF and autoclose timers, plus the per-path
	 * PMTU-raise and heartbeat timers.  The SCTP_LOC_* codes identify
	 * this call site in timer traces.
	 */
	struct sctp_inpcb *inp;
	struct sctp_nets *net;

	inp = stcb->sctp_ep;

	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
	                SCTP_FROM_SCTPUTIL + SCTP_LOC_12);
	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
	                SCTP_FROM_SCTPUTIL + SCTP_LOC_13);
	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
	                SCTP_FROM_SCTPUTIL + SCTP_LOC_14);
	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
	                SCTP_FROM_SCTPUTIL + SCTP_LOC_15);
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		                SCTP_FROM_SCTPUTIL + SCTP_LOC_16);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
		                SCTP_FROM_SCTPUTIL + SCTP_LOC_17);
	}
}
899
void
sctp_stop_association_timers(struct sctp_tcb *stcb, bool stop_assoc_kill_timer)
{
	/*
	 * Stop all timers associated with this association: every
	 * association-wide timer (optionally including the ASOCKILL
	 * timer, which the caller may need to keep running to reap the
	 * association later) and every per-destination timer.
	 */
	struct sctp_inpcb *inp;
	struct sctp_nets *net;

	inp = stcb->sctp_ep;
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
	                SCTP_FROM_SCTPUTIL + SCTP_LOC_18);
	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
	                SCTP_FROM_SCTPUTIL + SCTP_LOC_19);
	if (stop_assoc_kill_timer) {
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
		                SCTP_FROM_SCTPUTIL + SCTP_LOC_20);
	}
	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
	                SCTP_FROM_SCTPUTIL + SCTP_LOC_21);
	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
	                SCTP_FROM_SCTPUTIL + SCTP_LOC_22);
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNGUARD, inp, stcb, NULL,
	                SCTP_FROM_SCTPUTIL + SCTP_LOC_23);
	/* Mobility adaptation */
	sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, inp, stcb, NULL,
	                SCTP_FROM_SCTPUTIL + SCTP_LOC_24);
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
		                SCTP_FROM_SCTPUTIL + SCTP_LOC_25);
		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net,
		                SCTP_FROM_SCTPUTIL + SCTP_LOC_26);
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, net,
		                SCTP_FROM_SCTPUTIL + SCTP_LOC_27);
		sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net,
		                SCTP_FROM_SCTPUTIL + SCTP_LOC_28);
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, net,
		                SCTP_FROM_SCTPUTIL + SCTP_LOC_29);
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		                SCTP_FROM_SCTPUTIL + SCTP_LOC_30);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
		                SCTP_FROM_SCTPUTIL + SCTP_LOC_31);
	}
}
941
/*
 * A list of sizes based on typical mtu's, used only if next hop size not
 * returned. These values MUST be multiples of 4 and MUST be ordered
 * ascending — sctp_get_prev_mtu() and sctp_get_next_mtu() below rely on
 * both properties when scanning the table.
 */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1004,
	1492,
	1500,
	1536,
	2000,
	2048,
	4352,
	4464,
	8168,
	17912,
	32000,
	65532
};
966
967 /*
968 * Return the largest MTU in sctp_mtu_sizes smaller than val.
969 * If val is smaller than the minimum, just return the largest
970 * multiple of 4 smaller or equal to val.
971 * Ensure that the result is a multiple of 4.
972 */
973 uint32_t
sctp_get_prev_mtu(uint32_t val)974 sctp_get_prev_mtu(uint32_t val)
975 {
976 uint32_t i;
977
978 val &= 0xfffffffc;
979 if (val <= sctp_mtu_sizes[0]) {
980 return (val);
981 }
982 for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
983 if (val <= sctp_mtu_sizes[i]) {
984 break;
985 }
986 }
987 KASSERT((sctp_mtu_sizes[i - 1] & 0x00000003) == 0,
988 ("sctp_mtu_sizes[%u] not a multiple of 4", i - 1));
989 return (sctp_mtu_sizes[i - 1]);
990 }
991
992 /*
993 * Return the smallest MTU in sctp_mtu_sizes larger than val.
994 * If val is larger than the maximum, just return the largest multiple of 4 smaller
995 * or equal to val.
996 * Ensure that the result is a multiple of 4.
997 */
998 uint32_t
sctp_get_next_mtu(uint32_t val)999 sctp_get_next_mtu(uint32_t val)
1000 {
1001 /* select another MTU that is just bigger than this one */
1002 uint32_t i;
1003
1004 val &= 0xfffffffc;
1005 for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
1006 if (val < sctp_mtu_sizes[i]) {
1007 KASSERT((sctp_mtu_sizes[i] & 0x00000003) == 0,
1008 ("sctp_mtu_sizes[%u] not a multiple of 4", i));
1009 return (sctp_mtu_sizes[i]);
1010 }
1011 }
1012 return (val);
1013 }
1014
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Refill the endpoint's random byte store and reset the read
	 * position (store_at) to the beginning.
	 *
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
#if defined(__Userspace__) && defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION)
	/* Fuzzing builds fill the store from rand() instead of the HMAC. */
	for (int i = 0; i < (int) (sizeof(m->random_store) / sizeof(m->random_store[0])); i++) {
		m->random_store[i] = (uint8_t) rand();
	}
#else
	/* HMAC over random_numbers keyed by the counter produces the store. */
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
	    sizeof(m->random_counter), (uint8_t *)m->random_store);
#endif
	/* Bump the counter so the next refill yields different bytes. */
	m->random_counter++;
}
1039
1040 uint32_t
sctp_select_initial_TSN(struct sctp_pcb * inp)1041 sctp_select_initial_TSN(struct sctp_pcb *inp)
1042 {
1043 /*
1044 * A true implementation should use random selection process to get
1045 * the initial stream sequence number, using RFC1750 as a good
1046 * guideline
1047 */
1048 uint32_t x, *xp;
1049 uint8_t *p;
1050 int store_at, new_store;
1051
1052 if (inp->initial_sequence_debug != 0) {
1053 uint32_t ret;
1054
1055 ret = inp->initial_sequence_debug;
1056 inp->initial_sequence_debug++;
1057 return (ret);
1058 }
1059 retry:
1060 store_at = inp->store_at;
1061 new_store = store_at + sizeof(uint32_t);
1062 if (new_store >= (SCTP_SIGNATURE_SIZE-3)) {
1063 new_store = 0;
1064 }
1065 if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
1066 goto retry;
1067 }
1068 if (new_store == 0) {
1069 /* Refill the random store */
1070 sctp_fill_random_store(inp);
1071 }
1072 p = &inp->random_store[store_at];
1073 xp = (uint32_t *)p;
1074 x = *xp;
1075 return (x);
1076 }
1077
1078 uint32_t
sctp_select_a_tag(struct sctp_inpcb * inp,uint16_t lport,uint16_t rport,int check)1079 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
1080 {
1081 uint32_t x;
1082 struct timeval now;
1083
1084 if (check) {
1085 (void)SCTP_GETTIME_TIMEVAL(&now);
1086 }
1087 for (;;) {
1088 x = sctp_select_initial_TSN(&inp->sctp_ep);
1089 if (x == 0) {
1090 /* we never use 0 */
1091 continue;
1092 }
1093 if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
1094 break;
1095 }
1096 }
1097 return (x);
1098 }
1099
1100 int32_t
sctp_map_assoc_state(int kernel_state)1101 sctp_map_assoc_state(int kernel_state)
1102 {
1103 int32_t user_state;
1104
1105 if (kernel_state & SCTP_STATE_WAS_ABORTED) {
1106 user_state = SCTP_CLOSED;
1107 } else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
1108 user_state = SCTP_SHUTDOWN_PENDING;
1109 } else {
1110 switch (kernel_state & SCTP_STATE_MASK) {
1111 case SCTP_STATE_EMPTY:
1112 user_state = SCTP_CLOSED;
1113 break;
1114 case SCTP_STATE_INUSE:
1115 user_state = SCTP_CLOSED;
1116 break;
1117 case SCTP_STATE_COOKIE_WAIT:
1118 user_state = SCTP_COOKIE_WAIT;
1119 break;
1120 case SCTP_STATE_COOKIE_ECHOED:
1121 user_state = SCTP_COOKIE_ECHOED;
1122 break;
1123 case SCTP_STATE_OPEN:
1124 user_state = SCTP_ESTABLISHED;
1125 break;
1126 case SCTP_STATE_SHUTDOWN_SENT:
1127 user_state = SCTP_SHUTDOWN_SENT;
1128 break;
1129 case SCTP_STATE_SHUTDOWN_RECEIVED:
1130 user_state = SCTP_SHUTDOWN_RECEIVED;
1131 break;
1132 case SCTP_STATE_SHUTDOWN_ACK_SENT:
1133 user_state = SCTP_SHUTDOWN_ACK_SENT;
1134 break;
1135 default:
1136 user_state = SCTP_CLOSED;
1137 break;
1138 }
1139 }
1140 return (user_state);
1141 }
1142
/*
 * Initialize a freshly allocated association (stcb->asoc) from the
 * endpoint's (inp's) configured defaults.
 *
 * override_tag, when non-zero, forces the local verification tag;
 * otherwise a fresh one is selected.  o_strms is the number of
 * outgoing streams to allocate.  Returns 0 on success or ENOMEM if
 * a stream/mapping allocation fails (anything allocated earlier in
 * this function is freed before returning in that case).
 */
int
sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
               uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
{
	struct sctp_association *asoc;
	/*
	 * Anything set to zero is taken care of by the allocation routine's
	 * bzero
	 */

	/*
	 * Up front select what scoping to apply on addresses I tell my peer
	 * Not sure what to do with these right now, we will need to come up
	 * with a way to set them. We may need to pass them through from the
	 * caller in the sctp_aloc_assoc() function.
	 */
	int i;
#if defined(SCTP_DETAILED_STR_STATS)
	int j;
#endif

	asoc = &stcb->asoc;
	/* init all variables to a known value. */
	SCTP_SET_STATE(stcb, SCTP_STATE_INUSE);
	/* Copy the endpoint's per-association defaults. */
	asoc->max_burst = inp->sctp_ep.max_burst;
	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
	asoc->heart_beat_delay = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
	/* Protocol-extension support is inherited from the endpoint. */
	asoc->ecn_supported = inp->ecn_supported;
	asoc->prsctp_supported = inp->prsctp_supported;
	asoc->auth_supported = inp->auth_supported;
	asoc->asconf_supported = inp->asconf_supported;
	asoc->reconfig_supported = inp->reconfig_supported;
	asoc->nrsack_supported = inp->nrsack_supported;
	asoc->pktdrop_supported = inp->pktdrop_supported;
	asoc->idata_supported = inp->idata_supported;
	asoc->sctp_cmt_pf = (uint8_t)0;
	asoc->sctp_frag_point = inp->sctp_frag_point;
	asoc->sctp_features = inp->sctp_features;
	asoc->default_dscp = inp->sctp_ep.default_dscp;
	asoc->max_cwnd = inp->max_cwnd;
#ifdef INET6
	if (inp->sctp_ep.default_flowlabel) {
		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
	} else {
		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
			/* Random 20-bit label, high bit marks it as generated. */
			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
			asoc->default_flowlabel &= 0x000fffff;
			asoc->default_flowlabel |= 0x80000000;
		} else {
			asoc->default_flowlabel = 0;
		}
	}
#endif
	asoc->sb_send_resv = 0;
	/* Select our verification tag unless the caller supplied one. */
	if (override_tag) {
		asoc->my_vtag = override_tag;
	} else {
		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
	}
	/* Get the nonce tags */
	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
	asoc->vrf_id = vrf_id;

#ifdef SCTP_ASOCLOG_OF_TSNS
	asoc->tsn_in_at = 0;
	asoc->tsn_out_at = 0;
	asoc->tsn_in_wrapped = 0;
	asoc->tsn_out_wrapped = 0;
	asoc->cumack_log_at = 0;
	asoc->cumack_log_atsnt = 0;
#endif
#ifdef SCTP_FS_SPEC_LOG
	asoc->fs_index = 0;
#endif
	asoc->refcnt = 0;
	asoc->assoc_up_sent = 0;
	/* All outgoing sequence spaces start from the same random TSN. */
	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
	    sctp_select_initial_TSN(&inp->sctp_ep);
	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
	/* we are optimistic here */
	asoc->peer_supports_nat = 0;
	asoc->sent_queue_retran_cnt = 0;

	/* for CMT */
	asoc->last_net_cmt_send_started = NULL;

	/* This will need to be adjusted */
	asoc->last_acked_seq = asoc->init_seq_number - 1;
	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
	asoc->asconf_seq_in = asoc->last_acked_seq;

	/* here we are different, we hold the next one we expect */
	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;

	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
	asoc->initial_rto = inp->sctp_ep.initial_rto;

	asoc->default_mtu = inp->sctp_ep.default_mtu;
	asoc->max_init_times = inp->sctp_ep.max_init_times;
	asoc->max_send_times = inp->sctp_ep.max_send_times;
	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
	asoc->free_chunk_cnt = 0;

	asoc->iam_blocking = 0;
	asoc->context = inp->sctp_context;
	asoc->local_strreset_support = inp->local_strreset_support;
	asoc->def_send = inp->def_send;
	asoc->delayed_ack = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
	asoc->pr_sctp_cnt = 0;
	asoc->total_output_queue_size = 0;

	/* Derive the address scope from how the endpoint is bound. */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		asoc->scope.ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(inp) == 0) {
			asoc->scope.ipv4_addr_legal = 1;
		} else {
			asoc->scope.ipv4_addr_legal = 0;
		}
#if defined(__Userspace__)
		asoc->scope.conn_addr_legal = 0;
#endif
	} else {
		asoc->scope.ipv6_addr_legal = 0;
#if defined(__Userspace__)
		if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) {
			asoc->scope.conn_addr_legal = 1;
			asoc->scope.ipv4_addr_legal = 0;
		} else {
			asoc->scope.conn_addr_legal = 0;
			asoc->scope.ipv4_addr_legal = 1;
		}
#else
		asoc->scope.ipv4_addr_legal = 1;
#endif
	}

	/* Windows start from the receive socket buffer limit. */
	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);

	asoc->smallest_mtu = inp->sctp_frag_point;
	asoc->minrto = inp->sctp_ep.sctp_minrto;
	asoc->maxrto = inp->sctp_ep.sctp_maxrto;

	asoc->stream_locked_on = 0;
	asoc->ecn_echo_cnt_onq = 0;
	asoc->stream_locked = 0;

	asoc->send_sack = 1;

	LIST_INIT(&asoc->sctp_restricted_addrs);

	TAILQ_INIT(&asoc->nets);
	TAILQ_INIT(&asoc->pending_reply_queue);
	TAILQ_INIT(&asoc->asconf_ack_sent);
	/* Setup to fill the hb random cache at first HB */
	asoc->hb_random_idx = 4;

	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;

	/* Plug in the endpoint's default congestion-control and
	 * stream-scheduling modules. */
	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];

	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];

	/*
	 * Now the stream parameters, here we allocate space for all streams
	 * that we request by default.
	 */
	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
	    o_strms;
	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
	            asoc->streamoutcnt * sizeof(struct sctp_stream_out),
	            SCTP_M_STRMO);
	if (asoc->strmout == NULL) {
		/* big trouble no memory */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/*
		 * inbound side must be set to 0xffff, also NOTE when we get
		 * the INIT-ACK back (for INIT sender) we MUST reduce the
		 * count (streamoutcnt) but first check if we sent to any of
		 * the upper streams that were dropped (if some were). Those
		 * that were dropped must be notified to the upper layer as
		 * failed to send.
		 */
		asoc->strmout[i].next_mid_ordered = 0;
		asoc->strmout[i].next_mid_unordered = 0;
		TAILQ_INIT(&asoc->strmout[i].outqueue);
		asoc->strmout[i].chunks_on_queues = 0;
#if defined(SCTP_DETAILED_STR_STATS)
		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
			asoc->strmout[i].abandoned_sent[j] = 0;
			asoc->strmout[i].abandoned_unsent[j] = 0;
		}
#else
		asoc->strmout[i].abandoned_sent[0] = 0;
		asoc->strmout[i].abandoned_unsent[0] = 0;
#endif
		asoc->strmout[i].sid = i;
		asoc->strmout[i].last_msg_incomplete = 0;
		asoc->strmout[i].state = SCTP_STREAM_OPENING;
		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
	}
	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);

	/* Now the mapping array */
	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
	            SCTP_M_MAP);
	if (asoc->mapping_array == NULL) {
		/* Unwind the stream allocation above. */
		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
	            SCTP_M_MAP);
	if (asoc->nr_mapping_array == NULL) {
		/* Unwind both earlier allocations. */
		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);

	/* Now the init of the other outqueues */
	TAILQ_INIT(&asoc->free_chunks);
	TAILQ_INIT(&asoc->control_send_queue);
	TAILQ_INIT(&asoc->asconf_send_queue);
	TAILQ_INIT(&asoc->send_queue);
	TAILQ_INIT(&asoc->sent_queue);
	TAILQ_INIT(&asoc->resetHead);
	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
	TAILQ_INIT(&asoc->asconf_queue);
	/* authentication fields */
	asoc->authinfo.random = NULL;
	asoc->authinfo.active_keyid = 0;
	asoc->authinfo.assoc_key = NULL;
	asoc->authinfo.assoc_keyid = 0;
	asoc->authinfo.recv_key = NULL;
	asoc->authinfo.recv_keyid = 0;
	LIST_INIT(&asoc->shared_keys);
	asoc->marked_retrans = 0;
	asoc->port = inp->sctp_ep.port;
	/* Per-timer expiry counters (incremented in sctp_timeout_handler). */
	asoc->timoinit = 0;
	asoc->timodata = 0;
	asoc->timosack = 0;
	asoc->timoshutdown = 0;
	asoc->timoheartbeat = 0;
	asoc->timocookie = 0;
	asoc->timoshutdownack = 0;
	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
	asoc->discontinuity_time = asoc->start_time;
	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
		asoc->abandoned_unsent[i] = 0;
		asoc->abandoned_sent[i] = 0;
	}
	/* sa_ignore MEMLEAK {memory is put in the assoc mapping array and freed later when
	 * the association is freed.
	 */
	return (0);
}
1413
1414 void
sctp_print_mapping_array(struct sctp_association * asoc)1415 sctp_print_mapping_array(struct sctp_association *asoc)
1416 {
1417 unsigned int i, limit;
1418
1419 SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1420 asoc->mapping_array_size,
1421 asoc->mapping_array_base_tsn,
1422 asoc->cumulative_tsn,
1423 asoc->highest_tsn_inside_map,
1424 asoc->highest_tsn_inside_nr_map);
1425 for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1426 if (asoc->mapping_array[limit - 1] != 0) {
1427 break;
1428 }
1429 }
1430 SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1431 for (i = 0; i < limit; i++) {
1432 SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1433 }
1434 if (limit % 16)
1435 SCTP_PRINTF("\n");
1436 for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1437 if (asoc->nr_mapping_array[limit - 1]) {
1438 break;
1439 }
1440 }
1441 SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1442 for (i = 0; i < limit; i++) {
1443 SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ': '\n');
1444 }
1445 if (limit % 16)
1446 SCTP_PRINTF("\n");
1447 }
1448
1449 int
sctp_expand_mapping_array(struct sctp_association * asoc,uint32_t needed)1450 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1451 {
1452 /* mapping array needs to grow */
1453 uint8_t *new_array1, *new_array2;
1454 uint32_t new_size;
1455
1456 new_size = asoc->mapping_array_size + ((needed+7)/8 + SCTP_MAPPING_ARRAY_INCR);
1457 SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1458 SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1459 if ((new_array1 == NULL) || (new_array2 == NULL)) {
1460 /* can't get more, forget it */
1461 SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1462 if (new_array1) {
1463 SCTP_FREE(new_array1, SCTP_M_MAP);
1464 }
1465 if (new_array2) {
1466 SCTP_FREE(new_array2, SCTP_M_MAP);
1467 }
1468 return (-1);
1469 }
1470 memset(new_array1, 0, new_size);
1471 memset(new_array2, 0, new_size);
1472 memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1473 memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1474 SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1475 SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1476 asoc->mapping_array = new_array1;
1477 asoc->nr_mapping_array = new_array2;
1478 asoc->mapping_array_size = new_size;
1479 return (0);
1480 }
1481
/*
 * Core of the endpoint/association iterator: walk the endpoint list
 * starting at it->inp, and for each endpoint whose flags/features match
 * the iterator's filters, run function_inp once, function_assoc on every
 * association matching asoc_state, and function_inp_end when the endpoint
 * is done.  When the whole walk finishes, function_atend is called and
 * the iterator itself is freed.  Periodically drops and reacquires the
 * global locks (every SCTP_ITERATOR_MAX_AT_ONCE associations) so other
 * threads can make progress.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
#if defined(__FreeBSD__) && !defined(__Userspace__)
	struct epoch_tracker et;
#endif
	struct sctp_inpcb *tinp;
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;

#if defined(__FreeBSD__) && !defined(__Userspace__)
	NET_EPOCH_ENTER(et);
#endif
	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	sctp_it_ctl.cur_it = it;
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		/* Drop the reference taken when the iterator was queued. */
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		/* Common exit: unlock, run the completion callback, free it. */
		sctp_it_ctl.cur_it = NULL;
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
#if defined(__FreeBSD__) && !defined(__Userspace__)
		NET_EPOCH_EXIT(et);
#endif
		return;
	}
select_a_new_ep:
	if (first_in) {
		/* it->inp is already locked from above on the first pass. */
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	        ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	       ((it->pcb_features) &&
	        ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		it->stcb = NULL;
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp)(it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		/* Endpoint callback asked to skip, or no associations. */
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end)(it->inp,
			                                   it->pointer,
			                                   it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/* Keep the stcb and inp alive while unlocked. */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
#if !(defined(__FreeBSD__) && !defined(__Userspace__))
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_MUST_EXIT) {
					goto done_with_iterator;
				}
#endif
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				            sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			/* Re-lock and release the temporary references. */
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		KASSERT(it->inp == it->stcb->sctp_ep,
		        ("%s: stcb %p does not belong to inp %p, but inp %p",
		         __func__, it->stcb, it->inp, it->stcb->sctp_ep));

		/* run function on this one */
		(*it->function_assoc)(it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end)(it->inp,
				                                   it->pointer,
				                                   it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	it->stcb = NULL;
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1652
/*
 * Drain the queue of pending iterators, running each via
 * sctp_iterator_work().  The WQ lock is dropped around each iterator
 * run and reacquired afterwards, so new iterators may be queued while
 * one is in progress.
 */
void
sctp_iterator_worker(void)
{
	struct sctp_iterator *it;

	/* This function is called with the WQ lock in place */
	sctp_it_ctl.iterator_running = 1;
	while ((it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead)) != NULL) {
		/* now lets work on this one */
		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
		SCTP_IPI_ITERATOR_WQ_UNLOCK();
#if defined(__FreeBSD__) && !defined(__Userspace__)
		/* Run the iterator in the vnet it was created in. */
		CURVNET_SET(it->vn);
#endif
		sctp_iterator_work(it);
#if defined(__FreeBSD__) && !defined(__Userspace__)
		CURVNET_RESTORE();
#endif
		SCTP_IPI_ITERATOR_WQ_LOCK();
#if !defined(__FreeBSD__) && !defined(__Userspace__)
		if (sctp_it_ctl.iterator_flags & SCTP_ITERATOR_MUST_EXIT) {
			break;
		}
#endif
		/* "it" was freed by sctp_iterator_work(). */
		/*sa_ignore FREED_MEMORY*/
	}
	sctp_it_ctl.iterator_running = 0;
	return;
}
1682
1683 static void
sctp_handle_addr_wq(void)1684 sctp_handle_addr_wq(void)
1685 {
1686 /* deal with the ADDR wq from the rtsock calls */
1687 struct sctp_laddr *wi, *nwi;
1688 struct sctp_asconf_iterator *asc;
1689
1690 SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1691 sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1692 if (asc == NULL) {
1693 /* Try later, no memory */
1694 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1695 (struct sctp_inpcb *)NULL,
1696 (struct sctp_tcb *)NULL,
1697 (struct sctp_nets *)NULL);
1698 return;
1699 }
1700 LIST_INIT(&asc->list_of_work);
1701 asc->cnt = 0;
1702
1703 LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1704 LIST_REMOVE(wi, sctp_nxt_addr);
1705 LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1706 asc->cnt++;
1707 }
1708
1709 if (asc->cnt == 0) {
1710 SCTP_FREE(asc, SCTP_M_ASC_IT);
1711 } else {
1712 int ret;
1713
1714 ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
1715 sctp_asconf_iterator_stcb,
1716 NULL, /* No ep end for boundall */
1717 SCTP_PCB_FLAGS_BOUNDALL,
1718 SCTP_PCB_ANY_FEATURES,
1719 SCTP_ASOC_ANY_STATE,
1720 (void *)asc, 0,
1721 sctp_asconf_iterator_end, NULL, 0);
1722 if (ret) {
1723 SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
1724 /* Freeing if we are stopping or put back on the addr_wq. */
1725 if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
1726 sctp_asconf_iterator_end(asc, 0);
1727 } else {
1728 LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
1729 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
1730 }
1731 SCTP_FREE(asc, SCTP_M_ASC_IT);
1732 }
1733 }
1734 }
1735 }
1736
1737 /*-
1738 * The following table shows which pointers for the inp, stcb, or net are
1739 * stored for each timer after it was started.
1740 *
1741 *|Name |Timer |inp |stcb|net |
1742 *|-----------------------------|-----------------------------|----|----|----|
1743 *|SCTP_TIMER_TYPE_SEND |net->rxt_timer |Yes |Yes |Yes |
1744 *|SCTP_TIMER_TYPE_INIT |net->rxt_timer |Yes |Yes |Yes |
1745 *|SCTP_TIMER_TYPE_RECV |stcb->asoc.dack_timer |Yes |Yes |No |
1746 *|SCTP_TIMER_TYPE_SHUTDOWN |net->rxt_timer |Yes |Yes |Yes |
1747 *|SCTP_TIMER_TYPE_HEARTBEAT |net->hb_timer |Yes |Yes |Yes |
1748 *|SCTP_TIMER_TYPE_COOKIE |net->rxt_timer |Yes |Yes |Yes |
1749 *|SCTP_TIMER_TYPE_NEWCOOKIE |inp->sctp_ep.signature_change|Yes |No |No |
1750 *|SCTP_TIMER_TYPE_PATHMTURAISE |net->pmtu_timer |Yes |Yes |Yes |
1751 *|SCTP_TIMER_TYPE_SHUTDOWNACK |net->rxt_timer |Yes |Yes |Yes |
1752 *|SCTP_TIMER_TYPE_ASCONF |stcb->asoc.asconf_timer |Yes |Yes |Yes |
1753 *|SCTP_TIMER_TYPE_SHUTDOWNGUARD|stcb->asoc.shut_guard_timer |Yes |Yes |No |
1754 *|SCTP_TIMER_TYPE_AUTOCLOSE |stcb->asoc.autoclose_timer |Yes |Yes |No |
1755 *|SCTP_TIMER_TYPE_STRRESET |stcb->asoc.strreset_timer |Yes |Yes |No |
1756 *|SCTP_TIMER_TYPE_INPKILL |inp->sctp_ep.signature_change|Yes |No |No |
1757 *|SCTP_TIMER_TYPE_ASOCKILL |stcb->asoc.strreset_timer |Yes |Yes |No |
1758 *|SCTP_TIMER_TYPE_ADDR_WQ |SCTP_BASE_INFO(addr_wq_timer)|No |No |No |
1759 *|SCTP_TIMER_TYPE_PRIM_DELETED |stcb->asoc.delete_prim_timer |Yes |Yes |No |
1760 */
1761
1762 void
sctp_timeout_handler(void * t)1763 sctp_timeout_handler(void *t)
1764 {
1765 #if defined(__FreeBSD__) && !defined(__Userspace__)
1766 struct epoch_tracker et;
1767 #endif
1768 struct timeval tv;
1769 struct sctp_inpcb *inp;
1770 struct sctp_tcb *stcb;
1771 struct sctp_nets *net;
1772 struct sctp_timer *tmr;
1773 struct mbuf *op_err;
1774 #if defined(__APPLE__) && !defined(__Userspace__)
1775 struct socket *so;
1776 #endif
1777 #if defined(__Userspace__)
1778 struct socket *upcall_socket = NULL;
1779 #endif
1780 int type;
1781 int i, secret;
1782 bool did_output, released_asoc_reference;
1783
1784 /*
1785 * If inp, stcb or net are not NULL, then references to these were
1786 * added when the timer was started, and must be released before this
1787 * function returns.
1788 */
1789 tmr = (struct sctp_timer *)t;
1790 inp = (struct sctp_inpcb *)tmr->ep;
1791 stcb = (struct sctp_tcb *)tmr->tcb;
1792 net = (struct sctp_nets *)tmr->net;
1793 #if defined(__FreeBSD__) && !defined(__Userspace__)
1794 CURVNET_SET((struct vnet *)tmr->vnet);
1795 NET_EPOCH_ENTER(et);
1796 #endif
1797 released_asoc_reference = false;
1798
1799 #ifdef SCTP_AUDITING_ENABLED
1800 sctp_audit_log(0xF0, (uint8_t) tmr->type);
1801 sctp_auditing(3, inp, stcb, net);
1802 #endif
1803
1804 /* sanity checks... */
1805 KASSERT(tmr->self == NULL || tmr->self == tmr,
1806 ("sctp_timeout_handler: tmr->self corrupted"));
1807 KASSERT(SCTP_IS_TIMER_TYPE_VALID(tmr->type),
1808 ("sctp_timeout_handler: invalid timer type %d", tmr->type));
1809 type = tmr->type;
1810 KASSERT(stcb == NULL || stcb->sctp_ep == inp,
1811 ("sctp_timeout_handler of type %d: inp = %p, stcb->sctp_ep %p",
1812 type, stcb, stcb->sctp_ep));
1813 tmr->stopped_from = 0xa001;
1814 if ((stcb != NULL) && (stcb->asoc.state == SCTP_STATE_EMPTY)) {
1815 SCTPDBG(SCTP_DEBUG_TIMER2,
1816 "Timer type %d handler exiting due to CLOSED association.\n",
1817 type);
1818 goto out_decr;
1819 }
1820 tmr->stopped_from = 0xa002;
1821 SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d goes off.\n", type);
1822 if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1823 SCTPDBG(SCTP_DEBUG_TIMER2,
1824 "Timer type %d handler exiting due to not being active.\n",
1825 type);
1826 goto out_decr;
1827 }
1828
1829 tmr->stopped_from = 0xa003;
1830 if (stcb) {
1831 SCTP_TCB_LOCK(stcb);
1832 /*
1833 * Release reference so that association can be freed if
1834 * necessary below.
1835 * This is safe now that we have acquired the lock.
1836 */
1837 atomic_add_int(&stcb->asoc.refcnt, -1);
1838 released_asoc_reference = true;
1839 if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
1840 ((stcb->asoc.state == SCTP_STATE_EMPTY) ||
1841 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1842 SCTPDBG(SCTP_DEBUG_TIMER2,
1843 "Timer type %d handler exiting due to CLOSED association.\n",
1844 type);
1845 goto out;
1846 }
1847 } else if (inp != NULL) {
1848 SCTP_INP_WLOCK(inp);
1849 } else {
1850 SCTP_WQ_ADDR_LOCK();
1851 }
1852
1853 /* Record in stopped_from which timeout occurred. */
1854 tmr->stopped_from = type;
1855 /* mark as being serviced now */
1856 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1857 /*
1858 * Callout has been rescheduled.
1859 */
1860 goto out;
1861 }
1862 if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1863 /*
1864 * Not active, so no action.
1865 */
1866 goto out;
1867 }
1868 SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1869
1870 #if defined(__Userspace__)
1871 if ((stcb != NULL) &&
1872 !(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) &&
1873 (stcb->sctp_socket != NULL)) {
1874 upcall_socket = stcb->sctp_socket;
1875 SOCK_LOCK(upcall_socket);
1876 soref(upcall_socket);
1877 SOCK_UNLOCK(upcall_socket);
1878 }
1879 #endif
1880 /* call the handler for the appropriate timer type */
1881 switch (type) {
1882 case SCTP_TIMER_TYPE_SEND:
1883 KASSERT(inp != NULL && stcb != NULL && net != NULL,
1884 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1885 type, inp, stcb, net));
1886 SCTP_STAT_INCR(sctps_timodata);
1887 stcb->asoc.timodata++;
1888 stcb->asoc.num_send_timers_up--;
1889 if (stcb->asoc.num_send_timers_up < 0) {
1890 stcb->asoc.num_send_timers_up = 0;
1891 }
1892 SCTP_TCB_LOCK_ASSERT(stcb);
1893 if (sctp_t3rxt_timer(inp, stcb, net)) {
1894 /* no need to unlock on tcb its gone */
1895
1896 goto out_decr;
1897 }
1898 SCTP_TCB_LOCK_ASSERT(stcb);
1899 #ifdef SCTP_AUDITING_ENABLED
1900 sctp_auditing(4, inp, stcb, net);
1901 #endif
1902 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1903 did_output = true;
1904 if ((stcb->asoc.num_send_timers_up == 0) &&
1905 (stcb->asoc.sent_queue_cnt > 0)) {
1906 struct sctp_tmit_chunk *chk;
1907
1908 /*
1909 * Safeguard. If there on some on the sent queue
1910 * somewhere but no timers running something is
1911 * wrong... so we start a timer on the first chunk
1912 * on the send queue on whatever net it is sent to.
1913 */
1914 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
1915 if (chk->whoTo != NULL) {
1916 break;
1917 }
1918 }
1919 if (chk != NULL) {
1920 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo);
1921 }
1922 }
1923 break;
1924 case SCTP_TIMER_TYPE_INIT:
1925 KASSERT(inp != NULL && stcb != NULL && net != NULL,
1926 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1927 type, inp, stcb, net));
1928 SCTP_STAT_INCR(sctps_timoinit);
1929 stcb->asoc.timoinit++;
1930 if (sctp_t1init_timer(inp, stcb, net)) {
1931 /* no need to unlock on tcb its gone */
1932 goto out_decr;
1933 }
1934 did_output = false;
1935 break;
1936 case SCTP_TIMER_TYPE_RECV:
1937 KASSERT(inp != NULL && stcb != NULL && net == NULL,
1938 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1939 type, inp, stcb, net));
1940 SCTP_STAT_INCR(sctps_timosack);
1941 stcb->asoc.timosack++;
1942 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
1943 #ifdef SCTP_AUDITING_ENABLED
1944 sctp_auditing(4, inp, stcb, NULL);
1945 #endif
1946 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1947 did_output = true;
1948 break;
1949 case SCTP_TIMER_TYPE_SHUTDOWN:
1950 KASSERT(inp != NULL && stcb != NULL && net != NULL,
1951 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1952 type, inp, stcb, net));
1953 SCTP_STAT_INCR(sctps_timoshutdown);
1954 stcb->asoc.timoshutdown++;
1955 if (sctp_shutdown_timer(inp, stcb, net)) {
1956 /* no need to unlock on tcb its gone */
1957 goto out_decr;
1958 }
1959 #ifdef SCTP_AUDITING_ENABLED
1960 sctp_auditing(4, inp, stcb, net);
1961 #endif
1962 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1963 did_output = true;
1964 break;
1965 case SCTP_TIMER_TYPE_HEARTBEAT:
1966 KASSERT(inp != NULL && stcb != NULL && net != NULL,
1967 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1968 type, inp, stcb, net));
1969 SCTP_STAT_INCR(sctps_timoheartbeat);
1970 stcb->asoc.timoheartbeat++;
1971 if (sctp_heartbeat_timer(inp, stcb, net)) {
1972 /* no need to unlock on tcb its gone */
1973 goto out_decr;
1974 }
1975 #ifdef SCTP_AUDITING_ENABLED
1976 sctp_auditing(4, inp, stcb, net);
1977 #endif
1978 if (!(net->dest_state & SCTP_ADDR_NOHB)) {
1979 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
1980 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
1981 did_output = true;
1982 } else {
1983 did_output = false;
1984 }
1985 break;
1986 case SCTP_TIMER_TYPE_COOKIE:
1987 KASSERT(inp != NULL && stcb != NULL && net != NULL,
1988 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1989 type, inp, stcb, net));
1990 SCTP_STAT_INCR(sctps_timocookie);
1991 stcb->asoc.timocookie++;
1992 if (sctp_cookie_timer(inp, stcb, net)) {
1993 /* no need to unlock on tcb its gone */
1994 goto out_decr;
1995 }
1996 #ifdef SCTP_AUDITING_ENABLED
1997 sctp_auditing(4, inp, stcb, net);
1998 #endif
1999 /*
2000 * We consider T3 and Cookie timer pretty much the same with
2001 * respect to where from in chunk_output.
2002 */
2003 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
2004 did_output = true;
2005 break;
2006 case SCTP_TIMER_TYPE_NEWCOOKIE:
2007 KASSERT(inp != NULL && stcb == NULL && net == NULL,
2008 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2009 type, inp, stcb, net));
2010 SCTP_STAT_INCR(sctps_timosecret);
2011 (void)SCTP_GETTIME_TIMEVAL(&tv);
2012 inp->sctp_ep.time_of_secret_change = tv.tv_sec;
2013 inp->sctp_ep.last_secret_number =
2014 inp->sctp_ep.current_secret_number;
2015 inp->sctp_ep.current_secret_number++;
2016 if (inp->sctp_ep.current_secret_number >=
2017 SCTP_HOW_MANY_SECRETS) {
2018 inp->sctp_ep.current_secret_number = 0;
2019 }
2020 secret = (int)inp->sctp_ep.current_secret_number;
2021 for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
2022 inp->sctp_ep.secret_key[secret][i] =
2023 sctp_select_initial_TSN(&inp->sctp_ep);
2024 }
2025 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL);
2026 did_output = false;
2027 break;
2028 case SCTP_TIMER_TYPE_PATHMTURAISE:
2029 KASSERT(inp != NULL && stcb != NULL && net != NULL,
2030 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2031 type, inp, stcb, net));
2032 SCTP_STAT_INCR(sctps_timopathmtu);
2033 sctp_pathmtu_timer(inp, stcb, net);
2034 did_output = false;
2035 break;
2036 case SCTP_TIMER_TYPE_SHUTDOWNACK:
2037 KASSERT(inp != NULL && stcb != NULL && net != NULL,
2038 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2039 type, inp, stcb, net));
2040 if (sctp_shutdownack_timer(inp, stcb, net)) {
2041 /* no need to unlock on tcb its gone */
2042 goto out_decr;
2043 }
2044 SCTP_STAT_INCR(sctps_timoshutdownack);
2045 stcb->asoc.timoshutdownack++;
2046 #ifdef SCTP_AUDITING_ENABLED
2047 sctp_auditing(4, inp, stcb, net);
2048 #endif
2049 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
2050 did_output = true;
2051 break;
2052 case SCTP_TIMER_TYPE_ASCONF:
2053 KASSERT(inp != NULL && stcb != NULL && net != NULL,
2054 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2055 type, inp, stcb, net));
2056 SCTP_STAT_INCR(sctps_timoasconf);
2057 if (sctp_asconf_timer(inp, stcb, net)) {
2058 /* no need to unlock on tcb its gone */
2059 goto out_decr;
2060 }
2061 #ifdef SCTP_AUDITING_ENABLED
2062 sctp_auditing(4, inp, stcb, net);
2063 #endif
2064 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
2065 did_output = true;
2066 break;
2067 case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2068 KASSERT(inp != NULL && stcb != NULL && net == NULL,
2069 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2070 type, inp, stcb, net));
2071 SCTP_STAT_INCR(sctps_timoshutdownguard);
2072 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
2073 "Shutdown guard timer expired");
2074 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2075 /* no need to unlock on tcb its gone */
2076 goto out_decr;
2077 case SCTP_TIMER_TYPE_AUTOCLOSE:
2078 KASSERT(inp != NULL && stcb != NULL && net == NULL,
2079 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2080 type, inp, stcb, net));
2081 SCTP_STAT_INCR(sctps_timoautoclose);
2082 sctp_autoclose_timer(inp, stcb);
2083 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
2084 did_output = true;
2085 break;
2086 case SCTP_TIMER_TYPE_STRRESET:
2087 KASSERT(inp != NULL && stcb != NULL && net == NULL,
2088 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2089 type, inp, stcb, net));
2090 SCTP_STAT_INCR(sctps_timostrmrst);
2091 if (sctp_strreset_timer(inp, stcb)) {
2092 /* no need to unlock on tcb its gone */
2093 goto out_decr;
2094 }
2095 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
2096 did_output = true;
2097 break;
2098 case SCTP_TIMER_TYPE_INPKILL:
2099 KASSERT(inp != NULL && stcb == NULL && net == NULL,
2100 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2101 type, inp, stcb, net));
2102 SCTP_STAT_INCR(sctps_timoinpkill);
2103 /*
2104 * special case, take away our increment since WE are the
2105 * killer
2106 */
2107 sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
2108 SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
2109 #if defined(__APPLE__) && !defined(__Userspace__)
2110 SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 1);
2111 #endif
2112 SCTP_INP_DECR_REF(inp);
2113 SCTP_INP_WUNLOCK(inp);
2114 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
2115 SCTP_CALLED_FROM_INPKILL_TIMER);
2116 #if defined(__APPLE__) && !defined(__Userspace__)
2117 SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 1);
2118 #endif
2119 inp = NULL;
2120 goto out_no_decr;
2121 case SCTP_TIMER_TYPE_ASOCKILL:
2122 KASSERT(inp != NULL && stcb != NULL && net == NULL,
2123 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2124 type, inp, stcb, net));
2125 SCTP_STAT_INCR(sctps_timoassockill);
2126 /* Can we free it yet? */
2127 SCTP_INP_DECR_REF(inp);
2128 sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
2129 SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
2130 #if defined(__APPLE__) && !defined(__Userspace__)
2131 so = SCTP_INP_SO(inp);
2132 atomic_add_int(&stcb->asoc.refcnt, 1);
2133 SCTP_TCB_UNLOCK(stcb);
2134 SCTP_SOCKET_LOCK(so, 1);
2135 SCTP_TCB_LOCK(stcb);
2136 atomic_subtract_int(&stcb->asoc.refcnt, 1);
2137 #endif
2138 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
2139 SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
2140 #if defined(__APPLE__) && !defined(__Userspace__)
2141 SCTP_SOCKET_UNLOCK(so, 1);
2142 #endif
2143 /*
2144 * free asoc, always unlocks (or destroy's) so prevent
2145 * duplicate unlock or unlock of a free mtx :-0
2146 */
2147 stcb = NULL;
2148 goto out_no_decr;
2149 case SCTP_TIMER_TYPE_ADDR_WQ:
2150 KASSERT(inp == NULL && stcb == NULL && net == NULL,
2151 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2152 type, inp, stcb, net));
2153 sctp_handle_addr_wq();
2154 did_output = true;
2155 break;
2156 case SCTP_TIMER_TYPE_PRIM_DELETED:
2157 KASSERT(inp != NULL && stcb != NULL && net == NULL,
2158 ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2159 type, inp, stcb, net));
2160 SCTP_STAT_INCR(sctps_timodelprim);
2161 sctp_delete_prim_timer(inp, stcb);
2162 did_output = false;
2163 break;
2164 default:
2165 #ifdef INVARIANTS
2166 panic("Unknown timer type %d", type);
2167 #else
2168 goto out;
2169 #endif
2170 }
2171 #ifdef SCTP_AUDITING_ENABLED
2172 sctp_audit_log(0xF1, (uint8_t) type);
2173 if (inp != NULL)
2174 sctp_auditing(5, inp, stcb, net);
2175 #endif
2176 if (did_output && (stcb != NULL)) {
2177 /*
2178 * Now we need to clean up the control chunk chain if an
2179 * ECNE is on it. It must be marked as UNSENT again so next
2180 * call will continue to send it until such time that we get
2181 * a CWR, to remove it. It is, however, less likely that we
2182 * will find a ecn echo on the chain though.
2183 */
2184 sctp_fix_ecn_echo(&stcb->asoc);
2185 }
2186 out:
2187 if (stcb != NULL) {
2188 SCTP_TCB_UNLOCK(stcb);
2189 } else if (inp != NULL) {
2190 SCTP_INP_WUNLOCK(inp);
2191 } else {
2192 SCTP_WQ_ADDR_UNLOCK();
2193 }
2194
2195 out_decr:
2196 #if defined(__Userspace__)
2197 if (upcall_socket != NULL) {
2198 if ((upcall_socket->so_upcall != NULL) &&
2199 (upcall_socket->so_error != 0)) {
2200 (*upcall_socket->so_upcall)(upcall_socket, upcall_socket->so_upcallarg, M_NOWAIT);
2201 }
2202 ACCEPT_LOCK();
2203 SOCK_LOCK(upcall_socket);
2204 sorele(upcall_socket);
2205 }
2206 #endif
2207 /* These reference counts were incremented in sctp_timer_start(). */
2208 if (inp != NULL) {
2209 SCTP_INP_DECR_REF(inp);
2210 }
2211 if ((stcb != NULL) && !released_asoc_reference) {
2212 atomic_add_int(&stcb->asoc.refcnt, -1);
2213 }
2214 if (net != NULL) {
2215 sctp_free_remote_addr(net);
2216 }
2217 out_no_decr:
2218 SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d handler finished.\n", type);
2219 #if defined(__FreeBSD__) && !defined(__Userspace__)
2220 CURVNET_RESTORE();
2221 NET_EPOCH_EXIT(et);
2222 #endif
2223 }
2224
2225 /*-
2226 * The following table shows which parameters must be provided
2227 * when calling sctp_timer_start(). For parameters not being
2228 * provided, NULL must be used.
2229 *
2230 * |Name |inp |stcb|net |
2231 * |-----------------------------|----|----|----|
2232 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes |
2233 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes |
2234 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No |
2235 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes |
2236 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes |
2237 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes |
2238 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No |
2239 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes |
2240 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes |
2241 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |Yes |
2242 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No |
2243 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No |
2244 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |Yes |
2245 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No |
2246 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No |
2247 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No |
2248 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No |
2249 *
2250 */
2251
/*
 * Start a timer of type t_type for the objects given per the parameter
 * table above.  The caller must hold the lock matching the most specific
 * object passed (stcb lock, else inp write lock, else the address
 * work-queue lock); this is assert-checked below.  If the timer is
 * already pending it is left running unchanged.  When the callout is
 * newly scheduled, a reference is taken on each non-NULL inp, stcb and
 * net; those references are dropped in sctp_timeout_handler() or, if the
 * timer is cancelled first, in sctp_timer_stop().
 */
void
sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_timer *tmr;
	uint32_t to_ticks;
	uint32_t rndval, jitter;

	/*
	 * Note: the message arguments are only evaluated when the check
	 * fails, in which case stcb is known to be non-NULL.
	 */
	KASSERT(stcb == NULL || stcb->sctp_ep == inp,
	    ("sctp_timer_start of type %d: inp = %p, stcb->sctp_ep %p",
	    t_type, stcb, stcb->sctp_ep));
	tmr = NULL;
	if (stcb != NULL) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	} else if (inp != NULL) {
		SCTP_INP_WLOCK_ASSERT(inp);
	} else {
		SCTP_WQ_ADDR_LOCK_ASSERT();
	}
	if (stcb != NULL) {
		/*
		 * Don't restart timer on association that's about to be
		 * killed.  The ASOCKILL timer itself is exempt, since it is
		 * the one that finishes the teardown.
		 */
		if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) &&
		    (t_type != SCTP_TIMER_TYPE_ASOCKILL)) {
			SCTPDBG(SCTP_DEBUG_TIMER2,
			    "Timer type %d not started: inp=%p, stcb=%p, net=%p (stcb deleted).\n",
			    t_type, inp, stcb, net);
			return;
		}
		/* Don't restart timer on net that's been removed. */
		if (net != NULL && (net->dest_state & SCTP_ADDR_BEING_DELETED)) {
			SCTPDBG(SCTP_DEBUG_TIMER2,
			    "Timer type %d not started: inp=%p, stcb=%p, net=%p (net deleted).\n",
			    t_type, inp, stcb, net);
			return;
		}
	}
	/*
	 * Each case validates its required arguments (panicking under
	 * INVARIANTS), selects the sctp_timer to use, and computes the
	 * duration in ticks.
	 */
	switch (t_type) {
	case SCTP_TIMER_TYPE_SEND:
		/* Here we use the RTO timer. */
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			/* No RTT measurement yet: use the initial RTO. */
			to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
		} else {
			to_ticks = sctp_msecs_to_ticks(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/*
		 * Here we use the INIT timer default usually about 1
		 * second.
		 */
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
		} else {
			to_ticks = sctp_msecs_to_ticks(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_RECV:
		/*
		 * Here we use the Delayed-Ack timer value from the inp,
		 * usually about 200ms.
		 */
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.dack_timer;
		to_ticks = sctp_msecs_to_ticks(stcb->asoc.delayed_ack);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		/* Here we use the RTO of the destination. */
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
		} else {
			to_ticks = sctp_msecs_to_ticks(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		/*
		 * The net is used here so that we can add in the RTO. Even
		 * though we use a different timer. We also add the HB timer
		 * PLUS a random jitter.
		 */
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		/*
		 * No heartbeat timer for destinations with HB disabled,
		 * unless the address still needs to be confirmed.
		 */
		if ((net->dest_state & SCTP_ADDR_NOHB) &&
		    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
			SCTPDBG(SCTP_DEBUG_TIMER2,
			    "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n",
			    t_type, inp, stcb, net);
			return;
		}
		tmr = &net->hb_timer;
		/* NB: to_ticks is in milliseconds until converted below. */
		if (net->RTO == 0) {
			to_ticks = stcb->asoc.initial_rto;
		} else {
			to_ticks = net->RTO;
		}
		/* Jitter the interval by up to +/- RTO/2. */
		rndval = sctp_select_initial_TSN(&inp->sctp_ep);
		jitter = rndval % to_ticks;
		if (jitter >= (to_ticks >> 1)) {
			to_ticks = to_ticks + (jitter - (to_ticks >> 1));
		} else {
			to_ticks = to_ticks - jitter;
		}
		if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
		    !(net->dest_state & SCTP_ADDR_PF)) {
			to_ticks += net->heart_beat_delay;
		}
		/*
		 * Now we must convert the to_ticks that are now in
		 * ms to ticks.
		 */
		to_ticks = sctp_msecs_to_ticks(to_ticks);
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		/*
		 * Here we can use the RTO timer from the network since one
		 * RTT was complete. If a retransmission happened then we will
		 * be using the RTO initial value.
		 */
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
		} else {
			to_ticks = sctp_msecs_to_ticks(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/*
		 * Nothing needed but the endpoint here usually about 60
		 * minutes.
		 */
		if ((inp == NULL) || (stcb != NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		/*
		 * Here we use the value found in the EP for PMTUD, usually
		 * about 10 minutes.
		 */
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		/* Skip destinations with path-MTU discovery disabled. */
		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
			SCTPDBG(SCTP_DEBUG_TIMER2,
			    "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n",
			    t_type, inp, stcb, net);
			return;
		}
		tmr = &net->pmtu_timer;
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		/* Here we use the RTO of the destination. */
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
		} else {
			to_ticks = sctp_msecs_to_ticks(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.asconf_timer;
		if (net->RTO == 0) {
			to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
		} else {
			to_ticks = sctp_msecs_to_ticks(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/*
		 * Here we use the endpoints shutdown guard timer usually
		 * about 3 minutes.
		 */
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.shut_guard_timer;
		if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
			/* Default: 5 * max RTO, capped to avoid overflow. */
			if (stcb->asoc.maxrto < UINT32_MAX / 5) {
				to_ticks = sctp_msecs_to_ticks(5 * stcb->asoc.maxrto);
			} else {
				to_ticks = sctp_msecs_to_ticks(UINT32_MAX);
			}
		} else {
			to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
		}
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		/* Auto-close interval configured on the association. */
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.autoclose_timer;
		to_ticks = stcb->asoc.sctp_autoclose_ticks;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.strreset_timer;
		if (net->RTO == 0) {
			to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
		} else {
			to_ticks = sctp_msecs_to_ticks(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_change
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		if ((inp == NULL) || (stcb != NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = sctp_msecs_to_ticks(SCTP_INP_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/* Re-uses the stream-reset timer slot for association teardown. */
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.strreset_timer;
		to_ticks = sctp_msecs_to_ticks(SCTP_ASOC_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		/* Global address work-queue timer; takes no objects. */
		if ((inp != NULL) || (stcb != NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		/* Only 1 tick away :-) */
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		to_ticks = SCTP_ADDRESS_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.delete_prim_timer;
		to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
		break;
	default:
#ifdef INVARIANTS
		panic("Unknown timer type %d", t_type);
#else
		return;
#endif
	}
	KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type));
	KASSERT(to_ticks > 0, ("to_ticks == 0 for timer type %d", t_type));
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * We do NOT allow you to have it already running. If it is,
		 * we leave the current one up unchanged.
		 */
		SCTPDBG(SCTP_DEBUG_TIMER2,
		    "Timer type %d already running: inp=%p, stcb=%p, net=%p.\n",
		    t_type, inp, stcb, net);
		return;
	}
	/* At this point we can proceed. */
	if (t_type == SCTP_TIMER_TYPE_SEND) {
		/* Balanced by the decrement in the timeout handler / stop. */
		stcb->asoc.num_send_timers_up++;
	}
	tmr->stopped_from = 0;
	tmr->type = t_type;
	tmr->ep = (void *)inp;
	tmr->tcb = (void *)stcb;
	if (t_type == SCTP_TIMER_TYPE_STRRESET) {
		/*
		 * The stream-reset timer's duration derives from a net, but
		 * the timer itself is not bound to one (it is stopped with
		 * net == NULL, see the sctp_timer_stop() table).
		 */
		tmr->net = NULL;
	} else {
		tmr->net = (void *)net;
	}
	tmr->self = (void *)tmr;
#if defined(__FreeBSD__) && !defined(__Userspace__)
	tmr->vnet = (void *)curvnet;
#endif
	tmr->ticks = sctp_get_tick_count();
	if (SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr) == 0) {
		SCTPDBG(SCTP_DEBUG_TIMER2,
		    "Timer type %d started: ticks=%u, inp=%p, stcb=%p, net=%p.\n",
		    t_type, to_ticks, inp, stcb, net);
		/*
		 * If this is a newly scheduled callout, as opposed to a
		 * rescheduled one, increment relevant reference counts.
		 */
		if (tmr->ep != NULL) {
			SCTP_INP_INCR_REF(inp);
		}
		if (tmr->tcb != NULL) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
		}
		if (tmr->net != NULL) {
			atomic_add_int(&net->ref_count, 1);
		}
	} else {
		/*
		 * This should not happen, since we checked for pending
		 * above.
		 */
		SCTPDBG(SCTP_DEBUG_TIMER2,
		    "Timer type %d restarted: ticks=%u, inp=%p, stcb=%p, net=%p.\n",
		    t_type, to_ticks, inp, stcb, net);
	}
	return;
}
2674
2675 /*-
2676 * The following table shows which parameters must be provided
2677 * when calling sctp_timer_stop(). For parameters not being
2678 * provided, NULL must be used.
2679 *
2680 * |Name |inp |stcb|net |
2681 * |-----------------------------|----|----|----|
2682 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes |
2683 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes |
2684 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No |
2685 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes |
2686 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes |
2687 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes |
2688 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No |
2689 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes |
2690 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes |
2691 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |No |
2692 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No |
2693 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No |
2694 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |No |
2695 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No |
2696 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No |
2697 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No |
2698 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No |
2699 *
2700 */
2701
/*
 * Stop a timer of type t_type for the objects given per the parameter
 * table above.  The caller must hold the lock matching the most specific
 * object passed (stcb lock, else inp write lock, else the address
 * work-queue lock); this is assert-checked below.  If the selected
 * sctp_timer slot is shared and currently running as a different type,
 * nothing is stopped.  When the callout is actually cancelled, the
 * inp/stcb/net references taken in sctp_timer_start() are released.
 * 'from' records the caller's location for debugging (tmr->stopped_from).
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/*
	 * Note: the message arguments are only evaluated when the check
	 * fails, in which case stcb is known to be non-NULL.
	 */
	KASSERT(stcb == NULL || stcb->sctp_ep == inp,
	    ("sctp_timer_stop of type %d: inp = %p, stcb->sctp_ep %p",
	    t_type, stcb, stcb->sctp_ep));
	if (stcb != NULL) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	} else if (inp != NULL) {
		SCTP_INP_WLOCK_ASSERT(inp);
	} else {
		SCTP_WQ_ADDR_LOCK_ASSERT();
	}
	tmr = NULL;
	/*
	 * Each case validates its required arguments (panicking under
	 * INVARIANTS) and selects the sctp_timer slot to cancel.
	 */
	switch (t_type) {
	case SCTP_TIMER_TYPE_SEND:
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		if ((inp == NULL) || (stcb != NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		/*
		 * NB: unlike sctp_timer_start(), the ASCONF timer is
		 * stopped with net == NULL (see the table above); the net
		 * the timer holds is released via tmr->net below.
		 */
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_change
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		if ((inp == NULL) || (stcb != NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/* Shares the stream-reset timer slot; see sctp_timer_start(). */
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		if ((inp != NULL) || (stcb != NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	default:
#ifdef INVARIANTS
		panic("Unknown timer type %d", t_type);
#else
		return;
#endif
	}
	KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type));
	if ((tmr->type != SCTP_TIMER_TYPE_NONE) &&
	    (tmr->type != t_type)) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped. So just
		 * return.
		 */
		SCTPDBG(SCTP_DEBUG_TIMER2,
		    "Shared timer type %d not running: inp=%p, stcb=%p, net=%p.\n",
		    t_type, inp, stcb, net);
		return;
	}
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		/* Undo the increment done in sctp_timer_start(); clamp at 0. */
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	tmr->self = NULL;
	tmr->stopped_from = from;
	/* SCTP_OS_TIMER_STOP returns 1 only if the callout was cancelled. */
	if (SCTP_OS_TIMER_STOP(&tmr->timer) == 1) {
		KASSERT(tmr->ep == inp,
		    ("sctp_timer_stop of type %d: inp = %p, tmr->inp = %p",
		    t_type, inp, tmr->ep));
		KASSERT(tmr->tcb == stcb,
		    ("sctp_timer_stop of type %d: stcb = %p, tmr->stcb = %p",
		    t_type, stcb, tmr->tcb));
		KASSERT(((t_type == SCTP_TIMER_TYPE_ASCONF) && (tmr->net != NULL)) ||
		    ((t_type != SCTP_TIMER_TYPE_ASCONF) && (tmr->net == net)),
		    ("sctp_timer_stop of type %d: net = %p, tmr->net = %p",
		    t_type, net, tmr->net));
		SCTPDBG(SCTP_DEBUG_TIMER2,
		    "Timer type %d stopped: inp=%p, stcb=%p, net=%p.\n",
		    t_type, inp, stcb, net);
		/*
		 * If the timer was actually stopped, decrement reference counts
		 * that were incremented in sctp_timer_start().
		 */
		if (tmr->ep != NULL) {
			SCTP_INP_DECR_REF(inp);
			tmr->ep = NULL;
		}
		if (tmr->tcb != NULL) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			tmr->tcb = NULL;
		}
		if (tmr->net != NULL) {
			/*
			 * Can't use net, since it doesn't work for
			 * SCTP_TIMER_TYPE_ASCONF.
			 */
			sctp_free_remote_addr((struct sctp_nets *)tmr->net);
			tmr->net = NULL;
		}
	} else {
		SCTPDBG(SCTP_DEBUG_TIMER2,
		    "Timer type %d not stopped: inp=%p, stcb=%p, net=%p.\n",
		    t_type, inp, stcb, net);
	}
	return;
}
2982
2983 uint32_t
sctp_calculate_len(struct mbuf * m)2984 sctp_calculate_len(struct mbuf *m)
2985 {
2986 uint32_t tlen = 0;
2987 struct mbuf *at;
2988
2989 at = m;
2990 while (at) {
2991 tlen += SCTP_BUF_LEN(at);
2992 at = SCTP_BUF_NEXT(at);
2993 }
2994 return (tlen);
2995 }
2996
2997 void
sctp_mtu_size_reset(struct sctp_inpcb * inp,struct sctp_association * asoc,uint32_t mtu)2998 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2999 struct sctp_association *asoc, uint32_t mtu)
3000 {
3001 /*
3002 * Reset the P-MTU size on this association, this involves changing
3003 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
3004 * allow the DF flag to be cleared.
3005 */
3006 struct sctp_tmit_chunk *chk;
3007 unsigned int eff_mtu, ovh;
3008
3009 asoc->smallest_mtu = mtu;
3010 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
3011 ovh = SCTP_MIN_OVERHEAD;
3012 } else {
3013 ovh = SCTP_MIN_V4_OVERHEAD;
3014 }
3015 eff_mtu = mtu - ovh;
3016 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
3017 if (chk->send_size > eff_mtu) {
3018 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
3019 }
3020 }
3021 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3022 if (chk->send_size > eff_mtu) {
3023 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
3024 }
3025 }
3026 }
3027
3028 /*
3029 * Given an association and starting time of the current RTT period, update
3030 * RTO in number of msecs. net should point to the current network.
3031 * Return 1, if an RTO update was performed, return 0 if no update was
3032 * performed due to invalid starting point.
3033 */
3034
int
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *old,
    int rtt_from_sack)
{
	struct timeval now;
	uint64_t rtt_us;	/* RTT in us */
	int32_t rtt;		/* RTT in ms */
	uint32_t new_rto;
	int first_measure = 0;

	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	if (stcb->asoc.use_precise_time) {
		(void)SCTP_GETPTIME_TIMEVAL(&now);
	} else {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	if ((old->tv_sec > now.tv_sec) ||
	    ((old->tv_sec == now.tv_sec) && (old->tv_usec > now.tv_usec))) {
		/* The starting point is in the future. */
		return (0);
	}
	/* now = now - old, i.e. the elapsed time of this measurement */
	timevalsub(&now, old);
	rtt_us = (uint64_t)1000000 * (uint64_t)now.tv_sec + (uint64_t)now.tv_usec;
	if (rtt_us > SCTP_RTO_UPPER_BOUND * 1000) {
		/* The RTT is larger than a sane value. */
		return (0);
	}
	/* store the current RTT in us */
	net->rtt = rtt_us;
	/* compute rtt in ms */
	rtt = (int32_t)(net->rtt / 1000);
	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
		/* Tell the CC module that a new update has just occurred from a sack */
		(*asoc->cc_functions.sctp_rtt_calculated)(stcb, net, &now);
	}
	/* Do we need to determine the lan? We do this only
	 * on sacks i.e. RTT being determined from data not
	 * non-data (HB/INIT->INITACK).
	 */
	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
			net->lan_type = SCTP_LAN_INTERNET;
		} else {
			net->lan_type = SCTP_LAN_LOCAL;
		}
	}

	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	/*-
	 * Compute the scaled average lastsa and the
	 * scaled variance lastsv as described in van Jacobson
	 * Paper "Congestion Avoidance and Control", Annex A.
	 *
	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
	 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar
	 */
	if (net->RTO_measured) {
		/*
		 * NOTE: 'rtt' is reused below as the error term
		 * (measurement minus smoothed value); statement order
		 * here is significant.
		 */
		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
		net->lastsa += rtt;
		if (rtt < 0) {
			rtt = -rtt;
		}
		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
		net->lastsv += rtt;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
	} else {
		/* First RTO measurement: seed the scaled srtt/rttvar. */
		net->RTO_measured = 1;
		first_measure = 1;
		net->lastsa = rtt << SCTP_RTT_SHIFT;
		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
	if (net->lastsv == 0) {
		/* Never allow a zero variance; fall back to the clock granularity. */
		net->lastsv = SCTP_CLOCK_GRANULARITY;
	}
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		/* A large RTO suggests a satellite-like path. */
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		/* RTO dropped back below the threshold; latch it off. */
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	net->RTO = new_rto;
	return (1);
}
3142
3143 /*
3144 * return a pointer to a contiguous piece of data from the given mbuf chain
3145 * starting at 'off' for 'len' bytes. If the desired piece spans more than
3146 * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
 * is >= 'len'. Returns NULL if there aren't 'len' bytes in the chain.
3148 */
3149 caddr_t
sctp_m_getptr(struct mbuf * m,int off,int len,uint8_t * in_ptr)3150 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
3151 {
3152 uint32_t count;
3153 uint8_t *ptr;
3154
3155 ptr = in_ptr;
3156 if ((off < 0) || (len <= 0))
3157 return (NULL);
3158
3159 /* find the desired start location */
3160 while ((m != NULL) && (off > 0)) {
3161 if (off < SCTP_BUF_LEN(m))
3162 break;
3163 off -= SCTP_BUF_LEN(m);
3164 m = SCTP_BUF_NEXT(m);
3165 }
3166 if (m == NULL)
3167 return (NULL);
3168
3169 /* is the current mbuf large enough (eg. contiguous)? */
3170 if ((SCTP_BUF_LEN(m) - off) >= len) {
3171 return (mtod(m, caddr_t) + off);
3172 } else {
3173 /* else, it spans more than one mbuf, so save a temp copy... */
3174 while ((m != NULL) && (len > 0)) {
3175 count = min(SCTP_BUF_LEN(m) - off, len);
3176 memcpy(ptr, mtod(m, caddr_t) + off, count);
3177 len -= count;
3178 ptr += count;
3179 off = 0;
3180 m = SCTP_BUF_NEXT(m);
3181 }
3182 if ((m == NULL) && (len > 0))
3183 return (NULL);
3184 else
3185 return ((caddr_t)in_ptr);
3186 }
3187 }
3188
3189 struct sctp_paramhdr *
sctp_get_next_param(struct mbuf * m,int offset,struct sctp_paramhdr * pull,int pull_limit)3190 sctp_get_next_param(struct mbuf *m,
3191 int offset,
3192 struct sctp_paramhdr *pull,
3193 int pull_limit)
3194 {
3195 /* This just provides a typed signature to Peter's Pull routine */
3196 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
3197 (uint8_t *) pull));
3198 }
3199
3200 struct mbuf *
sctp_add_pad_tombuf(struct mbuf * m,int padlen)3201 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
3202 {
3203 struct mbuf *m_last;
3204 caddr_t dp;
3205
3206 if (padlen > 3) {
3207 return (NULL);
3208 }
3209 if (padlen <= M_TRAILINGSPACE(m)) {
3210 /*
3211 * The easy way. We hope the majority of the time we hit
3212 * here :)
3213 */
3214 m_last = m;
3215 } else {
3216 /* Hard way we must grow the mbuf chain */
3217 m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
3218 if (m_last == NULL) {
3219 return (NULL);
3220 }
3221 SCTP_BUF_LEN(m_last) = 0;
3222 SCTP_BUF_NEXT(m_last) = NULL;
3223 SCTP_BUF_NEXT(m) = m_last;
3224 }
3225 dp = mtod(m_last, caddr_t) + SCTP_BUF_LEN(m_last);
3226 SCTP_BUF_LEN(m_last) += padlen;
3227 memset(dp, 0, padlen);
3228 return (m_last);
3229 }
3230
3231 struct mbuf *
sctp_pad_lastmbuf(struct mbuf * m,int padval,struct mbuf * last_mbuf)3232 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
3233 {
3234 /* find the last mbuf in chain and pad it */
3235 struct mbuf *m_at;
3236
3237 if (last_mbuf != NULL) {
3238 return (sctp_add_pad_tombuf(last_mbuf, padval));
3239 } else {
3240 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3241 if (SCTP_BUF_NEXT(m_at) == NULL) {
3242 return (sctp_add_pad_tombuf(m_at, padval));
3243 }
3244 }
3245 }
3246 return (NULL);
3247 }
3248
/*
 * Deliver an SCTP_ASSOC_CHANGE notification for 'state' to the socket's
 * receive queue (if the application enabled SCTP_PCB_FLAGS_RECVASSOCEVNT).
 * For 1-to-1 style sockets, COMM_LOST / CANT_STR_ASSOC additionally sets
 * so_error and wakes any sleepers.  'abort', when non-NULL, carries the
 * ABORT chunk to append to the notification.  'so_locked' tells the
 * Apple path whether the socket lock is already held.
 */
static void
sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
    uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;
	unsigned int notif_len;
	uint16_t abort_len;
	unsigned int i;
#if defined(__APPLE__) && !defined(__Userspace__)
	struct socket *so;
#endif

	if (stcb == NULL) {
		return;
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
		if (abort != NULL) {
			abort_len = ntohs(abort->ch.chunk_length);
			/*
			 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be
			 * contiguous.
			 */
			if (abort_len > SCTP_CHUNK_BUFFER_SIZE) {
				abort_len = SCTP_CHUNK_BUFFER_SIZE;
			}
		} else {
			abort_len = 0;
		}
		/* Reserve space for the variable-length trailer. */
		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
			notif_len += abort_len;
		}
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			/* Retry with smaller value. */
			notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
			if (m_notify == NULL) {
				/* Even the bare notification failed: still report the error below. */
				goto set_error;
			}
		}
		SCTP_BUF_NEXT(m_notify) = NULL;
		sac = mtod(m_notify, struct sctp_assoc_change *);
		memset(sac, 0, notif_len);
		sac->sac_type = SCTP_ASSOC_CHANGE;
		sac->sac_flags = 0;
		sac->sac_length = sizeof(struct sctp_assoc_change);
		sac->sac_state = state;
		sac->sac_error = error;
		/* XXX verify these stream counts */
		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
		sac->sac_inbound_streams = stcb->asoc.streamincnt;
		sac->sac_assoc_id = sctp_get_associd(stcb);
		if (notif_len > sizeof(struct sctp_assoc_change)) {
			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
				/* Append one feature byte per negotiated capability. */
				i = 0;
				if (stcb->asoc.prsctp_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
				}
				if (stcb->asoc.auth_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
				}
				if (stcb->asoc.asconf_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
				}
				if (stcb->asoc.idata_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
				}
				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
				if (stcb->asoc.reconfig_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
				}
				sac->sac_length += i;
			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
				/* Append the (possibly truncated) ABORT chunk. */
				memcpy(sac->sac_info, abort, abort_len);
				sac->sac_length += abort_len;
			}
		}
		SCTP_BUF_LEN(m_notify) = sac->sac_length;
		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
		    0, 0, stcb->asoc.context, 0, 0, 0,
		    m_notify);
		if (control != NULL) {
			control->length = SCTP_BUF_LEN(m_notify);
			control->spec_flags = M_NOTIFICATION;
			/* not that we need this */
			control->tail_mbuf = m_notify;
			sctp_add_to_readq(stcb->sctp_ep, stcb,
			    control,
			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
			    so_locked);
		} else {
			sctp_m_freem(m_notify);
		}
	}
	/*
	 * For 1-to-1 style sockets, we send up an error when an ABORT
	 * comes in.
	 */
set_error:
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		SOCK_LOCK(stcb->sctp_socket);
		if (from_peer) {
			/* Peer-initiated failure: refused vs. reset. */
			if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
				stcb->sctp_socket->so_error = ECONNREFUSED;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
				stcb->sctp_socket->so_error = ECONNRESET;
			}
		} else {
			/* Locally detected failure: timeout vs. abort. */
			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
			    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
				stcb->sctp_socket->so_error = ETIMEDOUT;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
				stcb->sctp_socket->so_error = ECONNABORTED;
			}
		}
		SOCK_UNLOCK(stcb->sctp_socket);
	}
	/* Wake ANY sleepers */
#if defined(__APPLE__) && !defined(__Userspace__)
	/*
	 * Apple: take the socket lock if the caller does not already
	 * hold it; the refcount bump keeps the stcb alive across the
	 * unlock/lock window.
	 */
	so = SCTP_INP_SO(stcb->sctp_ep);
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
	}
#endif
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		socantrcvmore(stcb->sctp_socket);
	}
	sorwakeup(stcb->sctp_socket);
	sowwakeup(stcb->sctp_socket);
#if defined(__APPLE__) && !defined(__Userspace__)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
3405
/*
 * Queue an SCTP_PEER_ADDR_CHANGE notification for peer address 'sa'
 * entering 'state' (with 'error' as the cause), if the application
 * enabled SCTP_PCB_FLAGS_RECVPADDREVNT.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error, int so_locked)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	memset(spc, 0, sizeof(struct sctp_paddr_change));
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	/* Copy the address, normalizing it for user consumption. */
	switch (sa->sa_family) {
#ifdef INET
	case AF_INET:
#ifdef INET6
		/* Present a v4-mapped v6 address if the app asked for it. */
		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
			    (struct sockaddr_in6 *)&spc->spc_aaddr);
		} else {
			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		}
#else
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
#endif
		break;
#endif
#ifdef INET6
	case AF_INET6:
	{
#ifdef SCTP_EMBEDDED_V6_SCOPE
		struct sockaddr_in6 *sin6;
#endif /* SCTP_EMBEDDED_V6_SCOPE */
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

#ifdef SCTP_EMBEDDED_V6_SCOPE
		/*
		 * Link-local addresses: either recover the scope id
		 * or strip the kernel-embedded one, so the user sees
		 * a conventional address.
		 */
		sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
		if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
			if (sin6->sin6_scope_id == 0) {
				/* recover scope_id for user */
#ifdef SCTP_KAME
				(void)sa6_recoverscope(sin6);
#else
				(void)in6_recoverscope(sin6, &sin6->sin6_addr,
				    NULL);
#endif
			} else {
				/* clear embedded scope_id for user */
				in6_clearscope(&sin6->sin6_addr);
			}
		}
#endif /* SCTP_EMBEDDED_V6_SCOPE */
		break;
	}
#endif
#if defined(__Userspace__)
	case AF_CONN:
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_conn));
		break;
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
3506
/*
 * Queue an SCTP_SEND_FAILED (old API) or SCTP_SEND_FAILED_EVENT (new
 * API) notification for a chunk that could not be (re)transmitted.
 * Ownership of chk->data is transferred into the notification; the
 * chunk header and padding are trimmed off so the user sees only the
 * payload.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	struct sctp_chunkhdr *chkhdr;
	int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	     sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}

	/* The new-style event takes precedence if enabled. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		notifhdr_len = sizeof(struct sctp_send_failed_event);
	} else {
		notifhdr_len = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = notifhdr_len;
	if (stcb->asoc.idata_supported) {
		chkhdr_len = sizeof(struct sctp_idata_chunk);
	} else {
		chkhdr_len = sizeof(struct sctp_data_chunk);
	}
	/* Use some defaults in case we can't access the chunk header */
	if (chk->send_size >= chkhdr_len) {
		payload_len = chk->send_size - chkhdr_len;
	} else {
		payload_len = 0;
	}
	padding_len = 0;
	if (chk->data != NULL) {
		/* Refine payload/padding from the actual chunk header. */
		chkhdr = mtod(chk->data, struct sctp_chunkhdr *);
		if (chkhdr != NULL) {
			chk_len = ntohs(chkhdr->chunk_length);
			if ((chk_len >= chkhdr_len) &&
			    (chk->send_size >= chk_len) &&
			    (chk->send_size - chk_len < 4)) {
				padding_len = chk->send_size - chk_len;
				payload_len = chk->send_size - chkhdr_len - padding_len;
			}
		}
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, notifhdr_len);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		if (sent) {
			ssfe->ssfe_flags = SCTP_DATA_SENT;
		} else {
			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		}
		ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len);
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = chk->rec.data.sid;
		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
		ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid;
		ssfe->ssfe_info.snd_context = chk->rec.data.context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
	} else {
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, notifhdr_len);
		ssf->ssf_type = SCTP_SEND_FAILED;
		if (sent) {
			ssf->ssf_flags = SCTP_DATA_SENT;
		} else {
			ssf->ssf_flags = SCTP_DATA_UNSENT;
		}
		ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len);
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssf->ssf_info.sinfo_stream = chk->rec.data.sid;
		ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid;
		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
		ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid;
		ssf->ssf_info.sinfo_context = chk->rec.data.context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
	}
	if (chk->data != NULL) {
		/* Trim off the sctp chunk header (it should be there) */
		if (chk->send_size == chkhdr_len + payload_len + padding_len) {
			m_adj(chk->data, chkhdr_len);
			m_adj(chk->data, -padding_len);
			sctp_mbuf_crush(chk->data);
			chk->send_size -= (chkhdr_len + padding_len);
		}
	}
	SCTP_BUF_NEXT(m_notify) = chk->data;
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
3637
/*
 * Like sctp_notify_send_failed(), but for a stream-queue pending message
 * ('sp') that never became a chunk.  The notification always carries
 * SCTP_DATA_UNSENT; ownership of sp->data is transferred into it.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int notifhdr_len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	     sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	/* The new-style event takes precedence if enabled. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		notifhdr_len = sizeof(struct sctp_send_failed_event);
	} else {
		notifhdr_len = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* no space left */
		return;
	}
	SCTP_BUF_LEN(m_notify) = notifhdr_len;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, notifhdr_len);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length);
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = sp->sid;
		/* some_taken: part of the message already went out as fragments. */
		if (sp->some_taken) {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
		}
		ssfe->ssfe_info.snd_ppid = sp->ppid;
		ssfe->ssfe_info.snd_context = sp->context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
	} else {
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, notifhdr_len);
		ssf->ssf_type = SCTP_SEND_FAILED;
		ssf->ssf_flags = SCTP_DATA_UNSENT;
		ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length);
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssf->ssf_info.sinfo_stream = sp->sid;
		ssf->ssf_info.sinfo_ssn = 0;
		if (sp->some_taken) {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
		}
		ssf->ssf_info.sinfo_ppid = sp->ppid;
		ssf->ssf_info.sinfo_context = sp->context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
	}
	SCTP_BUF_NEXT(m_notify) = sp->data;

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
3733
3734 static void
sctp_notify_adaptation_layer(struct sctp_tcb * stcb)3735 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3736 {
3737 struct mbuf *m_notify;
3738 struct sctp_adaptation_event *sai;
3739 struct sctp_queued_to_read *control;
3740
3741 if ((stcb == NULL) ||
3742 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3743 /* event not enabled */
3744 return;
3745 }
3746
3747 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
3748 if (m_notify == NULL)
3749 /* no space left */
3750 return;
3751 SCTP_BUF_LEN(m_notify) = 0;
3752 sai = mtod(m_notify, struct sctp_adaptation_event *);
3753 memset(sai, 0, sizeof(struct sctp_adaptation_event));
3754 sai->sai_type = SCTP_ADAPTATION_INDICATION;
3755 sai->sai_flags = 0;
3756 sai->sai_length = sizeof(struct sctp_adaptation_event);
3757 sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3758 sai->sai_assoc_id = sctp_get_associd(stcb);
3759
3760 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3761 SCTP_BUF_NEXT(m_notify) = NULL;
3762
3763 /* append to socket */
3764 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3765 0, 0, stcb->asoc.context, 0, 0, 0,
3766 m_notify);
3767 if (control == NULL) {
3768 /* no memory */
3769 sctp_m_freem(m_notify);
3770 return;
3771 }
3772 control->length = SCTP_BUF_LEN(m_notify);
3773 control->spec_flags = M_NOTIFICATION;
3774 /* not that we need this */
3775 control->tail_mbuf = m_notify;
3776 sctp_add_to_readq(stcb->sctp_ep, stcb,
3777 control,
3778 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3779 }
3780
3781 /* This always must be called with the read-queue LOCKED in the INP */
/*
 * Queue an SCTP_PARTIAL_DELIVERY_EVENT notification.  'val' packs the
 * stream id in the upper 16 bits and the sequence number in the lower
 * 16 bits.  Caller must hold the INP read-queue lock (see the comment
 * above); the entry is inserted directly into the read queue rather
 * than via sctp_add_to_readq().
 */
static void
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
    uint32_t val, int so_locked)
{
	struct mbuf *m_notify;
	struct sctp_pdapi_event *pdapi;
	struct sctp_queued_to_read *control;
	struct sockbuf *sb;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
		/* event not enabled */
		return;
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* Nobody can read the notification; skip it. */
		return;
	}

	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
	pdapi->pdapi_flags = 0;
	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
	pdapi->pdapi_indication = error;
	/* Unpack stream/seq from the caller-packed 'val'. */
	pdapi->pdapi_stream = (val >> 16);
	pdapi->pdapi_seq = (val & 0x0000ffff);
	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sb = &stcb->sctp_socket->so_rcv;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
	}
	sctp_sballoc(stcb, sb, m_notify);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
	}
	control->end_added = 1;
	/* Place the event right after the partially-delivered message. */
	if (stcb->asoc.control_pdapi)
		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
	else {
		/* we really should not see this case */
		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
	}
	if (stcb->sctp_ep && stcb->sctp_socket) {
		/* This should always be the case */
#if defined(__APPLE__) && !defined(__Userspace__)
		/*
		 * Apple: acquire the socket lock if not already held;
		 * the refcount bump keeps the stcb alive across the
		 * unlock/lock window.
		 */
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) && !defined(__Userspace__)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
3870
/*
 * Queue an SCTP_SHUTDOWN_EVENT notification on the socket's read queue,
 * telling the ULP that a SHUTDOWN has been received from the peer.  For
 * one-to-one style (TCP model) and connected one-to-many sockets the
 * socket is additionally marked as unable to send.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined(__APPLE__) && !defined(__Userspace__)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		/*
		 * Apple: drop the TCB lock before taking the socket lock
		 * (and retake it afterwards); hold a refcount across the
		 * gap so the TCB cannot be freed underneath us.
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* Socket went away while the locks were dropped. */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined(__APPLE__) && !defined(__Userspace__)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}

	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	/* Build the fixed-size sctp_shutdown_event in the mbuf. */
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	memset(sse, 0, sizeof(struct sctp_shutdown_event));
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3940
3941 static void
sctp_notify_sender_dry_event(struct sctp_tcb * stcb,int so_locked)3942 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3943 int so_locked)
3944 {
3945 struct mbuf *m_notify;
3946 struct sctp_sender_dry_event *event;
3947 struct sctp_queued_to_read *control;
3948
3949 if ((stcb == NULL) ||
3950 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3951 /* event not enabled */
3952 return;
3953 }
3954
3955 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3956 if (m_notify == NULL) {
3957 /* no space left */
3958 return;
3959 }
3960 SCTP_BUF_LEN(m_notify) = 0;
3961 event = mtod(m_notify, struct sctp_sender_dry_event *);
3962 memset(event, 0, sizeof(struct sctp_sender_dry_event));
3963 event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3964 event->sender_dry_flags = 0;
3965 event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3966 event->sender_dry_assoc_id = sctp_get_associd(stcb);
3967
3968 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3969 SCTP_BUF_NEXT(m_notify) = NULL;
3970
3971 /* append to socket */
3972 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3973 0, 0, stcb->asoc.context, 0, 0, 0,
3974 m_notify);
3975 if (control == NULL) {
3976 /* no memory */
3977 sctp_m_freem(m_notify);
3978 return;
3979 }
3980 control->length = SCTP_BUF_LEN(m_notify);
3981 control->spec_flags = M_NOTIFICATION;
3982 /* not that we need this */
3983 control->tail_mbuf = m_notify;
3984 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3985 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3986 }
3987
/*
 * Queue an SCTP_STREAM_CHANGE_EVENT notification reporting the new
 * number of inbound ('numberin') and outbound ('numberout') streams,
 * with 'flag' carrying the SCTP_STREAM_CHANGE result bits.  When the
 * peer originated the request, the local user is not notified.
 */
void
sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
{
	struct mbuf *m_notify;
	struct sctp_queued_to_read *control;
	struct sctp_stream_change_event *stradd;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
		/* event not enabled */
		return;
	}
	if ((stcb->asoc.peer_req_out) && flag) {
		/* Peer made the request, don't tell the local user */
		stcb->asoc.peer_req_out = 0;
		return;
	}
	/* Consume the peer-request marker either way. */
	stcb->asoc.peer_req_out = 0;
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	stradd = mtod(m_notify, struct sctp_stream_change_event *);
	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
	stradd->strchange_flags = flag;
	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
	stradd->strchange_assoc_id = sctp_get_associd(stcb);
	stradd->strchange_instrms = numberin;
	stradd->strchange_outstrms = numberout;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* no space */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
4043
4044 void
sctp_notify_stream_reset_tsn(struct sctp_tcb * stcb,uint32_t sending_tsn,uint32_t recv_tsn,int flag)4045 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
4046 {
4047 struct mbuf *m_notify;
4048 struct sctp_queued_to_read *control;
4049 struct sctp_assoc_reset_event *strasoc;
4050
4051 if ((stcb == NULL) ||
4052 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
4053 /* event not enabled */
4054 return;
4055 }
4056 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
4057 if (m_notify == NULL)
4058 /* no space left */
4059 return;
4060 SCTP_BUF_LEN(m_notify) = 0;
4061 strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
4062 memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
4063 strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
4064 strasoc->assocreset_flags = flag;
4065 strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
4066 strasoc->assocreset_assoc_id= sctp_get_associd(stcb);
4067 strasoc->assocreset_local_tsn = sending_tsn;
4068 strasoc->assocreset_remote_tsn = recv_tsn;
4069 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
4070 SCTP_BUF_NEXT(m_notify) = NULL;
4071 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
4072 /* no space */
4073 sctp_m_freem(m_notify);
4074 return;
4075 }
4076 /* append to socket */
4077 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
4078 0, 0, stcb->asoc.context, 0, 0, 0,
4079 m_notify);
4080 if (control == NULL) {
4081 /* no memory */
4082 sctp_m_freem(m_notify);
4083 return;
4084 }
4085 control->length = SCTP_BUF_LEN(m_notify);
4086 control->spec_flags = M_NOTIFICATION;
4087 /* not that we need this */
4088 control->tail_mbuf = m_notify;
4089 sctp_add_to_readq(stcb->sctp_ep, stcb,
4090 control,
4091 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
4092 }
4093
/*
 * Queue an SCTP_STREAM_RESET_EVENT notification for the given list of
 * stream ids.  'flag' carries the SCTP_STREAM_RESET_* direction/result
 * bits; 'list' entries arrive in network byte order (converted with
 * ntohs below) and may be NULL when number_entries is 0.
 */
static void
sctp_notify_stream_reset(struct sctp_tcb *stcb,
    int number_entries, uint16_t * list, int flag)
{
	struct mbuf *m_notify;
	struct sctp_queued_to_read *control;
	struct sctp_stream_reset_event *strreset;
	int len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
		/* event not enabled */
		return;
	}

	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	/* Event size is variable: header plus one uint16_t per stream id. */
	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
	if (len > M_TRAILINGSPACE(m_notify)) {
		/* never enough room */
		sctp_m_freem(m_notify);
		return;
	}
	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
	memset(strreset, 0, len);
	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
	strreset->strreset_flags = flag;
	strreset->strreset_length = len;
	strreset->strreset_assoc_id = sctp_get_associd(stcb);
	if (number_entries) {
		int i;

		/* Copy the stream ids, converting to host byte order. */
		for (i = 0; i < number_entries; i++) {
			strreset->strreset_stream_list[i] = ntohs(list[i]);
		}
	}
	SCTP_BUF_LEN(m_notify) = len;
	SCTP_BUF_NEXT(m_notify) = NULL;
	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* no space */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
4157
/*
 * Queue an SCTP_REMOTE_ERROR notification carrying the error cause code
 * and, when 'chunk' is non-NULL, a copy of (the contiguous prefix of)
 * the received ERROR chunk itself.
 */
static void
sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
{
	struct mbuf *m_notify;
	struct sctp_remote_error *sre;
	struct sctp_queued_to_read *control;
	unsigned int notif_len;
	uint16_t chunk_len;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
		/* No TCB or event not enabled. */
		return;
	}
	if (chunk != NULL) {
		chunk_len = ntohs(chunk->ch.chunk_length);
		/*
		 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be
		 * contiguous.
		 */
		if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) {
			chunk_len = SCTP_CHUNK_BUFFER_SIZE;
		}
	} else {
		chunk_len = 0;
	}
	notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* Retry with smaller value. */
		notif_len = (unsigned int)sizeof(struct sctp_remote_error);
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			return;
		}
	}
	SCTP_BUF_NEXT(m_notify) = NULL;
	sre = mtod(m_notify, struct sctp_remote_error *);
	memset(sre, 0, notif_len);
	sre->sre_type = SCTP_REMOTE_ERROR;
	sre->sre_flags = 0;
	sre->sre_length = sizeof(struct sctp_remote_error);
	sre->sre_error = error;
	sre->sre_assoc_id = sctp_get_associd(stcb);
	/*
	 * Only append the chunk data if the full-size allocation succeeded
	 * (the fallback allocation has room for the header only).
	 */
	if (notif_len > sizeof(struct sctp_remote_error)) {
		memcpy(sre->sre_data, chunk, chunk_len);
		sre->sre_length += chunk_len;
	}
	SCTP_BUF_LEN(m_notify) = sre->sre_length;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control != NULL) {
		control->length = SCTP_BUF_LEN(m_notify);
		control->spec_flags = M_NOTIFICATION;
		/* not that we need this */
		control->tail_mbuf = m_notify;
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
	} else {
		/* No memory for the read-queue entry. */
		sctp_m_freem(m_notify);
	}
}
4222
/*
 * Central notification dispatcher: translate an SCTP_NOTIFY_* code into
 * the matching socket-level notification and queue it to the ULP.
 *
 * 'data' is interpreted per notification type: a struct sctp_nets * for
 * interface events, a chunk/stream-queue-pending pointer for send
 * failures, a uint16_t stream-id list for stream resets, an address for
 * ASCONF events, a key id for AUTH events, etc.  Does nothing once the
 * socket is gone or can no longer receive.
 */
void
sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked)
{
	if ((stcb == NULL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* If the socket is gone we are out of here */
		return;
	}
	/* No point queueing an event the user can never read. */
#if (defined(__FreeBSD__) || defined(_WIN32)) && !defined(__Userspace__)
	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
#else
	if (stcb->sctp_socket->so_state & SS_CANTRCVMORE) {
#endif
		return;
	}
#if defined(__APPLE__) && !defined(__Userspace__)
	/* Verify the caller's claim about the socket lock state. */
	if (so_locked) {
		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
	} else {
		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
	}
#endif
	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
	    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
			/* Don't report these in front states */
			return;
		}
	}
	switch (notification) {
	case SCTP_NOTIFY_ASSOC_UP:
		/* Report COMM_UP only once per association. */
		if (stcb->asoc.assoc_up_sent == 0) {
			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
			stcb->asoc.assoc_up_sent = 1;
		}
		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
			sctp_notify_adaptation_layer(stcb);
		}
		if (stcb->asoc.auth_supported == 0) {
			/* Recurse to report that the peer lacks AUTH support. */
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_DOWN:
		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
#if defined(__Userspace__)
		/*
		 * Userspace stack: signal shutdown via the registered
		 * receive callback (a zero-length read) instead of socket
		 * state.  The TCB lock is dropped around the callback.
		 */
		if (stcb->sctp_ep->recv_callback) {
			if (stcb->sctp_socket) {
				union sctp_sockstore addr;
				struct sctp_rcvinfo rcv;

				memset(&addr, 0, sizeof(union sctp_sockstore));
				memset(&rcv, 0, sizeof(struct sctp_rcvinfo));
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				stcb->sctp_ep->recv_callback(stcb->sctp_socket, addr, NULL, 0, rcv, 0, stcb->sctp_ep->ulp_info);
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
		}
#endif
		break;
	case SCTP_NOTIFY_INTERFACE_DOWN:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_UP:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
		sctp_notify_send_failed2(stcb, error,
		    (struct sctp_stream_queue_pending *)data, so_locked);
		break;
	case SCTP_NOTIFY_SENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 1, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_UNSENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 0, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	/* NOTE: "DELVIERY" misspelling is part of the public constant name. */
	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
		{
			uint32_t val;
			val = *((uint32_t *)data);

			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
			break;
		}
	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
		/* Locally-generated abort; front states map to CANT_STR_ASSOC. */
		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
		    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
		/* Abort received from the peer (last arg 1 = from peer). */
		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
		    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_RESTART:
		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
		if (stcb->asoc.auth_supported == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	/* For stream-reset cases, 'error' carries the number of entries. */
	case SCTP_NOTIFY_STR_RESET_SEND:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
		break;
	case SCTP_NOTIFY_STR_RESET_RECV:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN|SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN|SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_INCOMING|SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_INCOMING|SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_ASCONF_ADD_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_DELETE_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_PEER_SHUTDOWN:
		sctp_notify_shutdown_event(stcb);
		break;
	/* For AUTH cases, 'data' smuggles the key id as an integer. */
	case SCTP_NOTIFY_AUTH_NEW_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_AUTH_FREE_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_NO_PEER_AUTH:
		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_SENDER_DRY:
		sctp_notify_sender_dry_event(stcb, so_locked);
		break;
	case SCTP_NOTIFY_REMOTE_ERROR:
		sctp_notify_remote_error(stcb, error, data);
		break;
	default:
		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
		    __func__, notification, notification);
		break;
	} /* end switch */
}
4424
/*
 * Report (as send failures) and free every chunk still sitting on the
 * association's sent queue, send queue, and per-stream output queues.
 * Used when the association is being torn down so the ULP learns which
 * data never made it.  Does nothing if the association or socket is
 * already being freed.
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int so_locked)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
#if defined(__APPLE__) && !defined(__Userspace__)
	/* Verify the caller's claim about the socket lock state. */
	if (so_locked) {
		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
	} else {
		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
	}
#endif
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* now through all the gunk freeing chunks */
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
			/*
			 * Keep the per-stream accounting in sync; with
			 * INVARIANTS a zero count here is a fatal bug.
			 */
			if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
				asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
			} else {
				panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
			}
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/* Tell the ULP this sent datagram failed. */
			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			/* Re-check: the notify path may have consumed the data. */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/*sa_ignore FREED_MEMORY*/
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
			asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
		} else {
			panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/* These chunks were never sent at all. */
			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/*sa_ignore FREED_MEMORY*/
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, 1);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    error, (void *)sp, so_locked);
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
					sp->tail_mbuf = NULL;
					sp->length = 0;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/*sa_ignore FREED_MEMORY*/
		}
	}
}
4532
/*
 * Notify the ULP that the association was aborted.  'from_peer' selects
 * between a peer-originated (ABORT received) and a locally-originated
 * notification; 'abort' optionally carries the ABORT chunk.  Also marks
 * one-to-one style connected sockets as having been aborted and reports
 * all still-outbound data as failed.
 */
void
sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
    struct sctp_abort_chunk *abort, int so_locked)
{
	if (stcb == NULL) {
		return;
	}
#if defined(__APPLE__) && !defined(__Userspace__)
	/* Verify the caller's claim about the socket lock state. */
	if (so_locked) {
		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
	} else {
		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
	}
#endif
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* Hold the send lock while flushing outbound queues. */
	SCTP_TCB_SEND_LOCK(stcb);
	SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED);
	/* Tell them we lost the asoc */
	sctp_report_all_outbound(stcb, error, so_locked);
	SCTP_TCB_SEND_UNLOCK(stcb);
	if (from_peer) {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
	} else {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
	}
}
4568
/*
 * Send an ABORT for the given packet/association and, when a TCB
 * exists, notify the ULP and free the association.  'stcb' may be NULL
 * (out-of-the-blue abort); in that case only the ABORT is sent, using
 * vtag 0.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct mbuf *op_err,
#if defined(__FreeBSD__) && !defined(__Userspace__)
    uint8_t mflowtype, uint32_t mflowid,
#endif
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;
#if defined(__APPLE__) && !defined(__Userspace__)
	struct socket *so;
#endif

	vtag = 0;
	if (stcb != NULL) {
		/* Use the association's verification tag and VRF. */
		vtag = stcb->asoc.peer_vtag;
		vrf_id = stcb->asoc.vrf_id;
	}
	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
#if defined(__FreeBSD__) && !defined(__Userspace__)
	    mflowtype, mflowid, inp->fibnum,
#endif
	    vrf_id, port);
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
		/* Ok, now lets free it */
#if defined(__APPLE__) && !defined(__Userspace__)
		/*
		 * Apple: acquire the socket lock (dropping and retaking the
		 * TCB lock, with a refcount held) before freeing the asoc.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		/* Established associations no longer count as established. */
		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined(__APPLE__) && !defined(__Userspace__)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
#ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the inbound and outbound TSN tracking logs for an
 * association.  Each log is a circular buffer; when it has wrapped, the
 * entries from the current index to the end are printed first, then the
 * entries from the start up to the current index.
 *
 * NOTE(review): the body is guarded by "NOSIY_PRINTS", which looks like
 * a misspelling of "NOISY_PRINTS"; as written the function compiles to
 * a no-op unless that exact macro is defined -- confirm intent before
 * relying on this output.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;
	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	if (stcb->asoc.tsn_in_wrapped) {
		/* Older entries: current index through the end of the ring. */
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		/* Newer entries: start of the ring up to the current index. */
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
#endif
4679
/*
 * Abort an existing association: send an ABORT chunk (carrying op_err)
 * to the peer, notify the ULP unless the socket is gone, and free the
 * association.  When 'stcb' is NULL and the socket is already gone with
 * no remaining associations, the inp itself is freed instead.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *op_err,
    int so_locked)
{
#if defined(__APPLE__) && !defined(__Userspace__)
	struct socket *so;
#endif

#if defined(__APPLE__) && !defined(__Userspace__)
	so = SCTP_INP_SO(inp);
#endif
#if defined(__APPLE__) && !defined(__Userspace__)
	/* Verify the caller's claim about the socket lock state. */
	if (so_locked) {
		sctp_lock_assert(SCTP_INP_SO(inp));
	} else {
		sctp_unlock_assert(SCTP_INP_SO(inp));
	}
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
				/* Last association gone: release the inp. */
#if defined(__APPLE__) && !defined(__Userspace__)
				if (!so_locked) {
					SCTP_SOCKET_LOCK(so, 1);
				}
#endif
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
#if defined(__APPLE__) && !defined(__Userspace__)
				if (!so_locked) {
					SCTP_SOCKET_UNLOCK(so, 1);
				}
#endif
			}
		}
		return;
	}
	/* notify the peer */
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) && !defined(__Userspace__)
	/*
	 * Apple: take the socket lock before freeing the asoc, dropping
	 * and retaking the TCB lock with a refcount held across the gap.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined(__APPLE__) && !defined(__Userspace__)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
4751
/*
 * Handle an "out of the blue" packet (one that matches no existing
 * association).  Walks the chunk list to decide whether a response is
 * appropriate: no reply to PACKET_DROPPED, ABORT, or SHUTDOWN_COMPLETE;
 * a SHUTDOWN_ACK gets a SHUTDOWN_COMPLETE; anything else gets an ABORT,
 * subject to the sctp_blackhole sysctl (1 suppresses replies to packets
 * containing an INIT, 2 suppresses all replies).
 */
void
sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct sctp_inpcb *inp,
    struct mbuf *cause,
#if defined(__FreeBSD__) && !defined(__Userspace__)
    uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
#endif
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;
	int contains_init_chunk;

	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
	/* Generate a TO address for future reference */
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
			/* Socket gone and no associations left: free the inp. */
#if defined(__APPLE__) && !defined(__Userspace__)
			SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 1);
#endif
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_DIRECTLY_NOCMPSET);
#if defined(__APPLE__) && !defined(__Userspace__)
			SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 1);
#endif
		}
	}
	contains_init_chunk = 0;
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
	    sizeof(*ch), (uint8_t *) & chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_INIT:
			/* Remember for the blackhole decision below. */
			contains_init_chunk = 1;
			break;
		case SCTP_PACKET_DROPPED:
			/* we don't respond to pkt-dropped */
			return;
		case SCTP_ABORT_ASSOCIATION:
			/* we don't respond with an ABORT to an ABORT */
			return;
		case SCTP_SHUTDOWN_COMPLETE:
			/*
			 * we ignore it since we are not waiting for it and
			 * peer is gone
			 */
			return;
		case SCTP_SHUTDOWN_ACK:
			sctp_send_shutdown_complete2(src, dst, sh,
#if defined(__FreeBSD__) && !defined(__Userspace__)
			    mflowtype, mflowid, fibnum,
#endif
			    vrf_id, port);
			return;
		default:
			break;
		}
		/* Advance to the next chunk (chunks are 4-byte aligned). */
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *) & chunk_buf);
	}
	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
	     (contains_init_chunk == 0))) {
		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
#if defined(__FreeBSD__) && !defined(__Userspace__)
		    mflowtype, mflowid, fibnum,
#endif
		    vrf_id, port);
	}
}
4829
4830 /*
4831 * check the inbound datagram to make sure there is not an abort inside it,
4832 * if there is return 1, else return 0.
4833 */
4834 int
4835 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4836 {
4837 struct sctp_chunkhdr *ch;
4838 struct sctp_init_chunk *init_chk, chunk_buf;
4839 int offset;
4840 unsigned int chk_length;
4841
4842 offset = iphlen + sizeof(struct sctphdr);
4843 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4844 (uint8_t *) & chunk_buf);
4845 while (ch != NULL) {
4846 chk_length = ntohs(ch->chunk_length);
4847 if (chk_length < sizeof(*ch)) {
4848 /* packet is probably corrupt */
4849 break;
4850 }
4851 /* we seem to be ok, is it an abort? */
4852 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4853 /* yep, tell them */
4854 return (1);
4855 }
4856 if (ch->chunk_type == SCTP_INITIATION) {
4857 /* need to update the Vtag */
4858 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4859 offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4860 if (init_chk != NULL) {
4861 *vtagfill = ntohl(init_chk->init.initiate_tag);
4862 }
4863 }
4864 /* Nope, move to the next chunk */
4865 offset += SCTP_SIZE32(chk_length);
4866 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4867 sizeof(*ch), (uint8_t *) & chunk_buf);
4868 }
4869 return (0);
4870 }
4871
4872 /*
4873 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4874 * set (i.e. it's 0) so, create this function to compare link local scopes
4875 */
4876 #ifdef INET6
uint32_t
sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
{
#if defined(__Userspace__)
	/*__Userspace__ Returning 1 here always */
#endif
#if defined(SCTP_EMBEDDED_V6_SCOPE)
	struct sockaddr_in6 lhs, rhs;

	/* Work on local copies so the callers' sockaddrs stay intact. */
	lhs = *addr1;
	rhs = *addr2;

	if (lhs.sin6_scope_id == 0) {
#ifdef SCTP_KAME
		if (sa6_recoverscope(&lhs)) {
#else
		if (in6_recoverscope(&lhs, &lhs.sin6_addr, NULL)) {
#endif /* SCTP_KAME */
			/* can't get scope, so can't match */
			return (0);
		}
	}
	if (rhs.sin6_scope_id == 0) {
#ifdef SCTP_KAME
		if (sa6_recoverscope(&rhs)) {
#else
		if (in6_recoverscope(&rhs, &rhs.sin6_addr, NULL)) {
#endif /* SCTP_KAME */
			/* can't get scope, so can't match */
			return (0);
		}
	}
	/* Same link-local scope iff the recovered scope ids agree. */
	return ((lhs.sin6_scope_id == rhs.sin6_scope_id) ? 1 : 0);
#else
	/* Scope ids are already explicit; compare them directly. */
	return ((addr1->sin6_scope_id == addr2->sin6_scope_id) ? 1 : 0);
#endif /* SCTP_EMBEDDED_V6_SCOPE */
}
4917
#if defined(SCTP_EMBEDDED_V6_SCOPE)
/*
 * returns a sockaddr_in6 with embedded scope recovered and removed
 *
 * 'store' is caller-provided scratch space; the returned pointer is
 * either the original 'addr' or 'store' (when the scope had to be
 * recovered into the copy).  Only link-local IPv6 addresses are touched.
 */
struct sockaddr_in6 *
sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
{
	/* check and strip embedded scope junk */
	if (addr->sin6_family == AF_INET6) {
		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
			if (addr->sin6_scope_id == 0) {
				/* No explicit scope id: recover it into the
				 * scratch copy so 'addr' is left alone.
				 */
				*store = *addr;
#ifdef SCTP_KAME
				if (!sa6_recoverscope(store)) {
#else
				if (!in6_recoverscope(store, &store->sin6_addr,
						      NULL)) {
#endif /* SCTP_KAME */
					/* use the recovered scope */
					addr = store;
				}
			} else {
				/* else, return the original "to" addr */
				/* NOTE(review): this clears the embedded scope
				 * in the caller's sockaddr in place rather than
				 * in 'store' -- confirm callers expect this
				 * mutation.
				 */
				in6_clearscope(&addr->sin6_addr);
			}
		}
	}
	return (addr);
}
#endif /* SCTP_EMBEDDED_V6_SCOPE */
4948 #endif
4949
4950 /*
4951 * are the two addresses the same? currently a "scopeless" check returns: 1
4952 * if same, 0 if not
4953 */
int
sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
{
	/* Reject missing operands up front. */
	if ((sa1 == NULL) || (sa2 == NULL)) {
		return (0);
	}
	/* Addresses of different families never compare equal. */
	if (sa1->sa_family != sa2->sa_family) {
		return (0);
	}
	switch (sa1->sa_family) {
#ifdef INET6
	case AF_INET6:
	{
		/* IPv6: compare the full 128-bit addresses. */
		struct sockaddr_in6 *a6 = (struct sockaddr_in6 *)sa1;
		struct sockaddr_in6 *b6 = (struct sockaddr_in6 *)sa2;

		return (SCTP6_ARE_ADDR_EQUAL(a6, b6));
	}
#endif
#ifdef INET
	case AF_INET:
	{
		/* IPv4: a single 32-bit compare suffices. */
		struct sockaddr_in *a4 = (struct sockaddr_in *)sa1;
		struct sockaddr_in *b4 = (struct sockaddr_in *)sa2;

		return (a4->sin_addr.s_addr == b4->sin_addr.s_addr);
	}
#endif
#if defined(__Userspace__)
	case AF_CONN:
	{
		/* AF_CONN: equal iff the opaque connection pointers match. */
		struct sockaddr_conn *ac = (struct sockaddr_conn *)sa1;
		struct sockaddr_conn *bc = (struct sockaddr_conn *)sa2;

		return (ac->sconn_addr == bc->sconn_addr);
	}
#endif
	default:
		/* we don't do these... */
		return (0);
	}
}
5005
/*
 * Print the given address in human-readable form via SCTP_PRINTF.
 * Handles IPv6, IPv4 and (userspace builds) AF_CONN; anything else
 * prints "?".  Diagnostic aid only.
 */
void
sctp_print_address(struct sockaddr *sa)
{
#ifdef INET6
#if defined(__FreeBSD__) && !defined(__Userspace__)
	char ip6buf[INET6_ADDRSTRLEN];
#endif
#endif

	switch (sa->sa_family) {
#ifdef INET6
	case AF_INET6:
	{
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *)sa;
#if defined(__Userspace__)
		/* No kernel ip6_sprintf() here; format 16-bit groups by hand. */
		SCTP_PRINTF("IPv6 address: %x:%x:%x:%x:%x:%x:%x:%x:port:%d scope:%u\n",
			    ntohs(sin6->sin6_addr.s6_addr16[0]),
			    ntohs(sin6->sin6_addr.s6_addr16[1]),
			    ntohs(sin6->sin6_addr.s6_addr16[2]),
			    ntohs(sin6->sin6_addr.s6_addr16[3]),
			    ntohs(sin6->sin6_addr.s6_addr16[4]),
			    ntohs(sin6->sin6_addr.s6_addr16[5]),
			    ntohs(sin6->sin6_addr.s6_addr16[6]),
			    ntohs(sin6->sin6_addr.s6_addr16[7]),
			    ntohs(sin6->sin6_port),
			    sin6->sin6_scope_id);
#else
#if defined(__FreeBSD__) && !defined(__Userspace__)
		/* FreeBSD ip6_sprintf() takes a caller-supplied buffer. */
		SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
			    ntohs(sin6->sin6_port),
			    sin6->sin6_scope_id);
#else
		/* Other kernels' ip6_sprintf() uses an internal static buffer. */
		SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
			    ip6_sprintf(&sin6->sin6_addr),
			    ntohs(sin6->sin6_port),
			    sin6->sin6_scope_id);
#endif
#endif
		break;
	}
#endif
#ifdef INET
	case AF_INET:
	{
		struct sockaddr_in *sin;
		unsigned char *p;

		/* Print the four address bytes in network order. */
		sin = (struct sockaddr_in *)sa;
		p = (unsigned char *)&sin->sin_addr;
		SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
		break;
	}
#endif
#if defined(__Userspace__)
	case AF_CONN:
	{
		struct sockaddr_conn *sconn;

		/* AF_CONN has no wire format; show the opaque pointer. */
		sconn = (struct sockaddr_conn *)sa;
		SCTP_PRINTF("AF_CONN address: %p\n", sconn->sconn_addr);
		break;
	}
#endif
	default:
		SCTP_PRINTF("?\n");
		break;
	}
}
5078
/*
 * Migrate every queued-to-read control block belonging to 'stcb' from
 * old_inp's read queue to new_inp's, fixing the socket receive-buffer
 * accounting on both sockets.  Used when an association moves to a new
 * socket (peeloff / TCP-model accept).
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
#if (defined(__FreeBSD__) || defined(__APPLE__)) && !defined(__Userspace__)
	int error = 0;
#endif

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
#if (defined(__FreeBSD__) || defined(__APPLE__)) && !defined(__Userspace__)
	/* Take the receive sockbuf lock to keep concurrent readers out
	 * while entries are detached.
	 */
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/* Gak, can't get sblock, we have a problem.
		 * data will be left stranded.. and we
		 * don't dare look at it since the
		 * other thread may be reading something.
		 * Oh well, its a screwed up app that does
		 * a peeloff OR a accept while reading
		 * from the main socket... actually its
		 * only the peeloff() case, since I think
		 * read will fail on a listening socket..
		 */
		return;
	}
#endif
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for out target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			/* Subtract each mbuf's bytes from the old socket's
			 * receive-buffer accounting.
			 */
			m = control->data;
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE,SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */
#if defined(__APPLE__) && !defined(__Userspace__)
	sbunlock(&old_so->so_rcv, 1);
#endif

#if defined(__FreeBSD__) && !defined(__Userspace__)
	sbunlock(&old_so->so_rcv);
#endif
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		/* Add each mbuf's bytes to the new socket's accounting. */
		m = control->data;
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
5164
/*
 * Wake up any reader sleeping on inp's socket.  On Apple platforms the
 * socket lock must be taken first; the association is pinned with a
 * refcount while the TCB lock is dropped to acquire it in the right
 * order.  'so_locked' says the caller already holds the socket lock.
 */
void
sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    int so_locked
#if !(defined(__APPLE__) && !defined(__Userspace__))
    SCTP_UNUSED
#endif
)
{
	if ((inp != NULL) && (inp->sctp_socket != NULL)) {
#if defined(__APPLE__) && !defined(__Userspace__)
		struct socket *so;

		so = SCTP_INP_SO(inp);
		if (!so_locked) {
			/* Pin the assoc, drop the TCB lock, take the socket
			 * lock, then re-take the TCB lock (lock ordering).
			 */
			if (stcb) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
			}
			SCTP_SOCKET_LOCK(so, 1);
			if (stcb) {
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* Socket vanished while unlocked; nothing to wake. */
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) && !defined(__Userspace__)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
5203 #if defined(__Userspace__)
5204
5205 void
5206 sctp_invoke_recv_callback(struct sctp_inpcb *inp,
5207 struct sctp_tcb *stcb,
5208 struct sctp_queued_to_read *control,
5209 int inp_read_lock_held)
5210 {
5211 uint32_t pd_point, length;
5212
5213 if ((inp->recv_callback == NULL) ||
5214 (stcb == NULL) ||
5215 (stcb->sctp_socket == NULL)) {
5216 return;
5217 }
5218
5219 length = control->length;
5220 if (stcb != NULL && stcb->sctp_socket != NULL) {
5221 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
5222 stcb->sctp_ep->partial_delivery_point);
5223 } else {
5224 pd_point = inp->partial_delivery_point;
5225 }
5226 if ((control->end_added == 1) || (length >= pd_point)) {
5227 struct socket *so;
5228 struct mbuf *m;
5229 char *buffer;
5230 struct sctp_rcvinfo rcv;
5231 union sctp_sockstore addr;
5232 int flags;
5233
5234 if ((buffer = malloc(length)) == NULL) {
5235 return;
5236 }
5237 if (inp_read_lock_held == 0) {
5238 SCTP_INP_READ_LOCK(inp);
5239 }
5240 so = stcb->sctp_socket;
5241 for (m = control->data; m; m = SCTP_BUF_NEXT(m)) {
5242 sctp_sbfree(control, control->stcb, &so->so_rcv, m);
5243 }
5244 m_copydata(control->data, 0, length, buffer);
5245 memset(&rcv, 0, sizeof(struct sctp_rcvinfo));
5246 rcv.rcv_sid = control->sinfo_stream;
5247 rcv.rcv_ssn = (uint16_t)control->mid;
5248 rcv.rcv_flags = control->sinfo_flags;
5249 rcv.rcv_ppid = control->sinfo_ppid;
5250 rcv.rcv_tsn = control->sinfo_tsn;
5251 rcv.rcv_cumtsn = control->sinfo_cumtsn;
5252 rcv.rcv_context = control->sinfo_context;
5253 rcv.rcv_assoc_id = control->sinfo_assoc_id;
5254 memset(&addr, 0, sizeof(union sctp_sockstore));
5255 switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5256 #ifdef INET
5257 case AF_INET:
5258 addr.sin = control->whoFrom->ro._l_addr.sin;
5259 break;
5260 #endif
5261 #ifdef INET6
5262 case AF_INET6:
5263 addr.sin6 = control->whoFrom->ro._l_addr.sin6;
5264 break;
5265 #endif
5266 case AF_CONN:
5267 addr.sconn = control->whoFrom->ro._l_addr.sconn;
5268 break;
5269 default:
5270 addr.sa = control->whoFrom->ro._l_addr.sa;
5271 break;
5272 }
5273 flags = 0;
5274 if (control->end_added == 1) {
5275 flags |= MSG_EOR;
5276 }
5277 if (control->spec_flags & M_NOTIFICATION) {
5278 flags |= MSG_NOTIFICATION;
5279 }
5280 sctp_m_freem(control->data);
5281 control->data = NULL;
5282 control->tail_mbuf = NULL;
5283 control->length = 0;
5284 if (control->end_added) {
5285 TAILQ_REMOVE(&stcb->sctp_ep->read_queue, control, next);
5286 control->on_read_q = 0;
5287 sctp_free_remote_addr(control->whoFrom);
5288 control->whoFrom = NULL;
5289 sctp_free_a_readq(stcb, control);
5290 }
5291 atomic_add_int(&stcb->asoc.refcnt, 1);
5292 SCTP_TCB_UNLOCK(stcb);
5293 if (inp_read_lock_held == 0) {
5294 SCTP_INP_READ_UNLOCK(inp);
5295 }
5296 inp->recv_callback(so, addr, buffer, length, rcv, flags, inp->ulp_info);
5297 SCTP_TCB_LOCK(stcb);
5298 atomic_subtract_int(&stcb->asoc.refcnt, 1);
5299 }
5300 }
5301 #endif
5302
/*
 * Append a queued-to-read control block (and its mbuf chain) to the
 * endpoint's read queue, updating socket-buffer accounting so select()
 * and readers see the data.  Zero-length mbufs are stripped from the
 * chain first.  'end' marks the message complete; 'inp_read_lock_held'
 * and 'so_locked' describe which locks the caller already holds.
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
#if defined(__APPLE__) && !defined(__Userspace__)
	if (so_locked) {
		sctp_lock_assert(SCTP_INP_SO(inp));
	} else {
		sctp_unlock_assert(SCTP_INP_SO(inp));
	}
#endif
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* Reader side is shut down; drop the data instead of
		 * queueing it.  Only free the control if no stream queue
		 * still references it.
		 */
		if (!control->on_strm_q) {
			sctp_free_remote_addr(control->whoFrom);
			if (control->data) {
				sctp_m_freem(control->data);
				control->data = NULL;
			}
			sctp_free_a_readq(stcb, control);
		}
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	/* Notifications don't count as user messages received. */
	if (!(control->spec_flags & M_NOTIFICATION)) {
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	/* Walk the chain: drop empty mbufs, account the rest. */
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		if (!control->on_strm_q) {
			sctp_free_remote_addr(control->whoFrom);
			sctp_free_a_readq(stcb, control);
		}
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	control->on_read_q = 1;
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
#if defined(__Userspace__)
	sctp_invoke_recv_callback(inp, stcb, control, inp_read_lock_held);
#endif
	/* inp was checked non-NULL at entry; this guard is belt-and-braces. */
	if (inp && inp->sctp_socket) {
		sctp_wakeup_the_read_socket(inp, stcb, so_locked);
	}
}
5410
5411 /*************HOLD THIS COMMENT FOR PATCH FILE OF
5412 *************ALTERNATE ROUTING CODE
5413 */
5414
5415 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
5416 *************ALTERNATE ROUTING CODE
5417 */
5418
5419 struct mbuf *
5420 sctp_generate_cause(uint16_t code, char *info)
5421 {
5422 struct mbuf *m;
5423 struct sctp_gen_error_cause *cause;
5424 size_t info_len;
5425 uint16_t len;
5426
5427 if ((code == 0) || (info == NULL)) {
5428 return (NULL);
5429 }
5430 info_len = strlen(info);
5431 if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
5432 return (NULL);
5433 }
5434 len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len);
5435 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
5436 if (m != NULL) {
5437 SCTP_BUF_LEN(m) = len;
5438 cause = mtod(m, struct sctp_gen_error_cause *);
5439 cause->code = htons(code);
5440 cause->length = htons(len);
5441 memcpy(cause->info, info, info_len);
5442 }
5443 return (m);
5444 }
5445
5446 struct mbuf *
5447 sctp_generate_no_user_data_cause(uint32_t tsn)
5448 {
5449 struct mbuf *m;
5450 struct sctp_error_no_user_data *no_user_data_cause;
5451 uint16_t len;
5452
5453 len = (uint16_t)sizeof(struct sctp_error_no_user_data);
5454 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
5455 if (m != NULL) {
5456 SCTP_BUF_LEN(m) = len;
5457 no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
5458 no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
5459 no_user_data_cause->cause.length = htons(len);
5460 no_user_data_cause->tsn = htonl(tsn);
5461 }
5462 return (m);
5463 }
5464
5465 #ifdef SCTP_MBCNT_LOGGING
/*
 * Release the output-queue accounting held by tp1: chunk count, the
 * association's total output queue size, and (for TCP-model sockets)
 * the socket send-buffer byte count.  Only built when
 * SCTP_MBCNT_LOGGING is defined (guard is outside this function).
 */
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
	/* Chunk holds no data, so no space is accounted to it. */
	if (tp1->data == NULL) {
		return;
	}
	asoc->chunks_on_out_queue -= chk_cnt;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
			       asoc->total_output_queue_size,
			       tp1->book_size,
			       0,
			       tp1->mbcnt);
	}
	/* Clamp at zero to guard against accounting underflow. */
	if (asoc->total_output_queue_size >= tp1->book_size) {
		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
	} else {
		asoc->total_output_queue_size = 0;
	}

	/* TCP-model sockets mirror the queue size in so_snd; keep it
	 * consistent, again clamping at zero.
	 */
	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
				  ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
		} else {
			stcb->sctp_socket->so_snd.sb_cc = 0;
		}
	}
}
5496
5497 #endif
5498
/*
 * Abandon a PR-SCTP message: starting at tp1, mark every fragment of
 * the same message as SCTP_FORWARD_TSN_SKIP, free its data, update the
 * abandoned-message statistics, and notify the ULP.  If the message's
 * last fragment never made it off the stream queue, discard the rest
 * there and (if needed) queue a dummy chunk to carry the LAST_FRAG TSN
 * so FORWARD-TSN can cover the whole message.  'sent' distinguishes
 * sent vs. unsent notifications/statistics.  Returns the number of
 * book-kept bytes released.
 */
int
sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
    uint8_t sent, int so_locked)
{
	struct sctp_stream_out *strq;
	struct sctp_tmit_chunk *chk = NULL, *tp2;
	struct sctp_stream_queue_pending *sp;
	uint32_t mid;
	uint16_t sid;
	uint8_t foundeom = 0;
	int ret_sz = 0;
	int notdone;
	int do_wakeup_routine = 0;

#if defined(__APPLE__) && !defined(__Userspace__)
	if (so_locked) {
		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
	} else {
		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
	}
#endif
	sid = tp1->rec.data.sid;
	mid = tp1->rec.data.mid;
	/* Count the abandonment: a message counts as "sent" if any part
	 * of it was transmitted, or if tp1 isn't the first fragment.
	 */
	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
		stcb->asoc.abandoned_sent[0]++;
		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
		stcb->asoc.strmout[sid].abandoned_sent[0]++;
#if defined(SCTP_DETAILED_STR_STATS)
		stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
#endif
	} else {
		stcb->asoc.abandoned_unsent[0]++;
		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
		stcb->asoc.strmout[sid].abandoned_unsent[0]++;
#if defined(SCTP_DETAILED_STR_STATS)
		stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
#endif
	}
	/* Phase 1: walk tp1's queue, releasing each fragment until the
	 * message's last fragment (or an unfragmented chunk) is seen.
	 */
	do {
		ret_sz += tp1->book_size;
		if (tp1->data != NULL) {
			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
				/* Was in flight: fix flight-size accounting. */
				sctp_flight_size_decrease(tp1);
				sctp_total_flight_decrease(stcb, tp1);
			}
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			/* Give the bytes back to the peer's receive window. */
			stcb->asoc.peers_rwnd += tp1->send_size;
			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
			if (sent) {
				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
			} else {
				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
			}
			if (tp1->data) {
				sctp_m_freem(tp1->data);
				tp1->data = NULL;
			}
			do_wakeup_routine = 1;
			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
				stcb->asoc.sent_queue_cnt_removeable--;
			}
		}
		/* Mark so FORWARD-TSN will skip over this chunk. */
		tp1->sent = SCTP_FORWARD_TSN_SKIP;
		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
		    SCTP_DATA_NOT_FRAG) {
			/* not frag'ed we ae done */
			notdone = 0;
			foundeom = 1;
		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			/* end of frag, we are done */
			notdone = 0;
			foundeom = 1;
		} else {
			/*
			 * Its a begin or middle piece, we must mark all of
			 * it
			 */
			notdone = 1;
			tp1 = TAILQ_NEXT(tp1, sctp_next);
		}
	} while (tp1 && notdone);
	if (foundeom == 0) {
		/*
		 * The multi-part message was scattered across the send and
		 * sent queue.
		 */
		/* Phase 2: continue releasing fragments from the send
		 * queue, moving each onto the sent queue marked SKIP.
		 */
		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
			if ((tp1->rec.data.sid != sid) ||
			    (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) {
				break;
			}
			/* save to chk in case we have some on stream out
			 * queue. If so and we have an un-transmitted one
			 * we don't have to fudge the TSN.
			 */
			chk = tp1;
			ret_sz += tp1->book_size;
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			if (sent) {
				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
			} else {
				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
			}
			if (tp1->data) {
				sctp_m_freem(tp1->data);
				tp1->data = NULL;
			}
			/* No flight involved here book the size to 0 */
			tp1->book_size = 0;
			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				foundeom = 1;
			}
			do_wakeup_routine = 1;
			tp1->sent = SCTP_FORWARD_TSN_SKIP;
			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
			/* on to the sent queue so we can wait for it to be passed by. */
			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
					  sctp_next);
			stcb->asoc.send_queue_cnt--;
			stcb->asoc.sent_queue_cnt++;
		}
	}
	if (foundeom == 0) {
		/*
		 * Still no eom found. That means there
		 * is stuff left on the stream out queue.. yuck.
		 */
		/* Phase 3: discard the remainder on the stream-out queue
		 * and ensure a chunk carries the LAST_FRAG marker.
		 */
		SCTP_TCB_SEND_LOCK(stcb);
		strq = &stcb->asoc.strmout[sid];
		sp = TAILQ_FIRST(&strq->outqueue);
		if (sp != NULL) {
			sp->discard_rest = 1;
			/*
			 * We may need to put a chunk on the
			 * queue that holds the TSN that
			 * would have been sent with the LAST
			 * bit.
			 */
			if (chk == NULL) {
				/* Yep, we have to */
				sctp_alloc_a_chunk(stcb, chk);
				if (chk == NULL) {
					/* we are hosed. All we can
					 * do is nothing.. which will
					 * cause an abort if the peer is
					 * paying attention.
					 */
					goto oh_well;
				}
				memset(chk, 0, sizeof(*chk));
				chk->rec.data.rcv_flags = 0;
				chk->sent = SCTP_FORWARD_TSN_SKIP;
				chk->asoc = &stcb->asoc;
				/* Pick the message id the LAST fragment
				 * would have carried (ordered vs unordered).
				 */
				if (stcb->asoc.idata_supported == 0) {
					if (sp->sinfo_flags & SCTP_UNORDERED) {
						chk->rec.data.mid = 0;
					} else {
						chk->rec.data.mid = strq->next_mid_ordered;
					}
				} else {
					if (sp->sinfo_flags & SCTP_UNORDERED) {
						chk->rec.data.mid = strq->next_mid_unordered;
					} else {
						chk->rec.data.mid = strq->next_mid_ordered;
					}
				}
				chk->rec.data.sid = sp->sid;
				chk->rec.data.ppid = sp->ppid;
				chk->rec.data.context = sp->context;
				chk->flags = sp->act_flags;
				chk->whoTo = NULL;
				/* Consume the next TSN for the dummy chunk. */
#if defined(__FreeBSD__) && !defined(__Userspace__)
				chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
#else
				chk->rec.data.tsn = stcb->asoc.sending_seq++;
#endif
				strq->chunks_on_queues++;
				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
				stcb->asoc.sent_queue_cnt++;
				stcb->asoc.pr_sctp_cnt++;
			}
			chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
			if (sp->sinfo_flags & SCTP_UNORDERED) {
				chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED;
			}
			/* Advance the stream's message id past the
			 * abandoned message.
			 */
			if (stcb->asoc.idata_supported == 0) {
				if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) {
					strq->next_mid_ordered++;
				}
			} else {
				if (sp->sinfo_flags & SCTP_UNORDERED) {
					strq->next_mid_unordered++;
				} else {
					strq->next_mid_ordered++;
				}
			}
		oh_well:
			if (sp->data) {
				/* Pull any data to free up the SB and
				 * allow sender to "add more" while we
				 * will throw away :-)
				 */
				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
				ret_sz += sp->length;
				do_wakeup_routine = 1;
				sp->some_taken = 1;
				sctp_m_freem(sp->data);
				sp->data = NULL;
				sp->tail_mbuf = NULL;
				sp->length = 0;
			}
		}
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
	if (do_wakeup_routine) {
#if defined(__APPLE__) && !defined(__Userspace__)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/* Pin the assoc while swapping lock order to take
			 * the socket lock.
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				/* assoc was freed while we were unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return (ret_sz);
			}
		}
#endif
		/* Send-buffer space was released; wake any blocked sender. */
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) && !defined(__Userspace__)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
	return (ret_sz);
}
5740
5741 /*
5742 * checks to see if the given address, sa, is one that is currently known by
5743 * the kernel note: can't distinguish the same address on multiple interfaces
5744 * and doesn't handle multiple addresses with different zone/scope id's note:
5745 * ifa_ifwithaddr() compares the entire sockaddr struct
5746 */
/*
 * Search the endpoint's bound-address list for 'addr'.  Returns the
 * matching sctp_ifa or NULL.  'holds_lock' is non-zero when the caller
 * already holds the INP read lock.
 */
struct sctp_ifa *
sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
		    int holds_lock)
{
	struct sctp_laddr *laddr;

	if (holds_lock == 0) {
		SCTP_INP_RLOCK(inp);
	}

	/* Note: relies on LIST_FOREACH leaving laddr == NULL when the
	 * list is exhausted without a match.
	 */
	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
		if (laddr->ifa == NULL)
			continue;
		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
			continue;
#ifdef INET
		if (addr->sa_family == AF_INET) {
			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
			    laddr->ifa->address.sin.sin_addr.s_addr) {
				/* found him. */
				break;
			}
		}
#endif
#ifdef INET6
		if (addr->sa_family == AF_INET6) {
			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
						 &laddr->ifa->address.sin6)) {
				/* found him. */
				break;
			}
		}
#endif
#if defined(__Userspace__)
		if (addr->sa_family == AF_CONN) {
			if (((struct sockaddr_conn *)addr)->sconn_addr == laddr->ifa->address.sconn.sconn_addr) {
				/* found him. */
				break;
			}
		}
#endif
	}
	if (holds_lock == 0) {
		SCTP_INP_RUNLOCK(inp);
	}
	if (laddr != NULL) {
		return (laddr->ifa);
	} else {
		return (NULL);
	}
}
5798
5799 uint32_t
5800 sctp_get_ifa_hash_val(struct sockaddr *addr)
5801 {
5802 switch (addr->sa_family) {
5803 #ifdef INET
5804 case AF_INET:
5805 {
5806 struct sockaddr_in *sin;
5807
5808 sin = (struct sockaddr_in *)addr;
5809 return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
5810 }
5811 #endif
5812 #ifdef INET6
5813 case AF_INET6:
5814 {
5815 struct sockaddr_in6 *sin6;
5816 uint32_t hash_of_addr;
5817
5818 sin6 = (struct sockaddr_in6 *)addr;
5819 #if !defined(_WIN32) && !(defined(__FreeBSD__) && defined(__Userspace__)) && !defined(__APPLE__)
5820 hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
5821 sin6->sin6_addr.s6_addr32[1] +
5822 sin6->sin6_addr.s6_addr32[2] +
5823 sin6->sin6_addr.s6_addr32[3]);
5824 #else
5825 hash_of_addr = (((uint32_t *)&sin6->sin6_addr)[0] +
5826 ((uint32_t *)&sin6->sin6_addr)[1] +
5827 ((uint32_t *)&sin6->sin6_addr)[2] +
5828 ((uint32_t *)&sin6->sin6_addr)[3]);
5829 #endif
5830 hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
5831 return (hash_of_addr);
5832 }
5833 #endif
5834 #if defined(__Userspace__)
5835 case AF_CONN:
5836 {
5837 struct sockaddr_conn *sconn;
5838 uintptr_t temp;
5839
5840 sconn = (struct sockaddr_conn *)addr;
5841 temp = (uintptr_t)sconn->sconn_addr;
5842 return ((uint32_t)(temp ^ (temp >> 16)));
5843 }
5844 #endif
5845 default:
5846 break;
5847 }
5848 return (0);
5849 }
5850
/*
 * Look up 'addr' in the given VRF's address hash table.  Returns the
 * matching sctp_ifa or NULL.  'holds_lock' is non-zero when the caller
 * already holds the global address read lock.
 */
struct sctp_ifa *
sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
{
	struct sctp_ifa *sctp_ifap;
	struct sctp_vrf *vrf;
	struct sctp_ifalist *hash_head;
	uint32_t hash_of_addr;

	if (holds_lock == 0) {
		SCTP_IPI_ADDR_RLOCK();
	} else {
		SCTP_IPI_ADDR_LOCK_ASSERT();
	}

	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL) {
		/* No such VRF: nothing to search. */
		if (holds_lock == 0)
			SCTP_IPI_ADDR_RUNLOCK();
		return (NULL);
	}

	hash_of_addr = sctp_get_ifa_hash_val(addr);

	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
	/* NOTE(review): hash_head is the address of an array slot, so this
	 * NULL check looks unreachable -- presumably defensive; verify.
	 */
	if (hash_head == NULL) {
		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
			    hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark,
			    (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark));
		sctp_print_address(addr);
		SCTP_PRINTF("No such bucket for address\n");
		if (holds_lock == 0)
			SCTP_IPI_ADDR_RUNLOCK();

		return (NULL);
	}
	/* Scan the bucket; LIST_FOREACH leaves sctp_ifap == NULL if no
	 * entry matched.
	 */
	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
			continue;
#ifdef INET
		if (addr->sa_family == AF_INET) {
			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
			    sctp_ifap->address.sin.sin_addr.s_addr) {
				/* found him. */
				break;
			}
		}
#endif
#ifdef INET6
		if (addr->sa_family == AF_INET6) {
			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
						 &sctp_ifap->address.sin6)) {
				/* found him. */
				break;
			}
		}
#endif
#if defined(__Userspace__)
		if (addr->sa_family == AF_CONN) {
			if (((struct sockaddr_conn *)addr)->sconn_addr == sctp_ifap->address.sconn.sconn_addr) {
				/* found him. */
				break;
			}
		}
#endif
	}
	if (holds_lock == 0)
		SCTP_IPI_ADDR_RUNLOCK();
	return (sctp_ifap);
}
5920
/*
 * Called after the application consumed *freed_so_far bytes from the
 * receive path.  Decides whether the receive window has opened by at
 * least rwnd_req bytes since the last report; if so, sends an immediate
 * window-update SACK (and kicks chunk output), otherwise just records
 * the pending amount in stcb->freed_by_sorcv_sincelast.  If hold_rlock
 * is non-zero the caller holds the INP read-queue lock; it is dropped
 * around the SACK send and re-acquired before returning.  *freed_so_far
 * is reset to 0 once it has been folded into the association's counter.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
#if defined(__FreeBSD__) && !defined(__Userspace__)
	struct epoch_tracker et;
#endif
	int r_unlocked = 0;	/* set when we drop the caller's read-queue lock */
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Pin the association so it cannot be freed while we work. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
	    (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | SCTP_STATE_SHUTDOWN_RECEIVED))) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	/* Also pin the endpoint while we inspect its socket. */
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* Fold the caller's freed bytes into the per-assoc running total. */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/*
		 * Window grew by at least rwnd_req: worth a window-update
		 * SACK.  NOTE(review): the read-queue lock is dropped before
		 * taking the TCB lock, presumably to respect lock ordering;
		 * it is re-acquired at "out" — confirm against the locking
		 * rules in sctp_pcb.h.
		 */
		if (hold_rlock) {
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
#if defined(__FreeBSD__) && !defined(__Userspace__)
		NET_EPOCH_ENTER(et);
#endif
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
#if defined(__FreeBSD__) && !defined(__Userspace__)
		NET_EPOCH_EXIT(et);
#endif
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* Re-acquire the read-queue lock if we dropped it above. */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}

	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	/* Drop the association reference taken on entry. */
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
6012
6013 int
6014 sctp_sorecvmsg(struct socket *so,
6015 struct uio *uio,
6016 struct mbuf **mp,
6017 struct sockaddr *from,
6018 int fromlen,
6019 int *msg_flags,
6020 struct sctp_sndrcvinfo *sinfo,
6021 int filling_sinfo)
6022 {
6023 /*
6024 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
6025 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
6026 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
6027 * On the way out we may send out any combination of:
6028 * MSG_NOTIFICATION MSG_EOR
6029 *
6030 */
6031 struct sctp_inpcb *inp = NULL;
6032 ssize_t my_len = 0;
6033 ssize_t cp_len = 0;
6034 int error = 0;
6035 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
6036 struct mbuf *m = NULL;
6037 struct sctp_tcb *stcb = NULL;
6038 int wakeup_read_socket = 0;
6039 int freecnt_applied = 0;
6040 int out_flags = 0, in_flags = 0;
6041 int block_allowed = 1;
6042 uint32_t freed_so_far = 0;
6043 ssize_t copied_so_far = 0;
6044 int in_eeor_mode = 0;
6045 int no_rcv_needed = 0;
6046 uint32_t rwnd_req = 0;
6047 int hold_sblock = 0;
6048 int hold_rlock = 0;
6049 ssize_t slen = 0;
6050 uint32_t held_length = 0;
6051 #if defined(__FreeBSD__) && !defined(__Userspace__)
6052 int sockbuf_lock = 0;
6053 #endif
6054
6055 if (uio == NULL) {
6056 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6057 return (EINVAL);
6058 }
6059
6060 if (msg_flags) {
6061 in_flags = *msg_flags;
6062 if (in_flags & MSG_PEEK)
6063 SCTP_STAT_INCR(sctps_read_peeks);
6064 } else {
6065 in_flags = 0;
6066 }
6067 #if defined(__APPLE__) && !defined(__Userspace__)
6068 #if defined(APPLE_LEOPARD)
6069 slen = uio->uio_resid;
6070 #else
6071 slen = uio_resid(uio);
6072 #endif
6073 #else
6074 slen = uio->uio_resid;
6075 #endif
6076
6077 /* Pull in and set up our int flags */
6078 if (in_flags & MSG_OOB) {
6079 /* Out of band's NOT supported */
6080 return (EOPNOTSUPP);
6081 }
6082 if ((in_flags & MSG_PEEK) && (mp != NULL)) {
6083 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6084 return (EINVAL);
6085 }
6086 if ((in_flags & (MSG_DONTWAIT
6087 #if defined(__FreeBSD__) && !defined(__Userspace__)
6088 | MSG_NBIO
6089 #endif
6090 )) ||
6091 SCTP_SO_IS_NBIO(so)) {
6092 block_allowed = 0;
6093 }
6094 /* setup the endpoint */
6095 inp = (struct sctp_inpcb *)so->so_pcb;
6096 if (inp == NULL) {
6097 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
6098 return (EFAULT);
6099 }
6100 rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
6101 /* Must be at least a MTU's worth */
6102 if (rwnd_req < SCTP_MIN_RWND)
6103 rwnd_req = SCTP_MIN_RWND;
6104 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
6105 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6106 #if defined(__APPLE__) && !defined(__Userspace__)
6107 #if defined(APPLE_LEOPARD)
6108 sctp_misc_ints(SCTP_SORECV_ENTER,
6109 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
6110 #else
6111 sctp_misc_ints(SCTP_SORECV_ENTER,
6112 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio_resid(uio));
6113 #endif
6114 #else
6115 sctp_misc_ints(SCTP_SORECV_ENTER,
6116 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
6117 #endif
6118 }
6119 #if defined(__Userspace__)
6120 SOCKBUF_LOCK(&so->so_rcv);
6121 hold_sblock = 1;
6122 #endif
6123 if (SCTP_BASE_SYSCTL(sctp_logging_level) &SCTP_RECV_RWND_LOGGING_ENABLE) {
6124 #if defined(__APPLE__) && !defined(__Userspace__)
6125 #if defined(APPLE_LEOPARD)
6126 sctp_misc_ints(SCTP_SORECV_ENTERPL,
6127 rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
6128 #else
6129 sctp_misc_ints(SCTP_SORECV_ENTERPL,
6130 rwnd_req, block_allowed, so->so_rcv.sb_cc, uio_resid(uio));
6131 #endif
6132 #else
6133 sctp_misc_ints(SCTP_SORECV_ENTERPL,
6134 rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
6135 #endif
6136 }
6137
6138 #if defined(__APPLE__) && !defined(__Userspace__)
6139 error = sblock(&so->so_rcv, SBLOCKWAIT(in_flags));
6140 #endif
6141 #if defined(__FreeBSD__) && !defined(__Userspace__)
6142 error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
6143 #endif
6144 if (error) {
6145 goto release_unlocked;
6146 }
6147 #if defined(__FreeBSD__) && !defined(__Userspace__)
6148 sockbuf_lock = 1;
6149 #endif
6150 restart:
6151 #if defined(__Userspace__)
6152 if (hold_sblock == 0) {
6153 SOCKBUF_LOCK(&so->so_rcv);
6154 hold_sblock = 1;
6155 }
6156 #endif
6157 #if defined(__APPLE__) && !defined(__Userspace__)
6158 sbunlock(&so->so_rcv, 1);
6159 #endif
6160
6161 restart_nosblocks:
6162 if (hold_sblock == 0) {
6163 SOCKBUF_LOCK(&so->so_rcv);
6164 hold_sblock = 1;
6165 }
6166 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
6167 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
6168 goto out;
6169 }
6170 #if (defined(__FreeBSD__) || defined(_WIN32)) && !defined(__Userspace__)
6171 if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
6172 #else
6173 if ((so->so_state & SS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
6174 #endif
6175 if (so->so_error) {
6176 error = so->so_error;
6177 if ((in_flags & MSG_PEEK) == 0)
6178 so->so_error = 0;
6179 goto out;
6180 } else {
6181 if (so->so_rcv.sb_cc == 0) {
6182 /* indicate EOF */
6183 error = 0;
6184 goto out;
6185 }
6186 }
6187 }
6188 if (so->so_rcv.sb_cc <= held_length) {
6189 if (so->so_error) {
6190 error = so->so_error;
6191 if ((in_flags & MSG_PEEK) == 0) {
6192 so->so_error = 0;
6193 }
6194 goto out;
6195 }
6196 if ((so->so_rcv.sb_cc == 0) &&
6197 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
6198 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
6199 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
6200 /* For active open side clear flags for re-use
6201 * passive open is blocked by connect.
6202 */
6203 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
6204 /* You were aborted, passive side always hits here */
6205 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
6206 error = ECONNRESET;
6207 }
6208 so->so_state &= ~(SS_ISCONNECTING |
6209 SS_ISDISCONNECTING |
6210 SS_ISCONFIRMING |
6211 SS_ISCONNECTED);
6212 if (error == 0) {
6213 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
6214 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
6215 error = ENOTCONN;
6216 }
6217 }
6218 goto out;
6219 }
6220 }
6221 if (block_allowed) {
6222 error = sbwait(&so->so_rcv);
6223 if (error) {
6224 goto out;
6225 }
6226 held_length = 0;
6227 goto restart_nosblocks;
6228 } else {
6229 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
6230 error = EWOULDBLOCK;
6231 goto out;
6232 }
6233 }
6234 if (hold_sblock == 1) {
6235 SOCKBUF_UNLOCK(&so->so_rcv);
6236 hold_sblock = 0;
6237 }
6238 #if defined(__APPLE__) && !defined(__Userspace__)
6239 error = sblock(&so->so_rcv, SBLOCKWAIT(in_flags));
6240 #endif
6241 /* we possibly have data we can read */
6242 /*sa_ignore FREED_MEMORY*/
6243 control = TAILQ_FIRST(&inp->read_queue);
6244 if (control == NULL) {
6245 /* This could be happening since
6246 * the appender did the increment but as not
6247 * yet did the tailq insert onto the read_queue
6248 */
6249 if (hold_rlock == 0) {
6250 SCTP_INP_READ_LOCK(inp);
6251 }
6252 control = TAILQ_FIRST(&inp->read_queue);
6253 if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
6254 #ifdef INVARIANTS
6255 panic("Huh, its non zero and nothing on control?");
6256 #endif
6257 so->so_rcv.sb_cc = 0;
6258 }
6259 SCTP_INP_READ_UNLOCK(inp);
6260 hold_rlock = 0;
6261 goto restart;
6262 }
6263
6264 if ((control->length == 0) &&
6265 (control->do_not_ref_stcb)) {
6266 /* Clean up code for freeing assoc that left behind a pdapi..
6267 * maybe a peer in EEOR that just closed after sending and
6268 * never indicated a EOR.
6269 */
6270 if (hold_rlock == 0) {
6271 hold_rlock = 1;
6272 SCTP_INP_READ_LOCK(inp);
6273 }
6274 control->held_length = 0;
6275 if (control->data) {
6276 /* Hmm there is data here .. fix */
6277 struct mbuf *m_tmp;
6278 int cnt = 0;
6279 m_tmp = control->data;
6280 while (m_tmp) {
6281 cnt += SCTP_BUF_LEN(m_tmp);
6282 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
6283 control->tail_mbuf = m_tmp;
6284 control->end_added = 1;
6285 }
6286 m_tmp = SCTP_BUF_NEXT(m_tmp);
6287 }
6288 control->length = cnt;
6289 } else {
6290 /* remove it */
6291 TAILQ_REMOVE(&inp->read_queue, control, next);
6292 /* Add back any hiddend data */
6293 sctp_free_remote_addr(control->whoFrom);
6294 sctp_free_a_readq(stcb, control);
6295 }
6296 if (hold_rlock) {
6297 hold_rlock = 0;
6298 SCTP_INP_READ_UNLOCK(inp);
6299 }
6300 goto restart;
6301 }
6302 if ((control->length == 0) &&
6303 (control->end_added == 1)) {
6304 /* Do we also need to check for (control->pdapi_aborted == 1)? */
6305 if (hold_rlock == 0) {
6306 hold_rlock = 1;
6307 SCTP_INP_READ_LOCK(inp);
6308 }
6309 TAILQ_REMOVE(&inp->read_queue, control, next);
6310 if (control->data) {
6311 #ifdef INVARIANTS
6312 panic("control->data not null but control->length == 0");
6313 #else
6314 SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
6315 sctp_m_freem(control->data);
6316 control->data = NULL;
6317 #endif
6318 }
6319 if (control->aux_data) {
6320 sctp_m_free (control->aux_data);
6321 control->aux_data = NULL;
6322 }
6323 #ifdef INVARIANTS
6324 if (control->on_strm_q) {
6325 panic("About to free ctl:%p so:%p and its in %d",
6326 control, so, control->on_strm_q);
6327 }
6328 #endif
6329 sctp_free_remote_addr(control->whoFrom);
6330 sctp_free_a_readq(stcb, control);
6331 if (hold_rlock) {
6332 hold_rlock = 0;
6333 SCTP_INP_READ_UNLOCK(inp);
6334 }
6335 goto restart;
6336 }
6337 if (control->length == 0) {
6338 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
6339 (filling_sinfo)) {
6340 /* find a more suitable one then this */
6341 ctl = TAILQ_NEXT(control, next);
6342 while (ctl) {
6343 if ((ctl->stcb != control->stcb) && (ctl->length) &&
6344 (ctl->some_taken ||
6345 (ctl->spec_flags & M_NOTIFICATION) ||
6346 ((ctl->do_not_ref_stcb == 0) &&
6347 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
6348 ) {
6349 /*-
6350 * If we have a different TCB next, and there is data
6351 * present. If we have already taken some (pdapi), OR we can
6352 * ref the tcb and no delivery as started on this stream, we
6353 * take it. Note we allow a notification on a different
6354 * assoc to be delivered..
6355 */
6356 control = ctl;
6357 goto found_one;
6358 } else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
6359 (ctl->length) &&
6360 ((ctl->some_taken) ||
6361 ((ctl->do_not_ref_stcb == 0) &&
6362 ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
6363 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
6364 /*-
6365 * If we have the same tcb, and there is data present, and we
6366 * have the strm interleave feature present. Then if we have
6367 * taken some (pdapi) or we can refer to tht tcb AND we have
6368 * not started a delivery for this stream, we can take it.
6369 * Note we do NOT allow a notificaiton on the same assoc to
6370 * be delivered.
6371 */
6372 control = ctl;
6373 goto found_one;
6374 }
6375 ctl = TAILQ_NEXT(ctl, next);
6376 }
6377 }
6378 /*
6379 * if we reach here, not suitable replacement is available
6380 * <or> fragment interleave is NOT on. So stuff the sb_cc
6381 * into the our held count, and its time to sleep again.
6382 */
6383 held_length = so->so_rcv.sb_cc;
6384 control->held_length = so->so_rcv.sb_cc;
6385 goto restart;
6386 }
6387 /* Clear the held length since there is something to read */
6388 control->held_length = 0;
6389 found_one:
6390 /*
6391 * If we reach here, control has a some data for us to read off.
6392 * Note that stcb COULD be NULL.
6393 */
6394 if (hold_rlock == 0) {
6395 hold_rlock = 1;
6396 SCTP_INP_READ_LOCK(inp);
6397 }
6398 control->some_taken++;
6399 stcb = control->stcb;
6400 if (stcb) {
6401 if ((control->do_not_ref_stcb == 0) &&
6402 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
6403 if (freecnt_applied == 0)
6404 stcb = NULL;
6405 } else if (control->do_not_ref_stcb == 0) {
6406 /* you can't free it on me please */
6407 /*
6408 * The lock on the socket buffer protects us so the
6409 * free code will stop. But since we used the socketbuf
6410 * lock and the sender uses the tcb_lock to increment,
6411 * we need to use the atomic add to the refcnt
6412 */
6413 if (freecnt_applied) {
6414 #ifdef INVARIANTS
6415 panic("refcnt already incremented");
6416 #else
6417 SCTP_PRINTF("refcnt already incremented?\n");
6418 #endif
6419 } else {
6420 atomic_add_int(&stcb->asoc.refcnt, 1);
6421 freecnt_applied = 1;
6422 }
6423 /*
6424 * Setup to remember how much we have not yet told
6425 * the peer our rwnd has opened up. Note we grab
6426 * the value from the tcb from last time.
6427 * Note too that sack sending clears this when a sack
6428 * is sent, which is fine. Once we hit the rwnd_req,
6429 * we then will go to the sctp_user_rcvd() that will
6430 * not lock until it KNOWs it MUST send a WUP-SACK.
6431 */
6432 freed_so_far = (uint32_t)stcb->freed_by_sorcv_sincelast;
6433 stcb->freed_by_sorcv_sincelast = 0;
6434 }
6435 }
6436 if (stcb &&
6437 ((control->spec_flags & M_NOTIFICATION) == 0) &&
6438 control->do_not_ref_stcb == 0) {
6439 stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
6440 }
6441
6442 /* First lets get off the sinfo and sockaddr info */
6443 if ((sinfo != NULL) && (filling_sinfo != 0)) {
6444 sinfo->sinfo_stream = control->sinfo_stream;
6445 sinfo->sinfo_ssn = (uint16_t)control->mid;
6446 sinfo->sinfo_flags = control->sinfo_flags;
6447 sinfo->sinfo_ppid = control->sinfo_ppid;
6448 sinfo->sinfo_context =control->sinfo_context;
6449 sinfo->sinfo_timetolive = control->sinfo_timetolive;
6450 sinfo->sinfo_tsn = control->sinfo_tsn;
6451 sinfo->sinfo_cumtsn = control->sinfo_cumtsn;
6452 sinfo->sinfo_assoc_id = control->sinfo_assoc_id;
6453 nxt = TAILQ_NEXT(control, next);
6454 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6455 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
6456 struct sctp_extrcvinfo *s_extra;
6457 s_extra = (struct sctp_extrcvinfo *)sinfo;
6458 if ((nxt) &&
6459 (nxt->length)) {
6460 s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
6461 if (nxt->sinfo_flags & SCTP_UNORDERED) {
6462 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
6463 }
6464 if (nxt->spec_flags & M_NOTIFICATION) {
6465 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
6466 }
6467 s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
6468 s_extra->serinfo_next_length = nxt->length;
6469 s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
6470 s_extra->serinfo_next_stream = nxt->sinfo_stream;
6471 if (nxt->tail_mbuf != NULL) {
6472 if (nxt->end_added) {
6473 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
6474 }
6475 }
6476 } else {
6477 /* we explicitly 0 this, since the memcpy got
6478 * some other things beyond the older sinfo_
6479 * that is on the control's structure :-D
6480 */
6481 nxt = NULL;
6482 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
6483 s_extra->serinfo_next_aid = 0;
6484 s_extra->serinfo_next_length = 0;
6485 s_extra->serinfo_next_ppid = 0;
6486 s_extra->serinfo_next_stream = 0;
6487 }
6488 }
6489 /*
6490 * update off the real current cum-ack, if we have an stcb.
6491 */
6492 if ((control->do_not_ref_stcb == 0) && stcb)
6493 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
6494 /*
6495 * mask off the high bits, we keep the actual chunk bits in
6496 * there.
6497 */
6498 sinfo->sinfo_flags &= 0x00ff;
6499 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
6500 sinfo->sinfo_flags |= SCTP_UNORDERED;
6501 }
6502 }
6503 #ifdef SCTP_ASOCLOG_OF_TSNS
6504 {
6505 int index, newindex;
6506 struct sctp_pcbtsn_rlog *entry;
6507 do {
6508 index = inp->readlog_index;
6509 newindex = index + 1;
6510 if (newindex >= SCTP_READ_LOG_SIZE) {
6511 newindex = 0;
6512 }
6513 } while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
6514 entry = &inp->readlog[index];
6515 entry->vtag = control->sinfo_assoc_id;
6516 entry->strm = control->sinfo_stream;
6517 entry->seq = (uint16_t)control->mid;
6518 entry->sz = control->length;
6519 entry->flgs = control->sinfo_flags;
6520 }
6521 #endif
6522 if ((fromlen > 0) && (from != NULL)) {
6523 union sctp_sockstore store;
6524 size_t len;
6525
6526 switch (control->whoFrom->ro._l_addr.sa.sa_family) {
6527 #ifdef INET6
6528 case AF_INET6:
6529 len = sizeof(struct sockaddr_in6);
6530 store.sin6 = control->whoFrom->ro._l_addr.sin6;
6531 store.sin6.sin6_port = control->port_from;
6532 break;
6533 #endif
6534 #ifdef INET
6535 case AF_INET:
6536 #ifdef INET6
6537 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
6538 len = sizeof(struct sockaddr_in6);
6539 in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
6540 &store.sin6);
6541 store.sin6.sin6_port = control->port_from;
6542 } else {
6543 len = sizeof(struct sockaddr_in);
6544 store.sin = control->whoFrom->ro._l_addr.sin;
6545 store.sin.sin_port = control->port_from;
6546 }
6547 #else
6548 len = sizeof(struct sockaddr_in);
6549 store.sin = control->whoFrom->ro._l_addr.sin;
6550 store.sin.sin_port = control->port_from;
6551 #endif
6552 break;
6553 #endif
6554 #if defined(__Userspace__)
6555 case AF_CONN:
6556 len = sizeof(struct sockaddr_conn);
6557 store.sconn = control->whoFrom->ro._l_addr.sconn;
6558 store.sconn.sconn_port = control->port_from;
6559 break;
6560 #endif
6561 default:
6562 len = 0;
6563 break;
6564 }
6565 memcpy(from, &store, min((size_t)fromlen, len));
6566 #if defined(SCTP_EMBEDDED_V6_SCOPE)
6567 #ifdef INET6
6568 {
6569 struct sockaddr_in6 lsa6, *from6;
6570
6571 from6 = (struct sockaddr_in6 *)from;
6572 sctp_recover_scope_mac(from6, (&lsa6));
6573 }
6574 #endif
6575 #endif
6576 }
6577 if (hold_rlock) {
6578 SCTP_INP_READ_UNLOCK(inp);
6579 hold_rlock = 0;
6580 }
6581 if (hold_sblock) {
6582 SOCKBUF_UNLOCK(&so->so_rcv);
6583 hold_sblock = 0;
6584 }
6585 /* now copy out what data we can */
6586 if (mp == NULL) {
6587 /* copy out each mbuf in the chain up to length */
6588 get_more_data:
6589 m = control->data;
6590 while (m) {
6591 /* Move out all we can */
6592 #if defined(__APPLE__) && !defined(__Userspace__)
6593 #if defined(APPLE_LEOPARD)
6594 cp_len = uio->uio_resid;
6595 #else
6596 cp_len = uio_resid(uio);
6597 #endif
6598 #else
6599 cp_len = uio->uio_resid;
6600 #endif
6601 my_len = SCTP_BUF_LEN(m);
6602 if (cp_len > my_len) {
6603 /* not enough in this buf */
6604 cp_len = my_len;
6605 }
6606 if (hold_rlock) {
6607 SCTP_INP_READ_UNLOCK(inp);
6608 hold_rlock = 0;
6609 }
6610 #if defined(__APPLE__) && !defined(__Userspace__)
6611 SCTP_SOCKET_UNLOCK(so, 0);
6612 #endif
6613 if (cp_len > 0)
6614 error = uiomove(mtod(m, char *), (int)cp_len, uio);
6615 #if defined(__APPLE__) && !defined(__Userspace__)
6616 SCTP_SOCKET_LOCK(so, 0);
6617 #endif
6618 /* re-read */
6619 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
6620 goto release;
6621 }
6622
6623 if ((control->do_not_ref_stcb == 0) && stcb &&
6624 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
6625 no_rcv_needed = 1;
6626 }
6627 if (error) {
6628 /* error we are out of here */
6629 goto release;
6630 }
6631 SCTP_INP_READ_LOCK(inp);
6632 hold_rlock = 1;
6633 if (cp_len == SCTP_BUF_LEN(m)) {
6634 if ((SCTP_BUF_NEXT(m)== NULL) &&
6635 (control->end_added)) {
6636 out_flags |= MSG_EOR;
6637 if ((control->do_not_ref_stcb == 0) &&
6638 (control->stcb != NULL) &&
6639 ((control->spec_flags & M_NOTIFICATION) == 0))
6640 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6641 }
6642 if (control->spec_flags & M_NOTIFICATION) {
6643 out_flags |= MSG_NOTIFICATION;
6644 }
6645 /* we ate up the mbuf */
6646 if (in_flags & MSG_PEEK) {
6647 /* just looking */
6648 m = SCTP_BUF_NEXT(m);
6649 copied_so_far += cp_len;
6650 } else {
6651 /* dispose of the mbuf */
6652 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6653 sctp_sblog(&so->so_rcv,
6654 control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6655 }
6656 sctp_sbfree(control, stcb, &so->so_rcv, m);
6657 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6658 sctp_sblog(&so->so_rcv,
6659 control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
6660 }
6661 copied_so_far += cp_len;
6662 freed_so_far += (uint32_t)cp_len;
6663 freed_so_far += MSIZE;
6664 atomic_subtract_int(&control->length, cp_len);
6665 control->data = sctp_m_free(m);
6666 m = control->data;
6667 /* been through it all, must hold sb lock ok to null tail */
6668 if (control->data == NULL) {
6669 #ifdef INVARIANTS
6670 #if defined(__FreeBSD__) && !defined(__Userspace__)
6671 if ((control->end_added == 0) ||
6672 (TAILQ_NEXT(control, next) == NULL)) {
6673 /* If the end is not added, OR the
6674 * next is NOT null we MUST have the lock.
6675 */
6676 if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
6677 panic("Hmm we don't own the lock?");
6678 }
6679 }
6680 #endif
6681 #endif
6682 control->tail_mbuf = NULL;
6683 #ifdef INVARIANTS
6684 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
6685 panic("end_added, nothing left and no MSG_EOR");
6686 }
6687 #endif
6688 }
6689 }
6690 } else {
6691 /* Do we need to trim the mbuf? */
6692 if (control->spec_flags & M_NOTIFICATION) {
6693 out_flags |= MSG_NOTIFICATION;
6694 }
6695 if ((in_flags & MSG_PEEK) == 0) {
6696 SCTP_BUF_RESV_UF(m, cp_len);
6697 SCTP_BUF_LEN(m) -= (int)cp_len;
6698 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6699 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE, (int)cp_len);
6700 }
6701 atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
6702 if ((control->do_not_ref_stcb == 0) &&
6703 stcb) {
6704 atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
6705 }
6706 copied_so_far += cp_len;
6707 freed_so_far += (uint32_t)cp_len;
6708 freed_so_far += MSIZE;
6709 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6710 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb?NULL:stcb,
6711 SCTP_LOG_SBRESULT, 0);
6712 }
6713 atomic_subtract_int(&control->length, cp_len);
6714 } else {
6715 copied_so_far += cp_len;
6716 }
6717 }
6718 #if defined(__APPLE__) && !defined(__Userspace__)
6719 #if defined(APPLE_LEOPARD)
6720 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
6721 #else
6722 if ((out_flags & MSG_EOR) || (uio_resid(uio) == 0)) {
6723 #endif
6724 #else
6725 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
6726 #endif
6727 break;
6728 }
6729 if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
6730 (control->do_not_ref_stcb == 0) &&
6731 (freed_so_far >= rwnd_req)) {
6732 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6733 }
6734 } /* end while(m) */
6735 /*
6736 * At this point we have looked at it all and we either have
6737 * a MSG_EOR/or read all the user wants... <OR>
6738 * control->length == 0.
6739 */
6740 if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
6741 /* we are done with this control */
6742 if (control->length == 0) {
6743 if (control->data) {
6744 #ifdef INVARIANTS
6745 panic("control->data not null at read eor?");
6746 #else
6747 SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
6748 sctp_m_freem(control->data);
6749 control->data = NULL;
6750 #endif
6751 }
6752 done_with_control:
6753 if (hold_rlock == 0) {
6754 SCTP_INP_READ_LOCK(inp);
6755 hold_rlock = 1;
6756 }
6757 TAILQ_REMOVE(&inp->read_queue, control, next);
6758 /* Add back any hiddend data */
6759 if (control->held_length) {
6760 held_length = 0;
6761 control->held_length = 0;
6762 wakeup_read_socket = 1;
6763 }
6764 if (control->aux_data) {
6765 sctp_m_free (control->aux_data);
6766 control->aux_data = NULL;
6767 }
6768 no_rcv_needed = control->do_not_ref_stcb;
6769 sctp_free_remote_addr(control->whoFrom);
6770 control->data = NULL;
6771 #ifdef INVARIANTS
6772 if (control->on_strm_q) {
6773 panic("About to free ctl:%p so:%p and its in %d",
6774 control, so, control->on_strm_q);
6775 }
6776 #endif
6777 sctp_free_a_readq(stcb, control);
6778 control = NULL;
6779 if ((freed_so_far >= rwnd_req) &&
6780 (no_rcv_needed == 0))
6781 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6782
6783 } else {
6784 /*
6785 * The user did not read all of this
6786 * message, turn off the returned MSG_EOR
6787 * since we are leaving more behind on the
6788 * control to read.
6789 */
6790 #ifdef INVARIANTS
6791 if (control->end_added &&
6792 (control->data == NULL) &&
6793 (control->tail_mbuf == NULL)) {
6794 panic("Gak, control->length is corrupt?");
6795 }
6796 #endif
6797 no_rcv_needed = control->do_not_ref_stcb;
6798 out_flags &= ~MSG_EOR;
6799 }
6800 }
6801 if (out_flags & MSG_EOR) {
6802 goto release;
6803 }
6804 #if defined(__APPLE__) && !defined(__Userspace__)
6805 #if defined(APPLE_LEOPARD)
6806 if ((uio->uio_resid == 0) ||
6807 #else
6808 if ((uio_resid(uio) == 0) ||
6809 #endif
6810 #else
6811 if ((uio->uio_resid == 0) ||
6812 #endif
6813 ((in_eeor_mode) &&
6814 (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))) {
6815 goto release;
6816 }
6817 /*
6818 * If I hit here the receiver wants more and this message is
6819 * NOT done (pd-api). So two questions. Can we block? if not
6820 * we are done. Did the user NOT set MSG_WAITALL?
6821 */
6822 if (block_allowed == 0) {
6823 goto release;
6824 }
6825 /*
6826 * We need to wait for more data a few things: - We don't
6827 * sbunlock() so we don't get someone else reading. - We
6828 * must be sure to account for the case where what is added
6829 * is NOT to our control when we wakeup.
6830 */
6831
6832 /* Do we need to tell the transport a rwnd update might be
6833 * needed before we go to sleep?
6834 */
6835 if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
6836 ((freed_so_far >= rwnd_req) &&
6837 (control->do_not_ref_stcb == 0) &&
6838 (no_rcv_needed == 0))) {
6839 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6840 }
6841 wait_some_more:
6842 #if (defined(__FreeBSD__) || defined(_WIN32)) && !defined(__Userspace__)
6843 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
6844 goto release;
6845 }
6846 #else
6847 if (so->so_state & SS_CANTRCVMORE) {
6848 goto release;
6849 }
6850 #endif
6851
6852 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
6853 goto release;
6854
6855 if (hold_rlock == 1) {
6856 SCTP_INP_READ_UNLOCK(inp);
6857 hold_rlock = 0;
6858 }
6859 if (hold_sblock == 0) {
6860 SOCKBUF_LOCK(&so->so_rcv);
6861 hold_sblock = 1;
6862 }
6863 if ((copied_so_far) && (control->length == 0) &&
6864 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
6865 goto release;
6866 }
6867 #if defined(__APPLE__) && !defined(__Userspace__)
6868 sbunlock(&so->so_rcv, 1);
6869 #endif
6870 if (so->so_rcv.sb_cc <= control->held_length) {
6871 error = sbwait(&so->so_rcv);
6872 if (error) {
6873 #if defined(__FreeBSD__) && !defined(__Userspace__)
6874 goto release;
6875 #else
6876 goto release_unlocked;
6877 #endif
6878 }
6879 control->held_length = 0;
6880 }
6881 #if defined(__APPLE__) && !defined(__Userspace__)
6882 error = sblock(&so->so_rcv, SBLOCKWAIT(in_flags));
6883 #endif
6884 if (hold_sblock) {
6885 SOCKBUF_UNLOCK(&so->so_rcv);
6886 hold_sblock = 0;
6887 }
6888 if (control->length == 0) {
6889 /* still nothing here */
6890 if (control->end_added == 1) {
6891 /* he aborted, or is done i.e.did a shutdown */
6892 out_flags |= MSG_EOR;
6893 if (control->pdapi_aborted) {
6894 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6895 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6896
6897 out_flags |= MSG_TRUNC;
6898 } else {
6899 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6900 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6901 }
6902 goto done_with_control;
6903 }
6904 if (so->so_rcv.sb_cc > held_length) {
6905 control->held_length = so->so_rcv.sb_cc;
6906 held_length = 0;
6907 }
6908 goto wait_some_more;
6909 } else if (control->data == NULL) {
6910 /* we must re-sync since data
6911 * is probably being added
6912 */
6913 SCTP_INP_READ_LOCK(inp);
6914 if ((control->length > 0) && (control->data == NULL)) {
6915 /* big trouble.. we have the lock and its corrupt? */
6916 #ifdef INVARIANTS
6917 panic ("Impossible data==NULL length !=0");
6918 #endif
6919 out_flags |= MSG_EOR;
6920 out_flags |= MSG_TRUNC;
6921 control->length = 0;
6922 SCTP_INP_READ_UNLOCK(inp);
6923 goto done_with_control;
6924 }
6925 SCTP_INP_READ_UNLOCK(inp);
6926 /* We will fall around to get more data */
6927 }
6928 goto get_more_data;
6929 } else {
6930 /*-
6931 * Give caller back the mbuf chain,
6932 * store in uio_resid the length
6933 */
6934 wakeup_read_socket = 0;
6935 if ((control->end_added == 0) ||
6936 (TAILQ_NEXT(control, next) == NULL)) {
6937 /* Need to get rlock */
6938 if (hold_rlock == 0) {
6939 SCTP_INP_READ_LOCK(inp);
6940 hold_rlock = 1;
6941 }
6942 }
6943 if (control->end_added) {
6944 out_flags |= MSG_EOR;
6945 if ((control->do_not_ref_stcb == 0) &&
6946 (control->stcb != NULL) &&
6947 ((control->spec_flags & M_NOTIFICATION) == 0))
6948 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6949 }
6950 if (control->spec_flags & M_NOTIFICATION) {
6951 out_flags |= MSG_NOTIFICATION;
6952 }
6953 #if defined(__APPLE__) && !defined(__Userspace__)
6954 #if defined(APPLE_LEOPARD)
6955 uio->uio_resid = control->length;
6956 #else
6957 uio_setresid(uio, control->length);
6958 #endif
6959 #else
6960 uio->uio_resid = control->length;
6961 #endif
6962 *mp = control->data;
6963 m = control->data;
6964 while (m) {
6965 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6966 sctp_sblog(&so->so_rcv,
6967 control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6968 }
6969 sctp_sbfree(control, stcb, &so->so_rcv, m);
6970 freed_so_far += (uint32_t)SCTP_BUF_LEN(m);
6971 freed_so_far += MSIZE;
6972 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6973 sctp_sblog(&so->so_rcv,
6974 control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
6975 }
6976 m = SCTP_BUF_NEXT(m);
6977 }
6978 control->data = control->tail_mbuf = NULL;
6979 control->length = 0;
6980 if (out_flags & MSG_EOR) {
6981 /* Done with this control */
6982 goto done_with_control;
6983 }
6984 }
6985 release:
6986 if (hold_rlock == 1) {
6987 SCTP_INP_READ_UNLOCK(inp);
6988 hold_rlock = 0;
6989 }
6990 #if defined(__Userspace__)
6991 if (hold_sblock == 0) {
6992 SOCKBUF_LOCK(&so->so_rcv);
6993 hold_sblock = 1;
6994 }
6995 #else
6996 if (hold_sblock == 1) {
6997 SOCKBUF_UNLOCK(&so->so_rcv);
6998 hold_sblock = 0;
6999 }
7000 #endif
7001 #if defined(__APPLE__) && !defined(__Userspace__)
7002 sbunlock(&so->so_rcv, 1);
7003 #endif
7004
7005 #if defined(__FreeBSD__) && !defined(__Userspace__)
7006 sbunlock(&so->so_rcv);
7007 sockbuf_lock = 0;
7008 #endif
7009
7010 release_unlocked:
7011 if (hold_sblock) {
7012 SOCKBUF_UNLOCK(&so->so_rcv);
7013 hold_sblock = 0;
7014 }
7015 if ((stcb) && (in_flags & MSG_PEEK) == 0) {
7016 if ((freed_so_far >= rwnd_req) &&
7017 (control && (control->do_not_ref_stcb == 0)) &&
7018 (no_rcv_needed == 0))
7019 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
7020 }
7021 out:
7022 if (msg_flags) {
7023 *msg_flags = out_flags;
7024 }
7025 if (((out_flags & MSG_EOR) == 0) &&
7026 ((in_flags & MSG_PEEK) == 0) &&
7027 (sinfo) &&
7028 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
7029 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
7030 struct sctp_extrcvinfo *s_extra;
7031 s_extra = (struct sctp_extrcvinfo *)sinfo;
7032 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
7033 }
7034 if (hold_rlock == 1) {
7035 SCTP_INP_READ_UNLOCK(inp);
7036 }
7037 if (hold_sblock) {
7038 SOCKBUF_UNLOCK(&so->so_rcv);
7039 }
7040 #if defined(__FreeBSD__) && !defined(__Userspace__)
7041 if (sockbuf_lock) {
7042 sbunlock(&so->so_rcv);
7043 }
7044 #endif
7045
7046 if (freecnt_applied) {
7047 /*
7048 * The lock on the socket buffer protects us so the free
7049 * code will stop. But since we used the socketbuf lock and
7050 * the sender uses the tcb_lock to increment, we need to use
7051 * the atomic add to the refcnt.
7052 */
7053 if (stcb == NULL) {
7054 #ifdef INVARIANTS
7055 panic("stcb for refcnt has gone NULL?");
7056 goto stage_left;
7057 #else
7058 goto stage_left;
7059 #endif
7060 }
7061 /* Save the value back for next time */
7062 stcb->freed_by_sorcv_sincelast = freed_so_far;
7063 atomic_add_int(&stcb->asoc.refcnt, -1);
7064 }
7065 if (SCTP_BASE_SYSCTL(sctp_logging_level) &SCTP_RECV_RWND_LOGGING_ENABLE) {
7066 if (stcb) {
7067 sctp_misc_ints(SCTP_SORECV_DONE,
7068 freed_so_far,
7069 #if defined(__APPLE__) && !defined(__Userspace__)
7070 #if defined(APPLE_LEOPARD)
7071 ((uio) ? (slen - uio->uio_resid) : slen),
7072 #else
7073 ((uio) ? (slen - uio_resid(uio)) : slen),
7074 #endif
7075 #else
7076 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
7077 #endif
7078 stcb->asoc.my_rwnd,
7079 so->so_rcv.sb_cc);
7080 } else {
7081 sctp_misc_ints(SCTP_SORECV_DONE,
7082 freed_so_far,
7083 #if defined(__APPLE__) && !defined(__Userspace__)
7084 #if defined(APPLE_LEOPARD)
7085 ((uio) ? (slen - uio->uio_resid) : slen),
7086 #else
7087 ((uio) ? (slen - uio_resid(uio)) : slen),
7088 #endif
7089 #else
7090 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
7091 #endif
7092 0,
7093 so->so_rcv.sb_cc);
7094 }
7095 }
7096 stage_left:
7097 if (wakeup_read_socket) {
7098 sctp_sorwakeup(inp, so);
7099 }
7100 return (error);
7101 }
7102
7103 #ifdef SCTP_MBUF_LOGGING
7104 struct mbuf *
7105 sctp_m_free(struct mbuf *m)
7106 {
7107 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
7108 sctp_log_mb(m, SCTP_MBUF_IFREE);
7109 }
7110 return (m_free(m));
7111 }
7112
7113 void
7114 sctp_m_freem(struct mbuf *mb)
7115 {
7116 while (mb != NULL)
7117 mb = sctp_m_free(mb);
7118 }
7119
7120 #endif
7121
/*
 * Request a peer-set-primary for every association that uses the given
 * local address: queue a work item on the global address work queue and
 * kick the ADDR_WQ timer so the iterator processes it.
 * Returns 0 on success, or EADDRNOTAVAIL / ENOMEM on failure.
 */
int
sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
{
	/* Given a local address. For all associations
	 * that holds the address, request a peer-set-primary.
	 */
	struct sctp_ifa *ifa;
	struct sctp_laddr *wi;

	/* The address must belong to a local interface in this VRF. */
	ifa = sctp_find_ifa_by_addr(sa, vrf_id, SCTP_ADDR_NOT_LOCKED);
	if (ifa == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
		return (EADDRNOTAVAIL);
	}
	/* Now that we have the ifa we must awaken the
	 * iterator with this message.
	 */
	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
	if (wi == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	/* Now incr the count and int wi structure */
	SCTP_INCR_LADDR_COUNT();
	memset(wi, 0, sizeof(*wi));
	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
	wi->ifa = ifa;
	wi->action = SCTP_SET_PRIM_ADDR;
	/* The work queue entry holds its own reference on the ifa. */
	atomic_add_int(&ifa->refcount, 1);

	/* Now add it to the work queue */
	SCTP_WQ_ADDR_LOCK();
	/*
	 * Should this really be a tailq? As it is we will process the
	 * newest first :-0
	 */
	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
	/* Start the timer that drains the address work queue. */
	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
			 (struct sctp_inpcb *)NULL,
			 (struct sctp_tcb *)NULL,
			 (struct sctp_nets *)NULL);
	SCTP_WQ_ADDR_UNLOCK();
	return (0);
}
7166
7167 #if defined(__Userspace__)
7168 /* no sctp_soreceive for __Userspace__ now */
7169 #endif
7170 #if !defined(__Userspace__)
/*
 * Socket-level receive entry point for SCTP.
 * Thin wrapper around sctp_sorecvmsg(): optionally collects per-message
 * receive info into a struct sctp_extrcvinfo (handed back to the caller
 * as a control mbuf via *controlp) and the peer address (via *psa).
 * Returns 0 or an errno value from sctp_sorecvmsg().
 */
int
sctp_soreceive( struct socket *so,
		struct sockaddr **psa,
		struct uio *uio,
		struct mbuf **mp0,
		struct mbuf **controlp,
		int *flagsp)
{
	int error, fromlen;
	uint8_t sockbuf[256];	/* scratch storage for the peer address */
	struct sockaddr *from;
	struct sctp_extrcvinfo sinfo;
	int filling_sinfo = 1;	/* collect sinfo unless the user opted out */
	int flags;
	struct sctp_inpcb *inp;

	inp = (struct sctp_inpcb *)so->so_pcb;
	/* pickup the assoc we are reading from */
	if (inp == NULL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		return (EINVAL);
	}
	/* Skip sinfo collection if no receive-info feature is on,
	 * or the caller did not supply a control mbuf pointer. */
	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	     sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	     sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
	    (controlp == NULL)) {
		/* user does not want the sndrcv ctl */
		filling_sinfo = 0;
	}
	if (psa) {
		from = (struct sockaddr *)sockbuf;
		fromlen = sizeof(sockbuf);
#ifdef HAVE_SA_LEN
		from->sa_len = 0;
#endif
	} else {
		from = NULL;
		fromlen = 0;
	}

#if defined(__APPLE__) && !defined(__Userspace__)
	SCTP_SOCKET_LOCK(so, 1);
#endif
	if (filling_sinfo) {
		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
	}
	if (flagsp != NULL) {
		flags = *flagsp;
	} else {
		flags = 0;
	}
	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags,
	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
	if (flagsp != NULL) {
		*flagsp = flags;
	}
	if (controlp != NULL) {
		/* copy back the sinfo in a CMSG format */
		if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) {
			*controlp = sctp_build_ctl_nchunk(inp,
			    (struct sctp_sndrcvinfo *)&sinfo);
		} else {
			*controlp = NULL;
		}
	}
	if (psa) {
		/* copy back the address info */
#ifdef HAVE_SA_LEN
		if (from && from->sa_len) {
#else
		if (from) {
#endif
#if (defined(__FreeBSD__) || defined(_WIN32)) && !defined(__Userspace__)
			*psa = sodupsockaddr(from, M_NOWAIT);
#else
			*psa = dup_sockaddr(from, mp0 == 0);
#endif
		} else {
			*psa = NULL;
		}
	}
#if defined(__APPLE__) && !defined(__Userspace__)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
	return (error);
}
7257
7258 #if defined(_WIN32) && !defined(__Userspace__)
7259 /*
7260 * General routine to allocate a hash table with control of memory flags.
7261 * is in 7.0 and beyond for sure :-)
7262 */
/*
 * Allocate a LIST-based hash table whose bucket count is the largest
 * power of two not exceeding 'elements'.  'flags' must select either
 * HASH_WAITOK or HASH_NOWAIT allocation semantics.  On success the
 * bucket mask (size - 1) is stored in *hashmask.  Returns NULL on
 * allocation failure or (without INVARIANTS) on bad flags.
 */
void *
sctp_hashinit_flags(int elements, struct malloc_type *type,
		    u_long *hashmask, int flags)
{
	long hashsize;
	LIST_HEAD(generic, generic) *hashtbl;
	int i;


	if (elements <= 0) {
#ifdef INVARIANTS
		panic("hashinit: bad elements");
#else
		SCTP_PRINTF("hashinit: bad elements?");
		elements = 1;
#endif
	}
	/* Round down to the largest power of two <= elements. */
	for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
		continue;
	hashsize >>= 1;
	if (flags & HASH_WAITOK)
		hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
	else if (flags & HASH_NOWAIT)
		hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_NOWAIT);
	else {
#ifdef INVARIANTS
		panic("flag incorrect in hashinit_flags");
#else
		return (NULL);
#endif
	}

	/* no memory? */
	if (hashtbl == NULL)
		return (NULL);

	/* Start every bucket out as an empty list. */
	for (i = 0; i < hashsize; i++)
		LIST_INIT(&hashtbl[i]);
	*hashmask = hashsize - 1;
	return (hashtbl);
}
7304 #endif
7305 #else /* __Userspace__ ifdef above sctp_soreceive */
7306 /*
7307 * __Userspace__ Defining sctp_hashinit_flags() and sctp_hashdestroy() for userland.
7308 * NOTE: We don't want multiple definitions here. So sctp_hashinit_flags() above for
7309 *__FreeBSD__ must be excluded.
7310 *
7311 */
7312
7313 void *
7314 sctp_hashinit_flags(int elements, struct malloc_type *type,
7315 u_long *hashmask, int flags)
7316 {
7317 long hashsize;
7318 LIST_HEAD(generic, generic) *hashtbl;
7319 int i;
7320
7321 if (elements <= 0) {
7322 SCTP_PRINTF("hashinit: bad elements?");
7323 #ifdef INVARIANTS
7324 return (NULL);
7325 #else
7326 elements = 1;
7327 #endif
7328 }
7329 for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
7330 continue;
7331 hashsize >>= 1;
7332 /*cannot use MALLOC here because it has to be declared or defined
7333 using MALLOC_DECLARE or MALLOC_DEFINE first. */
7334 if (flags & HASH_WAITOK)
7335 hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl));
7336 else if (flags & HASH_NOWAIT)
7337 hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl));
7338 else {
7339 #ifdef INVARIANTS
7340 SCTP_PRINTF("flag incorrect in hashinit_flags.\n");
7341 #endif
7342 return (NULL);
7343 }
7344
7345 /* no memory? */
7346 if (hashtbl == NULL)
7347 return (NULL);
7348
7349 for (i = 0; i < hashsize; i++)
7350 LIST_INIT(&hashtbl[i]);
7351 *hashmask = hashsize - 1;
7352 return (hashtbl);
7353 }
7354
7355 void
7356 sctp_hashdestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask)
7357 {
7358 LIST_HEAD(generic, generic) *hashtbl, *hp;
7359
7360 hashtbl = vhashtbl;
7361 for (hp = hashtbl; hp <= &hashtbl[hashmask]; hp++)
7362 if (!LIST_EMPTY(hp)) {
7363 SCTP_PRINTF("hashdestroy: hash not empty.\n");
7364 return;
7365 }
7366 FREE(hashtbl, type);
7367 }
7368
/*
 * Free a hash table without checking whether its buckets are empty.
 * NOTE(review): entries still chained in the buckets are NOT freed
 * here.  The disabled loop below tried that and was abandoned — per
 * its comment the list entries are not individually heap-allocated,
 * so passing them to FREE() fails.
 */
void
sctp_hashfreedestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask)
{
	LIST_HEAD(generic, generic) *hashtbl/*, *hp*/;
	/*
	LIST_ENTRY(type) *start, *temp;
	*/
	hashtbl = vhashtbl;
	/* Apparently temp is not dynamically allocated, so attempts to
	   free it results in error.
	for (hp = hashtbl; hp <= &hashtbl[hashmask]; hp++)
		if (!LIST_EMPTY(hp)) {
			start = LIST_FIRST(hp);
			while (start != NULL) {
				temp = start;
				start = start->le_next;
				SCTP_PRINTF("%s: %p \n", __func__, (void *)temp);
				FREE(temp, type);
			}
		}
	*/
	FREE(hashtbl, type);
}
7392
7393 #endif
/*
 * sctp_connectx() helper: add the 'totaddr' packed addresses starting
 * at 'addr' to the association as confirmed remote addresses.
 * Returns the number of addresses added.  On failure *error is set
 * (EINVAL / ENOBUFS) and the association has already been destroyed
 * via sctp_free_assoc() -- the caller must not touch stcb afterwards.
 */
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
			 int totaddr, int *error)
{
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;
#ifdef INET
	struct sockaddr_in *sin;
#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;
#endif

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = sizeof(struct sockaddr_in);
			sin = (struct sockaddr_in *)sa;
			/* Reject wildcard, broadcast, and multicast addresses. */
			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
						      SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
						 SCTP_DONOT_SETSCOPE,
						 SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
						      SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			incr = sizeof(struct sockaddr_in6);
			sin6 = (struct sockaddr_in6 *)sa;
			/* Reject unspecified and multicast addresses. */
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
						      SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
						 SCTP_DONOT_SETSCOPE,
						 SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
						      SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
#if defined(__Userspace__)
		case AF_CONN:
			incr = sizeof(struct sockaddr_conn);
			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
						 SCTP_DONOT_SETSCOPE,
						 SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
						      SCTP_FROM_SCTPUTIL + SCTP_LOC_11);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
		default:
			/*
			 * NOTE(review): an unknown family is skipped, but
			 * 'incr' keeps its previous value (0 on the first
			 * iteration), so 'sa' may not advance -- confirm
			 * callers validate the list beforehand (see
			 * sctp_connectx_helper_find()).
			 */
			break;
		}
		/* Step to the next packed sockaddr. */
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
 out_now:
	return (added);
}
7490
/*
 * sctp_connectx() helper: validate a packed address list of 'totaddr'
 * entries within a buffer of 'limit' bytes.  Counts IPv4 and IPv6
 * addresses into *num_v4 / *num_v6, checks each entry fits in the
 * buffer, and verifies no address already belongs to an association
 * on this endpoint.  Returns 0 when the list is usable, EINVAL on a
 * malformed list, or EALREADY if an association already exists.
 */
int
sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
			  unsigned int totaddr,
			  unsigned int *num_v4, unsigned int *num_v6,
			  unsigned int limit)
{
	struct sockaddr *sa;
	struct sctp_tcb *stcb;
	unsigned int incr, at, i;

	at = 0;
	sa = addr;
	*num_v6 = *num_v4 = 0;
	/* account and validate addresses */
	if (totaddr == 0) {
		return (EINVAL);
	}
	for (i = 0; i < totaddr; i++) {
		/* At least a generic sockaddr header must fit. */
		if (at + sizeof(struct sockaddr) > limit) {
			return (EINVAL);
		}
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = (unsigned int)sizeof(struct sockaddr_in);
#ifdef HAVE_SA_LEN
			if (sa->sa_len != incr) {
				return (EINVAL);
			}
#endif
			(*num_v4) += 1;
			break;
#endif
#ifdef INET6
		case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			sin6 = (struct sockaddr_in6 *)sa;
			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
				/* Must be non-mapped for connectx */
				return (EINVAL);
			}
			incr = (unsigned int)sizeof(struct sockaddr_in6);
#ifdef HAVE_SA_LEN
			if (sa->sa_len != incr) {
				return (EINVAL);
			}
#endif
			(*num_v6) += 1;
			break;
		}
#endif
		default:
			return (EINVAL);
		}
		/* The complete family-specific address must fit too. */
		if ((at + incr) > limit) {
			return (EINVAL);
		}
		/* Hold the endpoint across the association lookup. */
		SCTP_INP_INCR_REF(inp);
		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
		if (stcb != NULL) {
			/* An association already uses this address. */
			SCTP_TCB_UNLOCK(stcb);
			return (EALREADY);
		} else {
			SCTP_INP_DECR_REF(inp);
		}
		at += incr;
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
	return (0);
}
7563
7564 /*
7565 * sctp_bindx(ADD) for one address.
7566 * assumes all arguments are valid/checked by caller.
7567 */
7568 void
7569 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
7570 struct sockaddr *sa, uint32_t vrf_id, int *error,
7571 void *p)
7572 {
7573 #if defined(INET) && defined(INET6)
7574 struct sockaddr_in sin;
7575 #endif
7576 #ifdef INET6
7577 struct sockaddr_in6 *sin6;
7578 #endif
7579 #ifdef INET
7580 struct sockaddr_in *sinp;
7581 #endif
7582 struct sockaddr *addr_to_use;
7583 struct sctp_inpcb *lep;
7584 #ifdef SCTP_MVRF
7585 int i;
7586 #endif
7587 uint16_t port;
7588
7589 /* see if we're bound all already! */
7590 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
7591 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7592 *error = EINVAL;
7593 return;
7594 }
7595 #ifdef SCTP_MVRF
7596 /* Is the VRF one we have */
7597 for (i = 0; i < inp->num_vrfs; i++) {
7598 if (vrf_id == inp->m_vrf_ids[i]) {
7599 break;
7600 }
7601 }
7602 if (i == inp->num_vrfs) {
7603 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7604 *error = EINVAL;
7605 return;
7606 }
7607 #endif
7608 switch (sa->sa_family) {
7609 #ifdef INET6
7610 case AF_INET6:
7611 #ifdef HAVE_SA_LEN
7612 if (sa->sa_len != sizeof(struct sockaddr_in6)) {
7613 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7614 *error = EINVAL;
7615 return;
7616 }
7617 #endif
7618 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
7619 /* can only bind v6 on PF_INET6 sockets */
7620 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7621 *error = EINVAL;
7622 return;
7623 }
7624 sin6 = (struct sockaddr_in6 *)sa;
7625 port = sin6->sin6_port;
7626 #ifdef INET
7627 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
7628 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
7629 SCTP_IPV6_V6ONLY(inp)) {
7630 /* can't bind v4-mapped on PF_INET sockets */
7631 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7632 *error = EINVAL;
7633 return;
7634 }
7635 in6_sin6_2_sin(&sin, sin6);
7636 addr_to_use = (struct sockaddr *)&sin;
7637 } else {
7638 addr_to_use = sa;
7639 }
7640 #else
7641 addr_to_use = sa;
7642 #endif
7643 break;
7644 #endif
7645 #ifdef INET
7646 case AF_INET:
7647 #ifdef HAVE_SA_LEN
7648 if (sa->sa_len != sizeof(struct sockaddr_in)) {
7649 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7650 *error = EINVAL;
7651 return;
7652 }
7653 #endif
7654 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
7655 SCTP_IPV6_V6ONLY(inp)) {
7656 /* can't bind v4 on PF_INET sockets */
7657 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7658 *error = EINVAL;
7659 return;
7660 }
7661 sinp = (struct sockaddr_in *)sa;
7662 port = sinp->sin_port;
7663 addr_to_use = sa;
7664 break;
7665 #endif
7666 default:
7667 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7668 *error = EINVAL;
7669 return;
7670 }
7671 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
7672 #if !(defined(_WIN32) || defined(__Userspace__))
7673 if (p == NULL) {
7674 /* Can't get proc for Net/Open BSD */
7675 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7676 *error = EINVAL;
7677 return;
7678 }
7679 #endif
7680 *error = sctp_inpcb_bind(so, addr_to_use, NULL, p);
7681 return;
7682 }
7683 /* Validate the incoming port. */
7684 if ((port != 0) && (port != inp->sctp_lport)) {
7685 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7686 *error = EINVAL;
7687 return;
7688 }
7689 lep = sctp_pcb_findep(addr_to_use, 1, 0, vrf_id);
7690 if (lep == NULL) {
7691 /* add the address */
7692 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use,
7693 SCTP_ADD_IP_ADDRESS, vrf_id);
7694 } else {
7695 if (lep != inp) {
7696 *error = EADDRINUSE;
7697 }
7698 SCTP_INP_DECR_REF(lep);
7699 }
7700 }
7701
7702 /*
7703 * sctp_bindx(DELETE) for one address.
7704 * assumes all arguments are valid/checked by caller.
7705 */
7706 void
7707 sctp_bindx_delete_address(struct sctp_inpcb *inp,
7708 struct sockaddr *sa, uint32_t vrf_id, int *error)
7709 {
7710 struct sockaddr *addr_to_use;
7711 #if defined(INET) && defined(INET6)
7712 struct sockaddr_in6 *sin6;
7713 struct sockaddr_in sin;
7714 #endif
7715 #ifdef SCTP_MVRF
7716 int i;
7717 #endif
7718
7719 /* see if we're bound all already! */
7720 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
7721 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7722 *error = EINVAL;
7723 return;
7724 }
7725 #ifdef SCTP_MVRF
7726 /* Is the VRF one we have */
7727 for (i = 0; i < inp->num_vrfs; i++) {
7728 if (vrf_id == inp->m_vrf_ids[i]) {
7729 break;
7730 }
7731 }
7732 if (i == inp->num_vrfs) {
7733 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7734 *error = EINVAL;
7735 return;
7736 }
7737 #endif
7738 switch (sa->sa_family) {
7739 #ifdef INET6
7740 case AF_INET6:
7741 #ifdef HAVE_SA_LEN
7742 if (sa->sa_len != sizeof(struct sockaddr_in6)) {
7743 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7744 *error = EINVAL;
7745 return;
7746 }
7747 #endif
7748 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
7749 /* can only bind v6 on PF_INET6 sockets */
7750 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7751 *error = EINVAL;
7752 return;
7753 }
7754 #ifdef INET
7755 sin6 = (struct sockaddr_in6 *)sa;
7756 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
7757 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
7758 SCTP_IPV6_V6ONLY(inp)) {
7759 /* can't bind mapped-v4 on PF_INET sockets */
7760 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7761 *error = EINVAL;
7762 return;
7763 }
7764 in6_sin6_2_sin(&sin, sin6);
7765 addr_to_use = (struct sockaddr *)&sin;
7766 } else {
7767 addr_to_use = sa;
7768 }
7769 #else
7770 addr_to_use = sa;
7771 #endif
7772 break;
7773 #endif
7774 #ifdef INET
7775 case AF_INET:
7776 #ifdef HAVE_SA_LEN
7777 if (sa->sa_len != sizeof(struct sockaddr_in)) {
7778 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7779 *error = EINVAL;
7780 return;
7781 }
7782 #endif
7783 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
7784 SCTP_IPV6_V6ONLY(inp)) {
7785 /* can't bind v4 on PF_INET sockets */
7786 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7787 *error = EINVAL;
7788 return;
7789 }
7790 addr_to_use = sa;
7791 break;
7792 #endif
7793 default:
7794 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7795 *error = EINVAL;
7796 return;
7797 }
7798 /* No lock required mgmt_ep_sa does its own locking. */
7799 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, SCTP_DEL_IP_ADDRESS,
7800 vrf_id);
7801 }
7802
7803 /*
7804 * returns the valid local address count for an assoc, taking into account
7805 * all scoping rules
7806 */
7807 int
7808 sctp_local_addr_count(struct sctp_tcb *stcb)
7809 {
7810 int loopback_scope;
7811 #if defined(INET)
7812 int ipv4_local_scope, ipv4_addr_legal;
7813 #endif
7814 #if defined(INET6)
7815 int local_scope, site_scope, ipv6_addr_legal;
7816 #endif
7817 #if defined(__Userspace__)
7818 int conn_addr_legal;
7819 #endif
7820 struct sctp_vrf *vrf;
7821 struct sctp_ifn *sctp_ifn;
7822 struct sctp_ifa *sctp_ifa;
7823 int count = 0;
7824
7825 /* Turn on all the appropriate scopes */
7826 loopback_scope = stcb->asoc.scope.loopback_scope;
7827 #if defined(INET)
7828 ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
7829 ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
7830 #endif
7831 #if defined(INET6)
7832 local_scope = stcb->asoc.scope.local_scope;
7833 site_scope = stcb->asoc.scope.site_scope;
7834 ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
7835 #endif
7836 #if defined(__Userspace__)
7837 conn_addr_legal = stcb->asoc.scope.conn_addr_legal;
7838 #endif
7839 SCTP_IPI_ADDR_RLOCK();
7840 vrf = sctp_find_vrf(stcb->asoc.vrf_id);
7841 if (vrf == NULL) {
7842 /* no vrf, no addresses */
7843 SCTP_IPI_ADDR_RUNLOCK();
7844 return (0);
7845 }
7846
7847 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
7848 /*
7849 * bound all case: go through all ifns on the vrf
7850 */
7851 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
7852 if ((loopback_scope == 0) &&
7853 SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
7854 continue;
7855 }
7856 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
7857 if (sctp_is_addr_restricted(stcb, sctp_ifa))
7858 continue;
7859 switch (sctp_ifa->address.sa.sa_family) {
7860 #ifdef INET
7861 case AF_INET:
7862 if (ipv4_addr_legal) {
7863 struct sockaddr_in *sin;
7864
7865 sin = &sctp_ifa->address.sin;
7866 if (sin->sin_addr.s_addr == 0) {
7867 /* skip unspecified addrs */
7868 continue;
7869 }
7870 #if defined(__FreeBSD__) && !defined(__Userspace__)
7871 if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
7872 &sin->sin_addr) != 0) {
7873 continue;
7874 }
7875 #endif
7876 if ((ipv4_local_scope == 0) &&
7877 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
7878 continue;
7879 }
7880 /* count this one */
7881 count++;
7882 } else {
7883 continue;
7884 }
7885 break;
7886 #endif
7887 #ifdef INET6
7888 case AF_INET6:
7889 if (ipv6_addr_legal) {
7890 struct sockaddr_in6 *sin6;
7891
7892 #if defined(SCTP_EMBEDDED_V6_SCOPE) && !defined(SCTP_KAME)
7893 struct sockaddr_in6 lsa6;
7894 #endif
7895 sin6 = &sctp_ifa->address.sin6;
7896 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
7897 continue;
7898 }
7899 #if defined(__FreeBSD__) && !defined(__Userspace__)
7900 if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
7901 &sin6->sin6_addr) != 0) {
7902 continue;
7903 }
7904 #endif
7905 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
7906 if (local_scope == 0)
7907 continue;
7908 #if defined(SCTP_EMBEDDED_V6_SCOPE)
7909 if (sin6->sin6_scope_id == 0) {
7910 #ifdef SCTP_KAME
7911 if (sa6_recoverscope(sin6) != 0)
7912 /*
7913 * bad link
7914 * local
7915 * address
7916 */
7917 continue;
7918 #else
7919 lsa6 = *sin6;
7920 if (in6_recoverscope(&lsa6,
7921 &lsa6.sin6_addr,
7922 NULL))
7923 /*
7924 * bad link
7925 * local
7926 * address
7927 */
7928 continue;
7929 sin6 = &lsa6;
7930 #endif /* SCTP_KAME */
7931 }
7932 #endif /* SCTP_EMBEDDED_V6_SCOPE */
7933 }
7934 if ((site_scope == 0) &&
7935 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
7936 continue;
7937 }
7938 /* count this one */
7939 count++;
7940 }
7941 break;
7942 #endif
7943 #if defined(__Userspace__)
7944 case AF_CONN:
7945 if (conn_addr_legal) {
7946 count++;
7947 }
7948 break;
7949 #endif
7950 default:
7951 /* TSNH */
7952 break;
7953 }
7954 }
7955 }
7956 } else {
7957 /*
7958 * subset bound case
7959 */
7960 struct sctp_laddr *laddr;
7961 LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
7962 sctp_nxt_addr) {
7963 if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
7964 continue;
7965 }
7966 /* count this one */
7967 count++;
7968 }
7969 }
7970 SCTP_IPI_ADDR_RUNLOCK();
7971 return (count);
7972 }
7973
7974 #if defined(SCTP_LOCAL_TRACE_BUF)
7975
/*
 * Append one entry (subsys tag + six parameters + cycle-count
 * timestamp) to the circular in-memory SCTP trace log.  A slot is
 * claimed by advancing the shared index with an atomic
 * compare-and-swap loop, so concurrent callers need no lock.  On
 * Windows kernel builds the log is reached through a pointer that may
 * be NULL; elsewhere it is embedded directly in the sysctl structure.
 */
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	uint32_t saveindex, newindex;

#if defined(_WIN32) && !defined(__Userspace__)
	if (SCTP_BASE_SYSCTL(sctp_log) == NULL) {
		return;
	}
	/* Claim a slot: wrap the shared index past the end of the ring. */
	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log)->index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log)->index, saveindex, newindex) == 0);
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[5] = f;
#else
	/* Claim a slot: wrap the shared index past the end of the ring. */
	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
#endif
}
8026
8027 #endif
8028 #if defined(__FreeBSD__) && !defined(__Userspace__)
/*
 * Receive handler for SCTP packets tunneled over UDP.  Strips the UDP
 * header from the mbuf chain and re-injects the packet into the normal
 * SCTP (or SCTP/IPv6) input path, passing along the UDP source port.
 * The mbuf chain is consumed in all cases (processed or freed).
 */
static void
sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
			      const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
{
	struct ip *iph;
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	struct mbuf *sp, *last;
	struct udphdr *uhdr;
	uint16_t port;

	if ((m->m_flags & M_PKTHDR) == 0) {
		/* Can't handle one that is not a pkt hdr */
		goto out;
	}
	/* Pull the src port */
	iph = mtod(m, struct ip *);
	uhdr = (struct udphdr *)((caddr_t)iph + off);
	port = uhdr->uh_sport;
	/* Split out the mbuf chain. Leave the
	 * IP header in m, place the
	 * rest in the sp.
	 */
	sp = m_split(m, off, M_NOWAIT);
	if (sp == NULL) {
		/* Gak, drop packet, we can't do a split */
		goto out;
	}
	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
		/* Gak, packet can't have an SCTP header in it - too small */
		m_freem(sp);
		goto out;
	}
	/* Now pull up the UDP header and SCTP header together */
	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
	if (sp == NULL) {
		/* Gak pullup failed */
		goto out;
	}
	/* Trim out the UDP header */
	m_adj(sp, sizeof(struct udphdr));

	/* Now reconstruct the mbuf chain: reattach the payload (sp),
	 * now starting at the SCTP header, behind the IP header (m). */
	for (last = m; last->m_next; last = last->m_next);
	last->m_next = sp;
	m->m_pkthdr.len += sp->m_pkthdr.len;
	/*
	 * The CSUM_DATA_VALID flags indicates that the HW checked the
	 * UDP checksum and it was valid.
	 * Since CSUM_DATA_VALID == CSUM_SCTP_VALID this would imply that
	 * the HW also verified the SCTP checksum. Therefore, clear the bit.
	 */
	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
	        "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
	        m->m_pkthdr.len,
	        if_name(m->m_pkthdr.rcvif),
	        (int)m->m_pkthdr.csum_flags, CSUM_BITS);
	m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
	iph = mtod(m, struct ip *);
	switch (iph->ip_v) {
#ifdef INET
	case IPVERSION:
		/* Shrink the IP length to account for the removed UDP header. */
		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
		sctp_input_with_port(m, off, port);
		break;
#endif
#ifdef INET6
	case IPV6_VERSION >> 4:
		/* Shrink the IPv6 payload length likewise. */
		ip6 = mtod(m, struct ip6_hdr *);
		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
		sctp6_input_with_port(&m, &off, port);
		break;
#endif
	default:
		goto out;
		break;
	}
	return;
 out:
	m_freem(m);
}
8111
8112 #ifdef INET
/*
 * ICMP error handler for the IPv4 UDP tunneling socket.  'vip' points
 * at the inner IP header quoted in the ICMP error message.  The quoted
 * packet is validated against an existing association — UDP ports plus
 * verification tag, or the initiate tag of a quoted INIT chunk — before
 * the event is passed to sctp_notify(), so blindly spoofed ICMP
 * messages are ignored.
 */
static void
sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED)
{
	struct ip *outer_ip, *inner_ip;
	struct sctphdr *sh;
	struct icmp *icmp;
	struct udphdr *udp;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_init_chunk *ch;
	struct sockaddr_in src, dst;
	uint8_t type, code;

	inner_ip = (struct ip *)vip;
	/*
	 * Step back from the inner (quoted) IP header to the ICMP header
	 * and the outer IP header that precede it in the same buffer.
	 */
	icmp = (struct icmp *)((caddr_t)inner_ip -
	    (sizeof(struct icmp) - sizeof(struct ip)));
	outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
	/*
	 * The quoted data must cover the inner IP header, the UDP header
	 * and the first 8 bytes (ports + v_tag) of the SCTP common header.
	 */
	if (ntohs(outer_ip->ip_len) <
	    sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) {
		return;
	}
	udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2));
	sh = (struct sctphdr *)(udp + 1);
	memset(&src, 0, sizeof(struct sockaddr_in));
	src.sin_family = AF_INET;
#ifdef HAVE_SIN_LEN
	src.sin_len = sizeof(struct sockaddr_in);
#endif
	src.sin_port = sh->src_port;
	src.sin_addr = inner_ip->ip_src;
	memset(&dst, 0, sizeof(struct sockaddr_in));
	dst.sin_family = AF_INET;
#ifdef HAVE_SIN_LEN
	dst.sin_len = sizeof(struct sockaddr_in);
#endif
	dst.sin_port = sh->dest_port;
	dst.sin_addr = inner_ip->ip_dst;
	/*
	 * 'dst' holds the dest of the packet that failed to be sent.
	 * 'src' holds our local endpoint address. Thus we reverse
	 * the dst and the src in the lookup.
	 */
	inp = NULL;
	net = NULL;
	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
	                                    (struct sockaddr *)&src,
	                                    &inp, &net, 1,
	                                    SCTP_DEFAULT_VRFID);
	if ((stcb != NULL) &&
	    (net != NULL) &&
	    (inp != NULL)) {
		/* Check the UDP port numbers */
		if ((udp->uh_dport != net->port) ||
		    (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
			SCTP_TCB_UNLOCK(stcb);
			return;
		}
		/* Check the verification tag */
		if (ntohl(sh->v_tag) != 0) {
			/*
			 * This must be the verification tag used
			 * for sending out packets. We don't
			 * consider packets reflecting the
			 * verification tag.
			 */
			if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		} else {
			/*
			 * Zero v_tag: acceptable only for an INIT.  The
			 * extra 8 + 20 bytes cover the UDP header plus
			 * SCTP common header, chunk header and initiate
			 * tag of the quoted INIT chunk.
			 */
			if (ntohs(outer_ip->ip_len) >=
			    sizeof(struct ip) +
			    8 + (inner_ip->ip_hl << 2) + 8 + 20) {
				/*
				 * In this case we can check if we
				 * got an INIT chunk and if the
				 * initiate tag matches.
				 */
				ch = (struct sctp_init_chunk *)(sh + 1);
				if ((ch->ch.chunk_type != SCTP_INITIATION) ||
				    (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) {
					SCTP_TCB_UNLOCK(stcb);
					return;
				}
			} else {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		}
		type = icmp->icmp_type;
		code = icmp->icmp_code;
		/* Port unreachable is reported as protocol unreachable. */
		if ((type == ICMP_UNREACH) &&
		    (code == ICMP_UNREACH_PORT)) {
			code = ICMP_UNREACH_PROTOCOL;
		}
		/*
		 * NOTE(review): no SCTP_TCB_UNLOCK on this path —
		 * sctp_notify() appears to take over the TCB lock; every
		 * other exit unlocks explicitly.  Confirm against
		 * sctp_notify()'s contract.
		 */
		sctp_notify(inp, stcb, net, type, code,
		            ntohs(inner_ip->ip_len),
		            (uint32_t)ntohs(icmp->icmp_nextmtu));
#if defined(__Userspace__)
		/* Userspace stack: wake the application via the socket upcall. */
		if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) &&
		    (stcb->sctp_socket != NULL)) {
			struct socket *upcall_socket;

			upcall_socket = stcb->sctp_socket;
			SOCK_LOCK(upcall_socket);
			soref(upcall_socket);
			SOCK_UNLOCK(upcall_socket);
			if ((upcall_socket->so_upcall != NULL) &&
			    (upcall_socket->so_error != 0)) {
				(*upcall_socket->so_upcall)(upcall_socket, upcall_socket->so_upcallarg, M_NOWAIT);
			}
			ACCEPT_LOCK();
			SOCK_LOCK(upcall_socket);
			sorele(upcall_socket);
		}
#endif
	} else {
		if ((stcb == NULL) && (inp != NULL)) {
			/* reduce ref-count */
			SCTP_INP_WLOCK(inp);
			SCTP_INP_DECR_REF(inp);
			SCTP_INP_WUNLOCK(inp);
		}
		if (stcb) {
			SCTP_TCB_UNLOCK(stcb);
		}
	}
	return;
}
8243 #endif
8244
8245 #ifdef INET6
/*
 * ICMPv6 error handler for the IPv6 UDP tunneling socket.  'd' is a
 * struct ip6ctlparam describing the ICMPv6 message and the quoted
 * packet.  Unlike the IPv4 handler, header fields are extracted with
 * m_copydata() instead of direct pointer casts.  The quoted packet is
 * validated against an existing association (UDP ports plus
 * verification tag, or the initiate tag of a quoted INIT chunk) before
 * the event is passed to sctp6_notify().
 */
static void
sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED)
{
	struct ip6ctlparam *ip6cp;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctphdr sh;
	struct udphdr udp;
	struct sockaddr_in6 src, dst;
	uint8_t type, code;

	ip6cp = (struct ip6ctlparam *)d;
	/*
	 * XXX: We assume that when IPV6 is non NULL, M and OFF are
	 * valid.
	 */
	if (ip6cp->ip6c_m == NULL) {
		return;
	}
	/* Check if we can safely examine the ports and the
	 * verification tag of the SCTP common header.
	 */
	if (ip6cp->ip6c_m->m_pkthdr.len <
	    ip6cp->ip6c_off + sizeof(struct udphdr)+ offsetof(struct sctphdr, checksum)) {
		return;
	}
	/* Copy out the UDP header. */
	memset(&udp, 0, sizeof(struct udphdr));
	m_copydata(ip6cp->ip6c_m,
	           ip6cp->ip6c_off,
	           sizeof(struct udphdr),
	           (caddr_t)&udp);
	/* Copy out the port numbers and the verification tag. */
	memset(&sh, 0, sizeof(struct sctphdr));
	m_copydata(ip6cp->ip6c_m,
	           ip6cp->ip6c_off + sizeof(struct udphdr),
	           sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t),
	           (caddr_t)&sh);
	memset(&src, 0, sizeof(struct sockaddr_in6));
	src.sin6_family = AF_INET6;
#ifdef HAVE_SIN6_LEN
	src.sin6_len = sizeof(struct sockaddr_in6);
#endif
	src.sin6_port = sh.src_port;
	src.sin6_addr = ip6cp->ip6c_ip6->ip6_src;
#if defined(__FreeBSD__) && !defined(__Userspace__)
	/* Embed the interface scope for link-local addresses. */
	if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
		return;
	}
#endif
	memset(&dst, 0, sizeof(struct sockaddr_in6));
	dst.sin6_family = AF_INET6;
#ifdef HAVE_SIN6_LEN
	dst.sin6_len = sizeof(struct sockaddr_in6);
#endif
	dst.sin6_port = sh.dest_port;
	dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst;
#if defined(__FreeBSD__) && !defined(__Userspace__)
	if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
		return;
	}
#endif
	/*
	 * The quoted packet was sent by us, so reverse dst/src for the
	 * association lookup.
	 */
	inp = NULL;
	net = NULL;
	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
	                                    (struct sockaddr *)&src,
	                                    &inp, &net, 1, SCTP_DEFAULT_VRFID);
	if ((stcb != NULL) &&
	    (net != NULL) &&
	    (inp != NULL)) {
		/* Check the UDP port numbers */
		if ((udp.uh_dport != net->port) ||
		    (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
			SCTP_TCB_UNLOCK(stcb);
			return;
		}
		/* Check the verification tag */
		if (ntohl(sh.v_tag) != 0) {
			/*
			 * This must be the verification tag used for
			 * sending out packets. We don't consider
			 * packets reflecting the verification tag.
			 */
			if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		} else {
#if defined(__FreeBSD__) && !defined(__Userspace__)
			/*
			 * Zero v_tag: only acceptable when the quoted
			 * packet carries an INIT chunk whose initiate
			 * tag matches our verification tag.
			 */
			if (ip6cp->ip6c_m->m_pkthdr.len >=
			    ip6cp->ip6c_off + sizeof(struct udphdr) +
			                      sizeof(struct sctphdr) +
			                      sizeof(struct sctp_chunkhdr) +
			                      offsetof(struct sctp_init, a_rwnd)) {
				/*
				 * In this case we can check if we
				 * got an INIT chunk and if the
				 * initiate tag matches.
				 */
				uint32_t initiate_tag;
				uint8_t chunk_type;

				m_copydata(ip6cp->ip6c_m,
				           ip6cp->ip6c_off +
				           sizeof(struct udphdr) +
				           sizeof(struct sctphdr),
				           sizeof(uint8_t),
				           (caddr_t)&chunk_type);
				m_copydata(ip6cp->ip6c_m,
				           ip6cp->ip6c_off +
				           sizeof(struct udphdr) +
				           sizeof(struct sctphdr) +
				           sizeof(struct sctp_chunkhdr),
				           sizeof(uint32_t),
				           (caddr_t)&initiate_tag);
				if ((chunk_type != SCTP_INITIATION) ||
				    (ntohl(initiate_tag) != stcb->asoc.my_vtag)) {
					SCTP_TCB_UNLOCK(stcb);
					return;
				}
			} else {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
#else
			/* Non-FreeBSD kernels: cannot inspect the chunk; drop. */
			SCTP_TCB_UNLOCK(stcb);
			return;
#endif
		}
		type = ip6cp->ip6c_icmp6->icmp6_type;
		code = ip6cp->ip6c_icmp6->icmp6_code;
		/* Port unreachable is reported as an unknown next header. */
		if ((type == ICMP6_DST_UNREACH) &&
		    (code == ICMP6_DST_UNREACH_NOPORT)) {
			type = ICMP6_PARAM_PROB;
			code = ICMP6_PARAMPROB_NEXTHEADER;
		}
		/*
		 * NOTE(review): no SCTP_TCB_UNLOCK on this path —
		 * sctp6_notify() appears to take over the TCB lock; every
		 * other exit unlocks explicitly.  Confirm against
		 * sctp6_notify()'s contract.
		 */
		sctp6_notify(inp, stcb, net, type, code,
			     ntohl(ip6cp->ip6c_icmp6->icmp6_mtu));
#if defined(__Userspace__)
		/* Userspace stack: wake the application via the socket upcall. */
		if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) &&
		    (stcb->sctp_socket != NULL)) {
			struct socket *upcall_socket;

			upcall_socket = stcb->sctp_socket;
			SOCK_LOCK(upcall_socket);
			soref(upcall_socket);
			SOCK_UNLOCK(upcall_socket);
			if ((upcall_socket->so_upcall != NULL) &&
			    (upcall_socket->so_error != 0)) {
				(*upcall_socket->so_upcall)(upcall_socket, upcall_socket->so_upcallarg, M_NOWAIT);
			}
			ACCEPT_LOCK();
			SOCK_LOCK(upcall_socket);
			sorele(upcall_socket);
		}
#endif
	} else {
		if ((stcb == NULL) && (inp != NULL)) {
			/* reduce inp's ref-count */
			SCTP_INP_WLOCK(inp);
			SCTP_INP_DECR_REF(inp);
			SCTP_INP_WUNLOCK(inp);
		}
		if (stcb) {
			SCTP_TCB_UNLOCK(stcb);
		}
	}
}
8415 #endif
8416
/*
 * Close the UDP tunneling socket(s) used for SCTP over UDP and clear
 * the stored pointers.  The caller must hold the sysctl info lock
 * exclusively (for writing).
 */
void
sctp_over_udp_stop(void)
{
#ifdef INET
	struct socket *so4;

	so4 = SCTP_BASE_INFO(udp4_tun_socket);
	if (so4 != NULL) {
		soclose(so4);
		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
	}
#endif
#ifdef INET6
	struct socket *so6;

	so6 = SCTP_BASE_INFO(udp6_tun_socket);
	if (so6 != NULL) {
		soclose(so6);
		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
	}
#endif
}
8436
/*
 * Create and bind the kernel UDP tunneling socket(s) for SCTP over UDP
 * and install the packet/ICMP receive hooks on them.  Returns 0 on
 * success or an errno value; on any failure every socket opened so far
 * is closed again via sctp_over_udp_stop().
 */
int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;
#ifdef INET
	struct sockaddr_in sin;
#endif
#ifdef INET6
	struct sockaddr_in6 sin6;
#endif
	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock() for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (ntohs(port) == 0) {
		/* Must have a port set (byte order is irrelevant for a zero test) */
		return (EINVAL);
	}
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET
	/* Create the IPv4 tunneling socket. */
	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
	                    SOCK_DGRAM, IPPROTO_UDP,
	                    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
	                                    sctp_recv_udp_tunneled_packet,
	                                    sctp_recv_icmp_tunneled_packet,
	                                    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port (wildcard address). */
	memset(&sin, 0, sizeof(struct sockaddr_in));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
	                  (struct sockaddr *)&sin, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
#ifdef INET6
	/* Create the IPv6 tunneling socket. */
	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
	                    SOCK_DGRAM, IPPROTO_UDP,
	                    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
	                                    sctp_recv_udp_tunneled_packet,
	                                    sctp_recv_icmp6_tunneled_packet,
	                                    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port (wildcard address). */
	memset(&sin6, 0, sizeof(struct sockaddr_in6));
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
	                  (struct sockaddr *)&sin6, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
	return (0);
}
8522 #endif
8523
8524 /*
8525 * sctp_min_mtu ()returns the minimum of all non-zero arguments.
8526 * If all arguments are zero, zero is returned.
8527 */
8528 uint32_t
8529 sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
8530 {
8531 if (mtu1 > 0) {
8532 if (mtu2 > 0) {
8533 if (mtu3 > 0) {
8534 return (min(mtu1, min(mtu2, mtu3)));
8535 } else {
8536 return (min(mtu1, mtu2));
8537 }
8538 } else {
8539 if (mtu3 > 0) {
8540 return (min(mtu1, mtu3));
8541 } else {
8542 return (mtu1);
8543 }
8544 }
8545 } else {
8546 if (mtu2 > 0) {
8547 if (mtu3 > 0) {
8548 return (min(mtu2, mtu3));
8549 } else {
8550 return (mtu2);
8551 }
8552 } else {
8553 return (mtu3);
8554 }
8555 }
8556 }
8557
8558 #if defined(__FreeBSD__) && !defined(__Userspace__)
8559 void
8560 sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
8561 {
8562 struct in_conninfo inc;
8563
8564 memset(&inc, 0, sizeof(struct in_conninfo));
8565 inc.inc_fibnum = fibnum;
8566 switch (addr->sa.sa_family) {
8567 #ifdef INET
8568 case AF_INET:
8569 inc.inc_faddr = addr->sin.sin_addr;
8570 break;
8571 #endif
8572 #ifdef INET6
8573 case AF_INET6:
8574 inc.inc_flags |= INC_ISIPV6;
8575 inc.inc6_faddr = addr->sin6.sin6_addr;
8576 break;
8577 #endif
8578 default:
8579 return;
8580 }
8581 tcp_hc_updatemtu(&inc, (u_long)mtu);
8582 }
8583
8584 uint32_t
8585 sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
8586 {
8587 struct in_conninfo inc;
8588
8589 memset(&inc, 0, sizeof(struct in_conninfo));
8590 inc.inc_fibnum = fibnum;
8591 switch (addr->sa.sa_family) {
8592 #ifdef INET
8593 case AF_INET:
8594 inc.inc_faddr = addr->sin.sin_addr;
8595 break;
8596 #endif
8597 #ifdef INET6
8598 case AF_INET6:
8599 inc.inc_flags |= INC_ISIPV6;
8600 inc.inc6_faddr = addr->sin6.sin6_addr;
8601 break;
8602 #endif
8603 default:
8604 return (0);
8605 }
8606 return ((uint32_t)tcp_hc_getmtu(&inc));
8607 }
8608 #endif
8609
8610 void
8611 sctp_set_state(struct sctp_tcb *stcb, int new_state)
8612 {
8613 #if defined(KDTRACE_HOOKS)
8614 int old_state = stcb->asoc.state;
8615 #endif
8616
8617 KASSERT((new_state & ~SCTP_STATE_MASK) == 0,
8618 ("sctp_set_state: Can't set substate (new_state = %x)",
8619 new_state));
8620 stcb->asoc.state = (stcb->asoc.state & ~SCTP_STATE_MASK) | new_state;
8621 if ((new_state == SCTP_STATE_SHUTDOWN_RECEIVED) ||
8622 (new_state == SCTP_STATE_SHUTDOWN_SENT) ||
8623 (new_state == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
8624 SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
8625 }
8626 #if defined(KDTRACE_HOOKS)
8627 if (((old_state & SCTP_STATE_MASK) != new_state) &&
8628 !(((old_state & SCTP_STATE_MASK) == SCTP_STATE_EMPTY) &&
8629 (new_state == SCTP_STATE_INUSE))) {
8630 SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
8631 }
8632 #endif
8633 }
8634
8635 void
8636 sctp_add_substate(struct sctp_tcb *stcb, int substate)
8637 {
8638 #if defined(KDTRACE_HOOKS)
8639 int old_state = stcb->asoc.state;
8640 #endif
8641
8642 KASSERT((substate & SCTP_STATE_MASK) == 0,
8643 ("sctp_add_substate: Can't set state (substate = %x)",
8644 substate));
8645 stcb->asoc.state |= substate;
8646 #if defined(KDTRACE_HOOKS)
8647 if (((substate & SCTP_STATE_ABOUT_TO_BE_FREED) &&
8648 ((old_state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) ||
8649 ((substate & SCTP_STATE_SHUTDOWN_PENDING) &&
8650 ((old_state & SCTP_STATE_SHUTDOWN_PENDING) == 0))) {
8651 SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
8652 }
8653 #endif
8654 }
8655
8656