1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
5 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * a) Redistributions of source code must retain the above copyright notice,
12 * this list of conditions and the following disclaimer.
13 *
14 * b) Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the distribution.
17 *
18 * c) Neither the name of Cisco Systems, Inc. nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35 #if defined(__FreeBSD__) && !defined(__Userspace__)
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD: head/sys/netinet/sctp_input.c 362153 2020-06-13 18:38:59Z tuexen $");
38 #endif
39
40 #include <netinet/sctp_os.h>
41 #include <netinet/sctp_var.h>
42 #include <netinet/sctp_sysctl.h>
43 #include <netinet/sctp_pcb.h>
44 #include <netinet/sctp_header.h>
45 #include <netinet/sctputil.h>
46 #include <netinet/sctp_output.h>
47 #include <netinet/sctp_input.h>
48 #include <netinet/sctp_auth.h>
49 #include <netinet/sctp_indata.h>
50 #include <netinet/sctp_asconf.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #include <netinet/sctp_timer.h>
53 #include <netinet/sctp_crc32.h>
54 #if defined(__FreeBSD__) && !defined(__Userspace__)
55 #include <netinet/sctp_kdtrace.h>
56 #endif
57 #if defined(INET) || defined(INET6)
58 #if !defined(_WIN32)
59 #include <netinet/udp.h>
60 #endif
61 #endif
62 #if defined(__FreeBSD__) && !defined(__Userspace__)
63 #include <sys/smp.h>
64 #endif
65
66 static void
sctp_stop_all_cookie_timers(struct sctp_tcb * stcb)67 sctp_stop_all_cookie_timers(struct sctp_tcb *stcb)
68 {
69 struct sctp_nets *net;
70
71 /* This now not only stops all cookie timers
72 * it also stops any INIT timers as well. This
73 * will make sure that the timers are stopped in
74 * all collision cases.
75 */
76 SCTP_TCB_LOCK_ASSERT(stcb);
77 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
78 if (net->rxt_timer.type == SCTP_TIMER_TYPE_COOKIE) {
79 sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE,
80 stcb->sctp_ep,
81 stcb,
82 net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_1);
83 } else if (net->rxt_timer.type == SCTP_TIMER_TYPE_INIT) {
84 sctp_timer_stop(SCTP_TIMER_TYPE_INIT,
85 stcb->sctp_ep,
86 stcb,
87 net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_2);
88 }
89 }
90 }
91
/* INIT handler */
static void
sctp_handle_init(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh,
    struct sctp_init_chunk *cp, struct sctp_inpcb *inp,
    struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_no_unlock,
#if defined(__FreeBSD__) && !defined(__Userspace__)
    uint8_t mflowtype, uint32_t mflowid,
#endif
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_init *init;
	struct mbuf *op_err;

	/*
	 * Validate an incoming INIT chunk and respond with either an ABORT
	 * (on a malformed or unwanted INIT), a SHUTDOWN-ACK (collision while
	 * in SHUTDOWN-ACK-SENT), or an INIT-ACK.  stcb may be NULL when no
	 * association exists yet.  On the abort paths *abort_no_unlock is
	 * set (when stcb != NULL) to tell the caller the stcb lock has
	 * already been handled by sctp_abort_association().
	 */
	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_init: handling INIT tcb:%p\n",
	        (void *)stcb);
	if (stcb == NULL) {
		/* No association yet: hold the inp read lock while replying. */
		SCTP_INP_RLOCK(inp);
	}
	/* validate length: must cover a full INIT chunk */
	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_chunk)) {
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
#if defined(__FreeBSD__) && !defined(__Userspace__)
		                       mflowtype, mflowid,
#endif
		                       vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	/* validate parameters */
	init = &cp->init;
	if (init->initiate_tag == 0) {
		/* protocol error (zero initiate tag)... send abort */
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
#if defined(__FreeBSD__) && !defined(__Userspace__)
		                       mflowtype, mflowid,
#endif
		                       vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	if (ntohl(init->a_rwnd) < SCTP_MIN_RWND) {
		/* invalid parameter (advertised rwnd too small)... send abort */
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
#if defined(__FreeBSD__) && !defined(__Userspace__)
		                       mflowtype, mflowid,
#endif
		                       vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	if (init->num_inbound_streams == 0) {
		/* protocol error (no inbound streams offered)... send abort */
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
#if defined(__FreeBSD__) && !defined(__Userspace__)
		                       mflowtype, mflowid,
#endif
		                       vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	if (init->num_outbound_streams == 0) {
		/* protocol error (no outbound streams offered)... send abort */
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
#if defined(__FreeBSD__) && !defined(__Userspace__)
		                       mflowtype, mflowid,
#endif
		                       vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	/* Scan the INIT's parameter block for consistent AUTH parameters. */
	if (sctp_validate_init_auth_params(m, offset + sizeof(*cp),
	                                   offset + ntohs(cp->ch.chunk_length))) {
		/* auth parameter(s) error... send abort */
		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
		                             "Problem with AUTH parameters");
		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
#if defined(__FreeBSD__) && !defined(__Userspace__)
		                       mflowtype, mflowid,
#endif
		                       vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	/* We are only accepting if we have a listening socket. */
	if ((stcb == NULL) &&
	    ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	     (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	     (!SCTP_IS_LISTENING(inp)))) {
		/*
		 * FIX ME ?? What about TCP model and we have a
		 * match/restart case? Actually no fix is needed.
		 * the lookup will always find the existing assoc so stcb
		 * would not be NULL. It may be questionable to do this
		 * since we COULD just send back the INIT-ACK and hope that
		 * the app did accept()'s by the time the COOKIE was sent. But
		 * there is a price to pay for COOKIE generation and I don't
		 * want to pay it on the chance that the app will actually do
		 * some accepts(). The App just looses and should NOT be in
		 * this state :-)
		 */
		if (SCTP_BASE_SYSCTL(sctp_blackhole) == 0) {
			/* Blackhole disabled: tell the peer why we refuse. */
			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
			                             "No listener");
			sctp_send_abort(m, iphlen, src, dst, sh, 0, op_err,
#if defined(__FreeBSD__) && !defined(__Userspace__)
			                mflowtype, mflowid, inp->fibnum,
#endif
			                vrf_id, port);
		}
		goto outnow;
	}
	if ((stcb != NULL) &&
	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* INIT collided with our shutdown: re-send SHUTDOWN-ACK. */
		SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending SHUTDOWN-ACK\n");
		sctp_send_shutdown_ack(stcb, NULL);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
	} else {
		SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n");
		sctp_send_initiate_ack(inp, stcb, net, m, iphlen, offset,
		                       src, dst, sh, cp,
#if defined(__FreeBSD__) && !defined(__Userspace__)
		                       mflowtype, mflowid,
#endif
		                       vrf_id, port);
	}
 outnow:
	if (stcb == NULL) {
		SCTP_INP_RUNLOCK(inp);
	}
}
234
/*
 * Processing of a peer's "INIT/INIT-ACK" chunk (sctp_process_init() below)
 * returns a value < 0 on error.
 */
238
int
sctp_is_there_unsent_data(struct sctp_tcb *stcb, int so_locked)
{
	int unsent_data;
	unsigned int i;
	struct sctp_stream_queue_pending *sp;
	struct sctp_association *asoc;

	/* This function returns a positive value if any stream has true
	 * unsent data on it.  Note that as it looks through it will clean
	 * up any places that have old data that has been sent but left at
	 * top of stream queue (deferred cleanup of messages that were
	 * fully sent before sender_all_done was set).
	 */
	asoc = &stcb->asoc;
	unsent_data = 0;
	SCTP_TCB_SEND_LOCK(stcb);
	if (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
		/* Check to see if some data queued */
		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
			/*sa_ignore FREED_MEMORY*/
			sp = TAILQ_FIRST(&stcb->asoc.strmout[i].outqueue);
			if (sp == NULL) {
				continue;
			}
			if ((sp->msg_is_complete) &&
			    (sp->length == 0) &&
			    (sp->sender_all_done)) {
				/* We are doing deferred cleanup.  Last
				 * time through when we took all the data
				 * the sender_all_done was not set.
				 */
				if (sp->put_last_out == 0) {
					/* Bookkeeping inconsistency: whole message
					 * went out without being marked ended. */
					SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
					SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d\n",
					            sp->sender_all_done,
					            sp->length,
					            sp->msg_is_complete,
					            sp->put_last_out);
				}
				/* Unlink and release the stale queue entry. */
				atomic_subtract_int(&stcb->asoc.stream_queue_cnt, 1);
				TAILQ_REMOVE(&stcb->asoc.strmout[i].outqueue, sp, next);
				stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, &asoc->strmout[i], sp, 1);
				if (sp->net) {
					sctp_free_remote_addr(sp->net);
					sp->net = NULL;
				}
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
				}
				sctp_free_a_strmoq(stcb, sp, so_locked);
				if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
					/* Real data queued behind the stale entry. */
					unsent_data++;
				}
			} else {
				unsent_data++;
			}
			if (unsent_data > 0) {
				/* One hit is enough to answer the question. */
				break;
			}
		}
	}
	SCTP_TCB_SEND_UNLOCK(stcb);
	return (unsent_data);
}
303
static int
sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb)
{
	struct sctp_init *init;
	struct sctp_association *asoc;
	struct sctp_nets *lnet;
	unsigned int i;

	/*
	 * Absorb the peer's INIT (or INIT-ACK) parameters into the
	 * association: record its verification tag and rwnd, seed our TSN
	 * tracking, trim the outbound stream count down to what the peer
	 * will accept (abandoning data on streams that disappear), and
	 * (re)build the inbound stream array.  Returns 0 on success or -1
	 * if memory for the inbound streams could not be allocated.
	 */
	init = &cp->init;
	asoc = &stcb->asoc;
	/* save off parameters */
	asoc->peer_vtag = ntohl(init->initiate_tag);
	asoc->peers_rwnd = ntohl(init->a_rwnd);
	/* init tsn's */
	asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1;

	if (!TAILQ_EMPTY(&asoc->nets)) {
		/* update any ssthresh's that may have a default */
		TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
			lnet->ssthresh = asoc->peers_rwnd;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE|SCTP_CWND_LOGGING_ENABLE)) {
				sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_INITIALIZATION);
			}

		}
	}
	SCTP_TCB_SEND_LOCK(stcb);
	if (asoc->pre_open_streams > ntohs(init->num_inbound_streams)) {
		/* Peer accepts fewer streams than we pre-opened. */
		unsigned int newcnt;
		struct sctp_stream_out *outs;
		struct sctp_stream_queue_pending *sp, *nsp;
		struct sctp_tmit_chunk *chk, *nchk;

		/* abandon the upper streams */
		newcnt = ntohs(init->num_inbound_streams);
		/* First drop already-chunked data destined for dropped sids. */
		TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
			if (chk->rec.data.sid >= newcnt) {
				TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
				asoc->send_queue_cnt--;
				if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
					asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
				} else {
					panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
				}
				if (chk->data != NULL) {
					/* Return the buffer space and tell the ULP. */
					sctp_free_bufspace(stcb, asoc, chk, 1);
					sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
					                0, chk, SCTP_SO_NOT_LOCKED);
					if (chk->data) {
						sctp_m_freem(chk->data);
						chk->data = NULL;
					}
				}
				sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
				/*sa_ignore FREED_MEMORY*/
			}
		}
		/* Then flush still-pending messages on the dropped streams. */
		if (asoc->strmout) {
			for (i = newcnt; i < asoc->pre_open_streams; i++) {
				outs = &asoc->strmout[i];
				TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
					atomic_subtract_int(&stcb->asoc.stream_queue_cnt, 1);
					TAILQ_REMOVE(&outs->outqueue, sp, next);
					stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, 1);
					sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL,
					                stcb, 0, sp, SCTP_SO_NOT_LOCKED);
					if (sp->data) {
						sctp_m_freem(sp->data);
						sp->data = NULL;
					}
					if (sp->net) {
						sctp_free_remote_addr(sp->net);
						sp->net = NULL;
					}
					/* Free the chunk */
					sctp_free_a_strmoq(stcb, sp, SCTP_SO_NOT_LOCKED);
					/*sa_ignore FREED_MEMORY*/
				}
				outs->state = SCTP_STREAM_CLOSED;
			}
		}
		/* cut back the count */
		asoc->pre_open_streams = newcnt;
	}
	SCTP_TCB_SEND_UNLOCK(stcb);
	asoc->streamoutcnt = asoc->pre_open_streams;
	if (asoc->strmout) {
		/* Mark the surviving outbound streams open. */
		for (i = 0; i < asoc->streamoutcnt; i++) {
			asoc->strmout[i].state = SCTP_STREAM_OPEN;
		}
	}
	/* EY - nr_sack: initialize highest tsn in nr_mapping_array */
	asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
		sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
	}
	/* This is the next one we expect */
	asoc->str_reset_seq_in = asoc->asconf_seq_in + 1;

	asoc->mapping_array_base_tsn = ntohl(init->initial_tsn);
	asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->asconf_seq_in;

	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
	/* open the requested streams */

	if (asoc->strmin != NULL) {
		/* Free the old ones */
		for (i = 0; i < asoc->streamincnt; i++) {
			sctp_clean_up_stream(stcb, &asoc->strmin[i].inqueue);
			sctp_clean_up_stream(stcb, &asoc->strmin[i].uno_inqueue);
		}
		SCTP_FREE(asoc->strmin, SCTP_M_STRMI);
	}
	/* Inbound stream count = min(our limit, peer's outbound count). */
	if (asoc->max_inbound_streams > ntohs(init->num_outbound_streams)) {
		asoc->streamincnt = ntohs(init->num_outbound_streams);
	} else {
		asoc->streamincnt = asoc->max_inbound_streams;
	}
	SCTP_MALLOC(asoc->strmin, struct sctp_stream_in *, asoc->streamincnt *
	            sizeof(struct sctp_stream_in), SCTP_M_STRMI);
	if (asoc->strmin == NULL) {
		/* we didn't get memory for the streams! */
		SCTPDBG(SCTP_DEBUG_INPUT2, "process_init: couldn't get memory for the streams!\n");
		return (-1);
	}
	for (i = 0; i < asoc->streamincnt; i++) {
		asoc->strmin[i].sid = i;
		asoc->strmin[i].last_mid_delivered = 0xffffffff;
		TAILQ_INIT(&asoc->strmin[i].inqueue);
		TAILQ_INIT(&asoc->strmin[i].uno_inqueue);
		asoc->strmin[i].pd_api_started = 0;
		asoc->strmin[i].delivery_started = 0;
	}
	/*
	 * load_address_from_init will put the addresses into the
	 * association when the COOKIE is processed or the INIT-ACK is
	 * processed. Both types of COOKIE's existing and new call this
	 * routine. It will remove addresses that are no longer in the
	 * association (for the restarting case where addresses are
	 * removed). Up front when the INIT arrives we will discard it if it
	 * is a restart and new addresses have been added.
	 */
	/* sa_ignore MEMLEAK */
	return (0);
}
451
452 /*
453 * INIT-ACK message processing/consumption returns value < 0 on error
454 */
static int
sctp_process_init_ack(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh,
    struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
    struct sctp_nets *net, int *abort_no_unlock,
#if defined(__FreeBSD__) && !defined(__Userspace__)
    uint8_t mflowtype, uint32_t mflowid,
#endif
    uint32_t vrf_id)
{
	struct sctp_association *asoc;
	struct mbuf *op_err;
	int retval, abort_flag, cookie_found;
	int initack_limit;
	int nat_friendly = 0;

	/*
	 * Consume a received INIT-ACK: verify its parameters (a STATE-COOKIE
	 * parameter is mandatory), absorb the peer's INIT parameters, load
	 * the peer's addresses, and finally queue a COOKIE-ECHO.  Returns a
	 * value < 0 on error; on abort paths *abort_no_unlock is set so the
	 * caller does not unlock a freed/handled stcb.
	 */
	/* First verify that we have no illegal param's */
	abort_flag = 0;
	cookie_found = 0;

	op_err = sctp_arethere_unrecognized_parameters(m,
	                                               (offset + sizeof(struct sctp_init_chunk)),
	                                               &abort_flag, (struct sctp_chunkhdr *)cp,
	                                               &nat_friendly, &cookie_found);
	if (abort_flag) {
		/* Send an abort and notify peer */
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_no_unlock = 1;
		return (-1);
	}
	if (!cookie_found) {
		uint16_t len;

		/* Only report the missing cookie parameter */
		if (op_err != NULL) {
			sctp_m_freem(op_err);
		}
		len = (uint16_t)(sizeof(struct sctp_error_missing_param) + sizeof(uint16_t));
		/* We abort with an error of missing mandatory param */
		op_err = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
		if (op_err != NULL) {
			struct sctp_error_missing_param *cause;

			SCTP_BUF_LEN(op_err) = len;
			cause = mtod(op_err, struct sctp_error_missing_param *);
			/* Subtract the reserved param */
			cause->cause.code = htons(SCTP_CAUSE_MISSING_PARAM);
			cause->cause.length = htons(len);
			cause->num_missing_params = htonl(1);
			cause->type[0] = htons(SCTP_STATE_COOKIE);
		}
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
		                       src, dst, sh, op_err,
#if defined(__FreeBSD__) && !defined(__Userspace__)
		                       mflowtype, mflowid,
#endif
		                       vrf_id, net->port);
		*abort_no_unlock = 1;
		return (-3);
	}
	asoc = &stcb->asoc;
	asoc->peer_supports_nat = (uint8_t)nat_friendly;
	/* process the peer's parameters in the INIT-ACK */
	retval = sctp_process_init((struct sctp_init_chunk *)cp, stcb);
	if (retval < 0) {
		if (op_err != NULL) {
			sctp_m_freem(op_err);
		}
		return (retval);
	}
	initack_limit = offset + ntohs(cp->ch.chunk_length);
	/* load all addresses */
	if ((retval = sctp_load_addresses_from_init(stcb, m,
	    (offset + sizeof(struct sctp_init_chunk)), initack_limit,
	    src, dst, NULL, stcb->asoc.port))) {
		/* Address parameters were bad: replace any pending op_err
		 * with a diagnostic cause and abort the association. */
		if (op_err != NULL) {
			sctp_m_freem(op_err);
		}
		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
		                             "Problem with address parameters");
		SCTPDBG(SCTP_DEBUG_INPUT1,
		        "Load addresses from INIT causes an abort %d\n",
		        retval);
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
		                       src, dst, sh, op_err,
#if defined(__FreeBSD__) && !defined(__Userspace__)
		                       mflowtype, mflowid,
#endif
		                       vrf_id, net->port);
		*abort_no_unlock = 1;
		return (-1);
	}
	/* if the peer doesn't support asconf, flush the asconf queue */
	if (asoc->asconf_supported == 0) {
		struct sctp_asconf_addr *param, *nparam;

		TAILQ_FOREACH_SAFE(param, &asoc->asconf_queue, next, nparam) {
			TAILQ_REMOVE(&asoc->asconf_queue, param, next);
			SCTP_FREE(param, SCTP_M_ASC_ADDR);
		}
	}

	/* Pick the HMAC algorithm both sides support. */
	stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs,
	                                                stcb->asoc.local_hmacs);
	if (op_err) {
		sctp_queue_op_err(stcb, op_err);
		/* queuing will steal away the mbuf chain to the out queue */
		op_err = NULL;
	}
	/* extract the cookie and queue it to "echo" it back... */
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
		               stcb->asoc.overall_error_count,
		               0,
		               SCTP_FROM_SCTP_INPUT,
		               __LINE__);
	}
	/* A valid INIT-ACK resets the error counters. */
	stcb->asoc.overall_error_count = 0;
	net->error_count = 0;

	/*
	 * Cancel the INIT timer, We do this first before queueing the
	 * cookie. We always cancel at the primary to assure that we are
	 * canceling the timer started by the INIT which always goes to the
	 * primary.
	 */
	sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, stcb,
	                asoc->primary_destination, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);

	/* calculate the RTO */
	sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered,
	                   SCTP_RTT_FROM_NON_DATA);
#if defined(__Userspace__)
	if (stcb->sctp_ep->recv_callback) {
		if (stcb->sctp_socket) {
			uint32_t inqueue_bytes, sb_free_now;
			struct sctp_inpcb *inp;

			inp = stcb->sctp_ep;
			inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
			sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv);

			/* check if the amount free in the send socket buffer crossed the threshold */
			if (inp->send_callback &&
			    (((inp->send_sb_threshold > 0) &&
			      (sb_free_now >= inp->send_sb_threshold) &&
			      (stcb->asoc.chunks_on_out_queue <= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) ||
			     (inp->send_sb_threshold == 0))) {
				/* Upcall without the TCB lock; hold a ref so
				 * the stcb survives the callback. */
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				inp->send_callback(stcb->sctp_socket, sb_free_now);
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
		}
	}
#endif
	retval = sctp_send_cookie_echo(m, offset, initack_limit, stcb, net);
	return (retval);
}
615
static void
sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	union sctp_sockstore store;
	struct sctp_nets *r_net, *f_net;
	struct timeval tv;
	int req_prim = 0;
	uint16_t old_error_counter;

	/*
	 * Process a HEARTBEAT-ACK: locate the destination address echoed in
	 * the heartbeat info, confirm it if the random values match, clear
	 * its error state, update the RTO from the echoed timestamps, and
	 * finish any pending set-primary / mobility handoff work.
	 */
	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_heartbeat_chunk)) {
		/* Invalid length */
		return;
	}

	/* Rebuild a sockaddr from the echoed heartbeat info block. */
	memset(&store, 0, sizeof(store));
	switch (cp->heartbeat.hb_info.addr_family) {
#ifdef INET
	case AF_INET:
		if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in)) {
			store.sin.sin_family = cp->heartbeat.hb_info.addr_family;
#ifdef HAVE_SIN_LEN
			store.sin.sin_len = cp->heartbeat.hb_info.addr_len;
#endif
			store.sin.sin_port = stcb->rport;
			memcpy(&store.sin.sin_addr, cp->heartbeat.hb_info.address,
			       sizeof(store.sin.sin_addr));
		} else {
			return;
		}
		break;
#endif
#ifdef INET6
	case AF_INET6:
		if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in6)) {
			store.sin6.sin6_family = cp->heartbeat.hb_info.addr_family;
#ifdef HAVE_SIN6_LEN
			store.sin6.sin6_len = cp->heartbeat.hb_info.addr_len;
#endif
			store.sin6.sin6_port = stcb->rport;
			memcpy(&store.sin6.sin6_addr, cp->heartbeat.hb_info.address, sizeof(struct in6_addr));
		} else {
			return;
		}
		break;
#endif
#if defined(__Userspace__)
	case AF_CONN:
		if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_conn)) {
			store.sconn.sconn_family = cp->heartbeat.hb_info.addr_family;
#ifdef HAVE_SCONN_LEN
			store.sconn.sconn_len = cp->heartbeat.hb_info.addr_len;
#endif
			store.sconn.sconn_port = stcb->rport;
			memcpy(&store.sconn.sconn_addr, cp->heartbeat.hb_info.address, sizeof(void *));
		} else {
			return;
		}
		break;
#endif
	default:
		/* Unknown address family in the heartbeat info: ignore. */
		return;
	}
	r_net = sctp_findnet(stcb, &store.sa);
	if (r_net == NULL) {
		SCTPDBG(SCTP_DEBUG_INPUT1, "Huh? I can't find the address I sent it to, discard\n");
		return;
	}
	if ((r_net && (r_net->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
	    (r_net->heartbeat_random1 == cp->heartbeat.hb_info.random_value1) &&
	    (r_net->heartbeat_random2 == cp->heartbeat.hb_info.random_value2)) {
		/*
		 * The address was unconfirmed and the HB-ACK echoes the
		 * random values we sent, so we can now confirm the
		 * destination.
		 */
		r_net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
		if (r_net->dest_state & SCTP_ADDR_REQ_PRIMARY) {
			/* A set-primary was pending on confirmation. */
			stcb->asoc.primary_destination = r_net;
			r_net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
			f_net = TAILQ_FIRST(&stcb->asoc.nets);
			if (f_net != r_net) {
				/* first one on the list is NOT the primary
				 * sctp_cmpaddr() is much more efficient if
				 * the primary is the first on the list, make it
				 * so.
				 */
				TAILQ_REMOVE(&stcb->asoc.nets, r_net, sctp_next);
				TAILQ_INSERT_HEAD(&stcb->asoc.nets, r_net, sctp_next);
			}
			req_prim = 1;
		}
		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
		                stcb, 0, (void *)r_net, SCTP_SO_NOT_LOCKED);
		/* Restart the heartbeat timer for the confirmed address. */
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb,
		                r_net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_4);
		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, r_net);
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
		               stcb->asoc.overall_error_count,
		               0,
		               SCTP_FROM_SCTP_INPUT,
		               __LINE__);
	}
	/* The peer is alive: reset the error counters. */
	stcb->asoc.overall_error_count = 0;
	old_error_counter = r_net->error_count;
	r_net->error_count = 0;
	r_net->hb_responded = 1;
	tv.tv_sec = cp->heartbeat.hb_info.time_value_1;
	tv.tv_usec = cp->heartbeat.hb_info.time_value_2;
	/* Now lets do a RTO with this */
	sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv,
	                   SCTP_RTT_FROM_NON_DATA);
	if (!(r_net->dest_state & SCTP_ADDR_REACHABLE)) {
		r_net->dest_state |= SCTP_ADDR_REACHABLE;
		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
		                0, (void *)r_net, SCTP_SO_NOT_LOCKED);
	}
	if (r_net->dest_state & SCTP_ADDR_PF) {
		r_net->dest_state &= ~SCTP_ADDR_PF;
		/* NOTE(review): the PF flag is cleared on r_net but the CC
		 * exit-PF hook is passed 'net' (the path the HB-ACK arrived
		 * on); confirm against upstream whether r_net was intended. */
		stcb->asoc.cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
	}
	if (old_error_counter > 0) {
		/* Address had errors before: restart its heartbeat timer. */
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
		                stcb, r_net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_5);
		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, r_net);
	}
	if (r_net == stcb->asoc.primary_destination) {
		if (stcb->asoc.alternate) {
			/* release the alternate, primary is good */
			sctp_free_remote_addr(stcb->asoc.alternate);
			stcb->asoc.alternate = NULL;
		}
	}
	/* Mobility adaptation */
	if (req_prim) {
		if ((sctp_is_mobility_feature_on(stcb->sctp_ep,
		                                 SCTP_MOBILITY_BASE) ||
		     sctp_is_mobility_feature_on(stcb->sctp_ep,
		                                 SCTP_MOBILITY_FASTHANDOFF)) &&
		    sctp_is_mobility_feature_on(stcb->sctp_ep,
		                                SCTP_MOBILITY_PRIM_DELETED)) {

			sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED,
			                stcb->sctp_ep, stcb, NULL,
			                SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
			                                SCTP_MOBILITY_FASTHANDOFF)) {
				sctp_assoc_immediate_retrans(stcb,
				                             stcb->asoc.primary_destination);
			}
			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
			                                SCTP_MOBILITY_BASE)) {
				sctp_move_chunks_from_net(stcb,
				                          stcb->asoc.deleted_primary);
			}
			sctp_delete_prim_timer(stcb->sctp_ep, stcb);
		}
	}
}
776
777 static int
sctp_handle_nat_colliding_state(struct sctp_tcb * stcb)778 sctp_handle_nat_colliding_state(struct sctp_tcb *stcb)
779 {
780 /*
781 * Return 0 means we want you to proceed with the abort
782 * non-zero means no abort processing.
783 */
784 uint32_t new_vtag;
785 struct sctpasochead *head;
786
787 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
788 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
789 new_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
790 atomic_add_int(&stcb->asoc.refcnt, 1);
791 SCTP_TCB_UNLOCK(stcb);
792 SCTP_INP_INFO_WLOCK();
793 SCTP_TCB_LOCK(stcb);
794 atomic_subtract_int(&stcb->asoc.refcnt, 1);
795 } else {
796 return (0);
797 }
798 if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) {
799 /* generate a new vtag and send init */
800 LIST_REMOVE(stcb, sctp_asocs);
801 stcb->asoc.my_vtag = new_vtag;
802 head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
803 /* put it in the bucket in the vtag hash of assoc's for the system */
804 LIST_INSERT_HEAD(head, stcb, sctp_asocs);
805 SCTP_INP_INFO_WUNLOCK();
806 sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
807 return (1);
808 } else {
809 /* treat like a case where the cookie expired i.e.:
810 * - dump current cookie.
811 * - generate a new vtag.
812 * - resend init.
813 */
814 /* generate a new vtag and send init */
815 LIST_REMOVE(stcb, sctp_asocs);
816 SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
817 sctp_stop_all_cookie_timers(stcb);
818 sctp_toss_old_cookies(stcb, &stcb->asoc);
819 stcb->asoc.my_vtag = new_vtag;
820 head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
821 /* put it in the bucket in the vtag hash of assoc's for the system */
822 LIST_INSERT_HEAD(head, stcb, sctp_asocs);
823 SCTP_INP_INFO_WUNLOCK();
824 sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
825 return (1);
826 }
827 return (0);
828 }
829
830 static int
sctp_handle_nat_missing_state(struct sctp_tcb * stcb,struct sctp_nets * net)831 sctp_handle_nat_missing_state(struct sctp_tcb *stcb,
832 struct sctp_nets *net)
833 {
834 /* return 0 means we want you to proceed with the abort
835 * non-zero means no abort processing
836 */
837 if (stcb->asoc.auth_supported == 0) {
838 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_nat_missing_state: Peer does not support AUTH, cannot send an asconf\n");
839 return (0);
840 }
841 sctp_asconf_send_nat_state_update(stcb, net);
842 return (1);
843 }
844
845
/* Returns 1 if the stcb was aborted, 0 otherwise */
static int
sctp_handle_abort(struct sctp_abort_chunk *abort,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
#if defined(__APPLE__) && !defined(__Userspace__)
	struct socket *so;
#endif
	uint16_t len;
	uint16_t error;

	/*
	 * Process an inbound ABORT chunk.  The two NAT error causes may be
	 * handled without killing the association; otherwise stop the
	 * receive timers, notify the ULP, and free the association.
	 */
	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: handling ABORT\n");
	if (stcb == NULL)
		return (0);

	len = ntohs(abort->ch.chunk_length);
	if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_error_cause)) {
		/* Need to check the cause codes for our
		 * two magic nat aborts which don't kill the assoc
		 * necessarily.
		 */
		struct sctp_error_cause *cause;

		cause = (struct sctp_error_cause *)(abort + 1);
		error = ntohs(cause->code);
		if (error == SCTP_CAUSE_NAT_COLLIDING_STATE) {
			SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags:%x\n",
			        abort->ch.chunk_flags);
			if (sctp_handle_nat_colliding_state(stcb)) {
				/* Handled by re-INIT; assoc survives. */
				return (0);
			}
		} else if (error == SCTP_CAUSE_NAT_MISSING_STATE) {
			SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags:%x\n",
			        abort->ch.chunk_flags);
			if (sctp_handle_nat_missing_state(stcb, net)) {
				/* Handled via ASCONF; assoc survives. */
				return (0);
			}
		}
	} else {
		/* No error cause present in the ABORT. */
		error = 0;
	}
	/* stop any receive timers */
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
	                SCTP_FROM_SCTP_INPUT + SCTP_LOC_7);
	/* notify user of the abort and clean up... */
	sctp_abort_notification(stcb, 1, error, abort, SCTP_SO_NOT_LOCKED);
	/* free the tcb */
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) && !defined(__Userspace__)
	/* Apple requires the socket lock before freeing the assoc; take it
	 * with the TCB lock dropped, holding a refcount across the gap. */
	so = SCTP_INP_SO(stcb->sctp_ep);
	atomic_add_int(&stcb->asoc.refcnt, 1);
	SCTP_TCB_UNLOCK(stcb);
	SCTP_SOCKET_LOCK(so, 1);
	SCTP_TCB_LOCK(stcb);
	atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
	SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED);
	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
	                      SCTP_FROM_SCTP_INPUT + SCTP_LOC_8);
#if defined(__APPLE__) && !defined(__Userspace__)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: finished\n");
	return (1);
}
918
919 static void
sctp_start_net_timers(struct sctp_tcb * stcb)920 sctp_start_net_timers(struct sctp_tcb *stcb)
921 {
922 uint32_t cnt_hb_sent;
923 struct sctp_nets *net;
924
925 cnt_hb_sent = 0;
926 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
927 /* For each network start:
928 * 1) A pmtu timer.
929 * 2) A HB timer
930 * 3) If the dest in unconfirmed send
931 * a hb as well if under max_hb_burst have
932 * been sent.
933 */
934 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, stcb->sctp_ep, stcb, net);
935 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
936 if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
937 (cnt_hb_sent < SCTP_BASE_SYSCTL(sctp_hb_maxburst))) {
938 sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
939 cnt_hb_sent++;
940 }
941 }
942 if (cnt_hb_sent) {
943 sctp_chunk_output(stcb->sctp_ep, stcb,
944 SCTP_OUTPUT_FROM_COOKIE_ACK,
945 SCTP_SO_NOT_LOCKED);
946 }
947 }
948
949
/*
 * Process an incoming SHUTDOWN chunk.
 *
 * Validates the chunk, processes the cumulative-TSN ack it carries,
 * terminates any in-progress partial delivery, moves the association
 * toward SHUTDOWN-RECEIVED / SHUTDOWN-ACK-SENT, and replies with a
 * SHUTDOWN-ACK once all outbound data has been acked.
 *
 * Sets *abort_flag non-zero if sctp_update_acked() aborted the
 * association (the stcb must then not be touched).
 */
static void
sctp_handle_shutdown(struct sctp_shutdown_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_flag)
{
	struct sctp_association *asoc;
	int some_on_streamwheel;
	int old_state;
#if defined(__APPLE__) && !defined(__Userspace__)
	struct socket *so;
#endif

	SCTPDBG(SCTP_DEBUG_INPUT2,
	        "sctp_handle_shutdown: handling SHUTDOWN\n");
	if (stcb == NULL)
		return;
	asoc = &stcb->asoc;
	/* A SHUTDOWN is unexpected before the association is established. */
	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
	    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
		return;
	}
	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_shutdown_chunk)) {
		/* Shutdown NOT the expected size */
		return;
	}
	old_state = SCTP_GET_STATE(stcb);
	/* The SHUTDOWN carries a cumulative TSN ack; process it first. */
	sctp_update_acked(stcb, cp, abort_flag);
	if (*abort_flag) {
		return;
	}
	if (asoc->control_pdapi) {
		/* With a normal shutdown
		 * we assume the end of last record.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		if (asoc->control_pdapi->on_strm_q) {
			struct sctp_stream_in *strm;

			/* Pull the partial-delivery control off whichever
			 * stream queue it sits on before finishing it.
			 */
			strm = &asoc->strmin[asoc->control_pdapi->sinfo_stream];
			if (asoc->control_pdapi->on_strm_q == SCTP_ON_UNORDERED) {
				/* Unordered */
				TAILQ_REMOVE(&strm->uno_inqueue, asoc->control_pdapi, next_instrm);
				asoc->control_pdapi->on_strm_q = 0;
			} else if (asoc->control_pdapi->on_strm_q == SCTP_ON_ORDERED) {
				/* Ordered */
				TAILQ_REMOVE(&strm->inqueue, asoc->control_pdapi, next_instrm);
				asoc->control_pdapi->on_strm_q = 0;
#ifdef INVARIANTS
			} else {
				panic("Unknown state on ctrl:%p on_strm_q:%d",
				      asoc->control_pdapi,
				      asoc->control_pdapi->on_strm_q);
#endif
			}
		}
		/* Mark the record complete/aborted so the reader drains it. */
		asoc->control_pdapi->end_added = 1;
		asoc->control_pdapi->pdapi_aborted = 1;
		asoc->control_pdapi = NULL;
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
#if defined(__APPLE__) && !defined(__Userspace__)
		/* Apple: take the socket lock (refcount held across the
		 * unlock/relock) before waking the reader.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* assoc was freed while we were unlocked */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		if (stcb->sctp_socket) {
			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
		}
#if defined(__APPLE__) && !defined(__Userspace__)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	/* goto SHUTDOWN_RECEIVED state to block new requests */
	if (stcb->sctp_socket) {
		if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
		    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT) &&
		    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT)) {
			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_RECEIVED);
			/* notify upper layer that peer has initiated a shutdown */
			sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);

			/* reset time */
			(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
		}
	}
	if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
		/*
		 * stop the shutdown timer, since we WILL move to
		 * SHUTDOWN-ACK-SENT.
		 */
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
		    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_9);
	}
	/* Now is there unsent data on a stream somewhere? */
	some_on_streamwheel = sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED);

	if (!TAILQ_EMPTY(&asoc->send_queue) ||
	    !TAILQ_EMPTY(&asoc->sent_queue) ||
	    some_on_streamwheel) {
		/* By returning we will push more data out */
		return;
	} else {
		/* no outstanding data to send, so move on... */
		/* send SHUTDOWN-ACK */
		/* move to SHUTDOWN-ACK-SENT state */
		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		if (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
			sctp_stop_timers_for_shutdown(stcb);
			sctp_send_shutdown_ack(stcb, net);
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
			    stcb->sctp_ep, stcb, net);
		} else if (old_state == SCTP_STATE_SHUTDOWN_ACK_SENT) {
			/* Retransmitted SHUTDOWN: resend our SHUTDOWN-ACK. */
			sctp_send_shutdown_ack(stcb, net);
		}
	}
}
1076
/*
 * Process an incoming SHUTDOWN-ACK chunk.
 *
 * In the expected states (SHUTDOWN-SENT / SHUTDOWN-ACK-SENT) this
 * terminates any in-progress partial delivery, sends SHUTDOWN-COMPLETE,
 * notifies the ULP, and frees the association.  In COOKIE-WAIT /
 * COOKIE-ECHOED it is answered OOTB-style with a SHUTDOWN-COMPLETE
 * carrying the reflected vtag; in other states it is ignored.
 *
 * NOTE(review): on the paths that free the assoc or explicitly unlock,
 * the stcb lock is consumed here -- callers must not unlock again.
 */
static void
sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp SCTP_UNUSED,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_association *asoc;
#if defined(__APPLE__) && !defined(__Userspace__)
	struct socket *so;

	so = SCTP_INP_SO(stcb->sctp_ep);
#endif
	SCTPDBG(SCTP_DEBUG_INPUT2,
	        "sctp_handle_shutdown_ack: handling SHUTDOWN ACK\n");
	if (stcb == NULL)
		return;

	asoc = &stcb->asoc;
	/* process according to association state */
	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
	    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
		/* unexpected SHUTDOWN-ACK... do OOTB handling... */
		sctp_send_shutdown_complete(stcb, net, 1);
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
	    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* unexpected SHUTDOWN-ACK... so ignore... */
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	if (asoc->control_pdapi) {
		/* With a normal shutdown
		 * we assume the end of last record.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		asoc->control_pdapi->end_added = 1;
		asoc->control_pdapi->pdapi_aborted = 1;
		asoc->control_pdapi = NULL;
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
#if defined(__APPLE__) && !defined(__Userspace__)
		/* Apple: socket lock needed before waking the reader;
		 * refcount protects the stcb across the unlock/relock.
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* assoc was freed while we were unlocked */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) && !defined(__Userspace__)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
#ifdef INVARIANTS
	/* By this point all outbound data must have been acked. */
	if (!TAILQ_EMPTY(&asoc->send_queue) ||
	    !TAILQ_EMPTY(&asoc->sent_queue) ||
	    sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED)) {
		panic("Queues are not empty when handling SHUTDOWN-ACK");
	}
#endif
	/* stop the timer */
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net,
	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_10);
	/* send SHUTDOWN-COMPLETE */
	sctp_send_shutdown_complete(stcb, net, 0);
	/* notify upper layer protocol */
	if (stcb->sctp_socket) {
		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
			/* One-to-one style socket: clear the send buffer. */
			stcb->sctp_socket->so_snd.sb_cc = 0;
		}
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
	}
	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
	/* free the TCB but first save off the ep */
#if defined(__APPLE__) && !defined(__Userspace__)
	atomic_add_int(&stcb->asoc.refcnt, 1);
	SCTP_TCB_UNLOCK(stcb);
	SCTP_SOCKET_LOCK(so, 1);
	SCTP_TCB_LOCK(stcb);
	atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_11);
#if defined(__APPLE__) && !defined(__Userspace__)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
}
1169
1170 static void
sctp_process_unrecog_chunk(struct sctp_tcb * stcb,uint8_t chunk_type)1171 sctp_process_unrecog_chunk(struct sctp_tcb *stcb, uint8_t chunk_type)
1172 {
1173 switch (chunk_type) {
1174 case SCTP_ASCONF_ACK:
1175 case SCTP_ASCONF:
1176 sctp_asconf_cleanup(stcb);
1177 break;
1178 case SCTP_IFORWARD_CUM_TSN:
1179 case SCTP_FORWARD_CUM_TSN:
1180 stcb->asoc.prsctp_supported = 0;
1181 break;
1182 default:
1183 SCTPDBG(SCTP_DEBUG_INPUT2,
1184 "Peer does not support chunk type %d (0x%x).\n",
1185 chunk_type, chunk_type);
1186 break;
1187 }
1188 }
1189
1190 /*
1191 * Skip past the param header and then we will find the param that caused the
1192 * problem. There are a number of param's in a ASCONF OR the prsctp param
1193 * these will turn of specific features.
1194 * XXX: Is this the right thing to do?
1195 */
1196 static void
sctp_process_unrecog_param(struct sctp_tcb * stcb,uint16_t parameter_type)1197 sctp_process_unrecog_param(struct sctp_tcb *stcb, uint16_t parameter_type)
1198 {
1199 switch (parameter_type) {
1200 /* pr-sctp draft */
1201 case SCTP_PRSCTP_SUPPORTED:
1202 stcb->asoc.prsctp_supported = 0;
1203 break;
1204 case SCTP_SUPPORTED_CHUNK_EXT:
1205 break;
1206 /* draft-ietf-tsvwg-addip-sctp */
1207 case SCTP_HAS_NAT_SUPPORT:
1208 stcb->asoc.peer_supports_nat = 0;
1209 break;
1210 case SCTP_ADD_IP_ADDRESS:
1211 case SCTP_DEL_IP_ADDRESS:
1212 case SCTP_SET_PRIM_ADDR:
1213 stcb->asoc.asconf_supported = 0;
1214 break;
1215 case SCTP_SUCCESS_REPORT:
1216 case SCTP_ERROR_CAUSE_IND:
1217 SCTPDBG(SCTP_DEBUG_INPUT2, "Huh, the peer does not support success? or error cause?\n");
1218 SCTPDBG(SCTP_DEBUG_INPUT2,
1219 "Turning off ASCONF to this strange peer\n");
1220 stcb->asoc.asconf_supported = 0;
1221 break;
1222 default:
1223 SCTPDBG(SCTP_DEBUG_INPUT2,
1224 "Peer does not support param type %d (0x%x)??\n",
1225 parameter_type, parameter_type);
1226 break;
1227 }
1228 }
1229
/*
 * Process an OPERATION-ERROR chunk: walk every error cause it carries
 * and react to the ones we act on (NAT colliding/missing state, stale
 * cookie, unrecognized chunk/parameter).  'limit' caps how many bytes
 * of the chunk may safely be read.
 *
 * Returns 0 normally (including on a bogus cause length, which just
 * stops the walk); returns -1 if a stale-cookie cause exhausted the
 * retry budget and the association was freed -- the caller must then
 * not touch the stcb again.
 */
static int
sctp_handle_error(struct sctp_chunkhdr *ch,
    struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t limit)
{
	struct sctp_error_cause *cause;
	struct sctp_association *asoc;
	uint32_t remaining_length, adjust;
	uint16_t code, cause_code, cause_length;
#if defined(__APPLE__) && !defined(__Userspace__)
	struct socket *so;
#endif

	/* parse through all of the errors and process */
	asoc = &stcb->asoc;
	/* First cause immediately follows the chunk header. */
	cause = (struct sctp_error_cause *)((caddr_t)ch +
	    sizeof(struct sctp_chunkhdr));
	/* Never read beyond the chunk nor beyond 'limit' (truncation). */
	remaining_length = ntohs(ch->chunk_length);
	if (remaining_length > limit) {
		remaining_length = limit;
	}
	if (remaining_length >= sizeof(struct sctp_chunkhdr)) {
		remaining_length -= sizeof(struct sctp_chunkhdr);
	} else {
		remaining_length = 0;
	}
	/* 'code' records the first cause code for the ULP notification. */
	code = 0;
	while (remaining_length >= sizeof(struct sctp_error_cause)) {
		/* Process an Error Cause */
		cause_code = ntohs(cause->code);
		cause_length = ntohs(cause->length);
		if ((cause_length > remaining_length) || (cause_length == 0)) {
			/* Invalid cause length, possibly due to truncation. */
			SCTPDBG(SCTP_DEBUG_INPUT1, "Bogus length in cause - bytes left: %u cause length: %u\n",
			        remaining_length, cause_length);
			return (0);
		}
		if (code == 0) {
			/* report the first error cause */
			code = cause_code;
		}
		switch (cause_code) {
		case SCTP_CAUSE_INVALID_STREAM:
		case SCTP_CAUSE_MISSING_PARAM:
		case SCTP_CAUSE_INVALID_PARAM:
		case SCTP_CAUSE_NO_USER_DATA:
			/* These indicate a protocol bug on one side; log only. */
			SCTPDBG(SCTP_DEBUG_INPUT1, "Software error we got a %u back? We have a bug :/ (or do they?)\n",
			        cause_code);
			break;
		case SCTP_CAUSE_NAT_COLLIDING_STATE:
			SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags: %x\n",
			        ch->chunk_flags);
			/* Non-zero: NAT handler consumed it; stop processing. */
			if (sctp_handle_nat_colliding_state(stcb)) {
				return (0);
			}
			break;
		case SCTP_CAUSE_NAT_MISSING_STATE:
			SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags: %x\n",
			        ch->chunk_flags);
			if (sctp_handle_nat_missing_state(stcb, net)) {
				return (0);
			}
			break;
		case SCTP_CAUSE_STALE_COOKIE:
			/*
			 * We only act if we have echoed a cookie and are
			 * waiting.
			 */
			if ((cause_length >= sizeof(struct sctp_error_stale_cookie)) &&
			    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
				struct sctp_error_stale_cookie *stale_cookie;

				stale_cookie = (struct sctp_error_stale_cookie *)cause;
				/* Ask for the peer's measured staleness as our
				 * cookie lifetime next time.
				 */
				asoc->cookie_preserve_req = ntohl(stale_cookie->stale_time);
				/* Double it to be more robust on RTX */
				if (asoc->cookie_preserve_req <= UINT32_MAX / 2) {
					asoc->cookie_preserve_req *= 2;
				} else {
					asoc->cookie_preserve_req = UINT32_MAX;
				}
				asoc->stale_cookie_count++;
				if (asoc->stale_cookie_count >
				    asoc->max_init_times) {
					sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
					/* now free the asoc */
#if defined(__APPLE__) && !defined(__Userspace__)
					/* Apple: socket lock required before
					 * freeing; refcount guards the stcb.
					 */
					so = SCTP_INP_SO(stcb->sctp_ep);
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
					SCTP_SOCKET_LOCK(so, 1);
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
					(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
					    SCTP_FROM_SCTP_INPUT + SCTP_LOC_12);
#if defined(__APPLE__) && !defined(__Userspace__)
					SCTP_SOCKET_UNLOCK(so, 1);
#endif
					return (-1);
				}
				/* blast back to INIT state */
				sctp_toss_old_cookies(stcb, &stcb->asoc);
				SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
				sctp_stop_all_cookie_timers(stcb);
				sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
			}
			break;
		case SCTP_CAUSE_UNRESOLVABLE_ADDR:
			/*
			 * Nothing we can do here, we don't do hostname
			 * addresses so if the peer does not like my IPv6
			 * (or IPv4 for that matter) it does not matter. If
			 * they don't support that type of address, they can
			 * NOT possibly get that packet type... i.e. with no
			 * IPv6 you can't receive a IPv6 packet. so we can
			 * safely ignore this one. If we ever added support
			 * for HOSTNAME Addresses, then we would need to do
			 * something here.
			 */
			break;
		case SCTP_CAUSE_UNRECOG_CHUNK:
			if (cause_length >= sizeof(struct sctp_error_unrecognized_chunk)) {
				struct sctp_error_unrecognized_chunk *unrec_chunk;

				unrec_chunk = (struct sctp_error_unrecognized_chunk *)cause;
				sctp_process_unrecog_chunk(stcb, unrec_chunk->ch.chunk_type);
			}
			break;
		case SCTP_CAUSE_UNRECOG_PARAM:
			/* XXX: We only consider the first parameter */
			if (cause_length >= sizeof(struct sctp_error_cause) + sizeof(struct sctp_paramhdr)) {
				struct sctp_paramhdr *unrec_parameter;

				unrec_parameter = (struct sctp_paramhdr *)(cause + 1);
				sctp_process_unrecog_param(stcb, ntohs(unrec_parameter->param_type));
			}
			break;
		case SCTP_CAUSE_COOKIE_IN_SHUTDOWN:
			/*
			 * We ignore this since the timer will drive out a
			 * new cookie anyway and there timer will drive us
			 * to send a SHUTDOWN_COMPLETE. We can't send one
			 * here since we don't have their tag.
			 */
			break;
		case SCTP_CAUSE_DELETING_LAST_ADDR:
		case SCTP_CAUSE_RESOURCE_SHORTAGE:
		case SCTP_CAUSE_DELETING_SRC_ADDR:
			/*
			 * We should NOT get these here, but in a
			 * ASCONF-ACK.
			 */
			SCTPDBG(SCTP_DEBUG_INPUT2, "Peer sends ASCONF errors in a error cause with code %u.\n",
			        cause_code);
			break;
		case SCTP_CAUSE_OUT_OF_RESC:
			/*
			 * And what, pray tell do we do with the fact that
			 * the peer is out of resources? Not really sure we
			 * could do anything but abort. I suspect this
			 * should have came WITH an abort instead of in a
			 * OP-ERROR.
			 */
			break;
		default:
			SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_handle_error: unknown code 0x%x\n",
			        cause_code);
			break;
		}
		/* Advance to the next cause; lengths are padded to 4 bytes. */
		adjust = SCTP_SIZE32(cause_length);
		if (remaining_length >= adjust) {
			remaining_length -= adjust;
		} else {
			remaining_length = 0;
		}
		cause = (struct sctp_error_cause *)((caddr_t)cause + adjust);
	}
	/* Tell the ULP about the (first) remote error cause. */
	sctp_ulp_notify(SCTP_NOTIFY_REMOTE_ERROR, stcb, code, ch, SCTP_SO_NOT_LOCKED);
	return (0);
}
1409
1410 static int
sctp_handle_init_ack(struct mbuf * m,int iphlen,int offset,struct sockaddr * src,struct sockaddr * dst,struct sctphdr * sh,struct sctp_init_ack_chunk * cp,struct sctp_tcb * stcb,struct sctp_nets * net,int * abort_no_unlock,uint8_t mflowtype,uint32_t mflowid,uint32_t vrf_id)1411 sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset,
1412 struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh,
1413 struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
1414 struct sctp_nets *net, int *abort_no_unlock,
1415 #if defined(__FreeBSD__) && !defined(__Userspace__)
1416 uint8_t mflowtype, uint32_t mflowid,
1417 #endif
1418 uint32_t vrf_id)
1419 {
1420 struct sctp_init_ack *init_ack;
1421 struct mbuf *op_err;
1422
1423 SCTPDBG(SCTP_DEBUG_INPUT2,
1424 "sctp_handle_init_ack: handling INIT-ACK\n");
1425
1426 if (stcb == NULL) {
1427 SCTPDBG(SCTP_DEBUG_INPUT2,
1428 "sctp_handle_init_ack: TCB is null\n");
1429 return (-1);
1430 }
1431 if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_ack_chunk)) {
1432 /* Invalid length */
1433 op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
1434 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
1435 src, dst, sh, op_err,
1436 #if defined(__FreeBSD__) && !defined(__Userspace__)
1437 mflowtype, mflowid,
1438 #endif
1439 vrf_id, net->port);
1440 *abort_no_unlock = 1;
1441 return (-1);
1442 }
1443 init_ack = &cp->init;
1444 /* validate parameters */
1445 if (init_ack->initiate_tag == 0) {
1446 /* protocol error... send an abort */
1447 op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
1448 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
1449 src, dst, sh, op_err,
1450 #if defined(__FreeBSD__) && !defined(__Userspace__)
1451 mflowtype, mflowid,
1452 #endif
1453 vrf_id, net->port);
1454 *abort_no_unlock = 1;
1455 return (-1);
1456 }
1457 if (ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) {
1458 /* protocol error... send an abort */
1459 op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
1460 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
1461 src, dst, sh, op_err,
1462 #if defined(__FreeBSD__) && !defined(__Userspace__)
1463 mflowtype, mflowid,
1464 #endif
1465 vrf_id, net->port);
1466 *abort_no_unlock = 1;
1467 return (-1);
1468 }
1469 if (init_ack->num_inbound_streams == 0) {
1470 /* protocol error... send an abort */
1471 op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
1472 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
1473 src, dst, sh, op_err,
1474 #if defined(__FreeBSD__) && !defined(__Userspace__)
1475 mflowtype, mflowid,
1476 #endif
1477 vrf_id, net->port);
1478 *abort_no_unlock = 1;
1479 return (-1);
1480 }
1481 if (init_ack->num_outbound_streams == 0) {
1482 /* protocol error... send an abort */
1483 op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
1484 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
1485 src, dst, sh, op_err,
1486 #if defined(__FreeBSD__) && !defined(__Userspace__)
1487 mflowtype, mflowid,
1488 #endif
1489 vrf_id, net->port);
1490 *abort_no_unlock = 1;
1491 return (-1);
1492 }
1493 /* process according to association state... */
1494 switch (SCTP_GET_STATE(stcb)) {
1495 case SCTP_STATE_COOKIE_WAIT:
1496 /* this is the expected state for this chunk */
1497 /* process the INIT-ACK parameters */
1498 if (stcb->asoc.primary_destination->dest_state &
1499 SCTP_ADDR_UNCONFIRMED) {
1500 /*
1501 * The primary is where we sent the INIT, we can
1502 * always consider it confirmed when the INIT-ACK is
1503 * returned. Do this before we load addresses
1504 * though.
1505 */
1506 stcb->asoc.primary_destination->dest_state &=
1507 ~SCTP_ADDR_UNCONFIRMED;
1508 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
1509 stcb, 0, (void *)stcb->asoc.primary_destination, SCTP_SO_NOT_LOCKED);
1510 }
1511 if (sctp_process_init_ack(m, iphlen, offset, src, dst, sh, cp, stcb,
1512 net, abort_no_unlock,
1513 #if defined(__FreeBSD__) && !defined(__Userspace__)
1514 mflowtype, mflowid,
1515 #endif
1516 vrf_id) < 0) {
1517 /* error in parsing parameters */
1518 return (-1);
1519 }
1520 /* update our state */
1521 SCTPDBG(SCTP_DEBUG_INPUT2, "moving to COOKIE-ECHOED state\n");
1522 SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_ECHOED);
1523
1524 /* reset the RTO calc */
1525 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
1526 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
1527 stcb->asoc.overall_error_count,
1528 0,
1529 SCTP_FROM_SCTP_INPUT,
1530 __LINE__);
1531 }
1532 stcb->asoc.overall_error_count = 0;
1533 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
1534 /*
1535 * collapse the init timer back in case of a exponential
1536 * backoff
1537 */
1538 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, stcb->sctp_ep,
1539 stcb, net);
1540 /*
1541 * the send at the end of the inbound data processing will
1542 * cause the cookie to be sent
1543 */
1544 break;
1545 case SCTP_STATE_SHUTDOWN_SENT:
1546 /* incorrect state... discard */
1547 break;
1548 case SCTP_STATE_COOKIE_ECHOED:
1549 /* incorrect state... discard */
1550 break;
1551 case SCTP_STATE_OPEN:
1552 /* incorrect state... discard */
1553 break;
1554 case SCTP_STATE_EMPTY:
1555 case SCTP_STATE_INUSE:
1556 default:
1557 /* incorrect state... discard */
1558 return (-1);
1559 break;
1560 }
1561 SCTPDBG(SCTP_DEBUG_INPUT1, "Leaving handle-init-ack end\n");
1562 return (0);
1563 }
1564
1565 static struct sctp_tcb *
1566 sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
1567 struct sockaddr *src, struct sockaddr *dst,
1568 struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
1569 struct sctp_inpcb *inp, struct sctp_nets **netp,
1570 struct sockaddr *init_src, int *notification,
1571 int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
1572 #if defined(__FreeBSD__) && !defined(__Userspace__)
1573 uint8_t mflowtype, uint32_t mflowid,
1574 #endif
1575 uint32_t vrf_id, uint16_t port);
1576
1577
1578 /*
1579 * handle a state cookie for an existing association m: input packet mbuf
1580 * chain-- assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a
1581 * "split" mbuf and the cookie signature does not exist offset: offset into
1582 * mbuf to the cookie-echo chunk
1583 */
1584 static struct sctp_tcb *
sctp_process_cookie_existing(struct mbuf * m,int iphlen,int offset,struct sockaddr * src,struct sockaddr * dst,struct sctphdr * sh,struct sctp_state_cookie * cookie,int cookie_len,struct sctp_inpcb * inp,struct sctp_tcb * stcb,struct sctp_nets ** netp,struct sockaddr * init_src,int * notification,int auth_skipped,uint32_t auth_offset,uint32_t auth_len,uint8_t mflowtype,uint32_t mflowid,uint32_t vrf_id,uint16_t port)1585 sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
1586 struct sockaddr *src, struct sockaddr *dst,
1587 struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
1588 struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets **netp,
1589 struct sockaddr *init_src, int *notification,
1590 int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
1591 #if defined(__FreeBSD__) && !defined(__Userspace__)
1592 uint8_t mflowtype, uint32_t mflowid,
1593 #endif
1594 uint32_t vrf_id, uint16_t port)
1595 {
1596 struct sctp_association *asoc;
1597 struct sctp_init_chunk *init_cp, init_buf;
1598 struct sctp_init_ack_chunk *initack_cp, initack_buf;
1599 struct sctp_asconf_addr *aparam, *naparam;
1600 struct sctp_asconf_ack *aack, *naack;
1601 struct sctp_tmit_chunk *chk, *nchk;
1602 struct sctp_stream_reset_list *strrst, *nstrrst;
1603 struct sctp_queued_to_read *sq, *nsq;
1604 struct sctp_nets *net;
1605 struct mbuf *op_err;
1606 struct timeval old;
1607 int init_offset, initack_offset, i;
1608 int retval;
1609 int spec_flag = 0;
1610 uint32_t how_indx;
1611 #if defined(SCTP_DETAILED_STR_STATS)
1612 int j;
1613 #endif
1614
1615 net = *netp;
1616 /* I know that the TCB is non-NULL from the caller */
1617 asoc = &stcb->asoc;
1618 for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) {
1619 if (asoc->cookie_how[how_indx] == 0)
1620 break;
1621 }
1622 if (how_indx < sizeof(asoc->cookie_how)) {
1623 asoc->cookie_how[how_indx] = 1;
1624 }
1625 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
1626 /* SHUTDOWN came in after sending INIT-ACK */
1627 sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
1628 op_err = sctp_generate_cause(SCTP_CAUSE_COOKIE_IN_SHUTDOWN, "");
1629 sctp_send_operr_to(src, dst, sh, cookie->peers_vtag, op_err,
1630 #if defined(__FreeBSD__) && !defined(__Userspace__)
1631 mflowtype, mflowid, inp->fibnum,
1632 #endif
1633 vrf_id, net->port);
1634 if (how_indx < sizeof(asoc->cookie_how))
1635 asoc->cookie_how[how_indx] = 2;
1636 return (NULL);
1637 }
1638 /*
1639 * find and validate the INIT chunk in the cookie (peer's info) the
1640 * INIT should start after the cookie-echo header struct (chunk
1641 * header, state cookie header struct)
1642 */
1643 init_offset = offset += sizeof(struct sctp_cookie_echo_chunk);
1644
1645 init_cp = (struct sctp_init_chunk *)
1646 sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
1647 (uint8_t *) & init_buf);
1648 if (init_cp == NULL) {
1649 /* could not pull a INIT chunk in cookie */
1650 return (NULL);
1651 }
1652 if (init_cp->ch.chunk_type != SCTP_INITIATION) {
1653 return (NULL);
1654 }
1655 /*
1656 * find and validate the INIT-ACK chunk in the cookie (my info) the
1657 * INIT-ACK follows the INIT chunk
1658 */
1659 initack_offset = init_offset + SCTP_SIZE32(ntohs(init_cp->ch.chunk_length));
1660 initack_cp = (struct sctp_init_ack_chunk *)
1661 sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
1662 (uint8_t *) & initack_buf);
1663 if (initack_cp == NULL) {
1664 /* could not pull INIT-ACK chunk in cookie */
1665 return (NULL);
1666 }
1667 if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
1668 return (NULL);
1669 }
1670 if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1671 (ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag)) {
1672 /*
1673 * case D in Section 5.2.4 Table 2: MMAA process accordingly
1674 * to get into the OPEN state
1675 */
1676 if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
1677 /*-
1678 * Opps, this means that we somehow generated two vtag's
1679 * the same. I.e. we did:
1680 * Us Peer
1681 * <---INIT(tag=a)------
1682 * ----INIT-ACK(tag=t)-->
1683 * ----INIT(tag=t)------> *1
1684 * <---INIT-ACK(tag=a)---
1685 * <----CE(tag=t)------------- *2
1686 *
1687 * At point *1 we should be generating a different
1688 * tag t'. Which means we would throw away the CE and send
1689 * ours instead. Basically this is case C (throw away side).
1690 */
1691 if (how_indx < sizeof(asoc->cookie_how))
1692 asoc->cookie_how[how_indx] = 17;
1693 return (NULL);
1694
1695 }
1696 switch (SCTP_GET_STATE(stcb)) {
1697 case SCTP_STATE_COOKIE_WAIT:
1698 case SCTP_STATE_COOKIE_ECHOED:
1699 /*
1700 * INIT was sent but got a COOKIE_ECHO with the
1701 * correct tags... just accept it...but we must
1702 * process the init so that we can make sure we
1703 * have the right seq no's.
1704 */
1705 /* First we must process the INIT !! */
1706 retval = sctp_process_init(init_cp, stcb);
1707 if (retval < 0) {
1708 if (how_indx < sizeof(asoc->cookie_how))
1709 asoc->cookie_how[how_indx] = 3;
1710 return (NULL);
1711 }
1712 /* we have already processed the INIT so no problem */
1713 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp,
1714 stcb, net,
1715 SCTP_FROM_SCTP_INPUT + SCTP_LOC_13);
1716 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp,
1717 stcb, net,
1718 SCTP_FROM_SCTP_INPUT + SCTP_LOC_14);
1719 /* update current state */
1720 if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)
1721 SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
1722 else
1723 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1724
1725 SCTP_SET_STATE(stcb, SCTP_STATE_OPEN);
1726 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1727 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1728 stcb->sctp_ep, stcb, NULL);
1729 }
1730 SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1731 sctp_stop_all_cookie_timers(stcb);
1732 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1733 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1734 (!SCTP_IS_LISTENING(inp))) {
1735 #if defined(__APPLE__) && !defined(__Userspace__)
1736 struct socket *so;
1737 #endif
1738 /*
1739 * Here is where collision would go if we
1740 * did a connect() and instead got a
1741 * init/init-ack/cookie done before the
1742 * init-ack came back..
1743 */
1744 stcb->sctp_ep->sctp_flags |=
1745 SCTP_PCB_FLAGS_CONNECTED;
1746 #if defined(__APPLE__) && !defined(__Userspace__)
1747 so = SCTP_INP_SO(stcb->sctp_ep);
1748 atomic_add_int(&stcb->asoc.refcnt, 1);
1749 SCTP_TCB_UNLOCK(stcb);
1750 SCTP_SOCKET_LOCK(so, 1);
1751 SCTP_TCB_LOCK(stcb);
1752 atomic_add_int(&stcb->asoc.refcnt, -1);
1753 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1754 SCTP_SOCKET_UNLOCK(so, 1);
1755 return (NULL);
1756 }
1757 #endif
1758 soisconnected(stcb->sctp_socket);
1759 #if defined(__APPLE__) && !defined(__Userspace__)
1760 SCTP_SOCKET_UNLOCK(so, 1);
1761 #endif
1762 }
1763 /* notify upper layer */
1764 *notification = SCTP_NOTIFY_ASSOC_UP;
1765 /*
1766 * since we did not send a HB make sure we
1767 * don't double things
1768 */
1769 old.tv_sec = cookie->time_entered.tv_sec;
1770 old.tv_usec = cookie->time_entered.tv_usec;
1771 net->hb_responded = 1;
1772 sctp_calculate_rto(stcb, asoc, net, &old,
1773 SCTP_RTT_FROM_NON_DATA);
1774
1775 if (stcb->asoc.sctp_autoclose_ticks &&
1776 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) {
1777 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
1778 inp, stcb, NULL);
1779 }
1780 break;
1781 default:
1782 /*
1783 * we're in the OPEN state (or beyond), so
1784 * peer must have simply lost the COOKIE-ACK
1785 */
1786 break;
1787 } /* end switch */
1788 sctp_stop_all_cookie_timers(stcb);
1789 /*
1790 * We ignore the return code here.. not sure if we should
1791 * somehow abort.. but we do have an existing asoc. This
1792 * really should not fail.
1793 */
1794 if (sctp_load_addresses_from_init(stcb, m,
1795 init_offset + sizeof(struct sctp_init_chunk),
1796 initack_offset, src, dst, init_src, stcb->asoc.port)) {
1797 if (how_indx < sizeof(asoc->cookie_how))
1798 asoc->cookie_how[how_indx] = 4;
1799 return (NULL);
1800 }
1801 /* respond with a COOKIE-ACK */
1802 sctp_toss_old_cookies(stcb, asoc);
1803 sctp_send_cookie_ack(stcb);
1804 if (how_indx < sizeof(asoc->cookie_how))
1805 asoc->cookie_how[how_indx] = 5;
1806 return (stcb);
1807 }
1808
1809 if (ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
1810 ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag &&
1811 cookie->tie_tag_my_vtag == 0 &&
1812 cookie->tie_tag_peer_vtag == 0) {
1813 /*
1814 * case C in Section 5.2.4 Table 2: XMOO silently discard
1815 */
1816 if (how_indx < sizeof(asoc->cookie_how))
1817 asoc->cookie_how[how_indx] = 6;
1818 return (NULL);
1819 }
1820 /* If nat support, and the below and stcb is established,
1821 * send back a ABORT(colliding state) if we are established.
1822 */
1823 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) &&
1824 (asoc->peer_supports_nat) &&
1825 ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1826 ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) ||
1827 (asoc->peer_vtag == 0)))) {
1828 /* Special case - Peer's support nat. We may have
1829 * two init's that we gave out the same tag on since
1830 * one was not established.. i.e. we get INIT from host-1
1831 * behind the nat and we respond tag-a, we get a INIT from
1832 * host-2 behind the nat and we get tag-a again. Then we
1833 * bring up host-1 (or 2's) assoc, Then comes the cookie
1834 * from hsot-2 (or 1). Now we have colliding state. We must
1835 * send an abort here with colliding state indication.
1836 */
1837 op_err = sctp_generate_cause(SCTP_CAUSE_NAT_COLLIDING_STATE, "");
1838 sctp_send_abort(m, iphlen, src, dst, sh, 0, op_err,
1839 #if defined(__FreeBSD__) && !defined(__Userspace__)
1840 mflowtype, mflowid, inp->fibnum,
1841 #endif
1842 vrf_id, port);
1843 return (NULL);
1844 }
1845 if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1846 ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) ||
1847 (asoc->peer_vtag == 0))) {
1848 /*
1849 * case B in Section 5.2.4 Table 2: MXAA or MOAA my info
1850 * should be ok, re-accept peer info
1851 */
1852 if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
1853 /* Extension of case C.
1854 * If we hit this, then the random number
1855 * generator returned the same vtag when we
1856 * first sent our INIT-ACK and when we later sent
1857 * our INIT. The side with the seq numbers that are
1858 * different will be the one that normnally would
1859 * have hit case C. This in effect "extends" our vtags
1860 * in this collision case to be 64 bits. The same collision
1861 * could occur aka you get both vtag and seq number the
1862 * same twice in a row.. but is much less likely. If it
1863 * did happen then we would proceed through and bring
1864 * up the assoc.. we may end up with the wrong stream
1865 * setup however.. which would be bad.. but there is
1866 * no way to tell.. until we send on a stream that does
1867 * not exist :-)
1868 */
1869 if (how_indx < sizeof(asoc->cookie_how))
1870 asoc->cookie_how[how_indx] = 7;
1871
1872 return (NULL);
1873 }
1874 if (how_indx < sizeof(asoc->cookie_how))
1875 asoc->cookie_how[how_indx] = 8;
1876 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
1877 SCTP_FROM_SCTP_INPUT + SCTP_LOC_15);
1878 sctp_stop_all_cookie_timers(stcb);
1879 /*
1880 * since we did not send a HB make sure we don't double
1881 * things
1882 */
1883 net->hb_responded = 1;
1884 if (stcb->asoc.sctp_autoclose_ticks &&
1885 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
1886 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
1887 NULL);
1888 }
1889 asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
1890 if (asoc->pre_open_streams < asoc->streamoutcnt) {
1891 asoc->pre_open_streams = asoc->streamoutcnt;
1892 }
1893
1894 if (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) {
1895 /* Ok the peer probably discarded our
1896 * data (if we echoed a cookie+data). So anything
1897 * on the sent_queue should be marked for
1898 * retransmit, we may not get something to
1899 * kick us so it COULD still take a timeout
1900 * to move these.. but it can't hurt to mark them.
1901 */
1902 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
1903 if (chk->sent < SCTP_DATAGRAM_RESEND) {
1904 chk->sent = SCTP_DATAGRAM_RESEND;
1905 sctp_flight_size_decrease(chk);
1906 sctp_total_flight_decrease(stcb, chk);
1907 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
1908 spec_flag++;
1909 }
1910 }
1911
1912 }
1913 /* process the INIT info (peer's info) */
1914 retval = sctp_process_init(init_cp, stcb);
1915 if (retval < 0) {
1916 if (how_indx < sizeof(asoc->cookie_how))
1917 asoc->cookie_how[how_indx] = 9;
1918 return (NULL);
1919 }
1920 if (sctp_load_addresses_from_init(stcb, m,
1921 init_offset + sizeof(struct sctp_init_chunk),
1922 initack_offset, src, dst, init_src, stcb->asoc.port)) {
1923 if (how_indx < sizeof(asoc->cookie_how))
1924 asoc->cookie_how[how_indx] = 10;
1925 return (NULL);
1926 }
1927 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
1928 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
1929 *notification = SCTP_NOTIFY_ASSOC_UP;
1930
1931 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1932 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1933 (!SCTP_IS_LISTENING(inp))) {
1934 #if defined(__APPLE__) && !defined(__Userspace__)
1935 struct socket *so;
1936 #endif
1937 stcb->sctp_ep->sctp_flags |=
1938 SCTP_PCB_FLAGS_CONNECTED;
1939 #if defined(__APPLE__) && !defined(__Userspace__)
1940 so = SCTP_INP_SO(stcb->sctp_ep);
1941 atomic_add_int(&stcb->asoc.refcnt, 1);
1942 SCTP_TCB_UNLOCK(stcb);
1943 SCTP_SOCKET_LOCK(so, 1);
1944 SCTP_TCB_LOCK(stcb);
1945 atomic_add_int(&stcb->asoc.refcnt, -1);
1946 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1947 SCTP_SOCKET_UNLOCK(so, 1);
1948 return (NULL);
1949 }
1950 #endif
1951 soisconnected(stcb->sctp_socket);
1952 #if defined(__APPLE__) && !defined(__Userspace__)
1953 SCTP_SOCKET_UNLOCK(so, 1);
1954 #endif
1955 }
1956 if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)
1957 SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
1958 else
1959 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1960 SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1961 } else if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
1962 SCTP_STAT_INCR_COUNTER32(sctps_restartestab);
1963 } else {
1964 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1965 }
1966 SCTP_SET_STATE(stcb, SCTP_STATE_OPEN);
1967 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1968 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1969 stcb->sctp_ep, stcb, NULL);
1970 }
1971 sctp_stop_all_cookie_timers(stcb);
1972 sctp_toss_old_cookies(stcb, asoc);
1973 sctp_send_cookie_ack(stcb);
1974 if (spec_flag) {
1975 /* only if we have retrans set do we do this. What
1976 * this call does is get only the COOKIE-ACK out
1977 * and then when we return the normal call to
1978 * sctp_chunk_output will get the retrans out
1979 * behind this.
1980 */
1981 sctp_chunk_output(inp,stcb, SCTP_OUTPUT_FROM_COOKIE_ACK, SCTP_SO_NOT_LOCKED);
1982 }
1983 if (how_indx < sizeof(asoc->cookie_how))
1984 asoc->cookie_how[how_indx] = 11;
1985
1986 return (stcb);
1987 }
1988 if ((ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
1989 ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) &&
1990 cookie->tie_tag_my_vtag == asoc->my_vtag_nonce &&
1991 cookie->tie_tag_peer_vtag == asoc->peer_vtag_nonce &&
1992 cookie->tie_tag_peer_vtag != 0) {
1993 struct sctpasochead *head;
1994 #if defined(__APPLE__) && !defined(__Userspace__)
1995 struct socket *so;
1996 #endif
1997
1998 if (asoc->peer_supports_nat) {
1999 /* This is a gross gross hack.
2000 * Just call the cookie_new code since we
2001 * are allowing a duplicate association.
2002 * I hope this works...
2003 */
2004 return (sctp_process_cookie_new(m, iphlen, offset, src, dst,
2005 sh, cookie, cookie_len,
2006 inp, netp, init_src,notification,
2007 auth_skipped, auth_offset, auth_len,
2008 #if defined(__FreeBSD__) && !defined(__Userspace__)
2009 mflowtype, mflowid,
2010 #endif
2011 vrf_id, port));
2012 }
2013 /*
2014 * case A in Section 5.2.4 Table 2: XXMM (peer restarted)
2015 */
2016 /* temp code */
2017 if (how_indx < sizeof(asoc->cookie_how))
2018 asoc->cookie_how[how_indx] = 12;
2019 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net,
2020 SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
2021 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
2022 SCTP_FROM_SCTP_INPUT + SCTP_LOC_17);
2023
2024 /* notify upper layer */
2025 *notification = SCTP_NOTIFY_ASSOC_RESTART;
2026 atomic_add_int(&stcb->asoc.refcnt, 1);
2027 if ((SCTP_GET_STATE(stcb) != SCTP_STATE_OPEN) &&
2028 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
2029 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT)) {
2030 SCTP_STAT_INCR_GAUGE32(sctps_currestab);
2031 }
2032 if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
2033 SCTP_STAT_INCR_GAUGE32(sctps_restartestab);
2034 } else if (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) {
2035 SCTP_STAT_INCR_GAUGE32(sctps_collisionestab);
2036 }
2037 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
2038 SCTP_SET_STATE(stcb, SCTP_STATE_OPEN);
2039 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
2040 stcb->sctp_ep, stcb, NULL);
2041
2042 } else if (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) {
2043 /* move to OPEN state, if not in SHUTDOWN_SENT */
2044 SCTP_SET_STATE(stcb, SCTP_STATE_OPEN);
2045 }
2046 if (asoc->pre_open_streams < asoc->streamoutcnt) {
2047 asoc->pre_open_streams = asoc->streamoutcnt;
2048 }
2049 asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
2050 asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
2051 asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
2052
2053 asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
2054
2055 asoc->str_reset_seq_in = asoc->init_seq_number;
2056
2057 asoc->advanced_peer_ack_point = asoc->last_acked_seq;
2058 if (asoc->mapping_array) {
2059 memset(asoc->mapping_array, 0,
2060 asoc->mapping_array_size);
2061 }
2062 if (asoc->nr_mapping_array) {
2063 memset(asoc->nr_mapping_array, 0,
2064 asoc->mapping_array_size);
2065 }
2066 SCTP_TCB_UNLOCK(stcb);
2067 #if defined(__APPLE__) && !defined(__Userspace__)
2068 so = SCTP_INP_SO(stcb->sctp_ep);
2069 SCTP_SOCKET_LOCK(so, 1);
2070 #endif
2071 SCTP_INP_INFO_WLOCK();
2072 SCTP_INP_WLOCK(stcb->sctp_ep);
2073 SCTP_TCB_LOCK(stcb);
2074 atomic_add_int(&stcb->asoc.refcnt, -1);
2075 /* send up all the data */
2076 SCTP_TCB_SEND_LOCK(stcb);
2077
2078 sctp_report_all_outbound(stcb, 0, 1, SCTP_SO_LOCKED);
2079 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
2080 stcb->asoc.strmout[i].chunks_on_queues = 0;
2081 #if defined(SCTP_DETAILED_STR_STATS)
2082 for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
2083 asoc->strmout[i].abandoned_sent[j] = 0;
2084 asoc->strmout[i].abandoned_unsent[j] = 0;
2085 }
2086 #else
2087 asoc->strmout[i].abandoned_sent[0] = 0;
2088 asoc->strmout[i].abandoned_unsent[0] = 0;
2089 #endif
2090 stcb->asoc.strmout[i].sid = i;
2091 stcb->asoc.strmout[i].next_mid_ordered = 0;
2092 stcb->asoc.strmout[i].next_mid_unordered = 0;
2093 stcb->asoc.strmout[i].last_msg_incomplete = 0;
2094 }
2095 TAILQ_FOREACH_SAFE(strrst, &asoc->resetHead, next_resp, nstrrst) {
2096 TAILQ_REMOVE(&asoc->resetHead, strrst, next_resp);
2097 SCTP_FREE(strrst, SCTP_M_STRESET);
2098 }
2099 TAILQ_FOREACH_SAFE(sq, &asoc->pending_reply_queue, next, nsq) {
2100 TAILQ_REMOVE(&asoc->pending_reply_queue, sq, next);
2101 if (sq->data) {
2102 sctp_m_freem(sq->data);
2103 sq->data = NULL;
2104 }
2105 sctp_free_remote_addr(sq->whoFrom);
2106 sq->whoFrom = NULL;
2107 sq->stcb = NULL;
2108 sctp_free_a_readq(stcb, sq);
2109 }
2110 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
2111 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
2112 if (chk->data) {
2113 sctp_m_freem(chk->data);
2114 chk->data = NULL;
2115 }
2116 if (chk->holds_key_ref)
2117 sctp_auth_key_release(stcb, chk->auth_keyid, SCTP_SO_LOCKED);
2118 sctp_free_remote_addr(chk->whoTo);
2119 SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk);
2120 SCTP_DECR_CHK_COUNT();
2121 }
2122 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
2123 TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next);
2124 if (chk->data) {
2125 sctp_m_freem(chk->data);
2126 chk->data = NULL;
2127 }
2128 if (chk->holds_key_ref)
2129 sctp_auth_key_release(stcb, chk->auth_keyid, SCTP_SO_LOCKED);
2130 sctp_free_remote_addr(chk->whoTo);
2131 SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk);
2132 SCTP_DECR_CHK_COUNT();
2133 }
2134 TAILQ_FOREACH_SAFE(aparam, &asoc->asconf_queue, next, naparam) {
2135 TAILQ_REMOVE(&asoc->asconf_queue, aparam, next);
2136 SCTP_FREE(aparam,SCTP_M_ASC_ADDR);
2137 }
2138 TAILQ_FOREACH_SAFE(aack, &asoc->asconf_ack_sent, next, naack) {
2139 TAILQ_REMOVE(&asoc->asconf_ack_sent, aack, next);
2140 if (aack->data != NULL) {
2141 sctp_m_freem(aack->data);
2142 }
2143 SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asconf_ack), aack);
2144 }
2145
2146
2147 /* process the INIT-ACK info (my info) */
2148 asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
2149 asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
2150
2151 /* pull from vtag hash */
2152 LIST_REMOVE(stcb, sctp_asocs);
2153 /* re-insert to new vtag position */
2154 head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag,
2155 SCTP_BASE_INFO(hashasocmark))];
2156 /*
2157 * put it in the bucket in the vtag hash of assoc's for the
2158 * system
2159 */
2160 LIST_INSERT_HEAD(head, stcb, sctp_asocs);
2161
2162 SCTP_TCB_SEND_UNLOCK(stcb);
2163 SCTP_INP_WUNLOCK(stcb->sctp_ep);
2164 SCTP_INP_INFO_WUNLOCK();
2165 #if defined(__APPLE__) && !defined(__Userspace__)
2166 SCTP_SOCKET_UNLOCK(so, 1);
2167 #endif
2168 asoc->total_flight = 0;
2169 asoc->total_flight_count = 0;
2170 /* process the INIT info (peer's info) */
2171 retval = sctp_process_init(init_cp, stcb);
2172 if (retval < 0) {
2173 if (how_indx < sizeof(asoc->cookie_how))
2174 asoc->cookie_how[how_indx] = 13;
2175
2176 return (NULL);
2177 }
2178 /*
2179 * since we did not send a HB make sure we don't double
2180 * things
2181 */
2182 net->hb_responded = 1;
2183
2184 if (sctp_load_addresses_from_init(stcb, m,
2185 init_offset + sizeof(struct sctp_init_chunk),
2186 initack_offset, src, dst, init_src, stcb->asoc.port)) {
2187 if (how_indx < sizeof(asoc->cookie_how))
2188 asoc->cookie_how[how_indx] = 14;
2189
2190 return (NULL);
2191 }
2192 /* respond with a COOKIE-ACK */
2193 sctp_stop_all_cookie_timers(stcb);
2194 sctp_toss_old_cookies(stcb, asoc);
2195 sctp_send_cookie_ack(stcb);
2196 if (how_indx < sizeof(asoc->cookie_how))
2197 asoc->cookie_how[how_indx] = 15;
2198
2199 return (stcb);
2200 }
2201 if (how_indx < sizeof(asoc->cookie_how))
2202 asoc->cookie_how[how_indx] = 16;
2203 /* all other cases... */
2204 return (NULL);
2205 }
2206
2207
2208 /*
2209 * handle a state cookie for a new association m: input packet mbuf chain--
2210 * assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a "split" mbuf
2211 * and the cookie signature does not exist offset: offset into mbuf to the
2212 * cookie-echo chunk length: length of the cookie chunk to: where the init
2213 * was from returns a new TCB
2214 */
static struct sctp_tcb *
sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
    struct sctp_inpcb *inp, struct sctp_nets **netp,
    struct sockaddr *init_src, int *notification,
    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
#if defined(__FreeBSD__) && !defined(__Userspace__)
    uint8_t mflowtype, uint32_t mflowid,
#endif
    uint32_t vrf_id, uint16_t port)
{
	/*
	 * Overall flow: validate the INIT and INIT-ACK embedded in the
	 * cookie, allocate a brand-new TCB, populate it from the cookie,
	 * verify any AUTH chunk that preceded the COOKIE-ECHO, move the
	 * association to OPEN and respond with a COOKIE-ACK.  Every
	 * failure path after TCB allocation frees the half-built
	 * association before returning NULL.
	 */
	struct sctp_tcb *stcb;
	struct sctp_init_chunk *init_cp, init_buf;
	struct sctp_init_ack_chunk *initack_cp, initack_buf;
	union sctp_sockstore store;
	struct sctp_association *asoc;
	int init_offset, initack_offset, initack_limit;
	int retval;
	int error = 0;	/* out-parameter for sctp_aloc_assoc(); not inspected here */
	uint8_t auth_chunk_buf[SCTP_CHUNK_BUFFER_SIZE];
#if defined(__APPLE__) && !defined(__Userspace__)
	struct socket *so;

	so = SCTP_INP_SO(inp);
#endif

	/*
	 * find and validate the INIT chunk in the cookie (peer's info) the
	 * INIT should start after the cookie-echo header struct (chunk
	 * header, state cookie header struct)
	 */
	init_offset = offset + sizeof(struct sctp_cookie_echo_chunk);
	init_cp = (struct sctp_init_chunk *)
	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
	    (uint8_t *) & init_buf);
	if (init_cp == NULL) {
		/* could not pull a INIT chunk in cookie */
		SCTPDBG(SCTP_DEBUG_INPUT1,
			"process_cookie_new: could not pull INIT chunk hdr\n");
		return (NULL);
	}
	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
		SCTPDBG(SCTP_DEBUG_INPUT1, "HUH? process_cookie_new: could not find INIT chunk!\n");
		return (NULL);
	}
	/* INIT-ACK starts on the next 32-bit boundary after the INIT */
	initack_offset = init_offset + SCTP_SIZE32(ntohs(init_cp->ch.chunk_length));
	/*
	 * find and validate the INIT-ACK chunk in the cookie (my info) the
	 * INIT-ACK follows the INIT chunk
	 */
	initack_cp = (struct sctp_init_ack_chunk *)
	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
	    (uint8_t *) & initack_buf);
	if (initack_cp == NULL) {
		/* could not pull INIT-ACK chunk in cookie */
		SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: could not pull INIT-ACK chunk hdr\n");
		return (NULL);
	}
	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
		return (NULL);
	}
	/*
	 * NOTE: We can't use the INIT_ACK's chk_length to determine the
	 * "initack_limit" value.  This is because the chk_length field
	 * includes the length of the cookie, but the cookie is omitted when
	 * the INIT and INIT_ACK are tacked onto the cookie...
	 */
	initack_limit = offset + cookie_len;

	/*
	 * now that we know the INIT/INIT-ACK are in place, create a new TCB
	 * and populate
	 */

	/*
	 * Here we do a trick, we set in NULL for the proc/thread argument. We
	 * do this since in effect we only use the p argument when
	 * the socket is unbound and we must do an implicit bind.
	 * Since we are getting a cookie, we cannot be unbound.
	 */
	stcb = sctp_aloc_assoc(inp, init_src, &error,
	    ntohl(initack_cp->init.initiate_tag), vrf_id,
	    ntohs(initack_cp->init.num_outbound_streams),
	    port,
#if defined(__FreeBSD__) && !defined(__Userspace__)
	    (struct thread *)NULL,
#elif defined(_WIN32) && !defined(__Userspace__)
	    (PKTHREAD)NULL,
#else
	    (struct proc *)NULL,
#endif
	    SCTP_DONT_INITIALIZE_AUTH_PARAMS);
	if (stcb == NULL) {
		struct mbuf *op_err;

		/* memory problem? */
		SCTPDBG(SCTP_DEBUG_INPUT1,
			"process_cookie_new: no room for another TCB!\n");
		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
		    src, dst, sh, op_err,
#if defined(__FreeBSD__) && !defined(__Userspace__)
		    mflowtype, mflowid,
#endif
		    vrf_id, port);
		return (NULL);
	}
	asoc = &stcb->asoc;
	/* get scope variables out of cookie */
	asoc->scope.ipv4_local_scope = cookie->ipv4_scope;
	asoc->scope.site_scope = cookie->site_scope;
	asoc->scope.local_scope = cookie->local_scope;
	asoc->scope.loopback_scope = cookie->loopback_scope;

	/*
	 * Sanity check: the address-family legality recorded in the cookie
	 * must still match the endpoint's current settings.
	 */
#if defined(__Userspace__)
	if ((asoc->scope.ipv4_addr_legal != cookie->ipv4_addr_legal) ||
	    (asoc->scope.ipv6_addr_legal != cookie->ipv6_addr_legal) ||
	    (asoc->scope.conn_addr_legal != cookie->conn_addr_legal)) {
#else
	if ((asoc->scope.ipv4_addr_legal != cookie->ipv4_addr_legal) ||
	    (asoc->scope.ipv6_addr_legal != cookie->ipv6_addr_legal)) {
#endif
		struct mbuf *op_err;

		/*
		 * Houston we have a problem. The EP changed while the
		 * cookie was in flight. Only recourse is to abort the
		 * association.
		 */
		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
		    src, dst, sh, op_err,
#if defined(__FreeBSD__) && !defined(__Userspace__)
		    mflowtype, mflowid,
#endif
		    vrf_id, port);
#if defined(__APPLE__) && !defined(__Userspace__)
		/*
		 * Apple lock-order dance: the socket lock must be taken
		 * before the TCB lock, so drop the TCB lock first.  The
		 * refcnt bump keeps the assoc alive while it is unlocked.
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTP_INPUT + SCTP_LOC_18);
#if defined(__APPLE__) && !defined(__Userspace__)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		return (NULL);
	}
	/* process the INIT-ACK info (my info) */
	asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
	asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
	asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
	/* seed all outgoing sequence spaces from our initial TSN */
	asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
	asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
	asoc->str_reset_seq_in = asoc->init_seq_number;

	asoc->advanced_peer_ack_point = asoc->last_acked_seq;

	/* process the INIT info (peer's info) */
	retval = sctp_process_init(init_cp, stcb);
	if (retval < 0) {
#if defined(__APPLE__) && !defined(__Userspace__)
		/* see lock-order note above: socket lock before TCB lock */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTP_INPUT + SCTP_LOC_19);
#if defined(__APPLE__) && !defined(__Userspace__)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		return (NULL);
	}
	/* load all addresses */
	if (sctp_load_addresses_from_init(stcb, m,
	    init_offset + sizeof(struct sctp_init_chunk), initack_offset,
	    src, dst, init_src, port)) {
#if defined(__APPLE__) && !defined(__Userspace__)
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTP_INPUT + SCTP_LOC_20);
#if defined(__APPLE__) && !defined(__Userspace__)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		return (NULL);
	}
	/*
	 * verify any preceding AUTH chunk that was skipped
	 */
	/* pull the local authentication parameters from the cookie/init-ack */
	sctp_auth_get_cookie_params(stcb, m,
	    initack_offset + sizeof(struct sctp_init_ack_chunk),
	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)));
	if (auth_skipped) {
		struct sctp_auth_chunk *auth;

		/* an AUTH chunk that cannot fit the buffer is rejected */
		if (auth_len <= SCTP_CHUNK_BUFFER_SIZE) {
			auth = (struct sctp_auth_chunk *)sctp_m_getptr(m, auth_offset, auth_len, auth_chunk_buf);
		} else {
			auth = NULL;
		}
		if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, auth_offset)) {
			/* auth HMAC failed, dump the assoc and packet */
			SCTPDBG(SCTP_DEBUG_AUTH1,
				"COOKIE-ECHO: AUTH failed\n");
#if defined(__APPLE__) && !defined(__Userspace__)
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
			(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
			    SCTP_FROM_SCTP_INPUT + SCTP_LOC_21);
#if defined(__APPLE__) && !defined(__Userspace__)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
			return (NULL);
		} else {
			/* remaining chunks checked... good to go */
			stcb->asoc.authenticated = 1;
		}
	}

	/*
	 * if we're doing ASCONFs, check to see if we have any new local
	 * addresses that need to get added to the peer (eg. addresses
	 * changed while cookie echo in flight).  This needs to be done
	 * after we go to the OPEN state to do the correct asconf
	 * processing.  else, make sure we have the correct addresses in our
	 * lists
	 */

	/* warning, we re-use sin, sin6, sa_store here! */
	/* pull in local_address (our "from" address) */
	switch (cookie->laddr_type) {
#ifdef INET
	case SCTP_IPV4_ADDRESS:
		/* source addr is IPv4 */
		memset(&store.sin, 0, sizeof(struct sockaddr_in));
		store.sin.sin_family = AF_INET;
#ifdef HAVE_SIN_LEN
		store.sin.sin_len = sizeof(struct sockaddr_in);
#endif
		store.sin.sin_addr.s_addr = cookie->laddress[0];
		break;
#endif
#ifdef INET6
	case SCTP_IPV6_ADDRESS:
		/* source addr is IPv6 */
		memset(&store.sin6, 0, sizeof(struct sockaddr_in6));
		store.sin6.sin6_family = AF_INET6;
#ifdef HAVE_SIN6_LEN
		store.sin6.sin6_len = sizeof(struct sockaddr_in6);
#endif
		store.sin6.sin6_scope_id = cookie->scope_id;
		memcpy(&store.sin6.sin6_addr, cookie->laddress, sizeof(struct in6_addr));
		break;
#endif
#if defined(__Userspace__)
	case SCTP_CONN_ADDRESS:
		/* source addr is conn */
		memset(&store.sconn, 0, sizeof(struct sockaddr_conn));
		store.sconn.sconn_family = AF_CONN;
#ifdef HAVE_SCONN_LEN
		store.sconn.sconn_len = sizeof(struct sockaddr_conn);
#endif
		memcpy(&store.sconn.sconn_addr, cookie->laddress, sizeof(void *));
		break;
#endif
	default:
		/* unknown local address type in the cookie: drop the assoc */
#if defined(__APPLE__) && !defined(__Userspace__)
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTP_INPUT + SCTP_LOC_22);
#if defined(__APPLE__) && !defined(__Userspace__)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		return (NULL);
	}

	/* update current state */
	SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
	SCTP_SET_STATE(stcb, SCTP_STATE_OPEN);
	if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
				 stcb->sctp_ep, stcb, NULL);
	}
	sctp_stop_all_cookie_timers(stcb);
	SCTP_STAT_INCR_COUNTER32(sctps_passiveestab);
	SCTP_STAT_INCR_GAUGE32(sctps_currestab);

	/* set up to notify upper layer */
	*notification = SCTP_NOTIFY_ASSOC_UP;
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    (!SCTP_IS_LISTENING(inp))) {
		/*
		 * This is an endpoint that called connect() how it got a
		 * cookie that is NEW is a bit of a mystery. It must be that
		 * the INIT was sent, but before it got there.. a complete
		 * INIT/INIT-ACK/COOKIE arrived. But of course then it
		 * should have went to the other code.. not here.. oh well..
		 * a bit of protection is worth having..
		 */
		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
#if defined(__APPLE__) && !defined(__Userspace__)
		/*
		 * Take the socket lock for soisconnected(); bail out if the
		 * socket was closed while the TCB lock was dropped.
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return (NULL);
		}
#endif
		soisconnected(stcb->sctp_socket);
#if defined(__APPLE__) && !defined(__Userspace__)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	} else if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	           (SCTP_IS_LISTENING(inp))) {
		/*
		 * We don't want to do anything with this one. Since it is
		 * the listening guy. The timer will get started for
		 * accepted connections in the caller.
		 */
		;
	}
	if (stcb->asoc.sctp_autoclose_ticks &&
	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
		sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL);
	}
	(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
	*netp = sctp_findnet(stcb, init_src);
	if (*netp != NULL) {
		struct timeval old;

		/*
		 * Since we did not send a HB, make sure we don't double
		 * things.
		 */
		(*netp)->hb_responded = 1;
		/* Calculate the RTT from the cookie's timestamp. */
		old.tv_sec = cookie->time_entered.tv_sec;
		old.tv_usec = cookie->time_entered.tv_usec;
		sctp_calculate_rto(stcb, asoc, *netp, &old, SCTP_RTT_FROM_NON_DATA);
	}
	/* respond with a COOKIE-ACK */
	sctp_send_cookie_ack(stcb);

	/*
	 * check the address lists for any ASCONFs that need to be sent
	 * AFTER the cookie-ack is sent
	 */
	sctp_check_address_list(stcb, m,
	    initack_offset + sizeof(struct sctp_init_ack_chunk),
	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)),
	    &store.sa, cookie->local_scope, cookie->site_scope,
	    cookie->ipv4_scope, cookie->loopback_scope);


	return (stcb);
}
2596
2597 /*
2598 * CODE LIKE THIS NEEDS TO RUN IF the peer supports the NAT extension, i.e
2599 * we NEED to make sure we are not already using the vtag. If so we
2600 * need to send back an ABORT-TRY-AGAIN-WITH-NEW-TAG No middle box bit!
2601 head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(tag,
2602 SCTP_BASE_INFO(hashasocmark))];
2603 LIST_FOREACH(stcb, head, sctp_asocs) {
2604 if ((stcb->asoc.my_vtag == tag) && (stcb->rport == rport) && (inp == stcb->sctp_ep)) {
2605 -- SEND ABORT - TRY AGAIN --
2606 }
2607 }
2608 */
2609
2610 /*
2611 * handles a COOKIE-ECHO message stcb: modified to either a new or left as
2612 * existing (non-NULL) TCB
2613 */
static struct mbuf *
sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct sctp_cookie_echo_chunk *cp,
    struct sctp_inpcb **inp_p, struct sctp_tcb **stcb, struct sctp_nets **netp,
    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
    struct sctp_tcb **locked_tcb,
#if defined(__FreeBSD__) && !defined(__Userspace__)
    uint8_t mflowtype, uint32_t mflowid,
#endif
    uint32_t vrf_id, uint16_t port)
{
        /*
         * Overall flow:
         *   1. Sanity-check the chunk length and the ports/vtag stored in
         *      the state cookie against the packet header.
         *   2. Split the trailing HMAC signature off the mbuf chain,
         *      recompute the HMAC over the cookie, and compare (then
         *      re-attach the signature mbuf so later chunk processing
         *      sees the original chain).
         *   3. Reject stale cookies with a STALE_COOKIE error.
         *   4. Reconstruct the peer address embedded in the cookie and
         *      look for an existing association (collision case).
         *   5. Hand off to sctp_process_cookie_new() or
         *      sctp_process_cookie_existing().
         *   6. For one-to-one (TCP-style) sockets, perform the accept
         *      dance: sonewconn(), copy endpoint settings, and move the
         *      PCB/assoc onto the new socket.
         *
         * Returns m on success, NULL if the packet should be dropped.
         */
        struct sctp_state_cookie *cookie;
        struct sctp_tcb *l_stcb = *stcb;
        struct sctp_inpcb *l_inp;
        struct sockaddr *to;
        struct sctp_pcb *ep;
        struct mbuf *m_sig;
        uint8_t calc_sig[SCTP_SIGNATURE_SIZE], tmp_sig[SCTP_SIGNATURE_SIZE];
        uint8_t *sig;
        uint8_t cookie_ok = 0;
        unsigned int sig_offset, cookie_offset;
        unsigned int cookie_len;
        struct timeval now;
        struct timeval time_expires;
        int notification = 0;
        struct sctp_nets *netl;
        int had_a_existing_tcb = 0;
        int send_int_conf = 0;
#ifdef INET
        struct sockaddr_in sin;
#endif
#ifdef INET6
        struct sockaddr_in6 sin6;
#endif
#if defined(__Userspace__)
        struct sockaddr_conn sconn;
#endif

        SCTPDBG(SCTP_DEBUG_INPUT2,
                "sctp_handle_cookie: handling COOKIE-ECHO\n");

        if (inp_p == NULL) {
                return (NULL);
        }
        cookie = &cp->cookie;
        cookie_offset = offset + sizeof(struct sctp_chunkhdr);
        cookie_len = ntohs(cp->ch.chunk_length);

        /*
         * A valid cookie must at least hold the echoed INIT and INIT-ACK
         * plus the trailing HMAC signature.
         */
        if (cookie_len < sizeof(struct sctp_cookie_echo_chunk) +
            sizeof(struct sctp_init_chunk) +
            sizeof(struct sctp_init_ack_chunk) + SCTP_SIGNATURE_SIZE) {
                /* cookie too small */
                return (NULL);
        }
        if ((cookie->peerport != sh->src_port) ||
            (cookie->myport != sh->dest_port) ||
            (cookie->my_vtag != sh->v_tag)) {
                /*
                 * invalid ports or bad tag. Note that we always leave the
                 * v_tag in the header in network order and when we stored
                 * it in the my_vtag slot we also left it in network order.
                 * This maintains the match even though it may be in the
                 * opposite byte order of the machine :->
                 */
                return (NULL);
        }
#if defined(__Userspace__)
        /*
         * Recover the AF_CONN addresses within the cookie.
         * This needs to be done in the buffer provided for later processing
         * of the cookie and in the mbuf chain for HMAC validation.
         */
        if ((cookie->addr_type == SCTP_CONN_ADDRESS) && (src->sa_family == AF_CONN)) {
                struct sockaddr_conn *sconnp = (struct sockaddr_conn *)src;

                memcpy(cookie->address, &sconnp->sconn_addr, sizeof(void *));
                m_copyback(m, cookie_offset + offsetof(struct sctp_state_cookie, address),
                           (int)sizeof(void *), (caddr_t)&sconnp->sconn_addr);
        }
        if ((cookie->laddr_type == SCTP_CONN_ADDRESS) && (dst->sa_family == AF_CONN)) {
                struct sockaddr_conn *sconnp = (struct sockaddr_conn *)dst;

                memcpy(cookie->laddress, &sconnp->sconn_addr, sizeof(void *));
                m_copyback(m, cookie_offset + offsetof(struct sctp_state_cookie, laddress),
                           (int)sizeof(void *), (caddr_t)&sconnp->sconn_addr);
        }
#endif
        /*
         * split off the signature into its own mbuf (since it should not be
         * calculated in the sctp_hmac_m() call).
         */
        sig_offset = offset + cookie_len - SCTP_SIGNATURE_SIZE;
        m_sig = m_split(m, sig_offset, M_NOWAIT);
        if (m_sig == NULL) {
                /* out of memory or ?? */
                return (NULL);
        }
#ifdef SCTP_MBUF_LOGGING
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
                sctp_log_mbc(m_sig, SCTP_MBUF_SPLIT);
        }
#endif

        /*
         * compute the signature/digest for the cookie
         */
        ep = &(*inp_p)->sctp_ep;
        l_inp = *inp_p;
        /*
         * Drop and re-take the TCB lock around acquiring the INP read
         * lock (INP-before-TCB lock ordering).
         */
        if (l_stcb) {
                SCTP_TCB_UNLOCK(l_stcb);
        }
        SCTP_INP_RLOCK(l_inp);
        if (l_stcb) {
                SCTP_TCB_LOCK(l_stcb);
        }
        /*
         * which cookie is it? Cookies issued before the last secret
         * rotation must be verified with the previous secret key.
         */
        if ((cookie->time_entered.tv_sec < (long)ep->time_of_secret_change) &&
            (ep->current_secret_number != ep->last_secret_number)) {
                /* it's the old cookie */
                (void)sctp_hmac_m(SCTP_HMAC,
                    (uint8_t *)ep->secret_key[(int)ep->last_secret_number],
                    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
        } else {
                /* it's the current cookie */
                (void)sctp_hmac_m(SCTP_HMAC,
                    (uint8_t *)ep->secret_key[(int)ep->current_secret_number],
                    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
        }
        /* get the signature */
        SCTP_INP_RUNLOCK(l_inp);
        sig = (uint8_t *)sctp_m_getptr(m_sig, 0, SCTP_SIGNATURE_SIZE, (uint8_t *)&tmp_sig);
        if (sig == NULL) {
                /* couldn't find signature */
                sctp_m_freem(m_sig);
                return (NULL);
        }
        /*
         * compare the received digest with the computed digest
         * (constant-time compare, see timingsafe_bcmp)
         */
        if (timingsafe_bcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) != 0) {
                /* try the old cookie? */
                if ((cookie->time_entered.tv_sec == (long)ep->time_of_secret_change) &&
                    (ep->current_secret_number != ep->last_secret_number)) {
                        /* compute digest with old */
                        (void)sctp_hmac_m(SCTP_HMAC,
                            (uint8_t *)ep->secret_key[(int)ep->last_secret_number],
                            SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
                        /* compare */
                        if (timingsafe_bcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) == 0)
                                cookie_ok = 1;
                }
        } else {
                cookie_ok = 1;
        }

        /*
         * Now before we continue we must reconstruct our mbuf so that
         * normal processing of any other chunks will work.
         */
        {
                struct mbuf *m_at;

                /* re-append the signature mbuf split off above */
                m_at = m;
                while (SCTP_BUF_NEXT(m_at) != NULL) {
                        m_at = SCTP_BUF_NEXT(m_at);
                }
                SCTP_BUF_NEXT(m_at) = m_sig;
        }

        if (cookie_ok == 0) {
                SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie signature validation failed!\n");
                SCTPDBG(SCTP_DEBUG_INPUT2,
                        "offset = %u, cookie_offset = %u, sig_offset = %u\n",
                        (uint32_t)offset, cookie_offset, sig_offset);
                return (NULL);
        }

        /*
         * check the cookie timestamps to be sure it's not stale
         */
        (void)SCTP_GETTIME_TIMEVAL(&now);
        /* Expire time is in Ticks, so we convert to seconds */
        time_expires.tv_sec = cookie->time_entered.tv_sec + sctp_ticks_to_secs(cookie->cookie_life);
        time_expires.tv_usec = cookie->time_entered.tv_usec;
#if !(defined(__FreeBSD__) && !defined(__Userspace__))
        if (timercmp(&now, &time_expires, >))
#else
        if (timevalcmp(&now, &time_expires, >))
#endif
        {
                /* cookie is stale! send back a STALE_COOKIE error cause */
                struct mbuf *op_err;
                struct sctp_error_stale_cookie *cause;
                struct timeval diff;
                uint32_t staleness;

                op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_stale_cookie),
                                               0, M_NOWAIT, 1, MT_DATA);
                if (op_err == NULL) {
                        /* FOOBAR */
                        return (NULL);
                }
                /* Set the len */
                SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_stale_cookie);
                cause = mtod(op_err, struct sctp_error_stale_cookie *);
                cause->cause.code = htons(SCTP_CAUSE_STALE_COOKIE);
                cause->cause.length = htons((sizeof(struct sctp_paramhdr) +
                                             (sizeof(uint32_t))));
#if !(defined(__FreeBSD__) && !defined(__Userspace__))
                timersub(&now, &time_expires, &diff);
#else
                diff = now;
                timevalsub(&diff, &time_expires);
#endif
                /*
                 * Staleness is reported in microseconds, saturating at
                 * UINT32_MAX on overflow of either the multiply or the add.
                 */
                if ((uint32_t)diff.tv_sec > UINT32_MAX / 1000000) {
                        staleness = UINT32_MAX;
                } else {
                        staleness = diff.tv_sec * 1000000;
                }
                if (UINT32_MAX - staleness >= (uint32_t)diff.tv_usec) {
                        staleness += diff.tv_usec;
                } else {
                        staleness = UINT32_MAX;
                }
                cause->stale_time = htonl(staleness);
                sctp_send_operr_to(src, dst, sh, cookie->peers_vtag, op_err,
#if defined(__FreeBSD__) && !defined(__Userspace__)
                                   mflowtype, mflowid, l_inp->fibnum,
#endif
                                   vrf_id, port);
                return (NULL);
        }
        /*
         * Now we must see with the lookup address if we have an existing
         * asoc. This will only happen if we were in the COOKIE-WAIT state
         * and a INIT collided with us and somewhere the peer sent the
         * cookie on another address besides the single address our assoc
         * had for him. In this case we will have one of the tie-tags set at
         * least AND the address field in the cookie can be used to look it
         * up.
         */
        to = NULL;
        switch (cookie->addr_type) {
#ifdef INET6
        case SCTP_IPV6_ADDRESS:
                memset(&sin6, 0, sizeof(sin6));
                sin6.sin6_family = AF_INET6;
#ifdef HAVE_SIN6_LEN
                sin6.sin6_len = sizeof(sin6);
#endif
                sin6.sin6_port = sh->src_port;
                sin6.sin6_scope_id = cookie->scope_id;
                memcpy(&sin6.sin6_addr.s6_addr, cookie->address,
                       sizeof(sin6.sin6_addr.s6_addr));
                to = (struct sockaddr *)&sin6;
                break;
#endif
#ifdef INET
        case SCTP_IPV4_ADDRESS:
                memset(&sin, 0, sizeof(sin));
                sin.sin_family = AF_INET;
#ifdef HAVE_SIN_LEN
                sin.sin_len = sizeof(sin);
#endif
                sin.sin_port = sh->src_port;
                sin.sin_addr.s_addr = cookie->address[0];
                to = (struct sockaddr *)&sin;
                break;
#endif
#if defined(__Userspace__)
        case SCTP_CONN_ADDRESS:
                memset(&sconn, 0, sizeof(struct sockaddr_conn));
                sconn.sconn_family = AF_CONN;
#ifdef HAVE_SCONN_LEN
                sconn.sconn_len = sizeof(struct sockaddr_conn);
#endif
                sconn.sconn_port = sh->src_port;
                memcpy(&sconn.sconn_addr, cookie->address, sizeof(void *));
                to = (struct sockaddr *)&sconn;
                break;
#endif
        default:
                /* This should not happen */
                return (NULL);
        }
        if (*stcb == NULL) {
                /* Yep, lets check */
                *stcb = sctp_findassociation_ep_addr(inp_p, to, netp, dst, NULL);
                if (*stcb == NULL) {
                        /*
                         * We should have only got back the same inp. If we
                         * got back a different ep we have a problem. The
                         * original findep got back l_inp and now
                         */
                        if (l_inp != *inp_p) {
                                SCTP_PRINTF("Bad problem find_ep got a diff inp then special_locate?\n");
                        }
                } else {
                        if (*locked_tcb == NULL) {
                                /* In this case we found the assoc only
                                 * after we locked the create lock. This means
                                 * we are in a colliding case and we must make
                                 * sure that we unlock the tcb if its one of the
                                 * cases where we throw away the incoming packets.
                                 */
                                *locked_tcb = *stcb;

                                /* We must also increment the inp ref count
                                 * since the ref_count flags was set when we
                                 * did not find the TCB, now we found it which
                                 * reduces the refcount.. we must raise it back
                                 * out to balance it all :-)
                                 */
                                SCTP_INP_INCR_REF((*stcb)->sctp_ep);
                                if ((*stcb)->sctp_ep != l_inp) {
                                        SCTP_PRINTF("Huh? ep:%p diff then l_inp:%p?\n",
                                                    (void *)(*stcb)->sctp_ep, (void *)l_inp);
                                }
                        }
                }
        }

        /* exclude the trailing signature from the cookie length */
        cookie_len -= SCTP_SIGNATURE_SIZE;
        if (*stcb == NULL) {
                /* this is the "normal" case... get a new TCB */
                *stcb = sctp_process_cookie_new(m, iphlen, offset, src, dst, sh,
                                                cookie, cookie_len, *inp_p,
                                                netp, to, &notification,
                                                auth_skipped, auth_offset, auth_len,
#if defined(__FreeBSD__) && !defined(__Userspace__)
                                                mflowtype, mflowid,
#endif
                                                vrf_id, port);
        } else {
                /* this is abnormal... cookie-echo on existing TCB */
                had_a_existing_tcb = 1;
                *stcb = sctp_process_cookie_existing(m, iphlen, offset,
                    src, dst, sh,
                    cookie, cookie_len, *inp_p, *stcb, netp, to,
                    &notification, auth_skipped, auth_offset, auth_len,
#if defined(__FreeBSD__) && !defined(__Userspace__)
                    mflowtype, mflowid,
#endif
                    vrf_id, port);
        }

        if (*stcb == NULL) {
                /* still no TCB... must be bad cookie-echo */
                return (NULL);
        }
#if defined(__FreeBSD__) && !defined(__Userspace__)
        if (*netp != NULL) {
                (*netp)->flowtype = mflowtype;
                (*netp)->flowid = mflowid;
        }
#endif
        /*
         * Ok, we built an association so confirm the address we sent the
         * INIT-ACK to.
         */
        netl = sctp_findnet(*stcb, to);
        /*
         * This code should in theory NOT run but
         */
        if (netl == NULL) {
                /* TSNH! Huh, why do I need to add this address here? */
                if (sctp_add_remote_addr(*stcb, to, NULL, port,
                                         SCTP_DONOT_SETSCOPE, SCTP_IN_COOKIE_PROC)) {
                        return (NULL);
                }
                netl = sctp_findnet(*stcb, to);
        }
        if (netl) {
                if (netl->dest_state & SCTP_ADDR_UNCONFIRMED) {
                        /* arrival of the cookie confirms this address */
                        netl->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
                        (void)sctp_set_primary_addr((*stcb), (struct sockaddr *)NULL,
                                                    netl);
                        send_int_conf = 1;
                }
        }
        sctp_start_net_timers(*stcb);
        if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
                if (!had_a_existing_tcb ||
                    (((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
                        /*
                         * If we have a NEW cookie or the connect never
                         * reached the connected state during collision we
                         * must do the TCP accept thing.
                         */
                        struct socket *so, *oso;
                        struct sctp_inpcb *inp;

                        if (notification == SCTP_NOTIFY_ASSOC_RESTART) {
                                /*
                                 * For a restart we will keep the same
                                 * socket, no need to do anything. I THINK!!
                                 */
                                sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
                                if (send_int_conf) {
                                        sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
                                                        (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
                                }
                                return (m);
                        }
                        /*
                         * Create the new socket via sonewconn(); the TCB
                         * lock is dropped around the call (refcnt held so
                         * the assoc cannot go away).
                         */
                        oso = (*inp_p)->sctp_socket;
                        atomic_add_int(&(*stcb)->asoc.refcnt, 1);
                        SCTP_TCB_UNLOCK((*stcb));
#if defined(__FreeBSD__) && !defined(__Userspace__)
                        CURVNET_SET(oso->so_vnet);
#endif
#if defined(__APPLE__) && !defined(__Userspace__)
                        SCTP_SOCKET_LOCK(oso, 1);
#endif
                        so = sonewconn(oso, 0
#if defined(__APPLE__) && !defined(__Userspace__)
                                       , NULL
#endif
                            );
#if defined(__APPLE__) && !defined(__Userspace__)
                        SCTP_SOCKET_UNLOCK(oso, 1);
#endif
#if defined(__FreeBSD__) && !defined(__Userspace__)
                        CURVNET_RESTORE();
#endif
                        SCTP_TCB_LOCK((*stcb));
                        atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);

                        if (so == NULL) {
                                struct mbuf *op_err;
#if defined(__APPLE__) && !defined(__Userspace__)
                                struct socket *pcb_so;
#endif
                                /* Too many sockets */
                                SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: no room for another socket!\n");
                                op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
                                sctp_abort_association(*inp_p, NULL, m, iphlen,
                                                       src, dst, sh, op_err,
#if defined(__FreeBSD__) && !defined(__Userspace__)
                                                       mflowtype, mflowid,
#endif
                                                       vrf_id, port);
#if defined(__APPLE__) && !defined(__Userspace__)
                                pcb_so = SCTP_INP_SO(*inp_p);
                                atomic_add_int(&(*stcb)->asoc.refcnt, 1);
                                SCTP_TCB_UNLOCK((*stcb));
                                SCTP_SOCKET_LOCK(pcb_so, 1);
                                SCTP_TCB_LOCK((*stcb));
                                atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
#endif
                                (void)sctp_free_assoc(*inp_p, *stcb, SCTP_NORMAL_PROC,
                                                      SCTP_FROM_SCTP_INPUT + SCTP_LOC_23);
#if defined(__APPLE__) && !defined(__Userspace__)
                                SCTP_SOCKET_UNLOCK(pcb_so, 1);
#endif
                                return (NULL);
                        }
                        inp = (struct sctp_inpcb *)so->so_pcb;
                        SCTP_INP_INCR_REF(inp);
                        /*
                         * We add the unbound flag here so that
                         * if we get an soabort() before we get the
                         * move_pcb done, we will properly cleanup.
                         */
                        inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE |
                                           SCTP_PCB_FLAGS_CONNECTED |
                                           SCTP_PCB_FLAGS_IN_TCPPOOL |
                                           SCTP_PCB_FLAGS_UNBOUND |
                                           (SCTP_PCB_COPY_FLAGS & (*inp_p)->sctp_flags) |
                                           SCTP_PCB_FLAGS_DONT_WAKE);
                        /* inherit the listening endpoint's settings */
                        inp->sctp_features = (*inp_p)->sctp_features;
                        inp->sctp_mobility_features = (*inp_p)->sctp_mobility_features;
                        inp->sctp_socket = so;
                        inp->sctp_frag_point = (*inp_p)->sctp_frag_point;
                        inp->max_cwnd = (*inp_p)->max_cwnd;
                        inp->sctp_cmt_on_off = (*inp_p)->sctp_cmt_on_off;
                        inp->ecn_supported = (*inp_p)->ecn_supported;
                        inp->prsctp_supported = (*inp_p)->prsctp_supported;
                        inp->auth_supported = (*inp_p)->auth_supported;
                        inp->asconf_supported = (*inp_p)->asconf_supported;
                        inp->reconfig_supported = (*inp_p)->reconfig_supported;
                        inp->nrsack_supported = (*inp_p)->nrsack_supported;
                        inp->pktdrop_supported = (*inp_p)->pktdrop_supported;
                        inp->partial_delivery_point = (*inp_p)->partial_delivery_point;
                        inp->sctp_context = (*inp_p)->sctp_context;
                        inp->local_strreset_support = (*inp_p)->local_strreset_support;
                        inp->fibnum = (*inp_p)->fibnum;
                        inp->inp_starting_point_for_iterator = NULL;
#if defined(__Userspace__)
                        inp->ulp_info = (*inp_p)->ulp_info;
                        inp->recv_callback = (*inp_p)->recv_callback;
                        inp->send_callback = (*inp_p)->send_callback;
                        inp->send_sb_threshold = (*inp_p)->send_sb_threshold;
#endif
                        /*
                         * copy in the authentication parameters from the
                         * original endpoint
                         */
                        if (inp->sctp_ep.local_hmacs)
                                sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
                        inp->sctp_ep.local_hmacs =
                            sctp_copy_hmaclist((*inp_p)->sctp_ep.local_hmacs);
                        if (inp->sctp_ep.local_auth_chunks)
                                sctp_free_chunklist(inp->sctp_ep.local_auth_chunks);
                        inp->sctp_ep.local_auth_chunks =
                            sctp_copy_chunklist((*inp_p)->sctp_ep.local_auth_chunks);

                        /*
                         * Now we must move it from one hash table to
                         * another and get the tcb in the right place.
                         */

                        /* This is where the one-2-one socket is put into
                         * the accept state waiting for the accept!
                         */
                        if (*stcb) {
                                SCTP_ADD_SUBSTATE(*stcb, SCTP_STATE_IN_ACCEPT_QUEUE);
                        }
                        sctp_move_pcb_and_assoc(*inp_p, inp, *stcb);

                        atomic_add_int(&(*stcb)->asoc.refcnt, 1);
                        SCTP_TCB_UNLOCK((*stcb));

#if defined(__FreeBSD__) && !defined(__Userspace__)
                        sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb,
                                                         0);
#else
                        sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb, M_NOWAIT);
#endif
                        SCTP_TCB_LOCK((*stcb));
                        atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);


                        /* now we must check to see if we were aborted while
                         * the move was going on and the lock/unlock happened.
                         */
                        if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
                                /* yep it was, we leave the
                                 * assoc attached to the socket since
                                 * the sctp_inpcb_free() call will send
                                 * an abort for us.
                                 */
                                SCTP_INP_DECR_REF(inp);
                                return (NULL);
                        }
                        SCTP_INP_DECR_REF(inp);
                        /* Switch over to the new guy */
                        *inp_p = inp;
                        sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
                        if (send_int_conf) {
                                sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
                                                (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
                        }

                        /* Pull it from the incomplete queue and wake the guy */
#if defined(__APPLE__) && !defined(__Userspace__)
                        atomic_add_int(&(*stcb)->asoc.refcnt, 1);
                        SCTP_TCB_UNLOCK((*stcb));
                        SCTP_SOCKET_LOCK(so, 1);
#endif
                        soisconnected(so);
#if defined(__APPLE__) && !defined(__Userspace__)
                        SCTP_TCB_LOCK((*stcb));
                        atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
                        SCTP_SOCKET_UNLOCK(so, 1);
#endif
                        return (m);
                }
        }
        /* one-to-many (UDP-style) socket, or an already-connected collision */
        if (notification) {
                sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
        }
        if (send_int_conf) {
                sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
                                (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
        }
        return (m);
}
3190
static void
sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp SCTP_UNUSED,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
        /*
         * Handle an incoming COOKIE-ACK: move the association from
         * COOKIE-ECHOED to OPEN, update the RTO from the cookie RTT
         * sample, notify the ULP, mark the socket connected (TCP-style),
         * start heartbeat/autoclose timers and flush pending ASCONFs.
         * Finally, toss stored cookies and restart the send timer if
         * data is still outstanding.
         */
        /* cp must not be used, others call this without a c-ack :-) */
        struct sctp_association *asoc;
        struct sctp_tmit_chunk *chk;

        SCTPDBG(SCTP_DEBUG_INPUT2,
                "sctp_handle_cookie_ack: handling COOKIE-ACK\n");
        if ((stcb == NULL) || (net == NULL)) {
                return;
        }

        asoc = &stcb->asoc;
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
                sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
                               asoc->overall_error_count,
                               0,
                               SCTP_FROM_SCTP_INPUT,
                               __LINE__);
        }
        asoc->overall_error_count = 0;
        sctp_stop_all_cookie_timers(stcb);
        /* process according to association state */
        if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED) {
                /* state change only needed when I am in right state */
                SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
                SCTP_SET_STATE(stcb, SCTP_STATE_OPEN);
                sctp_start_net_timers(stcb);
                if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
                        sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
                                         stcb->sctp_ep, stcb, NULL);

                }
                /* update RTO */
                SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
                SCTP_STAT_INCR_GAUGE32(sctps_currestab);
                /*
                 * NOTE(review): overall_error_count was reset to 0 a few
                 * lines above, so this condition is always true here —
                 * confirm whether the intent was to check the value prior
                 * to the reset.
                 */
                if (asoc->overall_error_count == 0) {
                        sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered,
                                           SCTP_RTT_FROM_NON_DATA);
                }
                (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
                sctp_ulp_notify(SCTP_NOTIFY_ASSOC_UP, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
                if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
                    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
#if defined(__APPLE__) && !defined(__Userspace__)
                        struct socket *so;

#endif
                        stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
#if defined(__APPLE__) && !defined(__Userspace__)
                        /* socket lock before TCB lock; hold a refcnt across the gap */
                        so = SCTP_INP_SO(stcb->sctp_ep);
                        atomic_add_int(&stcb->asoc.refcnt, 1);
                        SCTP_TCB_UNLOCK(stcb);
                        SCTP_SOCKET_LOCK(so, 1);
                        SCTP_TCB_LOCK(stcb);
                        atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
                        if ((stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) == 0) {
                                soisconnected(stcb->sctp_socket);
                        }
#if defined(__APPLE__) && !defined(__Userspace__)
                        SCTP_SOCKET_UNLOCK(so, 1);
#endif
                }
                /*
                 * since we did not send a HB make sure we don't double
                 * things
                 */
                net->hb_responded = 1;

                if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
                        /* We don't need to do the asconf thing,
                         * nor hb or autoclose if the socket is closed.
                         */
                        goto closed_socket;
                }

                sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
                                 stcb, net);


                if (stcb->asoc.sctp_autoclose_ticks &&
                    sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTOCLOSE)) {
                        sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
                                         stcb->sctp_ep, stcb, NULL);
                }
                /*
                 * send ASCONF if parameters are pending and ASCONFs are
                 * allowed (eg. addresses changed when init/cookie echo were
                 * in flight)
                 */
                if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_DO_ASCONF)) &&
                    (stcb->asoc.asconf_supported == 1) &&
                    (!TAILQ_EMPTY(&stcb->asoc.asconf_queue))) {
#ifdef SCTP_TIMER_BASED_ASCONF
                        sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
                                         stcb->sctp_ep, stcb,
                                         stcb->asoc.primary_destination);
#else
                        sctp_send_asconf(stcb, stcb->asoc.primary_destination,
                                         SCTP_ADDR_NOT_LOCKED);
#endif
                }
        }
closed_socket:
        /* Toss the cookie if I can */
        sctp_toss_old_cookies(stcb, asoc);
        /* Restart the timer if we have pending data */
        TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
                if (chk->whoTo != NULL) {
                        break;
                }
        }
        /* chk is non-NULL only if the loop broke on a chunk with a dest */
        if (chk != NULL) {
                sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo);
        }
}
3310
static void
sctp_handle_ecn_echo(struct sctp_ecne_chunk *cp,
    struct sctp_tcb *stcb)
{
        /*
         * Handle an incoming ECN-ECHO (ECNE): locate the destination the
         * echoed TSN was sent to, reduce cwnd via the pluggable CC module
         * (at most once per RTT window), and always answer with a CWR.
         * Supports both the current chunk format and the old (shorter)
         * one, which lacks the num_pkts_since_cwr field.
         */
        struct sctp_nets *net;
        struct sctp_tmit_chunk *lchk;
        struct sctp_ecne_chunk bkup;
        uint8_t override_bit;
        uint32_t tsn, window_data_tsn;
        int len;
        unsigned int pkt_cnt;

        len = ntohs(cp->ch.chunk_length);
        if ((len != sizeof(struct sctp_ecne_chunk)) &&
            (len != sizeof(struct old_sctp_ecne_chunk))) {
                return;
        }
        if (len == sizeof(struct old_sctp_ecne_chunk)) {
                /* Its the old format: copy into bkup and default pkt count to 1 */
                memcpy(&bkup, cp, sizeof(struct old_sctp_ecne_chunk));
                bkup.num_pkts_since_cwr = htonl(1);
                cp = &bkup;
        }
        SCTP_STAT_INCR(sctps_recvecne);
        tsn = ntohl(cp->tsn);
        pkt_cnt = ntohl(cp->num_pkts_since_cwr);
        /*
         * window_data_tsn marks the end of the current cwnd-reduction
         * window: the last TSN queued for sending (or sending_seq - 1 if
         * the send queue is empty).
         */
        lchk = TAILQ_LAST(&stcb->asoc.send_queue, sctpchunk_listhead);
        if (lchk == NULL) {
                window_data_tsn = stcb->asoc.sending_seq - 1;
        } else {
                window_data_tsn = lchk->rec.data.tsn;
        }

        /* Find where it was sent to if possible. */
        net = NULL;
        TAILQ_FOREACH(lchk, &stcb->asoc.sent_queue, sctp_next) {
                if (lchk->rec.data.tsn == tsn) {
                        net = lchk->whoTo;
                        net->ecn_prev_cwnd = lchk->rec.data.cwnd_at_send;
                        break;
                }
                /* sent_queue is TSN-ordered; past tsn means it's not there */
                if (SCTP_TSN_GT(lchk->rec.data.tsn, tsn)) {
                        break;
                }
        }
        if (net == NULL) {
                /*
                 * What to do. A previous send of a
                 * CWR was possibly lost. See how old it is, we
                 * may have it marked on the actual net.
                 */
                TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
                        if (tsn == net->last_cwr_tsn) {
                                /* Found him, send it off */
                                break;
                        }
                }
                if (net == NULL) {
                        /*
                         * If we reach here, we need to send a special
                         * CWR that says hey, we did this a long time
                         * ago and you lost the response.
                         */
                        net = TAILQ_FIRST(&stcb->asoc.nets);
                        if (net == NULL) {
                                /* TSNH */
                                return;
                        }
                        override_bit = SCTP_CWR_REDUCE_OVERRIDE;
                } else {
                        override_bit = 0;
                }
        } else {
                override_bit = 0;
        }
        if (SCTP_TSN_GT(tsn, net->cwr_window_tsn) &&
            ((override_bit & SCTP_CWR_REDUCE_OVERRIDE) == 0)) {
                /* JRS - Use the congestion control given in the pluggable CC module */
                stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net, 0, pkt_cnt);
                /*
                 * We reduce once every RTT. So we will only lower cwnd at
                 * the next sending seq i.e. the window_data_tsn
                 */
                net->cwr_window_tsn = window_data_tsn;
                net->ecn_ce_pkt_cnt += pkt_cnt;
                net->lost_cnt = pkt_cnt;
                net->last_cwr_tsn = tsn;
        } else {
                override_bit |= SCTP_CWR_IN_SAME_WINDOW;
                if (SCTP_TSN_GT(tsn, net->last_cwr_tsn) &&
                    ((override_bit & SCTP_CWR_REDUCE_OVERRIDE) == 0)) {
                        /*
                         * Another loss in the same window update how
                         * many marks/packets lost we have had.
                         */
                        int cnt = 1;
                        if (pkt_cnt > net->lost_cnt) {
                                /* Should be the case */
                                cnt = (pkt_cnt - net->lost_cnt);
                                net->ecn_ce_pkt_cnt += cnt;
                        }
                        net->lost_cnt = pkt_cnt;
                        net->last_cwr_tsn = tsn;
                        /*
                         * Most CC functions will ignore this call, since we are in-window
                         * yet of the initial CE the peer saw.
                         */
                        stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net, 1, cnt);
                }
        }
        /*
         * We always send a CWR this way if our previous one was lost our
         * peer will get an update, or if it is not time again to reduce we
         * still get the cwr to the peer. Note we set the override when we
         * could not find the TSN on the chunk or the destination network.
         */
        sctp_send_cwr(stcb, net, net->last_cwr_tsn, override_bit);
}
3429
3430 static void
3431 sctp_handle_ecn_cwr(struct sctp_cwr_chunk *cp, struct sctp_tcb *stcb, struct sctp_nets *net)
3432 {
3433 /*
3434 * Here we get a CWR from the peer. We must look in the outqueue and
3435 * make sure that we have a covered ECNE in the control chunk part.
3436 * If so remove it.
3437 */
3438 struct sctp_tmit_chunk *chk, *nchk;
3439 struct sctp_ecne_chunk *ecne;
3440 int override;
3441 uint32_t cwr_tsn;
3442
3443 cwr_tsn = ntohl(cp->tsn);
3444 override = cp->ch.chunk_flags & SCTP_CWR_REDUCE_OVERRIDE;
3445 TAILQ_FOREACH_SAFE(chk, &stcb->asoc.control_send_queue, sctp_next, nchk) {
3446 if (chk->rec.chunk_id.id != SCTP_ECN_ECHO) {
3447 continue;
3448 }
3449 if ((override == 0) && (chk->whoTo != net)) {
3450 /* Must be from the right src unless override is set */
3451 continue;
3452 }
3453 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
3454 if (SCTP_TSN_GE(cwr_tsn, ntohl(ecne->tsn))) {
3455 /* this covers this ECNE, we can remove it */
3456 stcb->asoc.ecn_echo_cnt_onq--;
3457 TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk,
3458 sctp_next);
3459 stcb->asoc.ctrl_queue_cnt--;
3460 sctp_m_freem(chk->data);
3461 chk->data = NULL;
3462 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
3463 if (override == 0) {
3464 break;
3465 }
3466 }
3467 }
3468 }
3469
static void
sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp SCTP_UNUSED,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
        /*
         * Handle an incoming SHUTDOWN-COMPLETE: only valid in the
         * SHUTDOWN-ACK-SENT state. Notifies the ULP, stops the
         * shutdown-ack timer, and frees the association (TCB).
         * Note: the TCB lock is released on all paths (either explicitly
         * on the ignore path or by sctp_free_assoc()).
         */
#if defined(__APPLE__) && !defined(__Userspace__)
        struct socket *so;
#endif

        SCTPDBG(SCTP_DEBUG_INPUT2,
                "sctp_handle_shutdown_complete: handling SHUTDOWN-COMPLETE\n");
        if (stcb == NULL)
                return;

        /* process according to association state */
        if (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
                /* unexpected SHUTDOWN-COMPLETE... so ignore... */
                SCTPDBG(SCTP_DEBUG_INPUT2,
                        "sctp_handle_shutdown_complete: not in SCTP_STATE_SHUTDOWN_ACK_SENT --- ignore\n");
                SCTP_TCB_UNLOCK(stcb);
                return;
        }
        /* notify upper layer protocol */
        if (stcb->sctp_socket) {
                sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
        }
#ifdef INVARIANTS
        /* by this state all queues must have drained */
        if (!TAILQ_EMPTY(&stcb->asoc.send_queue) ||
            !TAILQ_EMPTY(&stcb->asoc.sent_queue) ||
            sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED)) {
                panic("Queues are not empty when handling SHUTDOWN-COMPLETE");
        }
#endif
        /* stop the timer */
        sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep, stcb, net,
                        SCTP_FROM_SCTP_INPUT + SCTP_LOC_24);
        SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
        /* free the TCB */
        SCTPDBG(SCTP_DEBUG_INPUT2,
                "sctp_handle_shutdown_complete: calls free-asoc\n");
#if defined(__APPLE__) && !defined(__Userspace__)
        /* socket lock before TCB lock; hold a refcnt across the gap */
        so = SCTP_INP_SO(stcb->sctp_ep);
        atomic_add_int(&stcb->asoc.refcnt, 1);
        SCTP_TCB_UNLOCK(stcb);
        SCTP_SOCKET_LOCK(so, 1);
        SCTP_TCB_LOCK(stcb);
        atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
        (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
                              SCTP_FROM_SCTP_INPUT + SCTP_LOC_25);
#if defined(__APPLE__) && !defined(__Userspace__)
        SCTP_SOCKET_UNLOCK(so, 1);
#endif
        return;
}
3524
3525 static int
3526 process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
3527 struct sctp_nets *net, uint8_t flg)
3528 {
3529 switch (desc->chunk_type) {
3530 case SCTP_DATA:
3531 /* find the tsn to resend (possibly */
3532 {
3533 uint32_t tsn;
3534 struct sctp_tmit_chunk *tp1;
3535
3536 tsn = ntohl(desc->tsn_ifany);
3537 TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
3538 if (tp1->rec.data.tsn == tsn) {
3539 /* found it */
3540 break;
3541 }
3542 if (SCTP_TSN_GT(tp1->rec.data.tsn, tsn)) {
3543 /* not found */
3544 tp1 = NULL;
3545 break;
3546 }
3547 }
3548 if (tp1 == NULL) {
3549 /*
3550 * Do it the other way , aka without paying
3551 * attention to queue seq order.
3552 */
3553 SCTP_STAT_INCR(sctps_pdrpdnfnd);
3554 TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
3555 if (tp1->rec.data.tsn == tsn) {
3556 /* found it */
3557 break;
3558 }
3559 }
3560 }
3561 if (tp1 == NULL) {
3562 SCTP_STAT_INCR(sctps_pdrptsnnf);
3563 }
3564 if ((tp1) && (tp1->sent < SCTP_DATAGRAM_ACKED)) {
3565 uint8_t *ddp;
3566
3567 if (((flg & SCTP_BADCRC) == 0) &&
3568 ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
3569 return (0);
3570 }
3571 if ((stcb->asoc.peers_rwnd == 0) &&
3572 ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
3573 SCTP_STAT_INCR(sctps_pdrpdiwnp);
3574 return (0);
3575 }
3576 if (stcb->asoc.peers_rwnd == 0 &&
3577 (flg & SCTP_FROM_MIDDLE_BOX)) {
3578 SCTP_STAT_INCR(sctps_pdrpdizrw);
3579 return (0);
3580 }
3581 ddp = (uint8_t *) (mtod(tp1->data, caddr_t) +
3582 sizeof(struct sctp_data_chunk));
3583 {
3584 unsigned int iii;
3585
3586 for (iii = 0; iii < sizeof(desc->data_bytes);
3587 iii++) {
3588 if (ddp[iii] != desc->data_bytes[iii]) {
3589 SCTP_STAT_INCR(sctps_pdrpbadd);
3590 return (-1);
3591 }
3592 }
3593 }
3594
3595 if (tp1->do_rtt) {
3596 /*
3597 * this guy had a RTO calculation
3598 * pending on it, cancel it
3599 */
3600 if (tp1->whoTo->rto_needed == 0) {
3601 tp1->whoTo->rto_needed = 1;
3602 }
3603 tp1->do_rtt = 0;
3604 }
3605 SCTP_STAT_INCR(sctps_pdrpmark);
3606 if (tp1->sent != SCTP_DATAGRAM_RESEND)
3607 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3608 /*
3609 * mark it as if we were doing a FR, since
3610 * we will be getting gap ack reports behind
3611 * the info from the router.
3612 */
3613 tp1->rec.data.doing_fast_retransmit = 1;
3614 /*
3615 * mark the tsn with what sequences can
3616 * cause a new FR.
3617 */
3618 if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
3619 tp1->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
3620 } else {
3621 tp1->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.tsn;
3622 }
3623
3624 /* restart the timer */
3625 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
3626 stcb, tp1->whoTo,
3627 SCTP_FROM_SCTP_INPUT + SCTP_LOC_26);
3628 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
3629 stcb, tp1->whoTo);
3630
3631 /* fix counts and things */
3632 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3633 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PDRP,
3634 tp1->whoTo->flight_size,
3635 tp1->book_size,
3636 (uint32_t)(uintptr_t)stcb,
3637 tp1->rec.data.tsn);
3638 }
3639 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3640 sctp_flight_size_decrease(tp1);
3641 sctp_total_flight_decrease(stcb, tp1);
3642 }
3643 tp1->sent = SCTP_DATAGRAM_RESEND;
3644 } {
3645 /* audit code */
3646 unsigned int audit;
3647
3648 audit = 0;
3649 TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
3650 if (tp1->sent == SCTP_DATAGRAM_RESEND)
3651 audit++;
3652 }
3653 TAILQ_FOREACH(tp1, &stcb->asoc.control_send_queue,
3654 sctp_next) {
3655 if (tp1->sent == SCTP_DATAGRAM_RESEND)
3656 audit++;
3657 }
3658 if (audit != stcb->asoc.sent_queue_retran_cnt) {
3659 SCTP_PRINTF("**Local Audit finds cnt:%d asoc cnt:%d\n",
3660 audit, stcb->asoc.sent_queue_retran_cnt);
3661 #ifndef SCTP_AUDITING_ENABLED
3662 stcb->asoc.sent_queue_retran_cnt = audit;
3663 #endif
3664 }
3665 }
3666 }
3667 break;
3668 case SCTP_ASCONF:
3669 {
3670 struct sctp_tmit_chunk *asconf;
3671
3672 TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
3673 sctp_next) {
3674 if (asconf->rec.chunk_id.id == SCTP_ASCONF) {
3675 break;
3676 }
3677 }
3678 if (asconf) {
3679 if (asconf->sent != SCTP_DATAGRAM_RESEND)
3680 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3681 asconf->sent = SCTP_DATAGRAM_RESEND;
3682 asconf->snd_count--;
3683 }
3684 }
3685 break;
3686 case SCTP_INITIATION:
3687 /* resend the INIT */
3688 stcb->asoc.dropped_special_cnt++;
3689 if (stcb->asoc.dropped_special_cnt < SCTP_RETRY_DROPPED_THRESH) {
3690 /*
3691 * If we can get it in, in a few attempts we do
3692 * this, otherwise we let the timer fire.
3693 */
3694 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep,
3695 stcb, net,
3696 SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
3697 sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
3698 }
3699 break;
3700 case SCTP_SELECTIVE_ACK:
3701 case SCTP_NR_SELECTIVE_ACK:
3702 /* resend the sack */
3703 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
3704 break;
3705 case SCTP_HEARTBEAT_REQUEST:
3706 /* resend a demand HB */
3707 if ((stcb->asoc.overall_error_count + 3) < stcb->asoc.max_send_times) {
3708 /* Only retransmit if we KNOW we wont destroy the tcb */
3709 sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
3710 }
3711 break;
3712 case SCTP_SHUTDOWN:
3713 sctp_send_shutdown(stcb, net);
3714 break;
3715 case SCTP_SHUTDOWN_ACK:
3716 sctp_send_shutdown_ack(stcb, net);
3717 break;
3718 case SCTP_COOKIE_ECHO:
3719 {
3720 struct sctp_tmit_chunk *cookie;
3721
3722 cookie = NULL;
3723 TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue,
3724 sctp_next) {
3725 if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
3726 break;
3727 }
3728 }
3729 if (cookie) {
3730 if (cookie->sent != SCTP_DATAGRAM_RESEND)
3731 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3732 cookie->sent = SCTP_DATAGRAM_RESEND;
3733 sctp_stop_all_cookie_timers(stcb);
3734 }
3735 }
3736 break;
3737 case SCTP_COOKIE_ACK:
3738 sctp_send_cookie_ack(stcb);
3739 break;
3740 case SCTP_ASCONF_ACK:
3741 /* resend last asconf ack */
3742 sctp_send_asconf_ack(stcb);
3743 break;
3744 case SCTP_IFORWARD_CUM_TSN:
3745 case SCTP_FORWARD_CUM_TSN:
3746 send_forward_tsn(stcb, &stcb->asoc);
3747 break;
3748 /* can't do anything with these */
3749 case SCTP_PACKET_DROPPED:
3750 case SCTP_INITIATION_ACK: /* this should not happen */
3751 case SCTP_HEARTBEAT_ACK:
3752 case SCTP_ABORT_ASSOCIATION:
3753 case SCTP_OPERATION_ERROR:
3754 case SCTP_SHUTDOWN_COMPLETE:
3755 case SCTP_ECN_ECHO:
3756 case SCTP_ECN_CWR:
3757 default:
3758 break;
3759 }
3760 return (0);
3761 }
3762
3763 void
3764 sctp_reset_in_stream(struct sctp_tcb *stcb, uint32_t number_entries, uint16_t *list)
3765 {
3766 uint32_t i;
3767 uint16_t temp;
3768
3769 /*
3770 * We set things to 0xffffffff since this is the last delivered sequence
3771 * and we will be sending in 0 after the reset.
3772 */
3773
3774 if (number_entries) {
3775 for (i = 0; i < number_entries; i++) {
3776 temp = ntohs(list[i]);
3777 if (temp >= stcb->asoc.streamincnt) {
3778 continue;
3779 }
3780 stcb->asoc.strmin[temp].last_mid_delivered = 0xffffffff;
3781 }
3782 } else {
3783 list = NULL;
3784 for (i = 0; i < stcb->asoc.streamincnt; i++) {
3785 stcb->asoc.strmin[i].last_mid_delivered = 0xffffffff;
3786 }
3787 }
3788 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_RECV, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
3789 }
3790
3791 static void
3792 sctp_reset_out_streams(struct sctp_tcb *stcb, uint32_t number_entries, uint16_t *list)
3793 {
3794 uint32_t i;
3795 uint16_t temp;
3796
3797 if (number_entries > 0) {
3798 for (i = 0; i < number_entries; i++) {
3799 temp = ntohs(list[i]);
3800 if (temp >= stcb->asoc.streamoutcnt) {
3801 /* no such stream */
3802 continue;
3803 }
3804 stcb->asoc.strmout[temp].next_mid_ordered = 0;
3805 stcb->asoc.strmout[temp].next_mid_unordered = 0;
3806 }
3807 } else {
3808 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3809 stcb->asoc.strmout[i].next_mid_ordered = 0;
3810 stcb->asoc.strmout[i].next_mid_unordered = 0;
3811 }
3812 }
3813 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
3814 }
3815
3816 static void
3817 sctp_reset_clear_pending(struct sctp_tcb *stcb, uint32_t number_entries, uint16_t *list)
3818 {
3819 uint32_t i;
3820 uint16_t temp;
3821
3822 if (number_entries > 0) {
3823 for (i = 0; i < number_entries; i++) {
3824 temp = ntohs(list[i]);
3825 if (temp >= stcb->asoc.streamoutcnt) {
3826 /* no such stream */
3827 continue;
3828 }
3829 stcb->asoc.strmout[temp].state = SCTP_STREAM_OPEN;
3830 }
3831 } else {
3832 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3833 stcb->asoc.strmout[i].state = SCTP_STREAM_OPEN;
3834 }
3835 }
3836 }
3837
3838
3839 struct sctp_stream_reset_request *
3840 sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk **bchk)
3841 {
3842 struct sctp_association *asoc;
3843 struct sctp_chunkhdr *ch;
3844 struct sctp_stream_reset_request *r;
3845 struct sctp_tmit_chunk *chk;
3846 int len, clen;
3847
3848 asoc = &stcb->asoc;
3849 if (TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
3850 asoc->stream_reset_outstanding = 0;
3851 return (NULL);
3852 }
3853 if (stcb->asoc.str_reset == NULL) {
3854 asoc->stream_reset_outstanding = 0;
3855 return (NULL);
3856 }
3857 chk = stcb->asoc.str_reset;
3858 if (chk->data == NULL) {
3859 return (NULL);
3860 }
3861 if (bchk) {
3862 /* he wants a copy of the chk pointer */
3863 *bchk = chk;
3864 }
3865 clen = chk->send_size;
3866 ch = mtod(chk->data, struct sctp_chunkhdr *);
3867 r = (struct sctp_stream_reset_request *)(ch + 1);
3868 if (ntohl(r->request_seq) == seq) {
3869 /* found it */
3870 return (r);
3871 }
3872 len = SCTP_SIZE32(ntohs(r->ph.param_length));
3873 if (clen > (len + (int)sizeof(struct sctp_chunkhdr))) {
3874 /* move to the next one, there can only be a max of two */
3875 r = (struct sctp_stream_reset_request *)((caddr_t)r + len);
3876 if (ntohl(r->request_seq) == seq) {
3877 return (r);
3878 }
3879 }
3880 /* that seq is not here */
3881 return (NULL);
3882 }
3883
3884 static void
3885 sctp_clean_up_stream_reset(struct sctp_tcb *stcb)
3886 {
3887 struct sctp_association *asoc;
3888 struct sctp_tmit_chunk *chk;
3889
3890 asoc = &stcb->asoc;
3891 chk = asoc->str_reset;
3892 if (chk == NULL) {
3893 return;
3894 }
3895 asoc->str_reset = NULL;
3896 sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb,
3897 NULL, SCTP_FROM_SCTP_INPUT + SCTP_LOC_28);
3898 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
3899 asoc->ctrl_queue_cnt--;
3900 if (chk->data) {
3901 sctp_m_freem(chk->data);
3902 chk->data = NULL;
3903 }
3904 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
3905 }
3906
3907
3908 static int
3909 sctp_handle_stream_reset_response(struct sctp_tcb *stcb,
3910 uint32_t seq, uint32_t action,
3911 struct sctp_stream_reset_response *respin)
3912 {
3913 uint16_t type;
3914 int lparam_len;
3915 struct sctp_association *asoc = &stcb->asoc;
3916 struct sctp_tmit_chunk *chk;
3917 struct sctp_stream_reset_request *req_param;
3918 struct sctp_stream_reset_out_request *req_out_param;
3919 struct sctp_stream_reset_in_request *req_in_param;
3920 uint32_t number_entries;
3921
3922 if (asoc->stream_reset_outstanding == 0) {
3923 /* duplicate */
3924 return (0);
3925 }
3926 if (seq == stcb->asoc.str_reset_seq_out) {
3927 req_param = sctp_find_stream_reset(stcb, seq, &chk);
3928 if (req_param != NULL) {
3929 stcb->asoc.str_reset_seq_out++;
3930 type = ntohs(req_param->ph.param_type);
3931 lparam_len = ntohs(req_param->ph.param_length);
3932 if (type == SCTP_STR_RESET_OUT_REQUEST) {
3933 int no_clear = 0;
3934
3935 req_out_param = (struct sctp_stream_reset_out_request *)req_param;
3936 number_entries = (lparam_len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t);
3937 asoc->stream_reset_out_is_outstanding = 0;
3938 if (asoc->stream_reset_outstanding)
3939 asoc->stream_reset_outstanding--;
3940 if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) {
3941 /* do it */
3942 sctp_reset_out_streams(stcb, number_entries, req_out_param->list_of_streams);
3943 } else if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
3944 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_DENIED_OUT, stcb, number_entries, req_out_param->list_of_streams, SCTP_SO_NOT_LOCKED);
3945 } else if (action == SCTP_STREAM_RESET_RESULT_IN_PROGRESS) {
3946 /* Set it up so we don't stop retransmitting */
3947 asoc->stream_reset_outstanding++;
3948 stcb->asoc.str_reset_seq_out--;
3949 asoc->stream_reset_out_is_outstanding = 1;
3950 no_clear = 1;
3951 } else {
3952 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_OUT, stcb, number_entries, req_out_param->list_of_streams, SCTP_SO_NOT_LOCKED);
3953 }
3954 if (no_clear == 0) {
3955 sctp_reset_clear_pending(stcb, number_entries, req_out_param->list_of_streams);
3956 }
3957 } else if (type == SCTP_STR_RESET_IN_REQUEST) {
3958 req_in_param = (struct sctp_stream_reset_in_request *)req_param;
3959 number_entries = (lparam_len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t);
3960 if (asoc->stream_reset_outstanding)
3961 asoc->stream_reset_outstanding--;
3962 if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
3963 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_DENIED_IN, stcb,
3964 number_entries, req_in_param->list_of_streams, SCTP_SO_NOT_LOCKED);
3965 } else if (action != SCTP_STREAM_RESET_RESULT_PERFORMED) {
3966 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_IN, stcb,
3967 number_entries, req_in_param->list_of_streams, SCTP_SO_NOT_LOCKED);
3968 }
3969 } else if (type == SCTP_STR_RESET_ADD_OUT_STREAMS) {
3970 /* Ok we now may have more streams */
3971 int num_stream;
3972
3973 num_stream = stcb->asoc.strm_pending_add_size;
3974 if (num_stream > (stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt)) {
3975 /* TSNH */
3976 num_stream = stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt;
3977 }
3978 stcb->asoc.strm_pending_add_size = 0;
3979 if (asoc->stream_reset_outstanding)
3980 asoc->stream_reset_outstanding--;
3981 if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) {
3982 /* Put the new streams into effect */
3983 int i;
3984 for ( i = asoc->streamoutcnt; i< (asoc->streamoutcnt + num_stream); i++) {
3985 asoc->strmout[i].state = SCTP_STREAM_OPEN;
3986 }
3987 asoc->streamoutcnt += num_stream;
3988 sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt, 0);
3989 } else if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
3990 sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
3991 SCTP_STREAM_CHANGE_DENIED);
3992 } else {
3993 sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
3994 SCTP_STREAM_CHANGE_FAILED);
3995 }
3996 } else if (type == SCTP_STR_RESET_ADD_IN_STREAMS) {
3997 if (asoc->stream_reset_outstanding)
3998 asoc->stream_reset_outstanding--;
3999 if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
4000 sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
4001 SCTP_STREAM_CHANGE_DENIED);
4002 } else if (action != SCTP_STREAM_RESET_RESULT_PERFORMED) {
4003 sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
4004 SCTP_STREAM_CHANGE_FAILED);
4005 }
4006 } else if (type == SCTP_STR_RESET_TSN_REQUEST) {
4007 /**
4008 * a) Adopt the new in tsn.
4009 * b) reset the map
4010 * c) Adopt the new out-tsn
4011 */
4012 struct sctp_stream_reset_response_tsn *resp;
4013 struct sctp_forward_tsn_chunk fwdtsn;
4014 int abort_flag = 0;
4015 if (respin == NULL) {
4016 /* huh ? */
4017 return (0);
4018 }
4019 if (ntohs(respin->ph.param_length) < sizeof(struct sctp_stream_reset_response_tsn)) {
4020 return (0);
4021 }
4022 if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) {
4023 resp = (struct sctp_stream_reset_response_tsn *)respin;
4024 asoc->stream_reset_outstanding--;
4025 fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
4026 fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
4027 fwdtsn.new_cumulative_tsn = htonl(ntohl(resp->senders_next_tsn) - 1);
4028 sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
4029 if (abort_flag) {
4030 return (1);
4031 }
4032 stcb->asoc.highest_tsn_inside_map = (ntohl(resp->senders_next_tsn) - 1);
4033 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
4034 sctp_log_map(0, 7, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
4035 }
4036
4037 stcb->asoc.tsn_last_delivered = stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
4038 stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn);
4039 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
4040
4041 stcb->asoc.highest_tsn_inside_nr_map = stcb->asoc.highest_tsn_inside_map;
4042 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
4043
4044 stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn);
4045 stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn;
4046
4047 sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
4048 sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
4049 sctp_notify_stream_reset_tsn(stcb, stcb->asoc.sending_seq, (stcb->asoc.mapping_array_base_tsn + 1), 0);
4050 } else if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
4051 sctp_notify_stream_reset_tsn(stcb, stcb->asoc.sending_seq, (stcb->asoc.mapping_array_base_tsn + 1),
4052 SCTP_ASSOC_RESET_DENIED);
4053 } else {
4054 sctp_notify_stream_reset_tsn(stcb, stcb->asoc.sending_seq, (stcb->asoc.mapping_array_base_tsn + 1),
4055 SCTP_ASSOC_RESET_FAILED);
4056 }
4057 }
4058 /* get rid of the request and get the request flags */
4059 if (asoc->stream_reset_outstanding == 0) {
4060 sctp_clean_up_stream_reset(stcb);
4061 }
4062 }
4063 }
4064 if (asoc->stream_reset_outstanding == 0) {
4065 sctp_send_stream_reset_out_if_possible(stcb, SCTP_SO_NOT_LOCKED);
4066 }
4067 return (0);
4068 }
4069
/*
 * Handle a peer's INCOMING-SSN reset request: the peer asks us to
 * reset (some of) OUR outgoing streams.  If the request sequence
 * number is current, either mark the listed streams (or all of them
 * when the list is empty) SCTP_STREAM_RESET_PENDING, or record a
 * DENIED/ERR_IN_PROGRESS result; the result is appended to 'chk'.
 * Replays of the last two sequence numbers re-echo the cached result.
 */
static void
sctp_handle_str_reset_request_in(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_in_request *req, int trunc)
{
	uint32_t seq;
	int len, i;
	int number_entries;
	uint16_t temp;

	/*
	 * peer wants me to send a str-reset to him for my outgoing seq's if
	 * seq_in is right.
	 */
	struct sctp_association *asoc = &stcb->asoc;

	seq = ntohl(req->request_seq);
	if (asoc->str_reset_seq_in == seq) {
		/* Age the result history before recording a new result. */
		asoc->last_reset_action[1] = asoc->last_reset_action[0];
		if (!(asoc->local_strreset_support & SCTP_ENABLE_RESET_STREAM_REQ)) {
			/* Stream reset is not enabled locally. */
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else if (trunc) {
			/* Can't do it, since they exceeded our buffer size */
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else if (stcb->asoc.stream_reset_out_is_outstanding == 0) {
			len = ntohs(req->ph.param_length);
			number_entries = ((len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t));
			if (number_entries) {
				for (i = 0; i < number_entries; i++) {
					temp = ntohs(req->list_of_streams[i]);
					if (temp >= stcb->asoc.streamoutcnt) {
						/* Unknown stream id: reject the whole request. */
						asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
						goto bad_boy;
					}
					/*
					 * NOTE: converts the list entry to
					 * host byte order IN PLACE; the
					 * second loop below relies on this.
					 */
					req->list_of_streams[i] = temp;
				}
				for (i = 0; i < number_entries; i++) {
					if (stcb->asoc.strmout[req->list_of_streams[i]].state == SCTP_STREAM_OPEN) {
						stcb->asoc.strmout[req->list_of_streams[i]].state = SCTP_STREAM_RESET_PENDING;
					}
				}
			} else {
				/* Its all */
				for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
					if (stcb->asoc.strmout[i].state == SCTP_STREAM_OPEN)
						stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_PENDING;
				}
			}
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
		} else {
			/* Can't do it, since we have sent one out */
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_ERR_IN_PROGRESS;
		}
bad_boy:
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
		asoc->str_reset_seq_in++;
	} else if (asoc->str_reset_seq_in - 1 == seq) {
		/* Replay of the previous request: echo the cached result. */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
	} else if (asoc->str_reset_seq_in - 2 == seq) {
		/* Replay of the request before that: echo the older result. */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
	}
	sctp_send_stream_reset_out_if_possible(stcb, SCTP_SO_NOT_LOCKED);
}
4135
/*
 * Handle a peer's SSN/TSN reset request (association reset).  On a
 * current sequence number: process an artificial FORWARD-TSN covering
 * everything received, advance the inbound map by
 * SCTP_STREAM_RESET_TSN_DELTA, clear both mapping arrays, bump our
 * sending_seq, reset all in/out streams and record historical values
 * for replays.  Returns 1 if the forward-TSN processing aborted the
 * association, 0 otherwise.
 */
static int
sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_tsn_request *req)
{
	/* reset all in and out and update the tsn */
	/*
	 * A) reset my str-seq's on in and out. B) Select a receive next,
	 * and set cum-ack to it. Also process this selected number as a
	 * fwd-tsn as well. C) set in the response my next sending seq.
	 */
	struct sctp_forward_tsn_chunk fwdtsn;
	struct sctp_association *asoc = &stcb->asoc;
	int abort_flag = 0;
	uint32_t seq;

	seq = ntohl(req->request_seq);
	if (asoc->str_reset_seq_in == seq) {
		/* Age the result history before recording a new result. */
		asoc->last_reset_action[1] = stcb->asoc.last_reset_action[0];
		if (!(asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ)) {
			/* Association reset is not enabled locally. */
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else {
			/* Build a local FORWARD-TSN up to the highest TSN seen. */
			fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
			fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
			fwdtsn.ch.chunk_flags = 0;
			fwdtsn.new_cumulative_tsn = htonl(stcb->asoc.highest_tsn_inside_map + 1);
			sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
			if (abort_flag) {
				return (1);
			}
			asoc->highest_tsn_inside_map += SCTP_STREAM_RESET_TSN_DELTA;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
				sctp_log_map(0, 10, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
			}
			/* Restart inbound TSN tracking above the new base. */
			asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
			asoc->mapping_array_base_tsn = asoc->highest_tsn_inside_map + 1;
			memset(asoc->mapping_array, 0, asoc->mapping_array_size);
			asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map;
			memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
			atomic_add_int(&asoc->sending_seq, 1);
			/* save off historical data for retrans */
			asoc->last_sending_seq[1] = asoc->last_sending_seq[0];
			asoc->last_sending_seq[0] = asoc->sending_seq;
			asoc->last_base_tsnsent[1] = asoc->last_base_tsnsent[0];
			asoc->last_base_tsnsent[0] = asoc->mapping_array_base_tsn;
			sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
			sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
			sctp_notify_stream_reset_tsn(stcb, asoc->sending_seq, (asoc->mapping_array_base_tsn + 1), 0);
		}
		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
		    asoc->last_sending_seq[0], asoc->last_base_tsnsent[0]);
		asoc->str_reset_seq_in++;
	} else if (asoc->str_reset_seq_in - 1 == seq) {
		/* Replay of the previous request: echo the cached result. */
		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
		    asoc->last_sending_seq[0], asoc->last_base_tsnsent[0]);
	} else if (asoc->str_reset_seq_in - 2 == seq) {
		/* Replay of the request before that: echo the older result. */
		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[1],
		    asoc->last_sending_seq[1], asoc->last_base_tsnsent[1]);
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
	}
	return (0);
}
4200
4201 static void
4202 sctp_handle_str_reset_request_out(struct sctp_tcb *stcb,
4203 struct sctp_tmit_chunk *chk,
4204 struct sctp_stream_reset_out_request *req, int trunc)
4205 {
4206 uint32_t seq, tsn;
4207 int number_entries, len;
4208 struct sctp_association *asoc = &stcb->asoc;
4209
4210 seq = ntohl(req->request_seq);
4211
4212 /* now if its not a duplicate we process it */
4213 if (asoc->str_reset_seq_in == seq) {
4214 len = ntohs(req->ph.param_length);
4215 number_entries = ((len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t));
4216 /*
4217 * the sender is resetting, handle the list issue.. we must
4218 * a) verify if we can do the reset, if so no problem b) If
4219 * we can't do the reset we must copy the request. c) queue
4220 * it, and setup the data in processor to trigger it off
4221 * when needed and dequeue all the queued data.
4222 */
4223 tsn = ntohl(req->send_reset_at_tsn);
4224
4225 /* move the reset action back one */
4226 asoc->last_reset_action[1] = asoc->last_reset_action[0];
4227 if (!(asoc->local_strreset_support & SCTP_ENABLE_RESET_STREAM_REQ)) {
4228 asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
4229 } else if (trunc) {
4230 asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
4231 } else if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
4232 /* we can do it now */
4233 sctp_reset_in_stream(stcb, number_entries, req->list_of_streams);
4234 asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
4235 } else {
4236 /*
4237 * we must queue it up and thus wait for the TSN's
4238 * to arrive that are at or before tsn
4239 */
4240 struct sctp_stream_reset_list *liste;
4241 int siz;
4242
4243 siz = sizeof(struct sctp_stream_reset_list) + (number_entries * sizeof(uint16_t));
4244 SCTP_MALLOC(liste, struct sctp_stream_reset_list *,
4245 siz, SCTP_M_STRESET);
4246 if (liste == NULL) {
4247 /* gak out of memory */
4248 asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
4249 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
4250 return;
4251 }
4252 liste->seq = seq;
4253 liste->tsn = tsn;
4254 liste->number_entries = number_entries;
4255 memcpy(&liste->list_of_streams, req->list_of_streams, number_entries * sizeof(uint16_t));
4256 TAILQ_INSERT_TAIL(&asoc->resetHead, liste, next_resp);
4257 asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_IN_PROGRESS;
4258 }
4259 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
4260 asoc->str_reset_seq_in++;
4261 } else if ((asoc->str_reset_seq_in - 1) == seq) {
4262 /*
4263 * one seq back, just echo back last action since my
4264 * response was lost.
4265 */
4266 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
4267 } else if ((asoc->str_reset_seq_in - 2) == seq) {
4268 /*
4269 * two seq back, just echo back last action since my
4270 * response was lost.
4271 */
4272 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
4273 } else {
4274 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
4275 }
4276 }
4277
/*
 * Handle the peer's ADD-OUTGOING-STREAMS request (the peer wants to
 * add streams it sends on, so WE must grow our incoming stream
 * array).  On success the old strmin[] array is replaced by a larger
 * one: existing per-stream state and queued chunks are migrated, the
 * new streams are initialized empty, and the ULP is notified.
 * Replays of the last two sequence numbers re-echo the cached result.
 */
static void
sctp_handle_str_reset_add_strm(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_add_strm *str_add)
{
	/*
	 * Peer is requesting to add more streams.
	 * If its within our max-streams we will
	 * allow it.
	 */
	uint32_t num_stream, i;
	uint32_t seq;
	struct sctp_association *asoc = &stcb->asoc;
	struct sctp_queued_to_read *ctl, *nctl;

	/* Get the number. */
	seq = ntohl(str_add->request_seq);
	num_stream = ntohs(str_add->number_of_streams);
	/* Now what would be the new total? */
	if (asoc->str_reset_seq_in == seq) {
		/* num_stream becomes the proposed total incoming count. */
		num_stream += stcb->asoc.streamincnt;
		/* Age the result history before recording a new result. */
		stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
		if (!(asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ)) {
			/* Stream change is not enabled locally. */
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else if ((num_stream > stcb->asoc.max_inbound_streams) ||
		    (num_stream > 0xffff)) {
			/* We must reject it, they asked for too many. */
	denied:
			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else {
			/* Ok, we can do that :-) */
			struct sctp_stream_in *oldstrm;

			/* save off the old */
			oldstrm = stcb->asoc.strmin;
			SCTP_MALLOC(stcb->asoc.strmin, struct sctp_stream_in *,
			    (num_stream * sizeof(struct sctp_stream_in)),
			    SCTP_M_STRMI);
			if (stcb->asoc.strmin == NULL) {
				/* Allocation failed: restore the old array and deny. */
				stcb->asoc.strmin = oldstrm;
				goto denied;
			}
			/* copy off the old data */
			for (i = 0; i < stcb->asoc.streamincnt; i++) {
				TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
				TAILQ_INIT(&stcb->asoc.strmin[i].uno_inqueue);
				stcb->asoc.strmin[i].sid = i;
				stcb->asoc.strmin[i].last_mid_delivered = oldstrm[i].last_mid_delivered;
				stcb->asoc.strmin[i].delivery_started = oldstrm[i].delivery_started;
				stcb->asoc.strmin[i].pd_api_started = oldstrm[i].pd_api_started;
				/* now anything on those queues? */
				TAILQ_FOREACH_SAFE(ctl, &oldstrm[i].inqueue, next_instrm, nctl) {
					TAILQ_REMOVE(&oldstrm[i].inqueue, ctl, next_instrm);
					TAILQ_INSERT_TAIL(&stcb->asoc.strmin[i].inqueue, ctl, next_instrm);
				}
				TAILQ_FOREACH_SAFE(ctl, &oldstrm[i].uno_inqueue, next_instrm, nctl) {
					TAILQ_REMOVE(&oldstrm[i].uno_inqueue, ctl, next_instrm);
					TAILQ_INSERT_TAIL(&stcb->asoc.strmin[i].uno_inqueue, ctl, next_instrm);
				}
			}
			/* Init the new streams */
			for (i = stcb->asoc.streamincnt; i < num_stream; i++) {
				TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
				TAILQ_INIT(&stcb->asoc.strmin[i].uno_inqueue);
				stcb->asoc.strmin[i].sid = i;
				/* 0xffffffff: nothing delivered yet on a fresh stream. */
				stcb->asoc.strmin[i].last_mid_delivered = 0xffffffff;
				stcb->asoc.strmin[i].pd_api_started = 0;
				stcb->asoc.strmin[i].delivery_started = 0;
			}
			SCTP_FREE(oldstrm, SCTP_M_STRMI);
			/* update the size */
			stcb->asoc.streamincnt = num_stream;
			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
			sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt, 0);
		}
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
		asoc->str_reset_seq_in++;
	} else if ((asoc->str_reset_seq_in - 1) == seq) {
		/*
		 * one seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
	} else if ((asoc->str_reset_seq_in - 2) == seq) {
		/*
		 * two seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);

	}
}
4371
4372 static void
4373 sctp_handle_str_reset_add_out_strm(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
4374 struct sctp_stream_reset_add_strm *str_add)
4375 {
4376 /*
4377 * Peer is requesting to add more streams.
4378 * If its within our max-streams we will
4379 * allow it.
4380 */
4381 uint16_t num_stream;
4382 uint32_t seq;
4383 struct sctp_association *asoc = &stcb->asoc;
4384
4385 /* Get the number. */
4386 seq = ntohl(str_add->request_seq);
4387 num_stream = ntohs(str_add->number_of_streams);
4388 /* Now what would be the new total? */
4389 if (asoc->str_reset_seq_in == seq) {
4390 stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
4391 if (!(asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ)) {
4392 asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
4393 } else if (stcb->asoc.stream_reset_outstanding) {
4394 /* We must reject it we have something pending */
4395 stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_ERR_IN_PROGRESS;
4396 } else {
4397 /* Ok, we can do that :-) */
4398 int mychk;
4399 mychk = stcb->asoc.streamoutcnt;
4400 mychk += num_stream;
4401 if (mychk < 0x10000) {
4402 stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
4403 if (sctp_send_str_reset_req(stcb, 0, NULL, 0, 0, 1, num_stream, 0, 1)) {
4404 stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
4405 }
4406 } else {
4407 stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
4408 }
4409 }
4410 sctp_add_stream_reset_result(chk, seq, stcb->asoc.last_reset_action[0]);
4411 asoc->str_reset_seq_in++;
4412 } else if ((asoc->str_reset_seq_in - 1) == seq) {
4413 /*
4414 * one seq back, just echo back last action since my
4415 * response was lost.
4416 */
4417 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
4418 } else if ((asoc->str_reset_seq_in - 2) == seq) {
4419 /*
4420 * two seq back, just echo back last action since my
4421 * response was lost.
4422 */
4423 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
4424 } else {
4425 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
4426 }
4427 }
4428
4429 #ifdef __GNUC__
4430 __attribute__ ((noinline))
4431 #endif
/*
 * Process an incoming STREAM-RESET chunk: walk its parameters (requests
 * and/or responses), dispatch each to the matching handler, and build a
 * reply chunk which is queued on the association's control_send_queue.
 *
 * m/offset locate the chunk in the received mbuf chain; ch_req is the
 * chunk header already pulled out of it.
 *
 * Returns 0 normally; returns 1 when a TSN-reset request or a reset
 * response was handled in a way that required discarding the reply chunk
 * (the handler signalled this via its non-zero return).
 */
static int
sctp_handle_stream_reset(struct sctp_tcb *stcb, struct mbuf *m, int offset,
    struct sctp_chunkhdr *ch_req)
{
	uint16_t remaining_length, param_len, ptype;
	struct sctp_paramhdr pstore;
	/* Stack copy of each parameter; params longer than this are truncated. */
	uint8_t cstore[SCTP_CHUNK_BUFFER_SIZE];
	uint32_t seq = 0;
	int num_req = 0;
	int trunc = 0;
	struct sctp_tmit_chunk *chk;
	struct sctp_chunkhdr *ch;
	struct sctp_paramhdr *ph;
	int ret_code = 0;
	int num_param = 0;

	/* now it may be a reset or a reset-response */
	remaining_length = ntohs(ch_req->chunk_length) - sizeof(struct sctp_chunkhdr);

	/* setup for adding the response */
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		return (ret_code);
	}
	chk->copy_by_ref = 0;
	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
	chk->rec.chunk_id.can_take_data = 0;
	chk->flags = 0;
	chk->asoc = &stcb->asoc;
	chk->no_fr_allowed = 0;
	chk->book_size = chk->send_size = sizeof(struct sctp_chunkhdr);
	chk->book_size_scale = 0;
	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
	if (chk->data == NULL) {
		/*
		 * Common bail-out: free the reply chunk (and its data, if any)
		 * and return whatever ret_code has been set to. Also jumped to
		 * from below once a handler tells us no reply should be sent.
		 */
strres_nochunk:
		if (chk->data) {
			sctp_m_freem(chk->data);
			chk->data = NULL;
		}
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		return (ret_code);
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);

	/* setup chunk parameters */
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->whoTo = NULL;

	ch = mtod(chk->data, struct sctp_chunkhdr *);
	ch->chunk_type = SCTP_STREAM_RESET;
	ch->chunk_flags = 0;
	ch->chunk_length = htons(chk->send_size);
	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
	offset += sizeof(struct sctp_chunkhdr);
	/* Iterate over every parameter carried in the STREAM-RESET chunk. */
	while (remaining_length >= sizeof(struct sctp_paramhdr)) {
		/* First pull just the parameter header to learn type/length. */
		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, sizeof(pstore), (uint8_t *)&pstore);
		if (ph == NULL) {
			/* TSNH */
			break;
		}
		param_len = ntohs(ph->param_length);
		if ((param_len > remaining_length) ||
		    (param_len < (sizeof(struct sctp_paramhdr) + sizeof(uint32_t)))) {
			/* bad parameter length */
			break;
		}
		/* Now pull the parameter body (capped at our stack buffer). */
		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, min(param_len, sizeof(cstore)),
							   (uint8_t *)&cstore);
		if (ph == NULL) {
			/* TSNH */
			break;
		}
		ptype = ntohs(ph->param_type);
		num_param++;
		/* Remember whether we had to truncate the copied parameter. */
		if (param_len > sizeof(cstore)) {
			trunc = 1;
		} else {
			trunc = 0;
		}
		if (num_param > SCTP_MAX_RESET_PARAMS) {
			/* hit the max of parameters already sorry.. */
			break;
		}
		if (ptype == SCTP_STR_RESET_OUT_REQUEST) {
			struct sctp_stream_reset_out_request *req_out;

			if (param_len < sizeof(struct sctp_stream_reset_out_request)) {
				break;
			}
			req_out = (struct sctp_stream_reset_out_request *)ph;
			num_req++;
			if (stcb->asoc.stream_reset_outstanding) {
				seq = ntohl(req_out->response_seq);
				if (seq == stcb->asoc.str_reset_seq_out) {
					/* implicit ack */
					(void)sctp_handle_stream_reset_response(stcb, seq, SCTP_STREAM_RESET_RESULT_PERFORMED, NULL);
				}
			}
			sctp_handle_str_reset_request_out(stcb, chk, req_out, trunc);
		} else if (ptype == SCTP_STR_RESET_ADD_OUT_STREAMS) {
			struct sctp_stream_reset_add_strm *str_add;

			if (param_len < sizeof(struct sctp_stream_reset_add_strm)) {
				break;
			}
			str_add = (struct sctp_stream_reset_add_strm *)ph;
			num_req++;
			sctp_handle_str_reset_add_strm(stcb, chk, str_add);
		} else if (ptype == SCTP_STR_RESET_ADD_IN_STREAMS) {
			struct sctp_stream_reset_add_strm *str_add;

			if (param_len < sizeof(struct sctp_stream_reset_add_strm)) {
				break;
			}
			str_add = (struct sctp_stream_reset_add_strm *)ph;
			num_req++;
			/*
			 * NOTE(review): the ADD_IN param goes to the "...add_out_strm"
			 * handler (and vice versa above); the direction flips because
			 * the peer's "in" is our "out". Matches upstream - do not swap.
			 */
			sctp_handle_str_reset_add_out_strm(stcb, chk, str_add);
		} else if (ptype == SCTP_STR_RESET_IN_REQUEST) {
			struct sctp_stream_reset_in_request *req_in;

			num_req++;
			req_in = (struct sctp_stream_reset_in_request *)ph;
			sctp_handle_str_reset_request_in(stcb, chk, req_in, trunc);
		} else if (ptype == SCTP_STR_RESET_TSN_REQUEST) {
			struct sctp_stream_reset_tsn_request *req_tsn;

			num_req++;
			req_tsn = (struct sctp_stream_reset_tsn_request *)ph;
			if (sctp_handle_str_reset_request_tsn(stcb, chk, req_tsn)) {
				/* Handler consumed the event; drop the reply chunk. */
				ret_code = 1;
				goto strres_nochunk;
			}
			/* no more */
			break;
		} else if (ptype == SCTP_STR_RESET_RESPONSE) {
			struct sctp_stream_reset_response *resp;
			uint32_t result;

			if (param_len < sizeof(struct sctp_stream_reset_response)) {
				break;
			}
			resp = (struct sctp_stream_reset_response *)ph;
			seq = ntohl(resp->response_seq);
			result = ntohl(resp->result);
			if (sctp_handle_stream_reset_response(stcb, seq, result, resp)) {
				ret_code = 1;
				goto strres_nochunk;
			}
		} else {
			/* Unknown parameter type: stop parsing. */
			break;
		}
		/* Advance past this parameter (padded to a 4-byte boundary). */
		offset += SCTP_SIZE32(param_len);
		if (remaining_length >= SCTP_SIZE32(param_len)) {
			remaining_length -= SCTP_SIZE32(param_len);
		} else {
			remaining_length = 0;
		}
	}
	if (num_req == 0) {
		/* we have no response free the stuff */
		goto strres_nochunk;
	}
	/* ok we have a chunk to link in */
	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue,
			  chk,
			  sctp_next);
	stcb->asoc.ctrl_queue_cnt++;
	return (ret_code);
}
4602
4603 /*
4604 * Handle a router or endpoints report of a packet loss, there are two ways
4605 * to handle this, either we get the whole packet and must disect it
4606 * ourselves (possibly with truncation and or corruption) or it is a summary
4607 * from a middle box that did the disectting for us.
4608 */
4609 static void
4610 sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp,
4611 struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t limit)
4612 {
4613 uint32_t bottle_bw, on_queue;
4614 uint16_t trunc_len;
4615 unsigned int chlen;
4616 unsigned int at;
4617 struct sctp_chunk_desc desc;
4618 struct sctp_chunkhdr *ch;
4619
4620 chlen = ntohs(cp->ch.chunk_length);
4621 chlen -= sizeof(struct sctp_pktdrop_chunk);
4622 /* XXX possible chlen underflow */
4623 if (chlen == 0) {
4624 ch = NULL;
4625 if (cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)
4626 SCTP_STAT_INCR(sctps_pdrpbwrpt);
4627 } else {
4628 ch = (struct sctp_chunkhdr *)(cp->data + sizeof(struct sctphdr));
4629 chlen -= sizeof(struct sctphdr);
4630 /* XXX possible chlen underflow */
4631 memset(&desc, 0, sizeof(desc));
4632 }
4633 trunc_len = (uint16_t) ntohs(cp->trunc_len);
4634 if (trunc_len > limit) {
4635 trunc_len = limit;
4636 }
4637
4638 /* now the chunks themselves */
4639 while ((ch != NULL) && (chlen >= sizeof(struct sctp_chunkhdr))) {
4640 desc.chunk_type = ch->chunk_type;
4641 /* get amount we need to move */
4642 at = ntohs(ch->chunk_length);
4643 if (at < sizeof(struct sctp_chunkhdr)) {
4644 /* corrupt chunk, maybe at the end? */
4645 SCTP_STAT_INCR(sctps_pdrpcrupt);
4646 break;
4647 }
4648 if (trunc_len == 0) {
4649 /* we are supposed to have all of it */
4650 if (at > chlen) {
4651 /* corrupt skip it */
4652 SCTP_STAT_INCR(sctps_pdrpcrupt);
4653 break;
4654 }
4655 } else {
4656 /* is there enough of it left ? */
4657 if (desc.chunk_type == SCTP_DATA) {
4658 if (chlen < (sizeof(struct sctp_data_chunk) +
4659 sizeof(desc.data_bytes))) {
4660 break;
4661 }
4662 } else {
4663 if (chlen < sizeof(struct sctp_chunkhdr)) {
4664 break;
4665 }
4666 }
4667 }
4668 if (desc.chunk_type == SCTP_DATA) {
4669 /* can we get out the tsn? */
4670 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
4671 SCTP_STAT_INCR(sctps_pdrpmbda);
4672
4673 if (chlen >= (sizeof(struct sctp_data_chunk) + sizeof(uint32_t))) {
4674 /* yep */
4675 struct sctp_data_chunk *dcp;
4676 uint8_t *ddp;
4677 unsigned int iii;
4678
4679 dcp = (struct sctp_data_chunk *)ch;
4680 ddp = (uint8_t *) (dcp + 1);
4681 for (iii = 0; iii < sizeof(desc.data_bytes); iii++) {
4682 desc.data_bytes[iii] = ddp[iii];
4683 }
4684 desc.tsn_ifany = dcp->dp.tsn;
4685 } else {
4686 /* nope we are done. */
4687 SCTP_STAT_INCR(sctps_pdrpnedat);
4688 break;
4689 }
4690 } else {
4691 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
4692 SCTP_STAT_INCR(sctps_pdrpmbct);
4693 }
4694
4695 if (process_chunk_drop(stcb, &desc, net, cp->ch.chunk_flags)) {
4696 SCTP_STAT_INCR(sctps_pdrppdbrk);
4697 break;
4698 }
4699 if (SCTP_SIZE32(at) > chlen) {
4700 break;
4701 }
4702 chlen -= SCTP_SIZE32(at);
4703 if (chlen < sizeof(struct sctp_chunkhdr)) {
4704 /* done, none left */
4705 break;
4706 }
4707 ch = (struct sctp_chunkhdr *)((caddr_t)ch + SCTP_SIZE32(at));
4708 }
4709 /* Now update any rwnd --- possibly */
4710 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) == 0) {
4711 /* From a peer, we get a rwnd report */
4712 uint32_t a_rwnd;
4713
4714 SCTP_STAT_INCR(sctps_pdrpfehos);
4715
4716 bottle_bw = ntohl(cp->bottle_bw);
4717 on_queue = ntohl(cp->current_onq);
4718 if (bottle_bw && on_queue) {
4719 /* a rwnd report is in here */
4720 if (bottle_bw > on_queue)
4721 a_rwnd = bottle_bw - on_queue;
4722 else
4723 a_rwnd = 0;
4724
4725 if (a_rwnd == 0)
4726 stcb->asoc.peers_rwnd = 0;
4727 else {
4728 if (a_rwnd > stcb->asoc.total_flight) {
4729 stcb->asoc.peers_rwnd =
4730 a_rwnd - stcb->asoc.total_flight;
4731 } else {
4732 stcb->asoc.peers_rwnd = 0;
4733 }
4734 if (stcb->asoc.peers_rwnd <
4735 stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4736 /* SWS sender side engages */
4737 stcb->asoc.peers_rwnd = 0;
4738 }
4739 }
4740 }
4741 } else {
4742 SCTP_STAT_INCR(sctps_pdrpfmbox);
4743 }
4744
4745 /* now middle boxes in sat networks get a cwnd bump */
4746 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) &&
4747 (stcb->asoc.sat_t3_loss_recovery == 0) &&
4748 (stcb->asoc.sat_network)) {
4749 /*
4750 * This is debatable but for sat networks it makes sense
4751 * Note if a T3 timer has went off, we will prohibit any
4752 * changes to cwnd until we exit the t3 loss recovery.
4753 */
4754 stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped(stcb,
4755 net, cp, &bottle_bw, &on_queue);
4756 }
4757 }
4758
4759 /*
4760 * handles all control chunks in a packet inputs: - m: mbuf chain, assumed to
4761 * still contain IP/SCTP header - stcb: is the tcb found for this packet -
4762 * offset: offset into the mbuf chain to first chunkhdr - length: is the
4763 * length of the complete packet outputs: - length: modified to remaining
4764 * length after control processing - netp: modified to new sctp_nets after
4765 * cookie-echo processing - return NULL to discard the packet (ie. no asoc,
4766 * bad packet,...) otherwise return the tcb for this packet
4767 */
4768 #ifdef __GNUC__
4769 __attribute__ ((noinline))
4770 #endif
4771 static struct sctp_tcb *
4772 sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length,
4773 struct sockaddr *src, struct sockaddr *dst,
4774 struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb *inp,
4775 struct sctp_tcb *stcb, struct sctp_nets **netp, int *fwd_tsn_seen,
4776 #if defined(__FreeBSD__) && !defined(__Userspace__)
4777 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
4778 #endif
4779 uint32_t vrf_id, uint16_t port)
4780 {
4781 struct sctp_association *asoc;
4782 struct mbuf *op_err;
4783 char msg[SCTP_DIAG_INFO_LEN];
4784 uint32_t vtag_in;
4785 int num_chunks = 0; /* number of control chunks processed */
4786 uint32_t chk_length, contiguous;
4787 int ret;
4788 int abort_no_unlock = 0;
4789 int ecne_seen = 0;
4790 /*
4791 * How big should this be, and should it be alloc'd? Lets try the
4792 * d-mtu-ceiling for now (2k) and that should hopefully work ...
4793 * until we get into jumbo grams and such..
4794 */
4795 uint8_t chunk_buf[SCTP_CHUNK_BUFFER_SIZE];
4796 int got_auth = 0;
4797 uint32_t auth_offset = 0, auth_len = 0;
4798 int auth_skipped = 0;
4799 int asconf_cnt = 0;
4800 #if defined(__APPLE__) && !defined(__Userspace__)
4801 struct socket *so;
4802 #endif
4803
4804 SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_control: iphlen=%u, offset=%u, length=%u stcb:%p\n",
4805 iphlen, *offset, length, (void *)stcb);
4806
4807 if (stcb) {
4808 SCTP_TCB_LOCK_ASSERT(stcb);
4809 }
4810 /* validate chunk header length... */
4811 if (ntohs(ch->chunk_length) < sizeof(*ch)) {
4812 SCTPDBG(SCTP_DEBUG_INPUT1, "Invalid header length %d\n",
4813 ntohs(ch->chunk_length));
4814 *offset = length;
4815 return (stcb);
4816 }
4817 /*
4818 * validate the verification tag
4819 */
4820 vtag_in = ntohl(sh->v_tag);
4821
4822 if (ch->chunk_type == SCTP_INITIATION) {
4823 SCTPDBG(SCTP_DEBUG_INPUT1, "Its an INIT of len:%d vtag:%x\n",
4824 ntohs(ch->chunk_length), vtag_in);
4825 if (vtag_in != 0) {
4826 /* protocol error- silently discard... */
4827 SCTP_STAT_INCR(sctps_badvtag);
4828 if (stcb != NULL) {
4829 SCTP_TCB_UNLOCK(stcb);
4830 }
4831 return (NULL);
4832 }
4833 } else if (ch->chunk_type != SCTP_COOKIE_ECHO) {
4834 /*
4835 * If there is no stcb, skip the AUTH chunk and process
4836 * later after a stcb is found (to validate the lookup was
4837 * valid.
4838 */
4839 if ((ch->chunk_type == SCTP_AUTHENTICATION) &&
4840 (stcb == NULL) &&
4841 (inp->auth_supported == 1)) {
4842 /* save this chunk for later processing */
4843 auth_skipped = 1;
4844 auth_offset = *offset;
4845 auth_len = ntohs(ch->chunk_length);
4846
4847 /* (temporarily) move past this chunk */
4848 *offset += SCTP_SIZE32(auth_len);
4849 if (*offset >= length) {
4850 /* no more data left in the mbuf chain */
4851 *offset = length;
4852 return (NULL);
4853 }
4854 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4855 sizeof(struct sctp_chunkhdr), chunk_buf);
4856 }
4857 if (ch == NULL) {
4858 /* Help */
4859 *offset = length;
4860 return (stcb);
4861 }
4862 if (ch->chunk_type == SCTP_COOKIE_ECHO) {
4863 goto process_control_chunks;
4864 }
4865 /*
4866 * first check if it's an ASCONF with an unknown src addr we
4867 * need to look inside to find the association
4868 */
4869 if (ch->chunk_type == SCTP_ASCONF && stcb == NULL) {
4870 struct sctp_chunkhdr *asconf_ch = ch;
4871 uint32_t asconf_offset = 0, asconf_len = 0;
4872
4873 /* inp's refcount may be reduced */
4874 SCTP_INP_INCR_REF(inp);
4875
4876 asconf_offset = *offset;
4877 do {
4878 asconf_len = ntohs(asconf_ch->chunk_length);
4879 if (asconf_len < sizeof(struct sctp_asconf_paramhdr))
4880 break;
4881 stcb = sctp_findassociation_ep_asconf(m,
4882 *offset,
4883 dst,
4884 sh, &inp, netp, vrf_id);
4885 if (stcb != NULL)
4886 break;
4887 asconf_offset += SCTP_SIZE32(asconf_len);
4888 asconf_ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, asconf_offset,
4889 sizeof(struct sctp_chunkhdr), chunk_buf);
4890 } while (asconf_ch != NULL && asconf_ch->chunk_type == SCTP_ASCONF);
4891 if (stcb == NULL) {
4892 /*
4893 * reduce inp's refcount if not reduced in
4894 * sctp_findassociation_ep_asconf().
4895 */
4896 SCTP_INP_DECR_REF(inp);
4897 }
4898
4899 /* now go back and verify any auth chunk to be sure */
4900 if (auth_skipped && (stcb != NULL)) {
4901 struct sctp_auth_chunk *auth;
4902
4903 if (auth_len <= SCTP_CHUNK_BUFFER_SIZE) {
4904 auth = (struct sctp_auth_chunk *)sctp_m_getptr(m, auth_offset, auth_len, chunk_buf);
4905 got_auth = 1;
4906 auth_skipped = 0;
4907 } else {
4908 auth = NULL;
4909 }
4910 if ((auth == NULL) || sctp_handle_auth(stcb, auth, m,
4911 auth_offset)) {
4912 /* auth HMAC failed so dump it */
4913 *offset = length;
4914 return (stcb);
4915 } else {
4916 /* remaining chunks are HMAC checked */
4917 stcb->asoc.authenticated = 1;
4918 }
4919 }
4920 }
4921 if (stcb == NULL) {
4922 SCTP_SNPRINTF(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
4923 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
4924 msg);
4925 /* no association, so it's out of the blue... */
4926 sctp_handle_ootb(m, iphlen, *offset, src, dst, sh, inp, op_err,
4927 #if defined(__FreeBSD__) && !defined(__Userspace__)
4928 mflowtype, mflowid, inp->fibnum,
4929 #endif
4930 vrf_id, port);
4931 *offset = length;
4932 return (NULL);
4933 }
4934 asoc = &stcb->asoc;
4935 /* ABORT and SHUTDOWN can use either v_tag... */
4936 if ((ch->chunk_type == SCTP_ABORT_ASSOCIATION) ||
4937 (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) ||
4938 (ch->chunk_type == SCTP_PACKET_DROPPED)) {
4939 /* Take the T-bit always into account. */
4940 if ((((ch->chunk_flags & SCTP_HAD_NO_TCB) == 0) &&
4941 (vtag_in == asoc->my_vtag)) ||
4942 (((ch->chunk_flags & SCTP_HAD_NO_TCB) == SCTP_HAD_NO_TCB) &&
4943 (asoc->peer_vtag != htonl(0)) &&
4944 (vtag_in == asoc->peer_vtag))) {
4945 /* this is valid */
4946 } else {
4947 /* drop this packet... */
4948 SCTP_STAT_INCR(sctps_badvtag);
4949 if (stcb != NULL) {
4950 SCTP_TCB_UNLOCK(stcb);
4951 }
4952 return (NULL);
4953 }
4954 } else if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
4955 if (vtag_in != asoc->my_vtag) {
4956 /*
4957 * this could be a stale SHUTDOWN-ACK or the
4958 * peer never got the SHUTDOWN-COMPLETE and
4959 * is still hung; we have started a new asoc
4960 * but it won't complete until the shutdown
4961 * is completed
4962 */
4963 if (stcb != NULL) {
4964 SCTP_TCB_UNLOCK(stcb);
4965 }
4966 SCTP_SNPRINTF(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
4967 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
4968 msg);
4969 sctp_handle_ootb(m, iphlen, *offset, src, dst,
4970 sh, inp, op_err,
4971 #if defined(__FreeBSD__) && !defined(__Userspace__)
4972 mflowtype, mflowid, fibnum,
4973 #endif
4974 vrf_id, port);
4975 return (NULL);
4976 }
4977 } else {
4978 /* for all other chunks, vtag must match */
4979 if (vtag_in != asoc->my_vtag) {
4980 /* invalid vtag... */
4981 SCTPDBG(SCTP_DEBUG_INPUT3,
4982 "invalid vtag: %xh, expect %xh\n",
4983 vtag_in, asoc->my_vtag);
4984 SCTP_STAT_INCR(sctps_badvtag);
4985 if (stcb != NULL) {
4986 SCTP_TCB_UNLOCK(stcb);
4987 }
4988 *offset = length;
4989 return (NULL);
4990 }
4991 }
4992 } /* end if !SCTP_COOKIE_ECHO */
4993 /*
4994 * process all control chunks...
4995 */
4996 if (((ch->chunk_type == SCTP_SELECTIVE_ACK) ||
4997 (ch->chunk_type == SCTP_NR_SELECTIVE_ACK) ||
4998 (ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) &&
4999 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
5000 /* implied cookie-ack.. we must have lost the ack */
5001 sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb,
5002 *netp);
5003 }
5004
5005 process_control_chunks:
5006 while (IS_SCTP_CONTROL(ch)) {
5007 /* validate chunk length */
5008 chk_length = ntohs(ch->chunk_length);
5009 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_process_control: processing a chunk type=%u, len=%u\n",
5010 ch->chunk_type, chk_length);
5011 SCTP_LTRACE_CHK(inp, stcb, ch->chunk_type, chk_length);
5012 if (chk_length < sizeof(*ch) ||
5013 (*offset + (int)chk_length) > length) {
5014 *offset = length;
5015 return (stcb);
5016 }
5017 SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks);
5018 /*
5019 * INIT and INIT-ACK only gets the init ack "header" portion
5020 * only because we don't have to process the peer's COOKIE.
5021 * All others get a complete chunk.
5022 */
5023 switch (ch->chunk_type) {
5024 case SCTP_INITIATION:
5025 contiguous = sizeof(struct sctp_init_chunk);
5026 break;
5027 case SCTP_INITIATION_ACK:
5028 contiguous = sizeof(struct sctp_init_ack_chunk);
5029 break;
5030 default:
5031 contiguous = min(chk_length, sizeof(chunk_buf));
5032 break;
5033 }
5034 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
5035 contiguous,
5036 chunk_buf);
5037 if (ch == NULL) {
5038 *offset = length;
5039 if (stcb != NULL) {
5040 SCTP_TCB_UNLOCK(stcb);
5041 }
5042 return (NULL);
5043 }
5044
5045 num_chunks++;
5046 /* Save off the last place we got a control from */
5047 if (stcb != NULL) {
5048 if (((netp != NULL) && (*netp != NULL)) || (ch->chunk_type == SCTP_ASCONF)) {
5049 /*
5050 * allow last_control to be NULL if
5051 * ASCONF... ASCONF processing will find the
5052 * right net later
5053 */
5054 if ((netp != NULL) && (*netp != NULL))
5055 stcb->asoc.last_control_chunk_from = *netp;
5056 }
5057 }
5058 #ifdef SCTP_AUDITING_ENABLED
5059 sctp_audit_log(0xB0, ch->chunk_type);
5060 #endif
5061
5062 /* check to see if this chunk required auth, but isn't */
5063 if ((stcb != NULL) &&
5064 sctp_auth_is_required_chunk(ch->chunk_type, stcb->asoc.local_auth_chunks) &&
5065 !stcb->asoc.authenticated) {
5066 /* "silently" ignore */
5067 SCTP_STAT_INCR(sctps_recvauthmissing);
5068 goto next_chunk;
5069 }
5070 switch (ch->chunk_type) {
5071 case SCTP_INITIATION:
5072 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT\n");
5073 /* The INIT chunk must be the only chunk. */
5074 if ((num_chunks > 1) ||
5075 (length - *offset > (int)SCTP_SIZE32(chk_length))) {
5076 /* RFC 4960 requires that no ABORT is sent */
5077 *offset = length;
5078 if (stcb != NULL) {
5079 SCTP_TCB_UNLOCK(stcb);
5080 }
5081 return (NULL);
5082 }
5083 /* Honor our resource limit. */
5084 if (chk_length > SCTP_LARGEST_INIT_ACCEPTED) {
5085 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
5086 sctp_abort_association(inp, stcb, m, iphlen,
5087 src, dst, sh, op_err,
5088 #if defined(__FreeBSD__) && !defined(__Userspace__)
5089 mflowtype, mflowid,
5090 #endif
5091 vrf_id, port);
5092 *offset = length;
5093 return (NULL);
5094 }
5095 sctp_handle_init(m, iphlen, *offset, src, dst, sh,
5096 (struct sctp_init_chunk *)ch, inp,
5097 stcb, *netp, &abort_no_unlock,
5098 #if defined(__FreeBSD__) && !defined(__Userspace__)
5099 mflowtype, mflowid,
5100 #endif
5101 vrf_id, port);
5102 *offset = length;
5103 if ((!abort_no_unlock) && (stcb != NULL)) {
5104 SCTP_TCB_UNLOCK(stcb);
5105 }
5106 return (NULL);
5107 break;
5108 case SCTP_PAD_CHUNK:
5109 break;
5110 case SCTP_INITIATION_ACK:
5111 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT_ACK\n");
5112 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5113 /* We are not interested anymore */
5114 if ((stcb != NULL) && (stcb->asoc.total_output_queue_size)) {
5115 ;
5116 } else {
5117 *offset = length;
5118 if (stcb != NULL) {
5119 #if defined(__APPLE__) && !defined(__Userspace__)
5120 so = SCTP_INP_SO(inp);
5121 atomic_add_int(&stcb->asoc.refcnt, 1);
5122 SCTP_TCB_UNLOCK(stcb);
5123 SCTP_SOCKET_LOCK(so, 1);
5124 SCTP_TCB_LOCK(stcb);
5125 atomic_subtract_int(&stcb->asoc.refcnt, 1);
5126 #endif
5127 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
5128 SCTP_FROM_SCTP_INPUT + SCTP_LOC_29);
5129 #if defined(__APPLE__) && !defined(__Userspace__)
5130 SCTP_SOCKET_UNLOCK(so, 1);
5131 #endif
5132 }
5133 return (NULL);
5134 }
5135 }
5136 /* The INIT-ACK chunk must be the only chunk. */
5137 if ((num_chunks > 1) ||
5138 (length - *offset > (int)SCTP_SIZE32(chk_length))) {
5139 *offset = length;
5140 return (stcb);
5141 }
5142 if ((netp != NULL) && (*netp != NULL)) {
5143 ret = sctp_handle_init_ack(m, iphlen, *offset,
5144 src, dst, sh,
5145 (struct sctp_init_ack_chunk *)ch,
5146 stcb, *netp,
5147 &abort_no_unlock,
5148 #if defined(__FreeBSD__) && !defined(__Userspace__)
5149 mflowtype, mflowid,
5150 #endif
5151 vrf_id);
5152 } else {
5153 ret = -1;
5154 }
5155 *offset = length;
5156 if (abort_no_unlock) {
5157 return (NULL);
5158 }
5159 /*
5160 * Special case, I must call the output routine to
5161 * get the cookie echoed
5162 */
5163 if ((stcb != NULL) && (ret == 0)) {
5164 sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
5165 }
5166 return (stcb);
5167 break;
5168 case SCTP_SELECTIVE_ACK:
5169 case SCTP_NR_SELECTIVE_ACK:
5170 {
5171 int abort_now = 0;
5172 uint32_t a_rwnd, cum_ack;
5173 uint16_t num_seg, num_nr_seg, num_dup;
5174 uint8_t flags;
5175 int offset_seg, offset_dup;
5176
5177 SCTPDBG(SCTP_DEBUG_INPUT3, "%s\n",
5178 ch->chunk_type == SCTP_SELECTIVE_ACK ? "SCTP_SACK" : "SCTP_NR_SACK");
5179 SCTP_STAT_INCR(sctps_recvsacks);
5180 if (stcb == NULL) {
5181 SCTPDBG(SCTP_DEBUG_INDATA1, "No stcb when processing %s chunk\n",
5182 (ch->chunk_type == SCTP_SELECTIVE_ACK) ? "SCTP_SACK" : "SCTP_NR_SACK");
5183 break;
5184 }
5185 if (ch->chunk_type == SCTP_SELECTIVE_ACK) {
5186 if (chk_length < sizeof(struct sctp_sack_chunk)) {
5187 SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on SACK chunk, too small\n");
5188 break;
5189 }
5190 } else {
5191 if (stcb->asoc.nrsack_supported == 0) {
5192 goto unknown_chunk;
5193 }
5194 if (chk_length < sizeof(struct sctp_nr_sack_chunk)) {
5195 SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on NR_SACK chunk, too small\n");
5196 break;
5197 }
5198 }
5199 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
5200 /*-
5201 * If we have sent a shutdown-ack, we will pay no
5202 * attention to a sack sent in to us since
5203 * we don't care anymore.
5204 */
5205 break;
5206 }
5207 flags = ch->chunk_flags;
5208 if (ch->chunk_type == SCTP_SELECTIVE_ACK) {
5209 struct sctp_sack_chunk *sack;
5210
5211 sack = (struct sctp_sack_chunk *)ch;
5212 cum_ack = ntohl(sack->sack.cum_tsn_ack);
5213 num_seg = ntohs(sack->sack.num_gap_ack_blks);
5214 num_nr_seg = 0;
5215 num_dup = ntohs(sack->sack.num_dup_tsns);
5216 a_rwnd = ntohl(sack->sack.a_rwnd);
5217 if (sizeof(struct sctp_sack_chunk) +
5218 num_seg * sizeof(struct sctp_gap_ack_block) +
5219 num_dup * sizeof(uint32_t) != chk_length) {
5220 SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of SACK chunk\n");
5221 break;
5222 }
5223 offset_seg = *offset + sizeof(struct sctp_sack_chunk);
5224 offset_dup = offset_seg + num_seg * sizeof(struct sctp_gap_ack_block);
5225 } else {
5226 struct sctp_nr_sack_chunk *nr_sack;
5227
5228 nr_sack = (struct sctp_nr_sack_chunk *)ch;
5229 cum_ack = ntohl(nr_sack->nr_sack.cum_tsn_ack);
5230 num_seg = ntohs(nr_sack->nr_sack.num_gap_ack_blks);
5231 num_nr_seg = ntohs(nr_sack->nr_sack.num_nr_gap_ack_blks);
5232 num_dup = ntohs(nr_sack->nr_sack.num_dup_tsns);
5233 a_rwnd = ntohl(nr_sack->nr_sack.a_rwnd);
5234 if (sizeof(struct sctp_nr_sack_chunk) +
5235 (num_seg + num_nr_seg) * sizeof(struct sctp_gap_ack_block) +
5236 num_dup * sizeof(uint32_t) != chk_length) {
5237 SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of NR_SACK chunk\n");
5238 break;
5239 }
5240 offset_seg = *offset + sizeof(struct sctp_nr_sack_chunk);
5241 offset_dup = offset_seg + (num_seg + num_nr_seg) * sizeof(struct sctp_gap_ack_block);
5242 }
5243 SCTPDBG(SCTP_DEBUG_INPUT3, "%s process cum_ack:%x num_seg:%d a_rwnd:%d\n",
5244 (ch->chunk_type == SCTP_SELECTIVE_ACK) ? "SCTP_SACK" : "SCTP_NR_SACK",
5245 cum_ack, num_seg, a_rwnd);
5246 stcb->asoc.seen_a_sack_this_pkt = 1;
5247 if ((stcb->asoc.pr_sctp_cnt == 0) &&
5248 (num_seg == 0) && (num_nr_seg == 0) &&
5249 SCTP_TSN_GE(cum_ack, stcb->asoc.last_acked_seq) &&
5250 (stcb->asoc.saw_sack_with_frags == 0) &&
5251 (stcb->asoc.saw_sack_with_nr_frags == 0) &&
5252 (!TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
5253 /*
5254 * We have a SIMPLE sack having no
5255 * prior segments and data on sent
5256 * queue to be acked. Use the
5257 * faster path sack processing. We
5258 * also allow window update sacks
5259 * with no missing segments to go
5260 * this way too.
5261 */
5262 sctp_express_handle_sack(stcb, cum_ack, a_rwnd,
5263 &abort_now, ecne_seen);
5264 } else {
5265 if ((netp != NULL) && (*netp != NULL)) {
5266 sctp_handle_sack(m, offset_seg, offset_dup, stcb,
5267 num_seg, num_nr_seg, num_dup, &abort_now, flags,
5268 cum_ack, a_rwnd, ecne_seen);
5269 }
5270 }
5271 if (abort_now) {
5272 /* ABORT signal from sack processing */
5273 *offset = length;
5274 return (NULL);
5275 }
5276 if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
5277 TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
5278 (stcb->asoc.stream_queue_cnt == 0)) {
5279 sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
5280 }
5281 break;
5282 }
5283 case SCTP_HEARTBEAT_REQUEST:
5284 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT\n");
5285 if ((stcb != NULL) && (netp != NULL) && (*netp != NULL)) {
5286 SCTP_STAT_INCR(sctps_recvheartbeat);
5287 sctp_send_heartbeat_ack(stcb, m, *offset,
5288 chk_length, *netp);
5289 }
5290 break;
5291 case SCTP_HEARTBEAT_ACK:
5292 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT_ACK\n");
5293 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_heartbeat_chunk))) {
5294 /* Its not ours */
5295 *offset = length;
5296 return (stcb);
5297 }
5298 SCTP_STAT_INCR(sctps_recvheartbeatack);
5299 if ((netp != NULL) && (*netp != NULL)) {
5300 sctp_handle_heartbeat_ack((struct sctp_heartbeat_chunk *)ch,
5301 stcb, *netp);
5302 }
5303 break;
5304 case SCTP_ABORT_ASSOCIATION:
5305 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ABORT, stcb %p\n",
5306 (void *)stcb);
5307 *offset = length;
5308 if ((stcb != NULL) && (netp != NULL) && (*netp != NULL)) {
5309 if (sctp_handle_abort((struct sctp_abort_chunk *)ch, stcb, *netp)) {
5310 return (NULL);
5311 } else {
5312 return (stcb);
5313 }
5314 } else {
5315 return (NULL);
5316 }
5317 break;
5318 case SCTP_SHUTDOWN:
5319 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN, stcb %p\n",
5320 (void *)stcb);
5321 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_shutdown_chunk))) {
5322 *offset = length;
5323 return (stcb);
5324 }
5325 if ((netp != NULL) && (*netp != NULL)) {
5326 int abort_flag = 0;
5327
5328 sctp_handle_shutdown((struct sctp_shutdown_chunk *)ch,
5329 stcb, *netp, &abort_flag);
5330 if (abort_flag) {
5331 *offset = length;
5332 return (NULL);
5333 }
5334 }
5335 break;
5336 case SCTP_SHUTDOWN_ACK:
5337 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN_ACK, stcb %p\n", (void *)stcb);
5338 if ((stcb != NULL) && (netp != NULL) && (*netp != NULL)) {
5339 sctp_handle_shutdown_ack((struct sctp_shutdown_ack_chunk *)ch, stcb, *netp);
5340 }
5341 *offset = length;
5342 return (NULL);
5343 break;
5344 case SCTP_OPERATION_ERROR:
5345 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_OP_ERR\n");
5346 if ((stcb != NULL) && (netp != NULL) && (*netp != NULL) &&
5347 sctp_handle_error(ch, stcb, *netp, contiguous) < 0) {
5348 *offset = length;
5349 return (NULL);
5350 }
5351 break;
5352 case SCTP_COOKIE_ECHO:
5353 SCTPDBG(SCTP_DEBUG_INPUT3,
5354 "SCTP_COOKIE_ECHO, stcb %p\n", (void *)stcb);
5355 if ((stcb != NULL) && (stcb->asoc.total_output_queue_size > 0)) {
5356 ;
5357 } else {
5358 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5359 /* We are not interested anymore */
5360 abend:
5361 if (stcb != NULL) {
5362 SCTP_TCB_UNLOCK(stcb);
5363 }
5364 *offset = length;
5365 return (NULL);
5366 }
5367 }
5368 /*-
5369 * First are we accepting? We do this again here
5370 * since it is possible that a previous endpoint WAS
5371 * listening responded to a INIT-ACK and then
5372 * closed. We opened and bound.. and are now no
5373 * longer listening.
5374 *
5375 * XXXGL: notes on checking listen queue length.
5376 * 1) SCTP_IS_LISTENING() doesn't necessarily mean
5377 * SOLISTENING(), because a listening "UDP type"
5378 * socket isn't listening in terms of the socket
5379 * layer. It is a normal data flow socket, that
5380 * can fork off new connections. Thus, we should
5381 * look into sol_qlen only in case we are !UDP.
5382 * 2) Checking sol_qlen in general requires locking
5383 * the socket, and this code lacks that.
5384 */
5385 if ((stcb == NULL) &&
5386 (!SCTP_IS_LISTENING(inp) ||
5387 (!(inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
5388 #if defined(__FreeBSD__) && !defined(__Userspace__)
5389 inp->sctp_socket->sol_qlen >= inp->sctp_socket->sol_qlimit))) {
5390 #else
5391 inp->sctp_socket->so_qlen >= inp->sctp_socket->so_qlimit))) {
5392 #endif
5393 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
5394 (SCTP_BASE_SYSCTL(sctp_abort_if_one_2_one_hits_limit))) {
5395 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
5396 sctp_abort_association(inp, stcb, m, iphlen,
5397 src, dst, sh, op_err,
5398 #if defined(__FreeBSD__) && !defined(__Userspace__)
5399 mflowtype, mflowid,
5400 #endif
5401 vrf_id, port);
5402 }
5403 *offset = length;
5404 return (NULL);
5405 } else {
5406 struct mbuf *ret_buf;
5407 struct sctp_inpcb *linp;
5408 struct sctp_tmit_chunk *chk;
5409
5410 if (stcb) {
5411 linp = NULL;
5412 } else {
5413 linp = inp;
5414 }
5415
5416 if (linp != NULL) {
5417 SCTP_ASOC_CREATE_LOCK(linp);
5418 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5419 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5420 SCTP_ASOC_CREATE_UNLOCK(linp);
5421 goto abend;
5422 }
5423 }
5424
5425 if (netp != NULL) {
5426 struct sctp_tcb *locked_stcb;
5427
5428 locked_stcb = stcb;
5429 ret_buf =
5430 sctp_handle_cookie_echo(m, iphlen,
5431 *offset,
5432 src, dst,
5433 sh,
5434 (struct sctp_cookie_echo_chunk *)ch,
5435 &inp, &stcb, netp,
5436 auth_skipped,
5437 auth_offset,
5438 auth_len,
5439 &locked_stcb,
5440 #if defined(__FreeBSD__) && !defined(__Userspace__)
5441 mflowtype,
5442 mflowid,
5443 #endif
5444 vrf_id,
5445 port);
5446 if ((locked_stcb != NULL) && (locked_stcb != stcb)) {
5447 SCTP_TCB_UNLOCK(locked_stcb);
5448 }
5449 if (stcb != NULL) {
5450 SCTP_TCB_LOCK_ASSERT(stcb);
5451 }
5452 } else {
5453 ret_buf = NULL;
5454 }
5455 if (linp != NULL) {
5456 SCTP_ASOC_CREATE_UNLOCK(linp);
5457 }
5458 if (ret_buf == NULL) {
5459 if (stcb != NULL) {
5460 SCTP_TCB_UNLOCK(stcb);
5461 }
5462 SCTPDBG(SCTP_DEBUG_INPUT3,
5463 "GAK, null buffer\n");
5464 *offset = length;
5465 return (NULL);
5466 }
5467 /* if AUTH skipped, see if it verified... */
5468 if (auth_skipped) {
5469 got_auth = 1;
5470 auth_skipped = 0;
5471 }
5472 /* Restart the timer if we have pending data */
5473 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
5474 if (chk->whoTo != NULL) {
5475 break;
5476 }
5477 }
5478 if (chk != NULL) {
5479 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo);
5480 }
5481 }
5482 break;
5483 case SCTP_COOKIE_ACK:
5484 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_COOKIE_ACK, stcb %p\n", (void *)stcb);
5485 if ((stcb == NULL) || chk_length != sizeof(struct sctp_cookie_ack_chunk)) {
5486 return (stcb);
5487 }
5488 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5489 /* We are not interested anymore */
5490 if ((stcb) && (stcb->asoc.total_output_queue_size)) {
5491 ;
5492 } else if (stcb) {
5493 #if defined(__APPLE__) && !defined(__Userspace__)
5494 so = SCTP_INP_SO(inp);
5495 atomic_add_int(&stcb->asoc.refcnt, 1);
5496 SCTP_TCB_UNLOCK(stcb);
5497 SCTP_SOCKET_LOCK(so, 1);
5498 SCTP_TCB_LOCK(stcb);
5499 atomic_subtract_int(&stcb->asoc.refcnt, 1);
5500 #endif
5501 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
5502 SCTP_FROM_SCTP_INPUT + SCTP_LOC_30);
5503 #if defined(__APPLE__) && !defined(__Userspace__)
5504 SCTP_SOCKET_UNLOCK(so, 1);
5505 #endif
5506 *offset = length;
5507 return (NULL);
5508 }
5509 }
5510 if ((netp != NULL) && (*netp != NULL)) {
5511 sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, *netp);
5512 }
5513 break;
5514 case SCTP_ECN_ECHO:
5515 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN_ECHO\n");
5516 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_ecne_chunk))) {
5517 /* Its not ours */
5518 *offset = length;
5519 return (stcb);
5520 }
5521 if (stcb->asoc.ecn_supported == 0) {
5522 goto unknown_chunk;
5523 }
5524 sctp_handle_ecn_echo((struct sctp_ecne_chunk *)ch, stcb);
5525 ecne_seen = 1;
5526 break;
5527 case SCTP_ECN_CWR:
5528 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN_CWR\n");
5529 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_cwr_chunk))) {
5530 *offset = length;
5531 return (stcb);
5532 }
5533 if (stcb->asoc.ecn_supported == 0) {
5534 goto unknown_chunk;
5535 }
5536 sctp_handle_ecn_cwr((struct sctp_cwr_chunk *)ch, stcb, *netp);
5537 break;
5538 case SCTP_SHUTDOWN_COMPLETE:
5539 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN_COMPLETE, stcb %p\n", (void *)stcb);
5540 /* must be first and only chunk */
5541 if ((num_chunks > 1) ||
5542 (length - *offset > (int)SCTP_SIZE32(chk_length))) {
5543 *offset = length;
5544 return (stcb);
5545 }
5546 if ((stcb != NULL) && (netp != NULL) && (*netp != NULL)) {
5547 sctp_handle_shutdown_complete((struct sctp_shutdown_complete_chunk *)ch,
5548 stcb, *netp);
5549 }
5550 *offset = length;
5551 return (NULL);
5552 break;
5553 case SCTP_ASCONF:
5554 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF\n");
5555 if (stcb != NULL) {
5556 if (stcb->asoc.asconf_supported == 0) {
5557 goto unknown_chunk;
5558 }
5559 sctp_handle_asconf(m, *offset, src,
5560 (struct sctp_asconf_chunk *)ch, stcb, asconf_cnt == 0);
5561 asconf_cnt++;
5562 }
5563 break;
5564 case SCTP_ASCONF_ACK:
5565 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF_ACK\n");
5566 if (chk_length < sizeof(struct sctp_asconf_ack_chunk)) {
5567 /* Its not ours */
5568 *offset = length;
5569 return (stcb);
5570 }
5571 if ((stcb != NULL) && (netp != NULL) && (*netp != NULL)) {
5572 if (stcb->asoc.asconf_supported == 0) {
5573 goto unknown_chunk;
5574 }
5575 /* He's alive so give him credit */
5576 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5577 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5578 stcb->asoc.overall_error_count,
5579 0,
5580 SCTP_FROM_SCTP_INPUT,
5581 __LINE__);
5582 }
5583 stcb->asoc.overall_error_count = 0;
5584 sctp_handle_asconf_ack(m, *offset,
5585 (struct sctp_asconf_ack_chunk *)ch, stcb, *netp, &abort_no_unlock);
5586 if (abort_no_unlock)
5587 return (NULL);
5588 }
5589 break;
5590 case SCTP_FORWARD_CUM_TSN:
5591 case SCTP_IFORWARD_CUM_TSN:
5592 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_FWD_TSN\n");
5593 if (chk_length < sizeof(struct sctp_forward_tsn_chunk)) {
5594 /* Its not ours */
5595 *offset = length;
5596 return (stcb);
5597 }
5598
5599 if (stcb != NULL) {
5600 int abort_flag = 0;
5601
5602 if (stcb->asoc.prsctp_supported == 0) {
5603 goto unknown_chunk;
5604 }
5605 *fwd_tsn_seen = 1;
5606 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5607 /* We are not interested anymore */
5608 #if defined(__APPLE__) && !defined(__Userspace__)
5609 so = SCTP_INP_SO(inp);
5610 atomic_add_int(&stcb->asoc.refcnt, 1);
5611 SCTP_TCB_UNLOCK(stcb);
5612 SCTP_SOCKET_LOCK(so, 1);
5613 SCTP_TCB_LOCK(stcb);
5614 atomic_subtract_int(&stcb->asoc.refcnt, 1);
5615 #endif
5616 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
5617 SCTP_FROM_SCTP_INPUT + SCTP_LOC_31);
5618 #if defined(__APPLE__) && !defined(__Userspace__)
5619 SCTP_SOCKET_UNLOCK(so, 1);
5620 #endif
5621 *offset = length;
5622 return (NULL);
5623 }
5624 /*
5625 * For sending a SACK this looks like DATA
5626 * chunks.
5627 */
5628 stcb->asoc.last_data_chunk_from = stcb->asoc.last_control_chunk_from;
5629 sctp_handle_forward_tsn(stcb,
5630 (struct sctp_forward_tsn_chunk *)ch, &abort_flag, m, *offset);
5631 if (abort_flag) {
5632 *offset = length;
5633 return (NULL);
5634 }
5635 }
5636 break;
5637 case SCTP_STREAM_RESET:
5638 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_STREAM_RESET\n");
5639 if (((stcb == NULL) || (ch == NULL) || (chk_length < sizeof(struct sctp_stream_reset_tsn_req)))) {
5640 /* Its not ours */
5641 *offset = length;
5642 return (stcb);
5643 }
5644 if (stcb->asoc.reconfig_supported == 0) {
5645 goto unknown_chunk;
5646 }
5647 if (sctp_handle_stream_reset(stcb, m, *offset, ch)) {
5648 /* stop processing */
5649 *offset = length;
5650 return (NULL);
5651 }
5652 break;
5653 case SCTP_PACKET_DROPPED:
5654 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_PACKET_DROPPED\n");
5655 /* re-get it all please */
5656 if (chk_length < sizeof(struct sctp_pktdrop_chunk)) {
5657 /* Its not ours */
5658 *offset = length;
5659 return (stcb);
5660 }
5661
5662 if ((ch != NULL) && (stcb != NULL) && (netp != NULL) && (*netp != NULL)) {
5663 if (stcb->asoc.pktdrop_supported == 0) {
5664 goto unknown_chunk;
5665 }
5666 sctp_handle_packet_dropped((struct sctp_pktdrop_chunk *)ch,
5667 stcb, *netp,
5668 min(chk_length, contiguous));
5669 }
5670 break;
5671 case SCTP_AUTHENTICATION:
5672 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_AUTHENTICATION\n");
5673 if (stcb == NULL) {
5674 /* save the first AUTH for later processing */
5675 if (auth_skipped == 0) {
5676 auth_offset = *offset;
5677 auth_len = chk_length;
5678 auth_skipped = 1;
5679 }
5680 /* skip this chunk (temporarily) */
5681 goto next_chunk;
5682 }
5683 if (stcb->asoc.auth_supported == 0) {
5684 goto unknown_chunk;
5685 }
5686 if ((chk_length < (sizeof(struct sctp_auth_chunk))) ||
5687 (chk_length > (sizeof(struct sctp_auth_chunk) +
5688 SCTP_AUTH_DIGEST_LEN_MAX))) {
5689 /* Its not ours */
5690 *offset = length;
5691 return (stcb);
5692 }
5693 if (got_auth == 1) {
5694 /* skip this chunk... it's already auth'd */
5695 goto next_chunk;
5696 }
5697 got_auth = 1;
5698 if ((ch == NULL) || sctp_handle_auth(stcb, (struct sctp_auth_chunk *)ch,
5699 m, *offset)) {
5700 /* auth HMAC failed so dump the packet */
5701 *offset = length;
5702 return (stcb);
5703 } else {
5704 /* remaining chunks are HMAC checked */
5705 stcb->asoc.authenticated = 1;
5706 }
5707 break;
5708
5709 default:
5710 unknown_chunk:
5711 /* it's an unknown chunk! */
5712 if ((ch->chunk_type & 0x40) && (stcb != NULL)) {
5713 struct sctp_gen_error_cause *cause;
5714 int len;
5715
5716 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
5717 0, M_NOWAIT, 1, MT_DATA);
5718 if (op_err != NULL) {
5719 len = min(SCTP_SIZE32(chk_length), (uint32_t)(length - *offset));
5720 cause = mtod(op_err, struct sctp_gen_error_cause *);
5721 cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
5722 cause->length = htons((uint16_t)(len + sizeof(struct sctp_gen_error_cause)));
5723 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
5724 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, len, M_NOWAIT);
5725 if (SCTP_BUF_NEXT(op_err) != NULL) {
5726 #ifdef SCTP_MBUF_LOGGING
5727 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
5728 sctp_log_mbc(SCTP_BUF_NEXT(op_err), SCTP_MBUF_ICOPY);
5729 }
5730 #endif
5731 sctp_queue_op_err(stcb, op_err);
5732 } else {
5733 sctp_m_freem(op_err);
5734 }
5735 }
5736 }
5737 if ((ch->chunk_type & 0x80) == 0) {
5738 /* discard this packet */
5739 *offset = length;
5740 return (stcb);
5741 } /* else skip this bad chunk and continue... */
5742 break;
5743 } /* switch (ch->chunk_type) */
5744
5745
5746 next_chunk:
5747 /* get the next chunk */
5748 *offset += SCTP_SIZE32(chk_length);
5749 if (*offset >= length) {
5750 /* no more data left in the mbuf chain */
5751 break;
5752 }
5753 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
5754 sizeof(struct sctp_chunkhdr), chunk_buf);
5755 if (ch == NULL) {
5756 *offset = length;
5757 return (stcb);
5758 }
5759 } /* while */
5760
5761 if ((asconf_cnt > 0) && (stcb != NULL)) {
5762 sctp_send_asconf_ack(stcb);
5763 }
5764 return (stcb);
5765 }
5766
5767
5768 /*
5769 * common input chunk processing (v4 and v6)
5770 */
/*
 * Shared input path for IPv4 and IPv6 SCTP packets.
 *
 * Validates the CRC32c checksum (when compute_crc is non-zero), looks up
 * the endpoint/association for the packet, dispatches the control portion
 * and the DATA portion to their respective handlers, and finally kicks
 * chunk output if anything is queued.  The mbuf chain is not freed here;
 * the caller owns *mm.  Any TCB lock acquired by the lookup or handed back
 * by the handlers is released on the way out, and an inp reference taken
 * by a lookup that found no TCB is dropped via inp_decr.
 */
void
sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset, int length,
                             struct sockaddr *src, struct sockaddr *dst,
                             struct sctphdr *sh, struct sctp_chunkhdr *ch,
                             uint8_t compute_crc,
                             uint8_t ecn_bits,
#if defined(__FreeBSD__) && !defined(__Userspace__)
                             uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
#endif
                             uint32_t vrf_id, uint16_t port)
{
	uint32_t high_tsn;
	int fwd_tsn_seen = 0, data_processed = 0;
	struct mbuf *m = *mm, *op_err;
	char msg[SCTP_DIAG_INFO_LEN];
	int un_sent;
	int cnt_ctrl_ready = 0;
	struct sctp_inpcb *inp = NULL, *inp_decr = NULL;
	struct sctp_tcb *stcb = NULL;
	struct sctp_nets *net = NULL;
#if defined(__Userspace__)
	struct socket *upcall_socket = NULL;
#endif

	SCTP_STAT_INCR(sctps_recvdatagrams);
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xE0, 1);
	sctp_auditing(0, inp, stcb, net);
#endif
	if (compute_crc != 0) {
		uint32_t check, calc_check;

		/*
		 * Compute the CRC32c over the packet with the checksum
		 * field zeroed, then restore the on-wire value for
		 * comparison.
		 */
		check = sh->checksum;
		sh->checksum = 0;
		calc_check = sctp_calculate_cksum(m, iphlen);
		sh->checksum = check;
		if (calc_check != check) {
			SCTPDBG(SCTP_DEBUG_INPUT1, "Bad CSUM on SCTP packet calc_check:%x check:%x m:%p mlen:%d iphlen:%d\n",
			        calc_check, check, (void *)m, length, iphlen);
			/*
			 * Even on a bad checksum, look the association up so
			 * we can report the dropped packet to the peer.
			 */
			stcb = sctp_findassociation_addr(m, offset, src, dst,
			                                 sh, ch, &inp, &net, vrf_id);
#if defined(INET) || defined(INET6)
			/*
			 * Track a change in UDP encapsulation state for this
			 * path (RFC 6951 style): adjust the path MTU by the
			 * UDP header size and remember the new remote port.
			 */
			if ((ch->chunk_type != SCTP_INITIATION) &&
			    (net != NULL) && (net->port != port)) {
				if (net->port == 0) {
					/* UDP encapsulation turned on. */
					net->mtu -= sizeof(struct udphdr);
					/*
					 * NOTE(review): stcb is dereferenced
					 * here with only net checked for NULL;
					 * presumably a non-NULL net implies a
					 * non-NULL stcb -- confirm.
					 */
					if (stcb->asoc.smallest_mtu > net->mtu) {
						sctp_pathmtu_adjustment(stcb, net->mtu);
					}
				} else if (port == 0) {
					/* UDP encapsulation turned off. */
					net->mtu += sizeof(struct udphdr);
					/* XXX Update smallest_mtu */
				}
				net->port = port;
			}
#endif
#if defined(__FreeBSD__) && !defined(__Userspace__)
			if (net != NULL) {
				net->flowtype = mflowtype;
				net->flowid = mflowid;
			}
			SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh);
#endif
			if ((inp != NULL) && (stcb != NULL)) {
				/* Tell the peer we dropped its packet. */
				sctp_send_packet_dropped(stcb, net, m, length, iphlen, 1);
				sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR, SCTP_SO_NOT_LOCKED);
			} else if ((inp != NULL) && (stcb == NULL)) {
				/* Lookup took an inp ref; release it at out. */
				inp_decr = inp;
			}
			SCTP_STAT_INCR(sctps_badsum);
			SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors);
			goto out;
		}
	}
	/* Destination port of 0 is illegal, based on RFC4960. */
	if (sh->dest_port == 0) {
		SCTP_STAT_INCR(sctps_hdrops);
		goto out;
	}
	stcb = sctp_findassociation_addr(m, offset, src, dst,
	                                 sh, ch, &inp, &net, vrf_id);
#if defined(INET) || defined(INET6)
	/* Same UDP-encapsulation state tracking as the bad-CSUM path. */
	if ((ch->chunk_type != SCTP_INITIATION) &&
	    (net != NULL) && (net->port != port)) {
		if (net->port == 0) {
			/* UDP encapsulation turned on. */
			net->mtu -= sizeof(struct udphdr);
			/*
			 * NOTE(review): stcb dereferenced with only net
			 * NULL-checked -- confirm net != NULL implies
			 * stcb != NULL here.
			 */
			if (stcb->asoc.smallest_mtu > net->mtu) {
				sctp_pathmtu_adjustment(stcb, net->mtu);
			}
		} else if (port == 0) {
			/* UDP encapsulation turned off. */
			net->mtu += sizeof(struct udphdr);
			/* XXX Update smallest_mtu */
		}
		net->port = port;
	}
#endif
#if defined(__FreeBSD__) && !defined(__Userspace__)
	if (net != NULL) {
		net->flowtype = mflowtype;
		net->flowid = mflowid;
	}
#endif
	if (inp == NULL) {
		/* No endpoint owns this port: handle as out-of-the-blue. */
#if defined(__FreeBSD__) && !defined(__Userspace__)
		SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh);
#endif
		SCTP_STAT_INCR(sctps_noport);
#if defined(__FreeBSD__) && !defined(__Userspace__)
		if (badport_bandlim(BANDLIM_SCTP_OOTB) < 0) {
			goto out;
		}
#endif
		if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
			/* OOTB SHUTDOWN-ACK gets a SHUTDOWN-COMPLETE reply. */
			sctp_send_shutdown_complete2(src, dst, sh,
#if defined(__FreeBSD__) && !defined(__Userspace__)
			                             mflowtype, mflowid, fibnum,
#endif
			                             vrf_id, port);
			goto out;
		}
		if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) {
			goto out;
		}
		if (ch->chunk_type != SCTP_ABORT_ASSOCIATION) {
			/*
			 * Respond with an ABORT unless the blackhole sysctl
			 * suppresses it (1 suppresses all but INIT, higher
			 * values suppress everything).
			 */
			if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
			    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
			     (ch->chunk_type != SCTP_INIT))) {
				op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
				                             "Out of the blue");
				sctp_send_abort(m, iphlen, src, dst,
				                sh, 0, op_err,
#if defined(__FreeBSD__) && !defined(__Userspace__)
				                mflowtype, mflowid, fibnum,
#endif
				                vrf_id, port);
			}
		}
		goto out;
	} else if (stcb == NULL) {
		/* Endpoint found but no association: drop the inp ref later. */
		inp_decr = inp;
	}
	SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d offset:%d length:%d stcb:%p\n",
	        (void *)m, iphlen, offset, length, (void *)stcb);
	if (stcb) {
		/* always clear this before beginning a packet */
		stcb->asoc.authenticated = 0;
		stcb->asoc.seen_a_sack_this_pkt = 0;
		SCTPDBG(SCTP_DEBUG_INPUT1, "stcb:%p state:%x\n",
		        (void *)stcb, stcb->asoc.state);

		if ((stcb->asoc.state & SCTP_STATE_WAS_ABORTED) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
			/*-
			 * If we hit here, we had a ref count
			 * up when the assoc was aborted and the
			 * timer is clearing out the assoc, we should
			 * NOT respond to any packet.. its OOTB.
			 */
			SCTP_TCB_UNLOCK(stcb);
			stcb = NULL;
#if defined(__FreeBSD__) && !defined(__Userspace__)
			SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh);
#endif
			SCTP_SNPRINTF(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
			                             msg);
			sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err,
#if defined(__FreeBSD__) && !defined(__Userspace__)
			                 mflowtype, mflowid, inp->fibnum,
#endif
			                 vrf_id, port);
			goto out;
		}
	}
#if defined(__Userspace__)
	/*
	 * Take a reference on the socket that should receive the read/write
	 * upcall (the listening socket if one exists), released at out.
	 */
	if ((stcb != NULL) &&
	    !(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) &&
	    (stcb->sctp_socket != NULL)) {
		if (stcb->sctp_socket->so_head != NULL) {
			upcall_socket = stcb->sctp_socket->so_head;
		} else {
			upcall_socket = stcb->sctp_socket;
		}
		SOCK_LOCK(upcall_socket);
		soref(upcall_socket);
		SOCK_UNLOCK(upcall_socket);
	}
#endif
	if (IS_SCTP_CONTROL(ch)) {
		/* process the control portion of the SCTP packet */
		/* sa_ignore NO_NULL_CHK */
		stcb = sctp_process_control(m, iphlen, &offset, length,
		                            src, dst, sh, ch,
		                            inp, stcb, &net, &fwd_tsn_seen,
#if defined(__FreeBSD__) && !defined(__Userspace__)
		                            mflowtype, mflowid, fibnum,
#endif
		                            vrf_id, port);
		if (stcb) {
			/* This covers us if the cookie-echo was there
			 * and it changes our INP.
			 */
			inp = stcb->sctp_ep;
#if defined(INET) || defined(INET6)
			/* Re-check UDP encapsulation against the (possibly
			 * new) net returned by control processing. */
			if ((ch->chunk_type != SCTP_INITIATION) &&
			    (net != NULL) && (net->port != port)) {
				if (net->port == 0) {
					/* UDP encapsulation turned on. */
					net->mtu -= sizeof(struct udphdr);
					if (stcb->asoc.smallest_mtu > net->mtu) {
						sctp_pathmtu_adjustment(stcb, net->mtu);
					}
				} else if (port == 0) {
					/* UDP encapsulation turned off. */
					net->mtu += sizeof(struct udphdr);
					/* XXX Update smallest_mtu */
				}
				net->port = port;
			}
#endif
		}
	} else {
		/*
		 * no control chunks, so pre-process DATA chunks (these
		 * checks are taken care of by control processing)
		 */

		/*
		 * if DATA only packet, and auth is required, then punt...
		 * can't have authenticated without any AUTH (control)
		 * chunks
		 */
		if ((stcb != NULL) &&
		    sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks)) {
			/* "silently" ignore */
#if defined(__FreeBSD__) && !defined(__Userspace__)
			SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh);
#endif
			SCTP_STAT_INCR(sctps_recvauthmissing);
			goto out;
		}
		if (stcb == NULL) {
			/* out of the blue DATA chunk */
#if defined(__FreeBSD__) && !defined(__Userspace__)
			SCTP_PROBE5(receive, NULL, NULL, m, NULL, sh);
#endif
			SCTP_SNPRINTF(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
			                             msg);
			sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err,
#if defined(__FreeBSD__) && !defined(__Userspace__)
			                 mflowtype, mflowid, fibnum,
#endif
			                 vrf_id, port);
			goto out;
		}
		if (stcb->asoc.my_vtag != ntohl(sh->v_tag)) {
			/* v_tag mismatch! */
#if defined(__FreeBSD__) && !defined(__Userspace__)
			SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh);
#endif
			SCTP_STAT_INCR(sctps_badvtag);
			goto out;
		}
	}

#if defined(__FreeBSD__) && !defined(__Userspace__)
	SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh);
#endif
	if (stcb == NULL) {
		/*
		 * no valid TCB for this packet, or we found it's a bad
		 * packet while processing control, or we're done with this
		 * packet (done or skip rest of data), so we drop it...
		 */
		goto out;
	}
#if defined(__Userspace__)
	/*
	 * Control processing may have established the association; take the
	 * upcall-socket reference now if we did not get one earlier.
	 */
	if ((upcall_socket == NULL) &&
	    !(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) &&
	    (stcb->sctp_socket != NULL)) {
		if (stcb->sctp_socket->so_head != NULL) {
			upcall_socket = stcb->sctp_socket->so_head;
		} else {
			upcall_socket = stcb->sctp_socket;
		}
		SOCK_LOCK(upcall_socket);
		soref(upcall_socket);
		SOCK_UNLOCK(upcall_socket);
	}
#endif

	/*
	 * DATA chunk processing
	 */
	/* plow through the data chunks while length > offset */

	/*
	 * Rest should be DATA only. Check authentication state if AUTH for
	 * DATA is required.
	 */
	if ((length > offset) &&
	    (stcb != NULL) &&
	    sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks) &&
	    !stcb->asoc.authenticated) {
		/* "silently" ignore */
		SCTP_STAT_INCR(sctps_recvauthmissing);
		SCTPDBG(SCTP_DEBUG_AUTH1,
		        "Data chunk requires AUTH, skipped\n");
		goto trigger_send;
	}
	if (length > offset) {
		int retval;

		/*
		 * First check to make sure our state is correct. We would
		 * not get here unless we really did have a tag, so we don't
		 * abort if this happens, just dump the chunk silently.
		 */
		switch (SCTP_GET_STATE(stcb)) {
		case SCTP_STATE_COOKIE_ECHOED:
			/*
			 * we consider data with valid tags in this state
			 * shows us the cookie-ack was lost. Imply it was
			 * there.
			 */
			sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, net);
			break;
		case SCTP_STATE_COOKIE_WAIT:
			/*
			 * We consider OOTB any data sent during asoc setup.
			 */
			SCTP_SNPRINTF(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
			                             msg);
			sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err,
#if defined(__FreeBSD__) && !defined(__Userspace__)
			                 mflowtype, mflowid, inp->fibnum,
#endif
			                 vrf_id, port);
			goto out;
			/*sa_ignore NOTREACHED*/
			break;
		case SCTP_STATE_EMPTY:	/* should not happen */
		case SCTP_STATE_INUSE:	/* should not happen */
		case SCTP_STATE_SHUTDOWN_RECEIVED:	/* This is a peer error */
		case SCTP_STATE_SHUTDOWN_ACK_SENT:
		default:
			goto out;
			/*sa_ignore NOTREACHED*/
			break;
		case SCTP_STATE_OPEN:
		case SCTP_STATE_SHUTDOWN_SENT:
			break;
		}
		/* plow through the data chunks while length > offset */
		retval = sctp_process_data(mm, iphlen, &offset, length,
		                           inp, stcb, net, &high_tsn);
		if (retval == 2) {
			/*
			 * The association aborted, NO UNLOCK needed since
			 * the association is destroyed.
			 */
			stcb = NULL;
			goto out;
		}
		data_processed = 1;
		/*
		 * Anything important needs to have been m_copy'ed in
		 * process_data
		 */
	}

	/* take care of ecn */
	if ((data_processed == 1) &&
	    (stcb->asoc.ecn_supported == 1) &&
	    ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS)) {
		/* Yep, we need to add a ECNE */
		sctp_send_ecn_echo(stcb, net, high_tsn);
	}

	if ((data_processed == 0) && (fwd_tsn_seen)) {
		/*
		 * A FORWARD-TSN without new data still advances the
		 * cumulative TSN, so decide whether a SACK (and whether it
		 * reports a gap) is needed now.
		 */
		int was_a_gap;
		uint32_t highest_tsn;

		if (SCTP_TSN_GT(stcb->asoc.highest_tsn_inside_nr_map, stcb->asoc.highest_tsn_inside_map)) {
			highest_tsn = stcb->asoc.highest_tsn_inside_nr_map;
		} else {
			highest_tsn = stcb->asoc.highest_tsn_inside_map;
		}
		was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
		stcb->asoc.send_sack = 1;
		sctp_sack_check(stcb, was_a_gap);
	} else if (fwd_tsn_seen) {
		stcb->asoc.send_sack = 1;
	}
	/* trigger send of any chunks in queue... */
trigger_send:
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xE0, 2);
	sctp_auditing(1, inp, stcb, net);
#endif
	SCTPDBG(SCTP_DEBUG_INPUT1,
	        "Check for chunk output prw:%d tqe:%d tf=%d\n",
	        stcb->asoc.peers_rwnd,
	        TAILQ_EMPTY(&stcb->asoc.control_send_queue),
	        stcb->asoc.total_flight);
	un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
	if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
		cnt_ctrl_ready = stcb->asoc.ctrl_queue_cnt - stcb->asoc.ecn_echo_cnt_onq;
	}
	/*
	 * Kick the output path if there are non-ECNE control chunks, a
	 * pending ASCONF or stream reset, or unsent data the peer can
	 * currently accept.
	 */
	if (!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue) ||
	    cnt_ctrl_ready ||
	    stcb->asoc.trigger_reset ||
	    ((un_sent > 0) &&
	     (stcb->asoc.peers_rwnd > 0 || stcb->asoc.total_flight == 0))) {
		SCTPDBG(SCTP_DEBUG_INPUT3, "Calling chunk OUTPUT\n");
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
		SCTPDBG(SCTP_DEBUG_INPUT3, "chunk OUTPUT returns\n");
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xE0, 3);
	sctp_auditing(2, inp, stcb, net);
#endif
out:
	if (stcb != NULL) {
		SCTP_TCB_UNLOCK(stcb);
	}
#if defined(__Userspace__)
	/* Deliver the read/write upcall, then drop the socket reference. */
	if (upcall_socket != NULL) {
		if (upcall_socket->so_upcall != NULL) {
			if (soreadable(upcall_socket) ||
			    sowriteable(upcall_socket) ||
			    upcall_socket->so_error) {
				(*upcall_socket->so_upcall)(upcall_socket, upcall_socket->so_upcallarg, M_NOWAIT);
			}
		}
		ACCEPT_LOCK();
		SOCK_LOCK(upcall_socket);
		sorele(upcall_socket);
	}
#endif
	if (inp_decr != NULL) {
		/* reduce ref-count */
		SCTP_INP_WLOCK(inp_decr);
		SCTP_INP_DECR_REF(inp_decr);
		SCTP_INP_WUNLOCK(inp_decr);
	}
	return;
}
6225
6226 #ifdef INET
6227 #if !defined(__Userspace__)
/*
 * IPv4 input entry point.  Pulls up the IP/SCTP/chunk headers, builds the
 * source and destination sockaddr_in, validates the packet length against
 * the IP header, rejects broadcast/multicast destinations, decides whether
 * a software CRC check is needed, and hands the packet to
 * sctp_common_input_processing().  'off' is the IP header length; 'port'
 * presumably carries the UDP encapsulation source port (callers pass 0
 * for plain SCTP) -- confirm against the UDP tunneling callers.  The mbuf
 * is always consumed (freed here on every path).
 */
void
sctp_input_with_port(struct mbuf *i_pak, int off, uint16_t port)
{
	struct mbuf *m;
	int iphlen;
	uint32_t vrf_id = 0;
	uint8_t ecn_bits;
	struct sockaddr_in src, dst;
	struct ip *ip;
	struct sctphdr *sh;
	struct sctp_chunkhdr *ch;
	int length, offset;
	uint8_t compute_crc;
#if defined(__FreeBSD__) && !defined(__Userspace__)
	uint32_t mflowid;
	uint8_t mflowtype;
	uint16_t fibnum;
#endif
#if defined(__Userspace__)
	/*
	 * NOTE(review): this declaration appears dead -- the whole function
	 * is compiled under !defined(__Userspace__); it would also shadow
	 * the 'port' parameter.  Confirm before removing.
	 */
	uint16_t port = 0;
#endif

	iphlen = off;
	if (SCTP_GET_PKT_VRFID(i_pak, vrf_id)) {
		SCTP_RELEASE_PKT(i_pak);
		return;
	}
	m = SCTP_HEADER_TO_CHAIN(i_pak);
#ifdef SCTP_MBUF_LOGGING
	/* Log in any input mbufs */
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		sctp_log_mbc(m, SCTP_MBUF_INPUT);
	}
#endif
#ifdef SCTP_PACKET_LOGGING
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
		sctp_packet_log(m);
	}
#endif
#if defined(__FreeBSD__) && !defined(__Userspace__)
	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
	        "sctp_input(): Packet of length %d received on %s with csum_flags 0x%b.\n",
	        m->m_pkthdr.len,
	        if_name(m->m_pkthdr.rcvif),
	        (int)m->m_pkthdr.csum_flags, CSUM_BITS);
#endif
#if defined(__APPLE__) && !defined(__Userspace__)
	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
	        "sctp_input(): Packet of length %d received on %s%d with csum_flags 0x%x.\n",
	        m->m_pkthdr.len,
	        m->m_pkthdr.rcvif->if_name,
	        m->m_pkthdr.rcvif->if_unit,
	        m->m_pkthdr.csum_flags);
#endif
#if defined(_WIN32) && !defined(__Userspace__)
	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
	        "sctp_input(): Packet of length %d received on %s with csum_flags 0x%x.\n",
	        m->m_pkthdr.len,
	        m->m_pkthdr.rcvif->if_xname,
	        m->m_pkthdr.csum_flags);
#endif
#if defined(__FreeBSD__) && !defined(__Userspace__)
	/* Capture the flow identification before the headers move. */
	mflowid = m->m_pkthdr.flowid;
	mflowtype = M_HASHTYPE_GET(m);
	fibnum = M_GETFIB(m);
#endif
	SCTP_STAT_INCR(sctps_recvpackets);
	SCTP_STAT_INCR_COUNTER64(sctps_inpackets);
	/* Get IP, SCTP, and first chunk header together in the first mbuf. */
	offset = iphlen + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
	if (SCTP_BUF_LEN(m) < offset) {
		if ((m = m_pullup(m, offset)) == NULL) {
			SCTP_STAT_INCR(sctps_hdrops);
			return;
		}
	}
	ip = mtod(m, struct ip *);
	sh = (struct sctphdr *)((caddr_t)ip + iphlen);
	ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(struct sctphdr));
	/* offset now points at the first chunk header. */
	offset -= sizeof(struct sctp_chunkhdr);
	memset(&src, 0, sizeof(struct sockaddr_in));
	src.sin_family = AF_INET;
#ifdef HAVE_SIN_LEN
	src.sin_len = sizeof(struct sockaddr_in);
#endif
	src.sin_port = sh->src_port;
	src.sin_addr = ip->ip_src;
	memset(&dst, 0, sizeof(struct sockaddr_in));
	dst.sin_family = AF_INET;
#ifdef HAVE_SIN_LEN
	dst.sin_len = sizeof(struct sockaddr_in);
#endif
	dst.sin_port = sh->dest_port;
	dst.sin_addr = ip->ip_dst;
#if defined(_WIN32) && !defined(__Userspace__)
	NTOHS(ip->ip_len);
#endif
#if defined(__linux__) || (defined(_WIN32) && defined(__Userspace__))
	ip->ip_len = ntohs(ip->ip_len);
#endif
	/*
	 * Platforms disagree on the byte order of ip_len and on whether it
	 * includes the IP header; normalize to the total packet length.
	 */
#if defined(__Userspace__)
#if defined(__linux__) || defined(_WIN32)
	length = ip->ip_len;
#else
	length = ip->ip_len + iphlen;
#endif
#elif defined(__FreeBSD__)
	length = ntohs(ip->ip_len);
#elif defined(__APPLE__)
	length = ip->ip_len + iphlen;
#else
	length = ip->ip_len;
#endif
	/* Validate mbuf chain length with IP payload length. */
	if (SCTP_HEADER_LEN(m) != length) {
		SCTPDBG(SCTP_DEBUG_INPUT1,
		        "sctp_input() length:%d reported length:%d\n", length, SCTP_HEADER_LEN(m));
		SCTP_STAT_INCR(sctps_hdrops);
		goto out;
	}
	/* SCTP does not allow broadcasts or multicasts */
	if (IN_MULTICAST(ntohl(dst.sin_addr.s_addr))) {
		goto out;
	}
	if (SCTP_IS_IT_BROADCAST(dst.sin_addr, m)) {
		goto out;
	}
	ecn_bits = ip->ip_tos;
#if defined(__FreeBSD__) && !defined(__Userspace__)
	/* Skip the software CRC when the NIC already verified it. */
	if (m->m_pkthdr.csum_flags & CSUM_SCTP_VALID) {
		SCTP_STAT_INCR(sctps_recvhwcrc);
		compute_crc = 0;
	} else {
#else
	/* Skip the software CRC for loopback traffic when so configured. */
	if (SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
	    ((src.sin_addr.s_addr == dst.sin_addr.s_addr) ||
	     (SCTP_IS_IT_LOOPBACK(m)))) {
		SCTP_STAT_INCR(sctps_recvhwcrc);
		compute_crc = 0;
	} else {
#endif
		SCTP_STAT_INCR(sctps_recvswcrc);
		compute_crc = 1;
	}
	sctp_common_input_processing(&m, iphlen, offset, length,
	                             (struct sockaddr *)&src,
	                             (struct sockaddr *)&dst,
	                             sh, ch,
	                             compute_crc,
	                             ecn_bits,
#if defined(__FreeBSD__) && !defined(__Userspace__)
	                             mflowtype, mflowid, fibnum,
#endif
	                             vrf_id, port);
out:
	if (m) {
		sctp_m_freem(m);
	}
	return;
}
6388
6389 #if defined(__FreeBSD__) && !defined(__Userspace__)
6390 #if defined(SCTP_MCORE_INPUT) && defined(SMP)
6391 extern int *sctp_cpuarry;
6392 #endif
6393 #endif
6394
#if defined(__FreeBSD__) && !defined(__Userspace__)
/*
 * Protocol-switch entry point for IPv4 SCTP.  On FreeBSD with multi-core
 * input enabled, packets are dispatched to a per-CPU worker selected by
 * flow id; otherwise the packet is processed inline with no UDP
 * encapsulation (port 0).
 */
int
sctp_input(struct mbuf **mp, int *offp, int proto SCTP_UNUSED)
{
	struct mbuf *m;
	int off;

	m = *mp;
	off = *offp;
#else
void
sctp_input(struct mbuf *m, int off)
{
#endif
#if defined(__FreeBSD__) && !defined(__Userspace__)
#if defined(SCTP_MCORE_INPUT) && defined(SMP)
	if (mp_ncpus > 1) {
		struct ip *ip;
		struct sctphdr *sh;
		int offset;
		int cpu_to_use;
		uint32_t flowid, tag;

		if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
			/* Use the flow id the NIC/driver already computed. */
			flowid = m->m_pkthdr.flowid;
		} else {
			/* No flow id built by lower layers
			 * fix it so we create one.
			 */
			offset = off + sizeof(struct sctphdr);
			if (SCTP_BUF_LEN(m) < offset) {
				if ((m = m_pullup(m, offset)) == NULL) {
					SCTP_STAT_INCR(sctps_hdrops);
					return (IPPROTO_DONE);
				}
			}
			ip = mtod(m, struct ip *);
			sh = (struct sctphdr *)((caddr_t)ip + off);
			/* Derive a flow id from the verification tag and
			 * ports so one association stays on one CPU. */
			tag = htonl(sh->v_tag);
			flowid = tag ^ ntohs(sh->dest_port) ^ ntohs(sh->src_port);
			m->m_pkthdr.flowid = flowid;
			M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE_HASH);
		}
		cpu_to_use = sctp_cpuarry[flowid % mp_ncpus];
		sctp_queue_to_mcore(m, off, cpu_to_use);
		return (IPPROTO_DONE);
	}
#endif
#endif
	sctp_input_with_port(m, off, 0);
#if defined(__FreeBSD__) && !defined(__Userspace__)
	return (IPPROTO_DONE);
#endif
}
6449 #endif
6450 #endif
6451