/* $NetBSD: sctp_indata.c,v 1.4 2016/04/25 21:21:02 rjs Exp $ */
/* $KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $ */

/*
 * Copyright (C) 2002, 2003, 2004 Cisco Systems Inc,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sctp_indata.c,v 1.4 2016/04/25 21:21:02 rjs Exp $");

#ifdef _KERNEL_OPT
#include "opt_ipsec.h"
#include "opt_inet.h"
#include "opt_sctp.h"
#endif /* _KERNEL_OPT */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/route.h>


#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
#include <sys/limits.h>
#else
#include <machine/limits.h>
#endif
#include <machine/cpu.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet6/ip6_var.h>
#endif /* INET6 */
#include <netinet/ip_icmp.h>
#include <netinet/icmp_var.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_hashdriver.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#ifdef IPSEC
#include <netipsec/ipsec.h>
#include <netipsec/key.h>
#endif /* IPSEC */

#include <net/net_osdep.h>

#ifdef SCTP_DEBUG
extern u_int32_t sctp_debug_on;
#endif

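/*
 * sctp_indata.c: handling of inbound SCTP DATA chunks, including receive
 * window (rwnd) accounting, the delivery and reassembly queues, and
 * pushing deliverable data up to the socket buffer.
 */
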
/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is, and will be sending it, for bundling).
 *
 * The callback in sctp_usrreq.c will get called when the socket is read
 * from. This will cause sctp_service_queues() to get called on the top
 * entry in the list.
 */

extern int sctp_strict_sacks;

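/*
 * Recompute the peer-advertised receive window (asoc->my_rwnd) from the
 * space left in the socket receive buffer, minus the data we still hold
 * on the delivery, reassembly and stream queues, applying the receiver
 * side silly window syndrome (SWS) avoidance threshold.
 */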
void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	u_int32_t calc, calc_w_oh;

#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_INDATA4) {
		printf("cc:%lu hiwat:%lu lowat:%lu mbcnt:%lu mbmax:%lu\n",
		    (u_long)stcb->sctp_socket->so_rcv.sb_cc,
		    (u_long)stcb->sctp_socket->so_rcv.sb_hiwat,
		    (u_long)stcb->sctp_socket->so_rcv.sb_lowat,
		    (u_long)stcb->sctp_socket->so_rcv.sb_mbcnt,
		    (u_long)stcb->sctp_socket->so_rcv.sb_mbmax);
		printf("Setting rwnd to: sb:%ld - (del:%d + reasm:%d str:%d)\n",
		    sctp_sbspace(&stcb->sctp_socket->so_rcv),
		    asoc->size_on_delivery_queue,
		    asoc->size_on_reasm_queue,
		    asoc->size_on_all_streams);
	}
#endif
	if (stcb->sctp_socket->so_rcv.sb_cc == 0 &&
	    asoc->size_on_delivery_queue == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		asoc->my_rwnd = max(stcb->sctp_socket->so_rcv.sb_hiwat,
		    SCTP_MINIMAL_RWND);
		return;
	}
	/* get actual space */
	calc = (u_int32_t)sctp_sbspace(&stcb->sctp_socket->so_rcv);

	/*
	 * take out what has NOT yet been put on the socket queue and
	 * that we still hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (u_int32_t)asoc->size_on_delivery_queue);
	calc = sctp_sbspace_sub(calc, (u_int32_t)asoc->size_on_reasm_queue);
	calc = sctp_sbspace_sub(calc, (u_int32_t)asoc->size_on_all_streams);

	/* what is the overhead of all these rwnd's */
	calc_w_oh = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);

	asoc->my_rwnd = calc;
	if (calc_w_oh == 0) {
		/*
		 * If our overhead is greater than the advertised
		 * rwnd, we clamp the rwnd to 1. This lets us
		 * still accept inbound segments, but hopefully will
		 * shut the sender down when he finally gets the message.
		 */
		asoc->my_rwnd = 1;
	} else {
		/* SWS threshold */
		if (asoc->my_rwnd &&
		    (asoc->my_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_receiver)) {
			/* SWS engaged, tell peer none left */
			asoc->my_rwnd = 1;
#ifdef SCTP_DEBUG
			if (sctp_debug_on & SCTP_DEBUG_INDATA4) {
				printf(" - SWS zeros\n");
			}
		} else {
			if (sctp_debug_on & SCTP_DEBUG_INDATA4) {
				printf("\n");
			}
#endif
		}
	}
}

/*
 * Take a chk structure and build it into an mbuf. Hmm, should we change
 * things so that instead we store the data side in a chunk?
 */
static struct mbuf *
sctp_build_ctl_nchunk(struct sctp_tcb *stcb, uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no, uint16_t stream_seq, uint8_t flags)
{
	struct sctp_sndrcvinfo *outinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;

	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVDATAIOEVNT) == 0) {
		/* user does not want the sndrcv ctl */
		return (NULL);
	}

	MGETHDR(ret, M_DONTWAIT, MT_CONTROL);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
	cmh->cmsg_level = IPPROTO_SCTP;
	cmh->cmsg_type = SCTP_SNDRCV;
	cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	outinfo->sinfo_stream = stream_no;
	outinfo->sinfo_ssn = stream_seq;
	if (flags & SCTP_DATA_UNORDERED) {
		outinfo->sinfo_flags = MSG_UNORDERED;
	} else {
		outinfo->sinfo_flags = 0;
	}
	outinfo->sinfo_ppid = ppid;
	outinfo->sinfo_context = context;
	outinfo->sinfo_assoc_id = sctp_get_associd(stcb);
	outinfo->sinfo_tsn = tsn;
	outinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
	ret->m_len = cmh->cmsg_len;
	ret->m_pkthdr.len = ret->m_len;
	/*
	 * We track how many control bytes have gone up on the sb
	 * and do not count these in the rwnd calculation.
	 */
	stcb->asoc.my_rwnd_control_len +=
	    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));

	return (ret);
}

/*
 * Take a chk structure and build it into an mbuf. Should we change things
 * so that instead we store the data side in a chunk?
 */
static
struct mbuf *
sctp_build_ctl(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk)
{
	struct sctp_sndrcvinfo *outinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;

	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVDATAIOEVNT) == 0) {
		/* user does not want the sndrcv ctl */
		return (NULL);
	}
	MGET(ret, M_DONTWAIT, MT_CONTROL);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}

	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
	cmh->cmsg_level = IPPROTO_SCTP;
	cmh->cmsg_type = SCTP_SNDRCV;
	cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	outinfo->sinfo_stream = chk->rec.data.stream_number;
	outinfo->sinfo_ssn = chk->rec.data.stream_seq;
	if (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
		outinfo->sinfo_flags = MSG_UNORDERED;
	} else {
		outinfo->sinfo_flags = 0;
	}
	outinfo->sinfo_ppid = chk->rec.data.payloadtype;
	outinfo->sinfo_context = chk->rec.data.context;
	outinfo->sinfo_assoc_id = sctp_get_associd(stcb);
	outinfo->sinfo_tsn = chk->rec.data.TSN_seq;
	outinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
	ret->m_len = cmh->cmsg_len;
	stcb->asoc.my_rwnd_control_len +=
	    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));

	return (ret);
}

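/*
 * Queue a chunk on the delivery queue (if one is given) and try to push
 * the head of that queue up to the socket buffer, building the sndrcv
 * control message for the first fragment of a message. Returns 1 if a
 * chunk was handed to the socket and freed, 0 otherwise. When hold_locks
 * is set the caller already holds the inp write lock.
 */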
int
sctp_deliver_data(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int hold_locks)
{
	struct mbuf *control, *m;
	int free_it;
	struct sockaddr_in6 sin6;
	const struct sockaddr *to;

#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
		printf("I am now in Deliver data! (%p)\n", chk);
	}
#endif
	/* get a write lock on the inp if not already */
	if (hold_locks == 0) {
		SCTP_TCB_UNLOCK(stcb);
		SCTP_INP_WLOCK(stcb->sctp_ep);
		SCTP_TCB_LOCK(stcb);
	}
	free_it = 0;
	/* We always add it to the queue */
	if (stcb && (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		/* socket above is long gone */
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			printf("gone is gone!\n");
		}
#endif
		if (chk != NULL) {
			if (chk->data)
				sctp_m_freem(chk->data);
			chk->data = NULL;
			sctp_free_remote_addr(chk->whoTo);
			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
			sctppcbinfo.ipi_count_chunk--;
			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
				panic("Chunk count is negative");
			}
			sctppcbinfo.ipi_gencnt_chunk++;
		}
		TAILQ_FOREACH(chk, &asoc->delivery_queue, sctp_next) {
			asoc->size_on_delivery_queue -= chk->send_size;
			asoc->cnt_on_delivery_queue--;
			/*
			 * Lose the data pointer, since it's in the socket
			 * buffer
			 */
			if (chk->data)
				sctp_m_freem(chk->data);
			chk->data = NULL;
			/* Now free the address and data */
			sctp_free_remote_addr(chk->whoTo);
			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
			sctppcbinfo.ipi_count_chunk--;
			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
				panic("Chunk count is negative");
			}
			sctppcbinfo.ipi_gencnt_chunk++;
		}
		if (hold_locks == 0) {
			SCTP_INP_WUNLOCK(stcb->sctp_ep);
		}
		return (0);
	}
	if (chk != NULL) {
		TAILQ_INSERT_TAIL(&asoc->delivery_queue, chk, sctp_next);
		asoc->size_on_delivery_queue += chk->send_size;
		asoc->cnt_on_delivery_queue++;
	}
	if (asoc->fragmented_delivery_inprogress) {
		/*
		 * oh oh, fragmented delivery in progress,
		 * return out of here.
		 */
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			printf("Fragmented delivery in progress?\n");
		}
#endif
		if (hold_locks == 0) {
			SCTP_INP_WUNLOCK(stcb->sctp_ep);
		}
		return (0);
	}
	/* Now grab the first one */
	chk = TAILQ_FIRST(&asoc->delivery_queue);
	if (chk == NULL) {
		/* Nothing in queue */
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			printf("Nothing in queue?\n");
		}
#endif
		asoc->size_on_delivery_queue = 0;
		asoc->cnt_on_delivery_queue = 0;
		if (hold_locks == 0) {
			SCTP_INP_WUNLOCK(stcb->sctp_ep);
		}
		return (0);
	}

	if (stcb->sctp_socket->so_rcv.sb_cc >= stcb->sctp_socket->so_rcv.sb_hiwat) {
		/* Boy, there really is NO room */
		if (hold_locks == 0) {
			SCTP_INP_WUNLOCK(stcb->sctp_ep);
		}
		return (0);
	}
#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
		printf("Now to the delivery with chk(%p)!\n", chk);
	}
#endif
	/* XXX need to append PKTHDR to the socket buffer first */
	if ((chk->data->m_flags & M_PKTHDR) == 0) {
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			/* no room! */
			if (hold_locks == 0) {
				SCTP_INP_WUNLOCK(stcb->sctp_ep);
			}
			return (0);
		}
		m->m_pkthdr.len = chk->send_size;
		m->m_len = 0;
		m->m_next = chk->data;
		chk->data = m;
	}
	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
		if (chk->data->m_next == NULL) {
			/* hopefully we hit here most of the time */
			chk->data->m_flags |= M_EOR;
		} else {
			/* Add the flag to the LAST mbuf in the chain */
			m = chk->data;
			while (m->m_next != NULL) {
				m = m->m_next;
			}
			m->m_flags |= M_EOR;
		}
	}

	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		struct sockaddr_in6 lsa6;

		control = sctp_build_ctl(stcb, chk);
		to = rtcache_getdst(&chk->whoTo->ro);
		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
		    to->sa_family == AF_INET) {
			const struct sockaddr_in *sin;

			sin = (const struct sockaddr_in *)to;
			in6_sin_2_v4mapsin6(sin, &sin6);
			to = (struct sockaddr *)&sin6;
		}
		/* check and strip embedded scope junk */
		to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
		    &lsa6);
		if (((const struct sockaddr_in *)to)->sin_port == 0) {
			printf("Huh a, port is %d not net:%p %d?\n",
			    ((const struct sockaddr_in *)to)->sin_port,
			    chk->whoTo,
			    (int)(ntohs(stcb->rport)));
			/*((struct sockaddr_in *)to)->sin_port = stcb->rport;*/
			/* XXX */
		}
		if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < (long)chk->send_size) {
			/* Gak, not enough room */
			if (control) {
				sctp_m_freem(control);
				stcb->asoc.my_rwnd_control_len -=
				    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			}
			goto skip;
		}
		if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv,
		    to, chk->data, control, stcb->asoc.my_vtag,
		    stcb->sctp_ep)) {
			/* Gak, not enough room */
			if (control) {
				sctp_m_freem(control);
				stcb->asoc.my_rwnd_control_len -=
				    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			}
		} else {
			if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) {
				if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
					stcb->asoc.my_rwnd_control_len +=
					    sizeof(struct mbuf);
				}
			} else {
				stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
			}
			free_it = 1;
		}
	} else {
		/* append to an already started message. */
		if (sctp_sbspace(&stcb->sctp_socket->so_rcv) >=
		    (long)chk->send_size) {
			sbappend(&stcb->sctp_socket->so_rcv, chk->data);
			free_it = 1;
		}
	}
skip:
	if (hold_locks == 0) {
		SCTP_INP_WUNLOCK(stcb->sctp_ep);
	}
	/* free up the one we inserted */
	if (free_it) {
		/* Pull it off the queue */
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			printf("Free_it true, doing tickle wakeup\n");
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
		TAILQ_REMOVE(&asoc->delivery_queue, chk, sctp_next);
		asoc->size_on_delivery_queue -= chk->send_size;
		asoc->cnt_on_delivery_queue--;
		/* Lose the data pointer, since it's in the socket buffer */
		chk->data = NULL;
		/* Now free the address and data */
		sctp_free_remote_addr(chk->whoTo);
		SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
		sctppcbinfo.ipi_count_chunk--;
		if ((int)sctppcbinfo.ipi_count_chunk < 0) {
			panic("Chunk count is negative");
		}
		sctppcbinfo.ipi_gencnt_chunk++;
	}
	return (free_it);
}

/*
 * We are delivering currently from the reassembly queue. We must continue to
 * deliver until we either:
 * 1) run out of space.
 * 2) run out of sequential TSN's
 * 3) hit the SCTP_DATA_LAST_FRAG flag.
 */
static void
sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc,
    int hold_locks)
{
	const struct sockaddr *to;
	struct sockaddr_in6 sin6;
	struct sctp_tmit_chunk *chk, *at;
	struct mbuf *control, *m;
	u_int16_t nxt_todel;
	u_int16_t stream_no;
	int cntDel;

	cntDel = stream_no = 0;
	if (hold_locks == 0) {
		/*
		 * you always have the TCB lock, we need
		 * to have the inp write lock as well.
		 */
		SCTP_TCB_UNLOCK(stcb);
		SCTP_INP_WLOCK(stcb->sctp_ep);
		SCTP_TCB_LOCK(stcb);
	}
	if (stcb && (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		/* socket above is long gone */
		asoc->fragmented_delivery_inprogress = 0;
		TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
			asoc->size_on_delivery_queue -= chk->send_size;
			asoc->cnt_on_delivery_queue--;
			/*
			 * Lose the data pointer, since it's in the socket
			 * buffer
			 */
			if (chk->data)
				sctp_m_freem(chk->data);
			chk->data = NULL;
			/* Now free the address and data */
			sctp_free_remote_addr(chk->whoTo);
			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
			sctppcbinfo.ipi_count_chunk--;
			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
				panic("Chunk count is negative");
			}
			sctppcbinfo.ipi_gencnt_chunk++;
		}
		if (hold_locks == 0) {
			SCTP_INP_WUNLOCK(stcb->sctp_ep);
		}
		return;
	}
	do {
		if (stcb->sctp_socket->so_rcv.sb_cc >=
		    stcb->sctp_socket->so_rcv.sb_hiwat) {
			if (cntDel) {
				sctp_sorwakeup(stcb->sctp_ep,
				    stcb->sctp_socket);
			}
			if (hold_locks == 0) {
				SCTP_INP_WUNLOCK(stcb->sctp_ep);
			}
			return;
		}
		chk = TAILQ_FIRST(&asoc->reasmqueue);
		if (chk == NULL) {
			if (cntDel) {
				sctp_sorwakeup(stcb->sctp_ep,
				    stcb->sctp_socket);
			}
			if (hold_locks == 0) {
				SCTP_INP_WUNLOCK(stcb->sctp_ep);
			}
			return;
		}
		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
			/* Can't deliver more :< */
			if (cntDel) {
				sctp_sorwakeup(stcb->sctp_ep,
				    stcb->sctp_socket);
			}
			if (hold_locks == 0) {
				SCTP_INP_WUNLOCK(stcb->sctp_ep);
			}
			return;
		}
		stream_no = chk->rec.data.stream_number;
		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
		if (nxt_todel != chk->rec.data.stream_seq &&
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
			/*
			 * Not the next sequence to deliver in its stream OR
			 * unordered
			 */
			if (cntDel) {
				sctp_sorwakeup(stcb->sctp_ep,
				    stcb->sctp_socket);
			}
			if (hold_locks == 0) {
				SCTP_INP_WUNLOCK(stcb->sctp_ep);
			}
			return;
		}

		if ((chk->data->m_flags & M_PKTHDR) == 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				/* no room! */
				if (hold_locks == 0) {
					SCTP_INP_WUNLOCK(stcb->sctp_ep);
				}
				return;
			}
			m->m_pkthdr.len = chk->send_size;
			m->m_len = 0;
			m->m_next = chk->data;
			chk->data = m;
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			if (chk->data->m_next == NULL) {
				/* hopefully we hit here most of the time */
				chk->data->m_flags |= M_EOR;
			} else {
				/* Add the flag to the LAST mbuf in the chain */
				m = chk->data;
				while (m->m_next != NULL) {
					m = m->m_next;
				}
				m->m_flags |= M_EOR;
			}
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			struct sockaddr_in6 lsa6;

			control = sctp_build_ctl(stcb, chk);
			to = rtcache_getdst(&chk->whoTo->ro);
			if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
			    to->sa_family == AF_INET) {
				const struct sockaddr_in *sin;

				sin = satocsin(to);
				in6_sin_2_v4mapsin6(sin, &sin6);
				to = (struct sockaddr *)&sin6;
			}
			/* check and strip embedded scope junk */
			to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
			    &lsa6);
			if (((const struct sockaddr_in *)to)->sin_port == 0) {
				printf("Huh b, port is %d not net:%p %d?\n",
				    ((const struct sockaddr_in *)to)->sin_port,
				    chk->whoTo,
				    (int)(ntohs(stcb->rport)));
				/*((struct sockaddr_in *)to)->sin_port = stcb->rport;*/
				/* XXX */
			}
			if (sctp_sbspace(&stcb->sctp_socket->so_rcv) <
			    (long)chk->send_size) {
				if (control) {
					sctp_m_freem(control);
					stcb->asoc.my_rwnd_control_len -=
					    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
				}
				sctp_sorwakeup(stcb->sctp_ep,
				    stcb->sctp_socket);
				if (hold_locks == 0) {
					SCTP_INP_WUNLOCK(stcb->sctp_ep);
				}
				return;
			}
			if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv,
			    to, chk->data, control, stcb->asoc.my_vtag,
			    stcb->sctp_ep)) {
				/* Gak, not enough room */
				if (control) {
					sctp_m_freem(control);
					stcb->asoc.my_rwnd_control_len -=
					    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
				}
				sctp_sorwakeup(stcb->sctp_ep,
				    stcb->sctp_socket);
				if (hold_locks == 0) {
					SCTP_INP_WUNLOCK(stcb->sctp_ep);
				}
				return;
			}
			if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) {
				if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
					stcb->asoc.my_rwnd_control_len +=
					    sizeof(struct mbuf);
				}
			} else {
				stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
			}
			cntDel++;
		} else {
			if (sctp_sbspace(&stcb->sctp_socket->so_rcv) >=
			    (long)chk->send_size) {
				sbappend(&stcb->sctp_socket->so_rcv, chk->data);
				cntDel++;
			} else {
				/* out of space in the sb */
				sctp_sorwakeup(stcb->sctp_ep,
				    stcb->sctp_socket);
				if (hold_locks == 0) {
					SCTP_INP_WUNLOCK(stcb->sctp_ep);
				}
				return;
			}
		}
		/* pull it off the queue; we delivered it */
		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			asoc->fragmented_delivery_inprogress = 0;
			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
				asoc->strmin[stream_no].last_sequence_delivered++;
			}
		}
		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
		asoc->size_on_reasm_queue -= chk->send_size;
		asoc->cnt_on_reasm_queue--;
		/* free up the chk */
		sctp_free_remote_addr(chk->whoTo);
		chk->data = NULL;
		SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
		sctppcbinfo.ipi_count_chunk--;
		if ((int)sctppcbinfo.ipi_count_chunk < 0) {
			panic("Chunk count is negative");
		}
		sctppcbinfo.ipi_gencnt_chunk++;
		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * Now let's see if we can deliver the next one on
			 * the stream
			 */
			/*u_int16_t nxt_todel;*/
			struct sctp_stream_in *strm;

			strm = &asoc->strmin[stream_no];
			nxt_todel = strm->last_sequence_delivered + 1;
			chk = TAILQ_FIRST(&strm->inqueue);
			if (chk && (nxt_todel == chk->rec.data.stream_seq)) {
				while (chk != NULL) {
					/* all delivered */
					if (nxt_todel ==
					    chk->rec.data.stream_seq) {
						at = TAILQ_NEXT(chk, sctp_next);
						TAILQ_REMOVE(&strm->inqueue,
						    chk, sctp_next);
						asoc->size_on_all_streams -=
						    chk->send_size;
						asoc->cnt_on_all_streams--;
						strm->last_sequence_delivered++;
						/*
						 * We ignore the return of
						 * deliver_data here since we
						 * always can hold the chunk on
						 * the d-queue. And we have a
						 * finite number that can be
						 * delivered from the strq.
						 */
						sctp_deliver_data(stcb, asoc, chk, 1);
						chk = at;
					} else {
						break;
					}
					nxt_todel =
					    strm->last_sequence_delivered + 1;
				}
			}
			if (!TAILQ_EMPTY(&asoc->delivery_queue)) {
				/* Here if deliver_data fails, we must break */
				if (sctp_deliver_data(stcb, asoc, NULL, 1) == 0)
					break;
			}
			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
			if (hold_locks == 0) {
				SCTP_INP_WUNLOCK(stcb->sctp_ep);
			}
			return;
		}
		chk = TAILQ_FIRST(&asoc->reasmqueue);
	} while (chk);
	if (cntDel) {
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
	}
	if (hold_locks == 0) {
		SCTP_INP_WUNLOCK(stcb->sctp_ep);
	}
}

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue. If we do
 * append to the so_buf, keep doing so until we are out of order.
 * One big question still remains, what to do when the socket buffer is FULL??
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
	struct sctp_stream_in *strm;
	struct sctp_tmit_chunk *at;
	int queue_needed;
	u_int16_t nxt_todel;
	struct mbuf *oper;

	/*** FIX FIX FIX ???
	 * Need to add code to deal with 16 bit seq wrap
	 * without a TSN wrap for ordered delivery (maybe).
	 * FIX FIX FIX ???
	 */
	queue_needed = 1;
	asoc->size_on_all_streams += chk->send_size;
	asoc->cnt_on_all_streams++;
	strm = &asoc->strmin[chk->rec.data.stream_number];
	nxt_todel = strm->last_sequence_delivered + 1;
#ifdef SCTP_STR_LOGGING
	sctp_log_strm_del(chk, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
#endif
#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
		printf("queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
		    (u_int)chk->rec.data.stream_seq,
		    (u_int)strm->last_sequence_delivered, (u_int)nxt_todel);
	}
#endif
	if (compare_with_wrap(strm->last_sequence_delivered,
	    chk->rec.data.stream_seq, MAX_SEQ) ||
	    (strm->last_sequence_delivered == chk->rec.data.stream_seq)) {
		/* The incoming sseq is behind where we last delivered? */
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			printf("Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
			    chk->rec.data.stream_seq,
			    strm->last_sequence_delivered);
		}
#endif
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, chk, sctp_next);
		MGET(oper, M_DONTWAIT, MT_DATA);
		if (oper) {
			struct sctp_paramhdr *ph;
			u_int32_t *ippp;

			oper->m_len = sizeof(struct sctp_paramhdr) +
			    sizeof(*ippp);
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(oper->m_len);
			ippp = (u_int32_t *)(ph + 1);
			*ippp = htonl(0x00000001);
		}
		sctp_abort_an_association(stcb->sctp_ep, stcb,
		    SCTP_PEER_FAULTY, oper);

		*abort_flag = 1;
		return;

	}
	if (nxt_todel == chk->rec.data.stream_seq) {
		/* can be delivered right away */
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			printf("It's NEXT!\n");
		}
#endif
#ifdef SCTP_STR_LOGGING
		sctp_log_strm_del(chk, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
#endif
		queue_needed = 0;
		asoc->size_on_all_streams -= chk->send_size;
		asoc->cnt_on_all_streams--;
		strm->last_sequence_delivered++;
		sctp_deliver_data(stcb, asoc, chk, 0);
		chk = TAILQ_FIRST(&strm->inqueue);
		while (chk != NULL) {
			/* all delivered */
			nxt_todel = strm->last_sequence_delivered + 1;
			if (nxt_todel == chk->rec.data.stream_seq) {
				at = TAILQ_NEXT(chk, sctp_next);
				TAILQ_REMOVE(&strm->inqueue, chk, sctp_next);
				asoc->size_on_all_streams -= chk->send_size;
				asoc->cnt_on_all_streams--;
				strm->last_sequence_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
#ifdef SCTP_STR_LOGGING
				sctp_log_strm_del(chk, NULL,
				    SCTP_STR_LOG_FROM_IMMED_DEL);
#endif
				sctp_deliver_data(stcb, asoc, chk, 0);
				chk = at;
				continue;
			}
			break;
		}
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find
		 * the correct place to put it on the queue.
		 */
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			printf("Queue Needed!\n");
		}
#endif
		if (TAILQ_EMPTY(&strm->inqueue)) {
			/* Empty queue */
#ifdef SCTP_STR_LOGGING
			sctp_log_strm_del(chk, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
#endif
			TAILQ_INSERT_HEAD(&strm->inqueue, chk, sctp_next);
		} else {
			TAILQ_FOREACH(at, &strm->inqueue, sctp_next) {
				if (compare_with_wrap(at->rec.data.stream_seq,
				    chk->rec.data.stream_seq, MAX_SEQ)) {
					/*
					 * one in queue is bigger than the new
					 * one, insert before this one
					 */
#ifdef SCTP_STR_LOGGING
					sctp_log_strm_del(chk, at,
					    SCTP_STR_LOG_FROM_INSERT_MD);
#endif
					TAILQ_INSERT_BEFORE(at, chk, sctp_next);
					break;
				} else if (at->rec.data.stream_seq ==
				    chk->rec.data.stream_seq) {
					/*
					 * Gak, He sent me a duplicate str seq
					 * number
					 */
					/*
					 * foo bar, I guess I will just free
					 * this new guy, should we abort too?
					 * FIX ME MAYBE? Or it COULD be that
					 * the SSN's have wrapped. Maybe I
					 * should compare to TSN somehow...
					 * sigh for now just blow away the
					 * chunk!
					 */

					if (chk->data)
						sctp_m_freem(chk->data);
					chk->data = NULL;
					asoc->size_on_all_streams -= chk->send_size;
					asoc->cnt_on_all_streams--;
					sctp_pegs[SCTP_DUP_SSN_RCVD]++;
					sctp_free_remote_addr(chk->whoTo);
					SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
					sctppcbinfo.ipi_count_chunk--;
					if ((int)sctppcbinfo.ipi_count_chunk <
					    0) {
						panic("Chunk count is negative");
					}
					sctppcbinfo.ipi_gencnt_chunk++;
					return;
				} else {
					if (TAILQ_NEXT(at, sctp_next) == NULL) {
						/*
						 * We are at the end, insert it
						 * after this one
						 */
#ifdef SCTP_STR_LOGGING
						sctp_log_strm_del(chk, at,
						    SCTP_STR_LOG_FROM_INSERT_TL);
#endif
						TAILQ_INSERT_AFTER(&strm->inqueue,
						    at, chk, sctp_next);
						break;
					}
				}
			}
		}
	} else {
		/* We delivered some chunks, wake them up */

#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			printf("Doing WAKEUP!\n");
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
	}
}

/*
 * Returns two things: You get the total size of the deliverable parts of the
 * first fragmented message on the reassembly queue. And you get a 1 back if
 * all of the message is ready or a 0 back if the message is still incomplete.
 */
static int
sctp_is_all_msg_on_reasm(struct sctp_association *asoc, int *t_size)
{
	struct sctp_tmit_chunk *chk;
	u_int32_t tsn;

	*t_size = 0;
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* nothing on the queue */
		return (0);
	}
	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
		/* Not a first on the queue */
		return (0);
	}
	tsn = chk->rec.data.TSN_seq;
	while (chk) {
		if (tsn != chk->rec.data.TSN_seq) {
			return (0);
		}
		*t_size += chk->send_size;
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			return (1);
		}
		tsn++;
		chk = TAILQ_NEXT(chk, sctp_next);
	}
	return (0);
}

/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on
 * the queue, see if anything can be delivered. If so, pull it off (or as
 * much as we can). If we run out of space then we must dump what we can and
 * set the appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
	struct mbuf *oper;
	u_int16_t nxt_todel;
	u_int32_t cum_ackp1, prev_tsn, post_tsn;
	int tsize;
	struct sctp_tmit_chunk *at, *prev, *next;

	prev = next = NULL;
	cum_ackp1 = asoc->tsn_last_delivered + 1;

	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
		/* This is the first one on the queue */
		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
		/*
		 * we do not check for delivery of anything when
		 * only one fragment is here
		 */
		asoc->size_on_reasm_queue = chk->send_size;
		asoc->cnt_on_reasm_queue++;
		if (chk->rec.data.TSN_seq == cum_ackp1) {
			if (asoc->fragmented_delivery_inprogress == 0 &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
			    SCTP_DATA_FIRST_FRAG) {
				/*
				 * An empty queue, no delivery in progress,
				 * we hit the next one and it does NOT have
				 * a FIRST fragment mark.
				 */
#ifdef SCTP_DEBUG
				if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
					printf("Gak, Evil plot, its not first, no fragmented delivery in progress\n");
				}
#endif
				MGET(oper, M_DONTWAIT, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					u_int32_t *ippp;

					oper->m_len =
					    sizeof(struct sctp_paramhdr) +
					    sizeof(*ippp);
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(oper->m_len);
					ippp = (u_int32_t *)(ph + 1);
					*ippp = htonl(0x10000001);
				}
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
				/*
				 * We are doing a partial delivery and the
				 * NEXT chunk MUST be either the LAST or
				 * MIDDLE fragment NOT a FIRST
				 */
#ifdef SCTP_DEBUG
				if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
					printf("Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
				}
#endif
				MGET(oper, M_DONTWAIT, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					u_int32_t *ippp;

					oper->m_len =
					    sizeof(struct sctp_paramhdr) +
					    sizeof(*ippp);
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(oper->m_len);
					ippp = (u_int32_t *)(ph + 1);
					*ippp = htonl(0x10000002);
				}
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress) {
				/* Here we are ok with a MIDDLE or LAST piece */
				if (chk->rec.data.stream_number !=
				    asoc->str_of_pdapi) {
					/* Got to be the right STR No */
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Gak, Evil plot, it IS not same stream number %d vs %d\n",
						    chk->rec.data.stream_number,
						    asoc->str_of_pdapi);
					}
#endif
					MGET(oper, M_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x10000003);
					}
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);
					*abort_flag = 1;
				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
				    SCTP_DATA_UNORDERED &&
				    chk->rec.data.stream_seq !=
				    asoc->ssn_of_pdapi) {
					/* Got to be the right STR Seq */
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Gak, Evil plot, it IS not same stream seq %d vs %d\n",
						    chk->rec.data.stream_seq,
						    asoc->ssn_of_pdapi);
					}
#endif
					MGET(oper, M_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x10000004);
					}
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);
					*abort_flag = 1;
				}
			}
		}
		return;
	}
	/* Find its place */
	at = TAILQ_FIRST(&asoc->reasmqueue);

	/* Grab the top flags */
	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (compare_with_wrap(at->rec.data.TSN_seq,
		    chk->rec.data.TSN_seq, MAX_TSN)) {
			/*
			 * one in queue is bigger than the new one, insert
			 * before this one
			 */
			/* A check */
			asoc->size_on_reasm_queue += chk->send_size;
			asoc->cnt_on_reasm_queue++;
			next = at;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
			/* Gak, he sent me a duplicate TSN */
			/*
			 * foo bar, I guess I will just free this new guy,
			 * should we abort too? FIX ME MAYBE? Or it COULD be
			 * that the SSN's have wrapped. Maybe I should compare
			 * to TSN somehow... sigh, for now just blow away the
			 * chunk!
			 */
			if (chk->data)
				sctp_m_freem(chk->data);
			chk->data = NULL;
			sctp_free_remote_addr(chk->whoTo);
			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
			sctppcbinfo.ipi_count_chunk--;
			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
				panic("Chunk count is negative");
			}
			sctppcbinfo.ipi_gencnt_chunk++;
			return;
		} else {
			prev = at;
			if (TAILQ_NEXT(at, sctp_next) == NULL) {
				/*
				 * We are at the end, insert it after this one
				 */
				/* check it first */
				asoc->size_on_reasm_queue += chk->send_size;
				asoc->cnt_on_reasm_queue++;
				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
				break;
			}
		}
	}
	/* Now the audits */
	if (prev) {
		prev_tsn = chk->rec.data.TSN_seq - 1;
		if (prev_tsn == prev->rec.data.TSN_seq) {
			/*
			 * Ok, the one I am dropping onto the end
			 * is the NEXT. A bit of validation here.
			 */
			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_FIRST_FRAG ||
			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG) {
				/*
				 * Insert chk MUST be a MIDDLE or LAST
				 * fragment
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_FIRST_FRAG) {
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Prev check - It can be a middle or last but not a first\n");
						printf("Gak, Evil plot, it's a FIRST!\n");
					}
#endif
					MGET(oper, M_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);

						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x10000005);
					}
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    prev->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
						    chk->rec.data.stream_number,
						    prev->rec.data.stream_number);
					}
#endif
					MGET(oper, M_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x10000006);
					}

					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;
				}
				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    prev->rec.data.stream_seq) {
					/*
					 * Huh, need the correct STR seq here,
					 * they must be the same.
					 */
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
						    chk->rec.data.stream_seq,
						    prev->rec.data.stream_seq);
					}
#endif
					MGET(oper, M_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x10000007);
					}

					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;
				}
			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/* Insert chk MUST be a FIRST */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Prev check - Gak, evil plot, its not FIRST and it must be!\n");
					}
#endif
					MGET(oper, M_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x10000008);
					}

					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;
				}
			}
		}
	}

	if (next) {
		post_tsn = chk->rec.data.TSN_seq + 1;
		if (post_tsn == next->rec.data.TSN_seq) {
			/*
			 * Ok, the one I am inserting ahead of
			 * is my NEXT one. A bit of validation here.
			 */
			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
				/* Insert chk MUST be a last fragment */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
				    != SCTP_DATA_LAST_FRAG) {
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Next chk - Next is FIRST, we must be LAST\n");
						printf("Gak, Evil plot, its not a last!\n");
					}
#endif
					MGET(oper, M_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x10000009);
					}

					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;
				}
			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG ||
			    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/* Insert chk CAN be MIDDLE or FIRST NOT LAST */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_LAST_FRAG) {
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Next chk - Next is a MIDDLE/LAST\n");
						printf("Gak, Evil plot, new prev chunk is a LAST\n");
					}
#endif
					MGET(oper, M_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x1000000a);
					}
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    next->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
						    chk->rec.data.stream_number,
						    next->rec.data.stream_number);
					}
#endif
					MGET(oper, M_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x1000000b);
					}

					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;
				}
				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    next->rec.data.stream_seq) {
					/*
					 * Huh, need the correct STR seq here,
					 * they must be the same.
					 */
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
						    chk->rec.data.stream_seq,
						    next->rec.data.stream_seq);
					}
#endif
					MGET(oper, M_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x1000000c);
					}

					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;

				}
			}
		}
	}
	/*
	 * now that we have it all in its place we must check a number of
	 * things to see if we can send data to the ULP.
	 */
	/* we need to do some delivery, if we can */
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* Huh? */
		asoc->size_on_reasm_queue = 0;
		asoc->cnt_on_reasm_queue = 0;
		return;
	}
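	/*
	 * Decide whether to start a partial delivery (PD-API): we only do
	 * so when the head of the reassembly queue is a FIRST fragment
	 * that is next in line for its stream (or unordered), and either
	 * the whole message is present or enough of it is queued (at
	 * least a quarter of the socket buffer) to make it worthwhile.
	 */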
	if (asoc->fragmented_delivery_inprogress == 0) {
		nxt_todel =
		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
		    (nxt_todel == chk->rec.data.stream_seq ||
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
			/*
			 * Yep, the first one is here and it's
			 * ok to deliver, but should we?
			 */
			if (TAILQ_EMPTY(&asoc->delivery_queue) &&
			    (sctp_is_all_msg_on_reasm(asoc, &tsize) ||
			    (asoc->size_on_reasm_queue >=
			    (stcb->sctp_socket->so_rcv.sb_hiwat >> 2) &&
			    tsize))) {
				/*
				 * Yes, we set up to start reception by
				 * backing down the TSN, just in case we
				 * can't deliver.
				 */
				asoc->fragmented_delivery_inprogress = 1;
				asoc->tsn_last_delivered =
				    chk->rec.data.TSN_seq - 1;
				asoc->str_of_pdapi =
				    chk->rec.data.stream_number;
				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
				asoc->fragment_flags = chk->rec.data.rcv_flags;
				sctp_service_reassembly(stcb, asoc, 0);
			}
		}
	} else {
		sctp_service_reassembly(stcb, asoc, 0);
	}
}

/*
 * This is an unfortunate routine. It checks to make sure an evil guy is not
 * stuffing us full of bad packet fragments. A broken peer could also do this
 * but this is doubtful. It is too bad I must worry about evil crackers, sigh
 * :< more cycles.
 */
static int
sctp_does_chk_belong_to_reasm(struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk)
{
	struct sctp_tmit_chunk *at;
	u_int32_t tsn_est;

	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (compare_with_wrap(chk->rec.data.TSN_seq,
		    at->rec.data.TSN_seq, MAX_TSN)) {
			/* is it one bigger? */
			tsn_est = at->rec.data.TSN_seq + 1;
			if (tsn_est == chk->rec.data.TSN_seq) {
				/* yep. It better be a last then */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_LAST_FRAG) {
					/*
					 * Ok, this guy belongs next to a guy
					 * that is NOT last, it should be a
					 * middle/last, not a complete chunk.
					 */
					return (1);
				} else {
					/*
					 * This guy is ok since its a LAST and
					 * the new chunk is a fully self-
					 * contained one.
					 */
					return (0);
				}
			}
		} else if (chk->rec.data.TSN_seq == at->rec.data.TSN_seq) {
			/* Software error since I have a dup? */
			return (1);
		} else {
			/*
			 * Ok, 'at' is larger than new chunk but does it
			 * need to be right before it?
			 */
			tsn_est = chk->rec.data.TSN_seq + 1;
			if (tsn_est == at->rec.data.TSN_seq) {
				/* Yep, it better be a first */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					return (1);
				} else {
					return (0);
				}
			}
		}
	}
	return (0);
}

extern unsigned int sctp_max_chunks_on_queue;
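
/*
 * Validate a single inbound DATA chunk: duplicate TSN detection against
 * the cumulative TSN and mapping array, queue and rwnd limits, stream id
 * range, and stream sequence sanity. Chunks that pass may be taken in via
 * the express-delivery fast path below, which appends them straight to
 * the socket buffer. Sets *abort_flag when the association must be
 * aborted and *break_flag when the caller should stop processing the
 * rest of this packet.
 */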
1670 static int
sctp_process_a_data_chunk(struct sctp_tcb * stcb,struct sctp_association * asoc,struct mbuf ** m,int offset,struct sctp_data_chunk * ch,int chk_length,struct sctp_nets * net,u_int32_t * high_tsn,int * abort_flag,int * break_flag,int last_chunk)1671 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1672 struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
1673 struct sctp_nets *net, u_int32_t *high_tsn, int *abort_flag,
1674 int *break_flag, int last_chunk)
1675 {
1676 /* Process a data chunk */
1677 /* struct sctp_tmit_chunk *chk;*/
1678 struct sctp_tmit_chunk *chk;
1679 u_int32_t tsn, gap;
1680 struct mbuf *dmbuf;
1681 int the_len;
1682 u_int16_t strmno, strmseq;
1683 struct mbuf *oper;
1684
1685 chk = NULL;
1686 tsn = ntohl(ch->dp.tsn);
1687 #ifdef SCTP_MAP_LOGGING
1688 sctp_log_map(0, tsn, asoc->cumulative_tsn, SCTP_MAP_PREPARE_SLIDE);
1689 #endif
1690 if (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN) ||
1691 asoc->cumulative_tsn == tsn) {
1692 /* It is a duplicate */
1693 sctp_pegs[SCTP_DUPTSN_RECVD]++;
1694 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1695 /* Record a dup for the next outbound sack */
1696 asoc->dup_tsns[asoc->numduptsns] = tsn;
1697 asoc->numduptsns++;
1698 }
1699 return (0);
1700 }
1701 /* Calculate the number of TSN's between the base and this TSN */
1702 if (tsn >= asoc->mapping_array_base_tsn) {
1703 gap = tsn - asoc->mapping_array_base_tsn;
1704 } else {
1705 gap = (MAX_TSN - asoc->mapping_array_base_tsn) + tsn + 1;
1706 }
1707 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1708 /* Can't hold the bit in the mapping at max array, toss it */
1709 return (0);
1710 }
1711 if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
1712 if (sctp_expand_mapping_array(asoc)) {
1713 /* Can't expand, drop it */
1714 return (0);
1715 }
1716 }
1717 if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) {
1718 *high_tsn = tsn;
1719 }
1720 /* See if we have received this one already */
1721 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
1722 sctp_pegs[SCTP_DUPTSN_RECVD]++;
1723 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1724 /* Record a dup for the next outbound sack */
1725 asoc->dup_tsns[asoc->numduptsns] = tsn;
1726 asoc->numduptsns++;
1727 }
1728 if (!callout_pending(&asoc->dack_timer.timer)) {
1729 /*
1730 * By starting the timer we assure that we
1731 * WILL sack at the end of the packet
1732 * when sctp_sack_check gets called.
1733 */
1734 sctp_timer_start(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep,
1735 stcb, NULL);
1736 }
1737 return (0);
1738 }
1739 /*
1740 * Check to see about the GONE flag, duplicates would cause
1741 * a sack to be sent up above
1742 */
1743 if (stcb && (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
1744 /*
1745 * wait a minute, this guy is gone, there is no
1746 * longer a receiver. Send peer an ABORT!
1747 */
1748 struct mbuf *op_err;
1749 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1750 sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err);
1751 *abort_flag = 1;
1752 return (0);
1753 }
1754 /*
1755 * Now before going further we see if there is room. If NOT then
1756 * we MAY let one through only IF this TSN is the one we are
1757 * waiting for on a partial delivery API.
1758 */
1759
1760 /* now do the tests */
1761 if (((asoc->cnt_on_all_streams +
1762 asoc->cnt_on_delivery_queue +
1763 asoc->cnt_on_reasm_queue +
1764 asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) ||
1765 (((int)asoc->my_rwnd) <= 0)) {
1766 /*
1767 * When we have NO room in the rwnd we check
1768 * to make sure the reader is doing its job...
1769 */
1770 if (stcb->sctp_socket->so_rcv.sb_cc) {
1771 /* some to read, wake-up */
1772 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1773 }
1774 /* now is it in the mapping array of what we have accepted? */
1775 if (compare_with_wrap(tsn,
1776 asoc->highest_tsn_inside_map, MAX_TSN)) {
1777
1778 /* Nope not in the valid range dump it */
1779 #ifdef SCTP_DEBUG
1780 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1781 printf("My rwnd overrun1:tsn:%lx rwnd %lu sbspace:%ld delq:%d!\n",
1782 (u_long)tsn, (u_long)asoc->my_rwnd,
1783 sctp_sbspace(&stcb->sctp_socket->so_rcv),
1784 stcb->asoc.cnt_on_delivery_queue);
1785 }
1786 #endif
1787 sctp_set_rwnd(stcb, asoc);
1788 if ((asoc->cnt_on_all_streams +
1789 asoc->cnt_on_delivery_queue +
1790 asoc->cnt_on_reasm_queue +
1791 asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) {
1792 sctp_pegs[SCTP_MSGC_DROP]++;
1793 } else {
1794 sctp_pegs[SCTP_RWND_DROPS]++;
1795 }
1796 *break_flag = 1;
1797 return (0);
1798 }
1799 }
1800 strmno = ntohs(ch->dp.stream_id);
1801 if (strmno >= asoc->streamincnt) {
1802 struct sctp_paramhdr *phdr;
1803 struct mbuf *mb;
1804
1805 MGETHDR(mb, M_DONTWAIT, MT_DATA);
1806 if (mb != NULL) {
1807 /* add some space up front so prepend will work well */
1808 mb->m_data += sizeof(struct sctp_chunkhdr);
1809 phdr = mtod(mb, struct sctp_paramhdr *);
1810 /*
1811 * Error causes are just param's and this one has
1812 * two back to back phdr, one with the error type
1813 * and size, the other with the streamid and a rsvd
1814 */
1815 mb->m_pkthdr.len = mb->m_len =
1816 (sizeof(struct sctp_paramhdr) * 2);
1817 phdr->param_type = htons(SCTP_CAUSE_INV_STRM);
1818 phdr->param_length =
1819 htons(sizeof(struct sctp_paramhdr) * 2);
1820 phdr++;
1821 /* We insert the stream in the type field */
1822 phdr->param_type = ch->dp.stream_id;
1823 /* And set the length to 0 for the rsvd field */
1824 phdr->param_length = 0;
1825 sctp_queue_op_err(stcb, mb);
1826 }
1827 sctp_pegs[SCTP_BAD_STRMNO]++;
1828 return (0);
1829 }
1830 /*
1831 * Before we continue lets validate that we are not
1832 * being fooled by an evil attacker. We can only
1833 * have 4k chunks based on our TSN spread allowed
1834 * by the mapping array 512 * 8 bits, so there is
1835 * no way our stream sequence numbers could have wrapped.
1836 * We of course only validate the FIRST fragment so the
1837 * bit must be set.
1838 */
1839 strmseq = ntohs(ch->dp.stream_sequence);
1840 if ((ch->ch.chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1841 (ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1842 (compare_with_wrap(asoc->strmin[strmno].last_sequence_delivered,
1843 strmseq, MAX_SEQ) ||
1844 asoc->strmin[strmno].last_sequence_delivered == strmseq)) {
1845 /* The incoming sseq is behind where we last delivered? */
1846 #ifdef SCTP_DEBUG
1847 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1848 printf("EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1849 strmseq,
1850 asoc->strmin[strmno].last_sequence_delivered);
1851 }
1852 #endif
1853 /*
1854 * throw it in the stream so it gets cleaned up in
1855 * association destruction
1856 */
1857 MGET(oper, M_DONTWAIT, MT_DATA);
1858 if (oper) {
1859 struct sctp_paramhdr *ph;
1860 u_int32_t *ippp;
1861
1862 oper->m_len = sizeof(struct sctp_paramhdr) +
1863 sizeof(*ippp);
1864 ph = mtod(oper, struct sctp_paramhdr *);
1865 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1866 ph->param_length = htons(oper->m_len);
1867 ippp = (u_int32_t *)(ph + 1);
1868 *ippp = htonl(0x20000001);
1869 }
1870 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY,
1871 oper);
1872 sctp_pegs[SCTP_BAD_SSN_WRAP]++;
1873 *abort_flag = 1;
1874 return (0);
1875 }
1876
1877 the_len = (chk_length-sizeof(struct sctp_data_chunk));
1878 if (last_chunk == 0) {
1879 dmbuf = sctp_m_copym(*m,
1880 (offset + sizeof(struct sctp_data_chunk)),
1881 the_len, M_DONTWAIT);
1882 } else {
1883 /* We can steal the last chunk */
1884 dmbuf = *m;
1885 /* lop off the top part */
1886 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1887 if (dmbuf->m_pkthdr.len > the_len) {
1888 			/* Trim the rounding (pad) bytes off the end too */
1889 m_adj(dmbuf, -(dmbuf->m_pkthdr.len-the_len));
1890 }
1891 sctp_pegs[SCTP_NO_COPY_IN]++;
1892 }
1893 if (dmbuf == NULL) {
1894 sctp_pegs[SCTP_DROP_NOMEMORY]++;
1895 return (0);
1896 }
1897 if ((ch->ch.chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1898 asoc->fragmented_delivery_inprogress == 0 &&
1899 TAILQ_EMPTY(&asoc->delivery_queue) &&
1900 ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) ||
1901 ((asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1902 TAILQ_EMPTY(&asoc->strmin[strmno].inqueue))) &&
1903 ((long)(stcb->sctp_socket->so_rcv.sb_hiwat -
1904 stcb->sctp_socket->so_rcv.sb_cc) >= (long)the_len)) {
1905 /* Candidate for express delivery */
1906 /*
1907 		 * It's not fragmented,
1908 		 * No PD-API is up,
1909 		 * Nothing in the delivery queue,
1910 		 * It's un-ordered OR ordered and the next to deliver AND
1911 		 * nothing else is stuck on the stream queue,
1912 		 * And there is room for it in the socket buffer.
1913 		 * Let's just stuff it up the buffer....
1914 */
1915
1916 struct mbuf *control, *mmm;
1917 struct sockaddr_in6 sin6;
1918 struct sockaddr_in6 lsa6;
1919 const struct sockaddr *to;
1920
1921 /* It would be nice to avoid this copy if we could :< */
1922 control = sctp_build_ctl_nchunk(stcb, tsn,
1923 ch->dp.protocol_id, 0, strmno, strmseq,
1924 ch->ch.chunk_flags);
1925 /* XXX need to append PKTHDR to the socket buffer first */
1926
1927 if ((dmbuf->m_flags & M_PKTHDR) == 0) {
1928 struct mbuf *tmp;
1929 MGETHDR(tmp, M_DONTWAIT, MT_DATA);
1930 if (tmp == NULL) {
1931
1932 /* no room! */
1933 if (control) {
1934 sctp_m_freem(control);
1935 stcb->asoc.my_rwnd_control_len -=
1936 CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
1937 }
1938
1939 goto failed_express_del;
1940 }
1941 tmp->m_pkthdr.len = the_len;
1942 tmp->m_len = 0;
1943 tmp->m_next = dmbuf;
1944 dmbuf = tmp;
1945 }
1946 to = rtcache_getdst(&net->ro);
1947 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
1948 to->sa_family == AF_INET) {
1949 const struct sockaddr_in *sin;
1950
1951 sin = satocsin(to);
1952 in6_sin_2_v4mapsin6(sin, &sin6);
1953 to = (struct sockaddr *)&sin6;
1954 }
1955
1956 /* check and strip embedded scope junk */
1957 to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
1958 &lsa6);
1959 if (((const struct sockaddr_in *)to)->sin_port == 0) {
1960 printf("Huh c, port is %d not net:%p %d?\n",
1961 ((const struct sockaddr_in *)to)->sin_port,
1962 net,
1963 (int)(ntohs(stcb->rport)));
1964 /*((struct sockaddr_in *)to)->sin_port = stcb->rport;*/
1965 /* XXX */
1966 }
1967
1968 mmm = dmbuf;
1969 /* Mark the EOR */
1970 while (mmm->m_next != NULL) {
1971 mmm = mmm->m_next;
1972 }
1973 mmm->m_flags |= M_EOR;
1974 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
1975 /* we have a new high score */
1976 asoc->highest_tsn_inside_map = tsn;
1977 #ifdef SCTP_MAP_LOGGING
1978 sctp_log_map(0, 1, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
1979 #endif
1980 }
1981 SCTP_TCB_UNLOCK(stcb);
1982 SCTP_INP_WLOCK(stcb->sctp_ep);
1983 SCTP_TCB_LOCK(stcb);
1984 if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to, dmbuf,
1985 control, stcb->asoc.my_vtag, stcb->sctp_ep)) {
1986 if (control) {
1987 sctp_m_freem(control);
1988 stcb->asoc.my_rwnd_control_len -=
1989 CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
1990 }
1991 sctp_m_freem(dmbuf);
1992 goto failed_express_del;
1993 }
1994 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) {
1995 if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
1996 stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
1997 }
1998 } else {
1999 stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2000 }
2001 SCTP_INP_WUNLOCK(stcb->sctp_ep);
2002 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2003 if ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0) {
2004
2005 /* for ordered, bump what we delivered */
2006 asoc->strmin[strmno].last_sequence_delivered++;
2007 }
2008 sctp_pegs[SCTP_EXPRESS_ROUTE]++;
2009 #ifdef SCTP_STR_LOGGING
2010 sctp_log_strm_del_alt(tsn, strmseq,
2011 SCTP_STR_LOG_FROM_EXPRS_DEL);
2012 #endif
2013 #ifdef SCTP_DEBUG
2014 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
2015 printf("Express Delivery succeeds\n");
2016 }
2017 #endif
2018 goto finish_express_del;
2019 }
2020
2021 failed_express_del:
2022 /* If we reach here this is a new chunk */
2023 chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
2024 if (chk == NULL) {
2025 /* No memory so we drop the chunk */
2026 sctp_pegs[SCTP_DROP_NOMEMORY]++;
2027 if (last_chunk == 0) {
2028 /* we copied it, free the copy */
2029 sctp_m_freem(dmbuf);
2030 }
2031 return (0);
2032 }
2033 sctppcbinfo.ipi_count_chunk++;
2034 sctppcbinfo.ipi_gencnt_chunk++;
2035 chk->rec.data.TSN_seq = tsn;
2036 chk->rec.data.stream_seq = strmseq;
2037 chk->rec.data.stream_number = strmno;
2038 chk->rec.data.payloadtype = ch->dp.protocol_id;
2039 chk->rec.data.context = 0;
2040 chk->rec.data.doing_fast_retransmit = 0;
2041 chk->rec.data.rcv_flags = ch->ch.chunk_flags;
2042 chk->asoc = asoc;
2043 chk->send_size = the_len;
2044 chk->whoTo = net;
2045 net->ref_count++;
2046 chk->data = dmbuf;
2047
2048
2049 /* Mark it as received */
2050 /* Now queue it where it belongs */
2051 if ((chk->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
2052 SCTP_DATA_NOT_FRAG) {
2053 /* First a sanity check */
2054 if (asoc->fragmented_delivery_inprogress) {
2055 /*
2056 			 * Ok, we have a fragmented delivery in progress.
2057 			 * If this chunk is next to deliver OR belongs, in
2058 			 * our view, in the reassembly queue, the peer is
2059 			 * evil or broken.
2060 */
2061 u_int32_t estimate_tsn;
2062 estimate_tsn = asoc->tsn_last_delivered + 1;
2063 if (TAILQ_EMPTY(&asoc->reasmqueue) &&
2064 (estimate_tsn == chk->rec.data.TSN_seq)) {
2065 /* Evil/Broke peer */
2066 MGET(oper, M_DONTWAIT, MT_DATA);
2067 if (oper) {
2068 struct sctp_paramhdr *ph;
2069 u_int32_t *ippp;
2070
2071 oper->m_len =
2072 sizeof(struct sctp_paramhdr) +
2073 sizeof(*ippp);
2074 ph = mtod(oper, struct sctp_paramhdr *);
2075 ph->param_type =
2076 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2077 ph->param_length = htons(oper->m_len);
2078 ippp = (u_int32_t *)(ph + 1);
2079 *ippp = htonl(0x20000002);
2080 }
2081 sctp_abort_an_association(stcb->sctp_ep, stcb,
2082 SCTP_PEER_FAULTY, oper);
2083
2084 *abort_flag = 1;
2085 sctp_pegs[SCTP_DROP_FRAG]++;
2086 return (0);
2087 } else {
2088 if (sctp_does_chk_belong_to_reasm(asoc, chk)) {
2089 MGET(oper, M_DONTWAIT, MT_DATA);
2090 if (oper) {
2091 struct sctp_paramhdr *ph;
2092 u_int32_t *ippp;
2093
2094 oper->m_len =
2095 sizeof(struct sctp_paramhdr) +
2096 sizeof(*ippp);
2097 ph = mtod(oper,
2098 struct sctp_paramhdr *);
2099 ph->param_type =
2100 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2101 ph->param_length =
2102 htons(oper->m_len);
2103 ippp = (u_int32_t *)(ph + 1);
2104 *ippp = htonl(0x20000003);
2105 }
2106 sctp_abort_an_association(stcb->sctp_ep,
2107 stcb, SCTP_PEER_FAULTY, oper);
2108
2109 *abort_flag = 1;
2110 sctp_pegs[SCTP_DROP_FRAG]++;
2111 return (0);
2112 }
2113 }
2114 } else {
2115 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
2116 /*
2117 				 * Reassembly queue is NOT empty;
2118 				 * validate that this chk does not need to
2119 				 * be in the reassembly queue. If it does then
2120 				 * our peer is broken or evil.
2121 */
2122 if (sctp_does_chk_belong_to_reasm(asoc, chk)) {
2123 MGET(oper, M_DONTWAIT, MT_DATA);
2124 if (oper) {
2125 struct sctp_paramhdr *ph;
2126 u_int32_t *ippp;
2127
2128 oper->m_len =
2129 sizeof(struct sctp_paramhdr) +
2130 sizeof(*ippp);
2131 ph = mtod(oper,
2132 struct sctp_paramhdr *);
2133 ph->param_type =
2134 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2135 ph->param_length =
2136 htons(oper->m_len);
2137 ippp = (u_int32_t *)(ph + 1);
2138 *ippp = htonl(0x20000004);
2139 }
2140 sctp_abort_an_association(stcb->sctp_ep,
2141 stcb, SCTP_PEER_FAULTY, oper);
2142
2143 *abort_flag = 1;
2144 sctp_pegs[SCTP_DROP_FRAG]++;
2145 return (0);
2146 }
2147 }
2148 }
2149 if (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
2150 /* queue directly into socket buffer */
2151 sctp_deliver_data(stcb, asoc, chk, 0);
2152 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2153 } else {
2154 /* Special check for when streams are resetting.
2155 			 * We could be smarter about this and check the
2156 			 * actual stream to see if it is not being reset.. that
2157 			 * way we would not create a HOLB when amongst streams
2158 			 * being reset and those not being reset.
2159 			 *
2160 			 * We take complete messages that have a stream reset
2161 			 * intervening (aka the TSN is after where our cum-ack needs
2162 			 * to be) off and put them on a pending_reply_queue. The
2163 			 * reassembly ones we do not have to worry about since
2164 			 * they are all sorted and processed by TSN order. It
2165 			 * is only the singletons I must worry about.
2166 */
2167 if ((asoc->pending_reply) &&
2168 ((compare_with_wrap(tsn, ntohl(asoc->pending_reply->reset_at_tsn), MAX_TSN)) ||
2169 (tsn == ntohl(asoc->pending_reply->reset_at_tsn)))
2170 ) {
2171 				/* yep it's past where we need to reset... go ahead and
2172 				 * queue it.
2173 				 */
2174 				TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, chk, sctp_next);
2175 } else {
2176 sctp_queue_data_to_stream(stcb, asoc, chk, abort_flag);
2177 }
2178 }
2179 } else {
2180 /* Into the re-assembly queue */
2181 sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2182 if (*abort_flag) {
2183 sctp_pegs[SCTP_DROP_FRAG]++;
2184 return (0);
2185 }
2186 }
2187 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
2188 /* we have a new high score */
2189 asoc->highest_tsn_inside_map = tsn;
2190 #ifdef SCTP_MAP_LOGGING
2191 sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2192 #endif
2193 }
2194 finish_express_del:
2195 if (last_chunk) {
2196 *m = NULL;
2197 }
2198 sctp_pegs[SCTP_PEG_TSNS_RCVD]++;
2199 /* Set it present please */
2200 #ifdef SCTP_STR_LOGGING
2201 sctp_log_strm_del_alt(tsn, strmseq, SCTP_STR_LOG_FROM_MARK_TSN);
2202 #endif
2203 #ifdef SCTP_MAP_LOGGING
2204 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2205 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2206 #endif
2207 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2208 return (1);
2209 }
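/*
 * A note on ordering tests: TSN and SSN comparisons throughout this file
 * go through compare_with_wrap(), which implements serial number
 * arithmetic so that sequence numbers stay ordered across a wrap of the
 * number space. A minimal stand-alone sketch of the idea for 32-bit TSNs
 * (illustrative only, hypothetical name, not compiled; the real routine
 * also takes the modulus, e.g. MAX_TSN or MAX_SEQ, as an argument):
 */
#if 0
static int
tsn_after(u_int32_t a, u_int32_t b)
{
	/*
	 * "a" is after "b" when it leads it by less than half the
	 * sequence space, allowing for wrap at 2^32.
	 */
	return ((a > b && (a - b) < (1U << 31)) ||
	    (a < b && (b - a) > (1U << 31)));
}
#endif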
2210
2211 void
2212 sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort_flag)
2213 {
2214 /*
2215 * Now we also need to check the mapping array in a couple of ways.
2216 * 1) Did we move the cum-ack point?
2217 */
2218 struct sctp_association *asoc;
2219 int i, at;
2220 int m_size, all_ones;
2221 int slide_from, slide_end, lgap, distance;
2222 #ifdef SCTP_MAP_LOGGING
2223 uint32_t old_cumack, old_base, old_highest;
2224 unsigned char aux_array[64];
2225 #endif
2226
2227 asoc = &stcb->asoc;
2228 at = 0;
2229
2230 #ifdef SCTP_MAP_LOGGING
2231 old_cumack = asoc->cumulative_tsn;
2232 old_base = asoc->mapping_array_base_tsn;
2233 old_highest = asoc->highest_tsn_inside_map;
2234 if (asoc->mapping_array_size < 64)
2235 memcpy(aux_array, asoc->mapping_array,
2236 asoc->mapping_array_size);
2237 else
2238 memcpy(aux_array, asoc->mapping_array, 64);
2239 #endif
2240
2241 /*
2242 * We could probably improve this a small bit by calculating the
2243 * offset of the current cum-ack as the starting point.
2244 */
2245 all_ones = 1;
2246 m_size = stcb->asoc.mapping_array_size << 3;
2247 for (i = 0; i < m_size; i++) {
2248 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i)) {
2249 /*
2250 * Ok we found the first place that we are
2251 * missing a TSN.
2252 */
2253 at = i;
2254 all_ones = 0;
2255 asoc->cumulative_tsn = asoc->mapping_array_base_tsn +
2256 (i - 1);
2257 break;
2258 }
2259 }
2260 if (compare_with_wrap(asoc->cumulative_tsn,
2261 asoc->highest_tsn_inside_map,
2262 MAX_TSN)) {
2263 panic("huh, cumack greater than high-tsn in map");
2264 }
2265 if (all_ones ||
2266 (asoc->cumulative_tsn == asoc->highest_tsn_inside_map && at >= 8)) {
2267 		/* The complete array was completed by a single FR */
2268 		/* highest becomes the cum-ack */
2269 int clr;
2270 asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
2271 /* clear the array */
2272 if (all_ones)
2273 clr = asoc->mapping_array_size;
2274 else {
2275 clr = (at >> 3) + 1;
2276 /*
2277 			 * this should be the all-ones case
2278 			 * but just in case :>
2279 */
2280 if (clr > asoc->mapping_array_size)
2281 clr = asoc->mapping_array_size;
2282 }
2283 memset(asoc->mapping_array, 0, clr);
2284 /* base becomes one ahead of the cum-ack */
2285 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2286 #ifdef SCTP_MAP_LOGGING
2287 sctp_log_map(old_base, old_cumack, old_highest,
2288 SCTP_MAP_PREPARE_SLIDE);
2289 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2290 asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_CLEARED);
2291 #endif
2292 } else if (at >= 8) {
2293 /* we can slide the mapping array down */
2294 		/* Calculate the new byte position we can move down */
2295 slide_from = at >> 3;
2296 /* now calculate the ceiling of the move using our highest TSN value */
2297 if (asoc->highest_tsn_inside_map >= asoc->mapping_array_base_tsn) {
2298 lgap = asoc->highest_tsn_inside_map -
2299 asoc->mapping_array_base_tsn;
2300 } else {
2301 lgap = (MAX_TSN - asoc->mapping_array_base_tsn) +
2302 asoc->highest_tsn_inside_map + 1;
2303 }
2304 slide_end = lgap >> 3;
2305 if (slide_end < slide_from) {
2306 panic("impossible slide");
2307 }
2308 distance = (slide_end-slide_from) + 1;
2309 #ifdef SCTP_MAP_LOGGING
2310 sctp_log_map(old_base, old_cumack, old_highest,
2311 SCTP_MAP_PREPARE_SLIDE);
2312 sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2313 (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
2314 #endif
2315 if (distance + slide_from > asoc->mapping_array_size ||
2316 distance < 0) {
2317 #ifdef SCTP_DEBUG
2318 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
2319 printf("Ugh bad addition.. you can't hrumpp!\n");
2320 }
2321 #endif
2322 /*
2323 * Here we do NOT slide forward the array so that
2324 * hopefully when more data comes in to fill it up
2325 * we will be able to slide it forward. Really
2326 * I don't think this should happen :-0
2327 */
2328
2329 #ifdef SCTP_MAP_LOGGING
2330 sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2331 (uint32_t)asoc->mapping_array_size,
2332 SCTP_MAP_SLIDE_NONE);
2333 #endif
2334 } else {
2335 int ii;
2336 for (ii = 0; ii < distance; ii++) {
2337 asoc->mapping_array[ii] =
2338 asoc->mapping_array[slide_from + ii];
2339 }
2340 for (ii = distance;ii <= slide_end; ii++) {
2341 asoc->mapping_array[ii] = 0;
2342 }
2343 asoc->mapping_array_base_tsn += (slide_from << 3);
2344 #ifdef SCTP_MAP_LOGGING
2345 sctp_log_map(asoc->mapping_array_base_tsn,
2346 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2347 SCTP_MAP_SLIDE_RESULT);
2348 #endif
2349 }
2350 }
2351
2352 /* check the special flag for stream resets */
2353 if ((asoc->pending_reply) &&
2354 ((compare_with_wrap((asoc->cumulative_tsn+1), ntohl(asoc->pending_reply->reset_at_tsn), MAX_TSN)) ||
2355 ((asoc->cumulative_tsn+1) == ntohl(asoc->pending_reply->reset_at_tsn)))
2356 ) {
2357 		/* we have finished working through the backlogged TSNs;
2358 		 * now it is time to reset streams.
2359 * 1: call reset function.
2360 * 2: free pending_reply space
2361 * 3: distribute any chunks in pending_reply_queue.
2362 */
2363 struct sctp_tmit_chunk *chk;
2364 sctp_handle_stream_reset_response(stcb, asoc->pending_reply);
2365 free(asoc->pending_reply, M_PCB);
2366 asoc->pending_reply = NULL;
2367 chk = TAILQ_FIRST(&asoc->pending_reply_queue);
2368 while (chk) {
2369 TAILQ_REMOVE(&asoc->pending_reply_queue, chk, sctp_next);
2370 sctp_queue_data_to_stream(stcb, asoc, chk, abort_flag);
2371 if (*abort_flag) {
2372 return;
2373 }
2374 chk = TAILQ_FIRST(&asoc->pending_reply_queue);
2375 }
2376 }
2377 /*
2378 * Now we need to see if we need to queue a sack or just start
2379 * the timer (if allowed).
2380 */
2381 if (ok_to_sack) {
2382 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2383 /*
2384 			 * Ok, special case: in the SHUTDOWN-SENT case,
2385 			 * here we make sure the SACK timer is off and
2386 			 * instead send a SHUTDOWN and a SACK
2387 */
2388 if (callout_pending(&stcb->asoc.dack_timer.timer)) {
2389 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2390 stcb->sctp_ep, stcb, NULL);
2391 }
2392 #ifdef SCTP_DEBUG
2393 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
2394 printf("%s:%d sends a shutdown\n",
2395 __FILE__,
2396 __LINE__
2397 );
2398 }
2399 #endif
2400 sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
2401 sctp_send_sack(stcb);
2402 } else {
2403 int is_a_gap;
2404 /* is there a gap now ? */
2405 is_a_gap = compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2406 stcb->asoc.cumulative_tsn, MAX_TSN);
2407 if ((stcb->asoc.first_ack_sent == 0) || /* First time we send a sack */
2408 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no longer is one */
2409 (stcb->asoc.numduptsns) || /* we have dup's */
2410 (is_a_gap) || /* is still a gap */
2411 			    (callout_pending(&stcb->asoc.dack_timer.timer)) /* timer was up; second packet */
2412 ) {
2413 /*
2414 * Ok we must build a SACK since the timer
2415 * is pending, we got our first packet OR
2416 * there are gaps or duplicates.
2417 */
2418 stcb->asoc.first_ack_sent = 1;
2419 sctp_send_sack(stcb);
2420 /* The sending will stop the timer */
2421 } else {
2422 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2423 stcb->sctp_ep, stcb, NULL);
2424 }
2425 }
2426 }
2427 }
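/*
 * The map slide in sctp_sack_check() is a plain byte-wise shift of the
 * TSN bit map: once the first "at" TSNs are all present, whole bytes
 * below the first gap can be discarded and the base TSN moved up. A
 * minimal userland sketch of that operation (illustrative only,
 * hypothetical names, not compiled):
 */
#if 0
static void
map_slide(u_int8_t *map, u_int32_t *base_tsn, int slide_from, int slide_end)
{
	int ii, distance = (slide_end - slide_from) + 1;

	/* move the bytes that still describe interesting TSNs to the front */
	for (ii = 0; ii < distance; ii++)
		map[ii] = map[slide_from + ii];
	/* zero the vacated tail so those bits can be reused */
	for (ii = distance; ii <= slide_end; ii++)
		map[ii] = 0;
	/* each byte slid over covered 8 TSNs */
	*base_tsn += (slide_from << 3);
}
#endif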
2428
2429 void
2430 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc, int hold_locks)
2431 {
2432 struct sctp_tmit_chunk *chk;
2433 int tsize, cntDel;
2434 u_int16_t nxt_todel;
2435
2436 cntDel = 0;
2437 if (asoc->fragmented_delivery_inprogress) {
2438 sctp_service_reassembly(stcb, asoc, hold_locks);
2439 }
2440 	/* Can we proceed further, i.e. is the PD-API complete? */
2441 if (asoc->fragmented_delivery_inprogress) {
2442 /* no */
2443 return;
2444 }
2445
2446 /*
2447 	 * Yes, reassembly delivery is no longer in progress; see if we
2448 	 * have some on the sb hold queue.
2449 */
2450 do {
2451 if (stcb->sctp_socket->so_rcv.sb_cc >= stcb->sctp_socket->so_rcv.sb_hiwat) {
2452 if (cntDel == 0)
2453 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2454 break;
2455 }
2456 /* If deliver_data says no we must stop */
2457 if (sctp_deliver_data(stcb, asoc, (struct sctp_tmit_chunk *)NULL, hold_locks) == 0)
2458 break;
2459 cntDel++;
2460 chk = TAILQ_FIRST(&asoc->delivery_queue);
2461 } while (chk);
2462 if (cntDel) {
2463 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2464 }
2465 /*
2466 * Now is there some other chunk I can deliver
2467 * from the reassembly queue.
2468 */
2469 chk = TAILQ_FIRST(&asoc->reasmqueue);
2470 if (chk == NULL) {
2471 asoc->size_on_reasm_queue = 0;
2472 asoc->cnt_on_reasm_queue = 0;
2473 return;
2474 }
2475 nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2476 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2477 ((nxt_todel == chk->rec.data.stream_seq) ||
2478 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2479 /*
2480 		 * Yep the first one is here. We set up to
2481 		 * start reception by backing down the TSN,
2482 		 * just in case we can't deliver.
2483 */
2484
2485 /*
2486 		 * Before we start, though, the delivery queue must
2487 		 * be empty and either all of the message should be
2488 		 * here or at least 1/4 of the socket buffer max
2489 		 * should be queued for it.
2490 */
2491 if (TAILQ_EMPTY(&asoc->delivery_queue) &&
2492 (sctp_is_all_msg_on_reasm(asoc, &tsize) ||
2493 (asoc->size_on_reasm_queue >=
2494 (stcb->sctp_socket->so_rcv.sb_hiwat >> 2) && tsize))) {
2495 asoc->fragmented_delivery_inprogress = 1;
2496 asoc->tsn_last_delivered = chk->rec.data.TSN_seq-1;
2497 asoc->str_of_pdapi = chk->rec.data.stream_number;
2498 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2499 asoc->fragment_flags = chk->rec.data.rcv_flags;
2500 sctp_service_reassembly(stcb, asoc, hold_locks);
2501 }
2502 }
2503 }
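/*
 * Note on the threshold above: a partial delivery (PD-API) is only
 * started when the delivery queue is empty and either the whole message
 * is already on the reassembly queue or at least a quarter of the socket
 * buffer high-water mark (sb_hiwat >> 2) of it is. With an sb_hiwat of,
 * say, 64k that threshold works out to 16k.
 */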
2504
2505 int
2506 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2507 struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2508 struct sctp_nets *net, u_int32_t *high_tsn)
2509 {
2510 struct sctp_data_chunk *ch, chunk_buf;
2511 struct sctp_association *asoc;
2512 int num_chunks = 0; /* number of control chunks processed */
2513 int chk_length, break_flag, last_chunk;
2514 int abort_flag = 0, was_a_gap = 0;
2515 struct mbuf *m;
2516
2517 /* set the rwnd */
2518 sctp_set_rwnd(stcb, &stcb->asoc);
2519
2520 m = *mm;
2521 asoc = &stcb->asoc;
2522 if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2523 stcb->asoc.cumulative_tsn, MAX_TSN)) {
2524 /* there was a gap before this data was processed */
2525 was_a_gap = 1;
2526 }
2527 /*
2528 * setup where we got the last DATA packet from for
2529 * any SACK that may need to go out. Don't bump
2530 * the net. This is done ONLY when a chunk
2531 * is assigned.
2532 */
2533 asoc->last_data_chunk_from = net;
2534
2535 /*
2536 * Now before we proceed we must figure out if this
2537 * is a wasted cluster... i.e. it is a small packet
2538 * sent in and yet the driver underneath allocated a
2539 * full cluster for it. If so we must copy it to a
2540 * smaller mbuf and free up the cluster mbuf. This
2541 * will help with cluster starvation.
2542 */
2543 if (m->m_len < (long)MHLEN && m->m_next == NULL) {
2544 /* we only handle mbufs that are singletons.. not chains */
2545 MGET(m, M_DONTWAIT, MT_DATA);
2546 if (m) {
2547 			/* ok let's see if we can copy the data up */
2548 vaddr_t *from, *to;
2549
2550 if ((*mm)->m_flags & M_PKTHDR) {
2551 /* got to copy the header first */
2552 #ifdef __APPLE__
2553 M_COPY_PKTHDR(m, (*mm));
2554 #else
2555 M_MOVE_PKTHDR(m, (*mm));
2556 #endif
2557 }
2558 /* get the pointers and copy */
2559 to = mtod(m, vaddr_t *);
2560 from = mtod((*mm), vaddr_t *);
2561 memcpy(to, from, (*mm)->m_len);
2562 /* copy the length and free up the old */
2563 m->m_len = (*mm)->m_len;
2564 sctp_m_freem(*mm);
2565 			/* success, hand the copy back */
2566 *mm = m;
2567 } else {
2568 /* We are in trouble in the mbuf world .. yikes */
2569 m = *mm;
2570 }
2571 }
2572 /* get pointer to the first chunk header */
2573 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2574 sizeof(chunk_buf), (u_int8_t *)&chunk_buf);
2575 if (ch == NULL) {
2576 printf(" ... its short\n");
2577 return (1);
2578 }
2579 /*
2580 * process all DATA chunks...
2581 */
2582
2583 #ifdef SCTP_DEBUG
2584 if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
2585 printf("In process data off:%d length:%d iphlen:%d ch->type:%d\n",
2586 *offset, length, iphlen, (int)ch->ch.chunk_type);
2587 }
2588 #endif
2589
2590 *high_tsn = asoc->cumulative_tsn;
2591 break_flag = 0;
2592 while (ch->ch.chunk_type == SCTP_DATA) {
2593 /* validate chunk length */
2594 chk_length = ntohs(ch->ch.chunk_length);
2595 if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1 ||
2596 length - *offset < chk_length) {
2597 /*
2598 			 * Need to send an abort since we had an invalid
2599 * data chunk.
2600 */
2601 struct mbuf *op_err;
2602 MGET(op_err, M_DONTWAIT, MT_DATA);
2603 if (op_err) {
2604 struct sctp_paramhdr *ph;
2605 u_int32_t *ippp;
2606
2607 op_err->m_len = sizeof(struct sctp_paramhdr) +
2608 sizeof(*ippp);
2609 ph = mtod(op_err, struct sctp_paramhdr *);
2610 ph->param_type =
2611 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2612 ph->param_length = htons(op_err->m_len);
2613 ippp = (u_int32_t *)(ph + 1);
2614 *ippp = htonl(0x30000001);
2615 }
2616 sctp_abort_association(inp, stcb, m, iphlen, sh,
2617 op_err);
2618 return (2);
2619 }
2620 #ifdef SCTP_DEBUG
2621 if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
2622 printf("A chunk of len:%d to process (tot:%d)\n",
2623 chk_length, length - *offset);
2624 }
2625 #endif
2626
2627 #ifdef SCTP_AUDITING_ENABLED
2628 sctp_audit_log(0xB1, 0);
2629 #endif
2630 		if (SCTP_SIZE32(chk_length) == length - *offset) {
2631 last_chunk = 1;
2632 } else {
2633 last_chunk = 0;
2634 }
2635 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2636 chk_length, net, high_tsn, &abort_flag, &break_flag,
2637 last_chunk)) {
2638 num_chunks++;
2639 #ifdef SCTP_DEBUG
2640 if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
2641 printf("Now incr num_chunks to %d\n",
2642 num_chunks);
2643 }
2644 #endif
2645 }
2646 if (abort_flag)
2647 return (2);
2648
2649 if (break_flag) {
2650 /*
2651 			 * Set because we ran out of rwnd space and have no
2652 			 * drop report space left.
2653 */
2654 break;
2655 }
2656
2657 *offset += SCTP_SIZE32(chk_length);
2658 if (*offset >= length) {
2659 /* no more data left in the mbuf chain */
2660 break;
2661 }
2662 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2663 sizeof(chunk_buf), (u_int8_t *)&chunk_buf);
2664 if (ch == NULL) {
2665 *offset = length;
2666 break;
2667 }
2668 } /* while */
2669 if (break_flag) {
2670 /*
2671 * we need to report rwnd overrun drops.
2672 */
2673 sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
2674 }
2675 if (num_chunks) {
2676 /*
2677 		 * Did we get data? If so, update the time for
2678 		 * auto-close and give the peer credit for being
2679 		 * alive.
2680 */
2681 sctp_pegs[SCTP_DATA_DG_RECV]++;
2682 stcb->asoc.overall_error_count = 0;
2683 SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2684 }
2685 /* now service all of the reassm queue and delivery queue */
2686 sctp_service_queues(stcb, asoc, 0);
2687 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2688 /*
2689 		 * Assure that we ack right away by making
2690 		 * sure that a d-ack timer is running, so
2691 		 * that sack_check will send a sack.
2692 */
2693 sctp_timer_start(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb,
2694 net);
2695 }
2696 /* Start a sack timer or QUEUE a SACK for sending */
2697 sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
2698 if (abort_flag)
2699 return (2);
2700
2701 return (0);
2702 }
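/*
 * The DATA walk in sctp_process_data() treats the packet as a sequence
 * of (type, flags, length) chunk headers, each padded out to a 4-byte
 * boundary by SCTP_SIZE32(). A minimal sketch of that traversal over a
 * contiguous buffer instead of an mbuf chain (illustrative only,
 * hypothetical helper, not compiled):
 */
#if 0
static void
walk_chunks(u_int8_t *pkt, int length, int offset)
{
	struct sctp_chunkhdr *ch;
	int chk_length;

	while (offset + (int)sizeof(struct sctp_chunkhdr) <= length) {
		ch = (struct sctp_chunkhdr *)(pkt + offset);
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < (int)sizeof(struct sctp_chunkhdr) ||
		    length - offset < chk_length)
			break;		/* malformed; the real code aborts */
		/* ... process the chunk body here ... */
		offset += SCTP_SIZE32(chk_length);	/* 4-byte padded */
	}
}
#endif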
2703
2704 static void
2705 sctp_handle_segments(struct sctp_tcb *stcb, struct sctp_association *asoc,
2706 struct sctp_sack_chunk *ch, u_long last_tsn, u_long *biggest_tsn_acked,
2707 u_long *biggest_newly_acked_tsn, int num_seg, int *ecn_seg_sums)
2708 {
2709 /************************************************/
2710 /* process fragments and update sendqueue */
2711 /************************************************/
2712 struct sctp_sack *sack;
2713 struct sctp_gap_ack_block *frag;
2714 struct sctp_tmit_chunk *tp1;
2715 int i;
2716 unsigned int j;
2717 #ifdef SCTP_FR_LOGGING
2718 int num_frs=0;
2719 #endif
2720 uint16_t frag_strt, frag_end, primary_flag_set;
2721 u_long last_frag_high;
2722
2723 if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
2724 primary_flag_set = 1;
2725 } else {
2726 primary_flag_set = 0;
2727 }
2728
2729 sack = &ch->sack;
2730 frag = (struct sctp_gap_ack_block *)((vaddr_t)sack +
2731 sizeof(struct sctp_sack));
2732 tp1 = NULL;
2733 last_frag_high = 0;
2734 for (i = 0; i < num_seg; i++) {
2735 frag_strt = ntohs(frag->start);
2736 frag_end = ntohs(frag->end);
2737 		/* some sanity checks on the fragment offsets */
2738 if (frag_strt > frag_end) {
2739 /* this one is malformed, skip */
2740 frag++;
2741 continue;
2742 }
2743 if (compare_with_wrap((frag_end+last_tsn), *biggest_tsn_acked,
2744 MAX_TSN))
2745 *biggest_tsn_acked = frag_end+last_tsn;
2746
2747 /* mark acked dgs and find out the highestTSN being acked */
2748 if (tp1 == NULL) {
2749 tp1 = TAILQ_FIRST(&asoc->sent_queue);
2750
2751 /* save the locations of the last frags */
2752 last_frag_high = frag_end + last_tsn;
2753 } else {
2754 /*
2755 			 * now let's see if we need to reset the queue
2756 			 * due to an out-of-order SACK fragment
2757 */
2758 if (compare_with_wrap(frag_strt+last_tsn,
2759 last_frag_high, MAX_TSN)) {
2760 /*
2761 * if the new frag starts after the last TSN
2762 * frag covered, we are ok
2763 * and this one is beyond the last one
2764 */
2765 ;
2766 } else {
2767 /*
2768 				 * ok, they have reset us, so we need to reset
2769 				 * the queue; this will cause extra hunting but
2770 				 * hey, they chose the performance
2771 				 * hit when they failed to order their gaps..
2772 */
2773 tp1 = TAILQ_FIRST(&asoc->sent_queue);
2774 }
2775 last_frag_high = frag_end + last_tsn;
2776 }
2777 for (j = frag_strt + last_tsn; j <= frag_end + last_tsn; j++) {
2778 while (tp1) {
2779 #ifdef SCTP_FR_LOGGING
2780 if (tp1->rec.data.doing_fast_retransmit)
2781 num_frs++;
2782 #endif
2783
2784 if (tp1->rec.data.TSN_seq == j) {
2785 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2786 /* must be held until cum-ack passes */
2787 /* ECN Nonce: Add the nonce value to the sender's nonce sum */
2788 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
2789 /*
2790 * If it is less than
2791 * ACKED, it is now
2792 * no-longer in flight.
2793 * Higher values may
2794 * already be set via
2795 * previous Gap Ack
2796 * Blocks...
2797 * i.e. ACKED or MARKED.
2798 */
2799 if (compare_with_wrap(tp1->rec.data.TSN_seq,
2800 *biggest_newly_acked_tsn,
2801 MAX_TSN)) {
2802 *biggest_newly_acked_tsn =
2803 tp1->rec.data.TSN_seq;
2804 }
2805 sctp_flight_size_decrease(tp1);
2806
2807 sctp_total_flight_decrease(stcb, tp1);
2808
2809 if (tp1->snd_count < 2) {
2810 						/* True non-retransmitted chunk */
2811 tp1->whoTo->net_ack2 +=
2812 tp1->send_size;
2813
2814 /* update RTO too? */
2815 if (tp1->do_rtt) {
2816 tp1->whoTo->RTO =
2817 sctp_calculate_rto(stcb,
2818 asoc,
2819 tp1->whoTo,
2820 &tp1->sent_rcv_time);
2821 tp1->whoTo->rto_pending = 0;
2822 tp1->do_rtt = 0;
2823 }
2824 }
2825 }
2826 if (tp1->sent <= SCTP_DATAGRAM_RESEND &&
2827 tp1->sent != SCTP_DATAGRAM_UNSENT &&
2828 compare_with_wrap(tp1->rec.data.TSN_seq,
2829 asoc->this_sack_highest_gap,
2830 MAX_TSN)) {
2831 asoc->this_sack_highest_gap =
2832 tp1->rec.data.TSN_seq;
2833 if (primary_flag_set) {
2834 tp1->whoTo->cacc_saw_newack = 1;
2835 }
2836 }
2837 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2838 #ifdef SCTP_DEBUG
2839 if (sctp_debug_on &
2840 SCTP_DEBUG_INDATA3) {
2841 printf("Hmm. one that is in RESEND that is now ACKED\n");
2842 }
2843 #endif
2844 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
2845 #ifdef SCTP_AUDITING_ENABLED
2846 sctp_audit_log(0xB2,
2847 (asoc->sent_queue_retran_cnt & 0x000000ff));
2848 #endif
2849
2850 }
2851 (*ecn_seg_sums) += tp1->rec.data.ect_nonce;
2852 (*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM;
2853 tp1->sent = SCTP_DATAGRAM_MARKED;
2854 }
2855 break;
2856 } /* if (tp1->TSN_seq == j) */
2857 if (compare_with_wrap(tp1->rec.data.TSN_seq, j,
2858 MAX_TSN))
2859 break;
2860 tp1 = TAILQ_NEXT(tp1, sctp_next);
2861 }/* end while (tp1) */
2862 } /* end for (j = fragStart */
2863 frag++; /* next one */
2864 }
2865 #ifdef SCTP_FR_LOGGING
2866 if (num_frs)
2867 sctp_log_fr(*biggest_tsn_acked, *biggest_newly_acked_tsn,
2868 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
2869 #endif
2870 }
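/*
 * Gap Ack Blocks carry 16-bit start/end offsets relative to the
 * cumulative TSN ack (RFC 2960 section 3.3.4), so a block (start, end)
 * acknowledges the absolute TSNs cum_ack + start through cum_ack + end
 * inclusive. For example, with a cum-ack of 1000 a block of (2, 4)
 * acknowledges TSNs 1002 through 1004 and reports TSN 1001 as still
 * missing.
 */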
2871
2872 static void
2873 sctp_check_for_revoked(struct sctp_association *asoc, u_long cum_ack,
2874 u_long biggest_tsn_acked)
2875 {
2876 struct sctp_tmit_chunk *tp1;
2877 int tot_revoked=0;
2878
2879 tp1 = TAILQ_FIRST(&asoc->sent_queue);
2880 while (tp1) {
2881 if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
2882 MAX_TSN)) {
2883 /*
2884 			 * ok this guy is either ACKED or MARKED. If it is ACKED
2885 			 * it has been previously acked but not this time, i.e.
2886 			 * revoked. If it is MARKED it was ACK'ed again.
2887 */
2888 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
2889 /* it has been revoked */
2890 /*
2891 * We do NOT add back to flight size here since
2892 				 * it is really NOT in flight. A resend (when/if
2893 				 * it occurs) will add to flight size.
2894 */
2895 tp1->sent = SCTP_DATAGRAM_SENT;
2896 tot_revoked++;
2897 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
2898 /* it has been re-acked in this SACK */
2899 tp1->sent = SCTP_DATAGRAM_ACKED;
2900 }
2901 }
2902 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
2903 MAX_TSN)) {
2904 /* above the sack */
2905 break;
2906 }
2907 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
2908 break;
2909 tp1 = TAILQ_NEXT(tp1, sctp_next);
2910 }
2911 if (tot_revoked > 0) {
2912 /* Setup the ecn nonce re-sync point. We
2913 * do this since once data is revoked
2914 * we begin to retransmit things, which
2915 * do NOT have the ECN bits set. This means
2916 * we are now out of sync and must wait until
2917 * we get back in sync with the peer to
2918 * check ECN bits.
2919 */
2920 tp1 = TAILQ_FIRST(&asoc->send_queue);
2921 if (tp1 == NULL) {
2922 asoc->nonce_resync_tsn = asoc->sending_seq;
2923 } else {
2924 asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq;
2925 }
2926 asoc->nonce_wait_for_ecne = 0;
2927 asoc->nonce_sum_check = 0;
2928 }
2929
2930 }
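/*
 * Example of revoking: suppose the cum-ack sits at TSN 5 and an earlier
 * SACK gap-acked TSNs 7-8, moving them to ACKED. If the next SACK still
 * carries cum-ack 5 but its gap blocks no longer cover 8, the loop above
 * finds 8 in ACKED and moves it back to SENT: the peer has revoked the
 * acknowledgement and the chunk counts as outstanding again (though not
 * as in-flight, per the comment above).
 */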
2931
2932 extern int sctp_peer_chunk_oh;
2933
2934 static void
2935 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
2936 u_long biggest_tsn_acked, int strike_enabled,
2937 u_long biggest_tsn_newly_acked, int accum_moved)
2938 {
2939 struct sctp_tmit_chunk *tp1;
2940 int strike_flag=0;
2941 struct timeval now;
2942 int tot_retrans=0;
2943 u_int32_t sending_seq;
2944 int primary_switch_active = 0;
2945 int double_switch_active = 0;
2946
2947 /* select the sending_seq, this is
2948 * either the next thing ready to
2949 * be sent but not transmitted, OR,
2950 * the next seq we assign.
2951 */
2952 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
2953 if (tp1 == NULL) {
2954 sending_seq = asoc->sending_seq;
2955 } else {
2956 sending_seq = tp1->rec.data.TSN_seq;
2957 }
2958
2959 if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
2960 primary_switch_active = 1;
2961 }
2962 if (asoc->primary_destination->dest_state & SCTP_ADDR_DOUBLE_SWITCH) {
2963 double_switch_active = 1;
2964 }
2965 if (stcb->asoc.peer_supports_prsctp ) {
2966 SCTP_GETTIME_TIMEVAL(&now);
2967 }
2968 tp1 = TAILQ_FIRST(&asoc->sent_queue);
2969 while (tp1) {
2970 strike_flag=0;
2971 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
2972 MAX_TSN) ||
2973 tp1->sent == SCTP_DATAGRAM_UNSENT) {
2974 /* done */
2975 break;
2976 }
2977 if ((tp1->flags & (SCTP_PR_SCTP_ENABLED|SCTP_PR_SCTP_BUFFER)) ==
2978 SCTP_PR_SCTP_ENABLED &&
2979 tp1->sent < SCTP_DATAGRAM_ACKED) {
2980 /* Is it expired? */
2981 #ifndef __FreeBSD__
2982 if (timercmp(&now, &tp1->rec.data.timetodrop, >))
2983 #else
2984 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >))
2985 #endif
2986 {
2987 /* Yes so drop it */
2988 if (tp1->data != NULL) {
2989 sctp_release_pr_sctp_chunk(stcb, tp1,
2990 (SCTP_RESPONSE_TO_USER_REQ|SCTP_NOTIFY_DATAGRAM_SENT),
2991 &asoc->sent_queue);
2992 }
2993 tp1 = TAILQ_NEXT(tp1, sctp_next);
2994 continue;
2995 }
2996 }
2997
2998 if (compare_with_wrap(tp1->rec.data.TSN_seq,
2999 asoc->this_sack_highest_gap, MAX_TSN)) {
3000 /* we are beyond the tsn in the sack */
3001 break;
3002 }
3003 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3004 /* either a RESEND, ACKED, or MARKED */
3005 /* skip */
3006 tp1 = TAILQ_NEXT(tp1, sctp_next);
3007 continue;
3008 }
3009 if (primary_switch_active && (strike_enabled == 0)) {
3010 if (tp1->whoTo != asoc->primary_destination) {
3011 /*
3012 * We can only strike things on the primary if
3013 * the strike_enabled flag is clear
3014 */
3015 tp1 = TAILQ_NEXT(tp1, sctp_next);
3016 continue;
3017 }
3018 } else if (primary_switch_active) {
3019 if (tp1->whoTo->cacc_saw_newack == 0) {
3020 /*
3021 * Only one was received but it was NOT
3022 * this one.
3023 */
3024 tp1 = TAILQ_NEXT(tp1, sctp_next);
3025 continue;
3026 }
3027 }
3028 if (double_switch_active &&
3029 (compare_with_wrap(asoc->primary_destination->next_tsn_at_change,
3030 tp1->rec.data.TSN_seq, MAX_TSN))) {
3031 /*
3032 * With a double switch we do NOT mark unless we
3033 * are beyond the switch point.
3034 */
3035 tp1 = TAILQ_NEXT(tp1, sctp_next);
3036 continue;
3037 }
3038 /*
3039 		 * Here we check to see if we have already done a FR
3040 * and if so we see if the biggest TSN we saw in the sack is
3041 * smaller than the recovery point. If so we don't strike the
3042 * tsn... otherwise we CAN strike the TSN.
3043 */
3044 if (accum_moved && asoc->fast_retran_loss_recovery) {
3045 /*
3046 * Strike the TSN if in fast-recovery and
3047 * cum-ack moved.
3048 */
3049 tp1->sent++;
3050 } else if (tp1->rec.data.doing_fast_retransmit) {
3051 /*
3052 * For those that have done a FR we must
3053 			 * take special consideration if we strike. I.e.
3054 * the biggest_newly_acked must be higher
3055 * than the sending_seq at the time we did
3056 * the FR.
3057 */
3058 #ifdef SCTP_FR_TO_ALTERNATE
3059 /*
3060 * If FR's go to new networks, then we
3061 * must only do this for singly homed asoc's. However
3062 * if the FR's go to the same network (Armando's work)
3063 			 * then it's ok to FR multiple times.
3064 */
3065 if (asoc->numnets < 2)
3066 #else
3067 if (1)
3068 #endif
3069 {
3070 if ((compare_with_wrap(biggest_tsn_newly_acked,
3071 tp1->rec.data.fast_retran_tsn, MAX_TSN)) ||
3072 (biggest_tsn_newly_acked ==
3073 tp1->rec.data.fast_retran_tsn)) {
3074 /*
3075 * Strike the TSN, since this ack is
3076 * beyond where things were when we did
3077 * a FR.
3078 */
3079 #ifdef SCTP_FR_LOGGING
3080 sctp_log_fr(biggest_tsn_newly_acked,
3081 tp1->rec.data.TSN_seq,
3082 tp1->rec.data.fast_retran_tsn,
3083 SCTP_FR_LOG_STRIKE_CHUNK);
3084 #endif
3085 tp1->sent++;
3086 strike_flag=1;
3087 }
3088 }
3089 } else if (compare_with_wrap(tp1->rec.data.TSN_seq,
3090 biggest_tsn_newly_acked, MAX_TSN)) {
3091 /*
3092 * We don't strike these:
3093 			 * This is the HTNA algorithm, i.e. we don't strike
3094 			 * if our TSN is larger than the Highest TSN Newly
3095 			 * Acked.
3096 */
3097 ;
3098 } else {
3099 /* Strike the TSN */
3100 tp1->sent++;
3101 }
3102 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3103 /* Increment the count to resend */
3104 struct sctp_nets *alt;
3105
3106 #ifdef SCTP_FR_LOGGING
3107 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3108 0, SCTP_FR_MARKED);
3109 #endif
3110 if (strike_flag) {
3111 /* This is a subsequent FR */
3112 sctp_pegs[SCTP_DUP_FR]++;
3113 }
3114 asoc->sent_queue_retran_cnt++;
3115 #ifdef SCTP_FR_TO_ALTERNATE
3116 /* Can we find an alternate? */
3117 alt = sctp_find_alternate_net(stcb, tp1->whoTo);
3118 #else
3119 /*
3120 * default behavior is to NOT retransmit FR's
3121 * to an alternate. Armando Caro's paper details
3122 * why.
3123 */
3124 alt = tp1->whoTo;
3125 #endif
3126 tp1->rec.data.doing_fast_retransmit = 1;
3127 tot_retrans++;
3128 /* mark the sending seq for possible subsequent FR's */
3129 if (TAILQ_EMPTY(&asoc->send_queue)) {
3130 /*
3131 				 * If the send queue is empty then it is the
3132 				 * next sequence number that will be assigned, so
3133 				 * we subtract one from this to get the one we
3134 				 * last sent.
3135 */
3136 tp1->rec.data.fast_retran_tsn = sending_seq - 1;
3137 } else {
3138 /*
3139 				 * If there are chunks on the send queue
3140 				 * (unsent data that has made it from the
3141 				 * stream queues but not out the door), we take
3142 				 * the first one (which will have the lowest
3143 				 * TSN) and subtract one to get the one we last
3144 				 * sent.
3145 */
3146 struct sctp_tmit_chunk *ttt;
3147 ttt = TAILQ_FIRST(&asoc->send_queue);
3148 tp1->rec.data.fast_retran_tsn =
3149 ttt->rec.data.TSN_seq - 1;
3150 }
3151 if (tp1->do_rtt) {
3152 /*
3153 				 * this guy had an RTO calculation pending on it,
3154 * cancel it
3155 */
3156 tp1->whoTo->rto_pending = 0;
3157 tp1->do_rtt = 0;
3158 }
3159 /* fix counts and things */
3160
3161 tp1->whoTo->net_ack++;
3162 sctp_flight_size_decrease(tp1);
3163 #ifdef SCTP_LOG_RWND
3164 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3165 asoc->peers_rwnd , tp1->send_size, sctp_peer_chunk_oh);
3166 #endif
3167 /* add back to the rwnd */
3168 asoc->peers_rwnd += (tp1->send_size + sctp_peer_chunk_oh);
3169
3170 /* remove from the total flight */
3171 sctp_total_flight_decrease(stcb, tp1);
3172 if (alt != tp1->whoTo) {
3173 /* yes, there is an alternate. */
3174 sctp_free_remote_addr(tp1->whoTo);
3175 tp1->whoTo = alt;
3176 alt->ref_count++;
3177 }
3178 }
3179 tp1 = TAILQ_NEXT(tp1, sctp_next);
3180 } /* while (tp1) */
3181
3182 if (tot_retrans > 0) {
3183 /* Setup the ecn nonce re-sync point. We
3184 		 * do this since once we FR something
3185 * we introduce a Karn's rule scenario and
3186 * won't know the totals for the ECN bits.
3187 */
3188 asoc->nonce_resync_tsn = sending_seq;
3189 asoc->nonce_wait_for_ecne = 0;
3190 asoc->nonce_sum_check = 0;
3191 }
3192
3193 }
3194
3195 struct sctp_tmit_chunk *
3196 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3197 struct sctp_association *asoc)
3198 {
3199 struct sctp_tmit_chunk *tp1, *tp2, *a_adv=NULL;
3200 struct timeval now;
3201 int now_filled=0;
3202
3203 if (asoc->peer_supports_prsctp == 0) {
3204 return (NULL);
3205 }
3206 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3207 while (tp1) {
3208 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3209 tp1->sent != SCTP_DATAGRAM_RESEND) {
3210 /* no chance to advance, out of here */
3211 break;
3212 }
3213 if ((tp1->flags & SCTP_PR_SCTP_ENABLED) == 0) {
3214 /*
3215 * We can't fwd-tsn past any that are reliable
3216 * aka retransmitted until the asoc fails.
3217 */
3218 break;
3219 }
3220 if (!now_filled) {
3221 SCTP_GETTIME_TIMEVAL(&now);
3222 now_filled = 1;
3223 }
3224 tp2 = TAILQ_NEXT(tp1, sctp_next);
3225 /*
3226 		 * now we got a chunk which is marked for another
3227 		 * retransmission to a PR-stream but has maybe run
3228 		 * out of its chances already OR has been
3229 		 * marked to skip now. Can we skip it if it's a
3230 		 * resend?
3231 */
3232 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3233 (tp1->flags & SCTP_PR_SCTP_BUFFER) == 0) {
3234 /*
3235 * Now is this one marked for resend and its time
3236 * is now up?
3237 */
3238 #ifndef __FreeBSD__
3239 if (timercmp(&now, &tp1->rec.data.timetodrop, >))
3240 #else
3241 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >))
3242 #endif
3243 {
3244 /* Yes so drop it */
3245 if (tp1->data) {
3246 sctp_release_pr_sctp_chunk(stcb, tp1,
3247 (SCTP_RESPONSE_TO_USER_REQ|SCTP_NOTIFY_DATAGRAM_SENT),
3248 &asoc->sent_queue);
3249 }
3250 } else {
3251 /*
3252 				 * No, we are done when we hit one marked for resend
3253 				 * whose time has not expired.
3254 */
3255 break;
3256 }
3257 }
3258 /*
3259 * Ok now if this chunk is marked to drop it
3260 * we can clean up the chunk, advance our peer ack point
3261 * and we can check the next chunk.
3262 */
3263 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3264 /* advance PeerAckPoint goes forward */
3265 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3266 a_adv = tp1;
3267 /*
3268 			 * we don't want to de-queue it here. Just wait for the
3269 			 * next peer SACK to come with a new cumTSN and then
3270 			 * the chunk will be dropped in the normal fashion.
3271 */
3272 if (tp1->data) {
3273 sctp_free_bufspace(stcb, asoc, tp1);
3274 #ifdef SCTP_DEBUG
3275 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
3276 printf("--total out:%lu total_mbuf_out:%lu\n",
3277 (u_long)asoc->total_output_queue_size,
3278 (u_long)asoc->total_output_mbuf_queue_size);
3279 }
3280 #endif
3281 /*
3282 * Maybe there should be another notification
3283 * type
3284 */
3285 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3286 (SCTP_RESPONSE_TO_USER_REQ|SCTP_NOTIFY_DATAGRAM_SENT),
3287 tp1);
3288 sctp_m_freem(tp1->data);
3289 tp1->data = NULL;
3290 sctp_sowwakeup(stcb->sctp_ep,
3291 stcb->sctp_socket);
3292 }
3293 } else {
3294 /* If it is still in RESEND we can advance no further */
3295 break;
3296 }
3297 /*
3298 * If we hit here we just dumped tp1, move to next
3299 * tsn on sent queue.
3300 */
3301 tp1 = tp2;
3302 }
3303 return (a_adv);
3304 }
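/*
 * For example, if the sent queue holds TSNs 10 and 11 marked
 * SCTP_FORWARD_TSN_SKIP followed by TSN 12 still in RESEND with time
 * left, the loop above advances advanced_peer_ack_point to 11 and
 * returns the TSN 11 chunk, from which the caller can build a
 * FORWARD-TSN asking the peer to move its cumulative ack past the
 * abandoned data.
 */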
3305
3306 #ifdef SCTP_HIGH_SPEED
3307 struct sctp_hs_raise_drop {
3308 int32_t cwnd;
3309 int32_t increase;
3310 int32_t drop_percent;
3311 };
3312
3313 #define SCTP_HS_TABLE_SIZE 73
3314
3315 struct sctp_hs_raise_drop sctp_cwnd_adjust[SCTP_HS_TABLE_SIZE] = {
3316 {38,1,50}, /* 0 */
3317 {118,2,44}, /* 1 */
3318 {221,3,41}, /* 2 */
3319 {347,4,38}, /* 3 */
3320 {495,5,37}, /* 4 */
3321 {663,6,35}, /* 5 */
3322 {851,7,34}, /* 6 */
3323 {1058,8,33}, /* 7 */
3324 {1284,9,32}, /* 8 */
3325 {1529,10,31}, /* 9 */
3326 {1793,11,30}, /* 10 */
3327 {2076,12,29}, /* 11 */
3328 {2378,13,28}, /* 12 */
3329 {2699,14,28}, /* 13 */
3330 {3039,15,27}, /* 14 */
3331 {3399,16,27}, /* 15 */
3332 {3778,17,26}, /* 16 */
3333 {4177,18,26}, /* 17 */
3334 {4596,19,25}, /* 18 */
3335 {5036,20,25}, /* 19 */
3336 {5497,21,24}, /* 20 */
3337 {5979,22,24}, /* 21 */
3338 {6483,23,23}, /* 22 */
3339 {7009,24,23}, /* 23 */
3340 {7558,25,22}, /* 24 */
3341 {8130,26,22}, /* 25 */
3342 {8726,27,22}, /* 26 */
3343 {9346,28,21}, /* 27 */
3344 {9991,29,21}, /* 28 */
3345 {10661,30,21}, /* 29 */
3346 {11358,31,20}, /* 30 */
3347 {12082,32,20}, /* 31 */
3348 {12834,33,20}, /* 32 */
3349 {13614,34,19}, /* 33 */
3350 {14424,35,19}, /* 34 */
3351 {15265,36,19}, /* 35 */
3352 {16137,37,19}, /* 36 */
3353 {17042,38,18}, /* 37 */
3354 {17981,39,18}, /* 38 */
3355 {18955,40,18}, /* 39 */
3356 {19965,41,17}, /* 40 */
3357 {21013,42,17}, /* 41 */
3358 {22101,43,17}, /* 42 */
3359 {23230,44,17}, /* 43 */
3360 {24402,45,16}, /* 44 */
3361 {25618,46,16}, /* 45 */
3362 {26881,47,16}, /* 46 */
3363 {28193,48,16}, /* 47 */
3364 {29557,49,15}, /* 48 */
3365 {30975,50,15}, /* 49 */
3366 {32450,51,15}, /* 50 */
3367 {33986,52,15}, /* 51 */
3368 {35586,53,14}, /* 52 */
3369 {37253,54,14}, /* 53 */
3370 {38992,55,14}, /* 54 */
3371 {40808,56,14}, /* 55 */
3372 {42707,57,13}, /* 56 */
3373 {44694,58,13}, /* 57 */
3374 {46776,59,13}, /* 58 */
3375 {48961,60,13}, /* 59 */
3376 {51258,61,13}, /* 60 */
3377 {53677,62,12}, /* 61 */
3378 {56230,63,12}, /* 62 */
3379 {58932,64,12}, /* 63 */
3380 {61799,65,12}, /* 64 */
3381 {64851,66,11}, /* 65 */
3382 {68113,67,11}, /* 66 */
3383 {71617,68,11}, /* 67 */
3384 {75401,69,10}, /* 68 */
3385 {79517,70,10}, /* 69 */
3386 {84035,71,10}, /* 70 */
3387 {89053,72,10}, /* 71 */
3388 {94717,73,9} /* 72 */
3389 };
3390
3391 static void
3392 sctp_hs_cwnd_increase(struct sctp_nets *net)
3393 {
3394 int cur_val, i, indx, incr;
3395
3396 cur_val = net->cwnd >> 10;
3397 indx = SCTP_HS_TABLE_SIZE - 1;
3398
3399 if (cur_val < sctp_cwnd_adjust[0].cwnd) {
3400 /* normal mode */
3401 if (net->net_ack > net->mtu) {
3402 net->cwnd += net->mtu;
3403 #ifdef SCTP_CWND_LOGGING
3404 sctp_log_cwnd(net, net->mtu, SCTP_CWND_LOG_FROM_SS);
3405 #endif
3406 } else {
3407 net->cwnd += net->net_ack;
3408 #ifdef SCTP_CWND_LOGGING
3409 sctp_log_cwnd(net, net->net_ack, SCTP_CWND_LOG_FROM_SS);
3410 #endif
3411 }
3412 } else {
3413 for (i=net->last_hs_used; i<SCTP_HS_TABLE_SIZE; i++) {
3414 if (cur_val < sctp_cwnd_adjust[i].cwnd) {
3415 indx = i;
3416 break;
3417 }
3418 }
3419 net->last_hs_used = indx;
3420 incr = ((sctp_cwnd_adjust[indx].increase) << 10);
3421 net->cwnd += incr;
3422 #ifdef SCTP_CWND_LOGGING
3423 sctp_log_cwnd(net, incr, SCTP_CWND_LOG_FROM_SS);
3424 #endif
3425 }
3426 }
3427
3428 static void
3429 sctp_hs_cwnd_decrease(struct sctp_nets *net)
3430 {
3431 int cur_val, i, indx;
3432 #ifdef SCTP_CWND_LOGGING
3433 int old_cwnd = net->cwnd;
3434 #endif
3435
3436 cur_val = net->cwnd >> 10;
3437 indx = net->last_hs_used;
3438 if (cur_val < sctp_cwnd_adjust[0].cwnd) {
3439 /* normal mode */
3440 net->ssthresh = net->cwnd / 2;
3441 if (net->ssthresh < (net->mtu*2)) {
3442 net->ssthresh = 2 * net->mtu;
3443 }
3444 net->cwnd = net->ssthresh;
3445 #ifdef SCTP_CWND_LOGGING
3446 sctp_log_cwnd(net, (net->cwnd-old_cwnd), SCTP_CWND_LOG_FROM_FR);
3447 #endif
3448 } else {
3449 /* drop by the proper amount */
3450 net->ssthresh = net->cwnd - (int)((net->cwnd / 100) *
3451 sctp_cwnd_adjust[net->last_hs_used].drop_percent);
3452 net->cwnd = net->ssthresh;
3453 /* now where are we */
3454 indx = net->last_hs_used;
3455 cur_val = net->cwnd >> 10;
3456 /* reset where we are in the table */
3457 if (cur_val < sctp_cwnd_adjust[0].cwnd) {
3458 			/* fell out of hs */
3459 net->last_hs_used = 0;
3460 } else {
3461 for (i = indx; i >= 1; i--) {
3462 if (cur_val > sctp_cwnd_adjust[i - 1].cwnd) {
3463 break;
3464 }
3465 }
3466 net->last_hs_used = indx;
3467 }
3468 }
3469 }
3470 #endif
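/*
 * To put the table above in concrete terms: with cwnd = 2MB, cur_val is
 * 2048 (cwnd >> 10), which first falls below entry 11 ({2076,12,29}),
 * so each increase step adds 12 << 10 bytes to cwnd and a loss event at
 * that size cuts cwnd by 29%. Below the first entry (cur_val < 38) both
 * routines fall back to the standard slow-start style increase and the
 * halving of cwnd on loss.
 */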
3471
3472 void
3473 sctp_handle_sack(struct sctp_sack_chunk *ch, struct sctp_tcb *stcb,
3474 struct sctp_nets *net_from, int *abort_now)
3475 {
3476 struct sctp_association *asoc;
3477 struct sctp_sack *sack;
3478 struct sctp_tmit_chunk *tp1, *tp2;
3479 u_long cum_ack, last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked;
3480 uint16_t num_seg;
3481 unsigned int sack_length;
3482 uint32_t send_s;
3483 int some_on_streamwheel;
3484 int strike_enabled = 0, cnt_of_cacc = 0;
3485 int accum_moved = 0;
3486 int marking_allowed = 1;
3487 int will_exit_fast_recovery=0;
3488 u_int32_t a_rwnd;
3489 struct sctp_nets *net = NULL;
3490 int nonce_sum_flag, ecn_seg_sums=0;
3491 asoc = &stcb->asoc;
3492
3493 /*
3494 * Handle the incoming sack on data I have been sending.
3495 */
3496
3497 /*
3498 * we take any chance we can to service our queues since we
3499 * cannot get awoken when the socket is read from :<
3500 */
3501 asoc->overall_error_count = 0;
3502
3503 if (asoc->sent_queue_retran_cnt) {
3504 #ifdef SCTP_DEBUG
3505 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
3506 printf("Handling SACK for asoc:%p retran:%d\n",
3507 asoc, asoc->sent_queue_retran_cnt);
3508 }
3509 #endif
3510 }
3511
3512 sctp_service_queues(stcb, asoc, 0);
3513
3514 /*
3515 * Now perform the actual SACK handling:
3516 * 1) Verify that it is not an old sack, if so discard.
3517 * 2) If there is nothing left in the send queue (cum-ack is equal
3518 * to last acked) then you have a duplicate too, update any rwnd
3519 	 *    change and verify no timers are running. Then return.
3520 	 * 3) Process any new consecutive data i.e. cum-ack moved,
3521 * process these first and note that it moved.
3522 * 4) Process any sack blocks.
3523 * 5) Drop any acked from the queue.
3524 * 6) Check for any revoked blocks and mark.
3525 * 7) Update the cwnd.
3526 * 8) Nothing left, sync up flightsizes and things, stop all timers
3527 * and also check for shutdown_pending state. If so then go ahead
3528 * and send off the shutdown. If in shutdown recv, send off the
3529 * shutdown-ack and start that timer, Ret.
3530 * 9) Strike any non-acked things and do FR procedure if needed being
3531 * sure to set the FR flag.
3532 * 10) Do pr-sctp procedures.
3533 * 11) Apply any FR penalties.
3534 * 12) Assure we will SACK if in shutdown_recv state.
3535 */
3536
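	/*
	 * For reference, the SACK chunk parsed here is laid out as
	 * (RFC 2960 section 3.3.4): a 4-byte cumulative TSN ack, a
	 * 4-byte advertised receiver window (a_rwnd), 2-byte counts of
	 * gap ack blocks and of duplicate TSNs, followed by the gap ack
	 * blocks (2-byte start/end offsets each) and then the duplicate
	 * TSNs.
	 */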
3537 sack_length = ntohs(ch->ch.chunk_length);
3538 if (sack_length < sizeof(struct sctp_sack_chunk)) {
3539 #ifdef SCTP_DEBUG
3540 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
3541 printf("Bad size on sack chunk .. to small\n");
3542 }
3543 #endif
3544 return;
3545 }
3546 /* ECN Nonce */
3547 nonce_sum_flag = ch->ch.chunk_flags & SCTP_SACK_NONCE_SUM;
3548 sack = &ch->sack;
3549 cum_ack = last_tsn = ntohl(sack->cum_tsn_ack);
3550 num_seg = ntohs(sack->num_gap_ack_blks);
3551
3552 /* reality check */
3553 if (TAILQ_EMPTY(&asoc->send_queue)) {
3554 send_s = asoc->sending_seq;
3555 } else {
3556 tp1 = TAILQ_FIRST(&asoc->send_queue);
3557 send_s = tp1->rec.data.TSN_seq;
3558 }
3559
3560 if (sctp_strict_sacks) {
3561 if (cum_ack == send_s ||
3562 compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
3563 struct mbuf *oper;
3564 /*
3565 * no way, we have not even sent this TSN out yet.
3566 * Peer is hopelessly messed up with us.
3567 */
3568 hopeless_peer:
3569 *abort_now = 1;
3570 /* XXX */
3571 MGET(oper, M_DONTWAIT, MT_DATA);
3572 if (oper) {
3573 struct sctp_paramhdr *ph;
3574 u_int32_t *ippp;
3575
3576 oper->m_len = sizeof(struct sctp_paramhdr) +
3577 sizeof(*ippp);
3578 ph = mtod(oper, struct sctp_paramhdr *);
3579 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
3580 ph->param_length = htons(oper->m_len);
3581 ippp = (u_int32_t *)(ph + 1);
3582 *ippp = htonl(0x30000002);
3583 }
3584 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper);
3585 return;
3586 }
3587 }
3588 /* update the Rwnd of the peer */
3589 a_rwnd = (u_int32_t)ntohl(sack->a_rwnd);
3590 if (asoc->sent_queue_retran_cnt) {
3591 #ifdef SCTP_DEBUG
3592 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
3593 printf("cum_ack:%lx num_seg:%u last_acked_seq:%x\n",
3594 cum_ack, (u_int)num_seg, asoc->last_acked_seq);
3595 }
3596 #endif
3597 }
3598 if (compare_with_wrap(asoc->t3timeout_highest_marked, cum_ack, MAX_TSN)) {
3599 /* we are not allowed to mark for FR */
3600 marking_allowed = 0;
3601 }
3602 /**********************/
3603 /* 1) check the range */
3604 /**********************/
3605 if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
3606 /* acking something behind */
3607 if (asoc->sent_queue_retran_cnt) {
3608 #ifdef SCTP_DEBUG
3609 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
3610 printf("The cum-ack is behind us\n");
3611 }
3612 #endif
3613 }
3614 return;
3615 }
3616
3617 if (TAILQ_EMPTY(&asoc->sent_queue)) {
3618 /* nothing left on sendqueue.. consider done */
3619 #ifdef SCTP_LOG_RWND
3620 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
3621 asoc->peers_rwnd, 0, 0, a_rwnd);
3622 #endif
3623 asoc->peers_rwnd = a_rwnd;
3624 if (asoc->sent_queue_retran_cnt) {
3625 #ifdef SCTP_DEBUG
3626 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
3627 printf("Huh? retran set but none on queue\n");
3628 }
3629 #endif
3630 asoc->sent_queue_retran_cnt = 0;
3631 }
3632 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3633 /* SWS sender side engages */
3634 asoc->peers_rwnd = 0;
3635 }
3636 /* stop any timers */
3637 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3638 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
3639 stcb, net);
3640 net->partial_bytes_acked = 0;
3641 net->flight_size = 0;
3642 }
3643 asoc->total_flight = 0;
3644 asoc->total_flight_count = 0;
3645 return;
3646 }
3647 /*
3648 * We init netAckSz and netAckSz2 to 0. These are used to track 2
3649 * things. The total byte count acked is tracked in netAckSz AND
3650 * netAck2 is used to track the total bytes acked that are un-
3651 	 * ambiguous and were never retransmitted. We track these on a
3652 * per destination address basis.
3653 */
3654 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3655 net->prev_cwnd = net->cwnd;
3656 net->net_ack = 0;
3657 net->net_ack2 = 0;
3658 }
3659 /* process the new consecutive TSN first */
3660 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3661 while (tp1) {
3662 if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq,
3663 MAX_TSN) ||
3664 last_tsn == tp1->rec.data.TSN_seq) {
3665 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
3666 /* ECN Nonce: Add the nonce to the sender's nonce sum */
3667 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
3668 accum_moved = 1;
3669 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3670 /*
3671 * If it is less than ACKED, it is now
3672 * no-longer in flight. Higher values
3673 * may occur during marking
3674 */
3675 if ((tp1->whoTo->dest_state &
3676 SCTP_ADDR_UNCONFIRMED) &&
3677 (tp1->snd_count < 2) ) {
3678 					/*
3679 					 * If there was no retransmission
3680 					 * and the address is unconfirmed,
3681 					 * and we sent to it and have now
3682 					 * been sacked: it's confirmed,
3683 					 * mark it so.
3684 					 */
3685 tp1->whoTo->dest_state &=
3686 ~SCTP_ADDR_UNCONFIRMED;
3687 }
3688 sctp_flight_size_decrease(tp1);
3689 sctp_total_flight_decrease(stcb, tp1);
3690 tp1->whoTo->net_ack += tp1->send_size;
3691 if (tp1->snd_count < 2) {
3692 					/* True non-retransmitted chunk */
3693 tp1->whoTo->net_ack2 +=
3694 tp1->send_size;
3695 /* update RTO too? */
3696 if (tp1->do_rtt) {
3697 tp1->whoTo->RTO =
3698 sctp_calculate_rto(stcb,
3699 asoc, tp1->whoTo,
3700 &tp1->sent_rcv_time);
3701 tp1->whoTo->rto_pending = 0;
3702 tp1->do_rtt = 0;
3703 }
3704 }
3705 }
3706 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3707 #ifdef SCTP_DEBUG
3708 if (sctp_debug_on & SCTP_DEBUG_INDATA3) {
3709 printf("Hmm. one that is in RESEND that is now ACKED\n");
3710 }
3711 #endif
3712 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3713 #ifdef SCTP_AUDITING_ENABLED
3714 sctp_audit_log(0xB3,
3715 (asoc->sent_queue_retran_cnt & 0x000000ff));
3716 #endif
3717
3718 }
3719 tp1->sent = SCTP_DATAGRAM_ACKED;
3720 }
3721 } else {
3722 break;
3723 }
3724 tp1 = TAILQ_NEXT(tp1, sctp_next);
3725 }
3726 /*******************************************/
3727 /* cancel ALL T3-send timer if accum moved */
3728 /*******************************************/
3729 if (accum_moved) {
3730 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3731 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
3732 stcb, net);
3733 }
3734 }
3735 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
3736 /* always set this up to cum-ack */
3737 asoc->this_sack_highest_gap = last_tsn;
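	/*
	 * From here on, biggest_tsn_acked tracks the highest TSN covered by
	 * any gap-ack block and biggest_tsn_newly_acked the highest TSN
	 * acked for the first time by this SACK; both start at the cum-ack
	 * and may be raised by sctp_handle_segments() below.
	 */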
3738
3739 if (num_seg * sizeof(struct sctp_gap_ack_block) + sizeof(struct sctp_sack_chunk) > sack_length) {
3740 /* skip corrupt segments */
3741 strike_enabled = 0;
3742 goto skip_segments;
3743 }
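	/*
	 * Sketch of the length check above, assuming the usual SACK wire
	 * layout: each gap-ack block is a pair of 16-bit start/end offsets
	 * relative to the cum-ack, so a SACK reporting num_seg blocks must
	 * be at least sizeof(struct sctp_sack_chunk) +
	 * num_seg * sizeof(struct sctp_gap_ack_block) bytes long; anything
	 * shorter is treated as corrupt and the gap blocks are skipped.
	 */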
3744
3745 if (num_seg > 0) {
3746 if (asoc->primary_destination->dest_state &
3747 SCTP_ADDR_SWITCH_PRIMARY) {
3748 /* clear the nets CACC flags */
3749 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3750 net->cacc_saw_newack = 0;
3751 }
3752 }
3753 		/*
3754 		 * this_sack_highest_gap will increase while handling NEW segments
3755 		 */
3756
3757 sctp_handle_segments(stcb, asoc, ch, last_tsn,
3758 &biggest_tsn_acked, &biggest_tsn_newly_acked,
3759 num_seg, &ecn_seg_sums);
3760
3761 if (sctp_strict_sacks) {
3762 /* validate the biggest_tsn_acked in the gap acks
3763 * if strict adherence is wanted.
3764 */
3765 if ((biggest_tsn_acked == send_s) ||
3766 (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) {
3767 /*
3768 * peer is either confused or we are under
3769 * attack. We must abort.
3770 */
3771 goto hopeless_peer;
3772 }
3773 }
3774
3775 if (asoc->primary_destination->dest_state &
3776 SCTP_ADDR_SWITCH_PRIMARY) {
3777 /* clear the nets CACC flags */
3778 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3779 if (net->cacc_saw_newack) {
3780 cnt_of_cacc++;
3781 }
3782 }
3783 }
3784
3785 }
3786
3787 if (cnt_of_cacc < 2) {
3788 strike_enabled = 1;
3789 } else {
3790 strike_enabled = 0;
3791 }
3792 skip_segments:
3793 /********************************************/
3794 /* drop the acked chunks from the sendqueue */
3795 /********************************************/
3796 asoc->last_acked_seq = cum_ack;
3797 if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
3798 if ((cum_ack == asoc->primary_destination->next_tsn_at_change) ||
3799 (compare_with_wrap(cum_ack,
3800 asoc->primary_destination->next_tsn_at_change, MAX_TSN))) {
3801 struct sctp_nets *lnet;
3802 /* Turn off the switch flag for ALL addresses */
3803 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
3804 asoc->primary_destination->dest_state &=
3805 ~(SCTP_ADDR_SWITCH_PRIMARY|SCTP_ADDR_DOUBLE_SWITCH);
3806 }
3807 }
3808 }
3809 /* Drag along the t3 timeout point so we don't have a problem at wrap */
3810 if (marking_allowed) {
3811 asoc->t3timeout_highest_marked = cum_ack;
3812 }
3813 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3814 do {
3815 if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
3816 MAX_TSN)) {
3817 break;
3818 }
3819 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3820 /* no more sent on list */
3821 break;
3822 }
3823 tp2 = TAILQ_NEXT(tp1, sctp_next);
3824 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3825 if (tp1->data) {
3826 sctp_free_bufspace(stcb, asoc, tp1);
3827 #ifdef SCTP_DEBUG
3828 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
3829 printf("--total out:%lu total_mbuf_out:%lu\n",
3830 (u_long)asoc->total_output_queue_size,
3831 (u_long)asoc->total_output_mbuf_queue_size);
3832 }
3833 #endif
3834
3835 sctp_m_freem(tp1->data);
3836 if (tp1->flags & SCTP_PR_SCTP_BUFFER) {
3837 asoc->sent_queue_cnt_removeable--;
3838 }
3839
3840 }
3841 tp1->data = NULL;
3842 asoc->sent_queue_cnt--;
3843 sctp_free_remote_addr(tp1->whoTo);
3844 sctppcbinfo.ipi_count_chunk--;
3845 asoc->chunks_on_out_queue--;
3846
3847 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
3848 panic("Chunk count is going negative");
3849 }
3850 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, tp1);
3851 sctppcbinfo.ipi_gencnt_chunk++;
3852 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
3853 tp1 = tp2;
3854 } while (tp1 != NULL);
3855
3856
3857 if (asoc->fast_retran_loss_recovery && accum_moved) {
3858 if (compare_with_wrap(asoc->last_acked_seq,
3859 asoc->fast_recovery_tsn, MAX_TSN) ||
3860 asoc->last_acked_seq == asoc->fast_recovery_tsn) {
3861 /* Setup so we will exit RFC2582 fast recovery */
3862 will_exit_fast_recovery = 1;
3863 }
3864 }
3865
3866 	/* Check for revoked fragments if we had
3867 	 * fragments in a previous SACK. If we
3868 	 * had no previous fragments we cannot have
3869 	 * a revoke issue.
3870 	 */
3871 if (asoc->saw_sack_with_frags)
3872 sctp_check_for_revoked(asoc, cum_ack, biggest_tsn_acked);
3873
3874 if (num_seg)
3875 asoc->saw_sack_with_frags = 1;
3876 else
3877 asoc->saw_sack_with_frags = 0;
3878
3879 /******************************/
3880 /* update cwnd */
3881 /******************************/
3882 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3883 /* if nothing was acked on this destination skip it */
3884 if (net->net_ack == 0)
3885 continue;
3886
3887 if (net->net_ack2 > 0) {
3888 /*
3889 * Karn's rule applies to clearing error count,
3890 * this is optional.
3891 */
3892 net->error_count = 0;
3893 if ((net->dest_state&SCTP_ADDR_NOT_REACHABLE) ==
3894 SCTP_ADDR_NOT_REACHABLE) {
3895 /* addr came good */
3896 net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
3897 net->dest_state |= SCTP_ADDR_REACHABLE;
3898 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
3899 SCTP_RECEIVED_SACK, (void *)net);
3900 /* now was it the primary? if so restore */
3901 if (net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
3902 sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net);
3903 }
3904 }
3905 }
3906
3907 if (asoc->fast_retran_loss_recovery &&
3908 will_exit_fast_recovery == 0) {
3909 /* If we are in loss recovery we skip any cwnd update */
3910 sctp_pegs[SCTP_CWND_SKIP]++;
3911 goto skip_cwnd_update;
3912 }
3913 if (accum_moved) {
3914 /* If the cumulative ack moved we can proceed */
3915 if (net->cwnd <= net->ssthresh) {
3916 /* We are in slow start */
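				/*
				 * Slow-start sketch, per RFC 2960 section
				 * 7.2.1 which this code appears to follow:
				 * only grow cwnd when the window was
				 * actually being used (flight_size +
				 * net_ack >= cwnd), and then by at most one
				 * MTU per SACK, i.e. cwnd += min(net_ack, MTU).
				 */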
3917 if (net->flight_size + net->net_ack >=
3918 net->cwnd ) {
3919 #ifdef SCTP_HIGH_SPEED
3920 sctp_hs_cwnd_increase(net);
3921 #else
3922 if (net->net_ack > net->mtu) {
3923 net->cwnd += net->mtu;
3924 #ifdef SCTP_CWND_LOGGING
3925 sctp_log_cwnd(net, net->mtu,
3926 SCTP_CWND_LOG_FROM_SS);
3927 #endif
3928
3929 } else {
3930 net->cwnd += net->net_ack;
3931 #ifdef SCTP_CWND_LOGGING
3932 sctp_log_cwnd(net, net->net_ack,
3933 SCTP_CWND_LOG_FROM_SS);
3934 #endif
3935
3936 }
3937 #endif
3938 sctp_pegs[SCTP_CWND_SS]++;
3939 } else {
3940 unsigned int dif;
3941 sctp_pegs[SCTP_CWND_NOUSE_SS]++;
3942 dif = net->cwnd - (net->flight_size +
3943 net->net_ack);
3944 #ifdef SCTP_CWND_LOGGING
3945 /* sctp_log_cwnd(net, net->net_ack,
3946 SCTP_CWND_LOG_NOADV_SS);*/
3947 #endif
3948 if (dif > sctp_pegs[SCTP_CWND_DIFF_SA]) {
3949 sctp_pegs[SCTP_CWND_DIFF_SA] =
3950 dif;
3951 sctp_pegs[SCTP_OQS_AT_SS] =
3952 asoc->total_output_queue_size;
3953 sctp_pegs[SCTP_SQQ_AT_SS] =
3954 asoc->sent_queue_cnt;
3955 sctp_pegs[SCTP_SQC_AT_SS] =
3956 asoc->send_queue_cnt;
3957 }
3958 }
3959 } else {
3960 /* We are in congestion avoidance */
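				/*
				 * Congestion-avoidance sketch, per RFC 2960
				 * section 7.2.2 which this code appears to
				 * follow: newly acked bytes accumulate in
				 * partial_bytes_acked; once that reaches a
				 * full cwnd, cwnd grows by one MTU and
				 * partial_bytes_acked is reduced by the
				 * old cwnd.
				 */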
3961 if (net->flight_size + net->net_ack >=
3962 net->cwnd) {
3963 /*
3964 * add to pba only if we had a cwnd's
3965 * worth (or so) in flight OR the
3966 * burst limit was applied.
3967 */
3968 net->partial_bytes_acked +=
3969 net->net_ack;
3970
3971 /*
3972 * Do we need to increase
3973 * (if pba is > cwnd)?
3974 */
3975 if (net->partial_bytes_acked >=
3976 net->cwnd) {
3977 if (net->cwnd <
3978 net->partial_bytes_acked) {
3979 net->partial_bytes_acked -=
3980 net->cwnd;
3981 } else {
3982 net->partial_bytes_acked =
3983 0;
3984 }
3985 net->cwnd += net->mtu;
3986 #ifdef SCTP_CWND_LOGGING
3987 sctp_log_cwnd(net, net->mtu,
3988 SCTP_CWND_LOG_FROM_CA);
3989 #endif
3990 sctp_pegs[SCTP_CWND_CA]++;
3991 }
3992 } else {
3993 unsigned int dif;
3994 sctp_pegs[SCTP_CWND_NOUSE_CA]++;
3995 #ifdef SCTP_CWND_LOGGING
3996 /* sctp_log_cwnd(net, net->net_ack,
3997 SCTP_CWND_LOG_NOADV_CA);
3998 */
3999 #endif
4000 dif = net->cwnd - (net->flight_size +
4001 net->net_ack);
4002 if (dif > sctp_pegs[SCTP_CWND_DIFF_CA]) {
4003 sctp_pegs[SCTP_CWND_DIFF_CA] =
4004 dif;
4005 sctp_pegs[SCTP_OQS_AT_CA] =
4006 asoc->total_output_queue_size;
4007 sctp_pegs[SCTP_SQQ_AT_CA] =
4008 asoc->sent_queue_cnt;
4009 sctp_pegs[SCTP_SQC_AT_CA] =
4010 asoc->send_queue_cnt;
4011
4012 }
4013
4014 }
4015 }
4016 } else {
4017 sctp_pegs[SCTP_CWND_NOCUM]++;
4018 }
4019 skip_cwnd_update:
4020 		/*
4021 		 * NOW, according to Karn's rule, do we need to restore the
4022 		 * RTO timer? Check our net_ack2. If it is not set then we
4023 		 * have an ambiguity, i.e. all data acked was sent to more
4024 		 * than one place.
4025 		 */
4026
4027 if (net->net_ack2) {
4028 /* restore any doubled timers */
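			/*
			 * A sketch of the fixed-point arithmetic below,
			 * assuming TCP-style scaling in which lastsa holds
			 * SRTT << 3 and lastsv holds RTTVAR << 2 (not
			 * verified against sctp_calculate_rto()):
			 * ((lastsa >> 2) + lastsv) >> 1 then works out to
			 * SRTT + 2 * RTTVAR, clamped to [minrto, maxrto]
			 * just below.
			 */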
4029 net->RTO = ((net->lastsa >> 2) + net->lastsv) >> 1;
4030 if (net->RTO < stcb->asoc.minrto) {
4031 net->RTO = stcb->asoc.minrto;
4032 }
4033 if (net->RTO > stcb->asoc.maxrto) {
4034 net->RTO = stcb->asoc.maxrto;
4035 }
4036 }
4037 if (net->cwnd > sctp_pegs[SCTP_MAX_CWND]) {
4038 sctp_pegs[SCTP_MAX_CWND] = net->cwnd;
4039 }
4040 }
4041 /**********************************/
4042 /* Now what about shutdown issues */
4043 /**********************************/
4044 some_on_streamwheel = 0;
4045 if (!TAILQ_EMPTY(&asoc->out_wheel)) {
4046 		/* Check to see if some data is queued */
4047 struct sctp_stream_out *outs;
4048 TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
4049 if (!TAILQ_EMPTY(&outs->outqueue)) {
4050 some_on_streamwheel = 1;
4051 break;
4052 }
4053 }
4054 }
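	/*
	 * At this point, empty send/sent queues plus an empty stream wheel
	 * mean no user data remains outbound anywhere, so a pending
	 * SHUTDOWN or SHUTDOWN-ACK can be issued below.
	 */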
4055 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue) &&
4056 some_on_streamwheel == 0) {
4057 /* nothing left on sendqueue.. consider done */
4058 /* stop all timers */
4059 #ifdef SCTP_LOG_RWND
4060 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4061 asoc->peers_rwnd, 0, 0, a_rwnd);
4062 #endif
4063 asoc->peers_rwnd = a_rwnd;
4064 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4065 /* SWS sender side engages */
4066 asoc->peers_rwnd = 0;
4067 }
4068 /* stop any timers */
4069 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4070 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4071 stcb, net);
4072 net->flight_size = 0;
4073 net->partial_bytes_acked = 0;
4074 }
4075 asoc->total_flight = 0;
4076 asoc->total_flight_count = 0;
4077 /* clean up */
4078 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
4079 asoc->state = SCTP_STATE_SHUTDOWN_SENT;
4080 #ifdef SCTP_DEBUG
4081 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
4082 printf("%s:%d sends a shutdown\n",
4083 __FILE__,
4084 __LINE__
4085 );
4086 }
4087 #endif
4088 sctp_send_shutdown(stcb,
4089 stcb->asoc.primary_destination);
4090 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4091 stcb->sctp_ep, stcb, asoc->primary_destination);
4092 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4093 stcb->sctp_ep, stcb, asoc->primary_destination);
4094 } else if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) {
4095 asoc->state = SCTP_STATE_SHUTDOWN_ACK_SENT;
4096
4097 sctp_send_shutdown_ack(stcb,
4098 stcb->asoc.primary_destination);
4099
4100 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4101 stcb->sctp_ep, stcb, asoc->primary_destination);
4102 }
4103 return;
4104 }
4105 /*
4106 * Now here we are going to recycle net_ack for a different
4107 * use... HEADS UP.
4108 */
4109 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4110 net->net_ack = 0;
4111 }
4112 if ((num_seg > 0) && marking_allowed) {
4113 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
4114 strike_enabled, biggest_tsn_newly_acked, accum_moved);
4115 }
4116
4117 /*********************************************/
4118 /* Here we perform PR-SCTP procedures */
4119 /* (section 4.2) */
4120 /*********************************************/
4121 /* C1. update advancedPeerAckPoint */
4122 if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
4123 asoc->advanced_peer_ack_point = cum_ack;
4124 }
4125 /* C2. try to further move advancedPeerAckPoint ahead */
4126 if (asoc->peer_supports_prsctp) {
4127 struct sctp_tmit_chunk *lchk;
4128 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4129 /* C3. See if we need to send a Fwd-TSN */
4130 if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
4131 MAX_TSN)) {
4132 /*
4133 * ISSUE with ECN, see FWD-TSN processing for notes
4134 * on issues that will occur when the ECN NONCE stuff
4135 * is put into SCTP for cross checking.
4136 */
4137 send_forward_tsn(stcb, asoc);
4138
4139 /* ECN Nonce: Disable Nonce Sum check when FWD TSN is sent and store resync tsn*/
4140 asoc->nonce_sum_check = 0;
4141 asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
4142 if (lchk) {
4143 /* Assure a timer is up */
4144 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4145 stcb->sctp_ep, stcb, lchk->whoTo);
4146 }
4147 }
4148 }
4149 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4150 if (asoc->fast_retran_loss_recovery == 0) {
4151 /* out of a RFC2582 Fast recovery window? */
4152 if (net->net_ack > 0) {
4153 /*
4154 * per section 7.2.3, are there
4155 * any destinations that had a fast
4156 * retransmit to them. If so what we
4157 * need to do is adjust ssthresh and
4158 * cwnd.
4159 */
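				/*
				 * The non-SCTP_HIGH_SPEED reduction below is
				 * the standard fast-retransmit halving:
				 * ssthresh = max(cwnd / 2, 2 * MTU) and
				 * cwnd = ssthresh; fast_recovery_tsn then
				 * marks the end of the window so only one
				 * reduction is taken per flight of data.
				 */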
4160 struct sctp_tmit_chunk *lchk;
4161 #ifdef SCTP_HIGH_SPEED
4162 sctp_hs_cwnd_decrease(net);
4163 #else
4164 #ifdef SCTP_CWND_LOGGING
4165 int old_cwnd = net->cwnd;
4166 #endif
4167 net->ssthresh = net->cwnd / 2;
4168 if (net->ssthresh < (net->mtu*2)) {
4169 net->ssthresh = 2 * net->mtu;
4170 }
4171 net->cwnd = net->ssthresh;
4172 #ifdef SCTP_CWND_LOGGING
4173 sctp_log_cwnd(net, (net->cwnd-old_cwnd),
4174 SCTP_CWND_LOG_FROM_FR);
4175 #endif
4176 #endif
4177
4178 lchk = TAILQ_FIRST(&asoc->send_queue);
4179
4180 net->partial_bytes_acked = 0;
4181 /* Turn on fast recovery window */
4182 asoc->fast_retran_loss_recovery = 1;
4183 if (lchk == NULL) {
4184 /* Mark end of the window */
4185 asoc->fast_recovery_tsn = asoc->sending_seq - 1;
4186 } else {
4187 asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
4188 }
4189
4190
4191 /* Disable Nonce Sum Checking and store the resync tsn*/
4192 asoc->nonce_sum_check = 0;
4193 asoc->nonce_resync_tsn = asoc->fast_recovery_tsn + 1;
4194
4195 sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
4196 stcb->sctp_ep, stcb, net);
4197 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4198 stcb->sctp_ep, stcb, net);
4199 }
4200 } else if (net->net_ack > 0) {
4201 /*
4202 * Mark a peg that we WOULD have done a cwnd reduction
4203 * but RFC2582 prevented this action.
4204 */
4205 sctp_pegs[SCTP_FR_INAWINDOW]++;
4206 }
4207 }
4208
4209
4210 /******************************************************************
4211 * Here we do the stuff with ECN Nonce checking.
4212 * We basically check to see if the nonce sum flag was incorrect
4213 * or if resynchronization needs to be done. Also if we catch a
4214 * misbehaving receiver we give him the kick.
4215 ******************************************************************/
4216
4217 if (asoc->ecn_nonce_allowed) {
4218 if (asoc->nonce_sum_check) {
4219 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
4220 if (asoc->nonce_wait_for_ecne == 0) {
4221 struct sctp_tmit_chunk *lchk;
4222 lchk = TAILQ_FIRST(&asoc->send_queue);
4223 asoc->nonce_wait_for_ecne = 1;
4224 if (lchk) {
4225 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
4226 } else {
4227 asoc->nonce_wait_tsn = asoc->sending_seq;
4228 }
4229 } else {
4230 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
4231 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
4232 /* Misbehaving peer. We need to react to this guy */
4233 printf("Mis-behaving peer detected\n");
4234 asoc->ecn_allowed = 0;
4235 asoc->ecn_nonce_allowed = 0;
4236 }
4237 }
4238 }
4239 } else {
4240 /* See if Resynchronization Possible */
4241 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
4242 asoc->nonce_sum_check = 1;
4243 				/* Now we must calculate what the base
4244 				 * is. We do this based on two things: we know
4245 				 * the totals for all the segments gap-acked
4246 				 * in the SACK (they are stored in ecn_seg_sums),
4247 				 * and we know the SACK's nonce sum, which is
4248 				 * in nonce_sum_flag. So we can build a truth
4249 				 * table to back-calculate the new value of asoc->nonce_sum_expect_base:
4250 				 *
4251 				 * SACK-flag-Value	Seg-Sums	Base
4252 				 *	0		0		0
4253 				 *	1		0		1
4254 				 *	0		1		1
4255 				 *	1		1		0
4256 				 * i.e. Base = SACK-flag XOR Seg-Sums, as computed below. */
4257 asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
4258 }
4259 }
4260 }
4261 /* Now are we exiting loss recovery ? */
4262 if (will_exit_fast_recovery) {
4263 /* Ok, we must exit fast recovery */
4264 asoc->fast_retran_loss_recovery = 0;
4265 }
4266 if ((asoc->sat_t3_loss_recovery) &&
4267 ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
4268 MAX_TSN) ||
4269 (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
4270 /* end satellite t3 loss recovery */
4271 asoc->sat_t3_loss_recovery = 0;
4272 }
4273 /* Adjust and set the new rwnd value */
4274 #ifdef SCTP_LOG_RWND
4275 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4276 asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * sctp_peer_chunk_oh), a_rwnd);
4277 #endif
4278
4279 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
4280 (u_int32_t)(asoc->total_flight + (asoc->sent_queue_cnt * sctp_peer_chunk_oh)));
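	/*
	 * Worked example of the adjustment above, with assumed numbers: if
	 * the peer advertised a_rwnd = 64000 with 8000 bytes in flight, 10
	 * chunks on the sent queue and sctp_peer_chunk_oh = 256 bytes of
	 * per-chunk overhead slop, then peers_rwnd becomes
	 * 64000 - (8000 + 10 * 256) = 53440 (presumably floored at zero by
	 * sctp_sbspace_sub()).
	 */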
4281 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4282 /* SWS sender side engages */
4283 asoc->peers_rwnd = 0;
4284 }
4285 /*
4286 * Now we must setup so we have a timer up for anyone with
4287 * outstanding data.
4288 */
4289 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4290 struct sctp_tmit_chunk *chk;
4291 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
4292 if (chk->whoTo == net &&
4293 (chk->sent < SCTP_DATAGRAM_ACKED ||
4294 chk->sent == SCTP_FORWARD_TSN_SKIP)) {
4295 /*
4296 * Not ack'ed and still outstanding to this
4297 * destination or marked and must be
4298 * sacked after fwd-tsn sent.
4299 */
4300 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4301 stcb->sctp_ep, stcb, net);
4302 break;
4303 }
4304 }
4305 }
4306 }
4307
4308 void
4309 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp,
4310 struct sctp_nets *netp, int *abort_flag)
4311 {
4312 /* Mutate a shutdown into a SACK */
4313 struct sctp_sack_chunk sack;
4314
4315 /* Copy cum-ack */
4316 sack.sack.cum_tsn_ack = cp->cumulative_tsn_ack;
4317 /* Arrange so a_rwnd does NOT change */
4318 sack.ch.chunk_type = SCTP_SELECTIVE_ACK;
4319 sack.ch.chunk_flags = 0;
4320 	sack.ch.chunk_length = htons(sizeof(struct sctp_sack_chunk));
4321 sack.sack.a_rwnd =
4322 htonl(stcb->asoc.peers_rwnd + stcb->asoc.total_flight);
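	/*
	 * Advertising peers_rwnd + total_flight keeps the computed window
	 * stable: the SACK processor subtracts the bytes still in flight
	 * back out when it recomputes peers_rwnd, so the net effect of this
	 * synthesized SACK on the rwnd is zero.
	 */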
4323 	/*
4324 	 * No gaps in this one. This may give a temporary appearance of
4325 	 * reneging, but hopefully the second chunk in the packet is a true
4326 	 * SACK and will correct that view. One will come soon after, no
4327 	 * matter what, to fix this.
4328 	 */
4329 sack.sack.num_gap_ack_blks = 0;
4330 sack.sack.num_dup_tsns = 0;
4331 /* Now call the SACK processor */
4332 sctp_handle_sack(&sack, stcb, netp, abort_flag);
4333 }
4334
4335 static void
4336 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
4337 struct sctp_stream_in *strmin)
4338 {
4339 struct sctp_tmit_chunk *chk, *nchk;
4340 struct sctp_association *asoc;
4341 int tt;
4342
4343 asoc = &stcb->asoc;
4344 tt = strmin->last_sequence_delivered;
4345 /*
4346 * First deliver anything prior to and including the stream no that
4347 * came in
4348 */
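	/*
	 * Note that stream sequence numbers are 16-bit quantities, so the
	 * wrap comparisons in this function use MAX_SEQ (presumably 0xffff)
	 * rather than MAX_TSN for their serial arithmetic.
	 */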
4349 chk = TAILQ_FIRST(&strmin->inqueue);
4350 while (chk) {
4351 nchk = TAILQ_NEXT(chk, sctp_next);
4352 if (compare_with_wrap(tt, chk->rec.data.stream_seq, MAX_SEQ) ||
4353 (tt == chk->rec.data.stream_seq)) {
4354 /* this is deliverable now */
4355 TAILQ_REMOVE(&strmin->inqueue, chk, sctp_next);
4356 /* subtract pending on streams */
4357 asoc->size_on_all_streams -= chk->send_size;
4358 asoc->cnt_on_all_streams--;
4359 /* deliver it to at least the delivery-q */
4360 sctp_deliver_data(stcb, &stcb->asoc, chk, 0);
4361 } else {
4362 /* no more delivery now. */
4363 break;
4364 }
4365 chk = nchk;
4366 }
4367 /*
4368 * now we must deliver things in queue the normal way if any
4369 * are now ready.
4370 */
4371 tt = strmin->last_sequence_delivered + 1;
4372 chk = TAILQ_FIRST(&strmin->inqueue);
4373 while (chk) {
4374 nchk = TAILQ_NEXT(chk, sctp_next);
4375 if (tt == chk->rec.data.stream_seq) {
4376 /* this is deliverable now */
4377 TAILQ_REMOVE(&strmin->inqueue, chk, sctp_next);
4378 /* subtract pending on streams */
4379 asoc->size_on_all_streams -= chk->send_size;
4380 asoc->cnt_on_all_streams--;
4381 /* deliver it to at least the delivery-q */
4382 strmin->last_sequence_delivered =
4383 chk->rec.data.stream_seq;
4384 sctp_deliver_data(stcb, &stcb->asoc, chk, 0);
4385 tt = strmin->last_sequence_delivered + 1;
4386 } else {
4387 break;
4388 }
4389 chk = nchk;
4390 }
4391
4392 }
4393
4394 void
4395 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
4396 struct sctp_forward_tsn_chunk *fwd, int *abort_flag)
4397 {
4398 	/*
4399 	 * ISSUES that MUST be fixed for ECN! When we are the
4400 	 * sender of the forward TSN, and the SACK comes back
4401 	 * that acknowledges the FWD-TSN, we must reset the
4402 	 * NONCE sum to match correctly. This will get quite
4403 	 * tricky since we may have sent more intervening data and
4404 	 * must carefully account for what the SACK says on the
4405 	 * nonce and any gaps that are reported. This work
4406 	 * will NOT be done here, but I note it here since
4407 	 * it is really related to PR-SCTP and FWD-TSNs.
4408 	 */
4409
4410 /* The pr-sctp fwd tsn */
4411 /*
4412 * here we will perform all the data receiver side steps for
4413 	 * processing FwdTSN, as required by the PR-SCTP draft:
4414 *
4415 * Assume we get FwdTSN(x):
4416 *
4417 * 1) update local cumTSN to x
4418 * 2) try to further advance cumTSN to x + others we have
4419 * 3) examine and update re-ordering queue on pr-in-streams
4420 * 4) clean up re-assembly queue
4421 * 5) Send a sack to report where we are.
4422 */
4423 struct sctp_strseq *stseq;
4424 struct sctp_association *asoc;
4425 u_int32_t new_cum_tsn, gap, back_out_htsn;
4426 unsigned int i, cnt_gone, fwd_sz, cumack_set_flag, m_size;
4427 struct sctp_stream_in *strm;
4428 struct sctp_tmit_chunk *chk, *at;
4429
4430 cumack_set_flag = 0;
4431 asoc = &stcb->asoc;
4432 cnt_gone = 0;
4433 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
4434 #ifdef SCTP_DEBUG
4435 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
4436 printf("Bad size too small/big fwd-tsn\n");
4437 }
4438 #endif
4439 return;
4440 }
4441 m_size = (stcb->asoc.mapping_array_size << 3);
4442 /*************************************************************/
4443 /* 1. Here we update local cumTSN and shift the bitmap array */
4444 /*************************************************************/
4445 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
4446
4447 if (compare_with_wrap(asoc->cumulative_tsn, new_cum_tsn, MAX_TSN) ||
4448 asoc->cumulative_tsn == new_cum_tsn) {
4449 /* Already got there ... */
4450 return;
4451 }
4452
4453 back_out_htsn = asoc->highest_tsn_inside_map;
4454 if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_map,
4455 MAX_TSN)) {
4456 asoc->highest_tsn_inside_map = new_cum_tsn;
4457 #ifdef SCTP_MAP_LOGGING
4458 sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
4459 #endif
4460 }
4461 /*
4462 * now we know the new TSN is more advanced, let's find the
4463 * actual gap
4464 */
4465 if ((compare_with_wrap(new_cum_tsn, asoc->mapping_array_base_tsn,
4466 MAX_TSN)) ||
4467 (new_cum_tsn == asoc->mapping_array_base_tsn)) {
4468 gap = new_cum_tsn - asoc->mapping_array_base_tsn;
4469 } else {
4470 /* try to prevent underflow here */
4471 gap = new_cum_tsn + (MAX_TSN - asoc->mapping_array_base_tsn) + 1;
4472 }
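	/*
	 * Worked example of the wrap case above, assuming MAX_TSN is
	 * 0xffffffff: with mapping_array_base_tsn = 0xfffffff0 and
	 * new_cum_tsn = 0x00000005, gap = 5 + (0xffffffff - 0xfffffff0) + 1
	 * = 21, i.e. the 22 TSNs from the base through new_cum_tsn occupy
	 * bit positions 0..21 of the mapping array marked below.
	 */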
4473
4474 if (gap >= m_size) {
4475 asoc->highest_tsn_inside_map = back_out_htsn;
4476 if ((long)gap > sctp_sbspace(&stcb->sctp_socket->so_rcv)) {
4477 			/*
4478 			 * out of range (of the single byte chunks in the rwnd I
4479 			 * give out);
4480 			 * too questionable, better to drop it silently
4481 			 */
4482 return;
4483 }
4484 if (asoc->highest_tsn_inside_map >
4485 asoc->mapping_array_base_tsn) {
4486 gap = asoc->highest_tsn_inside_map -
4487 asoc->mapping_array_base_tsn;
4488 } else {
4489 gap = asoc->highest_tsn_inside_map +
4490 (MAX_TSN - asoc->mapping_array_base_tsn) + 1;
4491 }
4492 cumack_set_flag = 1;
4493 }
4494 for (i = 0; i <= gap; i++) {
4495 SCTP_SET_TSN_PRESENT(asoc->mapping_array, i);
4496 }
4497 /*
4498 	 * Now after marking all, slide things forward but no
4499 * sack please.
4500 */
4501 sctp_sack_check(stcb, 0, 0, abort_flag);
4502 if (*abort_flag)
4503 return;
4504
4505 if (cumack_set_flag) {
4506 /*
4507 * fwd-tsn went outside my gap array - not a
4508 		 * common occurrence. Do the same thing we
4509 * do when a cookie-echo arrives.
4510 */
4511 asoc->highest_tsn_inside_map = new_cum_tsn - 1;
4512 asoc->mapping_array_base_tsn = new_cum_tsn;
4513 asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
4514 #ifdef SCTP_MAP_LOGGING
4515 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
4516 #endif
4517 asoc->last_echo_tsn = asoc->highest_tsn_inside_map;
4518 }
4519 /*************************************************************/
4520 /* 2. Clear up re-assembly queue */
4521 /*************************************************************/
4522
4523 /*
4524 * First service it if pd-api is up, just in case we can
4525 * progress it forward
4526 */
4527 if (asoc->fragmented_delivery_inprogress) {
4528 sctp_service_reassembly(stcb, asoc, 0);
4529 }
4530 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
4531 /* For each one on here see if we need to toss it */
4532 		/*
4533 		 * For now, large messages held on the reasmqueue that are
4534 		 * complete will be tossed too. We could in theory do more
4535 		 * work to spin through and stop after dumping one msg, i.e.
4536 		 * on seeing the start of a new msg at the head, and call
4537 		 * the delivery function to see if it can be delivered.
4538 		 * But for now we just dump everything on the queue.
4539 		 */
4540 chk = TAILQ_FIRST(&asoc->reasmqueue);
4541 while (chk) {
4542 at = TAILQ_NEXT(chk, sctp_next);
4543 if (compare_with_wrap(asoc->cumulative_tsn,
4544 chk->rec.data.TSN_seq, MAX_TSN) ||
4545 asoc->cumulative_tsn == chk->rec.data.TSN_seq) {
4546 /* It needs to be tossed */
4547 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
4548 if (compare_with_wrap(chk->rec.data.TSN_seq,
4549 asoc->tsn_last_delivered, MAX_TSN)) {
4550 asoc->tsn_last_delivered =
4551 chk->rec.data.TSN_seq;
4552 asoc->str_of_pdapi =
4553 chk->rec.data.stream_number;
4554 asoc->ssn_of_pdapi =
4555 chk->rec.data.stream_seq;
4556 asoc->fragment_flags =
4557 chk->rec.data.rcv_flags;
4558 }
4559 asoc->size_on_reasm_queue -= chk->send_size;
4560 asoc->cnt_on_reasm_queue--;
4561 cnt_gone++;
4562
4563 /* Clear up any stream problem */
4564 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
4565 SCTP_DATA_UNORDERED &&
4566 (compare_with_wrap(chk->rec.data.stream_seq,
4567 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
4568 MAX_SEQ))) {
4569 				/*
4570 				 * We must dump forward this stream's
4571 				 * sequence number if the chunk being
4572 				 * skipped is not unordered.
4573 				 * There is a chance that if the peer
4574 				 * does not include the last fragment
4575 				 * in its FWD-TSN we WILL have a problem
4576 				 * here, since a partial chunk would be
4577 				 * left in the queue that may never
4578 				 * become deliverable.
4579 				 * Also, if a partial delivery API has
4580 				 * started, the user may get a partial
4581 				 * chunk, with the next read returning a
4582 				 * new chunk... really ugly, but I see no
4583 				 * way around it! Maybe a notify??
4584 				 */
4585 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
4586 chk->rec.data.stream_seq;
4587 }
4588 if (chk->data) {
4589 sctp_m_freem(chk->data);
4590 chk->data = NULL;
4591 }
4592 sctp_free_remote_addr(chk->whoTo);
4593 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
4594 sctppcbinfo.ipi_count_chunk--;
4595 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
4596 panic("Chunk count is negative");
4597 }
4598 sctppcbinfo.ipi_gencnt_chunk++;
4599 } else {
4600 /*
4601 * Ok we have gone beyond the end of the
4602 * fwd-tsn's mark. Some checks...
4603 */
4604 if ((asoc->fragmented_delivery_inprogress) &&
4605 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
4606 				/* Special case: the PD-API is up and what we fwd-tsn'd
4607 				 * over includes a chunk that had the LAST_FRAG. We
4608 				 * no longer need to do the PD-API.
4609 				 */
4610 asoc->fragmented_delivery_inprogress = 0;
4611 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
4612 stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)NULL);
4613
4614 }
4615 break;
4616 }
4617 chk = at;
4618 }
4619 }
4620 if (asoc->fragmented_delivery_inprogress) {
4621 /*
4622 * Ok we removed cnt_gone chunks in the PD-API queue that
4623 * were being delivered. So now we must turn off the
4624 * flag.
4625 */
4626 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
4627 stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)NULL);
4628 asoc->fragmented_delivery_inprogress = 0;
4629 }
4630 /*************************************************************/
4631 /* 3. Update the PR-stream re-ordering queues */
4632 /*************************************************************/
4633 stseq = (struct sctp_strseq *)((vaddr_t)fwd + sizeof(*fwd));
4634 fwd_sz -= sizeof(*fwd);
4635 {
4636 /* New method. */
4637 int num_str;
4638 num_str = fwd_sz/sizeof(struct sctp_strseq);
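		/*
		 * Each sctp_strseq entry is assumed to be a (stream,
		 * sequence) pair of 16-bit fields appended after the
		 * FORWARD-TSN header; the loop below converts each pair to
		 * host order in place and advances the affected stream's
		 * last_sequence_delivered.
		 */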
4639 #ifdef SCTP_DEBUG
4640 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
4641 printf("Using NEW method, %d strseq's reported in FWD-TSN\n",
4642 num_str);
4643 }
4644 #endif
4645 for (i = 0; i < num_str; i++) {
4646 u_int16_t st;
4647 #if 0
4648 unsigned char *xx;
4649 /* Convert */
4650 xx = (unsigned char *)&stseq[i];
4651 #endif
4652 st = ntohs(stseq[i].stream);
4653 stseq[i].stream = st;
4654 st = ntohs(stseq[i].sequence);
4655 stseq[i].sequence = st;
4656 /* now process */
4657 if (stseq[i].stream > asoc->streamincnt) {
4658 #ifdef SCTP_DEBUG
4659 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
4660 printf("Bogus stream number %d "
4661 "streamincnt is %d\n",
4662 stseq[i].stream, asoc->streamincnt);
4663 }
4664 #endif
4665 				/*
4666 				 * It is arguable whether we should continue. Since
4667 				 * the peer sent bogus stream info, we may be in
4668 				 * deep trouble;
4669 				 * a return may be a better choice?
4670 				 */
4671 continue;
4672 }
4673 strm = &asoc->strmin[stseq[i].stream];
4674 if (compare_with_wrap(stseq[i].sequence,
4675 strm->last_sequence_delivered, MAX_SEQ)) {
4676 /* Update the sequence number */
4677 strm->last_sequence_delivered =
4678 stseq[i].sequence;
4679 }
4680 /* now kick the stream the new way */
4681 sctp_kick_prsctp_reorder_queue(stcb, strm);
4682 }
4683 }
4684 }
4685