xref: /freebsd/sys/netinet/sctp_indata.c (revision 4bc52338)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
5  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * a) Redistributions of source code must retain the above copyright notice,
12  *    this list of conditions and the following disclaimer.
13  *
14  * b) Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the distribution.
17  *
18  * c) Neither the name of Cisco Systems, Inc. nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <netinet/sctp_os.h>
39 #include <sys/proc.h>
40 #include <netinet/sctp_var.h>
41 #include <netinet/sctp_sysctl.h>
42 #include <netinet/sctp_header.h>
43 #include <netinet/sctp_pcb.h>
44 #include <netinet/sctputil.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_auth.h>
48 #include <netinet/sctp_timer.h>
49 #include <netinet/sctp_asconf.h>
50 #include <netinet/sctp_indata.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #include <netinet/sctp_input.h>
53 #include <netinet/sctp_crc32.h>
54 #include <netinet/sctp_lock_bsd.h>
55 /*
56  * NOTES: On the outbound side of things I need to check the sack timer to
57  * see if I should generate a sack into the chunk queue (if I have data to
58  * send that is and will be sending it .. for bundling.
59  *
60  * The callback in sctp_usrreq.c will get called when the socket is read from.
61  * This will cause sctp_service_queues() to get called on the top entry in
62  * the list.
63  */
64 static uint32_t
65 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
66     struct sctp_stream_in *strm,
67     struct sctp_tcb *stcb,
68     struct sctp_association *asoc,
69     struct sctp_tmit_chunk *chk, int lock_held);
70 
71 
72 void
73 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
74 {
75 	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
76 }
77 
78 /* Calculate what the rwnd would be */
79 uint32_t
80 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
81 {
82 	uint32_t calc = 0;
83 
84 	/*
85 	 * This is really set wrong with respect to a 1-2-m socket. Since
86 	 * the sb_cc is the count that everyone as put up. When we re-write
87 	 * sctp_soreceive then we will fix this so that ONLY this
88 	 * associations data is taken into account.
89 	 */
90 	if (stcb->sctp_socket == NULL) {
91 		return (calc);
92 	}
93 
94 	KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
95 	    ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
96 	KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
97 	    ("size_on_all_streams is %u", asoc->size_on_all_streams));
98 	if (stcb->asoc.sb_cc == 0 &&
99 	    asoc->cnt_on_reasm_queue == 0 &&
100 	    asoc->cnt_on_all_streams == 0) {
101 		/* Full rwnd granted */
102 		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
103 		return (calc);
104 	}
105 	/* get actual space */
106 	calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
107 	/*
108 	 * take out what has NOT been put on socket queue and we yet hold
109 	 * for putting up.
110 	 */
111 	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
112 	    asoc->cnt_on_reasm_queue * MSIZE));
113 	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
114 	    asoc->cnt_on_all_streams * MSIZE));
115 	if (calc == 0) {
116 		/* out of space */
117 		return (calc);
118 	}
119 
120 	/* what is the overhead of all these rwnd's */
121 	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
122 	/*
123 	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
124 	 * even it is 0. SWS engaged
125 	 */
126 	if (calc < stcb->asoc.my_rwnd_control_len) {
127 		calc = 1;
128 	}
129 	return (calc);
130 }
131 
132 
133 
134 /*
135  * Build out our readq entry based on the incoming packet.
136  */
137 struct sctp_queued_to_read *
138 sctp_build_readq_entry(struct sctp_tcb *stcb,
139     struct sctp_nets *net,
140     uint32_t tsn, uint32_t ppid,
141     uint32_t context, uint16_t sid,
142     uint32_t mid, uint8_t flags,
143     struct mbuf *dm)
144 {
145 	struct sctp_queued_to_read *read_queue_e = NULL;
146 
147 	sctp_alloc_a_readq(stcb, read_queue_e);
148 	if (read_queue_e == NULL) {
149 		goto failed_build;
150 	}
151 	memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
152 	read_queue_e->sinfo_stream = sid;
153 	read_queue_e->sinfo_flags = (flags << 8);
154 	read_queue_e->sinfo_ppid = ppid;
155 	read_queue_e->sinfo_context = context;
156 	read_queue_e->sinfo_tsn = tsn;
157 	read_queue_e->sinfo_cumtsn = tsn;
158 	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
159 	read_queue_e->mid = mid;
160 	read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
161 	TAILQ_INIT(&read_queue_e->reasm);
162 	read_queue_e->whoFrom = net;
163 	atomic_add_int(&net->ref_count, 1);
164 	read_queue_e->data = dm;
165 	read_queue_e->stcb = stcb;
166 	read_queue_e->port_from = stcb->rport;
167 failed_build:
168 	return (read_queue_e);
169 }
170 
171 struct mbuf *
172 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
173 {
174 	struct sctp_extrcvinfo *seinfo;
175 	struct sctp_sndrcvinfo *outinfo;
176 	struct sctp_rcvinfo *rcvinfo;
177 	struct sctp_nxtinfo *nxtinfo;
178 	struct cmsghdr *cmh;
179 	struct mbuf *ret;
180 	int len;
181 	int use_extended;
182 	int provide_nxt;
183 
184 	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
185 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
186 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
187 		/* user does not want any ancillary data */
188 		return (NULL);
189 	}
190 
191 	len = 0;
192 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
193 		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
194 	}
195 	seinfo = (struct sctp_extrcvinfo *)sinfo;
196 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
197 	    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
198 		provide_nxt = 1;
199 		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
200 	} else {
201 		provide_nxt = 0;
202 	}
203 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
204 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
205 			use_extended = 1;
206 			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
207 		} else {
208 			use_extended = 0;
209 			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
210 		}
211 	} else {
212 		use_extended = 0;
213 	}
214 
215 	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
216 	if (ret == NULL) {
217 		/* No space */
218 		return (ret);
219 	}
220 	SCTP_BUF_LEN(ret) = 0;
221 
222 	/* We need a CMSG header followed by the struct */
223 	cmh = mtod(ret, struct cmsghdr *);
224 	/*
225 	 * Make sure that there is no un-initialized padding between the
226 	 * cmsg header and cmsg data and after the cmsg data.
227 	 */
228 	memset(cmh, 0, len);
229 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
230 		cmh->cmsg_level = IPPROTO_SCTP;
231 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
232 		cmh->cmsg_type = SCTP_RCVINFO;
233 		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
234 		rcvinfo->rcv_sid = sinfo->sinfo_stream;
235 		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
236 		rcvinfo->rcv_flags = sinfo->sinfo_flags;
237 		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
238 		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
239 		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
240 		rcvinfo->rcv_context = sinfo->sinfo_context;
241 		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
242 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
243 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
244 	}
245 	if (provide_nxt) {
246 		cmh->cmsg_level = IPPROTO_SCTP;
247 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
248 		cmh->cmsg_type = SCTP_NXTINFO;
249 		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
250 		nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
251 		nxtinfo->nxt_flags = 0;
252 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
253 			nxtinfo->nxt_flags |= SCTP_UNORDERED;
254 		}
255 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
256 			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
257 		}
258 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
259 			nxtinfo->nxt_flags |= SCTP_COMPLETE;
260 		}
261 		nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
262 		nxtinfo->nxt_length = seinfo->serinfo_next_length;
263 		nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
264 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
265 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
266 	}
267 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
268 		cmh->cmsg_level = IPPROTO_SCTP;
269 		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
270 		if (use_extended) {
271 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
272 			cmh->cmsg_type = SCTP_EXTRCV;
273 			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
274 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
275 		} else {
276 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
277 			cmh->cmsg_type = SCTP_SNDRCV;
278 			*outinfo = *sinfo;
279 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
280 		}
281 	}
282 	return (ret);
283 }
284 
285 
286 static void
287 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
288 {
289 	uint32_t gap, i, cumackp1;
290 	int fnd = 0;
291 	int in_r = 0, in_nr = 0;
292 
293 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
294 		return;
295 	}
296 	cumackp1 = asoc->cumulative_tsn + 1;
297 	if (SCTP_TSN_GT(cumackp1, tsn)) {
298 		/*
299 		 * this tsn is behind the cum ack and thus we don't need to
300 		 * worry about it being moved from one to the other.
301 		 */
302 		return;
303 	}
304 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
305 	in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
306 	in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
307 	if ((in_r == 0) && (in_nr == 0)) {
308 #ifdef INVARIANTS
309 		panic("Things are really messed up now");
310 #else
311 		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
312 		sctp_print_mapping_array(asoc);
313 #endif
314 	}
315 	if (in_nr == 0)
316 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
317 	if (in_r)
318 		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
319 	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
320 		asoc->highest_tsn_inside_nr_map = tsn;
321 	}
322 	if (tsn == asoc->highest_tsn_inside_map) {
323 		/* We must back down to see what the new highest is */
324 		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
325 			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
326 			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
327 				asoc->highest_tsn_inside_map = i;
328 				fnd = 1;
329 				break;
330 			}
331 		}
332 		if (!fnd) {
333 			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
334 		}
335 	}
336 }
337 
338 static int
339 sctp_place_control_in_stream(struct sctp_stream_in *strm,
340     struct sctp_association *asoc,
341     struct sctp_queued_to_read *control)
342 {
343 	struct sctp_queued_to_read *at;
344 	struct sctp_readhead *q;
345 	uint8_t flags, unordered;
346 
347 	flags = (control->sinfo_flags >> 8);
348 	unordered = flags & SCTP_DATA_UNORDERED;
349 	if (unordered) {
350 		q = &strm->uno_inqueue;
351 		if (asoc->idata_supported == 0) {
352 			if (!TAILQ_EMPTY(q)) {
353 				/*
354 				 * Only one stream can be here in old style
355 				 * -- abort
356 				 */
357 				return (-1);
358 			}
359 			TAILQ_INSERT_TAIL(q, control, next_instrm);
360 			control->on_strm_q = SCTP_ON_UNORDERED;
361 			return (0);
362 		}
363 	} else {
364 		q = &strm->inqueue;
365 	}
366 	if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
367 		control->end_added = 1;
368 		control->first_frag_seen = 1;
369 		control->last_frag_seen = 1;
370 	}
371 	if (TAILQ_EMPTY(q)) {
372 		/* Empty queue */
373 		TAILQ_INSERT_HEAD(q, control, next_instrm);
374 		if (unordered) {
375 			control->on_strm_q = SCTP_ON_UNORDERED;
376 		} else {
377 			control->on_strm_q = SCTP_ON_ORDERED;
378 		}
379 		return (0);
380 	} else {
381 		TAILQ_FOREACH(at, q, next_instrm) {
382 			if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
383 				/*
384 				 * one in queue is bigger than the new one,
385 				 * insert before this one
386 				 */
387 				TAILQ_INSERT_BEFORE(at, control, next_instrm);
388 				if (unordered) {
389 					control->on_strm_q = SCTP_ON_UNORDERED;
390 				} else {
391 					control->on_strm_q = SCTP_ON_ORDERED;
392 				}
393 				break;
394 			} else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
395 				/*
396 				 * Gak, He sent me a duplicate msg id
397 				 * number?? return -1 to abort.
398 				 */
399 				return (-1);
400 			} else {
401 				if (TAILQ_NEXT(at, next_instrm) == NULL) {
402 					/*
403 					 * We are at the end, insert it
404 					 * after this one
405 					 */
406 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
407 						sctp_log_strm_del(control, at,
408 						    SCTP_STR_LOG_FROM_INSERT_TL);
409 					}
410 					TAILQ_INSERT_AFTER(q, at, control, next_instrm);
411 					if (unordered) {
412 						control->on_strm_q = SCTP_ON_UNORDERED;
413 					} else {
414 						control->on_strm_q = SCTP_ON_ORDERED;
415 					}
416 					break;
417 				}
418 			}
419 		}
420 	}
421 	return (0);
422 }
423 
424 static void
425 sctp_abort_in_reasm(struct sctp_tcb *stcb,
426     struct sctp_queued_to_read *control,
427     struct sctp_tmit_chunk *chk,
428     int *abort_flag, int opspot)
429 {
430 	char msg[SCTP_DIAG_INFO_LEN];
431 	struct mbuf *oper;
432 
433 	if (stcb->asoc.idata_supported) {
434 		snprintf(msg, sizeof(msg),
435 		    "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
436 		    opspot,
437 		    control->fsn_included,
438 		    chk->rec.data.tsn,
439 		    chk->rec.data.sid,
440 		    chk->rec.data.fsn, chk->rec.data.mid);
441 	} else {
442 		snprintf(msg, sizeof(msg),
443 		    "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
444 		    opspot,
445 		    control->fsn_included,
446 		    chk->rec.data.tsn,
447 		    chk->rec.data.sid,
448 		    chk->rec.data.fsn,
449 		    (uint16_t)chk->rec.data.mid);
450 	}
451 	oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
452 	sctp_m_freem(chk->data);
453 	chk->data = NULL;
454 	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
455 	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
456 	sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
457 	*abort_flag = 1;
458 }
459 
460 static void
461 sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
462 {
463 	/*
464 	 * The control could not be placed and must be cleaned.
465 	 */
466 	struct sctp_tmit_chunk *chk, *nchk;
467 
468 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
469 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
470 		if (chk->data)
471 			sctp_m_freem(chk->data);
472 		chk->data = NULL;
473 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
474 	}
475 	sctp_free_a_readq(stcb, control);
476 }
477 
478 /*
479  * Queue the chunk either right into the socket buffer if it is the next one
480  * to go OR put it in the correct place in the delivery queue.  If we do
481  * append to the so_buf, keep doing so until we are out of order as
482  * long as the control's entered are non-fragmented.
483  */
484 static void
485 sctp_queue_data_to_stream(struct sctp_tcb *stcb,
486     struct sctp_association *asoc,
487     struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
488 {
489 	/*
490 	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
491 	 * all the data in one stream this could happen quite rapidly. One
492 	 * could use the TSN to keep track of things, but this scheme breaks
493 	 * down in the other type of stream usage that could occur. Send a
494 	 * single msg to stream 0, send 4Billion messages to stream 1, now
495 	 * send a message to stream 0. You have a situation where the TSN
496 	 * has wrapped but not in the stream. Is this worth worrying about
497 	 * or should we just change our queue sort at the bottom to be by
498 	 * TSN.
499 	 *
500 	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
501 	 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
502 	 * assignment this could happen... and I don't see how this would be
503 	 * a violation. So for now I am undecided an will leave the sort by
504 	 * SSN alone. Maybe a hybred approach is the answer
505 	 *
506 	 */
507 	struct sctp_queued_to_read *at;
508 	int queue_needed;
509 	uint32_t nxt_todel;
510 	struct mbuf *op_err;
511 	struct sctp_stream_in *strm;
512 	char msg[SCTP_DIAG_INFO_LEN];
513 
514 	strm = &asoc->strmin[control->sinfo_stream];
515 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
516 		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
517 	}
518 	if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
519 		/* The incoming sseq is behind where we last delivered? */
520 		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
521 		    strm->last_mid_delivered, control->mid);
522 		/*
523 		 * throw it in the stream so it gets cleaned up in
524 		 * association destruction
525 		 */
526 		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
527 		if (asoc->idata_supported) {
528 			snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
529 			    strm->last_mid_delivered, control->sinfo_tsn,
530 			    control->sinfo_stream, control->mid);
531 		} else {
532 			snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
533 			    (uint16_t)strm->last_mid_delivered,
534 			    control->sinfo_tsn,
535 			    control->sinfo_stream,
536 			    (uint16_t)control->mid);
537 		}
538 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
539 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
540 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
541 		*abort_flag = 1;
542 		return;
543 
544 	}
545 	queue_needed = 1;
546 	asoc->size_on_all_streams += control->length;
547 	sctp_ucount_incr(asoc->cnt_on_all_streams);
548 	nxt_todel = strm->last_mid_delivered + 1;
549 	if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
550 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
551 		struct socket *so;
552 
553 		so = SCTP_INP_SO(stcb->sctp_ep);
554 		atomic_add_int(&stcb->asoc.refcnt, 1);
555 		SCTP_TCB_UNLOCK(stcb);
556 		SCTP_SOCKET_LOCK(so, 1);
557 		SCTP_TCB_LOCK(stcb);
558 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
559 		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
560 			SCTP_SOCKET_UNLOCK(so, 1);
561 			return;
562 		}
563 #endif
564 		/* can be delivered right away? */
565 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
566 			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
567 		}
568 		/* EY it wont be queued if it could be delivered directly */
569 		queue_needed = 0;
570 		if (asoc->size_on_all_streams >= control->length) {
571 			asoc->size_on_all_streams -= control->length;
572 		} else {
573 #ifdef INVARIANTS
574 			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
575 #else
576 			asoc->size_on_all_streams = 0;
577 #endif
578 		}
579 		sctp_ucount_decr(asoc->cnt_on_all_streams);
580 		strm->last_mid_delivered++;
581 		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
582 		sctp_add_to_readq(stcb->sctp_ep, stcb,
583 		    control,
584 		    &stcb->sctp_socket->so_rcv, 1,
585 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
586 		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
587 			/* all delivered */
588 			nxt_todel = strm->last_mid_delivered + 1;
589 			if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
590 			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
591 				if (control->on_strm_q == SCTP_ON_ORDERED) {
592 					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
593 					if (asoc->size_on_all_streams >= control->length) {
594 						asoc->size_on_all_streams -= control->length;
595 					} else {
596 #ifdef INVARIANTS
597 						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
598 #else
599 						asoc->size_on_all_streams = 0;
600 #endif
601 					}
602 					sctp_ucount_decr(asoc->cnt_on_all_streams);
603 #ifdef INVARIANTS
604 				} else {
605 					panic("Huh control: %p is on_strm_q: %d",
606 					    control, control->on_strm_q);
607 #endif
608 				}
609 				control->on_strm_q = 0;
610 				strm->last_mid_delivered++;
611 				/*
612 				 * We ignore the return of deliver_data here
613 				 * since we always can hold the chunk on the
614 				 * d-queue. And we have a finite number that
615 				 * can be delivered from the strq.
616 				 */
617 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
618 					sctp_log_strm_del(control, NULL,
619 					    SCTP_STR_LOG_FROM_IMMED_DEL);
620 				}
621 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
622 				sctp_add_to_readq(stcb->sctp_ep, stcb,
623 				    control,
624 				    &stcb->sctp_socket->so_rcv, 1,
625 				    SCTP_READ_LOCK_NOT_HELD,
626 				    SCTP_SO_LOCKED);
627 				continue;
628 			} else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
629 				*need_reasm = 1;
630 			}
631 			break;
632 		}
633 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
634 		SCTP_SOCKET_UNLOCK(so, 1);
635 #endif
636 	}
637 	if (queue_needed) {
638 		/*
639 		 * Ok, we did not deliver this guy, find the correct place
640 		 * to put it on the queue.
641 		 */
642 		if (sctp_place_control_in_stream(strm, asoc, control)) {
643 			snprintf(msg, sizeof(msg),
644 			    "Queue to str MID: %u duplicate",
645 			    control->mid);
646 			sctp_clean_up_control(stcb, control);
647 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
648 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
649 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
650 			*abort_flag = 1;
651 		}
652 	}
653 }
654 
655 
656 static void
657 sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
658 {
659 	struct mbuf *m, *prev = NULL;
660 	struct sctp_tcb *stcb;
661 
662 	stcb = control->stcb;
663 	control->held_length = 0;
664 	control->length = 0;
665 	m = control->data;
666 	while (m) {
667 		if (SCTP_BUF_LEN(m) == 0) {
668 			/* Skip mbufs with NO length */
669 			if (prev == NULL) {
670 				/* First one */
671 				control->data = sctp_m_free(m);
672 				m = control->data;
673 			} else {
674 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
675 				m = SCTP_BUF_NEXT(prev);
676 			}
677 			if (m == NULL) {
678 				control->tail_mbuf = prev;
679 			}
680 			continue;
681 		}
682 		prev = m;
683 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
684 		if (control->on_read_q) {
685 			/*
686 			 * On read queue so we must increment the SB stuff,
687 			 * we assume caller has done any locks of SB.
688 			 */
689 			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
690 		}
691 		m = SCTP_BUF_NEXT(m);
692 	}
693 	if (prev) {
694 		control->tail_mbuf = prev;
695 	}
696 }
697 
698 static void
699 sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
700 {
701 	struct mbuf *prev = NULL;
702 	struct sctp_tcb *stcb;
703 
704 	stcb = control->stcb;
705 	if (stcb == NULL) {
706 #ifdef INVARIANTS
707 		panic("Control broken");
708 #else
709 		return;
710 #endif
711 	}
712 	if (control->tail_mbuf == NULL) {
713 		/* TSNH */
714 		control->data = m;
715 		sctp_setup_tail_pointer(control);
716 		return;
717 	}
718 	control->tail_mbuf->m_next = m;
719 	while (m) {
720 		if (SCTP_BUF_LEN(m) == 0) {
721 			/* Skip mbufs with NO length */
722 			if (prev == NULL) {
723 				/* First one */
724 				control->tail_mbuf->m_next = sctp_m_free(m);
725 				m = control->tail_mbuf->m_next;
726 			} else {
727 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
728 				m = SCTP_BUF_NEXT(prev);
729 			}
730 			if (m == NULL) {
731 				control->tail_mbuf = prev;
732 			}
733 			continue;
734 		}
735 		prev = m;
736 		if (control->on_read_q) {
737 			/*
738 			 * On read queue so we must increment the SB stuff,
739 			 * we assume caller has done any locks of SB.
740 			 */
741 			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
742 		}
743 		*added += SCTP_BUF_LEN(m);
744 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
745 		m = SCTP_BUF_NEXT(m);
746 	}
747 	if (prev) {
748 		control->tail_mbuf = prev;
749 	}
750 }
751 
752 static void
753 sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
754 {
755 	memset(nc, 0, sizeof(struct sctp_queued_to_read));
756 	nc->sinfo_stream = control->sinfo_stream;
757 	nc->mid = control->mid;
758 	TAILQ_INIT(&nc->reasm);
759 	nc->top_fsn = control->top_fsn;
760 	nc->mid = control->mid;
761 	nc->sinfo_flags = control->sinfo_flags;
762 	nc->sinfo_ppid = control->sinfo_ppid;
763 	nc->sinfo_context = control->sinfo_context;
764 	nc->fsn_included = 0xffffffff;
765 	nc->sinfo_tsn = control->sinfo_tsn;
766 	nc->sinfo_cumtsn = control->sinfo_cumtsn;
767 	nc->sinfo_assoc_id = control->sinfo_assoc_id;
768 	nc->whoFrom = control->whoFrom;
769 	atomic_add_int(&nc->whoFrom->ref_count, 1);
770 	nc->stcb = control->stcb;
771 	nc->port_from = control->port_from;
772 }
773 
774 static void
775 sctp_reset_a_control(struct sctp_queued_to_read *control,
776     struct sctp_inpcb *inp, uint32_t tsn)
777 {
778 	control->fsn_included = tsn;
779 	if (control->on_read_q) {
780 		/*
781 		 * We have to purge it from there, hopefully this will work
782 		 * :-)
783 		 */
784 		TAILQ_REMOVE(&inp->read_queue, control, next);
785 		control->on_read_q = 0;
786 	}
787 }
788 
789 static int
790 sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
791     struct sctp_association *asoc,
792     struct sctp_stream_in *strm,
793     struct sctp_queued_to_read *control,
794     uint32_t pd_point,
795     int inp_read_lock_held)
796 {
797 	/*
798 	 * Special handling for the old un-ordered data chunk. All the
799 	 * chunks/TSN's go to mid 0. So we have to do the old style watching
800 	 * to see if we have it all. If you return one, no other control
801 	 * entries on the un-ordered queue will be looked at. In theory
802 	 * there should be no others entries in reality, unless the guy is
803 	 * sending both unordered NDATA and unordered DATA...
804 	 */
805 	struct sctp_tmit_chunk *chk, *lchk, *tchk;
806 	uint32_t fsn;
807 	struct sctp_queued_to_read *nc;
808 	int cnt_added;
809 
810 	if (control->first_frag_seen == 0) {
811 		/* Nothing we can do, we have not seen the first piece yet */
812 		return (1);
813 	}
814 	/* Collapse any we can */
815 	cnt_added = 0;
816 restart:
817 	fsn = control->fsn_included + 1;
818 	/* Now what can we add? */
819 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
820 		if (chk->rec.data.fsn == fsn) {
821 			/* Ok lets add it */
822 			sctp_alloc_a_readq(stcb, nc);
823 			if (nc == NULL) {
824 				break;
825 			}
826 			memset(nc, 0, sizeof(struct sctp_queued_to_read));
827 			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
828 			sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
829 			fsn++;
830 			cnt_added++;
831 			chk = NULL;
832 			if (control->end_added) {
833 				/* We are done */
834 				if (!TAILQ_EMPTY(&control->reasm)) {
835 					/*
836 					 * Ok we have to move anything left
837 					 * on the control queue to a new
838 					 * control.
839 					 */
840 					sctp_build_readq_entry_from_ctl(nc, control);
841 					tchk = TAILQ_FIRST(&control->reasm);
842 					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
843 						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
844 						if (asoc->size_on_reasm_queue >= tchk->send_size) {
845 							asoc->size_on_reasm_queue -= tchk->send_size;
846 						} else {
847 #ifdef INVARIANTS
848 							panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
849 #else
850 							asoc->size_on_reasm_queue = 0;
851 #endif
852 						}
853 						sctp_ucount_decr(asoc->cnt_on_reasm_queue);
854 						nc->first_frag_seen = 1;
855 						nc->fsn_included = tchk->rec.data.fsn;
856 						nc->data = tchk->data;
857 						nc->sinfo_ppid = tchk->rec.data.ppid;
858 						nc->sinfo_tsn = tchk->rec.data.tsn;
859 						sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
860 						tchk->data = NULL;
861 						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
862 						sctp_setup_tail_pointer(nc);
863 						tchk = TAILQ_FIRST(&control->reasm);
864 					}
865 					/* Spin the rest onto the queue */
866 					while (tchk) {
867 						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
868 						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
869 						tchk = TAILQ_FIRST(&control->reasm);
870 					}
871 					/*
872 					 * Now lets add it to the queue
873 					 * after removing control
874 					 */
875 					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
876 					nc->on_strm_q = SCTP_ON_UNORDERED;
877 					if (control->on_strm_q) {
878 						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
879 						control->on_strm_q = 0;
880 					}
881 				}
882 				if (control->pdapi_started) {
883 					strm->pd_api_started = 0;
884 					control->pdapi_started = 0;
885 				}
886 				if (control->on_strm_q) {
887 					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
888 					control->on_strm_q = 0;
889 					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
890 				}
891 				if (control->on_read_q == 0) {
892 					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
893 					    &stcb->sctp_socket->so_rcv, control->end_added,
894 					    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
895 				}
896 				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
897 				if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
898 					/*
899 					 * Switch to the new guy and
900 					 * continue
901 					 */
902 					control = nc;
903 					goto restart;
904 				} else {
905 					if (nc->on_strm_q == 0) {
906 						sctp_free_a_readq(stcb, nc);
907 					}
908 				}
909 				return (1);
910 			} else {
911 				sctp_free_a_readq(stcb, nc);
912 			}
913 		} else {
914 			/* Can't add more */
915 			break;
916 		}
917 	}
918 	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
919 		strm->pd_api_started = 1;
920 		control->pdapi_started = 1;
921 		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
922 		    &stcb->sctp_socket->so_rcv, control->end_added,
923 		    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
924 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
925 		return (0);
926 	} else {
927 		return (1);
928 	}
929 }
930 
931 static void
932 sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
933     struct sctp_association *asoc,
934     struct sctp_queued_to_read *control,
935     struct sctp_tmit_chunk *chk,
936     int *abort_flag)
937 {
938 	struct sctp_tmit_chunk *at;
939 	int inserted;
940 
941 	/*
942 	 * Here we need to place the chunk into the control structure sorted
943 	 * in the correct order.
944 	 */
945 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
946 		/* Its the very first one. */
947 		SCTPDBG(SCTP_DEBUG_XXX,
948 		    "chunk is a first fsn: %u becomes fsn_included\n",
949 		    chk->rec.data.fsn);
950 		at = TAILQ_FIRST(&control->reasm);
951 		if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) {
952 			/*
953 			 * The first chunk in the reassembly is a smaller
954 			 * TSN than this one, even though this has a first,
955 			 * it must be from a subsequent msg.
956 			 */
957 			goto place_chunk;
958 		}
959 		if (control->first_frag_seen) {
960 			/*
961 			 * In old un-ordered we can reassembly on one
962 			 * control multiple messages. As long as the next
963 			 * FIRST is greater then the old first (TSN i.e. FSN
964 			 * wise)
965 			 */
966 			struct mbuf *tdata;
967 			uint32_t tmp;
968 
969 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
970 				/*
971 				 * Easy way the start of a new guy beyond
972 				 * the lowest
973 				 */
974 				goto place_chunk;
975 			}
976 			if ((chk->rec.data.fsn == control->fsn_included) ||
977 			    (control->pdapi_started)) {
978 				/*
979 				 * Ok this should not happen, if it does we
980 				 * started the pd-api on the higher TSN
981 				 * (since the equals part is a TSN failure
982 				 * it must be that).
983 				 *
984 				 * We are completly hosed in that case since
985 				 * I have no way to recover. This really
986 				 * will only happen if we can get more TSN's
987 				 * higher before the pd-api-point.
988 				 */
989 				sctp_abort_in_reasm(stcb, control, chk,
990 				    abort_flag,
991 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
992 
993 				return;
994 			}
995 			/*
996 			 * Ok we have two firsts and the one we just got is
997 			 * smaller than the one we previously placed.. yuck!
998 			 * We must swap them out.
999 			 */
1000 			/* swap the mbufs */
1001 			tdata = control->data;
1002 			control->data = chk->data;
1003 			chk->data = tdata;
1004 			/* Save the lengths */
1005 			chk->send_size = control->length;
1006 			/* Recompute length of control and tail pointer */
1007 			sctp_setup_tail_pointer(control);
1008 			/* Fix the FSN included */
1009 			tmp = control->fsn_included;
1010 			control->fsn_included = chk->rec.data.fsn;
1011 			chk->rec.data.fsn = tmp;
1012 			/* Fix the TSN included */
1013 			tmp = control->sinfo_tsn;
1014 			control->sinfo_tsn = chk->rec.data.tsn;
1015 			chk->rec.data.tsn = tmp;
1016 			/* Fix the PPID included */
1017 			tmp = control->sinfo_ppid;
1018 			control->sinfo_ppid = chk->rec.data.ppid;
1019 			chk->rec.data.ppid = tmp;
1020 			/* Fix tail pointer */
1021 			goto place_chunk;
1022 		}
1023 		control->first_frag_seen = 1;
1024 		control->fsn_included = chk->rec.data.fsn;
1025 		control->top_fsn = chk->rec.data.fsn;
1026 		control->sinfo_tsn = chk->rec.data.tsn;
1027 		control->sinfo_ppid = chk->rec.data.ppid;
1028 		control->data = chk->data;
1029 		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1030 		chk->data = NULL;
1031 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1032 		sctp_setup_tail_pointer(control);
1033 		return;
1034 	}
1035 place_chunk:
1036 	inserted = 0;
1037 	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1038 		if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1039 			/*
1040 			 * This one in queue is bigger than the new one,
1041 			 * insert the new one before at.
1042 			 */
1043 			asoc->size_on_reasm_queue += chk->send_size;
1044 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1045 			inserted = 1;
1046 			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1047 			break;
1048 		} else if (at->rec.data.fsn == chk->rec.data.fsn) {
1049 			/*
1050 			 * They sent a duplicate fsn number. This really
1051 			 * should not happen since the FSN is a TSN and it
1052 			 * should have been dropped earlier.
1053 			 */
1054 			sctp_abort_in_reasm(stcb, control, chk,
1055 			    abort_flag,
1056 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
1057 			return;
1058 		}
1059 
1060 	}
1061 	if (inserted == 0) {
1062 		/* Its at the end */
1063 		asoc->size_on_reasm_queue += chk->send_size;
1064 		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1065 		control->top_fsn = chk->rec.data.fsn;
1066 		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1067 	}
1068 }
1069 
1070 static int
1071 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
1072     struct sctp_stream_in *strm, int inp_read_lock_held)
1073 {
1074 	/*
1075 	 * Given a stream, strm, see if any of the SSN's on it that are
1076 	 * fragmented are ready to deliver. If so go ahead and place them on
1077 	 * the read queue. In so placing if we have hit the end, then we
1078 	 * need to remove them from the stream's queue.
1079 	 */
1080 	struct sctp_queued_to_read *control, *nctl = NULL;
1081 	uint32_t next_to_del;
1082 	uint32_t pd_point;
1083 	int ret = 0;
1084 
1085 	if (stcb->sctp_socket) {
1086 		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
1087 		    stcb->sctp_ep->partial_delivery_point);
1088 	} else {
1089 		pd_point = stcb->sctp_ep->partial_delivery_point;
1090 	}
1091 	control = TAILQ_FIRST(&strm->uno_inqueue);
1092 
1093 	if ((control != NULL) &&
1094 	    (asoc->idata_supported == 0)) {
1095 		/* Special handling needed for "old" data format */
1096 		if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
1097 			goto done_un;
1098 		}
1099 	}
1100 	if (strm->pd_api_started) {
1101 		/* Can't add more */
1102 		return (0);
1103 	}
1104 	while (control) {
1105 		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
1106 		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
1107 		nctl = TAILQ_NEXT(control, next_instrm);
1108 		if (control->end_added) {
1109 			/* We just put the last bit on */
1110 			if (control->on_strm_q) {
1111 #ifdef INVARIANTS
1112 				if (control->on_strm_q != SCTP_ON_UNORDERED) {
1113 					panic("Huh control: %p on_q: %d -- not unordered?",
1114 					    control, control->on_strm_q);
1115 				}
1116 #endif
1117 				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1118 				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1119 				control->on_strm_q = 0;
1120 			}
1121 			if (control->on_read_q == 0) {
1122 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1123 				    control,
1124 				    &stcb->sctp_socket->so_rcv, control->end_added,
1125 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1126 			}
1127 		} else {
1128 			/* Can we do a PD-API for this un-ordered guy? */
1129 			if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
1130 				strm->pd_api_started = 1;
1131 				control->pdapi_started = 1;
1132 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1133 				    control,
1134 				    &stcb->sctp_socket->so_rcv, control->end_added,
1135 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1136 
1137 				break;
1138 			}
1139 		}
1140 		control = nctl;
1141 	}
1142 done_un:
1143 	control = TAILQ_FIRST(&strm->inqueue);
1144 	if (strm->pd_api_started) {
1145 		/* Can't add more */
1146 		return (0);
1147 	}
1148 	if (control == NULL) {
1149 		return (ret);
1150 	}
1151 	if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
1152 		/*
1153 		 * Ok the guy at the top was being partially delivered
1154 		 * completed, so we remove it. Note the pd_api flag was
1155 		 * taken off when the chunk was merged on in
1156 		 * sctp_queue_data_for_reasm below.
1157 		 */
1158 		nctl = TAILQ_NEXT(control, next_instrm);
1159 		SCTPDBG(SCTP_DEBUG_XXX,
1160 		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
1161 		    control, control->end_added, control->mid,
1162 		    control->top_fsn, control->fsn_included,
1163 		    strm->last_mid_delivered);
1164 		if (control->end_added) {
1165 			if (control->on_strm_q) {
1166 #ifdef INVARIANTS
1167 				if (control->on_strm_q != SCTP_ON_ORDERED) {
1168 					panic("Huh control: %p on_q: %d -- not ordered?",
1169 					    control, control->on_strm_q);
1170 				}
1171 #endif
1172 				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1173 				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1174 				if (asoc->size_on_all_streams >= control->length) {
1175 					asoc->size_on_all_streams -= control->length;
1176 				} else {
1177 #ifdef INVARIANTS
1178 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1179 #else
1180 					asoc->size_on_all_streams = 0;
1181 #endif
1182 				}
1183 				sctp_ucount_decr(asoc->cnt_on_all_streams);
1184 				control->on_strm_q = 0;
1185 			}
1186 			if (strm->pd_api_started && control->pdapi_started) {
1187 				control->pdapi_started = 0;
1188 				strm->pd_api_started = 0;
1189 			}
1190 			if (control->on_read_q == 0) {
1191 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1192 				    control,
1193 				    &stcb->sctp_socket->so_rcv, control->end_added,
1194 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1195 			}
1196 			control = nctl;
1197 		}
1198 	}
1199 	if (strm->pd_api_started) {
1200 		/*
1201 		 * Can't add more must have gotten an un-ordered above being
1202 		 * partially delivered.
1203 		 */
1204 		return (0);
1205 	}
1206 deliver_more:
1207 	next_to_del = strm->last_mid_delivered + 1;
1208 	if (control) {
1209 		SCTPDBG(SCTP_DEBUG_XXX,
1210 		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
1211 		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
1212 		    next_to_del);
1213 		nctl = TAILQ_NEXT(control, next_instrm);
1214 		if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
1215 		    (control->first_frag_seen)) {
1216 			int done;
1217 
1218 			/* Ok we can deliver it onto the stream. */
1219 			if (control->end_added) {
1220 				/* We are done with it afterwards */
1221 				if (control->on_strm_q) {
1222 #ifdef INVARIANTS
1223 					if (control->on_strm_q != SCTP_ON_ORDERED) {
1224 						panic("Huh control: %p on_q: %d -- not ordered?",
1225 						    control, control->on_strm_q);
1226 					}
1227 #endif
1228 					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1229 					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1230 					if (asoc->size_on_all_streams >= control->length) {
1231 						asoc->size_on_all_streams -= control->length;
1232 					} else {
1233 #ifdef INVARIANTS
1234 						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1235 #else
1236 						asoc->size_on_all_streams = 0;
1237 #endif
1238 					}
1239 					sctp_ucount_decr(asoc->cnt_on_all_streams);
1240 					control->on_strm_q = 0;
1241 				}
1242 				ret++;
1243 			}
1244 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1245 				/*
1246 				 * A singleton now slipping through - mark
1247 				 * it non-revokable too
1248 				 */
1249 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1250 			} else if (control->end_added == 0) {
1251 				/*
1252 				 * Check if we can defer adding until its
1253 				 * all there
1254 				 */
1255 				if ((control->length < pd_point) || (strm->pd_api_started)) {
1256 					/*
1257 					 * Don't need it or cannot add more
1258 					 * (one being delivered that way)
1259 					 */
1260 					goto out;
1261 				}
1262 			}
1263 			done = (control->end_added) && (control->last_frag_seen);
1264 			if (control->on_read_q == 0) {
1265 				if (!done) {
1266 					if (asoc->size_on_all_streams >= control->length) {
1267 						asoc->size_on_all_streams -= control->length;
1268 					} else {
1269 #ifdef INVARIANTS
1270 						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1271 #else
1272 						asoc->size_on_all_streams = 0;
1273 #endif
1274 					}
1275 					strm->pd_api_started = 1;
1276 					control->pdapi_started = 1;
1277 				}
1278 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1279 				    control,
1280 				    &stcb->sctp_socket->so_rcv, control->end_added,
1281 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1282 			}
1283 			strm->last_mid_delivered = next_to_del;
1284 			if (done) {
1285 				control = nctl;
1286 				goto deliver_more;
1287 			}
1288 		}
1289 	}
1290 out:
1291 	return (ret);
1292 }
1293 
1294 
1295 uint32_t
1296 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
1297     struct sctp_stream_in *strm,
1298     struct sctp_tcb *stcb, struct sctp_association *asoc,
1299     struct sctp_tmit_chunk *chk, int hold_rlock)
1300 {
1301 	/*
1302 	 * Given a control and a chunk, merge the data from the chk onto the
1303 	 * control and free up the chunk resources.
1304 	 */
1305 	uint32_t added = 0;
1306 	int i_locked = 0;
1307 
1308 	if (control->on_read_q && (hold_rlock == 0)) {
1309 		/*
1310 		 * Its being pd-api'd so we must do some locks.
1311 		 */
1312 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
1313 		i_locked = 1;
1314 	}
1315 	if (control->data == NULL) {
1316 		control->data = chk->data;
1317 		sctp_setup_tail_pointer(control);
1318 	} else {
1319 		sctp_add_to_tail_pointer(control, chk->data, &added);
1320 	}
1321 	control->fsn_included = chk->rec.data.fsn;
1322 	asoc->size_on_reasm_queue -= chk->send_size;
1323 	sctp_ucount_decr(asoc->cnt_on_reasm_queue);
1324 	sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1325 	chk->data = NULL;
1326 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1327 		control->first_frag_seen = 1;
1328 		control->sinfo_tsn = chk->rec.data.tsn;
1329 		control->sinfo_ppid = chk->rec.data.ppid;
1330 	}
1331 	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1332 		/* Its complete */
1333 		if ((control->on_strm_q) && (control->on_read_q)) {
1334 			if (control->pdapi_started) {
1335 				control->pdapi_started = 0;
1336 				strm->pd_api_started = 0;
1337 			}
1338 			if (control->on_strm_q == SCTP_ON_UNORDERED) {
1339 				/* Unordered */
1340 				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1341 				control->on_strm_q = 0;
1342 			} else if (control->on_strm_q == SCTP_ON_ORDERED) {
1343 				/* Ordered */
1344 				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1345 				/*
1346 				 * Don't need to decrement
1347 				 * size_on_all_streams, since control is on
1348 				 * the read queue.
1349 				 */
1350 				sctp_ucount_decr(asoc->cnt_on_all_streams);
1351 				control->on_strm_q = 0;
1352 #ifdef INVARIANTS
1353 			} else if (control->on_strm_q) {
1354 				panic("Unknown state on ctrl: %p on_strm_q: %d", control,
1355 				    control->on_strm_q);
1356 #endif
1357 			}
1358 		}
1359 		control->end_added = 1;
1360 		control->last_frag_seen = 1;
1361 	}
1362 	if (i_locked) {
1363 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
1364 	}
1365 	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1366 	return (added);
1367 }
1368 
1369 /*
1370  * Dump onto the re-assembly queue, in its proper place. After dumping on the
1371  * queue, see if anthing can be delivered. If so pull it off (or as much as
1372  * we can. If we run out of space then we must dump what we can and set the
1373  * appropriate flag to say we queued what we could.
1374  */
1375 static void
1376 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1377     struct sctp_queued_to_read *control,
1378     struct sctp_tmit_chunk *chk,
1379     int created_control,
1380     int *abort_flag, uint32_t tsn)
1381 {
1382 	uint32_t next_fsn;
1383 	struct sctp_tmit_chunk *at, *nat;
1384 	struct sctp_stream_in *strm;
1385 	int do_wakeup, unordered;
1386 	uint32_t lenadded;
1387 
1388 	strm = &asoc->strmin[control->sinfo_stream];
1389 	/*
1390 	 * For old un-ordered data chunks.
1391 	 */
1392 	if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
1393 		unordered = 1;
1394 	} else {
1395 		unordered = 0;
1396 	}
1397 	/* Must be added to the stream-in queue */
1398 	if (created_control) {
1399 		if (unordered == 0) {
1400 			sctp_ucount_incr(asoc->cnt_on_all_streams);
1401 		}
1402 		if (sctp_place_control_in_stream(strm, asoc, control)) {
1403 			/* Duplicate SSN? */
1404 			sctp_abort_in_reasm(stcb, control, chk,
1405 			    abort_flag,
1406 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1407 			sctp_clean_up_control(stcb, control);
1408 			return;
1409 		}
1410 		if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
1411 			/*
1412 			 * Ok we created this control and now lets validate
1413 			 * that its legal i.e. there is a B bit set, if not
1414 			 * and we have up to the cum-ack then its invalid.
1415 			 */
1416 			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1417 				sctp_abort_in_reasm(stcb, control, chk,
1418 				    abort_flag,
1419 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1420 				return;
1421 			}
1422 		}
1423 	}
1424 	if ((asoc->idata_supported == 0) && (unordered == 1)) {
1425 		sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
1426 		return;
1427 	}
1428 	/*
1429 	 * Ok we must queue the chunk into the reasembly portion: o if its
1430 	 * the first it goes to the control mbuf. o if its not first but the
1431 	 * next in sequence it goes to the control, and each succeeding one
1432 	 * in order also goes. o if its not in order we place it on the list
1433 	 * in its place.
1434 	 */
1435 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1436 		/* Its the very first one. */
1437 		SCTPDBG(SCTP_DEBUG_XXX,
1438 		    "chunk is a first fsn: %u becomes fsn_included\n",
1439 		    chk->rec.data.fsn);
1440 		if (control->first_frag_seen) {
1441 			/*
1442 			 * Error on senders part, they either sent us two
1443 			 * data chunks with FIRST, or they sent two
1444 			 * un-ordered chunks that were fragmented at the
1445 			 * same time in the same stream.
1446 			 */
1447 			sctp_abort_in_reasm(stcb, control, chk,
1448 			    abort_flag,
1449 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1450 			return;
1451 		}
1452 		control->first_frag_seen = 1;
1453 		control->sinfo_ppid = chk->rec.data.ppid;
1454 		control->sinfo_tsn = chk->rec.data.tsn;
1455 		control->fsn_included = chk->rec.data.fsn;
1456 		control->data = chk->data;
1457 		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1458 		chk->data = NULL;
1459 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1460 		sctp_setup_tail_pointer(control);
1461 		asoc->size_on_all_streams += control->length;
1462 	} else {
1463 		/* Place the chunk in our list */
1464 		int inserted = 0;
1465 
1466 		if (control->last_frag_seen == 0) {
1467 			/* Still willing to raise highest FSN seen */
1468 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1469 				SCTPDBG(SCTP_DEBUG_XXX,
1470 				    "We have a new top_fsn: %u\n",
1471 				    chk->rec.data.fsn);
1472 				control->top_fsn = chk->rec.data.fsn;
1473 			}
1474 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1475 				SCTPDBG(SCTP_DEBUG_XXX,
1476 				    "The last fsn is now in place fsn: %u\n",
1477 				    chk->rec.data.fsn);
1478 				control->last_frag_seen = 1;
1479 			}
1480 			if (asoc->idata_supported || control->first_frag_seen) {
1481 				/*
1482 				 * For IDATA we always check since we know
1483 				 * that the first fragment is 0. For old
1484 				 * DATA we have to receive the first before
1485 				 * we know the first FSN (which is the TSN).
1486 				 */
1487 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1488 					/*
1489 					 * We have already delivered up to
1490 					 * this so its a dup
1491 					 */
1492 					sctp_abort_in_reasm(stcb, control, chk,
1493 					    abort_flag,
1494 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1495 					return;
1496 				}
1497 			}
1498 		} else {
1499 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1500 				/* Second last? huh? */
1501 				SCTPDBG(SCTP_DEBUG_XXX,
1502 				    "Duplicate last fsn: %u (top: %u) -- abort\n",
1503 				    chk->rec.data.fsn, control->top_fsn);
1504 				sctp_abort_in_reasm(stcb, control,
1505 				    chk, abort_flag,
1506 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1507 				return;
1508 			}
1509 			if (asoc->idata_supported || control->first_frag_seen) {
1510 				/*
1511 				 * For IDATA we always check since we know
1512 				 * that the first fragment is 0. For old
1513 				 * DATA we have to receive the first before
1514 				 * we know the first FSN (which is the TSN).
1515 				 */
1516 
1517 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1518 					/*
1519 					 * We have already delivered up to
1520 					 * this so its a dup
1521 					 */
1522 					SCTPDBG(SCTP_DEBUG_XXX,
1523 					    "New fsn: %u is already seen in included_fsn: %u -- abort\n",
1524 					    chk->rec.data.fsn, control->fsn_included);
1525 					sctp_abort_in_reasm(stcb, control, chk,
1526 					    abort_flag,
1527 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1528 					return;
1529 				}
1530 			}
1531 			/*
1532 			 * validate not beyond top FSN if we have seen last
1533 			 * one
1534 			 */
1535 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1536 				SCTPDBG(SCTP_DEBUG_XXX,
1537 				    "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
1538 				    chk->rec.data.fsn,
1539 				    control->top_fsn);
1540 				sctp_abort_in_reasm(stcb, control, chk,
1541 				    abort_flag,
1542 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1543 				return;
1544 			}
1545 		}
1546 		/*
1547 		 * If we reach here, we need to place the new chunk in the
1548 		 * reassembly for this control.
1549 		 */
1550 		SCTPDBG(SCTP_DEBUG_XXX,
1551 		    "chunk is a not first fsn: %u needs to be inserted\n",
1552 		    chk->rec.data.fsn);
1553 		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1554 			if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1555 				/*
1556 				 * This one in queue is bigger than the new
1557 				 * one, insert the new one before at.
1558 				 */
1559 				SCTPDBG(SCTP_DEBUG_XXX,
1560 				    "Insert it before fsn: %u\n",
1561 				    at->rec.data.fsn);
1562 				asoc->size_on_reasm_queue += chk->send_size;
1563 				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1564 				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1565 				inserted = 1;
1566 				break;
1567 			} else if (at->rec.data.fsn == chk->rec.data.fsn) {
1568 				/*
1569 				 * Gak, He sent me a duplicate str seq
1570 				 * number
1571 				 */
1572 				/*
1573 				 * foo bar, I guess I will just free this
1574 				 * new guy, should we abort too? FIX ME
1575 				 * MAYBE? Or it COULD be that the SSN's have
1576 				 * wrapped. Maybe I should compare to TSN
1577 				 * somehow... sigh for now just blow away
1578 				 * the chunk!
1579 				 */
1580 				SCTPDBG(SCTP_DEBUG_XXX,
1581 				    "Duplicate to fsn: %u -- abort\n",
1582 				    at->rec.data.fsn);
1583 				sctp_abort_in_reasm(stcb, control,
1584 				    chk, abort_flag,
1585 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1586 				return;
1587 			}
1588 		}
1589 		if (inserted == 0) {
1590 			/* Goes on the end */
1591 			SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
1592 			    chk->rec.data.fsn);
1593 			asoc->size_on_reasm_queue += chk->send_size;
1594 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1595 			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1596 		}
1597 	}
1598 	/*
1599 	 * Ok lets see if we can suck any up into the control structure that
1600 	 * are in seq if it makes sense.
1601 	 */
1602 	do_wakeup = 0;
1603 	/*
1604 	 * If the first fragment has not been seen there is no sense in
1605 	 * looking.
1606 	 */
1607 	if (control->first_frag_seen) {
1608 		next_fsn = control->fsn_included + 1;
1609 		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1610 			if (at->rec.data.fsn == next_fsn) {
1611 				/* We can add this one now to the control */
1612 				SCTPDBG(SCTP_DEBUG_XXX,
1613 				    "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1614 				    control, at,
1615 				    at->rec.data.fsn,
1616 				    next_fsn, control->fsn_included);
1617 				TAILQ_REMOVE(&control->reasm, at, sctp_next);
1618 				lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
1619 				if (control->on_read_q) {
1620 					do_wakeup = 1;
1621 				} else {
1622 					/*
1623 					 * We only add to the
1624 					 * size-on-all-streams if its not on
1625 					 * the read q. The read q flag will
1626 					 * cause a sballoc so its accounted
1627 					 * for there.
1628 					 */
1629 					asoc->size_on_all_streams += lenadded;
1630 				}
1631 				next_fsn++;
1632 				if (control->end_added && control->pdapi_started) {
1633 					if (strm->pd_api_started) {
1634 						strm->pd_api_started = 0;
1635 						control->pdapi_started = 0;
1636 					}
1637 					if (control->on_read_q == 0) {
1638 						sctp_add_to_readq(stcb->sctp_ep, stcb,
1639 						    control,
1640 						    &stcb->sctp_socket->so_rcv, control->end_added,
1641 						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1642 					}
1643 					break;
1644 				}
1645 			} else {
1646 				break;
1647 			}
1648 		}
1649 	}
1650 	if (do_wakeup) {
1651 		/* Need to wakeup the reader */
1652 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
1653 	}
1654 }
1655 
1656 static struct sctp_queued_to_read *
1657 sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
1658 {
1659 	struct sctp_queued_to_read *control;
1660 
1661 	if (ordered) {
1662 		TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
1663 			if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1664 				break;
1665 			}
1666 		}
1667 	} else {
1668 		if (idata_supported) {
1669 			TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1670 				if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1671 					break;
1672 				}
1673 			}
1674 		} else {
1675 			control = TAILQ_FIRST(&strm->uno_inqueue);
1676 		}
1677 	}
1678 	return (control);
1679 }
1680 
1681 static int
1682 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1683     struct mbuf **m, int offset, int chk_length,
1684     struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
1685     int *break_flag, int last_chunk, uint8_t chk_type)
1686 {
1687 	struct sctp_tmit_chunk *chk = NULL;	/* make gcc happy */
1688 	uint32_t tsn, fsn, gap, mid;
1689 	struct mbuf *dmbuf;
1690 	int the_len;
1691 	int need_reasm_check = 0;
1692 	uint16_t sid;
1693 	struct mbuf *op_err;
1694 	char msg[SCTP_DIAG_INFO_LEN];
1695 	struct sctp_queued_to_read *control, *ncontrol;
1696 	uint32_t ppid;
1697 	uint8_t chk_flags;
1698 	struct sctp_stream_reset_list *liste;
1699 	int ordered;
1700 	size_t clen;
1701 	int created_control = 0;
1702 
1703 	if (chk_type == SCTP_IDATA) {
1704 		struct sctp_idata_chunk *chunk, chunk_buf;
1705 
1706 		chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1707 		    sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
1708 		chk_flags = chunk->ch.chunk_flags;
1709 		clen = sizeof(struct sctp_idata_chunk);
1710 		tsn = ntohl(chunk->dp.tsn);
1711 		sid = ntohs(chunk->dp.sid);
1712 		mid = ntohl(chunk->dp.mid);
1713 		if (chk_flags & SCTP_DATA_FIRST_FRAG) {
1714 			fsn = 0;
1715 			ppid = chunk->dp.ppid_fsn.ppid;
1716 		} else {
1717 			fsn = ntohl(chunk->dp.ppid_fsn.fsn);
1718 			ppid = 0xffffffff;	/* Use as an invalid value. */
1719 		}
1720 	} else {
1721 		struct sctp_data_chunk *chunk, chunk_buf;
1722 
1723 		chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1724 		    sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
1725 		chk_flags = chunk->ch.chunk_flags;
1726 		clen = sizeof(struct sctp_data_chunk);
1727 		tsn = ntohl(chunk->dp.tsn);
1728 		sid = ntohs(chunk->dp.sid);
1729 		mid = (uint32_t)(ntohs(chunk->dp.ssn));
1730 		fsn = tsn;
1731 		ppid = chunk->dp.ppid;
1732 	}
1733 	if ((size_t)chk_length == clen) {
1734 		/*
1735 		 * Need to send an abort since we had a empty data chunk.
1736 		 */
1737 		op_err = sctp_generate_no_user_data_cause(tsn);
1738 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1739 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1740 		*abort_flag = 1;
1741 		return (0);
1742 	}
1743 	if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1744 		asoc->send_sack = 1;
1745 	}
1746 	ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
1747 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1748 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1749 	}
1750 	if (stcb == NULL) {
1751 		return (0);
1752 	}
1753 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
1754 	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1755 		/* It is a duplicate */
1756 		SCTP_STAT_INCR(sctps_recvdupdata);
1757 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1758 			/* Record a dup for the next outbound sack */
1759 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1760 			asoc->numduptsns++;
1761 		}
1762 		asoc->send_sack = 1;
1763 		return (0);
1764 	}
1765 	/* Calculate the number of TSN's between the base and this TSN */
1766 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1767 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1768 		/* Can't hold the bit in the mapping at max array, toss it */
1769 		return (0);
1770 	}
1771 	if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
1772 		SCTP_TCB_LOCK_ASSERT(stcb);
1773 		if (sctp_expand_mapping_array(asoc, gap)) {
1774 			/* Can't expand, drop it */
1775 			return (0);
1776 		}
1777 	}
1778 	if (SCTP_TSN_GT(tsn, *high_tsn)) {
1779 		*high_tsn = tsn;
1780 	}
1781 	/* See if we have received this one already */
1782 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1783 	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1784 		SCTP_STAT_INCR(sctps_recvdupdata);
1785 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1786 			/* Record a dup for the next outbound sack */
1787 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1788 			asoc->numduptsns++;
1789 		}
1790 		asoc->send_sack = 1;
1791 		return (0);
1792 	}
1793 	/*
1794 	 * Check to see about the GONE flag, duplicates would cause a sack
1795 	 * to be sent up above
1796 	 */
1797 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1798 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1799 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1800 		/*
1801 		 * wait a minute, this guy is gone, there is no longer a
1802 		 * receiver. Send peer an ABORT!
1803 		 */
1804 		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1805 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1806 		*abort_flag = 1;
1807 		return (0);
1808 	}
1809 	/*
1810 	 * Now before going further we see if there is room. If NOT then we
1811 	 * MAY let one through only IF this TSN is the one we are waiting
1812 	 * for on a partial delivery API.
1813 	 */
1814 
1815 	/* Is the stream valid? */
1816 	if (sid >= asoc->streamincnt) {
1817 		struct sctp_error_invalid_stream *cause;
1818 
1819 		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1820 		    0, M_NOWAIT, 1, MT_DATA);
1821 		if (op_err != NULL) {
1822 			/* add some space up front so prepend will work well */
1823 			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1824 			cause = mtod(op_err, struct sctp_error_invalid_stream *);
1825 			/*
1826 			 * Error causes are just param's and this one has
1827 			 * two back to back phdr, one with the error type
1828 			 * and size, the other with the streamid and a rsvd
1829 			 */
1830 			SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1831 			cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1832 			cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1833 			cause->stream_id = htons(sid);
1834 			cause->reserved = htons(0);
1835 			sctp_queue_op_err(stcb, op_err);
1836 		}
1837 		SCTP_STAT_INCR(sctps_badsid);
1838 		SCTP_TCB_LOCK_ASSERT(stcb);
1839 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1840 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1841 			asoc->highest_tsn_inside_nr_map = tsn;
1842 		}
1843 		if (tsn == (asoc->cumulative_tsn + 1)) {
1844 			/* Update cum-ack */
1845 			asoc->cumulative_tsn = tsn;
1846 		}
1847 		return (0);
1848 	}
1849 	/*
1850 	 * If its a fragmented message, lets see if we can find the control
1851 	 * on the reassembly queues.
1852 	 */
1853 	if ((chk_type == SCTP_IDATA) &&
1854 	    ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
1855 	    (fsn == 0)) {
1856 		/*
1857 		 * The first *must* be fsn 0, and other (middle/end) pieces
1858 		 * can *not* be fsn 0. XXX: This can happen in case of a
1859 		 * wrap around. Ignore is for now.
1860 		 */
1861 		snprintf(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x",
1862 		    mid, chk_flags);
1863 		goto err_out;
1864 	}
1865 	control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
1866 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1867 	    chk_flags, control);
1868 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1869 		/* See if we can find the re-assembly entity */
1870 		if (control != NULL) {
1871 			/* We found something, does it belong? */
1872 			if (ordered && (mid != control->mid)) {
1873 				snprintf(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
1874 		err_out:
1875 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1876 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1877 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1878 				*abort_flag = 1;
1879 				return (0);
1880 			}
1881 			if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1882 				/*
1883 				 * We can't have a switched order with an
1884 				 * unordered chunk
1885 				 */
1886 				snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1887 				    tsn);
1888 				goto err_out;
1889 			}
1890 			if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1891 				/*
1892 				 * We can't have a switched unordered with a
1893 				 * ordered chunk
1894 				 */
1895 				snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1896 				    tsn);
1897 				goto err_out;
1898 			}
1899 		}
1900 	} else {
1901 		/*
1902 		 * Its a complete segment. Lets validate we don't have a
1903 		 * re-assembly going on with the same Stream/Seq (for
1904 		 * ordered) or in the same Stream for unordered.
1905 		 */
1906 		if (control != NULL) {
1907 			if (ordered || asoc->idata_supported) {
1908 				SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
1909 				    chk_flags, mid);
1910 				snprintf(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
1911 				goto err_out;
1912 			} else {
1913 				if ((tsn == control->fsn_included + 1) &&
1914 				    (control->end_added == 0)) {
1915 					snprintf(msg, sizeof(msg), "Illegal message sequence, missing end for MID: %8.8x", control->fsn_included);
1916 					goto err_out;
1917 				} else {
1918 					control = NULL;
1919 				}
1920 			}
1921 		}
1922 	}
1923 	/* now do the tests */
1924 	if (((asoc->cnt_on_all_streams +
1925 	    asoc->cnt_on_reasm_queue +
1926 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1927 	    (((int)asoc->my_rwnd) <= 0)) {
1928 		/*
1929 		 * When we have NO room in the rwnd we check to make sure
1930 		 * the reader is doing its job...
1931 		 */
1932 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1933 			/* some to read, wake-up */
1934 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1935 			struct socket *so;
1936 
1937 			so = SCTP_INP_SO(stcb->sctp_ep);
1938 			atomic_add_int(&stcb->asoc.refcnt, 1);
1939 			SCTP_TCB_UNLOCK(stcb);
1940 			SCTP_SOCKET_LOCK(so, 1);
1941 			SCTP_TCB_LOCK(stcb);
1942 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1943 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1944 				/* assoc was freed while we were unlocked */
1945 				SCTP_SOCKET_UNLOCK(so, 1);
1946 				return (0);
1947 			}
1948 #endif
1949 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1950 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1951 			SCTP_SOCKET_UNLOCK(so, 1);
1952 #endif
1953 		}
1954 		/* now is it in the mapping array of what we have accepted? */
1955 		if (chk_type == SCTP_DATA) {
1956 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1957 			    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1958 				/* Nope not in the valid range dump it */
1959 		dump_packet:
1960 				sctp_set_rwnd(stcb, asoc);
1961 				if ((asoc->cnt_on_all_streams +
1962 				    asoc->cnt_on_reasm_queue +
1963 				    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1964 					SCTP_STAT_INCR(sctps_datadropchklmt);
1965 				} else {
1966 					SCTP_STAT_INCR(sctps_datadroprwnd);
1967 				}
1968 				*break_flag = 1;
1969 				return (0);
1970 			}
1971 		} else {
1972 			if (control == NULL) {
1973 				goto dump_packet;
1974 			}
1975 			if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1976 				goto dump_packet;
1977 			}
1978 		}
1979 	}
1980 #ifdef SCTP_ASOCLOG_OF_TSNS
1981 	SCTP_TCB_LOCK_ASSERT(stcb);
1982 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1983 		asoc->tsn_in_at = 0;
1984 		asoc->tsn_in_wrapped = 1;
1985 	}
1986 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1987 	asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
1988 	asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
1989 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1990 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1991 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1992 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1993 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1994 	asoc->tsn_in_at++;
1995 #endif
1996 	/*
1997 	 * Before we continue lets validate that we are not being fooled by
1998 	 * an evil attacker. We can only have Nk chunks based on our TSN
1999 	 * spread allowed by the mapping array N * 8 bits, so there is no
2000 	 * way our stream sequence numbers could have wrapped. We of course
2001 	 * only validate the FIRST fragment so the bit must be set.
2002 	 */
2003 	if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
2004 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
2005 	    (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
2006 	    SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
2007 		/* The incoming sseq is behind where we last delivered? */
2008 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
2009 		    mid, asoc->strmin[sid].last_mid_delivered);
2010 
2011 		if (asoc->idata_supported) {
2012 			snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
2013 			    asoc->strmin[sid].last_mid_delivered,
2014 			    tsn,
2015 			    sid,
2016 			    mid);
2017 		} else {
2018 			snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
2019 			    (uint16_t)asoc->strmin[sid].last_mid_delivered,
2020 			    tsn,
2021 			    sid,
2022 			    (uint16_t)mid);
2023 		}
2024 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2025 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
2026 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
2027 		*abort_flag = 1;
2028 		return (0);
2029 	}
2030 	if (chk_type == SCTP_IDATA) {
2031 		the_len = (chk_length - sizeof(struct sctp_idata_chunk));
2032 	} else {
2033 		the_len = (chk_length - sizeof(struct sctp_data_chunk));
2034 	}
2035 	if (last_chunk == 0) {
2036 		if (chk_type == SCTP_IDATA) {
2037 			dmbuf = SCTP_M_COPYM(*m,
2038 			    (offset + sizeof(struct sctp_idata_chunk)),
2039 			    the_len, M_NOWAIT);
2040 		} else {
2041 			dmbuf = SCTP_M_COPYM(*m,
2042 			    (offset + sizeof(struct sctp_data_chunk)),
2043 			    the_len, M_NOWAIT);
2044 		}
2045 #ifdef SCTP_MBUF_LOGGING
2046 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2047 			sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
2048 		}
2049 #endif
2050 	} else {
2051 		/* We can steal the last chunk */
2052 		int l_len;
2053 
2054 		dmbuf = *m;
2055 		/* lop off the top part */
2056 		if (chk_type == SCTP_IDATA) {
2057 			m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
2058 		} else {
2059 			m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
2060 		}
2061 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
2062 			l_len = SCTP_BUF_LEN(dmbuf);
2063 		} else {
2064 			/*
2065 			 * need to count up the size hopefully does not hit
2066 			 * this to often :-0
2067 			 */
2068 			struct mbuf *lat;
2069 
2070 			l_len = 0;
2071 			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
2072 				l_len += SCTP_BUF_LEN(lat);
2073 			}
2074 		}
2075 		if (l_len > the_len) {
2076 			/* Trim the end round bytes off  too */
2077 			m_adj(dmbuf, -(l_len - the_len));
2078 		}
2079 	}
2080 	if (dmbuf == NULL) {
2081 		SCTP_STAT_INCR(sctps_nomem);
2082 		return (0);
2083 	}
2084 	/*
2085 	 * Now no matter what, we need a control, get one if we don't have
2086 	 * one (we may have gotten it above when we found the message was
2087 	 * fragmented
2088 	 */
2089 	if (control == NULL) {
2090 		sctp_alloc_a_readq(stcb, control);
2091 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2092 		    ppid,
2093 		    sid,
2094 		    chk_flags,
2095 		    NULL, fsn, mid);
2096 		if (control == NULL) {
2097 			SCTP_STAT_INCR(sctps_nomem);
2098 			return (0);
2099 		}
2100 		if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2101 			struct mbuf *mm;
2102 
2103 			control->data = dmbuf;
2104 			for (mm = control->data; mm; mm = mm->m_next) {
2105 				control->length += SCTP_BUF_LEN(mm);
2106 			}
2107 			control->tail_mbuf = NULL;
2108 			control->end_added = 1;
2109 			control->last_frag_seen = 1;
2110 			control->first_frag_seen = 1;
2111 			control->fsn_included = fsn;
2112 			control->top_fsn = fsn;
2113 		}
2114 		created_control = 1;
2115 	}
2116 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
2117 	    chk_flags, ordered, mid, control);
2118 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
2119 	    TAILQ_EMPTY(&asoc->resetHead) &&
2120 	    ((ordered == 0) ||
2121 	    (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2122 	    TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
2123 		/* Candidate for express delivery */
2124 		/*
2125 		 * Its not fragmented, No PD-API is up, Nothing in the
2126 		 * delivery queue, Its un-ordered OR ordered and the next to
2127 		 * deliver AND nothing else is stuck on the stream queue,
2128 		 * And there is room for it in the socket buffer. Lets just
2129 		 * stuff it up the buffer....
2130 		 */
2131 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2132 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2133 			asoc->highest_tsn_inside_nr_map = tsn;
2134 		}
2135 		SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2136 		    control, mid);
2137 
2138 		sctp_add_to_readq(stcb->sctp_ep, stcb,
2139 		    control, &stcb->sctp_socket->so_rcv,
2140 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2141 
2142 		if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
2143 			/* for ordered, bump what we delivered */
2144 			asoc->strmin[sid].last_mid_delivered++;
2145 		}
2146 		SCTP_STAT_INCR(sctps_recvexpress);
2147 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2148 			sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2149 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
2150 		}
2151 		control = NULL;
2152 		goto finish_express_del;
2153 	}
2154 
2155 	/* Now will we need a chunk too? */
2156 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2157 		sctp_alloc_a_chunk(stcb, chk);
2158 		if (chk == NULL) {
2159 			/* No memory so we drop the chunk */
2160 			SCTP_STAT_INCR(sctps_nomem);
2161 			if (last_chunk == 0) {
2162 				/* we copied it, free the copy */
2163 				sctp_m_freem(dmbuf);
2164 			}
2165 			return (0);
2166 		}
2167 		chk->rec.data.tsn = tsn;
2168 		chk->no_fr_allowed = 0;
2169 		chk->rec.data.fsn = fsn;
2170 		chk->rec.data.mid = mid;
2171 		chk->rec.data.sid = sid;
2172 		chk->rec.data.ppid = ppid;
2173 		chk->rec.data.context = stcb->asoc.context;
2174 		chk->rec.data.doing_fast_retransmit = 0;
2175 		chk->rec.data.rcv_flags = chk_flags;
2176 		chk->asoc = asoc;
2177 		chk->send_size = the_len;
2178 		chk->whoTo = net;
2179 		SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
2180 		    chk,
2181 		    control, mid);
2182 		atomic_add_int(&net->ref_count, 1);
2183 		chk->data = dmbuf;
2184 	}
2185 	/* Set the appropriate TSN mark */
2186 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2187 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2188 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2189 			asoc->highest_tsn_inside_nr_map = tsn;
2190 		}
2191 	} else {
2192 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2193 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2194 			asoc->highest_tsn_inside_map = tsn;
2195 		}
2196 	}
2197 	/* Now is it complete (i.e. not fragmented)? */
2198 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2199 		/*
2200 		 * Special check for when streams are resetting. We could be
2201 		 * more smart about this and check the actual stream to see
2202 		 * if it is not being reset.. that way we would not create a
2203 		 * HOLB when amongst streams being reset and those not being
2204 		 * reset.
2205 		 *
2206 		 */
2207 		if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2208 		    SCTP_TSN_GT(tsn, liste->tsn)) {
2209 			/*
2210 			 * yep its past where we need to reset... go ahead
2211 			 * and queue it.
2212 			 */
2213 			if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2214 				/* first one on */
2215 				TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2216 			} else {
2217 				struct sctp_queued_to_read *lcontrol, *nlcontrol;
2218 				unsigned char inserted = 0;
2219 
2220 				TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
2221 					if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
2222 
2223 						continue;
2224 					} else {
2225 						/* found it */
2226 						TAILQ_INSERT_BEFORE(lcontrol, control, next);
2227 						inserted = 1;
2228 						break;
2229 					}
2230 				}
2231 				if (inserted == 0) {
2232 					/*
2233 					 * must be put at end, use prevP
2234 					 * (all setup from loop) to setup
2235 					 * nextP.
2236 					 */
2237 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2238 				}
2239 			}
2240 			goto finish_express_del;
2241 		}
2242 		if (chk_flags & SCTP_DATA_UNORDERED) {
2243 			/* queue directly into socket buffer */
2244 			SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2245 			    control, mid);
2246 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2247 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2248 			    control,
2249 			    &stcb->sctp_socket->so_rcv, 1,
2250 			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2251 
2252 		} else {
2253 			SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2254 			    mid);
2255 			sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2256 			if (*abort_flag) {
2257 				if (last_chunk) {
2258 					*m = NULL;
2259 				}
2260 				return (0);
2261 			}
2262 		}
2263 		goto finish_express_del;
2264 	}
2265 	/* If we reach here its a reassembly */
2266 	need_reasm_check = 1;
2267 	SCTPDBG(SCTP_DEBUG_XXX,
2268 	    "Queue data to stream for reasm control: %p MID: %u\n",
2269 	    control, mid);
2270 	sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
2271 	if (*abort_flag) {
2272 		/*
2273 		 * the assoc is now gone and chk was put onto the reasm
2274 		 * queue, which has all been freed.
2275 		 */
2276 		if (last_chunk) {
2277 			*m = NULL;
2278 		}
2279 		return (0);
2280 	}
2281 finish_express_del:
2282 	/* Here we tidy up things */
2283 	if (tsn == (asoc->cumulative_tsn + 1)) {
2284 		/* Update cum-ack */
2285 		asoc->cumulative_tsn = tsn;
2286 	}
2287 	if (last_chunk) {
2288 		*m = NULL;
2289 	}
2290 	if (ordered) {
2291 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2292 	} else {
2293 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2294 	}
2295 	SCTP_STAT_INCR(sctps_recvdata);
2296 	/* Set it present please */
2297 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2298 		sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2299 	}
2300 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2301 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2302 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2303 	}
2304 	if (need_reasm_check) {
2305 		(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
2306 		need_reasm_check = 0;
2307 	}
2308 	/* check the special flag for stream resets */
2309 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2310 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2311 		/*
2312 		 * we have finished working through the backlogged TSN's now
2313 		 * time to reset streams. 1: call reset function. 2: free
2314 		 * pending_reply space 3: distribute any chunks in
2315 		 * pending_reply_queue.
2316 		 */
2317 		sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2318 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2319 		sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2320 		SCTP_FREE(liste, SCTP_M_STRESET);
2321 		/* sa_ignore FREED_MEMORY */
2322 		liste = TAILQ_FIRST(&asoc->resetHead);
2323 		if (TAILQ_EMPTY(&asoc->resetHead)) {
2324 			/* All can be removed */
2325 			TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2326 				TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2327 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2328 				if (*abort_flag) {
2329 					return (0);
2330 				}
2331 				if (need_reasm_check) {
2332 					(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2333 					need_reasm_check = 0;
2334 				}
2335 			}
2336 		} else {
2337 			TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2338 				if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
2339 					break;
2340 				}
2341 				/*
2342 				 * if control->sinfo_tsn is <= liste->tsn we
2343 				 * can process it which is the NOT of
2344 				 * control->sinfo_tsn > liste->tsn
2345 				 */
2346 				TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2347 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2348 				if (*abort_flag) {
2349 					return (0);
2350 				}
2351 				if (need_reasm_check) {
2352 					(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2353 					need_reasm_check = 0;
2354 				}
2355 			}
2356 		}
2357 	}
2358 	return (1);
2359 }
2360 
2361 static const int8_t sctp_map_lookup_tab[256] = {
2362 	0, 1, 0, 2, 0, 1, 0, 3,
2363 	0, 1, 0, 2, 0, 1, 0, 4,
2364 	0, 1, 0, 2, 0, 1, 0, 3,
2365 	0, 1, 0, 2, 0, 1, 0, 5,
2366 	0, 1, 0, 2, 0, 1, 0, 3,
2367 	0, 1, 0, 2, 0, 1, 0, 4,
2368 	0, 1, 0, 2, 0, 1, 0, 3,
2369 	0, 1, 0, 2, 0, 1, 0, 6,
2370 	0, 1, 0, 2, 0, 1, 0, 3,
2371 	0, 1, 0, 2, 0, 1, 0, 4,
2372 	0, 1, 0, 2, 0, 1, 0, 3,
2373 	0, 1, 0, 2, 0, 1, 0, 5,
2374 	0, 1, 0, 2, 0, 1, 0, 3,
2375 	0, 1, 0, 2, 0, 1, 0, 4,
2376 	0, 1, 0, 2, 0, 1, 0, 3,
2377 	0, 1, 0, 2, 0, 1, 0, 7,
2378 	0, 1, 0, 2, 0, 1, 0, 3,
2379 	0, 1, 0, 2, 0, 1, 0, 4,
2380 	0, 1, 0, 2, 0, 1, 0, 3,
2381 	0, 1, 0, 2, 0, 1, 0, 5,
2382 	0, 1, 0, 2, 0, 1, 0, 3,
2383 	0, 1, 0, 2, 0, 1, 0, 4,
2384 	0, 1, 0, 2, 0, 1, 0, 3,
2385 	0, 1, 0, 2, 0, 1, 0, 6,
2386 	0, 1, 0, 2, 0, 1, 0, 3,
2387 	0, 1, 0, 2, 0, 1, 0, 4,
2388 	0, 1, 0, 2, 0, 1, 0, 3,
2389 	0, 1, 0, 2, 0, 1, 0, 5,
2390 	0, 1, 0, 2, 0, 1, 0, 3,
2391 	0, 1, 0, 2, 0, 1, 0, 4,
2392 	0, 1, 0, 2, 0, 1, 0, 3,
2393 	0, 1, 0, 2, 0, 1, 0, 8
2394 };
2395 
2396 
2397 void
2398 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2399 {
2400 	/*
2401 	 * Now we also need to check the mapping array in a couple of ways.
2402 	 * 1) Did we move the cum-ack point?
2403 	 *
2404 	 * When you first glance at this you might think that all entries
2405 	 * that make up the position of the cum-ack would be in the
2406 	 * nr-mapping array only.. i.e. things up to the cum-ack are always
2407 	 * deliverable. Thats true with one exception, when its a fragmented
2408 	 * message we may not deliver the data until some threshold (or all
2409 	 * of it) is in place. So we must OR the nr_mapping_array and
2410 	 * mapping_array to get a true picture of the cum-ack.
2411 	 */
2412 	struct sctp_association *asoc;
2413 	int at;
2414 	uint8_t val;
2415 	int slide_from, slide_end, lgap, distance;
2416 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2417 
2418 	asoc = &stcb->asoc;
2419 
2420 	old_cumack = asoc->cumulative_tsn;
2421 	old_base = asoc->mapping_array_base_tsn;
2422 	old_highest = asoc->highest_tsn_inside_map;
2423 	/*
2424 	 * We could probably improve this a small bit by calculating the
2425 	 * offset of the current cum-ack as the starting point.
2426 	 */
2427 	at = 0;
2428 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2429 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2430 		if (val == 0xff) {
2431 			at += 8;
2432 		} else {
2433 			/* there is a 0 bit */
2434 			at += sctp_map_lookup_tab[val];
2435 			break;
2436 		}
2437 	}
2438 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
2439 
2440 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2441 	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2442 #ifdef INVARIANTS
2443 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2444 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2445 #else
2446 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2447 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2448 		sctp_print_mapping_array(asoc);
2449 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2450 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2451 		}
2452 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2453 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2454 #endif
2455 	}
2456 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2457 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2458 	} else {
2459 		highest_tsn = asoc->highest_tsn_inside_map;
2460 	}
2461 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2462 		/* The complete array was completed by a single FR */
2463 		/* highest becomes the cum-ack */
2464 		int clr;
2465 #ifdef INVARIANTS
2466 		unsigned int i;
2467 #endif
2468 
2469 		/* clear the array */
2470 		clr = ((at + 7) >> 3);
2471 		if (clr > asoc->mapping_array_size) {
2472 			clr = asoc->mapping_array_size;
2473 		}
2474 		memset(asoc->mapping_array, 0, clr);
2475 		memset(asoc->nr_mapping_array, 0, clr);
2476 #ifdef INVARIANTS
2477 		for (i = 0; i < asoc->mapping_array_size; i++) {
2478 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2479 				SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2480 				sctp_print_mapping_array(asoc);
2481 			}
2482 		}
2483 #endif
2484 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2485 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2486 	} else if (at >= 8) {
2487 		/* we can slide the mapping array down */
2488 		/* slide_from holds where we hit the first NON 0xff byte */
2489 
2490 		/*
2491 		 * now calculate the ceiling of the move using our highest
2492 		 * TSN value
2493 		 */
2494 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2495 		slide_end = (lgap >> 3);
2496 		if (slide_end < slide_from) {
2497 			sctp_print_mapping_array(asoc);
2498 #ifdef INVARIANTS
2499 			panic("impossible slide");
2500 #else
2501 			SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2502 			    lgap, slide_end, slide_from, at);
2503 			return;
2504 #endif
2505 		}
2506 		if (slide_end > asoc->mapping_array_size) {
2507 #ifdef INVARIANTS
2508 			panic("would overrun buffer");
2509 #else
2510 			SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2511 			    asoc->mapping_array_size, slide_end);
2512 			slide_end = asoc->mapping_array_size;
2513 #endif
2514 		}
2515 		distance = (slide_end - slide_from) + 1;
2516 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2517 			sctp_log_map(old_base, old_cumack, old_highest,
2518 			    SCTP_MAP_PREPARE_SLIDE);
2519 			sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2520 			    (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
2521 		}
2522 		if (distance + slide_from > asoc->mapping_array_size ||
2523 		    distance < 0) {
2524 			/*
2525 			 * Here we do NOT slide forward the array so that
2526 			 * hopefully when more data comes in to fill it up
2527 			 * we will be able to slide it forward. Really I
2528 			 * don't think this should happen :-0
2529 			 */
2530 
2531 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2532 				sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2533 				    (uint32_t)asoc->mapping_array_size,
2534 				    SCTP_MAP_SLIDE_NONE);
2535 			}
2536 		} else {
2537 			int ii;
2538 
2539 			for (ii = 0; ii < distance; ii++) {
2540 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2541 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2542 
2543 			}
2544 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2545 				asoc->mapping_array[ii] = 0;
2546 				asoc->nr_mapping_array[ii] = 0;
2547 			}
2548 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2549 				asoc->highest_tsn_inside_map += (slide_from << 3);
2550 			}
2551 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2552 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2553 			}
2554 			asoc->mapping_array_base_tsn += (slide_from << 3);
2555 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2556 				sctp_log_map(asoc->mapping_array_base_tsn,
2557 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2558 				    SCTP_MAP_SLIDE_RESULT);
2559 			}
2560 		}
2561 	}
2562 }
2563 
2564 void
2565 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2566 {
2567 	struct sctp_association *asoc;
2568 	uint32_t highest_tsn;
2569 	int is_a_gap;
2570 
2571 	sctp_slide_mapping_arrays(stcb);
2572 	asoc = &stcb->asoc;
2573 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2574 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2575 	} else {
2576 		highest_tsn = asoc->highest_tsn_inside_map;
2577 	}
2578 	/* Is there a gap now? */
2579 	is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2580 
2581 	/*
2582 	 * Now we need to see if we need to queue a sack or just start the
2583 	 * timer (if allowed).
2584 	 */
2585 	if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2586 		/*
2587 		 * Ok special case, in SHUTDOWN-SENT case. here we maker
2588 		 * sure SACK timer is off and instead send a SHUTDOWN and a
2589 		 * SACK
2590 		 */
2591 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2592 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2593 			    stcb->sctp_ep, stcb, NULL,
2594 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2595 		}
2596 		sctp_send_shutdown(stcb,
2597 		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2598 		if (is_a_gap) {
2599 			sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2600 		}
2601 	} else {
2602 		/*
2603 		 * CMT DAC algorithm: increase number of packets received
2604 		 * since last ack
2605 		 */
2606 		stcb->asoc.cmt_dac_pkts_rcvd++;
2607 
2608 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2609 							 * SACK */
2610 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2611 							 * longer is one */
2612 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2613 		    (is_a_gap) ||	/* is still a gap */
2614 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2615 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2616 		    ) {
2617 
2618 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2619 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2620 			    (stcb->asoc.send_sack == 0) &&
2621 			    (stcb->asoc.numduptsns == 0) &&
2622 			    (stcb->asoc.delayed_ack) &&
2623 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2624 
2625 				/*
2626 				 * CMT DAC algorithm: With CMT, delay acks
2627 				 * even in the face of
2628 				 *
2629 				 * reordering. Therefore, if acks that do
2630 				 * not have to be sent because of the above
2631 				 * reasons, will be delayed. That is, acks
2632 				 * that would have been sent due to gap
2633 				 * reports will be delayed with DAC. Start
2634 				 * the delayed ack timer.
2635 				 */
2636 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2637 				    stcb->sctp_ep, stcb, NULL);
2638 			} else {
2639 				/*
2640 				 * Ok we must build a SACK since the timer
2641 				 * is pending, we got our first packet OR
2642 				 * there are gaps or duplicates.
2643 				 */
2644 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2645 				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2646 			}
2647 		} else {
2648 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2649 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2650 				    stcb->sctp_ep, stcb, NULL);
2651 			}
2652 		}
2653 	}
2654 }
2655 
2656 int
2657 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2658     struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2659     struct sctp_nets *net, uint32_t *high_tsn)
2660 {
2661 	struct sctp_chunkhdr *ch, chunk_buf;
2662 	struct sctp_association *asoc;
2663 	int num_chunks = 0;	/* number of control chunks processed */
2664 	int stop_proc = 0;
2665 	int break_flag, last_chunk;
2666 	int abort_flag = 0, was_a_gap;
2667 	struct mbuf *m;
2668 	uint32_t highest_tsn;
2669 	uint16_t chk_length;
2670 
2671 	/* set the rwnd */
2672 	sctp_set_rwnd(stcb, &stcb->asoc);
2673 
2674 	m = *mm;
2675 	SCTP_TCB_LOCK_ASSERT(stcb);
2676 	asoc = &stcb->asoc;
2677 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2678 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2679 	} else {
2680 		highest_tsn = asoc->highest_tsn_inside_map;
2681 	}
2682 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2683 	/*
2684 	 * setup where we got the last DATA packet from for any SACK that
2685 	 * may need to go out. Don't bump the net. This is done ONLY when a
2686 	 * chunk is assigned.
2687 	 */
2688 	asoc->last_data_chunk_from = net;
2689 
2690 	/*-
2691 	 * Now before we proceed we must figure out if this is a wasted
2692 	 * cluster... i.e. it is a small packet sent in and yet the driver
2693 	 * underneath allocated a full cluster for it. If so we must copy it
2694 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2695 	 * with cluster starvation. Note for __Panda__ we don't do this
2696 	 * since it has clusters all the way down to 64 bytes.
2697 	 */
2698 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2699 		/* we only handle mbufs that are singletons.. not chains */
2700 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2701 		if (m) {
2702 			/* ok lets see if we can copy the data up */
2703 			caddr_t *from, *to;
2704 
2705 			/* get the pointers and copy */
2706 			to = mtod(m, caddr_t *);
2707 			from = mtod((*mm), caddr_t *);
2708 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2709 			/* copy the length and free up the old */
2710 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2711 			sctp_m_freem(*mm);
2712 			/* success, back copy */
2713 			*mm = m;
2714 		} else {
2715 			/* We are in trouble in the mbuf world .. yikes */
2716 			m = *mm;
2717 		}
2718 	}
2719 	/* get pointer to the first chunk header */
2720 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2721 	    sizeof(struct sctp_chunkhdr),
2722 	    (uint8_t *)&chunk_buf);
2723 	if (ch == NULL) {
2724 		return (1);
2725 	}
2726 	/*
2727 	 * process all DATA chunks...
2728 	 */
2729 	*high_tsn = asoc->cumulative_tsn;
2730 	break_flag = 0;
2731 	asoc->data_pkts_seen++;
2732 	while (stop_proc == 0) {
2733 		/* validate chunk length */
2734 		chk_length = ntohs(ch->chunk_length);
2735 		if (length - *offset < chk_length) {
2736 			/* all done, mutulated chunk */
2737 			stop_proc = 1;
2738 			continue;
2739 		}
2740 		if ((asoc->idata_supported == 1) &&
2741 		    (ch->chunk_type == SCTP_DATA)) {
2742 			struct mbuf *op_err;
2743 			char msg[SCTP_DIAG_INFO_LEN];
2744 
2745 			snprintf(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2746 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2747 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2748 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2749 			return (2);
2750 		}
2751 		if ((asoc->idata_supported == 0) &&
2752 		    (ch->chunk_type == SCTP_IDATA)) {
2753 			struct mbuf *op_err;
2754 			char msg[SCTP_DIAG_INFO_LEN];
2755 
2756 			snprintf(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2757 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2758 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2759 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2760 			return (2);
2761 		}
2762 		if ((ch->chunk_type == SCTP_DATA) ||
2763 		    (ch->chunk_type == SCTP_IDATA)) {
2764 			uint16_t clen;
2765 
2766 			if (ch->chunk_type == SCTP_DATA) {
2767 				clen = sizeof(struct sctp_data_chunk);
2768 			} else {
2769 				clen = sizeof(struct sctp_idata_chunk);
2770 			}
2771 			if (chk_length < clen) {
2772 				/*
2773 				 * Need to send an abort since we had a
2774 				 * invalid data chunk.
2775 				 */
2776 				struct mbuf *op_err;
2777 				char msg[SCTP_DIAG_INFO_LEN];
2778 
2779 				snprintf(msg, sizeof(msg), "%s chunk of length %u",
2780 				    ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
2781 				    chk_length);
2782 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2783 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2784 				sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2785 				return (2);
2786 			}
2787 #ifdef SCTP_AUDITING_ENABLED
2788 			sctp_audit_log(0xB1, 0);
2789 #endif
2790 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2791 				last_chunk = 1;
2792 			} else {
2793 				last_chunk = 0;
2794 			}
2795 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2796 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2797 			    last_chunk, ch->chunk_type)) {
2798 				num_chunks++;
2799 			}
2800 			if (abort_flag)
2801 				return (2);
2802 
2803 			if (break_flag) {
2804 				/*
2805 				 * Set because of out of rwnd space and no
2806 				 * drop rep space left.
2807 				 */
2808 				stop_proc = 1;
2809 				continue;
2810 			}
2811 		} else {
2812 			/* not a data chunk in the data region */
2813 			switch (ch->chunk_type) {
2814 			case SCTP_INITIATION:
2815 			case SCTP_INITIATION_ACK:
2816 			case SCTP_SELECTIVE_ACK:
2817 			case SCTP_NR_SELECTIVE_ACK:
2818 			case SCTP_HEARTBEAT_REQUEST:
2819 			case SCTP_HEARTBEAT_ACK:
2820 			case SCTP_ABORT_ASSOCIATION:
2821 			case SCTP_SHUTDOWN:
2822 			case SCTP_SHUTDOWN_ACK:
2823 			case SCTP_OPERATION_ERROR:
2824 			case SCTP_COOKIE_ECHO:
2825 			case SCTP_COOKIE_ACK:
2826 			case SCTP_ECN_ECHO:
2827 			case SCTP_ECN_CWR:
2828 			case SCTP_SHUTDOWN_COMPLETE:
2829 			case SCTP_AUTHENTICATION:
2830 			case SCTP_ASCONF_ACK:
2831 			case SCTP_PACKET_DROPPED:
2832 			case SCTP_STREAM_RESET:
2833 			case SCTP_FORWARD_CUM_TSN:
2834 			case SCTP_ASCONF:
2835 				{
2836 					/*
2837 					 * Now, what do we do with KNOWN
2838 					 * chunks that are NOT in the right
2839 					 * place?
2840 					 *
2841 					 * For now, I do nothing but ignore
2842 					 * them. We may later want to add
2843 					 * sysctl stuff to switch out and do
2844 					 * either an ABORT() or possibly
2845 					 * process them.
2846 					 */
2847 					struct mbuf *op_err;
2848 					char msg[SCTP_DIAG_INFO_LEN];
2849 
2850 					snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2851 					    ch->chunk_type);
2852 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2853 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2854 					return (2);
2855 				}
2856 			default:
2857 				/*
2858 				 * Unknown chunk type: use bit rules after
2859 				 * checking length
2860 				 */
2861 				if (chk_length < sizeof(struct sctp_chunkhdr)) {
2862 					/*
2863 					 * Need to send an abort since we
2864 					 * had a invalid chunk.
2865 					 */
2866 					struct mbuf *op_err;
2867 					char msg[SCTP_DIAG_INFO_LEN];
2868 
2869 					snprintf(msg, sizeof(msg), "Chunk of length %u",
2870 					    chk_length);
2871 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2872 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2873 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2874 					return (2);
2875 				}
2876 				if (ch->chunk_type & 0x40) {
2877 					/* Add a error report to the queue */
2878 					struct mbuf *op_err;
2879 					struct sctp_gen_error_cause *cause;
2880 
2881 					op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2882 					    0, M_NOWAIT, 1, MT_DATA);
2883 					if (op_err != NULL) {
2884 						cause = mtod(op_err, struct sctp_gen_error_cause *);
2885 						cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2886 						cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
2887 						SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2888 						SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2889 						if (SCTP_BUF_NEXT(op_err) != NULL) {
2890 							sctp_queue_op_err(stcb, op_err);
2891 						} else {
2892 							sctp_m_freem(op_err);
2893 						}
2894 					}
2895 				}
2896 				if ((ch->chunk_type & 0x80) == 0) {
2897 					/* discard the rest of this packet */
2898 					stop_proc = 1;
2899 				}	/* else skip this bad chunk and
2900 					 * continue... */
2901 				break;
2902 			}	/* switch of chunk type */
2903 		}
2904 		*offset += SCTP_SIZE32(chk_length);
2905 		if ((*offset >= length) || stop_proc) {
2906 			/* no more data left in the mbuf chain */
2907 			stop_proc = 1;
2908 			continue;
2909 		}
2910 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2911 		    sizeof(struct sctp_chunkhdr),
2912 		    (uint8_t *)&chunk_buf);
2913 		if (ch == NULL) {
2914 			*offset = length;
2915 			stop_proc = 1;
2916 			continue;
2917 		}
2918 	}
2919 	if (break_flag) {
2920 		/*
2921 		 * we need to report rwnd overrun drops.
2922 		 */
2923 		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2924 	}
2925 	if (num_chunks) {
2926 		/*
2927 		 * Did we get data, if so update the time for auto-close and
2928 		 * give peer credit for being alive.
2929 		 */
2930 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2931 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2932 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2933 			    stcb->asoc.overall_error_count,
2934 			    0,
2935 			    SCTP_FROM_SCTP_INDATA,
2936 			    __LINE__);
2937 		}
2938 		stcb->asoc.overall_error_count = 0;
2939 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2940 	}
2941 	/* now service all of the reassm queue if needed */
2942 	if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2943 		/* Assure that we ack right away */
2944 		stcb->asoc.send_sack = 1;
2945 	}
2946 	/* Start a sack timer or QUEUE a SACK for sending */
2947 	sctp_sack_check(stcb, was_a_gap);
2948 	return (0);
2949 }
2950 
2951 static int
2952 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2953     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2954     int *num_frs,
2955     uint32_t *biggest_newly_acked_tsn,
2956     uint32_t *this_sack_lowest_newack,
2957     int *rto_ok)
2958 {
2959 	struct sctp_tmit_chunk *tp1;
2960 	unsigned int theTSN;
2961 	int j, wake_him = 0, circled = 0;
2962 
2963 	/* Recover the tp1 we last saw */
2964 	tp1 = *p_tp1;
2965 	if (tp1 == NULL) {
2966 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2967 	}
2968 	for (j = frag_strt; j <= frag_end; j++) {
2969 		theTSN = j + last_tsn;
2970 		while (tp1) {
2971 			if (tp1->rec.data.doing_fast_retransmit)
2972 				(*num_frs) += 1;
2973 
2974 			/*-
2975 			 * CMT: CUCv2 algorithm. For each TSN being
2976 			 * processed from the sent queue, track the
2977 			 * next expected pseudo-cumack, or
2978 			 * rtx_pseudo_cumack, if required. Separate
2979 			 * cumack trackers for first transmissions,
2980 			 * and retransmissions.
2981 			 */
2982 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2983 			    (tp1->whoTo->find_pseudo_cumack == 1) &&
2984 			    (tp1->snd_count == 1)) {
2985 				tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
2986 				tp1->whoTo->find_pseudo_cumack = 0;
2987 			}
2988 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2989 			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2990 			    (tp1->snd_count > 1)) {
2991 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
2992 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2993 			}
2994 			if (tp1->rec.data.tsn == theTSN) {
2995 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2996 					/*-
2997 					 * must be held until
2998 					 * cum-ack passes
2999 					 */
3000 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3001 						/*-
3002 						 * If it is less than RESEND, it is
3003 						 * now no-longer in flight.
3004 						 * Higher values may already be set
3005 						 * via previous Gap Ack Blocks...
3006 						 * i.e. ACKED or RESEND.
3007 						 */
3008 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3009 						    *biggest_newly_acked_tsn)) {
3010 							*biggest_newly_acked_tsn = tp1->rec.data.tsn;
3011 						}
3012 						/*-
3013 						 * CMT: SFR algo (and HTNA) - set
3014 						 * saw_newack to 1 for dest being
3015 						 * newly acked. update
3016 						 * this_sack_highest_newack if
3017 						 * appropriate.
3018 						 */
3019 						if (tp1->rec.data.chunk_was_revoked == 0)
3020 							tp1->whoTo->saw_newack = 1;
3021 
3022 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3023 						    tp1->whoTo->this_sack_highest_newack)) {
3024 							tp1->whoTo->this_sack_highest_newack =
3025 							    tp1->rec.data.tsn;
3026 						}
3027 						/*-
3028 						 * CMT DAC algo: also update
3029 						 * this_sack_lowest_newack
3030 						 */
3031 						if (*this_sack_lowest_newack == 0) {
3032 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3033 								sctp_log_sack(*this_sack_lowest_newack,
3034 								    last_tsn,
3035 								    tp1->rec.data.tsn,
3036 								    0,
3037 								    0,
3038 								    SCTP_LOG_TSN_ACKED);
3039 							}
3040 							*this_sack_lowest_newack = tp1->rec.data.tsn;
3041 						}
3042 						/*-
3043 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
3044 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
3045 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
3046 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
3047 						 * Separate pseudo_cumack trackers for first transmissions and
3048 						 * retransmissions.
3049 						 */
3050 						if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
3051 							if (tp1->rec.data.chunk_was_revoked == 0) {
3052 								tp1->whoTo->new_pseudo_cumack = 1;
3053 							}
3054 							tp1->whoTo->find_pseudo_cumack = 1;
3055 						}
3056 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3057 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3058 						}
3059 						if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
3060 							if (tp1->rec.data.chunk_was_revoked == 0) {
3061 								tp1->whoTo->new_pseudo_cumack = 1;
3062 							}
3063 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
3064 						}
3065 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3066 							sctp_log_sack(*biggest_newly_acked_tsn,
3067 							    last_tsn,
3068 							    tp1->rec.data.tsn,
3069 							    frag_strt,
3070 							    frag_end,
3071 							    SCTP_LOG_TSN_ACKED);
3072 						}
3073 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3074 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3075 							    tp1->whoTo->flight_size,
3076 							    tp1->book_size,
3077 							    (uint32_t)(uintptr_t)tp1->whoTo,
3078 							    tp1->rec.data.tsn);
3079 						}
3080 						sctp_flight_size_decrease(tp1);
3081 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3082 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3083 							    tp1);
3084 						}
3085 						sctp_total_flight_decrease(stcb, tp1);
3086 
3087 						tp1->whoTo->net_ack += tp1->send_size;
3088 						if (tp1->snd_count < 2) {
3089 							/*-
3090 							 * True non-retransmitted chunk
3091 							 */
3092 							tp1->whoTo->net_ack2 += tp1->send_size;
3093 
3094 							/*-
3095 							 * update RTO too ?
3096 							 */
3097 							if (tp1->do_rtt) {
3098 								if (*rto_ok) {
3099 									tp1->whoTo->RTO =
3100 									    sctp_calculate_rto(stcb,
3101 									    &stcb->asoc,
3102 									    tp1->whoTo,
3103 									    &tp1->sent_rcv_time,
3104 									    SCTP_RTT_FROM_DATA);
3105 									*rto_ok = 0;
3106 								}
3107 								if (tp1->whoTo->rto_needed == 0) {
3108 									tp1->whoTo->rto_needed = 1;
3109 								}
3110 								tp1->do_rtt = 0;
3111 							}
3112 						}
3113 
3114 					}
3115 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3116 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3117 						    stcb->asoc.this_sack_highest_gap)) {
3118 							stcb->asoc.this_sack_highest_gap =
3119 							    tp1->rec.data.tsn;
3120 						}
3121 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3122 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3123 #ifdef SCTP_AUDITING_ENABLED
3124 							sctp_audit_log(0xB2,
3125 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3126 #endif
3127 						}
3128 					}
3129 					/*-
3130 					 * All chunks NOT UNSENT fall through here and are marked
3131 					 * (leave PR-SCTP ones that are to skip alone though)
3132 					 */
3133 					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
3134 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3135 						tp1->sent = SCTP_DATAGRAM_MARKED;
3136 					}
3137 					if (tp1->rec.data.chunk_was_revoked) {
3138 						/* deflate the cwnd */
3139 						tp1->whoTo->cwnd -= tp1->book_size;
3140 						tp1->rec.data.chunk_was_revoked = 0;
3141 					}
3142 					/* NR Sack code here */
3143 					if (nr_sacking &&
3144 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3145 						if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3146 							stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
3147 #ifdef INVARIANTS
3148 						} else {
3149 							panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
3150 #endif
3151 						}
3152 						if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3153 						    (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3154 						    TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3155 							stcb->asoc.trigger_reset = 1;
3156 						}
3157 						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3158 						if (tp1->data) {
3159 							/*
3160 							 * sa_ignore
3161 							 * NO_NULL_CHK
3162 							 */
3163 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3164 							sctp_m_freem(tp1->data);
3165 							tp1->data = NULL;
3166 						}
3167 						wake_him++;
3168 					}
3169 				}
3170 				break;
3171 			}	/* if (tp1->tsn == theTSN) */
3172 			if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
3173 				break;
3174 			}
3175 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3176 			if ((tp1 == NULL) && (circled == 0)) {
3177 				circled++;
3178 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3179 			}
3180 		}		/* end while (tp1) */
3181 		if (tp1 == NULL) {
3182 			circled = 0;
3183 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3184 		}
3185 		/* In case the fragments were not in order we must reset */
3186 	}			/* end for (j = fragStart */
3187 	*p_tp1 = tp1;
3188 	return (wake_him);	/* Return value only used for nr-sack */
3189 }
3190 
3191 
3192 static int
3193 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3194     uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3195     uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
3196     int num_seg, int num_nr_seg, int *rto_ok)
3197 {
3198 	struct sctp_gap_ack_block *frag, block;
3199 	struct sctp_tmit_chunk *tp1;
3200 	int i;
3201 	int num_frs = 0;
3202 	int chunk_freed;
3203 	int non_revocable;
3204 	uint16_t frag_strt, frag_end, prev_frag_end;
3205 
3206 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3207 	prev_frag_end = 0;
3208 	chunk_freed = 0;
3209 
3210 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3211 		if (i == num_seg) {
3212 			prev_frag_end = 0;
3213 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3214 		}
3215 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3216 		    sizeof(struct sctp_gap_ack_block), (uint8_t *)&block);
3217 		*offset += sizeof(block);
3218 		if (frag == NULL) {
3219 			return (chunk_freed);
3220 		}
3221 		frag_strt = ntohs(frag->start);
3222 		frag_end = ntohs(frag->end);
3223 
3224 		if (frag_strt > frag_end) {
3225 			/* This gap report is malformed, skip it. */
3226 			continue;
3227 		}
3228 		if (frag_strt <= prev_frag_end) {
3229 			/* This gap report is not in order, so restart. */
3230 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3231 		}
3232 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3233 			*biggest_tsn_acked = last_tsn + frag_end;
3234 		}
3235 		if (i < num_seg) {
3236 			non_revocable = 0;
3237 		} else {
3238 			non_revocable = 1;
3239 		}
3240 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3241 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3242 		    this_sack_lowest_newack, rto_ok)) {
3243 			chunk_freed = 1;
3244 		}
3245 		prev_frag_end = frag_end;
3246 	}
3247 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3248 		if (num_frs)
3249 			sctp_log_fr(*biggest_tsn_acked,
3250 			    *biggest_newly_acked_tsn,
3251 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3252 	}
3253 	return (chunk_freed);
3254 }
3255 
3256 static void
3257 sctp_check_for_revoked(struct sctp_tcb *stcb,
3258     struct sctp_association *asoc, uint32_t cumack,
3259     uint32_t biggest_tsn_acked)
3260 {
3261 	struct sctp_tmit_chunk *tp1;
3262 
3263 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3264 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3265 			/*
3266 			 * ok this guy is either ACK or MARKED. If it is
3267 			 * ACKED it has been previously acked but not this
3268 			 * time i.e. revoked.  If it is MARKED it was ACK'ed
3269 			 * again.
3270 			 */
3271 			if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3272 				break;
3273 			}
3274 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3275 				/* it has been revoked */
3276 				tp1->sent = SCTP_DATAGRAM_SENT;
3277 				tp1->rec.data.chunk_was_revoked = 1;
3278 				/*
3279 				 * We must add this stuff back in to assure
3280 				 * timers and such get started.
3281 				 */
3282 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3283 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3284 					    tp1->whoTo->flight_size,
3285 					    tp1->book_size,
3286 					    (uint32_t)(uintptr_t)tp1->whoTo,
3287 					    tp1->rec.data.tsn);
3288 				}
3289 				sctp_flight_size_increase(tp1);
3290 				sctp_total_flight_increase(stcb, tp1);
3291 				/*
3292 				 * We inflate the cwnd to compensate for our
3293 				 * artificial inflation of the flight_size.
3294 				 */
3295 				tp1->whoTo->cwnd += tp1->book_size;
3296 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3297 					sctp_log_sack(asoc->last_acked_seq,
3298 					    cumack,
3299 					    tp1->rec.data.tsn,
3300 					    0,
3301 					    0,
3302 					    SCTP_LOG_TSN_REVOKED);
3303 				}
3304 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3305 				/* it has been re-acked in this SACK */
3306 				tp1->sent = SCTP_DATAGRAM_ACKED;
3307 			}
3308 		}
3309 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3310 			break;
3311 	}
3312 }
3313 
3314 
3315 static void
3316 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3317     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3318 {
3319 	struct sctp_tmit_chunk *tp1;
3320 	int strike_flag = 0;
3321 	struct timeval now;
3322 	int tot_retrans = 0;
3323 	uint32_t sending_seq;
3324 	struct sctp_nets *net;
3325 	int num_dests_sacked = 0;
3326 
3327 	/*
3328 	 * select the sending_seq, this is either the next thing ready to be
3329 	 * sent but not transmitted, OR, the next seq we assign.
3330 	 */
3331 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3332 	if (tp1 == NULL) {
3333 		sending_seq = asoc->sending_seq;
3334 	} else {
3335 		sending_seq = tp1->rec.data.tsn;
3336 	}
3337 
3338 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3339 	if ((asoc->sctp_cmt_on_off > 0) &&
3340 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3341 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3342 			if (net->saw_newack)
3343 				num_dests_sacked++;
3344 		}
3345 	}
3346 	if (stcb->asoc.prsctp_supported) {
3347 		(void)SCTP_GETTIME_TIMEVAL(&now);
3348 	}
3349 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3350 		strike_flag = 0;
3351 		if (tp1->no_fr_allowed) {
3352 			/* this one had a timeout or something */
3353 			continue;
3354 		}
3355 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3356 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3357 				sctp_log_fr(biggest_tsn_newly_acked,
3358 				    tp1->rec.data.tsn,
3359 				    tp1->sent,
3360 				    SCTP_FR_LOG_CHECK_STRIKE);
3361 		}
3362 		if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3363 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3364 			/* done */
3365 			break;
3366 		}
3367 		if (stcb->asoc.prsctp_supported) {
3368 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3369 				/* Is it expired? */
3370 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3371 					/* Yes so drop it */
3372 					if (tp1->data != NULL) {
3373 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3374 						    SCTP_SO_NOT_LOCKED);
3375 					}
3376 					continue;
3377 				}
3378 			}
3379 
3380 		}
3381 		if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) &&
3382 		    !(accum_moved && asoc->fast_retran_loss_recovery)) {
3383 			/* we are beyond the tsn in the sack  */
3384 			break;
3385 		}
3386 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3387 			/* either a RESEND, ACKED, or MARKED */
3388 			/* skip */
3389 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3390 				/* Continue strikin FWD-TSN chunks */
3391 				tp1->rec.data.fwd_tsn_cnt++;
3392 			}
3393 			continue;
3394 		}
3395 		/*
3396 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3397 		 */
3398 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3399 			/*
3400 			 * No new acks were receieved for data sent to this
3401 			 * dest. Therefore, according to the SFR algo for
3402 			 * CMT, no data sent to this dest can be marked for
3403 			 * FR using this SACK.
3404 			 */
3405 			continue;
3406 		} else if (tp1->whoTo &&
3407 			    SCTP_TSN_GT(tp1->rec.data.tsn,
3408 			    tp1->whoTo->this_sack_highest_newack) &&
3409 		    !(accum_moved && asoc->fast_retran_loss_recovery)) {
3410 			/*
3411 			 * CMT: New acks were receieved for data sent to
3412 			 * this dest. But no new acks were seen for data
3413 			 * sent after tp1. Therefore, according to the SFR
3414 			 * algo for CMT, tp1 cannot be marked for FR using
3415 			 * this SACK. This step covers part of the DAC algo
3416 			 * and the HTNA algo as well.
3417 			 */
3418 			continue;
3419 		}
3420 		/*
3421 		 * Here we check to see if we were have already done a FR
3422 		 * and if so we see if the biggest TSN we saw in the sack is
3423 		 * smaller than the recovery point. If so we don't strike
3424 		 * the tsn... otherwise we CAN strike the TSN.
3425 		 */
3426 		/*
3427 		 * @@@ JRI: Check for CMT if (accum_moved &&
3428 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3429 		 * 0)) {
3430 		 */
3431 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3432 			/*
3433 			 * Strike the TSN if in fast-recovery and cum-ack
3434 			 * moved.
3435 			 */
3436 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3437 				sctp_log_fr(biggest_tsn_newly_acked,
3438 				    tp1->rec.data.tsn,
3439 				    tp1->sent,
3440 				    SCTP_FR_LOG_STRIKE_CHUNK);
3441 			}
3442 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3443 				tp1->sent++;
3444 			}
3445 			if ((asoc->sctp_cmt_on_off > 0) &&
3446 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3447 				/*
3448 				 * CMT DAC algorithm: If SACK flag is set to
3449 				 * 0, then lowest_newack test will not pass
3450 				 * because it would have been set to the
3451 				 * cumack earlier. If not already to be
3452 				 * rtx'd, If not a mixed sack and if tp1 is
3453 				 * not between two sacked TSNs, then mark by
3454 				 * one more. NOTE that we are marking by one
3455 				 * additional time since the SACK DAC flag
3456 				 * indicates that two packets have been
3457 				 * received after this missing TSN.
3458 				 */
3459 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3460 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3461 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3462 						sctp_log_fr(16 + num_dests_sacked,
3463 						    tp1->rec.data.tsn,
3464 						    tp1->sent,
3465 						    SCTP_FR_LOG_STRIKE_CHUNK);
3466 					}
3467 					tp1->sent++;
3468 				}
3469 			}
3470 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3471 		    (asoc->sctp_cmt_on_off == 0)) {
3472 			/*
3473 			 * For those that have done a FR we must take
3474 			 * special consideration if we strike. I.e the
3475 			 * biggest_newly_acked must be higher than the
3476 			 * sending_seq at the time we did the FR.
3477 			 */
3478 			if (
3479 #ifdef SCTP_FR_TO_ALTERNATE
3480 			/*
3481 			 * If FR's go to new networks, then we must only do
3482 			 * this for singly homed asoc's. However if the FR's
3483 			 * go to the same network (Armando's work) then its
3484 			 * ok to FR multiple times.
3485 			 */
3486 			    (asoc->numnets < 2)
3487 #else
3488 			    (1)
3489 #endif
3490 			    ) {
3491 
3492 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3493 				    tp1->rec.data.fast_retran_tsn)) {
3494 					/*
3495 					 * Strike the TSN, since this ack is
3496 					 * beyond where things were when we
3497 					 * did a FR.
3498 					 */
3499 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3500 						sctp_log_fr(biggest_tsn_newly_acked,
3501 						    tp1->rec.data.tsn,
3502 						    tp1->sent,
3503 						    SCTP_FR_LOG_STRIKE_CHUNK);
3504 					}
3505 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3506 						tp1->sent++;
3507 					}
3508 					strike_flag = 1;
3509 					if ((asoc->sctp_cmt_on_off > 0) &&
3510 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3511 						/*
3512 						 * CMT DAC algorithm: If
3513 						 * SACK flag is set to 0,
3514 						 * then lowest_newack test
3515 						 * will not pass because it
3516 						 * would have been set to
3517 						 * the cumack earlier. If
3518 						 * not already to be rtx'd,
3519 						 * If not a mixed sack and
3520 						 * if tp1 is not between two
3521 						 * sacked TSNs, then mark by
3522 						 * one more. NOTE that we
3523 						 * are marking by one
3524 						 * additional time since the
3525 						 * SACK DAC flag indicates
3526 						 * that two packets have
3527 						 * been received after this
3528 						 * missing TSN.
3529 						 */
3530 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3531 						    (num_dests_sacked == 1) &&
3532 						    SCTP_TSN_GT(this_sack_lowest_newack,
3533 						    tp1->rec.data.tsn)) {
3534 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3535 								sctp_log_fr(32 + num_dests_sacked,
3536 								    tp1->rec.data.tsn,
3537 								    tp1->sent,
3538 								    SCTP_FR_LOG_STRIKE_CHUNK);
3539 							}
3540 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3541 								tp1->sent++;
3542 							}
3543 						}
3544 					}
3545 				}
3546 			}
3547 			/*
3548 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3549 			 * algo covers HTNA.
3550 			 */
3551 		} else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3552 		    biggest_tsn_newly_acked)) {
3553 			/*
3554 			 * We don't strike these: This is the  HTNA
3555 			 * algorithm i.e. we don't strike If our TSN is
3556 			 * larger than the Highest TSN Newly Acked.
3557 			 */
3558 			;
3559 		} else {
3560 			/* Strike the TSN */
3561 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3562 				sctp_log_fr(biggest_tsn_newly_acked,
3563 				    tp1->rec.data.tsn,
3564 				    tp1->sent,
3565 				    SCTP_FR_LOG_STRIKE_CHUNK);
3566 			}
3567 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3568 				tp1->sent++;
3569 			}
3570 			if ((asoc->sctp_cmt_on_off > 0) &&
3571 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3572 				/*
3573 				 * CMT DAC algorithm: If SACK flag is set to
3574 				 * 0, then lowest_newack test will not pass
3575 				 * because it would have been set to the
3576 				 * cumack earlier. If not already to be
3577 				 * rtx'd, If not a mixed sack and if tp1 is
3578 				 * not between two sacked TSNs, then mark by
3579 				 * one more. NOTE that we are marking by one
3580 				 * additional time since the SACK DAC flag
3581 				 * indicates that two packets have been
3582 				 * received after this missing TSN.
3583 				 */
3584 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3585 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3586 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3587 						sctp_log_fr(48 + num_dests_sacked,
3588 						    tp1->rec.data.tsn,
3589 						    tp1->sent,
3590 						    SCTP_FR_LOG_STRIKE_CHUNK);
3591 					}
3592 					tp1->sent++;
3593 				}
3594 			}
3595 		}
3596 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3597 			struct sctp_nets *alt;
3598 
3599 			/* fix counts and things */
3600 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3601 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3602 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3603 				    tp1->book_size,
3604 				    (uint32_t)(uintptr_t)tp1->whoTo,
3605 				    tp1->rec.data.tsn);
3606 			}
3607 			if (tp1->whoTo) {
3608 				tp1->whoTo->net_ack++;
3609 				sctp_flight_size_decrease(tp1);
3610 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3611 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3612 					    tp1);
3613 				}
3614 			}
3615 
3616 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3617 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3618 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3619 			}
3620 			/* add back to the rwnd */
3621 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3622 
3623 			/* remove from the total flight */
3624 			sctp_total_flight_decrease(stcb, tp1);
3625 
3626 			if ((stcb->asoc.prsctp_supported) &&
3627 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3628 				/*
3629 				 * Has it been retransmitted tv_sec times? -
3630 				 * we store the retran count there.
3631 				 */
3632 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3633 					/* Yes, so drop it */
3634 					if (tp1->data != NULL) {
3635 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3636 						    SCTP_SO_NOT_LOCKED);
3637 					}
3638 					/* Make sure to flag we had a FR */
3639 					if (tp1->whoTo != NULL) {
3640 						tp1->whoTo->net_ack++;
3641 					}
3642 					continue;
3643 				}
3644 			}
3645 			/*
3646 			 * SCTP_PRINTF("OK, we are now ready to FR this
3647 			 * guy\n");
3648 			 */
3649 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3650 				sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3651 				    0, SCTP_FR_MARKED);
3652 			}
3653 			if (strike_flag) {
3654 				/* This is a subsequent FR */
3655 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3656 			}
3657 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3658 			if (asoc->sctp_cmt_on_off > 0) {
3659 				/*
3660 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3661 				 * If CMT is being used, then pick dest with
3662 				 * largest ssthresh for any retransmission.
3663 				 */
3664 				tp1->no_fr_allowed = 1;
3665 				alt = tp1->whoTo;
3666 				/* sa_ignore NO_NULL_CHK */
3667 				if (asoc->sctp_cmt_pf > 0) {
3668 					/*
3669 					 * JRS 5/18/07 - If CMT PF is on,
3670 					 * use the PF version of
3671 					 * find_alt_net()
3672 					 */
3673 					alt = sctp_find_alternate_net(stcb, alt, 2);
3674 				} else {
3675 					/*
3676 					 * JRS 5/18/07 - If only CMT is on,
3677 					 * use the CMT version of
3678 					 * find_alt_net()
3679 					 */
3680 					/* sa_ignore NO_NULL_CHK */
3681 					alt = sctp_find_alternate_net(stcb, alt, 1);
3682 				}
3683 				if (alt == NULL) {
3684 					alt = tp1->whoTo;
3685 				}
3686 				/*
3687 				 * CUCv2: If a different dest is picked for
3688 				 * the retransmission, then new
3689 				 * (rtx-)pseudo_cumack needs to be tracked
3690 				 * for orig dest. Let CUCv2 track new (rtx-)
3691 				 * pseudo-cumack always.
3692 				 */
3693 				if (tp1->whoTo) {
3694 					tp1->whoTo->find_pseudo_cumack = 1;
3695 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3696 				}
3697 
3698 			} else {	/* CMT is OFF */
3699 
3700 #ifdef SCTP_FR_TO_ALTERNATE
3701 				/* Can we find an alternate? */
3702 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3703 #else
3704 				/*
3705 				 * default behavior is to NOT retransmit
3706 				 * FR's to an alternate. Armando Caro's
3707 				 * paper details why.
3708 				 */
3709 				alt = tp1->whoTo;
3710 #endif
3711 			}
3712 
3713 			tp1->rec.data.doing_fast_retransmit = 1;
3714 			tot_retrans++;
3715 			/* mark the sending seq for possible subsequent FR's */
3716 			/*
3717 			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3718 			 * (uint32_t)tpi->rec.data.tsn);
3719 			 */
3720 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3721 				/*
3722 				 * If the queue of send is empty then its
3723 				 * the next sequence number that will be
3724 				 * assigned so we subtract one from this to
3725 				 * get the one we last sent.
3726 				 */
3727 				tp1->rec.data.fast_retran_tsn = sending_seq;
3728 			} else {
3729 				/*
3730 				 * If there are chunks on the send queue
3731 				 * (unsent data that has made it from the
3732 				 * stream queues but not out the door, we
3733 				 * take the first one (which will have the
3734 				 * lowest TSN) and subtract one to get the
3735 				 * one we last sent.
3736 				 */
3737 				struct sctp_tmit_chunk *ttt;
3738 
3739 				ttt = TAILQ_FIRST(&asoc->send_queue);
3740 				tp1->rec.data.fast_retran_tsn =
3741 				    ttt->rec.data.tsn;
3742 			}
3743 
3744 			if (tp1->do_rtt) {
3745 				/*
3746 				 * this guy had a RTO calculation pending on
3747 				 * it, cancel it
3748 				 */
3749 				if ((tp1->whoTo != NULL) &&
3750 				    (tp1->whoTo->rto_needed == 0)) {
3751 					tp1->whoTo->rto_needed = 1;
3752 				}
3753 				tp1->do_rtt = 0;
3754 			}
3755 			if (alt != tp1->whoTo) {
3756 				/* yes, there is an alternate. */
3757 				sctp_free_remote_addr(tp1->whoTo);
3758 				/* sa_ignore FREED_MEMORY */
3759 				tp1->whoTo = alt;
3760 				atomic_add_int(&alt->ref_count, 1);
3761 			}
3762 		}
3763 	}
3764 }
3765 
3766 struct sctp_tmit_chunk *
3767 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3768     struct sctp_association *asoc)
3769 {
3770 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3771 	struct timeval now;
3772 	int now_filled = 0;
3773 
3774 	if (asoc->prsctp_supported == 0) {
3775 		return (NULL);
3776 	}
3777 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3778 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3779 		    tp1->sent != SCTP_DATAGRAM_RESEND &&
3780 		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3781 			/* no chance to advance, out of here */
3782 			break;
3783 		}
3784 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3785 			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3786 			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3787 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3788 				    asoc->advanced_peer_ack_point,
3789 				    tp1->rec.data.tsn, 0, 0);
3790 			}
3791 		}
3792 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3793 			/*
3794 			 * We can't fwd-tsn past any that are reliable aka
3795 			 * retransmitted until the asoc fails.
3796 			 */
3797 			break;
3798 		}
3799 		if (!now_filled) {
3800 			(void)SCTP_GETTIME_TIMEVAL(&now);
3801 			now_filled = 1;
3802 		}
3803 		/*
3804 		 * now we got a chunk which is marked for another
3805 		 * retransmission to a PR-stream but has run out its chances
3806 		 * already maybe OR has been marked to skip now. Can we skip
3807 		 * it if its a resend?
3808 		 */
3809 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3810 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3811 			/*
3812 			 * Now is this one marked for resend and its time is
3813 			 * now up?
3814 			 */
3815 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3816 				/* Yes so drop it */
3817 				if (tp1->data) {
3818 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3819 					    1, SCTP_SO_NOT_LOCKED);
3820 				}
3821 			} else {
3822 				/*
3823 				 * No, we are done when hit one for resend
3824 				 * whos time as not expired.
3825 				 */
3826 				break;
3827 			}
3828 		}
3829 		/*
3830 		 * Ok now if this chunk is marked to drop it we can clean up
3831 		 * the chunk, advance our peer ack point and we can check
3832 		 * the next chunk.
3833 		 */
3834 		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3835 		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3836 			/* advance PeerAckPoint goes forward */
3837 			if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3838 				asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3839 				a_adv = tp1;
3840 			} else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3841 				/* No update but we do save the chk */
3842 				a_adv = tp1;
3843 			}
3844 		} else {
3845 			/*
3846 			 * If it is still in RESEND we can advance no
3847 			 * further
3848 			 */
3849 			break;
3850 		}
3851 	}
3852 	return (a_adv);
3853 }
3854 
3855 static int
3856 sctp_fs_audit(struct sctp_association *asoc)
3857 {
3858 	struct sctp_tmit_chunk *chk;
3859 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3860 	int ret;
3861 #ifndef INVARIANTS
3862 	int entry_flight, entry_cnt;
3863 #endif
3864 
3865 	ret = 0;
3866 #ifndef INVARIANTS
3867 	entry_flight = asoc->total_flight;
3868 	entry_cnt = asoc->total_flight_count;
3869 #endif
3870 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3871 		return (0);
3872 
3873 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3874 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3875 			SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3876 			    chk->rec.data.tsn,
3877 			    chk->send_size,
3878 			    chk->snd_count);
3879 			inflight++;
3880 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3881 			resend++;
3882 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3883 			inbetween++;
3884 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3885 			above++;
3886 		} else {
3887 			acked++;
3888 		}
3889 	}
3890 
3891 	if ((inflight > 0) || (inbetween > 0)) {
3892 #ifdef INVARIANTS
3893 		panic("Flight size-express incorrect? \n");
3894 #else
3895 		SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3896 		    entry_flight, entry_cnt);
3897 
3898 		SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3899 		    inflight, inbetween, resend, above, acked);
3900 		ret = 1;
3901 #endif
3902 	}
3903 	return (ret);
3904 }
3905 
3906 
3907 static void
3908 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3909     struct sctp_association *asoc,
3910     struct sctp_tmit_chunk *tp1)
3911 {
3912 	tp1->window_probe = 0;
3913 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3914 		/* TSN's skipped we do NOT move back. */
3915 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3916 		    tp1->whoTo ? tp1->whoTo->flight_size : 0,
3917 		    tp1->book_size,
3918 		    (uint32_t)(uintptr_t)tp1->whoTo,
3919 		    tp1->rec.data.tsn);
3920 		return;
3921 	}
3922 	/* First setup this by shrinking flight */
3923 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3924 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3925 		    tp1);
3926 	}
3927 	sctp_flight_size_decrease(tp1);
3928 	sctp_total_flight_decrease(stcb, tp1);
3929 	/* Now mark for resend */
3930 	tp1->sent = SCTP_DATAGRAM_RESEND;
3931 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3932 
3933 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3934 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3935 		    tp1->whoTo->flight_size,
3936 		    tp1->book_size,
3937 		    (uint32_t)(uintptr_t)tp1->whoTo,
3938 		    tp1->rec.data.tsn);
3939 	}
3940 }
3941 
3942 void
3943 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3944     uint32_t rwnd, int *abort_now, int ecne_seen)
3945 {
3946 	struct sctp_nets *net;
3947 	struct sctp_association *asoc;
3948 	struct sctp_tmit_chunk *tp1, *tp2;
3949 	uint32_t old_rwnd;
3950 	int win_probe_recovery = 0;
3951 	int win_probe_recovered = 0;
3952 	int j, done_once = 0;
3953 	int rto_ok = 1;
3954 	uint32_t send_s;
3955 
3956 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3957 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3958 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3959 	}
3960 	SCTP_TCB_LOCK_ASSERT(stcb);
3961 #ifdef SCTP_ASOCLOG_OF_TSNS
3962 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3963 	stcb->asoc.cumack_log_at++;
3964 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3965 		stcb->asoc.cumack_log_at = 0;
3966 	}
3967 #endif
3968 	asoc = &stcb->asoc;
3969 	old_rwnd = asoc->peers_rwnd;
3970 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3971 		/* old ack */
3972 		return;
3973 	} else if (asoc->last_acked_seq == cumack) {
3974 		/* Window update sack */
3975 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3976 		    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3977 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3978 			/* SWS sender side engages */
3979 			asoc->peers_rwnd = 0;
3980 		}
3981 		if (asoc->peers_rwnd > old_rwnd) {
3982 			goto again;
3983 		}
3984 		return;
3985 	}
3986 
3987 	/* First setup for CC stuff */
3988 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3989 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3990 			/* Drag along the window_tsn for cwr's */
3991 			net->cwr_window_tsn = cumack;
3992 		}
3993 		net->prev_cwnd = net->cwnd;
3994 		net->net_ack = 0;
3995 		net->net_ack2 = 0;
3996 
3997 		/*
3998 		 * CMT: Reset CUC and Fast recovery algo variables before
3999 		 * SACK processing
4000 		 */
4001 		net->new_pseudo_cumack = 0;
4002 		net->will_exit_fast_recovery = 0;
4003 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4004 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4005 		}
4006 	}
4007 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4008 		tp1 = TAILQ_LAST(&asoc->sent_queue,
4009 		    sctpchunk_listhead);
4010 		send_s = tp1->rec.data.tsn + 1;
4011 	} else {
4012 		send_s = asoc->sending_seq;
4013 	}
4014 	if (SCTP_TSN_GE(cumack, send_s)) {
4015 		struct mbuf *op_err;
4016 		char msg[SCTP_DIAG_INFO_LEN];
4017 
4018 		*abort_now = 1;
4019 		/* XXX */
4020 		snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
4021 		    cumack, send_s);
4022 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4023 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
4024 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4025 		return;
4026 	}
4027 	asoc->this_sack_highest_gap = cumack;
4028 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4029 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4030 		    stcb->asoc.overall_error_count,
4031 		    0,
4032 		    SCTP_FROM_SCTP_INDATA,
4033 		    __LINE__);
4034 	}
4035 	stcb->asoc.overall_error_count = 0;
4036 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
4037 		/* process the new consecutive TSN first */
4038 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4039 			if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
4040 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4041 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
4042 				}
4043 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4044 					/*
4045 					 * If it is less than ACKED, it is
4046 					 * now no-longer in flight. Higher
4047 					 * values may occur during marking
4048 					 */
4049 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4050 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4051 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4052 							    tp1->whoTo->flight_size,
4053 							    tp1->book_size,
4054 							    (uint32_t)(uintptr_t)tp1->whoTo,
4055 							    tp1->rec.data.tsn);
4056 						}
4057 						sctp_flight_size_decrease(tp1);
4058 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4059 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4060 							    tp1);
4061 						}
4062 						/* sa_ignore NO_NULL_CHK */
4063 						sctp_total_flight_decrease(stcb, tp1);
4064 					}
4065 					tp1->whoTo->net_ack += tp1->send_size;
4066 					if (tp1->snd_count < 2) {
4067 						/*
4068 						 * True non-retransmitted
4069 						 * chunk
4070 						 */
4071 						tp1->whoTo->net_ack2 +=
4072 						    tp1->send_size;
4073 
4074 						/* update RTO too? */
4075 						if (tp1->do_rtt) {
4076 							if (rto_ok) {
4077 								tp1->whoTo->RTO =
4078 								/*
4079 								 * sa_ignore
4080 								 * NO_NULL_CHK
4081 								 */
4082 								    sctp_calculate_rto(stcb,
4083 								    asoc, tp1->whoTo,
4084 								    &tp1->sent_rcv_time,
4085 								    SCTP_RTT_FROM_DATA);
4086 								rto_ok = 0;
4087 							}
4088 							if (tp1->whoTo->rto_needed == 0) {
4089 								tp1->whoTo->rto_needed = 1;
4090 							}
4091 							tp1->do_rtt = 0;
4092 						}
4093 					}
4094 					/*
4095 					 * CMT: CUCv2 algorithm. From the
4096 					 * cumack'd TSNs, for each TSN being
4097 					 * acked for the first time, set the
4098 					 * following variables for the
4099 					 * corresp destination.
4100 					 * new_pseudo_cumack will trigger a
4101 					 * cwnd update.
4102 					 * find_(rtx_)pseudo_cumack will
4103 					 * trigger search for the next
4104 					 * expected (rtx-)pseudo-cumack.
4105 					 */
4106 					tp1->whoTo->new_pseudo_cumack = 1;
4107 					tp1->whoTo->find_pseudo_cumack = 1;
4108 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4109 
4110 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4111 						/* sa_ignore NO_NULL_CHK */
4112 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4113 					}
4114 				}
4115 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4116 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4117 				}
4118 				if (tp1->rec.data.chunk_was_revoked) {
4119 					/* deflate the cwnd */
4120 					tp1->whoTo->cwnd -= tp1->book_size;
4121 					tp1->rec.data.chunk_was_revoked = 0;
4122 				}
4123 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4124 					if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4125 						asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4126 #ifdef INVARIANTS
4127 					} else {
4128 						panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4129 #endif
4130 					}
4131 				}
4132 				if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4133 				    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4134 				    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4135 					asoc->trigger_reset = 1;
4136 				}
4137 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4138 				if (tp1->data) {
4139 					/* sa_ignore NO_NULL_CHK */
4140 					sctp_free_bufspace(stcb, asoc, tp1, 1);
4141 					sctp_m_freem(tp1->data);
4142 					tp1->data = NULL;
4143 				}
4144 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4145 					sctp_log_sack(asoc->last_acked_seq,
4146 					    cumack,
4147 					    tp1->rec.data.tsn,
4148 					    0,
4149 					    0,
4150 					    SCTP_LOG_FREE_SENT);
4151 				}
4152 				asoc->sent_queue_cnt--;
4153 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4154 			} else {
4155 				break;
4156 			}
4157 		}
4158 
4159 	}
4160 	/* sa_ignore NO_NULL_CHK */
4161 	if (stcb->sctp_socket) {
4162 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4163 		struct socket *so;
4164 
4165 #endif
4166 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4167 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4168 			/* sa_ignore NO_NULL_CHK */
4169 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4170 		}
4171 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4172 		so = SCTP_INP_SO(stcb->sctp_ep);
4173 		atomic_add_int(&stcb->asoc.refcnt, 1);
4174 		SCTP_TCB_UNLOCK(stcb);
4175 		SCTP_SOCKET_LOCK(so, 1);
4176 		SCTP_TCB_LOCK(stcb);
4177 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4178 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4179 			/* assoc was freed while we were unlocked */
4180 			SCTP_SOCKET_UNLOCK(so, 1);
4181 			return;
4182 		}
4183 #endif
4184 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4185 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4186 		SCTP_SOCKET_UNLOCK(so, 1);
4187 #endif
4188 	} else {
4189 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4190 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4191 		}
4192 	}
4193 
4194 	/* JRS - Use the congestion control given in the CC module */
4195 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4196 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4197 			if (net->net_ack2 > 0) {
4198 				/*
4199 				 * Karn's rule applies to clearing error
4200 				 * count, this is optional.
4201 				 */
4202 				net->error_count = 0;
4203 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4204 					/* addr came good */
4205 					net->dest_state |= SCTP_ADDR_REACHABLE;
4206 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4207 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4208 				}
4209 				if (net == stcb->asoc.primary_destination) {
4210 					if (stcb->asoc.alternate) {
4211 						/*
4212 						 * release the alternate,
4213 						 * primary is good
4214 						 */
4215 						sctp_free_remote_addr(stcb->asoc.alternate);
4216 						stcb->asoc.alternate = NULL;
4217 					}
4218 				}
4219 				if (net->dest_state & SCTP_ADDR_PF) {
4220 					net->dest_state &= ~SCTP_ADDR_PF;
4221 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4222 					    stcb->sctp_ep, stcb, net,
4223 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4224 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4225 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4226 					/* Done with this net */
4227 					net->net_ack = 0;
4228 				}
4229 				/* restore any doubled timers */
4230 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4231 				if (net->RTO < stcb->asoc.minrto) {
4232 					net->RTO = stcb->asoc.minrto;
4233 				}
4234 				if (net->RTO > stcb->asoc.maxrto) {
4235 					net->RTO = stcb->asoc.maxrto;
4236 				}
4237 			}
4238 		}
4239 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4240 	}
4241 	asoc->last_acked_seq = cumack;
4242 
4243 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4244 		/* nothing left in-flight */
4245 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4246 			net->flight_size = 0;
4247 			net->partial_bytes_acked = 0;
4248 		}
4249 		asoc->total_flight = 0;
4250 		asoc->total_flight_count = 0;
4251 	}
4252 
4253 	/* RWND update */
4254 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4255 	    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4256 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4257 		/* SWS sender side engages */
4258 		asoc->peers_rwnd = 0;
4259 	}
4260 	if (asoc->peers_rwnd > old_rwnd) {
4261 		win_probe_recovery = 1;
4262 	}
4263 	/* Now assure a timer where data is queued at */
4264 again:
4265 	j = 0;
4266 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4267 		if (win_probe_recovery && (net->window_probe)) {
4268 			win_probe_recovered = 1;
4269 			/*
4270 			 * Find first chunk that was used with window probe
4271 			 * and clear the sent
4272 			 */
4273 			/* sa_ignore FREED_MEMORY */
4274 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4275 				if (tp1->window_probe) {
4276 					/* move back to data send queue */
4277 					sctp_window_probe_recovery(stcb, asoc, tp1);
4278 					break;
4279 				}
4280 			}
4281 		}
4282 		if (net->flight_size) {
4283 			j++;
4284 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4285 			if (net->window_probe) {
4286 				net->window_probe = 0;
4287 			}
4288 		} else {
4289 			if (net->window_probe) {
4290 				/*
4291 				 * In window probes we must assure a timer
4292 				 * is still running there
4293 				 */
4294 				net->window_probe = 0;
4295 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4296 					sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4297 				}
4298 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4299 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4300 				    stcb, net,
4301 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
4302 			}
4303 		}
4304 	}
4305 	if ((j == 0) &&
4306 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4307 	    (asoc->sent_queue_retran_cnt == 0) &&
4308 	    (win_probe_recovered == 0) &&
4309 	    (done_once == 0)) {
4310 		/*
4311 		 * huh, this should not happen unless all packets are
4312 		 * PR-SCTP and marked to skip of course.
4313 		 */
4314 		if (sctp_fs_audit(asoc)) {
4315 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4316 				net->flight_size = 0;
4317 			}
4318 			asoc->total_flight = 0;
4319 			asoc->total_flight_count = 0;
4320 			asoc->sent_queue_retran_cnt = 0;
4321 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4322 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4323 					sctp_flight_size_increase(tp1);
4324 					sctp_total_flight_increase(stcb, tp1);
4325 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4326 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4327 				}
4328 			}
4329 		}
4330 		done_once = 1;
4331 		goto again;
4332 	}
4333 	/**********************************/
4334 	/* Now what about shutdown issues */
4335 	/**********************************/
4336 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4337 		/* nothing left on sendqueue.. consider done */
4338 		/* clean up */
4339 		if ((asoc->stream_queue_cnt == 1) &&
4340 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4341 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4342 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4343 			SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
4344 		}
4345 		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4346 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4347 		    (asoc->stream_queue_cnt == 1) &&
4348 		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4349 			struct mbuf *op_err;
4350 
4351 			*abort_now = 1;
4352 			/* XXX */
4353 			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4354 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4355 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4356 			return;
4357 		}
4358 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4359 		    (asoc->stream_queue_cnt == 0)) {
4360 			struct sctp_nets *netp;
4361 
4362 			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4363 			    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4364 				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4365 			}
4366 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
4367 			sctp_stop_timers_for_shutdown(stcb);
4368 			if (asoc->alternate) {
4369 				netp = asoc->alternate;
4370 			} else {
4371 				netp = asoc->primary_destination;
4372 			}
4373 			sctp_send_shutdown(stcb, netp);
4374 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4375 			    stcb->sctp_ep, stcb, netp);
4376 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4377 			    stcb->sctp_ep, stcb, netp);
4378 		} else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4379 		    (asoc->stream_queue_cnt == 0)) {
4380 			struct sctp_nets *netp;
4381 
4382 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4383 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
4384 			sctp_stop_timers_for_shutdown(stcb);
4385 			if (asoc->alternate) {
4386 				netp = asoc->alternate;
4387 			} else {
4388 				netp = asoc->primary_destination;
4389 			}
4390 			sctp_send_shutdown_ack(stcb, netp);
4391 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4392 			    stcb->sctp_ep, stcb, netp);
4393 		}
4394 	}
4395 	/*********************************************/
4396 	/* Here we perform PR-SCTP procedures        */
4397 	/* (section 4.2)                             */
4398 	/*********************************************/
4399 	/* C1. update advancedPeerAckPoint */
4400 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4401 		asoc->advanced_peer_ack_point = cumack;
4402 	}
4403 	/* PR-Sctp issues need to be addressed too */
4404 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4405 		struct sctp_tmit_chunk *lchk;
4406 		uint32_t old_adv_peer_ack_point;
4407 
4408 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4409 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4410 		/* C3. See if we need to send a Fwd-TSN */
4411 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4412 			/*
4413 			 * ISSUE with ECN, see FWD-TSN processing.
4414 			 */
4415 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4416 				send_forward_tsn(stcb, asoc);
4417 			} else if (lchk) {
4418 				/* try to FR fwd-tsn's that get lost too */
4419 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4420 					send_forward_tsn(stcb, asoc);
4421 				}
4422 			}
4423 		}
4424 		if (lchk) {
4425 			/* Assure a timer is up */
4426 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4427 			    stcb->sctp_ep, stcb, lchk->whoTo);
4428 		}
4429 	}
4430 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4431 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4432 		    rwnd,
4433 		    stcb->asoc.peers_rwnd,
4434 		    stcb->asoc.total_flight,
4435 		    stcb->asoc.total_output_queue_size);
4436 	}
4437 }
4438 
4439 void
4440 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4441     struct sctp_tcb *stcb,
4442     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4443     int *abort_now, uint8_t flags,
4444     uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4445 {
4446 	struct sctp_association *asoc;
4447 	struct sctp_tmit_chunk *tp1, *tp2;
4448 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4449 	uint16_t wake_him = 0;
4450 	uint32_t send_s = 0;
4451 	long j;
4452 	int accum_moved = 0;
4453 	int will_exit_fast_recovery = 0;
4454 	uint32_t a_rwnd, old_rwnd;
4455 	int win_probe_recovery = 0;
4456 	int win_probe_recovered = 0;
4457 	struct sctp_nets *net = NULL;
4458 	int done_once;
4459 	int rto_ok = 1;
4460 	uint8_t reneged_all = 0;
4461 	uint8_t cmt_dac_flag;
4462 
4463 	/*
4464 	 * we take any chance we can to service our queues since we cannot
4465 	 * get awoken when the socket is read from :<
4466 	 */
4467 	/*
4468 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4469 	 * old sack, if so discard. 2) If there is nothing left in the send
4470 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4471 	 * too, update any rwnd change and verify no timers are running.
4472 	 * then return. 3) Process any new consequtive data i.e. cum-ack
4473 	 * moved process these first and note that it moved. 4) Process any
4474 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4475 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4476 	 * sync up flightsizes and things, stop all timers and also check
4477 	 * for shutdown_pending state. If so then go ahead and send off the
4478 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4479 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4480 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4481 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4482 	 * if in shutdown_recv state.
4483 	 */
4484 	SCTP_TCB_LOCK_ASSERT(stcb);
4485 	/* CMT DAC algo */
4486 	this_sack_lowest_newack = 0;
4487 	SCTP_STAT_INCR(sctps_slowpath_sack);
4488 	last_tsn = cum_ack;
4489 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4490 #ifdef SCTP_ASOCLOG_OF_TSNS
4491 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4492 	stcb->asoc.cumack_log_at++;
4493 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4494 		stcb->asoc.cumack_log_at = 0;
4495 	}
4496 #endif
4497 	a_rwnd = rwnd;
4498 
4499 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4500 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4501 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4502 	}
4503 
4504 	old_rwnd = stcb->asoc.peers_rwnd;
4505 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4506 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4507 		    stcb->asoc.overall_error_count,
4508 		    0,
4509 		    SCTP_FROM_SCTP_INDATA,
4510 		    __LINE__);
4511 	}
4512 	stcb->asoc.overall_error_count = 0;
4513 	asoc = &stcb->asoc;
4514 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4515 		sctp_log_sack(asoc->last_acked_seq,
4516 		    cum_ack,
4517 		    0,
4518 		    num_seg,
4519 		    num_dup,
4520 		    SCTP_LOG_NEW_SACK);
4521 	}
4522 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4523 		uint16_t i;
4524 		uint32_t *dupdata, dblock;
4525 
4526 		for (i = 0; i < num_dup; i++) {
4527 			dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4528 			    sizeof(uint32_t), (uint8_t *)&dblock);
4529 			if (dupdata == NULL) {
4530 				break;
4531 			}
4532 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4533 		}
4534 	}
4535 	/* reality check */
4536 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4537 		tp1 = TAILQ_LAST(&asoc->sent_queue,
4538 		    sctpchunk_listhead);
4539 		send_s = tp1->rec.data.tsn + 1;
4540 	} else {
4541 		tp1 = NULL;
4542 		send_s = asoc->sending_seq;
4543 	}
4544 	if (SCTP_TSN_GE(cum_ack, send_s)) {
4545 		struct mbuf *op_err;
4546 		char msg[SCTP_DIAG_INFO_LEN];
4547 
4548 		/*
4549 		 * no way, we have not even sent this TSN out yet. Peer is
4550 		 * hopelessly messed up with us.
4551 		 */
4552 		SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4553 		    cum_ack, send_s);
4554 		if (tp1) {
4555 			SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4556 			    tp1->rec.data.tsn, (void *)tp1);
4557 		}
4558 hopeless_peer:
4559 		*abort_now = 1;
4560 		/* XXX */
4561 		snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
4562 		    cum_ack, send_s);
4563 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4564 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4565 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4566 		return;
4567 	}
4568 	/**********************/
4569 	/* 1) check the range */
4570 	/**********************/
4571 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4572 		/* acking something behind */
4573 		return;
4574 	}
4575 
4576 	/* update the Rwnd of the peer */
4577 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4578 	    TAILQ_EMPTY(&asoc->send_queue) &&
4579 	    (asoc->stream_queue_cnt == 0)) {
4580 		/* nothing left on send/sent and strmq */
4581 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4582 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4583 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4584 		}
4585 		asoc->peers_rwnd = a_rwnd;
4586 		if (asoc->sent_queue_retran_cnt) {
4587 			asoc->sent_queue_retran_cnt = 0;
4588 		}
4589 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4590 			/* SWS sender side engages */
4591 			asoc->peers_rwnd = 0;
4592 		}
4593 		/* stop any timers */
4594 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4595 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4596 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4597 			net->partial_bytes_acked = 0;
4598 			net->flight_size = 0;
4599 		}
4600 		asoc->total_flight = 0;
4601 		asoc->total_flight_count = 0;
4602 		return;
4603 	}
4604 	/*
4605 	 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4606 	 * things. The total byte count acked is tracked in netAckSz AND
4607 	 * netAck2 is used to track the total bytes acked that are un-
4608 	 * amibguious and were never retransmitted. We track these on a per
4609 	 * destination address basis.
4610 	 */
4611 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4612 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4613 			/* Drag along the window_tsn for cwr's */
4614 			net->cwr_window_tsn = cum_ack;
4615 		}
4616 		net->prev_cwnd = net->cwnd;
4617 		net->net_ack = 0;
4618 		net->net_ack2 = 0;
4619 
4620 		/*
4621 		 * CMT: Reset CUC and Fast recovery algo variables before
4622 		 * SACK processing
4623 		 */
4624 		net->new_pseudo_cumack = 0;
4625 		net->will_exit_fast_recovery = 0;
4626 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4627 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4628 		}
4629 
4630 		/*
4631 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4632 		 * to be greater than the cumack. Also reset saw_newack to 0
4633 		 * for all dests.
4634 		 */
4635 		net->saw_newack = 0;
4636 		net->this_sack_highest_newack = last_tsn;
4637 	}
4638 	/* process the new consecutive TSN first */
4639 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4640 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4641 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4642 				accum_moved = 1;
4643 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4644 					/*
4645 					 * If it is less than ACKED, it is
4646 					 * now no-longer in flight. Higher
4647 					 * values may occur during marking
4648 					 */
4649 					if ((tp1->whoTo->dest_state &
4650 					    SCTP_ADDR_UNCONFIRMED) &&
4651 					    (tp1->snd_count < 2)) {
4652 						/*
4653 						 * If there was no retran
4654 						 * and the address is
4655 						 * un-confirmed and we sent
4656 						 * there and are now
4657 						 * sacked.. its confirmed,
4658 						 * mark it so.
4659 						 */
4660 						tp1->whoTo->dest_state &=
4661 						    ~SCTP_ADDR_UNCONFIRMED;
4662 					}
4663 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4664 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4665 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4666 							    tp1->whoTo->flight_size,
4667 							    tp1->book_size,
4668 							    (uint32_t)(uintptr_t)tp1->whoTo,
4669 							    tp1->rec.data.tsn);
4670 						}
4671 						sctp_flight_size_decrease(tp1);
4672 						sctp_total_flight_decrease(stcb, tp1);
4673 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4674 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4675 							    tp1);
4676 						}
4677 					}
4678 					tp1->whoTo->net_ack += tp1->send_size;
4679 
4680 					/* CMT SFR and DAC algos */
4681 					this_sack_lowest_newack = tp1->rec.data.tsn;
4682 					tp1->whoTo->saw_newack = 1;
4683 
4684 					if (tp1->snd_count < 2) {
4685 						/*
4686 						 * True non-retransmitted
4687 						 * chunk
4688 						 */
4689 						tp1->whoTo->net_ack2 +=
4690 						    tp1->send_size;
4691 
4692 						/* update RTO too? */
4693 						if (tp1->do_rtt) {
4694 							if (rto_ok) {
4695 								tp1->whoTo->RTO =
4696 								    sctp_calculate_rto(stcb,
4697 								    asoc, tp1->whoTo,
4698 								    &tp1->sent_rcv_time,
4699 								    SCTP_RTT_FROM_DATA);
4700 								rto_ok = 0;
4701 							}
4702 							if (tp1->whoTo->rto_needed == 0) {
4703 								tp1->whoTo->rto_needed = 1;
4704 							}
4705 							tp1->do_rtt = 0;
4706 						}
4707 					}
4708 					/*
4709 					 * CMT: CUCv2 algorithm. From the
4710 					 * cumack'd TSNs, for each TSN being
4711 					 * acked for the first time, set the
4712 					 * following variables for the
4713 					 * corresp destination.
4714 					 * new_pseudo_cumack will trigger a
4715 					 * cwnd update.
4716 					 * find_(rtx_)pseudo_cumack will
4717 					 * trigger search for the next
4718 					 * expected (rtx-)pseudo-cumack.
4719 					 */
4720 					tp1->whoTo->new_pseudo_cumack = 1;
4721 					tp1->whoTo->find_pseudo_cumack = 1;
4722 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4723 
4724 
4725 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4726 						sctp_log_sack(asoc->last_acked_seq,
4727 						    cum_ack,
4728 						    tp1->rec.data.tsn,
4729 						    0,
4730 						    0,
4731 						    SCTP_LOG_TSN_ACKED);
4732 					}
4733 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4734 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4735 					}
4736 				}
4737 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4738 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4739 #ifdef SCTP_AUDITING_ENABLED
4740 					sctp_audit_log(0xB3,
4741 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4742 #endif
4743 				}
4744 				if (tp1->rec.data.chunk_was_revoked) {
4745 					/* deflate the cwnd */
4746 					tp1->whoTo->cwnd -= tp1->book_size;
4747 					tp1->rec.data.chunk_was_revoked = 0;
4748 				}
4749 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4750 					tp1->sent = SCTP_DATAGRAM_ACKED;
4751 				}
4752 			}
4753 		} else {
4754 			break;
4755 		}
4756 	}
4757 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4758 	/* always set this up to cum-ack */
4759 	asoc->this_sack_highest_gap = last_tsn;
4760 
4761 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4762 
4763 		/*
4764 		 * thisSackHighestGap will increase while handling NEW
4765 		 * segments this_sack_highest_newack will increase while
4766 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4767 		 * used for CMT DAC algo. saw_newack will also change.
4768 		 */
4769 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4770 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4771 		    num_seg, num_nr_seg, &rto_ok)) {
4772 			wake_him++;
4773 		}
4774 		/*
4775 		 * validate the biggest_tsn_acked in the gap acks if strict
4776 		 * adherence is wanted.
4777 		 */
4778 		if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4779 			/*
4780 			 * peer is either confused or we are under attack.
4781 			 * We must abort.
4782 			 */
4783 			SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4784 			    biggest_tsn_acked, send_s);
4785 			goto hopeless_peer;
4786 		}
4787 	}
4788 	/*******************************************/
4789 	/* cancel ALL T3-send timer if accum moved */
4790 	/*******************************************/
4791 	if (asoc->sctp_cmt_on_off > 0) {
4792 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4793 			if (net->new_pseudo_cumack)
4794 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4795 				    stcb, net,
4796 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4797 
4798 		}
4799 	} else {
4800 		if (accum_moved) {
4801 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4802 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4803 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4804 			}
4805 		}
4806 	}
4807 	/********************************************/
4808 	/* drop the acked chunks from the sentqueue */
4809 	/********************************************/
4810 	asoc->last_acked_seq = cum_ack;
4811 
4812 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4813 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4814 			break;
4815 		}
4816 		if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4817 			if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4818 				asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4819 #ifdef INVARIANTS
4820 			} else {
4821 				panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4822 #endif
4823 			}
4824 		}
4825 		if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4826 		    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4827 		    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4828 			asoc->trigger_reset = 1;
4829 		}
4830 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4831 		if (PR_SCTP_ENABLED(tp1->flags)) {
4832 			if (asoc->pr_sctp_cnt != 0)
4833 				asoc->pr_sctp_cnt--;
4834 		}
4835 		asoc->sent_queue_cnt--;
4836 		if (tp1->data) {
4837 			/* sa_ignore NO_NULL_CHK */
4838 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4839 			sctp_m_freem(tp1->data);
4840 			tp1->data = NULL;
4841 			if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4842 				asoc->sent_queue_cnt_removeable--;
4843 			}
4844 		}
4845 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4846 			sctp_log_sack(asoc->last_acked_seq,
4847 			    cum_ack,
4848 			    tp1->rec.data.tsn,
4849 			    0,
4850 			    0,
4851 			    SCTP_LOG_FREE_SENT);
4852 		}
4853 		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4854 		wake_him++;
4855 	}
4856 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4857 #ifdef INVARIANTS
4858 		panic("Warning flight size is positive and should be 0");
4859 #else
4860 		SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4861 		    asoc->total_flight);
4862 #endif
4863 		asoc->total_flight = 0;
4864 	}
4865 
4866 	/* sa_ignore NO_NULL_CHK */
4867 	if ((wake_him) && (stcb->sctp_socket)) {
4868 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4869 		struct socket *so;
4870 
4871 #endif
4872 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4873 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4874 			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4875 		}
4876 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4877 		so = SCTP_INP_SO(stcb->sctp_ep);
4878 		atomic_add_int(&stcb->asoc.refcnt, 1);
4879 		SCTP_TCB_UNLOCK(stcb);
4880 		SCTP_SOCKET_LOCK(so, 1);
4881 		SCTP_TCB_LOCK(stcb);
4882 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4883 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4884 			/* assoc was freed while we were unlocked */
4885 			SCTP_SOCKET_UNLOCK(so, 1);
4886 			return;
4887 		}
4888 #endif
4889 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4890 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4891 		SCTP_SOCKET_UNLOCK(so, 1);
4892 #endif
4893 	} else {
4894 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4895 			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4896 		}
4897 	}
4898 
4899 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4900 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4901 			/* Setup so we will exit RFC2582 fast recovery */
4902 			will_exit_fast_recovery = 1;
4903 		}
4904 	}
4905 	/*
4906 	 * Check for revoked fragments:
4907 	 *
4908 	 * if Previous sack - Had no frags then we can't have any revoked if
4909 	 * Previous sack - Had frag's then - If we now have frags aka
4910 	 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4911 	 * some of them. else - The peer revoked all ACKED fragments, since
4912 	 * we had some before and now we have NONE.
4913 	 */
4914 
4915 	if (num_seg) {
4916 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4917 		asoc->saw_sack_with_frags = 1;
4918 	} else if (asoc->saw_sack_with_frags) {
4919 		int cnt_revoked = 0;
4920 
4921 		/* Peer revoked all dg's marked or acked */
4922 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4923 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4924 				tp1->sent = SCTP_DATAGRAM_SENT;
4925 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4926 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4927 					    tp1->whoTo->flight_size,
4928 					    tp1->book_size,
4929 					    (uint32_t)(uintptr_t)tp1->whoTo,
4930 					    tp1->rec.data.tsn);
4931 				}
4932 				sctp_flight_size_increase(tp1);
4933 				sctp_total_flight_increase(stcb, tp1);
4934 				tp1->rec.data.chunk_was_revoked = 1;
4935 				/*
4936 				 * To ensure that this increase in
4937 				 * flightsize, which is artificial, does not
4938 				 * throttle the sender, we also increase the
4939 				 * cwnd artificially.
4940 				 */
4941 				tp1->whoTo->cwnd += tp1->book_size;
4942 				cnt_revoked++;
4943 			}
4944 		}
4945 		if (cnt_revoked) {
4946 			reneged_all = 1;
4947 		}
4948 		asoc->saw_sack_with_frags = 0;
4949 	}
4950 	if (num_nr_seg > 0)
4951 		asoc->saw_sack_with_nr_frags = 1;
4952 	else
4953 		asoc->saw_sack_with_nr_frags = 0;
4954 
4955 	/* JRS - Use the congestion control given in the CC module */
4956 	if (ecne_seen == 0) {
4957 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4958 			if (net->net_ack2 > 0) {
4959 				/*
4960 				 * Karn's rule applies to clearing error
4961 				 * count, this is optional.
4962 				 */
4963 				net->error_count = 0;
4964 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4965 					/* addr came good */
4966 					net->dest_state |= SCTP_ADDR_REACHABLE;
4967 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4968 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4969 				}
4970 
4971 				if (net == stcb->asoc.primary_destination) {
4972 					if (stcb->asoc.alternate) {
4973 						/*
4974 						 * release the alternate,
4975 						 * primary is good
4976 						 */
4977 						sctp_free_remote_addr(stcb->asoc.alternate);
4978 						stcb->asoc.alternate = NULL;
4979 					}
4980 				}
4981 
4982 				if (net->dest_state & SCTP_ADDR_PF) {
4983 					net->dest_state &= ~SCTP_ADDR_PF;
4984 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4985 					    stcb->sctp_ep, stcb, net,
4986 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4987 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4988 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4989 					/* Done with this net */
4990 					net->net_ack = 0;
4991 				}
4992 				/* restore any doubled timers */
4993 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4994 				if (net->RTO < stcb->asoc.minrto) {
4995 					net->RTO = stcb->asoc.minrto;
4996 				}
4997 				if (net->RTO > stcb->asoc.maxrto) {
4998 					net->RTO = stcb->asoc.maxrto;
4999 				}
5000 			}
5001 		}
5002 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
5003 	}
5004 
5005 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
5006 		/* nothing left in-flight */
5007 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5008 			/* stop all timers */
5009 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5010 			    stcb, net,
5011 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
5012 			net->flight_size = 0;
5013 			net->partial_bytes_acked = 0;
5014 		}
5015 		asoc->total_flight = 0;
5016 		asoc->total_flight_count = 0;
5017 	}
5018 
5019 	/**********************************/
5020 	/* Now what about shutdown issues */
5021 	/**********************************/
5022 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
5023 		/* nothing left on sendqueue.. consider done */
5024 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5025 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5026 			    asoc->peers_rwnd, 0, 0, a_rwnd);
5027 		}
5028 		asoc->peers_rwnd = a_rwnd;
5029 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5030 			/* SWS sender side engages */
5031 			asoc->peers_rwnd = 0;
5032 		}
5033 		/* clean up */
5034 		if ((asoc->stream_queue_cnt == 1) &&
5035 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5036 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5037 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
5038 			SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
5039 		}
5040 		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5041 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5042 		    (asoc->stream_queue_cnt == 1) &&
5043 		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
5044 			struct mbuf *op_err;
5045 
5046 			*abort_now = 1;
5047 			/* XXX */
5048 			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
5049 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
5050 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5051 			return;
5052 		}
5053 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5054 		    (asoc->stream_queue_cnt == 0)) {
5055 			struct sctp_nets *netp;
5056 
5057 			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
5058 			    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5059 				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5060 			}
5061 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
5062 			sctp_stop_timers_for_shutdown(stcb);
5063 			if (asoc->alternate) {
5064 				netp = asoc->alternate;
5065 			} else {
5066 				netp = asoc->primary_destination;
5067 			}
5068 			sctp_send_shutdown(stcb, netp);
5069 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5070 			    stcb->sctp_ep, stcb, netp);
5071 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5072 			    stcb->sctp_ep, stcb, netp);
5073 			return;
5074 		} else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5075 		    (asoc->stream_queue_cnt == 0)) {
5076 			struct sctp_nets *netp;
5077 
5078 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5079 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
5080 			sctp_stop_timers_for_shutdown(stcb);
5081 			if (asoc->alternate) {
5082 				netp = asoc->alternate;
5083 			} else {
5084 				netp = asoc->primary_destination;
5085 			}
5086 			sctp_send_shutdown_ack(stcb, netp);
5087 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5088 			    stcb->sctp_ep, stcb, netp);
5089 			return;
5090 		}
5091 	}
5092 	/*
5093 	 * Now here we are going to recycle net_ack for a different use...
5094 	 * HEADS UP.
5095 	 */
5096 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5097 		net->net_ack = 0;
5098 	}
5099 
5100 	/*
5101 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5102 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5103 	 * automatically ensure that.
5104 	 */
5105 	if ((asoc->sctp_cmt_on_off > 0) &&
5106 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5107 	    (cmt_dac_flag == 0)) {
5108 		this_sack_lowest_newack = cum_ack;
5109 	}
5110 	if ((num_seg > 0) || (num_nr_seg > 0)) {
5111 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5112 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5113 	}
5114 	/* JRS - Use the congestion control given in the CC module */
5115 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5116 
5117 	/* Now are we exiting loss recovery ? */
5118 	if (will_exit_fast_recovery) {
5119 		/* Ok, we must exit fast recovery */
5120 		asoc->fast_retran_loss_recovery = 0;
5121 	}
5122 	if ((asoc->sat_t3_loss_recovery) &&
5123 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5124 		/* end satellite t3 loss recovery */
5125 		asoc->sat_t3_loss_recovery = 0;
5126 	}
5127 	/*
5128 	 * CMT Fast recovery
5129 	 */
5130 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5131 		if (net->will_exit_fast_recovery) {
5132 			/* Ok, we must exit fast recovery */
5133 			net->fast_retran_loss_recovery = 0;
5134 		}
5135 	}
5136 
5137 	/* Adjust and set the new rwnd value */
5138 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5139 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5140 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5141 	}
5142 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5143 	    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5144 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5145 		/* SWS sender side engages */
5146 		asoc->peers_rwnd = 0;
5147 	}
5148 	if (asoc->peers_rwnd > old_rwnd) {
5149 		win_probe_recovery = 1;
5150 	}
5151 
5152 	/*
5153 	 * Now we must setup so we have a timer up for anyone with
5154 	 * outstanding data.
5155 	 */
5156 	done_once = 0;
5157 again:
5158 	j = 0;
5159 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5160 		if (win_probe_recovery && (net->window_probe)) {
5161 			win_probe_recovered = 1;
5162 			/*-
5163 			 * Find first chunk that was used with
5164 			 * window probe and clear the event. Put
5165 			 * it back into the send queue as if has
5166 			 * not been sent.
5167 			 */
5168 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5169 				if (tp1->window_probe) {
5170 					sctp_window_probe_recovery(stcb, asoc, tp1);
5171 					break;
5172 				}
5173 			}
5174 		}
5175 		if (net->flight_size) {
5176 			j++;
5177 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5178 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5179 				    stcb->sctp_ep, stcb, net);
5180 			}
5181 			if (net->window_probe) {
5182 				net->window_probe = 0;
5183 			}
5184 		} else {
5185 			if (net->window_probe) {
5186 				/*
5187 				 * In window probes we must assure a timer
5188 				 * is still running there
5189 				 */
5190 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5191 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5192 					    stcb->sctp_ep, stcb, net);
5193 
5194 				}
5195 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5196 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5197 				    stcb, net,
5198 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
5199 			}
5200 		}
5201 	}
5202 	if ((j == 0) &&
5203 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5204 	    (asoc->sent_queue_retran_cnt == 0) &&
5205 	    (win_probe_recovered == 0) &&
5206 	    (done_once == 0)) {
5207 		/*
5208 		 * huh, this should not happen unless all packets are
5209 		 * PR-SCTP and marked to skip of course.
5210 		 */
5211 		if (sctp_fs_audit(asoc)) {
5212 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5213 				net->flight_size = 0;
5214 			}
5215 			asoc->total_flight = 0;
5216 			asoc->total_flight_count = 0;
5217 			asoc->sent_queue_retran_cnt = 0;
5218 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5219 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5220 					sctp_flight_size_increase(tp1);
5221 					sctp_total_flight_increase(stcb, tp1);
5222 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5223 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5224 				}
5225 			}
5226 		}
5227 		done_once = 1;
5228 		goto again;
5229 	}
5230 	/*********************************************/
5231 	/* Here we perform PR-SCTP procedures        */
5232 	/* (section 4.2)                             */
5233 	/*********************************************/
5234 	/* C1. update advancedPeerAckPoint */
5235 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5236 		asoc->advanced_peer_ack_point = cum_ack;
5237 	}
5238 	/* C2. try to further move advancedPeerAckPoint ahead */
5239 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5240 		struct sctp_tmit_chunk *lchk;
5241 		uint32_t old_adv_peer_ack_point;
5242 
5243 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5244 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5245 		/* C3. See if we need to send a Fwd-TSN */
5246 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5247 			/*
5248 			 * ISSUE with ECN, see FWD-TSN processing.
5249 			 */
5250 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5251 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5252 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5253 				    old_adv_peer_ack_point);
5254 			}
5255 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5256 				send_forward_tsn(stcb, asoc);
5257 			} else if (lchk) {
5258 				/* try to FR fwd-tsn's that get lost too */
5259 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5260 					send_forward_tsn(stcb, asoc);
5261 				}
5262 			}
5263 		}
5264 		if (lchk) {
5265 			/* Assure a timer is up */
5266 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5267 			    stcb->sctp_ep, stcb, lchk->whoTo);
5268 		}
5269 	}
5270 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5271 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5272 		    a_rwnd,
5273 		    stcb->asoc.peers_rwnd,
5274 		    stcb->asoc.total_flight,
5275 		    stcb->asoc.total_output_queue_size);
5276 	}
5277 }
5278 
5279 void
5280 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5281 {
5282 	/* Copy cum-ack */
5283 	uint32_t cum_ack, a_rwnd;
5284 
5285 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5286 	/* Arrange so a_rwnd does NOT change */
5287 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5288 
5289 	/* Now call the express sack handling */
5290 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5291 }
5292 
5293 static void
5294 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5295     struct sctp_stream_in *strmin)
5296 {
5297 	struct sctp_queued_to_read *control, *ncontrol;
5298 	struct sctp_association *asoc;
5299 	uint32_t mid;
5300 	int need_reasm_check = 0;
5301 
5302 	asoc = &stcb->asoc;
5303 	mid = strmin->last_mid_delivered;
5304 	/*
5305 	 * First deliver anything prior to and including the stream no that
5306 	 * came in.
5307 	 */
5308 	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5309 		if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5310 			/* this is deliverable now */
5311 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5312 				if (control->on_strm_q) {
5313 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5314 						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5315 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5316 						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5317 #ifdef INVARIANTS
5318 					} else {
5319 						panic("strmin: %p ctl: %p unknown %d",
5320 						    strmin, control, control->on_strm_q);
5321 #endif
5322 					}
5323 					control->on_strm_q = 0;
5324 				}
5325 				/* subtract pending on streams */
5326 				if (asoc->size_on_all_streams >= control->length) {
5327 					asoc->size_on_all_streams -= control->length;
5328 				} else {
5329 #ifdef INVARIANTS
5330 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5331 #else
5332 					asoc->size_on_all_streams = 0;
5333 #endif
5334 				}
5335 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5336 				/* deliver it to at least the delivery-q */
5337 				if (stcb->sctp_socket) {
5338 					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5339 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5340 					    control,
5341 					    &stcb->sctp_socket->so_rcv,
5342 					    1, SCTP_READ_LOCK_HELD,
5343 					    SCTP_SO_NOT_LOCKED);
5344 				}
5345 			} else {
5346 				/* Its a fragmented message */
5347 				if (control->first_frag_seen) {
5348 					/*
5349 					 * Make it so this is next to
5350 					 * deliver, we restore later
5351 					 */
5352 					strmin->last_mid_delivered = control->mid - 1;
5353 					need_reasm_check = 1;
5354 					break;
5355 				}
5356 			}
5357 		} else {
5358 			/* no more delivery now. */
5359 			break;
5360 		}
5361 	}
5362 	if (need_reasm_check) {
5363 		int ret;
5364 
5365 		ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5366 		if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
5367 			/* Restore the next to deliver unless we are ahead */
5368 			strmin->last_mid_delivered = mid;
5369 		}
5370 		if (ret == 0) {
5371 			/* Left the front Partial one on */
5372 			return;
5373 		}
5374 		need_reasm_check = 0;
5375 	}
5376 	/*
5377 	 * now we must deliver things in queue the normal way  if any are
5378 	 * now ready.
5379 	 */
5380 	mid = strmin->last_mid_delivered + 1;
5381 	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5382 		if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
5383 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5384 				/* this is deliverable now */
5385 				if (control->on_strm_q) {
5386 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5387 						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5388 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5389 						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5390 #ifdef INVARIANTS
5391 					} else {
5392 						panic("strmin: %p ctl: %p unknown %d",
5393 						    strmin, control, control->on_strm_q);
5394 #endif
5395 					}
5396 					control->on_strm_q = 0;
5397 				}
5398 				/* subtract pending on streams */
5399 				if (asoc->size_on_all_streams >= control->length) {
5400 					asoc->size_on_all_streams -= control->length;
5401 				} else {
5402 #ifdef INVARIANTS
5403 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5404 #else
5405 					asoc->size_on_all_streams = 0;
5406 #endif
5407 				}
5408 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5409 				/* deliver it to at least the delivery-q */
5410 				strmin->last_mid_delivered = control->mid;
5411 				if (stcb->sctp_socket) {
5412 					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5413 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5414 					    control,
5415 					    &stcb->sctp_socket->so_rcv, 1,
5416 					    SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5417 
5418 				}
5419 				mid = strmin->last_mid_delivered + 1;
5420 			} else {
5421 				/* Its a fragmented message */
5422 				if (control->first_frag_seen) {
5423 					/*
5424 					 * Make it so this is next to
5425 					 * deliver
5426 					 */
5427 					strmin->last_mid_delivered = control->mid - 1;
5428 					need_reasm_check = 1;
5429 					break;
5430 				}
5431 			}
5432 		} else {
5433 			break;
5434 		}
5435 	}
5436 	if (need_reasm_check) {
5437 		(void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5438 	}
5439 }
5440 
5441 
5442 
5443 static void
5444 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5445     struct sctp_association *asoc,
5446     uint16_t stream, uint32_t mid, int ordered, uint32_t cumtsn)
5447 {
5448 	struct sctp_queued_to_read *control;
5449 	struct sctp_stream_in *strm;
5450 	struct sctp_tmit_chunk *chk, *nchk;
5451 	int cnt_removed = 0;
5452 
5453 	/*
5454 	 * For now large messages held on the stream reasm that are complete
5455 	 * will be tossed too. We could in theory do more work to spin
5456 	 * through and stop after dumping one msg aka seeing the start of a
5457 	 * new msg at the head, and call the delivery function... to see if
5458 	 * it can be delivered... But for now we just dump everything on the
5459 	 * queue.
5460 	 */
5461 	strm = &asoc->strmin[stream];
5462 	control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
5463 	if (control == NULL) {
5464 		/* Not found */
5465 		return;
5466 	}
5467 	if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
5468 		return;
5469 	}
5470 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5471 		/* Purge hanging chunks */
5472 		if (!asoc->idata_supported && (ordered == 0)) {
5473 			if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
5474 				break;
5475 			}
5476 		}
5477 		cnt_removed++;
5478 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5479 		if (asoc->size_on_reasm_queue >= chk->send_size) {
5480 			asoc->size_on_reasm_queue -= chk->send_size;
5481 		} else {
5482 #ifdef INVARIANTS
5483 			panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
5484 #else
5485 			asoc->size_on_reasm_queue = 0;
5486 #endif
5487 		}
5488 		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5489 		if (chk->data) {
5490 			sctp_m_freem(chk->data);
5491 			chk->data = NULL;
5492 		}
5493 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5494 	}
5495 	if (!TAILQ_EMPTY(&control->reasm)) {
5496 		/* This has to be old data, unordered */
5497 		if (control->data) {
5498 			sctp_m_freem(control->data);
5499 			control->data = NULL;
5500 		}
5501 		sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5502 		chk = TAILQ_FIRST(&control->reasm);
5503 		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5504 			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5505 			sctp_add_chk_to_control(control, strm, stcb, asoc,
5506 			    chk, SCTP_READ_LOCK_HELD);
5507 		}
5508 		sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
5509 		return;
5510 	}
5511 	if (control->on_strm_q == SCTP_ON_ORDERED) {
5512 		TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5513 		if (asoc->size_on_all_streams >= control->length) {
5514 			asoc->size_on_all_streams -= control->length;
5515 		} else {
5516 #ifdef INVARIANTS
5517 			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5518 #else
5519 			asoc->size_on_all_streams = 0;
5520 #endif
5521 		}
5522 		sctp_ucount_decr(asoc->cnt_on_all_streams);
5523 		control->on_strm_q = 0;
5524 	} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5525 		TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5526 		control->on_strm_q = 0;
5527 #ifdef INVARIANTS
5528 	} else if (control->on_strm_q) {
5529 		panic("strm: %p ctl: %p unknown %d",
5530 		    strm, control, control->on_strm_q);
5531 #endif
5532 	}
5533 	control->on_strm_q = 0;
5534 	if (control->on_read_q == 0) {
5535 		sctp_free_remote_addr(control->whoFrom);
5536 		if (control->data) {
5537 			sctp_m_freem(control->data);
5538 			control->data = NULL;
5539 		}
5540 		sctp_free_a_readq(stcb, control);
5541 	}
5542 }
5543 
5544 void
5545 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5546     struct sctp_forward_tsn_chunk *fwd,
5547     int *abort_flag, struct mbuf *m, int offset)
5548 {
5549 	/* The pr-sctp fwd tsn */
5550 	/*
5551 	 * here we will perform all the data receiver side steps for
5552 	 * processing FwdTSN, as required in by pr-sctp draft:
5553 	 *
5554 	 * Assume we get FwdTSN(x):
5555 	 *
5556 	 * 1) update local cumTSN to x 2) try to further advance cumTSN to x
5557 	 * + others we have 3) examine and update re-ordering queue on
5558 	 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5559 	 * report where we are.
5560 	 */
5561 	struct sctp_association *asoc;
5562 	uint32_t new_cum_tsn, gap;
5563 	unsigned int i, fwd_sz, m_size;
5564 	uint32_t str_seq;
5565 	struct sctp_stream_in *strm;
5566 	struct sctp_queued_to_read *control, *sv;
5567 
5568 	asoc = &stcb->asoc;
5569 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5570 		SCTPDBG(SCTP_DEBUG_INDATA1,
5571 		    "Bad size too small/big fwd-tsn\n");
5572 		return;
5573 	}
5574 	m_size = (stcb->asoc.mapping_array_size << 3);
5575 	/*************************************************************/
5576 	/* 1. Here we update local cumTSN and shift the bitmap array */
5577 	/*************************************************************/
5578 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5579 
5580 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5581 		/* Already got there ... */
5582 		return;
5583 	}
5584 	/*
5585 	 * now we know the new TSN is more advanced, let's find the actual
5586 	 * gap
5587 	 */
5588 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5589 	asoc->cumulative_tsn = new_cum_tsn;
5590 	if (gap >= m_size) {
5591 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5592 			struct mbuf *op_err;
5593 			char msg[SCTP_DIAG_INFO_LEN];
5594 
5595 			/*
5596 			 * out of range (of single byte chunks in the rwnd I
5597 			 * give out). This must be an attacker.
5598 			 */
5599 			*abort_flag = 1;
5600 			snprintf(msg, sizeof(msg),
5601 			    "New cum ack %8.8x too high, highest TSN %8.8x",
5602 			    new_cum_tsn, asoc->highest_tsn_inside_map);
5603 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5604 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5605 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5606 			return;
5607 		}
5608 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5609 
5610 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5611 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5612 		asoc->highest_tsn_inside_map = new_cum_tsn;
5613 
5614 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5615 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5616 
5617 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5618 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5619 		}
5620 	} else {
5621 		SCTP_TCB_LOCK_ASSERT(stcb);
5622 		for (i = 0; i <= gap; i++) {
5623 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5624 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5625 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5626 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5627 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5628 				}
5629 			}
5630 		}
5631 	}
5632 	/*************************************************************/
5633 	/* 2. Clear up re-assembly queue                             */
5634 	/*************************************************************/
5635 
5636 	/* This is now done as part of clearing up the stream/seq */
5637 	if (asoc->idata_supported == 0) {
5638 		uint16_t sid;
5639 
5640 		/* Flush all the un-ordered data based on cum-tsn */
5641 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5642 		for (sid = 0; sid < asoc->streamincnt; sid++) {
5643 			sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, new_cum_tsn);
5644 		}
5645 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5646 	}
5647 	/*******************************************************/
5648 	/* 3. Update the PR-stream re-ordering queues and fix  */
5649 	/* delivery issues as needed.                       */
5650 	/*******************************************************/
5651 	fwd_sz -= sizeof(*fwd);
5652 	if (m && fwd_sz) {
5653 		/* New method. */
5654 		unsigned int num_str;
5655 		uint32_t mid, cur_mid;
5656 		uint16_t sid;
5657 		uint16_t ordered, flags;
5658 		struct sctp_strseq *stseq, strseqbuf;
5659 		struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5660 
5661 		offset += sizeof(*fwd);
5662 
5663 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5664 		if (asoc->idata_supported) {
5665 			num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5666 		} else {
5667 			num_str = fwd_sz / sizeof(struct sctp_strseq);
5668 		}
5669 		for (i = 0; i < num_str; i++) {
5670 			if (asoc->idata_supported) {
5671 				stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5672 				    sizeof(struct sctp_strseq_mid),
5673 				    (uint8_t *)&strseqbuf_m);
5674 				offset += sizeof(struct sctp_strseq_mid);
5675 				if (stseq_m == NULL) {
5676 					break;
5677 				}
5678 				sid = ntohs(stseq_m->sid);
5679 				mid = ntohl(stseq_m->mid);
5680 				flags = ntohs(stseq_m->flags);
5681 				if (flags & PR_SCTP_UNORDERED_FLAG) {
5682 					ordered = 0;
5683 				} else {
5684 					ordered = 1;
5685 				}
5686 			} else {
5687 				stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5688 				    sizeof(struct sctp_strseq),
5689 				    (uint8_t *)&strseqbuf);
5690 				offset += sizeof(struct sctp_strseq);
5691 				if (stseq == NULL) {
5692 					break;
5693 				}
5694 				sid = ntohs(stseq->sid);
5695 				mid = (uint32_t)ntohs(stseq->ssn);
5696 				ordered = 1;
5697 			}
5698 			/* Convert */
5699 
5700 			/* now process */
5701 
5702 			/*
5703 			 * Ok we now look for the stream/seq on the read
5704 			 * queue where its not all delivered. If we find it
5705 			 * we transmute the read entry into a PDI_ABORTED.
5706 			 */
5707 			if (sid >= asoc->streamincnt) {
5708 				/* screwed up streams, stop!  */
5709 				break;
5710 			}
5711 			if ((asoc->str_of_pdapi == sid) &&
5712 			    (asoc->ssn_of_pdapi == mid)) {
5713 				/*
5714 				 * If this is the one we were partially
5715 				 * delivering now then we no longer are.
5716 				 * Note this will change with the reassembly
5717 				 * re-write.
5718 				 */
5719 				asoc->fragmented_delivery_inprogress = 0;
5720 			}
5721 			strm = &asoc->strmin[sid];
5722 			for (cur_mid = strm->last_mid_delivered; SCTP_MID_GE(asoc->idata_supported, mid, cur_mid); cur_mid++) {
5723 				sctp_flush_reassm_for_str_seq(stcb, asoc, sid, cur_mid, ordered, new_cum_tsn);
5724 			}
5725 			TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
5726 				if ((control->sinfo_stream == sid) &&
5727 				    (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
5728 					str_seq = (sid << 16) | (0x0000ffff & mid);
5729 					control->pdapi_aborted = 1;
5730 					sv = stcb->asoc.control_pdapi;
5731 					control->end_added = 1;
5732 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5733 						TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5734 						if (asoc->size_on_all_streams >= control->length) {
5735 							asoc->size_on_all_streams -= control->length;
5736 						} else {
5737 #ifdef INVARIANTS
5738 							panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5739 #else
5740 							asoc->size_on_all_streams = 0;
5741 #endif
5742 						}
5743 						sctp_ucount_decr(asoc->cnt_on_all_streams);
5744 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5745 						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5746 #ifdef INVARIANTS
5747 					} else if (control->on_strm_q) {
5748 						panic("strm: %p ctl: %p unknown %d",
5749 						    strm, control, control->on_strm_q);
5750 #endif
5751 					}
5752 					control->on_strm_q = 0;
5753 					stcb->asoc.control_pdapi = control;
5754 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5755 					    stcb,
5756 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5757 					    (void *)&str_seq,
5758 					    SCTP_SO_NOT_LOCKED);
5759 					stcb->asoc.control_pdapi = sv;
5760 					break;
5761 				} else if ((control->sinfo_stream == sid) &&
5762 				    SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
5763 					/* We are past our victim SSN */
5764 					break;
5765 				}
5766 			}
5767 			if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
5768 				/* Update the sequence number */
5769 				strm->last_mid_delivered = mid;
5770 			}
5771 			/* now kick the stream the new way */
5772 			/* sa_ignore NO_NULL_CHK */
5773 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5774 		}
5775 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5776 	}
5777 	/*
5778 	 * Now slide thing forward.
5779 	 */
5780 	sctp_slide_mapping_arrays(stcb);
5781 }
5782