xref: /freebsd/sys/netinet/sctp_indata.c (revision e17f5b1d)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
5  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * a) Redistributions of source code must retain the above copyright notice,
12  *    this list of conditions and the following disclaimer.
13  *
14  * b) Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the distribution.
17  *
18  * c) Neither the name of Cisco Systems, Inc. nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <netinet/sctp_os.h>
39 #include <sys/proc.h>
40 #include <netinet/sctp_var.h>
41 #include <netinet/sctp_sysctl.h>
42 #include <netinet/sctp_header.h>
43 #include <netinet/sctp_pcb.h>
44 #include <netinet/sctputil.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_auth.h>
48 #include <netinet/sctp_timer.h>
49 #include <netinet/sctp_asconf.h>
50 #include <netinet/sctp_indata.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #include <netinet/sctp_input.h>
53 #include <netinet/sctp_crc32.h>
54 #include <netinet/sctp_lock_bsd.h>
55 /*
56  * NOTES: On the outbound side of things I need to check the SACK timer to
57  * see if I should generate a SACK into the chunk queue (if I have data to
58  * send, that is, and will be sending it), for bundling.
59  *
60  * The callback in sctp_usrreq.c will get called when the socket is read from.
61  * This will cause sctp_service_queues() to get called on the top entry in
62  * the list.
63  */
64 static uint32_t
65 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
66     struct sctp_stream_in *strm,
67     struct sctp_tcb *stcb,
68     struct sctp_association *asoc,
69     struct sctp_tmit_chunk *chk, int hold_rlock);
70 
71 
72 void
73 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
74 {
75 	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
76 }
77 
78 /* Calculate what the rwnd would be */
79 uint32_t
80 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
81 {
82 	uint32_t calc = 0;
83 
84 	/*
85 	 * This is really set wrong with respect to a one-to-many socket, since
86 	 * sb_cc is the count that everyone has put up. When we rewrite
87 	 * sctp_soreceive we will fix this so that ONLY this
88 	 * association's data is taken into account.
89 	 */
90 	if (stcb->sctp_socket == NULL) {
91 		return (calc);
92 	}
93 
94 	KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
95 	    ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
96 	KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
97 	    ("size_on_all_streams is %u", asoc->size_on_all_streams));
98 	if (stcb->asoc.sb_cc == 0 &&
99 	    asoc->cnt_on_reasm_queue == 0 &&
100 	    asoc->cnt_on_all_streams == 0) {
101 		/* Full rwnd granted */
102 		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
103 		return (calc);
104 	}
105 	/* get actual space */
106 	calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
107 	/*
108 	 * Take out what has NOT been put on the socket queue and that we
109 	 * still hold for putting up.
110 	 */
111 	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
112 	    asoc->cnt_on_reasm_queue * MSIZE));
113 	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
114 	    asoc->cnt_on_all_streams * MSIZE));
115 	if (calc == 0) {
116 		/* out of space */
117 		return (calc);
118 	}
119 
120 	/* What is the overhead of all these rwnds? */
121 	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
122 	/*
123 	 * If the window gets too small due to control overhead, reduce it
124 	 * to 1, even if it is 0, to keep SWS avoidance engaged.
125 	 */
126 	if (calc < stcb->asoc.my_rwnd_control_len) {
127 		calc = 1;
128 	}
129 	return (calc);
130 }
131 
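/*
 * Worked example (a sketch with made-up numbers): with a 64000-byte
 * receive buffer, two 1000-byte chunks on the reassembly queue and one
 * 500-byte chunk on the stream queues, the window offered above is roughly
 * 64000 - (2000 + 2 * MSIZE) - (500 + 1 * MSIZE) - my_rwnd_control_len,
 * clamped to 1 when the control overhead would otherwise swallow it
 * (SWS avoidance).
 */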
132 
133 
134 /*
135  * Build out our readq entry based on the incoming packet.
136  */
137 struct sctp_queued_to_read *
138 sctp_build_readq_entry(struct sctp_tcb *stcb,
139     struct sctp_nets *net,
140     uint32_t tsn, uint32_t ppid,
141     uint32_t context, uint16_t sid,
142     uint32_t mid, uint8_t flags,
143     struct mbuf *dm)
144 {
145 	struct sctp_queued_to_read *read_queue_e = NULL;
146 
147 	sctp_alloc_a_readq(stcb, read_queue_e);
148 	if (read_queue_e == NULL) {
149 		goto failed_build;
150 	}
151 	memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
152 	read_queue_e->sinfo_stream = sid;
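	/*
	 * The chunk flags (e.g. SCTP_DATA_UNORDERED) are carried in the
	 * upper byte of sinfo_flags; the read path recovers them with
	 * (sinfo_flags >> 8).
	 */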
153 	read_queue_e->sinfo_flags = (flags << 8);
154 	read_queue_e->sinfo_ppid = ppid;
155 	read_queue_e->sinfo_context = context;
156 	read_queue_e->sinfo_tsn = tsn;
157 	read_queue_e->sinfo_cumtsn = tsn;
158 	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
159 	read_queue_e->mid = mid;
160 	read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
161 	TAILQ_INIT(&read_queue_e->reasm);
162 	read_queue_e->whoFrom = net;
163 	atomic_add_int(&net->ref_count, 1);
164 	read_queue_e->data = dm;
165 	read_queue_e->stcb = stcb;
166 	read_queue_e->port_from = stcb->rport;
167 	if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
168 		read_queue_e->do_not_ref_stcb = 1;
169 	}
170 failed_build:
171 	return (read_queue_e);
172 }
173 
174 struct mbuf *
175 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
176 {
177 	struct sctp_extrcvinfo *seinfo;
178 	struct sctp_sndrcvinfo *outinfo;
179 	struct sctp_rcvinfo *rcvinfo;
180 	struct sctp_nxtinfo *nxtinfo;
181 	struct cmsghdr *cmh;
182 	struct mbuf *ret;
183 	int len;
184 	int use_extended;
185 	int provide_nxt;
186 
187 	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
188 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
189 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
190 		/* user does not want any ancillary data */
191 		return (NULL);
192 	}
193 
194 	len = 0;
195 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
196 		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
197 	}
198 	seinfo = (struct sctp_extrcvinfo *)sinfo;
199 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
200 	    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
201 		provide_nxt = 1;
202 		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
203 	} else {
204 		provide_nxt = 0;
205 	}
206 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
207 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
208 			use_extended = 1;
209 			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
210 		} else {
211 			use_extended = 0;
212 			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
213 		}
214 	} else {
215 		use_extended = 0;
216 	}
217 
218 	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
219 	if (ret == NULL) {
220 		/* No space */
221 		return (ret);
222 	}
223 	SCTP_BUF_LEN(ret) = 0;
224 
225 	/* We need a CMSG header followed by the struct */
226 	cmh = mtod(ret, struct cmsghdr *);
227 	/*
228 	 * Make sure that there is no un-initialized padding between the
229 	 * cmsg header and cmsg data and after the cmsg data.
230 	 */
231 	memset(cmh, 0, len);
232 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
233 		cmh->cmsg_level = IPPROTO_SCTP;
234 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
235 		cmh->cmsg_type = SCTP_RCVINFO;
236 		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
237 		rcvinfo->rcv_sid = sinfo->sinfo_stream;
238 		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
239 		rcvinfo->rcv_flags = sinfo->sinfo_flags;
240 		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
241 		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
242 		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
243 		rcvinfo->rcv_context = sinfo->sinfo_context;
244 		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
245 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
246 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
247 	}
248 	if (provide_nxt) {
249 		cmh->cmsg_level = IPPROTO_SCTP;
250 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
251 		cmh->cmsg_type = SCTP_NXTINFO;
252 		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
253 		nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
254 		nxtinfo->nxt_flags = 0;
255 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
256 			nxtinfo->nxt_flags |= SCTP_UNORDERED;
257 		}
258 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
259 			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
260 		}
261 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
262 			nxtinfo->nxt_flags |= SCTP_COMPLETE;
263 		}
264 		nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
265 		nxtinfo->nxt_length = seinfo->serinfo_next_length;
266 		nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
267 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
268 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
269 	}
270 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
271 		cmh->cmsg_level = IPPROTO_SCTP;
272 		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
273 		if (use_extended) {
274 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
275 			cmh->cmsg_type = SCTP_EXTRCV;
276 			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
277 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
278 		} else {
279 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
280 			cmh->cmsg_type = SCTP_SNDRCV;
281 			*outinfo = *sinfo;
282 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
283 		}
284 	}
285 	return (ret);
286 }
287 
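/*
 * Userland sketch (illustrative only, not part of this file): a receiver
 * would walk the ancillary data built above roughly like
 *
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
 *	     cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *		if ((cmsg->cmsg_level == IPPROTO_SCTP) &&
 *		    (cmsg->cmsg_type == SCTP_RCVINFO)) {
 *			struct sctp_rcvinfo *ri;
 *
 *			ri = (struct sctp_rcvinfo *)CMSG_DATA(cmsg);
 *			... use ri->rcv_sid, ri->rcv_ppid, ri->rcv_tsn ...
 *		}
 *	}
 */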
288 
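/*
 * Move a TSN that has been (or is being) delivered from the renegable
 * mapping_array into the non-renegable nr_mapping_array, so later SACKs
 * report it in an NR gap block and it will not be revoked. Only needed
 * when reneging (the sctp_do_drain sysctl) is enabled.
 */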
289 static void
290 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
291 {
292 	uint32_t gap, i, cumackp1;
293 	int fnd = 0;
294 	int in_r = 0, in_nr = 0;
295 
296 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
297 		return;
298 	}
299 	cumackp1 = asoc->cumulative_tsn + 1;
300 	if (SCTP_TSN_GT(cumackp1, tsn)) {
301 		/*
302 		 * this tsn is behind the cum ack and thus we don't need to
303 		 * worry about it being moved from one to the other.
304 		 */
305 		return;
306 	}
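	/*
	 * gap is the bit offset of tsn from mapping_array_base_tsn
	 * (computed modulo 2^32); it indexes the same slot in both the
	 * renegable and non-renegable mapping arrays.
	 */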
307 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
308 	in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
309 	in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
310 	if ((in_r == 0) && (in_nr == 0)) {
311 #ifdef INVARIANTS
312 		panic("Things are really messed up now");
313 #else
314 		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
315 		sctp_print_mapping_array(asoc);
316 #endif
317 	}
318 	if (in_nr == 0)
319 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
320 	if (in_r)
321 		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
322 	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
323 		asoc->highest_tsn_inside_nr_map = tsn;
324 	}
325 	if (tsn == asoc->highest_tsn_inside_map) {
326 		/* We must back down to see what the new highest is */
327 		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
328 			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
329 			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
330 				asoc->highest_tsn_inside_map = i;
331 				fnd = 1;
332 				break;
333 			}
334 		}
335 		if (!fnd) {
336 			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
337 		}
338 	}
339 }
340 
341 static int
342 sctp_place_control_in_stream(struct sctp_stream_in *strm,
343     struct sctp_association *asoc,
344     struct sctp_queued_to_read *control)
345 {
346 	struct sctp_queued_to_read *at;
347 	struct sctp_readhead *q;
348 	uint8_t flags, unordered;
349 
350 	flags = (control->sinfo_flags >> 8);
351 	unordered = flags & SCTP_DATA_UNORDERED;
352 	if (unordered) {
353 		q = &strm->uno_inqueue;
354 		if (asoc->idata_supported == 0) {
355 			if (!TAILQ_EMPTY(q)) {
356 				/*
357 				/*
358 				 * Only one control can be here in old
359 				 * style -- abort.
360 				 */
361 			}
362 			TAILQ_INSERT_TAIL(q, control, next_instrm);
363 			control->on_strm_q = SCTP_ON_UNORDERED;
364 			return (0);
365 		}
366 	} else {
367 		q = &strm->inqueue;
368 	}
369 	if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
370 		control->end_added = 1;
371 		control->first_frag_seen = 1;
372 		control->last_frag_seen = 1;
373 	}
374 	if (TAILQ_EMPTY(q)) {
375 		/* Empty queue */
376 		TAILQ_INSERT_HEAD(q, control, next_instrm);
377 		if (unordered) {
378 			control->on_strm_q = SCTP_ON_UNORDERED;
379 		} else {
380 			control->on_strm_q = SCTP_ON_ORDERED;
381 		}
382 		return (0);
383 	} else {
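		/*
		 * Note: SCTP_MID_GT()/SCTP_MID_EQ() compare in
		 * serial-number arithmetic (32-bit MIDs with I-DATA,
		 * 16-bit SSNs otherwise), so the ordered insert below
		 * stays correct across wrap-around.
		 */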
384 		TAILQ_FOREACH(at, q, next_instrm) {
385 			if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
386 				/*
387 				 * one in queue is bigger than the new one,
388 				 * insert before this one
389 				 */
390 				TAILQ_INSERT_BEFORE(at, control, next_instrm);
391 				if (unordered) {
392 					control->on_strm_q = SCTP_ON_UNORDERED;
393 				} else {
394 					control->on_strm_q = SCTP_ON_ORDERED;
395 				}
396 				break;
397 			} else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
398 				/*
399 				 * Gak, he sent me a duplicate message
400 				 * ID. Return -1 to abort.
401 				 */
402 				return (-1);
403 			} else {
404 				if (TAILQ_NEXT(at, next_instrm) == NULL) {
405 					/*
406 					 * We are at the end, insert it
407 					 * after this one
408 					 */
409 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
410 						sctp_log_strm_del(control, at,
411 						    SCTP_STR_LOG_FROM_INSERT_TL);
412 					}
413 					TAILQ_INSERT_AFTER(q, at, control, next_instrm);
414 					if (unordered) {
415 						control->on_strm_q = SCTP_ON_UNORDERED;
416 					} else {
417 						control->on_strm_q = SCTP_ON_ORDERED;
418 					}
419 					break;
420 				}
421 			}
422 		}
423 	}
424 	return (0);
425 }
426 
427 static void
428 sctp_abort_in_reasm(struct sctp_tcb *stcb,
429     struct sctp_queued_to_read *control,
430     struct sctp_tmit_chunk *chk,
431     int *abort_flag, int opspot)
432 {
433 	char msg[SCTP_DIAG_INFO_LEN];
434 	struct mbuf *oper;
435 
436 	if (stcb->asoc.idata_supported) {
437 		SCTP_SNPRINTF(msg, sizeof(msg),
438 		    "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
439 		    opspot,
440 		    control->fsn_included,
441 		    chk->rec.data.tsn,
442 		    chk->rec.data.sid,
443 		    chk->rec.data.fsn, chk->rec.data.mid);
444 	} else {
445 		SCTP_SNPRINTF(msg, sizeof(msg),
446 		    "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
447 		    opspot,
448 		    control->fsn_included,
449 		    chk->rec.data.tsn,
450 		    chk->rec.data.sid,
451 		    chk->rec.data.fsn,
452 		    (uint16_t)chk->rec.data.mid);
453 	}
454 	oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
455 	sctp_m_freem(chk->data);
456 	chk->data = NULL;
457 	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
458 	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
459 	sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
460 	*abort_flag = 1;
461 }
462 
463 static void
464 sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
465 {
466 	/*
467 	 * The control could not be placed and must be cleaned.
468 	 */
469 	struct sctp_tmit_chunk *chk, *nchk;
470 
471 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
472 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
473 		if (chk->data)
474 			sctp_m_freem(chk->data);
475 		chk->data = NULL;
476 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
477 	}
478 	sctp_free_remote_addr(control->whoFrom);
479 	if (control->data) {
480 		sctp_m_freem(control->data);
481 		control->data = NULL;
482 	}
483 	sctp_free_a_readq(stcb, control);
484 }
485 
486 /*
487  * Queue the chunk either right into the socket buffer, if it is the next
488  * one to go, OR put it in the correct place in the delivery queue.  If we
489  * do append to the so_buf, keep doing so until we hit one that is out of
490  * order, as long as the controls entered are non-fragmented.
491  */
492 static void
493 sctp_queue_data_to_stream(struct sctp_tcb *stcb,
494     struct sctp_association *asoc,
495     struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
496 {
497 	/*
498 	 * FIX-ME maybe? What happens when the SSN wraps? If we are getting
499 	 * all the data in one stream this could happen quite rapidly. One
500 	 * could use the TSN to keep track of things, but this scheme breaks
501 	 * down in the other type of stream usage that could occur. Send a
502 	 * single message to stream 0, send 4 billion messages to stream 1,
503 	 * now send a message to stream 0. You have a situation where the
504 	 * TSN has wrapped but not in the stream. Is this worth worrying
505 	 * about, or should we just change our queue sort at the bottom to
506 	 * be by TSN?
507 	 *
508 	 * Could it also be legal for a peer to send SSN 1 with TSN 2 and
509 	 * SSN 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
510 	 * assignment this could happen... and I don't see how this would be
511 	 * a violation. So for now I am undecided and will leave the sort by
512 	 * SSN alone. Maybe a hybrid approach is the answer.
513 	 *
514 	 */
515 	struct sctp_queued_to_read *at;
516 	int queue_needed;
517 	uint32_t nxt_todel;
518 	struct mbuf *op_err;
519 	struct sctp_stream_in *strm;
520 	char msg[SCTP_DIAG_INFO_LEN];
521 
522 	strm = &asoc->strmin[control->sinfo_stream];
523 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
524 		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
525 	}
526 	if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
527 		/* The incoming SSN is behind where we last delivered? */
528 		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
529 		    strm->last_mid_delivered, control->mid);
530 		/*
531 		 * throw it in the stream so it gets cleaned up in
532 		 * association destruction
533 		 */
534 		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
535 		if (asoc->idata_supported) {
536 			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
537 			    strm->last_mid_delivered, control->sinfo_tsn,
538 			    control->sinfo_stream, control->mid);
539 		} else {
540 			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
541 			    (uint16_t)strm->last_mid_delivered,
542 			    control->sinfo_tsn,
543 			    control->sinfo_stream,
544 			    (uint16_t)control->mid);
545 		}
546 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
547 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
548 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
549 		*abort_flag = 1;
550 		return;
551 
552 	}
553 	queue_needed = 1;
554 	asoc->size_on_all_streams += control->length;
555 	sctp_ucount_incr(asoc->cnt_on_all_streams);
556 	nxt_todel = strm->last_mid_delivered + 1;
557 	if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
558 		/* can be delivered right away? */
559 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
560 			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
561 		}
562 		/* EY it won't be queued if it can be delivered directly */
563 		queue_needed = 0;
564 		if (asoc->size_on_all_streams >= control->length) {
565 			asoc->size_on_all_streams -= control->length;
566 		} else {
567 #ifdef INVARIANTS
568 			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
569 #else
570 			asoc->size_on_all_streams = 0;
571 #endif
572 		}
573 		sctp_ucount_decr(asoc->cnt_on_all_streams);
574 		strm->last_mid_delivered++;
575 		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
576 		sctp_add_to_readq(stcb->sctp_ep, stcb,
577 		    control,
578 		    &stcb->sctp_socket->so_rcv, 1,
579 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
580 		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
581 			/* all delivered */
582 			nxt_todel = strm->last_mid_delivered + 1;
583 			if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
584 			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
585 				if (control->on_strm_q == SCTP_ON_ORDERED) {
586 					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
587 					if (asoc->size_on_all_streams >= control->length) {
588 						asoc->size_on_all_streams -= control->length;
589 					} else {
590 #ifdef INVARIANTS
591 						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
592 #else
593 						asoc->size_on_all_streams = 0;
594 #endif
595 					}
596 					sctp_ucount_decr(asoc->cnt_on_all_streams);
597 #ifdef INVARIANTS
598 				} else {
599 					panic("Huh control: %p is on_strm_q: %d",
600 					    control, control->on_strm_q);
601 #endif
602 				}
603 				control->on_strm_q = 0;
604 				strm->last_mid_delivered++;
605 				/*
606 				 * We ignore the return of deliver_data here,
607 				 * since we can always hold the chunk on the
608 				 * d-queue. And we have a finite number that
609 				 * can be delivered from the strq.
610 				 */
611 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
612 					sctp_log_strm_del(control, NULL,
613 					    SCTP_STR_LOG_FROM_IMMED_DEL);
614 				}
615 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
616 				sctp_add_to_readq(stcb->sctp_ep, stcb,
617 				    control,
618 				    &stcb->sctp_socket->so_rcv, 1,
619 				    SCTP_READ_LOCK_NOT_HELD,
620 				    SCTP_SO_LOCKED);
621 				continue;
622 			} else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
623 				*need_reasm = 1;
624 			}
625 			break;
626 		}
627 	}
628 	if (queue_needed) {
629 		/*
630 		 * Ok, we did not deliver this guy, find the correct place
631 		 * to put it on the queue.
632 		 */
633 		if (sctp_place_control_in_stream(strm, asoc, control)) {
634 			SCTP_SNPRINTF(msg, sizeof(msg),
635 			    "Queue to str MID: %u duplicate", control->mid);
636 			sctp_clean_up_control(stcb, control);
637 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
638 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
639 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
640 			*abort_flag = 1;
641 		}
642 	}
643 }
644 
645 
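/*
 * Walk control->data, dropping any zero-length mbufs, recomputing
 * control->length, and setting control->tail_mbuf so that later appends
 * are O(1).
 */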
646 static void
647 sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
648 {
649 	struct mbuf *m, *prev = NULL;
650 	struct sctp_tcb *stcb;
651 
652 	stcb = control->stcb;
653 	control->held_length = 0;
654 	control->length = 0;
655 	m = control->data;
656 	while (m) {
657 		if (SCTP_BUF_LEN(m) == 0) {
658 			/* Skip mbufs with NO length */
659 			if (prev == NULL) {
660 				/* First one */
661 				control->data = sctp_m_free(m);
662 				m = control->data;
663 			} else {
664 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
665 				m = SCTP_BUF_NEXT(prev);
666 			}
667 			if (m == NULL) {
668 				control->tail_mbuf = prev;
669 			}
670 			continue;
671 		}
672 		prev = m;
673 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
674 		if (control->on_read_q) {
675 			/*
676 			/*
677 			 * On the read queue, so we must increment the SB
678 			 * counters; the caller has taken any SB locks needed.
679 			 */
680 		}
681 		m = SCTP_BUF_NEXT(m);
682 	}
683 	if (prev) {
684 		control->tail_mbuf = prev;
685 	}
686 }
687 
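/*
 * Append the mbuf chain m at control->tail_mbuf (falling back to
 * sctp_setup_tail_pointer() when no tail exists yet), again pruning
 * zero-length mbufs and accounting the appended bytes in *added.
 */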
688 static void
689 sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
690 {
691 	struct mbuf *prev = NULL;
692 	struct sctp_tcb *stcb;
693 
694 	stcb = control->stcb;
695 	if (stcb == NULL) {
696 #ifdef INVARIANTS
697 		panic("Control broken");
698 #else
699 		return;
700 #endif
701 	}
702 	if (control->tail_mbuf == NULL) {
703 		/* TSNH */
704 		sctp_m_freem(control->data);
705 		control->data = m;
706 		sctp_setup_tail_pointer(control);
707 		return;
708 	}
709 	control->tail_mbuf->m_next = m;
710 	while (m) {
711 		if (SCTP_BUF_LEN(m) == 0) {
712 			/* Skip mbufs with NO length */
713 			if (prev == NULL) {
714 				/* First one */
715 				control->tail_mbuf->m_next = sctp_m_free(m);
716 				m = control->tail_mbuf->m_next;
717 			} else {
718 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
719 				m = SCTP_BUF_NEXT(prev);
720 			}
721 			if (m == NULL) {
722 				control->tail_mbuf = prev;
723 			}
724 			continue;
725 		}
726 		prev = m;
727 		if (control->on_read_q) {
728 			/*
729 			/*
730 			 * On the read queue, so we must increment the SB
731 			 * counters; the caller has taken any SB locks needed.
732 			 */
733 		}
734 		*added += SCTP_BUF_LEN(m);
735 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
736 		m = SCTP_BUF_NEXT(m);
737 	}
738 	if (prev) {
739 		control->tail_mbuf = prev;
740 	}
741 }
742 
743 static void
744 sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
745 {
746 	memset(nc, 0, sizeof(struct sctp_queued_to_read));
747 	nc->sinfo_stream = control->sinfo_stream;
748 	nc->mid = control->mid;
749 	TAILQ_INIT(&nc->reasm);
750 	nc->top_fsn = control->top_fsn;
752 	nc->sinfo_flags = control->sinfo_flags;
753 	nc->sinfo_ppid = control->sinfo_ppid;
754 	nc->sinfo_context = control->sinfo_context;
755 	nc->fsn_included = 0xffffffff;
756 	nc->sinfo_tsn = control->sinfo_tsn;
757 	nc->sinfo_cumtsn = control->sinfo_cumtsn;
758 	nc->sinfo_assoc_id = control->sinfo_assoc_id;
759 	nc->whoFrom = control->whoFrom;
760 	atomic_add_int(&nc->whoFrom->ref_count, 1);
761 	nc->stcb = control->stcb;
762 	nc->port_from = control->port_from;
763 	nc->do_not_ref_stcb = control->do_not_ref_stcb;
764 }
765 
766 static void
767 sctp_reset_a_control(struct sctp_queued_to_read *control,
768     struct sctp_inpcb *inp, uint32_t tsn)
769 {
770 	control->fsn_included = tsn;
771 	if (control->on_read_q) {
772 		/*
773 		 * We have to purge it from there, hopefully this will work
774 		 * :-)
775 		 */
776 		TAILQ_REMOVE(&inp->read_queue, control, next);
777 		control->on_read_q = 0;
778 	}
779 }
780 
781 static int
782 sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
783     struct sctp_association *asoc,
784     struct sctp_stream_in *strm,
785     struct sctp_queued_to_read *control,
786     uint32_t pd_point,
787     int inp_read_lock_held)
788 {
789 	/*
790 	 * Special handling for the old un-ordered data chunk. All the
791 	 * chunks/TSNs go to MID 0. So we have to do the old-style watching
792 	 * to see if we have it all. If we return 1, no other control
793 	 * entries on the un-ordered queue will be looked at. In theory
794 	 * there should be no other entries; in reality there could be, if
795 	 * the guy is sending both unordered NDATA and unordered DATA...
796 	 */
797 	struct sctp_tmit_chunk *chk, *lchk, *tchk;
798 	uint32_t fsn;
799 	struct sctp_queued_to_read *nc;
800 	int cnt_added;
801 
802 	if (control->first_frag_seen == 0) {
803 		/* Nothing we can do, we have not seen the first piece yet */
804 		return (1);
805 	}
806 	/* Collapse any we can */
807 	cnt_added = 0;
808 restart:
809 	fsn = control->fsn_included + 1;
810 	/* Now what can we add? */
811 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
812 		if (chk->rec.data.fsn == fsn) {
813 			/* Ok, let's add it */
814 			sctp_alloc_a_readq(stcb, nc);
815 			if (nc == NULL) {
816 				break;
817 			}
818 			memset(nc, 0, sizeof(struct sctp_queued_to_read));
819 			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
820 			sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
821 			fsn++;
822 			cnt_added++;
823 			chk = NULL;
824 			if (control->end_added) {
825 				/* We are done */
826 				if (!TAILQ_EMPTY(&control->reasm)) {
827 					/*
828 					 * Ok we have to move anything left
829 					 * on the control queue to a new
830 					 * control.
831 					 */
832 					sctp_build_readq_entry_from_ctl(nc, control);
833 					tchk = TAILQ_FIRST(&control->reasm);
834 					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
835 						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
836 						if (asoc->size_on_reasm_queue >= tchk->send_size) {
837 							asoc->size_on_reasm_queue -= tchk->send_size;
838 						} else {
839 #ifdef INVARIANTS
840 							panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
841 #else
842 							asoc->size_on_reasm_queue = 0;
843 #endif
844 						}
845 						sctp_ucount_decr(asoc->cnt_on_reasm_queue);
846 						nc->first_frag_seen = 1;
847 						nc->fsn_included = tchk->rec.data.fsn;
848 						nc->data = tchk->data;
849 						nc->sinfo_ppid = tchk->rec.data.ppid;
850 						nc->sinfo_tsn = tchk->rec.data.tsn;
851 						sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
852 						tchk->data = NULL;
853 						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
854 						sctp_setup_tail_pointer(nc);
855 						tchk = TAILQ_FIRST(&control->reasm);
856 					}
857 					/* Spin the rest onto the queue */
858 					while (tchk) {
859 						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
860 						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
861 						tchk = TAILQ_FIRST(&control->reasm);
862 					}
863 					/*
864 					 * Now let's add it to the queue
865 					 * after removing control.
866 					 */
867 					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
868 					nc->on_strm_q = SCTP_ON_UNORDERED;
869 					if (control->on_strm_q) {
870 						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
871 						control->on_strm_q = 0;
872 					}
873 				}
874 				if (control->pdapi_started) {
875 					strm->pd_api_started = 0;
876 					control->pdapi_started = 0;
877 				}
878 				if (control->on_strm_q) {
879 					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
880 					control->on_strm_q = 0;
881 					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
882 				}
883 				if (control->on_read_q == 0) {
884 					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
885 					    &stcb->sctp_socket->so_rcv, control->end_added,
886 					    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
887 				}
888 				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
889 				if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
890 					/*
891 					 * Switch to the new guy and
892 					 * continue
893 					 */
894 					control = nc;
895 					goto restart;
896 				} else {
897 					if (nc->on_strm_q == 0) {
898 						sctp_free_a_readq(stcb, nc);
899 					}
900 				}
901 				return (1);
902 			} else {
903 				sctp_free_a_readq(stcb, nc);
904 			}
905 		} else {
906 			/* Can't add more */
907 			break;
908 		}
909 	}
910 	if (cnt_added && strm->pd_api_started) {
911 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
912 	}
913 	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
914 		strm->pd_api_started = 1;
915 		control->pdapi_started = 1;
916 		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
917 		    &stcb->sctp_socket->so_rcv, control->end_added,
918 		    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
919 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
920 		return (0);
921 	} else {
922 		return (1);
923 	}
924 }
925 
926 static void
927 sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
928     struct sctp_association *asoc,
929     struct sctp_queued_to_read *control,
930     struct sctp_tmit_chunk *chk,
931     int *abort_flag)
932 {
933 	struct sctp_tmit_chunk *at;
934 	int inserted;
935 
936 	/*
937 	 * Here we need to place the chunk into the control structure sorted
938 	 * in the correct order.
939 	 */
940 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
941 		/* It's the very first one. */
942 		SCTPDBG(SCTP_DEBUG_XXX,
943 		    "chunk is a first fsn: %u becomes fsn_included\n",
944 		    chk->rec.data.fsn);
945 		at = TAILQ_FIRST(&control->reasm);
946 		if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) {
947 			/*
948 			 * The first chunk in the reassembly has a smaller
949 			 * TSN than this one; even though this one has a
950 			 * FIRST, it must be from a subsequent message.
951 			 */
952 			goto place_chunk;
953 		}
954 		if (control->first_frag_seen) {
955 			/*
956 			 * In old un-ordered mode we can reassemble multiple
957 			 * messages on one control, as long as the next FIRST
958 			 * is greater than the old first (TSN-, i.e. FSN-,
959 			 * wise).
960 			 */
961 			struct mbuf *tdata;
962 			uint32_t tmp;
963 
964 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
965 				/*
966 				 * Easy case: the start of a new guy beyond
967 				 * the lowest.
968 				 */
969 				goto place_chunk;
970 			}
971 			if ((chk->rec.data.fsn == control->fsn_included) ||
972 			    (control->pdapi_started)) {
973 				/*
974 				 * Ok, this should not happen; if it does, we
975 				 * started the pd-api on the higher TSN
976 				 * (since the equals case is a TSN failure,
977 				 * it must be that).
978 				 *
979 				 * We are completely hosed in that case,
980 				 * since I have no way to recover. This
981 				 * really will only happen if we can get more
982 				 * TSNs higher before the pd-api point.
983 				 */
984 				sctp_abort_in_reasm(stcb, control, chk,
985 				    abort_flag,
986 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
987 
988 				return;
989 			}
990 			/*
991 			 * Ok, we have two firsts and the one we just got is
992 			 * smaller than the one we previously placed... yuck!
993 			 * We must swap them out.
994 			 */
995 			/* swap the mbufs */
996 			tdata = control->data;
997 			control->data = chk->data;
998 			chk->data = tdata;
999 			/* Save the lengths */
1000 			chk->send_size = control->length;
1001 			/* Recompute length of control and tail pointer */
1002 			sctp_setup_tail_pointer(control);
1003 			/* Fix the FSN included */
1004 			tmp = control->fsn_included;
1005 			control->fsn_included = chk->rec.data.fsn;
1006 			chk->rec.data.fsn = tmp;
1007 			/* Fix the TSN included */
1008 			tmp = control->sinfo_tsn;
1009 			control->sinfo_tsn = chk->rec.data.tsn;
1010 			chk->rec.data.tsn = tmp;
1011 			/* Fix the PPID included */
1012 			tmp = control->sinfo_ppid;
1013 			control->sinfo_ppid = chk->rec.data.ppid;
1014 			chk->rec.data.ppid = tmp;
1015 			/* Fix tail pointer */
1016 			goto place_chunk;
1017 		}
1018 		control->first_frag_seen = 1;
1019 		control->fsn_included = chk->rec.data.fsn;
1020 		control->top_fsn = chk->rec.data.fsn;
1021 		control->sinfo_tsn = chk->rec.data.tsn;
1022 		control->sinfo_ppid = chk->rec.data.ppid;
1023 		control->data = chk->data;
1024 		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1025 		chk->data = NULL;
1026 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1027 		sctp_setup_tail_pointer(control);
1028 		return;
1029 	}
1030 place_chunk:
1031 	inserted = 0;
1032 	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1033 		if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1034 			/*
1035 			 * This one in queue is bigger than the new one,
1036 			 * insert the new one before at.
1037 			 */
1038 			asoc->size_on_reasm_queue += chk->send_size;
1039 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1040 			inserted = 1;
1041 			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1042 			break;
1043 		} else if (at->rec.data.fsn == chk->rec.data.fsn) {
1044 			/*
1045 			 * They sent a duplicate fsn number. This really
1046 			 * should not happen since the FSN is a TSN and it
1047 			 * should have been dropped earlier.
1048 			 */
1049 			sctp_abort_in_reasm(stcb, control, chk,
1050 			    abort_flag,
1051 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
1052 			return;
1053 		}
1054 
1055 	}
1056 	if (inserted == 0) {
1057 		/* It's at the end */
1058 		asoc->size_on_reasm_queue += chk->send_size;
1059 		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1060 		control->top_fsn = chk->rec.data.fsn;
1061 		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1062 	}
1063 }
1064 
1065 static int
1066 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
1067     struct sctp_stream_in *strm, int inp_read_lock_held)
1068 {
1069 	/*
1070 	 * Given a stream, strm, see if any of the SSNs on it that are
1071 	 * fragmented are ready to deliver. If so, go ahead and place them on
1072 	 * the read queue. In so placing, if we have hit the end, then we
1073 	 * need to remove them from the stream's queue.
1074 	 */
1075 	struct sctp_queued_to_read *control, *nctl = NULL;
1076 	uint32_t next_to_del;
1077 	uint32_t pd_point;
1078 	int ret = 0;
1079 
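	/*
	 * pd_point is the partial delivery threshold: once a single,
	 * still-incomplete message holds at least this many bytes, we start
	 * handing it to the application through the partial delivery API
	 * instead of waiting for the last fragment.
	 */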
1080 	if (stcb->sctp_socket) {
1081 		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
1082 		    stcb->sctp_ep->partial_delivery_point);
1083 	} else {
1084 		pd_point = stcb->sctp_ep->partial_delivery_point;
1085 	}
1086 	control = TAILQ_FIRST(&strm->uno_inqueue);
1087 
1088 	if ((control != NULL) &&
1089 	    (asoc->idata_supported == 0)) {
1090 		/* Special handling needed for "old" data format */
1091 		if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
1092 			goto done_un;
1093 		}
1094 	}
1095 	if (strm->pd_api_started) {
1096 		/* Can't add more */
1097 		return (0);
1098 	}
1099 	while (control) {
1100 		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
1101 		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
1102 		nctl = TAILQ_NEXT(control, next_instrm);
1103 		if (control->end_added) {
1104 			/* We just put the last bit on */
1105 			if (control->on_strm_q) {
1106 #ifdef INVARIANTS
1107 				if (control->on_strm_q != SCTP_ON_UNORDERED) {
1108 					panic("Huh control: %p on_q: %d -- not unordered?",
1109 					    control, control->on_strm_q);
1110 				}
1111 #endif
1112 				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1113 				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1114 				if (asoc->size_on_all_streams >= control->length) {
1115 					asoc->size_on_all_streams -= control->length;
1116 				} else {
1117 #ifdef INVARIANTS
1118 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1119 #else
1120 					asoc->size_on_all_streams = 0;
1121 #endif
1122 				}
1123 				sctp_ucount_decr(asoc->cnt_on_all_streams);
1124 				control->on_strm_q = 0;
1125 			}
1126 			if (control->on_read_q == 0) {
1127 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1128 				    control,
1129 				    &stcb->sctp_socket->so_rcv, control->end_added,
1130 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1131 			}
1132 		} else {
1133 			/* Can we do a PD-API for this un-ordered guy? */
1134 			if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
1135 				strm->pd_api_started = 1;
1136 				control->pdapi_started = 1;
1137 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1138 				    control,
1139 				    &stcb->sctp_socket->so_rcv, control->end_added,
1140 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1141 
1142 				break;
1143 			}
1144 		}
1145 		control = nctl;
1146 	}
1147 done_un:
1148 	control = TAILQ_FIRST(&strm->inqueue);
1149 	if (strm->pd_api_started) {
1150 		/* Can't add more */
1151 		return (0);
1152 	}
1153 	if (control == NULL) {
1154 		return (ret);
1155 	}
1156 	if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
1157 		/*
1158 		 * Ok, the guy at the top that was being partially delivered
1159 		 * has completed, so we remove it. Note the pd_api flag was
1160 		 * taken off when the chunk was merged in
1161 		 * sctp_queue_data_for_reasm below.
1162 		 */
1163 		nctl = TAILQ_NEXT(control, next_instrm);
1164 		SCTPDBG(SCTP_DEBUG_XXX,
1165 		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
1166 		    control, control->end_added, control->mid,
1167 		    control->top_fsn, control->fsn_included,
1168 		    strm->last_mid_delivered);
1169 		if (control->end_added) {
1170 			if (control->on_strm_q) {
1171 #ifdef INVARIANTS
1172 				if (control->on_strm_q != SCTP_ON_ORDERED) {
1173 					panic("Huh control: %p on_q: %d -- not ordered?",
1174 					    control, control->on_strm_q);
1175 				}
1176 #endif
1177 				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1178 				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1179 				if (asoc->size_on_all_streams >= control->length) {
1180 					asoc->size_on_all_streams -= control->length;
1181 				} else {
1182 #ifdef INVARIANTS
1183 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1184 #else
1185 					asoc->size_on_all_streams = 0;
1186 #endif
1187 				}
1188 				sctp_ucount_decr(asoc->cnt_on_all_streams);
1189 				control->on_strm_q = 0;
1190 			}
1191 			if (strm->pd_api_started && control->pdapi_started) {
1192 				control->pdapi_started = 0;
1193 				strm->pd_api_started = 0;
1194 			}
1195 			if (control->on_read_q == 0) {
1196 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1197 				    control,
1198 				    &stcb->sctp_socket->so_rcv, control->end_added,
1199 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1200 			}
1201 			control = nctl;
1202 		}
1203 	}
1204 	if (strm->pd_api_started) {
1205 		/*
1206 		 * Can't add more; we must have gotten an un-ordered entry
1207 		 * above that is being partially delivered.
1208 		 */
1209 		return (0);
1210 	}
1211 deliver_more:
1212 	next_to_del = strm->last_mid_delivered + 1;
1213 	if (control) {
1214 		SCTPDBG(SCTP_DEBUG_XXX,
1215 		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
1216 		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
1217 		    next_to_del);
1218 		nctl = TAILQ_NEXT(control, next_instrm);
1219 		if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
1220 		    (control->first_frag_seen)) {
1221 			int done;
1222 
1223 			/* Ok we can deliver it onto the stream. */
1224 			if (control->end_added) {
1225 				/* We are done with it afterwards */
1226 				if (control->on_strm_q) {
1227 #ifdef INVARIANTS
1228 					if (control->on_strm_q != SCTP_ON_ORDERED) {
1229 						panic("Huh control: %p on_q: %d -- not ordered?",
1230 						    control, control->on_strm_q);
1231 					}
1232 #endif
1233 					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1234 					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1235 					if (asoc->size_on_all_streams >= control->length) {
1236 						asoc->size_on_all_streams -= control->length;
1237 					} else {
1238 #ifdef INVARIANTS
1239 						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1240 #else
1241 						asoc->size_on_all_streams = 0;
1242 #endif
1243 					}
1244 					sctp_ucount_decr(asoc->cnt_on_all_streams);
1245 					control->on_strm_q = 0;
1246 				}
1247 				ret++;
1248 			}
1249 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1250 				/*
1251 				 * A singleton now slipping through - mark
1252 				 * it non-revokable too
1253 				 */
1254 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1255 			} else if (control->end_added == 0) {
1256 				/*
1257 				 * Check if we can defer adding until it's
1258 				 * all there.
1259 				 */
1260 				if ((control->length < pd_point) || (strm->pd_api_started)) {
1261 					/*
1262 					 * Don't need it or cannot add more
1263 					 * (one being delivered that way)
1264 					 */
1265 					goto out;
1266 				}
1267 			}
1268 			done = (control->end_added) && (control->last_frag_seen);
1269 			if (control->on_read_q == 0) {
1270 				if (!done) {
1271 					if (asoc->size_on_all_streams >= control->length) {
1272 						asoc->size_on_all_streams -= control->length;
1273 					} else {
1274 #ifdef INVARIANTS
1275 						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1276 #else
1277 						asoc->size_on_all_streams = 0;
1278 #endif
1279 					}
1280 					strm->pd_api_started = 1;
1281 					control->pdapi_started = 1;
1282 				}
1283 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1284 				    control,
1285 				    &stcb->sctp_socket->so_rcv, control->end_added,
1286 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1287 			}
1288 			strm->last_mid_delivered = next_to_del;
1289 			if (done) {
1290 				control = nctl;
1291 				goto deliver_more;
1292 			}
1293 		}
1294 	}
1295 out:
1296 	return (ret);
1297 }
1298 
1299 
1300 uint32_t
1301 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
1302     struct sctp_stream_in *strm,
1303     struct sctp_tcb *stcb, struct sctp_association *asoc,
1304     struct sctp_tmit_chunk *chk, int hold_rlock)
1305 {
1306 	/*
1307 	 * Given a control and a chunk, merge the data from the chk onto the
1308 	 * control and free up the chunk resources.
1309 	 */
1310 	uint32_t added = 0;
1311 	int i_locked = 0;
1312 
1313 	if (control->on_read_q && (hold_rlock == 0)) {
1314 		/*
1315 		 * It's being pd-api'd, so we must take some locks.
1316 		 */
1317 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
1318 		i_locked = 1;
1319 	}
1320 	if (control->data == NULL) {
1321 		control->data = chk->data;
1322 		sctp_setup_tail_pointer(control);
1323 	} else {
1324 		sctp_add_to_tail_pointer(control, chk->data, &added);
1325 	}
1326 	control->fsn_included = chk->rec.data.fsn;
1327 	asoc->size_on_reasm_queue -= chk->send_size;
1328 	sctp_ucount_decr(asoc->cnt_on_reasm_queue);
1329 	sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1330 	chk->data = NULL;
1331 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1332 		control->first_frag_seen = 1;
1333 		control->sinfo_tsn = chk->rec.data.tsn;
1334 		control->sinfo_ppid = chk->rec.data.ppid;
1335 	}
1336 	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1337 		/* It's complete */
1338 		if ((control->on_strm_q) && (control->on_read_q)) {
1339 			if (control->pdapi_started) {
1340 				control->pdapi_started = 0;
1341 				strm->pd_api_started = 0;
1342 			}
1343 			if (control->on_strm_q == SCTP_ON_UNORDERED) {
1344 				/* Unordered */
1345 				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1346 				control->on_strm_q = 0;
1347 			} else if (control->on_strm_q == SCTP_ON_ORDERED) {
1348 				/* Ordered */
1349 				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1350 				/*
1351 				 * Don't need to decrement
1352 				 * size_on_all_streams, since control is on
1353 				 * the read queue.
1354 				 */
1355 				sctp_ucount_decr(asoc->cnt_on_all_streams);
1356 				control->on_strm_q = 0;
1357 #ifdef INVARIANTS
1358 			} else if (control->on_strm_q) {
1359 				panic("Unknown state on ctrl: %p on_strm_q: %d", control,
1360 				    control->on_strm_q);
1361 #endif
1362 			}
1363 		}
1364 		control->end_added = 1;
1365 		control->last_frag_seen = 1;
1366 	}
1367 	if (i_locked) {
1368 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
1369 	}
1370 	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1371 	return (added);
1372 }
1373 
1374 /*
1375  * Dump onto the re-assembly queue, in its proper place. After dumping on
1376  * the queue, see if anything can be delivered. If so, pull it off (or as
1377  * much as we can). If we run out of space then we must dump what we can
1378  * and set the appropriate flag to say we queued what we could.
1379  */
1380 static void
1381 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1382     struct sctp_queued_to_read *control,
1383     struct sctp_tmit_chunk *chk,
1384     int created_control,
1385     int *abort_flag, uint32_t tsn)
1386 {
1387 	uint32_t next_fsn;
1388 	struct sctp_tmit_chunk *at, *nat;
1389 	struct sctp_stream_in *strm;
1390 	int do_wakeup, unordered;
1391 	uint32_t lenadded;
1392 
1393 	strm = &asoc->strmin[control->sinfo_stream];
1394 	/*
1395 	 * For old un-ordered data chunks.
1396 	 */
1397 	if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
1398 		unordered = 1;
1399 	} else {
1400 		unordered = 0;
1401 	}
1402 	/* Must be added to the stream-in queue */
1403 	if (created_control) {
1404 		if ((unordered == 0) || (asoc->idata_supported)) {
1405 			sctp_ucount_incr(asoc->cnt_on_all_streams);
1406 		}
1407 		if (sctp_place_control_in_stream(strm, asoc, control)) {
1408 			/* Duplicate SSN? */
1409 			sctp_abort_in_reasm(stcb, control, chk,
1410 			    abort_flag,
1411 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1412 			sctp_clean_up_control(stcb, control);
1413 			return;
1414 		}
1415 		if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
1416 			/*
1417 			 * Ok, we created this control; now let's validate
1418 			 * that it is legal, i.e. there is a B bit set. If
1419 			 * not, and we have up to the cum-ack, it is invalid.
1420 			 */
1421 			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1422 				sctp_abort_in_reasm(stcb, control, chk,
1423 				    abort_flag,
1424 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1425 				return;
1426 			}
1427 		}
1428 	}
1429 	if ((asoc->idata_supported == 0) && (unordered == 1)) {
1430 		sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
1431 		return;
1432 	}
1433 	/*
1434 	 * Ok, we must queue the chunk into the reassembly portion:
1435 	 * o If it is the first, it goes into the control mbuf.
1436 	 * o If it is not first but the next in sequence, it goes into the
1437 	 *   control, and each succeeding one in order also goes.
1438 	 * o If it is not in order, we place it on the list in its place.
1439 	 */
1440 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1441 		/* It's the very first one. */
1442 		SCTPDBG(SCTP_DEBUG_XXX,
1443 		    "chunk is a first fsn: %u becomes fsn_included\n",
1444 		    chk->rec.data.fsn);
1445 		if (control->first_frag_seen) {
1446 			/*
1447 			 * Error on the sender's part: they either sent us
1448 			 * two data chunks with FIRST, or they sent two
1449 			 * un-ordered chunks that were fragmented at the
1450 			 * same time in the same stream.
1451 			 */
1452 			sctp_abort_in_reasm(stcb, control, chk,
1453 			    abort_flag,
1454 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1455 			return;
1456 		}
1457 		control->first_frag_seen = 1;
1458 		control->sinfo_ppid = chk->rec.data.ppid;
1459 		control->sinfo_tsn = chk->rec.data.tsn;
1460 		control->fsn_included = chk->rec.data.fsn;
1461 		control->data = chk->data;
1462 		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1463 		chk->data = NULL;
1464 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1465 		sctp_setup_tail_pointer(control);
1466 		asoc->size_on_all_streams += control->length;
1467 	} else {
1468 		/* Place the chunk in our list */
1469 		int inserted = 0;
1470 
1471 		if (control->last_frag_seen == 0) {
1472 			/* Still willing to raise highest FSN seen */
1473 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1474 				SCTPDBG(SCTP_DEBUG_XXX,
1475 				    "We have a new top_fsn: %u\n",
1476 				    chk->rec.data.fsn);
1477 				control->top_fsn = chk->rec.data.fsn;
1478 			}
1479 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1480 				SCTPDBG(SCTP_DEBUG_XXX,
1481 				    "The last fsn is now in place fsn: %u\n",
1482 				    chk->rec.data.fsn);
1483 				control->last_frag_seen = 1;
1484 				if (SCTP_TSN_GT(control->top_fsn, chk->rec.data.fsn)) {
1485 					SCTPDBG(SCTP_DEBUG_XXX,
1486 					    "New fsn: %u is not at top_fsn: %u -- abort\n",
1487 					    chk->rec.data.fsn,
1488 					    control->top_fsn);
1489 					sctp_abort_in_reasm(stcb, control, chk,
1490 					    abort_flag,
1491 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1492 					return;
1493 				}
1494 			}
1495 			if (asoc->idata_supported || control->first_frag_seen) {
1496 				/*
1497 				 * For IDATA we always check since we know
1498 				 * that the first fragment is 0. For old
1499 				 * DATA we have to receive the first before
1500 				 * we know the first FSN (which is the TSN).
1501 				 */
1502 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1503 					/*
1504 					 * We have already delivered up to
1505 					 * this, so it's a dup.
1506 					 */
1507 					sctp_abort_in_reasm(stcb, control, chk,
1508 					    abort_flag,
1509 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1510 					return;
1511 				}
1512 			}
1513 		} else {
1514 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1515 				/* Second last? huh? */
1516 				SCTPDBG(SCTP_DEBUG_XXX,
1517 				    "Duplicate last fsn: %u (top: %u) -- abort\n",
1518 				    chk->rec.data.fsn, control->top_fsn);
1519 				sctp_abort_in_reasm(stcb, control,
1520 				    chk, abort_flag,
1521 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1522 				return;
1523 			}
1524 			if (asoc->idata_supported || control->first_frag_seen) {
1525 				/*
1526 				 * For IDATA we always check since we know
1527 				 * that the first fragment is 0. For old
1528 				 * DATA we have to receive the first before
1529 				 * we know the first FSN (which is the TSN).
1530 				 */
1531 
1532 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1533 					/*
1534 					 * We have already delivered up to
1535 					 * this, so it's a dup.
1536 					 */
1537 					SCTPDBG(SCTP_DEBUG_XXX,
1538 					    "New fsn: %u is already seen in included_fsn: %u -- abort\n",
1539 					    chk->rec.data.fsn, control->fsn_included);
1540 					sctp_abort_in_reasm(stcb, control, chk,
1541 					    abort_flag,
1542 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1543 					return;
1544 				}
1545 			}
1546 			/*
1547 			 * Validate that it is not beyond the top FSN if we
1548 			 * have seen the last one.
1549 			 */
1550 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1551 				SCTPDBG(SCTP_DEBUG_XXX,
1552 				    "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
1553 				    chk->rec.data.fsn,
1554 				    control->top_fsn);
1555 				sctp_abort_in_reasm(stcb, control, chk,
1556 				    abort_flag,
1557 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1558 				return;
1559 			}
1560 		}
1561 		/*
1562 		 * If we reach here, we need to place the new chunk in the
1563 		 * reassembly for this control.
1564 		 */
1565 		SCTPDBG(SCTP_DEBUG_XXX,
1566 		    "chunk is a not first fsn: %u needs to be inserted\n",
1567 		    chk->rec.data.fsn);
1568 		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1569 			if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1570 				/*
1571 				 * This one in queue is bigger than the new
1572 				 * one, insert the new one before at.
1573 				 */
1574 				SCTPDBG(SCTP_DEBUG_XXX,
1575 				    "Insert it before fsn: %u\n",
1576 				    at->rec.data.fsn);
1577 				asoc->size_on_reasm_queue += chk->send_size;
1578 				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1579 				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1580 				inserted = 1;
1581 				break;
1582 			} else if (at->rec.data.fsn == chk->rec.data.fsn) {
1583 				/*
1584 				 * Gak, he sent me a duplicate stream
1585 				 * sequence number.
1586 				 */
1587 				/*
1588 				 * Foo bar, I guess I will just free this
1589 				 * new guy. Should we abort too? FIX ME
1590 				 * MAYBE? Or it COULD be that the SSNs have
1591 				 * wrapped. Maybe I should compare to TSN
1592 				 * somehow... sigh, for now just blow away
1593 				 * the chunk!
1594 				 */
1595 				SCTPDBG(SCTP_DEBUG_XXX,
1596 				    "Duplicate to fsn: %u -- abort\n",
1597 				    at->rec.data.fsn);
1598 				sctp_abort_in_reasm(stcb, control,
1599 				    chk, abort_flag,
1600 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1601 				return;
1602 			}
1603 		}
1604 		if (inserted == 0) {
1605 			/* Goes on the end */
1606 			SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
1607 			    chk->rec.data.fsn);
1608 			asoc->size_on_reasm_queue += chk->send_size;
1609 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1610 			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1611 		}
1612 	}
1613 	/*
1614 	 * OK, let's see if we can pull any in-sequence fragments up into
1615 	 * the control structure, if it makes sense.
1616 	 */
1617 	do_wakeup = 0;
1618 	/*
1619 	 * If the first fragment has not been seen there is no sense in
1620 	 * looking.
1621 	 */
1622 	if (control->first_frag_seen) {
1623 		next_fsn = control->fsn_included + 1;
1624 		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1625 			if (at->rec.data.fsn == next_fsn) {
1626 				/* We can add this one now to the control */
1627 				SCTPDBG(SCTP_DEBUG_XXX,
1628 				    "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1629 				    control, at,
1630 				    at->rec.data.fsn,
1631 				    next_fsn, control->fsn_included);
1632 				TAILQ_REMOVE(&control->reasm, at, sctp_next);
1633 				lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
1634 				if (control->on_read_q) {
1635 					do_wakeup = 1;
1636 				} else {
1637 					/*
1638 					 * We only add to the
1639 					 * size-on-all-streams if it's not on
1640 					 * the read q. The read q flag will
1641 					 * cause an sballoc, so it's accounted
1642 					 * for there.
1643 					 */
1644 					asoc->size_on_all_streams += lenadded;
1645 				}
1646 				next_fsn++;
1647 				if (control->end_added && control->pdapi_started) {
1648 					if (strm->pd_api_started) {
1649 						strm->pd_api_started = 0;
1650 						control->pdapi_started = 0;
1651 					}
1652 					if (control->on_read_q == 0) {
1653 						sctp_add_to_readq(stcb->sctp_ep, stcb,
1654 						    control,
1655 						    &stcb->sctp_socket->so_rcv, control->end_added,
1656 						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1657 					}
1658 					break;
1659 				}
1660 			} else {
1661 				break;
1662 			}
1663 		}
1664 	}
1665 	if (do_wakeup) {
1666 		/* Need to wakeup the reader */
1667 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
1668 	}
1669 }
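/*
 * A worked example of the in-order pull-up above: with fragments 0, 1
 * and 3 queued on the reasm list, fragments 0 and 1 are moved into the
 * control and fsn_included advances to 1; fragment 3 stays queued until
 * fragment 2 fills the hole.
 */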
1670 
1671 static struct sctp_queued_to_read *
1672 sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
1673 {
1674 	struct sctp_queued_to_read *control;
1675 
1676 	if (ordered) {
1677 		TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
1678 			if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1679 				break;
1680 			}
1681 		}
1682 	} else {
1683 		if (idata_supported) {
1684 			TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1685 				if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1686 					break;
1687 				}
1688 			}
1689 		} else {
1690 			control = TAILQ_FIRST(&strm->uno_inqueue);
1691 		}
1692 	}
1693 	return (control);
1694 }
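/*
 * A note on the lookups above: SCTP_MID_EQ is expected to compare the
 * full 32-bit MID when I-DATA is supported and only the low 16 bits
 * (the legacy SSN) otherwise, matching how mid is widened from
 * ntohs(ssn) for plain DATA chunks.
 */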
1695 
1696 static int
1697 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1698     struct mbuf **m, int offset, int chk_length,
1699     struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
1700     int *break_flag, int last_chunk, uint8_t chk_type)
1701 {
1702 	struct sctp_tmit_chunk *chk = NULL;	/* make gcc happy */
1703 	struct sctp_stream_in *strm;
1704 	uint32_t tsn, fsn, gap, mid;
1705 	struct mbuf *dmbuf;
1706 	int the_len;
1707 	int need_reasm_check = 0;
1708 	uint16_t sid;
1709 	struct mbuf *op_err;
1710 	char msg[SCTP_DIAG_INFO_LEN];
1711 	struct sctp_queued_to_read *control, *ncontrol;
1712 	uint32_t ppid;
1713 	uint8_t chk_flags;
1714 	struct sctp_stream_reset_list *liste;
1715 	int ordered;
1716 	size_t clen;
1717 	int created_control = 0;
1718 
1719 	if (chk_type == SCTP_IDATA) {
1720 		struct sctp_idata_chunk *chunk, chunk_buf;
1721 
1722 		chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1723 		    sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
1724 		chk_flags = chunk->ch.chunk_flags;
1725 		clen = sizeof(struct sctp_idata_chunk);
1726 		tsn = ntohl(chunk->dp.tsn);
1727 		sid = ntohs(chunk->dp.sid);
1728 		mid = ntohl(chunk->dp.mid);
1729 		if (chk_flags & SCTP_DATA_FIRST_FRAG) {
1730 			fsn = 0;
1731 			ppid = chunk->dp.ppid_fsn.ppid;
1732 		} else {
1733 			fsn = ntohl(chunk->dp.ppid_fsn.fsn);
1734 			ppid = 0xffffffff;	/* Use as an invalid value. */
1735 		}
1736 	} else {
1737 		struct sctp_data_chunk *chunk, chunk_buf;
1738 
1739 		chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1740 		    sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
1741 		chk_flags = chunk->ch.chunk_flags;
1742 		clen = sizeof(struct sctp_data_chunk);
1743 		tsn = ntohl(chunk->dp.tsn);
1744 		sid = ntohs(chunk->dp.sid);
1745 		mid = (uint32_t)(ntohs(chunk->dp.ssn));
1746 		fsn = tsn;
1747 		ppid = chunk->dp.ppid;
1748 	}
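	/*
	 * Note the difference between the two layouts parsed above: a DATA
	 * chunk carries a 16-bit SSN and a dedicated PPID field, so its FSN
	 * is simply the TSN; an I-DATA chunk carries a 32-bit MID and a
	 * union of PPID and FSN, the PPID being valid only in the first
	 * fragment (whose FSN is implicitly 0) and the FSN in all others.
	 */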
1749 	if ((size_t)chk_length == clen) {
1750 		/*
1751 		 * Need to send an abort since we had an empty data chunk.
1752 		 */
1753 		op_err = sctp_generate_no_user_data_cause(tsn);
1754 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1755 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1756 		*abort_flag = 1;
1757 		return (0);
1758 	}
1759 	if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1760 		asoc->send_sack = 1;
1761 	}
1762 	ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
1763 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1764 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1765 	}
1766 	if (stcb == NULL) {
1767 		return (0);
1768 	}
1769 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
1770 	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1771 		/* It is a duplicate */
1772 		SCTP_STAT_INCR(sctps_recvdupdata);
1773 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1774 			/* Record a dup for the next outbound sack */
1775 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1776 			asoc->numduptsns++;
1777 		}
1778 		asoc->send_sack = 1;
1779 		return (0);
1780 	}
1781 	/* Calculate the number of TSNs between the base and this TSN */
1782 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1783 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1784 		/* Can't hold the bit in the mapping at max array, toss it */
1785 		return (0);
1786 	}
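	/*
	 * The gap is the serial-number distance from the window base; e.g.
	 * with mapping_array_base_tsn == 0xfffffffe and tsn == 0x00000001,
	 * the 32-bit wrap yields gap == 3. A gap at or beyond
	 * SCTP_MAPPING_ARRAY * 8 bits has no slot in the map and was tossed
	 * above.
	 */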
1787 	if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
1788 		SCTP_TCB_LOCK_ASSERT(stcb);
1789 		if (sctp_expand_mapping_array(asoc, gap)) {
1790 			/* Can't expand, drop it */
1791 			return (0);
1792 		}
1793 	}
1794 	if (SCTP_TSN_GT(tsn, *high_tsn)) {
1795 		*high_tsn = tsn;
1796 	}
1797 	/* See if we have received this one already */
1798 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1799 	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1800 		SCTP_STAT_INCR(sctps_recvdupdata);
1801 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1802 			/* Record a dup for the next outbound sack */
1803 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1804 			asoc->numduptsns++;
1805 		}
1806 		asoc->send_sack = 1;
1807 		return (0);
1808 	}
1809 	/*
1810 	 * Check to see about the GONE flag, duplicates would cause a sack
1811 	 * to be sent up above
1812 	 */
1813 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1814 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1815 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1816 		/*
1817 		 * Wait a minute, the socket is gone: there is no longer a
1818 		 * receiver. Send the peer an ABORT!
1819 		 */
1820 		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1821 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1822 		*abort_flag = 1;
1823 		return (0);
1824 	}
1825 	/*
1826 	 * Now before going further we see if there is room. If NOT then we
1827 	 * MAY let one through only IF this TSN is the one we are waiting
1828 	 * for on a partial delivery API.
1829 	 */
1830 
1831 	/* Is the stream valid? */
1832 	if (sid >= asoc->streamincnt) {
1833 		struct sctp_error_invalid_stream *cause;
1834 
1835 		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1836 		    0, M_NOWAIT, 1, MT_DATA);
1837 		if (op_err != NULL) {
1838 			/* add some space up front so prepend will work well */
1839 			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1840 			cause = mtod(op_err, struct sctp_error_invalid_stream *);
1841 			/*
1842 			 * Error causes are just parameters; this one has two
1843 			 * back-to-back headers, one with the error type and
1844 			 * size, the other with the stream id and a reserved field.
1845 			 */
1846 			SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1847 			cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1848 			cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1849 			cause->stream_id = htons(sid);
1850 			cause->reserved = htons(0);
1851 			sctp_queue_op_err(stcb, op_err);
1852 		}
1853 		SCTP_STAT_INCR(sctps_badsid);
1854 		SCTP_TCB_LOCK_ASSERT(stcb);
1855 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1856 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1857 			asoc->highest_tsn_inside_nr_map = tsn;
1858 		}
1859 		if (tsn == (asoc->cumulative_tsn + 1)) {
1860 			/* Update cum-ack */
1861 			asoc->cumulative_tsn = tsn;
1862 		}
1863 		return (0);
1864 	}
1865 	/*
1866 	 * If it's a fragmented message, let's see if we can find the control
1867 	 * on the reassembly queues.
1868 	 */
1869 	if ((chk_type == SCTP_IDATA) &&
1870 	    ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
1871 	    (fsn == 0)) {
1872 		/*
1873 		 * The first *must* be fsn 0, and other (middle/end) pieces
1874 		 * can *not* be fsn 0. XXX: This can happen in case of a
1875 		 * wrap around. Ignore it for now.
1876 		 */
1877 		SCTP_SNPRINTF(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x", mid, chk_flags);
1878 		goto err_out;
1879 	}
1880 	control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
1881 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1882 	    chk_flags, control);
1883 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1884 		/* See if we can find the re-assembly entity */
1885 		if (control != NULL) {
1886 			/* We found something, does it belong? */
1887 			if (ordered && (mid != control->mid)) {
1888 				SCTP_SNPRINTF(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
1889 		err_out:
1890 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1891 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1892 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1893 				*abort_flag = 1;
1894 				return (0);
1895 			}
1896 			if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1897 				/*
1898 				 * We can't have a switched order with an
1899 				 * unordered chunk
1900 				 */
1901 				SCTP_SNPRINTF(msg, sizeof(msg),
1902 				    "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1903 				    tsn);
1904 				goto err_out;
1905 			}
1906 			if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1907 				/*
1908 				 * We can't have a switched unordered with an
1909 				 * ordered chunk
1910 				 */
1911 				SCTP_SNPRINTF(msg, sizeof(msg),
1912 				    "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1913 				    tsn);
1914 				goto err_out;
1915 			}
1916 		}
1917 	} else {
1918 		/*
1919 		 * It's a complete segment. Let's validate we don't have a
1920 		 * re-assembly going on with the same Stream/Seq (for
1921 		 * ordered) or in the same Stream for unordered.
1922 		 */
1923 		if (control != NULL) {
1924 			if (ordered || asoc->idata_supported) {
1925 				SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
1926 				    chk_flags, mid);
1927 				SCTP_SNPRINTF(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
1928 				goto err_out;
1929 			} else {
1930 				if ((tsn == control->fsn_included + 1) &&
1931 				    (control->end_added == 0)) {
1932 					SCTP_SNPRINTF(msg, sizeof(msg),
1933 					    "Illegal message sequence, missing end for MID: %8.8x",
1934 					    control->fsn_included);
1935 					goto err_out;
1936 				} else {
1937 					control = NULL;
1938 				}
1939 			}
1940 		}
1941 	}
1942 	/* now do the tests */
1943 	if (((asoc->cnt_on_all_streams +
1944 	    asoc->cnt_on_reasm_queue +
1945 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1946 	    (((int)asoc->my_rwnd) <= 0)) {
1947 		/*
1948 		 * When we have NO room in the rwnd we check to make sure
1949 		 * the reader is doing its job...
1950 		 */
1951 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1952 			/* some to read, wake-up */
1953 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1954 		}
1955 		/* now is it in the mapping array of what we have accepted? */
1956 		if (chk_type == SCTP_DATA) {
1957 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1958 			    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1959 				/* Not in the valid range; dump it. */
1960 		dump_packet:
1961 				sctp_set_rwnd(stcb, asoc);
1962 				if ((asoc->cnt_on_all_streams +
1963 				    asoc->cnt_on_reasm_queue +
1964 				    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1965 					SCTP_STAT_INCR(sctps_datadropchklmt);
1966 				} else {
1967 					SCTP_STAT_INCR(sctps_datadroprwnd);
1968 				}
1969 				*break_flag = 1;
1970 				return (0);
1971 			}
1972 		} else {
1973 			if (control == NULL) {
1974 				goto dump_packet;
1975 			}
1976 			if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1977 				goto dump_packet;
1978 			}
1979 		}
1980 	}
1981 #ifdef SCTP_ASOCLOG_OF_TSNS
1982 	SCTP_TCB_LOCK_ASSERT(stcb);
1983 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1984 		asoc->tsn_in_at = 0;
1985 		asoc->tsn_in_wrapped = 1;
1986 	}
1987 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1988 	asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
1989 	asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
1990 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1991 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chk_flags;
1992 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1993 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1994 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1995 	asoc->tsn_in_at++;
1996 #endif
1997 	/*
1998 	 * Before we continue, let's validate that we are not being fooled by
1999 	 * an evil attacker. We can only have N * 8 chunks outstanding, where
2000 	 * N is the size in bytes of the mapping array, so there is no way
2001 	 * our stream sequence numbers could have wrapped. Of course we only
2002 	 * validate the FIRST fragment, so that bit must be set.
2003 	 */
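	/*
	 * For example, with a 512-byte mapping array (the size here is
	 * illustrative) at most 4096 TSNs can be outstanding, while the
	 * legacy SSN space holds 65536 values, so a first fragment at or
	 * behind last_mid_delivered cannot be a legitimate wrap-around.
	 */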
2004 	if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
2005 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
2006 	    (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
2007 	    SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
2008 		/* The incoming sseq is behind where we last delivered? */
2009 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
2010 		    mid, asoc->strmin[sid].last_mid_delivered);
2011 
2012 		if (asoc->idata_supported) {
2013 			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
2014 			    asoc->strmin[sid].last_mid_delivered,
2015 			    tsn,
2016 			    sid,
2017 			    mid);
2018 		} else {
2019 			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
2020 			    (uint16_t)asoc->strmin[sid].last_mid_delivered,
2021 			    tsn,
2022 			    sid,
2023 			    (uint16_t)mid);
2024 		}
2025 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2026 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
2027 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
2028 		*abort_flag = 1;
2029 		return (0);
2030 	}
2031 	if (chk_type == SCTP_IDATA) {
2032 		the_len = (chk_length - sizeof(struct sctp_idata_chunk));
2033 	} else {
2034 		the_len = (chk_length - sizeof(struct sctp_data_chunk));
2035 	}
2036 	if (last_chunk == 0) {
2037 		if (chk_type == SCTP_IDATA) {
2038 			dmbuf = SCTP_M_COPYM(*m,
2039 			    (offset + sizeof(struct sctp_idata_chunk)),
2040 			    the_len, M_NOWAIT);
2041 		} else {
2042 			dmbuf = SCTP_M_COPYM(*m,
2043 			    (offset + sizeof(struct sctp_data_chunk)),
2044 			    the_len, M_NOWAIT);
2045 		}
2046 #ifdef SCTP_MBUF_LOGGING
2047 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2048 			sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
2049 		}
2050 #endif
2051 	} else {
2052 		/* We can steal the last chunk */
2053 		int l_len;
2054 
2055 		dmbuf = *m;
2056 		/* lop off the top part */
2057 		if (chk_type == SCTP_IDATA) {
2058 			m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
2059 		} else {
2060 			m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
2061 		}
2062 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
2063 			l_len = SCTP_BUF_LEN(dmbuf);
2064 		} else {
2065 			/*
2066 			 * Need to count up the size; hopefully we do not hit
2067 			 * this too often.
2068 			 */
2069 			struct mbuf *lat;
2070 
2071 			l_len = 0;
2072 			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
2073 				l_len += SCTP_BUF_LEN(lat);
2074 			}
2075 		}
2076 		if (l_len > the_len) {
2077 			/* Trim the excess bytes off the end too */
2078 			m_adj(dmbuf, -(l_len - the_len));
2079 		}
2080 	}
2081 	if (dmbuf == NULL) {
2082 		SCTP_STAT_INCR(sctps_nomem);
2083 		return (0);
2084 	}
2085 	/*
2086 	 * Now no matter what, we need a control, get one if we don't have
2087 	 * one (we may have gotten it above when we found the message was
2088 	 * fragmented).
2089 	 */
2090 	if (control == NULL) {
2091 		sctp_alloc_a_readq(stcb, control);
2092 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2093 		    ppid,
2094 		    sid,
2095 		    chk_flags,
2096 		    NULL, fsn, mid);
2097 		if (control == NULL) {
2098 			SCTP_STAT_INCR(sctps_nomem);
2099 			return (0);
2100 		}
2101 		if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2102 			struct mbuf *mm;
2103 
2104 			control->data = dmbuf;
2105 			control->tail_mbuf = NULL;
2106 			for (mm = control->data; mm; mm = mm->m_next) {
2107 				control->length += SCTP_BUF_LEN(mm);
2108 				if (SCTP_BUF_NEXT(mm) == NULL) {
2109 					control->tail_mbuf = mm;
2110 				}
2111 			}
2112 			control->end_added = 1;
2113 			control->last_frag_seen = 1;
2114 			control->first_frag_seen = 1;
2115 			control->fsn_included = fsn;
2116 			control->top_fsn = fsn;
2117 		}
2118 		created_control = 1;
2119 	}
2120 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
2121 	    chk_flags, ordered, mid, control);
2122 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
2123 	    TAILQ_EMPTY(&asoc->resetHead) &&
2124 	    ((ordered == 0) ||
2125 	    (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2126 	    TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
2127 		/* Candidate for express delivery */
2128 		/*
2129 		 * It's not fragmented, no PD-API is up, nothing is in the
2130 		 * delivery queue, it's unordered OR ordered and the next to
2131 		 * deliver AND nothing else is stuck on the stream queue,
2132 		 * and there is room for it in the socket buffer. Let's just
2133 		 * stuff it into the buffer.
2134 		 */
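		/*
		 * Condensed, the guard above is:
		 *
		 *	not fragmented && no pending stream reset &&
		 *	    (unordered ||
		 *	     (mid == last_mid_delivered + 1 &&
		 *	      stream inqueue empty))
		 */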
2135 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2136 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2137 			asoc->highest_tsn_inside_nr_map = tsn;
2138 		}
2139 		SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2140 		    control, mid);
2141 
2142 		sctp_add_to_readq(stcb->sctp_ep, stcb,
2143 		    control, &stcb->sctp_socket->so_rcv,
2144 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2145 
2146 		if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
2147 			/* for ordered, bump what we delivered */
2148 			asoc->strmin[sid].last_mid_delivered++;
2149 		}
2150 		SCTP_STAT_INCR(sctps_recvexpress);
2151 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2152 			sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2153 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
2154 		}
2155 		control = NULL;
2156 		goto finish_express_del;
2157 	}
2158 
2159 	/* Now will we need a chunk too? */
2160 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2161 		sctp_alloc_a_chunk(stcb, chk);
2162 		if (chk == NULL) {
2163 			/* No memory so we drop the chunk */
2164 			SCTP_STAT_INCR(sctps_nomem);
2165 			if (last_chunk == 0) {
2166 				/* we copied it, free the copy */
2167 				sctp_m_freem(dmbuf);
2168 			}
2169 			return (0);
2170 		}
2171 		chk->rec.data.tsn = tsn;
2172 		chk->no_fr_allowed = 0;
2173 		chk->rec.data.fsn = fsn;
2174 		chk->rec.data.mid = mid;
2175 		chk->rec.data.sid = sid;
2176 		chk->rec.data.ppid = ppid;
2177 		chk->rec.data.context = stcb->asoc.context;
2178 		chk->rec.data.doing_fast_retransmit = 0;
2179 		chk->rec.data.rcv_flags = chk_flags;
2180 		chk->asoc = asoc;
2181 		chk->send_size = the_len;
2182 		chk->whoTo = net;
2183 		SCTPDBG(SCTP_DEBUG_XXX, "Building chk: %p for control: %p to be read (MID: %u)\n",
2184 		    chk,
2185 		    control, mid);
2186 		atomic_add_int(&net->ref_count, 1);
2187 		chk->data = dmbuf;
2188 	}
2189 	/* Set the appropriate TSN mark: the nr (non-renegable) map unless draining is enabled */
2190 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2191 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2192 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2193 			asoc->highest_tsn_inside_nr_map = tsn;
2194 		}
2195 	} else {
2196 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2197 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2198 			asoc->highest_tsn_inside_map = tsn;
2199 		}
2200 	}
2201 	/* Now is it complete (i.e. not fragmented)? */
2202 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2203 		/*
2204 		 * Special check for when streams are resetting. We could be
2205 		 * smarter about this and check the actual stream to see
2206 		 * whether it is being reset; that way we would not create
2207 		 * head-of-line blocking between streams being reset and
2208 		 * those not being reset.
2209 		 *
2210 		 */
2211 		if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2212 		    SCTP_TSN_GT(tsn, liste->tsn)) {
2213 			/*
2214 			 * Yep, it's past where we need to reset... go ahead
2215 			 * and queue it.
2216 			 */
2217 			if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2218 				/* first one on */
2219 				TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2220 			} else {
2221 				struct sctp_queued_to_read *lcontrol, *nlcontrol;
2222 				unsigned char inserted = 0;
2223 
2224 				TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
2225 					if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
2227 						continue;
2228 					} else {
2229 						/* found it */
2230 						TAILQ_INSERT_BEFORE(lcontrol, control, next);
2231 						inserted = 1;
2232 						break;
2233 					}
2234 				}
2235 				if (inserted == 0) {
2236 					/*
2237 					 * Not inserted before any existing
2238 					 * entry, so it must be put at the
2239 					 * end of the queue.
2240 					 */
2241 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2242 				}
2243 			}
2244 			goto finish_express_del;
2245 		}
2246 		if (chk_flags & SCTP_DATA_UNORDERED) {
2247 			/* queue directly into socket buffer */
2248 			SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2249 			    control, mid);
2250 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2251 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2252 			    control,
2253 			    &stcb->sctp_socket->so_rcv, 1,
2254 			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2255 
2256 		} else {
2257 			SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2258 			    mid);
2259 			sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2260 			if (*abort_flag) {
2261 				if (last_chunk) {
2262 					*m = NULL;
2263 				}
2264 				return (0);
2265 			}
2266 		}
2267 		goto finish_express_del;
2268 	}
2269 	/* If we reach here its a reassembly */
2270 	need_reasm_check = 1;
2271 	SCTPDBG(SCTP_DEBUG_XXX,
2272 	    "Queue data to stream for reasm control: %p MID: %u\n",
2273 	    control, mid);
2274 	sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
2275 	if (*abort_flag) {
2276 		/*
2277 		 * the assoc is now gone and chk was put onto the reasm
2278 		 * queue, which has all been freed.
2279 		 */
2280 		if (last_chunk) {
2281 			*m = NULL;
2282 		}
2283 		return (0);
2284 	}
2285 finish_express_del:
2286 	/* Here we tidy up things */
2287 	if (tsn == (asoc->cumulative_tsn + 1)) {
2288 		/* Update cum-ack */
2289 		asoc->cumulative_tsn = tsn;
2290 	}
2291 	if (last_chunk) {
2292 		*m = NULL;
2293 	}
2294 	if (ordered) {
2295 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2296 	} else {
2297 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2298 	}
2299 	SCTP_STAT_INCR(sctps_recvdata);
2300 	/* Set it present please */
2301 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2302 		sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2303 	}
2304 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2305 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2306 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2307 	}
2308 	if (need_reasm_check) {
2309 		(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
2310 		need_reasm_check = 0;
2311 	}
2312 	/* check the special flag for stream resets */
2313 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2314 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2315 		/*
2316 		 * We have finished working through the backlogged TSNs; now
2317 		 * it is time to reset streams: 1) call the reset function,
2318 		 * 2) free the pending_reply space, 3) distribute any chunks
2319 		 * in the pending_reply_queue.
2320 		 */
2321 		sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2322 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2323 		sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2324 		SCTP_FREE(liste, SCTP_M_STRESET);
2325 		/* sa_ignore FREED_MEMORY */
2326 		liste = TAILQ_FIRST(&asoc->resetHead);
2327 		if (TAILQ_EMPTY(&asoc->resetHead)) {
2328 			/* All can be removed */
2329 			TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2330 				TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2331 				strm = &asoc->strmin[control->sinfo_stream];
2332 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2333 				if (*abort_flag) {
2334 					return (0);
2335 				}
2336 				if (need_reasm_check) {
2337 					(void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
2338 					need_reasm_check = 0;
2339 				}
2340 			}
2341 		} else {
2342 			TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2343 				if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
2344 					break;
2345 				}
2346 				/*
2347 				 * If control->sinfo_tsn <= liste->tsn we
2348 				 * can process it, which is the negation of
2349 				 * control->sinfo_tsn > liste->tsn.
2350 				 */
2351 				TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2352 				strm = &asoc->strmin[control->sinfo_stream];
2353 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2354 				if (*abort_flag) {
2355 					return (0);
2356 				}
2357 				if (need_reasm_check) {
2358 					(void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
2359 					need_reasm_check = 0;
2360 				}
2361 			}
2362 		}
2363 	}
2364 	return (1);
2365 }
2366 
2367 static const int8_t sctp_map_lookup_tab[256] = {
2368 	0, 1, 0, 2, 0, 1, 0, 3,
2369 	0, 1, 0, 2, 0, 1, 0, 4,
2370 	0, 1, 0, 2, 0, 1, 0, 3,
2371 	0, 1, 0, 2, 0, 1, 0, 5,
2372 	0, 1, 0, 2, 0, 1, 0, 3,
2373 	0, 1, 0, 2, 0, 1, 0, 4,
2374 	0, 1, 0, 2, 0, 1, 0, 3,
2375 	0, 1, 0, 2, 0, 1, 0, 6,
2376 	0, 1, 0, 2, 0, 1, 0, 3,
2377 	0, 1, 0, 2, 0, 1, 0, 4,
2378 	0, 1, 0, 2, 0, 1, 0, 3,
2379 	0, 1, 0, 2, 0, 1, 0, 5,
2380 	0, 1, 0, 2, 0, 1, 0, 3,
2381 	0, 1, 0, 2, 0, 1, 0, 4,
2382 	0, 1, 0, 2, 0, 1, 0, 3,
2383 	0, 1, 0, 2, 0, 1, 0, 7,
2384 	0, 1, 0, 2, 0, 1, 0, 3,
2385 	0, 1, 0, 2, 0, 1, 0, 4,
2386 	0, 1, 0, 2, 0, 1, 0, 3,
2387 	0, 1, 0, 2, 0, 1, 0, 5,
2388 	0, 1, 0, 2, 0, 1, 0, 3,
2389 	0, 1, 0, 2, 0, 1, 0, 4,
2390 	0, 1, 0, 2, 0, 1, 0, 3,
2391 	0, 1, 0, 2, 0, 1, 0, 6,
2392 	0, 1, 0, 2, 0, 1, 0, 3,
2393 	0, 1, 0, 2, 0, 1, 0, 4,
2394 	0, 1, 0, 2, 0, 1, 0, 3,
2395 	0, 1, 0, 2, 0, 1, 0, 5,
2396 	0, 1, 0, 2, 0, 1, 0, 3,
2397 	0, 1, 0, 2, 0, 1, 0, 4,
2398 	0, 1, 0, 2, 0, 1, 0, 3,
2399 	0, 1, 0, 2, 0, 1, 0, 8
2400 };
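/*
 * sctp_map_lookup_tab[v] is the number of consecutive 1-bits in v
 * counting from the least significant bit, e.g. tab[0x07] == 3,
 * tab[0x06] == 0 and tab[0xff] == 8. A sketch of the equivalent
 * computation, for reference:
 *
 *	int count = 0;
 *
 *	while (v & 1) {
 *		count++;
 *		v >>= 1;
 *	}
 */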
2401 
2402 
2403 void
2404 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2405 {
2406 	/*
2407 	 * Now we also need to check the mapping array in a couple of ways.
2408 	 * 1) Did we move the cum-ack point?
2409 	 *
2410 	 * When you first glance at this you might think that all entries
2411 	 * that make up the position of the cum-ack would be in the
2412 	 * nr-mapping array only, i.e. things up to the cum-ack are always
2413 	 * deliverable. That's true with one exception: when it's a fragmented
2414 	 * message we may not deliver the data until some threshold (or all
2415 	 * of it) is in place. So we must OR the nr_mapping_array and
2416 	 * mapping_array to get a true picture of the cum-ack.
2417 	 */
2418 	struct sctp_association *asoc;
2419 	int at;
2420 	uint8_t val;
2421 	int slide_from, slide_end, lgap, distance;
2422 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2423 
2424 	asoc = &stcb->asoc;
2425 
2426 	old_cumack = asoc->cumulative_tsn;
2427 	old_base = asoc->mapping_array_base_tsn;
2428 	old_highest = asoc->highest_tsn_inside_map;
2429 	/*
2430 	 * We could probably improve this a small bit by calculating the
2431 	 * offset of the current cum-ack as the starting point.
2432 	 */
2433 	at = 0;
2434 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2435 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2436 		if (val == 0xff) {
2437 			at += 8;
2438 		} else {
2439 			/* there is a 0 bit */
2440 			at += sctp_map_lookup_tab[val];
2441 			break;
2442 		}
2443 	}
2444 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
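	/*
	 * Worked example for the scan above: if the OR of the two maps
	 * yields the bytes 0xff, 0xff, 0x07, ..., then at == 8 + 8 + 3,
	 * i.e. bits 0 through 18 are set, and the cumulative TSN advances
	 * to mapping_array_base_tsn + 18.
	 */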
2445 
2446 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2447 	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2448 #ifdef INVARIANTS
2449 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2450 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2451 #else
2452 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2453 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2454 		sctp_print_mapping_array(asoc);
2455 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2456 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2457 		}
2458 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2459 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2460 #endif
2461 	}
2462 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2463 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2464 	} else {
2465 		highest_tsn = asoc->highest_tsn_inside_map;
2466 	}
2467 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2468 		/* The complete array was completed by a single FR */
2469 		/* highest becomes the cum-ack */
2470 		int clr;
2471 #ifdef INVARIANTS
2472 		unsigned int i;
2473 #endif
2474 
2475 		/* clear the array */
2476 		clr = ((at + 7) >> 3);
2477 		if (clr > asoc->mapping_array_size) {
2478 			clr = asoc->mapping_array_size;
2479 		}
2480 		memset(asoc->mapping_array, 0, clr);
2481 		memset(asoc->nr_mapping_array, 0, clr);
2482 #ifdef INVARIANTS
2483 		for (i = 0; i < asoc->mapping_array_size; i++) {
2484 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2485 				SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2486 				sctp_print_mapping_array(asoc);
2487 			}
2488 		}
2489 #endif
2490 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2491 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2492 	} else if (at >= 8) {
2493 		/* we can slide the mapping array down */
2494 		/* slide_from holds where we hit the first NON 0xff byte */
2495 
2496 		/*
2497 		 * now calculate the ceiling of the move using our highest
2498 		 * TSN value
2499 		 */
2500 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2501 		slide_end = (lgap >> 3);
2502 		if (slide_end < slide_from) {
2503 			sctp_print_mapping_array(asoc);
2504 #ifdef INVARIANTS
2505 			panic("impossible slide");
2506 #else
2507 			SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2508 			    lgap, slide_end, slide_from, at);
2509 			return;
2510 #endif
2511 		}
2512 		if (slide_end > asoc->mapping_array_size) {
2513 #ifdef INVARIANTS
2514 			panic("would overrun buffer");
2515 #else
2516 			SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2517 			    asoc->mapping_array_size, slide_end);
2518 			slide_end = asoc->mapping_array_size;
2519 #endif
2520 		}
2521 		distance = (slide_end - slide_from) + 1;
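		/*
		 * Worked example: if highest_tsn is 100 TSNs past the array
		 * base, lgap == 100 and slide_end == 12 (the byte holding
		 * bit 100); with slide_from == 4 the copy below moves bytes
		 * 4 through 12 (distance == 9) and the base TSN later
		 * advances by 4 * 8 == 32.
		 */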
2522 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2523 			sctp_log_map(old_base, old_cumack, old_highest,
2524 			    SCTP_MAP_PREPARE_SLIDE);
2525 			sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2526 			    (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
2527 		}
2528 		if (distance + slide_from > asoc->mapping_array_size ||
2529 		    distance < 0) {
2530 			/*
2531 			 * Here we do NOT slide forward the array so that
2532 			 * hopefully when more data comes in to fill it up
2533 			 * we will be able to slide it forward. Really, this
2534 			 * should not happen.
2535 			 */
2536 
2537 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2538 				sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2539 				    (uint32_t)asoc->mapping_array_size,
2540 				    SCTP_MAP_SLIDE_NONE);
2541 			}
2542 		} else {
2543 			int ii;
2544 
2545 			for (ii = 0; ii < distance; ii++) {
2546 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2547 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2548 
2549 			}
2550 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2551 				asoc->mapping_array[ii] = 0;
2552 				asoc->nr_mapping_array[ii] = 0;
2553 			}
2554 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2555 				asoc->highest_tsn_inside_map += (slide_from << 3);
2556 			}
2557 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2558 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2559 			}
2560 			asoc->mapping_array_base_tsn += (slide_from << 3);
2561 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2562 				sctp_log_map(asoc->mapping_array_base_tsn,
2563 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2564 				    SCTP_MAP_SLIDE_RESULT);
2565 			}
2566 		}
2567 	}
2568 }
2569 
2570 void
2571 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2572 {
2573 	struct sctp_association *asoc;
2574 	uint32_t highest_tsn;
2575 	int is_a_gap;
2576 
2577 	sctp_slide_mapping_arrays(stcb);
2578 	asoc = &stcb->asoc;
2579 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2580 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2581 	} else {
2582 		highest_tsn = asoc->highest_tsn_inside_map;
2583 	}
2584 	/* Is there a gap now? */
2585 	is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2586 
2587 	/*
2588 	 * Now we need to see if we need to queue a sack or just start the
2589 	 * timer (if allowed).
2590 	 */
2591 	if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2592 		/*
2593 		 * OK, special case: in the SHUTDOWN-SENT state we make sure
2594 		 * the SACK timer is off and instead send a SHUTDOWN and, if
2595 		 * there is a gap, a SACK.
2596 		 */
2597 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2598 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2599 			    stcb->sctp_ep, stcb, NULL,
2600 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2601 		}
2602 		sctp_send_shutdown(stcb,
2603 		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2604 		if (is_a_gap) {
2605 			sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2606 		}
2607 	} else {
2608 		/*
2609 		 * CMT DAC algorithm: increase number of packets received
2610 		 * since last ack
2611 		 */
2612 		stcb->asoc.cmt_dac_pkts_rcvd++;
2613 
2614 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2615 							 * SACK */
2616 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2617 							 * longer is one */
2618 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2619 		    (is_a_gap) ||	/* is still a gap */
2620 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2621 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2622 		    ) {
2624 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2625 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2626 			    (stcb->asoc.send_sack == 0) &&
2627 			    (stcb->asoc.numduptsns == 0) &&
2628 			    (stcb->asoc.delayed_ack) &&
2629 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2631 				/*
2632 				 * CMT DAC algorithm: with CMT, delay acks
2633 				 * even in the face of reordering.
2634 				 *
2635 				 * Therefore, acks that do not have to be
2636 				 * sent because of the above reasons will
2637 				 * be delayed. That is, acks that would
2638 				 * have been sent due to gap reports will
2639 				 * be delayed with DAC. Start the delayed
2640 				 * ack timer.
2641 				 */
2642 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2643 				    stcb->sctp_ep, stcb, NULL);
2644 			} else {
2645 				/*
2646 				 * OK, we must build a SACK: the timer is
2647 				 * pending, we got our first packet, OR
2648 				 * there are gaps or duplicates.
2649 				 */
2650 				sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
2651 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2652 				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2653 			}
2654 		} else {
2655 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2656 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2657 				    stcb->sctp_ep, stcb, NULL);
2658 			}
2659 		}
2660 	}
2661 }
2662 
2663 int
2664 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2665     struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2666     struct sctp_nets *net, uint32_t *high_tsn)
2667 {
2668 	struct sctp_chunkhdr *ch, chunk_buf;
2669 	struct sctp_association *asoc;
2670 	int num_chunks = 0;	/* number of control chunks processed */
2671 	int stop_proc = 0;
2672 	int break_flag, last_chunk;
2673 	int abort_flag = 0, was_a_gap;
2674 	struct mbuf *m;
2675 	uint32_t highest_tsn;
2676 	uint16_t chk_length;
2677 
2678 	/* set the rwnd */
2679 	sctp_set_rwnd(stcb, &stcb->asoc);
2680 
2681 	m = *mm;
2682 	SCTP_TCB_LOCK_ASSERT(stcb);
2683 	asoc = &stcb->asoc;
2684 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2685 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2686 	} else {
2687 		highest_tsn = asoc->highest_tsn_inside_map;
2688 	}
2689 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2690 	/*
2691 	 * setup where we got the last DATA packet from for any SACK that
2692 	 * may need to go out. Don't bump the net. This is done ONLY when a
2693 	 * chunk is assigned.
2694 	 */
2695 	asoc->last_data_chunk_from = net;
2696 
2697 	/*-
2698 	 * Now before we proceed we must figure out if this is a wasted
2699 	 * cluster... i.e. it is a small packet sent in and yet the driver
2700 	 * underneath allocated a full cluster for it. If so we must copy it
2701 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2702 	 * with cluster starvation.
2703 	 */
2704 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2705 		/* We only handle mbufs that are singletons, not chains. */
2706 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2707 		if (m) {
2708 			/* ok lets see if we can copy the data up */
2709 			caddr_t *from, *to;
2710 
2711 			/* get the pointers and copy */
2712 			to = mtod(m, caddr_t *);
2713 			from = mtod((*mm), caddr_t *);
2714 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2715 			/* copy the length and free up the old */
2716 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2717 			sctp_m_freem(*mm);
2718 			/* success, back copy */
2719 			*mm = m;
2720 		} else {
2721 			/* Allocation failed; stay with the original mbuf. */
2722 			m = *mm;
2723 		}
2724 	}
2725 	/* get pointer to the first chunk header */
2726 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2727 	    sizeof(struct sctp_chunkhdr),
2728 	    (uint8_t *)&chunk_buf);
2729 	if (ch == NULL) {
2730 		return (1);
2731 	}
2732 	/*
2733 	 * process all DATA chunks...
2734 	 */
2735 	*high_tsn = asoc->cumulative_tsn;
2736 	break_flag = 0;
2737 	asoc->data_pkts_seen++;
2738 	while (stop_proc == 0) {
2739 		/* validate chunk length */
2740 		chk_length = ntohs(ch->chunk_length);
2741 		if (length - *offset < chk_length) {
2742 			/* all done, mutilated chunk */
2743 			stop_proc = 1;
2744 			continue;
2745 		}
2746 		if ((asoc->idata_supported == 1) &&
2747 		    (ch->chunk_type == SCTP_DATA)) {
2748 			struct mbuf *op_err;
2749 			char msg[SCTP_DIAG_INFO_LEN];
2750 
2751 			SCTP_SNPRINTF(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2752 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2753 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2754 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2755 			return (2);
2756 		}
2757 		if ((asoc->idata_supported == 0) &&
2758 		    (ch->chunk_type == SCTP_IDATA)) {
2759 			struct mbuf *op_err;
2760 			char msg[SCTP_DIAG_INFO_LEN];
2761 
2762 			SCTP_SNPRINTF(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2763 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2764 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
2765 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2766 			return (2);
2767 		}
2768 		if ((ch->chunk_type == SCTP_DATA) ||
2769 		    (ch->chunk_type == SCTP_IDATA)) {
2770 			uint16_t clen;
2771 
2772 			if (ch->chunk_type == SCTP_DATA) {
2773 				clen = sizeof(struct sctp_data_chunk);
2774 			} else {
2775 				clen = sizeof(struct sctp_idata_chunk);
2776 			}
2777 			if (chk_length < clen) {
2778 				/*
2779 				 * Need to send an abort since we had an
2780 				 * invalid data chunk.
2781 				 */
2782 				struct mbuf *op_err;
2783 				char msg[SCTP_DIAG_INFO_LEN];
2784 
2785 				SCTP_SNPRINTF(msg, sizeof(msg), "%s chunk of length %u",
2786 				    ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
2787 				    chk_length);
2788 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2789 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_22;
2790 				sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2791 				return (2);
2792 			}
2793 #ifdef SCTP_AUDITING_ENABLED
2794 			sctp_audit_log(0xB1, 0);
2795 #endif
2796 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2797 				last_chunk = 1;
2798 			} else {
2799 				last_chunk = 0;
2800 			}
2801 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2802 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2803 			    last_chunk, ch->chunk_type)) {
2804 				num_chunks++;
2805 			}
2806 			if (abort_flag)
2807 				return (2);
2808 
2809 			if (break_flag) {
2810 				/*
2811 				 * Set because we are out of rwnd space and
2812 				 * have no drop report space left.
2813 				 */
2814 				stop_proc = 1;
2815 				continue;
2816 			}
2817 		} else {
2818 			/* not a data chunk in the data region */
2819 			switch (ch->chunk_type) {
2820 			case SCTP_INITIATION:
2821 			case SCTP_INITIATION_ACK:
2822 			case SCTP_SELECTIVE_ACK:
2823 			case SCTP_NR_SELECTIVE_ACK:
2824 			case SCTP_HEARTBEAT_REQUEST:
2825 			case SCTP_HEARTBEAT_ACK:
2826 			case SCTP_ABORT_ASSOCIATION:
2827 			case SCTP_SHUTDOWN:
2828 			case SCTP_SHUTDOWN_ACK:
2829 			case SCTP_OPERATION_ERROR:
2830 			case SCTP_COOKIE_ECHO:
2831 			case SCTP_COOKIE_ACK:
2832 			case SCTP_ECN_ECHO:
2833 			case SCTP_ECN_CWR:
2834 			case SCTP_SHUTDOWN_COMPLETE:
2835 			case SCTP_AUTHENTICATION:
2836 			case SCTP_ASCONF_ACK:
2837 			case SCTP_PACKET_DROPPED:
2838 			case SCTP_STREAM_RESET:
2839 			case SCTP_FORWARD_CUM_TSN:
2840 			case SCTP_ASCONF:
2841 				{
2842 					/*
2843 					 * Now, what do we do with KNOWN
2844 					 * chunks that are NOT in the right
2845 					 * place?
2846 					 *
2847 					 * They must not be bundled after
2848 					 * DATA, so treat this as a protocol
2849 					 * violation and abort the
2850 					 * association, as implemented
2851 					 * below.
2852 					 */
2853 					struct mbuf *op_err;
2854 					char msg[SCTP_DIAG_INFO_LEN];
2855 
2856 					SCTP_SNPRINTF(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2857 					    ch->chunk_type);
2858 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2859 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2860 					return (2);
2861 				}
2862 			default:
2863 				/*
2864 				 * Unknown chunk type: use bit rules after
2865 				 * checking length
2866 				 */
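				/*
				 * The two high-order bits of the type
				 * encode the handling (RFC 4960,
				 * section 3.2): if 0x40 is set the
				 * chunk is reported in an ERROR, and
				 * if 0x80 is clear the rest of the
				 * packet is discarded, exactly as done
				 * below.
				 */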
2867 				if (chk_length < sizeof(struct sctp_chunkhdr)) {
2868 					/*
2869 					 * Need to send an abort since we
2870 					 * had an invalid chunk.
2871 					 */
2872 					struct mbuf *op_err;
2873 					char msg[SCTP_DIAG_INFO_LEN];
2874 
2875 					SCTP_SNPRINTF(msg, sizeof(msg), "Chunk of length %u", chk_length);
2876 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2877 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_23;
2878 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2879 					return (2);
2880 				}
2881 				if (ch->chunk_type & 0x40) {
2882 					/* Add an error report to the queue */
2883 					struct mbuf *op_err;
2884 					struct sctp_gen_error_cause *cause;
2885 
2886 					op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2887 					    0, M_NOWAIT, 1, MT_DATA);
2888 					if (op_err != NULL) {
2889 						cause = mtod(op_err, struct sctp_gen_error_cause *);
2890 						cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2891 						cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
2892 						SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2893 						SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2894 						if (SCTP_BUF_NEXT(op_err) != NULL) {
2895 							sctp_queue_op_err(stcb, op_err);
2896 						} else {
2897 							sctp_m_freem(op_err);
2898 						}
2899 					}
2900 				}
2901 				if ((ch->chunk_type & 0x80) == 0) {
2902 					/* discard the rest of this packet */
2903 					stop_proc = 1;
2904 				}	/* else skip this bad chunk and
2905 					 * continue... */
2906 				break;
2907 			}	/* switch of chunk type */
2908 		}
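		/*
		 * Chunks are 32-bit aligned on the wire, so the advance
		 * below uses SCTP_SIZE32 to step over any padding; e.g. a
		 * chunk length of 37 moves the offset by 40 bytes.
		 */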
2909 		*offset += SCTP_SIZE32(chk_length);
2910 		if ((*offset >= length) || stop_proc) {
2911 			/* no more data left in the mbuf chain */
2912 			stop_proc = 1;
2913 			continue;
2914 		}
2915 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2916 		    sizeof(struct sctp_chunkhdr),
2917 		    (uint8_t *)&chunk_buf);
2918 		if (ch == NULL) {
2919 			*offset = length;
2920 			stop_proc = 1;
2921 			continue;
2922 		}
2923 	}
2924 	if (break_flag) {
2925 		/*
2926 		 * we need to report rwnd overrun drops.
2927 		 */
2928 		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2929 	}
2930 	if (num_chunks) {
2931 		/*
2932 		 * We got data; update the time for auto-close and give the
2933 		 * peer credit for being alive.
2934 		 */
2935 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2936 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2937 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2938 			    stcb->asoc.overall_error_count,
2939 			    0,
2940 			    SCTP_FROM_SCTP_INDATA,
2941 			    __LINE__);
2942 		}
2943 		stcb->asoc.overall_error_count = 0;
2944 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2945 	}
2946 	/* now service all of the reassm queue if needed */
2947 	if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2948 		/* Assure that we ack right away */
2949 		stcb->asoc.send_sack = 1;
2950 	}
2951 	/* Start a sack timer or QUEUE a SACK for sending */
2952 	sctp_sack_check(stcb, was_a_gap);
2953 	return (0);
2954 }
2955 
2956 static int
2957 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2958     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2959     int *num_frs,
2960     uint32_t *biggest_newly_acked_tsn,
2961     uint32_t *this_sack_lowest_newack,
2962     int *rto_ok)
2963 {
2964 	struct sctp_tmit_chunk *tp1;
2965 	unsigned int theTSN;
2966 	int j, wake_him = 0, circled = 0;
2967 
2968 	/* Recover the tp1 we last saw */
2969 	tp1 = *p_tp1;
2970 	if (tp1 == NULL) {
2971 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2972 	}
2973 	for (j = frag_strt; j <= frag_end; j++) {
2974 		theTSN = j + last_tsn;
2975 		while (tp1) {
2976 			if (tp1->rec.data.doing_fast_retransmit)
2977 				(*num_frs) += 1;
2978 
2979 			/*-
2980 			 * CMT: CUCv2 algorithm. For each TSN being
2981 			 * processed from the sent queue, track the
2982 			 * next expected pseudo-cumack, or
2983 			 * rtx_pseudo_cumack, if required. Separate
2984 			 * cumack trackers for first transmissions,
2985 			 * and retransmissions.
2986 			 */
2987 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2988 			    (tp1->whoTo->find_pseudo_cumack == 1) &&
2989 			    (tp1->snd_count == 1)) {
2990 				tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
2991 				tp1->whoTo->find_pseudo_cumack = 0;
2992 			}
2993 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2994 			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2995 			    (tp1->snd_count > 1)) {
2996 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
2997 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2998 			}
2999 			if (tp1->rec.data.tsn == theTSN) {
3000 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
3001 					/*-
3002 					 * must be held until
3003 					 * cum-ack passes
3004 					 */
3005 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3006 						/*-
3007 						 * If it is less than RESEND, it is
3008 						 * now no-longer in flight.
3009 						 * Higher values may already be set
3010 						 * via previous Gap Ack Blocks...
3011 						 * i.e. ACKED or RESEND.
3012 						 */
3013 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3014 						    *biggest_newly_acked_tsn)) {
3015 							*biggest_newly_acked_tsn = tp1->rec.data.tsn;
3016 						}
3017 						/*-
3018 						 * CMT: SFR algo (and HTNA) - set
3019 						 * saw_newack to 1 for dest being
3020 						 * newly acked. update
3021 						 * this_sack_highest_newack if
3022 						 * appropriate.
3023 						 */
3024 						if (tp1->rec.data.chunk_was_revoked == 0)
3025 							tp1->whoTo->saw_newack = 1;
3026 
3027 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3028 						    tp1->whoTo->this_sack_highest_newack)) {
3029 							tp1->whoTo->this_sack_highest_newack =
3030 							    tp1->rec.data.tsn;
3031 						}
3032 						/*-
3033 						 * CMT DAC algo: also update
3034 						 * this_sack_lowest_newack
3035 						 */
3036 						if (*this_sack_lowest_newack == 0) {
3037 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3038 								sctp_log_sack(*this_sack_lowest_newack,
3039 								    last_tsn,
3040 								    tp1->rec.data.tsn,
3041 								    0,
3042 								    0,
3043 								    SCTP_LOG_TSN_ACKED);
3044 							}
3045 							*this_sack_lowest_newack = tp1->rec.data.tsn;
3046 						}
3047 						/*-
3048 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
3049 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
3050 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
3051 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
3052 						 * Separate pseudo_cumack trackers for first transmissions and
3053 						 * retransmissions.
3054 						 */
3055 						if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
3056 							if (tp1->rec.data.chunk_was_revoked == 0) {
3057 								tp1->whoTo->new_pseudo_cumack = 1;
3058 							}
3059 							tp1->whoTo->find_pseudo_cumack = 1;
3060 						}
3061 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3062 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3063 						}
3064 						if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
3065 							if (tp1->rec.data.chunk_was_revoked == 0) {
3066 								tp1->whoTo->new_pseudo_cumack = 1;
3067 							}
3068 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
3069 						}
3070 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3071 							sctp_log_sack(*biggest_newly_acked_tsn,
3072 							    last_tsn,
3073 							    tp1->rec.data.tsn,
3074 							    frag_strt,
3075 							    frag_end,
3076 							    SCTP_LOG_TSN_ACKED);
3077 						}
3078 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3079 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3080 							    tp1->whoTo->flight_size,
3081 							    tp1->book_size,
3082 							    (uint32_t)(uintptr_t)tp1->whoTo,
3083 							    tp1->rec.data.tsn);
3084 						}
3085 						sctp_flight_size_decrease(tp1);
3086 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3087 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3088 							    tp1);
3089 						}
3090 						sctp_total_flight_decrease(stcb, tp1);
3091 
3092 						tp1->whoTo->net_ack += tp1->send_size;
3093 						if (tp1->snd_count < 2) {
3094 							/*-
3095 							 * True non-retransmitted chunk
3096 							 */
3097 							tp1->whoTo->net_ack2 += tp1->send_size;
3098 
3099 							/*-
3100 							 * update RTO too ?
3101 							 */
3102 							if (tp1->do_rtt) {
3103 								if (*rto_ok &&
3104 								    sctp_calculate_rto(stcb,
3105 								    &stcb->asoc,
3106 								    tp1->whoTo,
3107 								    &tp1->sent_rcv_time,
3108 								    SCTP_RTT_FROM_DATA)) {
3109 									*rto_ok = 0;
3110 								}
3111 								if (tp1->whoTo->rto_needed == 0) {
3112 									tp1->whoTo->rto_needed = 1;
3113 								}
3114 								tp1->do_rtt = 0;
3115 							}
3116 						}
3117 
3118 					}
3119 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3120 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3121 						    stcb->asoc.this_sack_highest_gap)) {
3122 							stcb->asoc.this_sack_highest_gap =
3123 							    tp1->rec.data.tsn;
3124 						}
3125 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3126 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3127 #ifdef SCTP_AUDITING_ENABLED
3128 							sctp_audit_log(0xB2,
3129 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3130 #endif
3131 						}
3132 					}
3133 					/*-
3134 					 * All chunks NOT UNSENT fall through here and are marked
3135 					 * (leave PR-SCTP ones that are to skip alone though)
3136 					 */
3137 					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
3138 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3139 						tp1->sent = SCTP_DATAGRAM_MARKED;
3140 					}
3141 					if (tp1->rec.data.chunk_was_revoked) {
3142 						/* deflate the cwnd */
3143 						tp1->whoTo->cwnd -= tp1->book_size;
3144 						tp1->rec.data.chunk_was_revoked = 0;
3145 					}
3146 					/* NR Sack code here */
3147 					if (nr_sacking &&
3148 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3149 						if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3150 							stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
3151 #ifdef INVARIANTS
3152 						} else {
3153 							panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
3154 #endif
3155 						}
3156 						if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3157 						    (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3158 						    TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3159 							stcb->asoc.trigger_reset = 1;
3160 						}
3161 						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3162 						if (tp1->data) {
3163 							/*
3164 							 * sa_ignore
3165 							 * NO_NULL_CHK
3166 							 */
3167 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3168 							sctp_m_freem(tp1->data);
3169 							tp1->data = NULL;
3170 						}
3171 						wake_him++;
3172 					}
3173 				}
3174 				break;
3175 			}	/* if (tp1->tsn == theTSN) */
3176 			if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
3177 				break;
3178 			}
3179 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3180 			if ((tp1 == NULL) && (circled == 0)) {
3181 				circled++;
3182 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3183 			}
3184 		}		/* end while (tp1) */
3185 		if (tp1 == NULL) {
3186 			circled = 0;
3187 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3188 		}
3189 		/* In case the fragments were not in order we must reset */
3190 	}			/* end for (j = fragStart */
3191 	*p_tp1 = tp1;
3192 	return (wake_him);	/* Return value only used for nr-sack */
3193 }
3194 
3195 
3196 static int
3197 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3198     uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3199     uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
3200     int num_seg, int num_nr_seg, int *rto_ok)
3201 {
3202 	struct sctp_gap_ack_block *frag, block;
3203 	struct sctp_tmit_chunk *tp1;
3204 	int i;
3205 	int num_frs = 0;
3206 	int chunk_freed;
3207 	int non_revocable;
3208 	uint16_t frag_strt, frag_end, prev_frag_end;
3209 
3210 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3211 	prev_frag_end = 0;
3212 	chunk_freed = 0;
3213 
3214 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
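	/*
	 * Walk the gap ack blocks. Each block carries offsets relative to
	 * the cumulative TSN ack, so a block (start, end) acks the TSN
	 * range [last_tsn + start, last_tsn + end]; e.g. with last_tsn
	 * 1000, a block of (2, 4) acks TSNs 1002 through 1004. The num_seg
	 * renegable blocks come first, followed by the num_nr_seg
	 * non-renegable ones, which is why the walk restarts at the head
	 * of the sent queue when i reaches num_seg.
	 */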
3215 		if (i == num_seg) {
3216 			prev_frag_end = 0;
3217 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3218 		}
3219 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3220 		    sizeof(struct sctp_gap_ack_block), (uint8_t *)&block);
3221 		*offset += sizeof(block);
3222 		if (frag == NULL) {
3223 			return (chunk_freed);
3224 		}
3225 		frag_strt = ntohs(frag->start);
3226 		frag_end = ntohs(frag->end);
3227 
3228 		if (frag_strt > frag_end) {
3229 			/* This gap report is malformed, skip it. */
3230 			continue;
3231 		}
3232 		if (frag_strt <= prev_frag_end) {
3233 			/* This gap report is not in order, so restart. */
3234 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3235 		}
3236 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3237 			*biggest_tsn_acked = last_tsn + frag_end;
3238 		}
3239 		if (i < num_seg) {
3240 			non_revocable = 0;
3241 		} else {
3242 			non_revocable = 1;
3243 		}
3244 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3245 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3246 		    this_sack_lowest_newack, rto_ok)) {
3247 			chunk_freed = 1;
3248 		}
3249 		prev_frag_end = frag_end;
3250 	}
3251 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3252 		if (num_frs)
3253 			sctp_log_fr(*biggest_tsn_acked,
3254 			    *biggest_newly_acked_tsn,
3255 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3256 	}
3257 	return (chunk_freed);
3258 }
3259 
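/*
 * A TSN that was covered by a gap ack block in an earlier SACK can be
 * missing from a later one; such a chunk has been "revoked" by the peer
 * and must be put back in flight, which is what this pass detects.
 */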
3260 static void
3261 sctp_check_for_revoked(struct sctp_tcb *stcb,
3262     struct sctp_association *asoc, uint32_t cumack,
3263     uint32_t biggest_tsn_acked)
3264 {
3265 	struct sctp_tmit_chunk *tp1;
3266 
3267 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3268 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3269 			/*
3270 			 * OK, this one is either ACKED or MARKED. If it is
3271 			 * ACKED, it has been previously acked but not this
3272 			 * time, i.e. it has been revoked. If it is MARKED, it
3273 			 * was acked again.
3274 			 */
3275 			if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3276 				break;
3277 			}
3278 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3279 				/* it has been revoked */
3280 				tp1->sent = SCTP_DATAGRAM_SENT;
3281 				tp1->rec.data.chunk_was_revoked = 1;
3282 				/*
3283 				 * We must add this stuff back in to assure
3284 				 * timers and such get started.
3285 				 */
3286 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3287 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3288 					    tp1->whoTo->flight_size,
3289 					    tp1->book_size,
3290 					    (uint32_t)(uintptr_t)tp1->whoTo,
3291 					    tp1->rec.data.tsn);
3292 				}
3293 				sctp_flight_size_increase(tp1);
3294 				sctp_total_flight_increase(stcb, tp1);
3295 				/*
3296 				 * We inflate the cwnd to compensate for our
3297 				 * artificial inflation of the flight_size.
3298 				 */
3299 				tp1->whoTo->cwnd += tp1->book_size;
3300 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3301 					sctp_log_sack(asoc->last_acked_seq,
3302 					    cumack,
3303 					    tp1->rec.data.tsn,
3304 					    0,
3305 					    0,
3306 					    SCTP_LOG_TSN_REVOKED);
3307 				}
3308 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3309 				/* it has been re-acked in this SACK */
3310 				tp1->sent = SCTP_DATAGRAM_ACKED;
3311 			}
3312 		}
3313 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3314 			break;
3315 	}
3316 }
3317 
3318 
3319 static void
3320 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3321     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3322 {
3323 	struct sctp_tmit_chunk *tp1;
3324 	int strike_flag = 0;
3325 	struct timeval now;
3326 	int tot_retrans = 0;
3327 	uint32_t sending_seq;
3328 	struct sctp_nets *net;
3329 	int num_dests_sacked = 0;
3330 
3331 	/*
3332 	 * Select the sending_seq: this is either the next thing ready to be
3333 	 * sent but not yet transmitted, OR the next seq we will assign.
3334 	 */
3335 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3336 	if (tp1 == NULL) {
3337 		sending_seq = asoc->sending_seq;
3338 	} else {
3339 		sending_seq = tp1->rec.data.tsn;
3340 	}
3341 
3342 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3343 	if ((asoc->sctp_cmt_on_off > 0) &&
3344 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3345 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3346 			if (net->saw_newack)
3347 				num_dests_sacked++;
3348 		}
3349 	}
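	/*
	 * num_dests_sacked counts the destinations for which this SACK
	 * newly acked data. The DAC checks below only apply their extra
	 * strike when num_dests_sacked == 1, i.e. when the SACK is not a
	 * "mixed" SACK covering several destinations.
	 */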
3350 	if (stcb->asoc.prsctp_supported) {
3351 		(void)SCTP_GETTIME_TIMEVAL(&now);
3352 	}
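	/*
	 * Strike pass. While a chunk is outstanding, tp1->sent doubles as
	 * a strike counter: each strike below moves it one step from
	 * SCTP_DATAGRAM_SENT toward SCTP_DATAGRAM_RESEND, and once it
	 * reaches SCTP_DATAGRAM_RESEND the chunk is handed to the
	 * fast-retransmit handling at the bottom of the loop.
	 */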
3353 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3354 		strike_flag = 0;
3355 		if (tp1->no_fr_allowed) {
3356 			/* this one had a timeout or something */
3357 			continue;
3358 		}
3359 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3360 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3361 				sctp_log_fr(biggest_tsn_newly_acked,
3362 				    tp1->rec.data.tsn,
3363 				    tp1->sent,
3364 				    SCTP_FR_LOG_CHECK_STRIKE);
3365 		}
3366 		if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3367 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3368 			/* done */
3369 			break;
3370 		}
3371 		if (stcb->asoc.prsctp_supported) {
3372 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3373 				/* Is it expired? */
3374 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3375 					/* Yes so drop it */
3376 					if (tp1->data != NULL) {
3377 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3378 						    SCTP_SO_NOT_LOCKED);
3379 					}
3380 					continue;
3381 				}
3382 			}
3383 
3384 		}
3385 		if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) &&
3386 		    !(accum_moved && asoc->fast_retran_loss_recovery)) {
3387 			/* we are beyond the tsn in the sack  */
3388 			break;
3389 		}
3390 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3391 			/* either a RESEND, ACKED, or MARKED */
3392 			/* skip */
3393 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3394 				/* Continue striking FWD-TSN chunks */
3395 				tp1->rec.data.fwd_tsn_cnt++;
3396 			}
3397 			continue;
3398 		}
3399 		/*
3400 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3401 		 */
3402 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3403 			/*
3404 			 * No new acks were received for data sent to this
3405 			 * dest. Therefore, according to the SFR algo for
3406 			 * CMT, no data sent to this dest can be marked for
3407 			 * FR using this SACK.
3408 			 */
3409 			continue;
3410 		} else if (tp1->whoTo &&
3411 			    SCTP_TSN_GT(tp1->rec.data.tsn,
3412 			    tp1->whoTo->this_sack_highest_newack) &&
3413 		    !(accum_moved && asoc->fast_retran_loss_recovery)) {
3414 			/*
3415 			 * CMT: New acks were received for data sent to
3416 			 * this dest. But no new acks were seen for data
3417 			 * sent after tp1. Therefore, according to the SFR
3418 			 * algo for CMT, tp1 cannot be marked for FR using
3419 			 * this SACK. This step covers part of the DAC algo
3420 			 * and the HTNA algo as well.
3421 			 */
3422 			continue;
3423 		}
3424 		/*
3425 		 * Here we check to see if we have already done a FR
3426 		 * and if so we see if the biggest TSN we saw in the sack is
3427 		 * smaller than the recovery point. If so we don't strike
3428 		 * the tsn... otherwise we CAN strike the TSN.
3429 		 */
3430 		/*
3431 		 * @@@ JRI: Check for CMT if (accum_moved &&
3432 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3433 		 * 0)) {
3434 		 */
3435 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3436 			/*
3437 			 * Strike the TSN if in fast-recovery and cum-ack
3438 			 * moved.
3439 			 */
3440 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3441 				sctp_log_fr(biggest_tsn_newly_acked,
3442 				    tp1->rec.data.tsn,
3443 				    tp1->sent,
3444 				    SCTP_FR_LOG_STRIKE_CHUNK);
3445 			}
3446 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3447 				tp1->sent++;
3448 			}
3449 			if ((asoc->sctp_cmt_on_off > 0) &&
3450 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3451 				/*
3452 				 * CMT DAC algorithm: If SACK flag is set to
3453 				 * 0, then lowest_newack test will not pass
3454 				 * because it would have been set to the
3455 				 * cumack earlier. If the chunk is not already
3456 				 * to be rtx'd, this is not a mixed SACK, and
3457 				 * tp1 is not between two sacked TSNs, then
3458 				 * mark it by one more. NOTE that we are marking by one
3459 				 * additional time since the SACK DAC flag
3460 				 * indicates that two packets have been
3461 				 * received after this missing TSN.
3462 				 */
3463 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3464 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3465 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3466 						sctp_log_fr(16 + num_dests_sacked,
3467 						    tp1->rec.data.tsn,
3468 						    tp1->sent,
3469 						    SCTP_FR_LOG_STRIKE_CHUNK);
3470 					}
3471 					tp1->sent++;
3472 				}
3473 			}
3474 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3475 		    (asoc->sctp_cmt_on_off == 0)) {
3476 			/*
3477 			 * For those that have done a FR we must take
3478 			 * special consideration if we strike. I.e the
3479 			 * biggest_newly_acked must be higher than the
3480 			 * sending_seq at the time we did the FR.
3481 			 */
3482 			if (
3483 #ifdef SCTP_FR_TO_ALTERNATE
3484 			/*
3485 			 * If FR's go to new networks, then we must only do
3486 			 * this for singly homed asoc's. However if the FR's
3487 			 * go to the same network (Armando's work) then its
3488 			 * go to the same network (Armando's work) then it's
3489 			 */
3490 			    (asoc->numnets < 2)
3491 #else
3492 			    (1)
3493 #endif
3494 			    ) {
3495 
3496 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3497 				    tp1->rec.data.fast_retran_tsn)) {
3498 					/*
3499 					 * Strike the TSN, since this ack is
3500 					 * beyond where things were when we
3501 					 * did a FR.
3502 					 */
3503 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3504 						sctp_log_fr(biggest_tsn_newly_acked,
3505 						    tp1->rec.data.tsn,
3506 						    tp1->sent,
3507 						    SCTP_FR_LOG_STRIKE_CHUNK);
3508 					}
3509 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3510 						tp1->sent++;
3511 					}
3512 					strike_flag = 1;
3513 					if ((asoc->sctp_cmt_on_off > 0) &&
3514 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3515 						/*
3516 						 * CMT DAC algorithm: If
3517 						 * SACK flag is set to 0,
3518 						 * then lowest_newack test
3519 						 * will not pass because it
3520 						 * would have been set to
3521 						 * the cumack earlier. If the
3522 						 * chunk is not already to be
3523 						 * rtx'd, this is not a mixed
3524 						 * SACK, and tp1 is not between
3525 						 * two sacked TSNs, then mark by
3526 						 * one more. NOTE that we
3527 						 * are marking by one
3528 						 * additional time since the
3529 						 * SACK DAC flag indicates
3530 						 * that two packets have
3531 						 * been received after this
3532 						 * missing TSN.
3533 						 */
3534 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3535 						    (num_dests_sacked == 1) &&
3536 						    SCTP_TSN_GT(this_sack_lowest_newack,
3537 						    tp1->rec.data.tsn)) {
3538 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3539 								sctp_log_fr(32 + num_dests_sacked,
3540 								    tp1->rec.data.tsn,
3541 								    tp1->sent,
3542 								    SCTP_FR_LOG_STRIKE_CHUNK);
3543 							}
3544 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3545 								tp1->sent++;
3546 							}
3547 						}
3548 					}
3549 				}
3550 			}
3551 			/*
3552 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3553 			 * algo covers HTNA.
3554 			 */
3555 		} else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3556 		    biggest_tsn_newly_acked)) {
3557 			/*
3558 			 * We don't strike these: this is the HTNA
3559 			 * algorithm, i.e. we don't strike if our TSN is
3560 			 * larger than the Highest TSN Newly Acked.
3561 			 */
3562 			;
3563 		} else {
3564 			/* Strike the TSN */
3565 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3566 				sctp_log_fr(biggest_tsn_newly_acked,
3567 				    tp1->rec.data.tsn,
3568 				    tp1->sent,
3569 				    SCTP_FR_LOG_STRIKE_CHUNK);
3570 			}
3571 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3572 				tp1->sent++;
3573 			}
3574 			if ((asoc->sctp_cmt_on_off > 0) &&
3575 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3576 				/*
3577 				 * CMT DAC algorithm: If SACK flag is set to
3578 				 * 0, then lowest_newack test will not pass
3579 				 * because it would have been set to the
3580 				 * cumack earlier. If the chunk is not already
3581 				 * to be rtx'd, this is not a mixed SACK, and
3582 				 * tp1 is not between two sacked TSNs, then
3583 				 * mark it by one more. NOTE that we are marking by one
3584 				 * additional time since the SACK DAC flag
3585 				 * indicates that two packets have been
3586 				 * received after this missing TSN.
3587 				 */
3588 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3589 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3590 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3591 						sctp_log_fr(48 + num_dests_sacked,
3592 						    tp1->rec.data.tsn,
3593 						    tp1->sent,
3594 						    SCTP_FR_LOG_STRIKE_CHUNK);
3595 					}
3596 					tp1->sent++;
3597 				}
3598 			}
3599 		}
3600 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3601 			struct sctp_nets *alt;
3602 
3603 			/* fix counts and things */
3604 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3605 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3606 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3607 				    tp1->book_size,
3608 				    (uint32_t)(uintptr_t)tp1->whoTo,
3609 				    tp1->rec.data.tsn);
3610 			}
3611 			if (tp1->whoTo) {
3612 				tp1->whoTo->net_ack++;
3613 				sctp_flight_size_decrease(tp1);
3614 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3615 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3616 					    tp1);
3617 				}
3618 			}
3619 
3620 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3621 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3622 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3623 			}
3624 			/* add back to the rwnd */
3625 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3626 
3627 			/* remove from the total flight */
3628 			sctp_total_flight_decrease(stcb, tp1);
3629 
3630 			if ((stcb->asoc.prsctp_supported) &&
3631 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3632 				/*
3633 				 * Has it been retransmitted tv_sec times? -
3634 				 * we store the retran count there.
3635 				 */
3636 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3637 					/* Yes, so drop it */
3638 					if (tp1->data != NULL) {
3639 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3640 						    SCTP_SO_NOT_LOCKED);
3641 					}
3642 					/* Make sure to flag we had a FR */
3643 					if (tp1->whoTo != NULL) {
3644 						tp1->whoTo->net_ack++;
3645 					}
3646 					continue;
3647 				}
3648 			}
3649 			/*
3650 			 * SCTP_PRINTF("OK, we are now ready to FR this
3651 			 * guy\n");
3652 			 */
3653 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3654 				sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3655 				    0, SCTP_FR_MARKED);
3656 			}
3657 			if (strike_flag) {
3658 				/* This is a subsequent FR */
3659 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3660 			}
3661 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3662 			if (asoc->sctp_cmt_on_off > 0) {
3663 				/*
3664 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3665 				 * If CMT is being used, then pick dest with
3666 				 * largest ssthresh for any retransmission.
3667 				 */
3668 				tp1->no_fr_allowed = 1;
3669 				alt = tp1->whoTo;
3670 				/* sa_ignore NO_NULL_CHK */
3671 				if (asoc->sctp_cmt_pf > 0) {
3672 					/*
3673 					 * JRS 5/18/07 - If CMT PF is on,
3674 					 * use the PF version of
3675 					 * find_alt_net()
3676 					 */
3677 					alt = sctp_find_alternate_net(stcb, alt, 2);
3678 				} else {
3679 					/*
3680 					 * JRS 5/18/07 - If only CMT is on,
3681 					 * use the CMT version of
3682 					 * find_alt_net()
3683 					 */
3684 					/* sa_ignore NO_NULL_CHK */
3685 					alt = sctp_find_alternate_net(stcb, alt, 1);
3686 				}
3687 				if (alt == NULL) {
3688 					alt = tp1->whoTo;
3689 				}
3690 				/*
3691 				 * CUCv2: If a different dest is picked for
3692 				 * the retransmission, then new
3693 				 * (rtx-)pseudo_cumack needs to be tracked
3694 				 * for orig dest. Let CUCv2 track new (rtx-)
3695 				 * pseudo-cumack always.
3696 				 */
3697 				if (tp1->whoTo) {
3698 					tp1->whoTo->find_pseudo_cumack = 1;
3699 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3700 				}
3701 
3702 			} else {	/* CMT is OFF */
3703 
3704 #ifdef SCTP_FR_TO_ALTERNATE
3705 				/* Can we find an alternate? */
3706 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3707 #else
3708 				/*
3709 				 * default behavior is to NOT retransmit
3710 				 * FR's to an alternate. Armando Caro's
3711 				 * paper details why.
3712 				 */
3713 				alt = tp1->whoTo;
3714 #endif
3715 			}
3716 
3717 			tp1->rec.data.doing_fast_retransmit = 1;
3718 			tot_retrans++;
3719 			/* mark the sending seq for possible subsequent FR's */
3720 			/*
3721 			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3722 			 * (uint32_t)tpi->rec.data.tsn);
3723 			 */
3724 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3725 				/*
3726 				 * If the send queue is empty, then it's
3727 				 * the next sequence number that will be
3728 				 * assigned, so we subtract one from this to
3729 				 * get the one we last sent.
3730 				 */
3731 				tp1->rec.data.fast_retran_tsn = sending_seq;
3732 			} else {
3733 				/*
3734 				 * If there are chunks on the send queue
3735 				 * (unsent data that has made it from the
3736 				 * stream queues but not out the door), we
3737 				 * take the first one (which will have the
3738 				 * lowest TSN) and subtract one to get the
3739 				 * one we last sent.
3740 				 */
3741 				struct sctp_tmit_chunk *ttt;
3742 
3743 				ttt = TAILQ_FIRST(&asoc->send_queue);
3744 				tp1->rec.data.fast_retran_tsn =
3745 				    ttt->rec.data.tsn;
3746 			}
3747 
3748 			if (tp1->do_rtt) {
3749 				/*
3750 				 * this one had an RTO calculation pending on
3751 				 * it, cancel it
3752 				 */
3753 				if ((tp1->whoTo != NULL) &&
3754 				    (tp1->whoTo->rto_needed == 0)) {
3755 					tp1->whoTo->rto_needed = 1;
3756 				}
3757 				tp1->do_rtt = 0;
3758 			}
3759 			if (alt != tp1->whoTo) {
3760 				/* yes, there is an alternate. */
3761 				sctp_free_remote_addr(tp1->whoTo);
3762 				/* sa_ignore FREED_MEMORY */
3763 				tp1->whoTo = alt;
3764 				atomic_add_int(&alt->ref_count, 1);
3765 			}
3766 		}
3767 	}
3768 }
3769 
3770 struct sctp_tmit_chunk *
3771 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3772     struct sctp_association *asoc)
3773 {
3774 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3775 	struct timeval now;
3776 	int now_filled = 0;
3777 
3778 	if (asoc->prsctp_supported == 0) {
3779 		return (NULL);
3780 	}
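	/*
	 * Walk the sent queue from the front: the advanced peer ack point
	 * may only move forward over chunks that are abandoned
	 * (FORWARD_TSN_SKIP), NR-acked, or resends whose PR lifetime has
	 * expired (which get abandoned here). The last such chunk is
	 * returned so the caller can cover it with a FWD-TSN.
	 */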
3781 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3782 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3783 		    tp1->sent != SCTP_DATAGRAM_RESEND &&
3784 		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3785 			/* no chance to advance, out of here */
3786 			break;
3787 		}
3788 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3789 			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3790 			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3791 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3792 				    asoc->advanced_peer_ack_point,
3793 				    tp1->rec.data.tsn, 0, 0);
3794 			}
3795 		}
3796 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3797 			/*
3798 			 * We can't fwd-tsn past any that are reliable, aka
3799 			 * will be retransmitted until the asoc fails.
3800 			 */
3801 			break;
3802 		}
3803 		if (!now_filled) {
3804 			(void)SCTP_GETTIME_TIMEVAL(&now);
3805 			now_filled = 1;
3806 		}
3807 		/*
3808 		 * Now we have a chunk which is marked for another
3809 		 * retransmission to a PR-stream, but which may have already
3810 		 * run out of its chances, OR which has now been marked to
3811 		 * skip. Can we skip it if it's a resend?
3812 		 */
3813 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3814 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3815 			/*
3816 			 * Now is this one marked for resend and its time is
3817 			 * now up?
3818 			 */
3819 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3820 				/* Yes so drop it */
3821 				if (tp1->data) {
3822 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3823 					    1, SCTP_SO_NOT_LOCKED);
3824 				}
3825 			} else {
3826 				/*
3827 				 * No, we are done when we hit one marked for
3828 				 * resend whose time has not expired.
3829 				 */
3830 				break;
3831 			}
3832 		}
3833 		/*
3834 		 * OK, now if this chunk is marked to be dropped, we can clean
3835 		 * up the chunk, advance our peer ack point, and check
3836 		 * the next chunk.
3837 		 */
3838 		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3839 		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3840 			/* the advanced PeerAckPoint goes forward */
3841 			if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3842 				asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3843 				a_adv = tp1;
3844 			} else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3845 				/* No update but we do save the chk */
3846 				a_adv = tp1;
3847 			}
3848 		} else {
3849 			/*
3850 			 * If it is still in RESEND we can advance no
3851 			 * further
3852 			 */
3853 			break;
3854 		}
3855 	}
3856 	return (a_adv);
3857 }
3858 
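/*
 * Consistency audit: recount the sent queue by chunk state and compare
 * against the flight-size accounting. Under INVARIANTS a mismatch
 * panics; otherwise it is logged and reported to the caller so the
 * counters can be rebuilt.
 */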
3859 static int
3860 sctp_fs_audit(struct sctp_association *asoc)
3861 {
3862 	struct sctp_tmit_chunk *chk;
3863 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3864 	int ret;
3865 #ifndef INVARIANTS
3866 	int entry_flight, entry_cnt;
3867 #endif
3868 
3869 	ret = 0;
3870 #ifndef INVARIANTS
3871 	entry_flight = asoc->total_flight;
3872 	entry_cnt = asoc->total_flight_count;
3873 #endif
3874 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3875 		return (0);
3876 
3877 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3878 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3879 			SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3880 			    chk->rec.data.tsn,
3881 			    chk->send_size,
3882 			    chk->snd_count);
3883 			inflight++;
3884 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3885 			resend++;
3886 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3887 			inbetween++;
3888 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3889 			above++;
3890 		} else {
3891 			acked++;
3892 		}
3893 	}
3894 
3895 	if ((inflight > 0) || (inbetween > 0)) {
3896 #ifdef INVARIANTS
3897 		panic("Flight size-express incorrect? \n");
3898 #else
3899 		SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3900 		    entry_flight, entry_cnt);
3901 
3902 		SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3903 		    inflight, inbetween, resend, above, acked);
3904 		ret = 1;
3905 #endif
3906 	}
3907 	return (ret);
3908 }
3909 
3910 
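/*
 * Recover a chunk that was sent as a zero-window probe: unless it was
 * already acked or skipped, take it out of flight and mark it for
 * retransmission so it goes out again now that the window has opened.
 */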
3911 static void
3912 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3913     struct sctp_association *asoc,
3914     struct sctp_tmit_chunk *tp1)
3915 {
3916 	tp1->window_probe = 0;
3917 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3918 		/* TSN's skipped; we do NOT move back. */
3919 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3920 		    tp1->whoTo ? tp1->whoTo->flight_size : 0,
3921 		    tp1->book_size,
3922 		    (uint32_t)(uintptr_t)tp1->whoTo,
3923 		    tp1->rec.data.tsn);
3924 		return;
3925 	}
3926 	/* First setup this by shrinking flight */
3927 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3928 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3929 		    tp1);
3930 	}
3931 	sctp_flight_size_decrease(tp1);
3932 	sctp_total_flight_decrease(stcb, tp1);
3933 	/* Now mark for resend */
3934 	tp1->sent = SCTP_DATAGRAM_RESEND;
3935 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3936 
3937 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3938 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3939 		    tp1->whoTo->flight_size,
3940 		    tp1->book_size,
3941 		    (uint32_t)(uintptr_t)tp1->whoTo,
3942 		    tp1->rec.data.tsn);
3943 	}
3944 }
3945 
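/*
 * Fast-path SACK handling for SACKs that only move the cumulative ack
 * (no gap ack blocks and no duplicate TSN reports), so the sent queue
 * can simply be drained up to cumack.
 */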
3946 void
3947 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3948     uint32_t rwnd, int *abort_now, int ecne_seen)
3949 {
3950 	struct sctp_nets *net;
3951 	struct sctp_association *asoc;
3952 	struct sctp_tmit_chunk *tp1, *tp2;
3953 	uint32_t old_rwnd;
3954 	int win_probe_recovery = 0;
3955 	int win_probe_recovered = 0;
3956 	int j, done_once = 0;
3957 	int rto_ok = 1;
3958 	uint32_t send_s;
3959 
3960 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3961 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3962 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3963 	}
3964 	SCTP_TCB_LOCK_ASSERT(stcb);
3965 #ifdef SCTP_ASOCLOG_OF_TSNS
3966 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3967 	stcb->asoc.cumack_log_at++;
3968 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3969 		stcb->asoc.cumack_log_at = 0;
3970 	}
3971 #endif
3972 	asoc = &stcb->asoc;
3973 	old_rwnd = asoc->peers_rwnd;
3974 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3975 		/* old ack */
3976 		return;
3977 	} else if (asoc->last_acked_seq == cumack) {
3978 		/* Window update sack */
3979 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3980 		    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3981 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3982 			/* SWS sender side engages */
3983 			asoc->peers_rwnd = 0;
3984 		}
3985 		if (asoc->peers_rwnd > old_rwnd) {
3986 			goto again;
3987 		}
3988 		return;
3989 	}
3990 
3991 	/* First setup for CC stuff */
3992 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3993 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3994 			/* Drag along the window_tsn for cwr's */
3995 			net->cwr_window_tsn = cumack;
3996 		}
3997 		net->prev_cwnd = net->cwnd;
3998 		net->net_ack = 0;
3999 		net->net_ack2 = 0;
4000 
4001 		/*
4002 		 * CMT: Reset CUC and Fast recovery algo variables before
4003 		 * SACK processing
4004 		 */
4005 		net->new_pseudo_cumack = 0;
4006 		net->will_exit_fast_recovery = 0;
4007 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4008 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4009 		}
4010 	}
4011 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4012 		tp1 = TAILQ_LAST(&asoc->sent_queue,
4013 		    sctpchunk_listhead);
4014 		send_s = tp1->rec.data.tsn + 1;
4015 	} else {
4016 		send_s = asoc->sending_seq;
4017 	}
4018 	if (SCTP_TSN_GE(cumack, send_s)) {
4019 		struct mbuf *op_err;
4020 		char msg[SCTP_DIAG_INFO_LEN];
4021 
4022 		*abort_now = 1;
4023 		/* XXX */
4024 		SCTP_SNPRINTF(msg, sizeof(msg),
4025 		    "Cum ack %8.8x greater or equal than TSN %8.8x",
4026 		    cumack, send_s);
4027 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4028 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4029 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4030 		return;
4031 	}
4032 	asoc->this_sack_highest_gap = cumack;
4033 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4034 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4035 		    stcb->asoc.overall_error_count,
4036 		    0,
4037 		    SCTP_FROM_SCTP_INDATA,
4038 		    __LINE__);
4039 	}
4040 	stcb->asoc.overall_error_count = 0;
4041 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
4042 		/* process the new consecutive TSN first */
4043 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4044 			if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
4045 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4046 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
4047 				}
4048 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4049 					/*
4050 					 * If it is less than ACKED, it is
4051 					 * now no-longer in flight. Higher
4052 					 * values may occur during marking
4053 					 */
4054 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4055 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4056 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4057 							    tp1->whoTo->flight_size,
4058 							    tp1->book_size,
4059 							    (uint32_t)(uintptr_t)tp1->whoTo,
4060 							    tp1->rec.data.tsn);
4061 						}
4062 						sctp_flight_size_decrease(tp1);
4063 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4064 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4065 							    tp1);
4066 						}
4067 						/* sa_ignore NO_NULL_CHK */
4068 						sctp_total_flight_decrease(stcb, tp1);
4069 					}
4070 					tp1->whoTo->net_ack += tp1->send_size;
4071 					if (tp1->snd_count < 2) {
4072 						/*
4073 						 * True non-retransmitted
4074 						 * chunk
4075 						 */
4076 						tp1->whoTo->net_ack2 +=
4077 						    tp1->send_size;
4078 
4079 						/* update RTO too? */
4080 						if (tp1->do_rtt) {
4081 							if (rto_ok &&
4082 							    sctp_calculate_rto(stcb,
4083 							    &stcb->asoc,
4084 							    tp1->whoTo,
4085 							    &tp1->sent_rcv_time,
4086 							    SCTP_RTT_FROM_DATA)) {
4087 								rto_ok = 0;
4088 							}
4089 							if (tp1->whoTo->rto_needed == 0) {
4090 								tp1->whoTo->rto_needed = 1;
4091 							}
4092 							tp1->do_rtt = 0;
4093 						}
4094 					}
4095 					/*
4096 					 * CMT: CUCv2 algorithm. From the
4097 					 * cumack'd TSNs, for each TSN being
4098 					 * acked for the first time, set the
4099 					 * following variables for the
4100 					 * corresp destination.
4101 					 * new_pseudo_cumack will trigger a
4102 					 * cwnd update.
4103 					 * find_(rtx_)pseudo_cumack will
4104 					 * trigger search for the next
4105 					 * expected (rtx-)pseudo-cumack.
4106 					 */
4107 					tp1->whoTo->new_pseudo_cumack = 1;
4108 					tp1->whoTo->find_pseudo_cumack = 1;
4109 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4110 
4111 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4112 						/* sa_ignore NO_NULL_CHK */
4113 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4114 					}
4115 				}
4116 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4117 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4118 				}
4119 				if (tp1->rec.data.chunk_was_revoked) {
4120 					/* deflate the cwnd */
4121 					tp1->whoTo->cwnd -= tp1->book_size;
4122 					tp1->rec.data.chunk_was_revoked = 0;
4123 				}
4124 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4125 					if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4126 						asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4127 #ifdef INVARIANTS
4128 					} else {
4129 						panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4130 #endif
4131 					}
4132 				}
4133 				if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4134 				    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4135 				    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4136 					asoc->trigger_reset = 1;
4137 				}
4138 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4139 				if (tp1->data) {
4140 					/* sa_ignore NO_NULL_CHK */
4141 					sctp_free_bufspace(stcb, asoc, tp1, 1);
4142 					sctp_m_freem(tp1->data);
4143 					tp1->data = NULL;
4144 				}
4145 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4146 					sctp_log_sack(asoc->last_acked_seq,
4147 					    cumack,
4148 					    tp1->rec.data.tsn,
4149 					    0,
4150 					    0,
4151 					    SCTP_LOG_FREE_SENT);
4152 				}
4153 				asoc->sent_queue_cnt--;
4154 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4155 			} else {
4156 				break;
4157 			}
4158 		}
4159 
4160 	}
4161 	/* sa_ignore NO_NULL_CHK */
4162 	if (stcb->sctp_socket) {
4163 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4164 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4165 			/* sa_ignore NO_NULL_CHK */
4166 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4167 		}
4168 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4169 	} else {
4170 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4171 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4172 		}
4173 	}
4174 
4175 	/* JRS - Use the congestion control given in the CC module */
4176 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4177 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4178 			if (net->net_ack2 > 0) {
4179 				/*
4180 				 * Karn's rule applies to clearing error
4181 				 * count, this is optional.
4182 				 */
4183 				net->error_count = 0;
4184 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4185 					/* addr came good */
4186 					net->dest_state |= SCTP_ADDR_REACHABLE;
4187 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4188 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4189 				}
4190 				if (net == stcb->asoc.primary_destination) {
4191 					if (stcb->asoc.alternate) {
4192 						/*
4193 						 * release the alternate,
4194 						 * primary is good
4195 						 */
4196 						sctp_free_remote_addr(stcb->asoc.alternate);
4197 						stcb->asoc.alternate = NULL;
4198 					}
4199 				}
4200 				if (net->dest_state & SCTP_ADDR_PF) {
4201 					net->dest_state &= ~SCTP_ADDR_PF;
4202 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4203 					    stcb->sctp_ep, stcb, net,
4204 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4205 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4206 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4207 					/* Done with this net */
4208 					net->net_ack = 0;
4209 				}
4210 				/* restore any doubled timers */
4211 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4212 				if (net->RTO < stcb->asoc.minrto) {
4213 					net->RTO = stcb->asoc.minrto;
4214 				}
4215 				if (net->RTO > stcb->asoc.maxrto) {
4216 					net->RTO = stcb->asoc.maxrto;
4217 				}
4218 			}
4219 		}
4220 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4221 	}
4222 	asoc->last_acked_seq = cumack;
4223 
4224 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4225 		/* nothing left in-flight */
4226 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4227 			net->flight_size = 0;
4228 			net->partial_bytes_acked = 0;
4229 		}
4230 		asoc->total_flight = 0;
4231 		asoc->total_flight_count = 0;
4232 	}
4233 
4234 	/* RWND update */
4235 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4236 	    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4237 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4238 		/* SWS sender side engages */
4239 		asoc->peers_rwnd = 0;
4240 	}
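	/*
	 * Worked example with hypothetical numbers: an advertised rwnd of
	 * 10000, a total_flight of 4000 made up of two chunks, and an
	 * sctp_peer_chunk_oh of 256 leave a usable peer window of
	 * 10000 - (4000 + 2 * 256) = 5488. Anything below the SWS
	 * threshold is clamped to 0 to avoid silly window syndrome.
	 */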
4241 	if (asoc->peers_rwnd > old_rwnd) {
4242 		win_probe_recovery = 1;
4243 	}
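	/*
	 * If the peer's window grew, a chunk that went out as a
	 * zero-window probe can be pulled back from the sent queue and
	 * retransmitted as ordinary data; the scan below handles that.
	 */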
4244 	/* Now assure a timer where data is queued at */
4245 again:
4246 	j = 0;
4247 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4248 		if (win_probe_recovery && (net->window_probe)) {
4249 			win_probe_recovered = 1;
4250 			/*
4251 			 * Find first chunk that was used with window probe
4252 			 * and clear the sent
4253 			 */
4254 			/* sa_ignore FREED_MEMORY */
4255 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4256 				if (tp1->window_probe) {
4257 					/* move back to data send queue */
4258 					sctp_window_probe_recovery(stcb, asoc, tp1);
4259 					break;
4260 				}
4261 			}
4262 		}
4263 		if (net->flight_size) {
4264 			j++;
4265 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4266 			if (net->window_probe) {
4267 				net->window_probe = 0;
4268 			}
4269 		} else {
4270 			if (net->window_probe) {
4271 				/*
4272 				 * In window probes we must assure a timer
4273 				 * is still running there
4274 				 */
4275 				net->window_probe = 0;
4276 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4277 					sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4278 				}
4279 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4280 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4281 				    stcb, net,
4282 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4283 			}
4284 		}
4285 	}
4286 	if ((j == 0) &&
4287 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4288 	    (asoc->sent_queue_retran_cnt == 0) &&
4289 	    (win_probe_recovered == 0) &&
4290 	    (done_once == 0)) {
4291 		/*
4292 		 * huh, this should not happen unless all packets are
4293 		 * PR-SCTP and marked to skip of course.
4294 		 */
4295 		if (sctp_fs_audit(asoc)) {
4296 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4297 				net->flight_size = 0;
4298 			}
4299 			asoc->total_flight = 0;
4300 			asoc->total_flight_count = 0;
4301 			asoc->sent_queue_retran_cnt = 0;
4302 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4303 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4304 					sctp_flight_size_increase(tp1);
4305 					sctp_total_flight_increase(stcb, tp1);
4306 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4307 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4308 				}
4309 			}
4310 		}
4311 		done_once = 1;
4312 		goto again;
4313 	}
4314 	/**********************************/
4315 	/* Now what about shutdown issues */
4316 	/**********************************/
4317 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4318 		/* nothing left on sendqueue.. consider done */
4319 		/* clean up */
4320 		if ((asoc->stream_queue_cnt == 1) &&
4321 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4322 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4323 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4324 			SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
4325 		}
4326 		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4327 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4328 		    (asoc->stream_queue_cnt == 1) &&
4329 		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4330 			struct mbuf *op_err;
4331 
4332 			*abort_now = 1;
4333 			/* XXX */
4334 			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4335 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_27;
4336 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4337 			return;
4338 		}
4339 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4340 		    (asoc->stream_queue_cnt == 0)) {
4341 			struct sctp_nets *netp;
4342 
4343 			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4344 			    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4345 				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4346 			}
4347 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
4348 			sctp_stop_timers_for_shutdown(stcb);
4349 			if (asoc->alternate) {
4350 				netp = asoc->alternate;
4351 			} else {
4352 				netp = asoc->primary_destination;
4353 			}
4354 			sctp_send_shutdown(stcb, netp);
4355 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4356 			    stcb->sctp_ep, stcb, netp);
4357 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4358 			    stcb->sctp_ep, stcb, NULL);
4359 		} else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4360 		    (asoc->stream_queue_cnt == 0)) {
4361 			struct sctp_nets *netp;
4362 
4363 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4364 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
4365 			sctp_stop_timers_for_shutdown(stcb);
4366 			if (asoc->alternate) {
4367 				netp = asoc->alternate;
4368 			} else {
4369 				netp = asoc->primary_destination;
4370 			}
4371 			sctp_send_shutdown_ack(stcb, netp);
4372 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4373 			    stcb->sctp_ep, stcb, netp);
4374 		}
4375 	}
4376 	/*********************************************/
4377 	/* Here we perform PR-SCTP procedures        */
4378 	/* (section 4.2)                             */
4379 	/*********************************************/
4380 	/* C1. update advancedPeerAckPoint */
4381 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4382 		asoc->advanced_peer_ack_point = cumack;
4383 	}
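	/*
	 * PR-SCTP (RFC 3758): when abandoned chunks let the advanced peer
	 * ack point move beyond the plain cumack, a FWD-TSN chunk is sent
	 * so the receiver can slide its cumulative ack past them too.
	 */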
4384 	/* PR-Sctp issues need to be addressed too */
4385 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4386 		struct sctp_tmit_chunk *lchk;
4387 		uint32_t old_adv_peer_ack_point;
4388 
4389 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4390 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4391 		/* C3. See if we need to send a Fwd-TSN */
4392 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4393 			/*
4394 			 * ISSUE with ECN, see FWD-TSN processing.
4395 			 */
4396 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4397 				send_forward_tsn(stcb, asoc);
4398 			} else if (lchk) {
4399 				/* try to FR fwd-tsn's that get lost too */
4400 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4401 					send_forward_tsn(stcb, asoc);
4402 				}
4403 			}
4404 		}
4405 		for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
4406 			if (lchk->whoTo != NULL) {
4407 				break;
4408 			}
4409 		}
4410 		if (lchk != NULL) {
4411 			/* Assure a timer is up */
4412 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4413 			    stcb->sctp_ep, stcb, lchk->whoTo);
4414 		}
4415 	}
4416 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4417 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4418 		    rwnd,
4419 		    stcb->asoc.peers_rwnd,
4420 		    stcb->asoc.total_flight,
4421 		    stcb->asoc.total_output_queue_size);
4422 	}
4423 }
4424 
4425 void
4426 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4427     struct sctp_tcb *stcb,
4428     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4429     int *abort_now, uint8_t flags,
4430     uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4431 {
4432 	struct sctp_association *asoc;
4433 	struct sctp_tmit_chunk *tp1, *tp2;
4434 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4435 	uint16_t wake_him = 0;
4436 	uint32_t send_s = 0;
4437 	long j;
4438 	int accum_moved = 0;
4439 	int will_exit_fast_recovery = 0;
4440 	uint32_t a_rwnd, old_rwnd;
4441 	int win_probe_recovery = 0;
4442 	int win_probe_recovered = 0;
4443 	struct sctp_nets *net = NULL;
4444 	int done_once;
4445 	int rto_ok = 1;
4446 	uint8_t reneged_all = 0;
4447 	uint8_t cmt_dac_flag;
4448 
4449 	/*
4450 	 * we take any chance we can to service our queues since we cannot
4451 	 * get awoken when the socket is read from :<
4452 	 */
4453 	/*
4454 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4455 	 * old sack, if so discard. 2) If there is nothing left in the send
4456 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4457 	 * too, update any rwnd change and verify no timers are running.
4458 	 * then return. 3) Process any new consecutive data i.e. cum-ack
4459 	 * moved process these first and note that it moved. 4) Process any
4460 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4461 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4462 	 * sync up flightsizes and things, stop all timers and also check
4463 	 * for shutdown_pending state. If so then go ahead and send off the
4464 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4465 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4466 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4467 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4468 	 * if in shutdown_recv state.
4469 	 */
4470 	SCTP_TCB_LOCK_ASSERT(stcb);
4471 	/* CMT DAC algo */
4472 	this_sack_lowest_newack = 0;
4473 	SCTP_STAT_INCR(sctps_slowpath_sack);
4474 	last_tsn = cum_ack;
4475 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4476 #ifdef SCTP_ASOCLOG_OF_TSNS
4477 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4478 	stcb->asoc.cumack_log_at++;
4479 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4480 		stcb->asoc.cumack_log_at = 0;
4481 	}
4482 #endif
4483 	a_rwnd = rwnd;
4484 
4485 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4486 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4487 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4488 	}
4489 
4490 	old_rwnd = stcb->asoc.peers_rwnd;
4491 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4492 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4493 		    stcb->asoc.overall_error_count,
4494 		    0,
4495 		    SCTP_FROM_SCTP_INDATA,
4496 		    __LINE__);
4497 	}
4498 	stcb->asoc.overall_error_count = 0;
4499 	asoc = &stcb->asoc;
4500 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4501 		sctp_log_sack(asoc->last_acked_seq,
4502 		    cum_ack,
4503 		    0,
4504 		    num_seg,
4505 		    num_dup,
4506 		    SCTP_LOG_NEW_SACK);
4507 	}
4508 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4509 		uint16_t i;
4510 		uint32_t *dupdata, dblock;
4511 
4512 		for (i = 0; i < num_dup; i++) {
4513 			dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4514 			    sizeof(uint32_t), (uint8_t *)&dblock);
4515 			if (dupdata == NULL) {
4516 				break;
4517 			}
4518 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4519 		}
4520 	}
4521 	/* reality check */
4522 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4523 		tp1 = TAILQ_LAST(&asoc->sent_queue,
4524 		    sctpchunk_listhead);
4525 		send_s = tp1->rec.data.tsn + 1;
4526 	} else {
4527 		tp1 = NULL;
4528 		send_s = asoc->sending_seq;
4529 	}
4530 	if (SCTP_TSN_GE(cum_ack, send_s)) {
4531 		struct mbuf *op_err;
4532 		char msg[SCTP_DIAG_INFO_LEN];
4533 
4534 		/*
4535 		 * no way, we have not even sent this TSN out yet. Peer is
4536 		 * hopelessly messed up with us.
4537 		 */
4538 		SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4539 		    cum_ack, send_s);
4540 		if (tp1) {
4541 			SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4542 			    tp1->rec.data.tsn, (void *)tp1);
4543 		}
4544 hopeless_peer:
4545 		*abort_now = 1;
4546 		/* XXX */
4547 		SCTP_SNPRINTF(msg, sizeof(msg),
4548 		    "Cum ack %8.8x greater or equal than TSN %8.8x",
4549 		    cum_ack, send_s);
4550 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4551 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_28;
4552 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4553 		return;
4554 	}
4555 	/**********************/
4556 	/* 1) check the range */
4557 	/**********************/
4558 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4559 		/* acking something behind */
4560 		return;
4561 	}
4562 
4563 	/* update the Rwnd of the peer */
4564 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4565 	    TAILQ_EMPTY(&asoc->send_queue) &&
4566 	    (asoc->stream_queue_cnt == 0)) {
4567 		/* nothing left on send/sent and strmq */
4568 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4569 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4570 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4571 		}
4572 		asoc->peers_rwnd = a_rwnd;
4573 		if (asoc->sent_queue_retran_cnt) {
4574 			asoc->sent_queue_retran_cnt = 0;
4575 		}
4576 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4577 			/* SWS sender side engages */
4578 			asoc->peers_rwnd = 0;
4579 		}
4580 		/* stop any timers */
4581 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4582 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4583 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4584 			net->partial_bytes_acked = 0;
4585 			net->flight_size = 0;
4586 		}
4587 		asoc->total_flight = 0;
4588 		asoc->total_flight_count = 0;
4589 		return;
4590 	}
4591 	/*
4592 	 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4593 	 * things. The total byte count acked is tracked in netAckSz AND
4594 	 * netAck2 is used to track the total bytes acked that are
4595 	 * unambiguous and were never retransmitted. We track these on a per
4596 	 * destination address basis.
4597 	 */
4598 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4599 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4600 			/* Drag along the window_tsn for cwr's */
4601 			net->cwr_window_tsn = cum_ack;
4602 		}
4603 		net->prev_cwnd = net->cwnd;
4604 		net->net_ack = 0;
4605 		net->net_ack2 = 0;
4606 
4607 		/*
4608 		 * CMT: Reset CUC and Fast recovery algo variables before
4609 		 * SACK processing
4610 		 */
4611 		net->new_pseudo_cumack = 0;
4612 		net->will_exit_fast_recovery = 0;
4613 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4614 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4615 		}
4616 
4617 		/*
4618 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4619 		 * to be greater than the cumack. Also reset saw_newack to 0
4620 		 * for all dests.
4621 		 */
4622 		net->saw_newack = 0;
4623 		net->this_sack_highest_newack = last_tsn;
4624 	}
4625 	/* process the new consecutive TSN first */
4626 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4627 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4628 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4629 				accum_moved = 1;
4630 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4631 					/*
4632 					 * If it is less than ACKED, it is
4633 					 * now no-longer in flight. Higher
4634 					 * values may occur during marking
4635 					 */
4636 					if ((tp1->whoTo->dest_state &
4637 					    SCTP_ADDR_UNCONFIRMED) &&
4638 					    (tp1->snd_count < 2)) {
4639 						/*
4640 						 * If there was no retran,
4641 						 * the address is
4642 						 * un-confirmed, and we sent
4643 						 * there and are now
4644 						 * sacked: it's confirmed,
4645 						 * mark it so.
4646 						 */
4647 						tp1->whoTo->dest_state &=
4648 						    ~SCTP_ADDR_UNCONFIRMED;
4649 					}
4650 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4651 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4652 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4653 							    tp1->whoTo->flight_size,
4654 							    tp1->book_size,
4655 							    (uint32_t)(uintptr_t)tp1->whoTo,
4656 							    tp1->rec.data.tsn);
4657 						}
4658 						sctp_flight_size_decrease(tp1);
4659 						sctp_total_flight_decrease(stcb, tp1);
4660 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4661 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4662 							    tp1);
4663 						}
4664 					}
4665 					tp1->whoTo->net_ack += tp1->send_size;
4666 
4667 					/* CMT SFR and DAC algos */
4668 					this_sack_lowest_newack = tp1->rec.data.tsn;
4669 					tp1->whoTo->saw_newack = 1;
4670 
4671 					if (tp1->snd_count < 2) {
4672 						/*
4673 						 * True non-retransmitted
4674 						 * chunk
4675 						 */
4676 						tp1->whoTo->net_ack2 +=
4677 						    tp1->send_size;
4678 
4679 						/* update RTO too? */
4680 						if (tp1->do_rtt) {
4681 							if (rto_ok &&
4682 							    sctp_calculate_rto(stcb,
4683 							    &stcb->asoc,
4684 							    tp1->whoTo,
4685 							    &tp1->sent_rcv_time,
4686 							    SCTP_RTT_FROM_DATA)) {
4687 								rto_ok = 0;
4688 							}
4689 							if (tp1->whoTo->rto_needed == 0) {
4690 								tp1->whoTo->rto_needed = 1;
4691 							}
4692 							tp1->do_rtt = 0;
4693 						}
4694 					}
4695 					/*
4696 					 * CMT: CUCv2 algorithm. From the
4697 					 * cumack'd TSNs, for each TSN being
4698 					 * acked for the first time, set the
4699 					 * following variables for the
4700 					 * corresp destination.
4701 					 * new_pseudo_cumack will trigger a
4702 					 * cwnd update.
4703 					 * find_(rtx_)pseudo_cumack will
4704 					 * trigger search for the next
4705 					 * expected (rtx-)pseudo-cumack.
4706 					 */
4707 					tp1->whoTo->new_pseudo_cumack = 1;
4708 					tp1->whoTo->find_pseudo_cumack = 1;
4709 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4710 
4711 
4712 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4713 						sctp_log_sack(asoc->last_acked_seq,
4714 						    cum_ack,
4715 						    tp1->rec.data.tsn,
4716 						    0,
4717 						    0,
4718 						    SCTP_LOG_TSN_ACKED);
4719 					}
4720 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4721 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4722 					}
4723 				}
4724 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4725 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4726 #ifdef SCTP_AUDITING_ENABLED
4727 					sctp_audit_log(0xB3,
4728 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4729 #endif
4730 				}
4731 				if (tp1->rec.data.chunk_was_revoked) {
4732 					/* deflate the cwnd */
4733 					tp1->whoTo->cwnd -= tp1->book_size;
4734 					tp1->rec.data.chunk_was_revoked = 0;
4735 				}
4736 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4737 					tp1->sent = SCTP_DATAGRAM_ACKED;
4738 				}
4739 			}
4740 		} else {
4741 			break;
4742 		}
4743 	}
4744 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4745 	/* always set this up to cum-ack */
4746 	asoc->this_sack_highest_gap = last_tsn;
4747 
4748 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4749 
4750 		/*
4751 		 * thisSackHighestGap will increase while handling NEW
4752 		 * segments this_sack_highest_newack will increase while
4753 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4754 		 * used for CMT DAC algo. saw_newack will also change.
4755 		 */
4756 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4757 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4758 		    num_seg, num_nr_seg, &rto_ok)) {
4759 			wake_him++;
4760 		}
4761 		/*
4762 		 * validate the biggest_tsn_acked in the gap acks if strict
4763 		 * adherence is wanted.
4764 		 */
4765 		if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4766 			/*
4767 			 * peer is either confused or we are under attack.
4768 			 * We must abort.
4769 			 */
4770 			SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4771 			    biggest_tsn_acked, send_s);
4772 			goto hopeless_peer;
4773 		}
4774 	}
4775 	/********************************************/
4776 	/* cancel ALL T3-send timers if accum moved */
4777 	/********************************************/
4778 	if (asoc->sctp_cmt_on_off > 0) {
4779 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4780 			if (net->new_pseudo_cumack)
4781 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4782 				    stcb, net,
4783 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4785 		}
4786 	} else {
4787 		if (accum_moved) {
4788 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4789 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4790 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
4791 			}
4792 		}
4793 	}
4794 	/*********************************************/
4795 	/* drop the acked chunks from the sent queue */
4796 	/*********************************************/
4797 	asoc->last_acked_seq = cum_ack;
4798 
4799 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4800 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4801 			break;
4802 		}
4803 		if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4804 			if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4805 				asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4806 #ifdef INVARIANTS
4807 			} else {
4808 				panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4809 #endif
4810 			}
4811 		}
4812 		if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4813 		    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4814 		    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4815 			asoc->trigger_reset = 1;
4816 		}
4817 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4818 		if (PR_SCTP_ENABLED(tp1->flags)) {
4819 			if (asoc->pr_sctp_cnt != 0)
4820 				asoc->pr_sctp_cnt--;
4821 		}
4822 		asoc->sent_queue_cnt--;
4823 		if (tp1->data) {
4824 			/* sa_ignore NO_NULL_CHK */
4825 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4826 			sctp_m_freem(tp1->data);
4827 			tp1->data = NULL;
4828 			if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4829 				asoc->sent_queue_cnt_removeable--;
4830 			}
4831 		}
4832 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4833 			sctp_log_sack(asoc->last_acked_seq,
4834 			    cum_ack,
4835 			    tp1->rec.data.tsn,
4836 			    0,
4837 			    0,
4838 			    SCTP_LOG_FREE_SENT);
4839 		}
4840 		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4841 		wake_him++;
4842 	}
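	/*
	 * Sanity check: with the sent queue empty, no bytes can still be
	 * in flight.
	 */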
4843 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4844 #ifdef INVARIANTS
4845 		panic("Warning flight size is positive and should be 0");
4846 #else
4847 		SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4848 		    asoc->total_flight);
4849 #endif
4850 		asoc->total_flight = 0;
4851 	}
4852 
4853 	/* sa_ignore NO_NULL_CHK */
4854 	if ((wake_him) && (stcb->sctp_socket)) {
4855 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4856 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4857 			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4858 		}
4859 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4860 	} else {
4861 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4862 			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4863 		}
4864 	}
4865 
4866 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4867 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4868 			/* Setup so we will exit RFC2582 fast recovery */
4869 			will_exit_fast_recovery = 1;
4870 		}
4871 	}
4872 	/*
4873 	 * Check for revoked fragments:
4874 	 *
4875 	 * If the previous SACK had no gap reports, nothing can have been
4876 	 * revoked. If it did have gap reports and this SACK still carries
4877 	 * some (num_seg > 0), call sctp_check_for_revoked() to see whether
4878 	 * the peer revoked any of them. Otherwise the peer revoked all
4879 	 * ACKED fragments: we had some before and now we have none.
4880 	 */
4881 
4882 	if (num_seg) {
4883 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4884 		asoc->saw_sack_with_frags = 1;
4885 	} else if (asoc->saw_sack_with_frags) {
4886 		int cnt_revoked = 0;
4887 
4888 		/* The peer revoked all datagrams previously marked or acked. */
4889 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4890 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4891 				tp1->sent = SCTP_DATAGRAM_SENT;
4892 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4893 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4894 					    tp1->whoTo->flight_size,
4895 					    tp1->book_size,
4896 					    (uint32_t)(uintptr_t)tp1->whoTo,
4897 					    tp1->rec.data.tsn);
4898 				}
4899 				sctp_flight_size_increase(tp1);
4900 				sctp_total_flight_increase(stcb, tp1);
4901 				tp1->rec.data.chunk_was_revoked = 1;
4902 				/*
4903 				 * To ensure that this increase in
4904 				 * flightsize, which is artificial, does not
4905 				 * throttle the sender, we also increase the
4906 				 * cwnd artificially.
4907 				 */
4908 				tp1->whoTo->cwnd += tp1->book_size;
4909 				cnt_revoked++;
4910 			}
4911 		}
4912 		if (cnt_revoked) {
4913 			reneged_all = 1;
4914 		}
4915 		asoc->saw_sack_with_frags = 0;
4916 	}
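	/* Remember whether this SACK carried any NR gap reports. */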
4917 	if (num_nr_seg > 0)
4918 		asoc->saw_sack_with_nr_frags = 1;
4919 	else
4920 		asoc->saw_sack_with_nr_frags = 0;
4921 
4922 	/* JRS - Use the congestion control given in the CC module */
4923 	if (ecne_seen == 0) {
4924 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4925 			if (net->net_ack2 > 0) {
4926 				/*
4927 				 * Per Karn's rule, clearing the error
4928 				 * count here is optional.
4929 				 */
4930 				net->error_count = 0;
4931 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4932 					/* the address became reachable again */
4933 					net->dest_state |= SCTP_ADDR_REACHABLE;
4934 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4935 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4936 				}
4937 
4938 				if (net == stcb->asoc.primary_destination) {
4939 					if (stcb->asoc.alternate) {
4940 						/*
4941 						 * release the alternate,
4942 						 * primary is good
4943 						 */
4944 						sctp_free_remote_addr(stcb->asoc.alternate);
4945 						stcb->asoc.alternate = NULL;
4946 					}
4947 				}
4948 
4949 				if (net->dest_state & SCTP_ADDR_PF) {
4950 					net->dest_state &= ~SCTP_ADDR_PF;
4951 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4952 					    stcb->sctp_ep, stcb, net,
4953 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
4954 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4955 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4956 					/* Done with this net */
4957 					net->net_ack = 0;
4958 				}
4959 				/* restore any doubled timers */
4960 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4961 				if (net->RTO < stcb->asoc.minrto) {
4962 					net->RTO = stcb->asoc.minrto;
4963 				}
4964 				if (net->RTO > stcb->asoc.maxrto) {
4965 					net->RTO = stcb->asoc.maxrto;
4966 				}
4967 			}
4968 		}
4969 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4970 	}
4971 
4972 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4973 		/* nothing left in-flight */
4974 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4975 			/* stop all timers */
4976 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4977 			    stcb, net,
4978 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
4979 			net->flight_size = 0;
4980 			net->partial_bytes_acked = 0;
4981 		}
4982 		asoc->total_flight = 0;
4983 		asoc->total_flight_count = 0;
4984 	}
4985 
4986 	/**********************************/
4987 	/* Now what about shutdown issues */
4988 	/**********************************/
4989 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4990 		/* nothing left on the send queue; consider it done */
4991 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4992 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4993 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4994 		}
4995 		asoc->peers_rwnd = a_rwnd;
4996 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4997 			/* SWS sender side engages */
4998 			asoc->peers_rwnd = 0;
4999 		}
5000 		/* clean up */
5001 		if ((asoc->stream_queue_cnt == 1) &&
5002 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5003 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5004 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
5005 			SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
5006 		}
5007 		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5008 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5009 		    (asoc->stream_queue_cnt == 1) &&
5010 		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
5011 			struct mbuf *op_err;
5012 
5013 			*abort_now = 1;
5014 			/* XXX */
5015 			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
5016 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_34;
5017 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5018 			return;
5019 		}
5020 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5021 		    (asoc->stream_queue_cnt == 0)) {
5022 			struct sctp_nets *netp;
5023 
5024 			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
5025 			    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5026 				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5027 			}
5028 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
5029 			sctp_stop_timers_for_shutdown(stcb);
5030 			if (asoc->alternate) {
5031 				netp = asoc->alternate;
5032 			} else {
5033 				netp = asoc->primary_destination;
5034 			}
5035 			sctp_send_shutdown(stcb, netp);
5036 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5037 			    stcb->sctp_ep, stcb, netp);
5038 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5039 			    stcb->sctp_ep, stcb, NULL);
5040 			return;
5041 		} else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5042 		    (asoc->stream_queue_cnt == 0)) {
5043 			struct sctp_nets *netp;
5044 
5045 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5046 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
5047 			sctp_stop_timers_for_shutdown(stcb);
5048 			if (asoc->alternate) {
5049 				netp = asoc->alternate;
5050 			} else {
5051 				netp = asoc->primary_destination;
5052 			}
5053 			sctp_send_shutdown_ack(stcb, netp);
5054 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5055 			    stcb->sctp_ep, stcb, netp);
5056 			return;
5057 		}
5058 	}
5059 	/*
5060 	 * HEADS UP: from here on, net_ack is recycled for a different
5061 	 * use.
5062 	 */
5063 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5064 		net->net_ack = 0;
5065 	}
5066 
5067 	/*
5068 	 * CMT DAC algorithm: if the SACK's DAC flag was 0, no extra
5069 	 * marking is to be done. Setting this_sack_lowest_newack to the
5070 	 * cum_ack automatically ensures that.
5071 	 */
5072 	if ((asoc->sctp_cmt_on_off > 0) &&
5073 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5074 	    (cmt_dac_flag == 0)) {
5075 		this_sack_lowest_newack = cum_ack;
5076 	}
5077 	if ((num_seg > 0) || (num_nr_seg > 0)) {
5078 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5079 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5080 	}
5081 	/* JRS - Use the congestion control given in the CC module */
5082 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5083 
5084 	/* Now, are we exiting loss recovery? */
5085 	if (will_exit_fast_recovery) {
5086 		/* Ok, we must exit fast recovery */
5087 		asoc->fast_retran_loss_recovery = 0;
5088 	}
5089 	if ((asoc->sat_t3_loss_recovery) &&
5090 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5091 		/* end satellite t3 loss recovery */
5092 		asoc->sat_t3_loss_recovery = 0;
5093 	}
5094 	/*
5095 	 * CMT Fast recovery
5096 	 */
5097 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5098 		if (net->will_exit_fast_recovery) {
5099 			/* Ok, we must exit fast recovery */
5100 			net->fast_retran_loss_recovery = 0;
5101 		}
5102 	}
5103 
5104 	/* Adjust and set the new rwnd value */
5105 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5106 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5107 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5108 	}
5109 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5110 	    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5111 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5112 		/* SWS sender side engages */
5113 		asoc->peers_rwnd = 0;
5114 	}
5115 	if (asoc->peers_rwnd > old_rwnd) {
5116 		win_probe_recovery = 1;
5117 	}
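	/*
	 * The peer's advertised window grew; any outstanding window
	 * probes can be recovered below.
	 */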
5118 
5119 	/*
5120 	 * Now we must set things up so that a timer is running for
5121 	 * anyone with outstanding data.
5122 	 */
5123 	done_once = 0;
5124 again:
5125 	j = 0;
5126 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5127 		if (win_probe_recovery && (net->window_probe)) {
5128 			win_probe_recovered = 1;
5129 			/*-
5130 			 * Find the first chunk that was used for a
5131 			 * window probe and clear the event. Put it
5132 			 * back into the send queue as if it had not
5133 			 * been sent.
5134 			 */
5135 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5136 				if (tp1->window_probe) {
5137 					sctp_window_probe_recovery(stcb, asoc, tp1);
5138 					break;
5139 				}
5140 			}
5141 		}
5142 		if (net->flight_size) {
5143 			j++;
5144 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5145 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5146 				    stcb->sctp_ep, stcb, net);
5147 			}
5148 			if (net->window_probe) {
5149 				net->window_probe = 0;
5150 			}
5151 		} else {
5152 			if (net->window_probe) {
5153 				/*
5154 				 * For window probes we must ensure that
5155 				 * a timer is still running there.
5156 				 */
5157 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5158 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5159 					    stcb->sctp_ep, stcb, net);
5161 				}
5162 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5163 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5164 				    stcb, net,
5165 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_35);
5166 			}
5167 		}
5168 	}
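	/*
	 * If nothing is in flight but chunks remain on the sent queue
	 * (and none are marked for retransmission), audit and repair the
	 * flight-size accounting once, then re-run the timer check.
	 */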
5169 	if ((j == 0) &&
5170 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5171 	    (asoc->sent_queue_retran_cnt == 0) &&
5172 	    (win_probe_recovered == 0) &&
5173 	    (done_once == 0)) {
5174 		/*
5175 		 * This should not happen unless, of course, all packets
5176 		 * are PR-SCTP and marked to be skipped.
5177 		 */
5178 		if (sctp_fs_audit(asoc)) {
5179 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5180 				net->flight_size = 0;
5181 			}
5182 			asoc->total_flight = 0;
5183 			asoc->total_flight_count = 0;
5184 			asoc->sent_queue_retran_cnt = 0;
5185 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5186 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5187 					sctp_flight_size_increase(tp1);
5188 					sctp_total_flight_increase(stcb, tp1);
5189 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5190 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5191 				}
5192 			}
5193 		}
5194 		done_once = 1;
5195 		goto again;
5196 	}
5197 	/*********************************************/
5198 	/* Here we perform PR-SCTP procedures        */
5199 	/* (section 4.2)                             */
5200 	/*********************************************/
5201 	/* C1. update advancedPeerAckPoint */
5202 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5203 		asoc->advanced_peer_ack_point = cum_ack;
5204 	}
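	/* advancedPeerAckPoint is now at least the cumulative ack. */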
5205 	/* C2. try to further move advancedPeerAckPoint ahead */
5206 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5207 		struct sctp_tmit_chunk *lchk;
5208 		uint32_t old_adv_peer_ack_point;
5209 
5210 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5211 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5212 		/* C3. See if we need to send a Fwd-TSN */
5213 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5214 			/*
5215 			 * ISSUE with ECN, see FWD-TSN processing.
5216 			 */
5217 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5218 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5219 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5220 				    old_adv_peer_ack_point);
5221 			}
5222 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5223 				send_forward_tsn(stcb, asoc);
5224 			} else if (lchk) {
5225 				/* try to fast-retransmit FORWARD-TSNs that get lost too */
5226 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5227 					send_forward_tsn(stcb, asoc);
5228 				}
5229 			}
5230 		}
5231 		for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
5232 			if (lchk->whoTo != NULL) {
5233 				break;
5234 			}
5235 		}
5236 		if (lchk != NULL) {
5237 			/* Ensure a timer is running */
5238 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5239 			    stcb->sctp_ep, stcb, lchk->whoTo);
5240 		}
5241 	}
5242 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5243 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5244 		    a_rwnd,
5245 		    stcb->asoc.peers_rwnd,
5246 		    stcb->asoc.total_flight,
5247 		    stcb->asoc.total_output_queue_size);
5248 	}
5249 }
5250 
5251 void
5252 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5253 {
5254 	/* Copy cum-ack */
5255 	uint32_t cum_ack, a_rwnd;
5256 
5257 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5258 	/* Arrange so a_rwnd does NOT change */
5259 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5260 
5261 	/* Now call the express sack handling */
5262 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5263 }
5264 
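/*
 * Deliver any queued messages on the given in-stream that have become
 * deliverable now that a FORWARD-TSN has advanced last_mid_delivered.
 */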
5265 static void
5266 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5267     struct sctp_stream_in *strmin)
5268 {
5269 	struct sctp_queued_to_read *control, *ncontrol;
5270 	struct sctp_association *asoc;
5271 	uint32_t mid;
5272 	int need_reasm_check = 0;
5273 
5274 	asoc = &stcb->asoc;
5275 	mid = strmin->last_mid_delivered;
5276 	/*
5277 	 * First deliver anything prior to and including the message ID
5278 	 * that came in.
5279 	 */
5280 	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5281 		if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5282 			/* this is deliverable now */
5283 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5284 				if (control->on_strm_q) {
5285 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5286 						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5287 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5288 						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5289 #ifdef INVARIANTS
5290 					} else {
5291 						panic("strmin: %p ctl: %p unknown %d",
5292 						    strmin, control, control->on_strm_q);
5293 #endif
5294 					}
5295 					control->on_strm_q = 0;
5296 				}
5297 				/* subtract pending on streams */
5298 				if (asoc->size_on_all_streams >= control->length) {
5299 					asoc->size_on_all_streams -= control->length;
5300 				} else {
5301 #ifdef INVARIANTS
5302 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5303 #else
5304 					asoc->size_on_all_streams = 0;
5305 #endif
5306 				}
5307 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5308 				/* deliver it to at least the delivery-q */
5309 				if (stcb->sctp_socket) {
5310 					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5311 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5312 					    control,
5313 					    &stcb->sctp_socket->so_rcv,
5314 					    1, SCTP_READ_LOCK_HELD,
5315 					    SCTP_SO_NOT_LOCKED);
5316 				}
5317 			} else {
5318 				/* It's a fragmented message */
5319 				if (control->first_frag_seen) {
5320 					/*
5321 					 * Make this the next one to
5322 					 * deliver; we restore it later.
5323 					 */
5324 					strmin->last_mid_delivered = control->mid - 1;
5325 					need_reasm_check = 1;
5326 					break;
5327 				}
5328 			}
5329 		} else {
5330 			/* no more delivery now. */
5331 			break;
5332 		}
5333 	}
5334 	if (need_reasm_check) {
5335 		int ret;
5336 
5337 		ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5338 		if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
5339 			/* Restore the next to deliver unless we are ahead */
5340 			strmin->last_mid_delivered = mid;
5341 		}
5342 		if (ret == 0) {
5343 			/* Left the partially delivered message at the front. */
5344 			return;
5345 		}
5346 		need_reasm_check = 0;
5347 	}
5348 	/*
5349 	 * Now we must deliver things in the queue the normal way, if any
5350 	 * are now ready.
5351 	 */
5352 	mid = strmin->last_mid_delivered + 1;
5353 	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5354 		if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
5355 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5356 				/* this is deliverable now */
5357 				if (control->on_strm_q) {
5358 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5359 						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5360 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5361 						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5362 #ifdef INVARIANTS
5363 					} else {
5364 						panic("strmin: %p ctl: %p unknown %d",
5365 						    strmin, control, control->on_strm_q);
5366 #endif
5367 					}
5368 					control->on_strm_q = 0;
5369 				}
5370 				/* subtract pending on streams */
5371 				if (asoc->size_on_all_streams >= control->length) {
5372 					asoc->size_on_all_streams -= control->length;
5373 				} else {
5374 #ifdef INVARIANTS
5375 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5376 #else
5377 					asoc->size_on_all_streams = 0;
5378 #endif
5379 				}
5380 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5381 				/* deliver it to at least the delivery-q */
5382 				strmin->last_mid_delivered = control->mid;
5383 				if (stcb->sctp_socket) {
5384 					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5385 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5386 					    control,
5387 					    &stcb->sctp_socket->so_rcv, 1,
5388 					    SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5390 				}
5391 				mid = strmin->last_mid_delivered + 1;
5392 			} else {
5393 				/* It's a fragmented message */
5394 				if (control->first_frag_seen) {
5395 					/*
5396 					 * Make this the next one to
5397 					 * deliver.
5398 					 */
5399 					strmin->last_mid_delivered = control->mid - 1;
5400 					need_reasm_check = 1;
5401 					break;
5402 				}
5403 			}
5404 		} else {
5405 			break;
5406 		}
5407 	}
5408 	if (need_reasm_check) {
5409 		(void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5410 	}
5411 }
5414 
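/*
 * Toss the reassembly-queue chunks of a message that a FORWARD-TSN told
 * us to skip; release the control itself when nothing remains on it and
 * it is not on the read queue.
 */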
5415 static void
5416 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5417     struct sctp_association *asoc, struct sctp_stream_in *strm,
5418     struct sctp_queued_to_read *control, int ordered, uint32_t cumtsn)
5419 {
5420 	struct sctp_tmit_chunk *chk, *nchk;
5421 	int cnt_removed = 0;
5422 
5423 	/*
5424 	 * For now, large messages held on the stream reassembly queue that
5425 	 * are complete will be tossed too. In theory we could do more work
5426 	 * and stop after dumping one message, i.e. on seeing the start of
5427 	 * a new message at the head, and call the delivery function to see
5428 	 * whether it can be delivered. But for now we just dump everything
5429 	 * on the queue.
5430 	 */
5431 	if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
5432 		return;
5433 	}
5434 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5435 		/* Purge hanging chunks */
5436 		if (!asoc->idata_supported && (ordered == 0)) {
5437 			if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
5438 				break;
5439 			}
5440 		}
5441 		cnt_removed++;
5442 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5443 		if (asoc->size_on_reasm_queue >= chk->send_size) {
5444 			asoc->size_on_reasm_queue -= chk->send_size;
5445 		} else {
5446 #ifdef INVARIANTS
5447 			panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
5448 #else
5449 			asoc->size_on_reasm_queue = 0;
5450 #endif
5451 		}
5452 		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5453 		if (chk->data) {
5454 			sctp_m_freem(chk->data);
5455 			chk->data = NULL;
5456 		}
5457 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5458 	}
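	/*
	 * If fragments beyond the cumulative TSN remain, reset the control
	 * and re-anchor it on the first remaining fragment rather than
	 * freeing it.
	 */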
5459 	if (!TAILQ_EMPTY(&control->reasm)) {
5460 		/* This has to be old data, unordered */
5461 		if (control->data) {
5462 			sctp_m_freem(control->data);
5463 			control->data = NULL;
5464 		}
5465 		sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5466 		chk = TAILQ_FIRST(&control->reasm);
5467 		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5468 			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5469 			sctp_add_chk_to_control(control, strm, stcb, asoc,
5470 			    chk, SCTP_READ_LOCK_HELD);
5471 		}
5472 		sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
5473 		return;
5474 	}
5475 	if (control->on_strm_q == SCTP_ON_ORDERED) {
5476 		TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5477 		if (asoc->size_on_all_streams >= control->length) {
5478 			asoc->size_on_all_streams -= control->length;
5479 		} else {
5480 #ifdef INVARIANTS
5481 			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5482 #else
5483 			asoc->size_on_all_streams = 0;
5484 #endif
5485 		}
5486 		sctp_ucount_decr(asoc->cnt_on_all_streams);
5487 		control->on_strm_q = 0;
5488 	} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5489 		TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5490 		control->on_strm_q = 0;
5491 #ifdef INVARIANTS
5492 	} else if (control->on_strm_q) {
5493 		panic("strm: %p ctl: %p unknown %d",
5494 		    strm, control, control->on_strm_q);
5495 #endif
5496 	}
5497 	control->on_strm_q = 0;
5498 	if (control->on_read_q == 0) {
5499 		sctp_free_remote_addr(control->whoFrom);
5500 		if (control->data) {
5501 			sctp_m_freem(control->data);
5502 			control->data = NULL;
5503 		}
5504 		sctp_free_a_readq(stcb, control);
5505 	}
5506 }
5507 
5508 void
5509 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5510     struct sctp_forward_tsn_chunk *fwd,
5511     int *abort_flag, struct mbuf *m, int offset)
5512 {
5513 	/* The pr-sctp fwd tsn */
5514 	/*
5515 	 * Here we perform all the data receiver side steps for
5516 	 * processing FwdTSN, as required by the pr-sctp draft.
5517 	 * Assume we get FwdTSN(x):
5518 	 *
5519 	 * 1) update local cumTSN to x
5520 	 * 2) try to further advance cumTSN to x + others we have
5521 	 * 3) examine and update the re-ordering queue on pr-in-streams
5522 	 * 4) clean up the re-assembly queue
5523 	 * 5) send a SACK to report where we are
5524 	 */
5525 	struct sctp_association *asoc;
5526 	uint32_t new_cum_tsn, gap;
5527 	unsigned int i, fwd_sz, m_size;
5528 	uint32_t str_seq;
5529 	struct sctp_stream_in *strm;
5530 	struct sctp_queued_to_read *control, *sv;
5531 
5532 	asoc = &stcb->asoc;
5533 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5534 		SCTPDBG(SCTP_DEBUG_INDATA1,
5535 		    "Bad size too small/big fwd-tsn\n");
5536 		return;
5537 	}
5538 	m_size = (stcb->asoc.mapping_array_size << 3);
5539 	/*************************************************************/
5540 	/* 1. Here we update local cumTSN and shift the bitmap array */
5541 	/*************************************************************/
5542 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5543 
5544 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5545 		/* Already got there ... */
5546 		return;
5547 	}
5548 	/*
5549 	 * Now we know the new TSN is more advanced; find the actual
5550 	 * gap.
5551 	 */
5552 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5553 	asoc->cumulative_tsn = new_cum_tsn;
5554 	if (gap >= m_size) {
5555 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5556 			struct mbuf *op_err;
5557 			char msg[SCTP_DIAG_INFO_LEN];
5558 
5559 			/*
5560 			 * Out of range, even counted in single-byte chunks
5561 			 * of the rwnd I give out. This must be an attacker.
5562 			 */
5563 			*abort_flag = 1;
5564 			SCTP_SNPRINTF(msg, sizeof(msg),
5565 			    "New cum ack %8.8x too high, highest TSN %8.8x",
5566 			    new_cum_tsn, asoc->highest_tsn_inside_map);
5567 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5568 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_36;
5569 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5570 			return;
5571 		}
5572 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5573 
5574 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5575 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5576 		asoc->highest_tsn_inside_map = new_cum_tsn;
5577 
5578 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5579 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5580 
5581 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5582 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5583 		}
5584 	} else {
5585 		SCTP_TCB_LOCK_ASSERT(stcb);
5586 		for (i = 0; i <= gap; i++) {
5587 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5588 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5589 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5590 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5591 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5592 				}
5593 			}
5594 		}
5595 	}
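	/*
	 * At this point every TSN up to the new cumulative TSN is
	 * accounted for as received and non-renegable.
	 */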
5596 	/*************************************************************/
5597 	/* 2. Clean up the re-assembly queue                         */
5598 	/*************************************************************/
5599 
5600 	/* This is now done as part of clearing up the stream/seq */
5601 	if (asoc->idata_supported == 0) {
5602 		uint16_t sid;
5603 
5604 		/* Flush all the un-ordered data based on cum-tsn */
5605 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5606 		for (sid = 0; sid < asoc->streamincnt; sid++) {
5607 			strm = &asoc->strmin[sid];
5608 			if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
5609 				sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), 0, new_cum_tsn);
5610 			}
5611 		}
5612 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5613 	}
5614 	/*******************************************************/
5615 	/* 3. Update the PR-stream re-ordering queues and fix  */
5616 	/*    delivery issues as needed.                       */
5617 	/*******************************************************/
5618 	fwd_sz -= sizeof(*fwd);
5619 	if (m && fwd_sz) {
5620 		/* New method. */
5621 		unsigned int num_str;
5622 		uint32_t mid;
5623 		uint16_t sid;
5624 		uint16_t ordered, flags;
5625 		struct sctp_strseq *stseq, strseqbuf;
5626 		struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5627 
5628 		offset += sizeof(*fwd);
5629 
5630 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5631 		if (asoc->idata_supported) {
5632 			num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5633 		} else {
5634 			num_str = fwd_sz / sizeof(struct sctp_strseq);
5635 		}
5636 		for (i = 0; i < num_str; i++) {
5637 			if (asoc->idata_supported) {
5638 				stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5639 				    sizeof(struct sctp_strseq_mid),
5640 				    (uint8_t *)&strseqbuf_m);
5641 				offset += sizeof(struct sctp_strseq_mid);
5642 				if (stseq_m == NULL) {
5643 					break;
5644 				}
5645 				sid = ntohs(stseq_m->sid);
5646 				mid = ntohl(stseq_m->mid);
5647 				flags = ntohs(stseq_m->flags);
5648 				if (flags & PR_SCTP_UNORDERED_FLAG) {
5649 					ordered = 0;
5650 				} else {
5651 					ordered = 1;
5652 				}
5653 			} else {
5654 				stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5655 				    sizeof(struct sctp_strseq),
5656 				    (uint8_t *)&strseqbuf);
5657 				offset += sizeof(struct sctp_strseq);
5658 				if (stseq == NULL) {
5659 					break;
5660 				}
5661 				sid = ntohs(stseq->sid);
5662 				mid = (uint32_t)ntohs(stseq->ssn);
5663 				ordered = 1;
5664 			}
5665 			/*
5666 			 * Now process this entry: look for the stream/seq
5667 			 * on the read queue where it is not all delivered.
5668 			 * If we find it, we transmute the read entry into
5669 			 * a PDI_ABORTED.
5670 			 */
5674 			if (sid >= asoc->streamincnt) {
5675 				/* invalid stream id, stop! */
5676 				break;
5677 			}
5678 			if ((asoc->str_of_pdapi == sid) &&
5679 			    (asoc->ssn_of_pdapi == mid)) {
5680 				/*
5681 				 * If this is the one we were partially
5682 				 * delivering, then we no longer are. Note
5683 				 * that this will change with the reassembly
5684 				 * re-write.
5685 				 */
5686 				asoc->fragmented_delivery_inprogress = 0;
5687 			}
5688 			strm = &asoc->strmin[sid];
5689 			if (ordered) {
5690 				TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
5691 					if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5692 						sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
5693 					}
5694 				}
5695 			} else {
5696 				if (asoc->idata_supported) {
5697 					TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
5698 						if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5699 							sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
5700 						}
5701 					}
5702 				} else {
5703 					if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
5704 						sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), ordered, new_cum_tsn);
5705 					}
5706 				}
5707 			}
5708 			TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
5709 				if ((control->sinfo_stream == sid) &&
5710 				    (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
5711 					str_seq = (sid << 16) | (0x0000ffff & mid);
5712 					control->pdapi_aborted = 1;
5713 					sv = stcb->asoc.control_pdapi;
5714 					control->end_added = 1;
5715 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5716 						TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5717 						if (asoc->size_on_all_streams >= control->length) {
5718 							asoc->size_on_all_streams -= control->length;
5719 						} else {
5720 #ifdef INVARIANTS
5721 							panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5722 #else
5723 							asoc->size_on_all_streams = 0;
5724 #endif
5725 						}
5726 						sctp_ucount_decr(asoc->cnt_on_all_streams);
5727 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5728 						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5729 #ifdef INVARIANTS
5730 					} else if (control->on_strm_q) {
5731 						panic("strm: %p ctl: %p unknown %d",
5732 						    strm, control, control->on_strm_q);
5733 #endif
5734 					}
5735 					control->on_strm_q = 0;
5736 					stcb->asoc.control_pdapi = control;
5737 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5738 					    stcb,
5739 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5740 					    (void *)&str_seq,
5741 					    SCTP_SO_NOT_LOCKED);
5742 					stcb->asoc.control_pdapi = sv;
5743 					break;
5744 				} else if ((control->sinfo_stream == sid) &&
5745 				    SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
5746 					/* We are past our victim SSN */
5747 					break;
5748 				}
5749 			}
5750 			if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
5751 				/* Update the sequence number */
5752 				strm->last_mid_delivered = mid;
5753 			}
5754 			/* now kick the stream the new way */
5755 			/* sa_ignore NO_NULL_CHK */
5756 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5757 		}
5758 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5759 	}
5760 	/*
5761 	 * Now slide the mapping arrays forward.
5762 	 */
5763 	sctp_slide_mapping_arrays(stcb);
5764 }
5765