xref: /freebsd/sys/netinet/sctp_indata.c (revision f05cddf9)
1 /*-
2  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_var.h>
38 #include <netinet/sctp_sysctl.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctputil.h>
42 #include <netinet/sctp_output.h>
43 #include <netinet/sctp_input.h>
44 #include <netinet/sctp_indata.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
47 
48 
49 /*
50  * NOTES: On the outbound side of things I need to check the sack timer to
51  * see if I should generate a sack into the chunk queue (if I have data to
52  * send that is and will be sending it .. for bundling.
53  *
54  * The callback in sctp_usrreq.c will get called when the socket is read from.
55  * This will cause sctp_service_queues() to get called on the top entry in
56  * the list.
57  */
58 
59 void
60 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
61 {
62 	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
63 }
64 
65 /* Calculate what the rwnd would be */
66 uint32_t
67 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
68 {
69 	uint32_t calc = 0;
70 
71 	/*
72 	 * This is really set wrong with respect to a 1-2-m socket. Since
73 	 * the sb_cc is the count that everyone as put up. When we re-write
74 	 * sctp_soreceive then we will fix this so that ONLY this
75 	 * associations data is taken into account.
76 	 */
77 	if (stcb->sctp_socket == NULL)
78 		return (calc);
79 
80 	if (stcb->asoc.sb_cc == 0 &&
81 	    asoc->size_on_reasm_queue == 0 &&
82 	    asoc->size_on_all_streams == 0) {
83 		/* Full rwnd granted */
84 		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
85 		return (calc);
86 	}
87 	/* get actual space */
88 	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
89 
90 	/*
91 	 * take out what has NOT been put on socket queue and we yet hold
92 	 * for putting up.
93 	 */
94 	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
95 	    asoc->cnt_on_reasm_queue * MSIZE));
96 	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
97 	    asoc->cnt_on_all_streams * MSIZE));
98 
99 	if (calc == 0) {
100 		/* out of space */
101 		return (calc);
102 	}
103 	/* what is the overhead of all these rwnd's */
104 	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
105 	/*
106 	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
107 	 * even it is 0. SWS engaged
108 	 */
109 	if (calc < stcb->asoc.my_rwnd_control_len) {
110 		calc = 1;
111 	}
112 	return (calc);
113 }
114 
115 
116 
117 /*
118  * Build out our readq entry based on the incoming packet.
119  */
120 struct sctp_queued_to_read *
121 sctp_build_readq_entry(struct sctp_tcb *stcb,
122     struct sctp_nets *net,
123     uint32_t tsn, uint32_t ppid,
124     uint32_t context, uint16_t stream_no,
125     uint16_t stream_seq, uint8_t flags,
126     struct mbuf *dm)
127 {
128 	struct sctp_queued_to_read *read_queue_e = NULL;
129 
130 	sctp_alloc_a_readq(stcb, read_queue_e);
131 	if (read_queue_e == NULL) {
132 		goto failed_build;
133 	}
134 	read_queue_e->sinfo_stream = stream_no;
135 	read_queue_e->sinfo_ssn = stream_seq;
136 	read_queue_e->sinfo_flags = (flags << 8);
137 	read_queue_e->sinfo_ppid = ppid;
138 	read_queue_e->sinfo_context = context;
139 	read_queue_e->sinfo_timetolive = 0;
140 	read_queue_e->sinfo_tsn = tsn;
141 	read_queue_e->sinfo_cumtsn = tsn;
142 	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
143 	read_queue_e->whoFrom = net;
144 	read_queue_e->length = 0;
145 	atomic_add_int(&net->ref_count, 1);
146 	read_queue_e->data = dm;
147 	read_queue_e->spec_flags = 0;
148 	read_queue_e->tail_mbuf = NULL;
149 	read_queue_e->aux_data = NULL;
150 	read_queue_e->stcb = stcb;
151 	read_queue_e->port_from = stcb->rport;
152 	read_queue_e->do_not_ref_stcb = 0;
153 	read_queue_e->end_added = 0;
154 	read_queue_e->some_taken = 0;
155 	read_queue_e->pdapi_aborted = 0;
156 failed_build:
157 	return (read_queue_e);
158 }
159 
160 
161 /*
162  * Build out our readq entry based on the incoming packet.
163  */
164 static struct sctp_queued_to_read *
165 sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
166     struct sctp_tmit_chunk *chk)
167 {
168 	struct sctp_queued_to_read *read_queue_e = NULL;
169 
170 	sctp_alloc_a_readq(stcb, read_queue_e);
171 	if (read_queue_e == NULL) {
172 		goto failed_build;
173 	}
174 	read_queue_e->sinfo_stream = chk->rec.data.stream_number;
175 	read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
176 	read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
177 	read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
178 	read_queue_e->sinfo_context = stcb->asoc.context;
179 	read_queue_e->sinfo_timetolive = 0;
180 	read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
181 	read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
182 	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
183 	read_queue_e->whoFrom = chk->whoTo;
184 	read_queue_e->aux_data = NULL;
185 	read_queue_e->length = 0;
186 	atomic_add_int(&chk->whoTo->ref_count, 1);
187 	read_queue_e->data = chk->data;
188 	read_queue_e->tail_mbuf = NULL;
189 	read_queue_e->stcb = stcb;
190 	read_queue_e->port_from = stcb->rport;
191 	read_queue_e->spec_flags = 0;
192 	read_queue_e->do_not_ref_stcb = 0;
193 	read_queue_e->end_added = 0;
194 	read_queue_e->some_taken = 0;
195 	read_queue_e->pdapi_aborted = 0;
196 failed_build:
197 	return (read_queue_e);
198 }
199 
200 
201 struct mbuf *
202 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
203 {
204 	struct sctp_extrcvinfo *seinfo;
205 	struct sctp_sndrcvinfo *outinfo;
206 	struct sctp_rcvinfo *rcvinfo;
207 	struct sctp_nxtinfo *nxtinfo;
208 	struct cmsghdr *cmh;
209 	struct mbuf *ret;
210 	int len;
211 	int use_extended;
212 	int provide_nxt;
213 
214 	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
215 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
216 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
217 		/* user does not want any ancillary data */
218 		return (NULL);
219 	}
220 	len = 0;
221 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
222 		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
223 	}
224 	seinfo = (struct sctp_extrcvinfo *)sinfo;
225 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
226 	    (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
227 		provide_nxt = 1;
228 		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
229 	} else {
230 		provide_nxt = 0;
231 	}
232 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
233 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
234 			use_extended = 1;
235 			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
236 		} else {
237 			use_extended = 0;
238 			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
239 		}
240 	} else {
241 		use_extended = 0;
242 	}
243 
244 	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
245 	if (ret == NULL) {
246 		/* No space */
247 		return (ret);
248 	}
249 	SCTP_BUF_LEN(ret) = 0;
250 
251 	/* We need a CMSG header followed by the struct */
252 	cmh = mtod(ret, struct cmsghdr *);
253 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
254 		cmh->cmsg_level = IPPROTO_SCTP;
255 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
256 		cmh->cmsg_type = SCTP_RCVINFO;
257 		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
258 		rcvinfo->rcv_sid = sinfo->sinfo_stream;
259 		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
260 		rcvinfo->rcv_flags = sinfo->sinfo_flags;
261 		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
262 		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
263 		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
264 		rcvinfo->rcv_context = sinfo->sinfo_context;
265 		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
266 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
267 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
268 	}
269 	if (provide_nxt) {
270 		cmh->cmsg_level = IPPROTO_SCTP;
271 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
272 		cmh->cmsg_type = SCTP_NXTINFO;
273 		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
274 		nxtinfo->nxt_sid = seinfo->sreinfo_next_stream;
275 		nxtinfo->nxt_flags = 0;
276 		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
277 			nxtinfo->nxt_flags |= SCTP_UNORDERED;
278 		}
279 		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
280 			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
281 		}
282 		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
283 			nxtinfo->nxt_flags |= SCTP_COMPLETE;
284 		}
285 		nxtinfo->nxt_ppid = seinfo->sreinfo_next_ppid;
286 		nxtinfo->nxt_length = seinfo->sreinfo_next_length;
287 		nxtinfo->nxt_assoc_id = seinfo->sreinfo_next_aid;
288 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
289 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
290 	}
291 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
292 		cmh->cmsg_level = IPPROTO_SCTP;
293 		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
294 		if (use_extended) {
295 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
296 			cmh->cmsg_type = SCTP_EXTRCV;
297 			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
298 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
299 		} else {
300 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
301 			cmh->cmsg_type = SCTP_SNDRCV;
302 			*outinfo = *sinfo;
303 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
304 		}
305 	}
306 	return (ret);
307 }
308 
309 
310 static void
311 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
312 {
313 	uint32_t gap, i, cumackp1;
314 	int fnd = 0;
315 
316 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
317 		return;
318 	}
319 	cumackp1 = asoc->cumulative_tsn + 1;
320 	if (SCTP_TSN_GT(cumackp1, tsn)) {
321 		/*
322 		 * this tsn is behind the cum ack and thus we don't need to
323 		 * worry about it being moved from one to the other.
324 		 */
325 		return;
326 	}
327 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
328 	if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
329 		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
330 		sctp_print_mapping_array(asoc);
331 #ifdef INVARIANTS
332 		panic("Things are really messed up now!!");
333 #endif
334 	}
335 	SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
336 	SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
337 	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
338 		asoc->highest_tsn_inside_nr_map = tsn;
339 	}
340 	if (tsn == asoc->highest_tsn_inside_map) {
341 		/* We must back down to see what the new highest is */
342 		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
343 			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
344 			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
345 				asoc->highest_tsn_inside_map = i;
346 				fnd = 1;
347 				break;
348 			}
349 		}
350 		if (!fnd) {
351 			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
352 		}
353 	}
354 }
355 
356 
357 /*
358  * We are delivering currently from the reassembly queue. We must continue to
359  * deliver until we either: 1) run out of space. 2) run out of sequential
360  * TSN's 3) hit the SCTP_DATA_LAST_FRAG flag.
361  */
362 static void
363 sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
364 {
365 	struct sctp_tmit_chunk *chk, *nchk;
366 	uint16_t nxt_todel;
367 	uint16_t stream_no;
368 	int end = 0;
369 	int cntDel;
370 	struct sctp_queued_to_read *control, *ctl, *nctl;
371 
372 	if (stcb == NULL)
373 		return;
374 
375 	cntDel = stream_no = 0;
376 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
377 	    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
378 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
379 		/* socket above is long gone or going.. */
380 abandon:
381 		asoc->fragmented_delivery_inprogress = 0;
382 		TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
383 			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
384 			asoc->size_on_reasm_queue -= chk->send_size;
385 			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
386 			/*
387 			 * Lose the data pointer, since its in the socket
388 			 * buffer
389 			 */
390 			if (chk->data) {
391 				sctp_m_freem(chk->data);
392 				chk->data = NULL;
393 			}
394 			/* Now free the address and data */
395 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
396 			/* sa_ignore FREED_MEMORY */
397 		}
398 		return;
399 	}
400 	SCTP_TCB_LOCK_ASSERT(stcb);
401 	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
402 		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
403 			/* Can't deliver more :< */
404 			return;
405 		}
406 		stream_no = chk->rec.data.stream_number;
407 		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
408 		if (nxt_todel != chk->rec.data.stream_seq &&
409 		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
410 			/*
411 			 * Not the next sequence to deliver in its stream OR
412 			 * unordered
413 			 */
414 			return;
415 		}
416 		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
417 
418 			control = sctp_build_readq_entry_chk(stcb, chk);
419 			if (control == NULL) {
420 				/* out of memory? */
421 				return;
422 			}
423 			/* save it off for our future deliveries */
424 			stcb->asoc.control_pdapi = control;
425 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
426 				end = 1;
427 			else
428 				end = 0;
429 			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
430 			sctp_add_to_readq(stcb->sctp_ep,
431 			    stcb, control, &stcb->sctp_socket->so_rcv, end,
432 			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
433 			cntDel++;
434 		} else {
435 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
436 				end = 1;
437 			else
438 				end = 0;
439 			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
440 			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
441 			    stcb->asoc.control_pdapi,
442 			    chk->data, end, chk->rec.data.TSN_seq,
443 			    &stcb->sctp_socket->so_rcv)) {
444 				/*
445 				 * something is very wrong, either
446 				 * control_pdapi is NULL, or the tail_mbuf
447 				 * is corrupt, or there is a EOM already on
448 				 * the mbuf chain.
449 				 */
450 				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
451 					goto abandon;
452 				} else {
453 #ifdef INVARIANTS
454 					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
455 						panic("This should not happen control_pdapi NULL?");
456 					}
457 					/* if we did not panic, it was a EOM */
458 					panic("Bad chunking ??");
459 #else
460 					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
461 						SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
462 					}
463 					SCTP_PRINTF("Bad chunking ??\n");
464 					SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");
465 
466 #endif
467 					goto abandon;
468 				}
469 			}
470 			cntDel++;
471 		}
472 		/* pull it we did it */
473 		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
474 		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
475 			asoc->fragmented_delivery_inprogress = 0;
476 			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
477 				asoc->strmin[stream_no].last_sequence_delivered++;
478 			}
479 			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
480 				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
481 			}
482 		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
483 			/*
484 			 * turn the flag back on since we just  delivered
485 			 * yet another one.
486 			 */
487 			asoc->fragmented_delivery_inprogress = 1;
488 		}
489 		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
490 		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
491 		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
492 		asoc->last_strm_no_delivered = chk->rec.data.stream_number;
493 
494 		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
495 		asoc->size_on_reasm_queue -= chk->send_size;
496 		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
497 		/* free up the chk */
498 		chk->data = NULL;
499 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
500 
501 		if (asoc->fragmented_delivery_inprogress == 0) {
502 			/*
503 			 * Now lets see if we can deliver the next one on
504 			 * the stream
505 			 */
506 			struct sctp_stream_in *strm;
507 
508 			strm = &asoc->strmin[stream_no];
509 			nxt_todel = strm->last_sequence_delivered + 1;
510 			TAILQ_FOREACH_SAFE(ctl, &strm->inqueue, next, nctl) {
511 				/* Deliver more if we can. */
512 				if (nxt_todel == ctl->sinfo_ssn) {
513 					TAILQ_REMOVE(&strm->inqueue, ctl, next);
514 					asoc->size_on_all_streams -= ctl->length;
515 					sctp_ucount_decr(asoc->cnt_on_all_streams);
516 					strm->last_sequence_delivered++;
517 					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
518 					sctp_add_to_readq(stcb->sctp_ep, stcb,
519 					    ctl,
520 					    &stcb->sctp_socket->so_rcv, 1,
521 					    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
522 				} else {
523 					break;
524 				}
525 				nxt_todel = strm->last_sequence_delivered + 1;
526 			}
527 			break;
528 		}
529 	}
530 }
531 
532 /*
533  * Queue the chunk either right into the socket buffer if it is the next one
534  * to go OR put it in the correct place in the delivery queue.  If we do
535  * append to the so_buf, keep doing so until we are out of order. One big
536  * question still remains, what to do when the socket buffer is FULL??
537  */
538 static void
539 sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
540     struct sctp_queued_to_read *control, int *abort_flag)
541 {
542 	/*
543 	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
544 	 * all the data in one stream this could happen quite rapidly. One
545 	 * could use the TSN to keep track of things, but this scheme breaks
546 	 * down in the other type of stream useage that could occur. Send a
547 	 * single msg to stream 0, send 4Billion messages to stream 1, now
548 	 * send a message to stream 0. You have a situation where the TSN
549 	 * has wrapped but not in the stream. Is this worth worrying about
550 	 * or should we just change our queue sort at the bottom to be by
551 	 * TSN.
552 	 *
553 	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
554 	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
555 	 * assignment this could happen... and I don't see how this would be
556 	 * a violation. So for now I am undecided an will leave the sort by
557 	 * SSN alone. Maybe a hybred approach is the answer
558 	 *
559 	 */
560 	struct sctp_stream_in *strm;
561 	struct sctp_queued_to_read *at;
562 	int queue_needed;
563 	uint16_t nxt_todel;
564 	struct mbuf *oper;
565 
566 	queue_needed = 1;
567 	asoc->size_on_all_streams += control->length;
568 	sctp_ucount_incr(asoc->cnt_on_all_streams);
569 	strm = &asoc->strmin[control->sinfo_stream];
570 	nxt_todel = strm->last_sequence_delivered + 1;
571 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
572 		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
573 	}
574 	SCTPDBG(SCTP_DEBUG_INDATA1,
575 	    "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
576 	    (uint32_t) control->sinfo_stream,
577 	    (uint32_t) strm->last_sequence_delivered,
578 	    (uint32_t) nxt_todel);
579 	if (SCTP_SSN_GE(strm->last_sequence_delivered, control->sinfo_ssn)) {
580 		/* The incoming sseq is behind where we last delivered? */
581 		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort  association\n",
582 		    control->sinfo_ssn, strm->last_sequence_delivered);
583 protocol_error:
584 		/*
585 		 * throw it in the stream so it gets cleaned up in
586 		 * association destruction
587 		 */
588 		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
589 		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
590 		    0, M_NOWAIT, 1, MT_DATA);
591 		if (oper) {
592 			struct sctp_paramhdr *ph;
593 			uint32_t *ippp;
594 
595 			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
596 			    (sizeof(uint32_t) * 3);
597 			ph = mtod(oper, struct sctp_paramhdr *);
598 			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
599 			ph->param_length = htons(SCTP_BUF_LEN(oper));
600 			ippp = (uint32_t *) (ph + 1);
601 			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
602 			ippp++;
603 			*ippp = control->sinfo_tsn;
604 			ippp++;
605 			*ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
606 		}
607 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
608 		sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
609 		*abort_flag = 1;
610 		return;
611 
612 	}
613 	if (nxt_todel == control->sinfo_ssn) {
614 		/* can be delivered right away? */
615 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
616 			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
617 		}
618 		/* EY it wont be queued if it could be delivered directly */
619 		queue_needed = 0;
620 		asoc->size_on_all_streams -= control->length;
621 		sctp_ucount_decr(asoc->cnt_on_all_streams);
622 		strm->last_sequence_delivered++;
623 
624 		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
625 		sctp_add_to_readq(stcb->sctp_ep, stcb,
626 		    control,
627 		    &stcb->sctp_socket->so_rcv, 1,
628 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
629 		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next, at) {
630 			/* all delivered */
631 			nxt_todel = strm->last_sequence_delivered + 1;
632 			if (nxt_todel == control->sinfo_ssn) {
633 				TAILQ_REMOVE(&strm->inqueue, control, next);
634 				asoc->size_on_all_streams -= control->length;
635 				sctp_ucount_decr(asoc->cnt_on_all_streams);
636 				strm->last_sequence_delivered++;
637 				/*
638 				 * We ignore the return of deliver_data here
639 				 * since we always can hold the chunk on the
640 				 * d-queue. And we have a finite number that
641 				 * can be delivered from the strq.
642 				 */
643 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
644 					sctp_log_strm_del(control, NULL,
645 					    SCTP_STR_LOG_FROM_IMMED_DEL);
646 				}
647 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
648 				sctp_add_to_readq(stcb->sctp_ep, stcb,
649 				    control,
650 				    &stcb->sctp_socket->so_rcv, 1,
651 				    SCTP_READ_LOCK_NOT_HELD,
652 				    SCTP_SO_NOT_LOCKED);
653 				continue;
654 			}
655 			break;
656 		}
657 	}
658 	if (queue_needed) {
659 		/*
660 		 * Ok, we did not deliver this guy, find the correct place
661 		 * to put it on the queue.
662 		 */
663 		if (SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) {
664 			goto protocol_error;
665 		}
666 		if (TAILQ_EMPTY(&strm->inqueue)) {
667 			/* Empty queue */
668 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
669 				sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
670 			}
671 			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
672 		} else {
673 			TAILQ_FOREACH(at, &strm->inqueue, next) {
674 				if (SCTP_SSN_GT(at->sinfo_ssn, control->sinfo_ssn)) {
675 					/*
676 					 * one in queue is bigger than the
677 					 * new one, insert before this one
678 					 */
679 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
680 						sctp_log_strm_del(control, at,
681 						    SCTP_STR_LOG_FROM_INSERT_MD);
682 					}
683 					TAILQ_INSERT_BEFORE(at, control, next);
684 					break;
685 				} else if (at->sinfo_ssn == control->sinfo_ssn) {
686 					/*
687 					 * Gak, He sent me a duplicate str
688 					 * seq number
689 					 */
690 					/*
691 					 * foo bar, I guess I will just free
692 					 * this new guy, should we abort
693 					 * too? FIX ME MAYBE? Or it COULD be
694 					 * that the SSN's have wrapped.
695 					 * Maybe I should compare to TSN
696 					 * somehow... sigh for now just blow
697 					 * away the chunk!
698 					 */
699 
700 					if (control->data)
701 						sctp_m_freem(control->data);
702 					control->data = NULL;
703 					asoc->size_on_all_streams -= control->length;
704 					sctp_ucount_decr(asoc->cnt_on_all_streams);
705 					if (control->whoFrom) {
706 						sctp_free_remote_addr(control->whoFrom);
707 						control->whoFrom = NULL;
708 					}
709 					sctp_free_a_readq(stcb, control);
710 					return;
711 				} else {
712 					if (TAILQ_NEXT(at, next) == NULL) {
713 						/*
714 						 * We are at the end, insert
715 						 * it after this one
716 						 */
717 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
718 							sctp_log_strm_del(control, at,
719 							    SCTP_STR_LOG_FROM_INSERT_TL);
720 						}
721 						TAILQ_INSERT_AFTER(&strm->inqueue,
722 						    at, control, next);
723 						break;
724 					}
725 				}
726 			}
727 		}
728 	}
729 }
730 
731 /*
732  * Returns two things: You get the total size of the deliverable parts of the
733  * first fragmented message on the reassembly queue. And you get a 1 back if
734  * all of the message is ready or a 0 back if the message is still incomplete
735  */
736 static int
737 sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
738 {
739 	struct sctp_tmit_chunk *chk;
740 	uint32_t tsn;
741 
742 	*t_size = 0;
743 	chk = TAILQ_FIRST(&asoc->reasmqueue);
744 	if (chk == NULL) {
745 		/* nothing on the queue */
746 		return (0);
747 	}
748 	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
749 		/* Not a first on the queue */
750 		return (0);
751 	}
752 	tsn = chk->rec.data.TSN_seq;
753 	TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
754 		if (tsn != chk->rec.data.TSN_seq) {
755 			return (0);
756 		}
757 		*t_size += chk->send_size;
758 		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
759 			return (1);
760 		}
761 		tsn++;
762 	}
763 	return (0);
764 }
765 
766 static void
767 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
768 {
769 	struct sctp_tmit_chunk *chk;
770 	uint16_t nxt_todel;
771 	uint32_t tsize, pd_point;
772 
773 doit_again:
774 	chk = TAILQ_FIRST(&asoc->reasmqueue);
775 	if (chk == NULL) {
776 		/* Huh? */
777 		asoc->size_on_reasm_queue = 0;
778 		asoc->cnt_on_reasm_queue = 0;
779 		return;
780 	}
781 	if (asoc->fragmented_delivery_inprogress == 0) {
782 		nxt_todel =
783 		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
784 		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
785 		    (nxt_todel == chk->rec.data.stream_seq ||
786 		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
787 			/*
788 			 * Yep the first one is here and its ok to deliver
789 			 * but should we?
790 			 */
791 			if (stcb->sctp_socket) {
792 				pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
793 				    stcb->sctp_ep->partial_delivery_point);
794 			} else {
795 				pd_point = stcb->sctp_ep->partial_delivery_point;
796 			}
797 			if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
798 
799 				/*
800 				 * Yes, we setup to start reception, by
801 				 * backing down the TSN just in case we
802 				 * can't deliver. If we
803 				 */
804 				asoc->fragmented_delivery_inprogress = 1;
805 				asoc->tsn_last_delivered =
806 				    chk->rec.data.TSN_seq - 1;
807 				asoc->str_of_pdapi =
808 				    chk->rec.data.stream_number;
809 				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
810 				asoc->pdapi_ppid = chk->rec.data.payloadtype;
811 				asoc->fragment_flags = chk->rec.data.rcv_flags;
812 				sctp_service_reassembly(stcb, asoc);
813 			}
814 		}
815 	} else {
816 		/*
817 		 * Service re-assembly will deliver stream data queued at
818 		 * the end of fragmented delivery.. but it wont know to go
819 		 * back and call itself again... we do that here with the
820 		 * got doit_again
821 		 */
822 		sctp_service_reassembly(stcb, asoc);
823 		if (asoc->fragmented_delivery_inprogress == 0) {
824 			/*
825 			 * finished our Fragmented delivery, could be more
826 			 * waiting?
827 			 */
828 			goto doit_again;
829 		}
830 	}
831 }
832 
833 /*
834  * Dump onto the re-assembly queue, in its proper place. After dumping on the
835  * queue, see if anthing can be delivered. If so pull it off (or as much as
836  * we can. If we run out of space then we must dump what we can and set the
837  * appropriate flag to say we queued what we could.
838  */
839 static void
840 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
841     struct sctp_tmit_chunk *chk, int *abort_flag)
842 {
843 	struct mbuf *oper;
844 	uint32_t cum_ackp1, prev_tsn, post_tsn;
845 	struct sctp_tmit_chunk *at, *prev, *next;
846 
847 	prev = next = NULL;
848 	cum_ackp1 = asoc->tsn_last_delivered + 1;
849 	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
850 		/* This is the first one on the queue */
851 		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
852 		/*
853 		 * we do not check for delivery of anything when only one
854 		 * fragment is here
855 		 */
856 		asoc->size_on_reasm_queue = chk->send_size;
857 		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
858 		if (chk->rec.data.TSN_seq == cum_ackp1) {
859 			if (asoc->fragmented_delivery_inprogress == 0 &&
860 			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
861 			    SCTP_DATA_FIRST_FRAG) {
862 				/*
863 				 * An empty queue, no delivery inprogress,
864 				 * we hit the next one and it does NOT have
865 				 * a FIRST fragment mark.
866 				 */
867 				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
868 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
869 				    0, M_NOWAIT, 1, MT_DATA);
870 
871 				if (oper) {
872 					struct sctp_paramhdr *ph;
873 					uint32_t *ippp;
874 
875 					SCTP_BUF_LEN(oper) =
876 					    sizeof(struct sctp_paramhdr) +
877 					    (sizeof(uint32_t) * 3);
878 					ph = mtod(oper, struct sctp_paramhdr *);
879 					ph->param_type =
880 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
881 					ph->param_length = htons(SCTP_BUF_LEN(oper));
882 					ippp = (uint32_t *) (ph + 1);
883 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
884 					ippp++;
885 					*ippp = chk->rec.data.TSN_seq;
886 					ippp++;
887 					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
888 
889 				}
890 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
891 				sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
892 				*abort_flag = 1;
893 			} else if (asoc->fragmented_delivery_inprogress &&
894 			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
895 				/*
896 				 * We are doing a partial delivery and the
897 				 * NEXT chunk MUST be either the LAST or
898 				 * MIDDLE fragment NOT a FIRST
899 				 */
900 				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
901 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
902 				    0, M_NOWAIT, 1, MT_DATA);
903 				if (oper) {
904 					struct sctp_paramhdr *ph;
905 					uint32_t *ippp;
906 
907 					SCTP_BUF_LEN(oper) =
908 					    sizeof(struct sctp_paramhdr) +
909 					    (3 * sizeof(uint32_t));
910 					ph = mtod(oper, struct sctp_paramhdr *);
911 					ph->param_type =
912 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
913 					ph->param_length = htons(SCTP_BUF_LEN(oper));
914 					ippp = (uint32_t *) (ph + 1);
915 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
916 					ippp++;
917 					*ippp = chk->rec.data.TSN_seq;
918 					ippp++;
919 					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
920 				}
921 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
922 				sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
923 				*abort_flag = 1;
924 			} else if (asoc->fragmented_delivery_inprogress) {
925 				/*
926 				 * Here we are ok with a MIDDLE or LAST
927 				 * piece
928 				 */
929 				if (chk->rec.data.stream_number !=
930 				    asoc->str_of_pdapi) {
931 					/* Got to be the right STR No */
932 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
933 					    chk->rec.data.stream_number,
934 					    asoc->str_of_pdapi);
935 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
936 					    0, M_NOWAIT, 1, MT_DATA);
937 					if (oper) {
938 						struct sctp_paramhdr *ph;
939 						uint32_t *ippp;
940 
941 						SCTP_BUF_LEN(oper) =
942 						    sizeof(struct sctp_paramhdr) +
943 						    (sizeof(uint32_t) * 3);
944 						ph = mtod(oper,
945 						    struct sctp_paramhdr *);
946 						ph->param_type =
947 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
948 						ph->param_length =
949 						    htons(SCTP_BUF_LEN(oper));
950 						ippp = (uint32_t *) (ph + 1);
951 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
952 						ippp++;
953 						*ippp = chk->rec.data.TSN_seq;
954 						ippp++;
955 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
956 					}
957 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
958 					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
959 					*abort_flag = 1;
960 				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
961 					    SCTP_DATA_UNORDERED &&
962 				    chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
963 					/* Got to be the right STR Seq */
964 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
965 					    chk->rec.data.stream_seq,
966 					    asoc->ssn_of_pdapi);
967 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
968 					    0, M_NOWAIT, 1, MT_DATA);
969 					if (oper) {
970 						struct sctp_paramhdr *ph;
971 						uint32_t *ippp;
972 
973 						SCTP_BUF_LEN(oper) =
974 						    sizeof(struct sctp_paramhdr) +
975 						    (3 * sizeof(uint32_t));
976 						ph = mtod(oper,
977 						    struct sctp_paramhdr *);
978 						ph->param_type =
979 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
980 						ph->param_length =
981 						    htons(SCTP_BUF_LEN(oper));
982 						ippp = (uint32_t *) (ph + 1);
983 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
984 						ippp++;
985 						*ippp = chk->rec.data.TSN_seq;
986 						ippp++;
987 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
988 
989 					}
990 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
991 					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
992 					*abort_flag = 1;
993 				}
994 			}
995 		}
996 		return;
997 	}
998 	/* Find its place */
999 	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1000 		if (SCTP_TSN_GT(at->rec.data.TSN_seq, chk->rec.data.TSN_seq)) {
1001 			/*
1002 			 * one in queue is bigger than the new one, insert
1003 			 * before this one
1004 			 */
1005 			/* A check */
1006 			asoc->size_on_reasm_queue += chk->send_size;
1007 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1008 			next = at;
1009 			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1010 			break;
1011 		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
1012 			/* Gak, He sent me a duplicate str seq number */
1013 			/*
1014 			 * foo bar, I guess I will just free this new guy,
1015 			 * should we abort too? FIX ME MAYBE? Or it COULD be
1016 			 * that the SSN's have wrapped. Maybe I should
1017 			 * compare to TSN somehow... sigh for now just blow
1018 			 * away the chunk!
1019 			 */
1020 			if (chk->data) {
1021 				sctp_m_freem(chk->data);
1022 				chk->data = NULL;
1023 			}
1024 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1025 			return;
1026 		} else {
1027 			prev = at;
1028 			if (TAILQ_NEXT(at, sctp_next) == NULL) {
1029 				/*
1030 				 * We are at the end, insert it after this
1031 				 * one
1032 				 */
1033 				/* check it first */
1034 				asoc->size_on_reasm_queue += chk->send_size;
1035 				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1036 				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
1037 				break;
1038 			}
1039 		}
1040 	}
1041 	/* Now the audits */
1042 	if (prev) {
1043 		prev_tsn = chk->rec.data.TSN_seq - 1;
1044 		if (prev_tsn == prev->rec.data.TSN_seq) {
1045 			/*
1046 			 * Ok the one I am dropping onto the end is the
1047 			 * NEXT. A bit of valdiation here.
1048 			 */
1049 			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1050 			    SCTP_DATA_FIRST_FRAG ||
1051 			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1052 			    SCTP_DATA_MIDDLE_FRAG) {
1053 				/*
1054 				 * Insert chk MUST be a MIDDLE or LAST
1055 				 * fragment
1056 				 */
1057 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1058 				    SCTP_DATA_FIRST_FRAG) {
1059 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a midlle or last but not a first\n");
1060 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
1061 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1062 					    0, M_NOWAIT, 1, MT_DATA);
1063 					if (oper) {
1064 						struct sctp_paramhdr *ph;
1065 						uint32_t *ippp;
1066 
1067 						SCTP_BUF_LEN(oper) =
1068 						    sizeof(struct sctp_paramhdr) +
1069 						    (3 * sizeof(uint32_t));
1070 						ph = mtod(oper,
1071 						    struct sctp_paramhdr *);
1072 						ph->param_type =
1073 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1074 						ph->param_length =
1075 						    htons(SCTP_BUF_LEN(oper));
1076 						ippp = (uint32_t *) (ph + 1);
1077 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1078 						ippp++;
1079 						*ippp = chk->rec.data.TSN_seq;
1080 						ippp++;
1081 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1082 
1083 					}
1084 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
1085 					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1086 					*abort_flag = 1;
1087 					return;
1088 				}
1089 				if (chk->rec.data.stream_number !=
1090 				    prev->rec.data.stream_number) {
1091 					/*
1092 					 * Huh, need the correct STR here,
1093 					 * they must be the same.
1094 					 */
1095 					SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1096 					    chk->rec.data.stream_number,
1097 					    prev->rec.data.stream_number);
1098 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1099 					    0, M_NOWAIT, 1, MT_DATA);
1100 					if (oper) {
1101 						struct sctp_paramhdr *ph;
1102 						uint32_t *ippp;
1103 
1104 						SCTP_BUF_LEN(oper) =
1105 						    sizeof(struct sctp_paramhdr) +
1106 						    (3 * sizeof(uint32_t));
1107 						ph = mtod(oper,
1108 						    struct sctp_paramhdr *);
1109 						ph->param_type =
1110 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1111 						ph->param_length =
1112 						    htons(SCTP_BUF_LEN(oper));
1113 						ippp = (uint32_t *) (ph + 1);
1114 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1115 						ippp++;
1116 						*ippp = chk->rec.data.TSN_seq;
1117 						ippp++;
1118 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1119 					}
1120 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
1121 					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1122 					*abort_flag = 1;
1123 					return;
1124 				}
1125 				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1126 				    chk->rec.data.stream_seq !=
1127 				    prev->rec.data.stream_seq) {
1128 					/*
1129 					 * Huh, need the correct STR here,
1130 					 * they must be the same.
1131 					 */
1132 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1133 					    chk->rec.data.stream_seq,
1134 					    prev->rec.data.stream_seq);
1135 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1136 					    0, M_NOWAIT, 1, MT_DATA);
1137 					if (oper) {
1138 						struct sctp_paramhdr *ph;
1139 						uint32_t *ippp;
1140 
1141 						SCTP_BUF_LEN(oper) =
1142 						    sizeof(struct sctp_paramhdr) +
1143 						    (3 * sizeof(uint32_t));
1144 						ph = mtod(oper,
1145 						    struct sctp_paramhdr *);
1146 						ph->param_type =
1147 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1148 						ph->param_length =
1149 						    htons(SCTP_BUF_LEN(oper));
1150 						ippp = (uint32_t *) (ph + 1);
1151 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1152 						ippp++;
1153 						*ippp = chk->rec.data.TSN_seq;
1154 						ippp++;
1155 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1156 					}
1157 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
1158 					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1159 					*abort_flag = 1;
1160 					return;
1161 				}
1162 			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1163 			    SCTP_DATA_LAST_FRAG) {
1164 				/* Insert chk MUST be a FIRST */
1165 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1166 				    SCTP_DATA_FIRST_FRAG) {
1167 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
1168 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1169 					    0, M_NOWAIT, 1, MT_DATA);
1170 					if (oper) {
1171 						struct sctp_paramhdr *ph;
1172 						uint32_t *ippp;
1173 
1174 						SCTP_BUF_LEN(oper) =
1175 						    sizeof(struct sctp_paramhdr) +
1176 						    (3 * sizeof(uint32_t));
1177 						ph = mtod(oper,
1178 						    struct sctp_paramhdr *);
1179 						ph->param_type =
1180 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1181 						ph->param_length =
1182 						    htons(SCTP_BUF_LEN(oper));
1183 						ippp = (uint32_t *) (ph + 1);
1184 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1185 						ippp++;
1186 						*ippp = chk->rec.data.TSN_seq;
1187 						ippp++;
1188 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1189 
1190 					}
1191 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
1192 					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1193 					*abort_flag = 1;
1194 					return;
1195 				}
1196 			}
1197 		}
1198 	}
1199 	if (next) {
1200 		post_tsn = chk->rec.data.TSN_seq + 1;
1201 		if (post_tsn == next->rec.data.TSN_seq) {
1202 			/*
1203 			 * Ok the one I am inserting ahead of is my NEXT
1204 			 * one. A bit of valdiation here.
1205 			 */
1206 			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1207 				/* Insert chk MUST be a last fragment */
1208 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
1209 				    != SCTP_DATA_LAST_FRAG) {
1210 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
1211 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
1212 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1213 					    0, M_NOWAIT, 1, MT_DATA);
1214 					if (oper) {
1215 						struct sctp_paramhdr *ph;
1216 						uint32_t *ippp;
1217 
1218 						SCTP_BUF_LEN(oper) =
1219 						    sizeof(struct sctp_paramhdr) +
1220 						    (3 * sizeof(uint32_t));
1221 						ph = mtod(oper,
1222 						    struct sctp_paramhdr *);
1223 						ph->param_type =
1224 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1225 						ph->param_length =
1226 						    htons(SCTP_BUF_LEN(oper));
1227 						ippp = (uint32_t *) (ph + 1);
1228 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1229 						ippp++;
1230 						*ippp = chk->rec.data.TSN_seq;
1231 						ippp++;
1232 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1233 					}
1234 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
1235 					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1236 					*abort_flag = 1;
1237 					return;
1238 				}
1239 			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1240 				    SCTP_DATA_MIDDLE_FRAG ||
1241 				    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1242 			    SCTP_DATA_LAST_FRAG) {
1243 				/*
1244 				 * Insert chk CAN be MIDDLE or FIRST NOT
1245 				 * LAST
1246 				 */
1247 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1248 				    SCTP_DATA_LAST_FRAG) {
1249 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
1250 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
1251 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1252 					    0, M_NOWAIT, 1, MT_DATA);
1253 					if (oper) {
1254 						struct sctp_paramhdr *ph;
1255 						uint32_t *ippp;
1256 
1257 						SCTP_BUF_LEN(oper) =
1258 						    sizeof(struct sctp_paramhdr) +
1259 						    (3 * sizeof(uint32_t));
1260 						ph = mtod(oper,
1261 						    struct sctp_paramhdr *);
1262 						ph->param_type =
1263 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1264 						ph->param_length =
1265 						    htons(SCTP_BUF_LEN(oper));
1266 						ippp = (uint32_t *) (ph + 1);
1267 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1268 						ippp++;
1269 						*ippp = chk->rec.data.TSN_seq;
1270 						ippp++;
1271 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1272 
1273 					}
1274 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
1275 					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1276 					*abort_flag = 1;
1277 					return;
1278 				}
1279 				if (chk->rec.data.stream_number !=
1280 				    next->rec.data.stream_number) {
1281 					/*
1282 					 * Huh, need the correct STR here,
1283 					 * they must be the same.
1284 					 */
1285 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1286 					    chk->rec.data.stream_number,
1287 					    next->rec.data.stream_number);
1288 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1289 					    0, M_NOWAIT, 1, MT_DATA);
1290 					if (oper) {
1291 						struct sctp_paramhdr *ph;
1292 						uint32_t *ippp;
1293 
1294 						SCTP_BUF_LEN(oper) =
1295 						    sizeof(struct sctp_paramhdr) +
1296 						    (3 * sizeof(uint32_t));
1297 						ph = mtod(oper,
1298 						    struct sctp_paramhdr *);
1299 						ph->param_type =
1300 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1301 						ph->param_length =
1302 						    htons(SCTP_BUF_LEN(oper));
1303 						ippp = (uint32_t *) (ph + 1);
1304 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1305 						ippp++;
1306 						*ippp = chk->rec.data.TSN_seq;
1307 						ippp++;
1308 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1309 
1310 					}
1311 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
1312 					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1313 					*abort_flag = 1;
1314 					return;
1315 				}
1316 				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1317 				    chk->rec.data.stream_seq !=
1318 				    next->rec.data.stream_seq) {
1319 					/*
1320 					 * Huh, need the correct STR here,
1321 					 * they must be the same.
1322 					 */
1323 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1324 					    chk->rec.data.stream_seq,
1325 					    next->rec.data.stream_seq);
1326 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1327 					    0, M_NOWAIT, 1, MT_DATA);
1328 					if (oper) {
1329 						struct sctp_paramhdr *ph;
1330 						uint32_t *ippp;
1331 
1332 						SCTP_BUF_LEN(oper) =
1333 						    sizeof(struct sctp_paramhdr) +
1334 						    (3 * sizeof(uint32_t));
1335 						ph = mtod(oper,
1336 						    struct sctp_paramhdr *);
1337 						ph->param_type =
1338 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1339 						ph->param_length =
1340 						    htons(SCTP_BUF_LEN(oper));
1341 						ippp = (uint32_t *) (ph + 1);
1342 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1343 						ippp++;
1344 						*ippp = chk->rec.data.TSN_seq;
1345 						ippp++;
1346 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1347 					}
1348 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
1349 					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1350 					*abort_flag = 1;
1351 					return;
1352 				}
1353 			}
1354 		}
1355 	}
1356 	/* Do we need to do some delivery? check */
1357 	sctp_deliver_reasm_check(stcb, asoc);
1358 }
1359 
1360 /*
1361  * This is an unfortunate routine. It checks to make sure a evil guy is not
1362  * stuffing us full of bad packet fragments. A broken peer could also do this
1363  * but this is doubtful. It is to bad I must worry about evil crackers sigh
1364  * :< more cycles.
1365  */
1366 static int
1367 sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
1368     uint32_t TSN_seq)
1369 {
1370 	struct sctp_tmit_chunk *at;
1371 	uint32_t tsn_est;
1372 
1373 	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1374 		if (SCTP_TSN_GT(TSN_seq, at->rec.data.TSN_seq)) {
1375 			/* is it one bigger? */
1376 			tsn_est = at->rec.data.TSN_seq + 1;
1377 			if (tsn_est == TSN_seq) {
1378 				/* yep. It better be a last then */
1379 				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1380 				    SCTP_DATA_LAST_FRAG) {
1381 					/*
1382 					 * Ok this guy belongs next to a guy
1383 					 * that is NOT last, it should be a
1384 					 * middle/last, not a complete
1385 					 * chunk.
1386 					 */
1387 					return (1);
1388 				} else {
1389 					/*
1390 					 * This guy is ok since its a LAST
1391 					 * and the new chunk is a fully
1392 					 * self- contained one.
1393 					 */
1394 					return (0);
1395 				}
1396 			}
1397 		} else if (TSN_seq == at->rec.data.TSN_seq) {
1398 			/* Software error since I have a dup? */
1399 			return (1);
1400 		} else {
1401 			/*
1402 			 * Ok, 'at' is larger than new chunk but does it
1403 			 * need to be right before it.
1404 			 */
1405 			tsn_est = TSN_seq + 1;
1406 			if (tsn_est == at->rec.data.TSN_seq) {
1407 				/* Yep, It better be a first */
1408 				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1409 				    SCTP_DATA_FIRST_FRAG) {
1410 					return (1);
1411 				} else {
1412 					return (0);
1413 				}
1414 			}
1415 		}
1416 	}
1417 	return (0);
1418 }
1419 
1420 
1421 static int
1422 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1423     struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
1424     struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1425     int *break_flag, int last_chunk)
1426 {
1427 	/* Process a data chunk */
1428 	/* struct sctp_tmit_chunk *chk; */
1429 	struct sctp_tmit_chunk *chk;
1430 	uint32_t tsn, gap;
1431 	struct mbuf *dmbuf;
1432 	int the_len;
1433 	int need_reasm_check = 0;
1434 	uint16_t strmno, strmseq;
1435 	struct mbuf *oper;
1436 	struct sctp_queued_to_read *control;
1437 	int ordered;
1438 	uint32_t protocol_id;
1439 	uint8_t chunk_flags;
1440 	struct sctp_stream_reset_list *liste;
1441 
1442 	chk = NULL;
1443 	tsn = ntohl(ch->dp.tsn);
1444 	chunk_flags = ch->ch.chunk_flags;
1445 	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1446 		asoc->send_sack = 1;
1447 	}
1448 	protocol_id = ch->dp.protocol_id;
1449 	ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1450 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1451 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1452 	}
1453 	if (stcb == NULL) {
1454 		return (0);
1455 	}
1456 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
1457 	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1458 		/* It is a duplicate */
1459 		SCTP_STAT_INCR(sctps_recvdupdata);
1460 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1461 			/* Record a dup for the next outbound sack */
1462 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1463 			asoc->numduptsns++;
1464 		}
1465 		asoc->send_sack = 1;
1466 		return (0);
1467 	}
1468 	/* Calculate the number of TSN's between the base and this TSN */
1469 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1470 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1471 		/* Can't hold the bit in the mapping at max array, toss it */
1472 		return (0);
1473 	}
1474 	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1475 		SCTP_TCB_LOCK_ASSERT(stcb);
1476 		if (sctp_expand_mapping_array(asoc, gap)) {
1477 			/* Can't expand, drop it */
1478 			return (0);
1479 		}
1480 	}
1481 	if (SCTP_TSN_GT(tsn, *high_tsn)) {
1482 		*high_tsn = tsn;
1483 	}
1484 	/* See if we have received this one already */
1485 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1486 	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1487 		SCTP_STAT_INCR(sctps_recvdupdata);
1488 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1489 			/* Record a dup for the next outbound sack */
1490 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1491 			asoc->numduptsns++;
1492 		}
1493 		asoc->send_sack = 1;
1494 		return (0);
1495 	}
1496 	/*
1497 	 * Check to see about the GONE flag, duplicates would cause a sack
1498 	 * to be sent up above
1499 	 */
1500 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1501 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1502 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
1503 	    ) {
1504 		/*
1505 		 * wait a minute, this guy is gone, there is no longer a
1506 		 * receiver. Send peer an ABORT!
1507 		 */
1508 		struct mbuf *op_err;
1509 
1510 		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1511 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1512 		*abort_flag = 1;
1513 		return (0);
1514 	}
1515 	/*
1516 	 * Now before going further we see if there is room. If NOT then we
1517 	 * MAY let one through only IF this TSN is the one we are waiting
1518 	 * for on a partial delivery API.
1519 	 */
1520 
1521 	/* now do the tests */
1522 	if (((asoc->cnt_on_all_streams +
1523 	    asoc->cnt_on_reasm_queue +
1524 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1525 	    (((int)asoc->my_rwnd) <= 0)) {
1526 		/*
1527 		 * When we have NO room in the rwnd we check to make sure
1528 		 * the reader is doing its job...
1529 		 */
1530 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1531 			/* some to read, wake-up */
1532 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1533 			struct socket *so;
1534 
1535 			so = SCTP_INP_SO(stcb->sctp_ep);
1536 			atomic_add_int(&stcb->asoc.refcnt, 1);
1537 			SCTP_TCB_UNLOCK(stcb);
1538 			SCTP_SOCKET_LOCK(so, 1);
1539 			SCTP_TCB_LOCK(stcb);
1540 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1541 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1542 				/* assoc was freed while we were unlocked */
1543 				SCTP_SOCKET_UNLOCK(so, 1);
1544 				return (0);
1545 			}
1546 #endif
1547 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1548 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1549 			SCTP_SOCKET_UNLOCK(so, 1);
1550 #endif
1551 		}
1552 		/* now is it in the mapping array of what we have accepted? */
1553 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1554 		    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1555 			/* Nope not in the valid range dump it */
1556 			sctp_set_rwnd(stcb, asoc);
1557 			if ((asoc->cnt_on_all_streams +
1558 			    asoc->cnt_on_reasm_queue +
1559 			    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1560 				SCTP_STAT_INCR(sctps_datadropchklmt);
1561 			} else {
1562 				SCTP_STAT_INCR(sctps_datadroprwnd);
1563 			}
1564 			*break_flag = 1;
1565 			return (0);
1566 		}
1567 	}
1568 	strmno = ntohs(ch->dp.stream_id);
1569 	if (strmno >= asoc->streamincnt) {
1570 		struct sctp_paramhdr *phdr;
1571 		struct mbuf *mb;
1572 
1573 		mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
1574 		    0, M_NOWAIT, 1, MT_DATA);
1575 		if (mb != NULL) {
1576 			/* add some space up front so prepend will work well */
1577 			SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
1578 			phdr = mtod(mb, struct sctp_paramhdr *);
1579 			/*
1580 			 * Error causes are just param's and this one has
1581 			 * two back to back phdr, one with the error type
1582 			 * and size, the other with the streamid and a rsvd
1583 			 */
1584 			SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1585 			phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1586 			phdr->param_length =
1587 			    htons(sizeof(struct sctp_paramhdr) * 2);
1588 			phdr++;
1589 			/* We insert the stream in the type field */
1590 			phdr->param_type = ch->dp.stream_id;
1591 			/* And set the length to 0 for the rsvd field */
1592 			phdr->param_length = 0;
1593 			sctp_queue_op_err(stcb, mb);
1594 		}
1595 		SCTP_STAT_INCR(sctps_badsid);
1596 		SCTP_TCB_LOCK_ASSERT(stcb);
1597 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1598 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1599 			asoc->highest_tsn_inside_nr_map = tsn;
1600 		}
1601 		if (tsn == (asoc->cumulative_tsn + 1)) {
1602 			/* Update cum-ack */
1603 			asoc->cumulative_tsn = tsn;
1604 		}
1605 		return (0);
1606 	}
1607 	/*
1608 	 * Before we continue lets validate that we are not being fooled by
1609 	 * an evil attacker. We can only have 4k chunks based on our TSN
1610 	 * spread allowed by the mapping array 512 * 8 bits, so there is no
1611 	 * way our stream sequence numbers could have wrapped. We of course
1612 	 * only validate the FIRST fragment so the bit must be set.
1613 	 */
1614 	strmseq = ntohs(ch->dp.stream_sequence);
1615 #ifdef SCTP_ASOCLOG_OF_TSNS
1616 	SCTP_TCB_LOCK_ASSERT(stcb);
1617 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1618 		asoc->tsn_in_at = 0;
1619 		asoc->tsn_in_wrapped = 1;
1620 	}
1621 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1622 	asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1623 	asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1624 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1625 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1626 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1627 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1628 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1629 	asoc->tsn_in_at++;
1630 #endif
1631 	if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1632 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1633 	    (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1634 	    SCTP_SSN_GE(asoc->strmin[strmno].last_sequence_delivered, strmseq)) {
1635 		/* The incoming sseq is behind where we last delivered? */
1636 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1637 		    strmseq, asoc->strmin[strmno].last_sequence_delivered);
1638 		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1639 		    0, M_NOWAIT, 1, MT_DATA);
1640 		if (oper) {
1641 			struct sctp_paramhdr *ph;
1642 			uint32_t *ippp;
1643 
1644 			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
1645 			    (3 * sizeof(uint32_t));
1646 			ph = mtod(oper, struct sctp_paramhdr *);
1647 			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1648 			ph->param_length = htons(SCTP_BUF_LEN(oper));
1649 			ippp = (uint32_t *) (ph + 1);
1650 			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1651 			ippp++;
1652 			*ippp = tsn;
1653 			ippp++;
1654 			*ippp = ((strmno << 16) | strmseq);
1655 
1656 		}
1657 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1658 		sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1659 		*abort_flag = 1;
1660 		return (0);
1661 	}
1662 	/************************************
1663 	 * From here down we may find ch-> invalid
1664 	 * so its a good idea NOT to use it.
1665 	 *************************************/
1666 
1667 	the_len = (chk_length - sizeof(struct sctp_data_chunk));
1668 	if (last_chunk == 0) {
1669 		dmbuf = SCTP_M_COPYM(*m,
1670 		    (offset + sizeof(struct sctp_data_chunk)),
1671 		    the_len, M_NOWAIT);
1672 #ifdef SCTP_MBUF_LOGGING
1673 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1674 			struct mbuf *mat;
1675 
1676 			for (mat = dmbuf; mat; mat = SCTP_BUF_NEXT(mat)) {
1677 				if (SCTP_BUF_IS_EXTENDED(mat)) {
1678 					sctp_log_mb(mat, SCTP_MBUF_ICOPY);
1679 				}
1680 			}
1681 		}
1682 #endif
1683 	} else {
1684 		/* We can steal the last chunk */
1685 		int l_len;
1686 
1687 		dmbuf = *m;
1688 		/* lop off the top part */
1689 		m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1690 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1691 			l_len = SCTP_BUF_LEN(dmbuf);
1692 		} else {
1693 			/*
1694 			 * need to count up the size hopefully does not hit
1695 			 * this to often :-0
1696 			 */
1697 			struct mbuf *lat;
1698 
1699 			l_len = 0;
1700 			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
1701 				l_len += SCTP_BUF_LEN(lat);
1702 			}
1703 		}
1704 		if (l_len > the_len) {
1705 			/* Trim the end round bytes off  too */
1706 			m_adj(dmbuf, -(l_len - the_len));
1707 		}
1708 	}
1709 	if (dmbuf == NULL) {
1710 		SCTP_STAT_INCR(sctps_nomem);
1711 		return (0);
1712 	}
1713 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1714 	    asoc->fragmented_delivery_inprogress == 0 &&
1715 	    TAILQ_EMPTY(&asoc->resetHead) &&
1716 	    ((ordered == 0) ||
1717 	    ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1718 	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1719 		/* Candidate for express delivery */
1720 		/*
1721 		 * Its not fragmented, No PD-API is up, Nothing in the
1722 		 * delivery queue, Its un-ordered OR ordered and the next to
1723 		 * deliver AND nothing else is stuck on the stream queue,
1724 		 * And there is room for it in the socket buffer. Lets just
1725 		 * stuff it up the buffer....
1726 		 */
1727 
1728 		/* It would be nice to avoid this copy if we could :< */
1729 		sctp_alloc_a_readq(stcb, control);
1730 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1731 		    protocol_id,
1732 		    strmno, strmseq,
1733 		    chunk_flags,
1734 		    dmbuf);
1735 		if (control == NULL) {
1736 			goto failed_express_del;
1737 		}
1738 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1739 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1740 			asoc->highest_tsn_inside_nr_map = tsn;
1741 		}
1742 		sctp_add_to_readq(stcb->sctp_ep, stcb,
1743 		    control, &stcb->sctp_socket->so_rcv,
1744 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1745 
1746 		if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1747 			/* for ordered, bump what we delivered */
1748 			asoc->strmin[strmno].last_sequence_delivered++;
1749 		}
1750 		SCTP_STAT_INCR(sctps_recvexpress);
1751 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1752 			sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1753 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
1754 		}
1755 		control = NULL;
1756 
1757 		goto finish_express_del;
1758 	}
1759 failed_express_del:
1760 	/* If we reach here this is a new chunk */
1761 	chk = NULL;
1762 	control = NULL;
1763 	/* Express for fragmented delivery? */
1764 	if ((asoc->fragmented_delivery_inprogress) &&
1765 	    (stcb->asoc.control_pdapi) &&
1766 	    (asoc->str_of_pdapi == strmno) &&
1767 	    (asoc->ssn_of_pdapi == strmseq)
1768 	    ) {
1769 		control = stcb->asoc.control_pdapi;
1770 		if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1771 			/* Can't be another first? */
1772 			goto failed_pdapi_express_del;
1773 		}
1774 		if (tsn == (control->sinfo_tsn + 1)) {
1775 			/* Yep, we can add it on */
1776 			int end = 0;
1777 
1778 			if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1779 				end = 1;
1780 			}
1781 			if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1782 			    tsn,
1783 			    &stcb->sctp_socket->so_rcv)) {
1784 				SCTP_PRINTF("Append fails end:%d\n", end);
1785 				goto failed_pdapi_express_del;
1786 			}
1787 			SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1788 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1789 				asoc->highest_tsn_inside_nr_map = tsn;
1790 			}
1791 			SCTP_STAT_INCR(sctps_recvexpressm);
1792 			control->sinfo_tsn = tsn;
1793 			asoc->tsn_last_delivered = tsn;
1794 			asoc->fragment_flags = chunk_flags;
1795 			asoc->tsn_of_pdapi_last_delivered = tsn;
1796 			asoc->last_flags_delivered = chunk_flags;
1797 			asoc->last_strm_seq_delivered = strmseq;
1798 			asoc->last_strm_no_delivered = strmno;
1799 			if (end) {
1800 				/* clean up the flags and such */
1801 				asoc->fragmented_delivery_inprogress = 0;
1802 				if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1803 					asoc->strmin[strmno].last_sequence_delivered++;
1804 				}
1805 				stcb->asoc.control_pdapi = NULL;
1806 				if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1807 					/*
1808 					 * There could be another message
1809 					 * ready
1810 					 */
1811 					need_reasm_check = 1;
1812 				}
1813 			}
1814 			control = NULL;
1815 			goto finish_express_del;
1816 		}
1817 	}
1818 failed_pdapi_express_del:
1819 	control = NULL;
1820 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
1821 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1822 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1823 			asoc->highest_tsn_inside_nr_map = tsn;
1824 		}
1825 	} else {
1826 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1827 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
1828 			asoc->highest_tsn_inside_map = tsn;
1829 		}
1830 	}
1831 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1832 		sctp_alloc_a_chunk(stcb, chk);
1833 		if (chk == NULL) {
1834 			/* No memory so we drop the chunk */
1835 			SCTP_STAT_INCR(sctps_nomem);
1836 			if (last_chunk == 0) {
1837 				/* we copied it, free the copy */
1838 				sctp_m_freem(dmbuf);
1839 			}
1840 			return (0);
1841 		}
1842 		chk->rec.data.TSN_seq = tsn;
1843 		chk->no_fr_allowed = 0;
1844 		chk->rec.data.stream_seq = strmseq;
1845 		chk->rec.data.stream_number = strmno;
1846 		chk->rec.data.payloadtype = protocol_id;
1847 		chk->rec.data.context = stcb->asoc.context;
1848 		chk->rec.data.doing_fast_retransmit = 0;
1849 		chk->rec.data.rcv_flags = chunk_flags;
1850 		chk->asoc = asoc;
1851 		chk->send_size = the_len;
1852 		chk->whoTo = net;
1853 		atomic_add_int(&net->ref_count, 1);
1854 		chk->data = dmbuf;
1855 	} else {
1856 		sctp_alloc_a_readq(stcb, control);
1857 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1858 		    protocol_id,
1859 		    strmno, strmseq,
1860 		    chunk_flags,
1861 		    dmbuf);
1862 		if (control == NULL) {
1863 			/* No memory so we drop the chunk */
1864 			SCTP_STAT_INCR(sctps_nomem);
1865 			if (last_chunk == 0) {
1866 				/* we copied it, free the copy */
1867 				sctp_m_freem(dmbuf);
1868 			}
1869 			return (0);
1870 		}
1871 		control->length = the_len;
1872 	}
1873 
1874 	/* Mark it as received */
1875 	/* Now queue it where it belongs */
1876 	if (control != NULL) {
1877 		/* First a sanity check */
1878 		if (asoc->fragmented_delivery_inprogress) {
1879 			/*
1880 			 * Ok, we have a fragmented delivery in progress if
1881 			 * this chunk is next to deliver OR belongs in our
1882 			 * view to the reassembly, the peer is evil or
1883 			 * broken.
1884 			 */
1885 			uint32_t estimate_tsn;
1886 
1887 			estimate_tsn = asoc->tsn_last_delivered + 1;
1888 			if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1889 			    (estimate_tsn == control->sinfo_tsn)) {
1890 				/* Evil/Broke peer */
1891 				sctp_m_freem(control->data);
1892 				control->data = NULL;
1893 				if (control->whoFrom) {
1894 					sctp_free_remote_addr(control->whoFrom);
1895 					control->whoFrom = NULL;
1896 				}
1897 				sctp_free_a_readq(stcb, control);
1898 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1899 				    0, M_NOWAIT, 1, MT_DATA);
1900 				if (oper) {
1901 					struct sctp_paramhdr *ph;
1902 					uint32_t *ippp;
1903 
1904 					SCTP_BUF_LEN(oper) =
1905 					    sizeof(struct sctp_paramhdr) +
1906 					    (3 * sizeof(uint32_t));
1907 					ph = mtod(oper, struct sctp_paramhdr *);
1908 					ph->param_type =
1909 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1910 					ph->param_length = htons(SCTP_BUF_LEN(oper));
1911 					ippp = (uint32_t *) (ph + 1);
1912 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
1913 					ippp++;
1914 					*ippp = tsn;
1915 					ippp++;
1916 					*ippp = ((strmno << 16) | strmseq);
1917 				}
1918 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1919 				sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1920 				*abort_flag = 1;
1921 				return (0);
1922 			} else {
1923 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1924 					sctp_m_freem(control->data);
1925 					control->data = NULL;
1926 					if (control->whoFrom) {
1927 						sctp_free_remote_addr(control->whoFrom);
1928 						control->whoFrom = NULL;
1929 					}
1930 					sctp_free_a_readq(stcb, control);
1931 
1932 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1933 					    0, M_NOWAIT, 1, MT_DATA);
1934 					if (oper) {
1935 						struct sctp_paramhdr *ph;
1936 						uint32_t *ippp;
1937 
1938 						SCTP_BUF_LEN(oper) =
1939 						    sizeof(struct sctp_paramhdr) +
1940 						    (3 * sizeof(uint32_t));
1941 						ph = mtod(oper,
1942 						    struct sctp_paramhdr *);
1943 						ph->param_type =
1944 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1945 						ph->param_length =
1946 						    htons(SCTP_BUF_LEN(oper));
1947 						ippp = (uint32_t *) (ph + 1);
1948 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16);
1949 						ippp++;
1950 						*ippp = tsn;
1951 						ippp++;
1952 						*ippp = ((strmno << 16) | strmseq);
1953 					}
1954 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1955 					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1956 					*abort_flag = 1;
1957 					return (0);
1958 				}
1959 			}
1960 		} else {
1961 			/* No PDAPI running */
1962 			if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
1963 				/*
1964 				 * Reassembly queue is NOT empty validate
1965 				 * that this tsn does not need to be in
1966 				 * reasembly queue. If it does then our peer
1967 				 * is broken or evil.
1968 				 */
1969 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1970 					sctp_m_freem(control->data);
1971 					control->data = NULL;
1972 					if (control->whoFrom) {
1973 						sctp_free_remote_addr(control->whoFrom);
1974 						control->whoFrom = NULL;
1975 					}
1976 					sctp_free_a_readq(stcb, control);
1977 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1978 					    0, M_NOWAIT, 1, MT_DATA);
1979 					if (oper) {
1980 						struct sctp_paramhdr *ph;
1981 						uint32_t *ippp;
1982 
1983 						SCTP_BUF_LEN(oper) =
1984 						    sizeof(struct sctp_paramhdr) +
1985 						    (3 * sizeof(uint32_t));
1986 						ph = mtod(oper,
1987 						    struct sctp_paramhdr *);
1988 						ph->param_type =
1989 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1990 						ph->param_length =
1991 						    htons(SCTP_BUF_LEN(oper));
1992 						ippp = (uint32_t *) (ph + 1);
1993 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
1994 						ippp++;
1995 						*ippp = tsn;
1996 						ippp++;
1997 						*ippp = ((strmno << 16) | strmseq);
1998 					}
1999 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
2000 					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
2001 					*abort_flag = 1;
2002 					return (0);
2003 				}
2004 			}
2005 		}
2006 		/* ok, if we reach here we have passed the sanity checks */
2007 		if (chunk_flags & SCTP_DATA_UNORDERED) {
2008 			/* queue directly into socket buffer */
2009 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2010 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2011 			    control,
2012 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2013 		} else {
2014 			/*
2015 			 * Special check for when streams are resetting. We
2016 			 * could be more smart about this and check the
2017 			 * actual stream to see if it is not being reset..
2018 			 * that way we would not create a HOLB when amongst
2019 			 * streams being reset and those not being reset.
2020 			 *
2021 			 * We take complete messages that have a stream reset
2022 			 * intervening (aka the TSN is after where our
2023 			 * cum-ack needs to be) off and put them on a
2024 			 * pending_reply_queue. The reassembly ones we do
2025 			 * not have to worry about since they are all sorted
2026 			 * and proceessed by TSN order. It is only the
2027 			 * singletons I must worry about.
2028 			 */
2029 			if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2030 			    SCTP_TSN_GT(tsn, liste->tsn)) {
2031 				/*
2032 				 * yep its past where we need to reset... go
2033 				 * ahead and queue it.
2034 				 */
2035 				if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2036 					/* first one on */
2037 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2038 				} else {
2039 					struct sctp_queued_to_read *ctlOn,
2040 					                   *nctlOn;
2041 					unsigned char inserted = 0;
2042 
2043 					TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
2044 						if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
2045 							continue;
2046 						} else {
2047 							/* found it */
2048 							TAILQ_INSERT_BEFORE(ctlOn, control, next);
2049 							inserted = 1;
2050 							break;
2051 						}
2052 					}
2053 					if (inserted == 0) {
2054 						/*
2055 						 * must be put at end, use
2056 						 * prevP (all setup from
2057 						 * loop) to setup nextP.
2058 						 */
2059 						TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2060 					}
2061 				}
2062 			} else {
2063 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
2064 				if (*abort_flag) {
2065 					return (0);
2066 				}
2067 			}
2068 		}
2069 	} else {
2070 		/* Into the re-assembly queue */
2071 		sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2072 		if (*abort_flag) {
2073 			/*
2074 			 * the assoc is now gone and chk was put onto the
2075 			 * reasm queue, which has all been freed.
2076 			 */
2077 			*m = NULL;
2078 			return (0);
2079 		}
2080 	}
2081 finish_express_del:
2082 	if (tsn == (asoc->cumulative_tsn + 1)) {
2083 		/* Update cum-ack */
2084 		asoc->cumulative_tsn = tsn;
2085 	}
2086 	if (last_chunk) {
2087 		*m = NULL;
2088 	}
2089 	if (ordered) {
2090 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2091 	} else {
2092 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2093 	}
2094 	SCTP_STAT_INCR(sctps_recvdata);
2095 	/* Set it present please */
2096 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2097 		sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2098 	}
2099 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2100 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2101 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2102 	}
2103 	/* check the special flag for stream resets */
2104 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2105 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2106 		/*
2107 		 * we have finished working through the backlogged TSN's now
2108 		 * time to reset streams. 1: call reset function. 2: free
2109 		 * pending_reply space 3: distribute any chunks in
2110 		 * pending_reply_queue.
2111 		 */
2112 		struct sctp_queued_to_read *ctl, *nctl;
2113 
2114 		sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2115 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2116 		SCTP_FREE(liste, SCTP_M_STRESET);
2117 		/* sa_ignore FREED_MEMORY */
2118 		liste = TAILQ_FIRST(&asoc->resetHead);
2119 		if (TAILQ_EMPTY(&asoc->resetHead)) {
2120 			/* All can be removed */
2121 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2122 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2123 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2124 				if (*abort_flag) {
2125 					return (0);
2126 				}
2127 			}
2128 		} else {
2129 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2130 				if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
2131 					break;
2132 				}
2133 				/*
2134 				 * if ctl->sinfo_tsn is <= liste->tsn we can
2135 				 * process it which is the NOT of
2136 				 * ctl->sinfo_tsn > liste->tsn
2137 				 */
2138 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2139 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2140 				if (*abort_flag) {
2141 					return (0);
2142 				}
2143 			}
2144 		}
2145 		/*
2146 		 * Now service re-assembly to pick up anything that has been
2147 		 * held on reassembly queue?
2148 		 */
2149 		sctp_deliver_reasm_check(stcb, asoc);
2150 		need_reasm_check = 0;
2151 	}
2152 	if (need_reasm_check) {
2153 		/* Another one waits ? */
2154 		sctp_deliver_reasm_check(stcb, asoc);
2155 	}
2156 	return (1);
2157 }
2158 
2159 int8_t sctp_map_lookup_tab[256] = {
2160 	0, 1, 0, 2, 0, 1, 0, 3,
2161 	0, 1, 0, 2, 0, 1, 0, 4,
2162 	0, 1, 0, 2, 0, 1, 0, 3,
2163 	0, 1, 0, 2, 0, 1, 0, 5,
2164 	0, 1, 0, 2, 0, 1, 0, 3,
2165 	0, 1, 0, 2, 0, 1, 0, 4,
2166 	0, 1, 0, 2, 0, 1, 0, 3,
2167 	0, 1, 0, 2, 0, 1, 0, 6,
2168 	0, 1, 0, 2, 0, 1, 0, 3,
2169 	0, 1, 0, 2, 0, 1, 0, 4,
2170 	0, 1, 0, 2, 0, 1, 0, 3,
2171 	0, 1, 0, 2, 0, 1, 0, 5,
2172 	0, 1, 0, 2, 0, 1, 0, 3,
2173 	0, 1, 0, 2, 0, 1, 0, 4,
2174 	0, 1, 0, 2, 0, 1, 0, 3,
2175 	0, 1, 0, 2, 0, 1, 0, 7,
2176 	0, 1, 0, 2, 0, 1, 0, 3,
2177 	0, 1, 0, 2, 0, 1, 0, 4,
2178 	0, 1, 0, 2, 0, 1, 0, 3,
2179 	0, 1, 0, 2, 0, 1, 0, 5,
2180 	0, 1, 0, 2, 0, 1, 0, 3,
2181 	0, 1, 0, 2, 0, 1, 0, 4,
2182 	0, 1, 0, 2, 0, 1, 0, 3,
2183 	0, 1, 0, 2, 0, 1, 0, 6,
2184 	0, 1, 0, 2, 0, 1, 0, 3,
2185 	0, 1, 0, 2, 0, 1, 0, 4,
2186 	0, 1, 0, 2, 0, 1, 0, 3,
2187 	0, 1, 0, 2, 0, 1, 0, 5,
2188 	0, 1, 0, 2, 0, 1, 0, 3,
2189 	0, 1, 0, 2, 0, 1, 0, 4,
2190 	0, 1, 0, 2, 0, 1, 0, 3,
2191 	0, 1, 0, 2, 0, 1, 0, 8
2192 };
2193 
2194 
2195 void
2196 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2197 {
2198 	/*
2199 	 * Now we also need to check the mapping array in a couple of ways.
2200 	 * 1) Did we move the cum-ack point?
2201 	 *
2202 	 * When you first glance at this you might think that all entries that
2203 	 * make up the postion of the cum-ack would be in the nr-mapping
2204 	 * array only.. i.e. things up to the cum-ack are always
2205 	 * deliverable. Thats true with one exception, when its a fragmented
2206 	 * message we may not deliver the data until some threshold (or all
2207 	 * of it) is in place. So we must OR the nr_mapping_array and
2208 	 * mapping_array to get a true picture of the cum-ack.
2209 	 */
2210 	struct sctp_association *asoc;
2211 	int at;
2212 	uint8_t val;
2213 	int slide_from, slide_end, lgap, distance;
2214 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2215 
2216 	asoc = &stcb->asoc;
2217 
2218 	old_cumack = asoc->cumulative_tsn;
2219 	old_base = asoc->mapping_array_base_tsn;
2220 	old_highest = asoc->highest_tsn_inside_map;
2221 	/*
2222 	 * We could probably improve this a small bit by calculating the
2223 	 * offset of the current cum-ack as the starting point.
2224 	 */
2225 	at = 0;
2226 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2227 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2228 		if (val == 0xff) {
2229 			at += 8;
2230 		} else {
2231 			/* there is a 0 bit */
2232 			at += sctp_map_lookup_tab[val];
2233 			break;
2234 		}
2235 	}
2236 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
2237 
2238 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2239 	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2240 #ifdef INVARIANTS
2241 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2242 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2243 #else
2244 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2245 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2246 		sctp_print_mapping_array(asoc);
2247 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2248 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2249 		}
2250 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2251 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2252 #endif
2253 	}
2254 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2255 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2256 	} else {
2257 		highest_tsn = asoc->highest_tsn_inside_map;
2258 	}
2259 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2260 		/* The complete array was completed by a single FR */
2261 		/* highest becomes the cum-ack */
2262 		int clr;
2263 
2264 #ifdef INVARIANTS
2265 		unsigned int i;
2266 
2267 #endif
2268 
2269 		/* clear the array */
2270 		clr = ((at + 7) >> 3);
2271 		if (clr > asoc->mapping_array_size) {
2272 			clr = asoc->mapping_array_size;
2273 		}
2274 		memset(asoc->mapping_array, 0, clr);
2275 		memset(asoc->nr_mapping_array, 0, clr);
2276 #ifdef INVARIANTS
2277 		for (i = 0; i < asoc->mapping_array_size; i++) {
2278 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2279 				SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2280 				sctp_print_mapping_array(asoc);
2281 			}
2282 		}
2283 #endif
2284 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2285 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2286 	} else if (at >= 8) {
2287 		/* we can slide the mapping array down */
2288 		/* slide_from holds where we hit the first NON 0xff byte */
2289 
2290 		/*
2291 		 * now calculate the ceiling of the move using our highest
2292 		 * TSN value
2293 		 */
2294 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2295 		slide_end = (lgap >> 3);
2296 		if (slide_end < slide_from) {
2297 			sctp_print_mapping_array(asoc);
2298 #ifdef INVARIANTS
2299 			panic("impossible slide");
2300 #else
2301 			SCTP_PRINTF("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
2302 			    lgap, slide_end, slide_from, at);
2303 			return;
2304 #endif
2305 		}
2306 		if (slide_end > asoc->mapping_array_size) {
2307 #ifdef INVARIANTS
2308 			panic("would overrun buffer");
2309 #else
2310 			SCTP_PRINTF("Gak, would have overrun map end:%d slide_end:%d\n",
2311 			    asoc->mapping_array_size, slide_end);
2312 			slide_end = asoc->mapping_array_size;
2313 #endif
2314 		}
2315 		distance = (slide_end - slide_from) + 1;
2316 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2317 			sctp_log_map(old_base, old_cumack, old_highest,
2318 			    SCTP_MAP_PREPARE_SLIDE);
2319 			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2320 			    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2321 		}
2322 		if (distance + slide_from > asoc->mapping_array_size ||
2323 		    distance < 0) {
2324 			/*
2325 			 * Here we do NOT slide forward the array so that
2326 			 * hopefully when more data comes in to fill it up
2327 			 * we will be able to slide it forward. Really I
2328 			 * don't think this should happen :-0
2329 			 */
2330 
2331 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2332 				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2333 				    (uint32_t) asoc->mapping_array_size,
2334 				    SCTP_MAP_SLIDE_NONE);
2335 			}
2336 		} else {
2337 			int ii;
2338 
2339 			for (ii = 0; ii < distance; ii++) {
2340 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2341 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2342 
2343 			}
2344 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2345 				asoc->mapping_array[ii] = 0;
2346 				asoc->nr_mapping_array[ii] = 0;
2347 			}
2348 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2349 				asoc->highest_tsn_inside_map += (slide_from << 3);
2350 			}
2351 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2352 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2353 			}
2354 			asoc->mapping_array_base_tsn += (slide_from << 3);
2355 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2356 				sctp_log_map(asoc->mapping_array_base_tsn,
2357 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2358 				    SCTP_MAP_SLIDE_RESULT);
2359 			}
2360 		}
2361 	}
2362 }
2363 
2364 void
2365 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2366 {
2367 	struct sctp_association *asoc;
2368 	uint32_t highest_tsn;
2369 
2370 	asoc = &stcb->asoc;
2371 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2372 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2373 	} else {
2374 		highest_tsn = asoc->highest_tsn_inside_map;
2375 	}
2376 
2377 	/*
2378 	 * Now we need to see if we need to queue a sack or just start the
2379 	 * timer (if allowed).
2380 	 */
2381 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2382 		/*
2383 		 * Ok special case, in SHUTDOWN-SENT case. here we maker
2384 		 * sure SACK timer is off and instead send a SHUTDOWN and a
2385 		 * SACK
2386 		 */
2387 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2388 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2389 			    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2390 		}
2391 		sctp_send_shutdown(stcb,
2392 		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2393 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2394 	} else {
2395 		int is_a_gap;
2396 
2397 		/* is there a gap now ? */
2398 		is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2399 
2400 		/*
2401 		 * CMT DAC algorithm: increase number of packets received
2402 		 * since last ack
2403 		 */
2404 		stcb->asoc.cmt_dac_pkts_rcvd++;
2405 
2406 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2407 							 * SACK */
2408 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2409 							 * longer is one */
2410 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2411 		    (is_a_gap) ||	/* is still a gap */
2412 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2413 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2414 		    ) {
2415 
2416 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2417 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2418 			    (stcb->asoc.send_sack == 0) &&
2419 			    (stcb->asoc.numduptsns == 0) &&
2420 			    (stcb->asoc.delayed_ack) &&
2421 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2422 
2423 				/*
2424 				 * CMT DAC algorithm: With CMT, delay acks
2425 				 * even in the face of
2426 				 *
2427 				 * reordering. Therefore, if acks that do not
2428 				 * have to be sent because of the above
2429 				 * reasons, will be delayed. That is, acks
2430 				 * that would have been sent due to gap
2431 				 * reports will be delayed with DAC. Start
2432 				 * the delayed ack timer.
2433 				 */
2434 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2435 				    stcb->sctp_ep, stcb, NULL);
2436 			} else {
2437 				/*
2438 				 * Ok we must build a SACK since the timer
2439 				 * is pending, we got our first packet OR
2440 				 * there are gaps or duplicates.
2441 				 */
2442 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2443 				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2444 			}
2445 		} else {
2446 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2447 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2448 				    stcb->sctp_ep, stcb, NULL);
2449 			}
2450 		}
2451 	}
2452 }
2453 
2454 void
2455 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2456 {
2457 	struct sctp_tmit_chunk *chk;
2458 	uint32_t tsize, pd_point;
2459 	uint16_t nxt_todel;
2460 
2461 	if (asoc->fragmented_delivery_inprogress) {
2462 		sctp_service_reassembly(stcb, asoc);
2463 	}
2464 	/* Can we proceed further, i.e. the PD-API is complete */
2465 	if (asoc->fragmented_delivery_inprogress) {
2466 		/* no */
2467 		return;
2468 	}
2469 	/*
2470 	 * Now is there some other chunk I can deliver from the reassembly
2471 	 * queue.
2472 	 */
2473 doit_again:
2474 	chk = TAILQ_FIRST(&asoc->reasmqueue);
2475 	if (chk == NULL) {
2476 		asoc->size_on_reasm_queue = 0;
2477 		asoc->cnt_on_reasm_queue = 0;
2478 		return;
2479 	}
2480 	nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2481 	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2482 	    ((nxt_todel == chk->rec.data.stream_seq) ||
2483 	    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2484 		/*
2485 		 * Yep the first one is here. We setup to start reception,
2486 		 * by backing down the TSN just in case we can't deliver.
2487 		 */
2488 
2489 		/*
2490 		 * Before we start though either all of the message should
2491 		 * be here or the socket buffer max or nothing on the
2492 		 * delivery queue and something can be delivered.
2493 		 */
2494 		if (stcb->sctp_socket) {
2495 			pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
2496 			    stcb->sctp_ep->partial_delivery_point);
2497 		} else {
2498 			pd_point = stcb->sctp_ep->partial_delivery_point;
2499 		}
2500 		if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
2501 			asoc->fragmented_delivery_inprogress = 1;
2502 			asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2503 			asoc->str_of_pdapi = chk->rec.data.stream_number;
2504 			asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2505 			asoc->pdapi_ppid = chk->rec.data.payloadtype;
2506 			asoc->fragment_flags = chk->rec.data.rcv_flags;
2507 			sctp_service_reassembly(stcb, asoc);
2508 			if (asoc->fragmented_delivery_inprogress == 0) {
2509 				goto doit_again;
2510 			}
2511 		}
2512 	}
2513 }
2514 
2515 int
2516 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2517     struct sockaddr *src, struct sockaddr *dst,
2518     struct sctphdr *sh, struct sctp_inpcb *inp,
2519     struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t * high_tsn,
2520     uint8_t use_mflowid, uint32_t mflowid,
2521     uint32_t vrf_id, uint16_t port)
2522 {
2523 	struct sctp_data_chunk *ch, chunk_buf;
2524 	struct sctp_association *asoc;
2525 	int num_chunks = 0;	/* number of control chunks processed */
2526 	int stop_proc = 0;
2527 	int chk_length, break_flag, last_chunk;
2528 	int abort_flag = 0, was_a_gap;
2529 	struct mbuf *m;
2530 	uint32_t highest_tsn;
2531 
2532 	/* set the rwnd */
2533 	sctp_set_rwnd(stcb, &stcb->asoc);
2534 
2535 	m = *mm;
2536 	SCTP_TCB_LOCK_ASSERT(stcb);
2537 	asoc = &stcb->asoc;
2538 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2539 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2540 	} else {
2541 		highest_tsn = asoc->highest_tsn_inside_map;
2542 	}
2543 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2544 	/*
2545 	 * setup where we got the last DATA packet from for any SACK that
2546 	 * may need to go out. Don't bump the net. This is done ONLY when a
2547 	 * chunk is assigned.
2548 	 */
2549 	asoc->last_data_chunk_from = net;
2550 
2551 	/*-
2552 	 * Now before we proceed we must figure out if this is a wasted
2553 	 * cluster... i.e. it is a small packet sent in and yet the driver
2554 	 * underneath allocated a full cluster for it. If so we must copy it
2555 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2556 	 * with cluster starvation. Note for __Panda__ we don't do this
2557 	 * since it has clusters all the way down to 64 bytes.
2558 	 */
2559 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2560 		/* we only handle mbufs that are singletons.. not chains */
2561 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2562 		if (m) {
2563 			/* ok lets see if we can copy the data up */
2564 			caddr_t *from, *to;
2565 
2566 			/* get the pointers and copy */
2567 			to = mtod(m, caddr_t *);
2568 			from = mtod((*mm), caddr_t *);
2569 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2570 			/* copy the length and free up the old */
2571 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2572 			sctp_m_freem(*mm);
2573 			/* sucess, back copy */
2574 			*mm = m;
2575 		} else {
2576 			/* We are in trouble in the mbuf world .. yikes */
2577 			m = *mm;
2578 		}
2579 	}
2580 	/* get pointer to the first chunk header */
2581 	ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2582 	    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2583 	if (ch == NULL) {
2584 		return (1);
2585 	}
2586 	/*
2587 	 * process all DATA chunks...
2588 	 */
2589 	*high_tsn = asoc->cumulative_tsn;
2590 	break_flag = 0;
2591 	asoc->data_pkts_seen++;
2592 	while (stop_proc == 0) {
2593 		/* validate chunk length */
2594 		chk_length = ntohs(ch->ch.chunk_length);
2595 		if (length - *offset < chk_length) {
2596 			/* all done, mutulated chunk */
2597 			stop_proc = 1;
2598 			continue;
2599 		}
2600 		if (ch->ch.chunk_type == SCTP_DATA) {
2601 			if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
2602 				/*
2603 				 * Need to send an abort since we had a
2604 				 * invalid data chunk.
2605 				 */
2606 				struct mbuf *op_err;
2607 
2608 				op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
2609 				    0, M_NOWAIT, 1, MT_DATA);
2610 
2611 				if (op_err) {
2612 					struct sctp_paramhdr *ph;
2613 					uint32_t *ippp;
2614 
2615 					SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
2616 					    (2 * sizeof(uint32_t));
2617 					ph = mtod(op_err, struct sctp_paramhdr *);
2618 					ph->param_type =
2619 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2620 					ph->param_length = htons(SCTP_BUF_LEN(op_err));
2621 					ippp = (uint32_t *) (ph + 1);
2622 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2623 					ippp++;
2624 					*ippp = asoc->cumulative_tsn;
2625 
2626 				}
2627 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2628 				sctp_abort_association(inp, stcb, m, iphlen,
2629 				    src, dst, sh, op_err,
2630 				    use_mflowid, mflowid,
2631 				    vrf_id, port);
2632 				return (2);
2633 			}
2634 #ifdef SCTP_AUDITING_ENABLED
2635 			sctp_audit_log(0xB1, 0);
2636 #endif
2637 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2638 				last_chunk = 1;
2639 			} else {
2640 				last_chunk = 0;
2641 			}
2642 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2643 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2644 			    last_chunk)) {
2645 				num_chunks++;
2646 			}
2647 			if (abort_flag)
2648 				return (2);
2649 
2650 			if (break_flag) {
2651 				/*
2652 				 * Set because of out of rwnd space and no
2653 				 * drop rep space left.
2654 				 */
2655 				stop_proc = 1;
2656 				continue;
2657 			}
2658 		} else {
2659 			/* not a data chunk in the data region */
2660 			switch (ch->ch.chunk_type) {
2661 			case SCTP_INITIATION:
2662 			case SCTP_INITIATION_ACK:
2663 			case SCTP_SELECTIVE_ACK:
2664 			case SCTP_NR_SELECTIVE_ACK:
2665 			case SCTP_HEARTBEAT_REQUEST:
2666 			case SCTP_HEARTBEAT_ACK:
2667 			case SCTP_ABORT_ASSOCIATION:
2668 			case SCTP_SHUTDOWN:
2669 			case SCTP_SHUTDOWN_ACK:
2670 			case SCTP_OPERATION_ERROR:
2671 			case SCTP_COOKIE_ECHO:
2672 			case SCTP_COOKIE_ACK:
2673 			case SCTP_ECN_ECHO:
2674 			case SCTP_ECN_CWR:
2675 			case SCTP_SHUTDOWN_COMPLETE:
2676 			case SCTP_AUTHENTICATION:
2677 			case SCTP_ASCONF_ACK:
2678 			case SCTP_PACKET_DROPPED:
2679 			case SCTP_STREAM_RESET:
2680 			case SCTP_FORWARD_CUM_TSN:
2681 			case SCTP_ASCONF:
2682 				/*
2683 				 * Now, what do we do with KNOWN chunks that
2684 				 * are NOT in the right place?
2685 				 *
2686 				 * For now, I do nothing but ignore them. We
2687 				 * may later want to add sysctl stuff to
2688 				 * switch out and do either an ABORT() or
2689 				 * possibly process them.
2690 				 */
2691 				if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2692 					struct mbuf *op_err;
2693 
2694 					op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
2695 					sctp_abort_association(inp, stcb,
2696 					    m, iphlen,
2697 					    src, dst,
2698 					    sh, op_err,
2699 					    use_mflowid, mflowid,
2700 					    vrf_id, port);
2701 					return (2);
2702 				}
2703 				break;
2704 			default:
2705 				/* unknown chunk type, use bit rules */
2706 				if (ch->ch.chunk_type & 0x40) {
2707 					/* Add a error report to the queue */
2708 					struct mbuf *merr;
2709 					struct sctp_paramhdr *phd;
2710 
2711 					merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_NOWAIT, 1, MT_DATA);
2712 					if (merr) {
2713 						phd = mtod(merr, struct sctp_paramhdr *);
2714 						/*
2715 						 * We cheat and use param
2716 						 * type since we did not
2717 						 * bother to define a error
2718 						 * cause struct. They are
2719 						 * the same basic format
2720 						 * with different names.
2721 						 */
2722 						phd->param_type =
2723 						    htons(SCTP_CAUSE_UNRECOG_CHUNK);
2724 						phd->param_length =
2725 						    htons(chk_length + sizeof(*phd));
2726 						SCTP_BUF_LEN(merr) = sizeof(*phd);
2727 						SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2728 						if (SCTP_BUF_NEXT(merr)) {
2729 							if (sctp_pad_lastmbuf(SCTP_BUF_NEXT(merr), SCTP_SIZE32(chk_length) - chk_length, NULL)) {
2730 								sctp_m_freem(merr);
2731 							} else {
2732 								sctp_queue_op_err(stcb, merr);
2733 							}
2734 						} else {
2735 							sctp_m_freem(merr);
2736 						}
2737 					}
2738 				}
2739 				if ((ch->ch.chunk_type & 0x80) == 0) {
2740 					/* discard the rest of this packet */
2741 					stop_proc = 1;
2742 				}	/* else skip this bad chunk and
2743 					 * continue... */
2744 				break;
2745 			}	/* switch of chunk type */
2746 		}
2747 		*offset += SCTP_SIZE32(chk_length);
2748 		if ((*offset >= length) || stop_proc) {
2749 			/* no more data left in the mbuf chain */
2750 			stop_proc = 1;
2751 			continue;
2752 		}
2753 		ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2754 		    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2755 		if (ch == NULL) {
2756 			*offset = length;
2757 			stop_proc = 1;
2758 			continue;
2759 		}
2760 	}
2761 	if (break_flag) {
2762 		/*
2763 		 * we need to report rwnd overrun drops.
2764 		 */
2765 		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2766 	}
2767 	if (num_chunks) {
2768 		/*
2769 		 * Did we get data, if so update the time for auto-close and
2770 		 * give peer credit for being alive.
2771 		 */
2772 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2773 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2774 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2775 			    stcb->asoc.overall_error_count,
2776 			    0,
2777 			    SCTP_FROM_SCTP_INDATA,
2778 			    __LINE__);
2779 		}
2780 		stcb->asoc.overall_error_count = 0;
2781 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2782 	}
2783 	/* now service all of the reassm queue if needed */
2784 	if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
2785 		sctp_service_queues(stcb, asoc);
2786 
2787 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2788 		/* Assure that we ack right away */
2789 		stcb->asoc.send_sack = 1;
2790 	}
2791 	/* Start a sack timer or QUEUE a SACK for sending */
2792 	sctp_sack_check(stcb, was_a_gap);
2793 	return (0);
2794 }
2795 
2796 static int
2797 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2798     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2799     int *num_frs,
2800     uint32_t * biggest_newly_acked_tsn,
2801     uint32_t * this_sack_lowest_newack,
2802     int *rto_ok)
2803 {
2804 	struct sctp_tmit_chunk *tp1;
2805 	unsigned int theTSN;
2806 	int j, wake_him = 0, circled = 0;
2807 
2808 	/* Recover the tp1 we last saw */
2809 	tp1 = *p_tp1;
2810 	if (tp1 == NULL) {
2811 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2812 	}
2813 	for (j = frag_strt; j <= frag_end; j++) {
2814 		theTSN = j + last_tsn;
2815 		while (tp1) {
2816 			if (tp1->rec.data.doing_fast_retransmit)
2817 				(*num_frs) += 1;
2818 
2819 			/*-
2820 			 * CMT: CUCv2 algorithm. For each TSN being
2821 			 * processed from the sent queue, track the
2822 			 * next expected pseudo-cumack, or
2823 			 * rtx_pseudo_cumack, if required. Separate
2824 			 * cumack trackers for first transmissions,
2825 			 * and retransmissions.
2826 			 */
2827 			if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2828 			    (tp1->snd_count == 1)) {
2829 				tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2830 				tp1->whoTo->find_pseudo_cumack = 0;
2831 			}
2832 			if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2833 			    (tp1->snd_count > 1)) {
2834 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2835 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2836 			}
2837 			if (tp1->rec.data.TSN_seq == theTSN) {
2838 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2839 					/*-
2840 					 * must be held until
2841 					 * cum-ack passes
2842 					 */
2843 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2844 						/*-
2845 						 * If it is less than RESEND, it is
2846 						 * now no-longer in flight.
2847 						 * Higher values may already be set
2848 						 * via previous Gap Ack Blocks...
2849 						 * i.e. ACKED or RESEND.
2850 						 */
2851 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2852 						    *biggest_newly_acked_tsn)) {
2853 							*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2854 						}
2855 						/*-
2856 						 * CMT: SFR algo (and HTNA) - set
2857 						 * saw_newack to 1 for dest being
2858 						 * newly acked. update
2859 						 * this_sack_highest_newack if
2860 						 * appropriate.
2861 						 */
2862 						if (tp1->rec.data.chunk_was_revoked == 0)
2863 							tp1->whoTo->saw_newack = 1;
2864 
2865 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2866 						    tp1->whoTo->this_sack_highest_newack)) {
2867 							tp1->whoTo->this_sack_highest_newack =
2868 							    tp1->rec.data.TSN_seq;
2869 						}
2870 						/*-
2871 						 * CMT DAC algo: also update
2872 						 * this_sack_lowest_newack
2873 						 */
2874 						if (*this_sack_lowest_newack == 0) {
2875 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2876 								sctp_log_sack(*this_sack_lowest_newack,
2877 								    last_tsn,
2878 								    tp1->rec.data.TSN_seq,
2879 								    0,
2880 								    0,
2881 								    SCTP_LOG_TSN_ACKED);
2882 							}
2883 							*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2884 						}
2885 						/*-
2886 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2887 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2888 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2889 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2890 						 * Separate pseudo_cumack trackers for first transmissions and
2891 						 * retransmissions.
2892 						 */
2893 						if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2894 							if (tp1->rec.data.chunk_was_revoked == 0) {
2895 								tp1->whoTo->new_pseudo_cumack = 1;
2896 							}
2897 							tp1->whoTo->find_pseudo_cumack = 1;
2898 						}
2899 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2900 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2901 						}
2902 						if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2903 							if (tp1->rec.data.chunk_was_revoked == 0) {
2904 								tp1->whoTo->new_pseudo_cumack = 1;
2905 							}
2906 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
2907 						}
2908 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2909 							sctp_log_sack(*biggest_newly_acked_tsn,
2910 							    last_tsn,
2911 							    tp1->rec.data.TSN_seq,
2912 							    frag_strt,
2913 							    frag_end,
2914 							    SCTP_LOG_TSN_ACKED);
2915 						}
2916 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2917 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2918 							    tp1->whoTo->flight_size,
2919 							    tp1->book_size,
2920 							    (uintptr_t) tp1->whoTo,
2921 							    tp1->rec.data.TSN_seq);
2922 						}
2923 						sctp_flight_size_decrease(tp1);
2924 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2925 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2926 							    tp1);
2927 						}
2928 						sctp_total_flight_decrease(stcb, tp1);
2929 
2930 						tp1->whoTo->net_ack += tp1->send_size;
2931 						if (tp1->snd_count < 2) {
2932 							/*-
2933 							 * True non-retransmited chunk
2934 							 */
2935 							tp1->whoTo->net_ack2 += tp1->send_size;
2936 
2937 							/*-
2938 							 * update RTO too ?
2939 							 */
2940 							if (tp1->do_rtt) {
2941 								if (*rto_ok) {
2942 									tp1->whoTo->RTO =
2943 									    sctp_calculate_rto(stcb,
2944 									    &stcb->asoc,
2945 									    tp1->whoTo,
2946 									    &tp1->sent_rcv_time,
2947 									    sctp_align_safe_nocopy,
2948 									    SCTP_RTT_FROM_DATA);
2949 									*rto_ok = 0;
2950 								}
2951 								if (tp1->whoTo->rto_needed == 0) {
2952 									tp1->whoTo->rto_needed = 1;
2953 								}
2954 								tp1->do_rtt = 0;
2955 							}
2956 						}
2957 					}
2958 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2959 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2960 						    stcb->asoc.this_sack_highest_gap)) {
2961 							stcb->asoc.this_sack_highest_gap =
2962 							    tp1->rec.data.TSN_seq;
2963 						}
2964 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2965 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2966 #ifdef SCTP_AUDITING_ENABLED
2967 							sctp_audit_log(0xB2,
2968 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2969 #endif
2970 						}
2971 					}
2972 					/*-
2973 					 * All chunks NOT UNSENT fall through here and are marked
2974 					 * (leave PR-SCTP ones that are to skip alone though)
2975 					 */
2976 					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
2977 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2978 						tp1->sent = SCTP_DATAGRAM_MARKED;
2979 					}
2980 					if (tp1->rec.data.chunk_was_revoked) {
2981 						/* deflate the cwnd */
2982 						tp1->whoTo->cwnd -= tp1->book_size;
2983 						tp1->rec.data.chunk_was_revoked = 0;
2984 					}
2985 					/* NR Sack code here */
2986 					if (nr_sacking &&
2987 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2988 						if (stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
2989 							stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues--;
2990 #ifdef INVARIANTS
2991 						} else {
2992 							panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
2993 #endif
2994 						}
2995 						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
2996 						if (tp1->data) {
2997 							/*
2998 							 * sa_ignore
2999 							 * NO_NULL_CHK
3000 							 */
3001 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3002 							sctp_m_freem(tp1->data);
3003 							tp1->data = NULL;
3004 						}
3005 						wake_him++;
3006 					}
3007 				}
3008 				break;
3009 			}	/* if (tp1->TSN_seq == theTSN) */
3010 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
3011 				break;
3012 			}
3013 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3014 			if ((tp1 == NULL) && (circled == 0)) {
3015 				circled++;
3016 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3017 			}
3018 		}		/* end while (tp1) */
3019 		if (tp1 == NULL) {
3020 			circled = 0;
3021 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3022 		}
3023 		/* In case the fragments were not in order we must reset */
3024 	}			/* end for (j = fragStart */
3025 	*p_tp1 = tp1;
3026 	return (wake_him);	/* Return value only used for nr-sack */
3027 }
3028 
3029 
3030 static int
3031 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3032     uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3033     uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3034     int num_seg, int num_nr_seg, int *rto_ok)
3035 {
3036 	struct sctp_gap_ack_block *frag, block;
3037 	struct sctp_tmit_chunk *tp1;
3038 	int i;
3039 	int num_frs = 0;
3040 	int chunk_freed;
3041 	int non_revocable;
3042 	uint16_t frag_strt, frag_end, prev_frag_end;
3043 
3044 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3045 	prev_frag_end = 0;
3046 	chunk_freed = 0;
3047 
3048 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3049 		if (i == num_seg) {
3050 			prev_frag_end = 0;
3051 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3052 		}
3053 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3054 		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3055 		*offset += sizeof(block);
3056 		if (frag == NULL) {
3057 			return (chunk_freed);
3058 		}
3059 		frag_strt = ntohs(frag->start);
3060 		frag_end = ntohs(frag->end);
3061 
3062 		if (frag_strt > frag_end) {
3063 			/* This gap report is malformed, skip it. */
3064 			continue;
3065 		}
3066 		if (frag_strt <= prev_frag_end) {
3067 			/* This gap report is not in order, so restart. */
3068 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3069 		}
3070 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3071 			*biggest_tsn_acked = last_tsn + frag_end;
3072 		}
3073 		if (i < num_seg) {
3074 			non_revocable = 0;
3075 		} else {
3076 			non_revocable = 1;
3077 		}
3078 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3079 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3080 		    this_sack_lowest_newack, rto_ok)) {
3081 			chunk_freed = 1;
3082 		}
3083 		prev_frag_end = frag_end;
3084 	}
3085 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3086 		if (num_frs)
3087 			sctp_log_fr(*biggest_tsn_acked,
3088 			    *biggest_newly_acked_tsn,
3089 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3090 	}
3091 	return (chunk_freed);
3092 }
3093 
3094 static void
3095 sctp_check_for_revoked(struct sctp_tcb *stcb,
3096     struct sctp_association *asoc, uint32_t cumack,
3097     uint32_t biggest_tsn_acked)
3098 {
3099 	struct sctp_tmit_chunk *tp1;
3100 
3101 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3102 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
3103 			/*
3104 			 * ok this guy is either ACK or MARKED. If it is
3105 			 * ACKED it has been previously acked but not this
3106 			 * time i.e. revoked.  If it is MARKED it was ACK'ed
3107 			 * again.
3108 			 */
3109 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
3110 				break;
3111 			}
3112 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3113 				/* it has been revoked */
3114 				tp1->sent = SCTP_DATAGRAM_SENT;
3115 				tp1->rec.data.chunk_was_revoked = 1;
3116 				/*
3117 				 * We must add this stuff back in to assure
3118 				 * timers and such get started.
3119 				 */
3120 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3121 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3122 					    tp1->whoTo->flight_size,
3123 					    tp1->book_size,
3124 					    (uintptr_t) tp1->whoTo,
3125 					    tp1->rec.data.TSN_seq);
3126 				}
3127 				sctp_flight_size_increase(tp1);
3128 				sctp_total_flight_increase(stcb, tp1);
3129 				/*
3130 				 * We inflate the cwnd to compensate for our
3131 				 * artificial inflation of the flight_size.
3132 				 */
3133 				tp1->whoTo->cwnd += tp1->book_size;
3134 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3135 					sctp_log_sack(asoc->last_acked_seq,
3136 					    cumack,
3137 					    tp1->rec.data.TSN_seq,
3138 					    0,
3139 					    0,
3140 					    SCTP_LOG_TSN_REVOKED);
3141 				}
3142 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3143 				/* it has been re-acked in this SACK */
3144 				tp1->sent = SCTP_DATAGRAM_ACKED;
3145 			}
3146 		}
3147 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3148 			break;
3149 	}
3150 }
3151 
3152 
3153 static void
3154 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3155     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3156 {
3157 	struct sctp_tmit_chunk *tp1;
3158 	int strike_flag = 0;
3159 	struct timeval now;
3160 	int tot_retrans = 0;
3161 	uint32_t sending_seq;
3162 	struct sctp_nets *net;
3163 	int num_dests_sacked = 0;
3164 
3165 	/*
3166 	 * select the sending_seq, this is either the next thing ready to be
3167 	 * sent but not transmitted, OR, the next seq we assign.
3168 	 */
3169 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3170 	if (tp1 == NULL) {
3171 		sending_seq = asoc->sending_seq;
3172 	} else {
3173 		sending_seq = tp1->rec.data.TSN_seq;
3174 	}
3175 
3176 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3177 	if ((asoc->sctp_cmt_on_off > 0) &&
3178 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3179 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3180 			if (net->saw_newack)
3181 				num_dests_sacked++;
3182 		}
3183 	}
3184 	if (stcb->asoc.peer_supports_prsctp) {
3185 		(void)SCTP_GETTIME_TIMEVAL(&now);
3186 	}
3187 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3188 		strike_flag = 0;
3189 		if (tp1->no_fr_allowed) {
3190 			/* this one had a timeout or something */
3191 			continue;
3192 		}
3193 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3194 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3195 				sctp_log_fr(biggest_tsn_newly_acked,
3196 				    tp1->rec.data.TSN_seq,
3197 				    tp1->sent,
3198 				    SCTP_FR_LOG_CHECK_STRIKE);
3199 		}
3200 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
3201 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3202 			/* done */
3203 			break;
3204 		}
3205 		if (stcb->asoc.peer_supports_prsctp) {
3206 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3207 				/* Is it expired? */
3208 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3209 					/* Yes so drop it */
3210 					if (tp1->data != NULL) {
3211 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3212 						    SCTP_SO_NOT_LOCKED);
3213 					}
3214 					continue;
3215 				}
3216 			}
3217 		}
3218 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
3219 			/* we are beyond the tsn in the sack  */
3220 			break;
3221 		}
3222 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3223 			/* either a RESEND, ACKED, or MARKED */
3224 			/* skip */
3225 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3226 				/* Continue strikin FWD-TSN chunks */
3227 				tp1->rec.data.fwd_tsn_cnt++;
3228 			}
3229 			continue;
3230 		}
3231 		/*
3232 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3233 		 */
3234 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3235 			/*
3236 			 * No new acks were receieved for data sent to this
3237 			 * dest. Therefore, according to the SFR algo for
3238 			 * CMT, no data sent to this dest can be marked for
3239 			 * FR using this SACK.
3240 			 */
3241 			continue;
3242 		} else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3243 		    tp1->whoTo->this_sack_highest_newack)) {
3244 			/*
3245 			 * CMT: New acks were receieved for data sent to
3246 			 * this dest. But no new acks were seen for data
3247 			 * sent after tp1. Therefore, according to the SFR
3248 			 * algo for CMT, tp1 cannot be marked for FR using
3249 			 * this SACK. This step covers part of the DAC algo
3250 			 * and the HTNA algo as well.
3251 			 */
3252 			continue;
3253 		}
3254 		/*
3255 		 * Here we check to see if we were have already done a FR
3256 		 * and if so we see if the biggest TSN we saw in the sack is
3257 		 * smaller than the recovery point. If so we don't strike
3258 		 * the tsn... otherwise we CAN strike the TSN.
3259 		 */
3260 		/*
3261 		 * @@@ JRI: Check for CMT if (accum_moved &&
3262 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3263 		 * 0)) {
3264 		 */
3265 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3266 			/*
3267 			 * Strike the TSN if in fast-recovery and cum-ack
3268 			 * moved.
3269 			 */
3270 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3271 				sctp_log_fr(biggest_tsn_newly_acked,
3272 				    tp1->rec.data.TSN_seq,
3273 				    tp1->sent,
3274 				    SCTP_FR_LOG_STRIKE_CHUNK);
3275 			}
3276 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3277 				tp1->sent++;
3278 			}
3279 			if ((asoc->sctp_cmt_on_off > 0) &&
3280 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3281 				/*
3282 				 * CMT DAC algorithm: If SACK flag is set to
3283 				 * 0, then lowest_newack test will not pass
3284 				 * because it would have been set to the
3285 				 * cumack earlier. If not already to be
3286 				 * rtx'd, If not a mixed sack and if tp1 is
3287 				 * not between two sacked TSNs, then mark by
3288 				 * one more. NOTE that we are marking by one
3289 				 * additional time since the SACK DAC flag
3290 				 * indicates that two packets have been
3291 				 * received after this missing TSN.
3292 				 */
3293 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3294 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3295 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3296 						sctp_log_fr(16 + num_dests_sacked,
3297 						    tp1->rec.data.TSN_seq,
3298 						    tp1->sent,
3299 						    SCTP_FR_LOG_STRIKE_CHUNK);
3300 					}
3301 					tp1->sent++;
3302 				}
3303 			}
3304 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3305 		    (asoc->sctp_cmt_on_off == 0)) {
3306 			/*
3307 			 * For those that have done a FR we must take
3308 			 * special consideration if we strike. I.e the
3309 			 * biggest_newly_acked must be higher than the
3310 			 * sending_seq at the time we did the FR.
3311 			 */
3312 			if (
3313 #ifdef SCTP_FR_TO_ALTERNATE
3314 			/*
3315 			 * If FR's go to new networks, then we must only do
3316 			 * this for singly homed asoc's. However if the FR's
3317 			 * go to the same network (Armando's work) then its
3318 			 * ok to FR multiple times.
3319 			 */
3320 			    (asoc->numnets < 2)
3321 #else
3322 			    (1)
3323 #endif
3324 			    ) {
3325 
3326 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3327 				    tp1->rec.data.fast_retran_tsn)) {
3328 					/*
3329 					 * Strike the TSN, since this ack is
3330 					 * beyond where things were when we
3331 					 * did a FR.
3332 					 */
3333 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3334 						sctp_log_fr(biggest_tsn_newly_acked,
3335 						    tp1->rec.data.TSN_seq,
3336 						    tp1->sent,
3337 						    SCTP_FR_LOG_STRIKE_CHUNK);
3338 					}
3339 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3340 						tp1->sent++;
3341 					}
3342 					strike_flag = 1;
3343 					if ((asoc->sctp_cmt_on_off > 0) &&
3344 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3345 						/*
3346 						 * CMT DAC algorithm: If
3347 						 * SACK flag is set to 0,
3348 						 * then lowest_newack test
3349 						 * will not pass because it
3350 						 * would have been set to
3351 						 * the cumack earlier. If
3352 						 * not already to be rtx'd,
3353 						 * If not a mixed sack and
3354 						 * if tp1 is not between two
3355 						 * sacked TSNs, then mark by
3356 						 * one more. NOTE that we
3357 						 * are marking by one
3358 						 * additional time since the
3359 						 * SACK DAC flag indicates
3360 						 * that two packets have
3361 						 * been received after this
3362 						 * missing TSN.
3363 						 */
3364 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3365 						    (num_dests_sacked == 1) &&
3366 						    SCTP_TSN_GT(this_sack_lowest_newack,
3367 						    tp1->rec.data.TSN_seq)) {
3368 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3369 								sctp_log_fr(32 + num_dests_sacked,
3370 								    tp1->rec.data.TSN_seq,
3371 								    tp1->sent,
3372 								    SCTP_FR_LOG_STRIKE_CHUNK);
3373 							}
3374 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3375 								tp1->sent++;
3376 							}
3377 						}
3378 					}
3379 				}
3380 			}
3381 			/*
3382 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3383 			 * algo covers HTNA.
3384 			 */
3385 		} else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3386 		    biggest_tsn_newly_acked)) {
3387 			/*
3388 			 * We don't strike these: This is the  HTNA
3389 			 * algorithm i.e. we don't strike If our TSN is
3390 			 * larger than the Highest TSN Newly Acked.
3391 			 */
3392 			;
3393 		} else {
3394 			/* Strike the TSN */
3395 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3396 				sctp_log_fr(biggest_tsn_newly_acked,
3397 				    tp1->rec.data.TSN_seq,
3398 				    tp1->sent,
3399 				    SCTP_FR_LOG_STRIKE_CHUNK);
3400 			}
3401 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3402 				tp1->sent++;
3403 			}
3404 			if ((asoc->sctp_cmt_on_off > 0) &&
3405 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3406 				/*
3407 				 * CMT DAC algorithm: If SACK flag is set to
3408 				 * 0, then lowest_newack test will not pass
3409 				 * because it would have been set to the
3410 				 * cumack earlier. If not already to be
3411 				 * rtx'd, If not a mixed sack and if tp1 is
3412 				 * not between two sacked TSNs, then mark by
3413 				 * one more. NOTE that we are marking by one
3414 				 * additional time since the SACK DAC flag
3415 				 * indicates that two packets have been
3416 				 * received after this missing TSN.
3417 				 */
3418 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3419 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3420 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3421 						sctp_log_fr(48 + num_dests_sacked,
3422 						    tp1->rec.data.TSN_seq,
3423 						    tp1->sent,
3424 						    SCTP_FR_LOG_STRIKE_CHUNK);
3425 					}
3426 					tp1->sent++;
3427 				}
3428 			}
3429 		}
3430 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3431 			struct sctp_nets *alt;
3432 
3433 			/* fix counts and things */
3434 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3435 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3436 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3437 				    tp1->book_size,
3438 				    (uintptr_t) tp1->whoTo,
3439 				    tp1->rec.data.TSN_seq);
3440 			}
3441 			if (tp1->whoTo) {
3442 				tp1->whoTo->net_ack++;
3443 				sctp_flight_size_decrease(tp1);
3444 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3445 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3446 					    tp1);
3447 				}
3448 			}
3449 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3450 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3451 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3452 			}
3453 			/* add back to the rwnd */
3454 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3455 
3456 			/* remove from the total flight */
3457 			sctp_total_flight_decrease(stcb, tp1);
3458 
3459 			if ((stcb->asoc.peer_supports_prsctp) &&
3460 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3461 				/*
3462 				 * Has it been retransmitted tv_sec times? -
3463 				 * we store the retran count there.
3464 				 */
3465 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3466 					/* Yes, so drop it */
3467 					if (tp1->data != NULL) {
3468 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3469 						    SCTP_SO_NOT_LOCKED);
3470 					}
3471 					/* Make sure to flag we had a FR */
3472 					tp1->whoTo->net_ack++;
3473 					continue;
3474 				}
3475 			}
3476 			/*
3477 			 * SCTP_PRINTF("OK, we are now ready to FR this
3478 			 * guy\n");
3479 			 */
3480 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3481 				sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3482 				    0, SCTP_FR_MARKED);
3483 			}
3484 			if (strike_flag) {
3485 				/* This is a subsequent FR */
3486 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3487 			}
3488 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3489 			if (asoc->sctp_cmt_on_off > 0) {
3490 				/*
3491 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3492 				 * If CMT is being used, then pick dest with
3493 				 * largest ssthresh for any retransmission.
3494 				 */
3495 				tp1->no_fr_allowed = 1;
3496 				alt = tp1->whoTo;
3497 				/* sa_ignore NO_NULL_CHK */
3498 				if (asoc->sctp_cmt_pf > 0) {
3499 					/*
3500 					 * JRS 5/18/07 - If CMT PF is on,
3501 					 * use the PF version of
3502 					 * find_alt_net()
3503 					 */
3504 					alt = sctp_find_alternate_net(stcb, alt, 2);
3505 				} else {
3506 					/*
3507 					 * JRS 5/18/07 - If only CMT is on,
3508 					 * use the CMT version of
3509 					 * find_alt_net()
3510 					 */
3511 					/* sa_ignore NO_NULL_CHK */
3512 					alt = sctp_find_alternate_net(stcb, alt, 1);
3513 				}
3514 				if (alt == NULL) {
3515 					alt = tp1->whoTo;
3516 				}
3517 				/*
3518 				 * CUCv2: If a different dest is picked for
3519 				 * the retransmission, then new
3520 				 * (rtx-)pseudo_cumack needs to be tracked
3521 				 * for orig dest. Let CUCv2 track new (rtx-)
3522 				 * pseudo-cumack always.
3523 				 */
3524 				if (tp1->whoTo) {
3525 					tp1->whoTo->find_pseudo_cumack = 1;
3526 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3527 				}
3528 			} else {/* CMT is OFF */
3529 
3530 #ifdef SCTP_FR_TO_ALTERNATE
3531 				/* Can we find an alternate? */
3532 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3533 #else
3534 				/*
3535 				 * default behavior is to NOT retransmit
3536 				 * FR's to an alternate. Armando Caro's
3537 				 * paper details why.
3538 				 */
3539 				alt = tp1->whoTo;
3540 #endif
3541 			}
3542 
3543 			tp1->rec.data.doing_fast_retransmit = 1;
3544 			tot_retrans++;
3545 			/* mark the sending seq for possible subsequent FR's */
3546 			/*
3547 			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3548 			 * (uint32_t)tpi->rec.data.TSN_seq);
3549 			 */
3550 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3551 				/*
3552 				 * If the queue of send is empty then its
3553 				 * the next sequence number that will be
3554 				 * assigned so we subtract one from this to
3555 				 * get the one we last sent.
3556 				 */
3557 				tp1->rec.data.fast_retran_tsn = sending_seq;
3558 			} else {
3559 				/*
3560 				 * If there are chunks on the send queue
3561 				 * (unsent data that has made it from the
3562 				 * stream queues but not out the door, we
3563 				 * take the first one (which will have the
3564 				 * lowest TSN) and subtract one to get the
3565 				 * one we last sent.
3566 				 */
3567 				struct sctp_tmit_chunk *ttt;
3568 
3569 				ttt = TAILQ_FIRST(&asoc->send_queue);
3570 				tp1->rec.data.fast_retran_tsn =
3571 				    ttt->rec.data.TSN_seq;
3572 			}
3573 
3574 			if (tp1->do_rtt) {
3575 				/*
3576 				 * this guy had a RTO calculation pending on
3577 				 * it, cancel it
3578 				 */
3579 				if ((tp1->whoTo != NULL) &&
3580 				    (tp1->whoTo->rto_needed == 0)) {
3581 					tp1->whoTo->rto_needed = 1;
3582 				}
3583 				tp1->do_rtt = 0;
3584 			}
3585 			if (alt != tp1->whoTo) {
3586 				/* yes, there is an alternate. */
3587 				sctp_free_remote_addr(tp1->whoTo);
3588 				/* sa_ignore FREED_MEMORY */
3589 				tp1->whoTo = alt;
3590 				atomic_add_int(&alt->ref_count, 1);
3591 			}
3592 		}
3593 	}
3594 }
3595 
3596 struct sctp_tmit_chunk *
3597 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3598     struct sctp_association *asoc)
3599 {
3600 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3601 	struct timeval now;
3602 	int now_filled = 0;
3603 
3604 	if (asoc->peer_supports_prsctp == 0) {
3605 		return (NULL);
3606 	}
3607 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3608 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3609 		    tp1->sent != SCTP_DATAGRAM_RESEND &&
3610 		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3611 			/* no chance to advance, out of here */
3612 			break;
3613 		}
3614 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3615 			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3616 			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3617 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3618 				    asoc->advanced_peer_ack_point,
3619 				    tp1->rec.data.TSN_seq, 0, 0);
3620 			}
3621 		}
3622 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3623 			/*
3624 			 * We can't fwd-tsn past any that are reliable aka
3625 			 * retransmitted until the asoc fails.
3626 			 */
3627 			break;
3628 		}
3629 		if (!now_filled) {
3630 			(void)SCTP_GETTIME_TIMEVAL(&now);
3631 			now_filled = 1;
3632 		}
3633 		/*
3634 		 * now we got a chunk which is marked for another
3635 		 * retransmission to a PR-stream but has run out its chances
3636 		 * already maybe OR has been marked to skip now. Can we skip
3637 		 * it if its a resend?
3638 		 */
3639 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3640 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3641 			/*
3642 			 * Now is this one marked for resend and its time is
3643 			 * now up?
3644 			 */
3645 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3646 				/* Yes so drop it */
3647 				if (tp1->data) {
3648 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3649 					    1, SCTP_SO_NOT_LOCKED);
3650 				}
3651 			} else {
3652 				/*
3653 				 * No, we are done when hit one for resend
3654 				 * whos time as not expired.
3655 				 */
3656 				break;
3657 			}
3658 		}
3659 		/*
3660 		 * Ok now if this chunk is marked to drop it we can clean up
3661 		 * the chunk, advance our peer ack point and we can check
3662 		 * the next chunk.
3663 		 */
3664 		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3665 		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3666 			/* advance PeerAckPoint goes forward */
3667 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
3668 				asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3669 				a_adv = tp1;
3670 			} else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3671 				/* No update but we do save the chk */
3672 				a_adv = tp1;
3673 			}
3674 		} else {
3675 			/*
3676 			 * If it is still in RESEND we can advance no
3677 			 * further
3678 			 */
3679 			break;
3680 		}
3681 	}
3682 	return (a_adv);
3683 }
3684 
3685 static int
3686 sctp_fs_audit(struct sctp_association *asoc)
3687 {
3688 	struct sctp_tmit_chunk *chk;
3689 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3690 	int entry_flight, entry_cnt, ret;
3691 
3692 	entry_flight = asoc->total_flight;
3693 	entry_cnt = asoc->total_flight_count;
3694 	ret = 0;
3695 
3696 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3697 		return (0);
3698 
3699 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3700 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3701 			SCTP_PRINTF("Chk TSN:%u size:%d inflight cnt:%d\n",
3702 			    chk->rec.data.TSN_seq,
3703 			    chk->send_size,
3704 			    chk->snd_count);
3705 			inflight++;
3706 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3707 			resend++;
3708 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3709 			inbetween++;
3710 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3711 			above++;
3712 		} else {
3713 			acked++;
3714 		}
3715 	}
3716 
3717 	if ((inflight > 0) || (inbetween > 0)) {
3718 #ifdef INVARIANTS
3719 		panic("Flight size-express incorrect? \n");
3720 #else
3721 		SCTP_PRINTF("asoc->total_flight:%d cnt:%d\n",
3722 		    entry_flight, entry_cnt);
3723 
3724 		SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
3725 		    inflight, inbetween, resend, above, acked);
3726 		ret = 1;
3727 #endif
3728 	}
3729 	return (ret);
3730 }
3731 
3732 
3733 static void
3734 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3735     struct sctp_association *asoc,
3736     struct sctp_tmit_chunk *tp1)
3737 {
3738 	tp1->window_probe = 0;
3739 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3740 		/* TSN's skipped we do NOT move back. */
3741 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3742 		    tp1->whoTo->flight_size,
3743 		    tp1->book_size,
3744 		    (uintptr_t) tp1->whoTo,
3745 		    tp1->rec.data.TSN_seq);
3746 		return;
3747 	}
3748 	/* First setup this by shrinking flight */
3749 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3750 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3751 		    tp1);
3752 	}
3753 	sctp_flight_size_decrease(tp1);
3754 	sctp_total_flight_decrease(stcb, tp1);
3755 	/* Now mark for resend */
3756 	tp1->sent = SCTP_DATAGRAM_RESEND;
3757 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3758 
3759 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3760 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3761 		    tp1->whoTo->flight_size,
3762 		    tp1->book_size,
3763 		    (uintptr_t) tp1->whoTo,
3764 		    tp1->rec.data.TSN_seq);
3765 	}
3766 }
3767 
3768 void
3769 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3770     uint32_t rwnd, int *abort_now, int ecne_seen)
3771 {
3772 	struct sctp_nets *net;
3773 	struct sctp_association *asoc;
3774 	struct sctp_tmit_chunk *tp1, *tp2;
3775 	uint32_t old_rwnd;
3776 	int win_probe_recovery = 0;
3777 	int win_probe_recovered = 0;
3778 	int j, done_once = 0;
3779 	int rto_ok = 1;
3780 
3781 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3782 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3783 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3784 	}
3785 	SCTP_TCB_LOCK_ASSERT(stcb);
3786 #ifdef SCTP_ASOCLOG_OF_TSNS
3787 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3788 	stcb->asoc.cumack_log_at++;
3789 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3790 		stcb->asoc.cumack_log_at = 0;
3791 	}
3792 #endif
3793 	asoc = &stcb->asoc;
3794 	old_rwnd = asoc->peers_rwnd;
3795 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3796 		/* old ack */
3797 		return;
3798 	} else if (asoc->last_acked_seq == cumack) {
3799 		/* Window update sack */
3800 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3801 		    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3802 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3803 			/* SWS sender side engages */
3804 			asoc->peers_rwnd = 0;
3805 		}
3806 		if (asoc->peers_rwnd > old_rwnd) {
3807 			goto again;
3808 		}
3809 		return;
3810 	}
3811 	/* First setup for CC stuff */
3812 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3813 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3814 			/* Drag along the window_tsn for cwr's */
3815 			net->cwr_window_tsn = cumack;
3816 		}
3817 		net->prev_cwnd = net->cwnd;
3818 		net->net_ack = 0;
3819 		net->net_ack2 = 0;
3820 
3821 		/*
3822 		 * CMT: Reset CUC and Fast recovery algo variables before
3823 		 * SACK processing
3824 		 */
3825 		net->new_pseudo_cumack = 0;
3826 		net->will_exit_fast_recovery = 0;
3827 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3828 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3829 		}
3830 	}
3831 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3832 		uint32_t send_s;
3833 
3834 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3835 			tp1 = TAILQ_LAST(&asoc->sent_queue,
3836 			    sctpchunk_listhead);
3837 			send_s = tp1->rec.data.TSN_seq + 1;
3838 		} else {
3839 			send_s = asoc->sending_seq;
3840 		}
3841 		if (SCTP_TSN_GE(cumack, send_s)) {
3842 #ifndef INVARIANTS
3843 			struct mbuf *oper;
3844 
3845 #endif
3846 #ifdef INVARIANTS
3847 			panic("Impossible sack 1");
3848 #else
3849 
3850 			*abort_now = 1;
3851 			/* XXX */
3852 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
3853 			    0, M_NOWAIT, 1, MT_DATA);
3854 			if (oper) {
3855 				struct sctp_paramhdr *ph;
3856 				uint32_t *ippp;
3857 
3858 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
3859 				    sizeof(uint32_t);
3860 				ph = mtod(oper, struct sctp_paramhdr *);
3861 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
3862 				ph->param_length = htons(SCTP_BUF_LEN(oper));
3863 				ippp = (uint32_t *) (ph + 1);
3864 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
3865 			}
3866 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
3867 			sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
3868 			return;
3869 #endif
3870 		}
3871 	}
3872 	asoc->this_sack_highest_gap = cumack;
3873 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3874 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3875 		    stcb->asoc.overall_error_count,
3876 		    0,
3877 		    SCTP_FROM_SCTP_INDATA,
3878 		    __LINE__);
3879 	}
3880 	stcb->asoc.overall_error_count = 0;
3881 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3882 		/* process the new consecutive TSN first */
3883 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3884 			if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
3885 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3886 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
3887 				}
3888 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3889 					/*
3890 					 * If it is less than ACKED, it is
3891 					 * now no-longer in flight. Higher
3892 					 * values may occur during marking
3893 					 */
3894 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3895 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3896 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3897 							    tp1->whoTo->flight_size,
3898 							    tp1->book_size,
3899 							    (uintptr_t) tp1->whoTo,
3900 							    tp1->rec.data.TSN_seq);
3901 						}
3902 						sctp_flight_size_decrease(tp1);
3903 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3904 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3905 							    tp1);
3906 						}
3907 						/* sa_ignore NO_NULL_CHK */
3908 						sctp_total_flight_decrease(stcb, tp1);
3909 					}
3910 					tp1->whoTo->net_ack += tp1->send_size;
3911 					if (tp1->snd_count < 2) {
3912 						/*
3913 						 * True non-retransmited
3914 						 * chunk
3915 						 */
3916 						tp1->whoTo->net_ack2 +=
3917 						    tp1->send_size;
3918 
3919 						/* update RTO too? */
3920 						if (tp1->do_rtt) {
3921 							if (rto_ok) {
3922 								tp1->whoTo->RTO =
3923 								/*
3924 								 * sa_ignore
3925 								 * NO_NULL_CH
3926 								 * K
3927 								 */
3928 								    sctp_calculate_rto(stcb,
3929 								    asoc, tp1->whoTo,
3930 								    &tp1->sent_rcv_time,
3931 								    sctp_align_safe_nocopy,
3932 								    SCTP_RTT_FROM_DATA);
3933 								rto_ok = 0;
3934 							}
3935 							if (tp1->whoTo->rto_needed == 0) {
3936 								tp1->whoTo->rto_needed = 1;
3937 							}
3938 							tp1->do_rtt = 0;
3939 						}
3940 					}
3941 					/*
3942 					 * CMT: CUCv2 algorithm. From the
3943 					 * cumack'd TSNs, for each TSN being
3944 					 * acked for the first time, set the
3945 					 * following variables for the
3946 					 * corresp destination.
3947 					 * new_pseudo_cumack will trigger a
3948 					 * cwnd update.
3949 					 * find_(rtx_)pseudo_cumack will
3950 					 * trigger search for the next
3951 					 * expected (rtx-)pseudo-cumack.
3952 					 */
3953 					tp1->whoTo->new_pseudo_cumack = 1;
3954 					tp1->whoTo->find_pseudo_cumack = 1;
3955 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3956 
3957 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3958 						/* sa_ignore NO_NULL_CHK */
3959 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3960 					}
3961 				}
3962 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3963 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3964 				}
3965 				if (tp1->rec.data.chunk_was_revoked) {
3966 					/* deflate the cwnd */
3967 					tp1->whoTo->cwnd -= tp1->book_size;
3968 					tp1->rec.data.chunk_was_revoked = 0;
3969 				}
3970 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3971 					if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
3972 						asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
3973 #ifdef INVARIANTS
3974 					} else {
3975 						panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
3976 #endif
3977 					}
3978 				}
3979 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3980 				if (tp1->data) {
3981 					/* sa_ignore NO_NULL_CHK */
3982 					sctp_free_bufspace(stcb, asoc, tp1, 1);
3983 					sctp_m_freem(tp1->data);
3984 					tp1->data = NULL;
3985 				}
3986 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3987 					sctp_log_sack(asoc->last_acked_seq,
3988 					    cumack,
3989 					    tp1->rec.data.TSN_seq,
3990 					    0,
3991 					    0,
3992 					    SCTP_LOG_FREE_SENT);
3993 				}
3994 				asoc->sent_queue_cnt--;
3995 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
3996 			} else {
3997 				break;
3998 			}
3999 		}
4000 
4001 	}
4002 	/* sa_ignore NO_NULL_CHK */
4003 	if (stcb->sctp_socket) {
4004 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4005 		struct socket *so;
4006 
4007 #endif
4008 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4009 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4010 			/* sa_ignore NO_NULL_CHK */
4011 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4012 		}
4013 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4014 		so = SCTP_INP_SO(stcb->sctp_ep);
4015 		atomic_add_int(&stcb->asoc.refcnt, 1);
4016 		SCTP_TCB_UNLOCK(stcb);
4017 		SCTP_SOCKET_LOCK(so, 1);
4018 		SCTP_TCB_LOCK(stcb);
4019 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4020 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4021 			/* assoc was freed while we were unlocked */
4022 			SCTP_SOCKET_UNLOCK(so, 1);
4023 			return;
4024 		}
4025 #endif
4026 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4027 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4028 		SCTP_SOCKET_UNLOCK(so, 1);
4029 #endif
4030 	} else {
4031 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4032 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4033 		}
4034 	}
4035 
4036 	/* JRS - Use the congestion control given in the CC module */
4037 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4038 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4039 			if (net->net_ack2 > 0) {
4040 				/*
4041 				 * Karn's rule applies to clearing error
4042 				 * count, this is optional.
4043 				 */
4044 				net->error_count = 0;
4045 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4046 					/* addr came good */
4047 					net->dest_state |= SCTP_ADDR_REACHABLE;
4048 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4049 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4050 				}
4051 				if (net == stcb->asoc.primary_destination) {
4052 					if (stcb->asoc.alternate) {
4053 						/*
4054 						 * release the alternate,
4055 						 * primary is good
4056 						 */
4057 						sctp_free_remote_addr(stcb->asoc.alternate);
4058 						stcb->asoc.alternate = NULL;
4059 					}
4060 				}
4061 				if (net->dest_state & SCTP_ADDR_PF) {
4062 					net->dest_state &= ~SCTP_ADDR_PF;
4063 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
4064 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4065 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4066 					/* Done with this net */
4067 					net->net_ack = 0;
4068 				}
4069 				/* restore any doubled timers */
4070 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4071 				if (net->RTO < stcb->asoc.minrto) {
4072 					net->RTO = stcb->asoc.minrto;
4073 				}
4074 				if (net->RTO > stcb->asoc.maxrto) {
4075 					net->RTO = stcb->asoc.maxrto;
4076 				}
4077 			}
4078 		}
4079 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4080 	}
4081 	asoc->last_acked_seq = cumack;
4082 
4083 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4084 		/* nothing left in-flight */
4085 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4086 			net->flight_size = 0;
4087 			net->partial_bytes_acked = 0;
4088 		}
4089 		asoc->total_flight = 0;
4090 		asoc->total_flight_count = 0;
4091 	}
4092 	/* RWND update */
4093 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4094 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4095 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4096 		/* SWS sender side engages */
4097 		asoc->peers_rwnd = 0;
4098 	}
4099 	if (asoc->peers_rwnd > old_rwnd) {
4100 		win_probe_recovery = 1;
4101 	}
4102 	/* Now assure a timer where data is queued at */
4103 again:
4104 	j = 0;
4105 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4106 		int to_ticks;
4107 
4108 		if (win_probe_recovery && (net->window_probe)) {
4109 			win_probe_recovered = 1;
4110 			/*
4111 			 * Find first chunk that was used with window probe
4112 			 * and clear the sent
4113 			 */
4114 			/* sa_ignore FREED_MEMORY */
4115 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4116 				if (tp1->window_probe) {
4117 					/* move back to data send queue */
4118 					sctp_window_probe_recovery(stcb, asoc, tp1);
4119 					break;
4120 				}
4121 			}
4122 		}
4123 		if (net->RTO == 0) {
4124 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4125 		} else {
4126 			to_ticks = MSEC_TO_TICKS(net->RTO);
4127 		}
4128 		if (net->flight_size) {
4129 			j++;
4130 			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4131 			    sctp_timeout_handler, &net->rxt_timer);
4132 			if (net->window_probe) {
4133 				net->window_probe = 0;
4134 			}
4135 		} else {
4136 			if (net->window_probe) {
4137 				/*
4138 				 * In window probes we must assure a timer
4139 				 * is still running there
4140 				 */
4141 				net->window_probe = 0;
4142 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4143 					SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4144 					    sctp_timeout_handler, &net->rxt_timer);
4145 				}
4146 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4147 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4148 				    stcb, net,
4149 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4150 			}
4151 		}
4152 	}
4153 	if ((j == 0) &&
4154 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4155 	    (asoc->sent_queue_retran_cnt == 0) &&
4156 	    (win_probe_recovered == 0) &&
4157 	    (done_once == 0)) {
4158 		/*
4159 		 * huh, this should not happen unless all packets are
4160 		 * PR-SCTP and marked to skip of course.
4161 		 */
4162 		if (sctp_fs_audit(asoc)) {
4163 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4164 				net->flight_size = 0;
4165 			}
4166 			asoc->total_flight = 0;
4167 			asoc->total_flight_count = 0;
4168 			asoc->sent_queue_retran_cnt = 0;
4169 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4170 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4171 					sctp_flight_size_increase(tp1);
4172 					sctp_total_flight_increase(stcb, tp1);
4173 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4174 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4175 				}
4176 			}
4177 		}
4178 		done_once = 1;
4179 		goto again;
4180 	}
4181 	/**********************************/
4182 	/* Now what about shutdown issues */
4183 	/**********************************/
4184 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4185 		/* nothing left on sendqueue.. consider done */
4186 		/* clean up */
4187 		if ((asoc->stream_queue_cnt == 1) &&
4188 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4189 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4190 		    (asoc->locked_on_sending)
4191 		    ) {
4192 			struct sctp_stream_queue_pending *sp;
4193 
4194 			/*
4195 			 * I may be in a state where we got all across.. but
4196 			 * cannot write more due to a shutdown... we abort
4197 			 * since the user did not indicate EOR in this case.
4198 			 * The sp will be cleaned during free of the asoc.
4199 			 */
4200 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4201 			    sctp_streamhead);
4202 			if ((sp) && (sp->length == 0)) {
4203 				/* Let cleanup code purge it */
4204 				if (sp->msg_is_complete) {
4205 					asoc->stream_queue_cnt--;
4206 				} else {
4207 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4208 					asoc->locked_on_sending = NULL;
4209 					asoc->stream_queue_cnt--;
4210 				}
4211 			}
4212 		}
4213 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4214 		    (asoc->stream_queue_cnt == 0)) {
4215 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4216 				/* Need to abort here */
4217 				struct mbuf *oper;
4218 
4219 		abort_out_now:
4220 				*abort_now = 1;
4221 				/* XXX */
4222 				oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
4223 				    0, M_NOWAIT, 1, MT_DATA);
4224 				if (oper) {
4225 					struct sctp_paramhdr *ph;
4226 
4227 					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr);
4228 					ph = mtod(oper, struct sctp_paramhdr *);
4229 					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4230 					ph->param_length = htons(SCTP_BUF_LEN(oper));
4231 				}
4232 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4233 				sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
4234 			} else {
4235 				struct sctp_nets *netp;
4236 
4237 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4238 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4239 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4240 				}
4241 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4242 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4243 				sctp_stop_timers_for_shutdown(stcb);
4244 				if (asoc->alternate) {
4245 					netp = asoc->alternate;
4246 				} else {
4247 					netp = asoc->primary_destination;
4248 				}
4249 				sctp_send_shutdown(stcb, netp);
4250 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4251 				    stcb->sctp_ep, stcb, netp);
4252 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4253 				    stcb->sctp_ep, stcb, netp);
4254 			}
4255 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4256 		    (asoc->stream_queue_cnt == 0)) {
4257 			struct sctp_nets *netp;
4258 
4259 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4260 				goto abort_out_now;
4261 			}
4262 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4263 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4264 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4265 			sctp_stop_timers_for_shutdown(stcb);
4266 			if (asoc->alternate) {
4267 				netp = asoc->alternate;
4268 			} else {
4269 				netp = asoc->primary_destination;
4270 			}
4271 			sctp_send_shutdown_ack(stcb, netp);
4272 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4273 			    stcb->sctp_ep, stcb, netp);
4274 		}
4275 	}
4276 	/*********************************************/
4277 	/* Here we perform PR-SCTP procedures        */
4278 	/* (section 4.2)                             */
4279 	/*********************************************/
4280 	/* C1. update advancedPeerAckPoint */
4281 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4282 		asoc->advanced_peer_ack_point = cumack;
4283 	}
4284 	/* PR-Sctp issues need to be addressed too */
4285 	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4286 		struct sctp_tmit_chunk *lchk;
4287 		uint32_t old_adv_peer_ack_point;
4288 
4289 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4290 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4291 		/* C3. See if we need to send a Fwd-TSN */
4292 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4293 			/*
4294 			 * ISSUE with ECN, see FWD-TSN processing.
4295 			 */
4296 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4297 				send_forward_tsn(stcb, asoc);
4298 			} else if (lchk) {
4299 				/* try to FR fwd-tsn's that get lost too */
4300 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4301 					send_forward_tsn(stcb, asoc);
4302 				}
4303 			}
4304 		}
4305 		if (lchk) {
4306 			/* Assure a timer is up */
4307 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4308 			    stcb->sctp_ep, stcb, lchk->whoTo);
4309 		}
4310 	}
4311 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4312 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4313 		    rwnd,
4314 		    stcb->asoc.peers_rwnd,
4315 		    stcb->asoc.total_flight,
4316 		    stcb->asoc.total_output_queue_size);
4317 	}
4318 }
4319 
4320 void
4321 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4322     struct sctp_tcb *stcb,
4323     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4324     int *abort_now, uint8_t flags,
4325     uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4326 {
4327 	struct sctp_association *asoc;
4328 	struct sctp_tmit_chunk *tp1, *tp2;
4329 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4330 	uint16_t wake_him = 0;
4331 	uint32_t send_s = 0;
4332 	long j;
4333 	int accum_moved = 0;
4334 	int will_exit_fast_recovery = 0;
4335 	uint32_t a_rwnd, old_rwnd;
4336 	int win_probe_recovery = 0;
4337 	int win_probe_recovered = 0;
4338 	struct sctp_nets *net = NULL;
4339 	int done_once;
4340 	int rto_ok = 1;
4341 	uint8_t reneged_all = 0;
4342 	uint8_t cmt_dac_flag;
4343 
4344 	/*
4345 	 * we take any chance we can to service our queues since we cannot
4346 	 * get awoken when the socket is read from :<
4347 	 */
4348 	/*
4349 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4350 	 * old sack, if so discard. 2) If there is nothing left in the send
4351 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4352 	 * too, update any rwnd change and verify no timers are running.
4353 	 * then return. 3) Process any new consequtive data i.e. cum-ack
4354 	 * moved process these first and note that it moved. 4) Process any
4355 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4356 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4357 	 * sync up flightsizes and things, stop all timers and also check
4358 	 * for shutdown_pending state. If so then go ahead and send off the
4359 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4360 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4361 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4362 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4363 	 * if in shutdown_recv state.
4364 	 */
4365 	SCTP_TCB_LOCK_ASSERT(stcb);
4366 	/* CMT DAC algo */
4367 	this_sack_lowest_newack = 0;
4368 	SCTP_STAT_INCR(sctps_slowpath_sack);
4369 	last_tsn = cum_ack;
4370 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4371 #ifdef SCTP_ASOCLOG_OF_TSNS
4372 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4373 	stcb->asoc.cumack_log_at++;
4374 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4375 		stcb->asoc.cumack_log_at = 0;
4376 	}
4377 #endif
4378 	a_rwnd = rwnd;
4379 
4380 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4381 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4382 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4383 	}
4384 	old_rwnd = stcb->asoc.peers_rwnd;
4385 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4386 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4387 		    stcb->asoc.overall_error_count,
4388 		    0,
4389 		    SCTP_FROM_SCTP_INDATA,
4390 		    __LINE__);
4391 	}
4392 	stcb->asoc.overall_error_count = 0;
4393 	asoc = &stcb->asoc;
4394 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4395 		sctp_log_sack(asoc->last_acked_seq,
4396 		    cum_ack,
4397 		    0,
4398 		    num_seg,
4399 		    num_dup,
4400 		    SCTP_LOG_NEW_SACK);
4401 	}
4402 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4403 		uint16_t i;
4404 		uint32_t *dupdata, dblock;
4405 
4406 		for (i = 0; i < num_dup; i++) {
4407 			dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4408 			    sizeof(uint32_t), (uint8_t *) & dblock);
4409 			if (dupdata == NULL) {
4410 				break;
4411 			}
4412 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4413 		}
4414 	}
4415 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4416 		/* reality check */
4417 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4418 			tp1 = TAILQ_LAST(&asoc->sent_queue,
4419 			    sctpchunk_listhead);
4420 			send_s = tp1->rec.data.TSN_seq + 1;
4421 		} else {
4422 			tp1 = NULL;
4423 			send_s = asoc->sending_seq;
4424 		}
4425 		if (SCTP_TSN_GE(cum_ack, send_s)) {
4426 			struct mbuf *oper;
4427 
4428 			/*
4429 			 * no way, we have not even sent this TSN out yet.
4430 			 * Peer is hopelessly messed up with us.
4431 			 */
4432 			SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4433 			    cum_ack, send_s);
4434 			if (tp1) {
4435 				SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1:%p\n",
4436 				    tp1->rec.data.TSN_seq, (void *)tp1);
4437 			}
4438 	hopeless_peer:
4439 			*abort_now = 1;
4440 			/* XXX */
4441 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4442 			    0, M_NOWAIT, 1, MT_DATA);
4443 			if (oper) {
4444 				struct sctp_paramhdr *ph;
4445 				uint32_t *ippp;
4446 
4447 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4448 				    sizeof(uint32_t);
4449 				ph = mtod(oper, struct sctp_paramhdr *);
4450 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4451 				ph->param_length = htons(SCTP_BUF_LEN(oper));
4452 				ippp = (uint32_t *) (ph + 1);
4453 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4454 			}
4455 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4456 			sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
4457 			return;
4458 		}
4459 	}
4460 	/**********************/
4461 	/* 1) check the range */
4462 	/**********************/
4463 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4464 		/* acking something behind */
4465 		return;
4466 	}
4467 	/* update the Rwnd of the peer */
4468 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4469 	    TAILQ_EMPTY(&asoc->send_queue) &&
4470 	    (asoc->stream_queue_cnt == 0)) {
4471 		/* nothing left on send/sent and strmq */
4472 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4473 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4474 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4475 		}
4476 		asoc->peers_rwnd = a_rwnd;
4477 		if (asoc->sent_queue_retran_cnt) {
4478 			asoc->sent_queue_retran_cnt = 0;
4479 		}
4480 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4481 			/* SWS sender side engages */
4482 			asoc->peers_rwnd = 0;
4483 		}
4484 		/* stop any timers */
4485 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4486 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4487 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4488 			net->partial_bytes_acked = 0;
4489 			net->flight_size = 0;
4490 		}
4491 		asoc->total_flight = 0;
4492 		asoc->total_flight_count = 0;
4493 		return;
4494 	}
4495 	/*
4496 	 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4497 	 * things. The total byte count acked is tracked in netAckSz AND
4498 	 * netAck2 is used to track the total bytes acked that are un-
4499 	 * amibguious and were never retransmitted. We track these on a per
4500 	 * destination address basis.
4501 	 */
4502 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4503 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4504 			/* Drag along the window_tsn for cwr's */
4505 			net->cwr_window_tsn = cum_ack;
4506 		}
4507 		net->prev_cwnd = net->cwnd;
4508 		net->net_ack = 0;
4509 		net->net_ack2 = 0;
4510 
4511 		/*
4512 		 * CMT: Reset CUC and Fast recovery algo variables before
4513 		 * SACK processing
4514 		 */
4515 		net->new_pseudo_cumack = 0;
4516 		net->will_exit_fast_recovery = 0;
4517 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4518 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4519 		}
4520 	}
4521 	/* process the new consecutive TSN first */
4522 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4523 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
4524 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4525 				accum_moved = 1;
4526 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4527 					/*
4528 					 * If it is less than ACKED, it is
4529 					 * now no-longer in flight. Higher
4530 					 * values may occur during marking
4531 					 */
4532 					if ((tp1->whoTo->dest_state &
4533 					    SCTP_ADDR_UNCONFIRMED) &&
4534 					    (tp1->snd_count < 2)) {
4535 						/*
4536 						 * If there was no retran
4537 						 * and the address is
4538 						 * un-confirmed and we sent
4539 						 * there and are now
4540 						 * sacked.. its confirmed,
4541 						 * mark it so.
4542 						 */
4543 						tp1->whoTo->dest_state &=
4544 						    ~SCTP_ADDR_UNCONFIRMED;
4545 					}
4546 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4547 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4548 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4549 							    tp1->whoTo->flight_size,
4550 							    tp1->book_size,
4551 							    (uintptr_t) tp1->whoTo,
4552 							    tp1->rec.data.TSN_seq);
4553 						}
4554 						sctp_flight_size_decrease(tp1);
4555 						sctp_total_flight_decrease(stcb, tp1);
4556 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4557 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4558 							    tp1);
4559 						}
4560 					}
4561 					tp1->whoTo->net_ack += tp1->send_size;
4562 
4563 					/* CMT SFR and DAC algos */
4564 					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4565 					tp1->whoTo->saw_newack = 1;
4566 
4567 					if (tp1->snd_count < 2) {
4568 						/*
4569 						 * True non-retransmited
4570 						 * chunk
4571 						 */
4572 						tp1->whoTo->net_ack2 +=
4573 						    tp1->send_size;
4574 
4575 						/* update RTO too? */
4576 						if (tp1->do_rtt) {
4577 							if (rto_ok) {
4578 								tp1->whoTo->RTO =
4579 								    sctp_calculate_rto(stcb,
4580 								    asoc, tp1->whoTo,
4581 								    &tp1->sent_rcv_time,
4582 								    sctp_align_safe_nocopy,
4583 								    SCTP_RTT_FROM_DATA);
4584 								rto_ok = 0;
4585 							}
4586 							if (tp1->whoTo->rto_needed == 0) {
4587 								tp1->whoTo->rto_needed = 1;
4588 							}
4589 							tp1->do_rtt = 0;
4590 						}
4591 					}
4592 					/*
4593 					 * CMT: CUCv2 algorithm. From the
4594 					 * cumack'd TSNs, for each TSN being
4595 					 * acked for the first time, set the
4596 					 * following variables for the
4597 					 * corresp destination.
4598 					 * new_pseudo_cumack will trigger a
4599 					 * cwnd update.
4600 					 * find_(rtx_)pseudo_cumack will
4601 					 * trigger search for the next
4602 					 * expected (rtx-)pseudo-cumack.
4603 					 */
4604 					tp1->whoTo->new_pseudo_cumack = 1;
4605 					tp1->whoTo->find_pseudo_cumack = 1;
4606 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4607 
4608 
4609 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4610 						sctp_log_sack(asoc->last_acked_seq,
4611 						    cum_ack,
4612 						    tp1->rec.data.TSN_seq,
4613 						    0,
4614 						    0,
4615 						    SCTP_LOG_TSN_ACKED);
4616 					}
4617 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4618 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4619 					}
4620 				}
4621 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4622 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4623 #ifdef SCTP_AUDITING_ENABLED
4624 					sctp_audit_log(0xB3,
4625 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4626 #endif
4627 				}
4628 				if (tp1->rec.data.chunk_was_revoked) {
4629 					/* deflate the cwnd */
4630 					tp1->whoTo->cwnd -= tp1->book_size;
4631 					tp1->rec.data.chunk_was_revoked = 0;
4632 				}
4633 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4634 					tp1->sent = SCTP_DATAGRAM_ACKED;
4635 				}
4636 			}
4637 		} else {
4638 			break;
4639 		}
4640 	}
4641 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4642 	/* always set this up to cum-ack */
4643 	asoc->this_sack_highest_gap = last_tsn;
4644 
4645 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4646 
4647 		/*
4648 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4649 		 * to be greater than the cumack. Also reset saw_newack to 0
4650 		 * for all dests.
4651 		 */
4652 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4653 			net->saw_newack = 0;
4654 			net->this_sack_highest_newack = last_tsn;
4655 		}
4656 
4657 		/*
4658 		 * thisSackHighestGap will increase while handling NEW
4659 		 * segments this_sack_highest_newack will increase while
4660 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4661 		 * used for CMT DAC algo. saw_newack will also change.
4662 		 */
4663 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4664 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4665 		    num_seg, num_nr_seg, &rto_ok)) {
4666 			wake_him++;
4667 		}
4668 		if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4669 			/*
4670 			 * validate the biggest_tsn_acked in the gap acks if
4671 			 * strict adherence is wanted.
4672 			 */
4673 			if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4674 				/*
4675 				 * peer is either confused or we are under
4676 				 * attack. We must abort.
4677 				 */
4678 				SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4679 				    biggest_tsn_acked, send_s);
4680 				goto hopeless_peer;
4681 			}
4682 		}
4683 	}
4684 	/*******************************************/
4685 	/* cancel ALL T3-send timer if accum moved */
4686 	/*******************************************/
4687 	if (asoc->sctp_cmt_on_off > 0) {
4688 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4689 			if (net->new_pseudo_cumack)
4690 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4691 				    stcb, net,
4692 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4693 
4694 		}
4695 	} else {
4696 		if (accum_moved) {
4697 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4698 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4699 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4700 			}
4701 		}
4702 	}
4703 	/********************************************/
4704 	/* drop the acked chunks from the sentqueue */
4705 	/********************************************/
4706 	asoc->last_acked_seq = cum_ack;
4707 
4708 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4709 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
4710 			break;
4711 		}
4712 		if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4713 			if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
4714 				asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
4715 #ifdef INVARIANTS
4716 			} else {
4717 				panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
4718 #endif
4719 			}
4720 		}
4721 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4722 		if (tp1->pr_sctp_on) {
4723 			if (asoc->pr_sctp_cnt != 0)
4724 				asoc->pr_sctp_cnt--;
4725 		}
4726 		asoc->sent_queue_cnt--;
4727 		if (tp1->data) {
4728 			/* sa_ignore NO_NULL_CHK */
4729 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4730 			sctp_m_freem(tp1->data);
4731 			tp1->data = NULL;
4732 			if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4733 				asoc->sent_queue_cnt_removeable--;
4734 			}
4735 		}
4736 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4737 			sctp_log_sack(asoc->last_acked_seq,
4738 			    cum_ack,
4739 			    tp1->rec.data.TSN_seq,
4740 			    0,
4741 			    0,
4742 			    SCTP_LOG_FREE_SENT);
4743 		}
4744 		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4745 		wake_him++;
4746 	}
4747 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4748 #ifdef INVARIANTS
4749 		panic("Warning flight size is postive and should be 0");
4750 #else
4751 		SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4752 		    asoc->total_flight);
4753 #endif
4754 		asoc->total_flight = 0;
4755 	}
4756 	/* sa_ignore NO_NULL_CHK */
4757 	if ((wake_him) && (stcb->sctp_socket)) {
4758 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4759 		struct socket *so;
4760 
4761 #endif
4762 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4763 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4764 			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4765 		}
4766 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4767 		so = SCTP_INP_SO(stcb->sctp_ep);
4768 		atomic_add_int(&stcb->asoc.refcnt, 1);
4769 		SCTP_TCB_UNLOCK(stcb);
4770 		SCTP_SOCKET_LOCK(so, 1);
4771 		SCTP_TCB_LOCK(stcb);
4772 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4773 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4774 			/* assoc was freed while we were unlocked */
4775 			SCTP_SOCKET_UNLOCK(so, 1);
4776 			return;
4777 		}
4778 #endif
4779 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4780 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4781 		SCTP_SOCKET_UNLOCK(so, 1);
4782 #endif
4783 	} else {
4784 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4785 			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4786 		}
4787 	}
4788 
4789 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4790 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4791 			/* Setup so we will exit RFC2582 fast recovery */
4792 			will_exit_fast_recovery = 1;
4793 		}
4794 	}
4795 	/*
4796 	 * Check for revoked fragments:
4797 	 *
4798 	 * if Previous sack - Had no frags then we can't have any revoked if
4799 	 * Previous sack - Had frag's then - If we now have frags aka
4800 	 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4801 	 * some of them. else - The peer revoked all ACKED fragments, since
4802 	 * we had some before and now we have NONE.
4803 	 */
4804 
4805 	if (num_seg) {
4806 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4807 		asoc->saw_sack_with_frags = 1;
4808 	} else if (asoc->saw_sack_with_frags) {
4809 		int cnt_revoked = 0;
4810 
4811 		/* Peer revoked all dg's marked or acked */
4812 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4813 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4814 				tp1->sent = SCTP_DATAGRAM_SENT;
4815 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4816 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4817 					    tp1->whoTo->flight_size,
4818 					    tp1->book_size,
4819 					    (uintptr_t) tp1->whoTo,
4820 					    tp1->rec.data.TSN_seq);
4821 				}
4822 				sctp_flight_size_increase(tp1);
4823 				sctp_total_flight_increase(stcb, tp1);
4824 				tp1->rec.data.chunk_was_revoked = 1;
4825 				/*
4826 				 * To ensure that this increase in
4827 				 * flightsize, which is artificial, does not
4828 				 * throttle the sender, we also increase the
4829 				 * cwnd artificially.
4830 				 */
4831 				tp1->whoTo->cwnd += tp1->book_size;
4832 				cnt_revoked++;
4833 			}
4834 		}
4835 		if (cnt_revoked) {
4836 			reneged_all = 1;
4837 		}
4838 		asoc->saw_sack_with_frags = 0;
4839 	}
4840 	if (num_nr_seg > 0)
4841 		asoc->saw_sack_with_nr_frags = 1;
4842 	else
4843 		asoc->saw_sack_with_nr_frags = 0;
4844 
4845 	/* JRS - Use the congestion control given in the CC module */
4846 	if (ecne_seen == 0) {
4847 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4848 			if (net->net_ack2 > 0) {
4849 				/*
4850 				 * Karn's rule applies to clearing error
4851 				 * count, this is optional.
4852 				 */
4853 				net->error_count = 0;
4854 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4855 					/* addr came good */
4856 					net->dest_state |= SCTP_ADDR_REACHABLE;
4857 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4858 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4859 				}
4860 				if (net == stcb->asoc.primary_destination) {
4861 					if (stcb->asoc.alternate) {
4862 						/*
4863 						 * release the alternate,
4864 						 * primary is good
4865 						 */
4866 						sctp_free_remote_addr(stcb->asoc.alternate);
4867 						stcb->asoc.alternate = NULL;
4868 					}
4869 				}
4870 				if (net->dest_state & SCTP_ADDR_PF) {
4871 					net->dest_state &= ~SCTP_ADDR_PF;
4872 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
4873 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4874 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4875 					/* Done with this net */
4876 					net->net_ack = 0;
4877 				}
4878 				/* restore any doubled timers */
4879 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4880 				if (net->RTO < stcb->asoc.minrto) {
4881 					net->RTO = stcb->asoc.minrto;
4882 				}
4883 				if (net->RTO > stcb->asoc.maxrto) {
4884 					net->RTO = stcb->asoc.maxrto;
4885 				}
4886 			}
4887 		}
4888 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4889 	}
4890 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4891 		/* nothing left in-flight */
4892 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4893 			/* stop all timers */
4894 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4895 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4896 			net->flight_size = 0;
4897 			net->partial_bytes_acked = 0;
4898 		}
4899 		asoc->total_flight = 0;
4900 		asoc->total_flight_count = 0;
4901 	}
4902 	/**********************************/
4903 	/* Now what about shutdown issues */
4904 	/**********************************/
4905 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4906 		/* nothing left on sendqueue.. consider done */
4907 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4908 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4909 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4910 		}
4911 		asoc->peers_rwnd = a_rwnd;
4912 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4913 			/* SWS sender side engages */
4914 			asoc->peers_rwnd = 0;
4915 		}
4916 		/* clean up */
4917 		if ((asoc->stream_queue_cnt == 1) &&
4918 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4919 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4920 		    (asoc->locked_on_sending)
4921 		    ) {
4922 			struct sctp_stream_queue_pending *sp;
4923 
4924 			/*
4925 			 * I may be in a state where we got all across.. but
4926 			 * cannot write more due to a shutdown... we abort
4927 			 * since the user did not indicate EOR in this case.
4928 			 */
4929 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4930 			    sctp_streamhead);
4931 			if ((sp) && (sp->length == 0)) {
4932 				asoc->locked_on_sending = NULL;
4933 				if (sp->msg_is_complete) {
4934 					asoc->stream_queue_cnt--;
4935 				} else {
4936 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4937 					asoc->stream_queue_cnt--;
4938 				}
4939 			}
4940 		}
4941 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4942 		    (asoc->stream_queue_cnt == 0)) {
4943 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4944 				/* Need to abort here */
4945 				struct mbuf *oper;
4946 
4947 		abort_out_now:
4948 				*abort_now = 1;
4949 				/* XXX */
4950 				oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
4951 				    0, M_NOWAIT, 1, MT_DATA);
4952 				if (oper) {
4953 					struct sctp_paramhdr *ph;
4954 
4955 					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr);
4956 					ph = mtod(oper, struct sctp_paramhdr *);
4957 					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4958 					ph->param_length = htons(SCTP_BUF_LEN(oper));
4959 				}
4960 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4961 				sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
4962 				return;
4963 			} else {
4964 				struct sctp_nets *netp;
4965 
4966 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4967 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4968 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4969 				}
4970 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4971 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4972 				sctp_stop_timers_for_shutdown(stcb);
4973 				if (asoc->alternate) {
4974 					netp = asoc->alternate;
4975 				} else {
4976 					netp = asoc->primary_destination;
4977 				}
4978 				sctp_send_shutdown(stcb, netp);
4979 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4980 				    stcb->sctp_ep, stcb, netp);
4981 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4982 				    stcb->sctp_ep, stcb, netp);
4983 			}
4984 			return;
4985 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4986 		    (asoc->stream_queue_cnt == 0)) {
4987 			struct sctp_nets *netp;
4988 
4989 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4990 				goto abort_out_now;
4991 			}
4992 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4993 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4994 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4995 			sctp_stop_timers_for_shutdown(stcb);
4996 			if (asoc->alternate) {
4997 				netp = asoc->alternate;
4998 			} else {
4999 				netp = asoc->primary_destination;
5000 			}
5001 			sctp_send_shutdown_ack(stcb, netp);
5002 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5003 			    stcb->sctp_ep, stcb, netp);
5004 			return;
5005 		}
5006 	}
5007 	/*
5008 	 * Now here we are going to recycle net_ack for a different use...
5009 	 * HEADS UP.
5010 	 */
5011 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5012 		net->net_ack = 0;
5013 	}
5014 
5015 	/*
5016 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5017 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5018 	 * automatically ensure that.
5019 	 */
5020 	if ((asoc->sctp_cmt_on_off > 0) &&
5021 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5022 	    (cmt_dac_flag == 0)) {
5023 		this_sack_lowest_newack = cum_ack;
5024 	}
5025 	if ((num_seg > 0) || (num_nr_seg > 0)) {
5026 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5027 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5028 	}
5029 	/* JRS - Use the congestion control given in the CC module */
5030 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5031 
5032 	/* Now are we exiting loss recovery ? */
5033 	if (will_exit_fast_recovery) {
5034 		/* Ok, we must exit fast recovery */
5035 		asoc->fast_retran_loss_recovery = 0;
5036 	}
5037 	if ((asoc->sat_t3_loss_recovery) &&
5038 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5039 		/* end satellite t3 loss recovery */
5040 		asoc->sat_t3_loss_recovery = 0;
5041 	}
5042 	/*
5043 	 * CMT Fast recovery
5044 	 */
5045 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5046 		if (net->will_exit_fast_recovery) {
5047 			/* Ok, we must exit fast recovery */
5048 			net->fast_retran_loss_recovery = 0;
5049 		}
5050 	}
5051 
5052 	/* Adjust and set the new rwnd value */
5053 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5054 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5055 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5056 	}
5057 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5058 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5059 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5060 		/* SWS sender side engages */
5061 		asoc->peers_rwnd = 0;
5062 	}
5063 	if (asoc->peers_rwnd > old_rwnd) {
5064 		win_probe_recovery = 1;
5065 	}
5066 	/*
5067 	 * Now we must setup so we have a timer up for anyone with
5068 	 * outstanding data.
5069 	 */
5070 	done_once = 0;
5071 again:
5072 	j = 0;
5073 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5074 		if (win_probe_recovery && (net->window_probe)) {
5075 			win_probe_recovered = 1;
5076 			/*-
5077 			 * Find first chunk that was used with
5078 			 * window probe and clear the event. Put
5079 			 * it back into the send queue as if has
5080 			 * not been sent.
5081 			 */
5082 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5083 				if (tp1->window_probe) {
5084 					sctp_window_probe_recovery(stcb, asoc, tp1);
5085 					break;
5086 				}
5087 			}
5088 		}
5089 		if (net->flight_size) {
5090 			j++;
5091 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5092 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5093 				    stcb->sctp_ep, stcb, net);
5094 			}
5095 			if (net->window_probe) {
5096 				net->window_probe = 0;
5097 			}
5098 		} else {
5099 			if (net->window_probe) {
5100 				/*
5101 				 * In window probes we must assure a timer
5102 				 * is still running there
5103 				 */
5104 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5105 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5106 					    stcb->sctp_ep, stcb, net);
5107 
5108 				}
5109 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5110 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5111 				    stcb, net,
5112 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
5113 			}
5114 		}
5115 	}
5116 	if ((j == 0) &&
5117 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5118 	    (asoc->sent_queue_retran_cnt == 0) &&
5119 	    (win_probe_recovered == 0) &&
5120 	    (done_once == 0)) {
5121 		/*
5122 		 * huh, this should not happen unless all packets are
5123 		 * PR-SCTP and marked to skip of course.
5124 		 */
5125 		if (sctp_fs_audit(asoc)) {
5126 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5127 				net->flight_size = 0;
5128 			}
5129 			asoc->total_flight = 0;
5130 			asoc->total_flight_count = 0;
5131 			asoc->sent_queue_retran_cnt = 0;
5132 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5133 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5134 					sctp_flight_size_increase(tp1);
5135 					sctp_total_flight_increase(stcb, tp1);
5136 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5137 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5138 				}
5139 			}
5140 		}
5141 		done_once = 1;
5142 		goto again;
5143 	}
5144 	/*********************************************/
5145 	/* Here we perform PR-SCTP procedures        */
5146 	/* (section 4.2)                             */
5147 	/*********************************************/
5148 	/* C1. update advancedPeerAckPoint */
5149 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5150 		asoc->advanced_peer_ack_point = cum_ack;
5151 	}
5152 	/* C2. try to further move advancedPeerAckPoint ahead */
5153 	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
5154 		struct sctp_tmit_chunk *lchk;
5155 		uint32_t old_adv_peer_ack_point;
5156 
5157 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5158 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5159 		/* C3. See if we need to send a Fwd-TSN */
5160 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5161 			/*
5162 			 * ISSUE with ECN, see FWD-TSN processing.
5163 			 */
5164 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5165 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5166 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5167 				    old_adv_peer_ack_point);
5168 			}
5169 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5170 				send_forward_tsn(stcb, asoc);
5171 			} else if (lchk) {
5172 				/* try to FR fwd-tsn's that get lost too */
5173 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5174 					send_forward_tsn(stcb, asoc);
5175 				}
5176 			}
5177 		}
5178 		if (lchk) {
5179 			/* Assure a timer is up */
5180 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5181 			    stcb->sctp_ep, stcb, lchk->whoTo);
5182 		}
5183 	}
5184 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5185 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5186 		    a_rwnd,
5187 		    stcb->asoc.peers_rwnd,
5188 		    stcb->asoc.total_flight,
5189 		    stcb->asoc.total_output_queue_size);
5190 	}
5191 }
5192 
5193 void
5194 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5195 {
5196 	/* Copy cum-ack */
5197 	uint32_t cum_ack, a_rwnd;
5198 
5199 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5200 	/* Arrange so a_rwnd does NOT change */
5201 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5202 
5203 	/* Now call the express sack handling */
5204 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5205 }
5206 
5207 static void
5208 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5209     struct sctp_stream_in *strmin)
5210 {
5211 	struct sctp_queued_to_read *ctl, *nctl;
5212 	struct sctp_association *asoc;
5213 	uint16_t tt;
5214 
5215 	asoc = &stcb->asoc;
5216 	tt = strmin->last_sequence_delivered;
5217 	/*
5218 	 * First deliver anything prior to and including the stream no that
5219 	 * came in
5220 	 */
5221 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
5222 		if (SCTP_SSN_GE(tt, ctl->sinfo_ssn)) {
5223 			/* this is deliverable now */
5224 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5225 			/* subtract pending on streams */
5226 			asoc->size_on_all_streams -= ctl->length;
5227 			sctp_ucount_decr(asoc->cnt_on_all_streams);
5228 			/* deliver it to at least the delivery-q */
5229 			if (stcb->sctp_socket) {
5230 				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5231 				sctp_add_to_readq(stcb->sctp_ep, stcb,
5232 				    ctl,
5233 				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5234 			}
5235 		} else {
5236 			/* no more delivery now. */
5237 			break;
5238 		}
5239 	}
5240 	/*
5241 	 * now we must deliver things in queue the normal way  if any are
5242 	 * now ready.
5243 	 */
5244 	tt = strmin->last_sequence_delivered + 1;
5245 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
5246 		if (tt == ctl->sinfo_ssn) {
5247 			/* this is deliverable now */
5248 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5249 			/* subtract pending on streams */
5250 			asoc->size_on_all_streams -= ctl->length;
5251 			sctp_ucount_decr(asoc->cnt_on_all_streams);
5252 			/* deliver it to at least the delivery-q */
5253 			strmin->last_sequence_delivered = ctl->sinfo_ssn;
5254 			if (stcb->sctp_socket) {
5255 				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5256 				sctp_add_to_readq(stcb->sctp_ep, stcb,
5257 				    ctl,
5258 				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5259 
5260 			}
5261 			tt = strmin->last_sequence_delivered + 1;
5262 		} else {
5263 			break;
5264 		}
5265 	}
5266 }
5267 
5268 static void
5269 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5270     struct sctp_association *asoc,
5271     uint16_t stream, uint16_t seq)
5272 {
5273 	struct sctp_tmit_chunk *chk, *nchk;
5274 
5275 	/* For each one on here see if we need to toss it */
5276 	/*
5277 	 * For now large messages held on the reasmqueue that are complete
5278 	 * will be tossed too. We could in theory do more work to spin
5279 	 * through and stop after dumping one msg aka seeing the start of a
5280 	 * new msg at the head, and call the delivery function... to see if
5281 	 * it can be delivered... But for now we just dump everything on the
5282 	 * queue.
5283 	 */
5284 	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5285 		/*
5286 		 * Do not toss it if on a different stream or marked for
5287 		 * unordered delivery in which case the stream sequence
5288 		 * number has no meaning.
5289 		 */
5290 		if ((chk->rec.data.stream_number != stream) ||
5291 		    ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
5292 			continue;
5293 		}
5294 		if (chk->rec.data.stream_seq == seq) {
5295 			/* It needs to be tossed */
5296 			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5297 			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5298 				asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5299 				asoc->str_of_pdapi = chk->rec.data.stream_number;
5300 				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5301 				asoc->fragment_flags = chk->rec.data.rcv_flags;
5302 			}
5303 			asoc->size_on_reasm_queue -= chk->send_size;
5304 			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5305 
5306 			/* Clear up any stream problem */
5307 			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5308 			    SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5309 				/*
5310 				 * We must dump forward this streams
5311 				 * sequence number if the chunk is not
5312 				 * unordered that is being skipped. There is
5313 				 * a chance that if the peer does not
5314 				 * include the last fragment in its FWD-TSN
5315 				 * we WILL have a problem here since you
5316 				 * would have a partial chunk in queue that
5317 				 * may not be deliverable. Also if a Partial
5318 				 * delivery API as started the user may get
5319 				 * a partial chunk. The next read returning
5320 				 * a new chunk... really ugly but I see no
5321 				 * way around it! Maybe a notify??
5322 				 */
5323 				asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5324 			}
5325 			if (chk->data) {
5326 				sctp_m_freem(chk->data);
5327 				chk->data = NULL;
5328 			}
5329 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5330 		} else if (SCTP_SSN_GT(chk->rec.data.stream_seq, seq)) {
5331 			/*
5332 			 * If the stream_seq is > than the purging one, we
5333 			 * are done
5334 			 */
5335 			break;
5336 		}
5337 	}
5338 }
5339 
5340 
5341 void
5342 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5343     struct sctp_forward_tsn_chunk *fwd,
5344     int *abort_flag, struct mbuf *m, int offset)
5345 {
5346 	/* The pr-sctp fwd tsn */
5347 	/*
5348 	 * here we will perform all the data receiver side steps for
5349 	 * processing FwdTSN, as required in by pr-sctp draft:
5350 	 *
5351 	 * Assume we get FwdTSN(x):
5352 	 *
5353 	 * 1) update local cumTSN to x 2) try to further advance cumTSN to x +
5354 	 * others we have 3) examine and update re-ordering queue on
5355 	 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5356 	 * report where we are.
5357 	 */
5358 	struct sctp_association *asoc;
5359 	uint32_t new_cum_tsn, gap;
5360 	unsigned int i, fwd_sz, m_size;
5361 	uint32_t str_seq;
5362 	struct sctp_stream_in *strm;
5363 	struct sctp_tmit_chunk *chk, *nchk;
5364 	struct sctp_queued_to_read *ctl, *sv;
5365 
5366 	asoc = &stcb->asoc;
5367 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5368 		SCTPDBG(SCTP_DEBUG_INDATA1,
5369 		    "Bad size too small/big fwd-tsn\n");
5370 		return;
5371 	}
5372 	m_size = (stcb->asoc.mapping_array_size << 3);
5373 	/*************************************************************/
5374 	/* 1. Here we update local cumTSN and shift the bitmap array */
5375 	/*************************************************************/
5376 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5377 
5378 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5379 		/* Already got there ... */
5380 		return;
5381 	}
5382 	/*
5383 	 * now we know the new TSN is more advanced, let's find the actual
5384 	 * gap
5385 	 */
5386 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5387 	asoc->cumulative_tsn = new_cum_tsn;
5388 	if (gap >= m_size) {
5389 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5390 			struct mbuf *oper;
5391 
5392 			/*
5393 			 * out of range (of single byte chunks in the rwnd I
5394 			 * give out). This must be an attacker.
5395 			 */
5396 			*abort_flag = 1;
5397 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
5398 			    0, M_NOWAIT, 1, MT_DATA);
5399 			if (oper) {
5400 				struct sctp_paramhdr *ph;
5401 				uint32_t *ippp;
5402 
5403 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5404 				    (sizeof(uint32_t) * 3);
5405 				ph = mtod(oper, struct sctp_paramhdr *);
5406 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5407 				ph->param_length = htons(SCTP_BUF_LEN(oper));
5408 				ippp = (uint32_t *) (ph + 1);
5409 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
5410 				ippp++;
5411 				*ippp = asoc->highest_tsn_inside_map;
5412 				ippp++;
5413 				*ippp = new_cum_tsn;
5414 			}
5415 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5416 			sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
5417 			return;
5418 		}
5419 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5420 
5421 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5422 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5423 		asoc->highest_tsn_inside_map = new_cum_tsn;
5424 
5425 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5426 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5427 
5428 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5429 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5430 		}
5431 	} else {
5432 		SCTP_TCB_LOCK_ASSERT(stcb);
5433 		for (i = 0; i <= gap; i++) {
5434 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5435 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5436 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5437 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5438 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5439 				}
5440 			}
5441 		}
5442 	}
5443 	/*************************************************************/
5444 	/* 2. Clear up re-assembly queue                             */
5445 	/*************************************************************/
5446 	/*
5447 	 * First service it if pd-api is up, just in case we can progress it
5448 	 * forward
5449 	 */
5450 	if (asoc->fragmented_delivery_inprogress) {
5451 		sctp_service_reassembly(stcb, asoc);
5452 	}
5453 	/* For each one on here see if we need to toss it */
5454 	/*
5455 	 * For now large messages held on the reasmqueue that are complete
5456 	 * will be tossed too. We could in theory do more work to spin
5457 	 * through and stop after dumping one msg aka seeing the start of a
5458 	 * new msg at the head, and call the delivery function... to see if
5459 	 * it can be delivered... But for now we just dump everything on the
5460 	 * queue.
5461 	 */
5462 	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5463 		if (SCTP_TSN_GE(new_cum_tsn, chk->rec.data.TSN_seq)) {
5464 			/* It needs to be tossed */
5465 			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5466 			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5467 				asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5468 				asoc->str_of_pdapi = chk->rec.data.stream_number;
5469 				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5470 				asoc->fragment_flags = chk->rec.data.rcv_flags;
5471 			}
5472 			asoc->size_on_reasm_queue -= chk->send_size;
5473 			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5474 
5475 			/* Clear up any stream problem */
5476 			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5477 			    SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5478 				/*
5479 				 * We must dump forward this streams
5480 				 * sequence number if the chunk is not
5481 				 * unordered that is being skipped. There is
5482 				 * a chance that if the peer does not
5483 				 * include the last fragment in its FWD-TSN
5484 				 * we WILL have a problem here since you
5485 				 * would have a partial chunk in queue that
5486 				 * may not be deliverable. Also if a Partial
5487 				 * delivery API as started the user may get
5488 				 * a partial chunk. The next read returning
5489 				 * a new chunk... really ugly but I see no
5490 				 * way around it! Maybe a notify??
5491 				 */
5492 				asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5493 			}
5494 			if (chk->data) {
5495 				sctp_m_freem(chk->data);
5496 				chk->data = NULL;
5497 			}
5498 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5499 		} else {
5500 			/*
5501 			 * Ok we have gone beyond the end of the fwd-tsn's
5502 			 * mark.
5503 			 */
5504 			break;
5505 		}
5506 	}
5507 	/*******************************************************/
5508 	/* 3. Update the PR-stream re-ordering queues and fix  */
5509 	/* delivery issues as needed.                       */
5510 	/*******************************************************/
5511 	fwd_sz -= sizeof(*fwd);
5512 	if (m && fwd_sz) {
5513 		/* New method. */
5514 		unsigned int num_str;
5515 		struct sctp_strseq *stseq, strseqbuf;
5516 
5517 		offset += sizeof(*fwd);
5518 
5519 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5520 		num_str = fwd_sz / sizeof(struct sctp_strseq);
5521 		for (i = 0; i < num_str; i++) {
5522 			uint16_t st;
5523 
5524 			stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5525 			    sizeof(struct sctp_strseq),
5526 			    (uint8_t *) & strseqbuf);
5527 			offset += sizeof(struct sctp_strseq);
5528 			if (stseq == NULL) {
5529 				break;
5530 			}
5531 			/* Convert */
5532 			st = ntohs(stseq->stream);
5533 			stseq->stream = st;
5534 			st = ntohs(stseq->sequence);
5535 			stseq->sequence = st;
5536 
5537 			/* now process */
5538 
5539 			/*
5540 			 * Ok we now look for the stream/seq on the read
5541 			 * queue where its not all delivered. If we find it
5542 			 * we transmute the read entry into a PDI_ABORTED.
5543 			 */
5544 			if (stseq->stream >= asoc->streamincnt) {
5545 				/* screwed up streams, stop!  */
5546 				break;
5547 			}
5548 			if ((asoc->str_of_pdapi == stseq->stream) &&
5549 			    (asoc->ssn_of_pdapi == stseq->sequence)) {
5550 				/*
5551 				 * If this is the one we were partially
5552 				 * delivering now then we no longer are.
5553 				 * Note this will change with the reassembly
5554 				 * re-write.
5555 				 */
5556 				asoc->fragmented_delivery_inprogress = 0;
5557 			}
5558 			sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
5559 			TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5560 				if ((ctl->sinfo_stream == stseq->stream) &&
5561 				    (ctl->sinfo_ssn == stseq->sequence)) {
5562 					str_seq = (stseq->stream << 16) | stseq->sequence;
5563 					ctl->end_added = 1;
5564 					ctl->pdapi_aborted = 1;
5565 					sv = stcb->asoc.control_pdapi;
5566 					stcb->asoc.control_pdapi = ctl;
5567 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5568 					    stcb,
5569 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5570 					    (void *)&str_seq,
5571 					    SCTP_SO_NOT_LOCKED);
5572 					stcb->asoc.control_pdapi = sv;
5573 					break;
5574 				} else if ((ctl->sinfo_stream == stseq->stream) &&
5575 				    SCTP_SSN_GT(ctl->sinfo_ssn, stseq->sequence)) {
5576 					/* We are past our victim SSN */
5577 					break;
5578 				}
5579 			}
5580 			strm = &asoc->strmin[stseq->stream];
5581 			if (SCTP_SSN_GT(stseq->sequence, strm->last_sequence_delivered)) {
5582 				/* Update the sequence number */
5583 				strm->last_sequence_delivered = stseq->sequence;
5584 			}
5585 			/* now kick the stream the new way */
5586 			/* sa_ignore NO_NULL_CHK */
5587 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5588 		}
5589 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5590 	}
5591 	/*
5592 	 * Now slide thing forward.
5593 	 */
5594 	sctp_slide_mapping_arrays(stcb);
5595 
5596 	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5597 		/* now lets kick out and check for more fragmented delivery */
5598 		/* sa_ignore NO_NULL_CHK */
5599 		sctp_deliver_reasm_check(stcb, &stcb->asoc);
5600 	}
5601 }
5602