/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $	 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>

/*
 * NOTES: On the outbound side of things we need to check the SACK timer to
 * see if we should generate a SACK and put it into the chunk queue (if we
 * have data to send, that is, and will be sending it, so the SACK can be
 * bundled with it).
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the list.
 */

void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a one-to-many socket,
	 * since sb_cc is the count that everyone has put up. When we
	 * rewrite sctp_soreceive we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL)
		return (calc);

	if (stcb->asoc.sb_cc == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

	/*
	 * Take out what has NOT been put on the socket queue but which we
	 * still hold for delivery.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue);
	calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams);

	if (calc == 0) {
		/* out of space */
		return (calc);
	}
	/* what is the overhead of all these rwnd's */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to control overhead, reduce it
	 * to 1, even if it is 0. SWS avoidance engaged.
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}
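
/*
 * A worked sketch of the computation above, with illustrative numbers (not
 * taken from any particular run): suppose sctp_sbspace() reports 58000
 * bytes actually free in so_rcv, 4000 bytes sit on the reassembly queue,
 * 2000 bytes sit on the stream queues, and my_rwnd_control_len is 500.
 * Then
 *
 *	calc = 58000 - 4000 - 2000 = 52000	(data we hold but have not
 *						 delivered does not count)
 *	calc = 52000 - 500 = 51500		(control overhead)
 *
 * and 51500 is advertised. Only when sb_cc and both queue sizes are all
 * zero is the full max(SCTP_SB_LIMIT_RCV, SCTP_MINIMAL_RWND) granted, and
 * if the result ever sinks below the control overhead it is clamped to 1
 * for SWS avoidance.
 */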

/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no,
    uint16_t stream_seq, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = stream_no;
	read_queue_e->sinfo_ssn = stream_seq;
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = net;
	read_queue_e->length = 0;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->spec_flags = 0;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->aux_data = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}

/*
 * Build out our readq entry based on the incoming chunk.
 */
static struct sctp_queued_to_read *
sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = chk->rec.data.stream_number;
	read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
	read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
	read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = chk->whoTo;
	read_queue_e->aux_data = NULL;
	read_queue_e->length = 0;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	read_queue_e->data = chk->data;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->spec_flags = 0;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}

struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp,
    struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_sndrcvinfo *outinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended = 0;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		/* user does not want the sndrcv ctl */
		return (NULL);
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
		use_extended = 1;
		len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
	} else {
		len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	}

	ret = sctp_get_mbuf_for_msg(len,
	    0, M_DONTWAIT, 1, MT_DATA);

	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
	cmh->cmsg_level = IPPROTO_SCTP;
	if (use_extended) {
		cmh->cmsg_type = SCTP_EXTRCV;
		cmh->cmsg_len = len;
		/*
		 * Copy only the payload struct; len is CMSG_LEN() and
		 * includes the cmsg header space, so copying len bytes
		 * would overrun both the source and the mbuf.
		 */
		memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
	} else {
		cmh->cmsg_type = SCTP_SNDRCV;
		cmh->cmsg_len = len;
		*outinfo = *sinfo;
	}
	SCTP_BUF_LEN(ret) = cmh->cmsg_len;
	return (ret);
}
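
/*
 * For reference, a minimal userland sketch (not part of this file) of how
 * the cmsg built above is typically consumed after recvmsg(); everything
 * below is the standard socket ancillary-data API, nothing more is assumed:
 *
 *	struct msghdr msg;		// filled in and passed to recvmsg()
 *	struct cmsghdr *c;
 *	struct sctp_sndrcvinfo *info;
 *
 *	for (c = CMSG_FIRSTHDR(&msg); c != NULL; c = CMSG_NXTHDR(&msg, c)) {
 *		if (c->cmsg_level == IPPROTO_SCTP &&
 *		    c->cmsg_type == SCTP_SNDRCV) {
 *			info = (struct sctp_sndrcvinfo *)CMSG_DATA(c);
 *			// info->sinfo_stream, info->sinfo_ssn,
 *			// info->sinfo_ppid etc. are now valid
 *		}
 *	}
 */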

char *
sctp_build_ctl_cchunk(struct sctp_inpcb *inp,
    int *control_len,
    struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_sndrcvinfo *outinfo;
	struct cmsghdr *cmh;
	char *buf;
	int len;
	int use_extended = 0;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		/* user does not want the sndrcv ctl */
		return (NULL);
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
		use_extended = 1;
		len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
	} else {
		len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	}
	SCTP_MALLOC(buf, char *, len, SCTP_M_CMSG);
	if (buf == NULL) {
		/* No space */
		return (buf);
	}
	/* We need a CMSG header followed by the struct */
	cmh = (struct cmsghdr *)buf;
	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
	cmh->cmsg_level = IPPROTO_SCTP;
	if (use_extended) {
		cmh->cmsg_type = SCTP_EXTRCV;
		cmh->cmsg_len = len;
		/* As above, copy only the payload struct, not len bytes. */
		memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
	} else {
		cmh->cmsg_type = SCTP_SNDRCV;
		cmh->cmsg_len = len;
		*outinfo = *sinfo;
	}
	*control_len = len;
	return (buf);
}

static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i, cumackp1;
	int fnd = 0;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		return;
	}
	cumackp1 = asoc->cumulative_tsn + 1;
	if (compare_with_wrap(cumackp1, tsn, MAX_TSN)) {
		/*
		 * This tsn is behind the cum ack, and thus we don't need
		 * to worry about it being moved from one map to the other.
		 */
		return;
	}
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
		printf("gap:%x tsn:%x\n", gap, tsn);
		sctp_print_mapping_array(asoc);
#ifdef INVARIANTS
		panic("Things are really messed up now!!");
#endif
	}
	SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
	SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
	if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
		asoc->highest_tsn_inside_nr_map = tsn;
	}
	if (tsn == asoc->highest_tsn_inside_map) {
		/* We must back down to see what the new highest is */
		for (i = tsn - 1; (compare_with_wrap(i, asoc->mapping_array_base_tsn, MAX_TSN) ||
		    (i == asoc->mapping_array_base_tsn)); i--) {
			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
				asoc->highest_tsn_inside_map = i;
				fnd = 1;
				break;
			}
		}
		if (!fnd) {
			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
		}
	}
}
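
/*
 * Background sketch for the bit juggling above: mapping_array tracks TSNs
 * that have been received but are still revokable (they could be reneged
 * if we run short of buffer space, hence the sctp_do_drain gate), while
 * nr_mapping_array tracks TSNs that are non-revokable because their data
 * has been, or is being, handed to the socket buffer. SCTP_CALC_TSN_TO_GAP
 * yields the bit offset of a TSN relative to mapping_array_base_tsn,
 * modulo 2^32; e.g. with a base of 1000, TSN 1003 lands on bit 3 of either
 * array (illustrative values). Marking a TSN non-revokable therefore means
 * clearing its bit in one array and setting it in the other, exactly as
 * done above.
 */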

/*
 * We are currently delivering from the reassembly queue. We must continue
 * to deliver until we either: 1) run out of space, 2) run out of sequential
 * TSNs, or 3) hit the SCTP_DATA_LAST_FRAG flag.
 */
static void
sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint16_t stream_no;
	int end = 0;
	int cntDel;

	struct sctp_queued_to_read *control, *ctl, *ctlat;

	if (stcb == NULL)
		return;

	cntDel = stream_no = 0;
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* The socket above is long gone or going away. */
abandon:
		asoc->fragmented_delivery_inprogress = 0;
		chk = TAILQ_FIRST(&asoc->reasmqueue);
		while (chk) {
			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
			asoc->size_on_reasm_queue -= chk->send_size;
			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
			/*
			 * Lose the data pointer, since it's in the socket
			 * buffer.
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			/* Now free the address and data */
			sctp_free_a_chunk(stcb, chk);
			/* sa_ignore FREED_MEMORY */
			chk = TAILQ_FIRST(&asoc->reasmqueue);
		}
		return;
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
	do {
		chk = TAILQ_FIRST(&asoc->reasmqueue);
		if (chk == NULL) {
			return;
		}
		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
			/* Can't deliver more :< */
			return;
		}
		stream_no = chk->rec.data.stream_number;
		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
		if (nxt_todel != chk->rec.data.stream_seq &&
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
			/*
			 * Not the next sequence to deliver in its stream,
			 * and the chunk is ordered; we must wait.
			 */
			return;
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {

			control = sctp_build_readq_entry_chk(stcb, chk);
			if (control == NULL) {
				/* out of memory? */
				return;
			}
			/* save it off for our future deliveries */
			stcb->asoc.control_pdapi = control;
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
			sctp_add_to_readq(stcb->sctp_ep,
			    stcb, control, &stcb->sctp_socket->so_rcv, end,
			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
			cntDel++;
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
			    stcb->asoc.control_pdapi,
			    chk->data, end, chk->rec.data.TSN_seq,
			    &stcb->sctp_socket->so_rcv)) {
				/*
				 * Something is very wrong: either
				 * control_pdapi is NULL, the tail_mbuf is
				 * corrupt, or there is an EOM already on
				 * the mbuf chain.
				 */
				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
					goto abandon;
				} else {
#ifdef INVARIANTS
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						panic("This should not happen control_pdapi NULL?");
					}
					/* if we did not panic, it was an EOM */
					panic("Bad chunking ??");
#else
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
					}
					SCTP_PRINTF("Bad chunking ??\n");
					SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");

#endif
					goto abandon;
				}
			}
			cntDel++;
		}
		/* pull it off; we delivered it */
		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			asoc->fragmented_delivery_inprogress = 0;
			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
				asoc->strmin[stream_no].last_sequence_delivered++;
			}
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
			}
		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			/*
			 * Turn the flag back on, since we just delivered
			 * yet another one.
			 */
			asoc->fragmented_delivery_inprogress = 1;
		}
		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
		asoc->last_strm_no_delivered = chk->rec.data.stream_number;

		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
		asoc->size_on_reasm_queue -= chk->send_size;
		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
		/* free up the chk */
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk);

		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * Now let's see if we can deliver the next one on
			 * the stream.
			 */
			struct sctp_stream_in *strm;

			strm = &asoc->strmin[stream_no];
			nxt_todel = strm->last_sequence_delivered + 1;
			ctl = TAILQ_FIRST(&strm->inqueue);
			if (ctl && (nxt_todel == ctl->sinfo_ssn)) {
				while (ctl != NULL) {
					/* Deliver more if we can. */
					if (nxt_todel == ctl->sinfo_ssn) {
						ctlat = TAILQ_NEXT(ctl, next);
						TAILQ_REMOVE(&strm->inqueue, ctl, next);
						asoc->size_on_all_streams -= ctl->length;
						sctp_ucount_decr(asoc->cnt_on_all_streams);
						strm->last_sequence_delivered++;
						sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
						sctp_add_to_readq(stcb->sctp_ep, stcb,
						    ctl,
						    &stcb->sctp_socket->so_rcv, 1,
						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
						ctl = ctlat;
					} else {
						break;
					}
					nxt_todel = strm->last_sequence_delivered + 1;
				}
			}
			break;
		}
		/* sa_ignore FREED_MEMORY */
		chk = TAILQ_FIRST(&asoc->reasmqueue);
	} while (chk);
}
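
/*
 * Flow of the loop above in brief: a FIRST_FRAG creates a fresh readq
 * entry and parks it in control_pdapi; every following fragment is
 * appended to that same entry via sctp_append_to_readq(), with "end" set
 * once the LAST_FRAG goes in. The application can thus start reading a
 * large message while its tail is still in flight, which is the point of
 * the partial delivery API.
 */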

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue.  If we do
 * append to the so_buf, keep doing so until we are out of order. One big
 * question still remains: what do we do when the socket buffer is FULL?
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4 billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about,
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 *
	 */
	struct sctp_stream_in *strm;
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint16_t nxt_todel;
	struct mbuf *oper;

	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	strm = &asoc->strmin[control->sinfo_stream];
	nxt_todel = strm->last_sequence_delivered + 1;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	SCTPDBG(SCTP_DEBUG_INDATA1,
	    "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
	    (uint32_t) control->sinfo_stream,
	    (uint32_t) strm->last_sequence_delivered,
	    (uint32_t) nxt_todel);
	if (compare_with_wrap(strm->last_sequence_delivered,
	    control->sinfo_ssn, MAX_SEQ) ||
	    (strm->last_sequence_delivered == control->sinfo_ssn)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
		    control->sinfo_ssn, strm->last_sequence_delivered);
protocol_error:
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (oper) {
			struct sctp_paramhdr *ph;
			uint32_t *ippp;

			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
			    (sizeof(uint32_t) * 3);
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(SCTP_BUF_LEN(oper));
			ippp = (uint32_t *) (ph + 1);
			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
			ippp++;
			*ippp = control->sinfo_tsn;
			ippp++;
			*ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
		}
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
		sctp_abort_an_association(stcb->sctp_ep, stcb,
		    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

		*abort_flag = 1;
		return;

	}
	if (nxt_todel == control->sinfo_ssn) {
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* It won't be queued since it can be delivered directly. */
		queue_needed = 0;
		asoc->size_on_all_streams -= control->length;
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_sequence_delivered++;

		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
		control = TAILQ_FIRST(&strm->inqueue);
		while (control != NULL) {
			/* all delivered */
			nxt_todel = strm->last_sequence_delivered + 1;
			if (nxt_todel == control->sinfo_ssn) {
				at = TAILQ_NEXT(control, next);
				TAILQ_REMOVE(&strm->inqueue, control, next);
				asoc->size_on_all_streams -= control->length;
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				strm->last_sequence_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_NOT_LOCKED);
				control = at;
				continue;
			}
			break;
		}
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy; find the correct place
		 * to put it on the queue.
		 */
		if ((compare_with_wrap(asoc->cumulative_tsn,
		    control->sinfo_tsn, MAX_TSN)) ||
		    (control->sinfo_tsn == asoc->cumulative_tsn)) {
			goto protocol_error;
		}
		if (TAILQ_EMPTY(&strm->inqueue)) {
			/* Empty queue */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
				sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
			}
			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		} else {
			TAILQ_FOREACH(at, &strm->inqueue, next) {
				if (compare_with_wrap(at->sinfo_ssn,
				    control->sinfo_ssn, MAX_SEQ)) {
					/*
					 * The one in the queue is bigger
					 * than the new one; insert before
					 * this one.
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_MD);
					}
					TAILQ_INSERT_BEFORE(at, control, next);
					break;
				} else if (at->sinfo_ssn == control->sinfo_ssn) {
					/*
					 * Gak, he sent me a duplicate str
					 * seq number.
					 */
					/*
					 * I guess I will just free this new
					 * guy; should we abort too? FIX ME
					 * MAYBE? Or it COULD be that the
					 * SSNs have wrapped. Maybe I should
					 * compare to TSN somehow... sigh;
					 * for now just blow away the chunk!
					 */

					if (control->data)
						sctp_m_freem(control->data);
					control->data = NULL;
					asoc->size_on_all_streams -= control->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					if (control->whoFrom)
						sctp_free_remote_addr(control->whoFrom);
					control->whoFrom = NULL;
					sctp_free_a_readq(stcb, control);
					return;
				} else {
					if (TAILQ_NEXT(at, next) == NULL) {
						/*
						 * We are at the end; insert
						 * it after this one.
						 */
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
							sctp_log_strm_del(control, at,
							    SCTP_STR_LOG_FROM_INSERT_TL);
						}
						TAILQ_INSERT_AFTER(&strm->inqueue,
						    at, control, next);
						break;
					}
				}
			}
		}
	}
}
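
/*
 * A note on the comparisons used throughout this file:
 * compare_with_wrap(a, b, MAX_SEQ) is a serial-number comparison, true
 * when a is "newer" than b in the 16-bit SSN space even across a wrap.
 * For example (illustrative values), compare_with_wrap(1, 65535, MAX_SEQ)
 * is true, since SSN 1 follows SSN 65535 after the wrap, while a plain
 * "1 > 65535" would get this wrong. MAX_TSN plays the same role for the
 * 32-bit TSN space.
 */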

/*
 * Returns two things: the total size of the deliverable parts of the first
 * fragmented message on the reassembly queue, and a 1 if all of the message
 * is ready or a 0 if the message is still incomplete.
 */
static int
sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t *t_size)
{
	struct sctp_tmit_chunk *chk;
	uint32_t tsn;

	*t_size = 0;
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* nothing on the queue */
		return (0);
	}
	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
		/* Not a first on the queue */
		return (0);
	}
	tsn = chk->rec.data.TSN_seq;
	while (chk) {
		if (tsn != chk->rec.data.TSN_seq) {
			return (0);
		}
		*t_size += chk->send_size;
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			return (1);
		}
		tsn++;
		chk = TAILQ_NEXT(chk, sctp_next);
	}
	return (0);
}
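
/*
 * Example of the walk above (illustrative TSNs): if the queue holds TSN 10
 * (FIRST_FRAG), TSN 11 (middle) and TSN 12 (LAST_FRAG), *t_size becomes
 * the sum of the three send_sizes and we return 1. If TSN 12 has not
 * arrived yet, the walk falls off the end (or hits a TSN gap) and we
 * return 0, with *t_size holding only the contiguous deliverable bytes.
 */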

static void
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint32_t tsize, pd_point;

doit_again:
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* Huh? */
		asoc->size_on_reasm_queue = 0;
		asoc->cnt_on_reasm_queue = 0;
		return;
	}
	if (asoc->fragmented_delivery_inprogress == 0) {
		nxt_todel =
		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
		    (nxt_todel == chk->rec.data.stream_seq ||
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
			/*
			 * Yep, the first one is here and it's ok to
			 * deliver, but should we?
			 */
			if (stcb->sctp_socket) {
				pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
				    stcb->sctp_ep->partial_delivery_point);
			} else {
				pd_point = stcb->sctp_ep->partial_delivery_point;
			}
			if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {

				/*
				 * Yes, we set up to start reception by
				 * backing down the TSN, just in case we
				 * can't deliver.
				 */
				asoc->fragmented_delivery_inprogress = 1;
				asoc->tsn_last_delivered =
				    chk->rec.data.TSN_seq - 1;
				asoc->str_of_pdapi =
				    chk->rec.data.stream_number;
				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
				asoc->pdapi_ppid = chk->rec.data.payloadtype;
				asoc->fragment_flags = chk->rec.data.rcv_flags;
				sctp_service_reassembly(stcb, asoc);
			}
		}
	} else {
		/*
		 * Service reassembly will deliver stream data queued at
		 * the end of fragmented delivery, but it won't know to go
		 * back and call itself again... we do that here with the
		 * goto doit_again.
		 */
		sctp_service_reassembly(stcb, asoc);
		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * Finished our fragmented delivery; could more be
			 * waiting?
			 */
			goto doit_again;
		}
	}
}
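
/*
 * The pd_point logic above in short: a fragmented message starts being
 * handed to the application either when it is complete, or when its
 * contiguous deliverable part reaches the partial delivery point, which
 * is the smaller of the socket's receive buffer limit and the endpoint's
 * configured partial_delivery_point. E.g. with a 64000-byte buffer and a
 * partial_delivery_point of 4096 (illustrative values), 4096 contiguous
 * bytes are enough to kick off partial delivery.
 */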

/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on
 * the queue, see if anything can be delivered. If so, pull it off (or as
 * much as we can). If we run out of space then we must dump what we can
 * and set the appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
	struct mbuf *oper;
	uint32_t cum_ackp1, last_tsn, prev_tsn, post_tsn;
	u_char last_flags;
	struct sctp_tmit_chunk *at, *prev, *next;

	prev = next = NULL;
	cum_ackp1 = asoc->tsn_last_delivered + 1;
	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
		/* This is the first one on the queue */
		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
		/*
		 * we do not check for delivery of anything when only one
		 * fragment is here
		 */
		asoc->size_on_reasm_queue = chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		if (chk->rec.data.TSN_seq == cum_ackp1) {
			if (asoc->fragmented_delivery_inprogress == 0 &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
			    SCTP_DATA_FIRST_FRAG) {
				/*
				 * An empty queue, no delivery in progress;
				 * we hit the next TSN and it does NOT have
				 * a FIRST fragment mark.
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's not first, no fragmented delivery in progress\n");
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);

				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (sizeof(uint32_t) * 3);
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
					ippp++;
					*ippp = chk->rec.data.TSN_seq;
					ippp++;
					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
				/*
				 * We are doing a partial delivery and the
				 * NEXT chunk MUST be either the LAST or
				 * MIDDLE fragment, NOT a FIRST.
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (3 * sizeof(uint32_t));
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
					ippp++;
					*ippp = chk->rec.data.TSN_seq;
					ippp++;
					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress) {
				/*
				 * Here we are ok with a MIDDLE or LAST
				 * piece
				 */
				if (chk->rec.data.stream_number !=
				    asoc->str_of_pdapi) {
					/* Got to be the right STR No */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
					    chk->rec.data.stream_number,
					    asoc->str_of_pdapi);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (sizeof(uint32_t) * 3);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
					    SCTP_DATA_UNORDERED &&
				    chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
					/* Got to be the right STR Seq */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
					    chk->rec.data.stream_seq,
					    asoc->ssn_of_pdapi);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				}
			}
		}
		return;
	}
	/* Find its place */
	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (compare_with_wrap(at->rec.data.TSN_seq,
		    chk->rec.data.TSN_seq, MAX_TSN)) {
			/*
			 * The one in the queue is bigger than the new one;
			 * insert before this one.
			 */
			/* A check */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			next = at;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
			/* Gak, he sent me a duplicate TSN */
			/*
			 * I guess I will just free this new guy; should we
			 * abort too? FIX ME MAYBE? Or it COULD be that the
			 * SSNs have wrapped. Maybe I should compare to TSN
			 * somehow... sigh; for now just blow away the
			 * chunk!
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			sctp_free_a_chunk(stcb, chk);
			return;
		} else {
			last_flags = at->rec.data.rcv_flags;
			last_tsn = at->rec.data.TSN_seq;
			prev = at;
			if (TAILQ_NEXT(at, sctp_next) == NULL) {
				/*
				 * We are at the end; insert it after this
				 * one.
				 */
				/* check it first */
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
				break;
			}
		}
	}
	/* Now the audits */
	if (prev) {
		prev_tsn = chk->rec.data.TSN_seq - 1;
		if (prev_tsn == prev->rec.data.TSN_seq) {
			/*
			 * Ok, the one I am dropping onto the end is the
			 * NEXT. A bit of validation here.
			 */
			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_FIRST_FRAG ||
			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG) {
				/*
				 * Insert chk MUST be a MIDDLE or LAST
				 * fragment
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a middle or last but not a first\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    prev->rec.data.stream_number) {
					/*
					 * Huh, we need the correct stream
					 * number here; they must be the
					 * same.
					 */
					SCTP_PRINTF("Prev check - Gak, Evil plot, stream:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    prev->rec.data.stream_number);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    prev->rec.data.stream_seq) {
					/*
					 * Huh, we need the correct SSN
					 * here; they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    prev->rec.data.stream_seq);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/* Insert chk MUST be a FIRST */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, it's not FIRST and it must be!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			}
		}
	}
	if (next) {
		post_tsn = chk->rec.data.TSN_seq + 1;
		if (post_tsn == next->rec.data.TSN_seq) {
			/*
			 * Ok, the one I am inserting ahead of is my NEXT
			 * one. A bit of validation here.
			 */
			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
				/* Insert chk MUST be a last fragment */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
				    != SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's not a last!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_MIDDLE_FRAG ||
				    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/*
				 * Insert chk CAN be MIDDLE or FIRST, NOT
				 * LAST.
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    next->rec.data.stream_number) {
					/*
					 * Huh, we need the correct stream
					 * number here; they must be the
					 * same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, stream:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    next->rec.data.stream_number);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    next->rec.data.stream_seq) {
					/*
					 * Huh, we need the correct SSN
					 * here; they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    next->rec.data.stream_seq);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			}
		}
	}
	/* Do we need to do some delivery? check */
	sctp_deliver_reasm_check(stcb, asoc);
}

/*
 * This is an unfortunate routine. It checks to make sure an evil guy is
 * not stuffing us full of bad packet fragments. A broken peer could also
 * do this, but that is doubtful. It is too bad I must worry about evil
 * crackers; sigh :< more cycles.
 */
static int
sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
    uint32_t TSN_seq)
{
	struct sctp_tmit_chunk *at;
	uint32_t tsn_est;

	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (compare_with_wrap(TSN_seq,
		    at->rec.data.TSN_seq, MAX_TSN)) {
			/* is it one bigger? */
			tsn_est = at->rec.data.TSN_seq + 1;
			if (tsn_est == TSN_seq) {
				/* yep. It had better be a LAST then. */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_LAST_FRAG) {
					/*
					 * Ok, this guy belongs next to a
					 * guy that is NOT last; it should
					 * be a middle/last, not a complete
					 * chunk.
					 */
					return (1);
				} else {
					/*
					 * This guy is ok since it's a LAST
					 * and the new chunk is a fully
					 * self-contained one.
					 */
					return (0);
				}
			}
		} else if (TSN_seq == at->rec.data.TSN_seq) {
			/* Software error since I have a dup? */
			return (1);
		} else {
			/*
			 * Ok, 'at' is larger than the new chunk, but does
			 * it need to be right before it?
			 */
			tsn_est = TSN_seq + 1;
			if (tsn_est == at->rec.data.TSN_seq) {
				/* Yep, it had better be a FIRST */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					return (1);
				} else {
					return (0);
				}
			}
		}
	}
	return (0);
}
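
/*
 * Example of what the check above catches (illustrative TSNs): if the
 * reassembly queue holds TSN 20 marked as a MIDDLE fragment, then a new
 * chunk with TSN 21 claiming to be a complete, unfragmented message cannot
 * be legitimate; TSN 21 must be a middle or last piece of the same
 * message, so we return 1 and the caller treats the packet as evil.
 */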
1447 
1448 
1449 static int
1450 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1451     struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
1452     struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1453     int *break_flag, int last_chunk)
1454 {
1455 	/* Process a data chunk */
1456 	/* struct sctp_tmit_chunk *chk; */
1457 	struct sctp_tmit_chunk *chk;
1458 	uint32_t tsn, gap;
1459 	struct mbuf *dmbuf;
1460 	int indx, the_len;
1461 	int need_reasm_check = 0;
1462 	uint16_t strmno, strmseq;
1463 	struct mbuf *oper;
1464 	struct sctp_queued_to_read *control;
1465 	int ordered;
1466 	uint32_t protocol_id;
1467 	uint8_t chunk_flags;
1468 	struct sctp_stream_reset_list *liste;
1469 
1470 	chk = NULL;
1471 	tsn = ntohl(ch->dp.tsn);
1472 	chunk_flags = ch->ch.chunk_flags;
1473 	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1474 		asoc->send_sack = 1;
1475 	}
1476 	protocol_id = ch->dp.protocol_id;
1477 	ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1478 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1479 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1480 	}
1481 	if (stcb == NULL) {
1482 		return (0);
1483 	}
1484 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
1485 	if (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN) ||
1486 	    asoc->cumulative_tsn == tsn) {
1487 		/* It is a duplicate */
1488 		SCTP_STAT_INCR(sctps_recvdupdata);
1489 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1490 			/* Record a dup for the next outbound sack */
1491 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1492 			asoc->numduptsns++;
1493 		}
1494 		asoc->send_sack = 1;
1495 		return (0);
1496 	}
1497 	/* Calculate the number of TSN's between the base and this TSN */
1498 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1499 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1500 		/* Can't hold the bit in the mapping at max array, toss it */
1501 		return (0);
1502 	}
1503 	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1504 		SCTP_TCB_LOCK_ASSERT(stcb);
1505 		if (sctp_expand_mapping_array(asoc, gap)) {
1506 			/* Can't expand, drop it */
1507 			return (0);
1508 		}
1509 	}
1510 	if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) {
1511 		*high_tsn = tsn;
1512 	}
1513 	/* See if we have received this one already */
1514 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1515 	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1516 		SCTP_STAT_INCR(sctps_recvdupdata);
1517 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1518 			/* Record a dup for the next outbound sack */
1519 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1520 			asoc->numduptsns++;
1521 		}
1522 		asoc->send_sack = 1;
1523 		return (0);
1524 	}
1525 	/*
1526 	 * Check to see about the GONE flag, duplicates would cause a sack
1527 	 * to be sent up above
1528 	 */
1529 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1530 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1531 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
1532 	    ) {
1533 		/*
1534 		 * wait a minute, this guy is gone, there is no longer a
1535 		 * receiver. Send peer an ABORT!
1536 		 */
1537 		struct mbuf *op_err;
1538 
1539 		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1540 		sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err, SCTP_SO_NOT_LOCKED);
1541 		*abort_flag = 1;
1542 		return (0);
1543 	}
1544 	/*
1545 	 * Now before going further we see if there is room. If NOT then we
1546 	 * MAY let one through only IF this TSN is the one we are waiting
1547 	 * for on a partial delivery API.
1548 	 */
1549 
1550 	/* now do the tests */
1551 	if (((asoc->cnt_on_all_streams +
1552 	    asoc->cnt_on_reasm_queue +
1553 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1554 	    (((int)asoc->my_rwnd) <= 0)) {
1555 		/*
1556 		 * When we have NO room in the rwnd we check to make sure
1557 		 * the reader is doing its job...
1558 		 */
1559 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1560 			/* some to read, wake-up */
1561 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1562 			struct socket *so;
1563 
1564 			so = SCTP_INP_SO(stcb->sctp_ep);
1565 			atomic_add_int(&stcb->asoc.refcnt, 1);
1566 			SCTP_TCB_UNLOCK(stcb);
1567 			SCTP_SOCKET_LOCK(so, 1);
1568 			SCTP_TCB_LOCK(stcb);
1569 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1570 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1571 				/* assoc was freed while we were unlocked */
1572 				SCTP_SOCKET_UNLOCK(so, 1);
1573 				return (0);
1574 			}
1575 #endif
1576 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1577 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1578 			SCTP_SOCKET_UNLOCK(so, 1);
1579 #endif
1580 		}
1581 		/* now is it in the mapping array of what we have accepted? */
1582 		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN) &&
1583 		    compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1584 			/* Nope, not in the valid range; dump it */
1585 			sctp_set_rwnd(stcb, asoc);
1586 			if ((asoc->cnt_on_all_streams +
1587 			    asoc->cnt_on_reasm_queue +
1588 			    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1589 				SCTP_STAT_INCR(sctps_datadropchklmt);
1590 			} else {
1591 				SCTP_STAT_INCR(sctps_datadroprwnd);
1592 			}
1593 			indx = *break_flag;
1594 			*break_flag = 1;
1595 			return (0);
1596 		}
1597 	}
1598 	strmno = ntohs(ch->dp.stream_id);
1599 	if (strmno >= asoc->streamincnt) {
1600 		struct sctp_paramhdr *phdr;
1601 		struct mbuf *mb;
1602 
1603 		mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
1604 		    0, M_DONTWAIT, 1, MT_DATA);
1605 		if (mb != NULL) {
1606 			/* add some space up front so prepend will work well */
1607 			SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
1608 			phdr = mtod(mb, struct sctp_paramhdr *);
1609 			/*
1610 			 * Error causes are just params, and this one has
1611 			 * two back-to-back phdrs: one with the error type
1612 			 * and size, the other with the stream id and a reserved field.
1613 			 */
1614 			SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1615 			phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1616 			phdr->param_length =
1617 			    htons(sizeof(struct sctp_paramhdr) * 2);
1618 			phdr++;
1619 			/* We insert the stream in the type field */
1620 			phdr->param_type = ch->dp.stream_id;
1621 			/* And set the length to 0 for the rsvd field */
1622 			phdr->param_length = 0;
1623 			sctp_queue_op_err(stcb, mb);
1624 		}
1625 		SCTP_STAT_INCR(sctps_badsid);
1626 		SCTP_TCB_LOCK_ASSERT(stcb);
1627 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1628 		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1629 			asoc->highest_tsn_inside_nr_map = tsn;
1630 		}
1631 		if (tsn == (asoc->cumulative_tsn + 1)) {
1632 			/* Update cum-ack */
1633 			asoc->cumulative_tsn = tsn;
1634 		}
1635 		return (0);
1636 	}
1637 	/*
1638 	 * Before we continue let's validate that we are not being fooled by
1639 	 * an evil attacker. We can only have 4k chunks based on our TSN
1640 	 * spread allowed by the mapping array 512 * 8 bits, so there is no
1641 	 * way our stream sequence numbers could have wrapped. We of course
1642 	 * only validate the FIRST fragment so the bit must be set.
1643 	 */
1644 	strmseq = ntohs(ch->dp.stream_sequence);
1645 #ifdef SCTP_ASOCLOG_OF_TSNS
1646 	SCTP_TCB_LOCK_ASSERT(stcb);
1647 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1648 		asoc->tsn_in_at = 0;
1649 		asoc->tsn_in_wrapped = 1;
1650 	}
1651 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1652 	asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1653 	asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1654 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1655 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1656 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1657 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1658 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1659 	asoc->tsn_in_at++;
1660 #endif
1661 	if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1662 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1663 	    (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1664 	    (compare_with_wrap(asoc->strmin[strmno].last_sequence_delivered,
1665 	    strmseq, MAX_SEQ) ||
1666 	    asoc->strmin[strmno].last_sequence_delivered == strmseq)) {
1667 		/* The incoming sseq is behind where we last delivered? */
1668 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1669 		    strmseq, asoc->strmin[strmno].last_sequence_delivered);
1670 		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1671 		    0, M_DONTWAIT, 1, MT_DATA);
1672 		if (oper) {
1673 			struct sctp_paramhdr *ph;
1674 			uint32_t *ippp;
1675 
1676 			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
1677 			    (3 * sizeof(uint32_t));
1678 			ph = mtod(oper, struct sctp_paramhdr *);
1679 			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1680 			ph->param_length = htons(SCTP_BUF_LEN(oper));
1681 			ippp = (uint32_t *) (ph + 1);
1682 			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1683 			ippp++;
1684 			*ippp = tsn;
1685 			ippp++;
1686 			*ippp = ((strmno << 16) | strmseq);
1687 
1688 		}
1689 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1690 		sctp_abort_an_association(stcb->sctp_ep, stcb,
1691 		    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1692 		*abort_flag = 1;
1693 		return (0);
1694 	}
1695 	/************************************
1696 	 * From here down we may find ch-> invalid
1697 	 * so it's a good idea NOT to use it.
1698 	 *************************************/
1699 
1700 	the_len = (chk_length - sizeof(struct sctp_data_chunk));
1701 	if (last_chunk == 0) {
1702 		dmbuf = SCTP_M_COPYM(*m,
1703 		    (offset + sizeof(struct sctp_data_chunk)),
1704 		    the_len, M_DONTWAIT);
1705 #ifdef SCTP_MBUF_LOGGING
1706 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1707 			struct mbuf *mat;
1708 
1709 			mat = dmbuf;
1710 			while (mat) {
1711 				if (SCTP_BUF_IS_EXTENDED(mat)) {
1712 					sctp_log_mb(mat, SCTP_MBUF_ICOPY);
1713 				}
1714 				mat = SCTP_BUF_NEXT(mat);
1715 			}
1716 		}
1717 #endif
1718 	} else {
1719 		/* We can steal the last chunk */
1720 		int l_len;
1721 
1722 		dmbuf = *m;
1723 		/* lop off the top part */
1724 		m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1725 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1726 			l_len = SCTP_BUF_LEN(dmbuf);
1727 		} else {
1728 			/*
1729 			 * need to count up the size; hopefully we do not
1730 			 * hit this too often :-0
1731 			 */
1732 			struct mbuf *lat;
1733 
1734 			l_len = 0;
1735 			lat = dmbuf;
1736 			while (lat) {
1737 				l_len += SCTP_BUF_LEN(lat);
1738 				lat = SCTP_BUF_NEXT(lat);
1739 			}
1740 		}
1741 		if (l_len > the_len) {
1742 			/* Trim the excess bytes off the end too */
1743 			m_adj(dmbuf, -(l_len - the_len));
1744 		}
1745 	}
1746 	if (dmbuf == NULL) {
1747 		SCTP_STAT_INCR(sctps_nomem);
1748 		return (0);
1749 	}
1750 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1751 	    asoc->fragmented_delivery_inprogress == 0 &&
1752 	    TAILQ_EMPTY(&asoc->resetHead) &&
1753 	    ((ordered == 0) ||
1754 	    ((asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1755 	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1756 		/* Candidate for express delivery */
1757 		/*
1758 		 * It's not fragmented, no PD-API is up, nothing is in the
1759 		 * delivery queue, it's un-ordered OR ordered and the next to
1760 		 * deliver AND nothing else is stuck on the stream queue,
1761 		 * and there is room for it in the socket buffer. Let's just
1762 		 * stuff it up the buffer....
1763 		 */
1764 
1765 		/* It would be nice to avoid this copy if we could :< */
1766 		sctp_alloc_a_readq(stcb, control);
1767 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1768 		    protocol_id,
1769 		    stcb->asoc.context,
1770 		    strmno, strmseq,
1771 		    chunk_flags,
1772 		    dmbuf);
1773 		if (control == NULL) {
1774 			goto failed_express_del;
1775 		}
1776 		sctp_add_to_readq(stcb->sctp_ep, stcb,
1777 		    control, &stcb->sctp_socket->so_rcv,
1778 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1779 
1780 		if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1781 			/* for ordered, bump what we delivered */
1782 			asoc->strmin[strmno].last_sequence_delivered++;
1783 		}
1784 		SCTP_STAT_INCR(sctps_recvexpress);
1785 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1786 			sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1787 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
1788 		}
1789 		control = NULL;
1790 
1791 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1792 		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1793 			asoc->highest_tsn_inside_nr_map = tsn;
1794 		}
1795 		goto finish_express_del;
1796 	}
1797 failed_express_del:
1798 	/* If we reach here this is a new chunk */
1799 	chk = NULL;
1800 	control = NULL;
1801 	/* Express for fragmented delivery? */
1802 	if ((asoc->fragmented_delivery_inprogress) &&
1803 	    (stcb->asoc.control_pdapi) &&
1804 	    (asoc->str_of_pdapi == strmno) &&
1805 	    (asoc->ssn_of_pdapi == strmseq)
1806 	    ) {
1807 		control = stcb->asoc.control_pdapi;
1808 		if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1809 			/* Can't be another first fragment while one is in progress */
1810 			goto failed_pdapi_express_del;
1811 		}
1812 		if (tsn == (control->sinfo_tsn + 1)) {
1813 			/* Yep, we can add it on */
1814 			int end = 0;
1815 			uint32_t cumack;
1816 
1817 			if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1818 				end = 1;
1819 			}
1820 			cumack = asoc->cumulative_tsn;
1821 			if ((cumack + 1) == tsn)
1822 				cumack = tsn;
1823 
1824 			if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1825 			    tsn,
1826 			    &stcb->sctp_socket->so_rcv)) {
1827 				SCTP_PRINTF("Append fails end:%d\n", end);
1828 				goto failed_pdapi_express_del;
1829 			}
1830 			SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1831 			if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1832 				asoc->highest_tsn_inside_nr_map = tsn;
1833 			}
1834 			SCTP_STAT_INCR(sctps_recvexpressm);
1835 			control->sinfo_tsn = tsn;
1836 			asoc->tsn_last_delivered = tsn;
1837 			asoc->fragment_flags = chunk_flags;
1838 			asoc->tsn_of_pdapi_last_delivered = tsn;
1839 			asoc->last_flags_delivered = chunk_flags;
1840 			asoc->last_strm_seq_delivered = strmseq;
1841 			asoc->last_strm_no_delivered = strmno;
1842 			if (end) {
1843 				/* clean up the flags and such */
1844 				asoc->fragmented_delivery_inprogress = 0;
1845 				if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1846 					asoc->strmin[strmno].last_sequence_delivered++;
1847 				}
1848 				stcb->asoc.control_pdapi = NULL;
1849 				if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1850 					/*
1851 					 * There could be another message
1852 					 * ready
1853 					 */
1854 					need_reasm_check = 1;
1855 				}
1856 			}
1857 			control = NULL;
1858 			goto finish_express_del;
1859 		}
1860 	}
1861 failed_pdapi_express_del:
1862 	control = NULL;
1863 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
1864 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1865 		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1866 			asoc->highest_tsn_inside_nr_map = tsn;
1867 		}
1868 	} else {
1869 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1870 		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
1871 			asoc->highest_tsn_inside_map = tsn;
1872 		}
1873 	}
1874 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1875 		sctp_alloc_a_chunk(stcb, chk);
1876 		if (chk == NULL) {
1877 			/* No memory so we drop the chunk */
1878 			SCTP_STAT_INCR(sctps_nomem);
1879 			if (last_chunk == 0) {
1880 				/* we copied it, free the copy */
1881 				sctp_m_freem(dmbuf);
1882 			}
1883 			return (0);
1884 		}
1885 		chk->rec.data.TSN_seq = tsn;
1886 		chk->no_fr_allowed = 0;
1887 		chk->rec.data.stream_seq = strmseq;
1888 		chk->rec.data.stream_number = strmno;
1889 		chk->rec.data.payloadtype = protocol_id;
1890 		chk->rec.data.context = stcb->asoc.context;
1891 		chk->rec.data.doing_fast_retransmit = 0;
1892 		chk->rec.data.rcv_flags = chunk_flags;
1893 		chk->asoc = asoc;
1894 		chk->send_size = the_len;
1895 		chk->whoTo = net;
1896 		atomic_add_int(&net->ref_count, 1);
1897 		chk->data = dmbuf;
1898 	} else {
1899 		sctp_alloc_a_readq(stcb, control);
1900 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1901 		    protocol_id,
1902 		    stcb->asoc.context,
1903 		    strmno, strmseq,
1904 		    chunk_flags,
1905 		    dmbuf);
1906 		if (control == NULL) {
1907 			/* No memory so we drop the chunk */
1908 			SCTP_STAT_INCR(sctps_nomem);
1909 			if (last_chunk == 0) {
1910 				/* we copied it, free the copy */
1911 				sctp_m_freem(dmbuf);
1912 			}
1913 			return (0);
1914 		}
1915 		control->length = the_len;
1916 	}
1917 
1918 	/* Mark it as received */
1919 	/* Now queue it where it belongs */
1920 	if (control != NULL) {
1921 		/* First a sanity check */
1922 		if (asoc->fragmented_delivery_inprogress) {
1923 			/*
1924 			 * Ok, we have a fragmented delivery in progress; if
1925 			 * this chunk is next to deliver OR belongs, in our
1926 			 * view, in the reassembly queue, the peer is evil
1927 			 * or broken.
1928 			 */
1929 			uint32_t estimate_tsn;
1930 
1931 			estimate_tsn = asoc->tsn_last_delivered + 1;
1932 			if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1933 			    (estimate_tsn == control->sinfo_tsn)) {
1934 				/* Evil/Broken peer */
1935 				sctp_m_freem(control->data);
1936 				control->data = NULL;
1937 				if (control->whoFrom) {
1938 					sctp_free_remote_addr(control->whoFrom);
1939 					control->whoFrom = NULL;
1940 				}
1941 				sctp_free_a_readq(stcb, control);
1942 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1943 				    0, M_DONTWAIT, 1, MT_DATA);
1944 				if (oper) {
1945 					struct sctp_paramhdr *ph;
1946 					uint32_t *ippp;
1947 
1948 					SCTP_BUF_LEN(oper) =
1949 					    sizeof(struct sctp_paramhdr) +
1950 					    (3 * sizeof(uint32_t));
1951 					ph = mtod(oper, struct sctp_paramhdr *);
1952 					ph->param_type =
1953 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1954 					ph->param_length = htons(SCTP_BUF_LEN(oper));
1955 					ippp = (uint32_t *) (ph + 1);
1956 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
1957 					ippp++;
1958 					*ippp = tsn;
1959 					ippp++;
1960 					*ippp = ((strmno << 16) | strmseq);
1961 				}
1962 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1963 				sctp_abort_an_association(stcb->sctp_ep, stcb,
1964 				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1965 
1966 				*abort_flag = 1;
1967 				return (0);
1968 			} else {
1969 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1970 					sctp_m_freem(control->data);
1971 					control->data = NULL;
1972 					if (control->whoFrom) {
1973 						sctp_free_remote_addr(control->whoFrom);
1974 						control->whoFrom = NULL;
1975 					}
1976 					sctp_free_a_readq(stcb, control);
1977 
1978 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1979 					    0, M_DONTWAIT, 1, MT_DATA);
1980 					if (oper) {
1981 						struct sctp_paramhdr *ph;
1982 						uint32_t *ippp;
1983 
1984 						SCTP_BUF_LEN(oper) =
1985 						    sizeof(struct sctp_paramhdr) +
1986 						    (3 * sizeof(uint32_t));
1987 						ph = mtod(oper,
1988 						    struct sctp_paramhdr *);
1989 						ph->param_type =
1990 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1991 						ph->param_length =
1992 						    htons(SCTP_BUF_LEN(oper));
1993 						ippp = (uint32_t *) (ph + 1);
1994 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16);
1995 						ippp++;
1996 						*ippp = tsn;
1997 						ippp++;
1998 						*ippp = ((strmno << 16) | strmseq);
1999 					}
2000 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
2001 					sctp_abort_an_association(stcb->sctp_ep,
2002 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2003 
2004 					*abort_flag = 1;
2005 					return (0);
2006 				}
2007 			}
2008 		} else {
2009 			/* No PDAPI running */
2010 			if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
2011 				/*
2012 				 * The reassembly queue is NOT empty; validate
2013 				 * that this tsn does not need to be in the
2014 				 * reassembly queue. If it does, then our peer
2015 				 * is broken or evil.
2016 				 */
2017 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
2018 					sctp_m_freem(control->data);
2019 					control->data = NULL;
2020 					if (control->whoFrom) {
2021 						sctp_free_remote_addr(control->whoFrom);
2022 						control->whoFrom = NULL;
2023 					}
2024 					sctp_free_a_readq(stcb, control);
2025 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
2026 					    0, M_DONTWAIT, 1, MT_DATA);
2027 					if (oper) {
2028 						struct sctp_paramhdr *ph;
2029 						uint32_t *ippp;
2030 
2031 						SCTP_BUF_LEN(oper) =
2032 						    sizeof(struct sctp_paramhdr) +
2033 						    (3 * sizeof(uint32_t));
2034 						ph = mtod(oper,
2035 						    struct sctp_paramhdr *);
2036 						ph->param_type =
2037 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2038 						ph->param_length =
2039 						    htons(SCTP_BUF_LEN(oper));
2040 						ippp = (uint32_t *) (ph + 1);
2041 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2042 						ippp++;
2043 						*ippp = tsn;
2044 						ippp++;
2045 						*ippp = ((strmno << 16) | strmseq);
2046 					}
2047 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
2048 					sctp_abort_an_association(stcb->sctp_ep,
2049 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2050 
2051 					*abort_flag = 1;
2052 					return (0);
2053 				}
2054 			}
2055 		}
2056 		/* ok, if we reach here we have passed the sanity checks */
2057 		if (chunk_flags & SCTP_DATA_UNORDERED) {
2058 			/* queue directly into socket buffer */
2059 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2060 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2061 			    control,
2062 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2063 		} else {
2064 			/*
2065 			 * Special check for when streams are resetting. We
2066 			 * could be smarter about this and check the
2067 			 * actual stream to see if it is not being reset..
2068 			 * that way we would not create a HOLB between
2069 			 * streams being reset and those not being reset.
2070 			 *
2071 			 * We take complete messages that have a stream reset
2072 			 * intervening (aka the TSN is after where our
2073 			 * cum-ack needs to be) off and put them on the
2074 			 * pending_reply_queue. The reassembly ones we do
2075 			 * not have to worry about since they are all sorted
2076 			 * and processed in TSN order. It is only the
2077 			 * singletons I must worry about.
2078 			 */
2079 			if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2080 			    ((compare_with_wrap(tsn, liste->tsn, MAX_TSN)))
2081 			    ) {
2082 				/*
2083 				 * yep, it's past where we need to reset... go
2084 				 * ahead and queue it.
2085 				 */
2086 				if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2087 					/* first one on */
2088 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2089 				} else {
2090 					struct sctp_queued_to_read *ctlOn;
2091 					unsigned char inserted = 0;
2092 
2093 					ctlOn = TAILQ_FIRST(&asoc->pending_reply_queue);
2094 					while (ctlOn) {
2095 						if (compare_with_wrap(control->sinfo_tsn,
2096 						    ctlOn->sinfo_tsn, MAX_TSN)) {
2097 							ctlOn = TAILQ_NEXT(ctlOn, next);
2098 						} else {
2099 							/* found it */
2100 							TAILQ_INSERT_BEFORE(ctlOn, control, next);
2101 							inserted = 1;
2102 							break;
2103 						}
2104 					}
2105 					if (inserted == 0) {
2106 						/*
2107 						 * not inserted before any
2108 						 * existing entry, so it
2109 						 * goes at the end.
2110 						 */
2111 						TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2112 					}
2113 				}
2114 			} else {
2115 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
2116 				if (*abort_flag) {
2117 					return (0);
2118 				}
2119 			}
2120 		}
2121 	} else {
2122 		/* Into the re-assembly queue */
2123 		sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2124 		if (*abort_flag) {
2125 			/*
2126 			 * the assoc is now gone and chk was put onto the
2127 			 * reasm queue, which has all been freed.
2128 			 */
2129 			*m = NULL;
2130 			return (0);
2131 		}
2132 	}
2133 finish_express_del:
2134 	if (tsn == (asoc->cumulative_tsn + 1)) {
2135 		/* Update cum-ack */
2136 		asoc->cumulative_tsn = tsn;
2137 	}
2138 	if (last_chunk) {
2139 		*m = NULL;
2140 	}
2141 	if (ordered) {
2142 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2143 	} else {
2144 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2145 	}
2146 	SCTP_STAT_INCR(sctps_recvdata);
2147 	/* Set it present please */
2148 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2149 		sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2150 	}
2151 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2152 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2153 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2154 	}
2155 	/* check the special flag for stream resets */
2156 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2157 	    ((compare_with_wrap(asoc->cumulative_tsn, liste->tsn, MAX_TSN)) ||
2158 	    (asoc->cumulative_tsn == liste->tsn))
2159 	    ) {
2160 		/*
2161 		 * we have finished working through the backlogged TSNs; now
2162 		 * it is time to reset streams. 1: call the reset function, 2:
2163 		 * free the pending_reply space, 3: distribute any chunks in
2164 		 * the pending_reply_queue.
2165 		 */
2166 		struct sctp_queued_to_read *ctl;
2167 
2168 		sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams);
2169 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2170 		SCTP_FREE(liste, SCTP_M_STRESET);
2171 		/* sa_ignore FREED_MEMORY */
2172 		liste = TAILQ_FIRST(&asoc->resetHead);
2173 		ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2174 		if (ctl && (liste == NULL)) {
2175 			/* All can be removed */
2176 			while (ctl) {
2177 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2178 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2179 				if (*abort_flag) {
2180 					return (0);
2181 				}
2182 				ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2183 			}
2184 		} else if (ctl) {
2185 			/* more than one in queue */
2186 			while (!compare_with_wrap(ctl->sinfo_tsn, liste->tsn, MAX_TSN)) {
2187 				/*
2188 				 * if ctl->sinfo_tsn is <= liste->tsn we can
2189 				 * process it, i.e. the negation of
2190 				 * ctl->sinfo_tsn > liste->tsn
2191 				 */
2192 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2193 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2194 				if (*abort_flag) {
2195 					return (0);
2196 				}
2197 				ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2198 			}
2199 		}
2200 		/*
2201 		 * Now service re-assembly to pick up anything that has been
2202 		 * held on the reassembly queue.
2203 		 */
2204 		sctp_deliver_reasm_check(stcb, asoc);
2205 		need_reasm_check = 0;
2206 	}
2207 	if (need_reasm_check) {
2208 		/* Another one waits? */
2209 		sctp_deliver_reasm_check(stcb, asoc);
2210 	}
2211 	return (1);
2212 }
2213 
2214 int8_t sctp_map_lookup_tab[256] = {
2215 	0, 1, 0, 2, 0, 1, 0, 3,
2216 	0, 1, 0, 2, 0, 1, 0, 4,
2217 	0, 1, 0, 2, 0, 1, 0, 3,
2218 	0, 1, 0, 2, 0, 1, 0, 5,
2219 	0, 1, 0, 2, 0, 1, 0, 3,
2220 	0, 1, 0, 2, 0, 1, 0, 4,
2221 	0, 1, 0, 2, 0, 1, 0, 3,
2222 	0, 1, 0, 2, 0, 1, 0, 6,
2223 	0, 1, 0, 2, 0, 1, 0, 3,
2224 	0, 1, 0, 2, 0, 1, 0, 4,
2225 	0, 1, 0, 2, 0, 1, 0, 3,
2226 	0, 1, 0, 2, 0, 1, 0, 5,
2227 	0, 1, 0, 2, 0, 1, 0, 3,
2228 	0, 1, 0, 2, 0, 1, 0, 4,
2229 	0, 1, 0, 2, 0, 1, 0, 3,
2230 	0, 1, 0, 2, 0, 1, 0, 7,
2231 	0, 1, 0, 2, 0, 1, 0, 3,
2232 	0, 1, 0, 2, 0, 1, 0, 4,
2233 	0, 1, 0, 2, 0, 1, 0, 3,
2234 	0, 1, 0, 2, 0, 1, 0, 5,
2235 	0, 1, 0, 2, 0, 1, 0, 3,
2236 	0, 1, 0, 2, 0, 1, 0, 4,
2237 	0, 1, 0, 2, 0, 1, 0, 3,
2238 	0, 1, 0, 2, 0, 1, 0, 6,
2239 	0, 1, 0, 2, 0, 1, 0, 3,
2240 	0, 1, 0, 2, 0, 1, 0, 4,
2241 	0, 1, 0, 2, 0, 1, 0, 3,
2242 	0, 1, 0, 2, 0, 1, 0, 5,
2243 	0, 1, 0, 2, 0, 1, 0, 3,
2244 	0, 1, 0, 2, 0, 1, 0, 4,
2245 	0, 1, 0, 2, 0, 1, 0, 3,
2246 	0, 1, 0, 2, 0, 1, 0, 8
2247 };
2248 
2249 
2250 void
2251 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2252 {
2253 	/*
2254 	 * Now we also need to check the mapping array in a couple of ways.
2255 	 * 1) Did we move the cum-ack point?
2256 	 *
2257 	 * When you first glance at this you might think that all entries that
2258 	 * make up the position of the cum-ack would be in the nr-mapping
2259 	 * array only.. i.e. things up to the cum-ack are always
2260 	 * deliverable. That's true with one exception: when it's a fragmented
2261 	 * message we may not deliver the data until some threshold (or all
2262 	 * of it) is in place. So we must OR the nr_mapping_array and
2263 	 * mapping_array to get a true picture of the cum-ack.
2264 	 */
2265 	struct sctp_association *asoc;
2266 	int at;
2267 	uint8_t val;
2268 	int slide_from, slide_end, lgap, distance;
2269 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2270 
2271 	asoc = &stcb->asoc;
2272 	at = 0;
2273 
2274 	old_cumack = asoc->cumulative_tsn;
2275 	old_base = asoc->mapping_array_base_tsn;
2276 	old_highest = asoc->highest_tsn_inside_map;
2277 	/*
2278 	 * We could probably improve this a small bit by calculating the
2279 	 * offset of the current cum-ack as the starting point.
2280 	 */
2281 	at = 0;
2282 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2283 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2284 		if (val == 0xff) {
2285 			at += 8;
2286 		} else {
2287 			/* there is a 0 bit */
2288 			at += sctp_map_lookup_tab[val];
2289 			break;
2290 		}
2291 	}
2292 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
2293 
2294 	if (compare_with_wrap(asoc->cumulative_tsn, asoc->highest_tsn_inside_map, MAX_TSN) &&
2295 	    compare_with_wrap(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
2296 #ifdef INVARIANTS
2297 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2298 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2299 #else
2300 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2301 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2302 		sctp_print_mapping_array(asoc);
2303 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2304 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2305 		}
2306 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2307 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2308 #endif
2309 	}
2310 	if (compare_with_wrap(asoc->highest_tsn_inside_nr_map,
2311 	    asoc->highest_tsn_inside_map,
2312 	    MAX_TSN)) {
2313 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2314 	} else {
2315 		highest_tsn = asoc->highest_tsn_inside_map;
2316 	}
2317 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2318 		/* The complete array was completed by a single FR */
2319 		/* highest becomes the cum-ack */
2320 		int clr;
2321 
2322 #ifdef INVARIANTS
2323 		unsigned int i;
2324 
2325 #endif
2326 
2327 		/* clear the array */
2328 		clr = ((at + 7) >> 3);
2329 		if (clr > asoc->mapping_array_size) {
2330 			clr = asoc->mapping_array_size;
2331 		}
2332 		memset(asoc->mapping_array, 0, clr);
2333 		memset(asoc->nr_mapping_array, 0, clr);
2334 #ifdef INVARIANTS
2335 		for (i = 0; i < asoc->mapping_array_size; i++) {
2336 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2337 				printf("Error Mapping array's not clean at clear\n");
2338 				sctp_print_mapping_array(asoc);
2339 			}
2340 		}
2341 #endif
2342 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2343 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2344 	} else if (at >= 8) {
2345 		/* we can slide the mapping array down */
2346 		/* slide_from holds where we hit the first NON 0xff byte */
2347 
2348 		/*
2349 		 * now calculate the ceiling of the move using our highest
2350 		 * TSN value
2351 		 */
2352 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2353 		slide_end = (lgap >> 3);
2354 		if (slide_end < slide_from) {
2355 			sctp_print_mapping_array(asoc);
2356 #ifdef INVARIANTS
2357 			panic("impossible slide");
2358 #else
2359 			printf("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
2360 			    lgap, slide_end, slide_from, at);
2361 			return;
2362 #endif
2363 		}
2364 		if (slide_end > asoc->mapping_array_size) {
2365 #ifdef INVARIANTS
2366 			panic("would overrun buffer");
2367 #else
2368 			printf("Gak, would have overrun map end:%d slide_end:%d\n",
2369 			    asoc->mapping_array_size, slide_end);
2370 			slide_end = asoc->mapping_array_size;
2371 #endif
2372 		}
2373 		distance = (slide_end - slide_from) + 1;
2374 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2375 			sctp_log_map(old_base, old_cumack, old_highest,
2376 			    SCTP_MAP_PREPARE_SLIDE);
2377 			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2378 			    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2379 		}
2380 		if (distance + slide_from > asoc->mapping_array_size ||
2381 		    distance < 0) {
2382 			/*
2383 			 * Here we do NOT slide forward the array so that
2384 			 * hopefully when more data comes in to fill it up
2385 			 * we will be able to slide it forward. Really I
2386 			 * don't think this should happen :-0
2387 			 */
2388 
2389 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2390 				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2391 				    (uint32_t) asoc->mapping_array_size,
2392 				    SCTP_MAP_SLIDE_NONE);
2393 			}
2394 		} else {
2395 			int ii;
2396 
2397 			for (ii = 0; ii < distance; ii++) {
2398 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2399 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2400 
2401 			}
2402 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2403 				asoc->mapping_array[ii] = 0;
2404 				asoc->nr_mapping_array[ii] = 0;
2405 			}
2406 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2407 				asoc->highest_tsn_inside_map += (slide_from << 3);
2408 			}
2409 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2410 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2411 			}
2412 			asoc->mapping_array_base_tsn += (slide_from << 3);
2413 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2414 				sctp_log_map(asoc->mapping_array_base_tsn,
2415 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2416 				    SCTP_MAP_SLIDE_RESULT);
2417 			}
2418 		}
2419 	}
2420 }
2421 
2422 
2423 void
2424 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap, int *abort_flag)
2425 {
2426 	struct sctp_association *asoc;
2427 	uint32_t highest_tsn;
2428 
2429 	asoc = &stcb->asoc;
2430 	if (compare_with_wrap(asoc->highest_tsn_inside_nr_map,
2431 	    asoc->highest_tsn_inside_map,
2432 	    MAX_TSN)) {
2433 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2434 	} else {
2435 		highest_tsn = asoc->highest_tsn_inside_map;
2436 	}
2437 
2438 	/*
2439 	 * Now we need to see if we need to queue a sack or just start the
2440 	 * timer (if allowed).
2441 	 */
2442 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2443 		/*
2444 		 * Ok, special case: in the SHUTDOWN-SENT case, we make
2445 		 * sure the SACK timer is off and instead send a SHUTDOWN
2446 		 * and a SACK.
2447 		 */
2448 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2449 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2450 			    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2451 		}
2452 		sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
2453 		sctp_send_sack(stcb);
2454 	} else {
2455 		int is_a_gap;
2456 
2457 		/* is there a gap now? */
2458 		is_a_gap = compare_with_wrap(highest_tsn, stcb->asoc.cumulative_tsn, MAX_TSN);
2459 
2460 		/*
2461 		 * CMT DAC algorithm: increase number of packets received
2462 		 * since last ack
2463 		 */
2464 		stcb->asoc.cmt_dac_pkts_rcvd++;
2465 
2466 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2467 							 * SACK */
2468 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2469 							 * longer is one */
2470 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2471 		    (is_a_gap) ||	/* is still a gap */
2472 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2473 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2474 		    ) {
2475 
2476 			if ((SCTP_BASE_SYSCTL(sctp_cmt_on_off)) &&
2477 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2478 			    (stcb->asoc.send_sack == 0) &&
2479 			    (stcb->asoc.numduptsns == 0) &&
2480 			    (stcb->asoc.delayed_ack) &&
2481 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2482 
2483 				/*
2484 				 * CMT DAC algorithm: With CMT, delay acks
2485 				 * even in the face of reordering. Therefore,
2486 				 * acks that do not have to be sent because of
2487 				 * the above reasons will be delayed. That is,
2488 				 * acks that would have been sent due to gap
2489 				 * reports will be delayed with DAC. Start
2490 				 * the delayed ack timer.
2493 				 */
2494 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2495 				    stcb->sctp_ep, stcb, NULL);
2496 			} else {
2497 				/*
2498 				 * Ok we must build a SACK since the timer
2499 				 * is pending, we got our first packet OR
2500 				 * there are gaps or duplicates.
2501 				 */
2502 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2503 				sctp_send_sack(stcb);
2504 			}
2505 		} else {
2506 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2507 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2508 				    stcb->sctp_ep, stcb, NULL);
2509 			}
2510 		}
2511 	}
2512 }
2513 
2514 void
2515 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2516 {
2517 	struct sctp_tmit_chunk *chk;
2518 	uint32_t tsize, pd_point;
2519 	uint16_t nxt_todel;
2520 
2521 	if (asoc->fragmented_delivery_inprogress) {
2522 		sctp_service_reassembly(stcb, asoc);
2523 	}
2524 	/* Can we proceed further, i.e. the PD-API is complete */
2525 	if (asoc->fragmented_delivery_inprogress) {
2526 		/* no */
2527 		return;
2528 	}
2529 	/*
2530 	 * Now is there some other chunk I can deliver from the reassembly
2531 	 * queue.
2532 	 */
2533 doit_again:
2534 	chk = TAILQ_FIRST(&asoc->reasmqueue);
2535 	if (chk == NULL) {
2536 		asoc->size_on_reasm_queue = 0;
2537 		asoc->cnt_on_reasm_queue = 0;
2538 		return;
2539 	}
2540 	nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2541 	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2542 	    ((nxt_todel == chk->rec.data.stream_seq) ||
2543 	    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2544 		/*
2545 		 * Yep, the first one is here. We set up to start reception
2546 		 * by backing down the TSN just in case we can't deliver.
2547 		 */
2548 
2549 		/*
2550 		 * Before we start, though, either the whole message should
2551 		 * be here, or at least enough of it to reach the partial
2552 		 * delivery point (bounded by the socket buffer limit).
2553 		 */
2554 		if (stcb->sctp_socket) {
2555 			pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
2556 			    stcb->sctp_ep->partial_delivery_point);
2557 		} else {
2558 			pd_point = stcb->sctp_ep->partial_delivery_point;
2559 		}
2560 		if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
2561 			asoc->fragmented_delivery_inprogress = 1;
2562 			asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2563 			asoc->str_of_pdapi = chk->rec.data.stream_number;
2564 			asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2565 			asoc->pdapi_ppid = chk->rec.data.payloadtype;
2566 			asoc->fragment_flags = chk->rec.data.rcv_flags;
2567 			sctp_service_reassembly(stcb, asoc);
2568 			if (asoc->fragmented_delivery_inprogress == 0) {
2569 				goto doit_again;
2570 			}
2571 		}
2572 	}
2573 }
2574 
2575 int
2576 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2577     struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2578     struct sctp_nets *net, uint32_t * high_tsn)
2579 {
2580 	struct sctp_data_chunk *ch, chunk_buf;
2581 	struct sctp_association *asoc;
2582 	int num_chunks = 0;	/* number of DATA chunks processed */
2583 	int stop_proc = 0;
2584 	int chk_length, break_flag, last_chunk;
2585 	int abort_flag = 0, was_a_gap = 0;
2586 	struct mbuf *m;
2587 
2588 	/* set the rwnd */
2589 	sctp_set_rwnd(stcb, &stcb->asoc);
2590 
2591 	m = *mm;
2592 	SCTP_TCB_LOCK_ASSERT(stcb);
2593 	asoc = &stcb->asoc;
2594 	if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2595 	    stcb->asoc.cumulative_tsn, MAX_TSN)) {
2596 		/* there was a gap before this data was processed */
2597 		was_a_gap = 1;
2598 	}
2599 	/*
2600 	 * setup where we got the last DATA packet from for any SACK that
2601 	 * may need to go out. Don't bump the net. This is done ONLY when a
2602 	 * chunk is assigned.
2603 	 */
2604 	asoc->last_data_chunk_from = net;
2605 
2606 	/*-
2607 	 * Now before we proceed we must figure out if this is a wasted
2608 	 * cluster... i.e. it is a small packet sent in and yet the driver
2609 	 * underneath allocated a full cluster for it. If so we must copy it
2610 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2611 	 * with cluster starvation. Note for __Panda__ we don't do this
2612 	 * since it has clusters all the way down to 64 bytes.
2613 	 */
2614 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2615 		/* we only handle mbufs that are singletons.. not chains */
2616 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA);
2617 		if (m) {
2618 			/* ok, let's see if we can copy the data up */
2619 			caddr_t *from, *to;
2620 
2621 			/* get the pointers and copy */
2622 			to = mtod(m, caddr_t *);
2623 			from = mtod((*mm), caddr_t *);
2624 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2625 			/* copy the length and free up the old */
2626 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2627 			sctp_m_freem(*mm);
2628 			/* success, copy back */
2629 			*mm = m;
2630 		} else {
2631 			/* We are in trouble in the mbuf world .. yikes */
2632 			m = *mm;
2633 		}
2634 	}
2635 	/* get pointer to the first chunk header */
2636 	ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2637 	    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2638 	if (ch == NULL) {
2639 		return (1);
2640 	}
2641 	/*
2642 	 * process all DATA chunks...
2643 	 */
2644 	*high_tsn = asoc->cumulative_tsn;
2645 	break_flag = 0;
2646 	asoc->data_pkts_seen++;
2647 	while (stop_proc == 0) {
2648 		/* validate chunk length */
2649 		chk_length = ntohs(ch->ch.chunk_length);
2650 		if (length - *offset < chk_length) {
2651 			/* all done, mutilated chunk */
2652 			stop_proc = 1;
2653 			break;
2654 		}
2655 		if (ch->ch.chunk_type == SCTP_DATA) {
2656 			if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
2657 				/*
2658 				 * Need to send an abort since we had an
2659 				 * invalid data chunk.
2660 				 */
2661 				struct mbuf *op_err;
2662 
2663 				op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
2664 				    0, M_DONTWAIT, 1, MT_DATA);
2665 
2666 				if (op_err) {
2667 					struct sctp_paramhdr *ph;
2668 					uint32_t *ippp;
2669 
2670 					SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
2671 					    (2 * sizeof(uint32_t));
2672 					ph = mtod(op_err, struct sctp_paramhdr *);
2673 					ph->param_type =
2674 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2675 					ph->param_length = htons(SCTP_BUF_LEN(op_err));
2676 					ippp = (uint32_t *) (ph + 1);
2677 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2678 					ippp++;
2679 					*ippp = asoc->cumulative_tsn;
2680 
2681 				}
2682 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2683 				sctp_abort_association(inp, stcb, m, iphlen, sh,
2684 				    op_err, 0, net->port);
2685 				return (2);
2686 			}
2687 #ifdef SCTP_AUDITING_ENABLED
2688 			sctp_audit_log(0xB1, 0);
2689 #endif
2690 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2691 				last_chunk = 1;
2692 			} else {
2693 				last_chunk = 0;
2694 			}
2695 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2696 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2697 			    last_chunk)) {
2698 				num_chunks++;
2699 			}
2700 			if (abort_flag)
2701 				return (2);
2702 
2703 			if (break_flag) {
2704 				/*
2705 				 * Set because of out of rwnd space and no
2706 				 * drop rep space left.
2707 				 */
2708 				stop_proc = 1;
2709 				break;
2710 			}
2711 		} else {
2712 			/* not a data chunk in the data region */
2713 			switch (ch->ch.chunk_type) {
2714 			case SCTP_INITIATION:
2715 			case SCTP_INITIATION_ACK:
2716 			case SCTP_SELECTIVE_ACK:
2717 			case SCTP_NR_SELECTIVE_ACK:	/* EY */
2718 			case SCTP_HEARTBEAT_REQUEST:
2719 			case SCTP_HEARTBEAT_ACK:
2720 			case SCTP_ABORT_ASSOCIATION:
2721 			case SCTP_SHUTDOWN:
2722 			case SCTP_SHUTDOWN_ACK:
2723 			case SCTP_OPERATION_ERROR:
2724 			case SCTP_COOKIE_ECHO:
2725 			case SCTP_COOKIE_ACK:
2726 			case SCTP_ECN_ECHO:
2727 			case SCTP_ECN_CWR:
2728 			case SCTP_SHUTDOWN_COMPLETE:
2729 			case SCTP_AUTHENTICATION:
2730 			case SCTP_ASCONF_ACK:
2731 			case SCTP_PACKET_DROPPED:
2732 			case SCTP_STREAM_RESET:
2733 			case SCTP_FORWARD_CUM_TSN:
2734 			case SCTP_ASCONF:
2735 				/*
2736 				 * Now, what do we do with KNOWN chunks that
2737 				 * are NOT in the right place?
2738 				 *
2739 				 * For now, I do nothing but ignore them. We
2740 				 * may later want to add sysctl stuff to
2741 				 * switch out and do either an ABORT() or
2742 				 * possibly process them.
2743 				 */
2744 				if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2745 					struct mbuf *op_err;
2746 
2747 					op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
2748 					sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 0, net->port);
2749 					return (2);
2750 				}
2751 				break;
2752 			default:
2753 				/* unknown chunk type, use bit rules */
2754 				if (ch->ch.chunk_type & 0x40) {
2755 					/* Add an error report to the queue */
2756 					struct mbuf *merr;
2757 					struct sctp_paramhdr *phd;
2758 
2759 					merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA);
2760 					if (merr) {
2761 						phd = mtod(merr, struct sctp_paramhdr *);
2762 						/*
2763 						 * We cheat and use param
2764 						 * type since we did not
2765 						 * bother to define an error
2766 						 * cause struct. They are
2767 						 * the same basic format
2768 						 * with different names.
2769 						 */
2770 						phd->param_type =
2771 						    htons(SCTP_CAUSE_UNRECOG_CHUNK);
2772 						phd->param_length =
2773 						    htons(chk_length + sizeof(*phd));
2774 						SCTP_BUF_LEN(merr) = sizeof(*phd);
2775 						SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset,
2776 						    SCTP_SIZE32(chk_length),
2777 						    M_DONTWAIT);
2778 						if (SCTP_BUF_NEXT(merr)) {
2779 							sctp_queue_op_err(stcb, merr);
2780 						} else {
2781 							sctp_m_freem(merr);
2782 						}
2783 					}
2784 				}
2785 				if ((ch->ch.chunk_type & 0x80) == 0) {
2786 					/* discard the rest of this packet */
2787 					stop_proc = 1;
2788 				}	/* else skip this bad chunk and
2789 					 * continue... */
2790 				break;
2791 			}	/* switch of chunk type */
2792 		}
2793 		*offset += SCTP_SIZE32(chk_length);
2794 		if ((*offset >= length) || stop_proc) {
2795 			/* no more data left in the mbuf chain */
2796 			stop_proc = 1;
2797 			continue;
2798 		}
2799 		ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2800 		    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2801 		if (ch == NULL) {
2802 			*offset = length;
2803 			stop_proc = 1;
2804 			break;
2805 
2806 		}
2807 	}			/* while */
2808 	if (break_flag) {
2809 		/*
2810 		 * we need to report rwnd overrun drops.
2811 		 */
2812 		sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
2813 	}
2814 	if (num_chunks) {
2815 		/*
2816 		 * Did we get data? If so, update the time for auto-close and
2817 		 * give the peer credit for being alive.
2818 		 */
2819 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2820 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2821 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2822 			    stcb->asoc.overall_error_count,
2823 			    0,
2824 			    SCTP_FROM_SCTP_INDATA,
2825 			    __LINE__);
2826 		}
2827 		stcb->asoc.overall_error_count = 0;
2828 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2829 	}
2830 	/* now service all of the reassm queue if needed */
2831 	if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
2832 		sctp_service_queues(stcb, asoc);
2833 
2834 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2835 		/* Assure that we ack right away */
2836 		stcb->asoc.send_sack = 1;
2837 	}
2838 	/* Start a sack timer or QUEUE a SACK for sending */
2839 	sctp_sack_check(stcb, was_a_gap, &abort_flag);
2840 	if (abort_flag)
2841 		return (2);
2842 
2843 	return (0);
2844 }
2845 
2846 static int
2847 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2848     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2849     int *num_frs,
2850     uint32_t * biggest_newly_acked_tsn,
2851     uint32_t * this_sack_lowest_newack,
2852     int *ecn_seg_sums)
2853 {
2854 	struct sctp_tmit_chunk *tp1;
2855 	unsigned int theTSN;
2856 	int j, wake_him = 0, circled = 0;
2857 
2858 	/* Recover the tp1 we last saw */
2859 	tp1 = *p_tp1;
2860 	if (tp1 == NULL) {
2861 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2862 	}
2863 	for (j = frag_strt; j <= frag_end; j++) {
2864 		theTSN = j + last_tsn;
2865 		while (tp1) {
2866 			if (tp1->rec.data.doing_fast_retransmit)
2867 				(*num_frs) += 1;
2868 
2869 			/*-
2870 			 * CMT: CUCv2 algorithm. For each TSN being
2871 			 * processed from the sent queue, track the
2872 			 * next expected pseudo-cumack, or
2873 			 * rtx_pseudo_cumack, if required. Separate
2874 			 * cumack trackers for first transmissions,
2875 			 * and retransmissions.
2876 			 */
2877 			if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2878 			    (tp1->snd_count == 1)) {
2879 				tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2880 				tp1->whoTo->find_pseudo_cumack = 0;
2881 			}
2882 			if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2883 			    (tp1->snd_count > 1)) {
2884 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2885 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2886 			}
2887 			if (tp1->rec.data.TSN_seq == theTSN) {
2888 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2889 					/*-
2890 					 * must be held until
2891 					 * cum-ack passes
2892 					 */
2893 					/*-
2894 					 * ECN Nonce: Add the nonce
2895 					 * value to the sender's
2896 					 * nonce sum
2897 					 */
2898 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2899 						/*-
2900 						 * If it is less than RESEND, it is
2901 						 * now no longer in flight.
2902 						 * Higher values may already be set
2903 						 * via previous Gap Ack Blocks...
2904 						 * i.e. ACKED or RESEND.
2905 						 */
2906 						if (compare_with_wrap(tp1->rec.data.TSN_seq,
2907 						    *biggest_newly_acked_tsn, MAX_TSN)) {
2908 							*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2909 						}
2910 						/*-
2911 						 * CMT: SFR algo (and HTNA) - set
2912 						 * saw_newack to 1 for dest being
2913 						 * newly acked. update
2914 						 * this_sack_highest_newack if
2915 						 * appropriate.
2916 						 */
2917 						if (tp1->rec.data.chunk_was_revoked == 0)
2918 							tp1->whoTo->saw_newack = 1;
2919 
2920 						if (compare_with_wrap(tp1->rec.data.TSN_seq,
2921 						    tp1->whoTo->this_sack_highest_newack,
2922 						    MAX_TSN)) {
2923 							tp1->whoTo->this_sack_highest_newack =
2924 							    tp1->rec.data.TSN_seq;
2925 						}
2926 						/*-
2927 						 * CMT DAC algo: also update
2928 						 * this_sack_lowest_newack
2929 						 */
2930 						if (*this_sack_lowest_newack == 0) {
2931 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2932 								sctp_log_sack(*this_sack_lowest_newack,
2933 								    last_tsn,
2934 								    tp1->rec.data.TSN_seq,
2935 								    0,
2936 								    0,
2937 								    SCTP_LOG_TSN_ACKED);
2938 							}
2939 							*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2940 						}
2941 						/*-
2942 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2943 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2944 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2945 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2946 						 * Separate pseudo_cumack trackers for first transmissions and
2947 						 * retransmissions.
2948 						 */
2949 						if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2950 							if (tp1->rec.data.chunk_was_revoked == 0) {
2951 								tp1->whoTo->new_pseudo_cumack = 1;
2952 							}
2953 							tp1->whoTo->find_pseudo_cumack = 1;
2954 						}
2955 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2956 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2957 						}
2958 						if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2959 							if (tp1->rec.data.chunk_was_revoked == 0) {
2960 								tp1->whoTo->new_pseudo_cumack = 1;
2961 							}
2962 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
2963 						}
2964 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2965 							sctp_log_sack(*biggest_newly_acked_tsn,
2966 							    last_tsn,
2967 							    tp1->rec.data.TSN_seq,
2968 							    frag_strt,
2969 							    frag_end,
2970 							    SCTP_LOG_TSN_ACKED);
2971 						}
2972 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2973 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2974 							    tp1->whoTo->flight_size,
2975 							    tp1->book_size,
2976 							    (uintptr_t) tp1->whoTo,
2977 							    tp1->rec.data.TSN_seq);
2978 						}
2979 						sctp_flight_size_decrease(tp1);
2980 						sctp_total_flight_decrease(stcb, tp1);
2981 
2982 						tp1->whoTo->net_ack += tp1->send_size;
2983 						if (tp1->snd_count < 2) {
2984 							/*-
2985 							 * True non-retransmitted chunk
2986 							 */
2987 							tp1->whoTo->net_ack2 += tp1->send_size;
2988 
2989 							/*-
2990 							 * update RTO too?
2991 							 */
2992 							if (tp1->do_rtt) {
2993 								tp1->whoTo->RTO =
2994 								    sctp_calculate_rto(stcb,
2995 								    &stcb->asoc,
2996 								    tp1->whoTo,
2997 								    &tp1->sent_rcv_time,
2998 								    sctp_align_safe_nocopy);
2999 								tp1->do_rtt = 0;
3000 							}
3001 						}
3002 					}
3003 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3004 						(*ecn_seg_sums) += tp1->rec.data.ect_nonce;
3005 						(*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM;
3006 						if (compare_with_wrap(tp1->rec.data.TSN_seq,
3007 						    stcb->asoc.this_sack_highest_gap,
3008 						    MAX_TSN)) {
3009 							stcb->asoc.this_sack_highest_gap =
3010 							    tp1->rec.data.TSN_seq;
3011 						}
3012 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3013 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3014 #ifdef SCTP_AUDITING_ENABLED
3015 							sctp_audit_log(0xB2,
3016 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3017 #endif
3018 						}
3019 					}
3020 					/*-
3021 					 * All chunks NOT UNSENT fall through here and are marked
3022 					 * (leave PR-SCTP ones that are to skip alone though)
3023 					 */
3024 					if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
3025 						tp1->sent = SCTP_DATAGRAM_MARKED;
3026 
3027 					if (tp1->rec.data.chunk_was_revoked) {
3028 						/* deflate the cwnd */
3029 						tp1->whoTo->cwnd -= tp1->book_size;
3030 						tp1->rec.data.chunk_was_revoked = 0;
3031 					}
3032 					/* NR Sack code here */
3033 					if (nr_sacking) {
3034 						if (tp1->data) {
3035 							/*
3036 							 * sa_ignore
3037 							 * NO_NULL_CHK
3038 							 */
3039 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3040 							sctp_m_freem(tp1->data);
3041 							tp1->data = NULL;
3042 						}
3043 						wake_him++;
3044 					}
3045 				}
3046 				break;
3047 			}	/* if (tp1->TSN_seq == theTSN) */
3048 			if (compare_with_wrap(tp1->rec.data.TSN_seq, theTSN,
3049 			    MAX_TSN))
3050 				break;
3051 
3052 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3053 			if ((tp1 == NULL) && (circled == 0)) {
3054 				circled++;
3055 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3056 			}
3057 		}		/* end while (tp1) */
3058 		if (tp1 == NULL) {
3059 			circled = 0;
3060 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3061 		}
3062 		/* In case the fragments were not in order we must reset */
3063 	}			/* end for (j = fragStart */
3064 	*p_tp1 = tp1;
3065 	return (wake_him);	/* Return value only used for nr-sack */
3066 }
3067 
3068 
3069 static int
3070 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3071     uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3072     uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3073     int num_seg, int num_nr_seg, int *ecn_seg_sums)
3074 {
3075 	struct sctp_gap_ack_block *frag, block;
3076 	struct sctp_tmit_chunk *tp1;
3077 	int i;
3078 	int num_frs = 0;
3079 	int chunk_freed;
3080 	int non_revocable;
3081 	uint16_t frag_strt, frag_end;
3082 	uint32_t last_frag_high;
3083 
3084 	tp1 = NULL;
3085 	last_frag_high = 0;
3086 	chunk_freed = 0;
3087 
3088 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3089 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3090 		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3091 		*offset += sizeof(block);
3092 		if (frag == NULL) {
3093 			return (chunk_freed);
3094 		}
3095 		frag_strt = ntohs(frag->start);
3096 		frag_end = ntohs(frag->end);
3097 		/* some sanity checks on the fragment offsets */
3098 		if (frag_strt > frag_end) {
3099 			/* this one is malformed, skip */
3100 			continue;
3101 		}
3102 		if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked,
3103 		    MAX_TSN))
3104 			*biggest_tsn_acked = frag_end + last_tsn;
3105 
3106 		/* mark acked dgs and find out the highestTSN being acked */
3107 		if (tp1 == NULL) {
3108 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3109 			/* save the locations of the last frags */
3110 			last_frag_high = frag_end + last_tsn;
3111 		} else {
3112 			/*
3113 			 * now let's see if we need to reset the queue due to
3114 			 * an out-of-order SACK fragment
3115 			 */
3116 			if (compare_with_wrap(frag_strt + last_tsn,
3117 			    last_frag_high, MAX_TSN)) {
3118 				/*
3119 				 * if the new frag starts after the last TSN
3120 				 * frag covered, we are ok and this one is
3121 				 * beyond the last one
3122 				 */
3123 				;
3124 			} else {
3125 				/*
3126 				 * ok, they have reset us, so we need to
3127 				 * reset the queue; this will cause extra
3128 				 * hunting, but hey, they chose the
3129 				 * performance hit when they failed to order
3130 				 * their gaps
3131 				 */
3132 				tp1 = TAILQ_FIRST(&asoc->sent_queue);
3133 			}
3134 			last_frag_high = frag_end + last_tsn;
3135 		}
3136 		if (i < num_seg) {
3137 			non_revocable = 0;
3138 		} else {
3139 			non_revocable = 1;
3140 		}
3141 		if (i == num_seg) {
3142 			tp1 = NULL;
3143 		}
3144 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3145 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3146 		    this_sack_lowest_newack, ecn_seg_sums)) {
3147 			chunk_freed = 1;
3148 		}
3149 	}
3150 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3151 		if (num_frs)
3152 			sctp_log_fr(*biggest_tsn_acked,
3153 			    *biggest_newly_acked_tsn,
3154 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3155 	}
3156 	return (chunk_freed);
3157 }
3158 
3159 static void
3160 sctp_check_for_revoked(struct sctp_tcb *stcb,
3161     struct sctp_association *asoc, uint32_t cumack,
3162     uint32_t biggest_tsn_acked)
3163 {
3164 	struct sctp_tmit_chunk *tp1;
3165 	int tot_revoked = 0;
3166 
3167 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3168 	while (tp1) {
3169 		if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack,
3170 		    MAX_TSN)) {
3171 			/*
3172 			 * ok this guy is either ACKED or MARKED. If it is
3173 			 * ACKED it has been previously acked but not this
3174 			 * time, i.e. revoked. If it is MARKED it was ACK'ed
3175 			 * again.
3176 			 */
3177 			if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3178 			    MAX_TSN))
3179 				break;
3180 
3181 
3182 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3183 				/* it has been revoked */
3184 				tp1->sent = SCTP_DATAGRAM_SENT;
3185 				tp1->rec.data.chunk_was_revoked = 1;
3186 				/*
3187 				 * We must add this stuff back in to assure
3188 				 * timers and such get started.
3189 				 */
3190 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3191 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3192 					    tp1->whoTo->flight_size,
3193 					    tp1->book_size,
3194 					    (uintptr_t) tp1->whoTo,
3195 					    tp1->rec.data.TSN_seq);
3196 				}
3197 				sctp_flight_size_increase(tp1);
3198 				sctp_total_flight_increase(stcb, tp1);
3199 				/*
3200 				 * We inflate the cwnd to compensate for our
3201 				 * artificial inflation of the flight_size.
3202 				 */
3203 				tp1->whoTo->cwnd += tp1->book_size;
3204 				tot_revoked++;
3205 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3206 					sctp_log_sack(asoc->last_acked_seq,
3207 					    cumack,
3208 					    tp1->rec.data.TSN_seq,
3209 					    0,
3210 					    0,
3211 					    SCTP_LOG_TSN_REVOKED);
3212 				}
3213 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3214 				/* it has been re-acked in this SACK */
3215 				tp1->sent = SCTP_DATAGRAM_ACKED;
3216 			}
3217 		}
3218 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3219 			break;
3220 		tp1 = TAILQ_NEXT(tp1, sctp_next);
3221 	}
3222 	if (tot_revoked > 0) {
3223 		/*
3224 		 * Set up the ECN nonce re-sync point. We do this since once
3225 		 * data is revoked we begin to retransmit things, which do
3226 		 * NOT have the ECN bits set. This means we are now out of
3227 		 * sync and must wait until we get back in sync with the
3228 		 * peer to check ECN bits.
3229 		 */
3230 		tp1 = TAILQ_FIRST(&asoc->send_queue);
3231 		if (tp1 == NULL) {
3232 			asoc->nonce_resync_tsn = asoc->sending_seq;
3233 		} else {
3234 			asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq;
3235 		}
3236 		asoc->nonce_wait_for_ecne = 0;
3237 		asoc->nonce_sum_check = 0;
3238 	}
3239 }
3240 
3241 
3242 static void
3243 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3244     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3245 {
3246 	struct sctp_tmit_chunk *tp1;
3247 	int strike_flag = 0;
3248 	struct timeval now;
3249 	int tot_retrans = 0;
3250 	uint32_t sending_seq;
3251 	struct sctp_nets *net;
3252 	int num_dests_sacked = 0;
3253 
3254 	/*
3255 	 * select the sending_seq: this is either the next thing ready to
3256 	 * be sent but not transmitted, OR, the next seq we will assign.
3257 	 */
3258 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3259 	if (tp1 == NULL) {
3260 		sending_seq = asoc->sending_seq;
3261 	} else {
3262 		sending_seq = tp1->rec.data.TSN_seq;
3263 	}
3264 
3265 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3266 	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3267 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3268 			if (net->saw_newack)
3269 				num_dests_sacked++;
3270 		}
3271 	}
3272 	if (stcb->asoc.peer_supports_prsctp) {
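	/*
	 * Note, for illustration: num_dests_sacked counts destinations
	 * that saw a new ack in this SACK. A value of 1 means this is
	 * not a "mixed" SACK across destinations, which the CMT DAC
	 * strike checks below depend on.
	 */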
3273 		(void)SCTP_GETTIME_TIMEVAL(&now);
3274 	}
3275 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3276 	while (tp1) {
3277 		strike_flag = 0;
3278 		if (tp1->no_fr_allowed) {
3279 			/* this one had a timeout or something */
3280 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3281 			continue;
3282 		}
3283 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3284 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3285 				sctp_log_fr(biggest_tsn_newly_acked,
3286 				    tp1->rec.data.TSN_seq,
3287 				    tp1->sent,
3288 				    SCTP_FR_LOG_CHECK_STRIKE);
3289 		}
3290 		if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3291 		    MAX_TSN) ||
3292 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3293 			/* done */
3294 			break;
3295 		}
3296 		if (stcb->asoc.peer_supports_prsctp) {
3297 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3298 				/* Is it expired? */
3299 				if (
3300 				/*
3301 				 * TODO sctp_constants.h needs alternative
3302 				 * time macros when _KERNEL is undefined.
3303 				 */
3304 				    (timevalcmp(&now, &tp1->rec.data.timetodrop, >))
3305 				    ) {
3306 					/* Yes so drop it */
3307 					if (tp1->data != NULL) {
3308 						(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3309 						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3310 						    SCTP_SO_NOT_LOCKED);
3311 					}
3312 					tp1 = TAILQ_NEXT(tp1, sctp_next);
3313 					continue;
3314 				}
3315 			}
3316 		}
3317 		if (compare_with_wrap(tp1->rec.data.TSN_seq,
3318 		    asoc->this_sack_highest_gap, MAX_TSN)) {
3319 			/* we are beyond the tsn in the sack  */
3320 			break;
3321 		}
3322 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3323 			/* either a RESEND, ACKED, or MARKED */
3324 			/* skip */
3325 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3326 			continue;
3327 		}
3328 		/*
3329 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3330 		 */
3331 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3332 			/*
3333 			 * No new acks were received for data sent to this
3334 			 * dest. Therefore, according to the SFR algo for
3335 			 * CMT, no data sent to this dest can be marked for
3336 			 * FR using this SACK.
3337 			 */
3338 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3339 			continue;
3340 		} else if (tp1->whoTo && compare_with_wrap(tp1->rec.data.TSN_seq,
3341 		    tp1->whoTo->this_sack_highest_newack, MAX_TSN)) {
3342 			/*
3343 			 * CMT: New acks were received for data sent to
3344 			 * this dest. But no new acks were seen for data
3345 			 * sent after tp1. Therefore, according to the SFR
3346 			 * algo for CMT, tp1 cannot be marked for FR using
3347 			 * this SACK. This step covers part of the DAC algo
3348 			 * and the HTNA algo as well.
3349 			 */
3350 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3351 			continue;
3352 		}
3353 		/*
3354 		 * Here we check to see if we have already done a FR
3355 		 * and if so we see if the biggest TSN we saw in the sack is
3356 		 * smaller than the recovery point. If so we don't strike
3357 		 * the tsn... otherwise we CAN strike the TSN.
3358 		 */
3359 		/*
3360 		 * @@@ JRI: Check for CMT if (accum_moved &&
3361 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3362 		 * 0)) {
3363 		 */
3364 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3365 			/*
3366 			 * Strike the TSN if in fast-recovery and cum-ack
3367 			 * moved.
3368 			 */
3369 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3370 				sctp_log_fr(biggest_tsn_newly_acked,
3371 				    tp1->rec.data.TSN_seq,
3372 				    tp1->sent,
3373 				    SCTP_FR_LOG_STRIKE_CHUNK);
3374 			}
3375 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3376 				tp1->sent++;
3377 			}
3378 			if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3379 				/*
3380 				 * CMT DAC algorithm: If the SACK flag is set
3381 				 * to 0, the lowest_newack test will not pass
3382 				 * because it would have been set to the
3383 				 * cumack earlier. If the chunk is not already
3384 				 * marked for rtx, the SACK is not a mixed
3385 				 * SACK, and tp1 is not between two sacked
3386 				 * TSNs, then mark it one more time. NOTE
3387 				 * that we mark one additional time since the
3388 				 * SACK DAC flag indicates that two packets
3389 				 * have been received after this missing TSN.
3390 				 */
3391 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3392 				    compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3393 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3394 						sctp_log_fr(16 + num_dests_sacked,
3395 						    tp1->rec.data.TSN_seq,
3396 						    tp1->sent,
3397 						    SCTP_FR_LOG_STRIKE_CHUNK);
3398 					}
3399 					tp1->sent++;
3400 				}
3401 			}
3402 		} else if ((tp1->rec.data.doing_fast_retransmit) && (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0)) {
3403 			/*
3404 			 * For those that have done a FR we must take
3405 			 * special consideration if we strike. I.e. the
3406 			 * biggest_newly_acked must be higher than the
3407 			 * sending_seq at the time we did the FR.
3408 			 */
3409 			if (
3410 #ifdef SCTP_FR_TO_ALTERNATE
3411 			/*
3412 			 * If FR's go to new networks, then we must only do
3413 			 * this for singly homed asoc's. However, if the FR's
3414 			 * go to the same network (Armando's work) then it's
3415 			 * ok to FR multiple times.
3416 			 */
3417 			    (asoc->numnets < 2)
3418 #else
3419 			    (1)
3420 #endif
3421 			    ) {
3422 
3423 				if ((compare_with_wrap(biggest_tsn_newly_acked,
3424 				    tp1->rec.data.fast_retran_tsn, MAX_TSN)) ||
3425 				    (biggest_tsn_newly_acked ==
3426 				    tp1->rec.data.fast_retran_tsn)) {
3427 					/*
3428 					 * Strike the TSN, since this ack is
3429 					 * beyond where things were when we
3430 					 * did a FR.
3431 					 */
3432 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3433 						sctp_log_fr(biggest_tsn_newly_acked,
3434 						    tp1->rec.data.TSN_seq,
3435 						    tp1->sent,
3436 						    SCTP_FR_LOG_STRIKE_CHUNK);
3437 					}
3438 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3439 						tp1->sent++;
3440 					}
3441 					strike_flag = 1;
3442 					if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3443 						/*
3444 						 * CMT DAC algorithm: If
3445 						 * the SACK flag is 0,
3446 						 * the lowest_newack test
3447 						 * will not pass because
3448 						 * it would have been set
3449 						 * to the cumack earlier.
3450 						 * If the chunk is not
3451 						 * already marked for
3452 						 * rtx, the SACK is not
3453 						 * a mixed SACK, and tp1
3454 						 * is not between two
3455 						 * sacked TSNs, mark it
3456 						 * one more time. NOTE:
3457 						 * we mark once more as
3458 						 * the SACK DAC flag says
3459 						 * two packets arrived
3460 						 * after this missing TSN.
3461 						 */
3462 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3463 						    (num_dests_sacked == 1) &&
3464 						    compare_with_wrap(this_sack_lowest_newack,
3465 						    tp1->rec.data.TSN_seq, MAX_TSN)) {
3466 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3467 								sctp_log_fr(32 + num_dests_sacked,
3468 								    tp1->rec.data.TSN_seq,
3469 								    tp1->sent,
3470 								    SCTP_FR_LOG_STRIKE_CHUNK);
3471 							}
3472 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3473 								tp1->sent++;
3474 							}
3475 						}
3476 					}
3477 				}
3478 			}
3479 			/*
3480 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3481 			 * algo covers HTNA.
3482 			 */
3483 		} else if (compare_with_wrap(tp1->rec.data.TSN_seq,
3484 		    biggest_tsn_newly_acked, MAX_TSN)) {
3485 			/*
3486 			 * We don't strike these: This is the  HTNA
3487 			 * algorithm i.e. we don't strike If our TSN is
3488 			 * larger than the Highest TSN Newly Acked.
3489 			 */
3490 			;
3491 		} else {
3492 			/* Strike the TSN */
3493 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3494 				sctp_log_fr(biggest_tsn_newly_acked,
3495 				    tp1->rec.data.TSN_seq,
3496 				    tp1->sent,
3497 				    SCTP_FR_LOG_STRIKE_CHUNK);
3498 			}
3499 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3500 				tp1->sent++;
3501 			}
3502 			if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3503 				/*
3504 				 * CMT DAC algorithm: If the SACK flag is set
3505 				 * to 0, the lowest_newack test will not pass
3506 				 * because it would have been set to the
3507 				 * cumack earlier. If the chunk is not already
3508 				 * marked for rtx, the SACK is not a mixed
3509 				 * SACK, and tp1 is not between two sacked
3510 				 * TSNs, then mark it one more time. NOTE
3511 				 * that we mark one additional time since the
3512 				 * SACK DAC flag indicates that two packets
3513 				 * have been received after this missing TSN.
3514 				 */
3515 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3516 				    compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3517 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3518 						sctp_log_fr(48 + num_dests_sacked,
3519 						    tp1->rec.data.TSN_seq,
3520 						    tp1->sent,
3521 						    SCTP_FR_LOG_STRIKE_CHUNK);
3522 					}
3523 					tp1->sent++;
3524 				}
3525 			}
3526 		}
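		/*
		 * Worked example, for illustration: each strike above
		 * increments tp1->sent by one; once enough SACKs have
		 * implicitly reported the TSN missing that tp1->sent
		 * reaches SCTP_DATAGRAM_RESEND, the block below schedules
		 * the chunk for fast retransmission.
		 */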
3527 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3528 			struct sctp_nets *alt;
3529 
3530 			/* fix counts and things */
3531 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3532 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3533 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3534 				    tp1->book_size,
3535 				    (uintptr_t) tp1->whoTo,
3536 				    tp1->rec.data.TSN_seq);
3537 			}
3538 			if (tp1->whoTo) {
3539 				tp1->whoTo->net_ack++;
3540 				sctp_flight_size_decrease(tp1);
3541 			}
3542 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3543 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3544 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3545 			}
3546 			/* add back to the rwnd */
3547 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3548 
3549 			/* remove from the total flight */
3550 			sctp_total_flight_decrease(stcb, tp1);
3551 
3552 			if ((stcb->asoc.peer_supports_prsctp) &&
3553 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3554 				/*
3555 				 * Has it been retransmitted tv_sec times? -
3556 				 * we store the retran count there.
3557 				 */
3558 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3559 					/* Yes, so drop it */
3560 					if (tp1->data != NULL) {
3561 						(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3562 						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3563 						    SCTP_SO_NOT_LOCKED);
3564 					}
3565 					/* Make sure to flag we had a FR */
3566 					tp1->whoTo->net_ack++;
3567 					tp1 = TAILQ_NEXT(tp1, sctp_next);
3568 					continue;
3569 				}
3570 			}
3571 			/* printf("OK, we are now ready to FR this guy\n"); */
3572 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3573 				sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3574 				    0, SCTP_FR_MARKED);
3575 			}
3576 			if (strike_flag) {
3577 				/* This is a subsequent FR */
3578 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3579 			}
3580 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3581 			if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
3582 				/*
3583 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3584 				 * If CMT is being used, then pick dest with
3585 				 * largest ssthresh for any retransmission.
3586 				 */
3587 				tp1->no_fr_allowed = 1;
3588 				alt = tp1->whoTo;
3589 				/* sa_ignore NO_NULL_CHK */
3590 				if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) {
3591 					/*
3592 					 * JRS 5/18/07 - If CMT PF is on,
3593 					 * use the PF version of
3594 					 * find_alt_net()
3595 					 */
3596 					alt = sctp_find_alternate_net(stcb, alt, 2);
3597 				} else {
3598 					/*
3599 					 * JRS 5/18/07 - If only CMT is on,
3600 					 * use the CMT version of
3601 					 * find_alt_net()
3602 					 */
3603 					/* sa_ignore NO_NULL_CHK */
3604 					alt = sctp_find_alternate_net(stcb, alt, 1);
3605 				}
3606 				if (alt == NULL) {
3607 					alt = tp1->whoTo;
3608 				}
3609 				/*
3610 				 * CUCv2: If a different dest is picked for
3611 				 * the retransmission, then new
3612 				 * (rtx-)pseudo_cumack needs to be tracked
3613 				 * for orig dest. Let CUCv2 track new (rtx-)
3614 				 * pseudo-cumack always.
3615 				 */
3616 				if (tp1->whoTo) {
3617 					tp1->whoTo->find_pseudo_cumack = 1;
3618 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3619 				}
3620 			} else {/* CMT is OFF */
3621 
3622 #ifdef SCTP_FR_TO_ALTERNATE
3623 				/* Can we find an alternate? */
3624 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3625 #else
3626 				/*
3627 				 * default behavior is to NOT retransmit
3628 				 * FR's to an alternate. Armando Caro's
3629 				 * paper details why.
3630 				 */
3631 				alt = tp1->whoTo;
3632 #endif
3633 			}
3634 
3635 			tp1->rec.data.doing_fast_retransmit = 1;
3636 			tot_retrans++;
3637 			/* mark the sending seq for possible subsequent FR's */
3638 			/*
3639 			 * printf("Marking TSN for FR new value %x\n",
3640 			 * (uint32_t)tp1->rec.data.TSN_seq);
3641 			 */
3642 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3643 				/*
3644 				 * If the send queue is empty, sending_seq is
3645 				 * the next sequence number that will be
3646 				 * assigned, so subtracting one from it gives
3647 				 * the one we last sent.
3648 				 */
3649 				tp1->rec.data.fast_retran_tsn = sending_seq;
3650 			} else {
3651 				/*
3652 				 * If there are chunks on the send queue
3653 				 * (unsent data that has made it from the
3654 			 * stream queues but not out the door), we
3655 				 * take the first one (which will have the
3656 				 * lowest TSN) and subtract one to get the
3657 				 * one we last sent.
3658 				 */
3659 				struct sctp_tmit_chunk *ttt;
3660 
3661 				ttt = TAILQ_FIRST(&asoc->send_queue);
3662 				tp1->rec.data.fast_retran_tsn =
3663 				    ttt->rec.data.TSN_seq;
3664 			}
3665 
3666 			if (tp1->do_rtt) {
3667 				/*
3668 			 * this guy had an RTO calculation pending on
3669 				 * it, cancel it
3670 				 */
3671 				tp1->do_rtt = 0;
3672 			}
3673 			if (alt != tp1->whoTo) {
3674 				/* yes, there is an alternate. */
3675 				sctp_free_remote_addr(tp1->whoTo);
3676 				/* sa_ignore FREED_MEMORY */
3677 				tp1->whoTo = alt;
3678 				atomic_add_int(&alt->ref_count, 1);
3679 			}
3680 		}
3681 		tp1 = TAILQ_NEXT(tp1, sctp_next);
3682 	}			/* while (tp1) */
3683 
3684 	if (tot_retrans > 0) {
3685 		/*
3686 		 * Set up the ECN nonce re-sync point. We do this since once
3687 		 * we fast-retransmit something we introduce a Karn's rule
3688 		 * scenario and won't know the totals for the ECN bits.
3689 		 */
3690 		asoc->nonce_resync_tsn = sending_seq;
3691 		asoc->nonce_wait_for_ecne = 0;
3692 		asoc->nonce_sum_check = 0;
3693 	}
3694 }
3695 
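/*
 * Example, for illustration (assuming all three chunks are PR-SCTP):
 * with cum-ack = 4 and a sent_queue holding TSN 5 (FORWARD_TSN_SKIP),
 * TSN 6 (ACKED) and TSN 7 (RESEND), the loop below moves
 * advanced_peer_ack_point to 6 and returns the chunk for TSN 6; the
 * RESEND chunk stops any further advancement.
 */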
3696 struct sctp_tmit_chunk *
3697 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3698     struct sctp_association *asoc)
3699 {
3700 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3701 	struct timeval now;
3702 	int now_filled = 0;
3703 
3704 	if (asoc->peer_supports_prsctp == 0) {
3705 		return (NULL);
3706 	}
3707 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3708 	while (tp1) {
3709 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3710 		    tp1->sent != SCTP_DATAGRAM_ACKED &&
3711 		    tp1->sent != SCTP_DATAGRAM_RESEND) {
3712 			/* no chance to advance, out of here */
3713 			break;
3714 		}
3715 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3716 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3717 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3718 				    asoc->advanced_peer_ack_point,
3719 				    tp1->rec.data.TSN_seq, 0, 0);
3720 			}
3721 		}
3722 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3723 			/*
3724 			 * We can't fwd-tsn past any that are reliable, aka
3725 			 * retransmitted until the asoc fails.
3726 			 */
3727 			break;
3728 		}
3729 		if (!now_filled) {
3730 			(void)SCTP_GETTIME_TIMEVAL(&now);
3731 			now_filled = 1;
3732 		}
3733 		tp2 = TAILQ_NEXT(tp1, sctp_next);
3734 		/*
3735 		 * now we have a chunk which is marked for another
3736 		 * retransmission to a PR-stream but may have run out its
3737 		 * chances already OR has been marked to skip now. Can we
3738 		 * skip it if it's a resend?
3739 		 */
3740 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3741 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3742 			/*
3743 			 * Now is this one marked for resend and its time is
3744 			 * now up?
3745 			 */
3746 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3747 				/* Yes so drop it */
3748 				if (tp1->data) {
3749 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3750 					    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3751 					    SCTP_SO_NOT_LOCKED);
3752 				}
3753 			} else {
3754 				/*
3755 				 * No, we are done when we hit one for resend
3756 				 * whose time has not expired.
3757 				 */
3758 				break;
3759 			}
3760 		}
3761 		/*
3762 		 * Ok, now if this chunk is marked to drop, we can clean up
3763 		 * the chunk, advance our peer ack point and check the next
3764 		 * chunk.
3765 		 */
3766 		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3767 		    (tp1->sent == SCTP_DATAGRAM_ACKED)) {
3768 			/* advance PeerAckPoint goes forward */
3769 			if (compare_with_wrap(tp1->rec.data.TSN_seq,
3770 			    asoc->advanced_peer_ack_point,
3771 			    MAX_TSN)) {
3772 
3773 				asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3774 				a_adv = tp1;
3775 			} else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3776 				/* No update but we do save the chk */
3777 				a_adv = tp1;
3778 			}
3779 		} else {
3780 			/*
3781 			 * If it is still in RESEND we can advance no
3782 			 * further
3783 			 */
3784 			break;
3785 		}
3786 		/*
3787 		 * If we hit here we just dumped tp1; move to the next TSN
3788 		 * on the sent queue.
3789 		 */
3790 		tp1 = tp2;
3791 	}
3792 	return (a_adv);
3793 }
3794 
3795 static int
3796 sctp_fs_audit(struct sctp_association *asoc)
3797 {
3798 	struct sctp_tmit_chunk *chk;
3799 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3800 	int entry_flight, entry_cnt, ret;
3801 
3802 	entry_flight = asoc->total_flight;
3803 	entry_cnt = asoc->total_flight_count;
3804 	ret = 0;
3805 
3806 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3807 		return (0);
3808 
3809 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3810 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3811 			printf("Chk TSN:%u size:%d inflight cnt:%d\n",
3812 			    chk->rec.data.TSN_seq,
3813 			    chk->send_size,
3814 			    chk->snd_count
3815 			    );
3816 			inflight++;
3817 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3818 			resend++;
3819 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3820 			inbetween++;
3821 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3822 			above++;
3823 		} else {
3824 			acked++;
3825 		}
3826 	}
3827 
3828 	if ((inflight > 0) || (inbetween > 0)) {
3829 #ifdef INVARIANTS
3830 		panic("Flight size-express incorrect?\n");
3831 #else
3832 		printf("asoc->total_flight:%d cnt:%d\n",
3833 		    entry_flight, entry_cnt);
3834 
3835 		SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
3836 		    inflight, inbetween, resend, above, acked);
3837 		ret = 1;
3838 #endif
3839 	}
3840 	return (ret);
3841 }
3842 
3843 
3844 static void
3845 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3846     struct sctp_association *asoc,
3847     struct sctp_nets *net,
3848     struct sctp_tmit_chunk *tp1)
3849 {
3850 	tp1->window_probe = 0;
3851 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3852 		/* TSNs skipped; we do NOT move back. */
3853 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3854 		    tp1->whoTo->flight_size,
3855 		    tp1->book_size,
3856 		    (uintptr_t) tp1->whoTo,
3857 		    tp1->rec.data.TSN_seq);
3858 		return;
3859 	}
3860 	/* First setup this by shrinking flight */
3861 	sctp_flight_size_decrease(tp1);
3862 	sctp_total_flight_decrease(stcb, tp1);
3863 	/* Now mark for resend */
3864 	tp1->sent = SCTP_DATAGRAM_RESEND;
3865 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3866 
3867 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3868 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3869 		    tp1->whoTo->flight_size,
3870 		    tp1->book_size,
3871 		    (uintptr_t) tp1->whoTo,
3872 		    tp1->rec.data.TSN_seq);
3873 	}
3874 }
3875 
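/*
 * Note, for illustration: this is the "express" fast path, used by the
 * caller when a SACK only advances the cumulative ack and carries no
 * gap-ack blocks; SACKs with gap reports take the full
 * sctp_handle_sack() path further below.
 */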
3876 void
3877 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3878     uint32_t rwnd, int nonce_sum_flag, int *abort_now)
3879 {
3880 	struct sctp_nets *net;
3881 	struct sctp_association *asoc;
3882 	struct sctp_tmit_chunk *tp1, *tp2;
3883 	uint32_t old_rwnd;
3884 	int win_probe_recovery = 0;
3885 	int win_probe_recovered = 0;
3886 	int j, done_once = 0;
3887 
3888 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3889 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3890 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3891 	}
3892 	SCTP_TCB_LOCK_ASSERT(stcb);
3893 #ifdef SCTP_ASOCLOG_OF_TSNS
3894 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3895 	stcb->asoc.cumack_log_at++;
3896 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3897 		stcb->asoc.cumack_log_at = 0;
3898 	}
3899 #endif
3900 	asoc = &stcb->asoc;
3901 	old_rwnd = asoc->peers_rwnd;
3902 	if (compare_with_wrap(asoc->last_acked_seq, cumack, MAX_TSN)) {
3903 		/* old ack */
3904 		return;
3905 	} else if (asoc->last_acked_seq == cumack) {
3906 		/* Window update sack */
3907 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3908 		    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3909 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3910 			/* SWS sender side engages */
3911 			asoc->peers_rwnd = 0;
3912 		}
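		/*
		 * Note, for illustration: this is sender-side silly
		 * window syndrome avoidance; a peer window smaller than
		 * the sctp_sws_sender threshold is treated as zero so we
		 * do not dribble out tiny chunks.
		 */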
3913 		if (asoc->peers_rwnd > old_rwnd) {
3914 			goto again;
3915 		}
3916 		return;
3917 	}
3918 	/* First setup for CC stuff */
3919 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3920 		net->prev_cwnd = net->cwnd;
3921 		net->net_ack = 0;
3922 		net->net_ack2 = 0;
3923 
3924 		/*
3925 		 * CMT: Reset CUC and Fast recovery algo variables before
3926 		 * SACK processing
3927 		 */
3928 		net->new_pseudo_cumack = 0;
3929 		net->will_exit_fast_recovery = 0;
3930 	}
3931 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3932 		uint32_t send_s;
3933 
3934 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3935 			tp1 = TAILQ_LAST(&asoc->sent_queue,
3936 			    sctpchunk_listhead);
3937 			send_s = tp1->rec.data.TSN_seq + 1;
3938 		} else {
3939 			send_s = asoc->sending_seq;
3940 		}
3941 		if ((cumack == send_s) ||
3942 		    compare_with_wrap(cumack, send_s, MAX_TSN)) {
3943 #ifndef INVARIANTS
3944 			struct mbuf *oper;
3945 
3946 #endif
3947 #ifdef INVARIANTS
3948 			panic("Impossible sack 1");
3949 #else
3950 
3951 			*abort_now = 1;
3952 			/* XXX */
3953 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
3954 			    0, M_DONTWAIT, 1, MT_DATA);
3955 			if (oper) {
3956 				struct sctp_paramhdr *ph;
3957 				uint32_t *ippp;
3958 
3959 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
3960 				    sizeof(uint32_t);
3961 				ph = mtod(oper, struct sctp_paramhdr *);
3962 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
3963 				ph->param_length = htons(SCTP_BUF_LEN(oper));
3964 				ippp = (uint32_t *) (ph + 1);
3965 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
3966 			}
3967 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
3968 			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
3969 			return;
3970 #endif
3971 		}
3972 	}
3973 	asoc->this_sack_highest_gap = cumack;
3974 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3975 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3976 		    stcb->asoc.overall_error_count,
3977 		    0,
3978 		    SCTP_FROM_SCTP_INDATA,
3979 		    __LINE__);
3980 	}
3981 	stcb->asoc.overall_error_count = 0;
3982 	if (compare_with_wrap(cumack, asoc->last_acked_seq, MAX_TSN)) {
3983 		/* process the new consecutive TSN first */
3984 		tp1 = TAILQ_FIRST(&asoc->sent_queue);
3985 		while (tp1) {
3986 			tp2 = TAILQ_NEXT(tp1, sctp_next);
3987 			if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq,
3988 			    MAX_TSN) ||
3989 			    cumack == tp1->rec.data.TSN_seq) {
3990 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3991 					printf("Warning, an unsent is now acked?\n");
3992 				}
3993 				/*
3994 				 * ECN Nonce: Add the nonce to the sender's
3995 				 * nonce sum
3996 				 */
3997 				asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
3998 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3999 					/*
4000 					 * If it is less than ACKED, it is
4001 					 * now no-longer in flight. Higher
4002 					 * values may occur during marking
4003 					 */
4004 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4005 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4006 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4007 							    tp1->whoTo->flight_size,
4008 							    tp1->book_size,
4009 							    (uintptr_t) tp1->whoTo,
4010 							    tp1->rec.data.TSN_seq);
4011 						}
4012 						sctp_flight_size_decrease(tp1);
4013 						/* sa_ignore NO_NULL_CHK */
4014 						sctp_total_flight_decrease(stcb, tp1);
4015 					}
4016 					tp1->whoTo->net_ack += tp1->send_size;
4017 					if (tp1->snd_count < 2) {
4018 						/*
4019 						 * True non-retransmitted
4020 						 * chunk
4021 						 */
4022 						tp1->whoTo->net_ack2 +=
4023 						    tp1->send_size;
4024 
4025 						/* update RTO too? */
4026 						if (tp1->do_rtt) {
4027 							tp1->whoTo->RTO =
4028 							/*
4029 							 * sa_ignore
4030 							 * NO_NULL_CHK
4031 							 */
4032 							    sctp_calculate_rto(stcb,
4033 							    asoc, tp1->whoTo,
4034 							    &tp1->sent_rcv_time,
4035 							    sctp_align_safe_nocopy);
4036 							tp1->do_rtt = 0;
4037 						}
4038 					}
4039 					/*
4040 					 * CMT: CUCv2 algorithm. From the
4041 					 * cumack'd TSNs, for each TSN being
4042 					 * acked for the first time, set the
4043 					 * following variables for the
4044 					 * corresp destination.
4045 					 * new_pseudo_cumack will trigger a
4046 					 * cwnd update.
4047 					 * find_(rtx_)pseudo_cumack will
4048 					 * trigger search for the next
4049 					 * expected (rtx-)pseudo-cumack.
4050 					 */
4051 					tp1->whoTo->new_pseudo_cumack = 1;
4052 					tp1->whoTo->find_pseudo_cumack = 1;
4053 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4054 
4055 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4056 						/* sa_ignore NO_NULL_CHK */
4057 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4058 					}
4059 				}
4060 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4061 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4062 				}
4063 				if (tp1->rec.data.chunk_was_revoked) {
4064 					/* deflate the cwnd */
4065 					tp1->whoTo->cwnd -= tp1->book_size;
4066 					tp1->rec.data.chunk_was_revoked = 0;
4067 				}
4068 				tp1->sent = SCTP_DATAGRAM_ACKED;
4069 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4070 				if (tp1->data) {
4071 					/* sa_ignore NO_NULL_CHK */
4072 					sctp_free_bufspace(stcb, asoc, tp1, 1);
4073 					sctp_m_freem(tp1->data);
4074 				}
4075 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4076 					sctp_log_sack(asoc->last_acked_seq,
4077 					    cumack,
4078 					    tp1->rec.data.TSN_seq,
4079 					    0,
4080 					    0,
4081 					    SCTP_LOG_FREE_SENT);
4082 				}
4083 				tp1->data = NULL;
4084 				asoc->sent_queue_cnt--;
4085 				sctp_free_a_chunk(stcb, tp1);
4086 				tp1 = tp2;
4087 			} else {
4088 				break;
4089 			}
4090 		}
4091 
4092 	}
4093 	/* sa_ignore NO_NULL_CHK */
4094 	if (stcb->sctp_socket) {
4095 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4096 		struct socket *so;
4097 
4098 #endif
4099 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4100 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4101 			/* sa_ignore NO_NULL_CHK */
4102 			sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK);
4103 		}
4104 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4105 		so = SCTP_INP_SO(stcb->sctp_ep);
4106 		atomic_add_int(&stcb->asoc.refcnt, 1);
4107 		SCTP_TCB_UNLOCK(stcb);
4108 		SCTP_SOCKET_LOCK(so, 1);
4109 		SCTP_TCB_LOCK(stcb);
4110 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4111 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4112 			/* assoc was freed while we were unlocked */
4113 			SCTP_SOCKET_UNLOCK(so, 1);
4114 			return;
4115 		}
4116 #endif
4117 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4118 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4119 		SCTP_SOCKET_UNLOCK(so, 1);
4120 #endif
4121 	} else {
4122 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4123 			sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK);
4124 		}
4125 	}
4126 
4127 	/* JRS - Use the congestion control given in the CC module */
4128 	if (asoc->last_acked_seq != cumack)
4129 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4130 
4131 	asoc->last_acked_seq = cumack;
4132 
4133 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4134 		/* nothing left in-flight */
4135 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4136 			net->flight_size = 0;
4137 			net->partial_bytes_acked = 0;
4138 		}
4139 		asoc->total_flight = 0;
4140 		asoc->total_flight_count = 0;
4141 	}
4142 	/* ECN Nonce updates */
4143 	if (asoc->ecn_nonce_allowed) {
4144 		if (asoc->nonce_sum_check) {
4145 			if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) {
4146 				if (asoc->nonce_wait_for_ecne == 0) {
4147 					struct sctp_tmit_chunk *lchk;
4148 
4149 					lchk = TAILQ_FIRST(&asoc->send_queue);
4150 					asoc->nonce_wait_for_ecne = 1;
4151 					if (lchk) {
4152 						asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
4153 					} else {
4154 						asoc->nonce_wait_tsn = asoc->sending_seq;
4155 					}
4156 				} else {
4157 					if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
4158 					    (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
4159 						/*
4160 						 * Misbehaving peer. We need
4161 						 * to react to this guy
4162 						 */
4163 						asoc->ecn_allowed = 0;
4164 						asoc->ecn_nonce_allowed = 0;
4165 					}
4166 				}
4167 			}
4168 		} else {
4169 			/* See if Resynchronization Possible */
4170 			if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
4171 				asoc->nonce_sum_check = 1;
4172 				/*
4173 				 * Now we must calculate the new base. We
4174 				 * know the totals for all segments
4175 				 * gap-acked in this SACK (none), and we
4176 				 * know the SACK's nonce sum (it's in
4177 				 * nonce_sum_flag), so a truth table gives
4178 				 * the new asoc->nonce_sum_expect_base:
4179 				 *
4180 				 * SACK-flag-Value  Seg-Sums  Base
4181 				 *        0            0       0
4182 				 *        1            0       1
4183 				 *        0            1       1
4184 				 *        1            1       0
4185 				 */
4186 				asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
4187 			}
4188 		}
4189 	}
4190 	/* RWND update */
4191 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4192 	    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4193 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4194 		/* SWS sender side engages */
4195 		asoc->peers_rwnd = 0;
4196 	}
4197 	if (asoc->peers_rwnd > old_rwnd) {
4198 		win_probe_recovery = 1;
4199 	}
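	/*
	 * Note, for illustration: a rwnd that grew across this SACK may
	 * mean a zero-window probe was answered, so the timer loop below
	 * also looks for probe chunks to move back for retransmission.
	 */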
4200 	/* Now assure a timer where data is queued at */
4201 again:
4202 	j = 0;
4203 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4204 		int to_ticks;
4205 
4206 		if (win_probe_recovery && (net->window_probe)) {
4207 			win_probe_recovered = 1;
4208 			/*
4209 			 * Find the first chunk that was used for the window
4210 			 * probe and move it back for retransmission
4211 			 */
4212 			/* sa_ignore FREED_MEMORY */
4213 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4214 				if (tp1->window_probe) {
4215 					/* move back to data send queue */
4216 					sctp_window_probe_recovery(stcb, asoc, net, tp1);
4217 					break;
4218 				}
4219 			}
4220 		}
4221 		if (net->RTO == 0) {
4222 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4223 		} else {
4224 			to_ticks = MSEC_TO_TICKS(net->RTO);
4225 		}
4226 		if (net->flight_size) {
4227 			j++;
4228 			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4229 			    sctp_timeout_handler, &net->rxt_timer);
4230 			if (net->window_probe) {
4231 				net->window_probe = 0;
4232 			}
4233 		} else {
4234 			if (net->window_probe) {
4235 				/*
4236 				 * In window probes we must assure a timer
4237 				 * is still running there
4238 				 */
4239 				net->window_probe = 0;
4240 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4241 					SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4242 					    sctp_timeout_handler, &net->rxt_timer);
4243 				}
4244 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4245 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4246 				    stcb, net,
4247 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4248 			}
4249 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4250 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4251 					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4252 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4253 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
4254 				}
4255 			}
4256 		}
4257 	}
4258 	if ((j == 0) &&
4259 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4260 	    (asoc->sent_queue_retran_cnt == 0) &&
4261 	    (win_probe_recovered == 0) &&
4262 	    (done_once == 0)) {
4263 		/*
4264 		 * huh, this should not happen unless all packets are
4265 		 * PR-SCTP and marked to skip of course.
4266 		 */
4267 		if (sctp_fs_audit(asoc)) {
4268 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4269 				net->flight_size = 0;
4270 			}
4271 			asoc->total_flight = 0;
4272 			asoc->total_flight_count = 0;
4273 			asoc->sent_queue_retran_cnt = 0;
4274 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4275 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4276 					sctp_flight_size_increase(tp1);
4277 					sctp_total_flight_increase(stcb, tp1);
4278 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4279 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4280 				}
4281 			}
4282 		}
4283 		done_once = 1;
4284 		goto again;
4285 	}
4286 	/**********************************/
4287 	/* Now what about shutdown issues */
4288 	/**********************************/
4289 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4290 		/* nothing left on sendqueue.. consider done */
4291 		/* clean up */
4292 		if ((asoc->stream_queue_cnt == 1) &&
4293 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4294 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4295 		    (asoc->locked_on_sending)
4296 		    ) {
4297 			struct sctp_stream_queue_pending *sp;
4298 
4299 			/*
4300 			 * We may be in a state where we got everything
4301 			 * across but cannot write more due to a shutdown... we abort
4302 			 * since the user did not indicate EOR in this case.
4303 			 * The sp will be cleaned during free of the asoc.
4304 			 */
4305 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4306 			    sctp_streamhead);
4307 			if ((sp) && (sp->length == 0)) {
4308 				/* Let cleanup code purge it */
4309 				if (sp->msg_is_complete) {
4310 					asoc->stream_queue_cnt--;
4311 				} else {
4312 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4313 					asoc->locked_on_sending = NULL;
4314 					asoc->stream_queue_cnt--;
4315 				}
4316 			}
4317 		}
4318 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4319 		    (asoc->stream_queue_cnt == 0)) {
4320 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4321 				/* Need to abort here */
4322 				struct mbuf *oper;
4323 
4324 		abort_out_now:
4325 				*abort_now = 1;
4326 				/* XXX */
4327 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4328 				    0, M_DONTWAIT, 1, MT_DATA);
4329 				if (oper) {
4330 					struct sctp_paramhdr *ph;
4331 					uint32_t *ippp;
4332 
4333 					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4334 					    sizeof(uint32_t);
4335 					ph = mtod(oper, struct sctp_paramhdr *);
4336 					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4337 					ph->param_length = htons(SCTP_BUF_LEN(oper));
4338 					ippp = (uint32_t *) (ph + 1);
4339 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
4340 				}
4341 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4342 				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
4343 			} else {
4344 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4345 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4346 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4347 				}
4348 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4349 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4350 				sctp_stop_timers_for_shutdown(stcb);
4351 				sctp_send_shutdown(stcb,
4352 				    stcb->asoc.primary_destination);
4353 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4354 				    stcb->sctp_ep, stcb, asoc->primary_destination);
4355 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4356 				    stcb->sctp_ep, stcb, asoc->primary_destination);
4357 			}
4358 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4359 		    (asoc->stream_queue_cnt == 0)) {
4360 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4361 				goto abort_out_now;
4362 			}
4363 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4364 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4365 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4366 			sctp_send_shutdown_ack(stcb,
4367 			    stcb->asoc.primary_destination);
4368 
4369 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4370 			    stcb->sctp_ep, stcb, asoc->primary_destination);
4371 		}
4372 	}
4373 	/*********************************************/
4374 	/* Here we perform PR-SCTP procedures        */
4375 	/* (section 4.2)                             */
4376 	/*********************************************/
4377 	/* C1. update advancedPeerAckPoint */
4378 	if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) {
4379 		asoc->advanced_peer_ack_point = cumack;
4380 	}
4381 	/* PR-Sctp issues need to be addressed too */
4382 	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4383 		struct sctp_tmit_chunk *lchk;
4384 		uint32_t old_adv_peer_ack_point;
4385 
4386 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4387 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4388 		/* C3. See if we need to send a Fwd-TSN */
4389 		if (compare_with_wrap(asoc->advanced_peer_ack_point, cumack,
4390 		    MAX_TSN)) {
4391 			/*
4392 			 * ISSUE with ECN, see FWD-TSN processing for notes
4393 			 * on issues that will occur when the ECN NONCE
4394 			 * stuff is put into SCTP for cross checking.
4395 			 */
4396 			if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
4397 			    MAX_TSN)) {
4398 				send_forward_tsn(stcb, asoc);
4399 				/*
4400 				 * ECN Nonce: Disable Nonce Sum check when
4401 				 * FWD TSN is sent and store resync tsn
4402 				 */
4403 				asoc->nonce_sum_check = 0;
4404 				asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
4405 			} else if (lchk) {
4406 				/* try to FR fwd-tsn's that get lost too */
4407 				lchk->rec.data.fwd_tsn_cnt++;
4408 				if (lchk->rec.data.fwd_tsn_cnt > 3) {
4409 					send_forward_tsn(stcb, asoc);
4410 					lchk->rec.data.fwd_tsn_cnt = 0;
4411 				}
4412 			}
4413 		}
4414 		if (lchk) {
4415 			/* Assure a timer is up */
4416 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4417 			    stcb->sctp_ep, stcb, lchk->whoTo);
4418 		}
4419 	}
4420 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4421 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4422 		    rwnd,
4423 		    stcb->asoc.peers_rwnd,
4424 		    stcb->asoc.total_flight,
4425 		    stcb->asoc.total_output_queue_size);
4426 	}
4427 }
4428 
4429 void
4430 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4431     struct sctp_tcb *stcb, struct sctp_nets *net_from,
4432     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4433     int *abort_now, uint8_t flags,
4434     uint32_t cum_ack, uint32_t rwnd)
4435 {
4436 	struct sctp_association *asoc;
4437 	struct sctp_tmit_chunk *tp1, *tp2;
4438 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4439 	uint32_t sav_cum_ack;
4440 	uint16_t wake_him = 0;
4441 	uint32_t send_s = 0;
4442 	long j;
4443 	int accum_moved = 0;
4444 	int will_exit_fast_recovery = 0;
4445 	uint32_t a_rwnd, old_rwnd;
4446 	int win_probe_recovery = 0;
4447 	int win_probe_recovered = 0;
4448 	struct sctp_nets *net = NULL;
4449 	int nonce_sum_flag, ecn_seg_sums = 0;
4450 	int done_once;
4451 	uint8_t reneged_all = 0;
4452 	uint8_t cmt_dac_flag;
4453 
4454 	/*
4455 	 * we take any chance we can to service our queues since we cannot
4456 	 * get awoken when the socket is read from :<
4457 	 */
4458 	/*
4459 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4460 	 * old sack, if so discard. 2) If there is nothing left in the send
4461 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4462 	 * too, update any rwnd change and verify no timers are running.
4463 	 * then return. 3) Process any new consecutive data, i.e. cum-ack
4464 	 * moved; process these first and note that it moved. 4) Process any
4465 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4466 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4467 	 * sync up flightsizes and things, stop all timers and also check
4468 	 * for shutdown_pending state. If so then go ahead and send off the
4469 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4470 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4471 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4472 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4473 	 * if in shutdown_recv state.
4474 	 */
4475 	SCTP_TCB_LOCK_ASSERT(stcb);
4476 	/* CMT DAC algo */
4477 	this_sack_lowest_newack = 0;
4478 	j = 0;
4479 	SCTP_STAT_INCR(sctps_slowpath_sack);
4480 	last_tsn = cum_ack;
4481 	nonce_sum_flag = flags & SCTP_SACK_NONCE_SUM;
4482 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4483 #ifdef SCTP_ASOCLOG_OF_TSNS
4484 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4485 	stcb->asoc.cumack_log_at++;
4486 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4487 		stcb->asoc.cumack_log_at = 0;
4488 	}
4489 #endif
4490 	a_rwnd = rwnd;
4491 
4492 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4493 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4494 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4495 	}
4496 	old_rwnd = stcb->asoc.peers_rwnd;
4497 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4498 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4499 		    stcb->asoc.overall_error_count,
4500 		    0,
4501 		    SCTP_FROM_SCTP_INDATA,
4502 		    __LINE__);
4503 	}
4504 	stcb->asoc.overall_error_count = 0;
4505 	asoc = &stcb->asoc;
4506 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4507 		sctp_log_sack(asoc->last_acked_seq,
4508 		    cum_ack,
4509 		    0,
4510 		    num_seg,
4511 		    num_dup,
4512 		    SCTP_LOG_NEW_SACK);
4513 	}
4514 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_FR_LOGGING_ENABLE | SCTP_EARLYFR_LOGGING_ENABLE))) {
4515 		uint16_t i;
4516 		uint32_t *dupdata, dblock;
4517 
4518 		for (i = 0; i < num_dup; i++) {
4519 			dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4520 			    sizeof(uint32_t), (uint8_t *) & dblock);
4521 			if (dupdata == NULL) {
4522 				break;
4523 			}
4524 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4525 		}
4526 	}
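	/*
	 * Note, for illustration: unlike gap-ack blocks, the duplicate
	 * TSN entries in a SACK are absolute 32-bit TSNs, so they are
	 * logged above without any offset arithmetic.
	 */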
4527 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4528 		/* reality check */
4529 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4530 			tp1 = TAILQ_LAST(&asoc->sent_queue,
4531 			    sctpchunk_listhead);
4532 			send_s = tp1->rec.data.TSN_seq + 1;
4533 		} else {
4534 			tp1 = NULL;
4535 			send_s = asoc->sending_seq;
4536 		}
4537 		if (cum_ack == send_s ||
4538 		    compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
4539 			struct mbuf *oper;
4540 
4541 			/*
4542 			 * no way, we have not even sent this TSN out yet.
4543 			 * Peer is hopelessly messed up with us.
4544 			 */
4545 			printf("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4546 			    cum_ack, send_s);
4547 			if (tp1) {
4548 				printf("Got send_s from tsn:%x + 1 of tp1:%p\n",
4549 				    tp1->rec.data.TSN_seq, tp1);
4550 			}
4551 	hopeless_peer:
4552 			*abort_now = 1;
4553 			/* XXX */
4554 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4555 			    0, M_DONTWAIT, 1, MT_DATA);
4556 			if (oper) {
4557 				struct sctp_paramhdr *ph;
4558 				uint32_t *ippp;
4559 
4560 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4561 				    sizeof(uint32_t);
4562 				ph = mtod(oper, struct sctp_paramhdr *);
4563 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4564 				ph->param_length = htons(SCTP_BUF_LEN(oper));
4565 				ippp = (uint32_t *) (ph + 1);
4566 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4567 			}
4568 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4569 			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
4570 			return;
4571 		}
4572 	}
4573 	/**********************/
4574 	/* 1) check the range */
4575 	/**********************/
4576 	if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
4577 		/* acking something behind */
4578 		return;
4579 	}
4580 	sav_cum_ack = asoc->last_acked_seq;
4581 
4582 	/* update the Rwnd of the peer */
4583 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4584 	    TAILQ_EMPTY(&asoc->send_queue) &&
4585 	    (asoc->stream_queue_cnt == 0)) {
4586 		/* nothing left on send/sent and strmq */
4587 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4588 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4589 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4590 		}
4591 		asoc->peers_rwnd = a_rwnd;
4592 		if (asoc->sent_queue_retran_cnt) {
4593 			asoc->sent_queue_retran_cnt = 0;
4594 		}
4595 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4596 			/* SWS sender side engages */
4597 			asoc->peers_rwnd = 0;
4598 		}
4599 		/* stop any timers */
4600 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4601 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4602 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4603 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4604 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4605 					SCTP_STAT_INCR(sctps_earlyfrstpidsck1);
4606 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4607 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4608 				}
4609 			}
4610 			net->partial_bytes_acked = 0;
4611 			net->flight_size = 0;
4612 		}
4613 		asoc->total_flight = 0;
4614 		asoc->total_flight_count = 0;
4615 		return;
4616 	}
4617 	/*
4618 	 * We init net_ack and net_ack2 to 0. These are used to track two
4619 	 * things: the total byte count acked is tracked in net_ack, AND
4620 	 * net_ack2 tracks the total bytes acked that are unambiguous and
4621 	 * were never retransmitted. We track these on a per
4622 	 * destination address basis.
4623 	 */
4624 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4625 		net->prev_cwnd = net->cwnd;
4626 		net->net_ack = 0;
4627 		net->net_ack2 = 0;
4628 
4629 		/*
4630 		 * CMT: Reset CUC and Fast recovery algo variables before
4631 		 * SACK processing
4632 		 */
4633 		net->new_pseudo_cumack = 0;
4634 		net->will_exit_fast_recovery = 0;
4635 	}
4636 	/* process the new consecutive TSN first */
4637 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
4638 	while (tp1) {
4639 		if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq,
4640 		    MAX_TSN) ||
4641 		    last_tsn == tp1->rec.data.TSN_seq) {
4642 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4643 				/*
4644 				 * ECN Nonce: Add the nonce to the sender's
4645 				 * nonce sum
4646 				 */
4647 				asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
4648 				accum_moved = 1;
4649 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4650 					/*
4651 					 * If it is less than ACKED, it is
4652 					 * now no-longer in flight. Higher
4653 					 * values may occur during marking
4654 					 */
4655 					if ((tp1->whoTo->dest_state &
4656 					    SCTP_ADDR_UNCONFIRMED) &&
4657 					    (tp1->snd_count < 2)) {
4658 						/*
4659 						 * If there was no retran
4660 						 * and the address is
4661 						 * un-confirmed and we sent
4662 						 * there and are now
4663 						 * sacked.. it's confirmed,
4664 						 * mark it so.
4665 						 */
4666 						tp1->whoTo->dest_state &=
4667 						    ~SCTP_ADDR_UNCONFIRMED;
4668 					}
4669 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4670 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4671 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4672 							    tp1->whoTo->flight_size,
4673 							    tp1->book_size,
4674 							    (uintptr_t) tp1->whoTo,
4675 							    tp1->rec.data.TSN_seq);
4676 						}
4677 						sctp_flight_size_decrease(tp1);
4678 						sctp_total_flight_decrease(stcb, tp1);
4679 					}
4680 					tp1->whoTo->net_ack += tp1->send_size;
4681 
4682 					/* CMT SFR and DAC algos */
4683 					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4684 					tp1->whoTo->saw_newack = 1;
4685 
4686 					if (tp1->snd_count < 2) {
4687 						/*
4688 						 * True non-retransmitted
4689 						 * chunk
4690 						 */
4691 						tp1->whoTo->net_ack2 +=
4692 						    tp1->send_size;
4693 
4694 						/* update RTO too? */
4695 						if (tp1->do_rtt) {
4696 							tp1->whoTo->RTO =
4697 							    sctp_calculate_rto(stcb,
4698 							    asoc, tp1->whoTo,
4699 							    &tp1->sent_rcv_time,
4700 							    sctp_align_safe_nocopy);
4701 							tp1->do_rtt = 0;
4702 						}
4703 					}
4704 					/*
4705 					 * CMT: CUCv2 algorithm. From the
4706 					 * cumack'd TSNs, for each TSN being
4707 					 * acked for the first time, set the
4708 					 * following variables for the
4709 					 * corresp destination.
4710 					 * new_pseudo_cumack will trigger a
4711 					 * cwnd update.
4712 					 * find_(rtx_)pseudo_cumack will
4713 					 * trigger search for the next
4714 					 * expected (rtx-)pseudo-cumack.
4715 					 */
4716 					tp1->whoTo->new_pseudo_cumack = 1;
4717 					tp1->whoTo->find_pseudo_cumack = 1;
4718 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4719 
4720 
4721 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4722 						sctp_log_sack(asoc->last_acked_seq,
4723 						    cum_ack,
4724 						    tp1->rec.data.TSN_seq,
4725 						    0,
4726 						    0,
4727 						    SCTP_LOG_TSN_ACKED);
4728 					}
4729 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4730 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4731 					}
4732 				}
4733 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4734 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4735 #ifdef SCTP_AUDITING_ENABLED
4736 					sctp_audit_log(0xB3,
4737 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4738 #endif
4739 				}
4740 				if (tp1->rec.data.chunk_was_revoked) {
4741 					/* deflate the cwnd */
4742 					tp1->whoTo->cwnd -= tp1->book_size;
4743 					tp1->rec.data.chunk_was_revoked = 0;
4744 				}
4745 				tp1->sent = SCTP_DATAGRAM_ACKED;
4746 			}
4747 		} else {
4748 			break;
4749 		}
4750 		tp1 = TAILQ_NEXT(tp1, sctp_next);
4751 	}
4752 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4753 	/* always set this up to cum-ack */
4754 	asoc->this_sack_highest_gap = last_tsn;
4755 
4756 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4757 
4758 		/*
4759 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4760 		 * to be greater than the cumack. Also reset saw_newack to 0
4761 		 * for all dests.
4762 		 */
4763 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4764 			net->saw_newack = 0;
4765 			net->this_sack_highest_newack = last_tsn;
4766 		}
4767 
4768 		/*
4769 		 * this_sack_highest_gap will increase while handling NEW
4770 		 * segments; this_sack_highest_newack will increase while
4771 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4772 		 * used for the CMT DAC algorithm. saw_newack will also change.
4773 		 */
4774 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4775 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4776 		    num_seg, num_nr_seg, &ecn_seg_sums)) {
4777 			wake_him++;
4778 		}
4779 		if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4780 			/*
4781 			 * validate the biggest_tsn_acked in the gap acks if
4782 			 * strict adherence is wanted.
4783 			 */
4784 			if ((biggest_tsn_acked == send_s) ||
4785 			    (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) {
4786 				/*
4787 				 * peer is either confused or we are under
4788 				 * attack. We must abort.
4789 				 */
4790 				SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4791 				    biggest_tsn_acked,
4792 				    send_s);
4793 
4794 				goto hopeless_peer;
4795 			}
4796 		}
4797 	}
4798 	/********************************************/
4799 	/* cancel ALL T3-send timers if accum moved */
4800 	/********************************************/
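	/*
	 * With CMT on, stop the T3 timer only on destinations whose
	 * pseudo-cumack advanced; otherwise, any cum-ack movement stops it
	 * on every net. Timers are restarted further below for nets that
	 * still have data in flight.
	 */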
4801 	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
4802 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4803 			if (net->new_pseudo_cumack)
4804 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4805 				    stcb, net,
4806 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4807 
4808 		}
4809 	} else {
4810 		if (accum_moved) {
4811 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4812 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4813 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4814 			}
4815 		}
4816 	}
4817 	/********************************************/
4818 	/* drop the acked chunks from the sendqueue */
4819 	/********************************************/
4820 	asoc->last_acked_seq = cum_ack;
4821 
4822 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
4823 	if (tp1 == NULL)
4824 		goto done_with_it;
4825 	do {
4826 		if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
4827 		    MAX_TSN)) {
4828 			break;
4829 		}
4830 		if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4831 			/* no more sent on list */
4832 			SCTP_PRINTF("Warning, tp1->sent == %d and it is now acked?\n",
4833 			    tp1->sent);
4834 		}
4835 		tp2 = TAILQ_NEXT(tp1, sctp_next);
4836 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4837 		if (tp1->pr_sctp_on) {
4838 			if (asoc->pr_sctp_cnt != 0)
4839 				asoc->pr_sctp_cnt--;
4840 		}
4841 		if ((TAILQ_FIRST(&asoc->sent_queue) == NULL) &&
4842 		    (asoc->total_flight > 0)) {
4843 #ifdef INVARIANTS
4844 			panic("Warning flight size is positive and should be 0");
4845 #else
4846 			SCTP_PRINTF("Warning flight size incorrect, should be 0 but is %d\n",
4847 			    asoc->total_flight);
4848 #endif
4849 			asoc->total_flight = 0;
4850 		}
4851 		if (tp1->data) {
4852 			/* sa_ignore NO_NULL_CHK */
4853 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4854 			sctp_m_freem(tp1->data);
4855 			if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4856 				asoc->sent_queue_cnt_removeable--;
4857 			}
4858 		}
4859 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4860 			sctp_log_sack(asoc->last_acked_seq,
4861 			    cum_ack,
4862 			    tp1->rec.data.TSN_seq,
4863 			    0,
4864 			    0,
4865 			    SCTP_LOG_FREE_SENT);
4866 		}
4867 		tp1->data = NULL;
4868 		asoc->sent_queue_cnt--;
4869 		sctp_free_a_chunk(stcb, tp1);
4870 		wake_him++;
4871 		tp1 = tp2;
4872 	} while (tp1 != NULL);
4873 
4874 done_with_it:
4875 	/* sa_ignore NO_NULL_CHK */
4876 	if ((wake_him) && (stcb->sctp_socket)) {
4877 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4878 		struct socket *so;
4879 
4880 #endif
4881 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4882 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4883 			sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);
4884 		}
4885 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
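		/*
		 * Lock-order dance: hold a refcount so the assoc cannot be
		 * freed, drop the TCB lock, take the socket lock, then
		 * re-acquire the TCB lock before waking the writer.
		 */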
4886 		so = SCTP_INP_SO(stcb->sctp_ep);
4887 		atomic_add_int(&stcb->asoc.refcnt, 1);
4888 		SCTP_TCB_UNLOCK(stcb);
4889 		SCTP_SOCKET_LOCK(so, 1);
4890 		SCTP_TCB_LOCK(stcb);
4891 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4892 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4893 			/* assoc was freed while we were unlocked */
4894 			SCTP_SOCKET_UNLOCK(so, 1);
4895 			return;
4896 		}
4897 #endif
4898 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4899 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4900 		SCTP_SOCKET_UNLOCK(so, 1);
4901 #endif
4902 	} else {
4903 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4904 			sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK);
4905 		}
4906 	}
4907 
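	/*
	 * Exit RFC2582 fast recovery once the cum-ack reaches (or passes)
	 * the TSN recorded when recovery was entered.
	 */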
4908 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4909 		if (compare_with_wrap(asoc->last_acked_seq,
4910 		    asoc->fast_recovery_tsn, MAX_TSN) ||
4911 		    asoc->last_acked_seq == asoc->fast_recovery_tsn) {
4912 			/* Setup so we will exit RFC2582 fast recovery */
4913 			will_exit_fast_recovery = 1;
4914 		}
4915 	}
4916 	/*
4917 	 * Check for revoked fragments:
4918 	 *
4919 	 * - Previous SACK had no frags: we cannot have any revoked.
4920 	 * - Previous SACK had frags, and we now have frags (num_seg > 0):
4921 	 *   call sctp_check_for_revoked() to see if the peer revoked some.
4922 	 * - Previous SACK had frags, and now we have NONE: the peer revoked
4923 	 *   all ACKED fragments, since we had some before and now have none.
4924 	 */
4925 
4926 	if (num_seg)
4927 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4928 	else if (asoc->saw_sack_with_frags) {
4929 		int cnt_revoked = 0;
4930 
4931 		tp1 = TAILQ_FIRST(&asoc->sent_queue);
4932 		if (tp1 != NULL) {
4933 			/* Peer revoked all datagrams marked or acked */
4934 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4935 				if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4936 					tp1->sent = SCTP_DATAGRAM_SENT;
4937 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4938 						sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4939 						    tp1->whoTo->flight_size,
4940 						    tp1->book_size,
4941 						    (uintptr_t) tp1->whoTo,
4942 						    tp1->rec.data.TSN_seq);
4943 					}
4944 					sctp_flight_size_increase(tp1);
4945 					sctp_total_flight_increase(stcb, tp1);
4946 					tp1->rec.data.chunk_was_revoked = 1;
4947 					/*
4948 					 * To ensure that this increase in
4949 					 * flightsize, which is artificial,
4950 					 * does not throttle the sender, we
4951 					 * also increase the cwnd
4952 					 * artificially.
4953 					 */
4954 					tp1->whoTo->cwnd += tp1->book_size;
4955 					cnt_revoked++;
4956 				}
4957 			}
4958 			if (cnt_revoked) {
4959 				reneged_all = 1;
4960 			}
4961 		}
4962 		asoc->saw_sack_with_frags = 0;
4963 	}
4964 	if (num_seg || num_nr_seg)
4965 		asoc->saw_sack_with_frags = 1;
4966 	else
4967 		asoc->saw_sack_with_frags = 0;
4968 
4969 	/* JRS - Use the congestion control given in the CC module */
4970 	asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4971 
4972 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4973 		/* nothing left in-flight */
4974 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4975 			/* stop all timers */
4976 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4977 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4978 					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4979 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4980 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4981 				}
4982 			}
4983 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4984 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4985 			net->flight_size = 0;
4986 			net->partial_bytes_acked = 0;
4987 		}
4988 		asoc->total_flight = 0;
4989 		asoc->total_flight_count = 0;
4990 	}
4991 	/**********************************/
4992 	/* Now what about shutdown issues */
4993 	/**********************************/
4994 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4995 		/* nothing left on sendqueue.. consider done */
4996 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4997 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4998 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4999 		}
5000 		asoc->peers_rwnd = a_rwnd;
5001 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5002 			/* SWS sender side engages */
5003 			asoc->peers_rwnd = 0;
5004 		}
5005 		/* clean up */
5006 		if ((asoc->stream_queue_cnt == 1) &&
5007 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5008 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5009 		    (asoc->locked_on_sending)
5010 		    ) {
5011 			struct sctp_stream_queue_pending *sp;
5012 
5013 			/*
5014 			 * We may be in a state where everything got across but
5015 			 * we cannot write more due to a shutdown; we abort
5016 			 * since the user did not indicate EOR in this case.
5017 			 */
5018 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
5019 			    sctp_streamhead);
5020 			if ((sp) && (sp->length == 0)) {
5021 				asoc->locked_on_sending = NULL;
5022 				if (sp->msg_is_complete) {
5023 					asoc->stream_queue_cnt--;
5024 				} else {
5025 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
5026 					asoc->stream_queue_cnt--;
5027 				}
5028 			}
5029 		}
5030 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5031 		    (asoc->stream_queue_cnt == 0)) {
5032 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
5033 				/* Need to abort here */
5034 				struct mbuf *oper;
5035 
5036 		abort_out_now:
5037 				*abort_now = 1;
5038 				/* XXX */
5039 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
5040 				    0, M_DONTWAIT, 1, MT_DATA);
5041 				if (oper) {
5042 					struct sctp_paramhdr *ph;
5043 					uint32_t *ippp;
5044 
5045 					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5046 					    sizeof(uint32_t);
5047 					ph = mtod(oper, struct sctp_paramhdr *);
5048 					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
5049 					ph->param_length = htons(SCTP_BUF_LEN(oper));
5050 					ippp = (uint32_t *) (ph + 1);
5051 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
5052 				}
5053 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
5054 				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
5055 				return;
5056 			} else {
5057 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
5058 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5059 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5060 				}
5061 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
5062 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5063 				sctp_stop_timers_for_shutdown(stcb);
5064 				sctp_send_shutdown(stcb,
5065 				    stcb->asoc.primary_destination);
5066 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5067 				    stcb->sctp_ep, stcb, asoc->primary_destination);
5068 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5069 				    stcb->sctp_ep, stcb, asoc->primary_destination);
5070 			}
5071 			return;
5072 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5073 		    (asoc->stream_queue_cnt == 0)) {
5074 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
5075 				goto abort_out_now;
5076 			}
5077 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5078 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
5079 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5080 			sctp_send_shutdown_ack(stcb,
5081 			    stcb->asoc.primary_destination);
5082 
5083 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5084 			    stcb->sctp_ep, stcb, asoc->primary_destination);
5085 			return;
5086 		}
5087 	}
5088 	/*
5089 	 * Now here we are going to recycle net_ack for a different use...
5090 	 * HEADS UP.
5091 	 */
5092 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5093 		net->net_ack = 0;
5094 	}
5095 
5096 	/*
5097 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5098 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5099 	 * automatically ensure that.
5100 	 */
5101 	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && (cmt_dac_flag == 0)) {
5102 		this_sack_lowest_newack = cum_ack;
5103 	}
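	/* Strike the chunks that the gap reports show as missing. */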
5104 	if ((num_seg > 0) || (num_nr_seg > 0)) {
5105 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5106 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5107 	}
5108 	/* JRS - Use the congestion control given in the CC module */
5109 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5110 
5111 	/******************************************************************
5112 	 *  Here we do the stuff with ECN Nonce checking.
5113 	 *  We basically check to see if the nonce sum flag was incorrect
5114 	 *  or if resynchronization needs to be done. Also if we catch a
5115 	 *  misbehaving receiver we give him the kick.
5116 	 ******************************************************************/
5117 
5118 	if (asoc->ecn_nonce_allowed) {
5119 		if (asoc->nonce_sum_check) {
5120 			if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
5121 				if (asoc->nonce_wait_for_ecne == 0) {
5122 					struct sctp_tmit_chunk *lchk;
5123 
5124 					lchk = TAILQ_FIRST(&asoc->send_queue);
5125 					asoc->nonce_wait_for_ecne = 1;
5126 					if (lchk) {
5127 						asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
5128 					} else {
5129 						asoc->nonce_wait_tsn = asoc->sending_seq;
5130 					}
5131 				} else {
5132 					if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
5133 					    (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
5134 						/*
5135 						 * Misbehaving peer. We need
5136 						 * to react to this guy
5137 						 */
5138 						asoc->ecn_allowed = 0;
5139 						asoc->ecn_nonce_allowed = 0;
5140 					}
5141 				}
5142 			}
5143 		} else {
5144 			/* See if Resynchronization Possible */
5145 			if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
5146 				asoc->nonce_sum_check = 1;
5147 				/*
5148 				 * now we must calculate what the base is.
5149 				 * We do this from two things: the totals
5150 				 * for all segments gap-acked in the SACK
5151 				 * (stored in ecn_seg_sums), and the SACK's
5152 				 * nonce sum (in nonce_sum_flag). From these
5153 				 * we build a truth table to back-calculate
5154 				 * the new value of asoc->nonce_sum_expect_base:
5155 				 *
5156 				 * SACK-flag-Value   Seg-Sums   Base
5157 				 *        0             0        0
5158 				 *        1             0        1
5159 				 *        0             1        1
5160 				 *        1             1        0
5161 				 */
5162 				asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
5163 			}
5164 		}
5165 	}
5166 	/* Now are we exiting loss recovery ? */
5167 	if (will_exit_fast_recovery) {
5168 		/* Ok, we must exit fast recovery */
5169 		asoc->fast_retran_loss_recovery = 0;
5170 	}
5171 	if ((asoc->sat_t3_loss_recovery) &&
5172 	    ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
5173 	    MAX_TSN) ||
5174 	    (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
5175 		/* end satellite t3 loss recovery */
5176 		asoc->sat_t3_loss_recovery = 0;
5177 	}
5178 	/*
5179 	 * CMT Fast recovery
5180 	 */
5181 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5182 		if (net->will_exit_fast_recovery) {
5183 			/* Ok, we must exit fast recovery */
5184 			net->fast_retran_loss_recovery = 0;
5185 		}
5186 	}
5187 
5188 	/* Adjust and set the new rwnd value */
5189 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5190 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5191 		    asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5192 	}
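	/*
	 * The usable peer rwnd is the advertised a_rwnd less what is still
	 * in flight and a per-chunk overhead allowance
	 * (sctp_peer_chunk_oh per queued chunk).
	 */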
5193 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5194 	    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5195 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5196 		/* SWS sender side engages */
5197 		asoc->peers_rwnd = 0;
5198 	}
5199 	if (asoc->peers_rwnd > old_rwnd) {
5200 		win_probe_recovery = 1;
5201 	}
5202 	/*
5203 	 * Now we must setup so we have a timer up for anyone with
5204 	 * outstanding data.
5205 	 */
5206 	done_once = 0;
5207 again:
5208 	j = 0;
5209 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5210 		if (win_probe_recovery && (net->window_probe)) {
5211 			win_probe_recovered = 1;
5212 			/*-
5213 			 * Find the first chunk that was used for a
5214 			 * window probe and clear the event. Put
5215 			 * it back into the send queue as if it had
5216 			 * not been sent.
5217 			 */
5218 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5219 				if (tp1->window_probe) {
5220 					sctp_window_probe_recovery(stcb, asoc, net, tp1);
5221 					break;
5222 				}
5223 			}
5224 		}
5225 		if (net->flight_size) {
5226 			j++;
5227 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5228 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5229 				    stcb->sctp_ep, stcb, net);
5230 			}
5231 			if (net->window_probe) {
5232 				net->window_probe = 0;
5233 			}
5234 		} else {
5235 			if (net->window_probe) {
5236 				/*
5237 				 * For window probes we must ensure a timer
5238 				 * is still running there
5239 				 */
5240 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5241 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5242 					    stcb->sctp_ep, stcb, net);
5243 
5244 				}
5245 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5246 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5247 				    stcb, net,
5248 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
5249 			}
5250 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
5251 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
5252 					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
5253 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
5254 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
5255 				}
5256 			}
5257 		}
5258 	}
5259 	if ((j == 0) &&
5260 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5261 	    (asoc->sent_queue_retran_cnt == 0) &&
5262 	    (win_probe_recovered == 0) &&
5263 	    (done_once == 0)) {
5264 		/*
5265 		 * huh, this should not happen unless all packets are
5266 		 * PR-SCTP and marked to skip of course.
5267 		 */
5268 		if (sctp_fs_audit(asoc)) {
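			/*
			 * The audit found the flight accounting to be
			 * inconsistent; zero it and rebuild it from the
			 * sent queue.
			 */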
5269 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5270 				net->flight_size = 0;
5271 			}
5272 			asoc->total_flight = 0;
5273 			asoc->total_flight_count = 0;
5274 			asoc->sent_queue_retran_cnt = 0;
5275 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5276 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5277 					sctp_flight_size_increase(tp1);
5278 					sctp_total_flight_increase(stcb, tp1);
5279 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5280 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5281 				}
5282 			}
5283 		}
5284 		done_once = 1;
5285 		goto again;
5286 	}
5287 	/*********************************************/
5288 	/* Here we perform PR-SCTP procedures        */
5289 	/* (section 4.2)                             */
5290 	/*********************************************/
5291 	/* C1. update advancedPeerAckPoint */
5292 	if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
5293 		asoc->advanced_peer_ack_point = cum_ack;
5294 	}
5295 	/* C2. try to further move advancedPeerAckPoint ahead */
5296 	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
5297 		struct sctp_tmit_chunk *lchk;
5298 		uint32_t old_adv_peer_ack_point;
5299 
5300 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5301 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5302 		/* C3. See if we need to send a Fwd-TSN */
5303 		if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
5304 		    MAX_TSN)) {
5305 			/*
5306 			 * ISSUE with ECN, see FWD-TSN processing for notes
5307 			 * on issues that will occur when the ECN NONCE
5308 			 * stuff is put into SCTP for cross checking.
5309 			 */
5310 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5311 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5312 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5313 				    old_adv_peer_ack_point);
5314 			}
5315 			if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
5316 			    MAX_TSN)) {
5317 				send_forward_tsn(stcb, asoc);
5318 				/*
5319 				 * ECN Nonce: Disable Nonce Sum check when
5320 				 * FWD TSN is sent and store resync tsn
5321 				 */
5322 				asoc->nonce_sum_check = 0;
5323 				asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
5324 			} else if (lchk) {
5325 				/* try to FR fwd-tsn's that get lost too */
5326 				lchk->rec.data.fwd_tsn_cnt++;
5327 				if (lchk->rec.data.fwd_tsn_cnt > 3) {
5328 					send_forward_tsn(stcb, asoc);
5329 					lchk->rec.data.fwd_tsn_cnt = 0;
5330 				}
5331 			}
5332 		}
5333 		if (lchk) {
5334 			/* Assure a timer is up */
5335 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5336 			    stcb->sctp_ep, stcb, lchk->whoTo);
5337 		}
5338 	}
5339 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5340 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5341 		    a_rwnd,
5342 		    stcb->asoc.peers_rwnd,
5343 		    stcb->asoc.total_flight,
5344 		    stcb->asoc.total_output_queue_size);
5345 	}
5346 }
5347 
5348 void
5349 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp,
5350     struct sctp_nets *netp, int *abort_flag)
5351 {
5352 	/* Copy cum-ack */
5353 	uint32_t cum_ack, a_rwnd;
5354 
5355 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5356 	/* Arrange so a_rwnd does NOT change */
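	/*
	 * (peers_rwnd is recomputed as a_rwnd minus what is in flight, so
	 * adding total_flight back in here leaves it unchanged.)
	 */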
5357 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5358 
5359 	/* Now call the express sack handling */
5360 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, 0, abort_flag);
5361 }
5362 
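/*
 * Deliver anything on a stream's re-order queue that a FWD-TSN has made
 * deliverable: first everything at or before the skipped sequence number,
 * then any messages that are now in order behind it.
 */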
5363 static void
5364 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5365     struct sctp_stream_in *strmin)
5366 {
5367 	struct sctp_queued_to_read *ctl, *nctl;
5368 	struct sctp_association *asoc;
5369 	uint16_t tt;
5370 
5371 	asoc = &stcb->asoc;
5372 	tt = strmin->last_sequence_delivered;
5373 	/*
5374 	 * First deliver anything prior to and including the stream
5375 	 * sequence number that came in.
5376 	 */
5377 	ctl = TAILQ_FIRST(&strmin->inqueue);
5378 	while (ctl) {
5379 		nctl = TAILQ_NEXT(ctl, next);
5380 		if (compare_with_wrap(tt, ctl->sinfo_ssn, MAX_SEQ) ||
5381 		    (tt == ctl->sinfo_ssn)) {
5382 			/* this is deliverable now */
5383 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5384 			/* subtract pending on streams */
5385 			asoc->size_on_all_streams -= ctl->length;
5386 			sctp_ucount_decr(asoc->cnt_on_all_streams);
5387 			/* deliver it to at least the delivery-q */
5388 			if (stcb->sctp_socket) {
5389 				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5390 				sctp_add_to_readq(stcb->sctp_ep, stcb,
5391 				    ctl,
5392 				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5393 			}
5394 		} else {
5395 			/* no more delivery now. */
5396 			break;
5397 		}
5398 		ctl = nctl;
5399 	}
5400 	/*
5401 	 * now we must deliver things in the queue the normal way, if any
5402 	 * are now ready.
5403 	 */
5404 	tt = strmin->last_sequence_delivered + 1;
5405 	ctl = TAILQ_FIRST(&strmin->inqueue);
5406 	while (ctl) {
5407 		nctl = TAILQ_NEXT(ctl, next);
5408 		if (tt == ctl->sinfo_ssn) {
5409 			/* this is deliverable now */
5410 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5411 			/* subtract pending on streams */
5412 			asoc->size_on_all_streams -= ctl->length;
5413 			sctp_ucount_decr(asoc->cnt_on_all_streams);
5414 			/* deliver it to at least the delivery-q */
5415 			strmin->last_sequence_delivered = ctl->sinfo_ssn;
5416 			if (stcb->sctp_socket) {
5417 				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5418 				sctp_add_to_readq(stcb->sctp_ep, stcb,
5419 				    ctl,
5420 				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5421 
5422 			}
5423 			tt = strmin->last_sequence_delivered + 1;
5424 		} else {
5425 			break;
5426 		}
5427 		ctl = nctl;
5428 	}
5429 }
5430 
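/*
 * Toss any fragments on the reassembly queue that belong to the given
 * stream/sequence pair being skipped by a FWD-TSN.
 */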
5431 static void
5432 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5433     struct sctp_association *asoc,
5434     uint16_t stream, uint16_t seq)
5435 {
5436 	struct sctp_tmit_chunk *chk, *at;
5437 
5438 	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5439 		/* For each one on here see if we need to toss it */
5440 		/*
5441 		 * For now large messages held on the reasmqueue that are
5442 		 * complete will be tossed too. We could in theory do more
5443 		 * work to spin through and stop after dumping one msg aka
5444 		 * seeing the start of a new msg at the head, and call the
5445 		 * delivery function... to see if it can be delivered... But
5446 		 * for now we just dump everything on the queue.
5447 		 */
5448 		chk = TAILQ_FIRST(&asoc->reasmqueue);
5449 		while (chk) {
5450 			at = TAILQ_NEXT(chk, sctp_next);
5451 			/*
5452 			 * Do not toss it if on a different stream or marked
5453 			 * for unordered delivery, in which case the stream
5454 			 * sequence number has no meaning.
5455 			 */
5456 			if ((chk->rec.data.stream_number != stream) ||
5457 			    ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
5458 				chk = at;
5459 				continue;
5460 			}
5461 			if (chk->rec.data.stream_seq == seq) {
5462 				/* It needs to be tossed */
5463 				TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5464 				if (compare_with_wrap(chk->rec.data.TSN_seq,
5465 				    asoc->tsn_last_delivered, MAX_TSN)) {
5466 					asoc->tsn_last_delivered =
5467 					    chk->rec.data.TSN_seq;
5468 					asoc->str_of_pdapi =
5469 					    chk->rec.data.stream_number;
5470 					asoc->ssn_of_pdapi =
5471 					    chk->rec.data.stream_seq;
5472 					asoc->fragment_flags =
5473 					    chk->rec.data.rcv_flags;
5474 				}
5475 				asoc->size_on_reasm_queue -= chk->send_size;
5476 				sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5477 
5478 				/* Clear up any stream problem */
5479 				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
5480 				    SCTP_DATA_UNORDERED &&
5481 				    (compare_with_wrap(chk->rec.data.stream_seq,
5482 				    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
5483 				    MAX_SEQ))) {
5484 					/*
5485 					 * We must advance this stream's
5486 					 * sequence number if the chunk
5487 					 * being skipped is not unordered.
5488 					 * There is a chance that if the
5489 					 * peer does not include the last
5490 					 * fragment in its FWD-TSN we WILL
5491 					 * have a problem here, since we
5492 					 * would have a partial chunk in the
5493 					 * queue that may not be
5494 					 * deliverable. Also, if a partial
5495 					 * delivery API has started, the
5496 					 * user may get a partial chunk. The
5497 					 * next read would return a new
5498 					 * chunk... really ugly, but I see
5499 					 * no way around it! Maybe a notify??
5500 					 */
5501 					asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
5502 					    chk->rec.data.stream_seq;
5503 				}
5504 				if (chk->data) {
5505 					sctp_m_freem(chk->data);
5506 					chk->data = NULL;
5507 				}
5508 				sctp_free_a_chunk(stcb, chk);
5509 			} else if (compare_with_wrap(chk->rec.data.stream_seq, seq, MAX_SEQ)) {
5510 				/*
5511 				 * If the stream_seq is greater than the purging
5512 				 * one, we are done
5513 				 */
5514 				break;
5515 			}
5516 			chk = at;
5517 		}
5518 	}
5519 }
5520 
5521 
5522 void
5523 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5524     struct sctp_forward_tsn_chunk *fwd,
5525     int *abort_flag, struct mbuf *m, int offset)
5526 {
5527 	/*
5528 	 * ISSUES that MUST be fixed for ECN! When we are the sender of the
5529 	 * forward TSN, when the SACK comes back that acknowledges the
5530 	 * FWD-TSN we must reset the NONCE sum to match correctly. This will
5531 	 * get quite tricky since we may have sent more data intervening
5532 	 * and must carefully account for what the SACK says on the nonce
5533 	 * and any gaps that are reported. This work will NOT be done here,
5534 	 * but I note it here since it is really related to PR-SCTP and
5535 	 * FWD-TSN's
5536 	 */
5537 
5538 	/* The pr-sctp fwd tsn */
5539 	/*
5540 	 * here we will perform all the data receiver side steps for
5541 	 * Here we will perform all the data-receiver-side steps for
5542 	 * processing FwdTSN, as required by the PR-SCTP draft.
5543 	 *
5544 	 * Assume we get FwdTSN(x):
5545 	 * 1) update local cumTSN to x
5546 	 * 2) try to further advance cumTSN to x + others we have
5547 	 * 3) examine and update the re-ordering queue on pr-in-streams
5548 	 * 4) clean up the re-assembly queue
5549 	 * 5) send a SACK to report where we are
5550 	struct sctp_association *asoc;
5551 	uint32_t new_cum_tsn, gap;
5552 	unsigned int i, fwd_sz, cumack_set_flag, m_size;
5553 	uint32_t str_seq;
5554 	struct sctp_stream_in *strm;
5555 	struct sctp_tmit_chunk *chk, *at;
5556 	struct sctp_queued_to_read *ctl, *sv;
5557 
5558 	cumack_set_flag = 0;
5559 	asoc = &stcb->asoc;
5560 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5561 		SCTPDBG(SCTP_DEBUG_INDATA1,
5562 		    "Bad size, too small or too big fwd-tsn\n");
5563 		return;
5564 	}
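	/* The mapping array size in bits. */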
5565 	m_size = (stcb->asoc.mapping_array_size << 3);
5566 	/*************************************************************/
5567 	/* 1. Here we update local cumTSN and shift the bitmap array */
5568 	/*************************************************************/
5569 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5570 
5571 	if (compare_with_wrap(asoc->cumulative_tsn, new_cum_tsn, MAX_TSN) ||
5572 	    asoc->cumulative_tsn == new_cum_tsn) {
5573 		/* Already got there ... */
5574 		return;
5575 	}
5576 	/*
5577 	 * now we know the new TSN is more advanced, let's find the actual
5578 	 * gap
5579 	 */
5580 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5581 	asoc->cumulative_tsn = new_cum_tsn;
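	/*
	 * If the gap lies beyond the mapping array, either the peer is
	 * bogus (the gap exceeds the rwnd we ever offered) or we can
	 * simply reset both mapping arrays to start past the new cum-ack.
	 */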
5582 	if (gap >= m_size) {
5583 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5584 			struct mbuf *oper;
5585 
5586 			/*
5587 			 * out of range (of single byte chunks in the rwnd I
5588 			 * give out). This must be an attacker.
5589 			 */
5590 			*abort_flag = 1;
5591 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
5592 			    0, M_DONTWAIT, 1, MT_DATA);
5593 			if (oper) {
5594 				struct sctp_paramhdr *ph;
5595 				uint32_t *ippp;
5596 
5597 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5598 				    (sizeof(uint32_t) * 3);
5599 				ph = mtod(oper, struct sctp_paramhdr *);
5600 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5601 				ph->param_length = htons(SCTP_BUF_LEN(oper));
5602 				ippp = (uint32_t *) (ph + 1);
5603 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
5604 				ippp++;
5605 				*ippp = asoc->highest_tsn_inside_map;
5606 				ippp++;
5607 				*ippp = new_cum_tsn;
5608 			}
5609 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5610 			sctp_abort_an_association(stcb->sctp_ep, stcb,
5611 			    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
5612 			return;
5613 		}
5614 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5615 
5616 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5617 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5618 		asoc->highest_tsn_inside_map = new_cum_tsn;
5619 
5620 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5621 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5622 
5623 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5624 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5625 		}
5626 		asoc->last_echo_tsn = asoc->highest_tsn_inside_map;
5627 	} else {
5628 		SCTP_TCB_LOCK_ASSERT(stcb);
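		/*
		 * Mark every TSN up to the new cum-ack as present in the
		 * nr-mapping array; they can no longer be revoked.
		 */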
5629 		for (i = 0; i <= gap; i++) {
5630 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5631 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5632 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5633 				if (compare_with_wrap(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
5634 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5635 				}
5636 			}
5637 		}
5638 	}
5639 	/*************************************************************/
5640 	/* 2. Clear up re-assembly queue                             */
5641 	/*************************************************************/
5642 	/*
5643 	 * First service it if the pd-api is up, just in case we can progress it
5644 	 * forward
5645 	 */
5646 	if (asoc->fragmented_delivery_inprogress) {
5647 		sctp_service_reassembly(stcb, asoc);
5648 	}
5649 	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5650 		/* For each one on here see if we need to toss it */
5651 		/*
5652 		 * For now large messages held on the reasmqueue that are
5653 		 * complete will be tossed too. We could in theory do more
5654 		 * work to spin through and stop after dumping one msg aka
5655 		 * seeing the start of a new msg at the head, and call the
5656 		 * delivery function... to see if it can be delivered... But
5657 		 * for now we just dump everything on the queue.
5658 		 */
5659 		chk = TAILQ_FIRST(&asoc->reasmqueue);
5660 		while (chk) {
5661 			at = TAILQ_NEXT(chk, sctp_next);
5662 			if ((compare_with_wrap(new_cum_tsn,
5663 			    chk->rec.data.TSN_seq, MAX_TSN)) ||
5664 			    (new_cum_tsn == chk->rec.data.TSN_seq)) {
5665 				/* It needs to be tossed */
5666 				TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5667 				if (compare_with_wrap(chk->rec.data.TSN_seq,
5668 				    asoc->tsn_last_delivered, MAX_TSN)) {
5669 					asoc->tsn_last_delivered =
5670 					    chk->rec.data.TSN_seq;
5671 					asoc->str_of_pdapi =
5672 					    chk->rec.data.stream_number;
5673 					asoc->ssn_of_pdapi =
5674 					    chk->rec.data.stream_seq;
5675 					asoc->fragment_flags =
5676 					    chk->rec.data.rcv_flags;
5677 				}
5678 				asoc->size_on_reasm_queue -= chk->send_size;
5679 				sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5680 
5681 				/* Clear up any stream problem */
5682 				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
5683 				    SCTP_DATA_UNORDERED &&
5684 				    (compare_with_wrap(chk->rec.data.stream_seq,
5685 				    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
5686 				    MAX_SEQ))) {
5687 					/*
5688 					 * We must advance this stream's
5689 					 * sequence number if the chunk
5690 					 * being skipped is not unordered.
5691 					 * There is a chance that if the
5692 					 * peer does not include the last
5693 					 * fragment in its FWD-TSN we WILL
5694 					 * have a problem here, since we
5695 					 * would have a partial chunk in the
5696 					 * queue that may not be
5697 					 * deliverable. Also, if a partial
5698 					 * delivery API has started, the
5699 					 * user may get a partial chunk. The
5700 					 * next read would return a new
5701 					 * chunk... really ugly, but I see
5702 					 * no way around it! Maybe a notify??
5703 					 */
5704 					asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
5705 					    chk->rec.data.stream_seq;
5706 				}
5707 				if (chk->data) {
5708 					sctp_m_freem(chk->data);
5709 					chk->data = NULL;
5710 				}
5711 				sctp_free_a_chunk(stcb, chk);
5712 			} else {
5713 				/*
5714 				 * Ok we have gone beyond the end of the
5715 				 * fwd-tsn's mark.
5716 				 */
5717 				break;
5718 			}
5719 			chk = at;
5720 		}
5721 	}
5722 	/*******************************************************/
5723 	/* 3. Update the PR-stream re-ordering queues and fix  */
5724 	/* delivery issues as needed.                           */
5725 	/*******************************************************/
5726 	fwd_sz -= sizeof(*fwd);
5727 	if (m && fwd_sz) {
5728 		/* New method. */
5729 		unsigned int num_str;
5730 		struct sctp_strseq *stseq, strseqbuf;
5731 
5732 		offset += sizeof(*fwd);
5733 
5734 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5735 		num_str = fwd_sz / sizeof(struct sctp_strseq);
5736 		for (i = 0; i < num_str; i++) {
5737 			uint16_t st;
5738 
5739 			stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5740 			    sizeof(struct sctp_strseq),
5741 			    (uint8_t *) & strseqbuf);
5742 			offset += sizeof(struct sctp_strseq);
5743 			if (stseq == NULL) {
5744 				break;
5745 			}
5746 			/* Convert */
5747 			st = ntohs(stseq->stream);
5748 			stseq->stream = st;
5749 			st = ntohs(stseq->sequence);
5750 			stseq->sequence = st;
5751 
5752 			/* now process */
5753 
5754 			/*
5755 			 * Ok we now look for the stream/seq on the read
5756 			 * queue where it is not all delivered. If we find it
5757 			 * we transmute the read entry into a PDI_ABORTED.
5758 			 */
5759 			if (stseq->stream >= asoc->streamincnt) {
5760 				/* screwed up streams, stop!  */
5761 				break;
5762 			}
5763 			if ((asoc->str_of_pdapi == stseq->stream) &&
5764 			    (asoc->ssn_of_pdapi == stseq->sequence)) {
5765 				/*
5766 				 * If this is the one we were partially
5767 				 * delivering now then we no longer are.
5768 				 * Note this will change with the reassembly
5769 				 * re-write.
5770 				 */
5771 				asoc->fragmented_delivery_inprogress = 0;
5772 			}
5773 			sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
5774 			TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5775 				if ((ctl->sinfo_stream == stseq->stream) &&
5776 				    (ctl->sinfo_ssn == stseq->sequence)) {
5777 					str_seq = (stseq->stream << 16) | stseq->sequence;
5778 					ctl->end_added = 1;
5779 					ctl->pdapi_aborted = 1;
5780 					sv = stcb->asoc.control_pdapi;
5781 					stcb->asoc.control_pdapi = ctl;
5782 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5783 					    stcb,
5784 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5785 					    (void *)&str_seq,
5786 					    SCTP_SO_NOT_LOCKED);
5787 					stcb->asoc.control_pdapi = sv;
5788 					break;
5789 				} else if ((ctl->sinfo_stream == stseq->stream) &&
5790 				    (compare_with_wrap(ctl->sinfo_ssn, stseq->sequence, MAX_SEQ))) {
5791 					/* We are past our victim SSN */
5792 					break;
5793 				}
5794 			}
5795 			strm = &asoc->strmin[stseq->stream];
5796 			if (compare_with_wrap(stseq->sequence,
5797 			    strm->last_sequence_delivered, MAX_SEQ)) {
5798 				/* Update the sequence number */
5799 				strm->last_sequence_delivered =
5800 				    stseq->sequence;
5801 			}
5802 			/* now kick the stream the new way */
5803 			/* sa_ignore NO_NULL_CHK */
5804 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5805 		}
5806 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5807 	}
5808 	/*
5809 	 * Now slide the mapping arrays forward.
5810 	 */
5811 	sctp_slide_mapping_arrays(stcb);
5812 
5813 	if (TAILQ_FIRST(&asoc->reasmqueue)) {
5814 		/* now lets kick out and check for more fragmented delivery */
5815 		/* sa_ignore NO_NULL_CHK */
5816 		sctp_deliver_reasm_check(stcb, &stcb->asoc);
5817 	}
5818 }
5819